LCOV - code coverage report
Current view: top level - pageserver/src/tenant - timeline.rs (source / functions) Coverage Total Hit
Test: 42f947419473a288706e86ecdf7c2863d760d5d7.info Lines: 61.7 % 3500 2158
Test Date: 2024-08-02 21:34:27 Functions: 58.6 % 338 198

            Line data    Source code
       1              : pub(crate) mod analysis;
       2              : pub(crate) mod compaction;
       3              : pub mod delete;
       4              : pub(crate) mod detach_ancestor;
       5              : mod eviction_task;
       6              : pub(crate) mod handle;
       7              : mod init;
       8              : pub mod layer_manager;
       9              : pub(crate) mod logical_size;
      10              : pub mod span;
      11              : pub mod uninit;
      12              : mod walreceiver;
      13              : 
      14              : use anyhow::{anyhow, bail, ensure, Context, Result};
      15              : use arc_swap::ArcSwap;
      16              : use bytes::Bytes;
      17              : use camino::Utf8Path;
      18              : use chrono::{DateTime, Utc};
      19              : use enumset::EnumSet;
      20              : use fail::fail_point;
      21              : use handle::ShardTimelineId;
      22              : use once_cell::sync::Lazy;
      23              : use pageserver_api::{
      24              :     key::{
      25              :         AUX_FILES_KEY, KEY_SIZE, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX,
      26              :         NON_INHERITED_RANGE, NON_INHERITED_SPARSE_RANGE,
      27              :     },
      28              :     keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning},
      29              :     models::{
      30              :         AtomicAuxFilePolicy, AuxFilePolicy, CompactionAlgorithm, CompactionAlgorithmSettings,
      31              :         DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy,
      32              :         InMemoryLayerInfo, LayerMapInfo, LsnLease, TimelineState,
      33              :     },
      34              :     reltag::BlockNumber,
      35              :     shard::{ShardIdentity, ShardNumber, TenantShardId},
      36              : };
      37              : use rand::Rng;
      38              : use serde_with::serde_as;
      39              : use storage_broker::BrokerClientChannel;
      40              : use tokio::{
      41              :     runtime::Handle,
      42              :     sync::{oneshot, watch},
      43              : };
      44              : use tokio_util::sync::CancellationToken;
      45              : use tracing::*;
      46              : use utils::{
      47              :     bin_ser::BeSer,
      48              :     fs_ext, pausable_failpoint,
      49              :     sync::gate::{Gate, GateGuard},
      50              :     vec_map::VecMap,
      51              : };
      52              : 
      53              : use std::pin::pin;
      54              : use std::sync::atomic::Ordering as AtomicOrdering;
      55              : use std::sync::{Arc, Mutex, RwLock, Weak};
      56              : use std::time::{Duration, Instant, SystemTime};
      57              : use std::{
      58              :     array,
      59              :     collections::{BTreeMap, HashMap, HashSet},
      60              :     sync::atomic::AtomicU64,
      61              : };
      62              : use std::{
      63              :     cmp::{max, min},
      64              :     ops::ControlFlow,
      65              : };
      66              : use std::{
      67              :     collections::btree_map::Entry,
      68              :     ops::{Deref, Range},
      69              : };
      70              : 
      71              : use crate::{
      72              :     aux_file::AuxFileSizeEstimator,
      73              :     tenant::{
      74              :         config::defaults::DEFAULT_PITR_INTERVAL,
      75              :         layer_map::{LayerMap, SearchResult},
      76              :         metadata::TimelineMetadata,
      77              :         storage_layer::PersistentLayerDesc,
      78              :     },
      79              :     walredo,
      80              : };
      81              : use crate::{
      82              :     context::{DownloadBehavior, RequestContext},
      83              :     disk_usage_eviction_task::DiskUsageEvictionInfo,
      84              :     pgdatadir_mapping::CollectKeySpaceError,
      85              : };
      86              : use crate::{
      87              :     disk_usage_eviction_task::finite_f32,
      88              :     tenant::storage_layer::{
      89              :         AsLayerDesc, DeltaLayerWriter, EvictionError, ImageLayerWriter, InMemoryLayer, Layer,
      90              :         LayerAccessStatsReset, LayerName, ResidentLayer, ValueReconstructResult,
      91              :         ValueReconstructState, ValuesReconstructState,
      92              :     },
      93              : };
      94              : use crate::{
      95              :     disk_usage_eviction_task::EvictionCandidate, tenant::storage_layer::delta_layer::DeltaEntry,
      96              : };
      97              : use crate::{
      98              :     l0_flush::{self, L0FlushGlobalState},
      99              :     metrics::GetKind,
     100              : };
     101              : use crate::{
     102              :     metrics::ScanLatencyOngoingRecording, tenant::timeline::logical_size::CurrentLogicalSize,
     103              : };
     104              : use crate::{pgdatadir_mapping::LsnForTimestamp, tenant::tasks::BackgroundLoopKind};
     105              : use crate::{pgdatadir_mapping::MAX_AUX_FILE_V2_DELTAS, tenant::storage_layer::PersistentLayerKey};
     106              : use crate::{
     107              :     pgdatadir_mapping::{AuxFilesDirectory, DirectoryKind},
     108              :     virtual_file::{MaybeFatalIo, VirtualFile},
     109              : };
     110              : 
     111              : use crate::config::PageServerConf;
     112              : use crate::keyspace::{KeyPartitioning, KeySpace};
     113              : use crate::metrics::TimelineMetrics;
     114              : use crate::pgdatadir_mapping::CalculateLogicalSizeError;
     115              : use crate::tenant::config::TenantConfOpt;
     116              : use pageserver_api::reltag::RelTag;
     117              : use pageserver_api::shard::ShardIndex;
     118              : 
     119              : use postgres_connection::PgConnectionConfig;
     120              : use postgres_ffi::to_pg_timestamp;
     121              : use utils::{
     122              :     completion,
     123              :     generation::Generation,
     124              :     id::TimelineId,
     125              :     lsn::{AtomicLsn, Lsn, RecordLsn},
     126              :     seqwait::SeqWait,
     127              :     simple_rcu::{Rcu, RcuReadGuard},
     128              : };
     129              : 
     130              : use crate::repository::GcResult;
     131              : use crate::repository::{Key, Value};
     132              : use crate::task_mgr;
     133              : use crate::task_mgr::TaskKind;
     134              : use crate::ZERO_PAGE;
     135              : 
     136              : use self::delete::DeleteTimelineFlow;
     137              : pub(super) use self::eviction_task::EvictionTaskTenantState;
     138              : use self::eviction_task::EvictionTaskTimelineState;
     139              : use self::layer_manager::LayerManager;
     140              : use self::logical_size::LogicalSize;
     141              : use self::walreceiver::{WalReceiver, WalReceiverConf};
     142              : 
     143              : use super::{config::TenantConf, upload_queue::NotInitialized};
     144              : use super::{debug_assert_current_span_has_tenant_and_timeline_id, AttachedTenantConf};
     145              : use super::{remote_timeline_client::index::IndexPart, storage_layer::LayerFringe};
     146              : use super::{
     147              :     remote_timeline_client::RemoteTimelineClient, remote_timeline_client::WaitCompletionError,
     148              :     storage_layer::ReadableLayer,
     149              : };
     150              : use super::{
     151              :     secondary::heatmap::{HeatMapLayer, HeatMapTimeline},
     152              :     GcError,
     153              : };
     154              : 
     155              : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
     156              : pub(crate) enum FlushLoopState {
     157              :     NotStarted,
     158              :     Running {
     159              :         #[cfg(test)]
     160              :         expect_initdb_optimization: bool,
     161              :         #[cfg(test)]
     162              :         initdb_optimization_count: usize,
     163              :     },
     164              :     Exited,
     165              : }
     166              : 
     167              : #[derive(Debug, Copy, Clone, PartialEq, Eq)]
     168              : pub enum ImageLayerCreationMode {
     169              :     /// Try to create image layers based on `time_for_new_image_layer`. Used in compaction code path.
     170              :     Try,
     171              :     /// Force creating the image layers if possible. For now, no image layers will be created
     172              :     /// for metadata keys. Used in compaction code path with force flag enabled.
     173              :     Force,
     174              :     /// Initial ingestion of the data, and no data should be dropped in this function. This
     175              :     /// means that no metadata keys should be included in the partitions. Used in flush frozen layer
     176              :     /// code path.
     177              :     Initial,
     178              : }
     179              : 
     180              : impl std::fmt::Display for ImageLayerCreationMode {
     181          522 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     182          522 :         write!(f, "{:?}", self)
     183          522 :     }
     184              : }
     185              : 
     186              : /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things.
     187              : /// Can be removed after all refactors are done.
     188           28 : fn drop_rlock<T>(rlock: tokio::sync::RwLockReadGuard<T>) {
     189           28 :     drop(rlock)
     190           28 : }
     191              : 
     192              : /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things.
     193              : /// Can be removed after all refactors are done.
     194          550 : fn drop_wlock<T>(rlock: tokio::sync::RwLockWriteGuard<'_, T>) {
     195          550 :     drop(rlock)
     196          550 : }
     197              : 
     198              : /// The outward-facing resources required to build a Timeline
     199              : pub struct TimelineResources {
     200              :     pub remote_client: RemoteTimelineClient,
     201              :     pub timeline_get_throttle: Arc<
     202              :         crate::tenant::throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>,
     203              :     >,
     204              :     pub l0_flush_global_state: l0_flush::L0FlushGlobalState,
     205              : }
     206              : 
     207              : pub(crate) struct AuxFilesState {
     208              :     pub(crate) dir: Option<AuxFilesDirectory>,
     209              :     pub(crate) n_deltas: usize,
     210              : }
     211              : 
     212              : /// The relation size cache caches relation sizes at the end of the timeline. It speeds up WAL
     213              : /// ingestion considerably, because WAL ingestion needs to check on most records if the record
     214              : /// implicitly extends the relation.  At startup, `complete_as_of` is initialized to the current end
     215              : /// of the timeline (disk_consistent_lsn).  It's used on reads of relation sizes to check if the
     216              : /// value can be used to also update the cache, see [`Timeline::update_cached_rel_size`].
     217              : pub(crate) struct RelSizeCache {
     218              :     pub(crate) complete_as_of: Lsn,
     219              :     pub(crate) map: HashMap<RelTag, (Lsn, BlockNumber)>,
     220              : }
     221              : 
     222              : pub struct Timeline {
     223              :     conf: &'static PageServerConf,
     224              :     tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
     225              : 
     226              :     myself: Weak<Self>,
     227              : 
     228              :     pub(crate) tenant_shard_id: TenantShardId,
     229              :     pub timeline_id: TimelineId,
     230              : 
     231              :     /// The generation of the tenant that instantiated us: this is used for safety when writing remote objects.
     232              :     /// Never changes for the lifetime of this [`Timeline`] object.
     233              :     ///
     234              :     /// This duplicates the generation stored in LocationConf, but that structure is mutable:
     235              :     /// this copy enforces the invariant that generatio doesn't change during a Tenant's lifetime.
     236              :     pub(crate) generation: Generation,
     237              : 
     238              :     /// The detailed sharding information from our parent Tenant.  This enables us to map keys
     239              :     /// to shards, and is constant through the lifetime of this Timeline.
     240              :     shard_identity: ShardIdentity,
     241              : 
     242              :     pub pg_version: u32,
     243              : 
     244              :     /// The tuple has two elements.
     245              :     /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote).
     246              :     /// 2. `LayerMap`, the acceleration data structure for `get_reconstruct_data`.
     247              :     ///
     248              :     /// `LayerMap` maps out the `(PAGE,LSN) / (KEY,LSN)` space, which is composed of `(KeyRange, LsnRange)` rectangles.
     249              :     /// We describe these rectangles through the `PersistentLayerDesc` struct.
     250              :     ///
     251              :     /// When we want to reconstruct a page, we first find the `PersistentLayerDesc`'s that we need for page reconstruction,
     252              :     /// using `LayerMap`. Then, we use `LayerFileManager` to get the `PersistentLayer`'s that correspond to the
     253              :     /// `PersistentLayerDesc`'s.
     254              :     ///
     255              :     /// Hence, it's important to keep things coherent. The `LayerFileManager` must always have an entry for all
     256              :     /// `PersistentLayerDesc`'s in the `LayerMap`. If it doesn't, `LayerFileManager::get_from_desc` will panic at
     257              :     /// runtime, e.g., during page reconstruction.
     258              :     ///
     259              :     /// In the future, we'll be able to split up the tuple of LayerMap and `LayerFileManager`,
     260              :     /// so that e.g. on-demand-download/eviction, and layer spreading, can operate just on `LayerFileManager`.
     261              :     pub(crate) layers: tokio::sync::RwLock<LayerManager>,
     262              : 
     263              :     last_freeze_at: AtomicLsn,
     264              :     // Atomic would be more appropriate here.
     265              :     last_freeze_ts: RwLock<Instant>,
     266              : 
     267              :     pub(crate) standby_horizon: AtomicLsn,
     268              : 
     269              :     // WAL redo manager. `None` only for broken tenants.
     270              :     walredo_mgr: Option<Arc<super::WalRedoManager>>,
     271              : 
     272              :     /// Remote storage client.
     273              :     /// See [`remote_timeline_client`](super::remote_timeline_client) module comment for details.
     274              :     pub remote_client: Arc<RemoteTimelineClient>,
     275              : 
     276              :     // What page versions do we hold in the repository? If we get a
     277              :     // request > last_record_lsn, we need to wait until we receive all
     278              :     // the WAL up to the request. The SeqWait provides functions for
     279              :     // that. TODO: If we get a request for an old LSN, such that the
     280              :     // versions have already been garbage collected away, we should
     281              :     // throw an error, but we don't track that currently.
     282              :     //
     283              :     // last_record_lsn.load().last points to the end of last processed WAL record.
     284              :     //
     285              :     // We also remember the starting point of the previous record in
     286              :     // 'last_record_lsn.load().prev'. It's used to set the xl_prev pointer of the
     287              :     // first WAL record when the node is started up. But here, we just
     288              :     // keep track of it.
     289              :     last_record_lsn: SeqWait<RecordLsn, Lsn>,
     290              : 
     291              :     // All WAL records have been processed and stored durably on files on
     292              :     // local disk, up to this LSN. On crash and restart, we need to re-process
     293              :     // the WAL starting from this point.
     294              :     //
     295              :     // Some later WAL records might have been processed and also flushed to disk
     296              :     // already, so don't be surprised to see some, but there's no guarantee on
     297              :     // them yet.
     298              :     disk_consistent_lsn: AtomicLsn,
     299              : 
     300              :     // Parent timeline that this timeline was branched from, and the LSN
     301              :     // of the branch point.
     302              :     ancestor_timeline: Option<Arc<Timeline>>,
     303              :     ancestor_lsn: Lsn,
     304              : 
     305              :     pub(super) metrics: TimelineMetrics,
     306              : 
     307              :     // `Timeline` doesn't write these metrics itself, but it manages the lifetime.  Code
     308              :     // in `crate::page_service` writes these metrics.
     309              :     pub(crate) query_metrics: crate::metrics::SmgrQueryTimePerTimeline,
     310              : 
     311              :     directory_metrics: [AtomicU64; DirectoryKind::KINDS_NUM],
     312              : 
     313              :     /// Ensures layers aren't frozen by checkpointer between
     314              :     /// [`Timeline::get_layer_for_write`] and layer reads.
     315              :     /// Locked automatically by [`TimelineWriter`] and checkpointer.
     316              :     /// Must always be acquired before the layer map/individual layer lock
     317              :     /// to avoid deadlock.
     318              :     ///
     319              :     /// The state is cleared upon freezing.
     320              :     write_lock: tokio::sync::Mutex<Option<TimelineWriterState>>,
     321              : 
     322              :     /// Used to avoid multiple `flush_loop` tasks running
     323              :     pub(super) flush_loop_state: Mutex<FlushLoopState>,
     324              : 
     325              :     /// layer_flush_start_tx can be used to wake up the layer-flushing task.
     326              :     /// - The u64 value is a counter, incremented every time a new flush cycle is requested.
     327              :     ///   The flush cycle counter is sent back on the layer_flush_done channel when
     328              :     ///   the flush finishes. You can use that to wait for the flush to finish.
     329              :     /// - The LSN is updated to max() of its current value and the latest disk_consistent_lsn
     330              :     ///   read by whoever sends an update
     331              :     layer_flush_start_tx: tokio::sync::watch::Sender<(u64, Lsn)>,
     332              :     /// to be notified when layer flushing has finished, subscribe to the layer_flush_done channel
     333              :     layer_flush_done_tx: tokio::sync::watch::Sender<(u64, Result<(), FlushLayerError>)>,
     334              : 
     335              :     // Needed to ensure that we can't create a branch at a point that was already garbage collected
     336              :     pub latest_gc_cutoff_lsn: Rcu<Lsn>,
     337              : 
     338              :     // List of child timelines and their branch points. This is needed to avoid
     339              :     // garbage collecting data that is still needed by the child timelines.
     340              :     pub(crate) gc_info: std::sync::RwLock<GcInfo>,
     341              : 
     342              :     // It may change across major versions so for simplicity
     343              :     // keep it after running initdb for a timeline.
     344              :     // It is needed in checks when we want to error on some operations
     345              :     // when they are requested for pre-initdb lsn.
     346              :     // It can be unified with latest_gc_cutoff_lsn under some "first_valid_lsn",
     347              :     // though let's keep them both for better error visibility.
     348              :     pub initdb_lsn: Lsn,
     349              : 
     350              :     /// When did we last calculate the partitioning? Make it pub to test cases.
     351              :     pub(super) partitioning: tokio::sync::Mutex<((KeyPartitioning, SparseKeyPartitioning), Lsn)>,
     352              : 
     353              :     /// Configuration: how often should the partitioning be recalculated.
     354              :     repartition_threshold: u64,
     355              : 
     356              :     last_image_layer_creation_check_at: AtomicLsn,
     357              :     last_image_layer_creation_check_instant: std::sync::Mutex<Option<Instant>>,
     358              : 
     359              :     /// Current logical size of the "datadir", at the last LSN.
     360              :     current_logical_size: LogicalSize,
     361              : 
     362              :     /// Information about the last processed message by the WAL receiver,
     363              :     /// or None if WAL receiver has not received anything for this timeline
     364              :     /// yet.
     365              :     pub last_received_wal: Mutex<Option<WalReceiverInfo>>,
     366              :     pub walreceiver: Mutex<Option<WalReceiver>>,
     367              : 
     368              :     /// Relation size cache
     369              :     pub(crate) rel_size_cache: RwLock<RelSizeCache>,
     370              : 
     371              :     download_all_remote_layers_task_info: RwLock<Option<DownloadRemoteLayersTaskInfo>>,
     372              : 
     373              :     state: watch::Sender<TimelineState>,
     374              : 
     375              :     /// Prevent two tasks from deleting the timeline at the same time. If held, the
     376              :     /// timeline is being deleted. If 'true', the timeline has already been deleted.
     377              :     pub delete_progress: Arc<tokio::sync::Mutex<DeleteTimelineFlow>>,
     378              : 
     379              :     eviction_task_timeline_state: tokio::sync::Mutex<EvictionTaskTimelineState>,
     380              : 
     381              :     /// Load or creation time information about the disk_consistent_lsn and when the loading
     382              :     /// happened. Used for consumption metrics.
     383              :     pub(crate) loaded_at: (Lsn, SystemTime),
     384              : 
     385              :     /// Gate to prevent shutdown completing while I/O is still happening to this timeline's data
     386              :     pub(crate) gate: Gate,
     387              : 
     388              :     /// Cancellation token scoped to this timeline: anything doing long-running work relating
     389              :     /// to the timeline should drop out when this token fires.
     390              :     pub(crate) cancel: CancellationToken,
     391              : 
     392              :     /// Make sure we only have one running compaction at a time in tests.
     393              :     ///
     394              :     /// Must only be taken in two places:
     395              :     /// - [`Timeline::compact`] (this file)
     396              :     /// - [`delete::delete_local_timeline_directory`]
     397              :     ///
     398              :     /// Timeline deletion will acquire both compaction and gc locks in whatever order.
     399              :     compaction_lock: tokio::sync::Mutex<()>,
     400              : 
     401              :     /// Make sure we only have one running gc at a time.
     402              :     ///
     403              :     /// Must only be taken in two places:
     404              :     /// - [`Timeline::gc`] (this file)
     405              :     /// - [`delete::delete_local_timeline_directory`]
     406              :     ///
     407              :     /// Timeline deletion will acquire both compaction and gc locks in whatever order.
     408              :     gc_lock: tokio::sync::Mutex<()>,
     409              : 
     410              :     /// Cloned from [`super::Tenant::timeline_get_throttle`] on construction.
     411              :     timeline_get_throttle: Arc<
     412              :         crate::tenant::throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>,
     413              :     >,
     414              : 
     415              :     /// Keep aux directory cache to avoid it's reconstruction on each update
     416              :     pub(crate) aux_files: tokio::sync::Mutex<AuxFilesState>,
     417              : 
     418              :     /// Size estimator for aux file v2
     419              :     pub(crate) aux_file_size_estimator: AuxFileSizeEstimator,
     420              : 
     421              :     /// Indicate whether aux file v2 storage is enabled.
     422              :     pub(crate) last_aux_file_policy: AtomicAuxFilePolicy,
     423              : 
     424              :     /// Some test cases directly place keys into the timeline without actually modifying the directory
     425              :     /// keys (i.e., DB_DIR). The test cases creating such keys will put the keyspaces here, so that
     426              :     /// these keys won't get garbage-collected during compaction/GC. This field only modifies the dense
     427              :     /// keyspace return value of `collect_keyspace`. For sparse keyspaces, use AUX keys for testing, and
     428              :     /// in the future, add `extra_test_sparse_keyspace` if necessary.
     429              :     #[cfg(test)]
     430              :     pub(crate) extra_test_dense_keyspace: ArcSwap<KeySpace>,
     431              : 
     432              :     pub(crate) l0_flush_global_state: L0FlushGlobalState,
     433              : 
     434              :     pub(crate) handles: handle::PerTimelineState<crate::page_service::TenantManagerTypes>,
     435              : }
     436              : 
     437              : pub struct WalReceiverInfo {
     438              :     pub wal_source_connconf: PgConnectionConfig,
     439              :     pub last_received_msg_lsn: Lsn,
     440              :     pub last_received_msg_ts: u128,
     441              : }
     442              : 
     443              : /// Information about how much history needs to be retained, needed by
     444              : /// Garbage Collection.
     445              : #[derive(Default)]
     446              : pub(crate) struct GcInfo {
     447              :     /// Specific LSNs that are needed.
     448              :     ///
     449              :     /// Currently, this includes all points where child branches have
     450              :     /// been forked off from. In the future, could also include
     451              :     /// explicit user-defined snapshot points.
     452              :     pub(crate) retain_lsns: Vec<(Lsn, TimelineId)>,
     453              : 
     454              :     /// The cutoff coordinates, which are combined by selecting the minimum.
     455              :     pub(crate) cutoffs: GcCutoffs,
     456              : 
     457              :     /// Leases granted to particular LSNs.
     458              :     pub(crate) leases: BTreeMap<Lsn, LsnLease>,
     459              : 
     460              :     /// Whether our branch point is within our ancestor's PITR interval (for cost estimation)
     461              :     pub(crate) within_ancestor_pitr: bool,
     462              : }
     463              : 
     464              : impl GcInfo {
     465          226 :     pub(crate) fn min_cutoff(&self) -> Lsn {
     466          226 :         self.cutoffs.select_min()
     467          226 :     }
     468              : 
     469          228 :     pub(super) fn insert_child(&mut self, child_id: TimelineId, child_lsn: Lsn) {
     470          228 :         self.retain_lsns.push((child_lsn, child_id));
     471          228 :         self.retain_lsns.sort_by_key(|i| i.0);
     472          228 :     }
     473              : 
     474            2 :     pub(super) fn remove_child(&mut self, child_id: TimelineId) {
     475            2 :         self.retain_lsns.retain(|i| i.1 != child_id);
     476            2 :     }
     477              : }
     478              : 
     479              : /// The `GcInfo` component describing which Lsns need to be retained.  Functionally, this
     480              : /// is a single number (the oldest LSN which we must retain), but it internally distinguishes
     481              : /// between time-based and space-based retention for observability and consumption metrics purposes.
     482              : #[derive(Debug, Clone)]
     483              : pub(crate) struct GcCutoffs {
     484              :     /// Calculated from the [`TenantConf::gc_horizon`], this LSN indicates how much
     485              :     /// history we must keep to retain a specified number of bytes of WAL.
     486              :     pub(crate) space: Lsn,
     487              : 
     488              :     /// Calculated from [`TenantConf::pitr_interval`], this LSN indicates how much
     489              :     /// history we must keep to enable reading back at least the PITR interval duration.
     490              :     pub(crate) time: Lsn,
     491              : }
     492              : 
     493              : impl Default for GcCutoffs {
     494          398 :     fn default() -> Self {
     495          398 :         Self {
     496          398 :             space: Lsn::INVALID,
     497          398 :             time: Lsn::INVALID,
     498          398 :         }
     499          398 :     }
     500              : }
     501              : 
     502              : impl GcCutoffs {
     503          244 :     fn select_min(&self) -> Lsn {
     504          244 :         std::cmp::min(self.space, self.time)
     505          244 :     }
     506              : }
     507              : 
     508              : pub(crate) struct TimelineVisitOutcome {
     509              :     completed_keyspace: KeySpace,
     510              :     image_covered_keyspace: KeySpace,
     511              : }
     512              : 
     513              : /// An error happened in a get() operation.
     514            2 : #[derive(thiserror::Error, Debug)]
     515              : pub(crate) enum PageReconstructError {
     516              :     #[error(transparent)]
     517              :     Other(#[from] anyhow::Error),
     518              : 
     519              :     #[error("Ancestor LSN wait error: {0}")]
     520              :     AncestorLsnTimeout(WaitLsnError),
     521              : 
     522              :     #[error("timeline shutting down")]
     523              :     Cancelled,
     524              : 
     525              :     /// An error happened replaying WAL records
     526              :     #[error(transparent)]
     527              :     WalRedo(anyhow::Error),
     528              : 
     529              :     #[error("{0}")]
     530              :     MissingKey(MissingKeyError),
     531              : }
     532              : 
     533              : impl GetVectoredError {
     534              :     #[cfg(test)]
     535            6 :     pub(crate) fn is_missing_key_error(&self) -> bool {
     536            6 :         matches!(self, Self::MissingKey(_))
     537            6 :     }
     538              : }
     539              : 
     540              : pub struct MissingKeyError {
     541              :     key: Key,
     542              :     shard: ShardNumber,
     543              :     cont_lsn: Lsn,
     544              :     request_lsn: Lsn,
     545              :     ancestor_lsn: Option<Lsn>,
     546              :     traversal_path: Vec<TraversalPathItem>,
     547              :     backtrace: Option<std::backtrace::Backtrace>,
     548              : }
     549              : 
     550              : impl std::fmt::Debug for MissingKeyError {
     551            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     552            0 :         write!(f, "{}", self)
     553            0 :     }
     554              : }
     555              : 
     556              : impl std::fmt::Display for MissingKeyError {
     557            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     558            0 :         write!(
     559            0 :             f,
     560            0 :             "could not find data for key {} (shard {:?}) at LSN {}, request LSN {}",
     561            0 :             self.key, self.shard, self.cont_lsn, self.request_lsn
     562            0 :         )?;
     563            0 :         if let Some(ref ancestor_lsn) = self.ancestor_lsn {
     564            0 :             write!(f, ", ancestor {}", ancestor_lsn)?;
     565            0 :         }
     566              : 
     567            0 :         if !self.traversal_path.is_empty() {
     568            0 :             writeln!(f)?;
     569            0 :         }
     570              : 
     571            0 :         for (r, c, l) in &self.traversal_path {
     572            0 :             writeln!(
     573            0 :                 f,
     574            0 :                 "layer traversal: result {:?}, cont_lsn {}, layer: {}",
     575            0 :                 r, c, l,
     576            0 :             )?;
     577              :         }
     578              : 
     579            0 :         if let Some(ref backtrace) = self.backtrace {
     580            0 :             write!(f, "\n{}", backtrace)?;
     581            0 :         }
     582              : 
     583            0 :         Ok(())
     584            0 :     }
     585              : }
     586              : 
     587              : impl PageReconstructError {
     588              :     /// Returns true if this error indicates a tenant/timeline shutdown alike situation
     589            0 :     pub(crate) fn is_stopping(&self) -> bool {
     590            0 :         use PageReconstructError::*;
     591            0 :         match self {
     592            0 :             Other(_) => false,
     593            0 :             AncestorLsnTimeout(_) => false,
     594            0 :             Cancelled => true,
     595            0 :             WalRedo(_) => false,
     596            0 :             MissingKey { .. } => false,
     597              :         }
     598            0 :     }
     599              : }
     600              : 
     601            0 : #[derive(thiserror::Error, Debug)]
     602              : pub(crate) enum CreateImageLayersError {
     603              :     #[error("timeline shutting down")]
     604              :     Cancelled,
     605              : 
     606              :     #[error(transparent)]
     607              :     GetVectoredError(GetVectoredError),
     608              : 
     609              :     #[error(transparent)]
     610              :     PageReconstructError(PageReconstructError),
     611              : 
     612              :     #[error(transparent)]
     613              :     Other(#[from] anyhow::Error),
     614              : }
     615              : 
     616            0 : #[derive(thiserror::Error, Debug, Clone)]
     617              : pub(crate) enum FlushLayerError {
     618              :     /// Timeline cancellation token was cancelled
     619              :     #[error("timeline shutting down")]
     620              :     Cancelled,
     621              : 
     622              :     /// We tried to flush a layer while the Timeline is in an unexpected state
     623              :     #[error("cannot flush frozen layers when flush_loop is not running, state is {0:?}")]
     624              :     NotRunning(FlushLoopState),
     625              : 
     626              :     // Arc<> the following non-clonable error types: we must be Clone-able because the flush error is propagated from the flush
     627              :     // loop via a watch channel, where we can only borrow it.
     628              :     #[error(transparent)]
     629              :     CreateImageLayersError(Arc<CreateImageLayersError>),
     630              : 
     631              :     #[error(transparent)]
     632              :     Other(#[from] Arc<anyhow::Error>),
     633              : }
     634              : 
     635              : impl FlushLayerError {
     636              :     // When crossing from generic anyhow errors to this error type, we explicitly check
     637              :     // for timeline cancellation to avoid logging inoffensive shutdown errors as warn/err.
     638            0 :     fn from_anyhow(timeline: &Timeline, err: anyhow::Error) -> Self {
     639            0 :         let cancelled = timeline.cancel.is_cancelled()
     640              :             // The upload queue might have been shut down before the official cancellation of the timeline.
     641            0 :             || err
     642            0 :                 .downcast_ref::<NotInitialized>()
     643            0 :                 .map(NotInitialized::is_stopping)
     644            0 :                 .unwrap_or_default();
     645            0 :         if cancelled {
     646            0 :             Self::Cancelled
     647              :         } else {
     648            0 :             Self::Other(Arc::new(err))
     649              :         }
     650            0 :     }
     651              : }
     652              : 
     653            0 : #[derive(thiserror::Error, Debug)]
     654              : pub(crate) enum GetVectoredError {
     655              :     #[error("timeline shutting down")]
     656              :     Cancelled,
     657              : 
     658              :     #[error("Requested too many keys: {0} > {}", Timeline::MAX_GET_VECTORED_KEYS)]
     659              :     Oversized(u64),
     660              : 
     661              :     #[error("Requested at invalid LSN: {0}")]
     662              :     InvalidLsn(Lsn),
     663              : 
     664              :     #[error("Requested key not found: {0}")]
     665              :     MissingKey(MissingKeyError),
     666              : 
     667              :     #[error(transparent)]
     668              :     GetReadyAncestorError(GetReadyAncestorError),
     669              : 
     670              :     #[error(transparent)]
     671              :     Other(#[from] anyhow::Error),
     672              : }
     673              : 
     674            2 : #[derive(thiserror::Error, Debug)]
     675              : pub(crate) enum GetReadyAncestorError {
     676              :     #[error("Ancestor LSN wait error: {0}")]
     677              :     AncestorLsnTimeout(#[from] WaitLsnError),
     678              : 
     679              :     #[error("Bad state on timeline {timeline_id}: {state:?}")]
     680              :     BadState {
     681              :         timeline_id: TimelineId,
     682              :         state: TimelineState,
     683              :     },
     684              : 
     685              :     #[error("Cancelled")]
     686              :     Cancelled,
     687              : }
     688              : 
     689              : #[derive(Clone, Copy)]
     690              : pub enum LogicalSizeCalculationCause {
     691              :     Initial,
     692              :     ConsumptionMetricsSyntheticSize,
     693              :     EvictionTaskImitation,
     694              :     TenantSizeHandler,
     695              : }
     696              : 
     697              : pub enum GetLogicalSizePriority {
     698              :     User,
     699              :     Background,
     700              : }
     701              : 
     702            0 : #[derive(enumset::EnumSetType)]
     703              : pub(crate) enum CompactFlags {
     704              :     ForceRepartition,
     705              :     ForceImageLayerCreation,
     706              :     EnhancedGcBottomMostCompaction,
     707              : }
     708              : 
     709              : impl std::fmt::Debug for Timeline {
     710            0 :     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
     711            0 :         write!(f, "Timeline<{}>", self.timeline_id)
     712            0 :     }
     713              : }
     714              : 
     715            0 : #[derive(thiserror::Error, Debug)]
     716              : pub(crate) enum WaitLsnError {
     717              :     // Called on a timeline which is shutting down
     718              :     #[error("Shutdown")]
     719              :     Shutdown,
     720              : 
     721              :     // Called on an timeline not in active state or shutting down
     722              :     #[error("Bad timeline state: {0:?}")]
     723              :     BadState(TimelineState),
     724              : 
     725              :     // Timeout expired while waiting for LSN to catch up with goal.
     726              :     #[error("{0}")]
     727              :     Timeout(String),
     728              : }
     729              : 
     730              : // The impls below achieve cancellation mapping for errors.
     731              : // Perhaps there's a way of achieving this with less cruft.
     732              : 
     733              : impl From<CreateImageLayersError> for CompactionError {
     734            0 :     fn from(e: CreateImageLayersError) -> Self {
     735            0 :         match e {
     736            0 :             CreateImageLayersError::Cancelled => CompactionError::ShuttingDown,
     737            0 :             CreateImageLayersError::Other(e) => {
     738            0 :                 CompactionError::Other(e.context("create image layers"))
     739              :             }
     740            0 :             _ => CompactionError::Other(e.into()),
     741              :         }
     742            0 :     }
     743              : }
     744              : 
     745              : impl From<CreateImageLayersError> for FlushLayerError {
     746            0 :     fn from(e: CreateImageLayersError) -> Self {
     747            0 :         match e {
     748            0 :             CreateImageLayersError::Cancelled => FlushLayerError::Cancelled,
     749            0 :             any => FlushLayerError::CreateImageLayersError(Arc::new(any)),
     750              :         }
     751            0 :     }
     752              : }
     753              : 
     754              : impl From<PageReconstructError> for CreateImageLayersError {
     755            0 :     fn from(e: PageReconstructError) -> Self {
     756            0 :         match e {
     757            0 :             PageReconstructError::Cancelled => CreateImageLayersError::Cancelled,
     758            0 :             _ => CreateImageLayersError::PageReconstructError(e),
     759              :         }
     760            0 :     }
     761              : }
     762              : 
     763              : impl From<GetVectoredError> for CreateImageLayersError {
     764            0 :     fn from(e: GetVectoredError) -> Self {
     765            0 :         match e {
     766            0 :             GetVectoredError::Cancelled => CreateImageLayersError::Cancelled,
     767            0 :             _ => CreateImageLayersError::GetVectoredError(e),
     768              :         }
     769            0 :     }
     770              : }
     771              : 
     772              : impl From<GetVectoredError> for PageReconstructError {
     773            6 :     fn from(e: GetVectoredError) -> Self {
     774            6 :         match e {
     775            0 :             GetVectoredError::Cancelled => PageReconstructError::Cancelled,
     776            0 :             GetVectoredError::InvalidLsn(_) => PageReconstructError::Other(anyhow!("Invalid LSN")),
     777            0 :             err @ GetVectoredError::Oversized(_) => PageReconstructError::Other(err.into()),
     778            4 :             GetVectoredError::MissingKey(err) => PageReconstructError::MissingKey(err),
     779            2 :             GetVectoredError::GetReadyAncestorError(err) => PageReconstructError::from(err),
     780            0 :             GetVectoredError::Other(err) => PageReconstructError::Other(err),
     781              :         }
     782            6 :     }
     783              : }
     784              : 
     785              : impl From<GetReadyAncestorError> for PageReconstructError {
     786            2 :     fn from(e: GetReadyAncestorError) -> Self {
     787            2 :         use GetReadyAncestorError::*;
     788            2 :         match e {
     789            0 :             AncestorLsnTimeout(wait_err) => PageReconstructError::AncestorLsnTimeout(wait_err),
     790            2 :             bad_state @ BadState { .. } => PageReconstructError::Other(anyhow::anyhow!(bad_state)),
     791            0 :             Cancelled => PageReconstructError::Cancelled,
     792              :         }
     793            2 :     }
     794              : }
     795              : 
     796              : #[derive(
     797              :     Eq,
     798              :     PartialEq,
     799              :     Debug,
     800              :     Copy,
     801              :     Clone,
     802          202 :     strum_macros::EnumString,
     803            0 :     strum_macros::Display,
     804            0 :     serde_with::DeserializeFromStr,
     805              :     serde_with::SerializeDisplay,
     806              : )]
     807              : #[strum(serialize_all = "kebab-case")]
     808              : pub enum GetVectoredImpl {
     809              :     Sequential,
     810              :     Vectored,
     811              : }
     812              : 
     813              : #[derive(
     814              :     Eq,
     815              :     PartialEq,
     816              :     Debug,
     817              :     Copy,
     818              :     Clone,
     819          202 :     strum_macros::EnumString,
     820            0 :     strum_macros::Display,
     821            0 :     serde_with::DeserializeFromStr,
     822              :     serde_with::SerializeDisplay,
     823              : )]
     824              : #[strum(serialize_all = "kebab-case")]
     825              : pub enum GetImpl {
     826              :     Legacy,
     827              :     Vectored,
     828              : }
     829              : 
     830              : pub(crate) enum WaitLsnWaiter<'a> {
     831              :     Timeline(&'a Timeline),
     832              :     Tenant,
     833              :     PageService,
     834              : }
     835              : 
     836              : /// Argument to [`Timeline::shutdown`].
     837              : #[derive(Debug, Clone, Copy)]
     838              : pub(crate) enum ShutdownMode {
     839              :     /// Graceful shutdown, may do a lot of I/O as we flush any open layers to disk and then
     840              :     /// also to remote storage.  This method can easily take multiple seconds for a busy timeline.
     841              :     ///
     842              :     /// While we are flushing, we continue to accept read I/O for LSNs ingested before
     843              :     /// the call to [`Timeline::shutdown`].
     844              :     FreezeAndFlush,
     845              :     /// Shut down immediately, without waiting for any open layers to flush.
     846              :     Hard,
     847              : }
     848              : 
     849              : struct ImageLayerCreationOutcome {
     850              :     image: Option<ResidentLayer>,
     851              :     next_start_key: Key,
     852              : }
     853              : 
     854              : /// Public interface functions
     855              : impl Timeline {
     856              :     /// Get the LSN where this branch was created
     857          102 :     pub(crate) fn get_ancestor_lsn(&self) -> Lsn {
     858          102 :         self.ancestor_lsn
     859          102 :     }
     860              : 
     861              :     /// Get the ancestor's timeline id
     862         1508 :     pub(crate) fn get_ancestor_timeline_id(&self) -> Option<TimelineId> {
     863         1508 :         self.ancestor_timeline
     864         1508 :             .as_ref()
     865         1508 :             .map(|ancestor| ancestor.timeline_id)
     866         1508 :     }
     867              : 
     868              :     /// Get the bytes written since the PITR cutoff on this branch, and
     869              :     /// whether this branch's ancestor_lsn is within its parent's PITR.
     870            0 :     pub(crate) fn get_pitr_history_stats(&self) -> (u64, bool) {
     871            0 :         let gc_info = self.gc_info.read().unwrap();
     872            0 :         let history = self
     873            0 :             .get_last_record_lsn()
     874            0 :             .checked_sub(gc_info.cutoffs.time)
     875            0 :             .unwrap_or(Lsn(0))
     876            0 :             .0;
     877            0 :         (history, gc_info.within_ancestor_pitr)
     878            0 :     }
     879              : 
     880              :     /// Lock and get timeline's GC cutoff
     881          995 :     pub(crate) fn get_latest_gc_cutoff_lsn(&self) -> RcuReadGuard<Lsn> {
     882          995 :         self.latest_gc_cutoff_lsn.read()
     883          995 :     }
     884              : 
     885              :     /// Look up given page version.
     886              :     ///
     887              :     /// If a remote layer file is needed, it is downloaded as part of this
     888              :     /// call.
     889              :     ///
     890              :     /// This method enforces [`Self::timeline_get_throttle`] internally.
     891              :     ///
     892              :     /// NOTE: It is considered an error to 'get' a key that doesn't exist. The
     893              :     /// abstraction above this needs to store suitable metadata to track what
     894              :     /// data exists with what keys, in separate metadata entries. If a
     895              :     /// non-existent key is requested, we may incorrectly return a value from
     896              :     /// an ancestor branch, for example, or waste a lot of cycles chasing the
     897              :     /// non-existing key.
     898              :     ///
     899              :     /// # Cancel-Safety
     900              :     ///
     901              :     /// This method is cancellation-safe.
     902              :     #[inline(always)]
     903       624874 :     pub(crate) async fn get(
     904       624874 :         &self,
     905       624874 :         key: Key,
     906       624874 :         lsn: Lsn,
     907       624874 :         ctx: &RequestContext,
     908       624874 :     ) -> Result<Bytes, PageReconstructError> {
     909       624874 :         if !lsn.is_valid() {
     910            0 :             return Err(PageReconstructError::Other(anyhow::anyhow!("Invalid LSN")));
     911       624874 :         }
     912       624874 : 
     913       624874 :         // This check is debug-only because of the cost of hashing, and because it's a double-check: we
     914       624874 :         // already checked the key against the shard_identity when looking up the Timeline from
     915       624874 :         // page_service.
     916       624874 :         debug_assert!(!self.shard_identity.is_key_disposable(&key));
     917              : 
     918       624874 :         self.timeline_get_throttle.throttle(ctx, 1).await;
     919              : 
     920       624874 :         match self.conf.get_impl {
     921              :             GetImpl::Legacy => {
     922            0 :                 let reconstruct_state = ValueReconstructState {
     923            0 :                     records: Vec::new(),
     924            0 :                     img: None,
     925            0 :                 };
     926            0 : 
     927            0 :                 self.get_impl(key, lsn, reconstruct_state, ctx).await
     928              :             }
     929              :             GetImpl::Vectored => {
     930       624874 :                 let keyspace = KeySpace {
     931       624874 :                     ranges: vec![key..key.next()],
     932       624874 :                 };
     933       624874 : 
     934       624874 :                 // Initialise the reconstruct state for the key with the cache
     935       624874 :                 // entry returned above.
     936       624874 :                 let mut reconstruct_state = ValuesReconstructState::new();
     937              : 
     938       624874 :                 let vectored_res = self
     939       624874 :                     .get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx)
     940       102581 :                     .await;
     941              : 
     942       624874 :                 if self.conf.validate_vectored_get {
     943            0 :                     self.validate_get_vectored_impl(&vectored_res, keyspace, lsn, ctx)
     944            0 :                         .await;
     945       624874 :                 }
     946              : 
     947       624874 :                 let key_value = vectored_res?.pop_first();
     948       624868 :                 match key_value {
     949       624746 :                     Some((got_key, value)) => {
     950       624746 :                         if got_key != key {
     951            0 :                             error!(
     952            0 :                                 "Expected {}, but singular vectored get returned {}",
     953              :                                 key, got_key
     954              :                             );
     955            0 :                             Err(PageReconstructError::Other(anyhow!(
     956            0 :                                 "Singular vectored get returned wrong key"
     957            0 :                             )))
     958              :                         } else {
     959       624746 :                             value
     960              :                         }
     961              :                     }
     962          122 :                     None => Err(PageReconstructError::MissingKey(MissingKeyError {
     963          122 :                         key,
     964          122 :                         shard: self.shard_identity.get_shard_number(&key),
     965          122 :                         cont_lsn: Lsn(0),
     966          122 :                         request_lsn: lsn,
     967          122 :                         ancestor_lsn: None,
     968          122 :                         traversal_path: Vec::new(),
     969          122 :                         backtrace: None,
     970          122 :                     })),
     971              :                 }
     972              :             }
     973              :         }
     974       624874 :     }
     975              : 
     976              :     /// Not subject to [`Self::timeline_get_throttle`].
     977          322 :     async fn get_impl(
     978          322 :         &self,
     979          322 :         key: Key,
     980          322 :         lsn: Lsn,
     981          322 :         mut reconstruct_state: ValueReconstructState,
     982          322 :         ctx: &RequestContext,
     983          322 :     ) -> Result<Bytes, PageReconstructError> {
     984          322 :         // XXX: structured stats collection for layer eviction here.
     985          322 :         trace!(
     986            0 :             "get page request for {}@{} from task kind {:?}",
     987            0 :             key,
     988            0 :             lsn,
     989            0 :             ctx.task_kind()
     990              :         );
     991              : 
     992          322 :         let timer = crate::metrics::GET_RECONSTRUCT_DATA_TIME
     993          322 :             .for_get_kind(GetKind::Singular)
     994          322 :             .start_timer();
     995          322 :         let path = self
     996          322 :             .get_reconstruct_data(key, lsn, &mut reconstruct_state, ctx)
     997           19 :             .await?;
     998          320 :         timer.stop_and_record();
     999          320 : 
    1000          320 :         let start = Instant::now();
    1001          320 :         let res = self.reconstruct_value(key, lsn, reconstruct_state).await;
    1002          320 :         let elapsed = start.elapsed();
    1003          320 :         crate::metrics::RECONSTRUCT_TIME
    1004          320 :             .for_get_kind(GetKind::Singular)
    1005          320 :             .observe(elapsed.as_secs_f64());
    1006          320 : 
    1007          320 :         if cfg!(feature = "testing")
    1008          320 :             && res.is_err()
    1009            0 :             && !matches!(res, Err(PageReconstructError::Cancelled))
    1010              :         {
    1011              :             // it can only be walredo issue
    1012              :             use std::fmt::Write;
    1013              : 
    1014            0 :             let mut msg = String::new();
    1015            0 : 
    1016            0 :             path.into_iter().for_each(|(res, cont_lsn, layer)| {
    1017            0 :                 writeln!(
    1018            0 :                     msg,
    1019            0 :                     "- layer traversal: result {res:?}, cont_lsn {cont_lsn}, layer: {}",
    1020            0 :                     layer,
    1021            0 :                 )
    1022            0 :                 .expect("string grows")
    1023            0 :             });
    1024            0 : 
    1025            0 :             // this is to rule out or provide evidence that we could in some cases read a duplicate
    1026            0 :             // walrecord
    1027            0 :             tracing::info!("walredo failed, path:\n{msg}");
    1028          320 :         }
    1029              : 
    1030          320 :         res
    1031          322 :     }
    1032              : 
    1033              :     pub(crate) const MAX_GET_VECTORED_KEYS: u64 = 32;
    1034              :     pub(crate) const VEC_GET_LAYERS_VISITED_WARN_THRESH: f64 = 512.0;
    1035              : 
    1036              :     /// Look up multiple page versions at a given LSN
    1037              :     ///
    1038              :     /// This naive implementation will be replaced with a more efficient one
    1039              :     /// which actually vectorizes the read path.
    1040         1036 :     pub(crate) async fn get_vectored(
    1041         1036 :         &self,
    1042         1036 :         keyspace: KeySpace,
    1043         1036 :         lsn: Lsn,
    1044         1036 :         ctx: &RequestContext,
    1045         1036 :     ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
    1046         1036 :         if !lsn.is_valid() {
    1047            0 :             return Err(GetVectoredError::InvalidLsn(lsn));
    1048         1036 :         }
    1049         1036 : 
    1050         1036 :         let key_count = keyspace.total_raw_size().try_into().unwrap();
    1051         1036 :         if key_count > Timeline::MAX_GET_VECTORED_KEYS {
    1052            0 :             return Err(GetVectoredError::Oversized(key_count));
    1053         1036 :         }
    1054              : 
    1055         2072 :         for range in &keyspace.ranges {
    1056         1036 :             let mut key = range.start;
    1057         2410 :             while key != range.end {
    1058         1374 :                 assert!(!self.shard_identity.is_key_disposable(&key));
    1059         1374 :                 key = key.next();
    1060              :             }
    1061              :         }
    1062              : 
    1063         1036 :         trace!(
    1064            0 :             "get vectored request for {:?}@{} from task kind {:?} will use {} implementation",
    1065            0 :             keyspace,
    1066            0 :             lsn,
    1067            0 :             ctx.task_kind(),
    1068              :             self.conf.get_vectored_impl
    1069              :         );
    1070              : 
    1071         1036 :         let start = crate::metrics::GET_VECTORED_LATENCY
    1072         1036 :             .for_task_kind(ctx.task_kind())
    1073         1036 :             .map(|metric| (metric, Instant::now()));
    1074              : 
    1075              :         // start counting after throttle so that throttle time
    1076              :         // is always less than observation time
    1077         1036 :         let throttled = self
    1078         1036 :             .timeline_get_throttle
    1079         1036 :             .throttle(ctx, key_count as usize)
    1080            0 :             .await;
    1081              : 
    1082         1036 :         let res = match self.conf.get_vectored_impl {
    1083              :             GetVectoredImpl::Sequential => {
    1084            0 :                 self.get_vectored_sequential_impl(keyspace, lsn, ctx).await
    1085              :             }
    1086              :             GetVectoredImpl::Vectored => {
    1087         1036 :                 let vectored_res = self
    1088         1036 :                     .get_vectored_impl(
    1089         1036 :                         keyspace.clone(),
    1090         1036 :                         lsn,
    1091         1036 :                         &mut ValuesReconstructState::new(),
    1092         1036 :                         ctx,
    1093         1036 :                     )
    1094           49 :                     .await;
    1095              : 
    1096         1036 :                 if self.conf.validate_vectored_get {
    1097            0 :                     self.validate_get_vectored_impl(&vectored_res, keyspace, lsn, ctx)
    1098            0 :                         .await;
    1099         1036 :                 }
    1100              : 
    1101         1036 :                 vectored_res
    1102              :             }
    1103              :         };
    1104              : 
    1105         1036 :         if let Some((metric, start)) = start {
    1106            0 :             let elapsed = start.elapsed();
    1107            0 :             let ex_throttled = if let Some(throttled) = throttled {
    1108            0 :                 elapsed.checked_sub(throttled)
    1109              :             } else {
    1110            0 :                 Some(elapsed)
    1111              :             };
    1112              : 
    1113            0 :             if let Some(ex_throttled) = ex_throttled {
    1114            0 :                 metric.observe(ex_throttled.as_secs_f64());
    1115            0 :             } else {
    1116            0 :                 use utils::rate_limit::RateLimit;
    1117            0 :                 static LOGGED: Lazy<Mutex<RateLimit>> =
    1118            0 :                     Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(10))));
    1119            0 :                 let mut rate_limit = LOGGED.lock().unwrap();
    1120            0 :                 rate_limit.call(|| {
    1121            0 :                     warn!("error deducting time spent throttled; this message is logged at a global rate limit");
    1122            0 :                 });
    1123            0 :             }
    1124         1036 :         }
    1125              : 
    1126         1036 :         res
    1127         1036 :     }
    1128              : 
    1129              :     /// Scan the keyspace and return all existing key-values in the keyspace. This currently uses vectored
    1130              :     /// get underlying. Normal vectored get would throw an error when a key in the keyspace is not found
    1131              :     /// during the search, but for the scan interface, it returns all existing key-value pairs, and does
    1132              :     /// not expect each single key in the key space will be found. The semantics is closer to the RocksDB
    1133              :     /// scan iterator interface. We could optimize this interface later to avoid some checks in the vectored
    1134              :     /// get path to maintain and split the probing and to-be-probe keyspace. We also need to ensure that
    1135              :     /// the scan operation will not cause OOM in the future.
    1136           12 :     pub(crate) async fn scan(
    1137           12 :         &self,
    1138           12 :         keyspace: KeySpace,
    1139           12 :         lsn: Lsn,
    1140           12 :         ctx: &RequestContext,
    1141           12 :     ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
    1142           12 :         if !lsn.is_valid() {
    1143            0 :             return Err(GetVectoredError::InvalidLsn(lsn));
    1144           12 :         }
    1145           12 : 
    1146           12 :         trace!(
    1147            0 :             "key-value scan request for {:?}@{} from task kind {:?}",
    1148            0 :             keyspace,
    1149            0 :             lsn,
    1150            0 :             ctx.task_kind()
    1151              :         );
    1152              : 
    1153              :         // We should generalize this into Keyspace::contains in the future.
    1154           24 :         for range in &keyspace.ranges {
    1155           12 :             if range.start.field1 < METADATA_KEY_BEGIN_PREFIX
    1156           12 :                 || range.end.field1 > METADATA_KEY_END_PREFIX
    1157              :             {
    1158            0 :                 return Err(GetVectoredError::Other(anyhow::anyhow!(
    1159            0 :                     "only metadata keyspace can be scanned"
    1160            0 :                 )));
    1161           12 :             }
    1162              :         }
    1163              : 
    1164           12 :         let start = crate::metrics::SCAN_LATENCY
    1165           12 :             .for_task_kind(ctx.task_kind())
    1166           12 :             .map(ScanLatencyOngoingRecording::start_recording);
    1167              : 
    1168              :         // start counting after throttle so that throttle time
    1169              :         // is always less than observation time
    1170           12 :         let throttled = self
    1171           12 :             .timeline_get_throttle
    1172           12 :             // assume scan = 1 quota for now until we find a better way to process this
    1173           12 :             .throttle(ctx, 1)
    1174            0 :             .await;
    1175              : 
    1176           12 :         let vectored_res = self
    1177           12 :             .get_vectored_impl(
    1178           12 :                 keyspace.clone(),
    1179           12 :                 lsn,
    1180           12 :                 &mut ValuesReconstructState::default(),
    1181           12 :                 ctx,
    1182           12 :             )
    1183            0 :             .await;
    1184              : 
    1185           12 :         if let Some(recording) = start {
    1186            0 :             recording.observe(throttled);
    1187           12 :         }
    1188              : 
    1189           12 :         vectored_res
    1190           12 :     }
    1191              : 
    1192              :     /// Not subject to [`Self::timeline_get_throttle`].
    1193           12 :     pub(super) async fn get_vectored_sequential_impl(
    1194           12 :         &self,
    1195           12 :         keyspace: KeySpace,
    1196           12 :         lsn: Lsn,
    1197           12 :         ctx: &RequestContext,
    1198           12 :     ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
    1199           12 :         let mut values = BTreeMap::new();
    1200              : 
    1201           24 :         for range in keyspace.ranges {
    1202           12 :             let mut key = range.start;
    1203          334 :             while key != range.end {
    1204          322 :                 let block = self
    1205          322 :                     .get_impl(key, lsn, ValueReconstructState::default(), ctx)
    1206           19 :                     .await;
    1207              : 
    1208              :                 use PageReconstructError::*;
    1209            0 :                 match block {
    1210            0 :                     Err(Cancelled) => return Err(GetVectoredError::Cancelled),
    1211              :                     Err(MissingKey(_))
    1212            2 :                         if NON_INHERITED_RANGE.contains(&key)
    1213            2 :                             || NON_INHERITED_SPARSE_RANGE.contains(&key) =>
    1214            2 :                     {
    1215            2 :                         // Ignore missing key error for aux key range. TODO: currently, we assume non_inherited_range == aux_key_range.
    1216            2 :                         // When we add more types of keys into the page server, we should revisit this part of code and throw errors
    1217            2 :                         // accordingly.
    1218            2 :                         key = key.next();
    1219            2 :                     }
    1220            0 :                     Err(MissingKey(err)) => {
    1221            0 :                         return Err(GetVectoredError::MissingKey(err));
    1222              :                     }
    1223            0 :                     Err(Other(err))
    1224            0 :                         if err
    1225            0 :                             .to_string()
    1226            0 :                             .contains("downloading evicted layer file failed") =>
    1227            0 :                     {
    1228            0 :                         return Err(GetVectoredError::Other(err))
    1229              :                     }
    1230            0 :                     Err(Other(err))
    1231            0 :                         if err
    1232            0 :                             .chain()
    1233            0 :                             .any(|cause| cause.to_string().contains("layer loading failed")) =>
    1234              :                     {
    1235              :                         // The intent here is to achieve error parity with the vectored read path.
    1236              :                         // When vectored read fails to load a layer it fails the whole read, hence
    1237              :                         // we mimic this behaviour here to keep the validation happy.
    1238            0 :                         return Err(GetVectoredError::Other(err));
    1239              :                     }
    1240          320 :                     _ => {
    1241          320 :                         values.insert(key, block);
    1242          320 :                         key = key.next();
    1243          320 :                     }
    1244              :                 }
    1245              :             }
    1246              :         }
    1247              : 
    1248           12 :         Ok(values)
    1249           12 :     }
    1250              : 
    1251       626048 :     pub(super) async fn get_vectored_impl(
    1252       626048 :         &self,
    1253       626048 :         keyspace: KeySpace,
    1254       626048 :         lsn: Lsn,
    1255       626048 :         reconstruct_state: &mut ValuesReconstructState,
    1256       626048 :         ctx: &RequestContext,
    1257       626048 :     ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
    1258       626048 :         let get_kind = if keyspace.total_raw_size() == 1 {
    1259       625808 :             GetKind::Singular
    1260              :         } else {
    1261          240 :             GetKind::Vectored
    1262              :         };
    1263              : 
    1264       626048 :         let get_data_timer = crate::metrics::GET_RECONSTRUCT_DATA_TIME
    1265       626048 :             .for_get_kind(get_kind)
    1266       626048 :             .start_timer();
    1267       626048 :         self.get_vectored_reconstruct_data(keyspace.clone(), lsn, reconstruct_state, ctx)
    1268       114010 :             .await?;
    1269       626032 :         get_data_timer.stop_and_record();
    1270       626032 : 
    1271       626032 :         let reconstruct_timer = crate::metrics::RECONSTRUCT_TIME
    1272       626032 :             .for_get_kind(get_kind)
    1273       626032 :             .start_timer();
    1274       626032 :         let mut results: BTreeMap<Key, Result<Bytes, PageReconstructError>> = BTreeMap::new();
    1275       626032 :         let layers_visited = reconstruct_state.get_layers_visited();
    1276              : 
    1277       666556 :         for (key, res) in std::mem::take(&mut reconstruct_state.keys) {
    1278       666556 :             match res {
    1279            0 :                 Err(err) => {
    1280            0 :                     results.insert(key, Err(err));
    1281            0 :                 }
    1282       666556 :                 Ok(state) => {
    1283       666556 :                     let state = ValueReconstructState::from(state);
    1284              : 
    1285       666556 :                     let reconstruct_res = self.reconstruct_value(key, lsn, state).await;
    1286       666556 :                     results.insert(key, reconstruct_res);
    1287              :                 }
    1288              :             }
    1289              :         }
    1290       626032 :         reconstruct_timer.stop_and_record();
    1291       626032 : 
    1292       626032 :         // For aux file keys (v1 or v2) the vectored read path does not return an error
    1293       626032 :         // when they're missing. Instead they are omitted from the resulting btree
    1294       626032 :         // (this is a requirement, not a bug). Skip updating the metric in these cases
    1295       626032 :         // to avoid infinite results.
    1296       626032 :         if !results.is_empty() {
    1297       625892 :             let avg = layers_visited as f64 / results.len() as f64;
    1298       625892 :             if avg >= Self::VEC_GET_LAYERS_VISITED_WARN_THRESH {
    1299            0 :                 use utils::rate_limit::RateLimit;
    1300            0 :                 static LOGGED: Lazy<Mutex<RateLimit>> =
    1301            0 :                     Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(60))));
    1302            0 :                 let mut rate_limit = LOGGED.lock().unwrap();
    1303            0 :                 rate_limit.call(|| {
    1304            0 :                     tracing::info!(
    1305            0 :                       shard_id = %self.tenant_shard_id.shard_slug(),
    1306            0 :                       lsn = %lsn,
    1307            0 :                       "Vectored read for {} visited {} layers on average per key and {} in total. {}/{} pages were returned",
    1308            0 :                       keyspace, avg, layers_visited, results.len(), keyspace.total_raw_size());
    1309            0 :                 });
    1310       625892 :             }
    1311              : 
    1312              :             // Note that this is an approximation. Tracking the exact number of layers visited
    1313              :             // per key requires virtually unbounded memory usage and is inefficient
    1314              :             // (i.e. segment tree tracking each range queried from a layer)
    1315       625892 :             crate::metrics::VEC_READ_NUM_LAYERS_VISITED.observe(avg);
    1316          140 :         }
    1317              : 
    1318       626032 :         Ok(results)
    1319       626048 :     }
    1320              : 
    1321              :     /// Not subject to [`Self::timeline_get_throttle`].
    1322           12 :     pub(super) async fn validate_get_vectored_impl(
    1323           12 :         &self,
    1324           12 :         vectored_res: &Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError>,
    1325           12 :         keyspace: KeySpace,
    1326           12 :         lsn: Lsn,
    1327           12 :         ctx: &RequestContext,
    1328           12 :     ) {
    1329           12 :         if keyspace.overlaps(&Key::metadata_key_range()) {
    1330              :             // skip validation for metadata key range
    1331            0 :             return;
    1332           12 :         }
    1333              : 
    1334           12 :         let sequential_res = self
    1335           12 :             .get_vectored_sequential_impl(keyspace.clone(), lsn, ctx)
    1336           19 :             .await;
    1337              : 
    1338            0 :         fn errors_match(lhs: &GetVectoredError, rhs: &GetVectoredError) -> bool {
    1339            0 :             use GetVectoredError::*;
    1340            0 :             match (lhs, rhs) {
    1341            0 :                 (Oversized(l), Oversized(r)) => l == r,
    1342            0 :                 (InvalidLsn(l), InvalidLsn(r)) => l == r,
    1343            0 :                 (MissingKey(l), MissingKey(r)) => l.key == r.key,
    1344            0 :                 (GetReadyAncestorError(_), GetReadyAncestorError(_)) => true,
    1345            0 :                 (Other(_), Other(_)) => true,
    1346            0 :                 _ => false,
    1347              :             }
    1348            0 :         }
    1349              : 
    1350           12 :         match (&sequential_res, vectored_res) {
    1351            0 :             (Err(GetVectoredError::Cancelled), _) => {},
    1352            0 :             (_, Err(GetVectoredError::Cancelled)) => {},
    1353            0 :             (Err(seq_err), Ok(_)) => {
    1354            0 :                 panic!(concat!("Sequential get failed with {}, but vectored get did not",
    1355            0 :                                " - keyspace={:?} lsn={}"),
    1356            0 :                        seq_err, keyspace, lsn) },
    1357            0 :             (Ok(_), Err(GetVectoredError::GetReadyAncestorError(GetReadyAncestorError::AncestorLsnTimeout(_)))) => {
    1358            0 :                 // Sequential get runs after vectored get, so it is possible for the later
    1359            0 :                 // to time out while waiting for its ancestor's Lsn to become ready and for the
    1360            0 :                 // former to succeed (it essentially has a doubled wait time).
    1361            0 :             },
    1362            0 :             (Ok(_), Err(vec_err)) => {
    1363            0 :                 panic!(concat!("Vectored get failed with {}, but sequential get did not",
    1364            0 :                                " - keyspace={:?} lsn={}"),
    1365            0 :                        vec_err, keyspace, lsn) },
    1366            0 :             (Err(seq_err), Err(vec_err)) => {
    1367            0 :                 assert!(errors_match(seq_err, vec_err),
    1368            0 :                         "Mismatched errors: {seq_err} != {vec_err} - keyspace={keyspace:?} lsn={lsn}")},
    1369           12 :             (Ok(seq_values), Ok(vec_values)) => {
    1370          320 :                 seq_values.iter().zip(vec_values.iter()).for_each(|((seq_key, seq_res), (vec_key, vec_res))| {
    1371          320 :                     assert_eq!(seq_key, vec_key);
    1372          320 :                     match (seq_res, vec_res) {
    1373          320 :                         (Ok(seq_blob), Ok(vec_blob)) => {
    1374          320 :                             Self::validate_key_equivalence(seq_key, &keyspace, lsn, seq_blob, vec_blob);
    1375          320 :                         },
    1376            0 :                         (Err(err), Ok(_)) => {
    1377            0 :                             panic!(
    1378            0 :                                 concat!("Sequential get failed with {} for key {}, but vectored get did not",
    1379            0 :                                         " - keyspace={:?} lsn={}"),
    1380            0 :                                 err, seq_key, keyspace, lsn) },
    1381            0 :                         (Ok(_), Err(err)) => {
    1382            0 :                             panic!(
    1383            0 :                                 concat!("Vectored get failed with {} for key {}, but sequential get did not",
    1384            0 :                                         " - keyspace={:?} lsn={}"),
    1385            0 :                                 err, seq_key, keyspace, lsn) },
    1386            0 :                         (Err(_), Err(_)) => {}
    1387              :                     }
    1388          320 :                 })
    1389              :             }
    1390              :         }
    1391           12 :     }
    1392              : 
    1393          320 :     fn validate_key_equivalence(
    1394          320 :         key: &Key,
    1395          320 :         keyspace: &KeySpace,
    1396          320 :         lsn: Lsn,
    1397          320 :         seq: &Bytes,
    1398          320 :         vec: &Bytes,
    1399          320 :     ) {
    1400          320 :         if *key == AUX_FILES_KEY {
    1401              :             // The value reconstruct of AUX_FILES_KEY from records is not deterministic
    1402              :             // since it uses a hash map under the hood. Hence, deserialise both results
    1403              :             // before comparing.
    1404            0 :             let seq_aux_dir_res = AuxFilesDirectory::des(seq);
    1405            0 :             let vec_aux_dir_res = AuxFilesDirectory::des(vec);
    1406            0 :             match (&seq_aux_dir_res, &vec_aux_dir_res) {
    1407            0 :                 (Ok(seq_aux_dir), Ok(vec_aux_dir)) => {
    1408            0 :                     assert_eq!(
    1409              :                         seq_aux_dir, vec_aux_dir,
    1410            0 :                         "Mismatch for key {} - keyspace={:?} lsn={}",
    1411              :                         key, keyspace, lsn
    1412              :                     );
    1413              :                 }
    1414            0 :                 (Err(_), Err(_)) => {}
    1415              :                 _ => {
    1416            0 :                     panic!("Mismatch for {key}: {seq_aux_dir_res:?} != {vec_aux_dir_res:?}");
    1417              :                 }
    1418              :             }
    1419              :         } else {
    1420              :             // All other keys should reconstruct deterministically, so we simply compare the blobs.
    1421          320 :             assert_eq!(
    1422              :                 seq, vec,
    1423            0 :                 "Image mismatch for key {key} - keyspace={keyspace:?} lsn={lsn}"
    1424              :             );
    1425              :         }
    1426          320 :     }
    1427              : 
    1428              :     /// Get last or prev record separately. Same as get_last_record_rlsn().last/prev.
    1429       276802 :     pub(crate) fn get_last_record_lsn(&self) -> Lsn {
    1430       276802 :         self.last_record_lsn.load().last
    1431       276802 :     }
    1432              : 
    1433            0 :     pub(crate) fn get_prev_record_lsn(&self) -> Lsn {
    1434            0 :         self.last_record_lsn.load().prev
    1435            0 :     }
    1436              : 
    1437              :     /// Atomically get both last and prev.
    1438          226 :     pub(crate) fn get_last_record_rlsn(&self) -> RecordLsn {
    1439          226 :         self.last_record_lsn.load()
    1440          226 :     }
    1441              : 
    1442              :     /// Subscribe to callers of wait_lsn(). The value of the channel is None if there are no
    1443              :     /// wait_lsn() calls in progress, and Some(Lsn) if there is an active waiter for wait_lsn().
    1444            0 :     pub(crate) fn subscribe_for_wait_lsn_updates(&self) -> watch::Receiver<Option<Lsn>> {
    1445            0 :         self.last_record_lsn.status_receiver()
    1446            0 :     }
    1447              : 
    1448         1143 :     pub(crate) fn get_disk_consistent_lsn(&self) -> Lsn {
    1449         1143 :         self.disk_consistent_lsn.load()
    1450         1143 :     }
    1451              : 
    1452              :     /// remote_consistent_lsn from the perspective of the tenant's current generation,
    1453              :     /// not validated with control plane yet.
    1454              :     /// See [`Self::get_remote_consistent_lsn_visible`].
    1455            0 :     pub(crate) fn get_remote_consistent_lsn_projected(&self) -> Option<Lsn> {
    1456            0 :         self.remote_client.remote_consistent_lsn_projected()
    1457            0 :     }
    1458              : 
    1459              :     /// remote_consistent_lsn which the tenant is guaranteed not to go backward from,
    1460              :     /// i.e. a value of remote_consistent_lsn_projected which has undergone
    1461              :     /// generation validation in the deletion queue.
    1462            0 :     pub(crate) fn get_remote_consistent_lsn_visible(&self) -> Option<Lsn> {
    1463            0 :         self.remote_client.remote_consistent_lsn_visible()
    1464            0 :     }
    1465              : 
    1466              :     /// The sum of the file size of all historic layers in the layer map.
    1467              :     /// This method makes no distinction between local and remote layers.
    1468              :     /// Hence, the result **does not represent local filesystem usage**.
    1469            0 :     pub(crate) async fn layer_size_sum(&self) -> u64 {
    1470            0 :         let guard = self.layers.read().await;
    1471            0 :         let layer_map = guard.layer_map();
    1472            0 :         let mut size = 0;
    1473            0 :         for l in layer_map.iter_historic_layers() {
    1474            0 :             size += l.file_size;
    1475            0 :         }
    1476            0 :         size
    1477            0 :     }
    1478              : 
    1479            0 :     pub(crate) fn resident_physical_size(&self) -> u64 {
    1480            0 :         self.metrics.resident_physical_size_get()
    1481            0 :     }
    1482              : 
    1483            0 :     pub(crate) fn get_directory_metrics(&self) -> [u64; DirectoryKind::KINDS_NUM] {
    1484            0 :         array::from_fn(|idx| self.directory_metrics[idx].load(AtomicOrdering::Relaxed))
    1485            0 :     }
    1486              : 
    1487              :     ///
    1488              :     /// Wait until WAL has been received and processed up to this LSN.
    1489              :     ///
    1490              :     /// You should call this before any of the other get_* or list_* functions. Calling
    1491              :     /// those functions with an LSN that has been processed yet is an error.
    1492              :     ///
    1493       225212 :     pub(crate) async fn wait_lsn(
    1494       225212 :         &self,
    1495       225212 :         lsn: Lsn,
    1496       225212 :         who_is_waiting: WaitLsnWaiter<'_>,
    1497       225212 :         ctx: &RequestContext, /* Prepare for use by cancellation */
    1498       225212 :     ) -> Result<(), WaitLsnError> {
    1499       225212 :         let state = self.current_state();
    1500       225212 :         if self.cancel.is_cancelled() || matches!(state, TimelineState::Stopping) {
    1501            0 :             return Err(WaitLsnError::Shutdown);
    1502       225212 :         } else if !matches!(state, TimelineState::Active) {
    1503            0 :             return Err(WaitLsnError::BadState(state));
    1504       225212 :         }
    1505       225212 : 
    1506       225212 :         if cfg!(debug_assertions) {
    1507       225212 :             match ctx.task_kind() {
    1508              :                 TaskKind::WalReceiverManager
    1509              :                 | TaskKind::WalReceiverConnectionHandler
    1510              :                 | TaskKind::WalReceiverConnectionPoller => {
    1511            0 :                     let is_myself = match who_is_waiting {
    1512            0 :                         WaitLsnWaiter::Timeline(waiter) => Weak::ptr_eq(&waiter.myself, &self.myself),
    1513            0 :                         WaitLsnWaiter::Tenant | WaitLsnWaiter::PageService => unreachable!("tenant or page_service context are not expected to have task kind {:?}", ctx.task_kind()),
    1514              :                     };
    1515            0 :                     if is_myself {
    1516            0 :                         if let Err(current) = self.last_record_lsn.would_wait_for(lsn) {
    1517              :                             // walingest is the only one that can advance last_record_lsn; it should make sure to never reach here
    1518            0 :                             panic!("this timeline's walingest task is calling wait_lsn({lsn}) but we only have last_record_lsn={current}; would deadlock");
    1519            0 :                         }
    1520            0 :                     } else {
    1521            0 :                         // if another  timeline's  is waiting for us, there's no deadlock risk because
    1522            0 :                         // our walreceiver task can make progress independent of theirs
    1523            0 :                     }
    1524              :                 }
    1525       225212 :                 _ => {}
    1526              :             }
    1527            0 :         }
    1528              : 
    1529       225212 :         let _timer = crate::metrics::WAIT_LSN_TIME.start_timer();
    1530       225212 : 
    1531       225212 :         match self
    1532       225212 :             .last_record_lsn
    1533       225212 :             .wait_for_timeout(lsn, self.conf.wait_lsn_timeout)
    1534            0 :             .await
    1535              :         {
    1536       225212 :             Ok(()) => Ok(()),
    1537            0 :             Err(e) => {
    1538            0 :                 use utils::seqwait::SeqWaitError::*;
    1539            0 :                 match e {
    1540            0 :                     Shutdown => Err(WaitLsnError::Shutdown),
    1541              :                     Timeout => {
    1542              :                         // don't count the time spent waiting for lock below, and also in walreceiver.status(), towards the wait_lsn_time_histo
    1543            0 :                         drop(_timer);
    1544            0 :                         let walreceiver_status = self.walreceiver_status();
    1545            0 :                         Err(WaitLsnError::Timeout(format!(
    1546            0 :                         "Timed out while waiting for WAL record at LSN {} to arrive, last_record_lsn {} disk consistent LSN={}, WalReceiver status: {}",
    1547            0 :                         lsn,
    1548            0 :                         self.get_last_record_lsn(),
    1549            0 :                         self.get_disk_consistent_lsn(),
    1550            0 :                         walreceiver_status,
    1551            0 :                     )))
    1552              :                     }
    1553              :                 }
    1554              :             }
    1555              :         }
    1556       225212 :     }
    1557              : 
    1558            0 :     pub(crate) fn walreceiver_status(&self) -> String {
    1559            0 :         match &*self.walreceiver.lock().unwrap() {
    1560            0 :             None => "stopping or stopped".to_string(),
    1561            0 :             Some(walreceiver) => match walreceiver.status() {
    1562            0 :                 Some(status) => status.to_human_readable_string(),
    1563            0 :                 None => "Not active".to_string(),
    1564              :             },
    1565              :         }
    1566            0 :     }
    1567              : 
    1568              :     /// Check that it is valid to request operations with that lsn.
    1569          230 :     pub(crate) fn check_lsn_is_in_scope(
    1570          230 :         &self,
    1571          230 :         lsn: Lsn,
    1572          230 :         latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
    1573          230 :     ) -> anyhow::Result<()> {
    1574          230 :         ensure!(
    1575          230 :             lsn >= **latest_gc_cutoff_lsn,
    1576            4 :             "LSN {} is earlier than latest GC cutoff {} (we might've already garbage collected needed data)",
    1577            4 :             lsn,
    1578            4 :             **latest_gc_cutoff_lsn,
    1579              :         );
    1580          226 :         Ok(())
    1581          230 :     }
    1582              : 
    1583              :     /// Obtains a temporary lease blocking garbage collection for the given LSN.
    1584              :     ///
    1585              :     /// This function will error if the requesting LSN is less than the `latest_gc_cutoff_lsn` and there is also
    1586              :     /// no existing lease to renew. If there is an existing lease in the map, the lease will be renewed only if
    1587              :     /// the request extends the lease. The returned lease is therefore the maximum between the existing lease and
    1588              :     /// the requesting lease.
    1589           14 :     pub(crate) fn make_lsn_lease(
    1590           14 :         &self,
    1591           14 :         lsn: Lsn,
    1592           14 :         length: Duration,
    1593           14 :         _ctx: &RequestContext,
    1594           14 :     ) -> anyhow::Result<LsnLease> {
    1595           12 :         let lease = {
    1596           14 :             let mut gc_info = self.gc_info.write().unwrap();
    1597           14 : 
    1598           14 :             let valid_until = SystemTime::now() + length;
    1599           14 : 
    1600           14 :             let entry = gc_info.leases.entry(lsn);
    1601              : 
    1602           12 :             let lease = {
    1603           14 :                 if let Entry::Occupied(mut occupied) = entry {
    1604            6 :                     let existing_lease = occupied.get_mut();
    1605            6 :                     if valid_until > existing_lease.valid_until {
    1606            2 :                         existing_lease.valid_until = valid_until;
    1607            2 :                         let dt: DateTime<Utc> = valid_until.into();
    1608            2 :                         info!("lease extended to {}", dt);
    1609              :                     } else {
    1610            4 :                         let dt: DateTime<Utc> = existing_lease.valid_until.into();
    1611            4 :                         info!("existing lease covers greater length, valid until {}", dt);
    1612              :                     }
    1613              : 
    1614            6 :                     existing_lease.clone()
    1615              :                 } else {
    1616              :                     // Reject already GC-ed LSN (lsn < latest_gc_cutoff)
    1617            8 :                     let latest_gc_cutoff_lsn = self.get_latest_gc_cutoff_lsn();
    1618            8 :                     if lsn < *latest_gc_cutoff_lsn {
    1619            2 :                         bail!("tried to request a page version that was garbage collected. requested at {} gc cutoff {}", lsn, *latest_gc_cutoff_lsn);
    1620            6 :                     }
    1621            6 : 
    1622            6 :                     let dt: DateTime<Utc> = valid_until.into();
    1623            6 :                     info!("lease created, valid until {}", dt);
    1624            6 :                     entry.or_insert(LsnLease { valid_until }).clone()
    1625              :                 }
    1626              :             };
    1627              : 
    1628           12 :             lease
    1629           12 :         };
    1630           12 : 
    1631           12 :         Ok(lease)
    1632           14 :     }
    1633              : 
    1634              :     /// Flush to disk all data that was written with the put_* functions
    1635         2148 :     #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
    1636              :     pub(crate) async fn freeze_and_flush(&self) -> Result<(), FlushLayerError> {
    1637              :         self.freeze_and_flush0().await
    1638              :     }
    1639              : 
    1640              :     // This exists to provide a non-span creating version of `freeze_and_flush` we can call without
    1641              :     // polluting the span hierarchy.
    1642         1074 :     pub(crate) async fn freeze_and_flush0(&self) -> Result<(), FlushLayerError> {
    1643         1074 :         let to_lsn = {
    1644              :             // Freeze the current open in-memory layer. It will be written to disk on next
    1645              :             // iteration.
    1646         1074 :             let mut g = self.write_lock.lock().await;
    1647              : 
    1648         1074 :             let to_lsn = self.get_last_record_lsn();
    1649         1074 :             self.freeze_inmem_layer_at(to_lsn, &mut g).await;
    1650         1074 :             to_lsn
    1651         1074 :         };
    1652         1077 :         self.flush_frozen_layers_and_wait(to_lsn).await
    1653         1074 :     }
    1654              : 
    1655              :     // Check if an open ephemeral layer should be closed: this provides
    1656              :     // background enforcement of checkpoint interval if there is no active WAL receiver, to avoid keeping
    1657              :     // an ephemeral layer open forever when idle.  It also freezes layers if the global limit on
    1658              :     // ephemeral layer bytes has been breached.
    1659            0 :     pub(super) async fn maybe_freeze_ephemeral_layer(&self) {
    1660            0 :         let Ok(mut write_guard) = self.write_lock.try_lock() else {
    1661              :             // If the write lock is held, there is an active wal receiver: rolling open layers
    1662              :             // is their responsibility while they hold this lock.
    1663            0 :             return;
    1664              :         };
    1665              : 
    1666            0 :         let Ok(layers_guard) = self.layers.try_read() else {
    1667              :             // Don't block if the layer lock is busy
    1668            0 :             return;
    1669              :         };
    1670              : 
    1671            0 :         let Some(open_layer) = &layers_guard.layer_map().open_layer else {
    1672              :             // If there is no open layer, we have no layer freezing to do.  However, we might need to generate
    1673              :             // some updates to disk_consistent_lsn and remote_consistent_lsn, in case we ingested some WAL regions
    1674              :             // that didn't result in writes to this shard.
    1675              : 
    1676              :             // Must not hold the layers lock while waiting for a flush.
    1677            0 :             drop(layers_guard);
    1678            0 : 
    1679            0 :             let last_record_lsn = self.get_last_record_lsn();
    1680            0 :             let disk_consistent_lsn = self.get_disk_consistent_lsn();
    1681            0 :             if last_record_lsn > disk_consistent_lsn {
    1682              :                 // We have no open layer, but disk_consistent_lsn is behind the last record: this indicates
    1683              :                 // we are a sharded tenant and have skipped some WAL
    1684            0 :                 let last_freeze_ts = *self.last_freeze_ts.read().unwrap();
    1685            0 :                 if last_freeze_ts.elapsed() >= self.get_checkpoint_timeout() {
    1686              :                     // Only do this if have been layer-less longer than get_checkpoint_timeout, so that a shard
    1687              :                     // without any data ingested (yet) doesn't write a remote index as soon as it
    1688              :                     // sees its LSN advance: we only do this if we've been layer-less
    1689              :                     // for some time.
    1690            0 :                     tracing::debug!(
    1691            0 :                         "Advancing disk_consistent_lsn past WAL ingest gap {} -> {}",
    1692              :                         disk_consistent_lsn,
    1693              :                         last_record_lsn
    1694              :                     );
    1695              : 
    1696              :                     // The flush loop will update remote consistent LSN as well as disk consistent LSN.
    1697            0 :                     self.flush_frozen_layers_and_wait(last_record_lsn)
    1698            0 :                         .await
    1699            0 :                         .ok();
    1700            0 :                 }
    1701            0 :             }
    1702              : 
    1703            0 :             return;
    1704              :         };
    1705              : 
    1706            0 :         let Some(current_size) = open_layer.try_len() else {
    1707              :             // Unexpected: since we hold the write guard, nobody else should be writing to this layer, so
    1708              :             // read lock to get size should always succeed.
    1709            0 :             tracing::warn!("Lock conflict while reading size of open layer");
    1710            0 :             return;
    1711              :         };
    1712              : 
    1713            0 :         let current_lsn = self.get_last_record_lsn();
    1714              : 
    1715            0 :         let checkpoint_distance_override = open_layer.tick().await;
    1716              : 
    1717            0 :         if let Some(size_override) = checkpoint_distance_override {
    1718            0 :             if current_size > size_override {
    1719              :                 // This is not harmful, but it only happens in relatively rare cases where
    1720              :                 // time-based checkpoints are not happening fast enough to keep the amount of
    1721              :                 // ephemeral data within configured limits.  It's a sign of stress on the system.
    1722            0 :                 tracing::info!("Early-rolling open layer at size {current_size} (limit {size_override}) due to dirty data pressure");
    1723            0 :             }
    1724            0 :         }
    1725              : 
    1726            0 :         let checkpoint_distance =
    1727            0 :             checkpoint_distance_override.unwrap_or(self.get_checkpoint_distance());
    1728            0 : 
    1729            0 :         if self.should_roll(
    1730            0 :             current_size,
    1731            0 :             current_size,
    1732            0 :             checkpoint_distance,
    1733            0 :             self.get_last_record_lsn(),
    1734            0 :             self.last_freeze_at.load(),
    1735            0 :             open_layer.get_opened_at(),
    1736            0 :         ) {
    1737            0 :             let at_lsn = match open_layer.info() {
    1738            0 :                 InMemoryLayerInfo::Frozen { lsn_start, lsn_end } => {
    1739            0 :                     // We may reach this point if the layer was already frozen by not yet flushed: flushing
    1740            0 :                     // happens asynchronously in the background.
    1741            0 :                     tracing::debug!(
    1742            0 :                         "Not freezing open layer, it's already frozen ({lsn_start}..{lsn_end})"
    1743              :                     );
    1744            0 :                     None
    1745              :                 }
    1746              :                 InMemoryLayerInfo::Open { .. } => {
    1747              :                     // Upgrade to a write lock and freeze the layer
    1748            0 :                     drop(layers_guard);
    1749            0 :                     let mut layers_guard = self.layers.write().await;
    1750            0 :                     let froze = layers_guard
    1751            0 :                         .try_freeze_in_memory_layer(
    1752            0 :                             current_lsn,
    1753            0 :                             &self.last_freeze_at,
    1754            0 :                             &mut write_guard,
    1755            0 :                         )
    1756            0 :                         .await;
    1757            0 :                     Some(current_lsn).filter(|_| froze)
    1758              :                 }
    1759              :             };
    1760            0 :             if let Some(lsn) = at_lsn {
    1761            0 :                 let res: Result<u64, _> = self.flush_frozen_layers(lsn);
    1762            0 :                 if let Err(e) = res {
    1763            0 :                     tracing::info!("failed to flush frozen layer after background freeze: {e:#}");
    1764            0 :                 }
    1765            0 :             }
    1766            0 :         }
    1767            0 :     }
    1768              : 
    1769              :     /// Outermost timeline compaction operation; downloads needed layers. Returns whether we have pending
    1770              :     /// compaction tasks.
    1771          364 :     pub(crate) async fn compact(
    1772          364 :         self: &Arc<Self>,
    1773          364 :         cancel: &CancellationToken,
    1774          364 :         flags: EnumSet<CompactFlags>,
    1775          364 :         ctx: &RequestContext,
    1776          364 :     ) -> Result<bool, CompactionError> {
    1777          364 :         // most likely the cancellation token is from background task, but in tests it could be the
    1778          364 :         // request task as well.
    1779          364 : 
    1780          364 :         let prepare = async move {
    1781          364 :             let guard = self.compaction_lock.lock().await;
    1782              : 
    1783          364 :             let permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
    1784          364 :                 BackgroundLoopKind::Compaction,
    1785          364 :                 ctx,
    1786          364 :             )
    1787            0 :             .await;
    1788              : 
    1789          364 :             (guard, permit)
    1790          364 :         };
    1791              : 
    1792              :         // this wait probably never needs any "long time spent" logging, because we already nag if
    1793              :         // compaction task goes over it's period (20s) which is quite often in production.
    1794          364 :         let (_guard, _permit) = tokio::select! {
    1795              :             tuple = prepare => { tuple },
    1796              :             _ = self.cancel.cancelled() => return Ok(false),
    1797              :             _ = cancel.cancelled() => return Ok(false),
    1798              :         };
    1799              : 
    1800          364 :         let last_record_lsn = self.get_last_record_lsn();
    1801          364 : 
    1802          364 :         // Last record Lsn could be zero in case the timeline was just created
    1803          364 :         if !last_record_lsn.is_valid() {
    1804            0 :             warn!("Skipping compaction for potentially just initialized timeline, it has invalid last record lsn: {last_record_lsn}");
    1805            0 :             return Ok(false);
    1806          364 :         }
    1807          364 : 
    1808          364 :         match self.get_compaction_algorithm_settings().kind {
    1809              :             CompactionAlgorithm::Tiered => {
    1810            0 :                 self.compact_tiered(cancel, ctx).await?;
    1811            0 :                 Ok(false)
    1812              :             }
    1813        76988 :             CompactionAlgorithm::Legacy => self.compact_legacy(cancel, flags, ctx).await,
    1814              :         }
    1815          364 :     }
    1816              : 
    1817              :     /// Mutate the timeline with a [`TimelineWriter`].
    1818      5133160 :     pub(crate) async fn writer(&self) -> TimelineWriter<'_> {
    1819      5133160 :         TimelineWriter {
    1820      5133160 :             tl: self,
    1821      5133160 :             write_guard: self.write_lock.lock().await,
    1822              :         }
    1823      5133160 :     }
    1824              : 
    1825            0 :     pub(crate) fn activate(
    1826            0 :         self: &Arc<Self>,
    1827            0 :         parent: Arc<crate::tenant::Tenant>,
    1828            0 :         broker_client: BrokerClientChannel,
    1829            0 :         background_jobs_can_start: Option<&completion::Barrier>,
    1830            0 :         ctx: &RequestContext,
    1831            0 :     ) {
    1832            0 :         if self.tenant_shard_id.is_shard_zero() {
    1833            0 :             // Logical size is only maintained accurately on shard zero.
    1834            0 :             self.spawn_initial_logical_size_computation_task(ctx);
    1835            0 :         }
    1836            0 :         self.launch_wal_receiver(ctx, broker_client);
    1837            0 :         self.set_state(TimelineState::Active);
    1838            0 :         self.launch_eviction_task(parent, background_jobs_can_start);
    1839            0 :     }
    1840              : 
    1841              :     /// After this function returns, there are no timeline-scoped tasks are left running.
    1842              :     ///
    1843              :     /// The preferred pattern for is:
    1844              :     /// - in any spawned tasks, keep Timeline::guard open + Timeline::cancel / child token
    1845              :     /// - if early shutdown (not just cancellation) of a sub-tree of tasks is required,
    1846              :     ///   go the extra mile and keep track of JoinHandles
    1847              :     /// - Keep track of JoinHandles using a passed-down `Arc<Mutex<Option<JoinSet>>>` or similar,
    1848              :     ///   instead of spawning directly on a runtime. It is a more composable / testable pattern.
    1849              :     ///
    1850              :     /// For legacy reasons, we still have multiple tasks spawned using
    1851              :     /// `task_mgr::spawn(X, Some(tenant_id), Some(timeline_id))`.
    1852              :     /// We refer to these as "timeline-scoped task_mgr tasks".
    1853              :     /// Some of these tasks are already sensitive to Timeline::cancel while others are
    1854              :     /// not sensitive to Timeline::cancel and instead respect [`task_mgr::shutdown_token`]
    1855              :     /// or [`task_mgr::shutdown_watcher`].
    1856              :     /// We want to gradually convert the code base away from these.
    1857              :     ///
    1858              :     /// Here is an inventory of timeline-scoped task_mgr tasks that are still sensitive to
    1859              :     /// `task_mgr::shutdown_{token,watcher}` (there are also tenant-scoped and global-scoped
    1860              :     /// ones that aren't mentioned here):
    1861              :     /// - [`TaskKind::TimelineDeletionWorker`]
    1862              :     ///    - NB: also used for tenant deletion
    1863              :     /// - [`TaskKind::RemoteUploadTask`]`
    1864              :     /// - [`TaskKind::InitialLogicalSizeCalculation`]
    1865              :     /// - [`TaskKind::DownloadAllRemoteLayers`] (can we get rid of it?)
    1866              :     // Inventory of timeline-scoped task_mgr tasks that use spawn but aren't sensitive:
    1867              :     /// - [`TaskKind::Eviction`]
    1868              :     /// - [`TaskKind::LayerFlushTask`]
    1869              :     /// - [`TaskKind::OndemandLogicalSizeCalculation`]
    1870              :     /// - [`TaskKind::GarbageCollector`] (immediate_gc is timeline-scoped)
    1871            8 :     pub(crate) async fn shutdown(&self, mode: ShutdownMode) {
    1872            8 :         debug_assert_current_span_has_tenant_and_timeline_id();
    1873              : 
    1874            8 :         let try_freeze_and_flush = match mode {
    1875            6 :             ShutdownMode::FreezeAndFlush => true,
    1876            2 :             ShutdownMode::Hard => false,
    1877              :         };
    1878              : 
    1879              :         // Regardless of whether we're going to try_freeze_and_flush
    1880              :         // or not, stop ingesting any more data. Walreceiver only provides
    1881              :         // cancellation but no "wait until gone", because it uses the Timeline::gate.
    1882              :         // So, only after the self.gate.close() below will we know for sure that
    1883              :         // no walreceiver tasks are left.
    1884              :         // For `try_freeze_and_flush=true`, this means that we might still be ingesting
    1885              :         // data during the call to `self.freeze_and_flush()` below.
    1886              :         // That's not ideal, but, we don't have the concept of a ChildGuard,
    1887              :         // which is what we'd need to properly model early shutdown of the walreceiver
    1888              :         // task sub-tree before the other Timeline task sub-trees.
    1889            8 :         let walreceiver = self.walreceiver.lock().unwrap().take();
    1890            8 :         tracing::debug!(
    1891            0 :             is_some = walreceiver.is_some(),
    1892            0 :             "Waiting for WalReceiverManager..."
    1893              :         );
    1894            8 :         if let Some(walreceiver) = walreceiver {
    1895            0 :             walreceiver.cancel();
    1896            8 :         }
    1897              :         // ... and inform any waiters for newer LSNs that there won't be any.
    1898            8 :         self.last_record_lsn.shutdown();
    1899            8 : 
    1900            8 :         if try_freeze_and_flush {
    1901              :             // we shut down walreceiver above, so, we won't add anything more
    1902              :             // to the InMemoryLayer; freeze it and wait for all frozen layers
    1903              :             // to reach the disk & upload queue, then shut the upload queue and
    1904              :             // wait for it to drain.
    1905            6 :             match self.freeze_and_flush().await {
    1906              :                 Ok(_) => {
    1907              :                     // drain the upload queue
    1908              :                     // if we did not wait for completion here, it might be our shutdown process
    1909              :                     // didn't wait for remote uploads to complete at all, as new tasks can forever
    1910              :                     // be spawned.
    1911              :                     //
    1912              :                     // what is problematic is the shutting down of RemoteTimelineClient, because
    1913              :                     // obviously it does not make sense to stop while we wait for it, but what
    1914              :                     // about corner cases like s3 suddenly hanging up?
    1915            6 :                     self.remote_client.shutdown().await;
    1916              :                 }
    1917            0 :                 Err(e) => {
    1918            0 :                     // Non-fatal.  Shutdown is infallible.  Failures to flush just mean that
    1919            0 :                     // we have some extra WAL replay to do next time the timeline starts.
    1920            0 :                     warn!("failed to freeze and flush: {e:#}");
    1921              :                 }
    1922              :             }
    1923            2 :         }
    1924              : 
    1925              :         // Signal any subscribers to our cancellation token to drop out
    1926            8 :         tracing::debug!("Cancelling CancellationToken");
    1927            8 :         self.cancel.cancel();
    1928            8 : 
    1929            8 :         // Ensure Prevent new page service requests from starting.
    1930            8 :         self.handles.shutdown();
    1931            8 : 
    1932            8 :         // Transition the remote_client into a state where it's only useful for timeline deletion.
    1933            8 :         // (The deletion use case is why we can't just hook up remote_client to Self::cancel).)
    1934            8 :         self.remote_client.stop();
    1935            8 :         // As documented in remote_client.stop()'s doc comment, it's our responsibility
    1936            8 :         // to shut down the upload queue tasks.
    1937            8 :         // TODO: fix that, task management should be encapsulated inside remote_client.
    1938            8 :         task_mgr::shutdown_tasks(
    1939            8 :             Some(TaskKind::RemoteUploadTask),
    1940            8 :             Some(self.tenant_shard_id),
    1941            8 :             Some(self.timeline_id),
    1942            8 :         )
    1943            0 :         .await;
    1944              : 
    1945              :         // TODO: work toward making this a no-op. See this funciton's doc comment for more context.
    1946            8 :         tracing::debug!("Waiting for tasks...");
    1947            8 :         task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), Some(self.timeline_id)).await;
    1948              : 
    1949              :         // Finally wait until any gate-holders are complete.
    1950              :         //
    1951              :         // TODO: once above shutdown_tasks is a no-op, we can close the gate before calling shutdown_tasks
    1952              :         // and use a TBD variant of shutdown_tasks that asserts that there were no tasks left.
    1953            8 :         self.gate.close().await;
    1954              : 
    1955            8 :         self.metrics.shutdown();
    1956            8 :     }
    1957              : 
    1958          398 :     pub(crate) fn set_state(&self, new_state: TimelineState) {
    1959          398 :         match (self.current_state(), new_state) {
    1960          398 :             (equal_state_1, equal_state_2) if equal_state_1 == equal_state_2 => {
    1961            2 :                 info!("Ignoring new state, equal to the existing one: {equal_state_2:?}");
    1962              :             }
    1963            0 :             (st, TimelineState::Loading) => {
    1964            0 :                 error!("ignoring transition from {st:?} into Loading state");
    1965              :             }
    1966            0 :             (TimelineState::Broken { .. }, new_state) => {
    1967            0 :                 error!("Ignoring state update {new_state:?} for broken timeline");
    1968              :             }
    1969              :             (TimelineState::Stopping, TimelineState::Active) => {
    1970            0 :                 error!("Not activating a Stopping timeline");
    1971              :             }
    1972          396 :             (_, new_state) => {
    1973          396 :                 self.state.send_replace(new_state);
    1974          396 :             }
    1975              :         }
    1976          398 :     }
    1977              : 
    1978            2 :     pub(crate) fn set_broken(&self, reason: String) {
    1979            2 :         let backtrace_str: String = format!("{}", std::backtrace::Backtrace::force_capture());
    1980            2 :         let broken_state = TimelineState::Broken {
    1981            2 :             reason,
    1982            2 :             backtrace: backtrace_str,
    1983            2 :         };
    1984            2 :         self.set_state(broken_state);
    1985            2 : 
    1986            2 :         // Although the Broken state is not equivalent to shutdown() (shutdown will be called
    1987            2 :         // later when this tenant is detach or the process shuts down), firing the cancellation token
    1988            2 :         // here avoids the need for other tasks to watch for the Broken state explicitly.
    1989            2 :         self.cancel.cancel();
    1990            2 :     }
    1991              : 
    1992       226947 :     pub(crate) fn current_state(&self) -> TimelineState {
    1993       226947 :         self.state.borrow().clone()
    1994       226947 :     }
    1995              : 
    1996            6 :     pub(crate) fn is_broken(&self) -> bool {
    1997            6 :         matches!(&*self.state.borrow(), TimelineState::Broken { .. })
    1998            6 :     }
    1999              : 
    2000          220 :     pub(crate) fn is_active(&self) -> bool {
    2001          220 :         self.current_state() == TimelineState::Active
    2002          220 :     }
    2003              : 
    2004              :     #[allow(unused)]
    2005            0 :     pub(crate) fn is_archived(&self) -> Option<bool> {
    2006            0 :         self.remote_client.is_archived()
    2007            0 :     }
    2008              : 
    2009         1117 :     pub(crate) fn is_stopping(&self) -> bool {
    2010         1117 :         self.current_state() == TimelineState::Stopping
    2011         1117 :     }
    2012              : 
    2013            0 :     pub(crate) fn subscribe_for_state_updates(&self) -> watch::Receiver<TimelineState> {
    2014            0 :         self.state.subscribe()
    2015            0 :     }
    2016              : 
    2017       225214 :     pub(crate) async fn wait_to_become_active(
    2018       225214 :         &self,
    2019       225214 :         _ctx: &RequestContext, // Prepare for use by cancellation
    2020       225214 :     ) -> Result<(), TimelineState> {
    2021       225214 :         let mut receiver = self.state.subscribe();
    2022       225214 :         loop {
    2023       225214 :             let current_state = receiver.borrow().clone();
    2024       225214 :             match current_state {
    2025              :                 TimelineState::Loading => {
    2026            0 :                     receiver
    2027            0 :                         .changed()
    2028            0 :                         .await
    2029            0 :                         .expect("holding a reference to self");
    2030              :                 }
    2031              :                 TimelineState::Active { .. } => {
    2032       225212 :                     return Ok(());
    2033              :                 }
    2034              :                 TimelineState::Broken { .. } | TimelineState::Stopping => {
    2035              :                     // There's no chance the timeline can transition back into ::Active
    2036            2 :                     return Err(current_state);
    2037              :                 }
    2038              :             }
    2039              :         }
    2040       225214 :     }
    2041              : 
    2042            0 :     pub(crate) async fn layer_map_info(&self, reset: LayerAccessStatsReset) -> LayerMapInfo {
    2043            0 :         let guard = self.layers.read().await;
    2044            0 :         let layer_map = guard.layer_map();
    2045            0 :         let mut in_memory_layers = Vec::with_capacity(layer_map.frozen_layers.len() + 1);
    2046            0 :         if let Some(open_layer) = &layer_map.open_layer {
    2047            0 :             in_memory_layers.push(open_layer.info());
    2048            0 :         }
    2049            0 :         for frozen_layer in &layer_map.frozen_layers {
    2050            0 :             in_memory_layers.push(frozen_layer.info());
    2051            0 :         }
    2052              : 
    2053            0 :         let mut historic_layers = Vec::new();
    2054            0 :         for historic_layer in layer_map.iter_historic_layers() {
    2055            0 :             let historic_layer = guard.get_from_desc(&historic_layer);
    2056            0 :             historic_layers.push(historic_layer.info(reset));
    2057            0 :         }
    2058              : 
    2059            0 :         LayerMapInfo {
    2060            0 :             in_memory_layers,
    2061            0 :             historic_layers,
    2062            0 :         }
    2063            0 :     }
    2064              : 
    2065            0 :     #[instrument(skip_all, fields(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))]
    2066              :     pub(crate) async fn download_layer(
    2067              :         &self,
    2068              :         layer_file_name: &LayerName,
    2069              :     ) -> anyhow::Result<Option<bool>> {
    2070              :         let Some(layer) = self.find_layer(layer_file_name).await else {
    2071              :             return Ok(None);
    2072              :         };
    2073              : 
    2074              :         layer.download().await?;
    2075              : 
    2076              :         Ok(Some(true))
    2077              :     }
    2078              : 
    2079              :     /// Evict just one layer.
    2080              :     ///
    2081              :     /// Returns `Ok(None)` in the case where the layer could not be found by its `layer_file_name`.
    2082            0 :     pub(crate) async fn evict_layer(
    2083            0 :         &self,
    2084            0 :         layer_file_name: &LayerName,
    2085            0 :     ) -> anyhow::Result<Option<bool>> {
    2086            0 :         let _gate = self
    2087            0 :             .gate
    2088            0 :             .enter()
    2089            0 :             .map_err(|_| anyhow::anyhow!("Shutting down"))?;
    2090              : 
    2091            0 :         let Some(local_layer) = self.find_layer(layer_file_name).await else {
    2092            0 :             return Ok(None);
    2093              :         };
    2094              : 
    2095              :         // curl has this by default
    2096            0 :         let timeout = std::time::Duration::from_secs(120);
    2097            0 : 
    2098            0 :         match local_layer.evict_and_wait(timeout).await {
    2099            0 :             Ok(()) => Ok(Some(true)),
    2100            0 :             Err(EvictionError::NotFound) => Ok(Some(false)),
    2101            0 :             Err(EvictionError::Downloaded) => Ok(Some(false)),
    2102            0 :             Err(EvictionError::Timeout) => Ok(Some(false)),
    2103              :         }
    2104            0 :     }
    2105              : 
    2106      4803026 :     fn should_roll(
    2107      4803026 :         &self,
    2108      4803026 :         layer_size: u64,
    2109      4803026 :         projected_layer_size: u64,
    2110      4803026 :         checkpoint_distance: u64,
    2111      4803026 :         projected_lsn: Lsn,
    2112      4803026 :         last_freeze_at: Lsn,
    2113      4803026 :         opened_at: Instant,
    2114      4803026 :     ) -> bool {
    2115      4803026 :         let distance = projected_lsn.widening_sub(last_freeze_at);
    2116      4803026 : 
    2117      4803026 :         // Rolling the open layer can be triggered by:
    2118      4803026 :         // 1. The distance from the last LSN we rolled at. This bounds the amount of WAL that
    2119      4803026 :         //    the safekeepers need to store.  For sharded tenants, we multiply by shard count to
    2120      4803026 :         //    account for how writes are distributed across shards: we expect each node to consume
    2121      4803026 :         //    1/count of the LSN on average.
    2122      4803026 :         // 2. The size of the currently open layer.
    2123      4803026 :         // 3. The time since the last roll. It helps safekeepers to regard pageserver as caught
    2124      4803026 :         //    up and suspend activity.
    2125      4803026 :         if distance >= checkpoint_distance as i128 * self.shard_identity.count.count() as i128 {
    2126            0 :             info!(
    2127            0 :                 "Will roll layer at {} with layer size {} due to LSN distance ({})",
    2128              :                 projected_lsn, layer_size, distance
    2129              :             );
    2130              : 
    2131            0 :             true
    2132      4803026 :         } else if projected_layer_size >= checkpoint_distance {
    2133           80 :             info!(
    2134            0 :                 "Will roll layer at {} with layer size {} due to layer size ({})",
    2135              :                 projected_lsn, layer_size, projected_layer_size
    2136              :             );
    2137              : 
    2138           80 :             true
    2139      4802946 :         } else if distance > 0 && opened_at.elapsed() >= self.get_checkpoint_timeout() {
    2140            0 :             info!(
    2141            0 :                 "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})",
    2142            0 :                 projected_lsn,
    2143            0 :                 layer_size,
    2144            0 :                 opened_at.elapsed()
    2145              :             );
    2146              : 
    2147            0 :             true
    2148              :         } else {
    2149      4802946 :             false
    2150              :         }
    2151      4803026 :     }
    2152              : }
    2153              : 
    2154              : /// Number of times we will compute partition within a checkpoint distance.
    2155              : const REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE: u64 = 10;
    2156              : 
    2157              : // Private functions
    2158              : impl Timeline {
    2159           12 :     pub(crate) fn get_lsn_lease_length(&self) -> Duration {
    2160           12 :         let tenant_conf = self.tenant_conf.load();
    2161           12 :         tenant_conf
    2162           12 :             .tenant_conf
    2163           12 :             .lsn_lease_length
    2164           12 :             .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
    2165           12 :     }
    2166              : 
    2167              :     // TODO(yuchen): remove unused flag after implementing https://github.com/neondatabase/neon/issues/8072
    2168              :     #[allow(unused)]
    2169            0 :     pub(crate) fn get_lsn_lease_length_for_ts(&self) -> Duration {
    2170            0 :         let tenant_conf = self.tenant_conf.load();
    2171            0 :         tenant_conf
    2172            0 :             .tenant_conf
    2173            0 :             .lsn_lease_length_for_ts
    2174            0 :             .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length_for_ts)
    2175            0 :     }
    2176              : 
    2177          216 :     pub(crate) fn get_switch_aux_file_policy(&self) -> AuxFilePolicy {
    2178          216 :         let tenant_conf = self.tenant_conf.load();
    2179          216 :         tenant_conf
    2180          216 :             .tenant_conf
    2181          216 :             .switch_aux_file_policy
    2182          216 :             .unwrap_or(self.conf.default_tenant_conf.switch_aux_file_policy)
    2183          216 :     }
    2184              : 
    2185            0 :     pub(crate) fn get_lazy_slru_download(&self) -> bool {
    2186            0 :         let tenant_conf = self.tenant_conf.load();
    2187            0 :         tenant_conf
    2188            0 :             .tenant_conf
    2189            0 :             .lazy_slru_download
    2190            0 :             .unwrap_or(self.conf.default_tenant_conf.lazy_slru_download)
    2191            0 :     }
    2192              : 
    2193      4804418 :     fn get_checkpoint_distance(&self) -> u64 {
    2194      4804418 :         let tenant_conf = self.tenant_conf.load();
    2195      4804418 :         tenant_conf
    2196      4804418 :             .tenant_conf
    2197      4804418 :             .checkpoint_distance
    2198      4804418 :             .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
    2199      4804418 :     }
    2200              : 
    2201      4802946 :     fn get_checkpoint_timeout(&self) -> Duration {
    2202      4802946 :         let tenant_conf = self.tenant_conf.load();
    2203      4802946 :         tenant_conf
    2204      4802946 :             .tenant_conf
    2205      4802946 :             .checkpoint_timeout
    2206      4802946 :             .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
    2207      4802946 :     }
    2208              : 
    2209          522 :     fn get_compaction_target_size(&self) -> u64 {
    2210          522 :         let tenant_conf = self.tenant_conf.load();
    2211          522 :         tenant_conf
    2212          522 :             .tenant_conf
    2213          522 :             .compaction_target_size
    2214          522 :             .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
    2215          522 :     }
    2216              : 
    2217          392 :     fn get_compaction_threshold(&self) -> usize {
    2218          392 :         let tenant_conf = self.tenant_conf.load();
    2219          392 :         tenant_conf
    2220          392 :             .tenant_conf
    2221          392 :             .compaction_threshold
    2222          392 :             .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
    2223          392 :     }
    2224              : 
    2225           14 :     fn get_image_creation_threshold(&self) -> usize {
    2226           14 :         let tenant_conf = self.tenant_conf.load();
    2227           14 :         tenant_conf
    2228           14 :             .tenant_conf
    2229           14 :             .image_creation_threshold
    2230           14 :             .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
    2231           14 :     }
    2232              : 
    2233          364 :     fn get_compaction_algorithm_settings(&self) -> CompactionAlgorithmSettings {
    2234          364 :         let tenant_conf = &self.tenant_conf.load();
    2235          364 :         tenant_conf
    2236          364 :             .tenant_conf
    2237          364 :             .compaction_algorithm
    2238          364 :             .as_ref()
    2239          364 :             .unwrap_or(&self.conf.default_tenant_conf.compaction_algorithm)
    2240          364 :             .clone()
    2241          364 :     }
    2242              : 
    2243            0 :     fn get_eviction_policy(&self) -> EvictionPolicy {
    2244            0 :         let tenant_conf = self.tenant_conf.load();
    2245            0 :         tenant_conf
    2246            0 :             .tenant_conf
    2247            0 :             .eviction_policy
    2248            0 :             .unwrap_or(self.conf.default_tenant_conf.eviction_policy)
    2249            0 :     }
    2250              : 
    2251          406 :     fn get_evictions_low_residence_duration_metric_threshold(
    2252          406 :         tenant_conf: &TenantConfOpt,
    2253          406 :         default_tenant_conf: &TenantConf,
    2254          406 :     ) -> Duration {
    2255          406 :         tenant_conf
    2256          406 :             .evictions_low_residence_duration_metric_threshold
    2257          406 :             .unwrap_or(default_tenant_conf.evictions_low_residence_duration_metric_threshold)
    2258          406 :     }
    2259              : 
    2260          522 :     fn get_image_layer_creation_check_threshold(&self) -> u8 {
    2261          522 :         let tenant_conf = self.tenant_conf.load();
    2262          522 :         tenant_conf
    2263          522 :             .tenant_conf
    2264          522 :             .image_layer_creation_check_threshold
    2265          522 :             .unwrap_or(
    2266          522 :                 self.conf
    2267          522 :                     .default_tenant_conf
    2268          522 :                     .image_layer_creation_check_threshold,
    2269          522 :             )
    2270          522 :     }
    2271              : 
    2272            8 :     pub(super) fn tenant_conf_updated(&self, new_conf: &TenantConfOpt) {
    2273            8 :         // NB: Most tenant conf options are read by background loops, so,
    2274            8 :         // changes will automatically be picked up.
    2275            8 : 
    2276            8 :         // The threshold is embedded in the metric. So, we need to update it.
    2277            8 :         {
    2278            8 :             let new_threshold = Self::get_evictions_low_residence_duration_metric_threshold(
    2279            8 :                 new_conf,
    2280            8 :                 &self.conf.default_tenant_conf,
    2281            8 :             );
    2282            8 : 
    2283            8 :             let tenant_id_str = self.tenant_shard_id.tenant_id.to_string();
    2284            8 :             let shard_id_str = format!("{}", self.tenant_shard_id.shard_slug());
    2285            8 : 
    2286            8 :             let timeline_id_str = self.timeline_id.to_string();
    2287            8 :             self.metrics
    2288            8 :                 .evictions_with_low_residence_duration
    2289            8 :                 .write()
    2290            8 :                 .unwrap()
    2291            8 :                 .change_threshold(
    2292            8 :                     &tenant_id_str,
    2293            8 :                     &shard_id_str,
    2294            8 :                     &timeline_id_str,
    2295            8 :                     new_threshold,
    2296            8 :                 );
    2297            8 :         }
    2298            8 :     }
    2299              : 
    2300              :     /// Open a Timeline handle.
    2301              :     ///
    2302              :     /// Loads the metadata for the timeline into memory, but not the layer map.
    2303              :     #[allow(clippy::too_many_arguments)]
    2304          398 :     pub(super) fn new(
    2305          398 :         conf: &'static PageServerConf,
    2306          398 :         tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
    2307          398 :         metadata: &TimelineMetadata,
    2308          398 :         ancestor: Option<Arc<Timeline>>,
    2309          398 :         timeline_id: TimelineId,
    2310          398 :         tenant_shard_id: TenantShardId,
    2311          398 :         generation: Generation,
    2312          398 :         shard_identity: ShardIdentity,
    2313          398 :         walredo_mgr: Option<Arc<super::WalRedoManager>>,
    2314          398 :         resources: TimelineResources,
    2315          398 :         pg_version: u32,
    2316          398 :         state: TimelineState,
    2317          398 :         aux_file_policy: Option<AuxFilePolicy>,
    2318          398 :         cancel: CancellationToken,
    2319          398 :     ) -> Arc<Self> {
    2320          398 :         let disk_consistent_lsn = metadata.disk_consistent_lsn();
    2321          398 :         let (state, _) = watch::channel(state);
    2322          398 : 
    2323          398 :         let (layer_flush_start_tx, _) = tokio::sync::watch::channel((0, disk_consistent_lsn));
    2324          398 :         let (layer_flush_done_tx, _) = tokio::sync::watch::channel((0, Ok(())));
    2325          398 : 
    2326          398 :         let evictions_low_residence_duration_metric_threshold = {
    2327          398 :             let loaded_tenant_conf = tenant_conf.load();
    2328          398 :             Self::get_evictions_low_residence_duration_metric_threshold(
    2329          398 :                 &loaded_tenant_conf.tenant_conf,
    2330          398 :                 &conf.default_tenant_conf,
    2331          398 :             )
    2332              :         };
    2333              : 
    2334          398 :         if let Some(ancestor) = &ancestor {
    2335          228 :             let mut ancestor_gc_info = ancestor.gc_info.write().unwrap();
    2336          228 :             ancestor_gc_info.insert_child(timeline_id, metadata.ancestor_lsn());
    2337          228 :         }
    2338              : 
    2339          398 :         Arc::new_cyclic(|myself| {
    2340          398 :             let metrics = TimelineMetrics::new(
    2341          398 :                 &tenant_shard_id,
    2342          398 :                 &timeline_id,
    2343          398 :                 crate::metrics::EvictionsWithLowResidenceDurationBuilder::new(
    2344          398 :                     "mtime",
    2345          398 :                     evictions_low_residence_duration_metric_threshold,
    2346          398 :                 ),
    2347          398 :             );
    2348          398 :             let aux_file_metrics = metrics.aux_file_size_gauge.clone();
    2349              : 
    2350          398 :             let mut result = Timeline {
    2351          398 :                 conf,
    2352          398 :                 tenant_conf,
    2353          398 :                 myself: myself.clone(),
    2354          398 :                 timeline_id,
    2355          398 :                 tenant_shard_id,
    2356          398 :                 generation,
    2357          398 :                 shard_identity,
    2358          398 :                 pg_version,
    2359          398 :                 layers: Default::default(),
    2360          398 : 
    2361          398 :                 walredo_mgr,
    2362          398 :                 walreceiver: Mutex::new(None),
    2363          398 : 
    2364          398 :                 remote_client: Arc::new(resources.remote_client),
    2365          398 : 
    2366          398 :                 // initialize in-memory 'last_record_lsn' from 'disk_consistent_lsn'.
    2367          398 :                 last_record_lsn: SeqWait::new(RecordLsn {
    2368          398 :                     last: disk_consistent_lsn,
    2369          398 :                     prev: metadata.prev_record_lsn().unwrap_or(Lsn(0)),
    2370          398 :                 }),
    2371          398 :                 disk_consistent_lsn: AtomicLsn::new(disk_consistent_lsn.0),
    2372          398 : 
    2373          398 :                 last_freeze_at: AtomicLsn::new(disk_consistent_lsn.0),
    2374          398 :                 last_freeze_ts: RwLock::new(Instant::now()),
    2375          398 : 
    2376          398 :                 loaded_at: (disk_consistent_lsn, SystemTime::now()),
    2377          398 : 
    2378          398 :                 ancestor_timeline: ancestor,
    2379          398 :                 ancestor_lsn: metadata.ancestor_lsn(),
    2380          398 : 
    2381          398 :                 metrics,
    2382          398 : 
    2383          398 :                 query_metrics: crate::metrics::SmgrQueryTimePerTimeline::new(
    2384          398 :                     &tenant_shard_id,
    2385          398 :                     &timeline_id,
    2386          398 :                 ),
    2387          398 : 
    2388         2786 :                 directory_metrics: array::from_fn(|_| AtomicU64::new(0)),
    2389          398 : 
    2390          398 :                 flush_loop_state: Mutex::new(FlushLoopState::NotStarted),
    2391          398 : 
    2392          398 :                 layer_flush_start_tx,
    2393          398 :                 layer_flush_done_tx,
    2394          398 : 
    2395          398 :                 write_lock: tokio::sync::Mutex::new(None),
    2396          398 : 
    2397          398 :                 gc_info: std::sync::RwLock::new(GcInfo::default()),
    2398          398 : 
    2399          398 :                 latest_gc_cutoff_lsn: Rcu::new(metadata.latest_gc_cutoff_lsn()),
    2400          398 :                 initdb_lsn: metadata.initdb_lsn(),
    2401          398 : 
    2402          398 :                 current_logical_size: if disk_consistent_lsn.is_valid() {
    2403              :                     // we're creating timeline data with some layer files existing locally,
    2404              :                     // need to recalculate timeline's logical size based on data in the layers.
    2405          232 :                     LogicalSize::deferred_initial(disk_consistent_lsn)
    2406              :                 } else {
    2407              :                     // we're creating timeline data without any layers existing locally,
    2408              :                     // initial logical size is 0.
    2409          166 :                     LogicalSize::empty_initial()
    2410              :                 },
    2411          398 :                 partitioning: tokio::sync::Mutex::new((
    2412          398 :                     (KeyPartitioning::new(), KeyPartitioning::new().into_sparse()),
    2413          398 :                     Lsn(0),
    2414          398 :                 )),
    2415          398 :                 repartition_threshold: 0,
    2416          398 :                 last_image_layer_creation_check_at: AtomicLsn::new(0),
    2417          398 :                 last_image_layer_creation_check_instant: Mutex::new(None),
    2418          398 : 
    2419          398 :                 last_received_wal: Mutex::new(None),
    2420          398 :                 rel_size_cache: RwLock::new(RelSizeCache {
    2421          398 :                     complete_as_of: disk_consistent_lsn,
    2422          398 :                     map: HashMap::new(),
    2423          398 :                 }),
    2424          398 : 
    2425          398 :                 download_all_remote_layers_task_info: RwLock::new(None),
    2426          398 : 
    2427          398 :                 state,
    2428          398 : 
    2429          398 :                 eviction_task_timeline_state: tokio::sync::Mutex::new(
    2430          398 :                     EvictionTaskTimelineState::default(),
    2431          398 :                 ),
    2432          398 :                 delete_progress: Arc::new(tokio::sync::Mutex::new(DeleteTimelineFlow::default())),
    2433          398 : 
    2434          398 :                 cancel,
    2435          398 :                 gate: Gate::default(),
    2436          398 : 
    2437          398 :                 compaction_lock: tokio::sync::Mutex::default(),
    2438          398 :                 gc_lock: tokio::sync::Mutex::default(),
    2439          398 : 
    2440          398 :                 standby_horizon: AtomicLsn::new(0),
    2441          398 : 
    2442          398 :                 timeline_get_throttle: resources.timeline_get_throttle,
    2443          398 : 
    2444          398 :                 aux_files: tokio::sync::Mutex::new(AuxFilesState {
    2445          398 :                     dir: None,
    2446          398 :                     n_deltas: 0,
    2447          398 :                 }),
    2448          398 : 
    2449          398 :                 aux_file_size_estimator: AuxFileSizeEstimator::new(aux_file_metrics),
    2450          398 : 
    2451          398 :                 last_aux_file_policy: AtomicAuxFilePolicy::new(aux_file_policy),
    2452          398 : 
    2453          398 :                 #[cfg(test)]
    2454          398 :                 extra_test_dense_keyspace: ArcSwap::new(Arc::new(KeySpace::default())),
    2455          398 : 
    2456          398 :                 l0_flush_global_state: resources.l0_flush_global_state,
    2457          398 : 
    2458          398 :                 handles: Default::default(),
    2459          398 :             };
    2460          398 :             result.repartition_threshold =
    2461          398 :                 result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE;
    2462          398 : 
    2463          398 :             result
    2464          398 :                 .metrics
    2465          398 :                 .last_record_gauge
    2466          398 :                 .set(disk_consistent_lsn.0 as i64);
    2467          398 :             result
    2468          398 :         })
    2469          398 :     }
    2470              : 
    2471          550 :     pub(super) fn maybe_spawn_flush_loop(self: &Arc<Self>) {
    2472          550 :         let Ok(guard) = self.gate.enter() else {
    2473            0 :             info!("cannot start flush loop when the timeline gate has already been closed");
    2474            0 :             return;
    2475              :         };
    2476          550 :         let mut flush_loop_state = self.flush_loop_state.lock().unwrap();
    2477          550 :         match *flush_loop_state {
    2478          392 :             FlushLoopState::NotStarted => (),
    2479              :             FlushLoopState::Running { .. } => {
    2480          158 :                 info!(
    2481            0 :                     "skipping attempt to start flush_loop twice {}/{}",
    2482            0 :                     self.tenant_shard_id, self.timeline_id
    2483              :                 );
    2484          158 :                 return;
    2485              :             }
    2486              :             FlushLoopState::Exited => {
    2487            0 :                 warn!(
    2488            0 :                     "ignoring attempt to restart exited flush_loop {}/{}",
    2489            0 :                     self.tenant_shard_id, self.timeline_id
    2490              :                 );
    2491            0 :                 return;
    2492              :             }
    2493              :         }
    2494              : 
    2495          392 :         let layer_flush_start_rx = self.layer_flush_start_tx.subscribe();
    2496          392 :         let self_clone = Arc::clone(self);
    2497          392 : 
    2498          392 :         debug!("spawning flush loop");
    2499          392 :         *flush_loop_state = FlushLoopState::Running {
    2500          392 :             #[cfg(test)]
    2501          392 :             expect_initdb_optimization: false,
    2502          392 :             #[cfg(test)]
    2503          392 :             initdb_optimization_count: 0,
    2504          392 :         };
    2505          392 :         task_mgr::spawn(
    2506          392 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    2507          392 :             task_mgr::TaskKind::LayerFlushTask,
    2508          392 :             Some(self.tenant_shard_id),
    2509          392 :             Some(self.timeline_id),
    2510          392 :             "layer flush task",
    2511          392 :             async move {
    2512          392 :                 let _guard = guard;
    2513          392 :                 let background_ctx = RequestContext::todo_child(TaskKind::LayerFlushTask, DownloadBehavior::Error);
    2514        18004 :                 self_clone.flush_loop(layer_flush_start_rx, &background_ctx).await;
    2515            8 :                 let mut flush_loop_state = self_clone.flush_loop_state.lock().unwrap();
    2516            8 :                 assert!(matches!(*flush_loop_state, FlushLoopState::Running{..}));
    2517            8 :                 *flush_loop_state  = FlushLoopState::Exited;
    2518            8 :                 Ok(())
    2519            8 :             }
    2520          392 :             .instrument(info_span!(parent: None, "layer flush task", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
    2521              :         );
    2522          550 :     }
    2523              : 
    2524              :     /// Creates and starts the wal receiver.
    2525              :     ///
    2526              :     /// This function is expected to be called at most once per Timeline's lifecycle
    2527              :     /// when the timeline is activated.
    2528            0 :     fn launch_wal_receiver(
    2529            0 :         self: &Arc<Self>,
    2530            0 :         ctx: &RequestContext,
    2531            0 :         broker_client: BrokerClientChannel,
    2532            0 :     ) {
    2533            0 :         info!(
    2534            0 :             "launching WAL receiver for timeline {} of tenant {}",
    2535            0 :             self.timeline_id, self.tenant_shard_id
    2536              :         );
    2537              : 
    2538            0 :         let tenant_conf = self.tenant_conf.load();
    2539            0 :         let wal_connect_timeout = tenant_conf
    2540            0 :             .tenant_conf
    2541            0 :             .walreceiver_connect_timeout
    2542            0 :             .unwrap_or(self.conf.default_tenant_conf.walreceiver_connect_timeout);
    2543            0 :         let lagging_wal_timeout = tenant_conf
    2544            0 :             .tenant_conf
    2545            0 :             .lagging_wal_timeout
    2546            0 :             .unwrap_or(self.conf.default_tenant_conf.lagging_wal_timeout);
    2547            0 :         let max_lsn_wal_lag = tenant_conf
    2548            0 :             .tenant_conf
    2549            0 :             .max_lsn_wal_lag
    2550            0 :             .unwrap_or(self.conf.default_tenant_conf.max_lsn_wal_lag);
    2551            0 : 
    2552            0 :         let mut guard = self.walreceiver.lock().unwrap();
    2553            0 :         assert!(
    2554            0 :             guard.is_none(),
    2555            0 :             "multiple launches / re-launches of WAL receiver are not supported"
    2556              :         );
    2557            0 :         *guard = Some(WalReceiver::start(
    2558            0 :             Arc::clone(self),
    2559            0 :             WalReceiverConf {
    2560            0 :                 wal_connect_timeout,
    2561            0 :                 lagging_wal_timeout,
    2562            0 :                 max_lsn_wal_lag,
    2563            0 :                 auth_token: crate::config::SAFEKEEPER_AUTH_TOKEN.get().cloned(),
    2564            0 :                 availability_zone: self.conf.availability_zone.clone(),
    2565            0 :                 ingest_batch_size: self.conf.ingest_batch_size,
    2566            0 :             },
    2567            0 :             broker_client,
    2568            0 :             ctx,
    2569            0 :         ));
    2570            0 :     }
    2571              : 
    2572              :     /// Initialize with an empty layer map. Used when creating a new timeline.
    2573          392 :     pub(super) fn init_empty_layer_map(&self, start_lsn: Lsn) {
    2574          392 :         let mut layers = self.layers.try_write().expect(
    2575          392 :             "in the context where we call this function, no other task has access to the object",
    2576          392 :         );
    2577          392 :         layers.initialize_empty(Lsn(start_lsn.0));
    2578          392 :     }
    2579              : 
    2580              :     /// Scan the timeline directory, cleanup, populate the layer map, and schedule uploads for local-only
    2581              :     /// files.
    2582            6 :     pub(super) async fn load_layer_map(
    2583            6 :         &self,
    2584            6 :         disk_consistent_lsn: Lsn,
    2585            6 :         index_part: Option<IndexPart>,
    2586            6 :     ) -> anyhow::Result<()> {
    2587              :         use init::{Decision::*, Discovered, DismissedLayer};
    2588              :         use LayerName::*;
    2589              : 
    2590            6 :         let mut guard = self.layers.write().await;
    2591              : 
    2592            6 :         let timer = self.metrics.load_layer_map_histo.start_timer();
    2593            6 : 
    2594            6 :         // Scan timeline directory and create ImageLayerName and DeltaFilename
    2595            6 :         // structs representing all files on disk
    2596            6 :         let timeline_path = self
    2597            6 :             .conf
    2598            6 :             .timeline_path(&self.tenant_shard_id, &self.timeline_id);
    2599            6 :         let conf = self.conf;
    2600            6 :         let span = tracing::Span::current();
    2601            6 : 
    2602            6 :         // Copy to move into the task we're about to spawn
    2603            6 :         let this = self.myself.upgrade().expect("&self method holds the arc");
    2604              : 
    2605            6 :         let (loaded_layers, needs_cleanup, total_physical_size) = tokio::task::spawn_blocking({
    2606            6 :             move || {
    2607            6 :                 let _g = span.entered();
    2608            6 :                 let discovered = init::scan_timeline_dir(&timeline_path)?;
    2609            6 :                 let mut discovered_layers = Vec::with_capacity(discovered.len());
    2610            6 :                 let mut unrecognized_files = Vec::new();
    2611            6 : 
    2612            6 :                 let mut path = timeline_path;
    2613              : 
    2614           22 :                 for discovered in discovered {
    2615           16 :                     let (name, kind) = match discovered {
    2616           16 :                         Discovered::Layer(layer_file_name, local_metadata) => {
    2617           16 :                             discovered_layers.push((layer_file_name, local_metadata));
    2618           16 :                             continue;
    2619              :                         }
    2620            0 :                         Discovered::IgnoredBackup(path) => {
    2621            0 :                             std::fs::remove_file(path)
    2622            0 :                                 .or_else(fs_ext::ignore_not_found)
    2623            0 :                                 .fatal_err("Removing .old file");
    2624            0 :                             continue;
    2625              :                         }
    2626            0 :                         Discovered::Unknown(file_name) => {
    2627            0 :                             // we will later error if there are any
    2628            0 :                             unrecognized_files.push(file_name);
    2629            0 :                             continue;
    2630              :                         }
    2631            0 :                         Discovered::Ephemeral(name) => (name, "old ephemeral file"),
    2632            0 :                         Discovered::Temporary(name) => (name, "temporary timeline file"),
    2633            0 :                         Discovered::TemporaryDownload(name) => (name, "temporary download"),
    2634              :                     };
    2635            0 :                     path.push(Utf8Path::new(&name));
    2636            0 :                     init::cleanup(&path, kind)?;
    2637            0 :                     path.pop();
    2638              :                 }
    2639              : 
    2640            6 :                 if !unrecognized_files.is_empty() {
    2641              :                     // assume that if there are any there are many many.
    2642            0 :                     let n = unrecognized_files.len();
    2643            0 :                     let first = &unrecognized_files[..n.min(10)];
    2644            0 :                     anyhow::bail!(
    2645            0 :                         "unrecognized files in timeline dir (total {n}), first 10: {first:?}"
    2646            0 :                     );
    2647            6 :                 }
    2648            6 : 
    2649            6 :                 let decided =
    2650            6 :                     init::reconcile(discovered_layers, index_part.as_ref(), disk_consistent_lsn);
    2651            6 : 
    2652            6 :                 let mut loaded_layers = Vec::new();
    2653            6 :                 let mut needs_cleanup = Vec::new();
    2654            6 :                 let mut total_physical_size = 0;
    2655              : 
    2656           22 :                 for (name, decision) in decided {
    2657           16 :                     let decision = match decision {
    2658           16 :                         Ok(decision) => decision,
    2659            0 :                         Err(DismissedLayer::Future { local }) => {
    2660            0 :                             if let Some(local) = local {
    2661            0 :                                 init::cleanup_future_layer(
    2662            0 :                                     &local.local_path,
    2663            0 :                                     &name,
    2664            0 :                                     disk_consistent_lsn,
    2665            0 :                                 )?;
    2666            0 :                             }
    2667            0 :                             needs_cleanup.push(name);
    2668            0 :                             continue;
    2669              :                         }
    2670            0 :                         Err(DismissedLayer::LocalOnly(local)) => {
    2671            0 :                             init::cleanup_local_only_file(&name, &local)?;
    2672              :                             // this file never existed remotely, we will have to do rework
    2673            0 :                             continue;
    2674              :                         }
    2675            0 :                         Err(DismissedLayer::BadMetadata(local)) => {
    2676            0 :                             init::cleanup_local_file_for_remote(&local)?;
    2677              :                             // this file never existed remotely, we will have to do rework
    2678            0 :                             continue;
    2679              :                         }
    2680              :                     };
    2681              : 
    2682           16 :                     match &name {
    2683           12 :                         Delta(d) => assert!(d.lsn_range.end <= disk_consistent_lsn + 1),
    2684            4 :                         Image(i) => assert!(i.lsn <= disk_consistent_lsn),
    2685              :                     }
    2686              : 
    2687           16 :                     tracing::debug!(layer=%name, ?decision, "applied");
    2688              : 
    2689           16 :                     let layer = match decision {
    2690           16 :                         Resident { local, remote } => {
    2691           16 :                             total_physical_size += local.file_size;
    2692           16 :                             Layer::for_resident(conf, &this, local.local_path, name, remote)
    2693           16 :                                 .drop_eviction_guard()
    2694              :                         }
    2695            0 :                         Evicted(remote) => Layer::for_evicted(conf, &this, name, remote),
    2696              :                     };
    2697              : 
    2698           16 :                     loaded_layers.push(layer);
    2699              :                 }
    2700            6 :                 Ok((loaded_layers, needs_cleanup, total_physical_size))
    2701            6 :             }
    2702            6 :         })
    2703            4 :         .await
    2704            6 :         .map_err(anyhow::Error::new)
    2705            6 :         .and_then(|x| x)?;
    2706              : 
    2707            6 :         let num_layers = loaded_layers.len();
    2708            6 : 
    2709            6 :         guard.initialize_local_layers(loaded_layers, disk_consistent_lsn + 1);
    2710            6 : 
    2711            6 :         self.remote_client
    2712            6 :             .schedule_layer_file_deletion(&needs_cleanup)?;
    2713            6 :         self.remote_client
    2714            6 :             .schedule_index_upload_for_file_changes()?;
    2715              :         // This barrier orders above DELETEs before any later operations.
    2716              :         // This is critical because code executing after the barrier might
    2717              :         // create again objects with the same key that we just scheduled for deletion.
    2718              :         // For example, if we just scheduled deletion of an image layer "from the future",
    2719              :         // later compaction might run again and re-create the same image layer.
    2720              :         // "from the future" here means an image layer whose LSN is > IndexPart::disk_consistent_lsn.
    2721              :         // "same" here means same key range and LSN.
    2722              :         //
    2723              :         // Without a barrier between above DELETEs and the re-creation's PUTs,
    2724              :         // the upload queue may execute the PUT first, then the DELETE.
    2725              :         // In our example, we will end up with an IndexPart referencing a non-existent object.
    2726              :         //
    2727              :         // 1. a future image layer is created and uploaded
    2728              :         // 2. ps restart
    2729              :         // 3. the future layer from (1) is deleted during load layer map
    2730              :         // 4. image layer is re-created and uploaded
    2731              :         // 5. deletion queue would like to delete (1) but actually deletes (4)
    2732              :         // 6. delete by name works as expected, but it now deletes the wrong (later) version
    2733              :         //
    2734              :         // See https://github.com/neondatabase/neon/issues/5878
    2735              :         //
    2736              :         // NB: generation numbers naturally protect against this because they disambiguate
    2737              :         //     (1) and (4)
    2738            6 :         self.remote_client.schedule_barrier()?;
    2739              :         // Tenant::create_timeline will wait for these uploads to happen before returning, or
    2740              :         // on retry.
    2741              : 
    2742              :         // Now that we have the full layer map, we may calculate the visibility of layers within it (a global scan)
    2743            6 :         drop(guard); // drop write lock, update_layer_visibility will take a read lock.
    2744            6 :         self.update_layer_visibility().await;
    2745              : 
    2746            6 :         info!(
    2747            0 :             "loaded layer map with {} layers at {}, total physical size: {}",
    2748              :             num_layers, disk_consistent_lsn, total_physical_size
    2749              :         );
    2750              : 
    2751            6 :         timer.stop_and_record();
    2752            6 :         Ok(())
    2753            6 :     }
    2754              : 
    2755              :     /// Retrieve current logical size of the timeline.
    2756              :     ///
    2757              :     /// The size could be lagging behind the actual number, in case
    2758              :     /// the initial size calculation has not been run (gets triggered on the first size access).
    2759              :     ///
    2760              :     /// return size and boolean flag that shows if the size is exact
    2761            0 :     pub(crate) fn get_current_logical_size(
    2762            0 :         self: &Arc<Self>,
    2763            0 :         priority: GetLogicalSizePriority,
    2764            0 :         ctx: &RequestContext,
    2765            0 :     ) -> logical_size::CurrentLogicalSize {
    2766            0 :         if !self.tenant_shard_id.is_shard_zero() {
    2767              :             // Logical size is only accurately maintained on shard zero: when called elsewhere, for example
    2768              :             // when HTTP API is serving a GET for timeline zero, return zero
    2769            0 :             return logical_size::CurrentLogicalSize::Approximate(logical_size::Approximate::zero());
    2770            0 :         }
    2771            0 : 
    2772            0 :         let current_size = self.current_logical_size.current_size();
    2773            0 :         debug!("Current size: {current_size:?}");
    2774              : 
    2775            0 :         match (current_size.accuracy(), priority) {
    2776            0 :             (logical_size::Accuracy::Exact, _) => (), // nothing to do
    2777            0 :             (logical_size::Accuracy::Approximate, GetLogicalSizePriority::Background) => {
    2778            0 :                 // background task will eventually deliver an exact value, we're in no rush
    2779            0 :             }
    2780              :             (logical_size::Accuracy::Approximate, GetLogicalSizePriority::User) => {
    2781              :                 // background task is not ready, but user is asking for it now;
    2782              :                 // => make the background task skip the line
    2783              :                 // (The alternative would be to calculate the size here, but,
    2784              :                 //  it can actually take a long time if the user has a lot of rels.
    2785              :                 //  And we'll inevitable need it again; So, let the background task do the work.)
    2786            0 :                 match self
    2787            0 :                     .current_logical_size
    2788            0 :                     .cancel_wait_for_background_loop_concurrency_limit_semaphore
    2789            0 :                     .get()
    2790              :                 {
    2791            0 :                     Some(cancel) => cancel.cancel(),
    2792              :                     None => {
    2793            0 :                         let state = self.current_state();
    2794            0 :                         if matches!(
    2795            0 :                             state,
    2796              :                             TimelineState::Broken { .. } | TimelineState::Stopping
    2797            0 :                         ) {
    2798            0 : 
    2799            0 :                             // Can happen when timeline detail endpoint is used when deletion is ongoing (or its broken).
    2800            0 :                             // Don't make noise.
    2801            0 :                         } else {
    2802            0 :                             warn!("unexpected: cancel_wait_for_background_loop_concurrency_limit_semaphore not set, priority-boosting of logical size calculation will not work");
    2803            0 :                             debug_assert!(false);
    2804              :                         }
    2805              :                     }
    2806              :                 };
    2807              :             }
    2808              :         }
    2809              : 
    2810            0 :         if let CurrentLogicalSize::Approximate(_) = &current_size {
    2811            0 :             if ctx.task_kind() == TaskKind::WalReceiverConnectionHandler {
    2812            0 :                 let first = self
    2813            0 :                     .current_logical_size
    2814            0 :                     .did_return_approximate_to_walreceiver
    2815            0 :                     .compare_exchange(
    2816            0 :                         false,
    2817            0 :                         true,
    2818            0 :                         AtomicOrdering::Relaxed,
    2819            0 :                         AtomicOrdering::Relaxed,
    2820            0 :                     )
    2821            0 :                     .is_ok();
    2822            0 :                 if first {
    2823            0 :                     crate::metrics::initial_logical_size::TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE.inc();
    2824            0 :                 }
    2825            0 :             }
    2826            0 :         }
    2827              : 
    2828            0 :         current_size
    2829            0 :     }
    2830              : 
    2831            0 :     fn spawn_initial_logical_size_computation_task(self: &Arc<Self>, ctx: &RequestContext) {
    2832            0 :         let Some(initial_part_end) = self.current_logical_size.initial_part_end else {
    2833              :             // nothing to do for freshly created timelines;
    2834            0 :             assert_eq!(
    2835            0 :                 self.current_logical_size.current_size().accuracy(),
    2836            0 :                 logical_size::Accuracy::Exact,
    2837            0 :             );
    2838            0 :             self.current_logical_size.initialized.add_permits(1);
    2839            0 :             return;
    2840              :         };
    2841              : 
    2842            0 :         let cancel_wait_for_background_loop_concurrency_limit_semaphore = CancellationToken::new();
    2843            0 :         let token = cancel_wait_for_background_loop_concurrency_limit_semaphore.clone();
    2844            0 :         self.current_logical_size
    2845            0 :             .cancel_wait_for_background_loop_concurrency_limit_semaphore.set(token)
    2846            0 :             .expect("initial logical size calculation task must be spawned exactly once per Timeline object");
    2847            0 : 
    2848            0 :         let self_clone = Arc::clone(self);
    2849            0 :         let background_ctx = ctx.detached_child(
    2850            0 :             TaskKind::InitialLogicalSizeCalculation,
    2851            0 :             DownloadBehavior::Download,
    2852            0 :         );
    2853            0 :         task_mgr::spawn(
    2854            0 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    2855            0 :             task_mgr::TaskKind::InitialLogicalSizeCalculation,
    2856            0 :             Some(self.tenant_shard_id),
    2857            0 :             Some(self.timeline_id),
    2858            0 :             "initial size calculation",
    2859              :             // NB: don't log errors here, task_mgr will do that.
    2860            0 :             async move {
    2861            0 :                 let cancel = task_mgr::shutdown_token();
    2862            0 :                 self_clone
    2863            0 :                     .initial_logical_size_calculation_task(
    2864            0 :                         initial_part_end,
    2865            0 :                         cancel_wait_for_background_loop_concurrency_limit_semaphore,
    2866            0 :                         cancel,
    2867            0 :                         background_ctx,
    2868            0 :                     )
    2869            0 :                     .await;
    2870            0 :                 Ok(())
    2871            0 :             }
    2872            0 :             .instrument(info_span!(parent: None, "initial_size_calculation", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id)),
    2873              :         );
    2874            0 :     }
    2875              : 
    2876            0 :     async fn initial_logical_size_calculation_task(
    2877            0 :         self: Arc<Self>,
    2878            0 :         initial_part_end: Lsn,
    2879            0 :         skip_concurrency_limiter: CancellationToken,
    2880            0 :         cancel: CancellationToken,
    2881            0 :         background_ctx: RequestContext,
    2882            0 :     ) {
    2883              :         scopeguard::defer! {
    2884              :             // Irrespective of the outcome of this operation, we should unblock anyone waiting for it.
    2885              :             self.current_logical_size.initialized.add_permits(1);
    2886              :         }
    2887              : 
    2888            0 :         let try_once = |attempt: usize| {
    2889            0 :             let background_ctx = &background_ctx;
    2890            0 :             let self_ref = &self;
    2891            0 :             let skip_concurrency_limiter = &skip_concurrency_limiter;
    2892            0 :             async move {
    2893            0 :                 let cancel = task_mgr::shutdown_token();
    2894            0 :                 let wait_for_permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
    2895            0 :                     BackgroundLoopKind::InitialLogicalSizeCalculation,
    2896            0 :                     background_ctx,
    2897            0 :                 );
    2898              : 
    2899              :                 use crate::metrics::initial_logical_size::StartCircumstances;
    2900            0 :                 let (_maybe_permit, circumstances) = tokio::select! {
    2901              :                     permit = wait_for_permit => {
    2902              :                         (Some(permit), StartCircumstances::AfterBackgroundTasksRateLimit)
    2903              :                     }
    2904              :                     _ = self_ref.cancel.cancelled() => {
    2905              :                         return Err(CalculateLogicalSizeError::Cancelled);
    2906              :                     }
    2907              :                     _ = cancel.cancelled() => {
    2908              :                         return Err(CalculateLogicalSizeError::Cancelled);
    2909              :                     },
    2910              :                     () = skip_concurrency_limiter.cancelled() => {
    2911              :                         // Some action that is part of a end user interaction requested logical size
    2912              :                         // => break out of the rate limit
    2913              :                         // TODO: ideally we'd not run on BackgroundRuntime but the requester's runtime;
    2914              :                         // but then again what happens if they cancel; also, we should just be using
    2915              :                         // one runtime across the entire process, so, let's leave this for now.
    2916              :                         (None, StartCircumstances::SkippedConcurrencyLimiter)
    2917              :                     }
    2918              :                 };
    2919              : 
    2920            0 :                 let metrics_guard = if attempt == 1 {
    2921            0 :                     crate::metrics::initial_logical_size::START_CALCULATION.first(circumstances)
    2922              :                 } else {
    2923            0 :                     crate::metrics::initial_logical_size::START_CALCULATION.retry(circumstances)
    2924              :                 };
    2925              : 
    2926            0 :                 let calculated_size = self_ref
    2927            0 :                     .logical_size_calculation_task(
    2928            0 :                         initial_part_end,
    2929            0 :                         LogicalSizeCalculationCause::Initial,
    2930            0 :                         background_ctx,
    2931            0 :                     )
    2932            0 :                     .await?;
    2933              : 
    2934            0 :                 self_ref
    2935            0 :                     .trigger_aux_file_size_computation(initial_part_end, background_ctx)
    2936            0 :                     .await?;
    2937              : 
    2938              :                 // TODO: add aux file size to logical size
    2939              : 
    2940            0 :                 Ok((calculated_size, metrics_guard))
    2941            0 :             }
    2942            0 :         };
    2943              : 
    2944            0 :         let retrying = async {
    2945            0 :             let mut attempt = 0;
    2946            0 :             loop {
    2947            0 :                 attempt += 1;
    2948            0 : 
    2949            0 :                 match try_once(attempt).await {
    2950            0 :                     Ok(res) => return ControlFlow::Continue(res),
    2951            0 :                     Err(CalculateLogicalSizeError::Cancelled) => return ControlFlow::Break(()),
    2952              :                     Err(
    2953            0 :                         e @ (CalculateLogicalSizeError::Decode(_)
    2954            0 :                         | CalculateLogicalSizeError::PageRead(_)),
    2955            0 :                     ) => {
    2956            0 :                         warn!(attempt, "initial size calculation failed: {e:?}");
    2957              :                         // exponential back-off doesn't make sense at these long intervals;
    2958              :                         // use fixed retry interval with generous jitter instead
    2959            0 :                         let sleep_duration = Duration::from_secs(
    2960            0 :                             u64::try_from(
    2961            0 :                                 // 1hour base
    2962            0 :                                 (60_i64 * 60_i64)
    2963            0 :                                     // 10min jitter
    2964            0 :                                     + rand::thread_rng().gen_range(-10 * 60..10 * 60),
    2965            0 :                             )
    2966            0 :                             .expect("10min < 1hour"),
    2967            0 :                         );
    2968            0 :                         tokio::time::sleep(sleep_duration).await;
    2969              :                     }
    2970              :                 }
    2971              :             }
    2972            0 :         };
    2973              : 
    2974            0 :         let (calculated_size, metrics_guard) = tokio::select! {
    2975              :             res = retrying  => {
    2976              :                 match res {
    2977              :                     ControlFlow::Continue(calculated_size) => calculated_size,
    2978              :                     ControlFlow::Break(()) => return,
    2979              :                 }
    2980              :             }
    2981              :             _ = cancel.cancelled() => {
    2982              :                 return;
    2983              :             }
    2984              :         };
    2985              : 
    2986              :         // we cannot query current_logical_size.current_size() to know the current
    2987              :         // *negative* value, only truncated to u64.
    2988            0 :         let added = self
    2989            0 :             .current_logical_size
    2990            0 :             .size_added_after_initial
    2991            0 :             .load(AtomicOrdering::Relaxed);
    2992            0 : 
    2993            0 :         let sum = calculated_size.saturating_add_signed(added);
    2994            0 : 
    2995            0 :         // set the gauge value before it can be set in `update_current_logical_size`.
    2996            0 :         self.metrics.current_logical_size_gauge.set(sum);
    2997            0 : 
    2998            0 :         self.current_logical_size
    2999            0 :             .initial_logical_size
    3000            0 :             .set((calculated_size, metrics_guard.calculation_result_saved()))
    3001            0 :             .ok()
    3002            0 :             .expect("only this task sets it");
    3003            0 :     }
    3004              : 
    3005            0 :     pub(crate) fn spawn_ondemand_logical_size_calculation(
    3006            0 :         self: &Arc<Self>,
    3007            0 :         lsn: Lsn,
    3008            0 :         cause: LogicalSizeCalculationCause,
    3009            0 :         ctx: RequestContext,
    3010            0 :     ) -> oneshot::Receiver<Result<u64, CalculateLogicalSizeError>> {
    3011            0 :         let (sender, receiver) = oneshot::channel();
    3012            0 :         let self_clone = Arc::clone(self);
    3013            0 :         // XXX if our caller loses interest, i.e., ctx is cancelled,
    3014            0 :         // we should stop the size calculation work and return an error.
    3015            0 :         // That would require restructuring this function's API to
    3016            0 :         // return the result directly, instead of a Receiver for the result.
    3017            0 :         let ctx = ctx.detached_child(
    3018            0 :             TaskKind::OndemandLogicalSizeCalculation,
    3019            0 :             DownloadBehavior::Download,
    3020            0 :         );
    3021            0 :         task_mgr::spawn(
    3022            0 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    3023            0 :             task_mgr::TaskKind::OndemandLogicalSizeCalculation,
    3024            0 :             Some(self.tenant_shard_id),
    3025            0 :             Some(self.timeline_id),
    3026            0 :             "ondemand logical size calculation",
    3027            0 :             async move {
    3028            0 :                 let res = self_clone
    3029            0 :                     .logical_size_calculation_task(lsn, cause, &ctx)
    3030            0 :                     .await;
    3031            0 :                 let _ = sender.send(res).ok();
    3032            0 :                 Ok(()) // Receiver is responsible for handling errors
    3033            0 :             }
    3034            0 :             .in_current_span(),
    3035            0 :         );
    3036            0 :         receiver
    3037            0 :     }
    3038              : 
    3039              :     /// # Cancel-Safety
    3040              :     ///
    3041              :     /// This method is cancellation-safe.
    3042            0 :     #[instrument(skip_all)]
    3043              :     async fn logical_size_calculation_task(
    3044              :         self: &Arc<Self>,
    3045              :         lsn: Lsn,
    3046              :         cause: LogicalSizeCalculationCause,
    3047              :         ctx: &RequestContext,
    3048              :     ) -> Result<u64, CalculateLogicalSizeError> {
    3049              :         crate::span::debug_assert_current_span_has_tenant_and_timeline_id();
    3050              :         // We should never be calculating logical sizes on shard !=0, because these shards do not have
    3051              :         // accurate relation sizes, and they do not emit consumption metrics.
    3052              :         debug_assert!(self.tenant_shard_id.is_shard_zero());
    3053              : 
    3054              :         let guard = self
    3055              :             .gate
    3056              :             .enter()
    3057            0 :             .map_err(|_| CalculateLogicalSizeError::Cancelled)?;
    3058              : 
    3059              :         let self_calculation = Arc::clone(self);
    3060              : 
    3061            0 :         let mut calculation = pin!(async {
    3062            0 :             let ctx = ctx.attached_child();
    3063            0 :             self_calculation
    3064            0 :                 .calculate_logical_size(lsn, cause, &guard, &ctx)
    3065            0 :                 .await
    3066            0 :         });
    3067              : 
    3068              :         tokio::select! {
    3069              :             res = &mut calculation => { res }
    3070              :             _ = self.cancel.cancelled() => {
    3071              :                 debug!("cancelling logical size calculation for timeline shutdown");
    3072              :                 calculation.await
    3073              :             }
    3074              :         }
    3075              :     }
    3076              : 
    3077              :     /// Calculate the logical size of the database at the latest LSN.
    3078              :     ///
    3079              :     /// NOTE: counted incrementally, includes ancestors. This can be a slow operation,
    3080              :     /// especially if we need to download remote layers.
    3081              :     ///
    3082              :     /// # Cancel-Safety
    3083              :     ///
    3084              :     /// This method is cancellation-safe.
    3085            0 :     async fn calculate_logical_size(
    3086            0 :         &self,
    3087            0 :         up_to_lsn: Lsn,
    3088            0 :         cause: LogicalSizeCalculationCause,
    3089            0 :         _guard: &GateGuard,
    3090            0 :         ctx: &RequestContext,
    3091            0 :     ) -> Result<u64, CalculateLogicalSizeError> {
    3092            0 :         info!(
    3093            0 :             "Calculating logical size for timeline {} at {}",
    3094              :             self.timeline_id, up_to_lsn
    3095              :         );
    3096              : 
    3097              :         pausable_failpoint!("timeline-calculate-logical-size-pause");
    3098              : 
    3099              :         // See if we've already done the work for initial size calculation.
    3100              :         // This is a short-cut for timelines that are mostly unused.
    3101            0 :         if let Some(size) = self.current_logical_size.initialized_size(up_to_lsn) {
    3102            0 :             return Ok(size);
    3103            0 :         }
    3104            0 :         let storage_time_metrics = match cause {
    3105              :             LogicalSizeCalculationCause::Initial
    3106              :             | LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize
    3107            0 :             | LogicalSizeCalculationCause::TenantSizeHandler => &self.metrics.logical_size_histo,
    3108              :             LogicalSizeCalculationCause::EvictionTaskImitation => {
    3109            0 :                 &self.metrics.imitate_logical_size_histo
    3110              :             }
    3111              :         };
    3112            0 :         let timer = storage_time_metrics.start_timer();
    3113            0 :         let logical_size = self
    3114            0 :             .get_current_logical_size_non_incremental(up_to_lsn, ctx)
    3115            0 :             .await?;
    3116            0 :         debug!("calculated logical size: {logical_size}");
    3117            0 :         timer.stop_and_record();
    3118            0 :         Ok(logical_size)
    3119            0 :     }
    3120              : 
    3121              :     /// Update current logical size, adding `delta' to the old value.
    3122       270570 :     fn update_current_logical_size(&self, delta: i64) {
    3123       270570 :         let logical_size = &self.current_logical_size;
    3124       270570 :         logical_size.increment_size(delta);
    3125       270570 : 
    3126       270570 :         // Also set the value in the prometheus gauge. Note that
    3127       270570 :         // there is a race condition here: if this is is called by two
    3128       270570 :         // threads concurrently, the prometheus gauge might be set to
    3129       270570 :         // one value while current_logical_size is set to the
    3130       270570 :         // other.
    3131       270570 :         match logical_size.current_size() {
    3132       270570 :             CurrentLogicalSize::Exact(ref new_current_size) => self
    3133       270570 :                 .metrics
    3134       270570 :                 .current_logical_size_gauge
    3135       270570 :                 .set(new_current_size.into()),
    3136            0 :             CurrentLogicalSize::Approximate(_) => {
    3137            0 :                 // don't update the gauge yet, this allows us not to update the gauge back and
    3138            0 :                 // forth between the initial size calculation task.
    3139            0 :             }
    3140              :         }
    3141       270570 :     }
    3142              : 
    3143         2940 :     pub(crate) fn update_directory_entries_count(&self, kind: DirectoryKind, count: u64) {
    3144         2940 :         self.directory_metrics[kind.offset()].store(count, AtomicOrdering::Relaxed);
    3145         2940 :         let aux_metric =
    3146         2940 :             self.directory_metrics[DirectoryKind::AuxFiles.offset()].load(AtomicOrdering::Relaxed);
    3147         2940 : 
    3148         2940 :         let sum_of_entries = self
    3149         2940 :             .directory_metrics
    3150         2940 :             .iter()
    3151        20580 :             .map(|v| v.load(AtomicOrdering::Relaxed))
    3152         2940 :             .sum();
    3153         2940 :         // Set a high general threshold and a lower threshold for the auxiliary files,
    3154         2940 :         // as we can have large numbers of relations in the db directory.
    3155         2940 :         const SUM_THRESHOLD: u64 = 5000;
    3156         2940 :         const AUX_THRESHOLD: u64 = 1000;
    3157         2940 :         if sum_of_entries >= SUM_THRESHOLD || aux_metric >= AUX_THRESHOLD {
    3158            0 :             self.metrics
    3159            0 :                 .directory_entries_count_gauge
    3160            0 :                 .set(sum_of_entries);
    3161         2940 :         } else if let Some(metric) = Lazy::get(&self.metrics.directory_entries_count_gauge) {
    3162            0 :             metric.set(sum_of_entries);
    3163         2940 :         }
    3164         2940 :     }
    3165              : 
    3166            0 :     async fn find_layer(&self, layer_name: &LayerName) -> Option<Layer> {
    3167            0 :         let guard = self.layers.read().await;
    3168            0 :         for historic_layer in guard.layer_map().iter_historic_layers() {
    3169            0 :             let historic_layer_name = historic_layer.layer_name();
    3170            0 :             if layer_name == &historic_layer_name {
    3171            0 :                 return Some(guard.get_from_desc(&historic_layer));
    3172            0 :             }
    3173              :         }
    3174              : 
    3175            0 :         None
    3176            0 :     }
    3177              : 
    3178              :     /// The timeline heatmap is a hint to secondary locations from the primary location,
    3179              :     /// indicating which layers are currently on-disk on the primary.
    3180              :     ///
    3181              :     /// None is returned if the Timeline is in a state where uploading a heatmap
    3182              :     /// doesn't make sense, such as shutting down or initializing.  The caller
    3183              :     /// should treat this as a cue to simply skip doing any heatmap uploading
    3184              :     /// for this timeline.
    3185            0 :     pub(crate) async fn generate_heatmap(&self) -> Option<HeatMapTimeline> {
    3186            0 :         if !self.is_active() {
    3187            0 :             return None;
    3188            0 :         }
    3189              : 
    3190            0 :         let guard = self.layers.read().await;
    3191              : 
    3192            0 :         let resident = guard.likely_resident_layers().map(|layer| {
    3193            0 :             let last_activity_ts = layer.access_stats().latest_activity();
    3194            0 : 
    3195            0 :             HeatMapLayer::new(
    3196            0 :                 layer.layer_desc().layer_name(),
    3197            0 :                 layer.metadata(),
    3198            0 :                 last_activity_ts,
    3199            0 :             )
    3200            0 :         });
    3201            0 : 
    3202            0 :         let layers = resident.collect();
    3203            0 : 
    3204            0 :         Some(HeatMapTimeline::new(self.timeline_id, layers))
    3205            0 :     }
    3206              : 
    3207              :     /// Returns true if the given lsn is or was an ancestor branchpoint.
    3208            0 :     pub(crate) fn is_ancestor_lsn(&self, lsn: Lsn) -> bool {
    3209            0 :         // upon timeline detach, we set the ancestor_lsn to Lsn::INVALID and the store the original
    3210            0 :         // branchpoint in the value in IndexPart::lineage
    3211            0 :         self.ancestor_lsn == lsn
    3212            0 :             || (self.ancestor_lsn == Lsn::INVALID
    3213            0 :                 && self.remote_client.is_previous_ancestor_lsn(lsn))
    3214            0 :     }
    3215              : }
    3216              : 
    3217              : type TraversalId = Arc<str>;
    3218              : 
    3219              : trait TraversalLayerExt {
    3220              :     fn traversal_id(&self) -> TraversalId;
    3221              : }
    3222              : 
    3223              : impl TraversalLayerExt for Layer {
    3224          320 :     fn traversal_id(&self) -> TraversalId {
    3225          320 :         Arc::clone(self.debug_str())
    3226          320 :     }
    3227              : }
    3228              : 
    3229              : impl TraversalLayerExt for Arc<InMemoryLayer> {
    3230            0 :     fn traversal_id(&self) -> TraversalId {
    3231            0 :         Arc::clone(self.local_path_str())
    3232            0 :     }
    3233              : }
    3234              : 
    3235              : impl Timeline {
    3236              :     ///
    3237              :     /// Get a handle to a Layer for reading.
    3238              :     ///
    3239              :     /// The returned Layer might be from an ancestor timeline, if the
    3240              :     /// segment hasn't been updated on this timeline yet.
    3241              :     ///
    3242              :     /// This function takes the current timeline's locked LayerMap as an argument,
    3243              :     /// so callers can avoid potential race conditions.
    3244              :     ///
    3245              :     /// # Cancel-Safety
    3246              :     ///
    3247              :     /// This method is cancellation-safe.
    3248          322 :     async fn get_reconstruct_data(
    3249          322 :         &self,
    3250          322 :         key: Key,
    3251          322 :         request_lsn: Lsn,
    3252          322 :         reconstruct_state: &mut ValueReconstructState,
    3253          322 :         ctx: &RequestContext,
    3254          322 :     ) -> Result<Vec<TraversalPathItem>, PageReconstructError> {
    3255          322 :         // Start from the current timeline.
    3256          322 :         let mut timeline_owned;
    3257          322 :         let mut timeline = self;
    3258          322 : 
    3259          322 :         let mut read_count = scopeguard::guard(0, |cnt| {
    3260          322 :             crate::metrics::READ_NUM_LAYERS_VISITED.observe(cnt as f64)
    3261          322 :         });
    3262          322 : 
    3263          322 :         // For debugging purposes, collect the path of layers that we traversed
    3264          322 :         // through. It's included in the error message if we fail to find the key.
    3265          322 :         let mut traversal_path = Vec::<TraversalPathItem>::new();
    3266              : 
    3267          322 :         let cached_lsn = if let Some((cached_lsn, _)) = &reconstruct_state.img {
    3268            0 :             *cached_lsn
    3269              :         } else {
    3270          322 :             Lsn(0)
    3271              :         };
    3272              : 
    3273              :         // 'prev_lsn' tracks the last LSN that we were at in our search. It's used
    3274              :         // to check that each iteration make some progress, to break infinite
    3275              :         // looping if something goes wrong.
    3276          322 :         let mut prev_lsn = None;
    3277          322 : 
    3278          322 :         let mut result = ValueReconstructResult::Continue;
    3279          322 :         let mut cont_lsn = Lsn(request_lsn.0 + 1);
    3280              : 
    3281          644 :         'outer: loop {
    3282          644 :             if self.cancel.is_cancelled() {
    3283            0 :                 return Err(PageReconstructError::Cancelled);
    3284          644 :             }
    3285          644 : 
    3286          644 :             // The function should have updated 'state'
    3287          644 :             //info!("CALLED for {} at {}: {:?} with {} records, cached {}", key, cont_lsn, result, reconstruct_state.records.len(), cached_lsn);
    3288          644 :             match result {
    3289          320 :                 ValueReconstructResult::Complete => return Ok(traversal_path),
    3290              :                 ValueReconstructResult::Continue => {
    3291              :                     // If we reached an earlier cached page image, we're done.
    3292          324 :                     if cont_lsn == cached_lsn + 1 {
    3293            0 :                         return Ok(traversal_path);
    3294          324 :                     }
    3295          324 :                     if let Some(prev) = prev_lsn {
    3296            2 :                         if prev <= cont_lsn {
    3297              :                             // Didn't make any progress in last iteration. Error out to avoid
    3298              :                             // getting stuck in the loop.
    3299            2 :                             return Err(PageReconstructError::MissingKey(MissingKeyError {
    3300            2 :                                 key,
    3301            2 :                                 shard: self.shard_identity.get_shard_number(&key),
    3302            2 :                                 cont_lsn: Lsn(cont_lsn.0 - 1),
    3303            2 :                                 request_lsn,
    3304            2 :                                 ancestor_lsn: Some(timeline.ancestor_lsn),
    3305            2 :                                 traversal_path,
    3306            2 :                                 backtrace: None,
    3307            2 :                             }));
    3308            0 :                         }
    3309          322 :                     }
    3310          322 :                     prev_lsn = Some(cont_lsn);
    3311              :                 }
    3312              :                 ValueReconstructResult::Missing => {
    3313              :                     return Err(PageReconstructError::MissingKey(MissingKeyError {
    3314            0 :                         key,
    3315            0 :                         shard: self.shard_identity.get_shard_number(&key),
    3316            0 :                         cont_lsn,
    3317            0 :                         request_lsn,
    3318            0 :                         ancestor_lsn: None,
    3319            0 :                         traversal_path,
    3320            0 :                         backtrace: if cfg!(test) {
    3321            0 :                             Some(std::backtrace::Backtrace::force_capture())
    3322              :                         } else {
    3323            0 :                             None
    3324              :                         },
    3325              :                     }));
    3326              :                 }
    3327              :             }
    3328              : 
    3329              :             // Recurse into ancestor if needed
    3330          322 :             if let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() {
    3331            2 :                 if key.is_inherited_key() && Lsn(cont_lsn.0 - 1) <= timeline.ancestor_lsn {
    3332            0 :                     trace!(
    3333            0 :                         "going into ancestor {}, cont_lsn is {}",
    3334              :                         timeline.ancestor_lsn,
    3335              :                         cont_lsn
    3336              :                     );
    3337              : 
    3338            0 :                     timeline_owned = timeline
    3339            0 :                         .get_ready_ancestor_timeline(ancestor_timeline, ctx)
    3340            0 :                         .await?;
    3341            0 :                     timeline = &*timeline_owned;
    3342            0 :                     prev_lsn = None;
    3343            0 :                     continue 'outer;
    3344            2 :                 }
    3345          320 :             }
    3346              : 
    3347          322 :             let guard = timeline.layers.read().await;
    3348          322 :             let layers = guard.layer_map();
    3349              : 
    3350              :             // Check the open and frozen in-memory layers first, in order from newest
    3351              :             // to oldest.
    3352          322 :             if let Some(open_layer) = &layers.open_layer {
    3353            0 :                 let start_lsn = open_layer.get_lsn_range().start;
    3354            0 :                 if cont_lsn > start_lsn {
    3355              :                     //info!("CHECKING for {} at {} on open layer {}", key, cont_lsn, open_layer.layer_name().display());
    3356              :                     // Get all the data needed to reconstruct the page version from this layer.
    3357              :                     // But if we have an older cached page image, no need to go past that.
    3358            0 :                     let lsn_floor = max(cached_lsn + 1, start_lsn);
    3359            0 : 
    3360            0 :                     let open_layer = open_layer.clone();
    3361            0 :                     drop(guard);
    3362            0 : 
    3363            0 :                     result = match open_layer
    3364            0 :                         .get_value_reconstruct_data(
    3365            0 :                             key,
    3366            0 :                             lsn_floor..cont_lsn,
    3367            0 :                             reconstruct_state,
    3368            0 :                             ctx,
    3369            0 :                         )
    3370            0 :                         .await
    3371              :                     {
    3372            0 :                         Ok(result) => result,
    3373            0 :                         Err(e) => return Err(PageReconstructError::from(e)),
    3374              :                     };
    3375            0 :                     cont_lsn = lsn_floor;
    3376            0 :                     *read_count += 1;
    3377            0 :                     traversal_path.push((result, cont_lsn, open_layer.traversal_id()));
    3378            0 :                     continue 'outer;
    3379            0 :                 }
    3380          322 :             }
    3381          322 :             for frozen_layer in layers.frozen_layers.iter().rev() {
    3382            0 :                 let start_lsn = frozen_layer.get_lsn_range().start;
    3383            0 :                 if cont_lsn > start_lsn {
    3384              :                     //info!("CHECKING for {} at {} on frozen layer {}", key, cont_lsn, frozen_layer.layer_name().display());
    3385            0 :                     let lsn_floor = max(cached_lsn + 1, start_lsn);
    3386            0 : 
    3387            0 :                     let frozen_layer = frozen_layer.clone();
    3388            0 :                     drop(guard);
    3389            0 : 
    3390            0 :                     result = match frozen_layer
    3391            0 :                         .get_value_reconstruct_data(
    3392            0 :                             key,
    3393            0 :                             lsn_floor..cont_lsn,
    3394            0 :                             reconstruct_state,
    3395            0 :                             ctx,
    3396            0 :                         )
    3397            0 :                         .await
    3398              :                     {
    3399            0 :                         Ok(result) => result,
    3400            0 :                         Err(e) => return Err(PageReconstructError::from(e)),
    3401              :                     };
    3402            0 :                     cont_lsn = lsn_floor;
    3403            0 :                     *read_count += 1;
    3404            0 :                     traversal_path.push((result, cont_lsn, frozen_layer.traversal_id()));
    3405            0 :                     continue 'outer;
    3406            0 :                 }
    3407              :             }
    3408              : 
    3409          322 :             if let Some(SearchResult { lsn_floor, layer }) = layers.search(key, cont_lsn) {
    3410          320 :                 let layer = guard.get_from_desc(&layer);
    3411          320 :                 drop(guard);
    3412          320 :                 // Get all the data needed to reconstruct the page version from this layer.
    3413          320 :                 // But if we have an older cached page image, no need to go past that.
    3414          320 :                 let lsn_floor = max(cached_lsn + 1, lsn_floor);
    3415          320 :                 result = match layer
    3416          320 :                     .get_value_reconstruct_data(key, lsn_floor..cont_lsn, reconstruct_state, ctx)
    3417           17 :                     .await
    3418              :                 {
    3419          320 :                     Ok(result) => result,
    3420            0 :                     Err(e) => return Err(PageReconstructError::from(e)),
    3421              :                 };
    3422          320 :                 cont_lsn = lsn_floor;
    3423          320 :                 *read_count += 1;
    3424          320 :                 traversal_path.push((result, cont_lsn, layer.traversal_id()));
    3425          320 :                 continue 'outer;
    3426            2 :             } else if timeline.ancestor_timeline.is_some() {
    3427              :                 // Nothing on this timeline. Traverse to parent
    3428            2 :                 result = ValueReconstructResult::Continue;
    3429            2 :                 cont_lsn = Lsn(timeline.ancestor_lsn.0 + 1);
    3430            2 :                 continue 'outer;
    3431              :             } else {
    3432              :                 // Nothing found
    3433            0 :                 result = ValueReconstructResult::Missing;
    3434            0 :                 continue 'outer;
    3435              :             }
    3436              :         }
    3437          322 :     }
    3438              : 
    3439              :     #[allow(clippy::doc_lazy_continuation)]
    3440              :     /// Get the data needed to reconstruct all keys in the provided keyspace
    3441              :     ///
    3442              :     /// The algorithm is as follows:
    3443              :     /// 1.   While some keys are still not done and there's a timeline to visit:
    3444              :     /// 2.   Visit the timeline (see [`Timeline::get_vectored_reconstruct_data_timeline`]:
    3445              :     /// 2.1: Build the fringe for the current keyspace
    3446              :     /// 2.2  Visit the newest layer from the fringe to collect all values for the range it
    3447              :     ///      intersects
    3448              :     /// 2.3. Pop the timeline from the fringe
    3449              :     /// 2.4. If the fringe is empty, go back to 1
    3450       626048 :     async fn get_vectored_reconstruct_data(
    3451       626048 :         &self,
    3452       626048 :         mut keyspace: KeySpace,
    3453       626048 :         request_lsn: Lsn,
    3454       626048 :         reconstruct_state: &mut ValuesReconstructState,
    3455       626048 :         ctx: &RequestContext,
    3456       626048 :     ) -> Result<(), GetVectoredError> {
    3457       626048 :         let mut timeline_owned: Arc<Timeline>;
    3458       626048 :         let mut timeline = self;
    3459       626048 : 
    3460       626048 :         let mut cont_lsn = Lsn(request_lsn.0 + 1);
    3461              : 
    3462       626046 :         let missing_keyspace = loop {
    3463       851260 :             if self.cancel.is_cancelled() {
    3464            0 :                 return Err(GetVectoredError::Cancelled);
    3465       851260 :             }
    3466              : 
    3467              :             let TimelineVisitOutcome {
    3468       851260 :                 completed_keyspace: completed,
    3469       851260 :                 image_covered_keyspace,
    3470       851260 :             } = Self::get_vectored_reconstruct_data_timeline(
    3471       851260 :                 timeline,
    3472       851260 :                 keyspace.clone(),
    3473       851260 :                 cont_lsn,
    3474       851260 :                 reconstruct_state,
    3475       851260 :                 &self.cancel,
    3476       851260 :                 ctx,
    3477       851260 :             )
    3478       114010 :             .await?;
    3479              : 
    3480       851260 :             keyspace.remove_overlapping_with(&completed);
    3481       851260 : 
    3482       851260 :             // Do not descend into the ancestor timeline for aux files.
    3483       851260 :             // We don't return a blanket [`GetVectoredError::MissingKey`] to avoid
    3484       851260 :             // stalling compaction.
    3485       851260 :             keyspace.remove_overlapping_with(&KeySpace {
    3486       851260 :                 ranges: vec![NON_INHERITED_RANGE, NON_INHERITED_SPARSE_RANGE],
    3487       851260 :             });
    3488       851260 : 
    3489       851260 :             // Keyspace is fully retrieved
    3490       851260 :             if keyspace.is_empty() {
    3491       626032 :                 break None;
    3492       225228 :             }
    3493              : 
    3494       225228 :             let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() else {
    3495              :                 // Not fully retrieved but no ancestor timeline.
    3496           14 :                 break Some(keyspace);
    3497              :             };
    3498              : 
    3499              :             // Now we see if there are keys covered by the image layer but does not exist in the
    3500              :             // image layer, which means that the key does not exist.
    3501              : 
    3502              :             // The block below will stop the vectored search if any of the keys encountered an image layer
    3503              :             // which did not contain a snapshot for said key. Since we have already removed all completed
    3504              :             // keys from `keyspace`, we expect there to be no overlap between it and the image covered key
    3505              :             // space. If that's not the case, we had at least one key encounter a gap in the image layer
    3506              :             // and stop the search as a result of that.
    3507       225214 :             let removed = keyspace.remove_overlapping_with(&image_covered_keyspace);
    3508       225214 :             if !removed.is_empty() {
    3509            0 :                 break Some(removed);
    3510       225214 :             }
    3511       225214 :             // If we reached this point, `remove_overlapping_with` should not have made any change to the
    3512       225214 :             // keyspace.
    3513       225214 : 
    3514       225214 :             // Take the min to avoid reconstructing a page with data newer than request Lsn.
    3515       225214 :             cont_lsn = std::cmp::min(Lsn(request_lsn.0 + 1), Lsn(timeline.ancestor_lsn.0 + 1));
    3516       225214 :             timeline_owned = timeline
    3517       225214 :                 .get_ready_ancestor_timeline(ancestor_timeline, ctx)
    3518            0 :                 .await
    3519       225214 :                 .map_err(GetVectoredError::GetReadyAncestorError)?;
    3520       225212 :             timeline = &*timeline_owned;
    3521              :         };
    3522              : 
    3523       626046 :         if let Some(missing_keyspace) = missing_keyspace {
    3524           14 :             return Err(GetVectoredError::MissingKey(MissingKeyError {
    3525           14 :                 key: missing_keyspace.start().unwrap(), /* better if we can store the full keyspace */
    3526           14 :                 shard: self
    3527           14 :                     .shard_identity
    3528           14 :                     .get_shard_number(&missing_keyspace.start().unwrap()),
    3529           14 :                 cont_lsn,
    3530           14 :                 request_lsn,
    3531           14 :                 ancestor_lsn: Some(timeline.ancestor_lsn),
    3532           14 :                 traversal_path: vec![],
    3533           14 :                 backtrace: None,
    3534           14 :             }));
    3535       626032 :         }
    3536       626032 : 
    3537       626032 :         Ok(())
    3538       626048 :     }
    3539              : 
    3540              :     /// Collect the reconstruct data for a keyspace from the specified timeline.
    3541              :     ///
    3542              :     /// Maintain a fringe [`LayerFringe`] which tracks all the layers that intersect
    3543              :     /// the current keyspace. The current keyspace of the search at any given timeline
    3544              :     /// is the original keyspace minus all the keys that have been completed minus
    3545              :     /// any keys for which we couldn't find an intersecting layer. It's not tracked explicitly,
    3546              :     /// but if you merge all the keyspaces in the fringe, you get the "current keyspace".
    3547              :     ///
    3548              :     /// This is basically a depth-first search visitor implementation where a vertex
    3549              :     /// is the (layer, lsn range, key space) tuple. The fringe acts as the stack.
    3550              :     ///
    3551              :     /// At each iteration pop the top of the fringe (the layer with the highest Lsn)
    3552              :     /// and get all the required reconstruct data from the layer in one go.
    3553              :     ///
    3554              :     /// Returns the completed keyspace and the keyspaces with image coverage. The caller
    3555              :     /// decides how to deal with these two keyspaces.
    3556       851260 :     async fn get_vectored_reconstruct_data_timeline(
    3557       851260 :         timeline: &Timeline,
    3558       851260 :         keyspace: KeySpace,
    3559       851260 :         mut cont_lsn: Lsn,
    3560       851260 :         reconstruct_state: &mut ValuesReconstructState,
    3561       851260 :         cancel: &CancellationToken,
    3562       851260 :         ctx: &RequestContext,
    3563       851260 :     ) -> Result<TimelineVisitOutcome, GetVectoredError> {
    3564       851260 :         let mut unmapped_keyspace = keyspace.clone();
    3565       851260 :         let mut fringe = LayerFringe::new();
    3566       851260 : 
    3567       851260 :         let mut completed_keyspace = KeySpace::default();
    3568       851260 :         let mut image_covered_keyspace = KeySpaceRandomAccum::new();
    3569              : 
    3570      1669354 :         loop {
    3571      1669354 :             if cancel.is_cancelled() {
    3572            0 :                 return Err(GetVectoredError::Cancelled);
    3573      1669354 :             }
    3574      1669354 : 
    3575      1669354 :             let (keys_done_last_step, keys_with_image_coverage) =
    3576      1669354 :                 reconstruct_state.consume_done_keys();
    3577      1669354 :             unmapped_keyspace.remove_overlapping_with(&keys_done_last_step);
    3578      1669354 :             completed_keyspace.merge(&keys_done_last_step);
    3579      1669354 :             if let Some(keys_with_image_coverage) = keys_with_image_coverage {
    3580         7598 :                 unmapped_keyspace
    3581         7598 :                     .remove_overlapping_with(&KeySpace::single(keys_with_image_coverage.clone()));
    3582         7598 :                 image_covered_keyspace.add_range(keys_with_image_coverage);
    3583      1661756 :             }
    3584              : 
    3585              :             // Do not descent any further if the last layer we visited
    3586              :             // completed all keys in the keyspace it inspected. This is not
    3587              :             // required for correctness, but avoids visiting extra layers
    3588              :             // which turns out to be a perf bottleneck in some cases.
    3589      1669354 :             if !unmapped_keyspace.is_empty() {
    3590      1043448 :                 let guard = timeline.layers.read().await;
    3591      1043448 :                 let layers = guard.layer_map();
    3592      1043448 : 
    3593      1043448 :                 let in_memory_layer = layers.find_in_memory_layer(|l| {
    3594       910896 :                     let start_lsn = l.get_lsn_range().start;
    3595       910896 :                     cont_lsn > start_lsn
    3596      1043448 :                 });
    3597      1043448 : 
    3598      1043448 :                 match in_memory_layer {
    3599       606197 :                     Some(l) => {
    3600       606197 :                         let lsn_range = l.get_lsn_range().start..cont_lsn;
    3601       606197 :                         fringe.update(
    3602       606197 :                             ReadableLayer::InMemoryLayer(l),
    3603       606197 :                             unmapped_keyspace.clone(),
    3604       606197 :                             lsn_range,
    3605       606197 :                         );
    3606       606197 :                     }
    3607              :                     None => {
    3608       509090 :                         for range in unmapped_keyspace.ranges.iter() {
    3609       509090 :                             let results = layers.range_search(range.clone(), cont_lsn);
    3610       509090 : 
    3611       509090 :                             results
    3612       509090 :                                 .found
    3613       509090 :                                 .into_iter()
    3614       509090 :                                 .map(|(SearchResult { layer, lsn_floor }, keyspace_accum)| {
    3615       275713 :                                     (
    3616       275713 :                                         ReadableLayer::PersistentLayer(guard.get_from_desc(&layer)),
    3617       275713 :                                         keyspace_accum.to_keyspace(),
    3618       275713 :                                         lsn_floor..cont_lsn,
    3619       275713 :                                     )
    3620       509090 :                                 })
    3621       509090 :                                 .for_each(|(layer, keyspace, lsn_range)| {
    3622       275713 :                                     fringe.update(layer, keyspace, lsn_range)
    3623       509090 :                                 });
    3624       509090 :                         }
    3625              :                     }
    3626              :                 }
    3627              : 
    3628              :                 // It's safe to drop the layer map lock after planning the next round of reads.
    3629              :                 // The fringe keeps readable handles for the layers which are safe to read even
    3630              :                 // if layers were compacted or flushed.
    3631              :                 //
    3632              :                 // The more interesting consideration is: "Why is the read algorithm still correct
    3633              :                 // if the layer map changes while it is operating?". Doing a vectored read on a
    3634              :                 // timeline boils down to pushing an imaginary lsn boundary downwards for each range
    3635              :                 // covered by the read. The layer map tells us how to move the lsn downwards for a
    3636              :                 // range at *a particular point in time*. It is fine for the answer to be different
    3637              :                 // at two different time points.
    3638      1043448 :                 drop(guard);
    3639       625906 :             }
    3640              : 
    3641      1669354 :             if let Some((layer_to_read, keyspace_to_read, lsn_range)) = fringe.next_layer() {
    3642       818094 :                 let next_cont_lsn = lsn_range.start;
    3643       818094 :                 layer_to_read
    3644       818094 :                     .get_values_reconstruct_data(
    3645       818094 :                         keyspace_to_read.clone(),
    3646       818094 :                         lsn_range,
    3647       818094 :                         reconstruct_state,
    3648       818094 :                         ctx,
    3649       818094 :                     )
    3650       107815 :                     .await?;
    3651              : 
    3652       818094 :                 unmapped_keyspace = keyspace_to_read;
    3653       818094 :                 cont_lsn = next_cont_lsn;
    3654       818094 : 
    3655       818094 :                 reconstruct_state.on_layer_visited(&layer_to_read);
    3656              :             } else {
    3657       851260 :                 break;
    3658       851260 :             }
    3659       851260 :         }
    3660       851260 : 
    3661       851260 :         Ok(TimelineVisitOutcome {
    3662       851260 :             completed_keyspace,
    3663       851260 :             image_covered_keyspace: image_covered_keyspace.consume_keyspace(),
    3664       851260 :         })
    3665       851260 :     }
    3666              : 
    3667       225214 :     async fn get_ready_ancestor_timeline(
    3668       225214 :         &self,
    3669       225214 :         ancestor: &Arc<Timeline>,
    3670       225214 :         ctx: &RequestContext,
    3671       225214 :     ) -> Result<Arc<Timeline>, GetReadyAncestorError> {
    3672       225214 :         // It's possible that the ancestor timeline isn't active yet, or
    3673       225214 :         // is active but hasn't yet caught up to the branch point. Wait
    3674       225214 :         // for it.
    3675       225214 :         //
    3676       225214 :         // This cannot happen while the pageserver is running normally,
    3677       225214 :         // because you cannot create a branch from a point that isn't
    3678       225214 :         // present in the pageserver yet. However, we don't wait for the
    3679       225214 :         // branch point to be uploaded to cloud storage before creating
    3680       225214 :         // a branch. I.e., the branch LSN need not be remote consistent
    3681       225214 :         // for the branching operation to succeed.
    3682       225214 :         //
    3683       225214 :         // Hence, if we try to load a tenant in such a state where
    3684       225214 :         // 1. the existence of the branch was persisted (in IndexPart and/or locally)
    3685       225214 :         // 2. but the ancestor state is behind branch_lsn because it was not yet persisted
    3686       225214 :         // then we will need to wait for the ancestor timeline to
    3687       225214 :         // re-stream WAL up to branch_lsn before we access it.
    3688       225214 :         //
    3689       225214 :         // How can a tenant get in such a state?
    3690       225214 :         // - ungraceful pageserver process exit
    3691       225214 :         // - detach+attach => this is a bug, https://github.com/neondatabase/neon/issues/4219
    3692       225214 :         //
    3693       225214 :         // NB: this could be avoided by requiring
    3694       225214 :         //   branch_lsn >= remote_consistent_lsn
    3695       225214 :         // during branch creation.
    3696       225214 :         match ancestor.wait_to_become_active(ctx).await {
    3697       225212 :             Ok(()) => {}
    3698              :             Err(TimelineState::Stopping) => {
    3699              :                 // If an ancestor is stopping, it means the tenant is stopping: handle this the same as if this timeline was stopping.
    3700            0 :                 return Err(GetReadyAncestorError::Cancelled);
    3701              :             }
    3702            2 :             Err(state) => {
    3703            2 :                 return Err(GetReadyAncestorError::BadState {
    3704            2 :                     timeline_id: ancestor.timeline_id,
    3705            2 :                     state,
    3706            2 :                 });
    3707              :             }
    3708              :         }
    3709       225212 :         ancestor
    3710       225212 :             .wait_lsn(self.ancestor_lsn, WaitLsnWaiter::Timeline(self), ctx)
    3711            0 :             .await
    3712       225212 :             .map_err(|e| match e {
    3713            0 :                 e @ WaitLsnError::Timeout(_) => GetReadyAncestorError::AncestorLsnTimeout(e),
    3714            0 :                 WaitLsnError::Shutdown => GetReadyAncestorError::Cancelled,
    3715            0 :                 WaitLsnError::BadState(state) => GetReadyAncestorError::BadState {
    3716            0 :                     timeline_id: ancestor.timeline_id,
    3717            0 :                     state,
    3718            0 :                 },
    3719       225212 :             })?;
    3720              : 
    3721       225212 :         Ok(ancestor.clone())
    3722       225214 :     }
    3723              : 
    3724            0 :     pub(crate) fn get_ancestor_timeline(&self) -> Option<Arc<Timeline>> {
    3725            0 :         self.ancestor_timeline.clone()
    3726            0 :     }
    3727              : 
    3728         5452 :     pub(crate) fn get_shard_identity(&self) -> &ShardIdentity {
    3729         5452 :         &self.shard_identity
    3730         5452 :     }
    3731              : 
    3732              :     #[inline(always)]
    3733            0 :     pub(crate) fn shard_timeline_id(&self) -> ShardTimelineId {
    3734            0 :         ShardTimelineId {
    3735            0 :             shard_index: ShardIndex {
    3736            0 :                 shard_number: self.shard_identity.number,
    3737            0 :                 shard_count: self.shard_identity.count,
    3738            0 :             },
    3739            0 :             timeline_id: self.timeline_id,
    3740            0 :         }
    3741            0 :     }
    3742              : 
    3743              :     ///
    3744              :     /// Get a handle to the latest layer for appending.
    3745              :     ///
    3746         1256 :     async fn get_layer_for_write(
    3747         1256 :         &self,
    3748         1256 :         lsn: Lsn,
    3749         1256 :         ctx: &RequestContext,
    3750         1256 :     ) -> anyhow::Result<Arc<InMemoryLayer>> {
    3751         1256 :         let mut guard = self.layers.write().await;
    3752         1256 :         let layer = guard
    3753         1256 :             .get_layer_for_write(
    3754         1256 :                 lsn,
    3755         1256 :                 self.get_last_record_lsn(),
    3756         1256 :                 self.conf,
    3757         1256 :                 self.timeline_id,
    3758         1256 :                 self.tenant_shard_id,
    3759         1256 :                 ctx,
    3760         1256 :             )
    3761          704 :             .await?;
    3762         1256 :         Ok(layer)
    3763         1256 :     }
    3764              : 
    3765      5279062 :     pub(crate) fn finish_write(&self, new_lsn: Lsn) {
    3766      5279062 :         assert!(new_lsn.is_aligned());
    3767              : 
    3768      5279062 :         self.metrics.last_record_gauge.set(new_lsn.0 as i64);
    3769      5279062 :         self.last_record_lsn.advance(new_lsn);
    3770      5279062 :     }
    3771              : 
    3772         1154 :     async fn freeze_inmem_layer_at(
    3773         1154 :         &self,
    3774         1154 :         at: Lsn,
    3775         1154 :         write_lock: &mut tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
    3776         1154 :     ) {
    3777         1154 :         let frozen = {
    3778         1154 :             let mut guard = self.layers.write().await;
    3779         1154 :             guard
    3780         1154 :                 .try_freeze_in_memory_layer(at, &self.last_freeze_at, write_lock)
    3781            1 :                 .await
    3782              :         };
    3783         1154 :         if frozen {
    3784         1126 :             let now = Instant::now();
    3785         1126 :             *(self.last_freeze_ts.write().unwrap()) = now;
    3786         1126 :         }
    3787         1154 :     }
    3788              : 
    3789              :     /// Layer flusher task's main loop.
    3790          392 :     async fn flush_loop(
    3791          392 :         self: &Arc<Self>,
    3792          392 :         mut layer_flush_start_rx: tokio::sync::watch::Receiver<(u64, Lsn)>,
    3793          392 :         ctx: &RequestContext,
    3794          392 :     ) {
    3795          392 :         info!("started flush loop");
    3796         1114 :         loop {
    3797         1114 :             tokio::select! {
    3798              :                 _ = self.cancel.cancelled() => {
    3799              :                     info!("shutting down layer flush task due to Timeline::cancel");
    3800              :                     break;
    3801              :                 },
    3802              :                 _ = layer_flush_start_rx.changed() => {}
    3803              :             }
    3804         1114 :             trace!("waking up");
    3805         1114 :             let (flush_counter, frozen_to_lsn) = *layer_flush_start_rx.borrow();
    3806         1114 : 
    3807         1114 :             // The highest LSN to which we flushed in the loop over frozen layers
    3808         1114 :             let mut flushed_to_lsn = Lsn(0);
    3809              : 
    3810         1114 :             let result = loop {
    3811         2240 :                 if self.cancel.is_cancelled() {
    3812            0 :                     info!("dropping out of flush loop for timeline shutdown");
    3813              :                     // Note: we do not bother transmitting into [`layer_flush_done_tx`], because
    3814              :                     // anyone waiting on that will respect self.cancel as well: they will stop
    3815              :                     // waiting at the same time we as drop out of this loop.
    3816            0 :                     return;
    3817         2240 :                 }
    3818         2240 : 
    3819         2240 :                 let timer = self.metrics.flush_time_histo.start_timer();
    3820              : 
    3821         2240 :                 let layer_to_flush = {
    3822         2240 :                     let guard = self.layers.read().await;
    3823         2240 :                     guard.layer_map().frozen_layers.front().cloned()
    3824              :                     // drop 'layers' lock to allow concurrent reads and writes
    3825              :                 };
    3826         2240 :                 let Some(layer_to_flush) = layer_to_flush else {
    3827         1114 :                     break Ok(());
    3828              :                 };
    3829        17039 :                 match self.flush_frozen_layer(layer_to_flush, ctx).await {
    3830         1126 :                     Ok(this_layer_to_lsn) => {
    3831         1126 :                         flushed_to_lsn = std::cmp::max(flushed_to_lsn, this_layer_to_lsn);
    3832         1126 :                     }
    3833              :                     Err(FlushLayerError::Cancelled) => {
    3834            0 :                         info!("dropping out of flush loop for timeline shutdown");
    3835            0 :                         return;
    3836              :                     }
    3837            0 :                     err @ Err(
    3838            0 :                         FlushLayerError::NotRunning(_)
    3839            0 :                         | FlushLayerError::Other(_)
    3840            0 :                         | FlushLayerError::CreateImageLayersError(_),
    3841            0 :                     ) => {
    3842            0 :                         error!("could not flush frozen layer: {err:?}");
    3843            0 :                         break err.map(|_| ());
    3844              :                     }
    3845              :                 }
    3846         1126 :                 timer.stop_and_record();
    3847              :             };
    3848              : 
    3849              :             // Unsharded tenants should never advance their LSN beyond the end of the
    3850              :             // highest layer they write: such gaps between layer data and the frozen LSN
    3851              :             // are only legal on sharded tenants.
    3852         1114 :             debug_assert!(
    3853         1114 :                 self.shard_identity.count.count() > 1
    3854         1114 :                     || flushed_to_lsn >= frozen_to_lsn
    3855           68 :                     || !flushed_to_lsn.is_valid()
    3856              :             );
    3857              : 
    3858         1114 :             if flushed_to_lsn < frozen_to_lsn && self.shard_identity.count.count() > 1 {
    3859              :                 // If our layer flushes didn't carry disk_consistent_lsn up to the `to_lsn` advertised
    3860              :                 // to us via layer_flush_start_rx, then advance it here.
    3861              :                 //
    3862              :                 // This path is only taken for tenants with multiple shards: single sharded tenants should
    3863              :                 // never encounter a gap in the wal.
    3864            0 :                 let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
    3865            0 :                 tracing::debug!("Advancing disk_consistent_lsn across layer gap {old_disk_consistent_lsn}->{frozen_to_lsn}");
    3866            0 :                 if self.set_disk_consistent_lsn(frozen_to_lsn) {
    3867            0 :                     if let Err(e) = self.schedule_uploads(frozen_to_lsn, vec![]) {
    3868            0 :                         tracing::warn!("Failed to schedule metadata upload after updating disk_consistent_lsn: {e}");
    3869            0 :                     }
    3870            0 :                 }
    3871         1114 :             }
    3872              : 
    3873              :             // Notify any listeners that we're done
    3874         1114 :             let _ = self
    3875         1114 :                 .layer_flush_done_tx
    3876         1114 :                 .send_replace((flush_counter, result));
    3877              :         }
    3878            8 :     }
    3879              : 
    3880              :     /// Request the flush loop to write out all frozen layers up to `at_lsn` as Delta L0 files to disk.
    3881              :     /// The caller is responsible for the freezing, e.g., [`Self::freeze_inmem_layer_at`].
    3882              :     ///
    3883              :     /// `at_lsn` may be higher than the highest LSN of a frozen layer: if this is the
    3884              :     /// case, it means no data will be written between the top of the highest frozen layer and
    3885              :     /// to_lsn, e.g. because this tenant shard has ingested up to to_lsn and not written any data
    3886              :     /// locally for that part of the WAL.
    3887         1154 :     fn flush_frozen_layers(&self, at_lsn: Lsn) -> Result<u64, FlushLayerError> {
    3888         1154 :         // Increment the flush cycle counter and wake up the flush task.
    3889         1154 :         // Remember the new value, so that when we listen for the flush
    3890         1154 :         // to finish, we know when the flush that we initiated has
    3891         1154 :         // finished, instead of some other flush that was started earlier.
    3892         1154 :         let mut my_flush_request = 0;
    3893         1154 : 
    3894         1154 :         let flush_loop_state = { *self.flush_loop_state.lock().unwrap() };
    3895         1154 :         if !matches!(flush_loop_state, FlushLoopState::Running { .. }) {
    3896            0 :             return Err(FlushLayerError::NotRunning(flush_loop_state));
    3897         1154 :         }
    3898         1154 : 
    3899         1154 :         self.layer_flush_start_tx.send_modify(|(counter, lsn)| {
    3900         1154 :             my_flush_request = *counter + 1;
    3901         1154 :             *counter = my_flush_request;
    3902         1154 :             *lsn = std::cmp::max(at_lsn, *lsn);
    3903         1154 :         });
    3904         1154 : 
    3905         1154 :         Ok(my_flush_request)
    3906         1154 :     }
    3907              : 
    3908         1074 :     async fn wait_flush_completion(&self, request: u64) -> Result<(), FlushLayerError> {
    3909         1074 :         let mut rx = self.layer_flush_done_tx.subscribe();
    3910         2148 :         loop {
    3911         2148 :             {
    3912         2148 :                 let (last_result_counter, last_result) = &*rx.borrow();
    3913         2148 :                 if *last_result_counter >= request {
    3914         1074 :                     if let Err(err) = last_result {
    3915              :                         // We already logged the original error in
    3916              :                         // flush_loop. We cannot propagate it to the caller
    3917              :                         // here, because it might not be Cloneable
    3918            0 :                         return Err(err.clone());
    3919              :                     } else {
    3920         1074 :                         return Ok(());
    3921              :                     }
    3922         1074 :                 }
    3923         1074 :             }
    3924         1074 :             trace!("waiting for flush to complete");
    3925              :             tokio::select! {
    3926              :                 rx_e = rx.changed() => {
    3927            0 :                     rx_e.map_err(|_| FlushLayerError::NotRunning(*self.flush_loop_state.lock().unwrap()))?;
    3928              :                 },
    3929              :                 // Cancellation safety: we are not leaving an I/O in-flight for the flush, we're just ignoring
    3930              :                 // the notification from [`flush_loop`] that it completed.
    3931              :                 _ = self.cancel.cancelled() => {
    3932              :                     tracing::info!("Cancelled layer flush due on timeline shutdown");
    3933              :                     return Ok(())
    3934              :                 }
    3935              :             };
    3936         1074 :             trace!("done")
    3937              :         }
    3938         1074 :     }
    3939              : 
    3940         1074 :     async fn flush_frozen_layers_and_wait(&self, at_lsn: Lsn) -> Result<(), FlushLayerError> {
    3941         1074 :         let token = self.flush_frozen_layers(at_lsn)?;
    3942         1077 :         self.wait_flush_completion(token).await
    3943         1074 :     }
    3944              : 
    3945              :     /// Flush one frozen in-memory layer to disk, as a new delta layer.
    3946              :     ///
    3947              :     /// Return value is the last lsn (inclusive) of the layer that was frozen.
    3948         2252 :     #[instrument(skip_all, fields(layer=%frozen_layer))]
    3949              :     async fn flush_frozen_layer(
    3950              :         self: &Arc<Self>,
    3951              :         frozen_layer: Arc<InMemoryLayer>,
    3952              :         ctx: &RequestContext,
    3953              :     ) -> Result<Lsn, FlushLayerError> {
    3954              :         debug_assert_current_span_has_tenant_and_timeline_id();
    3955              : 
    3956              :         // As a special case, when we have just imported an image into the repository,
    3957              :         // instead of writing out a L0 delta layer, we directly write out image layer
    3958              :         // files instead. This is possible as long as *all* the data imported into the
    3959              :         // repository have the same LSN.
    3960              :         let lsn_range = frozen_layer.get_lsn_range();
    3961              : 
    3962              :         // Whether to directly create image layers for this flush, or flush them as delta layers
    3963              :         let create_image_layer =
    3964              :             lsn_range.start == self.initdb_lsn && lsn_range.end == Lsn(self.initdb_lsn.0 + 1);
    3965              : 
    3966              :         #[cfg(test)]
    3967              :         {
    3968              :             match &mut *self.flush_loop_state.lock().unwrap() {
    3969              :                 FlushLoopState::NotStarted | FlushLoopState::Exited => {
    3970              :                     panic!("flush loop not running")
    3971              :                 }
    3972              :                 FlushLoopState::Running {
    3973              :                     expect_initdb_optimization,
    3974              :                     initdb_optimization_count,
    3975              :                     ..
    3976              :                 } => {
    3977              :                     if create_image_layer {
    3978              :                         *initdb_optimization_count += 1;
    3979              :                     } else {
    3980              :                         assert!(!*expect_initdb_optimization, "expected initdb optimization");
    3981              :                     }
    3982              :                 }
    3983              :             }
    3984              :         }
    3985              : 
    3986              :         let (layers_to_upload, delta_layer_to_add) = if create_image_layer {
    3987              :             // Note: The 'ctx' in use here has DownloadBehavior::Error. We should not
    3988              :             // require downloading anything during initial import.
    3989              :             let ((rel_partition, metadata_partition), _lsn) = self
    3990              :                 .repartition(
    3991              :                     self.initdb_lsn,
    3992              :                     self.get_compaction_target_size(),
    3993              :                     EnumSet::empty(),
    3994              :                     ctx,
    3995              :                 )
    3996              :                 .await
    3997            0 :                 .map_err(|e| FlushLayerError::from_anyhow(self, e))?;
    3998              : 
    3999              :             if self.cancel.is_cancelled() {
    4000              :                 return Err(FlushLayerError::Cancelled);
    4001              :             }
    4002              : 
    4003              :             // FIXME(auxfilesv2): support multiple metadata key partitions might need initdb support as well?
    4004              :             // This code path will not be hit during regression tests. After #7099 we have a single partition
    4005              :             // with two key ranges. If someone wants to fix initdb optimization in the future, this might need
    4006              :             // to be fixed.
    4007              : 
    4008              :             // For metadata, always create delta layers.
    4009              :             let delta_layer = if !metadata_partition.parts.is_empty() {
    4010              :                 assert_eq!(
    4011              :                     metadata_partition.parts.len(),
    4012              :                     1,
    4013              :                     "currently sparse keyspace should only contain a single metadata keyspace"
    4014              :                 );
    4015              :                 let metadata_keyspace = &metadata_partition.parts[0];
    4016              :                 self.create_delta_layer(
    4017              :                     &frozen_layer,
    4018              :                     Some(
    4019              :                         metadata_keyspace.0.ranges.first().unwrap().start
    4020              :                             ..metadata_keyspace.0.ranges.last().unwrap().end,
    4021              :                     ),
    4022              :                     ctx,
    4023              :                 )
    4024              :                 .await
    4025            0 :                 .map_err(|e| FlushLayerError::from_anyhow(self, e))?
    4026              :             } else {
    4027              :                 None
    4028              :             };
    4029              : 
    4030              :             // For image layers, we add them immediately into the layer map.
    4031              :             let mut layers_to_upload = Vec::new();
    4032              :             layers_to_upload.extend(
    4033              :                 self.create_image_layers(
    4034              :                     &rel_partition,
    4035              :                     self.initdb_lsn,
    4036              :                     ImageLayerCreationMode::Initial,
    4037              :                     ctx,
    4038              :                 )
    4039              :                 .await?,
    4040              :             );
    4041              : 
    4042              :             if let Some(delta_layer) = delta_layer {
    4043              :                 layers_to_upload.push(delta_layer.clone());
    4044              :                 (layers_to_upload, Some(delta_layer))
    4045              :             } else {
    4046              :                 (layers_to_upload, None)
    4047              :             }
    4048              :         } else {
    4049              :             // Normal case, write out a L0 delta layer file.
    4050              :             // `create_delta_layer` will not modify the layer map.
    4051              :             // We will remove frozen layer and add delta layer in one atomic operation later.
    4052              :             let Some(layer) = self
    4053              :                 .create_delta_layer(&frozen_layer, None, ctx)
    4054              :                 .await
    4055            0 :                 .map_err(|e| FlushLayerError::from_anyhow(self, e))?
    4056              :             else {
    4057              :                 panic!("delta layer cannot be empty if no filter is applied");
    4058              :             };
    4059              :             (
    4060              :                 // FIXME: even though we have a single image and single delta layer assumption
    4061              :                 // we push them to vec
    4062              :                 vec![layer.clone()],
    4063              :                 Some(layer),
    4064              :             )
    4065              :         };
    4066              : 
    4067              :         pausable_failpoint!("flush-layer-cancel-after-writing-layer-out-pausable");
    4068              : 
    4069              :         if self.cancel.is_cancelled() {
    4070              :             return Err(FlushLayerError::Cancelled);
    4071              :         }
    4072              : 
    4073              :         let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1);
    4074              : 
    4075              :         // The new on-disk layers are now in the layer map. We can remove the
    4076              :         // in-memory layer from the map now. The flushed layer is stored in
    4077              :         // the mapping in `create_delta_layer`.
    4078              :         {
    4079              :             let mut guard = self.layers.write().await;
    4080              : 
    4081              :             if self.cancel.is_cancelled() {
    4082              :                 return Err(FlushLayerError::Cancelled);
    4083              :             }
    4084              : 
    4085              :             guard.finish_flush_l0_layer(delta_layer_to_add.as_ref(), &frozen_layer, &self.metrics);
    4086              : 
    4087              :             if self.set_disk_consistent_lsn(disk_consistent_lsn) {
    4088              :                 // Schedule remote uploads that will reflect our new disk_consistent_lsn
    4089              :                 self.schedule_uploads(disk_consistent_lsn, layers_to_upload)
    4090            0 :                     .map_err(|e| FlushLayerError::from_anyhow(self, e))?;
    4091              :             }
    4092              :             // release lock on 'layers'
    4093              :         };
    4094              : 
    4095              :         // Backpressure mechanism: wait with continuation of the flush loop until we have uploaded all layer files.
    4096              :         // This makes us refuse ingest until the new layers have been persisted to the remote.
    4097              :         self.remote_client
    4098              :             .wait_completion()
    4099              :             .await
    4100            0 :             .map_err(|e| match e {
    4101              :                 WaitCompletionError::UploadQueueShutDownOrStopped
    4102              :                 | WaitCompletionError::NotInitialized(
    4103              :                     NotInitialized::ShuttingDown | NotInitialized::Stopped,
    4104            0 :                 ) => FlushLayerError::Cancelled,
    4105              :                 WaitCompletionError::NotInitialized(NotInitialized::Uninitialized) => {
    4106            0 :                     FlushLayerError::Other(anyhow!(e).into())
    4107              :                 }
    4108            0 :             })?;
    4109              : 
    4110              :         // FIXME: between create_delta_layer and the scheduling of the upload in `update_metadata_file`,
    4111              :         // a compaction can delete the file and then it won't be available for uploads any more.
    4112              :         // We still schedule the upload, resulting in an error, but ideally we'd somehow avoid this
    4113              :         // race situation.
    4114              :         // See https://github.com/neondatabase/neon/issues/4526
    4115              :         pausable_failpoint!("flush-frozen-pausable");
    4116              : 
    4117              :         // This failpoint is used by another test case `test_pageserver_recovery`.
    4118              :         fail_point!("flush-frozen-exit");
    4119              : 
    4120              :         Ok(Lsn(lsn_range.end.0 - 1))
    4121              :     }
    4122              : 
    4123              :     /// Return true if the value changed
    4124              :     ///
    4125              :     /// This function must only be used from the layer flush task, and may not be called concurrently.
    4126         1126 :     fn set_disk_consistent_lsn(&self, new_value: Lsn) -> bool {
    4127         1126 :         // We do a simple load/store cycle: that's why this function isn't safe for concurrent use.
    4128         1126 :         let old_value = self.disk_consistent_lsn.load();
    4129         1126 :         if new_value != old_value {
    4130         1126 :             assert!(new_value >= old_value);
    4131         1126 :             self.disk_consistent_lsn.store(new_value);
    4132         1126 :             true
    4133              :         } else {
    4134            0 :             false
    4135              :         }
    4136         1126 :     }
    4137              : 
    4138              :     /// Update metadata file
    4139         1132 :     fn schedule_uploads(
    4140         1132 :         &self,
    4141         1132 :         disk_consistent_lsn: Lsn,
    4142         1132 :         layers_to_upload: impl IntoIterator<Item = ResidentLayer>,
    4143         1132 :     ) -> anyhow::Result<()> {
    4144         1132 :         // We can only save a valid 'prev_record_lsn' value on disk if we
    4145         1132 :         // flushed *all* in-memory changes to disk. We only track
    4146         1132 :         // 'prev_record_lsn' in memory for the latest processed record, so we
    4147         1132 :         // don't remember what the correct value that corresponds to some old
    4148         1132 :         // LSN is. But if we flush everything, then the value corresponding
    4149         1132 :         // current 'last_record_lsn' is correct and we can store it on disk.
    4150         1132 :         let RecordLsn {
    4151         1132 :             last: last_record_lsn,
    4152         1132 :             prev: prev_record_lsn,
    4153         1132 :         } = self.last_record_lsn.load();
    4154         1132 :         let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
    4155         1052 :             Some(prev_record_lsn)
    4156              :         } else {
    4157           80 :             None
    4158              :         };
    4159              : 
    4160         1132 :         let update = crate::tenant::metadata::MetadataUpdate::new(
    4161         1132 :             disk_consistent_lsn,
    4162         1132 :             ondisk_prev_record_lsn,
    4163         1132 :             *self.latest_gc_cutoff_lsn.read(),
    4164         1132 :         );
    4165         1132 : 
    4166         1132 :         fail_point!("checkpoint-before-saving-metadata", |x| bail!(
    4167            0 :             "{}",
    4168            0 :             x.unwrap()
    4169         1132 :         ));
    4170              : 
    4171         2272 :         for layer in layers_to_upload {
    4172         1140 :             self.remote_client.schedule_layer_file_upload(layer)?;
    4173              :         }
    4174         1132 :         self.remote_client
    4175         1132 :             .schedule_index_upload_for_metadata_update(&update)?;
    4176              : 
    4177         1132 :         Ok(())
    4178         1132 :     }
    4179              : 
    4180            0 :     pub(crate) async fn preserve_initdb_archive(&self) -> anyhow::Result<()> {
    4181            0 :         self.remote_client
    4182            0 :             .preserve_initdb_archive(
    4183            0 :                 &self.tenant_shard_id.tenant_id,
    4184            0 :                 &self.timeline_id,
    4185            0 :                 &self.cancel,
    4186            0 :             )
    4187            0 :             .await
    4188            0 :     }
    4189              : 
    4190              :     // Write out the given frozen in-memory layer as a new L0 delta file. This L0 file will not be tracked
    4191              :     // in layer map immediately. The caller is responsible to put it into the layer map.
    4192         1126 :     async fn create_delta_layer(
    4193         1126 :         self: &Arc<Self>,
    4194         1126 :         frozen_layer: &Arc<InMemoryLayer>,
    4195         1126 :         key_range: Option<Range<Key>>,
    4196         1126 :         ctx: &RequestContext,
    4197         1126 :     ) -> anyhow::Result<Option<ResidentLayer>> {
    4198         1126 :         let self_clone = Arc::clone(self);
    4199         1126 :         let frozen_layer = Arc::clone(frozen_layer);
    4200         1126 :         let ctx = ctx.attached_child();
    4201         1126 :         let work = async move {
    4202         1126 :             let Some(new_delta) = frozen_layer
    4203         1126 :                 .write_to_disk(&self_clone, &ctx, key_range)
    4204        10269 :                 .await?
    4205              :             else {
    4206          158 :                 return Ok(None);
    4207              :             };
    4208              :             // The write_to_disk() above calls writer.finish() which already did the fsync of the inodes.
    4209              :             // We just need to fsync the directory in which these inodes are linked,
    4210              :             // which we know to be the timeline directory.
    4211              :             //
    4212              :             // We use fatal_err() below because the after write_to_disk returns with success,
    4213              :             // the in-memory state of the filesystem already has the layer file in its final place,
    4214              :             // and subsequent pageserver code could think it's durable while it really isn't.
    4215          968 :             let timeline_dir = VirtualFile::open(
    4216          968 :                 &self_clone
    4217          968 :                     .conf
    4218          968 :                     .timeline_path(&self_clone.tenant_shard_id, &self_clone.timeline_id),
    4219          968 :                 &ctx,
    4220          968 :             )
    4221          487 :             .await
    4222          968 :             .fatal_err("VirtualFile::open for timeline dir fsync");
    4223          968 :             timeline_dir
    4224          968 :                 .sync_all()
    4225          484 :                 .await
    4226          968 :                 .fatal_err("VirtualFile::sync_all timeline dir");
    4227          968 :             anyhow::Ok(Some(new_delta))
    4228         1126 :         };
    4229              :         // Before tokio-epoll-uring, we ran write_to_disk & the sync_all inside spawn_blocking.
    4230              :         // Preserve that behavior to maintain the same behavior for `virtual_file_io_engine=std-fs`.
    4231              :         use crate::virtual_file::io_engine::IoEngine;
    4232         1126 :         match crate::virtual_file::io_engine::get() {
    4233            0 :             IoEngine::NotSet => panic!("io engine not set"),
    4234              :             IoEngine::StdFs => {
    4235          563 :                 let span = tracing::info_span!("blocking");
    4236          563 :                 tokio::task::spawn_blocking({
    4237          563 :                     move || Handle::current().block_on(work.instrument(span))
    4238          563 :                 })
    4239          563 :                 .await
    4240          563 :                 .context("spawn_blocking")
    4241          563 :                 .and_then(|x| x)
    4242              :             }
    4243              :             #[cfg(target_os = "linux")]
    4244        11235 :             IoEngine::TokioEpollUring => work.await,
    4245              :         }
    4246         1126 :     }
    4247              : 
    4248          522 :     async fn repartition(
    4249          522 :         &self,
    4250          522 :         lsn: Lsn,
    4251          522 :         partition_size: u64,
    4252          522 :         flags: EnumSet<CompactFlags>,
    4253          522 :         ctx: &RequestContext,
    4254          522 :     ) -> anyhow::Result<((KeyPartitioning, SparseKeyPartitioning), Lsn)> {
    4255          522 :         let Ok(mut partitioning_guard) = self.partitioning.try_lock() else {
    4256              :             // NB: there are two callers, one is the compaction task, of which there is only one per struct Tenant and hence Timeline.
    4257              :             // The other is the initdb optimization in flush_frozen_layer, used by `boostrap_timeline`, which runs before `.activate()`
    4258              :             // and hence before the compaction task starts.
    4259            0 :             anyhow::bail!("repartition() called concurrently, this should not happen");
    4260              :         };
    4261          522 :         let ((dense_partition, sparse_partition), partition_lsn) = &*partitioning_guard;
    4262          522 :         if lsn < *partition_lsn {
    4263            0 :             anyhow::bail!("repartition() called with LSN going backwards, this should not happen");
    4264          522 :         }
    4265          522 : 
    4266          522 :         let distance = lsn.0 - partition_lsn.0;
    4267          522 :         if *partition_lsn != Lsn(0)
    4268          262 :             && distance <= self.repartition_threshold
    4269          262 :             && !flags.contains(CompactFlags::ForceRepartition)
    4270              :         {
    4271          248 :             debug!(
    4272              :                 distance,
    4273              :                 threshold = self.repartition_threshold,
    4274            0 :                 "no repartitioning needed"
    4275              :             );
    4276          248 :             return Ok((
    4277          248 :                 (dense_partition.clone(), sparse_partition.clone()),
    4278          248 :                 *partition_lsn,
    4279          248 :             ));
    4280          274 :         }
    4281              : 
    4282        16708 :         let (dense_ks, sparse_ks) = self.collect_keyspace(lsn, ctx).await?;
    4283          274 :         let dense_partitioning = dense_ks.partition(&self.shard_identity, partition_size);
    4284          274 :         let sparse_partitioning = SparseKeyPartitioning {
    4285          274 :             parts: vec![sparse_ks],
    4286          274 :         }; // no partitioning for metadata keys for now
    4287          274 :         *partitioning_guard = ((dense_partitioning, sparse_partitioning), lsn);
    4288          274 : 
    4289          274 :         Ok((partitioning_guard.0.clone(), partitioning_guard.1))
    4290          522 :     }
    4291              : 
    4292              :     // Is it time to create a new image layer for the given partition?
    4293           14 :     async fn time_for_new_image_layer(&self, partition: &KeySpace, lsn: Lsn) -> bool {
    4294           14 :         let threshold = self.get_image_creation_threshold();
    4295              : 
    4296           14 :         let guard = self.layers.read().await;
    4297           14 :         let layers = guard.layer_map();
    4298           14 : 
    4299           14 :         let mut max_deltas = 0;
    4300           28 :         for part_range in &partition.ranges {
    4301           14 :             let image_coverage = layers.image_coverage(part_range, lsn);
    4302           28 :             for (img_range, last_img) in image_coverage {
    4303           14 :                 let img_lsn = if let Some(last_img) = last_img {
    4304            0 :                     last_img.get_lsn_range().end
    4305              :                 } else {
    4306           14 :                     Lsn(0)
    4307              :                 };
    4308              :                 // Let's consider an example:
    4309              :                 //
    4310              :                 // delta layer with LSN range 71-81
    4311              :                 // delta layer with LSN range 81-91
    4312              :                 // delta layer with LSN range 91-101
    4313              :                 // image layer at LSN 100
    4314              :                 //
    4315              :                 // If 'lsn' is still 100, i.e. no new WAL has been processed since the last image layer,
    4316              :                 // there's no need to create a new one. We check this case explicitly, to avoid passing
    4317              :                 // a bogus range to count_deltas below, with start > end. It's even possible that there
    4318              :                 // are some delta layers *later* than current 'lsn', if more WAL was processed and flushed
    4319              :                 // after we read last_record_lsn, which is passed here in the 'lsn' argument.
    4320           14 :                 if img_lsn < lsn {
    4321           14 :                     let num_deltas =
    4322           14 :                         layers.count_deltas(&img_range, &(img_lsn..lsn), Some(threshold));
    4323           14 : 
    4324           14 :                     max_deltas = max_deltas.max(num_deltas);
    4325           14 :                     if num_deltas >= threshold {
    4326            0 :                         debug!(
    4327            0 :                             "key range {}-{}, has {} deltas on this timeline in LSN range {}..{}",
    4328              :                             img_range.start, img_range.end, num_deltas, img_lsn, lsn
    4329              :                         );
    4330            0 :                         return true;
    4331           14 :                     }
    4332            0 :                 }
    4333              :             }
    4334              :         }
    4335              : 
    4336           14 :         debug!(
    4337              :             max_deltas,
    4338            0 :             "none of the partitioned ranges had >= {threshold} deltas"
    4339              :         );
    4340           14 :         false
    4341           14 :     }
    4342              : 
    4343              :     /// Create image layers for Postgres data. Assumes the caller passes a partition that is not too large,
    4344              :     /// so that at most one image layer will be produced from this function.
    4345          186 :     async fn create_image_layer_for_rel_blocks(
    4346          186 :         self: &Arc<Self>,
    4347          186 :         partition: &KeySpace,
    4348          186 :         mut image_layer_writer: ImageLayerWriter,
    4349          186 :         lsn: Lsn,
    4350          186 :         ctx: &RequestContext,
    4351          186 :         img_range: Range<Key>,
    4352          186 :         start: Key,
    4353          186 :     ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
    4354          186 :         let mut wrote_keys = false;
    4355          186 : 
    4356          186 :         let mut key_request_accum = KeySpaceAccum::new();
    4357         1222 :         for range in &partition.ranges {
    4358         1036 :             let mut key = range.start;
    4359         2410 :             while key < range.end {
    4360              :                 // Decide whether to retain this key: usually we do, but sharded tenants may
    4361              :                 // need to drop keys that don't belong to them.  If we retain the key, add it
    4362              :                 // to `key_request_accum` for later issuing a vectored get
    4363         1374 :                 if self.shard_identity.is_key_disposable(&key) {
    4364            0 :                     debug!(
    4365            0 :                         "Dropping key {} during compaction (it belongs on shard {:?})",
    4366            0 :                         key,
    4367            0 :                         self.shard_identity.get_shard_number(&key)
    4368              :                     );
    4369         1374 :                 } else {
    4370         1374 :                     key_request_accum.add_key(key);
    4371         1374 :                 }
    4372              : 
    4373         1374 :                 let last_key_in_range = key.next() == range.end;
    4374         1374 :                 key = key.next();
    4375         1374 : 
    4376         1374 :                 // Maybe flush `key_rest_accum`
    4377         1374 :                 if key_request_accum.raw_size() >= Timeline::MAX_GET_VECTORED_KEYS
    4378         1374 :                     || (last_key_in_range && key_request_accum.raw_size() > 0)
    4379              :                 {
    4380         1036 :                     let results = self
    4381         1036 :                         .get_vectored(key_request_accum.consume_keyspace(), lsn, ctx)
    4382           49 :                         .await?;
    4383              : 
    4384         2410 :                     for (img_key, img) in results {
    4385         1374 :                         let img = match img {
    4386         1374 :                             Ok(img) => img,
    4387            0 :                             Err(err) => {
    4388            0 :                                 // If we fail to reconstruct a VM or FSM page, we can zero the
    4389            0 :                                 // page without losing any actual user data. That seems better
    4390            0 :                                 // than failing repeatedly and getting stuck.
    4391            0 :                                 //
    4392            0 :                                 // We had a bug at one point, where we truncated the FSM and VM
    4393            0 :                                 // in the pageserver, but the Postgres didn't know about that
    4394            0 :                                 // and continued to generate incremental WAL records for pages
    4395            0 :                                 // that didn't exist in the pageserver. Trying to replay those
    4396            0 :                                 // WAL records failed to find the previous image of the page.
    4397            0 :                                 // This special case allows us to recover from that situation.
    4398            0 :                                 // See https://github.com/neondatabase/neon/issues/2601.
    4399            0 :                                 //
    4400            0 :                                 // Unfortunately we cannot do this for the main fork, or for
    4401            0 :                                 // any metadata keys, keys, as that would lead to actual data
    4402            0 :                                 // loss.
    4403            0 :                                 if img_key.is_rel_fsm_block_key() || img_key.is_rel_vm_block_key() {
    4404            0 :                                     warn!("could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}");
    4405            0 :                                     ZERO_PAGE.clone()
    4406              :                                 } else {
    4407            0 :                                     return Err(CreateImageLayersError::PageReconstructError(err));
    4408              :                                 }
    4409              :                             }
    4410              :                         };
    4411              : 
    4412              :                         // Write all the keys we just read into our new image layer.
    4413         1484 :                         image_layer_writer.put_image(img_key, img, ctx).await?;
    4414         1374 :                         wrote_keys = true;
    4415              :                     }
    4416          338 :                 }
    4417              :             }
    4418              :         }
    4419              : 
    4420          186 :         if wrote_keys {
    4421              :             // Normal path: we have written some data into the new image layer for this
    4422              :             // partition, so flush it to disk.
    4423          375 :             let image_layer = image_layer_writer.finish(self, ctx).await?;
    4424          186 :             Ok(ImageLayerCreationOutcome {
    4425          186 :                 image: Some(image_layer),
    4426          186 :                 next_start_key: img_range.end,
    4427          186 :             })
    4428              :         } else {
    4429              :             // Special case: the image layer may be empty if this is a sharded tenant and the
    4430              :             // partition does not cover any keys owned by this shard.  In this case, to ensure
    4431              :             // we don't leave gaps between image layers, leave `start` where it is, so that the next
    4432              :             // layer we write will cover the key range that we just scanned.
    4433            0 :             tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
    4434            0 :             Ok(ImageLayerCreationOutcome {
    4435            0 :                 image: None,
    4436            0 :                 next_start_key: start,
    4437            0 :             })
    4438              :         }
    4439          186 :     }
    4440              : 
    4441              :     /// Create an image layer for metadata keys. This function produces one image layer for all metadata
    4442              :     /// keys for now. Because metadata keys cannot exceed basebackup size limit, the image layer for it
    4443              :     /// would not be too large to fit in a single image layer.
    4444              :     #[allow(clippy::too_many_arguments)]
    4445           16 :     async fn create_image_layer_for_metadata_keys(
    4446           16 :         self: &Arc<Self>,
    4447           16 :         partition: &KeySpace,
    4448           16 :         mut image_layer_writer: ImageLayerWriter,
    4449           16 :         lsn: Lsn,
    4450           16 :         ctx: &RequestContext,
    4451           16 :         img_range: Range<Key>,
    4452           16 :         mode: ImageLayerCreationMode,
    4453           16 :         start: Key,
    4454           16 :     ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
    4455           16 :         assert!(!matches!(mode, ImageLayerCreationMode::Initial));
    4456              : 
    4457              :         // Metadata keys image layer creation.
    4458           16 :         let mut reconstruct_state = ValuesReconstructState::default();
    4459           16 :         let data = self
    4460           16 :             .get_vectored_impl(partition.clone(), lsn, &mut reconstruct_state, ctx)
    4461         4052 :             .await?;
    4462           16 :         let (data, total_kb_retrieved, total_keys_retrieved) = {
    4463           16 :             let mut new_data = BTreeMap::new();
    4464           16 :             let mut total_kb_retrieved = 0;
    4465           16 :             let mut total_keys_retrieved = 0;
    4466        10028 :             for (k, v) in data {
    4467        10012 :                 let v = v.map_err(CreateImageLayersError::PageReconstructError)?;
    4468        10012 :                 total_kb_retrieved += KEY_SIZE + v.len();
    4469        10012 :                 total_keys_retrieved += 1;
    4470        10012 :                 new_data.insert(k, v);
    4471              :             }
    4472           16 :             (new_data, total_kb_retrieved / 1024, total_keys_retrieved)
    4473           16 :         };
    4474           16 :         let delta_files_accessed = reconstruct_state.get_delta_layers_visited();
    4475           16 : 
    4476           16 :         let trigger_generation = delta_files_accessed as usize >= MAX_AUX_FILE_V2_DELTAS;
    4477           16 :         debug!(
    4478              :             trigger_generation,
    4479              :             delta_files_accessed,
    4480              :             total_kb_retrieved,
    4481              :             total_keys_retrieved,
    4482            0 :             "generate metadata images"
    4483              :         );
    4484              : 
    4485           16 :         if !trigger_generation && mode == ImageLayerCreationMode::Try {
    4486            2 :             return Ok(ImageLayerCreationOutcome {
    4487            2 :                 image: None,
    4488            2 :                 next_start_key: img_range.end,
    4489            2 :             });
    4490           14 :         }
    4491           14 :         let mut wrote_any_image = false;
    4492        10026 :         for (k, v) in data {
    4493        10012 :             if v.is_empty() {
    4494              :                 // the key has been deleted, it does not need an image
    4495              :                 // in metadata keyspace, an empty image == tombstone
    4496            8 :                 continue;
    4497        10004 :             }
    4498        10004 :             wrote_any_image = true;
    4499        10004 : 
    4500        10004 :             // No need to handle sharding b/c metadata keys are always on the 0-th shard.
    4501        10004 : 
    4502        10004 :             // TODO: split image layers to avoid too large layer files. Too large image files are not handled
    4503        10004 :             // on the normal data path either.
    4504        10162 :             image_layer_writer.put_image(k, v, ctx).await?;
    4505              :         }
    4506              : 
    4507           14 :         if wrote_any_image {
    4508              :             // Normal path: we have written some data into the new image layer for this
    4509              :             // partition, so flush it to disk.
    4510           25 :             let image_layer = image_layer_writer.finish(self, ctx).await?;
    4511           12 :             Ok(ImageLayerCreationOutcome {
    4512           12 :                 image: Some(image_layer),
    4513           12 :                 next_start_key: img_range.end,
    4514           12 :             })
    4515              :         } else {
    4516              :             // Special case: the image layer may be empty if this is a sharded tenant and the
    4517              :             // partition does not cover any keys owned by this shard. In this case, to ensure
    4518              :             // we don't leave gaps between image layers, leave `start` where it is, so that the next
    4519              :             // layer we write will cover the key range that we just scanned.
    4520            2 :             tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
    4521            2 :             Ok(ImageLayerCreationOutcome {
    4522            2 :                 image: None,
    4523            2 :                 next_start_key: start,
    4524            2 :             })
    4525              :         }
    4526           16 :     }
    4527              : 
    4528              :     /// Predicate function which indicates whether we should check if new image layers
    4529              :     /// are required. Since checking if new image layers are required is expensive in
    4530              :     /// terms of CPU, we only do it in the following cases:
    4531              :     /// 1. If the timeline has ingested sufficient WAL to justify the cost
    4532              :     /// 2. If enough time has passed since the last check:
    4533              :     ///     1. For large tenants, we wish to perform the check more often since they
    4534              :     ///        suffer from the lack of image layers
    4535              :     ///     2. For small tenants (that can mostly fit in RAM), we use a much longer interval
    4536          522 :     fn should_check_if_image_layers_required(self: &Arc<Timeline>, lsn: Lsn) -> bool {
    4537          522 :         const LARGE_TENANT_THRESHOLD: u64 = 2 * 1024 * 1024 * 1024;
    4538          522 : 
    4539          522 :         let last_checks_at = self.last_image_layer_creation_check_at.load();
    4540          522 :         let distance = lsn
    4541          522 :             .checked_sub(last_checks_at)
    4542          522 :             .expect("Attempt to compact with LSN going backwards");
    4543          522 :         let min_distance =
    4544          522 :             self.get_image_layer_creation_check_threshold() as u64 * self.get_checkpoint_distance();
    4545          522 : 
    4546          522 :         let distance_based_decision = distance.0 >= min_distance;
    4547          522 : 
    4548          522 :         let mut time_based_decision = false;
    4549          522 :         let mut last_check_instant = self.last_image_layer_creation_check_instant.lock().unwrap();
    4550          522 :         if let CurrentLogicalSize::Exact(logical_size) = self.current_logical_size.current_size() {
    4551          420 :             let check_required_after = if Into::<u64>::into(&logical_size) >= LARGE_TENANT_THRESHOLD
    4552              :             {
    4553            0 :                 self.get_checkpoint_timeout()
    4554              :             } else {
    4555          420 :                 Duration::from_secs(3600 * 48)
    4556              :             };
    4557              : 
    4558          420 :             time_based_decision = match *last_check_instant {
    4559          262 :                 Some(last_check) => {
    4560          262 :                     let elapsed = last_check.elapsed();
    4561          262 :                     elapsed >= check_required_after
    4562              :                 }
    4563          158 :                 None => true,
    4564              :             };
    4565          102 :         }
    4566              : 
    4567              :         // Do the expensive delta layer counting only if this timeline has ingested sufficient
    4568              :         // WAL since the last check or a checkpoint timeout interval has elapsed since the last
    4569              :         // check.
    4570          522 :         let decision = distance_based_decision || time_based_decision;
    4571              : 
    4572          522 :         if decision {
    4573          160 :             self.last_image_layer_creation_check_at.store(lsn);
    4574          160 :             *last_check_instant = Some(Instant::now());
    4575          362 :         }
    4576              : 
    4577          522 :         decision
    4578          522 :     }
    4579              : 
    4580         1044 :     #[tracing::instrument(skip_all, fields(%lsn, %mode))]
    4581              :     async fn create_image_layers(
    4582              :         self: &Arc<Timeline>,
    4583              :         partitioning: &KeyPartitioning,
    4584              :         lsn: Lsn,
    4585              :         mode: ImageLayerCreationMode,
    4586              :         ctx: &RequestContext,
    4587              :     ) -> Result<Vec<ResidentLayer>, CreateImageLayersError> {
    4588              :         let timer = self.metrics.create_images_time_histo.start_timer();
    4589              :         let mut image_layers = Vec::new();
    4590              : 
    4591              :         // We need to avoid holes between generated image layers.
    4592              :         // Otherwise LayerMap::image_layer_exists will return false if key range of some layer is covered by more than one
    4593              :         // image layer with hole between them. In this case such layer can not be utilized by GC.
    4594              :         //
    4595              :         // How such hole between partitions can appear?
    4596              :         // if we have relation with relid=1 and size 100 and relation with relid=2 with size 200 then result of
    4597              :         // KeySpace::partition may contain partitions <100000000..100000099> and <200000000..200000199>.
    4598              :         // If there is delta layer <100000000..300000000> then it never be garbage collected because
    4599              :         // image layers  <100000000..100000099> and <200000000..200000199> are not completely covering it.
    4600              :         let mut start = Key::MIN;
    4601              : 
    4602              :         let check_for_image_layers = self.should_check_if_image_layers_required(lsn);
    4603              : 
    4604              :         for partition in partitioning.parts.iter() {
    4605              :             let img_range = start..partition.ranges.last().unwrap().end;
    4606              :             let compact_metadata = partition.overlaps(&Key::metadata_key_range());
    4607              :             if compact_metadata {
    4608              :                 for range in &partition.ranges {
    4609              :                     assert!(
    4610              :                         range.start.field1 >= METADATA_KEY_BEGIN_PREFIX
    4611              :                             && range.end.field1 <= METADATA_KEY_END_PREFIX,
    4612              :                         "metadata keys must be partitioned separately"
    4613              :                     );
    4614              :                 }
    4615              :                 if mode == ImageLayerCreationMode::Initial {
    4616              :                     return Err(CreateImageLayersError::Other(anyhow::anyhow!("no image layer should be created for metadata keys when flushing frozen layers")));
    4617              :                 }
    4618              :                 if mode == ImageLayerCreationMode::Try && !check_for_image_layers {
    4619              :                     // Skip compaction if there are not enough updates. Metadata compaction will do a scan and
    4620              :                     // might mess up with evictions.
    4621              :                     start = img_range.end;
    4622              :                     continue;
    4623              :                 }
    4624              :             } else if let ImageLayerCreationMode::Try = mode {
    4625              :                 // check_for_image_layers = false -> skip
    4626              :                 // check_for_image_layers = true -> check time_for_new_image_layer -> skip/generate
    4627              :                 if !check_for_image_layers || !self.time_for_new_image_layer(partition, lsn).await {
    4628              :                     start = img_range.end;
    4629              :                     continue;
    4630              :                 }
    4631              :             } else if let ImageLayerCreationMode::Force = mode {
    4632              :                 // When forced to create image layers, we might try and create them where they already
    4633              :                 // exist.  This mode is only used in tests/debug.
    4634              :                 let layers = self.layers.read().await;
    4635              :                 if layers.contains_key(&PersistentLayerKey {
    4636              :                     key_range: img_range.clone(),
    4637              :                     lsn_range: PersistentLayerDesc::image_layer_lsn_range(lsn),
    4638              :                     is_delta: false,
    4639              :                 }) {
    4640              :                     tracing::info!(
    4641              :                         "Skipping image layer at {lsn} {}..{}, already exists",
    4642              :                         img_range.start,
    4643              :                         img_range.end
    4644              :                     );
    4645              :                     continue;
    4646              :                 }
    4647              :             }
    4648              : 
    4649              :             let image_layer_writer = ImageLayerWriter::new(
    4650              :                 self.conf,
    4651              :                 self.timeline_id,
    4652              :                 self.tenant_shard_id,
    4653              :                 &img_range,
    4654              :                 lsn,
    4655              :                 ctx,
    4656              :             )
    4657              :             .await?;
    4658              : 
    4659            0 :             fail_point!("image-layer-writer-fail-before-finish", |_| {
    4660            0 :                 Err(CreateImageLayersError::Other(anyhow::anyhow!(
    4661            0 :                     "failpoint image-layer-writer-fail-before-finish"
    4662            0 :                 )))
    4663            0 :             });
    4664              : 
    4665              :             if !compact_metadata {
    4666              :                 let ImageLayerCreationOutcome {
    4667              :                     image,
    4668              :                     next_start_key,
    4669              :                 } = self
    4670              :                     .create_image_layer_for_rel_blocks(
    4671              :                         partition,
    4672              :                         image_layer_writer,
    4673              :                         lsn,
    4674              :                         ctx,
    4675              :                         img_range,
    4676              :                         start,
    4677              :                     )
    4678              :                     .await?;
    4679              : 
    4680              :                 start = next_start_key;
    4681              :                 image_layers.extend(image);
    4682              :             } else {
    4683              :                 let ImageLayerCreationOutcome {
    4684              :                     image,
    4685              :                     next_start_key,
    4686              :                 } = self
    4687              :                     .create_image_layer_for_metadata_keys(
    4688              :                         partition,
    4689              :                         image_layer_writer,
    4690              :                         lsn,
    4691              :                         ctx,
    4692              :                         img_range,
    4693              :                         mode,
    4694              :                         start,
    4695              :                     )
    4696              :                     .await?;
    4697              :                 start = next_start_key;
    4698              :                 image_layers.extend(image);
    4699              :             }
    4700              :         }
    4701              : 
    4702              :         let mut guard = self.layers.write().await;
    4703              : 
    4704              :         // FIXME: we could add the images to be uploaded *before* returning from here, but right
    4705              :         // now they are being scheduled outside of write lock
    4706              :         guard.track_new_image_layers(&image_layers, &self.metrics);
    4707              :         drop_wlock(guard);
    4708              :         timer.stop_and_record();
    4709              : 
    4710              :         // Creating image layers may have caused some previously visible layers to be covered
    4711              :         self.update_layer_visibility().await;
    4712              : 
    4713              :         Ok(image_layers)
    4714              :     }
    4715              : 
    4716              :     /// Wait until the background initial logical size calculation is complete, or
    4717              :     /// this Timeline is shut down.  Calling this function will cause the initial
    4718              :     /// logical size calculation to skip waiting for the background jobs barrier.
    4719            0 :     pub(crate) async fn await_initial_logical_size(self: Arc<Self>) {
    4720            0 :         if !self.shard_identity.is_shard_zero() {
    4721              :             // We don't populate logical size on shard >0: skip waiting for it.
    4722            0 :             return;
    4723            0 :         }
    4724            0 : 
    4725            0 :         if self.remote_client.is_deleting() {
    4726              :             // The timeline was created in a deletion-resume state, we don't expect logical size to be populated
    4727            0 :             return;
    4728            0 :         }
    4729              : 
    4730            0 :         if let Some(await_bg_cancel) = self
    4731            0 :             .current_logical_size
    4732            0 :             .cancel_wait_for_background_loop_concurrency_limit_semaphore
    4733            0 :             .get()
    4734            0 :         {
    4735            0 :             await_bg_cancel.cancel();
    4736            0 :         } else {
    4737              :             // We should not wait if we were not able to explicitly instruct
    4738              :             // the logical size cancellation to skip the concurrency limit semaphore.
    4739              :             // TODO: this is an unexpected case.  We should restructure so that it
    4740              :             // can't happen.
    4741            0 :             tracing::warn!(
    4742            0 :                 "await_initial_logical_size: can't get semaphore cancel token, skipping"
    4743              :             );
    4744            0 :             debug_assert!(false);
    4745              :         }
    4746              : 
    4747              :         tokio::select!(
    4748              :             _ = self.current_logical_size.initialized.acquire() => {},
    4749              :             _ = self.cancel.cancelled() => {}
    4750              :         )
    4751            0 :     }
    4752              : 
    4753              :     /// Detach this timeline from its ancestor by copying all of ancestors layers as this
    4754              :     /// Timelines layers up to the ancestor_lsn.
    4755              :     ///
    4756              :     /// Requires a timeline that:
    4757              :     /// - has an ancestor to detach from
    4758              :     /// - the ancestor does not have an ancestor -- follows from the original RFC limitations, not
    4759              :     ///   a technical requirement
    4760              :     ///
    4761              :     /// After the operation has been started, it cannot be canceled. Upon restart it needs to be
    4762              :     /// polled again until completion.
    4763              :     ///
    4764              :     /// During the operation all timelines sharing the data with this timeline will be reparented
    4765              :     /// from our ancestor to be branches of this timeline.
    4766            0 :     pub(crate) async fn prepare_to_detach_from_ancestor(
    4767            0 :         self: &Arc<Timeline>,
    4768            0 :         tenant: &crate::tenant::Tenant,
    4769            0 :         options: detach_ancestor::Options,
    4770            0 :         ctx: &RequestContext,
    4771            0 :     ) -> Result<detach_ancestor::Progress, detach_ancestor::Error> {
    4772            0 :         detach_ancestor::prepare(self, tenant, options, ctx).await
    4773            0 :     }
    4774              : 
    4775              :     /// Completes the ancestor detach. This method is to be called while holding the
    4776              :     /// TenantManager's tenant slot, so during this method we cannot be deleted nor can any
    4777              :     /// timeline be deleted. After this method returns successfully, tenant must be reloaded.
    4778              :     ///
    4779              :     /// Pageserver receiving a SIGKILL during this operation is not supported (yet).
    4780            0 :     pub(crate) async fn complete_detaching_timeline_ancestor(
    4781            0 :         self: &Arc<Timeline>,
    4782            0 :         tenant: &crate::tenant::Tenant,
    4783            0 :         prepared: detach_ancestor::PreparedTimelineDetach,
    4784            0 :         ctx: &RequestContext,
    4785            0 :     ) -> Result<Vec<TimelineId>, anyhow::Error> {
    4786            0 :         detach_ancestor::complete(self, tenant, prepared, ctx).await
    4787            0 :     }
    4788              : 
    4789              :     /// Switch aux file policy and schedule upload to the index part.
    4790           16 :     pub(crate) fn do_switch_aux_policy(&self, policy: AuxFilePolicy) -> anyhow::Result<()> {
    4791           16 :         self.last_aux_file_policy.store(Some(policy));
    4792           16 :         self.remote_client
    4793           16 :             .schedule_index_upload_for_aux_file_policy_update(Some(policy))?;
    4794           16 :         Ok(())
    4795           16 :     }
    4796              : }
    4797              : 
    4798              : impl Drop for Timeline {
    4799            8 :     fn drop(&mut self) {
    4800            8 :         if let Some(ancestor) = &self.ancestor_timeline {
    4801              :             // This lock should never be poisoned, but in case it is we do a .map() instead of
    4802              :             // an unwrap(), to avoid panicking in a destructor and thereby aborting the process.
    4803            2 :             if let Ok(mut gc_info) = ancestor.gc_info.write() {
    4804            2 :                 gc_info.remove_child(self.timeline_id)
    4805            0 :             }
    4806            6 :         }
    4807            8 :     }
    4808              : }
    4809              : 
    4810              : /// Top-level failure to compact.
    4811            0 : #[derive(Debug, thiserror::Error)]
    4812              : pub(crate) enum CompactionError {
    4813              :     #[error("The timeline or pageserver is shutting down")]
    4814              :     ShuttingDown,
    4815              :     /// Compaction cannot be done right now; page reconstruction and so on.
    4816              :     #[error(transparent)]
    4817              :     Other(anyhow::Error),
    4818              : }
    4819              : 
    4820              : impl From<CollectKeySpaceError> for CompactionError {
    4821            0 :     fn from(err: CollectKeySpaceError) -> Self {
    4822            0 :         match err {
    4823              :             CollectKeySpaceError::Cancelled
    4824              :             | CollectKeySpaceError::PageRead(PageReconstructError::Cancelled) => {
    4825            0 :                 CompactionError::ShuttingDown
    4826              :             }
    4827            0 :             e => CompactionError::Other(e.into()),
    4828              :         }
    4829            0 :     }
    4830              : }
    4831              : 
    4832              : impl From<super::upload_queue::NotInitialized> for CompactionError {
    4833            0 :     fn from(value: super::upload_queue::NotInitialized) -> Self {
    4834            0 :         match value {
    4835              :             super::upload_queue::NotInitialized::Uninitialized
    4836              :             | super::upload_queue::NotInitialized::Stopped => {
    4837            0 :                 CompactionError::Other(anyhow::anyhow!(value))
    4838              :             }
    4839            0 :             super::upload_queue::NotInitialized::ShuttingDown => CompactionError::ShuttingDown,
    4840              :         }
    4841            0 :     }
    4842              : }
    4843              : 
    4844              : impl CompactionError {
    4845              :     /// We cannot do compaction because we could not download a layer that is input to the compaction.
    4846            0 :     pub(crate) fn input_layer_download_failed(
    4847            0 :         e: super::storage_layer::layer::DownloadError,
    4848            0 :     ) -> Self {
    4849            0 :         match e {
    4850              :             super::storage_layer::layer::DownloadError::TimelineShutdown |
    4851              :             /* TODO DownloadCancelled correct here? */
    4852            0 :             super::storage_layer::layer::DownloadError::DownloadCancelled  => CompactionError::ShuttingDown,
    4853              :             super::storage_layer::layer::DownloadError::ContextAndConfigReallyDeniesDownloads |
    4854              :             super::storage_layer::layer::DownloadError::DownloadRequired |
    4855              :             super::storage_layer::layer::DownloadError::NotFile(_) |
    4856              :             super::storage_layer::layer::DownloadError::DownloadFailed |
    4857            0 :             super::storage_layer::layer::DownloadError::PreStatFailed(_)=>CompactionError::Other(anyhow::anyhow!(e)),
    4858              :             #[cfg(test)]
    4859            0 :             super::storage_layer::layer::DownloadError::Failpoint(_) =>  CompactionError::Other(anyhow::anyhow!(e)),
    4860              :         }
    4861            0 :     }
    4862              : }
    4863              : 
    4864              : #[serde_as]
    4865          196 : #[derive(serde::Serialize)]
    4866              : struct RecordedDuration(#[serde_as(as = "serde_with::DurationMicroSeconds")] Duration);
    4867              : 
    4868              : #[derive(Default)]
    4869              : enum DurationRecorder {
    4870              :     #[default]
    4871              :     NotStarted,
    4872              :     Recorded(RecordedDuration, tokio::time::Instant),
    4873              : }
    4874              : 
    4875              : impl DurationRecorder {
    4876          504 :     fn till_now(&self) -> DurationRecorder {
    4877          504 :         match self {
    4878              :             DurationRecorder::NotStarted => {
    4879            0 :                 panic!("must only call on recorded measurements")
    4880              :             }
    4881          504 :             DurationRecorder::Recorded(_, ended) => {
    4882          504 :                 let now = tokio::time::Instant::now();
    4883          504 :                 DurationRecorder::Recorded(RecordedDuration(now - *ended), now)
    4884          504 :             }
    4885          504 :         }
    4886          504 :     }
    4887          196 :     fn into_recorded(self) -> Option<RecordedDuration> {
    4888          196 :         match self {
    4889            0 :             DurationRecorder::NotStarted => None,
    4890          196 :             DurationRecorder::Recorded(recorded, _) => Some(recorded),
    4891              :         }
    4892          196 :     }
    4893              : }
    4894              : 
    4895              : /// Descriptor for a delta layer used in testing infra. The start/end key/lsn range of the
    4896              : /// delta layer might be different from the min/max key/lsn in the delta layer. Therefore,
    4897              : /// the layer descriptor requires the user to provide the ranges, which should cover all
    4898              : /// keys specified in the `data` field.
    4899              : #[cfg(test)]
    4900              : pub struct DeltaLayerTestDesc {
    4901              :     pub lsn_range: Range<Lsn>,
    4902              :     pub key_range: Range<Key>,
    4903              :     pub data: Vec<(Key, Lsn, Value)>,
    4904              : }
    4905              : 
    4906              : #[cfg(test)]
    4907              : impl DeltaLayerTestDesc {
    4908              :     #[allow(dead_code)]
    4909            0 :     pub fn new(lsn_range: Range<Lsn>, key_range: Range<Key>, data: Vec<(Key, Lsn, Value)>) -> Self {
    4910            0 :         Self {
    4911            0 :             lsn_range,
    4912            0 :             key_range,
    4913            0 :             data,
    4914            0 :         }
    4915            0 :     }
    4916              : 
    4917           46 :     pub fn new_with_inferred_key_range(
    4918           46 :         lsn_range: Range<Lsn>,
    4919           46 :         data: Vec<(Key, Lsn, Value)>,
    4920           46 :     ) -> Self {
    4921          108 :         let key_min = data.iter().map(|(key, _, _)| key).min().unwrap();
    4922          108 :         let key_max = data.iter().map(|(key, _, _)| key).max().unwrap();
    4923           46 :         Self {
    4924           46 :             key_range: (*key_min)..(key_max.next()),
    4925           46 :             lsn_range,
    4926           46 :             data,
    4927           46 :         }
    4928           46 :     }
    4929              : }
    4930              : 
    4931              : impl Timeline {
    4932           28 :     async fn finish_compact_batch(
    4933           28 :         self: &Arc<Self>,
    4934           28 :         new_deltas: &[ResidentLayer],
    4935           28 :         new_images: &[ResidentLayer],
    4936           28 :         layers_to_remove: &[Layer],
    4937           28 :     ) -> Result<(), CompactionError> {
    4938           28 :         let mut guard = self.layers.write().await;
    4939              : 
    4940           28 :         let mut duplicated_layers = HashSet::new();
    4941           28 : 
    4942           28 :         let mut insert_layers = Vec::with_capacity(new_deltas.len());
    4943              : 
    4944          336 :         for l in new_deltas {
    4945          308 :             if guard.contains(l.as_ref()) {
    4946              :                 // expected in tests
    4947            0 :                 tracing::error!(layer=%l, "duplicated L1 layer");
    4948              : 
    4949              :                 // good ways to cause a duplicate: we repeatedly error after taking the writelock
    4950              :                 // `guard`  on self.layers. as of writing this, there are no error returns except
    4951              :                 // for compact_level0_phase1 creating an L0, which does not happen in practice
    4952              :                 // because we have not implemented L0 => L0 compaction.
    4953            0 :                 duplicated_layers.insert(l.layer_desc().key());
    4954          308 :             } else if LayerMap::is_l0(&l.layer_desc().key_range) {
    4955            0 :                 return Err(CompactionError::Other(anyhow::anyhow!("compaction generates a L0 layer file as output, which will cause infinite compaction.")));
    4956          308 :             } else {
    4957          308 :                 insert_layers.push(l.clone());
    4958          308 :             }
    4959              :         }
    4960              : 
    4961              :         // only remove those inputs which were not outputs
    4962           28 :         let remove_layers: Vec<Layer> = layers_to_remove
    4963           28 :             .iter()
    4964          402 :             .filter(|l| !duplicated_layers.contains(&l.layer_desc().key()))
    4965           28 :             .cloned()
    4966           28 :             .collect();
    4967           28 : 
    4968           28 :         if !new_images.is_empty() {
    4969            0 :             guard.track_new_image_layers(new_images, &self.metrics);
    4970           28 :         }
    4971              : 
    4972              :         // deletion will happen later, the layer file manager calls garbage_collect_on_drop
    4973           28 :         guard.finish_compact_l0(&remove_layers, &insert_layers, &self.metrics);
    4974           28 : 
    4975           28 :         self.remote_client
    4976           28 :             .schedule_compaction_update(&remove_layers, new_deltas)?;
    4977              : 
    4978           28 :         drop_wlock(guard);
    4979           28 : 
    4980           28 :         Ok(())
    4981           28 :     }
    4982              : 
    4983            0 :     async fn rewrite_layers(
    4984            0 :         self: &Arc<Self>,
    4985            0 :         mut replace_layers: Vec<(Layer, ResidentLayer)>,
    4986            0 :         mut drop_layers: Vec<Layer>,
    4987            0 :     ) -> Result<(), super::upload_queue::NotInitialized> {
    4988            0 :         let mut guard = self.layers.write().await;
    4989              : 
    4990              :         // Trim our lists in case our caller (compaction) raced with someone else (GC) removing layers: we want
    4991              :         // to avoid double-removing, and avoid rewriting something that was removed.
    4992            0 :         replace_layers.retain(|(l, _)| guard.contains(l));
    4993            0 :         drop_layers.retain(|l| guard.contains(l));
    4994            0 : 
    4995            0 :         guard.rewrite_layers(&replace_layers, &drop_layers, &self.metrics);
    4996            0 : 
    4997            0 :         let upload_layers: Vec<_> = replace_layers.into_iter().map(|r| r.1).collect();
    4998            0 : 
    4999            0 :         self.remote_client
    5000            0 :             .schedule_compaction_update(&drop_layers, &upload_layers)?;
    5001              : 
    5002            0 :         Ok(())
    5003            0 :     }
    5004              : 
    5005              :     /// Schedules the uploads of the given image layers
    5006          364 :     fn upload_new_image_layers(
    5007          364 :         self: &Arc<Self>,
    5008          364 :         new_images: impl IntoIterator<Item = ResidentLayer>,
    5009          364 :     ) -> Result<(), super::upload_queue::NotInitialized> {
    5010          390 :         for layer in new_images {
    5011           26 :             self.remote_client.schedule_layer_file_upload(layer)?;
    5012              :         }
    5013              :         // should any new image layer been created, not uploading index_part will
    5014              :         // result in a mismatch between remote_physical_size and layermap calculated
    5015              :         // size, which will fail some tests, but should not be an issue otherwise.
    5016          364 :         self.remote_client
    5017          364 :             .schedule_index_upload_for_file_changes()?;
    5018          364 :         Ok(())
    5019          364 :     }
    5020              : 
    5021              :     /// Find the Lsns above which layer files need to be retained on
    5022              :     /// garbage collection.
    5023              :     ///
    5024              :     /// We calculate two cutoffs, one based on time and one based on WAL size.  `pitr`
    5025              :     /// controls the time cutoff (or ZERO to disable time-based retention), and `space_cutoff` controls
    5026              :     /// the space-based retention.
    5027              :     ///
    5028              :     /// This function doesn't simply to calculate time & space based retention: it treats time-based
    5029              :     /// retention as authoritative if enabled, and falls back to space-based retention if calculating
    5030              :     /// the LSN for a time point isn't possible.  Therefore the GcCutoffs::horizon in the response might
    5031              :     /// be different to the `space_cutoff` input.  Callers should treat the min() of the two cutoffs
    5032              :     /// in the response as the GC cutoff point for the timeline.
    5033         1508 :     #[instrument(skip_all, fields(timeline_id=%self.timeline_id))]
    5034              :     pub(super) async fn find_gc_cutoffs(
    5035              :         &self,
    5036              :         space_cutoff: Lsn,
    5037              :         pitr: Duration,
    5038              :         cancel: &CancellationToken,
    5039              :         ctx: &RequestContext,
    5040              :     ) -> Result<GcCutoffs, PageReconstructError> {
    5041              :         let _timer = self
    5042              :             .metrics
    5043              :             .find_gc_cutoffs_histo
    5044              :             .start_timer()
    5045              :             .record_on_drop();
    5046              : 
    5047              :         pausable_failpoint!("Timeline::find_gc_cutoffs-pausable");
    5048              : 
    5049              :         if cfg!(test) {
    5050              :             // Unit tests which specify zero PITR interval expect to avoid doing any I/O for timestamp lookup
    5051              :             if pitr == Duration::ZERO {
    5052              :                 return Ok(GcCutoffs {
    5053              :                     time: self.get_last_record_lsn(),
    5054              :                     space: space_cutoff,
    5055              :                 });
    5056              :             }
    5057              :         }
    5058              : 
    5059              :         // Calculate a time-based limit on how much to retain:
    5060              :         // - if PITR interval is set, then this is our cutoff.
    5061              :         // - if PITR interval is not set, then we do a lookup
    5062              :         //   based on DEFAULT_PITR_INTERVAL, so that size-based retention does not result in keeping history around permanently on idle databases.
    5063              :         let time_cutoff = {
    5064              :             let now = SystemTime::now();
    5065              :             let time_range = if pitr == Duration::ZERO {
    5066              :                 humantime::parse_duration(DEFAULT_PITR_INTERVAL).expect("constant is invalid")
    5067              :             } else {
    5068              :                 pitr
    5069              :             };
    5070              : 
    5071              :             // If PITR is so large or `now` is so small that this underflows, we will retain no history (highly unexpected case)
    5072              :             let time_cutoff = now.checked_sub(time_range).unwrap_or(now);
    5073              :             let timestamp = to_pg_timestamp(time_cutoff);
    5074              : 
    5075              :             match self.find_lsn_for_timestamp(timestamp, cancel, ctx).await? {
    5076              :                 LsnForTimestamp::Present(lsn) => Some(lsn),
    5077              :                 LsnForTimestamp::Future(lsn) => {
    5078              :                     // The timestamp is in the future. That sounds impossible,
    5079              :                     // but what it really means is that there hasn't been
    5080              :                     // any commits since the cutoff timestamp.
    5081              :                     //
    5082              :                     // In this case we should use the LSN of the most recent commit,
    5083              :                     // which is implicitly the last LSN in the log.
    5084              :                     debug!("future({})", lsn);
    5085              :                     Some(self.get_last_record_lsn())
    5086              :                 }
    5087              :                 LsnForTimestamp::Past(lsn) => {
    5088              :                     debug!("past({})", lsn);
    5089              :                     None
    5090              :                 }
    5091              :                 LsnForTimestamp::NoData(lsn) => {
    5092              :                     debug!("nodata({})", lsn);
    5093              :                     None
    5094              :                 }
    5095              :             }
    5096              :         };
    5097              : 
    5098              :         Ok(match (pitr, time_cutoff) {
    5099              :             (Duration::ZERO, Some(time_cutoff)) => {
    5100              :                 // PITR is not set. Retain the size-based limit, or the default time retention,
    5101              :                 // whichever requires less data.
    5102              :                 GcCutoffs {
    5103              :                     time: self.get_last_record_lsn(),
    5104              :                     space: std::cmp::max(time_cutoff, space_cutoff),
    5105              :                 }
    5106              :             }
    5107              :             (Duration::ZERO, None) => {
    5108              :                 // PITR is not set, and time lookup failed
    5109              :                 GcCutoffs {
    5110              :                     time: self.get_last_record_lsn(),
    5111              :                     space: space_cutoff,
    5112              :                 }
    5113              :             }
    5114              :             (_, None) => {
    5115              :                 // PITR interval is set & we didn't look up a timestamp successfully.  Conservatively assume PITR
    5116              :                 // cannot advance beyond what was already GC'd, and respect space-based retention
    5117              :                 GcCutoffs {
    5118              :                     time: *self.get_latest_gc_cutoff_lsn(),
    5119              :                     space: space_cutoff,
    5120              :                 }
    5121              :             }
    5122              :             (_, Some(time_cutoff)) => {
    5123              :                 // PITR interval is set and we looked up timestamp successfully.  Ignore
    5124              :                 // size based retention and make time cutoff authoritative
    5125              :                 GcCutoffs {
    5126              :                     time: time_cutoff,
    5127              :                     space: time_cutoff,
    5128              :                 }
    5129              :             }
    5130              :         })
    5131              :     }
    5132              : 
    5133              :     /// Garbage collect layer files on a timeline that are no longer needed.
    5134              :     ///
    5135              :     /// Currently, we don't make any attempt at removing unneeded page versions
    5136              :     /// within a layer file. We can only remove the whole file if it's fully
    5137              :     /// obsolete.
    5138          754 :     pub(super) async fn gc(&self) -> Result<GcResult, GcError> {
    5139              :         // this is most likely the background tasks, but it might be the spawned task from
    5140              :         // immediate_gc
    5141          753 :         let _g = tokio::select! {
    5142              :             guard = self.gc_lock.lock() => guard,
    5143              :             _ = self.cancel.cancelled() => return Ok(GcResult::default()),
    5144              :         };
    5145          753 :         let timer = self.metrics.garbage_collect_histo.start_timer();
    5146              : 
    5147              :         fail_point!("before-timeline-gc");
    5148              : 
    5149              :         // Is the timeline being deleted?
    5150          753 :         if self.is_stopping() {
    5151            0 :             return Err(GcError::TimelineCancelled);
    5152          753 :         }
    5153          753 : 
    5154          753 :         let (space_cutoff, time_cutoff, retain_lsns, max_lsn_with_valid_lease) = {
    5155          753 :             let gc_info = self.gc_info.read().unwrap();
    5156          753 : 
    5157          753 :             let space_cutoff = min(gc_info.cutoffs.space, self.get_disk_consistent_lsn());
    5158          753 :             let time_cutoff = gc_info.cutoffs.time;
    5159          753 :             let retain_lsns = gc_info
    5160          753 :                 .retain_lsns
    5161          753 :                 .iter()
    5162          753 :                 .map(|(lsn, _child_id)| *lsn)
    5163          753 :                 .collect();
    5164          753 : 
    5165          753 :             // Gets the maximum LSN that holds the valid lease.
    5166          753 :             //
    5167          753 :             // Caveat: `refresh_gc_info` is in charged of updating the lease map.
    5168          753 :             // Here, we do not check for stale leases again.
    5169          753 :             let max_lsn_with_valid_lease = gc_info.leases.last_key_value().map(|(lsn, _)| *lsn);
    5170          753 : 
    5171          753 :             (
    5172          753 :                 space_cutoff,
    5173          753 :                 time_cutoff,
    5174          753 :                 retain_lsns,
    5175          753 :                 max_lsn_with_valid_lease,
    5176          753 :             )
    5177          753 :         };
    5178          753 : 
    5179          753 :         let mut new_gc_cutoff = Lsn::min(space_cutoff, time_cutoff);
    5180          753 :         let standby_horizon = self.standby_horizon.load();
    5181          753 :         // Hold GC for the standby, but as a safety guard do it only within some
    5182          753 :         // reasonable lag.
    5183          753 :         if standby_horizon != Lsn::INVALID {
    5184            0 :             if let Some(standby_lag) = new_gc_cutoff.checked_sub(standby_horizon) {
    5185              :                 const MAX_ALLOWED_STANDBY_LAG: u64 = 10u64 << 30; // 10 GB
    5186            0 :                 if standby_lag.0 < MAX_ALLOWED_STANDBY_LAG {
    5187            0 :                     new_gc_cutoff = Lsn::min(standby_horizon, new_gc_cutoff);
    5188            0 :                     trace!("holding off GC for standby apply LSN {}", standby_horizon);
    5189              :                 } else {
    5190            0 :                     warn!(
    5191            0 :                         "standby is lagging for more than {}MB, not holding gc for it",
    5192            0 :                         MAX_ALLOWED_STANDBY_LAG / 1024 / 1024
    5193              :                     )
    5194              :                 }
    5195            0 :             }
    5196          753 :         }
    5197              : 
    5198              :         // Reset standby horizon to ignore it if it is not updated till next GC.
    5199              :         // It is an easy way to unset it when standby disappears without adding
    5200              :         // more conf options.
    5201          753 :         self.standby_horizon.store(Lsn::INVALID);
    5202          753 :         self.metrics
    5203          753 :             .standby_horizon_gauge
    5204          753 :             .set(Lsn::INVALID.0 as i64);
    5205              : 
    5206          753 :         let res = self
    5207          753 :             .gc_timeline(
    5208          753 :                 space_cutoff,
    5209          753 :                 time_cutoff,
    5210          753 :                 retain_lsns,
    5211          753 :                 max_lsn_with_valid_lease,
    5212          753 :                 new_gc_cutoff,
    5213          753 :             )
    5214          753 :             .instrument(
    5215          753 :                 info_span!("gc_timeline", timeline_id = %self.timeline_id, cutoff = %new_gc_cutoff),
    5216              :             )
    5217            0 :             .await?;
    5218              : 
    5219              :         // only record successes
    5220          753 :         timer.stop_and_record();
    5221          753 : 
    5222          753 :         Ok(res)
    5223          754 :     }
    5224              : 
    5225          753 :     async fn gc_timeline(
    5226          753 :         &self,
    5227          753 :         space_cutoff: Lsn,
    5228          753 :         time_cutoff: Lsn,
    5229          753 :         retain_lsns: Vec<Lsn>,
    5230          753 :         max_lsn_with_valid_lease: Option<Lsn>,
    5231          753 :         new_gc_cutoff: Lsn,
    5232          753 :     ) -> Result<GcResult, GcError> {
    5233          753 :         // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc
    5234          753 : 
    5235          753 :         let now = SystemTime::now();
    5236          753 :         let mut result: GcResult = GcResult::default();
    5237          753 : 
    5238          753 :         // Nothing to GC. Return early.
    5239          753 :         let latest_gc_cutoff = *self.get_latest_gc_cutoff_lsn();
    5240          753 :         if latest_gc_cutoff >= new_gc_cutoff {
    5241           22 :             info!(
    5242            0 :                 "Nothing to GC: new_gc_cutoff_lsn {new_gc_cutoff}, latest_gc_cutoff_lsn {latest_gc_cutoff}",
    5243              :             );
    5244           22 :             return Ok(result);
    5245          731 :         }
    5246              : 
    5247              :         // We need to ensure that no one tries to read page versions or create
    5248              :         // branches at a point before latest_gc_cutoff_lsn. See branch_timeline()
    5249              :         // for details. This will block until the old value is no longer in use.
    5250              :         //
    5251              :         // The GC cutoff should only ever move forwards.
    5252          731 :         let waitlist = {
    5253          731 :             let write_guard = self.latest_gc_cutoff_lsn.lock_for_write();
    5254          731 :             if *write_guard > new_gc_cutoff {
    5255            0 :                 return Err(GcError::BadLsn {
    5256            0 :                     why: format!(
    5257            0 :                         "Cannot move GC cutoff LSN backwards (was {}, new {})",
    5258            0 :                         *write_guard, new_gc_cutoff
    5259            0 :                     ),
    5260            0 :                 });
    5261          731 :             }
    5262          731 : 
    5263          731 :             write_guard.store_and_unlock(new_gc_cutoff)
    5264          731 :         };
    5265          731 :         waitlist.wait().await;
    5266              : 
    5267          731 :         info!("GC starting");
    5268              : 
    5269          731 :         debug!("retain_lsns: {:?}", retain_lsns);
    5270              : 
    5271          731 :         let mut layers_to_remove = Vec::new();
    5272              : 
    5273              :         // Scan all layers in the timeline (remote or on-disk).
    5274              :         //
    5275              :         // Garbage collect the layer if all conditions are satisfied:
    5276              :         // 1. it is older than cutoff LSN;
    5277              :         // 2. it is older than PITR interval;
    5278              :         // 3. it doesn't need to be retained for 'retain_lsns';
    5279              :         // 4. it does not need to be kept for LSNs holding valid leases.
    5280              :         // 5. newer on-disk image layers cover the layer's whole key range
    5281              :         //
    5282              :         // TODO holding a write lock is too agressive and avoidable
    5283          731 :         let mut guard = self.layers.write().await;
    5284          731 :         let layers = guard.layer_map();
    5285        12415 :         'outer: for l in layers.iter_historic_layers() {
    5286        12415 :             result.layers_total += 1;
    5287        12415 : 
    5288        12415 :             // 1. Is it newer than GC horizon cutoff point?
    5289        12415 :             if l.get_lsn_range().end > space_cutoff {
    5290          741 :                 debug!(
    5291            0 :                     "keeping {} because it's newer than space_cutoff {}",
    5292            0 :                     l.layer_name(),
    5293              :                     space_cutoff,
    5294              :                 );
    5295          741 :                 result.layers_needed_by_cutoff += 1;
    5296          741 :                 continue 'outer;
    5297        11674 :             }
    5298        11674 : 
    5299        11674 :             // 2. It is newer than PiTR cutoff point?
    5300        11674 :             if l.get_lsn_range().end > time_cutoff {
    5301            0 :                 debug!(
    5302            0 :                     "keeping {} because it's newer than time_cutoff {}",
    5303            0 :                     l.layer_name(),
    5304              :                     time_cutoff,
    5305              :                 );
    5306            0 :                 result.layers_needed_by_pitr += 1;
    5307            0 :                 continue 'outer;
    5308        11674 :             }
    5309              : 
    5310              :             // 3. Is it needed by a child branch?
    5311              :             // NOTE With that we would keep data that
    5312              :             // might be referenced by child branches forever.
    5313              :             // We can track this in child timeline GC and delete parent layers when
    5314              :             // they are no longer needed. This might be complicated with long inheritance chains.
    5315              :             //
    5316              :             // TODO Vec is not a great choice for `retain_lsns`
    5317        11674 :             for retain_lsn in &retain_lsns {
    5318              :                 // start_lsn is inclusive
    5319           10 :                 if &l.get_lsn_range().start <= retain_lsn {
    5320           10 :                     debug!(
    5321            0 :                         "keeping {} because it's still might be referenced by child branch forked at {} is_dropped: xx is_incremental: {}",
    5322            0 :                         l.layer_name(),
    5323            0 :                         retain_lsn,
    5324            0 :                         l.is_incremental(),
    5325              :                     );
    5326           10 :                     result.layers_needed_by_branches += 1;
    5327           10 :                     continue 'outer;
    5328            0 :                 }
    5329              :             }
    5330              : 
    5331              :             // 4. Is there a valid lease that requires us to keep this layer?
    5332        11664 :             if let Some(lsn) = &max_lsn_with_valid_lease {
    5333              :                 // keep if layer start <= any of the lease
    5334           18 :                 if &l.get_lsn_range().start <= lsn {
    5335           14 :                     debug!(
    5336            0 :                         "keeping {} because there is a valid lease preventing GC at {}",
    5337            0 :                         l.layer_name(),
    5338              :                         lsn,
    5339              :                     );
    5340           14 :                     result.layers_needed_by_leases += 1;
    5341           14 :                     continue 'outer;
    5342            4 :                 }
    5343        11646 :             }
    5344              : 
    5345              :             // 5. Is there a later on-disk layer for this relation?
    5346              :             //
    5347              :             // The end-LSN is exclusive, while disk_consistent_lsn is
    5348              :             // inclusive. For example, if disk_consistent_lsn is 100, it is
    5349              :             // OK for a delta layer to have end LSN 101, but if the end LSN
    5350              :             // is 102, then it might not have been fully flushed to disk
    5351              :             // before crash.
    5352              :             //
    5353              :             // For example, imagine that the following layers exist:
    5354              :             //
    5355              :             // 1000      - image (A)
    5356              :             // 1000-2000 - delta (B)
    5357              :             // 2000      - image (C)
    5358              :             // 2000-3000 - delta (D)
    5359              :             // 3000      - image (E)
    5360              :             //
    5361              :             // If GC horizon is at 2500, we can remove layers A and B, but
    5362              :             // we cannot remove C, even though it's older than 2500, because
    5363              :             // the delta layer 2000-3000 depends on it.
    5364        11650 :             if !layers
    5365        11650 :                 .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff))
    5366              :             {
    5367        11642 :                 debug!("keeping {} because it is the latest layer", l.layer_name());
    5368        11642 :                 result.layers_not_updated += 1;
    5369        11642 :                 continue 'outer;
    5370            8 :             }
    5371            8 : 
    5372            8 :             // We didn't find any reason to keep this file, so remove it.
    5373            8 :             debug!(
    5374            0 :                 "garbage collecting {} is_dropped: xx is_incremental: {}",
    5375            0 :                 l.layer_name(),
    5376            0 :                 l.is_incremental(),
    5377              :             );
    5378            8 :             layers_to_remove.push(l);
    5379              :         }
    5380              : 
    5381          731 :         if !layers_to_remove.is_empty() {
    5382              :             // Persist the new GC cutoff value before we actually remove anything.
    5383              :             // This unconditionally schedules also an index_part.json update, even though, we will
    5384              :             // be doing one a bit later with the unlinked gc'd layers.
    5385            6 :             let disk_consistent_lsn = self.disk_consistent_lsn.load();
    5386            6 :             self.schedule_uploads(disk_consistent_lsn, None)
    5387            6 :                 .map_err(|e| {
    5388            0 :                     if self.cancel.is_cancelled() {
    5389            0 :                         GcError::TimelineCancelled
    5390              :                     } else {
    5391            0 :                         GcError::Remote(e)
    5392              :                     }
    5393            6 :                 })?;
    5394              : 
    5395            6 :             let gc_layers = layers_to_remove
    5396            6 :                 .iter()
    5397            8 :                 .map(|x| guard.get_from_desc(x))
    5398            6 :                 .collect::<Vec<Layer>>();
    5399            6 : 
    5400            6 :             result.layers_removed = gc_layers.len() as u64;
    5401            6 : 
    5402            6 :             self.remote_client
    5403            6 :                 .schedule_gc_update(&gc_layers)
    5404            6 :                 .map_err(|e| {
    5405            0 :                     if self.cancel.is_cancelled() {
    5406            0 :                         GcError::TimelineCancelled
    5407              :                     } else {
    5408            0 :                         GcError::Remote(e)
    5409              :                     }
    5410            6 :                 })?;
    5411              : 
    5412            6 :             guard.finish_gc_timeline(&gc_layers);
    5413            6 : 
    5414            6 :             #[cfg(feature = "testing")]
    5415            6 :             {
    5416            6 :                 result.doomed_layers = gc_layers;
    5417            6 :             }
    5418          725 :         }
    5419              : 
    5420          731 :         info!(
    5421            0 :             "GC completed removing {} layers, cutoff {}",
    5422              :             result.layers_removed, new_gc_cutoff
    5423              :         );
    5424              : 
    5425          731 :         result.elapsed = now.elapsed().unwrap_or(Duration::ZERO);
    5426          731 :         Ok(result)
    5427          753 :     }
    5428              : 
    5429              :     /// Reconstruct a value, using the given base image and WAL records in 'data'.
    5430       667174 :     async fn reconstruct_value(
    5431       667174 :         &self,
    5432       667174 :         key: Key,
    5433       667174 :         request_lsn: Lsn,
    5434       667174 :         mut data: ValueReconstructState,
    5435       667174 :     ) -> Result<Bytes, PageReconstructError> {
    5436       667174 :         // Perform WAL redo if needed
    5437       667174 :         data.records.reverse();
    5438       667174 : 
    5439       667174 :         // If we have a page image, and no WAL, we're all set
    5440       667174 :         if data.records.is_empty() {
    5441       666908 :             if let Some((img_lsn, img)) = &data.img {
    5442       666908 :                 trace!(
    5443            0 :                     "found page image for key {} at {}, no WAL redo required, req LSN {}",
    5444              :                     key,
    5445              :                     img_lsn,
    5446              :                     request_lsn,
    5447              :                 );
    5448       666908 :                 Ok(img.clone())
    5449              :             } else {
    5450            0 :                 Err(PageReconstructError::from(anyhow!(
    5451            0 :                     "base image for {key} at {request_lsn} not found"
    5452            0 :                 )))
    5453              :             }
    5454              :         } else {
    5455              :             // We need to do WAL redo.
    5456              :             //
    5457              :             // If we don't have a base image, then the oldest WAL record better initialize
    5458              :             // the page
    5459          266 :             if data.img.is_none() && !data.records.first().unwrap().1.will_init() {
    5460            0 :                 Err(PageReconstructError::from(anyhow!(
    5461            0 :                     "Base image for {} at {} not found, but got {} WAL records",
    5462            0 :                     key,
    5463            0 :                     request_lsn,
    5464            0 :                     data.records.len()
    5465            0 :                 )))
    5466              :             } else {
    5467          266 :                 if data.img.is_some() {
    5468          266 :                     trace!(
    5469            0 :                         "found {} WAL records and a base image for {} at {}, performing WAL redo",
    5470            0 :                         data.records.len(),
    5471              :                         key,
    5472              :                         request_lsn
    5473              :                     );
    5474              :                 } else {
    5475            0 :                     trace!("found {} WAL records that will init the page for {} at {}, performing WAL redo", data.records.len(), key, request_lsn);
    5476              :                 };
    5477          266 :                 let res = self
    5478          266 :                     .walredo_mgr
    5479          266 :                     .as_ref()
    5480          266 :                     .context("timeline has no walredo manager")
    5481          266 :                     .map_err(PageReconstructError::WalRedo)?
    5482          266 :                     .request_redo(key, request_lsn, data.img, data.records, self.pg_version)
    5483            0 :                     .await;
    5484          266 :                 let img = match res {
    5485          266 :                     Ok(img) => img,
    5486            0 :                     Err(walredo::Error::Cancelled) => return Err(PageReconstructError::Cancelled),
    5487            0 :                     Err(walredo::Error::Other(e)) => {
    5488            0 :                         return Err(PageReconstructError::WalRedo(
    5489            0 :                             e.context("reconstruct a page image"),
    5490            0 :                         ))
    5491              :                     }
    5492              :                 };
    5493          266 :                 Ok(img)
    5494              :             }
    5495              :         }
    5496       667174 :     }
    5497              : 
    5498            0 :     pub(crate) async fn spawn_download_all_remote_layers(
    5499            0 :         self: Arc<Self>,
    5500            0 :         request: DownloadRemoteLayersTaskSpawnRequest,
    5501            0 :     ) -> Result<DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskInfo> {
    5502            0 :         use pageserver_api::models::DownloadRemoteLayersTaskState;
    5503            0 : 
    5504            0 :         // this is not really needed anymore; it has tests which really check the return value from
    5505            0 :         // http api. it would be better not to maintain this anymore.
    5506            0 : 
    5507            0 :         let mut status_guard = self.download_all_remote_layers_task_info.write().unwrap();
    5508            0 :         if let Some(st) = &*status_guard {
    5509            0 :             match &st.state {
    5510              :                 DownloadRemoteLayersTaskState::Running => {
    5511            0 :                     return Err(st.clone());
    5512              :                 }
    5513              :                 DownloadRemoteLayersTaskState::ShutDown
    5514            0 :                 | DownloadRemoteLayersTaskState::Completed => {
    5515            0 :                     *status_guard = None;
    5516            0 :                 }
    5517              :             }
    5518            0 :         }
    5519              : 
    5520            0 :         let self_clone = Arc::clone(&self);
    5521            0 :         let task_id = task_mgr::spawn(
    5522            0 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    5523            0 :             task_mgr::TaskKind::DownloadAllRemoteLayers,
    5524            0 :             Some(self.tenant_shard_id),
    5525            0 :             Some(self.timeline_id),
    5526            0 :             "download all remote layers task",
    5527            0 :             async move {
    5528            0 :                 self_clone.download_all_remote_layers(request).await;
    5529            0 :                 let mut status_guard = self_clone.download_all_remote_layers_task_info.write().unwrap();
    5530            0 :                  match &mut *status_guard {
    5531              :                     None => {
    5532            0 :                         warn!("tasks status is supposed to be Some(), since we are running");
    5533              :                     }
    5534            0 :                     Some(st) => {
    5535            0 :                         let exp_task_id = format!("{}", task_mgr::current_task_id().unwrap());
    5536            0 :                         if st.task_id != exp_task_id {
    5537            0 :                             warn!("task id changed while we were still running, expecting {} but have {}", exp_task_id, st.task_id);
    5538            0 :                         } else {
    5539            0 :                             st.state = DownloadRemoteLayersTaskState::Completed;
    5540            0 :                         }
    5541              :                     }
    5542              :                 };
    5543            0 :                 Ok(())
    5544            0 :             }
    5545            0 :             .instrument(info_span!(parent: None, "download_all_remote_layers", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
    5546              :         );
    5547              : 
    5548            0 :         let initial_info = DownloadRemoteLayersTaskInfo {
    5549            0 :             task_id: format!("{task_id}"),
    5550            0 :             state: DownloadRemoteLayersTaskState::Running,
    5551            0 :             total_layer_count: 0,
    5552            0 :             successful_download_count: 0,
    5553            0 :             failed_download_count: 0,
    5554            0 :         };
    5555            0 :         *status_guard = Some(initial_info.clone());
    5556            0 : 
    5557            0 :         Ok(initial_info)
    5558            0 :     }
    5559              : 
    5560            0 :     async fn download_all_remote_layers(
    5561            0 :         self: &Arc<Self>,
    5562            0 :         request: DownloadRemoteLayersTaskSpawnRequest,
    5563            0 :     ) {
    5564              :         use pageserver_api::models::DownloadRemoteLayersTaskState;
    5565              : 
    5566            0 :         let remaining = {
    5567            0 :             let guard = self.layers.read().await;
    5568            0 :             guard
    5569            0 :                 .layer_map()
    5570            0 :                 .iter_historic_layers()
    5571            0 :                 .map(|desc| guard.get_from_desc(&desc))
    5572            0 :                 .collect::<Vec<_>>()
    5573            0 :         };
    5574            0 :         let total_layer_count = remaining.len();
    5575            0 : 
    5576            0 :         macro_rules! lock_status {
    5577            0 :             ($st:ident) => {
    5578            0 :                 let mut st = self.download_all_remote_layers_task_info.write().unwrap();
    5579            0 :                 let st = st
    5580            0 :                     .as_mut()
    5581            0 :                     .expect("this function is only called after the task has been spawned");
    5582            0 :                 assert_eq!(
    5583            0 :                     st.task_id,
    5584            0 :                     format!(
    5585            0 :                         "{}",
    5586            0 :                         task_mgr::current_task_id().expect("we run inside a task_mgr task")
    5587            0 :                     )
    5588            0 :                 );
    5589            0 :                 let $st = st;
    5590            0 :             };
    5591            0 :         }
    5592            0 : 
    5593            0 :         {
    5594            0 :             lock_status!(st);
    5595            0 :             st.total_layer_count = total_layer_count as u64;
    5596            0 :         }
    5597            0 : 
    5598            0 :         let mut remaining = remaining.into_iter();
    5599            0 :         let mut have_remaining = true;
    5600            0 :         let mut js = tokio::task::JoinSet::new();
    5601            0 : 
    5602            0 :         let cancel = task_mgr::shutdown_token();
    5603            0 : 
    5604            0 :         let limit = request.max_concurrent_downloads;
    5605              : 
    5606              :         loop {
    5607            0 :             while js.len() < limit.get() && have_remaining && !cancel.is_cancelled() {
    5608            0 :                 let Some(next) = remaining.next() else {
    5609            0 :                     have_remaining = false;
    5610            0 :                     break;
    5611              :                 };
    5612              : 
    5613            0 :                 let span = tracing::info_span!("download", layer = %next);
    5614              : 
    5615            0 :                 js.spawn(
    5616            0 :                     async move {
    5617            0 :                         let res = next.download().await;
    5618            0 :                         (next, res)
    5619            0 :                     }
    5620            0 :                     .instrument(span),
    5621            0 :                 );
    5622            0 :             }
    5623              : 
    5624            0 :             while let Some(res) = js.join_next().await {
    5625            0 :                 match res {
    5626              :                     Ok((_, Ok(_))) => {
    5627            0 :                         lock_status!(st);
    5628            0 :                         st.successful_download_count += 1;
    5629              :                     }
    5630            0 :                     Ok((layer, Err(e))) => {
    5631            0 :                         tracing::error!(%layer, "download failed: {e:#}");
    5632            0 :                         lock_status!(st);
    5633            0 :                         st.failed_download_count += 1;
    5634              :                     }
    5635            0 :                     Err(je) if je.is_cancelled() => unreachable!("not used here"),
    5636            0 :                     Err(je) if je.is_panic() => {
    5637            0 :                         lock_status!(st);
    5638            0 :                         st.failed_download_count += 1;
    5639              :                     }
    5640            0 :                     Err(je) => tracing::warn!("unknown joinerror: {je:?}"),
    5641              :                 }
    5642              :             }
    5643              : 
    5644            0 :             if js.is_empty() && (!have_remaining || cancel.is_cancelled()) {
    5645            0 :                 break;
    5646            0 :             }
    5647              :         }
    5648              : 
    5649              :         {
    5650            0 :             lock_status!(st);
    5651            0 :             st.state = DownloadRemoteLayersTaskState::Completed;
    5652            0 :         }
    5653            0 :     }
    5654              : 
    5655            0 :     pub(crate) fn get_download_all_remote_layers_task_info(
    5656            0 :         &self,
    5657            0 :     ) -> Option<DownloadRemoteLayersTaskInfo> {
    5658            0 :         self.download_all_remote_layers_task_info
    5659            0 :             .read()
    5660            0 :             .unwrap()
    5661            0 :             .clone()
    5662            0 :     }
    5663              : }
    5664              : 
    5665              : impl Timeline {
    5666              :     /// Returns non-remote layers for eviction.
    5667            0 :     pub(crate) async fn get_local_layers_for_disk_usage_eviction(&self) -> DiskUsageEvictionInfo {
    5668            0 :         let guard = self.layers.read().await;
    5669            0 :         let mut max_layer_size: Option<u64> = None;
    5670            0 : 
    5671            0 :         let resident_layers = guard
    5672            0 :             .likely_resident_layers()
    5673            0 :             .map(|layer| {
    5674            0 :                 let file_size = layer.layer_desc().file_size;
    5675            0 :                 max_layer_size = max_layer_size.map_or(Some(file_size), |m| Some(m.max(file_size)));
    5676            0 : 
    5677            0 :                 let last_activity_ts = layer.access_stats().latest_activity();
    5678            0 : 
    5679            0 :                 EvictionCandidate {
    5680            0 :                     layer: layer.into(),
    5681            0 :                     last_activity_ts,
    5682            0 :                     relative_last_activity: finite_f32::FiniteF32::ZERO,
    5683            0 :                 }
    5684            0 :             })
    5685            0 :             .collect();
    5686            0 : 
    5687            0 :         DiskUsageEvictionInfo {
    5688            0 :             max_layer_size,
    5689            0 :             resident_layers,
    5690            0 :         }
    5691            0 :     }
    5692              : 
    5693         1638 :     pub(crate) fn get_shard_index(&self) -> ShardIndex {
    5694         1638 :         ShardIndex {
    5695         1638 :             shard_number: self.tenant_shard_id.shard_number,
    5696         1638 :             shard_count: self.tenant_shard_id.shard_count,
    5697         1638 :         }
    5698         1638 :     }
    5699              : 
    5700              :     #[cfg(test)]
    5701           30 :     pub(super) fn force_advance_lsn(self: &Arc<Timeline>, new_lsn: Lsn) {
    5702           30 :         self.last_record_lsn.advance(new_lsn);
    5703           30 :     }
    5704              : 
    5705              :     #[cfg(test)]
    5706            2 :     pub(super) fn force_set_disk_consistent_lsn(&self, new_value: Lsn) {
    5707            2 :         self.disk_consistent_lsn.store(new_value);
    5708            2 :     }
    5709              : 
    5710              :     /// Force create an image layer and place it into the layer map.
    5711              :     ///
    5712              :     /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
    5713              :     /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are placed into the layer map in one run.
    5714              :     #[cfg(test)]
    5715           42 :     pub(super) async fn force_create_image_layer(
    5716           42 :         self: &Arc<Timeline>,
    5717           42 :         lsn: Lsn,
    5718           42 :         mut images: Vec<(Key, Bytes)>,
    5719           42 :         check_start_lsn: Option<Lsn>,
    5720           42 :         ctx: &RequestContext,
    5721           42 :     ) -> anyhow::Result<()> {
    5722           42 :         let last_record_lsn = self.get_last_record_lsn();
    5723           42 :         assert!(
    5724           42 :             lsn <= last_record_lsn,
    5725            0 :             "advance last record lsn before inserting a layer, lsn={lsn}, last_record_lsn={last_record_lsn}"
    5726              :         );
    5727           42 :         if let Some(check_start_lsn) = check_start_lsn {
    5728           42 :             assert!(lsn >= check_start_lsn);
    5729            0 :         }
    5730           74 :         images.sort_unstable_by(|(ka, _), (kb, _)| ka.cmp(kb));
    5731           42 :         let min_key = *images.first().map(|(k, _)| k).unwrap();
    5732           42 :         let end_key = images.last().map(|(k, _)| k).unwrap().next();
    5733           42 :         let mut image_layer_writer = ImageLayerWriter::new(
    5734           42 :             self.conf,
    5735           42 :             self.timeline_id,
    5736           42 :             self.tenant_shard_id,
    5737           42 :             &(min_key..end_key),
    5738           42 :             lsn,
    5739           42 :             ctx,
    5740           42 :         )
    5741           21 :         .await?;
    5742          158 :         for (key, img) in images {
    5743          116 :             image_layer_writer.put_image(key, img, ctx).await?;
    5744              :         }
    5745           84 :         let image_layer = image_layer_writer.finish(self, ctx).await?;
    5746              : 
    5747           42 :         {
    5748           42 :             let mut guard = self.layers.write().await;
    5749           42 :             guard.force_insert_layer(image_layer);
    5750           42 :         }
    5751           42 : 
    5752           42 :         Ok(())
    5753           42 :     }
    5754              : 
    5755              :     /// Force create a delta layer and place it into the layer map.
    5756              :     ///
    5757              :     /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
    5758              :     /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are placed into the layer map in one run.
    5759              :     #[cfg(test)]
    5760           46 :     pub(super) async fn force_create_delta_layer(
    5761           46 :         self: &Arc<Timeline>,
    5762           46 :         mut deltas: DeltaLayerTestDesc,
    5763           46 :         check_start_lsn: Option<Lsn>,
    5764           46 :         ctx: &RequestContext,
    5765           46 :     ) -> anyhow::Result<()> {
    5766           46 :         let last_record_lsn = self.get_last_record_lsn();
    5767           46 :         deltas
    5768           46 :             .data
    5769           62 :             .sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb)));
    5770           46 :         assert!(deltas.data.first().unwrap().0 >= deltas.key_range.start);
    5771           46 :         assert!(deltas.data.last().unwrap().0 < deltas.key_range.end);
    5772          154 :         for (_, lsn, _) in &deltas.data {
    5773          108 :             assert!(deltas.lsn_range.start <= *lsn && *lsn < deltas.lsn_range.end);
    5774              :         }
    5775           46 :         assert!(
    5776           46 :             deltas.lsn_range.end <= last_record_lsn,
    5777            0 :             "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}",
    5778              :             deltas.lsn_range.end,
    5779              :             last_record_lsn
    5780              :         );
    5781           46 :         if let Some(check_start_lsn) = check_start_lsn {
    5782           46 :             assert!(deltas.lsn_range.start >= check_start_lsn);
    5783            0 :         }
    5784              :         // check if the delta layer does not violate the LSN invariant, the legacy compaction should always produce a batch of
    5785              :         // layers of the same start/end LSN, and so should the force inserted layer
    5786              :         {
    5787              :             /// Checks if a overlaps with b, assume a/b = [start, end).
    5788           48 :             pub fn overlaps_with<T: Ord>(a: &Range<T>, b: &Range<T>) -> bool {
    5789           48 :                 !(a.end <= b.start || b.end <= a.start)
    5790           48 :             }
    5791              : 
    5792           46 :             let guard = self.layers.read().await;
    5793           88 :             for layer in guard.layer_map().iter_historic_layers() {
    5794           88 :                 if layer.is_delta()
    5795           48 :                     && overlaps_with(&layer.lsn_range, &deltas.lsn_range)
    5796           14 :                     && layer.lsn_range != deltas.lsn_range
    5797              :                 {
    5798              :                     // If a delta layer overlaps with another delta layer AND their LSN range is not the same, panic
    5799            0 :                     panic!(
    5800            0 :                         "inserted layer violates delta layer LSN invariant: current_lsn_range={}..{}, conflict_lsn_range={}..{}",
    5801            0 :                         deltas.lsn_range.start, deltas.lsn_range.end, layer.lsn_range.start, layer.lsn_range.end
    5802            0 :                     );
    5803           88 :                 }
    5804              :             }
    5805              :         }
    5806           46 :         let mut delta_layer_writer = DeltaLayerWriter::new(
    5807           46 :             self.conf,
    5808           46 :             self.timeline_id,
    5809           46 :             self.tenant_shard_id,
    5810           46 :             deltas.key_range.start,
    5811           46 :             deltas.lsn_range,
    5812           46 :             ctx,
    5813           46 :         )
    5814           23 :         .await?;
    5815          154 :         for (key, lsn, val) in deltas.data {
    5816          108 :             delta_layer_writer.put_value(key, lsn, val, ctx).await?;
    5817              :         }
    5818           46 :         let delta_layer = delta_layer_writer
    5819           46 :             .finish(deltas.key_range.end, self, ctx)
    5820          115 :             .await?;
    5821              : 
    5822           46 :         {
    5823           46 :             let mut guard = self.layers.write().await;
    5824           46 :             guard.force_insert_layer(delta_layer);
    5825           46 :         }
    5826           46 : 
    5827           46 :         Ok(())
    5828           46 :     }
    5829              : 
    5830              :     /// Return all keys at the LSN in the image layers
    5831              :     #[cfg(test)]
    5832            6 :     pub(crate) async fn inspect_image_layers(
    5833            6 :         self: &Arc<Timeline>,
    5834            6 :         lsn: Lsn,
    5835            6 :         ctx: &RequestContext,
    5836            6 :     ) -> anyhow::Result<Vec<(Key, Bytes)>> {
    5837            6 :         let mut all_data = Vec::new();
    5838            6 :         let guard = self.layers.read().await;
    5839           34 :         for layer in guard.layer_map().iter_historic_layers() {
    5840           34 :             if !layer.is_delta() && layer.image_layer_lsn() == lsn {
    5841            8 :                 let layer = guard.get_from_desc(&layer);
    5842            8 :                 let mut reconstruct_data = ValuesReconstructState::default();
    5843            8 :                 layer
    5844            8 :                     .get_values_reconstruct_data(
    5845            8 :                         KeySpace::single(Key::MIN..Key::MAX),
    5846            8 :                         lsn..Lsn(lsn.0 + 1),
    5847            8 :                         &mut reconstruct_data,
    5848            8 :                         ctx,
    5849            8 :                     )
    5850           13 :                     .await?;
    5851           80 :                 for (k, v) in reconstruct_data.keys {
    5852           72 :                     all_data.push((k, v?.img.unwrap().1));
    5853              :                 }
    5854           26 :             }
    5855              :         }
    5856            6 :         all_data.sort();
    5857            6 :         Ok(all_data)
    5858            6 :     }
    5859              : 
    5860              :     /// Get all historic layer descriptors in the layer map
    5861              :     #[cfg(test)]
    5862            2 :     pub(crate) async fn inspect_historic_layers(
    5863            2 :         self: &Arc<Timeline>,
    5864            2 :     ) -> anyhow::Result<Vec<super::storage_layer::PersistentLayerKey>> {
    5865            2 :         let mut layers = Vec::new();
    5866            2 :         let guard = self.layers.read().await;
    5867            6 :         for layer in guard.layer_map().iter_historic_layers() {
    5868            6 :             layers.push(layer.key());
    5869            6 :         }
    5870            2 :         Ok(layers)
    5871            2 :     }
    5872              : 
    5873              :     #[cfg(test)]
    5874           10 :     pub(crate) fn add_extra_test_dense_keyspace(&self, ks: KeySpace) {
    5875           10 :         let mut keyspace = self.extra_test_dense_keyspace.load().as_ref().clone();
    5876           10 :         keyspace.merge(&ks);
    5877           10 :         self.extra_test_dense_keyspace.store(Arc::new(keyspace));
    5878           10 :     }
    5879              : }
    5880              : 
    5881              : type TraversalPathItem = (ValueReconstructResult, Lsn, TraversalId);
    5882              : 
    5883              : /// Tracking writes ingestion does to a particular in-memory layer.
    5884              : ///
    5885              : /// Cleared upon freezing a layer.
    5886              : struct TimelineWriterState {
    5887              :     open_layer: Arc<InMemoryLayer>,
    5888              :     current_size: u64,
    5889              :     // Previous Lsn which passed through
    5890              :     prev_lsn: Option<Lsn>,
    5891              :     // Largest Lsn which passed through the current writer
    5892              :     max_lsn: Option<Lsn>,
    5893              :     // Cached details of the last freeze. Avoids going trough the atomic/lock on every put.
    5894              :     cached_last_freeze_at: Lsn,
    5895              : }
    5896              : 
    5897              : impl TimelineWriterState {
    5898         1256 :     fn new(open_layer: Arc<InMemoryLayer>, current_size: u64, last_freeze_at: Lsn) -> Self {
    5899         1256 :         Self {
    5900         1256 :             open_layer,
    5901         1256 :             current_size,
    5902         1256 :             prev_lsn: None,
    5903         1256 :             max_lsn: None,
    5904         1256 :             cached_last_freeze_at: last_freeze_at,
    5905         1256 :         }
    5906         1256 :     }
    5907              : }
    5908              : 
    5909              : /// Various functions to mutate the timeline.
    5910              : // TODO Currently, Deref is used to allow easy access to read methods from this trait.
    5911              : // This is probably considered a bad practice in Rust and should be fixed eventually,
    5912              : // but will cause large code changes.
    5913              : pub(crate) struct TimelineWriter<'a> {
    5914              :     tl: &'a Timeline,
    5915              :     write_guard: tokio::sync::MutexGuard<'a, Option<TimelineWriterState>>,
    5916              : }
    5917              : 
    5918              : impl Deref for TimelineWriter<'_> {
    5919              :     type Target = Timeline;
    5920              : 
    5921      4807302 :     fn deref(&self) -> &Self::Target {
    5922      4807302 :         self.tl
    5923      4807302 :     }
    5924              : }
    5925              : 
    5926              : #[derive(PartialEq)]
    5927              : enum OpenLayerAction {
    5928              :     Roll,
    5929              :     Open,
    5930              :     None,
    5931              : }
    5932              : 
    5933              : impl<'a> TimelineWriter<'a> {
    5934              :     /// Put a new page version that can be constructed from a WAL record
    5935              :     ///
    5936              :     /// This will implicitly extend the relation, if the page is beyond the
    5937              :     /// current end-of-file.
    5938      5090546 :     pub(crate) async fn put(
    5939      5090546 :         &mut self,
    5940      5090546 :         key: Key,
    5941      5090546 :         lsn: Lsn,
    5942      5090546 :         value: &Value,
    5943      5090546 :         ctx: &RequestContext,
    5944      5090546 :     ) -> anyhow::Result<()> {
    5945      5090546 :         // Avoid doing allocations for "small" values.
    5946      5090546 :         // In the regression test suite, the limit of 256 avoided allocations in 95% of cases:
    5947      5090546 :         // https://github.com/neondatabase/neon/pull/5056#discussion_r1301975061
    5948      5090546 :         let mut buf = smallvec::SmallVec::<[u8; 256]>::new();
    5949      5090546 :         value.ser_into(&mut buf)?;
    5950      5090546 :         let buf_size: u64 = buf.len().try_into().expect("oversized value buf");
    5951      5090546 : 
    5952      5090546 :         let action = self.get_open_layer_action(lsn, buf_size);
    5953      5090546 :         let layer = self.handle_open_layer_action(lsn, action, ctx).await?;
    5954      5090546 :         let res = layer.put_value(key, lsn, &buf, ctx).await;
    5955              : 
    5956      5090546 :         if res.is_ok() {
    5957      5090546 :             // Update the current size only when the entire write was ok.
    5958      5090546 :             // In case of failures, we may have had partial writes which
    5959      5090546 :             // render the size tracking out of sync. That's ok because
    5960      5090546 :             // the checkpoint distance should be significantly smaller
    5961      5090546 :             // than the S3 single shot upload limit of 5GiB.
    5962      5090546 :             let state = self.write_guard.as_mut().unwrap();
    5963      5090546 : 
    5964      5090546 :             state.current_size += buf_size;
    5965      5090546 :             state.prev_lsn = Some(lsn);
    5966      5090546 :             state.max_lsn = std::cmp::max(state.max_lsn, Some(lsn));
    5967      5090546 :         }
    5968              : 
    5969      5090546 :         res
    5970      5090546 :     }
    5971              : 
    5972      5090548 :     async fn handle_open_layer_action(
    5973      5090548 :         &mut self,
    5974      5090548 :         at: Lsn,
    5975      5090548 :         action: OpenLayerAction,
    5976      5090548 :         ctx: &RequestContext,
    5977      5090548 :     ) -> anyhow::Result<&Arc<InMemoryLayer>> {
    5978      5090548 :         match action {
    5979              :             OpenLayerAction::Roll => {
    5980           80 :                 let freeze_at = self.write_guard.as_ref().unwrap().max_lsn.unwrap();
    5981           80 :                 self.roll_layer(freeze_at).await?;
    5982           80 :                 self.open_layer(at, ctx).await?;
    5983              :             }
    5984         1176 :             OpenLayerAction::Open => self.open_layer(at, ctx).await?,
    5985              :             OpenLayerAction::None => {
    5986      5089292 :                 assert!(self.write_guard.is_some());
    5987              :             }
    5988              :         }
    5989              : 
    5990      5090548 :         Ok(&self.write_guard.as_ref().unwrap().open_layer)
    5991      5090548 :     }
    5992              : 
    5993         1256 :     async fn open_layer(&mut self, at: Lsn, ctx: &RequestContext) -> anyhow::Result<()> {
    5994         1256 :         let layer = self.tl.get_layer_for_write(at, ctx).await?;
    5995         1256 :         let initial_size = layer.size().await?;
    5996              : 
    5997         1256 :         let last_freeze_at = self.last_freeze_at.load();
    5998         1256 :         self.write_guard.replace(TimelineWriterState::new(
    5999         1256 :             layer,
    6000         1256 :             initial_size,
    6001         1256 :             last_freeze_at,
    6002         1256 :         ));
    6003         1256 : 
    6004         1256 :         Ok(())
    6005         1256 :     }
    6006              : 
    6007           80 :     async fn roll_layer(&mut self, freeze_at: Lsn) -> anyhow::Result<()> {
    6008           80 :         let current_size = self.write_guard.as_ref().unwrap().current_size;
    6009           80 : 
    6010           80 :         // self.write_guard will be taken by the freezing
    6011           80 :         self.tl
    6012           80 :             .freeze_inmem_layer_at(freeze_at, &mut self.write_guard)
    6013           10 :             .await;
    6014              : 
    6015           80 :         self.tl.flush_frozen_layers(freeze_at)?;
    6016              : 
    6017           80 :         if current_size >= self.get_checkpoint_distance() * 2 {
    6018            0 :             warn!("Flushed oversized open layer with size {}", current_size)
    6019           80 :         }
    6020              : 
    6021           80 :         Ok(())
    6022           80 :     }
    6023              : 
    6024      5090548 :     fn get_open_layer_action(&self, lsn: Lsn, new_value_size: u64) -> OpenLayerAction {
    6025      5090548 :         let state = &*self.write_guard;
    6026      5090548 :         let Some(state) = &state else {
    6027         1176 :             return OpenLayerAction::Open;
    6028              :         };
    6029              : 
    6030              :         #[cfg(feature = "testing")]
    6031      5089372 :         if state.cached_last_freeze_at < self.tl.last_freeze_at.load() {
    6032              :             // this check and assertion are not really needed because
    6033              :             // LayerManager::try_freeze_in_memory_layer will always clear out the
    6034              :             // TimelineWriterState if something is frozen. however, we can advance last_freeze_at when there
    6035              :             // is no TimelineWriterState.
    6036            0 :             assert!(
    6037            0 :                 state.open_layer.end_lsn.get().is_some(),
    6038            0 :                 "our open_layer must be outdated"
    6039              :             );
    6040              : 
    6041              :             // this would be a memory leak waiting to happen because the in-memory layer always has
    6042              :             // an index
    6043            0 :             panic!("BUG: TimelineWriterState held on to frozen in-memory layer.");
    6044      5089372 :         }
    6045      5089372 : 
    6046      5089372 :         if state.prev_lsn == Some(lsn) {
    6047              :             // Rolling mid LSN is not supported by [downstream code].
    6048              :             // Hence, only roll at LSN boundaries.
    6049              :             //
    6050              :             // [downstream code]: https://github.com/neondatabase/neon/pull/7993#discussion_r1633345422
    6051       286346 :             return OpenLayerAction::None;
    6052      4803026 :         }
    6053      4803026 : 
    6054      4803026 :         if state.current_size == 0 {
    6055              :             // Don't roll empty layers
    6056            0 :             return OpenLayerAction::None;
    6057      4803026 :         }
    6058      4803026 : 
    6059      4803026 :         if self.tl.should_roll(
    6060      4803026 :             state.current_size,
    6061      4803026 :             state.current_size + new_value_size,
    6062      4803026 :             self.get_checkpoint_distance(),
    6063      4803026 :             lsn,
    6064      4803026 :             state.cached_last_freeze_at,
    6065      4803026 :             state.open_layer.get_opened_at(),
    6066      4803026 :         ) {
    6067           80 :             OpenLayerAction::Roll
    6068              :         } else {
    6069      4802946 :             OpenLayerAction::None
    6070              :         }
    6071      5090548 :     }
    6072              : 
    6073              :     /// Put a batch of keys at the specified Lsns.
    6074              :     ///
    6075              :     /// The batch is sorted by Lsn (enforced by usage of [`utils::vec_map::VecMap`].
    6076       414046 :     pub(crate) async fn put_batch(
    6077       414046 :         &mut self,
    6078       414046 :         batch: VecMap<Lsn, (Key, Value)>,
    6079       414046 :         ctx: &RequestContext,
    6080       414046 :     ) -> anyhow::Result<()> {
    6081      1114438 :         for (lsn, (key, val)) in batch {
    6082       700392 :             self.put(key, lsn, &val, ctx).await?
    6083              :         }
    6084              : 
    6085       414046 :         Ok(())
    6086       414046 :     }
    6087              : 
    6088            2 :     pub(crate) async fn delete_batch(
    6089            2 :         &mut self,
    6090            2 :         batch: &[(Range<Key>, Lsn)],
    6091            2 :         ctx: &RequestContext,
    6092            2 :     ) -> anyhow::Result<()> {
    6093            2 :         if let Some((_, lsn)) = batch.first() {
    6094            2 :             let action = self.get_open_layer_action(*lsn, 0);
    6095            2 :             let layer = self.handle_open_layer_action(*lsn, action, ctx).await?;
    6096            2 :             layer.put_tombstones(batch).await?;
    6097            0 :         }
    6098              : 
    6099            2 :         Ok(())
    6100            2 :     }
    6101              : 
    6102              :     /// Track the end of the latest digested WAL record.
    6103              :     /// Remember the (end of) last valid WAL record remembered in the timeline.
    6104              :     ///
    6105              :     /// Call this after you have finished writing all the WAL up to 'lsn'.
    6106              :     ///
    6107              :     /// 'lsn' must be aligned. This wakes up any wait_lsn() callers waiting for
    6108              :     /// the 'lsn' or anything older. The previous last record LSN is stored alongside
    6109              :     /// the latest and can be read.
    6110      5279062 :     pub(crate) fn finish_write(&self, new_lsn: Lsn) {
    6111      5279062 :         self.tl.finish_write(new_lsn);
    6112      5279062 :     }
    6113              : 
    6114       270570 :     pub(crate) fn update_current_logical_size(&self, delta: i64) {
    6115       270570 :         self.tl.update_current_logical_size(delta)
    6116       270570 :     }
    6117              : }
    6118              : 
    6119              : // We need TimelineWriter to be send in upcoming conversion of
    6120              : // Timeline::layers to tokio::sync::RwLock.
    6121              : #[test]
    6122            2 : fn is_send() {
    6123            2 :     fn _assert_send<T: Send>() {}
    6124            2 :     _assert_send::<TimelineWriter<'_>>();
    6125            2 : }
    6126              : 
    6127              : #[cfg(test)]
    6128              : mod tests {
    6129              :     use utils::{id::TimelineId, lsn::Lsn};
    6130              : 
    6131              :     use crate::tenant::{
    6132              :         harness::TenantHarness, storage_layer::Layer, timeline::EvictionError, Timeline,
    6133              :     };
    6134              : 
    6135              :     #[tokio::test]
    6136            2 :     async fn two_layer_eviction_attempts_at_the_same_time() {
    6137            2 :         let harness = TenantHarness::create("two_layer_eviction_attempts_at_the_same_time")
    6138            2 :             .await
    6139            2 :             .unwrap();
    6140            2 : 
    6141            8 :         let (tenant, ctx) = harness.load().await;
    6142            2 :         let timeline = tenant
    6143            2 :             .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
    6144            4 :             .await
    6145            2 :             .unwrap();
    6146            2 : 
    6147            2 :         let layer = find_some_layer(&timeline).await;
    6148            2 :         let layer = layer
    6149            2 :             .keep_resident()
    6150            2 :             .await
    6151            2 :             .expect("no download => no downloading errors")
    6152            2 :             .drop_eviction_guard();
    6153            2 : 
    6154            2 :         let forever = std::time::Duration::from_secs(120);
    6155            2 : 
    6156            2 :         let first = layer.evict_and_wait(forever);
    6157            2 :         let second = layer.evict_and_wait(forever);
    6158            2 : 
    6159            2 :         let (first, second) = tokio::join!(first, second);
    6160            2 : 
    6161            2 :         let res = layer.keep_resident().await;
    6162            2 :         assert!(res.is_none(), "{res:?}");
    6163            2 : 
    6164            2 :         match (first, second) {
    6165            2 :             (Ok(()), Ok(())) => {
    6166            2 :                 // because there are no more timeline locks being taken on eviction path, we can
    6167            2 :                 // witness all three outcomes here.
    6168            2 :             }
    6169            2 :             (Ok(()), Err(EvictionError::NotFound)) | (Err(EvictionError::NotFound), Ok(())) => {
    6170            0 :                 // if one completes before the other, this is fine just as well.
    6171            0 :             }
    6172            2 :             other => unreachable!("unexpected {:?}", other),
    6173            2 :         }
    6174            2 :     }
    6175              : 
    6176            2 :     async fn find_some_layer(timeline: &Timeline) -> Layer {
    6177            2 :         let layers = timeline.layers.read().await;
    6178            2 :         let desc = layers
    6179            2 :             .layer_map()
    6180            2 :             .iter_historic_layers()
    6181            2 :             .next()
    6182            2 :             .expect("must find one layer to evict");
    6183            2 : 
    6184            2 :         layers.get_from_desc(&desc)
    6185            2 :     }
    6186              : }
        

Generated by: LCOV version 2.1-beta