LCOV - code coverage report
Current view: top level - pageserver/src/tenant/timeline - compaction.rs
Test: 35c4cad035e5234d8d9f74554a48bbe25e157302.info
Test Date: 2025-02-18 19:18:45
Coverage: Lines: 55.8 % (1294 of 2321 hit) | Functions: 39.5 % (64 of 162 hit)

            Line data    Source code
       1              : //! New compaction implementation. The algorithm itself is implemented in the
       2              : //! compaction crate. This file implements the callbacks and structs that allow
       3              : //! the algorithm to drive the process.
       4              : //!
       5              : //! The old legacy algorithm is implemented directly in `timeline.rs`.
       6              : 
       7              : use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque};
       8              : use std::ops::{Deref, Range};
       9              : use std::sync::Arc;
      10              : 
      11              : use super::layer_manager::LayerManager;
      12              : use super::{
      13              :     CompactFlags, CompactOptions, CreateImageLayersError, DurationRecorder, GetVectoredError,
      14              :     ImageLayerCreationMode, LastImageLayerCreationStatus, RecordedDuration, Timeline,
      15              : };
      16              : 
      17              : use anyhow::{anyhow, bail, Context};
      18              : use bytes::Bytes;
      19              : use enumset::EnumSet;
      20              : use fail::fail_point;
      21              : use itertools::Itertools;
      22              : use pageserver_api::key::KEY_SIZE;
      23              : use pageserver_api::keyspace::ShardedRange;
      24              : use pageserver_api::models::CompactInfoResponse;
      25              : use pageserver_api::shard::{ShardCount, ShardIdentity, TenantShardId};
      26              : use serde::Serialize;
      27              : use tokio_util::sync::CancellationToken;
      28              : use tracing::{debug, info, info_span, trace, warn, Instrument};
      29              : use utils::critical;
      30              : use utils::id::TimelineId;
      31              : 
      32              : use crate::context::{AccessStatsBehavior, RequestContext, RequestContextBuilder};
      33              : use crate::page_cache;
      34              : use crate::statvfs::Statvfs;
      35              : use crate::tenant::checks::check_valid_layermap;
      36              : use crate::tenant::gc_block::GcBlock;
      37              : use crate::tenant::layer_map::LayerMap;
      38              : use crate::tenant::remote_timeline_client::WaitCompletionError;
      39              : use crate::tenant::storage_layer::batch_split_writer::{
      40              :     BatchWriterResult, SplitDeltaLayerWriter, SplitImageLayerWriter,
      41              : };
      42              : use crate::tenant::storage_layer::filter_iterator::FilterIterator;
      43              : use crate::tenant::storage_layer::merge_iterator::MergeIterator;
      44              : use crate::tenant::storage_layer::{
      45              :     AsLayerDesc, PersistentLayerDesc, PersistentLayerKey, ValueReconstructState,
      46              : };
      47              : use crate::tenant::timeline::{drop_rlock, DeltaLayerWriter, ImageLayerWriter};
      48              : use crate::tenant::timeline::{ImageLayerCreationOutcome, IoConcurrency};
      49              : use crate::tenant::timeline::{Layer, ResidentLayer};
      50              : use crate::tenant::{gc_block, DeltaLayer, MaybeOffloaded};
      51              : use crate::virtual_file::{MaybeFatalIo, VirtualFile};
      52              : use pageserver_api::config::tenant_conf_defaults::DEFAULT_CHECKPOINT_DISTANCE;
      53              : 
      54              : use pageserver_api::key::Key;
      55              : use pageserver_api::keyspace::KeySpace;
      56              : use pageserver_api::record::NeonWalRecord;
      57              : use pageserver_api::value::Value;
      58              : 
      59              : use utils::lsn::Lsn;
      60              : 
      61              : use pageserver_compaction::helpers::{fully_contains, overlaps_with};
      62              : use pageserver_compaction::interface::*;
      63              : 
      64              : use super::CompactionError;
      65              : 
      66              : /// Maximum number of deltas before generating an image layer in bottom-most compaction.
      67              : const COMPACTION_DELTA_THRESHOLD: usize = 5;
      68              : 
      69              : #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
      70              : pub struct GcCompactionJobId(pub usize);
      71              : 
      72              : impl std::fmt::Display for GcCompactionJobId {
      73            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
      74            0 :         write!(f, "{}", self.0)
      75            0 :     }
      76              : }
      77              : 
      78              : #[derive(Debug, Clone)]
      79              : pub enum GcCompactionQueueItem {
      80              :     Manual(CompactOptions),
      81              :     SubCompactionJob(CompactOptions),
      82              :     #[allow(dead_code)]
      83              :     UpdateL2Lsn(Lsn),
      84              :     Notify(GcCompactionJobId),
      85              : }
      86              : 
      87              : impl GcCompactionQueueItem {
      88            0 :     pub fn into_compact_info_resp(
      89            0 :         self,
      90            0 :         id: GcCompactionJobId,
      91            0 :         running: bool,
      92            0 :     ) -> Option<CompactInfoResponse> {
      93            0 :         match self {
      94            0 :             GcCompactionQueueItem::Manual(options) => Some(CompactInfoResponse {
      95            0 :                 compact_key_range: options.compact_key_range,
      96            0 :                 compact_lsn_range: options.compact_lsn_range,
      97            0 :                 sub_compaction: options.sub_compaction,
      98            0 :                 running,
      99            0 :                 job_id: id.0,
     100            0 :             }),
     101            0 :             GcCompactionQueueItem::SubCompactionJob(options) => Some(CompactInfoResponse {
     102            0 :                 compact_key_range: options.compact_key_range,
     103            0 :                 compact_lsn_range: options.compact_lsn_range,
     104            0 :                 sub_compaction: options.sub_compaction,
     105            0 :                 running,
     106            0 :                 job_id: id.0,
     107            0 :             }),
     108            0 :             GcCompactionQueueItem::UpdateL2Lsn(_) => None,
     109            0 :             GcCompactionQueueItem::Notify(_) => None,
     110              :         }
     111            0 :     }
     112              : }
     113              : 
     114              : struct GcCompactionQueueInner {
     115              :     running: Option<(GcCompactionJobId, GcCompactionQueueItem)>,
     116              :     queued: VecDeque<(GcCompactionJobId, GcCompactionQueueItem)>,
     117              :     notify: HashMap<GcCompactionJobId, tokio::sync::oneshot::Sender<()>>,
     118              :     gc_guards: HashMap<GcCompactionJobId, gc_block::Guard>,
     119              :     last_id: GcCompactionJobId,
     120              : }
     121              : 
     122              : impl GcCompactionQueueInner {
     123            0 :     fn next_id(&mut self) -> GcCompactionJobId {
     124            0 :         let id = self.last_id;
     125            0 :         self.last_id = GcCompactionJobId(id.0 + 1);
     126            0 :         id
     127            0 :     }
     128              : }
     129              : 
     130              : /// A structure to store gc_compaction jobs.
     131              : pub struct GcCompactionQueue {
     132              :     /// All items in the queue, and the currently-running job.
     133              :     inner: std::sync::Mutex<GcCompactionQueueInner>,
     134              :     /// Ensure only one thread is consuming the queue.
     135              :     consumer_lock: tokio::sync::Mutex<()>,
     136              : }
     137              : 
     138              : impl GcCompactionQueue {
     139            0 :     pub fn new() -> Self {
     140            0 :         GcCompactionQueue {
     141            0 :             inner: std::sync::Mutex::new(GcCompactionQueueInner {
     142            0 :                 running: None,
     143            0 :                 queued: VecDeque::new(),
     144            0 :                 notify: HashMap::new(),
     145            0 :                 gc_guards: HashMap::new(),
     146            0 :                 last_id: GcCompactionJobId(0),
     147            0 :             }),
     148            0 :             consumer_lock: tokio::sync::Mutex::new(()),
     149            0 :         }
     150            0 :     }
     151              : 
     152            0 :     pub fn cancel_scheduled(&self) {
     153            0 :         let mut guard = self.inner.lock().unwrap();
     154            0 :         guard.queued.clear();
     155            0 :         guard.notify.clear();
     156            0 :         guard.gc_guards.clear();
     157            0 :     }
     158              : 
     159              :     /// Schedule a manual compaction job.
     160            0 :     pub fn schedule_manual_compaction(
     161            0 :         &self,
     162            0 :         options: CompactOptions,
     163            0 :         notify: Option<tokio::sync::oneshot::Sender<()>>,
     164            0 :     ) -> GcCompactionJobId {
     165            0 :         let mut guard = self.inner.lock().unwrap();
     166            0 :         let id = guard.next_id();
     167            0 :         guard
     168            0 :             .queued
     169            0 :             .push_back((id, GcCompactionQueueItem::Manual(options)));
     170            0 :         if let Some(notify) = notify {
     171            0 :             guard.notify.insert(id, notify);
     172            0 :         }
     173            0 :         info!("scheduled compaction job id={}", id);
     174            0 :         id
     175            0 :     }
     176              : 
     177              :     /// Trigger an auto compaction.
     178              :     #[allow(dead_code)]
     179            0 :     pub fn trigger_auto_compaction(&self, _: &Arc<Timeline>) {}
     180              : 
      181              :     /// Notify the caller that the job has finished and unblock GC.
     182            0 :     fn notify_and_unblock(&self, id: GcCompactionJobId) {
     183            0 :         info!("compaction job id={} finished", id);
     184            0 :         let mut guard = self.inner.lock().unwrap();
     185            0 :         if let Some(blocking) = guard.gc_guards.remove(&id) {
     186            0 :             drop(blocking)
     187            0 :         }
     188            0 :         if let Some(tx) = guard.notify.remove(&id) {
     189            0 :             let _ = tx.send(());
     190            0 :         }
     191            0 :     }
     192              : 
     193            0 :     async fn handle_sub_compaction(
     194            0 :         &self,
     195            0 :         id: GcCompactionJobId,
     196            0 :         options: CompactOptions,
     197            0 :         timeline: &Arc<Timeline>,
     198            0 :         gc_block: &GcBlock,
     199            0 :     ) -> Result<(), CompactionError> {
     200            0 :         info!("running scheduled enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs");
     201            0 :         let jobs: Vec<GcCompactJob> = timeline
     202            0 :             .gc_compaction_split_jobs(
     203            0 :                 GcCompactJob::from_compact_options(options.clone()),
     204            0 :                 options.sub_compaction_max_job_size_mb,
     205            0 :             )
     206            0 :             .await
     207            0 :             .map_err(CompactionError::Other)?;
     208            0 :         if jobs.is_empty() {
     209            0 :             info!("no jobs to run, skipping scheduled compaction task");
     210            0 :             self.notify_and_unblock(id);
     211              :         } else {
     212            0 :             let gc_guard = match gc_block.start().await {
     213            0 :                 Ok(guard) => guard,
     214            0 :                 Err(e) => {
     215            0 :                     return Err(CompactionError::Other(anyhow!(
     216            0 :                         "cannot run gc-compaction because gc is blocked: {}",
     217            0 :                         e
     218            0 :                     )));
     219              :                 }
     220              :             };
     221              : 
     222            0 :             let jobs_len = jobs.len();
     223            0 :             let mut pending_tasks = Vec::new();
     224            0 :             for job in jobs {
      225              :                 // Unfortunately we need to convert the `GcCompactJob` back to `CompactOptions`
      226              :                 // until further refactoring allows us to call `compact_with_gc` directly.
     227            0 :                 let mut flags: EnumSet<CompactFlags> = EnumSet::default();
     228            0 :                 flags |= CompactFlags::EnhancedGcBottomMostCompaction;
     229            0 :                 if job.dry_run {
     230            0 :                     flags |= CompactFlags::DryRun;
     231            0 :                 }
     232            0 :                 let options = CompactOptions {
     233            0 :                     flags,
     234            0 :                     sub_compaction: false,
     235            0 :                     compact_key_range: Some(job.compact_key_range.into()),
     236            0 :                     compact_lsn_range: Some(job.compact_lsn_range.into()),
     237            0 :                     sub_compaction_max_job_size_mb: None,
     238            0 :                 };
     239            0 :                 pending_tasks.push(GcCompactionQueueItem::SubCompactionJob(options));
     240              :             }
     241            0 :             pending_tasks.push(GcCompactionQueueItem::Notify(id));
     242            0 :             {
     243            0 :                 let mut guard = self.inner.lock().unwrap();
     244            0 :                 guard.gc_guards.insert(id, gc_guard);
     245            0 :                 let mut tasks = Vec::new();
     246            0 :                 for task in pending_tasks {
     247            0 :                     let id = guard.next_id();
     248            0 :                     tasks.push((id, task));
     249            0 :                 }
     250            0 :                 tasks.reverse();
     251            0 :                 for item in tasks {
     252            0 :                     guard.queued.push_front(item);
     253            0 :                 }
     254              :             }
     255            0 :             info!("scheduled enhanced gc bottom-most compaction with sub-compaction, split into {} jobs", jobs_len);
     256              :         }
     257            0 :         Ok(())
     258            0 :     }
     259              : 
      260              :     /// Take a job from the queue and process it. Returns whether there are still pending tasks.
     261            0 :     pub async fn iteration(
     262            0 :         &self,
     263            0 :         cancel: &CancellationToken,
     264            0 :         ctx: &RequestContext,
     265            0 :         gc_block: &GcBlock,
     266            0 :         timeline: &Arc<Timeline>,
     267            0 :     ) -> Result<CompactionOutcome, CompactionError> {
     268            0 :         let _one_op_at_a_time_guard = self.consumer_lock.lock().await;
     269              :         let has_pending_tasks;
     270            0 :         let (id, item) = {
     271            0 :             let mut guard = self.inner.lock().unwrap();
     272            0 :             let Some((id, item)) = guard.queued.pop_front() else {
     273            0 :                 return Ok(CompactionOutcome::Done);
     274              :             };
     275            0 :             guard.running = Some((id, item.clone()));
     276            0 :             has_pending_tasks = !guard.queued.is_empty();
     277            0 :             (id, item)
     278            0 :         };
     279            0 : 
     280            0 :         match item {
     281            0 :             GcCompactionQueueItem::Manual(options) => {
     282            0 :                 if !options
     283            0 :                     .flags
     284            0 :                     .contains(CompactFlags::EnhancedGcBottomMostCompaction)
     285              :                 {
     286            0 :                     warn!("ignoring scheduled compaction task: scheduled task must be gc compaction: {:?}", options);
     287            0 :                 } else if options.sub_compaction {
     288            0 :                     self.handle_sub_compaction(id, options, timeline, gc_block)
     289            0 :                         .await?;
     290              :                 } else {
     291            0 :                     let gc_guard = match gc_block.start().await {
     292            0 :                         Ok(guard) => guard,
     293            0 :                         Err(e) => {
     294            0 :                             return Err(CompactionError::Other(anyhow!(
     295            0 :                                 "cannot run gc-compaction because gc is blocked: {}",
     296            0 :                                 e
     297            0 :                             )));
     298              :                         }
     299              :                     };
     300            0 :                     {
     301            0 :                         let mut guard = self.inner.lock().unwrap();
     302            0 :                         guard.gc_guards.insert(id, gc_guard);
     303            0 :                     }
     304            0 :                     let _ = timeline.compact_with_options(cancel, options, ctx).await?;
     305            0 :                     self.notify_and_unblock(id);
     306              :                 }
     307              :             }
     308            0 :             GcCompactionQueueItem::SubCompactionJob(options) => {
     309            0 :                 let _ = timeline.compact_with_options(cancel, options, ctx).await?;
     310              :             }
     311            0 :             GcCompactionQueueItem::Notify(id) => {
     312            0 :                 self.notify_and_unblock(id);
     313            0 :             }
     314              :             GcCompactionQueueItem::UpdateL2Lsn(_) => {
     315            0 :                 unreachable!()
     316              :             }
     317              :         }
     318            0 :         {
     319            0 :             let mut guard = self.inner.lock().unwrap();
     320            0 :             guard.running = None;
     321            0 :         }
     322            0 :         Ok(if has_pending_tasks {
     323            0 :             CompactionOutcome::Pending
     324              :         } else {
     325            0 :             CompactionOutcome::Done
     326              :         })
     327            0 :     }
     328              : 
     329              :     #[allow(clippy::type_complexity)]
     330            0 :     pub fn remaining_jobs(
     331            0 :         &self,
     332            0 :     ) -> (
     333            0 :         Option<(GcCompactionJobId, GcCompactionQueueItem)>,
     334            0 :         VecDeque<(GcCompactionJobId, GcCompactionQueueItem)>,
     335            0 :     ) {
     336            0 :         let guard = self.inner.lock().unwrap();
     337            0 :         (guard.running.clone(), guard.queued.clone())
     338            0 :     }
     339              : 
     340              :     #[allow(dead_code)]
     341            0 :     pub fn remaining_jobs_num(&self) -> usize {
     342            0 :         let guard = self.inner.lock().unwrap();
     343            0 :         guard.queued.len() + if guard.running.is_some() { 1 } else { 0 }
     344            0 :     }
     345              : }
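// Illustrative sketch (not part of the covered source): one way a caller could
// drive this queue. The bindings `queue`, `options`, `cancel`, `ctx`, `gc_block`
// and `timeline` are assumed to be in scope; only the methods shown above are used.
//
//     let (tx, rx) = tokio::sync::oneshot::channel();
//     let id = queue.schedule_manual_compaction(options, Some(tx));
//     // Drain the queue until nothing is pending; each call runs at most one
//     // job at a time thanks to `consumer_lock`.
//     while queue.iteration(&cancel, &ctx, &gc_block, &timeline).await?
//         == CompactionOutcome::Pending
//     {}
//     rx.await.ok(); // resolved by notify_and_unblock(id)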
     346              : 
      347              : /// A job description for the gc-compaction job. This structure describes the rectangular key/LSN range that the job will
     348              : /// process. The exact layers that need to be compacted/rewritten will be generated when `compact_with_gc` gets
     349              : /// called.
     350              : #[derive(Debug, Clone)]
     351              : pub(crate) struct GcCompactJob {
     352              :     pub dry_run: bool,
     353              :     /// The key range to be compacted. The compaction algorithm will only regenerate key-value pairs within this range
     354              :     /// [left inclusive, right exclusive), and other pairs will be rewritten into new files if necessary.
     355              :     pub compact_key_range: Range<Key>,
     356              :     /// The LSN range to be compacted. The compaction algorithm will use this range to determine the layers to be
     357              :     /// selected for the compaction, and it does not guarantee the generated layers will have exactly the same LSN range
     358              :     /// as specified here. The true range being compacted is `min_lsn/max_lsn` in [`GcCompactionJobDescription`].
      359              :     /// min_lsn will always be <= the lower bound specified here, and max_lsn will always be >= the upper bound specified here.
     360              :     pub compact_lsn_range: Range<Lsn>,
     361              : }
     362              : 
     363              : impl GcCompactJob {
     364          108 :     pub fn from_compact_options(options: CompactOptions) -> Self {
     365          108 :         GcCompactJob {
     366          108 :             dry_run: options.flags.contains(CompactFlags::DryRun),
     367          108 :             compact_key_range: options
     368          108 :                 .compact_key_range
     369          108 :                 .map(|x| x.into())
     370          108 :                 .unwrap_or(Key::MIN..Key::MAX),
     371          108 :             compact_lsn_range: options
     372          108 :                 .compact_lsn_range
     373          108 :                 .map(|x| x.into())
     374          108 :                 .unwrap_or(Lsn::INVALID..Lsn::MAX),
     375          108 :         }
     376          108 :     }
     377              : }
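// Illustrative sketch (not part of the covered source): absent explicit ranges,
// the job widens to the full key/LSN rectangle. `options` is assumed to be a
// `CompactOptions` with both ranges set to `None` and without the `DryRun` flag.
//
//     let job = GcCompactJob::from_compact_options(options);
//     assert!(!job.dry_run);
//     assert_eq!(job.compact_key_range, Key::MIN..Key::MAX);
//     assert_eq!(job.compact_lsn_range, Lsn::INVALID..Lsn::MAX);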
     378              : 
     379              : /// A job description for the gc-compaction job. This structure is generated when `compact_with_gc` is called
     380              : /// and contains the exact layers we want to compact.
     381              : pub struct GcCompactionJobDescription {
     382              :     /// All layers to read in the compaction job
     383              :     selected_layers: Vec<Layer>,
     384              :     /// GC cutoff of the job. This is the lowest LSN that will be accessed by the read/GC path and we need to
     385              :     /// keep all deltas <= this LSN or generate an image == this LSN.
     386              :     gc_cutoff: Lsn,
      387              :     /// LSNs to retain for the job. The read path will use these LSNs, so we need to keep deltas <= each such LSN or
      388              :     /// generate an image == that LSN.
     389              :     retain_lsns_below_horizon: Vec<Lsn>,
     390              :     /// Maximum layer LSN processed in this compaction, that is max(end_lsn of layers). Exclusive. All data
     391              :     /// \>= this LSN will be kept and will not be rewritten.
     392              :     max_layer_lsn: Lsn,
     393              :     /// Minimum layer LSN processed in this compaction, that is min(start_lsn of layers). Inclusive.
      394              :     /// All accesses strictly below (`<`) this LSN will be routed through the normal read path instead of
     395              :     /// k-merge within gc-compaction.
     396              :     min_layer_lsn: Lsn,
     397              :     /// Only compact layers overlapping with this range.
     398              :     compaction_key_range: Range<Key>,
     399              :     /// When partial compaction is enabled, these layers need to be rewritten to ensure no overlap.
     400              :     /// This field is here solely for debugging. The field will not be read once the compaction
     401              :     /// description is generated.
     402              :     rewrite_layers: Vec<Arc<PersistentLayerDesc>>,
     403              : }
     404              : 
     405              : /// The result of bottom-most compaction for a single key at each LSN.
     406              : #[derive(Debug)]
     407              : #[cfg_attr(test, derive(PartialEq))]
     408              : pub struct KeyLogAtLsn(pub Vec<(Lsn, Value)>);
     409              : 
     410              : /// The result of bottom-most compaction.
     411              : #[derive(Debug)]
     412              : #[cfg_attr(test, derive(PartialEq))]
     413              : pub(crate) struct KeyHistoryRetention {
     414              :     /// Stores logs to reconstruct the value at the given LSN, that is to say, logs <= LSN or image == LSN.
     415              :     pub(crate) below_horizon: Vec<(Lsn, KeyLogAtLsn)>,
     416              :     /// Stores logs to reconstruct the value at any LSN above the horizon, that is to say, log > LSN.
     417              :     pub(crate) above_horizon: KeyLogAtLsn,
     418              : }
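// Illustrative sketch (not part of the covered source): a retention result for
// one key with horizons at 0x20 and 0x30. `img`, `rec` and `rec2` are placeholder
// values of types `Bytes` and `NeonWalRecord`.
//
//     let retention = KeyHistoryRetention {
//         below_horizon: vec![
//             // A single image at the horizon reconstructs any LSN <= 0x20.
//             (Lsn(0x20), KeyLogAtLsn(vec![(Lsn(0x20), Value::Image(img))])),
//             // Between horizons, only the deltas in (0x20, 0x30] are kept.
//             (Lsn(0x30), KeyLogAtLsn(vec![(Lsn(0x28), Value::WalRecord(rec))])),
//         ],
//         // Everything above the last horizon stays as deltas.
//         above_horizon: KeyLogAtLsn(vec![(Lsn(0x38), Value::WalRecord(rec2))]),
//     };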
     419              : 
     420              : impl KeyHistoryRetention {
      421              :     /// Hack: skip the layer if we would produce a layer with the same key-LSN range as an existing one.
     422              :     ///
     423              :     /// This can happen if we have removed some deltas in "the middle" of some existing layer's key-lsn-range.
     424              :     /// For example, consider the case where a single delta with range [0x10,0x50) exists.
     425              :     /// And we have branches at LSN 0x10, 0x20, 0x30.
     426              :     /// Then we delete branch @ 0x20.
     427              :     /// Bottom-most compaction may now delete the delta [0x20,0x30).
      428              :     /// And that wouldn't change the shape of the layer.
     429              :     ///
     430              :     /// Note that bottom-most-gc-compaction never _adds_ new data in that case, only removes.
     431              :     ///
     432              :     /// `discard_key` will only be called when the writer reaches its target (instead of for every key), so it's fine to grab a lock inside.
     433          148 :     async fn discard_key(key: &PersistentLayerKey, tline: &Arc<Timeline>, dry_run: bool) -> bool {
     434          148 :         if dry_run {
     435            0 :             return true;
     436          148 :         }
     437          148 :         if LayerMap::is_l0(&key.key_range, key.is_delta) {
     438              :             // gc-compaction should not produce L0 deltas, otherwise it will break the layer order.
     439              :             // We should ignore such layers.
     440            0 :             return true;
     441          148 :         }
     442              :         let layer_generation;
     443              :         {
     444          148 :             let guard = tline.layers.read().await;
     445          148 :             if !guard.contains_key(key) {
     446          104 :                 return false;
     447           44 :             }
     448           44 :             layer_generation = guard.get_from_key(key).metadata().generation;
     449           44 :         }
     450           44 :         if layer_generation == tline.generation {
     451           44 :             info!(
     452              :                 key=%key,
     453              :                 ?layer_generation,
     454            0 :                 "discard layer due to duplicated layer key in the same generation",
     455              :             );
     456           44 :             true
     457              :         } else {
     458            0 :             false
     459              :         }
     460          148 :     }
     461              : 
     462              :     /// Pipe a history of a single key to the writers.
     463              :     ///
      464              :     /// If `image_writer` is `None`, the images will be placed into the delta layers.
     465              :     /// The delta writer will contain all images and deltas (below and above the horizon) except the bottom-most images.
     466              :     #[allow(clippy::too_many_arguments)]
     467         1244 :     async fn pipe_to(
     468         1244 :         self,
     469         1244 :         key: Key,
     470         1244 :         delta_writer: &mut SplitDeltaLayerWriter,
     471         1244 :         mut image_writer: Option<&mut SplitImageLayerWriter>,
     472         1244 :         stat: &mut CompactionStatistics,
     473         1244 :         ctx: &RequestContext,
     474         1244 :     ) -> anyhow::Result<()> {
     475         1244 :         let mut first_batch = true;
     476         4024 :         for (cutoff_lsn, KeyLogAtLsn(logs)) in self.below_horizon {
     477         2780 :             if first_batch {
     478         1244 :                 if logs.len() == 1 && logs[0].1.is_image() {
     479         1168 :                     let Value::Image(img) = &logs[0].1 else {
     480            0 :                         unreachable!()
     481              :                     };
     482         1168 :                     stat.produce_image_key(img);
     483         1168 :                     if let Some(image_writer) = image_writer.as_mut() {
     484         1168 :                         image_writer.put_image(key, img.clone(), ctx).await?;
     485              :                     } else {
     486            0 :                         delta_writer
     487            0 :                             .put_value(key, cutoff_lsn, Value::Image(img.clone()), ctx)
     488            0 :                             .await?;
     489              :                     }
     490              :                 } else {
     491          132 :                     for (lsn, val) in logs {
     492           56 :                         stat.produce_key(&val);
     493           56 :                         delta_writer.put_value(key, lsn, val, ctx).await?;
     494              :                     }
     495              :                 }
     496         1244 :                 first_batch = false;
     497              :             } else {
     498         1768 :                 for (lsn, val) in logs {
     499          232 :                     stat.produce_key(&val);
     500          232 :                     delta_writer.put_value(key, lsn, val, ctx).await?;
     501              :                 }
     502              :             }
     503              :         }
     504         1244 :         let KeyLogAtLsn(above_horizon_logs) = self.above_horizon;
     505         1360 :         for (lsn, val) in above_horizon_logs {
     506          116 :             stat.produce_key(&val);
     507          116 :             delta_writer.put_value(key, lsn, val, ctx).await?;
     508              :         }
     509         1244 :         Ok(())
     510         1244 :     }
     511              : }
     512              : 
     513              : #[derive(Debug, Serialize, Default)]
     514              : struct CompactionStatisticsNumSize {
     515              :     num: u64,
     516              :     size: u64,
     517              : }
     518              : 
     519              : #[derive(Debug, Serialize, Default)]
     520              : pub struct CompactionStatistics {
     521              :     delta_layer_visited: CompactionStatisticsNumSize,
     522              :     image_layer_visited: CompactionStatisticsNumSize,
     523              :     delta_layer_produced: CompactionStatisticsNumSize,
     524              :     image_layer_produced: CompactionStatisticsNumSize,
     525              :     num_delta_layer_discarded: usize,
     526              :     num_image_layer_discarded: usize,
     527              :     num_unique_keys_visited: usize,
     528              :     wal_keys_visited: CompactionStatisticsNumSize,
     529              :     image_keys_visited: CompactionStatisticsNumSize,
     530              :     wal_produced: CompactionStatisticsNumSize,
     531              :     image_produced: CompactionStatisticsNumSize,
     532              : }
     533              : 
     534              : impl CompactionStatistics {
     535         2084 :     fn estimated_size_of_value(val: &Value) -> usize {
     536          864 :         match val {
     537         1220 :             Value::Image(img) => img.len(),
     538            0 :             Value::WalRecord(NeonWalRecord::Postgres { rec, .. }) => rec.len(),
     539          864 :             _ => std::mem::size_of::<NeonWalRecord>(),
     540              :         }
     541         2084 :     }
     542         3272 :     fn estimated_size_of_key() -> usize {
     543         3272 :         KEY_SIZE // TODO: distinguish image layer and delta layer (count LSN in delta layer)
     544         3272 :     }
     545          172 :     fn visit_delta_layer(&mut self, size: u64) {
     546          172 :         self.delta_layer_visited.num += 1;
     547          172 :         self.delta_layer_visited.size += size;
     548          172 :     }
     549          132 :     fn visit_image_layer(&mut self, size: u64) {
     550          132 :         self.image_layer_visited.num += 1;
     551          132 :         self.image_layer_visited.size += size;
     552          132 :     }
     553         1244 :     fn on_unique_key_visited(&mut self) {
     554         1244 :         self.num_unique_keys_visited += 1;
     555         1244 :     }
     556          480 :     fn visit_wal_key(&mut self, val: &Value) {
     557          480 :         self.wal_keys_visited.num += 1;
     558          480 :         self.wal_keys_visited.size +=
     559          480 :             Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
     560          480 :     }
     561         1220 :     fn visit_image_key(&mut self, val: &Value) {
     562         1220 :         self.image_keys_visited.num += 1;
     563         1220 :         self.image_keys_visited.size +=
     564         1220 :             Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
     565         1220 :     }
     566          404 :     fn produce_key(&mut self, val: &Value) {
     567          404 :         match val {
     568           20 :             Value::Image(img) => self.produce_image_key(img),
     569          384 :             Value::WalRecord(_) => self.produce_wal_key(val),
     570              :         }
     571          404 :     }
     572          384 :     fn produce_wal_key(&mut self, val: &Value) {
     573          384 :         self.wal_produced.num += 1;
     574          384 :         self.wal_produced.size +=
     575          384 :             Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
     576          384 :     }
     577         1188 :     fn produce_image_key(&mut self, val: &Bytes) {
     578         1188 :         self.image_produced.num += 1;
     579         1188 :         self.image_produced.size += val.len() as u64 + Self::estimated_size_of_key() as u64;
     580         1188 :     }
     581           28 :     fn discard_delta_layer(&mut self) {
     582           28 :         self.num_delta_layer_discarded += 1;
     583           28 :     }
     584           16 :     fn discard_image_layer(&mut self) {
     585           16 :         self.num_image_layer_discarded += 1;
     586           16 :     }
     587           44 :     fn produce_delta_layer(&mut self, size: u64) {
     588           44 :         self.delta_layer_produced.num += 1;
     589           44 :         self.delta_layer_produced.size += size;
     590           44 :     }
     591           60 :     fn produce_image_layer(&mut self, size: u64) {
     592           60 :         self.image_layer_produced.num += 1;
     593           60 :         self.image_layer_produced.size += size;
     594           60 :     }
     595              : }
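// Illustrative sketch (not part of the covered source; the fields and methods
// are private, so this only works inside this module): visiting a 100-byte image
// value grows the counter by the value size plus the fixed per-key overhead.
//
//     let mut stat = CompactionStatistics::default();
//     stat.visit_image_key(&Value::Image(Bytes::from(vec![0u8; 100])));
//     assert_eq!(stat.image_keys_visited.num, 1);
//     assert_eq!(stat.image_keys_visited.size, 100 + KEY_SIZE as u64);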
     596              : 
     597              : #[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
     598              : pub enum CompactionOutcome {
     599              :     #[default]
     600              :     /// No layers need to be compacted after this round. Compaction doesn't need
     601              :     /// to be immediately scheduled.
     602              :     Done,
     603              :     /// Still has pending layers to be compacted after this round. Ideally, the scheduler
     604              :     /// should immediately schedule another compaction.
     605              :     Pending,
     606              :     /// A timeline needs L0 compaction. Yield and schedule an immediate L0 compaction pass (only
     607              :     /// guaranteed when `compaction_l0_first` is enabled).
     608              :     YieldForL0,
     609              :     /// Compaction was skipped, because the timeline is ineligible for compaction.
     610              :     Skipped,
     611              : }
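// Illustrative sketch (not part of the covered source): how a scheduler might
// react to each outcome. `run_compaction` and the `schedule_*` helpers are
// hypothetical.
//
//     match run_compaction().await? {
//         // Nothing left to do: wait for the next scheduled tick.
//         CompactionOutcome::Done | CompactionOutcome::Skipped => {}
//         // More layers still need compaction: run another pass soon.
//         CompactionOutcome::Pending => schedule_another_pass(),
//         // L0 backlog detected: prioritize an L0-only pass.
//         CompactionOutcome::YieldForL0 => schedule_l0_pass(),
//     }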
     612              : 
     613              : impl Timeline {
     614              :     /// TODO: cancellation
     615              :     ///
     616              :     /// Returns whether the compaction has pending tasks.
     617          728 :     pub(crate) async fn compact_legacy(
     618          728 :         self: &Arc<Self>,
     619          728 :         cancel: &CancellationToken,
     620          728 :         options: CompactOptions,
     621          728 :         ctx: &RequestContext,
     622          728 :     ) -> Result<CompactionOutcome, CompactionError> {
     623          728 :         if options
     624          728 :             .flags
     625          728 :             .contains(CompactFlags::EnhancedGcBottomMostCompaction)
     626              :         {
     627            0 :             self.compact_with_gc(cancel, options, ctx)
     628            0 :                 .await
     629            0 :                 .map_err(CompactionError::Other)?;
     630            0 :             return Ok(CompactionOutcome::Done);
     631          728 :         }
     632          728 : 
     633          728 :         if options.flags.contains(CompactFlags::DryRun) {
     634            0 :             return Err(CompactionError::Other(anyhow!(
     635            0 :                 "dry-run mode is not supported for legacy compaction for now"
     636            0 :             )));
     637          728 :         }
     638          728 : 
     639          728 :         if options.compact_key_range.is_some() || options.compact_lsn_range.is_some() {
     640              :             // maybe useful in the future? could implement this at some point
     641            0 :             return Err(CompactionError::Other(anyhow!(
     642            0 :                 "compaction range is not supported for legacy compaction for now"
     643            0 :             )));
     644          728 :         }
     645          728 : 
     646          728 :         // High level strategy for compaction / image creation:
     647          728 :         //
      648          728 :         // 1. First, do an L0 compaction to ensure we move the L0
      649          728 :         // layers into the historic layer map and get flat levels of
     650          728 :         // layers. If we did not compact all L0 layers, we will
     651          728 :         // prioritize compacting the timeline again and not do
     652          728 :         // any of the compactions below.
     653          728 :         //
     654          728 :         // 2. Then, calculate the desired "partitioning" of the
     655          728 :         // currently in-use key space. The goal is to partition the
     656          728 :         // key space into roughly fixed-size chunks, but also take into
     657          728 :         // account any existing image layers, and try to align the
     658          728 :         // chunk boundaries with the existing image layers to avoid
     659          728 :         // too much churn. Also try to align chunk boundaries with
     660          728 :         // relation boundaries.  In principle, we don't know about
     661          728 :         // relation boundaries here, we just deal with key-value
     662          728 :         // pairs, and the code in pgdatadir_mapping.rs knows how to
     663          728 :         // map relations into key-value pairs. But in practice we know
     664          728 :         // that 'field6' is the block number, and the fields 1-5
     665          728 :         // identify a relation. This is just an optimization,
     666          728 :         // though.
     667          728 :         //
     668          728 :         // 3. Once we know the partitioning, for each partition,
     669          728 :         // decide if it's time to create a new image layer. The
      670          728 :         // criterion is: has there been too much "churn" since the last
      671          728 :         // image layer? "Churn" is a fuzzy concept: it's a
      672          728 :         // combination of too many delta files, or too much WAL in
      673          728 :         // total in the delta files. Or perhaps: whether creating an image
      674          728 :         // file would allow us to delete some older files.
     675          728 :         //
     676          728 :         // 4. In the end, if the tenant gets auto-sharded, we will run
     677          728 :         // a shard-ancestor compaction.
     678          728 : 
     679          728 :         // Is the timeline being deleted?
     680          728 :         if self.is_stopping() {
     681            0 :             trace!("Dropping out of compaction on timeline shutdown");
     682            0 :             return Err(CompactionError::ShuttingDown);
     683          728 :         }
     684          728 : 
     685          728 :         let target_file_size = self.get_checkpoint_distance();
     686              : 
     687              :         // Define partitioning schema if needed
     688              : 
     689              :         // 1. L0 Compact
     690          728 :         let l0_outcome = {
     691          728 :             let timer = self.metrics.compact_time_histo.start_timer();
     692          728 :             let l0_outcome = self
     693          728 :                 .compact_level0(
     694          728 :                     target_file_size,
     695          728 :                     options.flags.contains(CompactFlags::ForceL0Compaction),
     696          728 :                     ctx,
     697          728 :                 )
     698          728 :                 .await?;
     699          728 :             timer.stop_and_record();
     700          728 :             l0_outcome
     701          728 :         };
     702          728 : 
     703          728 :         if options.flags.contains(CompactFlags::OnlyL0Compaction) {
     704            0 :             return Ok(l0_outcome);
     705          728 :         }
     706          728 : 
     707          728 :         // Yield if we have pending L0 compaction. The scheduler will do another pass.
     708          728 :         if (l0_outcome == CompactionOutcome::Pending || l0_outcome == CompactionOutcome::YieldForL0)
     709            0 :             && !options.flags.contains(CompactFlags::NoYield)
     710              :         {
     711            0 :             info!("image/ancestor compaction yielding for L0 compaction");
     712            0 :             return Ok(CompactionOutcome::YieldForL0);
     713          728 :         }
     714          728 : 
     715          728 :         // 2. Repartition and create image layers if necessary
     716          728 :         match self
     717          728 :             .repartition(
     718          728 :                 self.get_last_record_lsn(),
     719          728 :                 self.get_compaction_target_size(),
     720          728 :                 options.flags,
     721          728 :                 ctx,
     722          728 :             )
     723          728 :             .await
     724              :         {
     725          728 :             Ok(((dense_partitioning, sparse_partitioning), lsn)) => {
     726          728 :                 // Disables access_stats updates, so that the files we read remain candidates for eviction after we're done with them
     727          728 :                 let image_ctx = RequestContextBuilder::extend(ctx)
     728          728 :                     .access_stats_behavior(AccessStatsBehavior::Skip)
     729          728 :                     .build();
     730          728 : 
     731          728 :                 let mut partitioning = dense_partitioning;
     732          728 :                 partitioning
     733          728 :                     .parts
     734          728 :                     .extend(sparse_partitioning.into_dense().parts);
     735              : 
     736              :                 // 3. Create new image layers for partitions that have been modified "enough".
     737          728 :                 let (image_layers, outcome) = self
     738          728 :                     .create_image_layers(
     739          728 :                         &partitioning,
     740          728 :                         lsn,
     741          728 :                         if options
     742          728 :                             .flags
     743          728 :                             .contains(CompactFlags::ForceImageLayerCreation)
     744              :                         {
     745           28 :                             ImageLayerCreationMode::Force
     746              :                         } else {
     747          700 :                             ImageLayerCreationMode::Try
     748              :                         },
     749          728 :                         &image_ctx,
     750          728 :                         self.last_image_layer_creation_status
     751          728 :                             .load()
     752          728 :                             .as_ref()
     753          728 :                             .clone(),
     754          728 :                         !options.flags.contains(CompactFlags::NoYield),
     755          728 :                     )
     756          728 :                     .await
     757          728 :                     .inspect_err(|err| {
     758              :                         if let CreateImageLayersError::GetVectoredError(
     759              :                             GetVectoredError::MissingKey(_),
     760            0 :                         ) = err
     761              :                         {
     762            0 :                             critical!("missing key during compaction: {err:?}");
     763            0 :                         }
     764          728 :                     })?;
     765              : 
     766          728 :                 self.last_image_layer_creation_status
     767          728 :                     .store(Arc::new(outcome.clone()));
     768          728 : 
     769          728 :                 self.upload_new_image_layers(image_layers)?;
     770          728 :                 if let LastImageLayerCreationStatus::Incomplete { .. } = outcome {
     771              :                     // Yield and do not do any other kind of compaction.
     772            0 :                     info!("skipping shard ancestor compaction due to pending image layer generation tasks (preempted by L0 compaction).");
     773            0 :                     return Ok(CompactionOutcome::YieldForL0);
     774          728 :                 }
     775              :             }
     776            0 :             Err(err) => {
     777            0 :                 // no partitioning? This is normal, if the timeline was just created
     778            0 :                 // as an empty timeline. Also in unit tests, when we use the timeline
     779            0 :                 // as a simple key-value store, ignoring the datadir layout. Log the
     780            0 :                 // error but continue.
     781            0 :                 //
     782            0 :                 // Suppress error when it's due to cancellation
     783            0 :                 if !self.cancel.is_cancelled() && !err.is_cancelled() {
     784            0 :                     tracing::error!("could not compact, repartitioning keyspace failed: {err:?}");
     785            0 :                 }
     786              :             }
     787              :         };
     788              : 
     789          728 :         let partition_count = self.partitioning.read().0 .0.parts.len();
     790          728 : 
     791          728 :         // 4. Shard ancestor compaction
     792          728 : 
     793          728 :         if self.shard_identity.count >= ShardCount::new(2) {
     794              :             // Limit the number of layer rewrites to the number of partitions: this means its
     795              :             // runtime should be comparable to a full round of image layer creations, rather than
     796              :             // being potentially much longer.
     797            0 :             let rewrite_max = partition_count;
     798            0 : 
     799            0 :             self.compact_shard_ancestors(rewrite_max, ctx).await?;
     800          728 :         }
     801              : 
     802          728 :         Ok(CompactionOutcome::Done)
     803          728 :     }
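// Illustrative sketch (not part of the covered source): invoking the legacy path
// with forced image-layer creation. `timeline`, `cancel` and `ctx` are assumed
// to be in scope.
//
//     let mut flags: EnumSet<CompactFlags> = EnumSet::default();
//     flags |= CompactFlags::ForceImageLayerCreation;
//     let outcome = timeline
//         .compact_legacy(
//             &cancel,
//             CompactOptions {
//                 flags,
//                 sub_compaction: false,
//                 compact_key_range: None, // Some(..) is rejected by this path
//                 compact_lsn_range: None,
//                 sub_compaction_max_job_size_mb: None,
//             },
//             &ctx,
//         )
//         .await?;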
     804              : 
      805              :     /// Check for layers that are eligible to be rewritten:
      806              :     /// - Shard splitting: after a shard split, rewrite ancestor layers beyond pitr_interval, so that
      807              :     ///   we don't indefinitely retain keys in this shard that aren't needed.
      808              :     /// - For future use: rewrite layers beyond pitr_interval that are in formats we would
      809              :     ///   rather not maintain compatibility with indefinitely.
     810              :     ///
     811              :     /// Note: this phase may read and write many gigabytes of data: use rewrite_max to bound
     812              :     /// how much work it will try to do in each compaction pass.
     813            0 :     async fn compact_shard_ancestors(
     814            0 :         self: &Arc<Self>,
     815            0 :         rewrite_max: usize,
     816            0 :         ctx: &RequestContext,
     817            0 :     ) -> Result<(), CompactionError> {
     818            0 :         let mut drop_layers = Vec::new();
     819            0 :         let mut layers_to_rewrite: Vec<Layer> = Vec::new();
     820            0 : 
     821            0 :         // We will use the Lsn cutoff of the last GC as a threshold for rewriting layers: if a
     822            0 :         // layer is behind this Lsn, it indicates that the layer is being retained beyond the
     823            0 :         // pitr_interval, for example because a branchpoint references it.
     824            0 :         //
     825            0 :         // Holding this read guard also blocks [`Self::gc_timeline`] from entering while we
     826            0 :         // are rewriting layers.
     827            0 :         let latest_gc_cutoff = self.get_applied_gc_cutoff_lsn();
     828            0 : 
     829            0 :         tracing::info!(
     830            0 :             "latest_gc_cutoff: {}, pitr cutoff {}",
     831            0 :             *latest_gc_cutoff,
     832            0 :             self.gc_info.read().unwrap().cutoffs.time
     833              :         );
     834              : 
     835            0 :         let layers = self.layers.read().await;
     836            0 :         for layer_desc in layers.layer_map()?.iter_historic_layers() {
     837            0 :             let layer = layers.get_from_desc(&layer_desc);
     838            0 :             if layer.metadata().shard.shard_count == self.shard_identity.count {
     839              :                 // This layer does not belong to a historic ancestor, no need to re-image it.
     840            0 :                 continue;
     841            0 :             }
     842            0 : 
     843            0 :             // This layer was created on an ancestor shard: check if it contains any data for this shard.
     844            0 :             let sharded_range = ShardedRange::new(layer_desc.get_key_range(), &self.shard_identity);
     845            0 :             let layer_local_page_count = sharded_range.page_count();
     846            0 :             let layer_raw_page_count = ShardedRange::raw_size(&layer_desc.get_key_range());
     847            0 :             if layer_local_page_count == 0 {
     848              :                 // This ancestral layer only covers keys that belong to other shards.
     849              :                 // We include the full metadata in the log: if we had some critical bug that caused
     850              :                 // us to incorrectly drop layers, this would simplify manually debugging + reinstating those layers.
     851            0 :                 info!(%layer, old_metadata=?layer.metadata(),
     852            0 :                     "dropping layer after shard split, contains no keys for this shard.",
     853              :                 );
     854              : 
     855            0 :                 if cfg!(debug_assertions) {
      856              :                     // Expensive, exhaustive check of keys in this layer: this guards against ShardedRange's calculations being
      857              :                     // wrong. If ShardedRange claims the local page count is zero, then every key in this
      858              :                     // layer must satisfy is_key_disposable().
     859            0 :                     let range = layer_desc.get_key_range();
     860            0 :                     let mut key = range.start;
     861            0 :                     while key < range.end {
     862            0 :                         debug_assert!(self.shard_identity.is_key_disposable(&key));
     863            0 :                         key = key.next();
     864              :                     }
     865            0 :                 }
     866              : 
     867            0 :                 drop_layers.push(layer);
     868            0 :                 continue;
     869            0 :             } else if layer_local_page_count != u32::MAX
     870            0 :                 && layer_local_page_count == layer_raw_page_count
     871              :             {
     872            0 :                 debug!(%layer,
     873            0 :                     "layer is entirely shard local ({} keys), no need to filter it",
     874              :                     layer_local_page_count
     875              :                 );
     876            0 :                 continue;
     877            0 :             }
     878            0 : 
     879            0 :             // Don't bother re-writing a layer unless it will at least halve its size
      880            0 :             if layer_local_page_count != u32::MAX
      881            0 :                 && layer_local_page_count > layer_raw_page_count / 2
      882              :             {
      883            0 :                 debug!(%layer,
      884            0 :                     "layer is already mostly local ({}/{}), not rewriting",
      885              :                     layer_local_page_count,
      886              :                     layer_raw_page_count
      887              :                 );
                       :                 // Skip the rewrite, as the log message above states; without this
                       :                 // `continue` the layer would fall through and be rewritten anyway.
                       :                 continue;
      888            0 :             }
     889              : 
      890              :             // Don't bother re-writing a layer if it is within the PITR window: it will age out eventually
     891              :             // without incurring the I/O cost of a rewrite.
     892            0 :             if layer_desc.get_lsn_range().end >= *latest_gc_cutoff {
     893            0 :                 debug!(%layer, "Skipping rewrite of layer still in GC window ({} >= {})",
     894            0 :                     layer_desc.get_lsn_range().end, *latest_gc_cutoff);
     895            0 :                 continue;
     896            0 :             }
     897            0 : 
     898            0 :             if layer_desc.is_delta() {
     899              :                 // We do not yet implement rewrite of delta layers
     900            0 :                 debug!(%layer, "Skipping rewrite of delta layer");
     901            0 :                 continue;
     902            0 :             }
     903            0 : 
     904            0 :             // Only rewrite layers if their generations differ.  This guarantees:
     905            0 :             //  - that local rewrite is safe, as local layer paths will differ between existing layer and rewritten one
      906            0 :             //  - that the layer is persistent in remote storage, as we only see an old-generation layer by loading it from remote storage
     907            0 :             if layer.metadata().generation == self.generation {
     908            0 :                 debug!(%layer, "Skipping rewrite, is not from old generation");
     909            0 :                 continue;
     910            0 :             }
     911            0 : 
     912            0 :             if layers_to_rewrite.len() >= rewrite_max {
      913            0 :                 tracing::info!(%layer, "Will rewrite layer on a future compaction, already scheduled {} rewrites",
     914            0 :                     layers_to_rewrite.len()
     915              :                 );
     916            0 :                 continue;
     917            0 :             }
     918            0 : 
     919            0 :             // Fall through: all our conditions for doing a rewrite passed.
     920            0 :             layers_to_rewrite.push(layer);
     921              :         }
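                       :         // In summary, each ancestor layer is classified as drop / keep / rewrite. A
                       :         // minimal sketch of that decision, with plain-integer inputs standing in for
                       :         // the real ShardedRange math (the u32::MAX "unknown" sentinel and the
                       :         // PITR/delta/generation checks above are omitted):
                       :         //
                       :         //     enum AncestorLayerAction { Drop, Keep, Rewrite }
                       :         //
                       :         //     fn classify(local_pages: u32, raw_pages: u32) -> AncestorLayerAction {
                       :         //         if local_pages == 0 {
                       :         //             AncestorLayerAction::Drop // no keys for this shard at all
                       :         //         } else if local_pages == raw_pages || local_pages > raw_pages / 2 {
                       :         //             AncestorLayerAction::Keep // a rewrite would not halve the size
                       :         //         } else {
                       :         //             AncestorLayerAction::Rewrite // worth re-imaging for this shard
                       :         //         }
                       :         //     }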
     922              : 
     923              :         // Drop read lock on layer map before we start doing time-consuming I/O
     924            0 :         drop(layers);
     925            0 : 
     926            0 :         let mut replace_image_layers = Vec::new();
     927              : 
     928            0 :         for layer in layers_to_rewrite {
     929            0 :             tracing::info!(layer=%layer, "Rewriting layer after shard split...");
     930            0 :             let mut image_layer_writer = ImageLayerWriter::new(
     931            0 :                 self.conf,
     932            0 :                 self.timeline_id,
     933            0 :                 self.tenant_shard_id,
     934            0 :                 &layer.layer_desc().key_range,
     935            0 :                 layer.layer_desc().image_layer_lsn(),
     936            0 :                 ctx,
     937            0 :             )
     938            0 :             .await
     939            0 :             .map_err(CompactionError::Other)?;
     940              : 
     941              :             // Safety of layer rewrites:
     942              :             // - We are writing to a different local file path than we are reading from, so the old Layer
     943              :             //   cannot interfere with the new one.
     944              :             // - In the page cache, contents for a particular VirtualFile are stored with a file_id that
     945              :             //   is different for two layers with the same name (in `ImageLayerInner::new` we always
      946              :             //   acquire a fresh id from [`crate::page_cache::next_file_id`]).  So readers do not risk
     947              :             //   reading the index from one layer file, and then data blocks from the rewritten layer file.
     948              :             // - Any readers that have a reference to the old layer will keep it alive until they are done
     949              :             //   with it. If they are trying to promote from remote storage, that will fail, but this is the same
     950              :             //   as for compaction generally: compaction is allowed to delete layers that readers might be trying to use.
     951              :             // - We do not run concurrently with other kinds of compaction, so the only layer map writes we race with are:
     952              :             //    - GC, which at worst witnesses us "undelete" a layer that they just deleted.
     953              :             //    - ingestion, which only inserts layers, therefore cannot collide with us.
     954            0 :             let resident = layer.download_and_keep_resident().await?;
     955              : 
     956            0 :             let keys_written = resident
     957            0 :                 .filter(&self.shard_identity, &mut image_layer_writer, ctx)
     958            0 :                 .await?;
     959              : 
     960            0 :             if keys_written > 0 {
     961            0 :                 let (desc, path) = image_layer_writer
     962            0 :                     .finish(ctx)
     963            0 :                     .await
     964            0 :                     .map_err(CompactionError::Other)?;
     965            0 :                 let new_layer = Layer::finish_creating(self.conf, self, desc, &path)
     966            0 :                     .map_err(CompactionError::Other)?;
     967            0 :                 tracing::info!(layer=%new_layer, "Rewrote layer, {} -> {} bytes",
     968            0 :                     layer.metadata().file_size,
     969            0 :                     new_layer.metadata().file_size);
     970              : 
     971            0 :                 replace_image_layers.push((layer, new_layer));
     972            0 :             } else {
      973            0 :                 // Drop the old layer.  Usually the ShardedRange check above already catches this
      974            0 :                 // case, but the rewrite wrote no keys, so the layer is safe to drop here too.
     975            0 :                 drop_layers.push(layer);
     976            0 :             }
     977              :         }
     978              : 
     979              :         // At this point, we have replaced local layer files with their rewritten form, but not yet uploaded
     980              :         // metadata to reflect that. If we restart here, the replaced layer files will look invalid (size mismatch
     981              :         // to remote index) and be removed. This is inefficient but safe.
     982            0 :         fail::fail_point!("compact-shard-ancestors-localonly");
     983            0 : 
     984            0 :         // Update the LayerMap so that readers will use the new layers, and enqueue it for writing to remote storage
     985            0 :         self.rewrite_layers(replace_image_layers, drop_layers)
     986            0 :             .await?;
     987              : 
     988            0 :         fail::fail_point!("compact-shard-ancestors-enqueued");
     989            0 : 
     990            0 :         // We wait for all uploads to complete before finishing this compaction stage.  This is not
     991            0 :         // necessary for correctness, but it simplifies testing, and avoids proceeding with another
     992            0 :         // Timeline's compaction while this timeline's uploads may be generating lots of disk I/O
     993            0 :         // load.
     994            0 :         match self.remote_client.wait_completion().await {
     995            0 :             Ok(()) => (),
     996            0 :             Err(WaitCompletionError::NotInitialized(ni)) => return Err(CompactionError::from(ni)),
     997              :             Err(WaitCompletionError::UploadQueueShutDownOrStopped) => {
     998            0 :                 return Err(CompactionError::ShuttingDown)
     999              :             }
    1000              :         }
    1001              : 
    1002            0 :         fail::fail_point!("compact-shard-ancestors-persistent");
    1003            0 : 
    1004            0 :         Ok(())
    1005            0 :     }
    1006              : 
    1007              :     /// Update the LayerVisibilityHint of layers covered by image layers, based on whether there is
    1008              :     /// an image layer between them and the most recent readable LSN (branch point or tip of timeline).  The
    1009              :     /// purpose of the visibility hint is to record which layers need to be available to service reads.
    1010              :     ///
    1011              :     /// The result may be used as an input to eviction and secondary downloads to de-prioritize layers
    1012              :     /// that we know won't be needed for reads.
    1013          460 :     pub(super) async fn update_layer_visibility(
    1014          460 :         &self,
    1015          460 :     ) -> Result<(), super::layer_manager::Shutdown> {
    1016          460 :         let head_lsn = self.get_last_record_lsn();
    1017              : 
    1018              :         // We will sweep through layers in reverse-LSN order.  We only do historic layers.  L0 deltas
    1019              :         // are implicitly left visible, because LayerVisibilityHint's default is Visible, and we never modify it here.
    1020              :         // Note that L0 deltas _can_ be covered by image layers, but we consider them 'visible' because we anticipate that
    1021              :         // they will be subject to L0->L1 compaction in the near future.
    1022          460 :         let layer_manager = self.layers.read().await;
    1023          460 :         let layer_map = layer_manager.layer_map()?;
    1024              : 
    1025          460 :         let readable_points = {
    1026          460 :             let children = self.gc_info.read().unwrap().retain_lsns.clone();
    1027          460 : 
    1028          460 :             let mut readable_points = Vec::with_capacity(children.len() + 1);
    1029          460 :             for (child_lsn, _child_timeline_id, is_offloaded) in &children {
    1030            0 :                 if *is_offloaded == MaybeOffloaded::Yes {
    1031            0 :                     continue;
    1032            0 :                 }
    1033            0 :                 readable_points.push(*child_lsn);
    1034              :             }
    1035          460 :             readable_points.push(head_lsn);
    1036          460 :             readable_points
    1037          460 :         };
    1038          460 : 
    1039          460 :         let (layer_visibility, covered) = layer_map.get_visibility(readable_points);
    1040         1168 :         for (layer_desc, visibility) in layer_visibility {
     1041          708 :             // FIXME: a more efficient bulk zip() through the layers rather than N log N lookups of each one
    1042          708 :             let layer = layer_manager.get_from_desc(&layer_desc);
    1043          708 :             layer.set_visibility(visibility);
    1044          708 :         }
    1045              : 
    1046              :         // TODO: publish our covered KeySpace to our parent, so that when they update their visibility, they can
    1047              :         // avoid assuming that everything at a branch point is visible.
    1048          460 :         drop(covered);
    1049          460 :         Ok(())
    1050          460 :     }
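                       :     // A simplified, LSN-only model of the coverage computation (assumed helper, not
                       :     // the real LayerMap API): a layer is covered, and hence may be marked not
                       :     // visible, if for every readable point at or above its end LSN some image layer
                       :     // sits between the two. The real computation also tracks the key dimension.
                       :     //
                       :     //     fn is_covered(layer_end: u64, image_lsns: &[u64], readable_points: &[u64]) -> bool {
                       :     //         readable_points
                       :     //             .iter()
                       :     //             .copied()
                       :     //             .filter(|&rp| rp >= layer_end)
                       :     //             .all(|rp| image_lsns.iter().copied().any(|img| img >= layer_end && img <= rp))
                       :     //     }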
    1051              : 
    1052              :     /// Collect a bunch of Level 0 layer files, and compact and reshuffle them as
     1053              :     /// Level 1 files. Returns whether the L0 layers are fully compacted.
    1054          728 :     async fn compact_level0(
    1055          728 :         self: &Arc<Self>,
    1056          728 :         target_file_size: u64,
    1057          728 :         force_compaction_ignore_threshold: bool,
    1058          728 :         ctx: &RequestContext,
    1059          728 :     ) -> Result<CompactionOutcome, CompactionError> {
    1060              :         let CompactLevel0Phase1Result {
    1061          728 :             new_layers,
    1062          728 :             deltas_to_compact,
    1063          728 :             outcome,
    1064              :         } = {
    1065          728 :             let phase1_span = info_span!("compact_level0_phase1");
    1066          728 :             let ctx = ctx.attached_child();
    1067          728 :             let mut stats = CompactLevel0Phase1StatsBuilder {
    1068          728 :                 version: Some(2),
    1069          728 :                 tenant_id: Some(self.tenant_shard_id),
    1070          728 :                 timeline_id: Some(self.timeline_id),
    1071          728 :                 ..Default::default()
    1072          728 :             };
    1073          728 : 
    1074          728 :             let begin = tokio::time::Instant::now();
    1075          728 :             let phase1_layers_locked = self.layers.read().await;
    1076          728 :             let now = tokio::time::Instant::now();
    1077          728 :             stats.read_lock_acquisition_micros =
    1078          728 :                 DurationRecorder::Recorded(RecordedDuration(now - begin), now);
    1079          728 :             self.compact_level0_phase1(
    1080          728 :                 phase1_layers_locked,
    1081          728 :                 stats,
    1082          728 :                 target_file_size,
    1083          728 :                 force_compaction_ignore_threshold,
    1084          728 :                 &ctx,
    1085          728 :             )
    1086          728 :             .instrument(phase1_span)
    1087          728 :             .await?
    1088              :         };
    1089              : 
    1090          728 :         if new_layers.is_empty() && deltas_to_compact.is_empty() {
    1091              :             // nothing to do
    1092          672 :             return Ok(CompactionOutcome::Done);
    1093           56 :         }
    1094           56 : 
    1095           56 :         self.finish_compact_batch(&new_layers, &Vec::new(), &deltas_to_compact)
    1096           56 :             .await?;
    1097           56 :         Ok(outcome)
    1098          728 :     }
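                       :     // How a caller might drive L0 compaction to completion (sketch; the scheduling
                       :     // and the exact set of CompactionOutcome variants here are assumptions):
                       :     //
                       :     //     loop {
                       :     //         match timeline.compact_level0(target_file_size, false, &ctx).await? {
                       :     //             CompactionOutcome::Pending => continue, // hit the size limit, L0s remain
                       :     //             _ => break, // Done, or another terminal outcome
                       :     //         }
                       :     //     }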
    1099              : 
     1100              :     /// First phase of Level 0 compaction, explained in the [`Self::compact_legacy`] comment.
    1101          728 :     async fn compact_level0_phase1<'a>(
    1102          728 :         self: &'a Arc<Self>,
    1103          728 :         guard: tokio::sync::RwLockReadGuard<'a, LayerManager>,
    1104          728 :         mut stats: CompactLevel0Phase1StatsBuilder,
    1105          728 :         target_file_size: u64,
    1106          728 :         force_compaction_ignore_threshold: bool,
    1107          728 :         ctx: &RequestContext,
    1108          728 :     ) -> Result<CompactLevel0Phase1Result, CompactionError> {
    1109          728 :         stats.read_lock_held_spawn_blocking_startup_micros =
    1110          728 :             stats.read_lock_acquisition_micros.till_now(); // set by caller
    1111          728 :         let layers = guard.layer_map()?;
    1112          728 :         let level0_deltas = layers.level0_deltas();
    1113          728 :         stats.level0_deltas_count = Some(level0_deltas.len());
    1114          728 : 
    1115          728 :         // Only compact if enough layers have accumulated.
    1116          728 :         let threshold = self.get_compaction_threshold();
    1117          728 :         if level0_deltas.is_empty() || level0_deltas.len() < threshold {
    1118          672 :             if force_compaction_ignore_threshold {
    1119            0 :                 if !level0_deltas.is_empty() {
    1120            0 :                     info!(
    1121            0 :                         level0_deltas = level0_deltas.len(),
    1122            0 :                         threshold, "too few deltas to compact, but forcing compaction"
    1123              :                     );
    1124              :                 } else {
    1125            0 :                     info!(
    1126            0 :                         level0_deltas = level0_deltas.len(),
    1127            0 :                         threshold, "too few deltas to compact, cannot force compaction"
    1128              :                     );
    1129            0 :                     return Ok(CompactLevel0Phase1Result::default());
    1130              :                 }
    1131              :             } else {
    1132          672 :                 debug!(
    1133            0 :                     level0_deltas = level0_deltas.len(),
    1134            0 :                     threshold, "too few deltas to compact"
    1135              :                 );
    1136          672 :                 return Ok(CompactLevel0Phase1Result::default());
    1137              :             }
    1138           56 :         }
    1139              : 
    1140           56 :         let mut level0_deltas = level0_deltas
    1141           56 :             .iter()
    1142          804 :             .map(|x| guard.get_from_desc(x))
    1143           56 :             .collect::<Vec<_>>();
    1144           56 : 
    1145           56 :         // Gather the files to compact in this iteration.
    1146           56 :         //
    1147           56 :         // Start with the oldest Level 0 delta file, and collect any other
    1148           56 :         // level 0 files that form a contiguous sequence, such that the end
    1149           56 :         // LSN of previous file matches the start LSN of the next file.
    1150           56 :         //
    1151           56 :         // Note that if the files don't form such a sequence, we might
    1152           56 :         // "compact" just a single file. That's a bit pointless, but it allows
    1153           56 :         // us to get rid of the level 0 file, and compact the other files on
     1154           56 :         // the next iteration. This could probably be made smarter, but such
    1155           56 :         // "gaps" in the sequence of level 0 files should only happen in case
    1156           56 :         // of a crash, partial download from cloud storage, or something like
    1157           56 :         // that, so it's not a big deal in practice.
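                       :         // A minimal sketch of this contiguity rule on plain LSN ranges (illustrative
                       :         // helper, not part of this function):
                       :         //
                       :         //     fn contiguous_prefix(mut ranges: Vec<std::ops::Range<u64>>) -> Vec<std::ops::Range<u64>> {
                       :         //         ranges.sort_by_key(|r| r.start);
                       :         //         let mut out: Vec<std::ops::Range<u64>> = Vec::new();
                       :         //         for r in ranges {
                       :         //             match out.last() {
                       :         //                 Some(prev) if prev.end != r.start => break, // gap: stop the run here
                       :         //                 _ => out.push(r),
                       :         //             }
                       :         //         }
                       :         //         out
                       :         //     }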
    1158         1496 :         level0_deltas.sort_by_key(|l| l.layer_desc().lsn_range.start);
    1159           56 :         let mut level0_deltas_iter = level0_deltas.iter();
    1160           56 : 
    1161           56 :         let first_level0_delta = level0_deltas_iter.next().unwrap();
    1162           56 :         let mut prev_lsn_end = first_level0_delta.layer_desc().lsn_range.end;
    1163           56 :         let mut deltas_to_compact = Vec::with_capacity(level0_deltas.len());
    1164           56 : 
    1165           56 :         // Accumulate the size of layers in `deltas_to_compact`
    1166           56 :         let mut deltas_to_compact_bytes = 0;
    1167           56 : 
    1168           56 :         // Under normal circumstances, we will accumulate up to compaction_upper_limit L0s of size
    1169           56 :         // checkpoint_distance each.  To avoid edge cases using extra system resources, bound our
    1170           56 :         // work in this function to only operate on this much delta data at once.
    1171           56 :         //
    1172           56 :         // In general, compaction_threshold should be <= compaction_upper_limit, but in case that
    1173           56 :         // the constraint is not respected, we use the larger of the two.
    1174           56 :         let delta_size_limit = std::cmp::max(
    1175           56 :             self.get_compaction_upper_limit(),
    1176           56 :             self.get_compaction_threshold(),
    1177           56 :         ) as u64
    1178           56 :             * std::cmp::max(self.get_checkpoint_distance(), DEFAULT_CHECKPOINT_DISTANCE);
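                       :         // Worked example (illustrative numbers, not necessarily the configured
                       :         // defaults): with compaction_upper_limit = 20 and a checkpoint_distance of
                       :         // 256 MiB, delta_size_limit = 20 * 256 MiB = 5 GiB of L0 data per pass.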
    1179           56 : 
    1180           56 :         let mut fully_compacted = true;
    1181           56 : 
    1182           56 :         deltas_to_compact.push(first_level0_delta.download_and_keep_resident().await?);
    1183          804 :         for l in level0_deltas_iter {
    1184          748 :             let lsn_range = &l.layer_desc().lsn_range;
    1185          748 : 
    1186          748 :             if lsn_range.start != prev_lsn_end {
    1187            0 :                 break;
    1188          748 :             }
    1189          748 :             deltas_to_compact.push(l.download_and_keep_resident().await?);
    1190          748 :             deltas_to_compact_bytes += l.metadata().file_size;
    1191          748 :             prev_lsn_end = lsn_range.end;
    1192          748 : 
    1193          748 :             if deltas_to_compact_bytes >= delta_size_limit {
    1194            0 :                 info!(
    1195            0 :                     l0_deltas_selected = deltas_to_compact.len(),
    1196            0 :                     l0_deltas_total = level0_deltas.len(),
    1197            0 :                     "L0 compaction picker hit max delta layer size limit: {}",
    1198              :                     delta_size_limit
    1199              :                 );
    1200            0 :                 fully_compacted = false;
    1201            0 : 
    1202            0 :                 // Proceed with compaction, but only a subset of L0s
    1203            0 :                 break;
    1204          748 :             }
    1205              :         }
    1206           56 :         let lsn_range = Range {
    1207           56 :             start: deltas_to_compact
    1208           56 :                 .first()
    1209           56 :                 .unwrap()
    1210           56 :                 .layer_desc()
    1211           56 :                 .lsn_range
    1212           56 :                 .start,
    1213           56 :             end: deltas_to_compact.last().unwrap().layer_desc().lsn_range.end,
    1214           56 :         };
    1215           56 : 
    1216           56 :         info!(
    1217            0 :             "Starting Level0 compaction in LSN range {}-{} for {} layers ({} deltas in total)",
    1218            0 :             lsn_range.start,
    1219            0 :             lsn_range.end,
    1220            0 :             deltas_to_compact.len(),
    1221            0 :             level0_deltas.len()
    1222              :         );
    1223              : 
    1224          804 :         for l in deltas_to_compact.iter() {
    1225          804 :             info!("compact includes {l}");
    1226              :         }
    1227              : 
    1228              :         // We don't need the original list of layers anymore. Drop it so that
    1229              :         // we don't accidentally use it later in the function.
    1230           56 :         drop(level0_deltas);
    1231           56 : 
    1232           56 :         stats.read_lock_held_prerequisites_micros = stats
    1233           56 :             .read_lock_held_spawn_blocking_startup_micros
    1234           56 :             .till_now();
    1235              : 
    1236              :         // TODO: replace with streaming k-merge
    1237           56 :         let all_keys = {
    1238           56 :             let mut all_keys = Vec::new();
    1239          804 :             for l in deltas_to_compact.iter() {
    1240          804 :                 if self.cancel.is_cancelled() {
    1241            0 :                     return Err(CompactionError::ShuttingDown);
    1242          804 :                 }
    1243          804 :                 let delta = l.get_as_delta(ctx).await.map_err(CompactionError::Other)?;
    1244          804 :                 let keys = delta
    1245          804 :                     .index_entries(ctx)
    1246          804 :                     .await
    1247          804 :                     .map_err(CompactionError::Other)?;
    1248          804 :                 all_keys.extend(keys);
    1249              :             }
     1250              :             // The current stdlib sorting implementation is designed to be
     1251              :             // particularly fast when the slice is made up of sorted sub-ranges.
    1252      8847562 :             all_keys.sort_by_key(|DeltaEntry { key, lsn, .. }| (*key, *lsn));
    1253           56 :             all_keys
    1254           56 :         };
    1255           56 : 
    1256           56 :         stats.read_lock_held_key_sort_micros = stats.read_lock_held_prerequisites_micros.till_now();
    1257              : 
    1258              :         // Determine N largest holes where N is number of compacted layers. The vec is sorted by key range start.
    1259              :         //
    1260              :         // A hole is a key range for which this compaction doesn't have any WAL records.
    1261              :         // Our goal in this compaction iteration is to avoid creating L1s that, in terms of their key range,
    1262              :         // cover the hole, but actually don't contain any WAL records for that key range.
    1263              :         // The reason is that the mere stack of L1s (`count_deltas`) triggers image layer creation (`create_image_layers`).
    1264              :         // That image layer creation would be useless for a hole range covered by L1s that don't contain any WAL records.
    1265              :         //
    1266              :         // The algorithm chooses holes as follows.
     1267              :         // - Slide a two-key window over the keys in key order to get the hole range (= the distance between two consecutive keys).
    1268              :         // - Filter: min threshold on range length
    1269              :         // - Rank: by coverage size (=number of image layers required to reconstruct each key in the range for which we have any data)
    1270              :         //
    1271              :         // For more details, intuition, and some ASCII art see https://github.com/neondatabase/neon/pull/3597#discussion_r1112704451
    1272              :         #[derive(PartialEq, Eq)]
    1273              :         struct Hole {
    1274              :             key_range: Range<Key>,
    1275              :             coverage_size: usize,
    1276              :         }
    1277           56 :         let holes: Vec<Hole> = {
    1278              :             use std::cmp::Ordering;
    1279              :             impl Ord for Hole {
    1280            0 :                 fn cmp(&self, other: &Self) -> Ordering {
    1281            0 :                     self.coverage_size.cmp(&other.coverage_size).reverse()
    1282            0 :                 }
    1283              :             }
    1284              :             impl PartialOrd for Hole {
    1285            0 :                 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
    1286            0 :                     Some(self.cmp(other))
    1287            0 :                 }
    1288              :             }
    1289           56 :             let max_holes = deltas_to_compact.len();
    1290           56 :             let last_record_lsn = self.get_last_record_lsn();
    1291           56 :             let min_hole_range = (target_file_size / page_cache::PAGE_SZ as u64) as i128;
    1292           56 :             let min_hole_coverage_size = 3; // TODO: something more flexible?
    1293           56 :                                             // min-heap (reserve space for one more element added before eviction)
    1294           56 :             let mut heap: BinaryHeap<Hole> = BinaryHeap::with_capacity(max_holes + 1);
    1295           56 :             let mut prev: Option<Key> = None;
    1296              : 
    1297      4128076 :             for &DeltaEntry { key: next_key, .. } in all_keys.iter() {
    1298      4128076 :                 if let Some(prev_key) = prev {
     1299            0 :                         // Measuring a hole by simply subtracting the i128 representations of the key range boundaries
     1300            0 :                         // is not very meaningful, because the largest holes correspond to field1/field2 changes.
     1301            0 :                         // We are mostly interested in eliminating holes that cause generation of excessive image layers.
     1302            0 :                         // That is why it is better to measure the size of a hole as the number of covering image layers.
    1303              :                     {
    1304            0 :                         let key_range = prev_key..next_key;
    1305            0 :                         // Measuring hole by just subtraction of i128 representation of key range boundaries
    1306            0 :                         // has not so much sense, because largest holes will corresponds field1/field2 changes.
    1307            0 :                         // But we are mostly interested to eliminate holes which cause generation of excessive image layers.
    1308            0 :                         // That is why it is better to measure size of hole as number of covering image layers.
    1309            0 :                         let coverage_size =
    1310            0 :                             layers.image_coverage(&key_range, last_record_lsn).len();
    1311            0 :                         if coverage_size >= min_hole_coverage_size {
    1312            0 :                             heap.push(Hole {
    1313            0 :                                 key_range,
    1314            0 :                                 coverage_size,
    1315            0 :                             });
    1316            0 :                             if heap.len() > max_holes {
    1317            0 :                                 heap.pop(); // remove smallest hole
    1318            0 :                             }
    1319            0 :                         }
    1320      4128020 :                     }
    1321           56 :                 }
    1322      4128076 :                 prev = Some(next_key.next());
    1323              :             }
    1324           56 :             let mut holes = heap.into_vec();
    1325           56 :             holes.sort_unstable_by_key(|hole| hole.key_range.start);
    1326           56 :             holes
    1327           56 :         };
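                       :         // The same top-N-gaps pattern in isolation (sketch: plain u64 keys instead of
                       :         // Key, and gap length as the ranking instead of the image-coverage size above):
                       :         //
                       :         //     use std::cmp::Reverse;
                       :         //     use std::collections::BinaryHeap;
                       :         //
                       :         //     fn largest_gaps(sorted_keys: &[u64], min_len: u64, max_holes: usize) -> Vec<(u64, u64)> {
                       :         //         let mut heap = BinaryHeap::new(); // min-heap via Reverse, keyed on gap length
                       :         //         for w in sorted_keys.windows(2) {
                       :         //             let gap = w[1] - w[0];
                       :         //             if gap >= min_len {
                       :         //                 heap.push(Reverse((gap, w[0], w[1])));
                       :         //                 if heap.len() > max_holes {
                       :         //                     heap.pop(); // evict the smallest gap
                       :         //                 }
                       :         //             }
                       :         //         }
                       :         //         let mut holes: Vec<_> = heap.into_iter().map(|Reverse((_, s, e))| (s, e)).collect();
                       :         //         holes.sort_unstable_by_key(|h| h.0);
                       :         //         holes
                       :         //     }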
    1328           56 :         stats.read_lock_held_compute_holes_micros = stats.read_lock_held_key_sort_micros.till_now();
    1329           56 :         drop_rlock(guard);
    1330           56 : 
    1331           56 :         if self.cancel.is_cancelled() {
    1332            0 :             return Err(CompactionError::ShuttingDown);
    1333           56 :         }
    1334           56 : 
    1335           56 :         stats.read_lock_drop_micros = stats.read_lock_held_compute_holes_micros.till_now();
    1336              : 
    1337              :         // This iterator walks through all key-value pairs from all the layers
    1338              :         // we're compacting, in key, LSN order.
    1339              :         // If there's both a Value::Image and Value::WalRecord for the same (key,lsn),
    1340              :         // then the Value::Image is ordered before Value::WalRecord.
    1341           56 :         let mut all_values_iter = {
    1342           56 :             let mut deltas = Vec::with_capacity(deltas_to_compact.len());
    1343          804 :             for l in deltas_to_compact.iter() {
    1344          804 :                 let l = l.get_as_delta(ctx).await.map_err(CompactionError::Other)?;
    1345          804 :                 deltas.push(l);
    1346              :             }
    1347           56 :             MergeIterator::create(&deltas, &[], ctx)
    1348           56 :         };
    1349           56 : 
    1350           56 :         // This iterator walks through all keys and is needed to calculate size used by each key
    1351           56 :         let mut all_keys_iter = all_keys
    1352           56 :             .iter()
    1353      4128076 :             .map(|DeltaEntry { key, lsn, size, .. }| (*key, *lsn, *size))
    1354      4128020 :             .coalesce(|mut prev, cur| {
     1355      4128020 :                 // Coalesce entries that belong to the same key.
    1356      4128020 :                 // This ensures that compaction doesn't put them
    1357      4128020 :                 // into different layer files.
    1358      4128020 :                 // Still limit this by the target file size,
    1359      4128020 :                 // so that we keep the size of the files in
    1360      4128020 :                 // check.
    1361      4128020 :                 if prev.0 == cur.0 && prev.2 < target_file_size {
    1362        80076 :                     prev.2 += cur.2;
    1363        80076 :                     Ok(prev)
    1364              :                 } else {
    1365      4047944 :                     Err((prev, cur))
    1366              :                 }
    1367      4128020 :             });
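                       :         // The coalescing rule in isolation (sketch, assuming itertools is in scope and
                       :         // entries are (key, size) pairs sorted by key):
                       :         //
                       :         //     use itertools::Itertools;
                       :         //
                       :         //     fn coalesced(entries: Vec<(u64, u64)>, target: u64) -> Vec<(u64, u64)> {
                       :         //         entries
                       :         //             .into_iter()
                       :         //             .coalesce(|prev, cur| {
                       :         //                 // merge consecutive entries for the same key, up to the target size
                       :         //                 if prev.0 == cur.0 && prev.1 < target {
                       :         //                     Ok((prev.0, prev.1 + cur.1))
                       :         //                 } else {
                       :         //                     Err((prev, cur))
                       :         //                 }
                       :         //             })
                       :         //             .collect()
                       :         //     }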
    1368           56 : 
    1369           56 :         // Merge the contents of all the input delta layers into a new set
    1370           56 :         // of delta layers, based on the current partitioning.
    1371           56 :         //
     1372           56 :         // We split the new delta layers on the key dimension. We iterate through the key space and, for each key, check whether adding it to the current output layer would make the layer too large. If so, flush the current output layer and start a new one.
    1373           56 :         // It's possible that there is a single key with so many page versions that storing all of them in a single layer file
    1374           56 :         // would be too large. In that case, we also split on the LSN dimension.
    1375           56 :         //
    1376           56 :         // LSN
    1377           56 :         //  ^
    1378           56 :         //  |
    1379           56 :         //  | +-----------+            +--+--+--+--+
    1380           56 :         //  | |           |            |  |  |  |  |
    1381           56 :         //  | +-----------+            |  |  |  |  |
    1382           56 :         //  | |           |            |  |  |  |  |
    1383           56 :         //  | +-----------+     ==>    |  |  |  |  |
    1384           56 :         //  | |           |            |  |  |  |  |
    1385           56 :         //  | +-----------+            |  |  |  |  |
    1386           56 :         //  | |           |            |  |  |  |  |
    1387           56 :         //  | +-----------+            +--+--+--+--+
    1388           56 :         //  |
    1389           56 :         //  +--------------> key
    1390           56 :         //
    1391           56 :         //
    1392           56 :         // If one key (X) has a lot of page versions:
    1393           56 :         //
    1394           56 :         // LSN
    1395           56 :         //  ^
    1396           56 :         //  |                                 (X)
    1397           56 :         //  | +-----------+            +--+--+--+--+
    1398           56 :         //  | |           |            |  |  |  |  |
    1399           56 :         //  | +-----------+            |  |  +--+  |
    1400           56 :         //  | |           |            |  |  |  |  |
    1401           56 :         //  | +-----------+     ==>    |  |  |  |  |
    1402           56 :         //  | |           |            |  |  +--+  |
    1403           56 :         //  | +-----------+            |  |  |  |  |
    1404           56 :         //  | |           |            |  |  |  |  |
    1405           56 :         //  | +-----------+            +--+--+--+--+
    1406           56 :         //  |
    1407           56 :         //  +--------------> key
    1408           56 :         // TODO: this actually divides the layers into fixed-size chunks, not
    1409           56 :         // based on the partitioning.
    1410           56 :         //
    1411           56 :         // TODO: we should also opportunistically materialize and
    1412           56 :         // garbage collect what we can.
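                       :         // The layer-cut predicate used in the loop below, in isolation (sketch of the
                       :         // conditions only; the names are illustrative):
                       :         //
                       :         //     fn should_flush(written: u64, pending_key_size: u64, target: u64,
                       :         //                     is_dup_layer: bool, contains_hole: bool) -> bool {
                       :         //         // cut when the key is being LSN-split, when the next key would
                       :         //         // overflow the target size, or when the output would span a hole
                       :         //         is_dup_layer || written + pending_key_size > target || contains_hole
                       :         //     }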
    1413           56 :         let mut new_layers = Vec::new();
    1414           56 :         let mut prev_key: Option<Key> = None;
    1415           56 :         let mut writer: Option<DeltaLayerWriter> = None;
    1416           56 :         let mut key_values_total_size = 0u64;
    1417           56 :         let mut dup_start_lsn: Lsn = Lsn::INVALID; // start LSN of layer containing values of the single key
    1418           56 :         let mut dup_end_lsn: Lsn = Lsn::INVALID; // end LSN of layer containing values of the single key
    1419           56 :         let mut next_hole = 0; // index of next hole in holes vector
    1420           56 : 
    1421           56 :         let mut keys = 0;
    1422              : 
    1423      4128132 :         while let Some((key, lsn, value)) = all_values_iter
    1424      4128132 :             .next()
    1425      4128132 :             .await
    1426      4128132 :             .map_err(CompactionError::Other)?
    1427              :         {
    1428      4128076 :             keys += 1;
    1429      4128076 : 
    1430      4128076 :             if keys % 32_768 == 0 && self.cancel.is_cancelled() {
     1431              :                 // Avoid hitting the cancellation token on every key. In benches, we end up
     1432              :                 // shuffling on the order of a million keys per layer; this means we'll check it
     1433              :                 // around tens of times per layer.
    1434            0 :                 return Err(CompactionError::ShuttingDown);
    1435      4128076 :             }
    1436      4128076 : 
    1437      4128076 :             let same_key = prev_key == Some(key);
     1438      4128076 :             // We need to check key boundaries once we reach the next key, or the end of a dup layer for the same key
    1439      4128076 :             if !same_key || lsn == dup_end_lsn {
    1440      4048000 :                 let mut next_key_size = 0u64;
    1441      4048000 :                 let is_dup_layer = dup_end_lsn.is_valid();
    1442      4048000 :                 dup_start_lsn = Lsn::INVALID;
    1443      4048000 :                 if !same_key {
    1444      4048000 :                     dup_end_lsn = Lsn::INVALID;
    1445      4048000 :                 }
    1446              :                 // Determine size occupied by this key. We stop at next key or when size becomes larger than target_file_size
    1447      4048000 :                 for (next_key, next_lsn, next_size) in all_keys_iter.by_ref() {
    1448      4048000 :                     next_key_size = next_size;
    1449      4048000 :                     if key != next_key {
    1450      4047944 :                         if dup_end_lsn.is_valid() {
    1451            0 :                             // We are writting segment with duplicates:
    1452            0 :                             // place all remaining values of this key in separate segment
    1453            0 :                             dup_start_lsn = dup_end_lsn; // new segments starts where old stops
    1454            0 :                             dup_end_lsn = lsn_range.end; // there are no more values of this key till end of LSN range
    1455      4047944 :                         }
    1456      4047944 :                         break;
    1457           56 :                     }
    1458           56 :                     key_values_total_size += next_size;
     1459           56 :                     // Check if it is time to split the segment: i.e. if the total size of this key's values exceeds the target file size.
     1460           56 :                     // We need to avoid generating empty segments when next_size > target_file_size.
    1461           56 :                     if key_values_total_size > target_file_size && lsn != next_lsn {
    1462              :                         // Split key between multiple layers: such layer can contain only single key
    1463            0 :                         dup_start_lsn = if dup_end_lsn.is_valid() {
    1464            0 :                             dup_end_lsn // new segment with duplicates starts where old one stops
    1465              :                         } else {
    1466            0 :                             lsn // start with the first LSN for this key
    1467              :                         };
    1468            0 :                         dup_end_lsn = next_lsn; // upper LSN boundary is exclusive
    1469            0 :                         break;
    1470           56 :                     }
    1471              :                 }
    1472              :                 // handle case when loop reaches last key: in this case dup_end is non-zero but dup_start is not set.
    1473      4048000 :                 if dup_end_lsn.is_valid() && !dup_start_lsn.is_valid() {
    1474            0 :                     dup_start_lsn = dup_end_lsn;
    1475            0 :                     dup_end_lsn = lsn_range.end;
    1476      4048000 :                 }
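                       :                 // Concrete example of the LSN slicing above (illustrative numbers):
                       :                 // a single key with versions at LSNs 10..90, where roughly 30 LSNs of
                       :                 // values fill one target-sized file, is cut into dup layers [10,40),
                       :                 // [40,70), and a final [70, lsn_range.end) slice.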
    1477      4048000 :                 if writer.is_some() {
    1478      4047944 :                     let written_size = writer.as_mut().unwrap().size();
    1479      4047944 :                     let contains_hole =
    1480      4047944 :                         next_hole < holes.len() && key >= holes[next_hole].key_range.end;
    1481              :                     // check if key cause layer overflow or contains hole...
    1482      4047944 :                     if is_dup_layer
    1483      4047944 :                         || dup_end_lsn.is_valid()
    1484      4047944 :                         || written_size + key_values_total_size > target_file_size
    1485      4047384 :                         || contains_hole
    1486              :                     {
    1487              :                         // ... if so, flush previous layer and prepare to write new one
    1488          560 :                         let (desc, path) = writer
    1489          560 :                             .take()
    1490          560 :                             .unwrap()
    1491          560 :                             .finish(prev_key.unwrap().next(), ctx)
    1492          560 :                             .await
    1493          560 :                             .map_err(CompactionError::Other)?;
    1494          560 :                         let new_delta = Layer::finish_creating(self.conf, self, desc, &path)
    1495          560 :                             .map_err(CompactionError::Other)?;
    1496              : 
    1497          560 :                         new_layers.push(new_delta);
    1498          560 :                         writer = None;
    1499          560 : 
    1500          560 :                         if contains_hole {
    1501            0 :                             // skip hole
    1502            0 :                             next_hole += 1;
    1503          560 :                         }
    1504      4047384 :                     }
    1505           56 :                 }
    1506              :                 // Remember size of key value because at next iteration we will access next item
    1507      4048000 :                 key_values_total_size = next_key_size;
    1508        80076 :             }
    1509      4128076 :             fail_point!("delta-layer-writer-fail-before-finish", |_| {
    1510            0 :                 Err(CompactionError::Other(anyhow::anyhow!(
    1511            0 :                     "failpoint delta-layer-writer-fail-before-finish"
    1512            0 :                 )))
    1513      4128076 :             });
    1514              : 
    1515      4128076 :             if !self.shard_identity.is_key_disposable(&key) {
    1516      4128076 :                 if writer.is_none() {
    1517          616 :                     if self.cancel.is_cancelled() {
    1518              :                         // to be somewhat responsive to cancellation, check for each new layer
    1519            0 :                         return Err(CompactionError::ShuttingDown);
    1520          616 :                     }
     1521              :                     // Create the writer if not initialized yet
    1522          616 :                     writer = Some(
    1523              :                         DeltaLayerWriter::new(
    1524          616 :                             self.conf,
    1525          616 :                             self.timeline_id,
    1526          616 :                             self.tenant_shard_id,
    1527          616 :                             key,
    1528          616 :                             if dup_end_lsn.is_valid() {
    1529              :                                 // this is a layer containing slice of values of the same key
    1530            0 :                                 debug!("Create new dup layer {}..{}", dup_start_lsn, dup_end_lsn);
    1531            0 :                                 dup_start_lsn..dup_end_lsn
    1532              :                             } else {
    1533          616 :                                 debug!("Create new layer {}..{}", lsn_range.start, lsn_range.end);
    1534          616 :                                 lsn_range.clone()
    1535              :                             },
    1536          616 :                             ctx,
    1537          616 :                         )
    1538          616 :                         .await
    1539          616 :                         .map_err(CompactionError::Other)?,
    1540              :                     );
    1541              : 
    1542          616 :                     keys = 0;
    1543      4127460 :                 }
    1544              : 
    1545      4128076 :                 writer
    1546      4128076 :                     .as_mut()
    1547      4128076 :                     .unwrap()
    1548      4128076 :                     .put_value(key, lsn, value, ctx)
    1549      4128076 :                     .await
    1550      4128076 :                     .map_err(CompactionError::Other)?;
    1551              :             } else {
    1552            0 :                 let owner = self.shard_identity.get_shard_number(&key);
    1553            0 : 
    1554            0 :                 // This happens after a shard split, when we're compacting an L0 created by our parent shard
    1555            0 :                 debug!("dropping key {key} during compaction (it belongs on shard {owner})");
    1556              :             }
    1557              : 
    1558      4128076 :             if !new_layers.is_empty() {
    1559        39572 :                 fail_point!("after-timeline-compacted-first-L1");
    1560      4088504 :             }
    1561              : 
    1562      4128076 :             prev_key = Some(key);
    1563              :         }
    1564           56 :         if let Some(writer) = writer {
    1565           56 :             let (desc, path) = writer
    1566           56 :                 .finish(prev_key.unwrap().next(), ctx)
    1567           56 :                 .await
    1568           56 :                 .map_err(CompactionError::Other)?;
    1569           56 :             let new_delta = Layer::finish_creating(self.conf, self, desc, &path)
    1570           56 :                 .map_err(CompactionError::Other)?;
    1571           56 :             new_layers.push(new_delta);
    1572            0 :         }
    1573              : 
    1574              :         // Sync layers
    1575           56 :         if !new_layers.is_empty() {
    1576              :             // Print a warning if the created layer is larger than double the target size
    1577              :             // Add two pages for potential overhead. This should in theory be already
    1578              :             // accounted for in the target calculation, but for very small targets,
    1579              :             // we still might easily hit the limit otherwise.
    1580           56 :             let warn_limit = target_file_size * 2 + page_cache::PAGE_SZ as u64 * 2;
    1581          616 :             for layer in new_layers.iter() {
    1582          616 :                 if layer.layer_desc().file_size > warn_limit {
    1583            0 :                     warn!(
    1584              :                         %layer,
    1585            0 :                         "created delta file of size {} larger than double of target of {target_file_size}", layer.layer_desc().file_size
    1586              :                     );
    1587          616 :                 }
    1588              :             }
    1589              : 
    1590              :             // The writer.finish() above already did the fsync of the inodes.
    1591              :             // We just need to fsync the directory in which these inodes are linked,
    1592              :             // which we know to be the timeline directory.
    1593              :             //
     1594              :             // We use fatal_err() below because after writer.finish() returns with success,
    1595              :             // the in-memory state of the filesystem already has the layer file in its final place,
    1596              :             // and subsequent pageserver code could think it's durable while it really isn't.
    1597           56 :             let timeline_dir = VirtualFile::open(
    1598           56 :                 &self
    1599           56 :                     .conf
    1600           56 :                     .timeline_path(&self.tenant_shard_id, &self.timeline_id),
    1601           56 :                 ctx,
    1602           56 :             )
    1603           56 :             .await
    1604           56 :             .fatal_err("VirtualFile::open for timeline dir fsync");
    1605           56 :             timeline_dir
    1606           56 :                 .sync_all()
    1607           56 :                 .await
    1608           56 :                 .fatal_err("VirtualFile::sync_all timeline dir");
    1609            0 :         }
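                       :         // The general durable-file-creation pattern this implements, in isolation
                       :         // (std-only sketch; opening a directory for sync_all works on Linux):
                       :         //
                       :         //     use std::fs::File;
                       :         //
                       :         //     fn durable_create(dir: &std::path::Path, name: &str, data: &[u8]) -> std::io::Result<()> {
                       :         //         let path = dir.join(name);
                       :         //         std::fs::write(&path, data)?;
                       :         //         File::open(&path)?.sync_all()?; // flush file contents + inode
                       :         //         File::open(dir)?.sync_all()?;   // flush the directory entry
                       :         //         Ok(())
                       :         //     }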
    1610              : 
    1611           56 :         stats.write_layer_files_micros = stats.read_lock_drop_micros.till_now();
    1612           56 :         stats.new_deltas_count = Some(new_layers.len());
    1613          616 :         stats.new_deltas_size = Some(new_layers.iter().map(|l| l.layer_desc().file_size).sum());
    1614           56 : 
    1615           56 :         match TryInto::<CompactLevel0Phase1Stats>::try_into(stats)
    1616           56 :             .and_then(|stats| serde_json::to_string(&stats).context("serde_json::to_string"))
    1617              :         {
    1618           56 :             Ok(stats_json) => {
    1619           56 :                 info!(
    1620            0 :                     stats_json = stats_json.as_str(),
    1621            0 :                     "compact_level0_phase1 stats available"
    1622              :                 )
    1623              :             }
    1624            0 :             Err(e) => {
    1625            0 :                 warn!("compact_level0_phase1 stats failed to serialize: {:#}", e);
    1626              :             }
    1627              :         }
    1628              : 
    1629              :         // Without this, rustc complains about deltas_to_compact still
    1630              :         // being borrowed when we `.into_iter()` below.
    1631           56 :         drop(all_values_iter);
    1632           56 : 
    1633           56 :         Ok(CompactLevel0Phase1Result {
    1634           56 :             new_layers,
    1635           56 :             deltas_to_compact: deltas_to_compact
    1636           56 :                 .into_iter()
    1637          804 :                 .map(|x| x.drop_eviction_guard())
    1638           56 :                 .collect::<Vec<_>>(),
    1639           56 :             outcome: if fully_compacted {
    1640           56 :                 CompactionOutcome::Done
    1641              :             } else {
    1642            0 :                 CompactionOutcome::Pending
    1643              :             },
    1644              :         })
    1645          728 :     }
    1646              : }
    1647              : 
    1648              : #[derive(Default)]
    1649              : struct CompactLevel0Phase1Result {
    1650              :     new_layers: Vec<ResidentLayer>,
    1651              :     deltas_to_compact: Vec<Layer>,
    1652              :     // Whether we have included all L0 layers, or selected only part of them due to the
    1653              :     // L0 compaction size limit.
    1654              :     outcome: CompactionOutcome,
    1655              : }
    1656              : 
    1657              : #[derive(Default)]
    1658              : struct CompactLevel0Phase1StatsBuilder {
    1659              :     version: Option<u64>,
    1660              :     tenant_id: Option<TenantShardId>,
    1661              :     timeline_id: Option<TimelineId>,
    1662              :     read_lock_acquisition_micros: DurationRecorder,
    1663              :     read_lock_held_spawn_blocking_startup_micros: DurationRecorder,
    1664              :     read_lock_held_key_sort_micros: DurationRecorder,
    1665              :     read_lock_held_prerequisites_micros: DurationRecorder,
    1666              :     read_lock_held_compute_holes_micros: DurationRecorder,
    1667              :     read_lock_drop_micros: DurationRecorder,
    1668              :     write_layer_files_micros: DurationRecorder,
    1669              :     level0_deltas_count: Option<usize>,
    1670              :     new_deltas_count: Option<usize>,
    1671              :     new_deltas_size: Option<u64>,
    1672              : }
    1673              : 
    1674              : #[derive(serde::Serialize)]
    1675              : struct CompactLevel0Phase1Stats {
    1676              :     version: u64,
    1677              :     tenant_id: TenantShardId,
    1678              :     timeline_id: TimelineId,
    1679              :     read_lock_acquisition_micros: RecordedDuration,
    1680              :     read_lock_held_spawn_blocking_startup_micros: RecordedDuration,
    1681              :     read_lock_held_key_sort_micros: RecordedDuration,
    1682              :     read_lock_held_prerequisites_micros: RecordedDuration,
    1683              :     read_lock_held_compute_holes_micros: RecordedDuration,
    1684              :     read_lock_drop_micros: RecordedDuration,
    1685              :     write_layer_files_micros: RecordedDuration,
    1686              :     level0_deltas_count: usize,
    1687              :     new_deltas_count: usize,
    1688              :     new_deltas_size: u64,
    1689              : }
    1690              : 
    1691              : impl TryFrom<CompactLevel0Phase1StatsBuilder> for CompactLevel0Phase1Stats {
    1692              :     type Error = anyhow::Error;
    1693              : 
    1694           56 :     fn try_from(value: CompactLevel0Phase1StatsBuilder) -> Result<Self, Self::Error> {
    1695           56 :         Ok(Self {
    1696           56 :             version: value.version.ok_or_else(|| anyhow!("version not set"))?,
    1697           56 :             tenant_id: value
    1698           56 :                 .tenant_id
    1699           56 :                 .ok_or_else(|| anyhow!("tenant_id not set"))?,
    1700           56 :             timeline_id: value
    1701           56 :                 .timeline_id
    1702           56 :                 .ok_or_else(|| anyhow!("timeline_id not set"))?,
    1703           56 :             read_lock_acquisition_micros: value
    1704           56 :                 .read_lock_acquisition_micros
    1705           56 :                 .into_recorded()
    1706           56 :                 .ok_or_else(|| anyhow!("read_lock_acquisition_micros not set"))?,
    1707           56 :             read_lock_held_spawn_blocking_startup_micros: value
    1708           56 :                 .read_lock_held_spawn_blocking_startup_micros
    1709           56 :                 .into_recorded()
    1710           56 :                 .ok_or_else(|| anyhow!("read_lock_held_spawn_blocking_startup_micros not set"))?,
    1711           56 :             read_lock_held_key_sort_micros: value
    1712           56 :                 .read_lock_held_key_sort_micros
    1713           56 :                 .into_recorded()
    1714           56 :                 .ok_or_else(|| anyhow!("read_lock_held_key_sort_micros not set"))?,
    1715           56 :             read_lock_held_prerequisites_micros: value
    1716           56 :                 .read_lock_held_prerequisites_micros
    1717           56 :                 .into_recorded()
    1718           56 :                 .ok_or_else(|| anyhow!("read_lock_held_prerequisites_micros not set"))?,
    1719           56 :             read_lock_held_compute_holes_micros: value
    1720           56 :                 .read_lock_held_compute_holes_micros
    1721           56 :                 .into_recorded()
    1722           56 :                 .ok_or_else(|| anyhow!("read_lock_held_compute_holes_micros not set"))?,
    1723           56 :             read_lock_drop_micros: value
    1724           56 :                 .read_lock_drop_micros
    1725           56 :                 .into_recorded()
    1726           56 :                 .ok_or_else(|| anyhow!("read_lock_drop_micros not set"))?,
    1727           56 :             write_layer_files_micros: value
    1728           56 :                 .write_layer_files_micros
    1729           56 :                 .into_recorded()
    1730           56 :                 .ok_or_else(|| anyhow!("write_layer_files_micros not set"))?,
    1731           56 :             level0_deltas_count: value
    1732           56 :                 .level0_deltas_count
    1733           56 :                 .ok_or_else(|| anyhow!("level0_deltas_count not set"))?,
    1734           56 :             new_deltas_count: value
    1735           56 :                 .new_deltas_count
    1736           56 :                 .ok_or_else(|| anyhow!("new_deltas_count not set"))?,
    1737           56 :             new_deltas_size: value
    1738           56 :                 .new_deltas_size
    1739           56 :                 .ok_or_else(|| anyhow!("new_deltas_size not set"))?,
    1740              :         })
    1741           56 :     }
    1742              : }
    1743              : 
    1744              : impl Timeline {
    1745              :     /// Entry point for new tiered compaction algorithm.
    1746              :     ///
    1747              :     /// All the real work is done in the pageserver_compaction crate. The code
    1748              :     /// here would apply to any algorithm implementing the same interface, but
    1749              :     /// tiered is the only one at the moment.
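                      :     ///
                      :     /// Illustrative call sketch (hypothetical caller code, not part of this file):
                      :     ///
                      :     /// ```ignore
                      :     /// // `timeline` is an `Arc<Timeline>`.
                      :     /// timeline.compact_tiered(&cancel, &ctx).await?;
                      :     /// ```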
    1750              :     ///
    1751              :     /// TODO: cancellation
    1752            0 :     pub(crate) async fn compact_tiered(
    1753            0 :         self: &Arc<Self>,
    1754            0 :         _cancel: &CancellationToken,
    1755            0 :         ctx: &RequestContext,
    1756            0 :     ) -> Result<(), CompactionError> {
    1757            0 :         let fanout = self.get_compaction_threshold() as u64;
    1758            0 :         let target_file_size = self.get_checkpoint_distance();
    1759              : 
    1760              :         // Find the top of the historical layers
    1761            0 :         let end_lsn = {
    1762            0 :             let guard = self.layers.read().await;
    1763            0 :             let layers = guard.layer_map()?;
    1764              : 
    1765            0 :             let l0_deltas = layers.level0_deltas();
    1766            0 : 
    1767            0 :             // As an optimization, if we find that there are too few L0 layers,
    1768            0 :             // bail out early. We know that the compaction algorithm would do
    1769            0 :             // nothing in that case.
    1770            0 :             if l0_deltas.len() < fanout as usize {
    1771              :                 // doesn't need compacting
    1772            0 :                 return Ok(());
    1773            0 :             }
    1774            0 :             l0_deltas.iter().map(|l| l.lsn_range.end).max().unwrap()
    1775            0 :         };
    1776            0 : 
    1777            0 :         // Is the timeline being deleted?
    1778            0 :         if self.is_stopping() {
    1779            0 :             trace!("Dropping out of compaction on timeline shutdown");
    1780            0 :             return Err(CompactionError::ShuttingDown);
    1781            0 :         }
    1782              : 
    1783            0 :         let (dense_ks, _sparse_ks) = self.collect_keyspace(end_lsn, ctx).await?;
    1784              :         // TODO(chi): ignore sparse_keyspace for now, compact it in the future.
    1785            0 :         let mut adaptor = TimelineAdaptor::new(self, (end_lsn, dense_ks));
    1786            0 : 
    1787            0 :         pageserver_compaction::compact_tiered::compact_tiered(
    1788            0 :             &mut adaptor,
    1789            0 :             end_lsn,
    1790            0 :             target_file_size,
    1791            0 :             fanout,
    1792            0 :             ctx,
    1793            0 :         )
    1794            0 :         .await
    1795              :         // TODO: compact_tiered needs to return CompactionError
    1796            0 :         .map_err(CompactionError::Other)?;
    1797              : 
    1798            0 :         adaptor.flush_updates().await?;
    1799            0 :         Ok(())
    1800            0 :     }
    1801              : 
    1802              :     /// Take a list of images and deltas, produce images and deltas according to GC horizon and retain_lsns.
    1803              :     ///
    1804              :     /// It takes a key, the values of the key within the compaction process, a GC horizon, and all retain_lsns below the horizon.
    1805              :     /// For now, it requires that `accumulated_values` contain the full history of the key (i.e., the key with the lowest LSN is
    1806              :     /// an image or a WAL record not requiring a base image). This restriction will be removed once we implement gc-compaction on branches.
    1807              :     ///
    1808              :     /// The function returns the deltas and the base image that need to be placed at each of the retain LSN. For example, we have:
    1809              :     ///
    1810              :     /// A@0x10, +B@0x20, +C@0x30, +D@0x40, +E@0x50, +F@0x60
    1811              :     /// horizon = 0x50, retain_lsn = 0x20, 0x40, delta_threshold=3
    1812              :     ///
    1813              :     /// The function will produce:
    1814              :     ///
    1815              :     /// ```plain
    1816              :     /// 0x20(retain_lsn) -> img=AB@0x20                  always produce a single image below the lowest retain LSN
    1817              :     /// 0x40(retain_lsn) -> deltas=[+C@0x30, +D@0x40]    two deltas since the last base image, keeping the deltas
    1818              :     /// 0x50(horizon)    -> deltas=[ABCDE@0x50]          three deltas since the last base image, generate an image but put it in the delta
    1819              :     /// above_horizon    -> deltas=[+F@0x60]             full history above the horizon
    1820              :     /// ```
    1821              :     ///
    1822              :     /// Note that `accumulated_values` must be sorted by LSN and should belong to a single key.
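                      :     ///
                      :     /// Call sketch (illustrative, using the parameter values from the example above):
                      :     ///
                      :     /// ```ignore
                      :     /// let retention = timeline
                      :     ///     .generate_key_retention(key, &full_history, Lsn(0x50), &[Lsn(0x20), Lsn(0x40)], 3, None)
                      :     ///     .await?;
                      :     /// ```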
    1823         1260 :     pub(crate) async fn generate_key_retention(
    1824         1260 :         self: &Arc<Timeline>,
    1825         1260 :         key: Key,
    1826         1260 :         full_history: &[(Key, Lsn, Value)],
    1827         1260 :         horizon: Lsn,
    1828         1260 :         retain_lsn_below_horizon: &[Lsn],
    1829         1260 :         delta_threshold_cnt: usize,
    1830         1260 :         base_img_from_ancestor: Option<(Key, Lsn, Bytes)>,
    1831         1260 :     ) -> anyhow::Result<KeyHistoryRetention> {
    1832              :         // Pre-checks for the invariants
    1833              : 
    1834         1260 :         let debug_mode = cfg!(debug_assertions) || cfg!(feature = "testing");
    1835              : 
    1836         1260 :         if debug_mode {
    1837         3060 :             for (log_key, _, _) in full_history {
    1838         1800 :                 assert_eq!(log_key, &key, "mismatched key");
    1839              :             }
    1840         1260 :             for i in 1..full_history.len() {
    1841          540 :                 assert!(full_history[i - 1].1 <= full_history[i].1, "unordered LSN");
    1842          540 :                 if full_history[i - 1].1 == full_history[i].1 {
    1843            0 :                     assert!(
    1844            0 :                         matches!(full_history[i - 1].2, Value::Image(_)),
    1845            0 :                         "unordered delta/image, or duplicated delta"
    1846              :                     );
    1847          540 :                 }
    1848              :             }
    1849              :             // There used to be an assertion that the first record in the
    1850              :             // history is `will_init` (i.e., needs no base image), but it was removed.
    1851              :             // This is explained in the test cases for generate_key_retention.
    1852              :             // Search "incomplete history" for more information.
    1853         2820 :             for lsn in retain_lsn_below_horizon {
    1854         1560 :                 assert!(lsn < &horizon, "retain lsn must be below horizon")
    1855              :             }
    1856         1260 :             for i in 1..retain_lsn_below_horizon.len() {
    1857          712 :                 assert!(
    1858          712 :                     retain_lsn_below_horizon[i - 1] <= retain_lsn_below_horizon[i],
    1859            0 :                     "unordered LSN"
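                      :         // For example (illustrative, matching the doc example above): with
                      :         // retain_lsn_below_horizon = [0x20, 0x40] and horizon = 0x50, the split points are
                      :         // [0x20, 0x40, 0x50], giving four buckets: (..=0x20], (0x20..=0x40], (0x40..=0x50],
                      :         // plus the final bucket for everything above the horizon.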
    1860              :                 );
    1861              :             }
    1862            0 :         }
    1863         1260 :         let has_ancestor = base_img_from_ancestor.is_some();
    1864              :         // Step 1: split history into len(retain_lsn_below_horizon) + 2 buckets, where the last bucket is for all deltas above the horizon,
    1865              :         // and the second-to-last bucket is for the horizon. Each bucket contains lsn_last_bucket < deltas <= lsn_this_bucket.
    1866         1260 :         let (mut split_history, lsn_split_points) = {
    1867         1260 :             let mut split_history = Vec::new();
    1868         1260 :             split_history.resize_with(retain_lsn_below_horizon.len() + 2, Vec::new);
    1869         1260 :             let mut lsn_split_points = Vec::with_capacity(retain_lsn_below_horizon.len() + 1);
    1870         2820 :             for lsn in retain_lsn_below_horizon {
    1871         1560 :                 lsn_split_points.push(*lsn);
    1872         1560 :             }
    1873         1260 :             lsn_split_points.push(horizon);
    1874         1260 :             let mut current_idx = 0;
    1875         3060 :             for item @ (_, lsn, _) in full_history {
    1876         2288 :                 while current_idx < lsn_split_points.len() && *lsn > lsn_split_points[current_idx] {
    1877          488 :                     current_idx += 1;
    1878          488 :                 }
    1879         1800 :                 split_history[current_idx].push(item);
    1880              :             }
    1881         1260 :             (split_history, lsn_split_points)
    1882              :         };
    1883              :         // Step 2: filter out duplicated records due to the k-merge of image/delta layers
    1884         5340 :         for split_for_lsn in &mut split_history {
    1885         4080 :             let mut prev_lsn = None;
    1886         4080 :             let mut new_split_for_lsn = Vec::with_capacity(split_for_lsn.len());
    1887         4080 :             for record @ (_, lsn, _) in std::mem::take(split_for_lsn) {
    1888         1800 :                 if let Some(prev_lsn) = &prev_lsn {
    1889          236 :                     if *prev_lsn == lsn {
    1890              :                         // The case that we have an LSN with both data from the delta layer and the image layer. As
    1891              :                         // `ValueWrapper` ensures that an image is ordered before a delta at the same LSN, we simply
    1892              :                         // drop this delta and keep the image.
    1893              :                         //
    1894              :                         // For example, we have delta layer key1@0x10, key1@0x20, and image layer key1@0x10, we will
    1895              :                         // keep the image for key1@0x10 and the delta for key1@0x20. key1@0x10 delta will be simply
    1896              :                         // dropped.
    1897              :                         //
    1898              :                         // TODO: in case we have both delta + images for a given LSN and it does not exceed the delta
    1899              :                         // threshold, we could have kept delta instead to save space. This is an optimization for the future.
    1900            0 :                         continue;
    1901          236 :                     }
    1902         1564 :                 }
    1903         1800 :                 prev_lsn = Some(lsn);
    1904         1800 :                 new_split_for_lsn.push(record);
    1905              :             }
    1906         4080 :             *split_for_lsn = new_split_for_lsn;
    1907              :         }
    1908              :         // Step 3: generate images when necessary
    1909         1260 :         let mut retention = Vec::with_capacity(split_history.len());
    1910         1260 :         let mut records_since_last_image = 0;
    1911         1260 :         let batch_cnt = split_history.len();
    1912         1260 :         assert!(
    1913         1260 :             batch_cnt >= 2,
    1914            0 :             "should have at least below + above horizon batches"
    1915              :         );
    1916         1260 :         let mut replay_history: Vec<(Key, Lsn, Value)> = Vec::new();
    1917         1260 :         if let Some((key, lsn, img)) = base_img_from_ancestor {
    1918           84 :             replay_history.push((key, lsn, Value::Image(img)));
    1919         1176 :         }
    1920              : 
    1921              :         /// Generate debug information for the replay history
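                      :         /// Output sketch (illustrative): `key=<key> i@<lsn> d@<lsn> di@<lsn> ...`,
                      :         /// truncated with "... and more" after 128 entries.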
    1922            0 :         fn generate_history_trace(replay_history: &[(Key, Lsn, Value)]) -> String {
    1923              :             use std::fmt::Write;
    1924            0 :             let mut output = String::new();
    1925            0 :             if let Some((key, _, _)) = replay_history.first() {
    1926            0 :                 write!(output, "key={} ", key).unwrap();
    1927            0 :                 let mut cnt = 0;
    1928            0 :                 for (_, lsn, val) in replay_history {
    1929            0 :                     if val.is_image() {
    1930            0 :                         write!(output, "i@{} ", lsn).unwrap();
    1931            0 :                     } else if val.will_init() {
    1932            0 :                         write!(output, "di@{} ", lsn).unwrap();
    1933            0 :                     } else {
    1934            0 :                         write!(output, "d@{} ", lsn).unwrap();
    1935            0 :                     }
    1936            0 :                     cnt += 1;
    1937            0 :                     if cnt >= 128 {
    1938            0 :                         write!(output, "... and more").unwrap();
    1939            0 :                         break;
    1940            0 :                     }
    1941              :                 }
    1942            0 :             } else {
    1943            0 :                 write!(output, "<no history>").unwrap();
    1944            0 :             }
    1945            0 :             output
    1946            0 :         }
    1947              : 
    1948            0 :         fn generate_debug_trace(
    1949            0 :             replay_history: Option<&[(Key, Lsn, Value)]>,
    1950            0 :             full_history: &[(Key, Lsn, Value)],
    1951            0 :             lsns: &[Lsn],
    1952            0 :             horizon: Lsn,
    1953            0 :         ) -> String {
    1954              :             use std::fmt::Write;
    1955            0 :             let mut output = String::new();
    1956            0 :             if let Some(replay_history) = replay_history {
    1957            0 :                 writeln!(
    1958            0 :                     output,
    1959            0 :                     "replay_history: {}",
    1960            0 :                     generate_history_trace(replay_history)
    1961            0 :                 )
    1962            0 :                 .unwrap();
    1963            0 :             } else {
    1964            0 :                 writeln!(output, "replay_history: <disabled>",).unwrap();
    1965            0 :             }
    1966            0 :             writeln!(
    1967            0 :                 output,
    1968            0 :                 "full_history: {}",
    1969            0 :                 generate_history_trace(full_history)
    1970            0 :             )
    1971            0 :             .unwrap();
    1972            0 :             writeln!(
    1973            0 :                 output,
    1974            0 :                 "when processing: [{}] horizon={}",
    1975            0 :                 lsns.iter().map(|l| format!("{l}")).join(","),
    1976            0 :                 horizon
    1977            0 :             )
    1978            0 :             .unwrap();
    1979            0 :             output
    1980            0 :         }
    1981              : 
    1982         1260 :         let mut key_exists = false;
    1983         4080 :         for (i, split_for_lsn) in split_history.into_iter().enumerate() {
    1984              :             // TODO: there could be image keys inside the splits, and we can compute records_since_last_image accordingly.
    1985         4080 :             records_since_last_image += split_for_lsn.len();
    1986              :             // Whether to produce an image into the final layer files
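                      :             // e.g. (illustrative): with delta_threshold_cnt = 3, the first batch always
                      :             // produces an image when there is no ancestor base image, the last batch
                      :             // (above the horizon) never does, and a middle batch does once it has
                      :             // accumulated >= 3 records since the last image.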
    1987         4080 :             let produce_image = if i == 0 && !has_ancestor {
    1988              :                 // We always generate images for the first batch (below horizon / lowest retain_lsn)
    1989         1176 :                 true
    1990         2904 :             } else if i == batch_cnt - 1 {
    1991              :                 // Do not generate images for the last batch (above horizon)
    1992         1260 :                 false
    1993         1644 :             } else if records_since_last_image == 0 {
    1994         1288 :                 false
    1995          356 :             } else if records_since_last_image >= delta_threshold_cnt {
    1996              :                 // Generate images when there are too many records
    1997           12 :                 true
    1998              :             } else {
    1999          344 :                 false
    2000              :             };
    2001         4080 :             replay_history.extend(split_for_lsn.iter().map(|x| (*x).clone()));
    2002              :             // Only retain the items after the last image record
    2003         5028 :             for idx in (0..replay_history.len()).rev() {
    2004         5028 :                 if replay_history[idx].2.will_init() {
    2005         4080 :                     replay_history = replay_history[idx..].to_vec();
    2006         4080 :                     break;
    2007          948 :                 }
    2008              :             }
    2009         4080 :             if replay_history.is_empty() && !key_exists {
    2010              :                 // The key does not exist at earlier LSN, we can skip this iteration.
    2011            0 :                 retention.push(Vec::new());
    2012            0 :                 continue;
    2013         4080 :             } else {
    2014         4080 :                 key_exists = true;
    2015         4080 :             }
    2016         4080 :             let Some((_, _, val)) = replay_history.first() else {
    2017            0 :                 unreachable!("replay history should not be empty once it exists")
    2018              :             };
    2019         4080 :             if !val.will_init() {
    2020            0 :                 return Err(anyhow::anyhow!("invalid history, no base image")).with_context(|| {
    2021            0 :                     generate_debug_trace(
    2022            0 :                         Some(&replay_history),
    2023            0 :                         full_history,
    2024            0 :                         retain_lsn_below_horizon,
    2025            0 :                         horizon,
    2026            0 :                     )
    2027            0 :                 });
    2028         4080 :             }
    2029              :             // Whether to reconstruct the image. In debug mode, we will generate an image
    2030              :             // at every retain_lsn to ensure data is not corrupted, but we won't put the
    2031              :             // image into the final layer.
    2032         4080 :             let generate_image = produce_image || debug_mode;
    2033         4080 :             if produce_image {
    2034         1188 :                 records_since_last_image = 0;
    2035         2892 :             }
    2036         4080 :             let img_and_lsn = if generate_image {
    2037         4080 :                 let replay_history_for_debug = if debug_mode {
    2038         4080 :                     Some(replay_history.clone())
    2039              :                 } else {
    2040            0 :                     None
    2041              :                 };
    2042         4080 :                 let replay_history_for_debug_ref = replay_history_for_debug.as_deref();
    2043         4080 :                 let history = if produce_image {
    2044         1188 :                     std::mem::take(&mut replay_history)
    2045              :                 } else {
    2046         2892 :                     replay_history.clone()
    2047              :                 };
    2048         4080 :                 let mut img = None;
    2049         4080 :                 let mut records = Vec::with_capacity(history.len());
    2050         4080 :                 if let (_, lsn, Value::Image(val)) = history.first().as_ref().unwrap() {
    2051         4036 :                     img = Some((*lsn, val.clone()));
    2052         4036 :                     for (_, lsn, val) in history.into_iter().skip(1) {
    2053          920 :                         let Value::WalRecord(rec) = val else {
    2054            0 :                             return Err(anyhow::anyhow!(
    2055            0 :                                 "invalid record, first record is image, expect walrecords"
    2056            0 :                             ))
    2057            0 :                             .with_context(|| {
    2058            0 :                                 generate_debug_trace(
    2059            0 :                                     replay_history_for_debug_ref,
    2060            0 :                                     full_history,
    2061            0 :                                     retain_lsn_below_horizon,
    2062            0 :                                     horizon,
    2063            0 :                                 )
    2064            0 :                             });
    2065              :                         };
    2066          920 :                         records.push((lsn, rec));
    2067              :                     }
    2068              :                 } else {
    2069           72 :                     for (_, lsn, val) in history.into_iter() {
    2070           72 :                         let Value::WalRecord(rec) = val else {
    2071            0 :                             return Err(anyhow::anyhow!("invalid record, first record is walrecord, expect rest are walrecord"))
    2072            0 :                                 .with_context(|| generate_debug_trace(
    2073            0 :                                     replay_history_for_debug_ref,
    2074            0 :                                     full_history,
    2075            0 :                                     retain_lsn_below_horizon,
    2076            0 :                                     horizon,
    2077            0 :                                 ));
    2078              :                         };
    2079           72 :                         records.push((lsn, rec));
    2080              :                     }
    2081              :                 }
    2082         4080 :                 records.reverse();
    2083         4080 :                 let state = ValueReconstructState { img, records };
    2084              :             // The last batch does not generate an image, so i is always in range, unless
    2085              :             // we force-generate an image during testing
    2086         4080 :                 let request_lsn = if i >= lsn_split_points.len() {
    2087         1260 :                     Lsn::MAX
    2088              :                 } else {
    2089         2820 :                     lsn_split_points[i]
    2090              :                 };
    2091         4080 :                 let img = self.reconstruct_value(key, request_lsn, state).await?;
    2092         4080 :                 Some((request_lsn, img))
    2093              :             } else {
    2094            0 :                 None
    2095              :             };
    2096         4080 :             if produce_image {
    2097         1188 :                 let (request_lsn, img) = img_and_lsn.unwrap();
    2098         1188 :                 replay_history.push((key, request_lsn, Value::Image(img.clone())));
    2099         1188 :                 retention.push(vec![(request_lsn, Value::Image(img))]);
    2100         2892 :             } else {
    2101         2892 :                 let deltas = split_for_lsn
    2102         2892 :                     .iter()
    2103         2892 :                     .map(|(_, lsn, value)| (*lsn, value.clone()))
    2104         2892 :                     .collect_vec();
    2105         2892 :                 retention.push(deltas);
    2106         2892 :             }
    2107              :         }
    2108         1260 :         let mut result = Vec::with_capacity(retention.len());
    2109         1260 :         assert_eq!(retention.len(), lsn_split_points.len() + 1);
    2110         4080 :         for (idx, logs) in retention.into_iter().enumerate() {
    2111         4080 :             if idx == lsn_split_points.len() {
    2112         1260 :                 return Ok(KeyHistoryRetention {
    2113         1260 :                     below_horizon: result,
    2114         1260 :                     above_horizon: KeyLogAtLsn(logs),
    2115         1260 :                 });
    2116         2820 :             } else {
    2117         2820 :                 result.push((lsn_split_points[idx], KeyLogAtLsn(logs)));
    2118         2820 :             }
    2119              :         }
    2120            0 :         unreachable!("key retention is empty")
    2121         1260 :     }
    2122              : 
    2123              :     /// Check how much space is left on the disk
    2124          104 :     async fn check_available_space(self: &Arc<Self>) -> anyhow::Result<u64> {
    2125          104 :         let tenants_dir = self.conf.tenants_path();
    2126              : 
    2127          104 :         let stat = Statvfs::get(&tenants_dir, None)
    2128          104 :             .context("statvfs failed, presumably directory got unlinked")?;
    2129              : 
    2130          104 :         let (avail_bytes, _) = stat.get_avail_total_bytes();
    2131          104 : 
    2132          104 :         Ok(avail_bytes)
    2133          104 :     }
    2134              : 
    2135              :     /// Check if the compaction can proceed safely without running out of space. We assume that
    2136              :     /// the total size of the files produced by a compaction job is bounded by the total size of
    2137              :     /// the layers involved in the compaction. Therefore, we need at least
    2138              :     /// `2 * layers_to_be_compacted_size` of free space to run a compaction.
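                      :     ///
                      :     /// Worked example (illustrative numbers): with 100 GiB available on disk,
                      :     /// `allocated_space` is 80 GiB (20% is reserved for other tasks). A job over
                      :     /// 50 GiB of layers, of which 10 GiB still need downloading, requires
                      :     /// 50 + 10 = 60 GiB and is therefore allowed to proceed.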
    2139          104 :     async fn check_compaction_space(
    2140          104 :         self: &Arc<Self>,
    2141          104 :         layer_selection: &[Layer],
    2142          104 :     ) -> anyhow::Result<()> {
    2143          104 :         let available_space = self.check_available_space().await?;
    2144          104 :         let mut remote_layer_size = 0;
    2145          104 :         let mut all_layer_size = 0;
    2146          408 :         for layer in layer_selection {
    2147          304 :             let needs_download = layer.needs_download().await?;
    2148          304 :             if needs_download.is_some() {
    2149            0 :                 remote_layer_size += layer.layer_desc().file_size;
    2150          304 :             }
    2151          304 :             all_layer_size += layer.layer_desc().file_size;
    2152              :         }
    2153          104 :         let allocated_space = (available_space as f64 * 0.8) as u64; /* reserve 20% space for other tasks */
    2154          104 :         if all_layer_size /* space needed for newly-generated file */ + remote_layer_size /* space for downloading layers */ > allocated_space
    2155              :         {
    2156            0 :             return Err(anyhow!("not enough space for compaction: available_space={}, allocated_space={}, all_layer_size={}, remote_layer_size={}, required_space={}",
    2157            0 :                 available_space, allocated_space, all_layer_size, remote_layer_size, all_layer_size + remote_layer_size));
    2158          104 :         }
    2159          104 :         Ok(())
    2160          104 :     }
    2161              : 
    2162              :     /// Get a watermark for gc-compaction, that is the lowest LSN that we can use as the `gc_horizon` for
    2163              :     /// the compaction algorithm. It is min(space_cutoff, time_cutoff, latest_gc_cutoff, standby_horizon).
    2164              :     /// Leases and retain_lsns are considered in the gc-compaction job itself so we don't need to account for them
    2165              :     /// here.
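                      :     ///
                      :     /// e.g. (illustrative): if `gc_info.min_cutoff()` is 0/50 and the applied gc
                      :     /// cutoff is 0/40, the watermark is 0/40, the smaller of the two.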
    2166          108 :     pub(crate) fn get_gc_compaction_watermark(self: &Arc<Self>) -> Lsn {
    2167          108 :         let gc_cutoff_lsn = {
    2168          108 :             let gc_info = self.gc_info.read().unwrap();
    2169          108 :             gc_info.min_cutoff()
    2170          108 :         };
    2171          108 : 
    2172          108 :         // TODO: standby horizon should use leases so we don't really need to consider it here.
    2173          108 :         // let watermark = watermark.min(self.standby_horizon.load());
    2174          108 : 
    2175          108 :         // TODO: ensure the child branches will not use anything below the watermark, or consider
    2176          108 :         // them when computing the watermark.
    2177          108 :         gc_cutoff_lsn.min(*self.get_applied_gc_cutoff_lsn())
    2178          108 :     }
    2179              : 
    2180              :     /// Split a gc-compaction job into multiple compaction jobs. The split is based on the key range and the estimated size of the compaction job.
    2181              :     /// The function returns a list of compaction jobs that can be executed separately. If the upper bound of the compact LSN
    2182              :     /// range is not specified, we will use the latest gc_cutoff as the upper bound, so that all jobs in the job set act
    2183              :     /// like a full compaction of the specified keyspace.
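                      :     ///
                      :     /// Illustrative example: with the default 4 GB sub-compaction job size, a key
                      :     /// partitioning whose overlapping layers total roughly 10 GB is split into about
                      :     /// three jobs, each covering a contiguous key range that is extended so it
                      :     /// includes at least one full layer file.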
    2184            0 :     pub(crate) async fn gc_compaction_split_jobs(
    2185            0 :         self: &Arc<Self>,
    2186            0 :         job: GcCompactJob,
    2187            0 :         sub_compaction_max_job_size_mb: Option<u64>,
    2188            0 :     ) -> anyhow::Result<Vec<GcCompactJob>> {
    2189            0 :         let compact_below_lsn = if job.compact_lsn_range.end != Lsn::MAX {
    2190            0 :             job.compact_lsn_range.end
    2191              :         } else {
    2192            0 :             self.get_gc_compaction_watermark()
    2193              :         };
    2194              : 
    2195            0 :         if compact_below_lsn == Lsn::INVALID {
    2196            0 :             tracing::warn!("no layers to compact with gc: gc_cutoff not generated yet, skipping gc bottom-most compaction");
    2197            0 :             return Ok(vec![]);
    2198            0 :         }
    2199              : 
    2200              :         // Split the compaction job into sub-jobs of about 4 GB each
    2201              :         const GC_COMPACT_MAX_SIZE_MB: u64 = 4 * 1024;
    2202            0 :         let sub_compaction_max_job_size_mb =
    2203            0 :             sub_compaction_max_job_size_mb.unwrap_or(GC_COMPACT_MAX_SIZE_MB);
    2204            0 : 
    2205            0 :         let mut compact_jobs = Vec::new();
    2206            0 :         // For now, we simply use the key partitioning information; we should do a more fine-grained partitioning
    2207            0 :         // by estimating the amount of files read for a compaction job. We should also partition on LSN.
    2208            0 :         let ((dense_ks, sparse_ks), _) = self.partitioning.read().as_ref().clone();
    2209              :         // Truncate the key range to be within the user-specified compaction range.
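                      :         // For example (illustrative): truncating source range 10..80 to target range
                      :         // 40..100 yields Some((40, 80)); disjoint ranges yield None.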
    2210            0 :         fn truncate_to(
    2211            0 :             source_start: &Key,
    2212            0 :             source_end: &Key,
    2213            0 :             target_start: &Key,
    2214            0 :             target_end: &Key,
    2215            0 :         ) -> Option<(Key, Key)> {
    2216            0 :             let start = source_start.max(target_start);
    2217            0 :             let end = source_end.min(target_end);
    2218            0 :             if start < end {
    2219            0 :                 Some((*start, *end))
    2220              :             } else {
    2221            0 :                 None
    2222              :             }
    2223            0 :         }
    2224            0 :         let mut split_key_ranges = Vec::new();
    2225            0 :         let ranges = dense_ks
    2226            0 :             .parts
    2227            0 :             .iter()
    2228            0 :             .map(|partition| partition.ranges.iter())
    2229            0 :             .chain(sparse_ks.parts.iter().map(|x| x.0.ranges.iter()))
    2230            0 :             .flatten()
    2231            0 :             .cloned()
    2232            0 :             .collect_vec();
    2233            0 :         for range in ranges.iter() {
    2234            0 :             let Some((start, end)) = truncate_to(
    2235            0 :                 &range.start,
    2236            0 :                 &range.end,
    2237            0 :                 &job.compact_key_range.start,
    2238            0 :                 &job.compact_key_range.end,
    2239            0 :             ) else {
    2240            0 :                 continue;
    2241              :             };
    2242            0 :             split_key_ranges.push((start, end));
    2243              :         }
    2244            0 :         split_key_ranges.sort();
    2245            0 :         let all_layers = {
    2246            0 :             let guard = self.layers.read().await;
    2247            0 :             let layer_map = guard.layer_map()?;
    2248            0 :             layer_map.iter_historic_layers().collect_vec()
    2249            0 :         };
    2250            0 :         let mut current_start = None;
    2251            0 :         let ranges_num = split_key_ranges.len();
    2252            0 :         for (idx, (start, end)) in split_key_ranges.into_iter().enumerate() {
    2253            0 :             if current_start.is_none() {
    2254            0 :                 current_start = Some(start);
    2255            0 :             }
    2256            0 :             let start = current_start.unwrap();
    2257            0 :             if start >= end {
    2258              :                 // We have already processed this partition.
    2259            0 :                 continue;
    2260            0 :             }
    2261            0 :             let overlapping_layers = {
    2262            0 :                 let mut desc = Vec::new();
    2263            0 :                 for layer in all_layers.iter() {
    2264            0 :                     if overlaps_with(&layer.get_key_range(), &(start..end))
    2265            0 :                         && layer.get_lsn_range().start <= compact_below_lsn
    2266            0 :                     {
    2267            0 :                         desc.push(layer.clone());
    2268            0 :                     }
    2269              :                 }
    2270            0 :                 desc
    2271            0 :             };
    2272            0 :             let total_size = overlapping_layers.iter().map(|x| x.file_size).sum::<u64>();
    2273            0 :             if total_size > sub_compaction_max_job_size_mb * 1024 * 1024 || ranges_num == idx + 1 {
    2274              :                 // Try to extend the compaction range so that we include at least one full layer file.
    2275            0 :                 let extended_end = overlapping_layers
    2276            0 :                     .iter()
    2277            0 :                     .map(|layer| layer.key_range.end)
    2278            0 :                     .min();
    2279              :                 // It is possible that the search range does not contain any layer files when we reach the end of the loop.
    2280              :                 // In this case, we simply use the specified key range end.
    2281            0 :                 let end = if let Some(extended_end) = extended_end {
    2282            0 :                     extended_end.max(end)
    2283              :                 } else {
    2284            0 :                     end
    2285              :                 };
    2286            0 :                 let end = if ranges_num == idx + 1 {
    2287              :                     // extend the compaction range to the end of the key range if it's the last partition
    2288            0 :                     end.max(job.compact_key_range.end)
    2289              :                 } else {
    2290            0 :                     end
    2291              :                 };
    2292            0 :                 info!(
    2293            0 :                     "splitting compaction job: {}..{}, estimated_size={}",
    2294              :                     start, end, total_size
    2295              :                 );
    2296            0 :                 compact_jobs.push(GcCompactJob {
    2297            0 :                     dry_run: job.dry_run,
    2298            0 :                     compact_key_range: start..end,
    2299            0 :                     compact_lsn_range: job.compact_lsn_range.start..compact_below_lsn,
    2300            0 :                 });
    2301            0 :                 current_start = Some(end);
    2302            0 :             }
    2303              :         }
    2304            0 :         Ok(compact_jobs)
    2305            0 :     }
    2306              : 
    2307              :     /// An experimental compaction building block that combines compaction with garbage collection.
    2308              :     ///
    2309              :     /// The current implementation picks all delta + image layers that are below or intersecting with
    2310              :     /// the GC horizon without considering retain_lsns. Then, it does a full compaction over all these delta
    2311              :     /// layers and image layers, which generates image layers on the gc horizon, drops deltas below the gc horizon,
    2312              :     /// and creates delta layers with all deltas >= the gc horizon.
    2313              :     ///
    2314              :     /// If `options.compact_range` is provided, it will only compact the keys within the range, aka partial compaction.
    2315              :     /// Partial compaction will read and process all layers overlapping with the key range, even if they might
    2316              :     /// contain extra keys. After the gc-compaction phase completes, delta layers that are not fully contained
    2317              :     /// within the key range will be rewritten to ensure they do not overlap with the newly generated delta layers. Providing
    2318              :     /// Key::MIN..Key::MAX to the function indicates a full compaction, though technically, `Key::MAX` is not
    2319              :     /// part of the range.
    2320              :     ///
    2321              :     /// If `options.compact_lsn_range.end` is provided, the compaction will only compact layers below or intersect with
    2322              :     /// the LSN. Otherwise, it will use the gc cutoff by default.
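                      :     ///
                      :     /// Illustrative before/after sketch (made-up layers, not from a real run):
                      :     ///
                      :     /// ```plain
                      :     /// before (gc_cutoff = 0x40):       after:
                      :     ///   image @ 0x10                     image @ 0x40      (keys materialized on the horizon)
                      :     ///   delta 0x10..0x30                 delta 0x40..0x50  (deltas >= the horizon kept)
                      :     ///   delta 0x30..0x50
                      :     /// ```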
    2323          108 :     pub(crate) async fn compact_with_gc(
    2324          108 :         self: &Arc<Self>,
    2325          108 :         cancel: &CancellationToken,
    2326          108 :         options: CompactOptions,
    2327          108 :         ctx: &RequestContext,
    2328          108 :     ) -> anyhow::Result<()> {
    2329          108 :         let sub_compaction = options.sub_compaction;
    2330          108 :         let job = GcCompactJob::from_compact_options(options.clone());
    2331          108 :         if sub_compaction {
    2332            0 :             info!("running enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs");
    2333            0 :             let jobs = self
    2334            0 :                 .gc_compaction_split_jobs(job, options.sub_compaction_max_job_size_mb)
    2335            0 :                 .await?;
    2336            0 :             let jobs_len = jobs.len();
    2337            0 :             for (idx, job) in jobs.into_iter().enumerate() {
    2338            0 :                 info!(
    2339            0 :                     "running enhanced gc bottom-most compaction, sub-compaction {}/{}",
    2340            0 :                     idx + 1,
    2341              :                     jobs_len
    2342              :                 );
    2343            0 :                 self.compact_with_gc_inner(cancel, job, ctx).await?;
    2344              :             }
    2345            0 :             if jobs_len == 0 {
    2346            0 :                 info!("no jobs to run, skipping gc bottom-most compaction");
    2347            0 :             }
    2348            0 :             return Ok(());
    2349          108 :         }
    2350          108 :         self.compact_with_gc_inner(cancel, job, ctx).await
    2351          108 :     }
    2352              : 
    2353          108 :     async fn compact_with_gc_inner(
    2354          108 :         self: &Arc<Self>,
    2355          108 :         cancel: &CancellationToken,
    2356          108 :         job: GcCompactJob,
    2357          108 :         ctx: &RequestContext,
    2358          108 :     ) -> anyhow::Result<()> {
    2359          108 :         // Block other compaction/GC tasks from running for now. GC-compaction could run along
    2360          108 :         // with legacy compaction tasks in the future. Always ensure the lock order is compaction -> gc.
    2361          108 :         // Note that we already acquired the compaction lock when the outer `compact` function gets called.
    2362          108 : 
    2363          108 :         let gc_lock = async {
    2364          108 :             tokio::select! {
    2365          108 :                 guard = self.gc_lock.lock() => Ok(guard),
    2366              :                 // TODO: refactor to CompactionError to correctly pass cancelled error
    2367          108 :                 _ = cancel.cancelled() => Err(anyhow!("cancelled")),
    2368              :             }
    2369          108 :         };
    2370              : 
    2371          108 :         let gc_lock = crate::timed(
    2372          108 :             gc_lock,
    2373          108 :             "acquires gc lock",
    2374          108 :             std::time::Duration::from_secs(5),
    2375          108 :         )
    2376          108 :         .await?;
    2377              : 
    2378          108 :         let dry_run = job.dry_run;
    2379          108 :         let compact_key_range = job.compact_key_range;
    2380          108 :         let compact_lsn_range = job.compact_lsn_range;
    2381              : 
    2382          108 :         let debug_mode = cfg!(debug_assertions) || cfg!(feature = "testing");
    2383              : 
    2384          108 :         info!("running enhanced gc bottom-most compaction, dry_run={dry_run}, compact_key_range={}..{}, compact_lsn_range={}..{}", compact_key_range.start, compact_key_range.end, compact_lsn_range.start, compact_lsn_range.end);
    2385              : 
    2386          108 :         scopeguard::defer! {
    2387          108 :             info!("done enhanced gc bottom-most compaction");
    2388          108 :         };
    2389          108 : 
    2390          108 :         let mut stat = CompactionStatistics::default();
    2391              : 
    2392              :         // Step 0: pick all delta layers + image layers below/intersect with the GC horizon.
    2393              :         // The layer selection has the following properties:
    2394              :         // 1. If a layer is in the selection, all layers below it are in the selection.
    2395              :         // 2. Inferred from (1), for each key in the layer selection, the value can be reconstructed only with the layers in the layer selection.
    2396          104 :         let job_desc = {
    2397          108 :             let guard = self.layers.read().await;
    2398          108 :             let layers = guard.layer_map()?;
    2399          108 :             let gc_info = self.gc_info.read().unwrap();
    2400          108 :             let mut retain_lsns_below_horizon = Vec::new();
    2401          108 :             let gc_cutoff = {
    2402              :                 // Currently, gc-compaction only kicks in after the legacy gc has updated the gc_cutoff.
    2403              :                 // Therefore, it can only clean up data that cannot be cleaned up with legacy gc, instead of
    2404              :                 // cleaning everything it theoretically could. In the future, it should use `self.gc_info`
    2405              :                 // to get the truth data.
    2406          108 :                 let real_gc_cutoff = self.get_gc_compaction_watermark();
    2407              :                 // The compaction algorithm will keep all keys above the gc_cutoff while keeping only necessary keys below the gc_cutoff for
    2408              :                 // each of the retain_lsn. Therefore, if the user-provided `compact_lsn_range.end` is larger than the real gc cutoff, we will use
    2409              :                 // the real cutoff.
    2410          108 :                 let mut gc_cutoff = if compact_lsn_range.end == Lsn::MAX {
    2411           96 :                     if real_gc_cutoff == Lsn::INVALID {
    2412              :                         // If the gc_cutoff is not generated yet, we should not compact anything.
    2413            0 :                         tracing::warn!("no layers to compact with gc: gc_cutoff not generated yet, skipping gc bottom-most compaction");
    2414            0 :                         return Ok(());
    2415           96 :                     }
    2416           96 :                     real_gc_cutoff
    2417              :                 } else {
    2418           12 :                     compact_lsn_range.end
    2419              :                 };
    2420          108 :                 if gc_cutoff > real_gc_cutoff {
    2421            8 :                     warn!("provided compact_lsn_range.end={} is larger than the real_gc_cutoff={}, using the real gc cutoff", gc_cutoff, real_gc_cutoff);
    2422            8 :                     gc_cutoff = real_gc_cutoff;
    2423          100 :                 }
    2424          108 :                 gc_cutoff
    2425              :             };
    2426          140 :             for (lsn, _timeline_id, _is_offloaded) in &gc_info.retain_lsns {
    2427          140 :                 if lsn < &gc_cutoff {
    2428          140 :                     retain_lsns_below_horizon.push(*lsn);
    2429          140 :                 }
    2430              :             }
    2431          108 :             for lsn in gc_info.leases.keys() {
    2432            0 :                 if lsn < &gc_cutoff {
    2433            0 :                     retain_lsns_below_horizon.push(*lsn);
    2434            0 :                 }
    2435              :             }
    2436          108 :             let mut selected_layers: Vec<Layer> = Vec::new();
    2437          108 :             drop(gc_info);
    2438              :             // First, pick all the layers that intersect with or are below the gc_cutoff, and get the largest LSN among the selected layers.
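                      :             // e.g. (illustrative): with gc_cutoff = 0x40, a delta layer covering LSN
                      :             // 0x30..0x50 starts below the cutoff and is therefore selected, making
                      :             // max_layer_lsn at least 0x50.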
    2439          108 :             let Some(max_layer_lsn) = layers
    2440          108 :                 .iter_historic_layers()
    2441          488 :                 .filter(|desc| desc.get_lsn_range().start <= gc_cutoff)
    2442          416 :                 .map(|desc| desc.get_lsn_range().end)
    2443          108 :                 .max()
    2444              :             else {
    2445            0 :                 info!("no layers to compact with gc: no historic layers below gc_cutoff, gc_cutoff={}", gc_cutoff);
    2446            0 :                 return Ok(());
    2447              :             };
    2448              :             // Next, if the user specifies compact_lsn_range.start, we need to filter some layers out. All the layers (strictly) below
    2449              :             // the min_layer_lsn computed below will be filtered out, and the data will be accessed using the normal read path, as if
    2450              :             // it were a branch.
    2451          108 :             let Some(min_layer_lsn) = layers
    2452          108 :                 .iter_historic_layers()
    2453          488 :                 .filter(|desc| {
    2454          488 :                     if compact_lsn_range.start == Lsn::INVALID {
    2455          396 :                         true // select all layers below if start == Lsn(0)
    2456              :                     } else {
    2457           92 :                         desc.get_lsn_range().end > compact_lsn_range.start // strictly larger than compact_above_lsn
    2458              :                     }
    2459          488 :                 })
    2460          452 :                 .map(|desc| desc.get_lsn_range().start)
    2461          108 :                 .min()
    2462              :             else {
    2463            0 :                 info!("no layers to compact with gc: no historic layers above compact_above_lsn, compact_above_lsn={}", compact_lsn_range.start);
    2464            0 :                 return Ok(());
    2465              :             };
    2466              :             // Then, pick all the layers that are below the max_layer_lsn. This is to ensure we can pick all single-key
    2467              :             // layers to compact.
    2468          108 :             let mut rewrite_layers = Vec::new();
    2469          488 :             for desc in layers.iter_historic_layers() {
    2470          488 :                 if desc.get_lsn_range().end <= max_layer_lsn
    2471          416 :                     && desc.get_lsn_range().start >= min_layer_lsn
    2472          380 :                     && overlaps_with(&desc.get_key_range(), &compact_key_range)
    2473              :                 {
    2474              :                     // If the layer overlaps with the compaction key range, we need to read it to obtain all keys within the range,
    2475              :                     // even if it might contain extra keys
    2476          304 :                     selected_layers.push(guard.get_from_desc(&desc));
    2477          304 :                     // If the layer is not fully contained within the key range, we need to rewrite it if it's a delta layer (it's fine
    2478          304 :                     // to overlap image layers)
    2479          304 :                     if desc.is_delta() && !fully_contains(&compact_key_range, &desc.get_key_range())
    2480            4 :                     {
    2481            4 :                         rewrite_layers.push(desc);
    2482          300 :                     }
    2483          184 :                 }
    2484              :             }
    2485          108 :             if selected_layers.is_empty() {
    2486            4 :                 info!("no layers to compact with gc: no layers within the key range, gc_cutoff={}, key_range={}..{}", gc_cutoff, compact_key_range.start, compact_key_range.end);
    2487            4 :                 return Ok(());
    2488          104 :             }
    2489          104 :             retain_lsns_below_horizon.sort();
    2490          104 :             GcCompactionJobDescription {
    2491          104 :                 selected_layers,
    2492          104 :                 gc_cutoff,
    2493          104 :                 retain_lsns_below_horizon,
    2494          104 :                 min_layer_lsn,
    2495          104 :                 max_layer_lsn,
    2496          104 :                 compaction_key_range: compact_key_range,
    2497          104 :                 rewrite_layers,
    2498          104 :             }
    2499              :         };
    2500          104 :         let (has_data_below, lowest_retain_lsn) = if compact_lsn_range.start != Lsn::INVALID {
    2501              :             // If we only compact above some LSN, we should get the history from the current branch below the specified LSN.
    2502              :             // We use job_desc.min_layer_lsn as if it's the lowest branch point.
    2503           16 :             (true, job_desc.min_layer_lsn)
    2504           88 :         } else if self.ancestor_timeline.is_some() {
    2505              :             // In theory, we can also use min_layer_lsn here, but using ancestor LSN makes sure the delta layers cover the
    2506              :             // LSN ranges all the way to the ancestor timeline.
    2507            4 :             (true, self.ancestor_lsn)
    2508              :         } else {
    2509           84 :             let res = job_desc
    2510           84 :                 .retain_lsns_below_horizon
    2511           84 :                 .first()
    2512           84 :                 .copied()
    2513           84 :                 .unwrap_or(job_desc.gc_cutoff);
    2514           84 :             if debug_mode {
    2515           84 :                 assert_eq!(
    2516           84 :                     res,
    2517           84 :                     job_desc
    2518           84 :                         .retain_lsns_below_horizon
    2519           84 :                         .iter()
    2520           84 :                         .min()
    2521           84 :                         .copied()
    2522           84 :                         .unwrap_or(job_desc.gc_cutoff)
    2523           84 :                 );
    2524            0 :             }
    2525           84 :             (false, res)
    2526              :         };
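
Condensed, the three-way decision above looks like the following sketch (simplified types; `retain_lsns_below_horizon` is assumed sorted ascending, as guaranteed by the sort() earlier):

    /// Sketch: pick (has_data_below, lowest_retain_lsn) for the compaction job.
    fn lowest_retain_lsn(
        compact_above_set: bool,           // compact_lsn_range.start != Lsn::INVALID
        ancestor_lsn: Option<u64>,         // Some(..) when the timeline has an ancestor
        min_layer_lsn: u64,
        retain_lsns_below_horizon: &[u64], // sorted ascending
        gc_cutoff: u64,
    ) -> (bool, u64) {
        if compact_above_set {
            (true, min_layer_lsn)
        } else if let Some(lsn) = ancestor_lsn {
            (true, lsn)
        } else {
            // No data below: retain down to the earliest retain LSN, or the cutoff.
            (false, retain_lsns_below_horizon.first().copied().unwrap_or(gc_cutoff))
        }
    }
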
    2527          104 :         info!(
    2528            0 :             "picked {} layers for compaction ({} layers need rewriting) with max_layer_lsn={} min_layer_lsn={} gc_cutoff={} lowest_retain_lsn={}, key_range={}..{}, has_data_below={}",
    2529            0 :             job_desc.selected_layers.len(),
    2530            0 :             job_desc.rewrite_layers.len(),
    2531              :             job_desc.max_layer_lsn,
    2532              :             job_desc.min_layer_lsn,
    2533              :             job_desc.gc_cutoff,
    2534              :             lowest_retain_lsn,
    2535              :             job_desc.compaction_key_range.start,
    2536              :             job_desc.compaction_key_range.end,
    2537              :             has_data_below,
    2538              :         );
    2539              : 
    2540          408 :         for layer in &job_desc.selected_layers {
    2541          304 :             debug!("read layer: {}", layer.layer_desc().key());
    2542              :         }
    2543          108 :         for layer in &job_desc.rewrite_layers {
    2544            4 :             debug!("rewrite layer: {}", layer.key());
    2545              :         }
    2546              : 
    2547          104 :         self.check_compaction_space(&job_desc.selected_layers)
    2548          104 :             .await?;
    2549              : 
    2550              :         // Generate statistics for the compaction
    2551          408 :         for layer in &job_desc.selected_layers {
    2552          304 :             let desc = layer.layer_desc();
    2553          304 :             if desc.is_delta() {
    2554          172 :                 stat.visit_delta_layer(desc.file_size());
    2555          172 :             } else {
    2556          132 :                 stat.visit_image_layer(desc.file_size());
    2557          132 :             }
    2558              :         }
    2559              : 
    2560              :         // Step 1: construct a k-merge iterator over all layers.
    2561              :         // Also, verify that the layer map can be split by drawing a horizontal line at every LSN range start/end split point.
    2562          104 :         let layer_names = job_desc
    2563          104 :             .selected_layers
    2564          104 :             .iter()
    2565          304 :             .map(|layer| layer.layer_desc().layer_name())
    2566          104 :             .collect_vec();
    2567          104 :         if let Some(err) = check_valid_layermap(&layer_names) {
    2568            0 :             bail!("gc-compaction layer map check failed because {}, cannot proceed with compaction due to potential data loss", err);
    2569          104 :         }
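
The "horizontal line" validity notion can be illustrated as follows; this is a simplified sketch of the idea, not the actual check_valid_layermap implementation: collect every LSN range boundary as a split point, and require that no split point falls strictly inside any layer's LSN range.

    /// Sketch: the layer map is splittable if no range straddles a split point.
    fn layermap_splittable(lsn_ranges: &[std::ops::Range<u64>]) -> bool {
        let mut points: Vec<u64> = lsn_ranges.iter().flat_map(|r| [r.start, r.end]).collect();
        points.sort_unstable();
        points.dedup();
        lsn_ranges
            .iter()
            .all(|r| points.iter().all(|&p| !(r.start < p && p < r.end)))
    }
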
    2570          104 :         // The maximum LSN we are processing in this compaction loop
    2571          104 :         let end_lsn = job_desc
    2572          104 :             .selected_layers
    2573          104 :             .iter()
    2574          304 :             .map(|l| l.layer_desc().lsn_range.end)
    2575          104 :             .max()
    2576          104 :             .unwrap();
    2577          104 :         let mut delta_layers = Vec::new();
    2578          104 :         let mut image_layers = Vec::new();
    2579          104 :         let mut downloaded_layers = Vec::new();
    2580          104 :         let mut total_downloaded_size = 0;
    2581          104 :         let mut total_layer_size = 0;
    2582          408 :         for layer in &job_desc.selected_layers {
    2583          304 :             if layer.needs_download().await?.is_some() {
    2584            0 :                 total_downloaded_size += layer.layer_desc().file_size;
    2585          304 :             }
    2586          304 :             total_layer_size += layer.layer_desc().file_size;
    2587          304 :             let resident_layer = layer.download_and_keep_resident().await?;
    2588          304 :             downloaded_layers.push(resident_layer);
    2589              :         }
    2590          104 :         info!(
    2591            0 :             "finish downloading layers, downloaded={}, total={}, ratio={:.2}",
    2592            0 :             total_downloaded_size,
    2593            0 :             total_layer_size,
    2594            0 :             total_downloaded_size as f64 / total_layer_size as f64
    2595              :         );
    2596          408 :         for resident_layer in &downloaded_layers {
    2597          304 :             if resident_layer.layer_desc().is_delta() {
    2598          172 :                 let layer = resident_layer.get_as_delta(ctx).await?;
    2599          172 :                 delta_layers.push(layer);
    2600              :             } else {
    2601          132 :                 let layer = resident_layer.get_as_image(ctx).await?;
    2602          132 :                 image_layers.push(layer);
    2603              :             }
    2604              :         }
    2605          104 :         let (dense_ks, sparse_ks) = self.collect_gc_compaction_keyspace().await?;
    2606          104 :         let mut merge_iter = FilterIterator::create(
    2607          104 :             MergeIterator::create(&delta_layers, &image_layers, ctx),
    2608          104 :             dense_ks,
    2609          104 :             sparse_ks,
    2610          104 :         )?;
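
MergeIterator performs a k-merge across the layer iterators in (key, lsn) order. As a rough, synchronous illustration of the idea (the real iterator is async and layer-aware), a k-merge over pre-sorted inputs can be driven by a min-heap:

    use std::cmp::Reverse;
    use std::collections::BinaryHeap;

    /// Sketch: merge several (key, lsn)-sorted runs into one sorted stream.
    fn k_merge(mut inputs: Vec<Vec<(u64, u64)>>) -> Vec<(u64, u64)> {
        let mut heap = BinaryHeap::new();
        for (i, input) in inputs.iter_mut().enumerate() {
            input.reverse(); // so pop() yields elements in ascending order
            if let Some(head) = input.pop() {
                heap.push(Reverse((head, i)));
            }
        }
        let mut out = Vec::new();
        while let Some(Reverse((item, i))) = heap.pop() {
            out.push(item);
            if let Some(next) = inputs[i].pop() {
                heap.push(Reverse((next, i)));
            }
        }
        out
    }
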
    2611              : 
    2612              :         // Step 2: Produce images+deltas.
    2613          104 :         let mut accumulated_values = Vec::new();
    2614          104 :         let mut last_key: Option<Key> = None;
    2615              : 
    2616              :         // Only create image layers when there are no ancestor branches. TODO: create covering image layers
    2617              :         // when certain conditions are met.
    2618          104 :         let mut image_layer_writer = if !has_data_below {
    2619              :             Some(
    2620           84 :                 SplitImageLayerWriter::new(
    2621           84 :                     self.conf,
    2622           84 :                     self.timeline_id,
    2623           84 :                     self.tenant_shard_id,
    2624           84 :                     job_desc.compaction_key_range.start,
    2625           84 :                     lowest_retain_lsn,
    2626           84 :                     self.get_compaction_target_size(),
    2627           84 :                     ctx,
    2628           84 :                 )
    2629           84 :                 .await?,
    2630              :             )
    2631              :         } else {
    2632           20 :             None
    2633              :         };
    2634              : 
    2635          104 :         let mut delta_layer_writer = SplitDeltaLayerWriter::new(
    2636          104 :             self.conf,
    2637          104 :             self.timeline_id,
    2638          104 :             self.tenant_shard_id,
    2639          104 :             lowest_retain_lsn..end_lsn,
    2640          104 :             self.get_compaction_target_size(),
    2641          104 :         )
    2642          104 :         .await?;
    2643              : 
    2644              :         #[derive(Default)]
    2645              :         struct RewritingLayers {
    2646              :             before: Option<DeltaLayerWriter>,
    2647              :             after: Option<DeltaLayerWriter>,
    2648              :         }
    2649          104 :         let mut delta_layer_rewriters = HashMap::<Arc<PersistentLayerKey>, RewritingLayers>::new();
    2650              : 
    2651              :         /// When we are not compacting the bottom range (i.e. `[0,X)`) of the root branch, we "have data below" (`has_data_below=true`).
    2652              :         /// This happens in two cases: compaction on an ancestor branch, and when `compact_lsn_range.start` is set.
    2653              :         /// In those cases, we need to pull up data from below the LSN range we're compacting.
    2654              :         ///
    2655              :         /// This function unifies the cases so that later code doesn't have to think about it.
    2656              :         ///
    2657              :         /// Currently, we always fetch the ancestor image for each key in the child branch, regardless of whether the
    2658              :         /// image is actually needed for reconstruction. This should be fixed in the future.
    2659              :         ///
    2660              :         /// Furthermore, we should do a vectored get instead of a single get per key, or better, use a k-merge for
    2661              :         /// ancestor images.
    2662         1244 :         async fn get_ancestor_image(
    2663         1244 :             this_tline: &Arc<Timeline>,
    2664         1244 :             key: Key,
    2665         1244 :             ctx: &RequestContext,
    2666         1244 :             has_data_below: bool,
    2667         1244 :             history_lsn_point: Lsn,
    2668         1244 :         ) -> anyhow::Result<Option<(Key, Lsn, Bytes)>> {
    2669         1244 :             if !has_data_below {
    2670         1168 :                 return Ok(None);
    2671           76 :             };
    2672              :             // This function is implemented as a get of the current timeline at ancestor LSN, therefore reusing
    2673              :             // as much existing code as possible.
    2674           76 :             let img = this_tline.get(key, history_lsn_point, ctx).await?;
    2675           76 :             Ok(Some((key, history_lsn_point, img)))
    2676         1244 :         }
    2677              : 
    2678              :         // Strictly speaking, we could decide not to write to the image layer at all at this point, because
    2679              :         // the key and LSN range are already determined. However, to keep things simple, we still
    2680              :         // create the writer and simply discard it at the end.
    2681              : 
    2682         1932 :         while let Some(((key, lsn, val), desc)) = merge_iter.next_with_trace().await? {
    2683         1828 :             if cancel.is_cancelled() {
    2684            0 :                 return Err(anyhow!("cancelled")); // TODO: refactor to CompactionError and pass cancel error
    2685         1828 :             }
    2686         1828 :             if self.shard_identity.is_key_disposable(&key) {
    2687              :                 // If this shard does not need to store this key, simply skip it.
    2688              :                 //
    2689              :                 // This is not handled in the filter iterator because the shard is determined by hash.
    2690              :                 // Therefore, unlike with key spaces (ranges), there is no performance benefit in doing
    2691              :                 // things like skipping a whole layer file.
    2692            0 :                 if cfg!(debug_assertions) {
    2693            0 :                     let shard = self.shard_identity.shard_index();
    2694            0 :                     let owner = self.shard_identity.get_shard_number(&key);
    2695            0 :                     panic!("key {key} does not belong on shard {shard}, owned by {owner}");
    2696            0 :                 }
    2697            0 :                 continue;
    2698         1828 :             }
    2699         1828 :             if !job_desc.compaction_key_range.contains(&key) {
    2700          128 :                 if !desc.is_delta {
    2701          120 :                     continue;
    2702            8 :                 }
    2703            8 :                 let rewriter = delta_layer_rewriters.entry(desc.clone()).or_default();
    2704            8 :                 let rewriter = if key < job_desc.compaction_key_range.start {
    2705            0 :                     if rewriter.before.is_none() {
    2706            0 :                         rewriter.before = Some(
    2707            0 :                             DeltaLayerWriter::new(
    2708            0 :                                 self.conf,
    2709            0 :                                 self.timeline_id,
    2710            0 :                                 self.tenant_shard_id,
    2711            0 :                                 desc.key_range.start,
    2712            0 :                                 desc.lsn_range.clone(),
    2713            0 :                                 ctx,
    2714            0 :                             )
    2715            0 :                             .await?,
    2716              :                         );
    2717            0 :                     }
    2718            0 :                     rewriter.before.as_mut().unwrap()
    2719            8 :                 } else if key >= job_desc.compaction_key_range.end {
    2720            8 :                     if rewriter.after.is_none() {
    2721            4 :                         rewriter.after = Some(
    2722            4 :                             DeltaLayerWriter::new(
    2723            4 :                                 self.conf,
    2724            4 :                                 self.timeline_id,
    2725            4 :                                 self.tenant_shard_id,
    2726            4 :                                 job_desc.compaction_key_range.end,
    2727            4 :                                 desc.lsn_range.clone(),
    2728            4 :                                 ctx,
    2729            4 :                             )
    2730            4 :                             .await?,
    2731              :                         );
    2732            4 :                     }
    2733            8 :                     rewriter.after.as_mut().unwrap()
    2734              :                 } else {
    2735            0 :                     unreachable!()
    2736              :                 };
    2737            8 :                 rewriter.put_value(key, lsn, val, ctx).await?;
    2738            8 :                 continue;
    2739         1700 :             }
    2740         1700 :             match val {
    2741         1220 :                 Value::Image(_) => stat.visit_image_key(&val),
    2742          480 :                 Value::WalRecord(_) => stat.visit_wal_key(&val),
    2743              :             }
    2744         1700 :             if last_key.is_none() || last_key.as_ref() == Some(&key) {
    2745          560 :                 if last_key.is_none() {
    2746          104 :                     last_key = Some(key);
    2747          456 :                 }
    2748          560 :                 accumulated_values.push((key, lsn, val));
    2749              :             } else {
    2750         1140 :                 let last_key: &mut Key = last_key.as_mut().unwrap();
    2751         1140 :                 stat.on_unique_key_visited(); // TODO: adjust statistics for partial compaction
    2752         1140 :                 let retention = self
    2753         1140 :                     .generate_key_retention(
    2754         1140 :                         *last_key,
    2755         1140 :                         &accumulated_values,
    2756         1140 :                         job_desc.gc_cutoff,
    2757         1140 :                         &job_desc.retain_lsns_below_horizon,
    2758         1140 :                         COMPACTION_DELTA_THRESHOLD,
    2759         1140 :                         get_ancestor_image(self, *last_key, ctx, has_data_below, lowest_retain_lsn)
    2760         1140 :                             .await?,
    2761              :                     )
    2762         1140 :                     .await?;
    2763         1140 :                 retention
    2764         1140 :                     .pipe_to(
    2765         1140 :                         *last_key,
    2766         1140 :                         &mut delta_layer_writer,
    2767         1140 :                         image_layer_writer.as_mut(),
    2768         1140 :                         &mut stat,
    2769         1140 :                         ctx,
    2770         1140 :                     )
    2771         1140 :                     .await?;
    2772         1140 :                 accumulated_values.clear();
    2773         1140 :                 *last_key = key;
    2774         1140 :                 accumulated_values.push((key, lsn, val));
    2775              :             }
    2776              :         }
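
Two patterns in the loop above are worth spelling out. First, the routing of out-of-range keys into the before/after rewriters reduces to a three-way comparison against the compaction key range. A sketch with Vecs standing in for DeltaLayerWriter (hypothetical types, not the pageserver's own):

    #[derive(Default)]
    struct Rewriting {
        before: Vec<(u64, u64)>, // keys below compaction_key_range.start
        after: Vec<(u64, u64)>,  // keys at or beyond compaction_key_range.end
    }

    /// Sketch: only keys outside the compaction range reach the rewriters.
    fn route_out_of_range(r: &mut Rewriting, key: u64, lsn: u64, range: &std::ops::Range<u64>) {
        if key < range.start {
            r.before.push((key, lsn));
        } else if key >= range.end {
            r.after.push((key, lsn));
        } else {
            unreachable!("in-range keys go through the normal retention path");
        }
    }
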
    2777              : 
    2778              :         // TODO: move the below part to the loop body
    2779          104 :         let last_key = last_key.expect("no keys produced during compaction");
    2780          104 :         stat.on_unique_key_visited();
    2781              : 
    2782          104 :         let retention = self
    2783          104 :             .generate_key_retention(
    2784          104 :                 last_key,
    2785          104 :                 &accumulated_values,
    2786          104 :                 job_desc.gc_cutoff,
    2787          104 :                 &job_desc.retain_lsns_below_horizon,
    2788          104 :                 COMPACTION_DELTA_THRESHOLD,
    2789          104 :                 get_ancestor_image(self, last_key, ctx, has_data_below, lowest_retain_lsn).await?,
    2790              :             )
    2791          104 :             .await?;
    2792          104 :         retention
    2793          104 :             .pipe_to(
    2794          104 :                 last_key,
    2795          104 :                 &mut delta_layer_writer,
    2796          104 :                 image_layer_writer.as_mut(),
    2797          104 :                 &mut stat,
    2798          104 :                 ctx,
    2799          104 :             )
    2800          104 :             .await?;
    2801              :         // end: move the above part to the loop body
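
Second, the accumulate-then-flush structure (including the trailing flush that the TODOs above want folded into the loop) is the classic "flush on key change" grouping over a (key, lsn)-sorted stream. A minimal sketch, with a hypothetical flush() in place of generate_key_retention plus pipe_to:

    /// Sketch: group a (key, lsn)-sorted stream by key, flushing on key change.
    fn flush_on_key_change(
        stream: Vec<(u64, u64, u64)>, // (key, lsn, value)
        mut flush: impl FnMut(u64, &[(u64, u64, u64)]),
    ) {
        let mut last_key: Option<u64> = None;
        let mut acc: Vec<(u64, u64, u64)> = Vec::new();
        for (key, lsn, val) in stream {
            if last_key.is_none() || last_key == Some(key) {
                last_key = Some(key);
            } else {
                flush(last_key.unwrap(), &acc); // retention + pipe_to in the real loop
                acc.clear();
                last_key = Some(key);
            }
            acc.push((key, lsn, val));
        }
        if let Some(key) = last_key {
            flush(key, &acc); // trailing flush, mirroring the code after the loop
        }
    }
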
    2802              : 
    2803          104 :         let mut rewrote_delta_layers = Vec::new();
    2804          108 :         for (key, writers) in delta_layer_rewriters {
    2805            4 :             if let Some(delta_writer_before) = writers.before {
    2806            0 :                 let (desc, path) = delta_writer_before
    2807            0 :                     .finish(job_desc.compaction_key_range.start, ctx)
    2808            0 :                     .await?;
    2809            0 :                 let layer = Layer::finish_creating(self.conf, self, desc, &path)?;
    2810            0 :                 rewrote_delta_layers.push(layer);
    2811            4 :             }
    2812            4 :             if let Some(delta_writer_after) = writers.after {
    2813            4 :                 let (desc, path) = delta_writer_after.finish(key.key_range.end, ctx).await?;
    2814            4 :                 let layer = Layer::finish_creating(self.conf, self, desc, &path)?;
    2815            4 :                 rewrote_delta_layers.push(layer);
    2816            0 :             }
    2817              :         }
    2818              : 
    2819          148 :         let discard = |key: &PersistentLayerKey| {
    2820          148 :             let key = key.clone();
    2821          148 :             async move { KeyHistoryRetention::discard_key(&key, self, dry_run).await }
    2822          148 :         };
    2823              : 
    2824          104 :         let produced_image_layers = if let Some(writer) = image_layer_writer {
    2825           84 :             if !dry_run {
    2826           76 :                 let end_key = job_desc.compaction_key_range.end;
    2827           76 :                 writer
    2828           76 :                     .finish_with_discard_fn(self, ctx, end_key, discard)
    2829           76 :                     .await?
    2830              :             } else {
    2831            8 :                 drop(writer);
    2832            8 :                 Vec::new()
    2833              :             }
    2834              :         } else {
    2835           20 :             Vec::new()
    2836              :         };
    2837              : 
    2838          104 :         let produced_delta_layers = if !dry_run {
    2839           96 :             delta_layer_writer
    2840           96 :                 .finish_with_discard_fn(self, ctx, discard)
    2841           96 :                 .await?
    2842              :         } else {
    2843            8 :             drop(delta_layer_writer);
    2844            8 :             Vec::new()
    2845              :         };
    2846              : 
    2847              :         // TODO: make image/delta/rewrote_delta layer generation atomic. At this point, we have already generated
    2848              :         // resident layers, and if compaction is cancelled now, some of them might not get cleaned up.
    2849          104 :         let mut compact_to = Vec::new();
    2850          104 :         let mut keep_layers = HashSet::new();
    2851          104 :         let produced_delta_layers_len = produced_delta_layers.len();
    2852          104 :         let produced_image_layers_len = produced_image_layers.len();
    2853          176 :         for action in produced_delta_layers {
    2854           72 :             match action {
    2855           44 :                 BatchWriterResult::Produced(layer) => {
    2856           44 :                     if cfg!(debug_assertions) {
    2857           44 :                         info!("produced delta layer: {}", layer.layer_desc().key());
    2858            0 :                     }
    2859           44 :                     stat.produce_delta_layer(layer.layer_desc().file_size());
    2860           44 :                     compact_to.push(layer);
    2861              :                 }
    2862           28 :                 BatchWriterResult::Discarded(l) => {
    2863           28 :                     if cfg!(debug_assertions) {
    2864           28 :                         info!("discarded delta layer: {}", l);
    2865            0 :                     }
    2866           28 :                     keep_layers.insert(l);
    2867           28 :                     stat.discard_delta_layer();
    2868              :                 }
    2869              :             }
    2870              :         }
    2871          108 :         for layer in &rewrote_delta_layers {
    2872            4 :             debug!(
    2873            0 :                 "produced rewritten delta layer: {}",
    2874            0 :                 layer.layer_desc().key()
    2875              :             );
    2876              :         }
    2877          104 :         compact_to.extend(rewrote_delta_layers);
    2878          180 :         for action in produced_image_layers {
    2879           76 :             match action {
    2880           60 :                 BatchWriterResult::Produced(layer) => {
    2881           60 :                     debug!("produced image layer: {}", layer.layer_desc().key());
    2882           60 :                     stat.produce_image_layer(layer.layer_desc().file_size());
    2883           60 :                     compact_to.push(layer);
    2884              :                 }
    2885           16 :                 BatchWriterResult::Discarded(l) => {
    2886           16 :                     debug!("discarded image layer: {}", l);
    2887           16 :                     keep_layers.insert(l);
    2888           16 :                     stat.discard_image_layer();
    2889              :                 }
    2890              :             }
    2891              :         }
    2892              : 
    2893          104 :         let mut layer_selection = job_desc.selected_layers;
    2894              : 
    2895              :         // Partial compaction might select more data than it processes, e.g., if
    2896              :         // the compaction_key_range only partially overlaps:
    2897              :         //
    2898              :         //         [---compaction_key_range---]
    2899              :         //   [---A----][----B----][----C----][----D----]
    2900              :         //
    2901              :         // For delta layers, we rewrite them so that they are cut exactly at
    2902              :         // the compaction key range, so we can always discard them. However, image
    2903              :         // layers are not rewritten for now, so we need to handle them differently.
    2904              :         // Assume image layers A, B, C, D are all in the `layer_selection`.
    2905              :         //
    2906              :         // The created image layers contain whatever is needed from B, C, and from
    2907              :         // `----]` of A, and from `[---` of D.
    2908              :         //
    2909              :         // In contrast, `[---A` and `D----]` have not been processed, so we must
    2910              :         // keep that data.
    2911              :         //
    2912              :         // The solution for now is to keep A and D completely if they are image layers.
    2913              :         // (layer_selection is what we'll remove from the layer map, so retain what
    2914              :         // is _not_ fully covered by compaction_key_range).
    2915          408 :         for layer in &layer_selection {
    2916          304 :             if !layer.layer_desc().is_delta() {
    2917          132 :                 if !overlaps_with(
    2918          132 :                     &layer.layer_desc().key_range,
    2919          132 :                     &job_desc.compaction_key_range,
    2920          132 :                 ) {
    2921            0 :                     bail!("violated constraint: image layer outside of compaction key range");
    2922          132 :                 }
    2923          132 :                 if !fully_contains(
    2924          132 :                     &job_desc.compaction_key_range,
    2925          132 :                     &layer.layer_desc().key_range,
    2926          132 :                 ) {
    2927           16 :                     keep_layers.insert(layer.layer_desc().key());
    2928          116 :                 }
    2929          172 :             }
    2930              :         }
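
The two range predicates relied on above have the usual half-open-interval semantics. Presumably (the helpers are defined elsewhere in the crate), they are equivalent to:

    /// Sketch of the presumed semantics of the helpers used above.
    fn overlaps_with<T: Ord>(a: &std::ops::Range<T>, b: &std::ops::Range<T>) -> bool {
        a.start < b.end && b.start < a.end
    }

    fn fully_contains<T: Ord>(outer: &std::ops::Range<T>, inner: &std::ops::Range<T>) -> bool {
        outer.start <= inner.start && inner.end <= outer.end
    }
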
    2931              : 
    2932          304 :         layer_selection.retain(|x| !keep_layers.contains(&x.layer_desc().key()));
    2933          104 : 
    2934          104 :         info!(
    2935            0 :             "gc-compaction statistics: {}",
    2936            0 :             serde_json::to_string(&stat)?
    2937              :         );
    2938              : 
    2939          104 :         if dry_run {
    2940            8 :             return Ok(());
    2941           96 :         }
    2942           96 : 
    2943           96 :         info!(
    2944            0 :             "produced {} delta layers and {} image layers, {} layers are kept",
    2945            0 :             produced_delta_layers_len,
    2946            0 :             produced_image_layers_len,
    2947            0 :             keep_layers.len()
    2948              :         );
    2949              : 
    2950              :         // Step 3: Place back to the layer map.
    2951              : 
    2952              :         // First, do a sanity check to ensure the newly-created layer map does not contain overlaps.
    2953           96 :         let all_layers = {
    2954           96 :             let guard = self.layers.read().await;
    2955           96 :             let layer_map = guard.layer_map()?;
    2956           96 :             layer_map.iter_historic_layers().collect_vec()
    2957           96 :         };
    2958           96 : 
    2959           96 :         let mut final_layers = all_layers
    2960           96 :             .iter()
    2961          428 :             .map(|layer| layer.layer_name())
    2962           96 :             .collect::<HashSet<_>>();
    2963          304 :         for layer in &layer_selection {
    2964          208 :             final_layers.remove(&layer.layer_desc().layer_name());
    2965          208 :         }
    2966          204 :         for layer in &compact_to {
    2967          108 :             final_layers.insert(layer.layer_desc().layer_name());
    2968          108 :         }
    2969           96 :         let final_layers = final_layers.into_iter().collect_vec();
    2970              : 
    2971              :         // TODO: move this check before we call `finish` on image layer writers. However, this will require us to get the layer name before we finish
    2972              :         // the writer, so potentially, we will need a function like `ImageLayerBatchWriter::get_all_pending_layer_keys` to get all the keys that are
    2973              :         // in the writer before finalizing the persistent layers. As it stands, we would leave some dangling layers on disk if the check fails.
    2974           96 :         if let Some(err) = check_valid_layermap(&final_layers) {
    2975            0 :             bail!("gc-compaction layer map check failed after compaction because {}, compaction result not applied to the layer map due to potential data loss", err);
    2976           96 :         }
    2977              : 
    2978              :         // Between the sanity check and this compaction update, there could be new layers being flushed, but it should be fine because we only
    2979              :         // operate on L1 layers.
    2980              :         {
    2981              :             // Gc-compaction will rewrite the history of a key. This could happen in two ways:
    2982              :             //
    2983              :             // 1. We create an image layer to replace all the deltas below the compact LSN. In this case, assume
    2984              :             // we have 2 delta layers A and B, both below the compact LSN. We create an image layer I to replace
    2985              :             // A and B at the compact LSN. If the read path finishes reading A, yields, and now we update the layer
    2986              :             // map, the read path then cannot find any keys below A, reporting a missing key error, while the key
    2987              :             // now gets stored in I at the compact LSN.
    2988              :             //
    2989              :             // ---------------                                       ---------------
    2990              :             //   delta1@LSN20                                         image1@LSN20
    2991              :             // ---------------  (read path collects delta@LSN20,  => ---------------  (read path cannot find anything
    2992              :             //   delta1@LSN10    yields)                                               below LSN 20)
    2993              :             // ---------------
    2994              :             //
    2995              :             // 2. We create a delta layer to replace all the deltas below the compact LSN, and in that delta layer,
    2996              :             // we combine the history of a key into a single image. For example, suppose we have deltas at LSN 1, 2, 3, 4,
    2997              :             // with one delta layer containing LSN 1, 2, 3 and the other containing LSN 4.
    2998              :             //
    2999              :             // We let gc-compaction combine deltas 2, 3, 4 into an image at LSN 4, which produces a delta layer that
    3000              :             // contains the delta at LSN 1 and the image at LSN 4. Suppose the read path finishes reading the original
    3001              :             // layer containing LSN 4, yields, and we then update the layer map to install the new delta layer:
    3002              :             //
    3003              :             // ---------------                                      ---------------
    3004              :             //   delta1@LSN4                                          image1@LSN4
    3005              :             // ---------------  (read path collects delta@LSN4,  => ---------------  (read path collects LSN4 and LSN1,
    3006              :             //  delta1@LSN1-3    yields)                              delta1@LSN1     which is an invalid history)
    3007              :             // ---------------                                      ---------------
    3008              :             //
    3009              :             // Therefore, the gc-compaction layer update operation should wait for all ongoing reads, block all pending reads,
    3010              :             // and only allow reads to continue after the update is finished.
    3011              : 
    3012           96 :             let update_guard = self.gc_compaction_layer_update_lock.write().await;
    3013              :             // Acquiring the update guard ensures current read operations end and new read operations are blocked.
    3014              :             // TODO: can we use `latest_gc_cutoff` Rcu to achieve the same effect?
    3015           96 :             let mut guard = self.layers.write().await;
    3016           96 :             guard
    3017           96 :                 .open_mut()?
    3018           96 :                 .finish_gc_compaction(&layer_selection, &compact_to, &self.metrics);
    3019           96 :             drop(update_guard); // Allow new reads to start ONLY after we finished updating the layer map.
    3020           96 :         };
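
The locking discipline described in the comment above is a plain writer-preference pattern: reads hold the lock shared, and the layer-map swap takes it exclusively, so it waits for in-flight reads and blocks new ones until the update is visible. A minimal sketch, assuming a tokio RwLock:

    use tokio::sync::RwLock;

    /// Sketch: apply a layer-map update with all reads drained and blocked.
    async fn update_with_reads_blocked(lock: &RwLock<()>, apply_update: impl FnOnce()) {
        let guard = lock.write().await; // waits for ongoing reads, blocks new ones
        apply_update();                 // swap old layers for new ones
        drop(guard);                    // reads resume only after the update
    }
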
    3021           96 : 
    3022           96 :         // Schedule an index-only upload to update the `latest_gc_cutoff` in the index_part.json.
    3023           96 :         // Otherwise, after restart, the index_part only contains the old `latest_gc_cutoff` and
    3024           96 :         // find_gc_cutoffs will try accessing things below the cutoff. TODO: ideally, this should
    3025           96 :         // be batched into `schedule_compaction_update`.
    3026           96 :         let disk_consistent_lsn = self.disk_consistent_lsn.load();
    3027           96 :         self.schedule_uploads(disk_consistent_lsn, None)?;
    3028              :         // If a layer gets rewritten during gc-compaction, we need to keep that layer only in `compact_to`,
    3029              :         // not in `compact_from`.
    3030           96 :         let compact_from = {
    3031           96 :             let mut compact_from = Vec::new();
    3032           96 :             let mut compact_to_set = HashMap::new();
    3033          204 :             for layer in &compact_to {
    3034          108 :                 compact_to_set.insert(layer.layer_desc().key(), layer);
    3035          108 :             }
    3036          304 :             for layer in &layer_selection {
    3037          208 :                 if let Some(to) = compact_to_set.get(&layer.layer_desc().key()) {
    3038            0 :                     tracing::info!(
    3039            0 :                         "skipping delete {} because found same layer key at different generation {}",
    3040              :                         layer, to
    3041              :                     );
    3042          208 :                 } else {
    3043          208 :                     compact_from.push(layer.clone());
    3044          208 :                 }
    3045              :             }
    3046           96 :             compact_from
    3047           96 :         };
    3048           96 :         self.remote_client
    3049           96 :             .schedule_compaction_update(&compact_from, &compact_to)?;
    3050              : 
    3051           96 :         drop(gc_lock);
    3052           96 : 
    3053           96 :         Ok(())
    3054          108 :     }
    3055              : }
    3056              : 
    3057              : struct TimelineAdaptor {
    3058              :     timeline: Arc<Timeline>,
    3059              : 
    3060              :     keyspace: (Lsn, KeySpace),
    3061              : 
    3062              :     new_deltas: Vec<ResidentLayer>,
    3063              :     new_images: Vec<ResidentLayer>,
    3064              :     layers_to_delete: Vec<Arc<PersistentLayerDesc>>,
    3065              : }
    3066              : 
    3067              : impl TimelineAdaptor {
    3068            0 :     pub fn new(timeline: &Arc<Timeline>, keyspace: (Lsn, KeySpace)) -> Self {
    3069            0 :         Self {
    3070            0 :             timeline: timeline.clone(),
    3071            0 :             keyspace,
    3072            0 :             new_images: Vec::new(),
    3073            0 :             new_deltas: Vec::new(),
    3074            0 :             layers_to_delete: Vec::new(),
    3075            0 :         }
    3076            0 :     }
    3077              : 
    3078            0 :     pub async fn flush_updates(&mut self) -> Result<(), CompactionError> {
    3079            0 :         let layers_to_delete = {
    3080            0 :             let guard = self.timeline.layers.read().await;
    3081            0 :             self.layers_to_delete
    3082            0 :                 .iter()
    3083            0 :                 .map(|x| guard.get_from_desc(x))
    3084            0 :                 .collect::<Vec<Layer>>()
    3085            0 :         };
    3086            0 :         self.timeline
    3087            0 :             .finish_compact_batch(&self.new_deltas, &self.new_images, &layers_to_delete)
    3088            0 :             .await?;
    3089              : 
    3090            0 :         self.timeline
    3091            0 :             .upload_new_image_layers(std::mem::take(&mut self.new_images))?;
    3092              : 
    3093            0 :         self.new_deltas.clear();
    3094            0 :         self.layers_to_delete.clear();
    3095            0 :         Ok(())
    3096            0 :     }
    3097              : }
    3098              : 
    3099              : #[derive(Clone)]
    3100              : struct ResidentDeltaLayer(ResidentLayer);
    3101              : #[derive(Clone)]
    3102              : struct ResidentImageLayer(ResidentLayer);
    3103              : 
    3104              : impl CompactionJobExecutor for TimelineAdaptor {
    3105              :     type Key = pageserver_api::key::Key;
    3106              : 
    3107              :     type Layer = OwnArc<PersistentLayerDesc>;
    3108              :     type DeltaLayer = ResidentDeltaLayer;
    3109              :     type ImageLayer = ResidentImageLayer;
    3110              : 
    3111              :     type RequestContext = crate::context::RequestContext;
    3112              : 
    3113            0 :     fn get_shard_identity(&self) -> &ShardIdentity {
    3114            0 :         self.timeline.get_shard_identity()
    3115            0 :     }
    3116              : 
    3117            0 :     async fn get_layers(
    3118            0 :         &mut self,
    3119            0 :         key_range: &Range<Key>,
    3120            0 :         lsn_range: &Range<Lsn>,
    3121            0 :         _ctx: &RequestContext,
    3122            0 :     ) -> anyhow::Result<Vec<OwnArc<PersistentLayerDesc>>> {
    3123            0 :         self.flush_updates().await?;
    3124              : 
    3125            0 :         let guard = self.timeline.layers.read().await;
    3126            0 :         let layer_map = guard.layer_map()?;
    3127              : 
    3128            0 :         let result = layer_map
    3129            0 :             .iter_historic_layers()
    3130            0 :             .filter(|l| {
    3131            0 :                 overlaps_with(&l.lsn_range, lsn_range) && overlaps_with(&l.key_range, key_range)
    3132            0 :             })
    3133            0 :             .map(OwnArc)
    3134            0 :             .collect();
    3135            0 :         Ok(result)
    3136            0 :     }
    3137              : 
    3138            0 :     async fn get_keyspace(
    3139            0 :         &mut self,
    3140            0 :         key_range: &Range<Key>,
    3141            0 :         lsn: Lsn,
    3142            0 :         _ctx: &RequestContext,
    3143            0 :     ) -> anyhow::Result<Vec<Range<Key>>> {
    3144            0 :         if lsn == self.keyspace.0 {
    3145            0 :             Ok(pageserver_compaction::helpers::intersect_keyspace(
    3146            0 :                 &self.keyspace.1.ranges,
    3147            0 :                 key_range,
    3148            0 :             ))
    3149              :         } else {
    3150              :             // The current compaction implementation only ever requests the key space
    3151              :             // at the compaction end LSN.
    3152            0 :             anyhow::bail!("keyspace not available for requested lsn");
    3153              :         }
    3154            0 :     }
    3155              : 
    3156            0 :     async fn downcast_delta_layer(
    3157            0 :         &self,
    3158            0 :         layer: &OwnArc<PersistentLayerDesc>,
    3159            0 :     ) -> anyhow::Result<Option<ResidentDeltaLayer>> {
    3160            0 :         // this is a lot more complex than a simple downcast...
    3161            0 :         if layer.is_delta() {
    3162            0 :             let l = {
    3163            0 :                 let guard = self.timeline.layers.read().await;
    3164            0 :                 guard.get_from_desc(layer)
    3165              :             };
    3166            0 :             let result = l.download_and_keep_resident().await?;
    3167              : 
    3168            0 :             Ok(Some(ResidentDeltaLayer(result)))
    3169              :         } else {
    3170            0 :             Ok(None)
    3171              :         }
    3172            0 :     }
    3173              : 
    3174            0 :     async fn create_image(
    3175            0 :         &mut self,
    3176            0 :         lsn: Lsn,
    3177            0 :         key_range: &Range<Key>,
    3178            0 :         ctx: &RequestContext,
    3179            0 :     ) -> anyhow::Result<()> {
    3180            0 :         Ok(self.create_image_impl(lsn, key_range, ctx).await?)
    3181            0 :     }
    3182              : 
    3183            0 :     async fn create_delta(
    3184            0 :         &mut self,
    3185            0 :         lsn_range: &Range<Lsn>,
    3186            0 :         key_range: &Range<Key>,
    3187            0 :         input_layers: &[ResidentDeltaLayer],
    3188            0 :         ctx: &RequestContext,
    3189            0 :     ) -> anyhow::Result<()> {
    3190            0 :         debug!("Create new layer {}..{}", lsn_range.start, lsn_range.end);
    3191              : 
    3192            0 :         let mut all_entries = Vec::new();
    3193            0 :         for dl in input_layers.iter() {
    3194            0 :             all_entries.extend(dl.load_keys(ctx).await?);
    3195              :         }
    3196              : 
    3197              :         // The current stdlib sort implementation is particularly fast when the
    3198              :         // slice is made up of already-sorted sub-ranges.
    3199            0 :         all_entries.sort_by_key(|DeltaEntry { key, lsn, .. }| (*key, *lsn));
    3200              : 
    3201            0 :         let mut writer = DeltaLayerWriter::new(
    3202            0 :             self.timeline.conf,
    3203            0 :             self.timeline.timeline_id,
    3204            0 :             self.timeline.tenant_shard_id,
    3205            0 :             key_range.start,
    3206            0 :             lsn_range.clone(),
    3207            0 :             ctx,
    3208            0 :         )
    3209            0 :         .await?;
    3210              : 
    3211            0 :         let mut dup_values = 0;
    3212            0 : 
    3213            0 :         // This iterator walks through all key-value pairs from all the layers
    3214            0 :         // we're compacting, in key, LSN order.
    3215            0 :         let mut prev: Option<(Key, Lsn)> = None;
    3216              :         for &DeltaEntry {
    3217            0 :             key, lsn, ref val, ..
    3218            0 :         } in all_entries.iter()
    3219              :         {
    3220            0 :             if prev == Some((key, lsn)) {
    3221              :                 // This is a duplicate. Skip it.
    3222              :                 //
    3223              :                 // It can happen if compaction is interrupted after writing some
    3224              :                 // layers but not all, and we are compacting the range again.
    3225              :                 // The calculations in the algorithm assume that there are no
    3226              :                 // duplicates, so the math on targeted file size is likely off,
    3227              :                 // and we will create smaller files than expected.
    3228            0 :                 dup_values += 1;
    3229            0 :                 continue;
    3230            0 :             }
    3231              : 
    3232            0 :             let value = val.load(ctx).await?;
    3233              : 
    3234            0 :             writer.put_value(key, lsn, value, ctx).await?;
    3235              : 
    3236            0 :             prev = Some((key, lsn));
    3237              :         }
    3238              : 
    3239            0 :         if dup_values > 0 {
    3240            0 :             warn!("delta layer created with {} duplicate values", dup_values);
    3241            0 :         }
    3242              : 
    3243            0 :         fail_point!("delta-layer-writer-fail-before-finish", |_| {
    3244            0 :             Err(anyhow::anyhow!(
    3245            0 :                 "failpoint delta-layer-writer-fail-before-finish"
    3246            0 :             ))
    3247            0 :         });
    3248              : 
    3249            0 :         let (desc, path) = writer.finish(prev.unwrap().0.next(), ctx).await?;
    3250            0 :         let new_delta_layer =
    3251            0 :             Layer::finish_creating(self.timeline.conf, &self.timeline, desc, &path)?;
    3252              : 
    3253            0 :         self.new_deltas.push(new_delta_layer);
    3254            0 :         Ok(())
    3255            0 :     }
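
The duplicate handling above boils down to: on a slice sorted by (key, lsn), keep only the first occurrence of each pair. As a standalone sketch:

    /// Sketch: drop repeated (key, lsn) pairs from a sorted slice.
    fn dedup_sorted(entries: &[(u64, u64)]) -> Vec<(u64, u64)> {
        let mut out = Vec::new();
        let mut prev: Option<(u64, u64)> = None;
        for &(key, lsn) in entries {
            if prev == Some((key, lsn)) {
                continue; // duplicate left behind by an interrupted earlier compaction
            }
            out.push((key, lsn));
            prev = Some((key, lsn));
        }
        out
    }
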
    3256              : 
    3257            0 :     async fn delete_layer(
    3258            0 :         &mut self,
    3259            0 :         layer: &OwnArc<PersistentLayerDesc>,
    3260            0 :         _ctx: &RequestContext,
    3261            0 :     ) -> anyhow::Result<()> {
    3262            0 :         self.layers_to_delete.push(layer.clone().0);
    3263            0 :         Ok(())
    3264            0 :     }
    3265              : }
    3266              : 
    3267              : impl TimelineAdaptor {
    3268            0 :     async fn create_image_impl(
    3269            0 :         &mut self,
    3270            0 :         lsn: Lsn,
    3271            0 :         key_range: &Range<Key>,
    3272            0 :         ctx: &RequestContext,
    3273            0 :     ) -> Result<(), CreateImageLayersError> {
    3274            0 :         let timer = self.timeline.metrics.create_images_time_histo.start_timer();
    3275              : 
    3276            0 :         let image_layer_writer = ImageLayerWriter::new(
    3277            0 :             self.timeline.conf,
    3278            0 :             self.timeline.timeline_id,
    3279            0 :             self.timeline.tenant_shard_id,
    3280            0 :             key_range,
    3281            0 :             lsn,
    3282            0 :             ctx,
    3283            0 :         )
    3284            0 :         .await?;
    3285              : 
    3286            0 :         fail_point!("image-layer-writer-fail-before-finish", |_| {
    3287            0 :             Err(CreateImageLayersError::Other(anyhow::anyhow!(
    3288            0 :                 "failpoint image-layer-writer-fail-before-finish"
    3289            0 :             )))
    3290            0 :         });
    3291              : 
    3292            0 :         let keyspace = KeySpace {
    3293            0 :             ranges: self.get_keyspace(key_range, lsn, ctx).await?,
    3294              :         };
    3295              :         // TODO set proper (stateful) start. The create_image_layer_for_rel_blocks function mostly
    3296            0 :         let outcome = self
    3297            0 :             .timeline
    3298            0 :             .create_image_layer_for_rel_blocks(
    3299            0 :                 &keyspace,
    3300            0 :                 image_layer_writer,
    3301            0 :                 lsn,
    3302            0 :                 ctx,
    3303            0 :                 key_range.clone(),
    3304            0 :                 IoConcurrency::sequential(),
    3305            0 :             )
    3306            0 :             .await?;
    3307              : 
    3308              :         if let ImageLayerCreationOutcome::Generated {
    3309            0 :             unfinished_image_layer,
    3310            0 :         } = outcome
    3311              :         {
    3312            0 :             let (desc, path) = unfinished_image_layer.finish(ctx).await?;
    3313            0 :             let image_layer =
    3314            0 :                 Layer::finish_creating(self.timeline.conf, &self.timeline, desc, &path)?;
    3315            0 :             self.new_images.push(image_layer);
    3316            0 :         }
    3317              : 
    3318            0 :         timer.stop_and_record();
    3319            0 : 
    3320            0 :         Ok(())
    3321            0 :     }
    3322              : }
    3323              : 
    3324              : impl CompactionRequestContext for crate::context::RequestContext {}
    3325              : 
    3326              : #[derive(Debug, Clone)]
    3327              : pub struct OwnArc<T>(pub Arc<T>);
    3328              : 
    3329              : impl<T> Deref for OwnArc<T> {
    3330              :     type Target = <Arc<T> as Deref>::Target;
    3331            0 :     fn deref(&self) -> &Self::Target {
    3332            0 :         &self.0
    3333            0 :     }
    3334              : }
    3335              : 
    3336              : impl<T> AsRef<T> for OwnArc<T> {
    3337            0 :     fn as_ref(&self) -> &T {
    3338            0 :         self.0.as_ref()
    3339            0 :     }
    3340              : }
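
OwnArc is the standard newtype-over-Arc workaround for the orphan rule: the compaction crate's traits cannot be implemented for Arc<T> directly (Arc is not a fundamental type), but they can be implemented for a local wrapper, and Deref keeps the inner value's methods accessible. A self-contained sketch of the pattern, with hypothetical names:

    use std::{ops::Deref, sync::Arc};

    // Local wrapper: foreign traits can be implemented for this type.
    struct Own<T>(Arc<T>);

    impl<T> Deref for Own<T> {
        type Target = T;
        fn deref(&self) -> &T {
            &self.0
        }
    }

    fn main() {
        let v = Own(Arc::new(vec![1, 2, 3]));
        assert_eq!(v.len(), 3); // Vec::len reached through Deref coercion
    }
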
    3341              : 
    3342              : impl CompactionLayer<Key> for OwnArc<PersistentLayerDesc> {
    3343            0 :     fn key_range(&self) -> &Range<Key> {
    3344            0 :         &self.key_range
    3345            0 :     }
    3346            0 :     fn lsn_range(&self) -> &Range<Lsn> {
    3347            0 :         &self.lsn_range
    3348            0 :     }
    3349            0 :     fn file_size(&self) -> u64 {
    3350            0 :         self.file_size
    3351            0 :     }
    3352            0 :     fn short_id(&self) -> std::string::String {
    3353            0 :         self.as_ref().short_id().to_string()
    3354            0 :     }
    3355            0 :     fn is_delta(&self) -> bool {
    3356            0 :         self.as_ref().is_delta()
    3357            0 :     }
    3358              : }
    3359              : 
    3360              : impl CompactionLayer<Key> for OwnArc<DeltaLayer> {
    3361            0 :     fn key_range(&self) -> &Range<Key> {
    3362            0 :         &self.layer_desc().key_range
    3363            0 :     }
    3364            0 :     fn lsn_range(&self) -> &Range<Lsn> {
    3365            0 :         &self.layer_desc().lsn_range
    3366            0 :     }
    3367            0 :     fn file_size(&self) -> u64 {
    3368            0 :         self.layer_desc().file_size
    3369            0 :     }
    3370            0 :     fn short_id(&self) -> std::string::String {
    3371            0 :         self.layer_desc().short_id().to_string()
    3372            0 :     }
    3373            0 :     fn is_delta(&self) -> bool {
    3374            0 :         true
    3375            0 :     }
    3376              : }
    3377              : 
    3378              : use crate::tenant::timeline::DeltaEntry;
    3379              : 
    3380              : impl CompactionLayer<Key> for ResidentDeltaLayer {
    3381            0 :     fn key_range(&self) -> &Range<Key> {
    3382            0 :         &self.0.layer_desc().key_range
    3383            0 :     }
    3384            0 :     fn lsn_range(&self) -> &Range<Lsn> {
    3385            0 :         &self.0.layer_desc().lsn_range
    3386            0 :     }
    3387            0 :     fn file_size(&self) -> u64 {
    3388            0 :         self.0.layer_desc().file_size
    3389            0 :     }
    3390            0 :     fn short_id(&self) -> std::string::String {
    3391            0 :         self.0.layer_desc().short_id().to_string()
    3392            0 :     }
    3393            0 :     fn is_delta(&self) -> bool {
    3394            0 :         true
    3395            0 :     }
    3396              : }
    3397              : 
    3398              : impl CompactionDeltaLayer<TimelineAdaptor> for ResidentDeltaLayer {
    3399              :     type DeltaEntry<'a> = DeltaEntry<'a>;
    3400              : 
    3401            0 :     async fn load_keys(&self, ctx: &RequestContext) -> anyhow::Result<Vec<DeltaEntry<'_>>> {
    3402            0 :         self.0.get_as_delta(ctx).await?.index_entries(ctx).await
    3403            0 :     }
    3404              : }
    3405              : 
    3406              : impl CompactionLayer<Key> for ResidentImageLayer {
    3407            0 :     fn key_range(&self) -> &Range<Key> {
    3408            0 :         &self.0.layer_desc().key_range
    3409            0 :     }
    3410            0 :     fn lsn_range(&self) -> &Range<Lsn> {
    3411            0 :         &self.0.layer_desc().lsn_range
    3412            0 :     }
    3413            0 :     fn file_size(&self) -> u64 {
    3414            0 :         self.0.layer_desc().file_size
    3415            0 :     }
    3416            0 :     fn short_id(&self) -> std::string::String {
    3417            0 :         self.0.layer_desc().short_id().to_string()
    3418            0 :     }
    3419            0 :     fn is_delta(&self) -> bool {
    3420            0 :         false
    3421            0 :     }
    3422              : }
    3423              : impl CompactionImageLayer<TimelineAdaptor> for ResidentImageLayer {}
        

Generated by: LCOV version 2.1-beta