LCOV - code coverage report
Current view: top level - pageserver/src/tenant/timeline - compaction.rs (source / functions) Coverage Total Hit
Test: 37bd82a80da9937a25818120dcf8e865ea9f7fd2.info Lines: 52.8 % 2718 1434
Test Date: 2025-04-11 14:30:22 Functions: 37.9 % 174 66

            Line data    Source code
       1              : //! New compaction implementation. The algorithm itself is implemented in the
       2              : //! compaction crate. This file implements the callbacks and structs that allow
       3              : //! the algorithm to drive the process.
       4              : //!
       5              : //! The old legacy algorithm is implemented directly in `timeline.rs`.
       6              : 
       7              : use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque};
       8              : use std::ops::{Deref, Range};
       9              : use std::sync::Arc;
      10              : use std::time::{Duration, Instant};
      11              : 
      12              : use super::layer_manager::LayerManager;
      13              : use super::{
      14              :     CompactFlags, CompactOptions, CompactionError, CreateImageLayersError, DurationRecorder,
      15              :     GetVectoredError, ImageLayerCreationMode, LastImageLayerCreationStatus, RecordedDuration,
      16              :     Timeline,
      17              : };
      18              : 
      19              : use crate::tenant::timeline::DeltaEntry;
      20              : use crate::walredo::RedoAttemptType;
      21              : use anyhow::{Context, anyhow};
      22              : use bytes::Bytes;
      23              : use enumset::EnumSet;
      24              : use fail::fail_point;
      25              : use futures::FutureExt;
      26              : use itertools::Itertools;
      27              : use once_cell::sync::Lazy;
      28              : use pageserver_api::config::tenant_conf_defaults::DEFAULT_CHECKPOINT_DISTANCE;
      29              : use pageserver_api::key::{KEY_SIZE, Key};
      30              : use pageserver_api::keyspace::{KeySpace, ShardedRange};
      31              : use pageserver_api::models::{CompactInfoResponse, CompactKeyRange};
      32              : use pageserver_api::record::NeonWalRecord;
      33              : use pageserver_api::shard::{ShardCount, ShardIdentity, TenantShardId};
      34              : use pageserver_api::value::Value;
      35              : use pageserver_compaction::helpers::{fully_contains, overlaps_with};
      36              : use pageserver_compaction::interface::*;
      37              : use serde::Serialize;
      38              : use tokio::sync::{OwnedSemaphorePermit, Semaphore};
      39              : use tokio_util::sync::CancellationToken;
      40              : use tracing::{Instrument, debug, error, info, info_span, trace, warn};
      41              : use utils::critical;
      42              : use utils::id::TimelineId;
      43              : use utils::lsn::Lsn;
      44              : 
      45              : use crate::context::{AccessStatsBehavior, RequestContext, RequestContextBuilder};
      46              : use crate::page_cache;
      47              : use crate::statvfs::Statvfs;
      48              : use crate::tenant::checks::check_valid_layermap;
      49              : use crate::tenant::gc_block::GcBlock;
      50              : use crate::tenant::layer_map::LayerMap;
      51              : use crate::tenant::remote_timeline_client::WaitCompletionError;
      52              : use crate::tenant::remote_timeline_client::index::GcCompactionState;
      53              : use crate::tenant::storage_layer::batch_split_writer::{
      54              :     BatchWriterResult, SplitDeltaLayerWriter, SplitImageLayerWriter,
      55              : };
      56              : use crate::tenant::storage_layer::filter_iterator::FilterIterator;
      57              : use crate::tenant::storage_layer::merge_iterator::MergeIterator;
      58              : use crate::tenant::storage_layer::{
      59              :     AsLayerDesc, PersistentLayerDesc, PersistentLayerKey, ValueReconstructState,
      60              : };
      61              : use crate::tenant::tasks::log_compaction_error;
      62              : use crate::tenant::timeline::{
      63              :     DeltaLayerWriter, ImageLayerCreationOutcome, ImageLayerWriter, IoConcurrency, Layer,
      64              :     ResidentLayer, drop_rlock,
      65              : };
      66              : use crate::tenant::{DeltaLayer, MaybeOffloaded};
      67              : use crate::virtual_file::{MaybeFatalIo, VirtualFile};
      68              : 
      69              : /// Maximum number of deltas before generating an image layer in bottom-most compaction.
      70              : const COMPACTION_DELTA_THRESHOLD: usize = 5;
      71              : 
      72              : #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
      73              : pub struct GcCompactionJobId(pub usize);
      74              : 
      75              : impl std::fmt::Display for GcCompactionJobId {
      76            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
      77            0 :         write!(f, "{}", self.0)
      78            0 :     }
      79              : }
      80              : 
      81              : pub struct GcCompactionCombinedSettings {
      82              :     pub gc_compaction_enabled: bool,
      83              :     pub gc_compaction_initial_threshold_kb: u64,
      84              :     pub gc_compaction_ratio_percent: u64,
      85              : }
      86              : 
      87              : #[derive(Debug, Clone)]
      88              : pub enum GcCompactionQueueItem {
      89              :     MetaJob {
      90              :         /// Compaction options
      91              :         options: CompactOptions,
      92              :         /// Whether the compaction is triggered automatically (determines whether we need to update L2 LSN)
      93              :         auto: bool,
      94              :     },
      95              :     SubCompactionJob(CompactOptions),
      96              :     Notify(GcCompactionJobId, Option<Lsn>),
      97              : }
      98              : 
      99              : impl GcCompactionQueueItem {
     100            0 :     pub fn into_compact_info_resp(
     101            0 :         self,
     102            0 :         id: GcCompactionJobId,
     103            0 :         running: bool,
     104            0 :     ) -> Option<CompactInfoResponse> {
     105            0 :         match self {
     106            0 :             GcCompactionQueueItem::MetaJob { options, .. } => Some(CompactInfoResponse {
     107            0 :                 compact_key_range: options.compact_key_range,
     108            0 :                 compact_lsn_range: options.compact_lsn_range,
     109            0 :                 sub_compaction: options.sub_compaction,
     110            0 :                 running,
     111            0 :                 job_id: id.0,
     112            0 :             }),
     113            0 :             GcCompactionQueueItem::SubCompactionJob(options) => Some(CompactInfoResponse {
     114            0 :                 compact_key_range: options.compact_key_range,
     115            0 :                 compact_lsn_range: options.compact_lsn_range,
     116            0 :                 sub_compaction: options.sub_compaction,
     117            0 :                 running,
     118            0 :                 job_id: id.0,
     119            0 :             }),
     120            0 :             GcCompactionQueueItem::Notify(_, _) => None,
     121              :         }
     122            0 :     }
     123              : }
     124              : 
     125              : #[derive(Default)]
     126              : struct GcCompactionGuardItems {
     127              :     notify: Option<tokio::sync::oneshot::Sender<()>>,
     128              :     permit: Option<OwnedSemaphorePermit>,
     129              : }
     130              : 
     131              : struct GcCompactionQueueInner {
     132              :     running: Option<(GcCompactionJobId, GcCompactionQueueItem)>,
     133              :     queued: VecDeque<(GcCompactionJobId, GcCompactionQueueItem)>,
     134              :     guards: HashMap<GcCompactionJobId, GcCompactionGuardItems>,
     135              :     last_id: GcCompactionJobId,
     136              : }
     137              : 
     138              : impl GcCompactionQueueInner {
     139            0 :     fn next_id(&mut self) -> GcCompactionJobId {
     140            0 :         let id = self.last_id;
     141            0 :         self.last_id = GcCompactionJobId(id.0 + 1);
     142            0 :         id
     143            0 :     }
     144              : }
     145              : 
     146              : /// A structure to store gc_compaction jobs.
     147              : pub struct GcCompactionQueue {
     148              :     /// All items in the queue, and the currently-running job.
     149              :     inner: std::sync::Mutex<GcCompactionQueueInner>,
     150              :     /// Ensure only one thread is consuming the queue.
     151              :     consumer_lock: tokio::sync::Mutex<()>,
     152              : }
     153              : 
     154            0 : static CONCURRENT_GC_COMPACTION_TASKS: Lazy<Arc<Semaphore>> = Lazy::new(|| {
     155            0 :     // Only allow two timelines on one pageserver to run gc compaction at a time.
     156            0 :     Arc::new(Semaphore::new(2))
     157            0 : });
     158              : 
     159              : impl GcCompactionQueue {
     160            0 :     pub fn new() -> Self {
     161            0 :         GcCompactionQueue {
     162            0 :             inner: std::sync::Mutex::new(GcCompactionQueueInner {
     163            0 :                 running: None,
     164            0 :                 queued: VecDeque::new(),
     165            0 :                 guards: HashMap::new(),
     166            0 :                 last_id: GcCompactionJobId(0),
     167            0 :             }),
     168            0 :             consumer_lock: tokio::sync::Mutex::new(()),
     169            0 :         }
     170            0 :     }
     171              : 
     172            0 :     pub fn cancel_scheduled(&self) {
     173            0 :         let mut guard = self.inner.lock().unwrap();
     174            0 :         guard.queued.clear();
     175            0 :         // TODO: if there is a running job, we should keep the gc guard. However, currently, the cancel
     176            0 :         // API is only used for testing purposes, so we can drop everything here.
     177            0 :         guard.guards.clear();
     178            0 :     }
     179              : 
     180              :     /// Schedule a manual compaction job.
     181            0 :     pub fn schedule_manual_compaction(
     182            0 :         &self,
     183            0 :         options: CompactOptions,
     184            0 :         notify: Option<tokio::sync::oneshot::Sender<()>>,
     185            0 :     ) -> GcCompactionJobId {
     186            0 :         let mut guard = self.inner.lock().unwrap();
     187            0 :         let id = guard.next_id();
     188            0 :         guard.queued.push_back((
     189            0 :             id,
     190            0 :             GcCompactionQueueItem::MetaJob {
     191            0 :                 options,
     192            0 :                 auto: false,
     193            0 :             },
     194            0 :         ));
     195            0 :         guard.guards.entry(id).or_default().notify = notify;
     196            0 :         info!("scheduled compaction job id={}", id);
     197            0 :         id
     198            0 :     }
     199              : 
     200              :     /// Schedule an auto compaction job.
     201            0 :     fn schedule_auto_compaction(
     202            0 :         &self,
     203            0 :         options: CompactOptions,
     204            0 :         permit: OwnedSemaphorePermit,
     205            0 :     ) -> GcCompactionJobId {
     206            0 :         let mut guard = self.inner.lock().unwrap();
     207            0 :         let id = guard.next_id();
     208            0 :         guard.queued.push_back((
     209            0 :             id,
     210            0 :             GcCompactionQueueItem::MetaJob {
     211            0 :                 options,
     212            0 :                 auto: true,
     213            0 :             },
     214            0 :         ));
     215            0 :         guard.guards.entry(id).or_default().permit = Some(permit);
     216            0 :         id
     217            0 :     }
     218              : 
     219              :     /// Trigger an auto compaction.
     220            0 :     pub async fn trigger_auto_compaction(
     221            0 :         &self,
     222            0 :         timeline: &Arc<Timeline>,
     223            0 :     ) -> Result<(), CompactionError> {
     224            0 :         let GcCompactionCombinedSettings {
     225            0 :             gc_compaction_enabled,
     226            0 :             gc_compaction_initial_threshold_kb,
     227            0 :             gc_compaction_ratio_percent,
     228            0 :         } = timeline.get_gc_compaction_settings();
     229            0 :         if !gc_compaction_enabled {
     230            0 :             return Ok(());
     231            0 :         }
     232            0 :         if self.remaining_jobs_num() > 0 {
     233              :             // Only schedule auto compaction when the queue is empty
     234            0 :             return Ok(());
     235            0 :         }
     236            0 :         if timeline.ancestor_timeline().is_some() {
     237              :             // Do not trigger auto compaction for child timelines. We haven't tested
     238              :             // it enough in staging yet.
     239            0 :             return Ok(());
     240            0 :         }
     241            0 :         if timeline.get_gc_compaction_watermark() == Lsn::INVALID {
     242              :             // If the gc watermark is not set, we don't need to trigger auto compaction.
     243              :             // This check is the same as in `gc_compaction_split_jobs` but we don't log
     244              :             // here and we can also skip the computation of the trigger condition earlier.
     245            0 :             return Ok(());
     246            0 :         }
     247              : 
     248            0 :         let Ok(permit) = CONCURRENT_GC_COMPACTION_TASKS.clone().try_acquire_owned() else {
     249              :             // Only allow one compaction run at a time. TODO: As we do `try_acquire_owned`, we cannot ensure
     250              :             // the fairness of the lock across timelines. We should listen for both `acquire` and `l0_compaction_trigger`
     251              :             // to ensure the fairness while avoid starving other tasks.
     252            0 :             return Ok(());
     253              :         };
     254              : 
     255            0 :         let gc_compaction_state = timeline.get_gc_compaction_state();
     256            0 :         let l2_lsn = gc_compaction_state
     257            0 :             .map(|x| x.last_completed_lsn)
     258            0 :             .unwrap_or(Lsn::INVALID);
     259              : 
     260            0 :         let layers = {
     261            0 :             let guard = timeline.layers.read().await;
     262            0 :             let layer_map = guard.layer_map()?;
     263            0 :             layer_map.iter_historic_layers().collect_vec()
     264            0 :         };
     265            0 :         let mut l2_size: u64 = 0;
     266            0 :         let mut l1_size = 0;
     267            0 :         let gc_cutoff = *timeline.get_applied_gc_cutoff_lsn();
     268            0 :         for layer in layers {
     269            0 :             if layer.lsn_range.start <= l2_lsn {
     270            0 :                 l2_size += layer.file_size();
     271            0 :             } else if layer.lsn_range.start <= gc_cutoff {
     272            0 :                 l1_size += layer.file_size();
     273            0 :             }
     274              :         }
     275              : 
     276            0 :         fn trigger_compaction(
     277            0 :             l1_size: u64,
     278            0 :             l2_size: u64,
     279            0 :             gc_compaction_initial_threshold_kb: u64,
     280            0 :             gc_compaction_ratio_percent: u64,
     281            0 :         ) -> bool {
     282              :             const AUTO_TRIGGER_LIMIT: u64 = 150 * 1024 * 1024 * 1024; // 150GB
     283            0 :             if l1_size + l2_size >= AUTO_TRIGGER_LIMIT {
     284              :                 // Do not auto-trigger when physical size >= 150GB
     285            0 :                 return false;
     286            0 :             }
     287            0 :             // initial trigger
     288            0 :             if l2_size == 0 && l1_size >= gc_compaction_initial_threshold_kb * 1024 {
     289            0 :                 info!(
     290            0 :                     "trigger auto-compaction because l1_size={} >= gc_compaction_initial_threshold_kb={}",
     291              :                     l1_size, gc_compaction_initial_threshold_kb
     292              :                 );
     293            0 :                 return true;
     294            0 :             }
     295            0 :             // size ratio trigger
     296            0 :             if l2_size == 0 {
     297            0 :                 return false;
     298            0 :             }
     299            0 :             if l1_size as f64 / l2_size as f64 >= (gc_compaction_ratio_percent as f64 / 100.0) {
     300            0 :                 info!(
     301            0 :                     "trigger auto-compaction because l1_size={} / l2_size={} > gc_compaction_ratio_percent={}",
     302              :                     l1_size, l2_size, gc_compaction_ratio_percent
     303              :                 );
     304            0 :                 return true;
     305            0 :             }
     306            0 :             false
     307            0 :         }
     308              : 
     309            0 :         if trigger_compaction(
     310            0 :             l1_size,
     311            0 :             l2_size,
     312            0 :             gc_compaction_initial_threshold_kb,
     313            0 :             gc_compaction_ratio_percent,
     314            0 :         ) {
     315            0 :             self.schedule_auto_compaction(
     316            0 :                 CompactOptions {
     317            0 :                     flags: {
     318            0 :                         let mut flags = EnumSet::new();
     319            0 :                         flags |= CompactFlags::EnhancedGcBottomMostCompaction;
     320            0 :                         if timeline.get_compaction_l0_first() {
     321            0 :                             flags |= CompactFlags::YieldForL0;
     322            0 :                         }
     323            0 :                         flags
     324            0 :                     },
     325            0 :                     sub_compaction: true,
     326            0 :                     // Only auto-trigger gc-compaction over the data keyspace due to concerns in
     327            0 :                     // https://github.com/neondatabase/neon/issues/11318.
     328            0 :                     compact_key_range: Some(CompactKeyRange {
     329            0 :                         start: Key::MIN,
     330            0 :                         end: Key::metadata_key_range().start,
     331            0 :                     }),
     332            0 :                     compact_lsn_range: None,
     333            0 :                     sub_compaction_max_job_size_mb: None,
     334            0 :                 },
     335            0 :                 permit,
     336            0 :             );
     337            0 :             info!(
     338            0 :                 "scheduled auto gc-compaction: l1_size={}, l2_size={}, l2_lsn={}, gc_cutoff={}",
     339              :                 l1_size, l2_size, l2_lsn, gc_cutoff
     340              :             );
     341              :         } else {
     342            0 :             debug!(
     343            0 :                 "did not trigger auto gc-compaction: l1_size={}, l2_size={}, l2_lsn={}, gc_cutoff={}",
     344              :                 l1_size, l2_size, l2_lsn, gc_cutoff
     345              :             );
     346              :         }
     347            0 :         Ok(())
     348            0 :     }
     349              : 
     350              :     /// Notify the caller the job has finished and unblock GC.
     351            0 :     fn notify_and_unblock(&self, id: GcCompactionJobId) {
     352            0 :         info!("compaction job id={} finished", id);
     353            0 :         let mut guard = self.inner.lock().unwrap();
     354            0 :         if let Some(items) = guard.guards.remove(&id) {
     355            0 :             if let Some(tx) = items.notify {
     356            0 :                 let _ = tx.send(());
     357            0 :             }
     358            0 :         }
     359            0 :     }
     360              : 
     361            0 :     fn clear_running_job(&self) {
     362            0 :         let mut guard = self.inner.lock().unwrap();
     363            0 :         guard.running = None;
     364            0 :     }
     365              : 
     366            0 :     async fn handle_sub_compaction(
     367            0 :         &self,
     368            0 :         id: GcCompactionJobId,
     369            0 :         options: CompactOptions,
     370            0 :         timeline: &Arc<Timeline>,
     371            0 :         auto: bool,
     372            0 :     ) -> Result<(), CompactionError> {
     373            0 :         info!(
     374            0 :             "running scheduled enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs"
     375              :         );
     376            0 :         let res = timeline
     377            0 :             .gc_compaction_split_jobs(
     378            0 :                 GcCompactJob::from_compact_options(options.clone()),
     379            0 :                 options.sub_compaction_max_job_size_mb,
     380            0 :             )
     381            0 :             .await;
     382            0 :         let jobs = match res {
     383            0 :             Ok(jobs) => jobs,
     384            0 :             Err(err) => {
     385            0 :                 warn!("cannot split gc-compaction jobs: {}, unblocked gc", err);
     386            0 :                 self.notify_and_unblock(id);
     387            0 :                 return Err(err);
     388              :             }
     389              :         };
     390            0 :         if jobs.is_empty() {
     391            0 :             info!("no jobs to run, skipping scheduled compaction task");
     392            0 :             self.notify_and_unblock(id);
     393              :         } else {
     394            0 :             let jobs_len = jobs.len();
     395            0 :             let mut pending_tasks = Vec::new();
     396            0 :             // gc-compaction might pick more layers or fewer layers to compact. The L2 LSN does not need to be accurate.
     397            0 :             // And therefore, we simply assume the maximum LSN of all jobs is the expected L2 LSN.
     398            0 :             let expected_l2_lsn = jobs.iter().map(|job| job.compact_lsn_range.end).max();
     399            0 :             for job in jobs {
     400              :                 // Unfortunately we need to convert the `GcCompactJob` back to `CompactionOptions`
     401              :                 // until we do further refactors to allow directly call `compact_with_gc`.
     402            0 :                 let mut flags: EnumSet<CompactFlags> = EnumSet::default();
     403            0 :                 flags |= CompactFlags::EnhancedGcBottomMostCompaction;
     404            0 :                 if job.dry_run {
     405            0 :                     flags |= CompactFlags::DryRun;
     406            0 :                 }
     407            0 :                 if options.flags.contains(CompactFlags::YieldForL0) {
     408            0 :                     flags |= CompactFlags::YieldForL0;
     409            0 :                 }
     410            0 :                 let options = CompactOptions {
     411            0 :                     flags,
     412            0 :                     sub_compaction: false,
     413            0 :                     compact_key_range: Some(job.compact_key_range.into()),
     414            0 :                     compact_lsn_range: Some(job.compact_lsn_range.into()),
     415            0 :                     sub_compaction_max_job_size_mb: None,
     416            0 :                 };
     417            0 :                 pending_tasks.push(GcCompactionQueueItem::SubCompactionJob(options));
     418              :             }
     419              : 
     420            0 :             if !auto {
     421            0 :                 pending_tasks.push(GcCompactionQueueItem::Notify(id, None));
     422            0 :             } else {
     423            0 :                 pending_tasks.push(GcCompactionQueueItem::Notify(id, expected_l2_lsn));
     424            0 :             }
     425              : 
     426              :             {
     427            0 :                 let mut guard = self.inner.lock().unwrap();
     428            0 :                 let mut tasks = Vec::new();
     429            0 :                 for task in pending_tasks {
     430            0 :                     let id = guard.next_id();
     431            0 :                     tasks.push((id, task));
     432            0 :                 }
     433            0 :                 tasks.reverse();
     434            0 :                 for item in tasks {
     435            0 :                     guard.queued.push_front(item);
     436            0 :                 }
     437              :             }
     438            0 :             info!(
     439            0 :                 "scheduled enhanced gc bottom-most compaction with sub-compaction, split into {} jobs",
     440              :                 jobs_len
     441              :             );
     442              :         }
     443            0 :         Ok(())
     444            0 :     }
     445              : 
     446              :     /// Take a job from the queue and process it. Returns if there are still pending tasks.
     447            0 :     pub async fn iteration(
     448            0 :         &self,
     449            0 :         cancel: &CancellationToken,
     450            0 :         ctx: &RequestContext,
     451            0 :         gc_block: &GcBlock,
     452            0 :         timeline: &Arc<Timeline>,
     453            0 :     ) -> Result<CompactionOutcome, CompactionError> {
     454            0 :         let res = self.iteration_inner(cancel, ctx, gc_block, timeline).await;
     455            0 :         if let Err(err) = &res {
     456            0 :             log_compaction_error(err, None, cancel.is_cancelled(), true);
     457            0 :         }
     458            0 :         match res {
     459            0 :             Ok(res) => Ok(res),
     460            0 :             Err(CompactionError::ShuttingDown) => Err(CompactionError::ShuttingDown),
     461              :             Err(_) => {
     462              :                 // There are some cases where traditional gc might collect some layer
     463              :                 // files causing gc-compaction cannot read the full history of the key.
     464              :                 // This needs to be resolved in the long-term by improving the compaction
     465              :                 // process. For now, let's simply avoid such errors triggering the
     466              :                 // circuit breaker.
     467            0 :                 Ok(CompactionOutcome::Skipped)
     468              :             }
     469              :         }
     470            0 :     }
     471              : 
     472            0 :     async fn iteration_inner(
     473            0 :         &self,
     474            0 :         cancel: &CancellationToken,
     475            0 :         ctx: &RequestContext,
     476            0 :         gc_block: &GcBlock,
     477            0 :         timeline: &Arc<Timeline>,
     478            0 :     ) -> Result<CompactionOutcome, CompactionError> {
     479            0 :         let Ok(_one_op_at_a_time_guard) = self.consumer_lock.try_lock() else {
     480            0 :             return Err(CompactionError::AlreadyRunning(
     481            0 :                 "cannot run gc-compaction because another gc-compaction is running. This should not happen because we only call this function from the gc-compaction queue.",
     482            0 :             ));
     483              :         };
     484              :         let has_pending_tasks;
     485            0 :         let mut yield_for_l0 = false;
     486            0 :         let Some((id, item)) = ({
     487            0 :             let mut guard = self.inner.lock().unwrap();
     488            0 :             if let Some((id, item)) = guard.queued.pop_front() {
     489            0 :                 guard.running = Some((id, item.clone()));
     490            0 :                 has_pending_tasks = !guard.queued.is_empty();
     491            0 :                 Some((id, item))
     492              :             } else {
     493            0 :                 has_pending_tasks = false;
     494            0 :                 None
     495              :             }
     496              :         }) else {
     497            0 :             self.trigger_auto_compaction(timeline).await?;
     498              :             // Always yield after triggering auto-compaction. Gc-compaction is a low-priority task and we
     499              :             // have not implemented preemption mechanism yet. We always want to yield it to more important
     500              :             // tasks if there is one.
     501            0 :             return Ok(CompactionOutcome::Done);
     502              :         };
     503            0 :         match item {
     504            0 :             GcCompactionQueueItem::MetaJob { options, auto } => {
     505            0 :                 if !options
     506            0 :                     .flags
     507            0 :                     .contains(CompactFlags::EnhancedGcBottomMostCompaction)
     508              :                 {
     509            0 :                     warn!(
     510            0 :                         "ignoring scheduled compaction task: scheduled task must be gc compaction: {:?}",
     511              :                         options
     512              :                     );
     513            0 :                 } else if options.sub_compaction {
     514            0 :                     info!(
     515            0 :                         "running scheduled enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs"
     516              :                     );
     517            0 :                     self.handle_sub_compaction(id, options, timeline, auto)
     518            0 :                         .await?;
     519              :                 } else {
     520              :                     // Auto compaction always enables sub-compaction so we don't need to handle update_l2_lsn
     521              :                     // in this branch.
     522            0 :                     let _gc_guard = match gc_block.start().await {
     523            0 :                         Ok(guard) => guard,
     524            0 :                         Err(e) => {
     525            0 :                             self.notify_and_unblock(id);
     526            0 :                             self.clear_running_job();
     527            0 :                             return Err(CompactionError::Other(anyhow!(
     528            0 :                                 "cannot run gc-compaction because gc is blocked: {}",
     529            0 :                                 e
     530            0 :                             )));
     531              :                         }
     532              :                     };
     533            0 :                     let res = timeline.compact_with_options(cancel, options, ctx).await;
     534            0 :                     let compaction_result = match res {
     535            0 :                         Ok(res) => res,
     536            0 :                         Err(err) => {
     537            0 :                             warn!(%err, "failed to run gc-compaction");
     538            0 :                             self.notify_and_unblock(id);
     539            0 :                             self.clear_running_job();
     540            0 :                             return Err(err);
     541              :                         }
     542              :                     };
     543            0 :                     if compaction_result == CompactionOutcome::YieldForL0 {
     544            0 :                         yield_for_l0 = true;
     545            0 :                     }
     546              :                 }
     547              :             }
     548            0 :             GcCompactionQueueItem::SubCompactionJob(options) => {
     549              :                 // TODO: error handling, clear the queue if any task fails?
     550            0 :                 let _gc_guard = match gc_block.start().await {
     551            0 :                     Ok(guard) => guard,
     552            0 :                     Err(e) => {
     553            0 :                         self.clear_running_job();
     554            0 :                         return Err(CompactionError::Other(anyhow!(
     555            0 :                             "cannot run gc-compaction because gc is blocked: {}",
     556            0 :                             e
     557            0 :                         )));
     558              :                     }
     559              :                 };
     560            0 :                 let res = timeline.compact_with_options(cancel, options, ctx).await;
     561            0 :                 let compaction_result = match res {
     562            0 :                     Ok(res) => res,
     563            0 :                     Err(err) => {
     564            0 :                         warn!(%err, "failed to run gc-compaction subcompaction job");
     565            0 :                         self.clear_running_job();
     566            0 :                         return Err(err);
     567              :                     }
     568              :                 };
     569            0 :                 if compaction_result == CompactionOutcome::YieldForL0 {
     570            0 :                     // We will permenantly give up a task if we yield for L0 compaction: the preempted subcompaction job won't be running
     571            0 :                     // again. This ensures that we don't keep doing duplicated work within gc-compaction. Not directly returning here because
     572            0 :                     // we need to clean things up before returning from the function.
     573            0 :                     yield_for_l0 = true;
     574            0 :                 }
     575              :             }
     576            0 :             GcCompactionQueueItem::Notify(id, l2_lsn) => {
     577            0 :                 self.notify_and_unblock(id);
     578            0 :                 if let Some(l2_lsn) = l2_lsn {
     579            0 :                     let current_l2_lsn = timeline
     580            0 :                         .get_gc_compaction_state()
     581            0 :                         .map(|x| x.last_completed_lsn)
     582            0 :                         .unwrap_or(Lsn::INVALID);
     583            0 :                     if l2_lsn >= current_l2_lsn {
     584            0 :                         info!("l2_lsn updated to {}", l2_lsn);
     585            0 :                         timeline
     586            0 :                             .update_gc_compaction_state(GcCompactionState {
     587            0 :                                 last_completed_lsn: l2_lsn,
     588            0 :                             })
     589            0 :                             .map_err(CompactionError::Other)?;
     590              :                     } else {
     591            0 :                         warn!(
     592            0 :                             "l2_lsn updated to {} but it is less than the current l2_lsn {}",
     593              :                             l2_lsn, current_l2_lsn
     594              :                         );
     595              :                     }
     596            0 :                 }
     597              :             }
     598              :         }
     599            0 :         self.clear_running_job();
     600            0 :         Ok(if yield_for_l0 {
     601            0 :             tracing::info!("give up gc-compaction: yield for L0 compaction");
     602            0 :             CompactionOutcome::YieldForL0
     603            0 :         } else if has_pending_tasks {
     604            0 :             CompactionOutcome::Pending
     605              :         } else {
     606            0 :             CompactionOutcome::Done
     607              :         })
     608            0 :     }
     609              : 
     610              :     #[allow(clippy::type_complexity)]
     611            0 :     pub fn remaining_jobs(
     612            0 :         &self,
     613            0 :     ) -> (
     614            0 :         Option<(GcCompactionJobId, GcCompactionQueueItem)>,
     615            0 :         VecDeque<(GcCompactionJobId, GcCompactionQueueItem)>,
     616            0 :     ) {
     617            0 :         let guard = self.inner.lock().unwrap();
     618            0 :         (guard.running.clone(), guard.queued.clone())
     619            0 :     }
     620              : 
     621            0 :     pub fn remaining_jobs_num(&self) -> usize {
     622            0 :         let guard = self.inner.lock().unwrap();
     623            0 :         guard.queued.len() + if guard.running.is_some() { 1 } else { 0 }
     624            0 :     }
     625              : }
     626              : 
     627              : /// A job description for the gc-compaction job. This structure describes the rectangle range that the job will
     628              : /// process. The exact layers that need to be compacted/rewritten will be generated when `compact_with_gc` gets
     629              : /// called.
     630              : #[derive(Debug, Clone)]
     631              : pub(crate) struct GcCompactJob {
     632              :     pub dry_run: bool,
     633              :     /// The key range to be compacted. The compaction algorithm will only regenerate key-value pairs within this range
     634              :     /// [left inclusive, right exclusive), and other pairs will be rewritten into new files if necessary.
     635              :     pub compact_key_range: Range<Key>,
     636              :     /// The LSN range to be compacted. The compaction algorithm will use this range to determine the layers to be
     637              :     /// selected for the compaction, and it does not guarantee the generated layers will have exactly the same LSN range
     638              :     /// as specified here. The true range being compacted is `min_lsn/max_lsn` in [`GcCompactionJobDescription`].
     639              :     /// min_lsn will always <= the lower bound specified here, and max_lsn will always >= the upper bound specified here.
     640              :     pub compact_lsn_range: Range<Lsn>,
     641              : }
     642              : 
     643              : impl GcCompactJob {
     644          112 :     pub fn from_compact_options(options: CompactOptions) -> Self {
     645          112 :         GcCompactJob {
     646          112 :             dry_run: options.flags.contains(CompactFlags::DryRun),
     647          112 :             compact_key_range: options
     648          112 :                 .compact_key_range
     649          112 :                 .map(|x| x.into())
     650          112 :                 .unwrap_or(Key::MIN..Key::MAX),
     651          112 :             compact_lsn_range: options
     652          112 :                 .compact_lsn_range
     653          112 :                 .map(|x| x.into())
     654          112 :                 .unwrap_or(Lsn::INVALID..Lsn::MAX),
     655          112 :         }
     656          112 :     }
     657              : }
     658              : 
     659              : /// A job description for the gc-compaction job. This structure is generated when `compact_with_gc` is called
     660              : /// and contains the exact layers we want to compact.
     661              : pub struct GcCompactionJobDescription {
     662              :     /// All layers to read in the compaction job
     663              :     selected_layers: Vec<Layer>,
     664              :     /// GC cutoff of the job. This is the lowest LSN that will be accessed by the read/GC path and we need to
     665              :     /// keep all deltas <= this LSN or generate an image == this LSN.
     666              :     gc_cutoff: Lsn,
     667              :     /// LSNs to retain for the job. Read path will use this LSN so we need to keep deltas <= this LSN or
     668              :     /// generate an image == this LSN.
     669              :     retain_lsns_below_horizon: Vec<Lsn>,
     670              :     /// Maximum layer LSN processed in this compaction, that is max(end_lsn of layers). Exclusive. All data
     671              :     /// \>= this LSN will be kept and will not be rewritten.
     672              :     max_layer_lsn: Lsn,
     673              :     /// Minimum layer LSN processed in this compaction, that is min(start_lsn of layers). Inclusive.
     674              :     /// All access below (strict lower than `<`) this LSN will be routed through the normal read path instead of
     675              :     /// k-merge within gc-compaction.
     676              :     min_layer_lsn: Lsn,
     677              :     /// Only compact layers overlapping with this range.
     678              :     compaction_key_range: Range<Key>,
     679              :     /// When partial compaction is enabled, these layers need to be rewritten to ensure no overlap.
     680              :     /// This field is here solely for debugging. The field will not be read once the compaction
     681              :     /// description is generated.
     682              :     rewrite_layers: Vec<Arc<PersistentLayerDesc>>,
     683              : }
     684              : 
     685              : /// The result of bottom-most compaction for a single key at each LSN.
     686              : #[derive(Debug)]
     687              : #[cfg_attr(test, derive(PartialEq))]
     688              : pub struct KeyLogAtLsn(pub Vec<(Lsn, Value)>);
     689              : 
     690              : /// The result of bottom-most compaction.
     691              : #[derive(Debug)]
     692              : #[cfg_attr(test, derive(PartialEq))]
     693              : pub(crate) struct KeyHistoryRetention {
     694              :     /// Stores logs to reconstruct the value at the given LSN, that is to say, logs <= LSN or image == LSN.
     695              :     pub(crate) below_horizon: Vec<(Lsn, KeyLogAtLsn)>,
     696              :     /// Stores logs to reconstruct the value at any LSN above the horizon, that is to say, log > LSN.
     697              :     pub(crate) above_horizon: KeyLogAtLsn,
     698              : }
     699              : 
     700              : impl KeyHistoryRetention {
     701              :     /// Hack: skip delta layer if we need to produce a layer of a same key-lsn.
     702              :     ///
     703              :     /// This can happen if we have removed some deltas in "the middle" of some existing layer's key-lsn-range.
     704              :     /// For example, consider the case where a single delta with range [0x10,0x50) exists.
     705              :     /// And we have branches at LSN 0x10, 0x20, 0x30.
     706              :     /// Then we delete branch @ 0x20.
     707              :     /// Bottom-most compaction may now delete the delta [0x20,0x30).
     708              :     /// And that wouldnt' change the shape of the layer.
     709              :     ///
     710              :     /// Note that bottom-most-gc-compaction never _adds_ new data in that case, only removes.
     711              :     ///
     712              :     /// `discard_key` will only be called when the writer reaches its target (instead of for every key), so it's fine to grab a lock inside.
     713          148 :     async fn discard_key(key: &PersistentLayerKey, tline: &Arc<Timeline>, dry_run: bool) -> bool {
     714          148 :         if dry_run {
     715            0 :             return true;
     716          148 :         }
     717          148 :         if LayerMap::is_l0(&key.key_range, key.is_delta) {
     718              :             // gc-compaction should not produce L0 deltas, otherwise it will break the layer order.
     719              :             // We should ignore such layers.
     720            0 :             return true;
     721          148 :         }
     722              :         let layer_generation;
     723              :         {
     724          148 :             let guard = tline.layers.read().await;
     725          148 :             if !guard.contains_key(key) {
     726          104 :                 return false;
     727           44 :             }
     728           44 :             layer_generation = guard.get_from_key(key).metadata().generation;
     729           44 :         }
     730           44 :         if layer_generation == tline.generation {
     731           44 :             info!(
     732              :                 key=%key,
     733              :                 ?layer_generation,
     734            0 :                 "discard layer due to duplicated layer key in the same generation",
     735              :             );
     736           44 :             true
     737              :         } else {
     738            0 :             false
     739              :         }
     740          148 :     }
     741              : 
     742              :     /// Pipe a history of a single key to the writers.
     743              :     ///
     744              :     /// If `image_writer` is none, the images will be placed into the delta layers.
     745              :     /// The delta writer will contain all images and deltas (below and above the horizon) except the bottom-most images.
     746              :     #[allow(clippy::too_many_arguments)]
     747         1276 :     async fn pipe_to(
     748         1276 :         self,
     749         1276 :         key: Key,
     750         1276 :         delta_writer: &mut SplitDeltaLayerWriter,
     751         1276 :         mut image_writer: Option<&mut SplitImageLayerWriter>,
     752         1276 :         stat: &mut CompactionStatistics,
     753         1276 :         ctx: &RequestContext,
     754         1276 :     ) -> anyhow::Result<()> {
     755         1276 :         let mut first_batch = true;
     756         4088 :         for (cutoff_lsn, KeyLogAtLsn(logs)) in self.below_horizon {
     757         2812 :             if first_batch {
     758         1276 :                 if logs.len() == 1 && logs[0].1.is_image() {
     759         1200 :                     let Value::Image(img) = &logs[0].1 else {
     760            0 :                         unreachable!()
     761              :                     };
     762         1200 :                     stat.produce_image_key(img);
     763         1200 :                     if let Some(image_writer) = image_writer.as_mut() {
     764         1200 :                         image_writer.put_image(key, img.clone(), ctx).await?;
     765              :                     } else {
     766            0 :                         delta_writer
     767            0 :                             .put_value(key, cutoff_lsn, Value::Image(img.clone()), ctx)
     768            0 :                             .await?;
     769              :                     }
     770              :                 } else {
     771          132 :                     for (lsn, val) in logs {
     772           56 :                         stat.produce_key(&val);
     773           56 :                         delta_writer.put_value(key, lsn, val, ctx).await?;
     774              :                     }
     775              :                 }
     776         1276 :                 first_batch = false;
     777              :             } else {
     778         1768 :                 for (lsn, val) in logs {
     779          232 :                     stat.produce_key(&val);
     780          232 :                     delta_writer.put_value(key, lsn, val, ctx).await?;
     781              :                 }
     782              :             }
     783              :         }
     784         1276 :         let KeyLogAtLsn(above_horizon_logs) = self.above_horizon;
     785         1392 :         for (lsn, val) in above_horizon_logs {
     786          116 :             stat.produce_key(&val);
     787          116 :             delta_writer.put_value(key, lsn, val, ctx).await?;
     788              :         }
     789         1276 :         Ok(())
     790         1276 :     }
     791              : }
     792              : 
     793              : #[derive(Debug, Serialize, Default)]
     794              : struct CompactionStatisticsNumSize {
     795              :     num: u64,
     796              :     size: u64,
     797              : }
     798              : 
     799              : #[derive(Debug, Serialize, Default)]
     800              : pub struct CompactionStatistics {
     801              :     /// Delta layer visited (maybe compressed, physical size)
     802              :     delta_layer_visited: CompactionStatisticsNumSize,
     803              :     /// Image layer visited (maybe compressed, physical size)
     804              :     image_layer_visited: CompactionStatisticsNumSize,
     805              :     /// Delta layer produced (maybe compressed, physical size)
     806              :     delta_layer_produced: CompactionStatisticsNumSize,
     807              :     /// Image layer produced (maybe compressed, physical size)
     808              :     image_layer_produced: CompactionStatisticsNumSize,
     809              :     /// Delta layer discarded (maybe compressed, physical size of the layer being discarded instead of the original layer)
     810              :     delta_layer_discarded: CompactionStatisticsNumSize,
     811              :     /// Image layer discarded (maybe compressed, physical size of the layer being discarded instead of the original layer)
     812              :     image_layer_discarded: CompactionStatisticsNumSize,
     813              :     num_unique_keys_visited: usize,
     814              :     /// Delta visited (uncompressed, original size)
     815              :     wal_keys_visited: CompactionStatisticsNumSize,
     816              :     /// Image visited (uncompressed, original size)
     817              :     image_keys_visited: CompactionStatisticsNumSize,
     818              :     /// Delta produced (uncompressed, original size)
     819              :     wal_produced: CompactionStatisticsNumSize,
     820              :     /// Image produced (uncompressed, original size)
     821              :     image_produced: CompactionStatisticsNumSize,
     822              : 
     823              :     // Time spent in each phase
     824              :     time_acquire_lock_secs: f64,
     825              :     time_analyze_secs: f64,
     826              :     time_download_layer_secs: f64,
     827              :     time_to_first_kv_pair_secs: f64,
     828              :     time_main_loop_secs: f64,
     829              :     time_final_phase_secs: f64,
     830              :     time_total_secs: f64,
     831              : 
     832              :     // Summary
     833              :     /// Ratio of the key-value size after/before gc-compaction.
     834              :     uncompressed_retention_ratio: f64,
     835              :     /// Ratio of the physical size after/before gc-compaction.
     836              :     compressed_retention_ratio: f64,
     837              : }
     838              : 
     839              : impl CompactionStatistics {
     840         2136 :     fn estimated_size_of_value(val: &Value) -> usize {
     841          876 :         match val {
     842         1260 :             Value::Image(img) => img.len(),
     843            0 :             Value::WalRecord(NeonWalRecord::Postgres { rec, .. }) => rec.len(),
     844          876 :             _ => std::mem::size_of::<NeonWalRecord>(),
     845              :         }
     846         2136 :     }
     847         3356 :     fn estimated_size_of_key() -> usize {
     848         3356 :         KEY_SIZE // TODO: distinguish image layer and delta layer (count LSN in delta layer)
     849         3356 :     }
     850          176 :     fn visit_delta_layer(&mut self, size: u64) {
     851          176 :         self.delta_layer_visited.num += 1;
     852          176 :         self.delta_layer_visited.size += size;
     853          176 :     }
     854          140 :     fn visit_image_layer(&mut self, size: u64) {
     855          140 :         self.image_layer_visited.num += 1;
     856          140 :         self.image_layer_visited.size += size;
     857          140 :     }
     858         1280 :     fn on_unique_key_visited(&mut self) {
     859         1280 :         self.num_unique_keys_visited += 1;
     860         1280 :     }
     861          492 :     fn visit_wal_key(&mut self, val: &Value) {
     862          492 :         self.wal_keys_visited.num += 1;
     863          492 :         self.wal_keys_visited.size +=
     864          492 :             Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
     865          492 :     }
     866         1260 :     fn visit_image_key(&mut self, val: &Value) {
     867         1260 :         self.image_keys_visited.num += 1;
     868         1260 :         self.image_keys_visited.size +=
     869         1260 :             Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
     870         1260 :     }
     871          404 :     fn produce_key(&mut self, val: &Value) {
     872          404 :         match val {
     873           20 :             Value::Image(img) => self.produce_image_key(img),
     874          384 :             Value::WalRecord(_) => self.produce_wal_key(val),
     875              :         }
     876          404 :     }
     877          384 :     fn produce_wal_key(&mut self, val: &Value) {
     878          384 :         self.wal_produced.num += 1;
     879          384 :         self.wal_produced.size +=
     880          384 :             Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
     881          384 :     }
     882         1220 :     fn produce_image_key(&mut self, val: &Bytes) {
     883         1220 :         self.image_produced.num += 1;
     884         1220 :         self.image_produced.size += val.len() as u64 + Self::estimated_size_of_key() as u64;
     885         1220 :     }
     886           28 :     fn discard_delta_layer(&mut self, original_size: u64) {
     887           28 :         self.delta_layer_discarded.num += 1;
     888           28 :         self.delta_layer_discarded.size += original_size;
     889           28 :     }
     890           16 :     fn discard_image_layer(&mut self, original_size: u64) {
     891           16 :         self.image_layer_discarded.num += 1;
     892           16 :         self.image_layer_discarded.size += original_size;
     893           16 :     }
     894           48 :     fn produce_delta_layer(&mut self, size: u64) {
     895           48 :         self.delta_layer_produced.num += 1;
     896           48 :         self.delta_layer_produced.size += size;
     897           48 :     }
     898           60 :     fn produce_image_layer(&mut self, size: u64) {
     899           60 :         self.image_layer_produced.num += 1;
     900           60 :         self.image_layer_produced.size += size;
     901           60 :     }
     902          104 :     fn finalize(&mut self) {
     903          104 :         let original_key_value_size = self.image_keys_visited.size + self.wal_keys_visited.size;
     904          104 :         let produced_key_value_size = self.image_produced.size + self.wal_produced.size;
     905          104 :         self.uncompressed_retention_ratio =
     906          104 :             produced_key_value_size as f64 / (original_key_value_size as f64 + 1.0); // avoid div by 0
     907          104 :         let original_physical_size = self.image_layer_visited.size + self.delta_layer_visited.size;
     908          104 :         let produced_physical_size = self.image_layer_produced.size
     909          104 :             + self.delta_layer_produced.size
     910          104 :             + self.image_layer_discarded.size
     911          104 :             + self.delta_layer_discarded.size; // Also include the discarded layers to make the ratio accurate
     912          104 :         self.compressed_retention_ratio =
     913          104 :             produced_physical_size as f64 / (original_physical_size as f64 + 1.0); // avoid div by 0
     914          104 :     }
     915              : }
     916              : 
     917              : #[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
     918              : pub enum CompactionOutcome {
     919              :     #[default]
     920              :     /// No layers need to be compacted after this round. Compaction doesn't need
     921              :     /// to be immediately scheduled.
     922              :     Done,
     923              :     /// Still has pending layers to be compacted after this round. Ideally, the scheduler
     924              :     /// should immediately schedule another compaction.
     925              :     Pending,
     926              :     /// A timeline needs L0 compaction. Yield and schedule an immediate L0 compaction pass (only
     927              :     /// guaranteed when `compaction_l0_first` is enabled).
     928              :     YieldForL0,
     929              :     /// Compaction was skipped, because the timeline is ineligible for compaction.
     930              :     Skipped,
     931              : }
     932              : 
     933              : impl Timeline {
     934              :     /// TODO: cancellation
     935              :     ///
     936              :     /// Returns whether the compaction has pending tasks.
     937          728 :     pub(crate) async fn compact_legacy(
     938          728 :         self: &Arc<Self>,
     939          728 :         cancel: &CancellationToken,
     940          728 :         options: CompactOptions,
     941          728 :         ctx: &RequestContext,
     942          728 :     ) -> Result<CompactionOutcome, CompactionError> {
     943          728 :         if options
     944          728 :             .flags
     945          728 :             .contains(CompactFlags::EnhancedGcBottomMostCompaction)
     946              :         {
     947            0 :             self.compact_with_gc(cancel, options, ctx).await?;
     948            0 :             return Ok(CompactionOutcome::Done);
     949          728 :         }
     950          728 : 
     951          728 :         if options.flags.contains(CompactFlags::DryRun) {
     952            0 :             return Err(CompactionError::Other(anyhow!(
     953            0 :                 "dry-run mode is not supported for legacy compaction for now"
     954            0 :             )));
     955          728 :         }
     956          728 : 
     957          728 :         if options.compact_key_range.is_some() || options.compact_lsn_range.is_some() {
     958              :             // maybe useful in the future? could implement this at some point
     959            0 :             return Err(CompactionError::Other(anyhow!(
     960            0 :                 "compaction range is not supported for legacy compaction for now"
     961            0 :             )));
     962          728 :         }
     963          728 : 
     964          728 :         // High level strategy for compaction / image creation:
     965          728 :         //
     966          728 :         // 1. First, do a L0 compaction to ensure we move the L0
     967          728 :         // layers into the historic layer map get flat levels of
     968          728 :         // layers. If we did not compact all L0 layers, we will
     969          728 :         // prioritize compacting the timeline again and not do
     970          728 :         // any of the compactions below.
     971          728 :         //
     972          728 :         // 2. Then, calculate the desired "partitioning" of the
     973          728 :         // currently in-use key space. The goal is to partition the
     974          728 :         // key space into roughly fixed-size chunks, but also take into
     975          728 :         // account any existing image layers, and try to align the
     976          728 :         // chunk boundaries with the existing image layers to avoid
     977          728 :         // too much churn. Also try to align chunk boundaries with
     978          728 :         // relation boundaries.  In principle, we don't know about
     979          728 :         // relation boundaries here, we just deal with key-value
     980          728 :         // pairs, and the code in pgdatadir_mapping.rs knows how to
     981          728 :         // map relations into key-value pairs. But in practice we know
     982          728 :         // that 'field6' is the block number, and the fields 1-5
     983          728 :         // identify a relation. This is just an optimization,
     984          728 :         // though.
     985          728 :         //
     986          728 :         // 3. Once we know the partitioning, for each partition,
     987          728 :         // decide if it's time to create a new image layer. The
     988          728 :         // criteria is: there has been too much "churn" since the last
     989          728 :         // image layer? The "churn" is fuzzy concept, it's a
     990          728 :         // combination of too many delta files, or too much WAL in
     991          728 :         // total in the delta file. Or perhaps: if creating an image
     992          728 :         // file would allow to delete some older files.
     993          728 :         //
     994          728 :         // 4. In the end, if the tenant gets auto-sharded, we will run
     995          728 :         // a shard-ancestor compaction.
     996          728 : 
     997          728 :         // Is the timeline being deleted?
     998          728 :         if self.is_stopping() {
     999            0 :             trace!("Dropping out of compaction on timeline shutdown");
    1000            0 :             return Err(CompactionError::ShuttingDown);
    1001          728 :         }
    1002          728 : 
    1003          728 :         let target_file_size = self.get_checkpoint_distance();
    1004              : 
    1005              :         // Define partitioning schema if needed
    1006              : 
    1007              :         // 1. L0 Compact
    1008          728 :         let l0_outcome = {
    1009          728 :             let timer = self.metrics.compact_time_histo.start_timer();
    1010          728 :             let l0_outcome = self
    1011          728 :                 .compact_level0(
    1012          728 :                     target_file_size,
    1013          728 :                     options.flags.contains(CompactFlags::ForceL0Compaction),
    1014          728 :                     ctx,
    1015          728 :                 )
    1016          728 :                 .await?;
    1017          728 :             timer.stop_and_record();
    1018          728 :             l0_outcome
    1019          728 :         };
    1020          728 : 
    1021          728 :         if options.flags.contains(CompactFlags::OnlyL0Compaction) {
    1022            0 :             return Ok(l0_outcome);
    1023          728 :         }
    1024          728 : 
    1025          728 :         // Yield if we have pending L0 compaction. The scheduler will do another pass.
    1026          728 :         if (l0_outcome == CompactionOutcome::Pending || l0_outcome == CompactionOutcome::YieldForL0)
    1027            0 :             && options.flags.contains(CompactFlags::YieldForL0)
    1028              :         {
    1029            0 :             info!("image/ancestor compaction yielding for L0 compaction");
    1030            0 :             return Ok(CompactionOutcome::YieldForL0);
    1031          728 :         }
    1032          728 : 
    1033          728 :         // 2. Repartition and create image layers if necessary
    1034          728 :         match self
    1035          728 :             .repartition(
    1036          728 :                 self.get_last_record_lsn(),
    1037          728 :                 self.get_compaction_target_size(),
    1038          728 :                 options.flags,
    1039          728 :                 ctx,
    1040          728 :             )
    1041          728 :             .await
    1042              :         {
    1043          728 :             Ok(((dense_partitioning, sparse_partitioning), lsn)) => {
    1044          728 :                 // Disables access_stats updates, so that the files we read remain candidates for eviction after we're done with them
    1045          728 :                 let image_ctx = RequestContextBuilder::from(ctx)
    1046          728 :                     .access_stats_behavior(AccessStatsBehavior::Skip)
    1047          728 :                     .attached_child();
    1048          728 : 
    1049          728 :                 let mut partitioning = dense_partitioning;
    1050          728 :                 partitioning
    1051          728 :                     .parts
    1052          728 :                     .extend(sparse_partitioning.into_dense().parts);
    1053              : 
    1054              :                 // 3. Create new image layers for partitions that have been modified "enough".
    1055          728 :                 let (image_layers, outcome) = self
    1056          728 :                     .create_image_layers(
    1057          728 :                         &partitioning,
    1058          728 :                         lsn,
    1059          728 :                         if options
    1060          728 :                             .flags
    1061          728 :                             .contains(CompactFlags::ForceImageLayerCreation)
    1062              :                         {
    1063           28 :                             ImageLayerCreationMode::Force
    1064              :                         } else {
    1065          700 :                             ImageLayerCreationMode::Try
    1066              :                         },
    1067          728 :                         &image_ctx,
    1068          728 :                         self.last_image_layer_creation_status
    1069          728 :                             .load()
    1070          728 :                             .as_ref()
    1071          728 :                             .clone(),
    1072          728 :                         options.flags.contains(CompactFlags::YieldForL0),
    1073          728 :                     )
    1074          728 :                     .await
    1075          728 :                     .inspect_err(|err| {
    1076              :                         if let CreateImageLayersError::GetVectoredError(
    1077              :                             GetVectoredError::MissingKey(_),
    1078            0 :                         ) = err
    1079              :                         {
    1080            0 :                             critical!("missing key during compaction: {err:?}");
    1081            0 :                         }
    1082          728 :                     })?;
    1083              : 
    1084          728 :                 self.last_image_layer_creation_status
    1085          728 :                     .store(Arc::new(outcome.clone()));
    1086          728 : 
    1087          728 :                 self.upload_new_image_layers(image_layers)?;
    1088          728 :                 if let LastImageLayerCreationStatus::Incomplete { .. } = outcome {
    1089              :                     // Yield and do not do any other kind of compaction.
    1090            0 :                     info!(
    1091            0 :                         "skipping shard ancestor compaction due to pending image layer generation tasks (preempted by L0 compaction)."
    1092              :                     );
    1093            0 :                     return Ok(CompactionOutcome::YieldForL0);
    1094          728 :                 }
    1095              :             }
    1096              : 
    1097              :             // Suppress errors when cancelled.
    1098            0 :             Err(_) if self.cancel.is_cancelled() => {}
    1099            0 :             Err(err) if err.is_cancel() => {}
    1100              : 
    1101              :             // Alert on critical errors that indicate data corruption.
    1102            0 :             Err(err) if err.is_critical() => {
    1103            0 :                 critical!("could not compact, repartitioning keyspace failed: {err:?}");
    1104              :             }
    1105              : 
    1106              :             // Log other errors. No partitioning? This is normal, if the timeline was just created
    1107              :             // as an empty timeline. Also in unit tests, when we use the timeline as a simple
    1108              :             // key-value store, ignoring the datadir layout. Log the error but continue.
    1109            0 :             Err(err) => error!("could not compact, repartitioning keyspace failed: {err:?}"),
    1110              :         };
    1111              : 
    1112          728 :         let partition_count = self.partitioning.read().0.0.parts.len();
    1113          728 : 
    1114          728 :         // 4. Shard ancestor compaction
    1115          728 : 
    1116          728 :         if self.shard_identity.count >= ShardCount::new(2) {
    1117              :             // Limit the number of layer rewrites to the number of partitions: this means its
    1118              :             // runtime should be comparable to a full round of image layer creations, rather than
    1119              :             // being potentially much longer.
    1120            0 :             let rewrite_max = partition_count;
    1121            0 : 
    1122            0 :             self.compact_shard_ancestors(rewrite_max, ctx).await?;
    1123          728 :         }
    1124              : 
    1125          728 :         Ok(CompactionOutcome::Done)
    1126          728 :     }
    1127              : 
    1128              :     /// Check for layers that are elegible to be rewritten:
    1129              :     /// - Shard splitting: After a shard split, ancestor layers beyond pitr_interval, so that
    1130              :     ///   we don't indefinitely retain keys in this shard that aren't needed.
    1131              :     /// - For future use: layers beyond pitr_interval that are in formats we would
    1132              :     ///   rather not maintain compatibility with indefinitely.
    1133              :     ///
    1134              :     /// Note: this phase may read and write many gigabytes of data: use rewrite_max to bound
    1135              :     /// how much work it will try to do in each compaction pass.
    1136            0 :     async fn compact_shard_ancestors(
    1137            0 :         self: &Arc<Self>,
    1138            0 :         rewrite_max: usize,
    1139            0 :         ctx: &RequestContext,
    1140            0 :     ) -> Result<(), CompactionError> {
    1141            0 :         let mut drop_layers = Vec::new();
    1142            0 :         let mut layers_to_rewrite: Vec<Layer> = Vec::new();
    1143            0 : 
    1144            0 :         // We will use the Lsn cutoff of the last GC as a threshold for rewriting layers: if a
    1145            0 :         // layer is behind this Lsn, it indicates that the layer is being retained beyond the
    1146            0 :         // pitr_interval, for example because a branchpoint references it.
    1147            0 :         //
    1148            0 :         // Holding this read guard also blocks [`Self::gc_timeline`] from entering while we
    1149            0 :         // are rewriting layers.
    1150            0 :         let latest_gc_cutoff = self.get_applied_gc_cutoff_lsn();
    1151            0 : 
    1152            0 :         tracing::info!(
    1153            0 :             "starting shard ancestor compaction, latest_gc_cutoff: {}, pitr cutoff {}",
    1154            0 :             *latest_gc_cutoff,
    1155            0 :             self.gc_info.read().unwrap().cutoffs.time
    1156              :         );
    1157              : 
    1158            0 :         let layers = self.layers.read().await;
    1159            0 :         for layer_desc in layers.layer_map()?.iter_historic_layers() {
    1160            0 :             let layer = layers.get_from_desc(&layer_desc);
    1161            0 :             if layer.metadata().shard.shard_count == self.shard_identity.count {
    1162              :                 // This layer does not belong to a historic ancestor, no need to re-image it.
    1163            0 :                 continue;
    1164            0 :             }
    1165            0 : 
    1166            0 :             // This layer was created on an ancestor shard: check if it contains any data for this shard.
    1167            0 :             let sharded_range = ShardedRange::new(layer_desc.get_key_range(), &self.shard_identity);
    1168            0 :             let layer_local_page_count = sharded_range.page_count();
    1169            0 :             let layer_raw_page_count = ShardedRange::raw_size(&layer_desc.get_key_range());
    1170            0 :             if layer_local_page_count == 0 {
    1171              :                 // This ancestral layer only covers keys that belong to other shards.
    1172              :                 // We include the full metadata in the log: if we had some critical bug that caused
    1173              :                 // us to incorrectly drop layers, this would simplify manually debugging + reinstating those layers.
    1174            0 :                 info!(%layer, old_metadata=?layer.metadata(),
    1175            0 :                     "dropping layer after shard split, contains no keys for this shard.",
    1176              :                 );
    1177              : 
    1178            0 :                 if cfg!(debug_assertions) {
    1179              :                     // Expensive, exhaustive check of keys in this layer: this guards against ShardedRange's calculations being
    1180              :                     // wrong.  If ShardedRange claims the local page count is zero, then no keys in this layer
    1181              :                     // should be !is_key_disposable()
    1182              :                     // TODO: exclude sparse keyspace from this check, otherwise it will infinitely loop.
    1183            0 :                     let range = layer_desc.get_key_range();
    1184            0 :                     let mut key = range.start;
    1185            0 :                     while key < range.end {
    1186            0 :                         debug_assert!(self.shard_identity.is_key_disposable(&key));
    1187            0 :                         key = key.next();
    1188              :                     }
    1189            0 :                 }
    1190              : 
    1191            0 :                 drop_layers.push(layer);
    1192            0 :                 continue;
    1193            0 :             } else if layer_local_page_count != u32::MAX
    1194            0 :                 && layer_local_page_count == layer_raw_page_count
    1195              :             {
    1196            0 :                 debug!(%layer,
    1197            0 :                     "layer is entirely shard local ({} keys), no need to filter it",
    1198              :                     layer_local_page_count
    1199              :                 );
    1200            0 :                 continue;
    1201            0 :             }
    1202            0 : 
    1203            0 :             // Don't bother re-writing a layer unless it will at least halve its size
    1204            0 :             if layer_local_page_count != u32::MAX
    1205            0 :                 && layer_local_page_count > layer_raw_page_count / 2
    1206              :             {
    1207            0 :                 debug!(%layer,
    1208            0 :                     "layer is already mostly local ({}/{}), not rewriting",
    1209              :                     layer_local_page_count,
    1210              :                     layer_raw_page_count
    1211              :                 );
    1212            0 :             }
    1213              : 
    1214              :             // Don't bother re-writing a layer if it is within the PITR window: it will age-out eventually
    1215              :             // without incurring the I/O cost of a rewrite.
    1216            0 :             if layer_desc.get_lsn_range().end >= *latest_gc_cutoff {
    1217            0 :                 debug!(%layer, "Skipping rewrite of layer still in GC window ({} >= {})",
    1218            0 :                     layer_desc.get_lsn_range().end, *latest_gc_cutoff);
    1219            0 :                 continue;
    1220            0 :             }
    1221            0 : 
    1222            0 :             if layer_desc.is_delta() {
    1223              :                 // We do not yet implement rewrite of delta layers
    1224            0 :                 debug!(%layer, "Skipping rewrite of delta layer");
    1225            0 :                 continue;
    1226            0 :             }
    1227            0 : 
    1228            0 :             // Only rewrite layers if their generations differ.  This guarantees:
    1229            0 :             //  - that local rewrite is safe, as local layer paths will differ between existing layer and rewritten one
    1230            0 :             //  - that the layer is persistent in remote storage, as we only see old-generation'd layer via loading from remote storage
    1231            0 :             if layer.metadata().generation == self.generation {
    1232            0 :                 debug!(%layer, "Skipping rewrite, is not from old generation");
    1233            0 :                 continue;
    1234            0 :             }
    1235            0 : 
    1236            0 :             if layers_to_rewrite.len() >= rewrite_max {
    1237            0 :                 tracing::info!(%layer, "Will rewrite layer on a future compaction, already rewrote {}",
    1238            0 :                     layers_to_rewrite.len()
    1239              :                 );
    1240            0 :                 continue;
    1241            0 :             }
    1242            0 : 
    1243            0 :             // Fall through: all our conditions for doing a rewrite passed.
    1244            0 :             layers_to_rewrite.push(layer);
    1245              :         }
    1246              : 
    1247              :         // Drop read lock on layer map before we start doing time-consuming I/O
    1248            0 :         drop(layers);
    1249            0 : 
    1250            0 :         let mut replace_image_layers = Vec::new();
    1251              : 
    1252            0 :         for layer in layers_to_rewrite {
    1253            0 :             if self.cancel.is_cancelled() {
    1254            0 :                 return Err(CompactionError::ShuttingDown);
    1255            0 :             }
    1256            0 : 
    1257            0 :             tracing::info!(layer=%layer, "Rewriting layer after shard split...");
    1258            0 :             let mut image_layer_writer = ImageLayerWriter::new(
    1259            0 :                 self.conf,
    1260            0 :                 self.timeline_id,
    1261            0 :                 self.tenant_shard_id,
    1262            0 :                 &layer.layer_desc().key_range,
    1263            0 :                 layer.layer_desc().image_layer_lsn(),
    1264            0 :                 ctx,
    1265            0 :             )
    1266            0 :             .await
    1267            0 :             .map_err(CompactionError::Other)?;
    1268              : 
    1269              :             // Safety of layer rewrites:
    1270              :             // - We are writing to a different local file path than we are reading from, so the old Layer
    1271              :             //   cannot interfere with the new one.
    1272              :             // - In the page cache, contents for a particular VirtualFile are stored with a file_id that
    1273              :             //   is different for two layers with the same name (in `ImageLayerInner::new` we always
    1274              :             //   acquire a fresh id from [`crate::page_cache::next_file_id`].  So readers do not risk
    1275              :             //   reading the index from one layer file, and then data blocks from the rewritten layer file.
    1276              :             // - Any readers that have a reference to the old layer will keep it alive until they are done
    1277              :             //   with it. If they are trying to promote from remote storage, that will fail, but this is the same
    1278              :             //   as for compaction generally: compaction is allowed to delete layers that readers might be trying to use.
    1279              :             // - We do not run concurrently with other kinds of compaction, so the only layer map writes we race with are:
    1280              :             //    - GC, which at worst witnesses us "undelete" a layer that they just deleted.
    1281              :             //    - ingestion, which only inserts layers, therefore cannot collide with us.
    1282            0 :             let resident = layer.download_and_keep_resident(ctx).await?;
    1283              : 
    1284            0 :             let keys_written = resident
    1285            0 :                 .filter(&self.shard_identity, &mut image_layer_writer, ctx)
    1286            0 :                 .await?;
    1287              : 
    1288            0 :             if keys_written > 0 {
    1289            0 :                 let (desc, path) = image_layer_writer
    1290            0 :                     .finish(ctx)
    1291            0 :                     .await
    1292            0 :                     .map_err(CompactionError::Other)?;
    1293            0 :                 let new_layer = Layer::finish_creating(self.conf, self, desc, &path)
    1294            0 :                     .map_err(CompactionError::Other)?;
    1295            0 :                 tracing::info!(layer=%new_layer, "Rewrote layer, {} -> {} bytes",
    1296            0 :                     layer.metadata().file_size,
    1297            0 :                     new_layer.metadata().file_size);
    1298              : 
    1299            0 :                 replace_image_layers.push((layer, new_layer));
    1300            0 :             } else {
    1301            0 :                 // Drop the old layer.  Usually for this case we would already have noticed that
    1302            0 :                 // the layer has no data for us with the ShardedRange check above, but
    1303            0 :                 drop_layers.push(layer);
    1304            0 :             }
    1305              :         }
    1306              : 
    1307              :         // At this point, we have replaced local layer files with their rewritten form, but not yet uploaded
    1308              :         // metadata to reflect that. If we restart here, the replaced layer files will look invalid (size mismatch
    1309              :         // to remote index) and be removed. This is inefficient but safe.
    1310            0 :         fail::fail_point!("compact-shard-ancestors-localonly");
    1311            0 : 
    1312            0 :         // Update the LayerMap so that readers will use the new layers, and enqueue it for writing to remote storage
    1313            0 :         self.rewrite_layers(replace_image_layers, drop_layers)
    1314            0 :             .await?;
    1315              : 
    1316            0 :         fail::fail_point!("compact-shard-ancestors-enqueued");
    1317            0 : 
    1318            0 :         // We wait for all uploads to complete before finishing this compaction stage.  This is not
    1319            0 :         // necessary for correctness, but it simplifies testing, and avoids proceeding with another
    1320            0 :         // Timeline's compaction while this timeline's uploads may be generating lots of disk I/O
    1321            0 :         // load.
    1322            0 :         match self.remote_client.wait_completion().await {
    1323            0 :             Ok(()) => (),
    1324            0 :             Err(WaitCompletionError::NotInitialized(ni)) => return Err(CompactionError::from(ni)),
    1325              :             Err(WaitCompletionError::UploadQueueShutDownOrStopped) => {
    1326            0 :                 return Err(CompactionError::ShuttingDown);
    1327              :             }
    1328              :         }
    1329              : 
    1330            0 :         fail::fail_point!("compact-shard-ancestors-persistent");
    1331            0 : 
    1332            0 :         Ok(())
    1333            0 :     }
    1334              : 
    1335              :     /// Update the LayerVisibilityHint of layers covered by image layers, based on whether there is
    1336              :     /// an image layer between them and the most recent readable LSN (branch point or tip of timeline).  The
    1337              :     /// purpose of the visibility hint is to record which layers need to be available to service reads.
    1338              :     ///
    1339              :     /// The result may be used as an input to eviction and secondary downloads to de-prioritize layers
    1340              :     /// that we know won't be needed for reads.
    1341          476 :     pub(crate) async fn update_layer_visibility(
    1342          476 :         &self,
    1343          476 :     ) -> Result<(), super::layer_manager::Shutdown> {
    1344          476 :         let head_lsn = self.get_last_record_lsn();
    1345              : 
    1346              :         // We will sweep through layers in reverse-LSN order.  We only do historic layers.  L0 deltas
    1347              :         // are implicitly left visible, because LayerVisibilityHint's default is Visible, and we never modify it here.
    1348              :         // Note that L0 deltas _can_ be covered by image layers, but we consider them 'visible' because we anticipate that
    1349              :         // they will be subject to L0->L1 compaction in the near future.
    1350          476 :         let layer_manager = self.layers.read().await;
    1351          476 :         let layer_map = layer_manager.layer_map()?;
    1352              : 
    1353          476 :         let readable_points = {
    1354          476 :             let children = self.gc_info.read().unwrap().retain_lsns.clone();
    1355          476 : 
    1356          476 :             let mut readable_points = Vec::with_capacity(children.len() + 1);
    1357          476 :             for (child_lsn, _child_timeline_id, is_offloaded) in &children {
    1358            0 :                 if *is_offloaded == MaybeOffloaded::Yes {
    1359            0 :                     continue;
    1360            0 :                 }
    1361            0 :                 readable_points.push(*child_lsn);
    1362              :             }
    1363          476 :             readable_points.push(head_lsn);
    1364          476 :             readable_points
    1365          476 :         };
    1366          476 : 
    1367          476 :         let (layer_visibility, covered) = layer_map.get_visibility(readable_points);
    1368         1200 :         for (layer_desc, visibility) in layer_visibility {
    1369          724 :             // FIXME: a more efficiency bulk zip() through the layers rather than NlogN getting each one
    1370          724 :             let layer = layer_manager.get_from_desc(&layer_desc);
    1371          724 :             layer.set_visibility(visibility);
    1372          724 :         }
    1373              : 
    1374              :         // TODO: publish our covered KeySpace to our parent, so that when they update their visibility, they can
    1375              :         // avoid assuming that everything at a branch point is visible.
    1376          476 :         drop(covered);
    1377          476 :         Ok(())
    1378          476 :     }
    1379              : 
    1380              :     /// Collect a bunch of Level 0 layer files, and compact and reshuffle them as
    1381              :     /// as Level 1 files. Returns whether the L0 layers are fully compacted.
    1382          728 :     async fn compact_level0(
    1383          728 :         self: &Arc<Self>,
    1384          728 :         target_file_size: u64,
    1385          728 :         force_compaction_ignore_threshold: bool,
    1386          728 :         ctx: &RequestContext,
    1387          728 :     ) -> Result<CompactionOutcome, CompactionError> {
    1388              :         let CompactLevel0Phase1Result {
    1389          728 :             new_layers,
    1390          728 :             deltas_to_compact,
    1391          728 :             outcome,
    1392              :         } = {
    1393          728 :             let phase1_span = info_span!("compact_level0_phase1");
    1394          728 :             let ctx = ctx.attached_child();
    1395          728 :             let mut stats = CompactLevel0Phase1StatsBuilder {
    1396          728 :                 version: Some(2),
    1397          728 :                 tenant_id: Some(self.tenant_shard_id),
    1398          728 :                 timeline_id: Some(self.timeline_id),
    1399          728 :                 ..Default::default()
    1400          728 :             };
    1401          728 : 
    1402          728 :             let begin = tokio::time::Instant::now();
    1403          728 :             let phase1_layers_locked = self.layers.read().await;
    1404          728 :             let now = tokio::time::Instant::now();
    1405          728 :             stats.read_lock_acquisition_micros =
    1406          728 :                 DurationRecorder::Recorded(RecordedDuration(now - begin), now);
    1407          728 :             self.compact_level0_phase1(
    1408          728 :                 phase1_layers_locked,
    1409          728 :                 stats,
    1410          728 :                 target_file_size,
    1411          728 :                 force_compaction_ignore_threshold,
    1412          728 :                 &ctx,
    1413          728 :             )
    1414          728 :             .instrument(phase1_span)
    1415          728 :             .await?
    1416              :         };
    1417              : 
    1418          728 :         if new_layers.is_empty() && deltas_to_compact.is_empty() {
    1419              :             // nothing to do
    1420          672 :             return Ok(CompactionOutcome::Done);
    1421           56 :         }
    1422           56 : 
    1423           56 :         self.finish_compact_batch(&new_layers, &Vec::new(), &deltas_to_compact)
    1424           56 :             .await?;
    1425           56 :         Ok(outcome)
    1426          728 :     }
    1427              : 
    1428              :     /// Level0 files first phase of compaction, explained in the [`Self::compact_legacy`] comment.
    1429          728 :     async fn compact_level0_phase1<'a>(
    1430          728 :         self: &'a Arc<Self>,
    1431          728 :         guard: tokio::sync::RwLockReadGuard<'a, LayerManager>,
    1432          728 :         mut stats: CompactLevel0Phase1StatsBuilder,
    1433          728 :         target_file_size: u64,
    1434          728 :         force_compaction_ignore_threshold: bool,
    1435          728 :         ctx: &RequestContext,
    1436          728 :     ) -> Result<CompactLevel0Phase1Result, CompactionError> {
    1437          728 :         stats.read_lock_held_spawn_blocking_startup_micros =
    1438          728 :             stats.read_lock_acquisition_micros.till_now(); // set by caller
    1439          728 :         let layers = guard.layer_map()?;
    1440          728 :         let level0_deltas = layers.level0_deltas();
    1441          728 :         stats.level0_deltas_count = Some(level0_deltas.len());
    1442          728 : 
    1443          728 :         // Only compact if enough layers have accumulated.
    1444          728 :         let threshold = self.get_compaction_threshold();
    1445          728 :         if level0_deltas.is_empty() || level0_deltas.len() < threshold {
    1446          672 :             if force_compaction_ignore_threshold {
    1447            0 :                 if !level0_deltas.is_empty() {
    1448            0 :                     info!(
    1449            0 :                         level0_deltas = level0_deltas.len(),
    1450            0 :                         threshold, "too few deltas to compact, but forcing compaction"
    1451              :                     );
    1452              :                 } else {
    1453            0 :                     info!(
    1454            0 :                         level0_deltas = level0_deltas.len(),
    1455            0 :                         threshold, "too few deltas to compact, cannot force compaction"
    1456              :                     );
    1457            0 :                     return Ok(CompactLevel0Phase1Result::default());
    1458              :                 }
    1459              :             } else {
    1460          672 :                 debug!(
    1461            0 :                     level0_deltas = level0_deltas.len(),
    1462            0 :                     threshold, "too few deltas to compact"
    1463              :                 );
    1464          672 :                 return Ok(CompactLevel0Phase1Result::default());
    1465              :             }
    1466           56 :         }
    1467              : 
    1468           56 :         let mut level0_deltas = level0_deltas
    1469           56 :             .iter()
    1470          804 :             .map(|x| guard.get_from_desc(x))
    1471           56 :             .collect::<Vec<_>>();
    1472           56 : 
    1473           56 :         // Gather the files to compact in this iteration.
    1474           56 :         //
    1475           56 :         // Start with the oldest Level 0 delta file, and collect any other
    1476           56 :         // level 0 files that form a contiguous sequence, such that the end
    1477           56 :         // LSN of previous file matches the start LSN of the next file.
    1478           56 :         //
    1479           56 :         // Note that if the files don't form such a sequence, we might
    1480           56 :         // "compact" just a single file. That's a bit pointless, but it allows
    1481           56 :         // us to get rid of the level 0 file, and compact the other files on
    1482           56 :         // the next iteration. This could probably made smarter, but such
    1483           56 :         // "gaps" in the sequence of level 0 files should only happen in case
    1484           56 :         // of a crash, partial download from cloud storage, or something like
    1485           56 :         // that, so it's not a big deal in practice.
    1486         1496 :         level0_deltas.sort_by_key(|l| l.layer_desc().lsn_range.start);
    1487           56 :         let mut level0_deltas_iter = level0_deltas.iter();
    1488           56 : 
    1489           56 :         let first_level0_delta = level0_deltas_iter.next().unwrap();
    1490           56 :         let mut prev_lsn_end = first_level0_delta.layer_desc().lsn_range.end;
    1491           56 :         let mut deltas_to_compact = Vec::with_capacity(level0_deltas.len());
    1492           56 : 
    1493           56 :         // Accumulate the size of layers in `deltas_to_compact`
    1494           56 :         let mut deltas_to_compact_bytes = 0;
    1495           56 : 
    1496           56 :         // Under normal circumstances, we will accumulate up to compaction_upper_limit L0s of size
    1497           56 :         // checkpoint_distance each.  To avoid edge cases using extra system resources, bound our
    1498           56 :         // work in this function to only operate on this much delta data at once.
    1499           56 :         //
    1500           56 :         // In general, compaction_threshold should be <= compaction_upper_limit, but in case that
    1501           56 :         // the constraint is not respected, we use the larger of the two.
    1502           56 :         let delta_size_limit = std::cmp::max(
    1503           56 :             self.get_compaction_upper_limit(),
    1504           56 :             self.get_compaction_threshold(),
    1505           56 :         ) as u64
    1506           56 :             * std::cmp::max(self.get_checkpoint_distance(), DEFAULT_CHECKPOINT_DISTANCE);
    1507           56 : 
    1508           56 :         let mut fully_compacted = true;
    1509           56 : 
    1510           56 :         deltas_to_compact.push(first_level0_delta.download_and_keep_resident(ctx).await?);
    1511          804 :         for l in level0_deltas_iter {
    1512          748 :             let lsn_range = &l.layer_desc().lsn_range;
    1513          748 : 
    1514          748 :             if lsn_range.start != prev_lsn_end {
    1515            0 :                 break;
    1516          748 :             }
    1517          748 :             deltas_to_compact.push(l.download_and_keep_resident(ctx).await?);
    1518          748 :             deltas_to_compact_bytes += l.metadata().file_size;
    1519          748 :             prev_lsn_end = lsn_range.end;
    1520          748 : 
    1521          748 :             if deltas_to_compact_bytes >= delta_size_limit {
    1522            0 :                 info!(
    1523            0 :                     l0_deltas_selected = deltas_to_compact.len(),
    1524            0 :                     l0_deltas_total = level0_deltas.len(),
    1525            0 :                     "L0 compaction picker hit max delta layer size limit: {}",
    1526              :                     delta_size_limit
    1527              :                 );
    1528            0 :                 fully_compacted = false;
    1529            0 : 
    1530            0 :                 // Proceed with compaction, but only a subset of L0s
    1531            0 :                 break;
    1532          748 :             }
    1533              :         }
    1534           56 :         let lsn_range = Range {
    1535           56 :             start: deltas_to_compact
    1536           56 :                 .first()
    1537           56 :                 .unwrap()
    1538           56 :                 .layer_desc()
    1539           56 :                 .lsn_range
    1540           56 :                 .start,
    1541           56 :             end: deltas_to_compact.last().unwrap().layer_desc().lsn_range.end,
    1542           56 :         };
    1543           56 : 
    1544           56 :         info!(
    1545            0 :             "Starting Level0 compaction in LSN range {}-{} for {} layers ({} deltas in total)",
    1546            0 :             lsn_range.start,
    1547            0 :             lsn_range.end,
    1548            0 :             deltas_to_compact.len(),
    1549            0 :             level0_deltas.len()
    1550              :         );
    1551              : 
    1552          804 :         for l in deltas_to_compact.iter() {
    1553          804 :             info!("compact includes {l}");
    1554              :         }
    1555              : 
    1556              :         // We don't need the original list of layers anymore. Drop it so that
    1557              :         // we don't accidentally use it later in the function.
    1558           56 :         drop(level0_deltas);
    1559           56 : 
    1560           56 :         stats.read_lock_held_prerequisites_micros = stats
    1561           56 :             .read_lock_held_spawn_blocking_startup_micros
    1562           56 :             .till_now();
    1563              : 
    1564              :         // TODO: replace with streaming k-merge
    1565           56 :         let all_keys = {
    1566           56 :             let mut all_keys = Vec::new();
    1567          804 :             for l in deltas_to_compact.iter() {
    1568          804 :                 if self.cancel.is_cancelled() {
    1569            0 :                     return Err(CompactionError::ShuttingDown);
    1570          804 :                 }
    1571          804 :                 let delta = l.get_as_delta(ctx).await.map_err(CompactionError::Other)?;
    1572          804 :                 let keys = delta
    1573          804 :                     .index_entries(ctx)
    1574          804 :                     .await
    1575          804 :                     .map_err(CompactionError::Other)?;
    1576          804 :                 all_keys.extend(keys);
    1577              :             }
    1578              :             // The current stdlib sorting implementation is designed in a way where it is
    1579              :             // particularly fast where the slice is made up of sorted sub-ranges.
    1580      8847530 :             all_keys.sort_by_key(|DeltaEntry { key, lsn, .. }| (*key, *lsn));
    1581           56 :             all_keys
    1582           56 :         };
    1583           56 : 
    1584           56 :         stats.read_lock_held_key_sort_micros = stats.read_lock_held_prerequisites_micros.till_now();
    1585              : 
    1586              :         // Determine N largest holes where N is number of compacted layers. The vec is sorted by key range start.
    1587              :         //
    1588              :         // A hole is a key range for which this compaction doesn't have any WAL records.
    1589              :         // Our goal in this compaction iteration is to avoid creating L1s that, in terms of their key range,
    1590              :         // cover the hole, but actually don't contain any WAL records for that key range.
    1591              :         // The reason is that the mere stack of L1s (`count_deltas`) triggers image layer creation (`create_image_layers`).
    1592              :         // That image layer creation would be useless for a hole range covered by L1s that don't contain any WAL records.
    1593              :         //
    1594              :         // The algorithm chooses holes as follows.
    1595              :         // - Slide a 2-window over the keys in key orde to get the hole range (=distance between two keys).
    1596              :         // - Filter: min threshold on range length
    1597              :         // - Rank: by coverage size (=number of image layers required to reconstruct each key in the range for which we have any data)
    1598              :         //
    1599              :         // For more details, intuition, and some ASCII art see https://github.com/neondatabase/neon/pull/3597#discussion_r1112704451
    1600              :         #[derive(PartialEq, Eq)]
    1601              :         struct Hole {
    1602              :             key_range: Range<Key>,
    1603              :             coverage_size: usize,
    1604              :         }
    1605           56 :         let holes: Vec<Hole> = {
    1606              :             use std::cmp::Ordering;
    1607              :             impl Ord for Hole {
    1608            0 :                 fn cmp(&self, other: &Self) -> Ordering {
    1609            0 :                     self.coverage_size.cmp(&other.coverage_size).reverse()
    1610            0 :                 }
    1611              :             }
    1612              :             impl PartialOrd for Hole {
    1613            0 :                 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
    1614            0 :                     Some(self.cmp(other))
    1615            0 :                 }
    1616              :             }
    1617           56 :             let max_holes = deltas_to_compact.len();
    1618           56 :             let last_record_lsn = self.get_last_record_lsn();
    1619           56 :             let min_hole_range = (target_file_size / page_cache::PAGE_SZ as u64) as i128;
    1620           56 :             let min_hole_coverage_size = 3; // TODO: something more flexible?
    1621           56 :             // min-heap (reserve space for one more element added before eviction)
    1622           56 :             let mut heap: BinaryHeap<Hole> = BinaryHeap::with_capacity(max_holes + 1);
    1623           56 :             let mut prev: Option<Key> = None;
    1624              : 
    1625      4128076 :             for &DeltaEntry { key: next_key, .. } in all_keys.iter() {
    1626      4128076 :                 if let Some(prev_key) = prev {
    1627              :                     // just first fast filter, do not create hole entries for metadata keys. The last hole in the
    1628              :                     // compaction is the gap between data key and metadata keys.
    1629      4128020 :                     if next_key.to_i128() - prev_key.to_i128() >= min_hole_range
    1630            0 :                         && !Key::is_metadata_key(&prev_key)
    1631              :                     {
    1632            0 :                         let key_range = prev_key..next_key;
    1633            0 :                         // Measuring hole by just subtraction of i128 representation of key range boundaries
    1634            0 :                         // has not so much sense, because largest holes will corresponds field1/field2 changes.
    1635            0 :                         // But we are mostly interested to eliminate holes which cause generation of excessive image layers.
    1636            0 :                         // That is why it is better to measure size of hole as number of covering image layers.
    1637            0 :                         let coverage_size =
    1638            0 :                             layers.image_coverage(&key_range, last_record_lsn).len();
    1639            0 :                         if coverage_size >= min_hole_coverage_size {
    1640            0 :                             heap.push(Hole {
    1641            0 :                                 key_range,
    1642            0 :                                 coverage_size,
    1643            0 :                             });
    1644            0 :                             if heap.len() > max_holes {
    1645            0 :                                 heap.pop(); // remove smallest hole
    1646            0 :                             }
    1647            0 :                         }
    1648      4128020 :                     }
    1649           56 :                 }
    1650      4128076 :                 prev = Some(next_key.next());
    1651              :             }
    1652           56 :             let mut holes = heap.into_vec();
    1653           56 :             holes.sort_unstable_by_key(|hole| hole.key_range.start);
    1654           56 :             holes
    1655           56 :         };
    1656           56 :         stats.read_lock_held_compute_holes_micros = stats.read_lock_held_key_sort_micros.till_now();
    1657           56 :         drop_rlock(guard);
    1658           56 : 
    1659           56 :         if self.cancel.is_cancelled() {
    1660            0 :             return Err(CompactionError::ShuttingDown);
    1661           56 :         }
    1662           56 : 
    1663           56 :         stats.read_lock_drop_micros = stats.read_lock_held_compute_holes_micros.till_now();
    1664              : 
    1665              :         // This iterator walks through all key-value pairs from all the layers
    1666              :         // we're compacting, in key, LSN order.
    1667              :         // If there's both a Value::Image and Value::WalRecord for the same (key,lsn),
    1668              :         // then the Value::Image is ordered before Value::WalRecord.
    1669           56 :         let mut all_values_iter = {
    1670           56 :             let mut deltas = Vec::with_capacity(deltas_to_compact.len());
    1671          804 :             for l in deltas_to_compact.iter() {
    1672          804 :                 let l = l.get_as_delta(ctx).await.map_err(CompactionError::Other)?;
    1673          804 :                 deltas.push(l);
    1674              :             }
    1675           56 :             MergeIterator::create(&deltas, &[], ctx)
    1676           56 :         };
    1677           56 : 
    1678           56 :         // This iterator walks through all keys and is needed to calculate size used by each key
    1679           56 :         let mut all_keys_iter = all_keys
    1680           56 :             .iter()
    1681      4128076 :             .map(|DeltaEntry { key, lsn, size, .. }| (*key, *lsn, *size))
    1682      4128020 :             .coalesce(|mut prev, cur| {
    1683      4128020 :                 // Coalesce keys that belong to the same key pair.
    1684      4128020 :                 // This ensures that compaction doesn't put them
    1685      4128020 :                 // into different layer files.
    1686      4128020 :                 // Still limit this by the target file size,
    1687      4128020 :                 // so that we keep the size of the files in
    1688      4128020 :                 // check.
    1689      4128020 :                 if prev.0 == cur.0 && prev.2 < target_file_size {
    1690        80076 :                     prev.2 += cur.2;
    1691        80076 :                     Ok(prev)
    1692              :                 } else {
    1693      4047944 :                     Err((prev, cur))
    1694              :                 }
    1695      4128020 :             });
    1696           56 : 
    1697           56 :         // Merge the contents of all the input delta layers into a new set
    1698           56 :         // of delta layers, based on the current partitioning.
    1699           56 :         //
    1700           56 :         // We split the new delta layers on the key dimension. We iterate through the key space, and for each key, check if including the next key to the current output layer we're building would cause the layer to become too large. If so, dump the current output layer and start new one.
    1701           56 :         // It's possible that there is a single key with so many page versions that storing all of them in a single layer file
    1702           56 :         // would be too large. In that case, we also split on the LSN dimension.
    1703           56 :         //
    1704           56 :         // LSN
    1705           56 :         //  ^
    1706           56 :         //  |
    1707           56 :         //  | +-----------+            +--+--+--+--+
    1708           56 :         //  | |           |            |  |  |  |  |
    1709           56 :         //  | +-----------+            |  |  |  |  |
    1710           56 :         //  | |           |            |  |  |  |  |
    1711           56 :         //  | +-----------+     ==>    |  |  |  |  |
    1712           56 :         //  | |           |            |  |  |  |  |
    1713           56 :         //  | +-----------+            |  |  |  |  |
    1714           56 :         //  | |           |            |  |  |  |  |
    1715           56 :         //  | +-----------+            +--+--+--+--+
    1716           56 :         //  |
    1717           56 :         //  +--------------> key
    1718           56 :         //
    1719           56 :         //
    1720           56 :         // If one key (X) has a lot of page versions:
    1721           56 :         //
    1722           56 :         // LSN
    1723           56 :         //  ^
    1724           56 :         //  |                                 (X)
    1725           56 :         //  | +-----------+            +--+--+--+--+
    1726           56 :         //  | |           |            |  |  |  |  |
    1727           56 :         //  | +-----------+            |  |  +--+  |
    1728           56 :         //  | |           |            |  |  |  |  |
    1729           56 :         //  | +-----------+     ==>    |  |  |  |  |
    1730           56 :         //  | |           |            |  |  +--+  |
    1731           56 :         //  | +-----------+            |  |  |  |  |
    1732           56 :         //  | |           |            |  |  |  |  |
    1733           56 :         //  | +-----------+            +--+--+--+--+
    1734           56 :         //  |
    1735           56 :         //  +--------------> key
    1736           56 :         // TODO: this actually divides the layers into fixed-size chunks, not
    1737           56 :         // based on the partitioning.
    1738           56 :         //
    1739           56 :         // TODO: we should also opportunistically materialize and
    1740           56 :         // garbage collect what we can.
    1741           56 :         let mut new_layers = Vec::new();
    1742           56 :         let mut prev_key: Option<Key> = None;
    1743           56 :         let mut writer: Option<DeltaLayerWriter> = None;
    1744           56 :         let mut key_values_total_size = 0u64;
    1745           56 :         let mut dup_start_lsn: Lsn = Lsn::INVALID; // start LSN of layer containing values of the single key
    1746           56 :         let mut dup_end_lsn: Lsn = Lsn::INVALID; // end LSN of layer containing values of the single key
    1747           56 :         let mut next_hole = 0; // index of next hole in holes vector
    1748           56 : 
    1749           56 :         let mut keys = 0;
    1750              : 
    1751      4128132 :         while let Some((key, lsn, value)) = all_values_iter
    1752      4128132 :             .next()
    1753      4128132 :             .await
    1754      4128132 :             .map_err(CompactionError::Other)?
    1755              :         {
    1756      4128076 :             keys += 1;
    1757      4128076 : 
    1758      4128076 :             if keys % 32_768 == 0 && self.cancel.is_cancelled() {
    1759              :                 // avoid hitting the cancellation token on every key. in benches, we end up
    1760              :                 // shuffling an order of million keys per layer, this means we'll check it
    1761              :                 // around tens of times per layer.
    1762            0 :                 return Err(CompactionError::ShuttingDown);
    1763      4128076 :             }
    1764      4128076 : 
    1765      4128076 :             let same_key = prev_key == Some(key);
    1766      4128076 :             // We need to check key boundaries once we reach next key or end of layer with the same key
    1767      4128076 :             if !same_key || lsn == dup_end_lsn {
    1768      4048000 :                 let mut next_key_size = 0u64;
    1769      4048000 :                 let is_dup_layer = dup_end_lsn.is_valid();
    1770      4048000 :                 dup_start_lsn = Lsn::INVALID;
    1771      4048000 :                 if !same_key {
    1772      4048000 :                     dup_end_lsn = Lsn::INVALID;
    1773      4048000 :                 }
    1774              :                 // Determine size occupied by this key. We stop at next key or when size becomes larger than target_file_size
    1775      4048000 :                 for (next_key, next_lsn, next_size) in all_keys_iter.by_ref() {
    1776      4048000 :                     next_key_size = next_size;
    1777      4048000 :                     if key != next_key {
    1778      4047944 :                         if dup_end_lsn.is_valid() {
    1779            0 :                             // We are writting segment with duplicates:
    1780            0 :                             // place all remaining values of this key in separate segment
    1781            0 :                             dup_start_lsn = dup_end_lsn; // new segments starts where old stops
    1782            0 :                             dup_end_lsn = lsn_range.end; // there are no more values of this key till end of LSN range
    1783      4047944 :                         }
    1784      4047944 :                         break;
    1785           56 :                     }
    1786           56 :                     key_values_total_size += next_size;
    1787           56 :                     // Check if it is time to split segment: if total keys size is larger than target file size.
    1788           56 :                     // We need to avoid generation of empty segments if next_size > target_file_size.
    1789           56 :                     if key_values_total_size > target_file_size && lsn != next_lsn {
    1790              :                         // Split key between multiple layers: such layer can contain only single key
    1791            0 :                         dup_start_lsn = if dup_end_lsn.is_valid() {
    1792            0 :                             dup_end_lsn // new segment with duplicates starts where old one stops
    1793              :                         } else {
    1794            0 :                             lsn // start with the first LSN for this key
    1795              :                         };
    1796            0 :                         dup_end_lsn = next_lsn; // upper LSN boundary is exclusive
    1797            0 :                         break;
    1798           56 :                     }
    1799              :                 }
    1800              :                 // handle case when loop reaches last key: in this case dup_end is non-zero but dup_start is not set.
    1801      4048000 :                 if dup_end_lsn.is_valid() && !dup_start_lsn.is_valid() {
    1802            0 :                     dup_start_lsn = dup_end_lsn;
    1803            0 :                     dup_end_lsn = lsn_range.end;
    1804      4048000 :                 }
    1805      4048000 :                 if writer.is_some() {
    1806      4047944 :                     let written_size = writer.as_mut().unwrap().size();
    1807      4047944 :                     let contains_hole =
    1808      4047944 :                         next_hole < holes.len() && key >= holes[next_hole].key_range.end;
    1809              :                     // check if key cause layer overflow or contains hole...
    1810      4047944 :                     if is_dup_layer
    1811      4047944 :                         || dup_end_lsn.is_valid()
    1812      4047944 :                         || written_size + key_values_total_size > target_file_size
    1813      4047384 :                         || contains_hole
    1814              :                     {
    1815              :                         // ... if so, flush previous layer and prepare to write new one
    1816          560 :                         let (desc, path) = writer
    1817          560 :                             .take()
    1818          560 :                             .unwrap()
    1819          560 :                             .finish(prev_key.unwrap().next(), ctx)
    1820          560 :                             .await
    1821          560 :                             .map_err(CompactionError::Other)?;
    1822          560 :                         let new_delta = Layer::finish_creating(self.conf, self, desc, &path)
    1823          560 :                             .map_err(CompactionError::Other)?;
    1824              : 
    1825          560 :                         new_layers.push(new_delta);
    1826          560 :                         writer = None;
    1827          560 : 
    1828          560 :                         if contains_hole {
    1829            0 :                             // skip hole
    1830            0 :                             next_hole += 1;
    1831          560 :                         }
    1832      4047384 :                     }
    1833           56 :                 }
    1834              :                 // Remember size of key value because at next iteration we will access next item
    1835      4048000 :                 key_values_total_size = next_key_size;
    1836        80076 :             }
    1837      4128076 :             fail_point!("delta-layer-writer-fail-before-finish", |_| {
    1838            0 :                 Err(CompactionError::Other(anyhow::anyhow!(
    1839            0 :                     "failpoint delta-layer-writer-fail-before-finish"
    1840            0 :                 )))
    1841      4128076 :             });
    1842              : 
    1843      4128076 :             if !self.shard_identity.is_key_disposable(&key) {
    1844      4128076 :                 if writer.is_none() {
    1845          616 :                     if self.cancel.is_cancelled() {
    1846              :                         // to be somewhat responsive to cancellation, check for each new layer
    1847            0 :                         return Err(CompactionError::ShuttingDown);
    1848          616 :                     }
    1849              :                     // Create writer if not initiaized yet
    1850          616 :                     writer = Some(
    1851              :                         DeltaLayerWriter::new(
    1852          616 :                             self.conf,
    1853          616 :                             self.timeline_id,
    1854          616 :                             self.tenant_shard_id,
    1855          616 :                             key,
    1856          616 :                             if dup_end_lsn.is_valid() {
    1857              :                                 // this is a layer containing slice of values of the same key
    1858            0 :                                 debug!("Create new dup layer {}..{}", dup_start_lsn, dup_end_lsn);
    1859            0 :                                 dup_start_lsn..dup_end_lsn
    1860              :                             } else {
    1861          616 :                                 debug!("Create new layer {}..{}", lsn_range.start, lsn_range.end);
    1862          616 :                                 lsn_range.clone()
    1863              :                             },
    1864          616 :                             ctx,
    1865          616 :                         )
    1866          616 :                         .await
    1867          616 :                         .map_err(CompactionError::Other)?,
    1868              :                     );
    1869              : 
    1870          616 :                     keys = 0;
    1871      4127460 :                 }
    1872              : 
    1873      4128076 :                 writer
    1874      4128076 :                     .as_mut()
    1875      4128076 :                     .unwrap()
    1876      4128076 :                     .put_value(key, lsn, value, ctx)
    1877      4128076 :                     .await
    1878      4128076 :                     .map_err(CompactionError::Other)?;
    1879              :             } else {
    1880            0 :                 let owner = self.shard_identity.get_shard_number(&key);
    1881            0 : 
    1882            0 :                 // This happens after a shard split, when we're compacting an L0 created by our parent shard
    1883            0 :                 debug!("dropping key {key} during compaction (it belongs on shard {owner})");
    1884              :             }
    1885              : 
    1886      4128076 :             if !new_layers.is_empty() {
    1887        39572 :                 fail_point!("after-timeline-compacted-first-L1");
    1888      4088504 :             }
    1889              : 
    1890      4128076 :             prev_key = Some(key);
    1891              :         }
    1892           56 :         if let Some(writer) = writer {
    1893           56 :             let (desc, path) = writer
    1894           56 :                 .finish(prev_key.unwrap().next(), ctx)
    1895           56 :                 .await
    1896           56 :                 .map_err(CompactionError::Other)?;
    1897           56 :             let new_delta = Layer::finish_creating(self.conf, self, desc, &path)
    1898           56 :                 .map_err(CompactionError::Other)?;
    1899           56 :             new_layers.push(new_delta);
    1900            0 :         }
    1901              : 
    1902              :         // Sync layers
    1903           56 :         if !new_layers.is_empty() {
    1904              :             // Print a warning if the created layer is larger than double the target size
    1905              :             // Add two pages for potential overhead. This should in theory be already
    1906              :             // accounted for in the target calculation, but for very small targets,
    1907              :             // we still might easily hit the limit otherwise.
    1908           56 :             let warn_limit = target_file_size * 2 + page_cache::PAGE_SZ as u64 * 2;
    1909          616 :             for layer in new_layers.iter() {
    1910          616 :                 if layer.layer_desc().file_size > warn_limit {
    1911            0 :                     warn!(
    1912              :                         %layer,
    1913            0 :                         "created delta file of size {} larger than double of target of {target_file_size}", layer.layer_desc().file_size
    1914              :                     );
    1915          616 :                 }
    1916              :             }
    1917              : 
    1918              :             // The writer.finish() above already did the fsync of the inodes.
    1919              :             // We just need to fsync the directory in which these inodes are linked,
    1920              :             // which we know to be the timeline directory.
    1921              :             //
    1922              :             // We use fatal_err() below because the after writer.finish() returns with success,
    1923              :             // the in-memory state of the filesystem already has the layer file in its final place,
    1924              :             // and subsequent pageserver code could think it's durable while it really isn't.
    1925           56 :             let timeline_dir = VirtualFile::open(
    1926           56 :                 &self
    1927           56 :                     .conf
    1928           56 :                     .timeline_path(&self.tenant_shard_id, &self.timeline_id),
    1929           56 :                 ctx,
    1930           56 :             )
    1931           56 :             .await
    1932           56 :             .fatal_err("VirtualFile::open for timeline dir fsync");
    1933           56 :             timeline_dir
    1934           56 :                 .sync_all()
    1935           56 :                 .await
    1936           56 :                 .fatal_err("VirtualFile::sync_all timeline dir");
    1937            0 :         }
    1938              : 
    1939           56 :         stats.write_layer_files_micros = stats.read_lock_drop_micros.till_now();
    1940           56 :         stats.new_deltas_count = Some(new_layers.len());
    1941          616 :         stats.new_deltas_size = Some(new_layers.iter().map(|l| l.layer_desc().file_size).sum());
    1942           56 : 
    1943           56 :         match TryInto::<CompactLevel0Phase1Stats>::try_into(stats)
    1944           56 :             .and_then(|stats| serde_json::to_string(&stats).context("serde_json::to_string"))
    1945              :         {
    1946           56 :             Ok(stats_json) => {
    1947           56 :                 info!(
    1948            0 :                     stats_json = stats_json.as_str(),
    1949            0 :                     "compact_level0_phase1 stats available"
    1950              :                 )
    1951              :             }
    1952            0 :             Err(e) => {
    1953            0 :                 warn!("compact_level0_phase1 stats failed to serialize: {:#}", e);
    1954              :             }
    1955              :         }
    1956              : 
    1957              :         // Without this, rustc complains about deltas_to_compact still
    1958              :         // being borrowed when we `.into_iter()` below.
    1959           56 :         drop(all_values_iter);
    1960           56 : 
    1961           56 :         Ok(CompactLevel0Phase1Result {
    1962           56 :             new_layers,
    1963           56 :             deltas_to_compact: deltas_to_compact
    1964           56 :                 .into_iter()
    1965          804 :                 .map(|x| x.drop_eviction_guard())
    1966           56 :                 .collect::<Vec<_>>(),
    1967           56 :             outcome: if fully_compacted {
    1968           56 :                 CompactionOutcome::Done
    1969              :             } else {
    1970            0 :                 CompactionOutcome::Pending
    1971              :             },
    1972              :         })
    1973          728 :     }
    1974              : }
    1975              : 
    1976              : #[derive(Default)]
    1977              : struct CompactLevel0Phase1Result {
    1978              :     new_layers: Vec<ResidentLayer>,
    1979              :     deltas_to_compact: Vec<Layer>,
    1980              :     // Whether we have included all L0 layers, or selected only part of them due to the
    1981              :     // L0 compaction size limit.
    1982              :     outcome: CompactionOutcome,
    1983              : }
    1984              : 
    1985              : #[derive(Default)]
    1986              : struct CompactLevel0Phase1StatsBuilder {
    1987              :     version: Option<u64>,
    1988              :     tenant_id: Option<TenantShardId>,
    1989              :     timeline_id: Option<TimelineId>,
    1990              :     read_lock_acquisition_micros: DurationRecorder,
    1991              :     read_lock_held_spawn_blocking_startup_micros: DurationRecorder,
    1992              :     read_lock_held_key_sort_micros: DurationRecorder,
    1993              :     read_lock_held_prerequisites_micros: DurationRecorder,
    1994              :     read_lock_held_compute_holes_micros: DurationRecorder,
    1995              :     read_lock_drop_micros: DurationRecorder,
    1996              :     write_layer_files_micros: DurationRecorder,
    1997              :     level0_deltas_count: Option<usize>,
    1998              :     new_deltas_count: Option<usize>,
    1999              :     new_deltas_size: Option<u64>,
    2000              : }
    2001              : 
    2002              : #[derive(serde::Serialize)]
    2003              : struct CompactLevel0Phase1Stats {
    2004              :     version: u64,
    2005              :     tenant_id: TenantShardId,
    2006              :     timeline_id: TimelineId,
    2007              :     read_lock_acquisition_micros: RecordedDuration,
    2008              :     read_lock_held_spawn_blocking_startup_micros: RecordedDuration,
    2009              :     read_lock_held_key_sort_micros: RecordedDuration,
    2010              :     read_lock_held_prerequisites_micros: RecordedDuration,
    2011              :     read_lock_held_compute_holes_micros: RecordedDuration,
    2012              :     read_lock_drop_micros: RecordedDuration,
    2013              :     write_layer_files_micros: RecordedDuration,
    2014              :     level0_deltas_count: usize,
    2015              :     new_deltas_count: usize,
    2016              :     new_deltas_size: u64,
    2017              : }
    2018              : 
    2019              : impl TryFrom<CompactLevel0Phase1StatsBuilder> for CompactLevel0Phase1Stats {
    2020              :     type Error = anyhow::Error;
    2021              : 
    2022           56 :     fn try_from(value: CompactLevel0Phase1StatsBuilder) -> Result<Self, Self::Error> {
    2023           56 :         Ok(Self {
    2024           56 :             version: value.version.ok_or_else(|| anyhow!("version not set"))?,
    2025           56 :             tenant_id: value
    2026           56 :                 .tenant_id
    2027           56 :                 .ok_or_else(|| anyhow!("tenant_id not set"))?,
    2028           56 :             timeline_id: value
    2029           56 :                 .timeline_id
    2030           56 :                 .ok_or_else(|| anyhow!("timeline_id not set"))?,
    2031           56 :             read_lock_acquisition_micros: value
    2032           56 :                 .read_lock_acquisition_micros
    2033           56 :                 .into_recorded()
    2034           56 :                 .ok_or_else(|| anyhow!("read_lock_acquisition_micros not set"))?,
    2035           56 :             read_lock_held_spawn_blocking_startup_micros: value
    2036           56 :                 .read_lock_held_spawn_blocking_startup_micros
    2037           56 :                 .into_recorded()
    2038           56 :                 .ok_or_else(|| anyhow!("read_lock_held_spawn_blocking_startup_micros not set"))?,
    2039           56 :             read_lock_held_key_sort_micros: value
    2040           56 :                 .read_lock_held_key_sort_micros
    2041           56 :                 .into_recorded()
    2042           56 :                 .ok_or_else(|| anyhow!("read_lock_held_key_sort_micros not set"))?,
    2043           56 :             read_lock_held_prerequisites_micros: value
    2044           56 :                 .read_lock_held_prerequisites_micros
    2045           56 :                 .into_recorded()
    2046           56 :                 .ok_or_else(|| anyhow!("read_lock_held_prerequisites_micros not set"))?,
    2047           56 :             read_lock_held_compute_holes_micros: value
    2048           56 :                 .read_lock_held_compute_holes_micros
    2049           56 :                 .into_recorded()
    2050           56 :                 .ok_or_else(|| anyhow!("read_lock_held_compute_holes_micros not set"))?,
    2051           56 :             read_lock_drop_micros: value
    2052           56 :                 .read_lock_drop_micros
    2053           56 :                 .into_recorded()
    2054           56 :                 .ok_or_else(|| anyhow!("read_lock_drop_micros not set"))?,
    2055           56 :             write_layer_files_micros: value
    2056           56 :                 .write_layer_files_micros
    2057           56 :                 .into_recorded()
    2058           56 :                 .ok_or_else(|| anyhow!("write_layer_files_micros not set"))?,
    2059           56 :             level0_deltas_count: value
    2060           56 :                 .level0_deltas_count
    2061           56 :                 .ok_or_else(|| anyhow!("level0_deltas_count not set"))?,
    2062           56 :             new_deltas_count: value
    2063           56 :                 .new_deltas_count
    2064           56 :                 .ok_or_else(|| anyhow!("new_deltas_count not set"))?,
    2065           56 :             new_deltas_size: value
    2066           56 :                 .new_deltas_size
    2067           56 :                 .ok_or_else(|| anyhow!("new_deltas_size not set"))?,
    2068              :         })
    2069           56 :     }
    2070              : }
    2071              : 
    2072              : impl Timeline {
    2073              :     /// Entry point for new tiered compaction algorithm.
    2074              :     ///
    2075              :     /// All the real work is in the implementation in the pageserver_compaction
    2076              :     /// crate. The code here would apply to any algorithm implemented by the
    2077              :     /// same interface, but tiered is the only one at the moment.
    2078              :     ///
    2079              :     /// TODO: cancellation
    2080            0 :     pub(crate) async fn compact_tiered(
    2081            0 :         self: &Arc<Self>,
    2082            0 :         _cancel: &CancellationToken,
    2083            0 :         ctx: &RequestContext,
    2084            0 :     ) -> Result<(), CompactionError> {
    2085            0 :         let fanout = self.get_compaction_threshold() as u64;
    2086            0 :         let target_file_size = self.get_checkpoint_distance();
    2087              : 
    2088              :         // Find the top of the historical layers
    2089            0 :         let end_lsn = {
    2090            0 :             let guard = self.layers.read().await;
    2091            0 :             let layers = guard.layer_map()?;
    2092              : 
    2093            0 :             let l0_deltas = layers.level0_deltas();
    2094            0 : 
    2095            0 :             // As an optimization, if we find that there are too few L0 layers,
    2096            0 :             // bail out early. We know that the compaction algorithm would do
    2097            0 :             // nothing in that case.
    2098            0 :             if l0_deltas.len() < fanout as usize {
    2099              :                 // doesn't need compacting
    2100            0 :                 return Ok(());
    2101            0 :             }
    2102            0 :             l0_deltas.iter().map(|l| l.lsn_range.end).max().unwrap()
    2103            0 :         };
    2104            0 : 
    2105            0 :         // Is the timeline being deleted?
    2106            0 :         if self.is_stopping() {
    2107            0 :             trace!("Dropping out of compaction on timeline shutdown");
    2108            0 :             return Err(CompactionError::ShuttingDown);
    2109            0 :         }
    2110              : 
    2111            0 :         let (dense_ks, _sparse_ks) = self.collect_keyspace(end_lsn, ctx).await?;
    2112              :         // TODO(chi): ignore sparse_keyspace for now, compact it in the future.
    2113            0 :         let mut adaptor = TimelineAdaptor::new(self, (end_lsn, dense_ks));
    2114            0 : 
    2115            0 :         pageserver_compaction::compact_tiered::compact_tiered(
    2116            0 :             &mut adaptor,
    2117            0 :             end_lsn,
    2118            0 :             target_file_size,
    2119            0 :             fanout,
    2120            0 :             ctx,
    2121            0 :         )
    2122            0 :         .await
    2123              :         // TODO: compact_tiered needs to return CompactionError
    2124            0 :         .map_err(CompactionError::Other)?;
    2125              : 
    2126            0 :         adaptor.flush_updates().await?;
    2127            0 :         Ok(())
    2128            0 :     }
    2129              : 
    2130              :     /// Take a list of images and deltas, produce images and deltas according to GC horizon and retain_lsns.
    2131              :     ///
    2132              :     /// It takes a key, the values of the key within the compaction process, a GC horizon, and all retain_lsns below the horizon.
    2133              :     /// For now, it requires the `accumulated_values` contains the full history of the key (i.e., the key with the lowest LSN is
    2134              :     /// an image or a WAL not requiring a base image). This restriction will be removed once we implement gc-compaction on branch.
    2135              :     ///
    2136              :     /// The function returns the deltas and the base image that need to be placed at each of the retain LSN. For example, we have:
    2137              :     ///
    2138              :     /// A@0x10, +B@0x20, +C@0x30, +D@0x40, +E@0x50, +F@0x60
    2139              :     /// horizon = 0x50, retain_lsn = 0x20, 0x40, delta_threshold=3
    2140              :     ///
    2141              :     /// The function will produce:
    2142              :     ///
    2143              :     /// ```plain
    2144              :     /// 0x20(retain_lsn) -> img=AB@0x20                  always produce a single image below the lowest retain LSN
    2145              :     /// 0x40(retain_lsn) -> deltas=[+C@0x30, +D@0x40]    two deltas since the last base image, keeping the deltas
    2146              :     /// 0x50(horizon)    -> deltas=[ABCDE@0x50]          three deltas since the last base image, generate an image but put it in the delta
    2147              :     /// above_horizon    -> deltas=[+F@0x60]             full history above the horizon
    2148              :     /// ```
    2149              :     ///
    2150              :     /// Note that `accumulated_values` must be sorted by LSN and should belong to a single key.
    2151         1296 :     pub(crate) async fn generate_key_retention(
    2152         1296 :         self: &Arc<Timeline>,
    2153         1296 :         key: Key,
    2154         1296 :         full_history: &[(Key, Lsn, Value)],
    2155         1296 :         horizon: Lsn,
    2156         1296 :         retain_lsn_below_horizon: &[Lsn],
    2157         1296 :         delta_threshold_cnt: usize,
    2158         1296 :         base_img_from_ancestor: Option<(Key, Lsn, Bytes)>,
    2159         1296 :     ) -> anyhow::Result<KeyHistoryRetention> {
    2160              :         // Pre-checks for the invariants
    2161              : 
    2162         1296 :         let debug_mode = cfg!(debug_assertions) || cfg!(feature = "testing");
    2163              : 
    2164         1296 :         if debug_mode {
    2165         3144 :             for (log_key, _, _) in full_history {
    2166         1848 :                 assert_eq!(log_key, &key, "mismatched key");
    2167              :             }
    2168         1296 :             for i in 1..full_history.len() {
    2169          552 :                 assert!(full_history[i - 1].1 <= full_history[i].1, "unordered LSN");
    2170          552 :                 if full_history[i - 1].1 == full_history[i].1 {
    2171            0 :                     assert!(
    2172            0 :                         matches!(full_history[i - 1].2, Value::Image(_)),
    2173            0 :                         "unordered delta/image, or duplicated delta"
    2174              :                     );
    2175          552 :                 }
    2176              :             }
    2177              :             // There was an assertion for no base image that checks if the first
    2178              :             // record in the history is `will_init` before, but it was removed.
    2179              :             // This is explained in the test cases for generate_key_retention.
    2180              :             // Search "incomplete history" for more information.
    2181         2856 :             for lsn in retain_lsn_below_horizon {
    2182         1560 :                 assert!(lsn < &horizon, "retain lsn must be below horizon")
    2183              :             }
    2184         1296 :             for i in 1..retain_lsn_below_horizon.len() {
    2185          712 :                 assert!(
    2186          712 :                     retain_lsn_below_horizon[i - 1] <= retain_lsn_below_horizon[i],
    2187            0 :                     "unordered LSN"
    2188              :                 );
    2189              :             }
    2190            0 :         }
    2191         1296 :         let has_ancestor = base_img_from_ancestor.is_some();
    2192              :         // Step 1: split history into len(retain_lsn_below_horizon) + 2 buckets, where the last bucket is for all deltas above the horizon,
    2193              :         // and the second-to-last bucket is for the horizon. Each bucket contains lsn_last_bucket < deltas <= lsn_this_bucket.
    2194         1296 :         let (mut split_history, lsn_split_points) = {
    2195         1296 :             let mut split_history = Vec::new();
    2196         1296 :             split_history.resize_with(retain_lsn_below_horizon.len() + 2, Vec::new);
    2197         1296 :             let mut lsn_split_points = Vec::with_capacity(retain_lsn_below_horizon.len() + 1);
    2198         2856 :             for lsn in retain_lsn_below_horizon {
    2199         1560 :                 lsn_split_points.push(*lsn);
    2200         1560 :             }
    2201         1296 :             lsn_split_points.push(horizon);
    2202         1296 :             let mut current_idx = 0;
    2203         3144 :             for item @ (_, lsn, _) in full_history {
    2204         2336 :                 while current_idx < lsn_split_points.len() && *lsn > lsn_split_points[current_idx] {
    2205          488 :                     current_idx += 1;
    2206          488 :                 }
    2207         1848 :                 split_history[current_idx].push(item);
    2208              :             }
    2209         1296 :             (split_history, lsn_split_points)
    2210              :         };
    2211              :         // Step 2: filter out duplicated records due to the k-merge of image/delta layers
    2212         5448 :         for split_for_lsn in &mut split_history {
    2213         4152 :             let mut prev_lsn = None;
    2214         4152 :             let mut new_split_for_lsn = Vec::with_capacity(split_for_lsn.len());
    2215         4152 :             for record @ (_, lsn, _) in std::mem::take(split_for_lsn) {
    2216         1848 :                 if let Some(prev_lsn) = &prev_lsn {
    2217          248 :                     if *prev_lsn == lsn {
    2218              :                         // The case that we have an LSN with both data from the delta layer and the image layer. As
    2219              :                         // `ValueWrapper` ensures that an image is ordered before a delta at the same LSN, we simply
    2220              :                         // drop this delta and keep the image.
    2221              :                         //
    2222              :                         // For example, we have delta layer key1@0x10, key1@0x20, and image layer key1@0x10, we will
    2223              :                         // keep the image for key1@0x10 and the delta for key1@0x20. key1@0x10 delta will be simply
    2224              :                         // dropped.
    2225              :                         //
    2226              :                         // TODO: in case we have both delta + images for a given LSN and it does not exceed the delta
    2227              :                         // threshold, we could have kept delta instead to save space. This is an optimization for the future.
    2228            0 :                         continue;
    2229          248 :                     }
    2230         1600 :                 }
    2231         1848 :                 prev_lsn = Some(lsn);
    2232         1848 :                 new_split_for_lsn.push(record);
    2233              :             }
    2234         4152 :             *split_for_lsn = new_split_for_lsn;
    2235              :         }
    2236              :         // Step 3: generate images when necessary
    2237         1296 :         let mut retention = Vec::with_capacity(split_history.len());
    2238         1296 :         let mut records_since_last_image = 0;
    2239         1296 :         let batch_cnt = split_history.len();
    2240         1296 :         assert!(
    2241         1296 :             batch_cnt >= 2,
    2242            0 :             "should have at least below + above horizon batches"
    2243              :         );
    2244         1296 :         let mut replay_history: Vec<(Key, Lsn, Value)> = Vec::new();
    2245         1296 :         if let Some((key, lsn, img)) = base_img_from_ancestor {
    2246           84 :             replay_history.push((key, lsn, Value::Image(img)));
    2247         1212 :         }
    2248              : 
    2249              :         /// Generate debug information for the replay history
    2250            0 :         fn generate_history_trace(replay_history: &[(Key, Lsn, Value)]) -> String {
    2251              :             use std::fmt::Write;
    2252            0 :             let mut output = String::new();
    2253            0 :             if let Some((key, _, _)) = replay_history.first() {
    2254            0 :                 write!(output, "key={} ", key).unwrap();
    2255            0 :                 let mut cnt = 0;
    2256            0 :                 for (_, lsn, val) in replay_history {
    2257            0 :                     if val.is_image() {
    2258            0 :                         write!(output, "i@{} ", lsn).unwrap();
    2259            0 :                     } else if val.will_init() {
    2260            0 :                         write!(output, "di@{} ", lsn).unwrap();
    2261            0 :                     } else {
    2262            0 :                         write!(output, "d@{} ", lsn).unwrap();
    2263            0 :                     }
    2264            0 :                     cnt += 1;
    2265            0 :                     if cnt >= 128 {
    2266            0 :                         write!(output, "... and more").unwrap();
    2267            0 :                         break;
    2268            0 :                     }
    2269              :                 }
    2270            0 :             } else {
    2271            0 :                 write!(output, "<no history>").unwrap();
    2272            0 :             }
    2273            0 :             output
    2274            0 :         }
    2275              : 
    2276            0 :         fn generate_debug_trace(
    2277            0 :             replay_history: Option<&[(Key, Lsn, Value)]>,
    2278            0 :             full_history: &[(Key, Lsn, Value)],
    2279            0 :             lsns: &[Lsn],
    2280            0 :             horizon: Lsn,
    2281            0 :         ) -> String {
    2282              :             use std::fmt::Write;
    2283            0 :             let mut output = String::new();
    2284            0 :             if let Some(replay_history) = replay_history {
    2285            0 :                 writeln!(
    2286            0 :                     output,
    2287            0 :                     "replay_history: {}",
    2288            0 :                     generate_history_trace(replay_history)
    2289            0 :                 )
    2290            0 :                 .unwrap();
    2291            0 :             } else {
    2292            0 :                 writeln!(output, "replay_history: <disabled>",).unwrap();
    2293            0 :             }
    2294            0 :             writeln!(
    2295            0 :                 output,
    2296            0 :                 "full_history: {}",
    2297            0 :                 generate_history_trace(full_history)
    2298            0 :             )
    2299            0 :             .unwrap();
    2300            0 :             writeln!(
    2301            0 :                 output,
    2302            0 :                 "when processing: [{}] horizon={}",
    2303            0 :                 lsns.iter().map(|l| format!("{l}")).join(","),
    2304            0 :                 horizon
    2305            0 :             )
    2306            0 :             .unwrap();
    2307            0 :             output
    2308            0 :         }
    2309              : 
    2310         1296 :         let mut key_exists = false;
    2311         4148 :         for (i, split_for_lsn) in split_history.into_iter().enumerate() {
    2312              :             // TODO: there could be image keys inside the splits, and we can compute records_since_last_image accordingly.
    2313         4148 :             records_since_last_image += split_for_lsn.len();
    2314              :             // Whether to produce an image into the final layer files
    2315         4148 :             let produce_image = if i == 0 && !has_ancestor {
    2316              :                 // We always generate images for the first batch (below horizon / lowest retain_lsn)
    2317         1212 :                 true
    2318         2936 :             } else if i == batch_cnt - 1 {
    2319              :                 // Do not generate images for the last batch (above horizon)
    2320         1292 :                 false
    2321         1644 :             } else if records_since_last_image == 0 {
    2322         1288 :                 false
    2323          356 :             } else if records_since_last_image >= delta_threshold_cnt {
    2324              :                 // Generate images when there are too many records
    2325           12 :                 true
    2326              :             } else {
    2327          344 :                 false
    2328              :             };
    2329         4148 :             replay_history.extend(split_for_lsn.iter().map(|x| (*x).clone()));
    2330              :             // Only retain the items after the last image record
    2331         5108 :             for idx in (0..replay_history.len()).rev() {
    2332         5108 :                 if replay_history[idx].2.will_init() {
    2333         4148 :                     replay_history = replay_history[idx..].to_vec();
    2334         4148 :                     break;
    2335          960 :                 }
    2336              :             }
    2337         4148 :             if replay_history.is_empty() && !key_exists {
    2338              :                 // The key does not exist at earlier LSN, we can skip this iteration.
    2339            0 :                 retention.push(Vec::new());
    2340            0 :                 continue;
    2341         4148 :             } else {
    2342         4148 :                 key_exists = true;
    2343         4148 :             }
    2344         4148 :             let Some((_, _, val)) = replay_history.first() else {
    2345            0 :                 unreachable!("replay history should not be empty once it exists")
    2346              :             };
    2347         4148 :             if !val.will_init() {
    2348            0 :                 return Err(anyhow::anyhow!("invalid history, no base image")).with_context(|| {
    2349            0 :                     generate_debug_trace(
    2350            0 :                         Some(&replay_history),
    2351            0 :                         full_history,
    2352            0 :                         retain_lsn_below_horizon,
    2353            0 :                         horizon,
    2354            0 :                     )
    2355            0 :                 });
    2356         4148 :             }
    2357              :             // Whether to reconstruct the image. In debug mode, we will generate an image
    2358              :             // at every retain_lsn to ensure data is not corrupted, but we won't put the
    2359              :             // image into the final layer.
    2360         4148 :             let generate_image = produce_image || debug_mode;
    2361         4148 :             if produce_image {
    2362         1224 :                 records_since_last_image = 0;
    2363         2924 :             }
    2364         4148 :             let img_and_lsn = if generate_image {
    2365         4148 :                 let replay_history_for_debug = if debug_mode {
    2366         4148 :                     Some(replay_history.clone())
    2367              :                 } else {
    2368            0 :                     None
    2369              :                 };
    2370         4148 :                 let replay_history_for_debug_ref = replay_history_for_debug.as_deref();
    2371         4148 :                 let history = if produce_image {
    2372         1224 :                     std::mem::take(&mut replay_history)
    2373              :                 } else {
    2374         2924 :                     replay_history.clone()
    2375              :                 };
    2376         4148 :                 let mut img = None;
    2377         4148 :                 let mut records = Vec::with_capacity(history.len());
    2378         4148 :                 if let (_, lsn, Value::Image(val)) = history.first().as_ref().unwrap() {
    2379         4104 :                     img = Some((*lsn, val.clone()));
    2380         4104 :                     for (_, lsn, val) in history.into_iter().skip(1) {
    2381          932 :                         let Value::WalRecord(rec) = val else {
    2382            0 :                             return Err(anyhow::anyhow!(
    2383            0 :                                 "invalid record, first record is image, expect walrecords"
    2384            0 :                             ))
    2385            0 :                             .with_context(|| {
    2386            0 :                                 generate_debug_trace(
    2387            0 :                                     replay_history_for_debug_ref,
    2388            0 :                                     full_history,
    2389            0 :                                     retain_lsn_below_horizon,
    2390            0 :                                     horizon,
    2391            0 :                                 )
    2392            0 :                             });
    2393              :                         };
    2394          932 :                         records.push((lsn, rec));
    2395              :                     }
    2396              :                 } else {
    2397           72 :                     for (_, lsn, val) in history.into_iter() {
    2398           72 :                         let Value::WalRecord(rec) = val else {
    2399            0 :                             return Err(anyhow::anyhow!("invalid record, first record is walrecord, expect rest are walrecord"))
    2400            0 :                                 .with_context(|| generate_debug_trace(
    2401            0 :                                     replay_history_for_debug_ref,
    2402            0 :                                     full_history,
    2403            0 :                                     retain_lsn_below_horizon,
    2404            0 :                                     horizon,
    2405            0 :                                 ));
    2406              :                         };
    2407           72 :                         records.push((lsn, rec));
    2408              :                     }
    2409              :                 }
    2410         4148 :                 records.reverse();
    2411         4148 :                 let state = ValueReconstructState { img, records };
    2412              :                 // last batch does not generate image so i is always in range, unless we force generate
    2413              :                 // an image during testing
    2414         4148 :                 let request_lsn = if i >= lsn_split_points.len() {
    2415         1292 :                     Lsn::MAX
    2416              :                 } else {
    2417         2856 :                     lsn_split_points[i]
    2418              :                 };
    2419         4148 :                 let img = self
    2420         4148 :                     .reconstruct_value(key, request_lsn, state, RedoAttemptType::GcCompaction)
    2421         4148 :                     .await?;
    2422         4144 :                 Some((request_lsn, img))
    2423              :             } else {
    2424            0 :                 None
    2425              :             };
    2426         4144 :             if produce_image {
    2427         1220 :                 let (request_lsn, img) = img_and_lsn.unwrap();
    2428         1220 :                 replay_history.push((key, request_lsn, Value::Image(img.clone())));
    2429         1220 :                 retention.push(vec![(request_lsn, Value::Image(img))]);
    2430         2924 :             } else {
    2431         2924 :                 let deltas = split_for_lsn
    2432         2924 :                     .iter()
    2433         2924 :                     .map(|(_, lsn, value)| (*lsn, value.clone()))
    2434         2924 :                     .collect_vec();
    2435         2924 :                 retention.push(deltas);
    2436         2924 :             }
    2437              :         }
    2438         1292 :         let mut result = Vec::with_capacity(retention.len());
    2439         1292 :         assert_eq!(retention.len(), lsn_split_points.len() + 1);
    2440         4144 :         for (idx, logs) in retention.into_iter().enumerate() {
    2441         4144 :             if idx == lsn_split_points.len() {
    2442         1292 :                 return Ok(KeyHistoryRetention {
    2443         1292 :                     below_horizon: result,
    2444         1292 :                     above_horizon: KeyLogAtLsn(logs),
    2445         1292 :                 });
    2446         2852 :             } else {
    2447         2852 :                 result.push((lsn_split_points[idx], KeyLogAtLsn(logs)));
    2448         2852 :             }
    2449              :         }
    2450            0 :         unreachable!("key retention is empty")
    2451         1296 :     }
    2452              : 
    2453              :     /// Check how much space is left on the disk
    2454          108 :     async fn check_available_space(self: &Arc<Self>) -> anyhow::Result<u64> {
    2455          108 :         let tenants_dir = self.conf.tenants_path();
    2456              : 
    2457          108 :         let stat = Statvfs::get(&tenants_dir, None)
    2458          108 :             .context("statvfs failed, presumably directory got unlinked")?;
    2459              : 
    2460          108 :         let (avail_bytes, _) = stat.get_avail_total_bytes();
    2461          108 : 
    2462          108 :         Ok(avail_bytes)
    2463          108 :     }
    2464              : 
    2465              :     /// Check if the compaction can proceed safely without running out of space. We assume the size
    2466              :     /// upper bound of the produced files of a compaction job is the same as all layers involved in
    2467              :     /// the compaction. Therefore, we need `2 * layers_to_be_compacted_size` at least to do a
    2468              :     /// compaction.
    2469          108 :     async fn check_compaction_space(
    2470          108 :         self: &Arc<Self>,
    2471          108 :         layer_selection: &[Layer],
    2472          108 :     ) -> Result<(), CompactionError> {
    2473          108 :         let available_space = self
    2474          108 :             .check_available_space()
    2475          108 :             .await
    2476          108 :             .map_err(CompactionError::Other)?;
    2477          108 :         let mut remote_layer_size = 0;
    2478          108 :         let mut all_layer_size = 0;
    2479          424 :         for layer in layer_selection {
    2480          316 :             let needs_download = layer
    2481          316 :                 .needs_download()
    2482          316 :                 .await
    2483          316 :                 .context("failed to check if layer needs download")
    2484          316 :                 .map_err(CompactionError::Other)?;
    2485          316 :             if needs_download.is_some() {
    2486            0 :                 remote_layer_size += layer.layer_desc().file_size;
    2487          316 :             }
    2488          316 :             all_layer_size += layer.layer_desc().file_size;
    2489              :         }
    2490          108 :         let allocated_space = (available_space as f64 * 0.8) as u64; /* reserve 20% space for other tasks */
    2491          108 :         if all_layer_size /* space needed for newly-generated file */ + remote_layer_size /* space for downloading layers */ > allocated_space
    2492              :         {
    2493            0 :             return Err(CompactionError::Other(anyhow!(
    2494            0 :                 "not enough space for compaction: available_space={}, allocated_space={}, all_layer_size={}, remote_layer_size={}, required_space={}",
    2495            0 :                 available_space,
    2496            0 :                 allocated_space,
    2497            0 :                 all_layer_size,
    2498            0 :                 remote_layer_size,
    2499            0 :                 all_layer_size + remote_layer_size
    2500            0 :             )));
    2501          108 :         }
    2502          108 :         Ok(())
    2503          108 :     }
    2504              : 
    2505              :     /// Get a watermark for gc-compaction, that is the lowest LSN that we can use as the `gc_horizon` for
    2506              :     /// the compaction algorithm. It is min(space_cutoff, time_cutoff, latest_gc_cutoff, standby_horizon).
    2507              :     /// Leases and retain_lsns are considered in the gc-compaction job itself so we don't need to account for them
    2508              :     /// here.
    2509          112 :     pub(crate) fn get_gc_compaction_watermark(self: &Arc<Self>) -> Lsn {
    2510          112 :         let gc_cutoff_lsn = {
    2511          112 :             let gc_info = self.gc_info.read().unwrap();
    2512          112 :             gc_info.min_cutoff()
    2513          112 :         };
    2514          112 : 
    2515          112 :         // TODO: standby horizon should use leases so we don't really need to consider it here.
    2516          112 :         // let watermark = watermark.min(self.standby_horizon.load());
    2517          112 : 
    2518          112 :         // TODO: ensure the child branches will not use anything below the watermark, or consider
    2519          112 :         // them when computing the watermark.
    2520          112 :         gc_cutoff_lsn.min(*self.get_applied_gc_cutoff_lsn())
    2521          112 :     }
    2522              : 
    2523              :     /// Split a gc-compaction job into multiple compaction jobs. The split is based on the key range and the estimated size of the compaction job.
    2524              :     /// The function returns a list of compaction jobs that can be executed separately. If the upper bound of the compact LSN
    2525              :     /// range is not specified, we will use the latest gc_cutoff as the upper bound, so that all jobs in the jobset acts
    2526              :     /// like a full compaction of the specified keyspace.
    2527            0 :     pub(crate) async fn gc_compaction_split_jobs(
    2528            0 :         self: &Arc<Self>,
    2529            0 :         job: GcCompactJob,
    2530            0 :         sub_compaction_max_job_size_mb: Option<u64>,
    2531            0 :     ) -> Result<Vec<GcCompactJob>, CompactionError> {
    2532            0 :         let compact_below_lsn = if job.compact_lsn_range.end != Lsn::MAX {
    2533            0 :             job.compact_lsn_range.end
    2534              :         } else {
    2535            0 :             self.get_gc_compaction_watermark()
    2536              :         };
    2537              : 
    2538            0 :         if compact_below_lsn == Lsn::INVALID {
    2539            0 :             tracing::warn!(
    2540            0 :                 "no layers to compact with gc: gc_cutoff not generated yet, skipping gc bottom-most compaction"
    2541              :             );
    2542            0 :             return Ok(vec![]);
    2543            0 :         }
    2544              : 
    2545              :         // Split compaction job to about 4GB each
    2546              :         const GC_COMPACT_MAX_SIZE_MB: u64 = 4 * 1024;
    2547            0 :         let sub_compaction_max_job_size_mb =
    2548            0 :             sub_compaction_max_job_size_mb.unwrap_or(GC_COMPACT_MAX_SIZE_MB);
    2549            0 : 
    2550            0 :         let mut compact_jobs = Vec::<GcCompactJob>::new();
    2551            0 :         // For now, we simply use the key partitioning information; we should do a more fine-grained partitioning
    2552            0 :         // by estimating the amount of files read for a compaction job. We should also partition on LSN.
    2553            0 :         let ((dense_ks, sparse_ks), _) = self.partitioning.read().as_ref().clone();
    2554              :         // Truncate the key range to be within user specified compaction range.
    2555            0 :         fn truncate_to(
    2556            0 :             source_start: &Key,
    2557            0 :             source_end: &Key,
    2558            0 :             target_start: &Key,
    2559            0 :             target_end: &Key,
    2560            0 :         ) -> Option<(Key, Key)> {
    2561            0 :             let start = source_start.max(target_start);
    2562            0 :             let end = source_end.min(target_end);
    2563            0 :             if start < end {
    2564            0 :                 Some((*start, *end))
    2565              :             } else {
    2566            0 :                 None
    2567              :             }
    2568            0 :         }
    2569            0 :         let mut split_key_ranges = Vec::new();
    2570            0 :         let ranges = dense_ks
    2571            0 :             .parts
    2572            0 :             .iter()
    2573            0 :             .map(|partition| partition.ranges.iter())
    2574            0 :             .chain(sparse_ks.parts.iter().map(|x| x.0.ranges.iter()))
    2575            0 :             .flatten()
    2576            0 :             .cloned()
    2577            0 :             .collect_vec();
    2578            0 :         for range in ranges.iter() {
    2579            0 :             let Some((start, end)) = truncate_to(
    2580            0 :                 &range.start,
    2581            0 :                 &range.end,
    2582            0 :                 &job.compact_key_range.start,
    2583            0 :                 &job.compact_key_range.end,
    2584            0 :             ) else {
    2585            0 :                 continue;
    2586              :             };
    2587            0 :             split_key_ranges.push((start, end));
    2588              :         }
    2589            0 :         split_key_ranges.sort();
    2590            0 :         let all_layers = {
    2591            0 :             let guard = self.layers.read().await;
    2592            0 :             let layer_map = guard.layer_map()?;
    2593            0 :             layer_map.iter_historic_layers().collect_vec()
    2594            0 :         };
    2595            0 :         let mut current_start = None;
    2596            0 :         let ranges_num = split_key_ranges.len();
    2597            0 :         for (idx, (start, end)) in split_key_ranges.into_iter().enumerate() {
    2598            0 :             if current_start.is_none() {
    2599            0 :                 current_start = Some(start);
    2600            0 :             }
    2601            0 :             let start = current_start.unwrap();
    2602            0 :             if start >= end {
    2603              :                 // We have already processed this partition.
    2604            0 :                 continue;
    2605            0 :             }
    2606            0 :             let overlapping_layers = {
    2607            0 :                 let mut desc = Vec::new();
    2608            0 :                 for layer in all_layers.iter() {
    2609            0 :                     if overlaps_with(&layer.get_key_range(), &(start..end))
    2610            0 :                         && layer.get_lsn_range().start <= compact_below_lsn
    2611            0 :                     {
    2612            0 :                         desc.push(layer.clone());
    2613            0 :                     }
    2614              :                 }
    2615            0 :                 desc
    2616            0 :             };
    2617            0 :             let total_size = overlapping_layers.iter().map(|x| x.file_size).sum::<u64>();
    2618            0 :             if total_size > sub_compaction_max_job_size_mb * 1024 * 1024 || ranges_num == idx + 1 {
    2619              :                 // Try to extend the compaction range so that we include at least one full layer file.
    2620            0 :                 let extended_end = overlapping_layers
    2621            0 :                     .iter()
    2622            0 :                     .map(|layer| layer.key_range.end)
    2623            0 :                     .min();
    2624              :                 // It is possible that the search range does not contain any layer files when we reach the end of the loop.
    2625              :                 // In this case, we simply use the specified key range end.
    2626            0 :                 let end = if let Some(extended_end) = extended_end {
    2627            0 :                     extended_end.max(end)
    2628              :                 } else {
    2629            0 :                     end
    2630              :                 };
    2631            0 :                 let end = if ranges_num == idx + 1 {
    2632              :                     // extend the compaction range to the end of the key range if it's the last partition
    2633            0 :                     end.max(job.compact_key_range.end)
    2634              :                 } else {
    2635            0 :                     end
    2636              :                 };
    2637            0 :                 if total_size == 0 && !compact_jobs.is_empty() {
    2638            0 :                     info!(
    2639            0 :                         "splitting compaction job: {}..{}, estimated_size={}, extending the previous job",
    2640              :                         start, end, total_size
    2641              :                     );
    2642            0 :                     compact_jobs.last_mut().unwrap().compact_key_range.end = end;
    2643            0 :                     current_start = Some(end);
    2644              :                 } else {
    2645            0 :                     info!(
    2646            0 :                         "splitting compaction job: {}..{}, estimated_size={}",
    2647              :                         start, end, total_size
    2648              :                     );
    2649            0 :                     compact_jobs.push(GcCompactJob {
    2650            0 :                         dry_run: job.dry_run,
    2651            0 :                         compact_key_range: start..end,
    2652            0 :                         compact_lsn_range: job.compact_lsn_range.start..compact_below_lsn,
    2653            0 :                     });
    2654            0 :                     current_start = Some(end);
    2655              :                 }
    2656            0 :             }
    2657              :         }
    2658            0 :         Ok(compact_jobs)
    2659            0 :     }
    2660              : 
    2661              :     /// An experimental compaction building block that combines compaction with garbage collection.
    2662              :     ///
    2663              :     /// The current implementation picks all delta + image layers that are below or intersecting with
    2664              :     /// the GC horizon without considering retain_lsns. Then, it does a full compaction over all these delta
    2665              :     /// layers and image layers, which generates image layers on the gc horizon, drop deltas below gc horizon,
    2666              :     /// and create delta layers with all deltas >= gc horizon.
    2667              :     ///
    2668              :     /// If `options.compact_range` is provided, it will only compact the keys within the range, aka partial compaction.
    2669              :     /// Partial compaction will read and process all layers overlapping with the key range, even if it might
    2670              :     /// contain extra keys. After the gc-compaction phase completes, delta layers that are not fully contained
    2671              :     /// within the key range will be rewritten to ensure they do not overlap with the delta layers. Providing
    2672              :     /// Key::MIN..Key..MAX to the function indicates a full compaction, though technically, `Key::MAX` is not
    2673              :     /// part of the range.
    2674              :     ///
    2675              :     /// If `options.compact_lsn_range.end` is provided, the compaction will only compact layers below or intersect with
    2676              :     /// the LSN. Otherwise, it will use the gc cutoff by default.
    2677          112 :     pub(crate) async fn compact_with_gc(
    2678          112 :         self: &Arc<Self>,
    2679          112 :         cancel: &CancellationToken,
    2680          112 :         options: CompactOptions,
    2681          112 :         ctx: &RequestContext,
    2682          112 :     ) -> Result<CompactionOutcome, CompactionError> {
    2683          112 :         let sub_compaction = options.sub_compaction;
    2684          112 :         let job = GcCompactJob::from_compact_options(options.clone());
    2685          112 :         let yield_for_l0 = options.flags.contains(CompactFlags::YieldForL0);
    2686          112 :         if sub_compaction {
    2687            0 :             info!(
    2688            0 :                 "running enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs"
    2689              :             );
    2690            0 :             let jobs = self
    2691            0 :                 .gc_compaction_split_jobs(job, options.sub_compaction_max_job_size_mb)
    2692            0 :                 .await?;
    2693            0 :             let jobs_len = jobs.len();
    2694            0 :             for (idx, job) in jobs.into_iter().enumerate() {
    2695            0 :                 info!(
    2696            0 :                     "running enhanced gc bottom-most compaction, sub-compaction {}/{}",
    2697            0 :                     idx + 1,
    2698              :                     jobs_len
    2699              :                 );
    2700            0 :                 self.compact_with_gc_inner(cancel, job, ctx, yield_for_l0)
    2701            0 :                     .await?;
    2702              :             }
    2703            0 :             if jobs_len == 0 {
    2704            0 :                 info!("no jobs to run, skipping gc bottom-most compaction");
    2705            0 :             }
    2706            0 :             return Ok(CompactionOutcome::Done);
    2707          112 :         }
    2708          112 :         self.compact_with_gc_inner(cancel, job, ctx, yield_for_l0)
    2709          112 :             .await
    2710          112 :     }
    2711              : 
    2712          112 :     async fn compact_with_gc_inner(
    2713          112 :         self: &Arc<Self>,
    2714          112 :         cancel: &CancellationToken,
    2715          112 :         job: GcCompactJob,
    2716          112 :         ctx: &RequestContext,
    2717          112 :         yield_for_l0: bool,
    2718          112 :     ) -> Result<CompactionOutcome, CompactionError> {
    2719          112 :         // Block other compaction/GC tasks from running for now. GC-compaction could run along
    2720          112 :         // with legacy compaction tasks in the future. Always ensure the lock order is compaction -> gc.
    2721          112 :         // Note that we already acquired the compaction lock when the outer `compact` function gets called.
    2722          112 : 
    2723          112 :         let timer = Instant::now();
    2724          112 :         let begin_timer = timer;
    2725          112 : 
    2726          112 :         let gc_lock = async {
    2727          112 :             tokio::select! {
    2728          112 :                 guard = self.gc_lock.lock() => Ok(guard),
    2729          112 :                 _ = cancel.cancelled() => Err(CompactionError::ShuttingDown),
    2730              :             }
    2731          112 :         };
    2732              : 
    2733          112 :         let time_acquire_lock = timer.elapsed();
    2734          112 :         let timer = Instant::now();
    2735              : 
    2736          112 :         let gc_lock = crate::timed(
    2737          112 :             gc_lock,
    2738          112 :             "acquires gc lock",
    2739          112 :             std::time::Duration::from_secs(5),
    2740          112 :         )
    2741          112 :         .await?;
    2742              : 
    2743          112 :         let dry_run = job.dry_run;
    2744          112 :         let compact_key_range = job.compact_key_range;
    2745          112 :         let compact_lsn_range = job.compact_lsn_range;
    2746              : 
    2747          112 :         let debug_mode = cfg!(debug_assertions) || cfg!(feature = "testing");
    2748              : 
    2749          112 :         info!(
    2750            0 :             "running enhanced gc bottom-most compaction, dry_run={dry_run}, compact_key_range={}..{}, compact_lsn_range={}..{}",
    2751              :             compact_key_range.start,
    2752              :             compact_key_range.end,
    2753              :             compact_lsn_range.start,
    2754              :             compact_lsn_range.end
    2755              :         );
    2756              : 
    2757          112 :         scopeguard::defer! {
    2758          112 :             info!("done enhanced gc bottom-most compaction");
    2759          112 :         };
    2760          112 : 
    2761          112 :         let mut stat = CompactionStatistics::default();
    2762              : 
    2763              :         // Step 0: pick all delta layers + image layers below/intersect with the GC horizon.
    2764              :         // The layer selection has the following properties:
    2765              :         // 1. If a layer is in the selection, all layers below it are in the selection.
    2766              :         // 2. Inferred from (1), for each key in the layer selection, the value can be reconstructed only with the layers in the layer selection.
    2767          108 :         let job_desc = {
    2768          112 :             let guard = self.layers.read().await;
    2769          112 :             let layers = guard.layer_map()?;
    2770          112 :             let gc_info = self.gc_info.read().unwrap();
    2771          112 :             let mut retain_lsns_below_horizon = Vec::new();
    2772          112 :             let gc_cutoff = {
    2773              :                 // Currently, gc-compaction only kicks in after the legacy gc has updated the gc_cutoff.
    2774              :                 // Therefore, it can only clean up data that cannot be cleaned up with legacy gc, instead of
    2775              :                 // cleaning everything that theoritically it could. In the future, it should use `self.gc_info`
    2776              :                 // to get the truth data.
    2777          112 :                 let real_gc_cutoff = self.get_gc_compaction_watermark();
    2778              :                 // The compaction algorithm will keep all keys above the gc_cutoff while keeping only necessary keys below the gc_cutoff for
    2779              :                 // each of the retain_lsn. Therefore, if the user-provided `compact_lsn_range.end` is larger than the real gc cutoff, we will use
    2780              :                 // the real cutoff.
    2781          112 :                 let mut gc_cutoff = if compact_lsn_range.end == Lsn::MAX {
    2782          100 :                     if real_gc_cutoff == Lsn::INVALID {
    2783              :                         // If the gc_cutoff is not generated yet, we should not compact anything.
    2784            0 :                         tracing::warn!(
    2785            0 :                             "no layers to compact with gc: gc_cutoff not generated yet, skipping gc bottom-most compaction"
    2786              :                         );
    2787            0 :                         return Ok(CompactionOutcome::Skipped);
    2788          100 :                     }
    2789          100 :                     real_gc_cutoff
    2790              :                 } else {
    2791           12 :                     compact_lsn_range.end
    2792              :                 };
    2793          112 :                 if gc_cutoff > real_gc_cutoff {
    2794            8 :                     warn!(
    2795            0 :                         "provided compact_lsn_range.end={} is larger than the real_gc_cutoff={}, using the real gc cutoff",
    2796              :                         gc_cutoff, real_gc_cutoff
    2797              :                     );
    2798            8 :                     gc_cutoff = real_gc_cutoff;
    2799          104 :                 }
    2800          112 :                 gc_cutoff
    2801              :             };
    2802          140 :             for (lsn, _timeline_id, _is_offloaded) in &gc_info.retain_lsns {
    2803          140 :                 if lsn < &gc_cutoff {
    2804          140 :                     retain_lsns_below_horizon.push(*lsn);
    2805          140 :                 }
    2806              :             }
    2807          112 :             for lsn in gc_info.leases.keys() {
    2808            0 :                 if lsn < &gc_cutoff {
    2809            0 :                     retain_lsns_below_horizon.push(*lsn);
    2810            0 :                 }
    2811              :             }
    2812          112 :             let mut selected_layers: Vec<Layer> = Vec::new();
    2813          112 :             drop(gc_info);
    2814              :             // Firstly, pick all the layers intersect or below the gc_cutoff, get the largest LSN in the selected layers.
    2815          112 :             let Some(max_layer_lsn) = layers
    2816          112 :                 .iter_historic_layers()
    2817          500 :                 .filter(|desc| desc.get_lsn_range().start <= gc_cutoff)
    2818          428 :                 .map(|desc| desc.get_lsn_range().end)
    2819          112 :                 .max()
    2820              :             else {
    2821            0 :                 info!(
    2822            0 :                     "no layers to compact with gc: no historic layers below gc_cutoff, gc_cutoff={}",
    2823              :                     gc_cutoff
    2824              :                 );
    2825            0 :                 return Ok(CompactionOutcome::Done);
    2826              :             };
    2827              :             // Next, if the user specifies compact_lsn_range.start, we need to filter some layers out. All the layers (strictly) below
    2828              :             // the min_layer_lsn computed as below will be filtered out and the data will be accessed using the normal read path, as if
    2829              :             // it is a branch.
    2830          112 :             let Some(min_layer_lsn) = layers
    2831          112 :                 .iter_historic_layers()
    2832          500 :                 .filter(|desc| {
    2833          500 :                     if compact_lsn_range.start == Lsn::INVALID {
    2834          408 :                         true // select all layers below if start == Lsn(0)
    2835              :                     } else {
    2836           92 :                         desc.get_lsn_range().end > compact_lsn_range.start // strictly larger than compact_above_lsn
    2837              :                     }
    2838          500 :                 })
    2839          464 :                 .map(|desc| desc.get_lsn_range().start)
    2840          112 :                 .min()
    2841              :             else {
    2842            0 :                 info!(
    2843            0 :                     "no layers to compact with gc: no historic layers above compact_above_lsn, compact_above_lsn={}",
    2844              :                     compact_lsn_range.end
    2845              :                 );
    2846            0 :                 return Ok(CompactionOutcome::Done);
    2847              :             };
    2848              :             // Then, pick all the layers that are below the max_layer_lsn. This is to ensure we can pick all single-key
    2849              :             // layers to compact.
    2850          112 :             let mut rewrite_layers = Vec::new();
    2851          500 :             for desc in layers.iter_historic_layers() {
    2852          500 :                 if desc.get_lsn_range().end <= max_layer_lsn
    2853          428 :                     && desc.get_lsn_range().start >= min_layer_lsn
    2854          392 :                     && overlaps_with(&desc.get_key_range(), &compact_key_range)
    2855              :                 {
    2856              :                     // If the layer overlaps with the compaction key range, we need to read it to obtain all keys within the range,
    2857              :                     // even if it might contain extra keys
    2858          316 :                     selected_layers.push(guard.get_from_desc(&desc));
    2859          316 :                     // If the layer is not fully contained within the key range, we need to rewrite it if it's a delta layer (it's fine
    2860          316 :                     // to overlap image layers)
    2861          316 :                     if desc.is_delta() && !fully_contains(&compact_key_range, &desc.get_key_range())
    2862            4 :                     {
    2863            4 :                         rewrite_layers.push(desc);
    2864          312 :                     }
    2865          184 :                 }
    2866              :             }
    2867          112 :             if selected_layers.is_empty() {
    2868            4 :                 info!(
    2869            0 :                     "no layers to compact with gc: no layers within the key range, gc_cutoff={}, key_range={}..{}",
    2870              :                     gc_cutoff, compact_key_range.start, compact_key_range.end
    2871              :                 );
    2872            4 :                 return Ok(CompactionOutcome::Done);
    2873          108 :             }
    2874          108 :             retain_lsns_below_horizon.sort();
    2875          108 :             GcCompactionJobDescription {
    2876          108 :                 selected_layers,
    2877          108 :                 gc_cutoff,
    2878          108 :                 retain_lsns_below_horizon,
    2879          108 :                 min_layer_lsn,
    2880          108 :                 max_layer_lsn,
    2881          108 :                 compaction_key_range: compact_key_range,
    2882          108 :                 rewrite_layers,
    2883          108 :             }
    2884              :         };
    2885          108 :         let (has_data_below, lowest_retain_lsn) = if compact_lsn_range.start != Lsn::INVALID {
    2886              :             // If we only compact above some LSN, we should get the history from the current branch below the specified LSN.
    2887              :             // We use job_desc.min_layer_lsn as if it's the lowest branch point.
    2888           16 :             (true, job_desc.min_layer_lsn)
    2889           92 :         } else if self.ancestor_timeline.is_some() {
    2890              :             // In theory, we can also use min_layer_lsn here, but using ancestor LSN makes sure the delta layers cover the
    2891              :             // LSN ranges all the way to the ancestor timeline.
    2892            4 :             (true, self.ancestor_lsn)
    2893              :         } else {
    2894           88 :             let res = job_desc
    2895           88 :                 .retain_lsns_below_horizon
    2896           88 :                 .first()
    2897           88 :                 .copied()
    2898           88 :                 .unwrap_or(job_desc.gc_cutoff);
    2899           88 :             if debug_mode {
    2900           88 :                 assert_eq!(
    2901           88 :                     res,
    2902           88 :                     job_desc
    2903           88 :                         .retain_lsns_below_horizon
    2904           88 :                         .iter()
    2905           88 :                         .min()
    2906           88 :                         .copied()
    2907           88 :                         .unwrap_or(job_desc.gc_cutoff)
    2908           88 :                 );
    2909            0 :             }
    2910           88 :             (false, res)
    2911              :         };
    2912          108 :         info!(
    2913            0 :             "picked {} layers for compaction ({} layers need rewriting) with max_layer_lsn={} min_layer_lsn={} gc_cutoff={} lowest_retain_lsn={}, key_range={}..{}, has_data_below={}",
    2914            0 :             job_desc.selected_layers.len(),
    2915            0 :             job_desc.rewrite_layers.len(),
    2916              :             job_desc.max_layer_lsn,
    2917              :             job_desc.min_layer_lsn,
    2918              :             job_desc.gc_cutoff,
    2919              :             lowest_retain_lsn,
    2920              :             job_desc.compaction_key_range.start,
    2921              :             job_desc.compaction_key_range.end,
    2922              :             has_data_below,
    2923              :         );
    2924              : 
    2925          108 :         let time_analyze = timer.elapsed();
    2926          108 :         let timer = Instant::now();
    2927              : 
    2928          424 :         for layer in &job_desc.selected_layers {
    2929          316 :             debug!("read layer: {}", layer.layer_desc().key());
    2930              :         }
    2931          112 :         for layer in &job_desc.rewrite_layers {
    2932            4 :             debug!("rewrite layer: {}", layer.key());
    2933              :         }
    2934              : 
    2935          108 :         self.check_compaction_space(&job_desc.selected_layers)
    2936          108 :             .await?;
    2937              : 
    2938              :         // Generate statistics for the compaction
    2939          424 :         for layer in &job_desc.selected_layers {
    2940          316 :             let desc = layer.layer_desc();
    2941          316 :             if desc.is_delta() {
    2942          176 :                 stat.visit_delta_layer(desc.file_size());
    2943          176 :             } else {
    2944          140 :                 stat.visit_image_layer(desc.file_size());
    2945          140 :             }
    2946              :         }
    2947              : 
    2948              :         // Step 1: construct a k-merge iterator over all layers.
    2949              :         // Also, verify if the layer map can be split by drawing a horizontal line at every LSN start/end split point.
    2950          108 :         let layer_names = job_desc
    2951          108 :             .selected_layers
    2952          108 :             .iter()
    2953          316 :             .map(|layer| layer.layer_desc().layer_name())
    2954          108 :             .collect_vec();
    2955          108 :         if let Some(err) = check_valid_layermap(&layer_names) {
    2956            0 :             return Err(CompactionError::Other(anyhow!(
    2957            0 :                 "gc-compaction layer map check failed because {}, cannot proceed with compaction due to potential data loss",
    2958            0 :                 err
    2959            0 :             )));
    2960          108 :         }
    2961          108 :         // The maximum LSN we are processing in this compaction loop
    2962          108 :         let end_lsn = job_desc
    2963          108 :             .selected_layers
    2964          108 :             .iter()
    2965          316 :             .map(|l| l.layer_desc().lsn_range.end)
    2966          108 :             .max()
    2967          108 :             .unwrap();
    2968          108 :         let mut delta_layers = Vec::new();
    2969          108 :         let mut image_layers = Vec::new();
    2970          108 :         let mut downloaded_layers = Vec::new();
    2971          108 :         let mut total_downloaded_size = 0;
    2972          108 :         let mut total_layer_size = 0;
    2973          424 :         for layer in &job_desc.selected_layers {
    2974          316 :             if layer
    2975          316 :                 .needs_download()
    2976          316 :                 .await
    2977          316 :                 .context("failed to check if layer needs download")
    2978          316 :                 .map_err(CompactionError::Other)?
    2979          316 :                 .is_some()
    2980            0 :             {
    2981            0 :                 total_downloaded_size += layer.layer_desc().file_size;
    2982          316 :             }
    2983          316 :             total_layer_size += layer.layer_desc().file_size;
    2984          316 :             if cancel.is_cancelled() {
    2985            0 :                 return Err(CompactionError::ShuttingDown);
    2986          316 :             }
    2987          316 :             let should_yield = yield_for_l0
    2988            0 :                 && self
    2989            0 :                     .l0_compaction_trigger
    2990            0 :                     .notified()
    2991            0 :                     .now_or_never()
    2992            0 :                     .is_some();
    2993          316 :             if should_yield {
    2994            0 :                 tracing::info!("preempt gc-compaction when downloading layers: too many L0 layers");
    2995            0 :                 return Ok(CompactionOutcome::YieldForL0);
    2996          316 :             }
    2997          316 :             let resident_layer = layer
    2998          316 :                 .download_and_keep_resident(ctx)
    2999          316 :                 .await
    3000          316 :                 .context("failed to download and keep resident layer")
    3001          316 :                 .map_err(CompactionError::Other)?;
    3002          316 :             downloaded_layers.push(resident_layer);
    3003              :         }
    3004          108 :         info!(
    3005            0 :             "finish downloading layers, downloaded={}, total={}, ratio={:.2}",
    3006            0 :             total_downloaded_size,
    3007            0 :             total_layer_size,
    3008            0 :             total_downloaded_size as f64 / total_layer_size as f64
    3009              :         );
    3010          424 :         for resident_layer in &downloaded_layers {
    3011          316 :             if resident_layer.layer_desc().is_delta() {
    3012          176 :                 let layer = resident_layer
    3013          176 :                     .get_as_delta(ctx)
    3014          176 :                     .await
    3015          176 :                     .context("failed to get delta layer")
    3016          176 :                     .map_err(CompactionError::Other)?;
    3017          176 :                 delta_layers.push(layer);
    3018              :             } else {
    3019          140 :                 let layer = resident_layer
    3020          140 :                     .get_as_image(ctx)
    3021          140 :                     .await
    3022          140 :                     .context("failed to get image layer")
    3023          140 :                     .map_err(CompactionError::Other)?;
    3024          140 :                 image_layers.push(layer);
    3025              :             }
    3026              :         }
    3027          108 :         let (dense_ks, sparse_ks) = self
    3028          108 :             .collect_gc_compaction_keyspace()
    3029          108 :             .await
    3030          108 :             .context("failed to collect gc compaction keyspace")
    3031          108 :             .map_err(CompactionError::Other)?;
    3032          108 :         let mut merge_iter = FilterIterator::create(
    3033          108 :             MergeIterator::create(&delta_layers, &image_layers, ctx),
    3034          108 :             dense_ks,
    3035          108 :             sparse_ks,
    3036          108 :         )
    3037          108 :         .context("failed to create filter iterator")
    3038          108 :         .map_err(CompactionError::Other)?;
    3039              : 
    3040          108 :         let time_download_layer = timer.elapsed();
    3041          108 :         let mut timer = Instant::now();
    3042          108 : 
    3043          108 :         // Step 2: Produce images+deltas.
    3044          108 :         let mut accumulated_values = Vec::new();
    3045          108 :         let mut last_key: Option<Key> = None;
    3046              : 
    3047              :         // Only create image layers when there is no ancestor branches. TODO: create covering image layer
    3048              :         // when some condition meet.
    3049          108 :         let mut image_layer_writer = if !has_data_below {
    3050              :             Some(
    3051           88 :                 SplitImageLayerWriter::new(
    3052           88 :                     self.conf,
    3053           88 :                     self.timeline_id,
    3054           88 :                     self.tenant_shard_id,
    3055           88 :                     job_desc.compaction_key_range.start,
    3056           88 :                     lowest_retain_lsn,
    3057           88 :                     self.get_compaction_target_size(),
    3058           88 :                     ctx,
    3059           88 :                 )
    3060           88 :                 .await
    3061           88 :                 .context("failed to create image layer writer")
    3062           88 :                 .map_err(CompactionError::Other)?,
    3063              :             )
    3064              :         } else {
    3065           20 :             None
    3066              :         };
    3067              : 
    3068          108 :         let mut delta_layer_writer = SplitDeltaLayerWriter::new(
    3069          108 :             self.conf,
    3070          108 :             self.timeline_id,
    3071          108 :             self.tenant_shard_id,
    3072          108 :             lowest_retain_lsn..end_lsn,
    3073          108 :             self.get_compaction_target_size(),
    3074          108 :         )
    3075          108 :         .await
    3076          108 :         .context("failed to create delta layer writer")
    3077          108 :         .map_err(CompactionError::Other)?;
    3078              : 
    3079              :         #[derive(Default)]
    3080              :         struct RewritingLayers {
    3081              :             before: Option<DeltaLayerWriter>,
    3082              :             after: Option<DeltaLayerWriter>,
    3083              :         }
    3084          108 :         let mut delta_layer_rewriters = HashMap::<Arc<PersistentLayerKey>, RewritingLayers>::new();
    3085              : 
    3086              :         /// When compacting not at a bottom range (=`[0,X)`) of the root branch, we "have data below" (`has_data_below=true`).
    3087              :         /// The two cases are compaction in ancestor branches and when `compact_lsn_range.start` is set.
    3088              :         /// In those cases, we need to pull up data from below the LSN range we're compaction.
    3089              :         ///
    3090              :         /// This function unifies the cases so that later code doesn't have to think about it.
    3091              :         ///
    3092              :         /// Currently, we always get the ancestor image for each key in the child branch no matter whether the image
    3093              :         /// is needed for reconstruction. This should be fixed in the future.
    3094              :         ///
    3095              :         /// Furthermore, we should do vectored get instead of a single get, or better, use k-merge for ancestor
    3096              :         /// images.
    3097         1280 :         async fn get_ancestor_image(
    3098         1280 :             this_tline: &Arc<Timeline>,
    3099         1280 :             key: Key,
    3100         1280 :             ctx: &RequestContext,
    3101         1280 :             has_data_below: bool,
    3102         1280 :             history_lsn_point: Lsn,
    3103         1280 :         ) -> anyhow::Result<Option<(Key, Lsn, Bytes)>> {
    3104         1280 :             if !has_data_below {
    3105         1204 :                 return Ok(None);
    3106           76 :             };
    3107              :             // This function is implemented as a get of the current timeline at ancestor LSN, therefore reusing
    3108              :             // as much existing code as possible.
    3109           76 :             let img = this_tline.get(key, history_lsn_point, ctx).await?;
    3110           76 :             Ok(Some((key, history_lsn_point, img)))
    3111         1280 :         }
    3112              : 
    3113              :         // Actually, we can decide not to write to the image layer at all at this point because
    3114              :         // the key and LSN range are determined. However, to keep things simple here, we still
    3115              :         // create this writer, and discard the writer in the end.
    3116          108 :         let mut time_to_first_kv_pair = None;
    3117              : 
    3118         1984 :         while let Some(((key, lsn, val), desc)) = merge_iter
    3119         1984 :             .next_with_trace()
    3120         1984 :             .await
    3121         1984 :             .context("failed to get next key-value pair")
    3122         1984 :             .map_err(CompactionError::Other)?
    3123              :         {
    3124         1880 :             if time_to_first_kv_pair.is_none() {
    3125          108 :                 time_to_first_kv_pair = Some(timer.elapsed());
    3126          108 :                 timer = Instant::now();
    3127         1772 :             }
    3128              : 
    3129         1880 :             if cancel.is_cancelled() {
    3130            0 :                 return Err(CompactionError::ShuttingDown);
    3131         1880 :             }
    3132              : 
    3133         1880 :             let should_yield = yield_for_l0
    3134            0 :                 && self
    3135            0 :                     .l0_compaction_trigger
    3136            0 :                     .notified()
    3137            0 :                     .now_or_never()
    3138            0 :                     .is_some();
    3139         1880 :             if should_yield {
    3140            0 :                 tracing::info!("preempt gc-compaction in the main loop: too many L0 layers");
    3141            0 :                 return Ok(CompactionOutcome::YieldForL0);
    3142         1880 :             }
    3143         1880 :             if self.shard_identity.is_key_disposable(&key) {
    3144              :                 // If this shard does not need to store this key, simply skip it.
    3145              :                 //
    3146              :                 // This is not handled in the filter iterator because shard is determined by hash.
    3147              :                 // Therefore, it does not give us any performance benefit to do things like skip
    3148              :                 // a whole layer file as handling key spaces (ranges).
    3149            0 :                 if cfg!(debug_assertions) {
    3150            0 :                     let shard = self.shard_identity.shard_index();
    3151            0 :                     let owner = self.shard_identity.get_shard_number(&key);
    3152            0 :                     panic!("key {key} does not belong on shard {shard}, owned by {owner}");
    3153            0 :                 }
    3154            0 :                 continue;
    3155         1880 :             }
    3156         1880 :             if !job_desc.compaction_key_range.contains(&key) {
    3157          128 :                 if !desc.is_delta {
    3158          120 :                     continue;
    3159            8 :                 }
    3160            8 :                 let rewriter = delta_layer_rewriters.entry(desc.clone()).or_default();
    3161            8 :                 let rewriter = if key < job_desc.compaction_key_range.start {
    3162            0 :                     if rewriter.before.is_none() {
    3163            0 :                         rewriter.before = Some(
    3164            0 :                             DeltaLayerWriter::new(
    3165            0 :                                 self.conf,
    3166            0 :                                 self.timeline_id,
    3167            0 :                                 self.tenant_shard_id,
    3168            0 :                                 desc.key_range.start,
    3169            0 :                                 desc.lsn_range.clone(),
    3170            0 :                                 ctx,
    3171            0 :                             )
    3172            0 :                             .await
    3173            0 :                             .context("failed to create delta layer writer")
    3174            0 :                             .map_err(CompactionError::Other)?,
    3175              :                         );
    3176            0 :                     }
    3177            0 :                     rewriter.before.as_mut().unwrap()
    3178            8 :                 } else if key >= job_desc.compaction_key_range.end {
    3179            8 :                     if rewriter.after.is_none() {
    3180            4 :                         rewriter.after = Some(
    3181            4 :                             DeltaLayerWriter::new(
    3182            4 :                                 self.conf,
    3183            4 :                                 self.timeline_id,
    3184            4 :                                 self.tenant_shard_id,
    3185            4 :                                 job_desc.compaction_key_range.end,
    3186            4 :                                 desc.lsn_range.clone(),
    3187            4 :                                 ctx,
    3188            4 :                             )
    3189            4 :                             .await
    3190            4 :                             .context("failed to create delta layer writer")
    3191            4 :                             .map_err(CompactionError::Other)?,
    3192              :                         );
    3193            4 :                     }
    3194            8 :                     rewriter.after.as_mut().unwrap()
    3195              :                 } else {
    3196            0 :                     unreachable!()
    3197              :                 };
    3198            8 :                 rewriter
    3199            8 :                     .put_value(key, lsn, val, ctx)
    3200            8 :                     .await
    3201            8 :                     .context("failed to put value")
    3202            8 :                     .map_err(CompactionError::Other)?;
    3203            8 :                 continue;
    3204         1752 :             }
    3205         1752 :             match val {
    3206         1260 :                 Value::Image(_) => stat.visit_image_key(&val),
    3207          492 :                 Value::WalRecord(_) => stat.visit_wal_key(&val),
    3208              :             }
    3209         1752 :             if last_key.is_none() || last_key.as_ref() == Some(&key) {
    3210          576 :                 if last_key.is_none() {
    3211          108 :                     last_key = Some(key);
    3212          468 :                 }
    3213          576 :                 accumulated_values.push((key, lsn, val));
    3214              :             } else {
    3215         1176 :                 let last_key: &mut Key = last_key.as_mut().unwrap();
    3216         1176 :                 stat.on_unique_key_visited(); // TODO: adjust statistics for partial compaction
    3217         1176 :                 let retention = self
    3218         1176 :                     .generate_key_retention(
    3219         1176 :                         *last_key,
    3220         1176 :                         &accumulated_values,
    3221         1176 :                         job_desc.gc_cutoff,
    3222         1176 :                         &job_desc.retain_lsns_below_horizon,
    3223         1176 :                         COMPACTION_DELTA_THRESHOLD,
    3224         1176 :                         get_ancestor_image(self, *last_key, ctx, has_data_below, lowest_retain_lsn)
    3225         1176 :                             .await
    3226         1176 :                             .context("failed to get ancestor image")
    3227         1176 :                             .map_err(CompactionError::Other)?,
    3228              :                     )
    3229         1176 :                     .await
    3230         1176 :                     .context("failed to generate key retention")
    3231         1176 :                     .map_err(CompactionError::Other)?;
    3232         1172 :                 retention
    3233         1172 :                     .pipe_to(
    3234         1172 :                         *last_key,
    3235         1172 :                         &mut delta_layer_writer,
    3236         1172 :                         image_layer_writer.as_mut(),
    3237         1172 :                         &mut stat,
    3238         1172 :                         ctx,
    3239         1172 :                     )
    3240         1172 :                     .await
    3241         1172 :                     .context("failed to pipe to delta layer writer")
    3242         1172 :                     .map_err(CompactionError::Other)?;
    3243         1172 :                 accumulated_values.clear();
    3244         1172 :                 *last_key = key;
    3245         1172 :                 accumulated_values.push((key, lsn, val));
    3246              :             }
    3247              :         }
    3248              : 
    3249              :         // TODO: move the below part to the loop body
    3250          104 :         let Some(last_key) = last_key else {
    3251            0 :             return Err(CompactionError::Other(anyhow!(
    3252            0 :                 "no keys produced during compaction"
    3253            0 :             )));
    3254              :         };
    3255          104 :         stat.on_unique_key_visited();
    3256              : 
    3257          104 :         let retention = self
    3258          104 :             .generate_key_retention(
    3259          104 :                 last_key,
    3260          104 :                 &accumulated_values,
    3261          104 :                 job_desc.gc_cutoff,
    3262          104 :                 &job_desc.retain_lsns_below_horizon,
    3263          104 :                 COMPACTION_DELTA_THRESHOLD,
    3264          104 :                 get_ancestor_image(self, last_key, ctx, has_data_below, lowest_retain_lsn)
    3265          104 :                     .await
    3266          104 :                     .context("failed to get ancestor image")
    3267          104 :                     .map_err(CompactionError::Other)?,
    3268              :             )
    3269          104 :             .await
    3270          104 :             .context("failed to generate key retention")
    3271          104 :             .map_err(CompactionError::Other)?;
    3272          104 :         retention
    3273          104 :             .pipe_to(
    3274          104 :                 last_key,
    3275          104 :                 &mut delta_layer_writer,
    3276          104 :                 image_layer_writer.as_mut(),
    3277          104 :                 &mut stat,
    3278          104 :                 ctx,
    3279          104 :             )
    3280          104 :             .await
    3281          104 :             .context("failed to pipe to delta layer writer")
    3282          104 :             .map_err(CompactionError::Other)?;
    3283              :         // end: move the above part to the loop body
    3284              : 
    3285          104 :         let time_main_loop = timer.elapsed();
    3286          104 :         let timer = Instant::now();
    3287          104 : 
    3288          104 :         let mut rewrote_delta_layers = Vec::new();
    3289          108 :         for (key, writers) in delta_layer_rewriters {
    3290            4 :             if let Some(delta_writer_before) = writers.before {
    3291            0 :                 let (desc, path) = delta_writer_before
    3292            0 :                     .finish(job_desc.compaction_key_range.start, ctx)
    3293            0 :                     .await
    3294            0 :                     .context("failed to finish delta layer writer")
    3295            0 :                     .map_err(CompactionError::Other)?;
    3296            0 :                 let layer = Layer::finish_creating(self.conf, self, desc, &path)
    3297            0 :                     .context("failed to finish creating delta layer")
    3298            0 :                     .map_err(CompactionError::Other)?;
    3299            0 :                 rewrote_delta_layers.push(layer);
    3300            4 :             }
    3301            4 :             if let Some(delta_writer_after) = writers.after {
    3302            4 :                 let (desc, path) = delta_writer_after
    3303            4 :                     .finish(key.key_range.end, ctx)
    3304            4 :                     .await
    3305            4 :                     .context("failed to finish delta layer writer")
    3306            4 :                     .map_err(CompactionError::Other)?;
    3307            4 :                 let layer = Layer::finish_creating(self.conf, self, desc, &path)
    3308            4 :                     .context("failed to finish creating delta layer")
    3309            4 :                     .map_err(CompactionError::Other)?;
    3310            4 :                 rewrote_delta_layers.push(layer);
    3311            0 :             }
    3312              :         }
    3313              : 
    3314          148 :         let discard = |key: &PersistentLayerKey| {
    3315          148 :             let key = key.clone();
    3316          148 :             async move { KeyHistoryRetention::discard_key(&key, self, dry_run).await }
    3317          148 :         };
    3318              : 
    3319          104 :         let produced_image_layers = if let Some(writer) = image_layer_writer {
    3320           84 :             if !dry_run {
    3321           76 :                 let end_key = job_desc.compaction_key_range.end;
    3322           76 :                 writer
    3323           76 :                     .finish_with_discard_fn(self, ctx, end_key, discard)
    3324           76 :                     .await
    3325           76 :                     .context("failed to finish image layer writer")
    3326           76 :                     .map_err(CompactionError::Other)?
    3327              :             } else {
    3328            8 :                 drop(writer);
    3329            8 :                 Vec::new()
    3330              :             }
    3331              :         } else {
    3332           20 :             Vec::new()
    3333              :         };
    3334              : 
    3335          104 :         let produced_delta_layers = if !dry_run {
    3336           96 :             delta_layer_writer
    3337           96 :                 .finish_with_discard_fn(self, ctx, discard)
    3338           96 :                 .await
    3339           96 :                 .context("failed to finish delta layer writer")
    3340           96 :                 .map_err(CompactionError::Other)?
    3341              :         } else {
    3342            8 :             drop(delta_layer_writer);
    3343            8 :             Vec::new()
    3344              :         };
    3345              : 
    3346              :         // TODO: make image/delta/rewrote_delta layers generation atomic. At this point, we already generated resident layers, and if
    3347              :         // compaction is cancelled at this point, we might have some layers that are not cleaned up.
    3348          104 :         let mut compact_to = Vec::new();
    3349          104 :         let mut keep_layers = HashSet::new();
    3350          104 :         let produced_delta_layers_len = produced_delta_layers.len();
    3351          104 :         let produced_image_layers_len = produced_image_layers.len();
    3352          104 : 
    3353          104 :         let layer_selection_by_key = job_desc
    3354          104 :             .selected_layers
    3355          104 :             .iter()
    3356          304 :             .map(|l| (l.layer_desc().key(), l.layer_desc().clone()))
    3357          104 :             .collect::<HashMap<_, _>>();
    3358              : 
    3359          176 :         for action in produced_delta_layers {
    3360           72 :             match action {
    3361           44 :                 BatchWriterResult::Produced(layer) => {
    3362           44 :                     if cfg!(debug_assertions) {
    3363           44 :                         info!("produced delta layer: {}", layer.layer_desc().key());
    3364            0 :                     }
    3365           44 :                     stat.produce_delta_layer(layer.layer_desc().file_size());
    3366           44 :                     compact_to.push(layer);
    3367              :                 }
    3368           28 :                 BatchWriterResult::Discarded(l) => {
    3369           28 :                     if cfg!(debug_assertions) {
    3370           28 :                         info!("discarded delta layer: {}", l);
    3371            0 :                     }
    3372           28 :                     if let Some(layer_desc) = layer_selection_by_key.get(&l) {
    3373           28 :                         stat.discard_delta_layer(layer_desc.file_size());
    3374           28 :                     } else {
    3375            0 :                         tracing::warn!(
    3376            0 :                             "discarded delta layer not in layer_selection: {}, produced a layer outside of the compaction key range?",
    3377              :                             l
    3378              :                         );
    3379            0 :                         stat.discard_delta_layer(0);
    3380              :                     }
    3381           28 :                     keep_layers.insert(l);
    3382              :                 }
    3383              :             }
    3384              :         }
    3385          108 :         for layer in &rewrote_delta_layers {
    3386            4 :             debug!(
    3387            0 :                 "produced rewritten delta layer: {}",
    3388            0 :                 layer.layer_desc().key()
    3389              :             );
    3390              :             // For now, we include rewritten delta layer size in the "produce_delta_layer". We could
    3391              :             // make it a separate statistics in the future.
    3392            4 :             stat.produce_delta_layer(layer.layer_desc().file_size());
    3393              :         }
    3394          104 :         compact_to.extend(rewrote_delta_layers);
    3395          180 :         for action in produced_image_layers {
    3396           76 :             match action {
    3397           60 :                 BatchWriterResult::Produced(layer) => {
    3398           60 :                     debug!("produced image layer: {}", layer.layer_desc().key());
    3399           60 :                     stat.produce_image_layer(layer.layer_desc().file_size());
    3400           60 :                     compact_to.push(layer);
    3401              :                 }
    3402           16 :                 BatchWriterResult::Discarded(l) => {
    3403           16 :                     debug!("discarded image layer: {}", l);
    3404           16 :                     if let Some(layer_desc) = layer_selection_by_key.get(&l) {
    3405           16 :                         stat.discard_image_layer(layer_desc.file_size());
    3406           16 :                     } else {
    3407            0 :                         tracing::warn!(
    3408            0 :                             "discarded image layer not in layer_selection: {}, produced a layer outside of the compaction key range?",
    3409              :                             l
    3410              :                         );
    3411            0 :                         stat.discard_image_layer(0);
    3412              :                     }
    3413           16 :                     keep_layers.insert(l);
    3414              :                 }
    3415              :             }
    3416              :         }
    3417              : 
    3418          104 :         let mut layer_selection = job_desc.selected_layers;
    3419              : 
    3420              :         // Partial compaction might select more data than it processes, e.g., if
    3421              :         // the compaction_key_range only partially overlaps:
    3422              :         //
    3423              :         //         [---compaction_key_range---]
    3424              :         //   [---A----][----B----][----C----][----D----]
    3425              :         //
    3426              :         // For delta layers, we will rewrite the layers so that it is cut exactly at
    3427              :         // the compaction key range, so we can always discard them. However, for image
    3428              :         // layers, as we do not rewrite them for now, we need to handle them differently.
    3429              :         // Assume image layers  A, B, C, D are all in the `layer_selection`.
    3430              :         //
    3431              :         // The created image layers contain whatever is needed from B, C, and from
    3432              :         // `----]` of A, and from  `[---` of D.
    3433              :         //
    3434              :         // In contrast, `[---A` and `D----]` have not been processed, so, we must
    3435              :         // keep that data.
    3436              :         //
    3437              :         // The solution for now is to keep A and D completely if they are image layers.
    3438              :         // (layer_selection is what we'll remove from the layer map, so, retain what
    3439              :         // is _not_ fully covered by compaction_key_range).
    3440          408 :         for layer in &layer_selection {
    3441          304 :             if !layer.layer_desc().is_delta() {
    3442          132 :                 if !overlaps_with(
    3443          132 :                     &layer.layer_desc().key_range,
    3444          132 :                     &job_desc.compaction_key_range,
    3445          132 :                 ) {
    3446            0 :                     return Err(CompactionError::Other(anyhow!(
    3447            0 :                         "violated constraint: image layer outside of compaction key range"
    3448            0 :                     )));
    3449          132 :                 }
    3450          132 :                 if !fully_contains(
    3451          132 :                     &job_desc.compaction_key_range,
    3452          132 :                     &layer.layer_desc().key_range,
    3453          132 :                 ) {
    3454           16 :                     keep_layers.insert(layer.layer_desc().key());
    3455          116 :                 }
    3456          172 :             }
    3457              :         }
    3458              : 
    3459          304 :         layer_selection.retain(|x| !keep_layers.contains(&x.layer_desc().key()));
    3460          104 : 
    3461          104 :         let time_final_phase = timer.elapsed();
    3462          104 : 
    3463          104 :         stat.time_final_phase_secs = time_final_phase.as_secs_f64();
    3464          104 :         stat.time_to_first_kv_pair_secs = time_to_first_kv_pair
    3465          104 :             .unwrap_or(Duration::ZERO)
    3466          104 :             .as_secs_f64();
    3467          104 :         stat.time_main_loop_secs = time_main_loop.as_secs_f64();
    3468          104 :         stat.time_acquire_lock_secs = time_acquire_lock.as_secs_f64();
    3469          104 :         stat.time_download_layer_secs = time_download_layer.as_secs_f64();
    3470          104 :         stat.time_analyze_secs = time_analyze.as_secs_f64();
    3471          104 :         stat.time_total_secs = begin_timer.elapsed().as_secs_f64();
    3472          104 :         stat.finalize();
    3473          104 : 
    3474          104 :         info!(
    3475            0 :             "gc-compaction statistics: {}",
    3476            0 :             serde_json::to_string(&stat)
    3477            0 :                 .context("failed to serialize gc-compaction statistics")
    3478            0 :                 .map_err(CompactionError::Other)?
    3479              :         );
    3480              : 
    3481          104 :         if dry_run {
    3482            8 :             return Ok(CompactionOutcome::Done);
    3483           96 :         }
    3484           96 : 
    3485           96 :         info!(
    3486            0 :             "produced {} delta layers and {} image layers, {} layers are kept",
    3487            0 :             produced_delta_layers_len,
    3488            0 :             produced_image_layers_len,
    3489            0 :             keep_layers.len()
    3490              :         );
    3491              : 
    3492              :         // Step 3: Place back to the layer map.
    3493              : 
    3494              :         // First, do a sanity check to ensure the newly-created layer map does not contain overlaps.
    3495           96 :         let all_layers = {
    3496           96 :             let guard = self.layers.read().await;
    3497           96 :             let layer_map = guard.layer_map()?;
    3498           96 :             layer_map.iter_historic_layers().collect_vec()
    3499           96 :         };
    3500           96 : 
    3501           96 :         let mut final_layers = all_layers
    3502           96 :             .iter()
    3503          428 :             .map(|layer| layer.layer_name())
    3504           96 :             .collect::<HashSet<_>>();
    3505          304 :         for layer in &layer_selection {
    3506          208 :             final_layers.remove(&layer.layer_desc().layer_name());
    3507          208 :         }
    3508          204 :         for layer in &compact_to {
    3509          108 :             final_layers.insert(layer.layer_desc().layer_name());
    3510          108 :         }
    3511           96 :         let final_layers = final_layers.into_iter().collect_vec();
    3512              : 
    3513              :         // TODO: move this check before we call `finish` on image layer writers. However, this will require us to get the layer name before we finish
    3514              :         // the writer, so potentially, we will need a function like `ImageLayerBatchWriter::get_all_pending_layer_keys` to get all the keys that are
    3515              :         // in the writer before finalizing the persistent layers. Now we would leave some dangling layers on the disk if the check fails.
    3516           96 :         if let Some(err) = check_valid_layermap(&final_layers) {
    3517            0 :             return Err(CompactionError::Other(anyhow!(
    3518            0 :                 "gc-compaction layer map check failed after compaction because {}, compaction result not applied to the layer map due to potential data loss",
    3519            0 :                 err
    3520            0 :             )));
    3521           96 :         }
    3522              : 
    3523              :         // Between the sanity check and this compaction update, there could be new layers being flushed, but it should be fine because we only
    3524              :         // operate on L1 layers.
    3525              :         {
    3526              :             // Gc-compaction will rewrite the history of a key. This could happen in two ways:
    3527              :             //
    3528              :             // 1. We create an image layer to replace all the deltas below the compact LSN. In this case, assume
    3529              :             // we have 2 delta layers A and B, both below the compact LSN. We create an image layer I to replace
    3530              :             // A and B at the compact LSN. If the read path finishes reading A, yields, and now we update the layer
    3531              :             // map, the read path then cannot find any keys below A, reporting a missing key error, while the key
    3532              :             // now gets stored in I at the compact LSN.
    3533              :             //
    3534              :             // ---------------                                       ---------------
    3535              :             //   delta1@LSN20                                         image1@LSN20
    3536              :             // ---------------  (read path collects delta@LSN20,  => ---------------  (read path cannot find anything
    3537              :             //   delta1@LSN10    yields)                                               below LSN 20)
    3538              :             // ---------------
    3539              :             //
    3540              :             // 2. We create a delta layer to replace all the deltas below the compact LSN, and in the delta layers,
    3541              :             // we combines the history of a key into a single image. For example, we have deltas at LSN 1, 2, 3, 4,
    3542              :             // Assume one delta layer contains LSN 1, 2, 3 and the other contains LSN 4.
    3543              :             //
    3544              :             // We let gc-compaction combine delta 2, 3, 4 into an image at LSN 4, which produces a delta layer that
    3545              :             // contains the delta at LSN 1, the image at LSN 4. If the read path finishes reading the original delta
    3546              :             // layer containing 4, yields, and we update the layer map to put the delta layer.
    3547              :             //
    3548              :             // ---------------                                      ---------------
    3549              :             //   delta1@LSN4                                          image1@LSN4
    3550              :             // ---------------  (read path collects delta@LSN4,  => ---------------  (read path collects LSN4 and LSN1,
    3551              :             //  delta1@LSN1-3    yields)                              delta1@LSN1     which is an invalid history)
    3552              :             // ---------------                                      ---------------
    3553              :             //
    3554              :             // Therefore, the gc-compaction layer update operation should wait for all ongoing reads, block all pending reads,
    3555              :             // and only allow reads to continue after the update is finished.
    3556              : 
    3557           96 :             let update_guard = self.gc_compaction_layer_update_lock.write().await;
    3558              :             // Acquiring the update guard ensures current read operations end and new read operations are blocked.
    3559              :             // TODO: can we use `latest_gc_cutoff` Rcu to achieve the same effect?
    3560           96 :             let mut guard = self.layers.write().await;
    3561           96 :             guard
    3562           96 :                 .open_mut()?
    3563           96 :                 .finish_gc_compaction(&layer_selection, &compact_to, &self.metrics);
    3564           96 :             drop(update_guard); // Allow new reads to start ONLY after we finished updating the layer map.
    3565           96 :         };
    3566           96 : 
    3567           96 :         // Schedule an index-only upload to update the `latest_gc_cutoff` in the index_part.json.
    3568           96 :         // Otherwise, after restart, the index_part only contains the old `latest_gc_cutoff` and
    3569           96 :         // find_gc_cutoffs will try accessing things below the cutoff. TODO: ideally, this should
    3570           96 :         // be batched into `schedule_compaction_update`.
    3571           96 :         let disk_consistent_lsn = self.disk_consistent_lsn.load();
    3572           96 :         self.schedule_uploads(disk_consistent_lsn, None)
    3573           96 :             .context("failed to schedule uploads")
    3574           96 :             .map_err(CompactionError::Other)?;
    3575              :         // If a layer gets rewritten throughout gc-compaction, we need to keep that layer only in `compact_to` instead
    3576              :         // of `compact_from`.
    3577           96 :         let compact_from = {
    3578           96 :             let mut compact_from = Vec::new();
    3579           96 :             let mut compact_to_set = HashMap::new();
    3580          204 :             for layer in &compact_to {
    3581          108 :                 compact_to_set.insert(layer.layer_desc().key(), layer);
    3582          108 :             }
    3583          304 :             for layer in &layer_selection {
    3584          208 :                 if let Some(to) = compact_to_set.get(&layer.layer_desc().key()) {
    3585            0 :                     tracing::info!(
    3586            0 :                         "skipping delete {} because found same layer key at different generation {}",
    3587              :                         layer,
    3588              :                         to
    3589              :                     );
    3590          208 :                 } else {
    3591          208 :                     compact_from.push(layer.clone());
    3592          208 :                 }
    3593              :             }
    3594           96 :             compact_from
    3595           96 :         };
    3596           96 :         self.remote_client
    3597           96 :             .schedule_compaction_update(&compact_from, &compact_to)?;
    3598              : 
    3599           96 :         drop(gc_lock);
    3600           96 : 
    3601           96 :         Ok(CompactionOutcome::Done)
    3602          112 :     }
    3603              : }
    3604              : 
    3605              : struct TimelineAdaptor {
    3606              :     timeline: Arc<Timeline>,
    3607              : 
    3608              :     keyspace: (Lsn, KeySpace),
    3609              : 
    3610              :     new_deltas: Vec<ResidentLayer>,
    3611              :     new_images: Vec<ResidentLayer>,
    3612              :     layers_to_delete: Vec<Arc<PersistentLayerDesc>>,
    3613              : }
    3614              : 
    3615              : impl TimelineAdaptor {
    3616            0 :     pub fn new(timeline: &Arc<Timeline>, keyspace: (Lsn, KeySpace)) -> Self {
    3617            0 :         Self {
    3618            0 :             timeline: timeline.clone(),
    3619            0 :             keyspace,
    3620            0 :             new_images: Vec::new(),
    3621            0 :             new_deltas: Vec::new(),
    3622            0 :             layers_to_delete: Vec::new(),
    3623            0 :         }
    3624            0 :     }
    3625              : 
    3626            0 :     pub async fn flush_updates(&mut self) -> Result<(), CompactionError> {
    3627            0 :         let layers_to_delete = {
    3628            0 :             let guard = self.timeline.layers.read().await;
    3629            0 :             self.layers_to_delete
    3630            0 :                 .iter()
    3631            0 :                 .map(|x| guard.get_from_desc(x))
    3632            0 :                 .collect::<Vec<Layer>>()
    3633            0 :         };
    3634            0 :         self.timeline
    3635            0 :             .finish_compact_batch(&self.new_deltas, &self.new_images, &layers_to_delete)
    3636            0 :             .await?;
    3637              : 
    3638            0 :         self.timeline
    3639            0 :             .upload_new_image_layers(std::mem::take(&mut self.new_images))?;
    3640              : 
    3641            0 :         self.new_deltas.clear();
    3642            0 :         self.layers_to_delete.clear();
    3643            0 :         Ok(())
    3644            0 :     }
    3645              : }
    3646              : 
    3647              : #[derive(Clone)]
    3648              : struct ResidentDeltaLayer(ResidentLayer);
    3649              : #[derive(Clone)]
    3650              : struct ResidentImageLayer(ResidentLayer);
    3651              : 
    3652              : impl CompactionJobExecutor for TimelineAdaptor {
    3653              :     type Key = pageserver_api::key::Key;
    3654              : 
    3655              :     type Layer = OwnArc<PersistentLayerDesc>;
    3656              :     type DeltaLayer = ResidentDeltaLayer;
    3657              :     type ImageLayer = ResidentImageLayer;
    3658              : 
    3659              :     type RequestContext = crate::context::RequestContext;
    3660              : 
    3661            0 :     fn get_shard_identity(&self) -> &ShardIdentity {
    3662            0 :         self.timeline.get_shard_identity()
    3663            0 :     }
    3664              : 
    3665            0 :     async fn get_layers(
    3666            0 :         &mut self,
    3667            0 :         key_range: &Range<Key>,
    3668            0 :         lsn_range: &Range<Lsn>,
    3669            0 :         _ctx: &RequestContext,
    3670            0 :     ) -> anyhow::Result<Vec<OwnArc<PersistentLayerDesc>>> {
    3671            0 :         self.flush_updates().await?;
    3672              : 
    3673            0 :         let guard = self.timeline.layers.read().await;
    3674            0 :         let layer_map = guard.layer_map()?;
    3675              : 
    3676            0 :         let result = layer_map
    3677            0 :             .iter_historic_layers()
    3678            0 :             .filter(|l| {
    3679            0 :                 overlaps_with(&l.lsn_range, lsn_range) && overlaps_with(&l.key_range, key_range)
    3680            0 :             })
    3681            0 :             .map(OwnArc)
    3682            0 :             .collect();
    3683            0 :         Ok(result)
    3684            0 :     }
    3685              : 
    3686            0 :     async fn get_keyspace(
    3687            0 :         &mut self,
    3688            0 :         key_range: &Range<Key>,
    3689            0 :         lsn: Lsn,
    3690            0 :         _ctx: &RequestContext,
    3691            0 :     ) -> anyhow::Result<Vec<Range<Key>>> {
    3692            0 :         if lsn == self.keyspace.0 {
    3693            0 :             Ok(pageserver_compaction::helpers::intersect_keyspace(
    3694            0 :                 &self.keyspace.1.ranges,
    3695            0 :                 key_range,
    3696            0 :             ))
    3697              :         } else {
    3698              :             // The current compaction implementation only ever requests the key space
    3699              :             // at the compaction end LSN.
    3700            0 :             anyhow::bail!("keyspace not available for requested lsn");
    3701              :         }
    3702            0 :     }
    3703              : 
    3704            0 :     async fn downcast_delta_layer(
    3705            0 :         &self,
    3706            0 :         layer: &OwnArc<PersistentLayerDesc>,
    3707            0 :         ctx: &RequestContext,
    3708            0 :     ) -> anyhow::Result<Option<ResidentDeltaLayer>> {
    3709            0 :         // this is a lot more complex than a simple downcast...
    3710            0 :         if layer.is_delta() {
    3711            0 :             let l = {
    3712            0 :                 let guard = self.timeline.layers.read().await;
    3713            0 :                 guard.get_from_desc(layer)
    3714              :             };
    3715            0 :             let result = l.download_and_keep_resident(ctx).await?;
    3716              : 
    3717            0 :             Ok(Some(ResidentDeltaLayer(result)))
    3718              :         } else {
    3719            0 :             Ok(None)
    3720              :         }
    3721            0 :     }
    3722              : 
    3723            0 :     async fn create_image(
    3724            0 :         &mut self,
    3725            0 :         lsn: Lsn,
    3726            0 :         key_range: &Range<Key>,
    3727            0 :         ctx: &RequestContext,
    3728            0 :     ) -> anyhow::Result<()> {
    3729            0 :         Ok(self.create_image_impl(lsn, key_range, ctx).await?)
    3730            0 :     }
    3731              : 
    3732            0 :     async fn create_delta(
    3733            0 :         &mut self,
    3734            0 :         lsn_range: &Range<Lsn>,
    3735            0 :         key_range: &Range<Key>,
    3736            0 :         input_layers: &[ResidentDeltaLayer],
    3737            0 :         ctx: &RequestContext,
    3738            0 :     ) -> anyhow::Result<()> {
    3739            0 :         debug!("Create new layer {}..{}", lsn_range.start, lsn_range.end);
    3740              : 
    3741            0 :         let mut all_entries = Vec::new();
    3742            0 :         for dl in input_layers.iter() {
    3743            0 :             all_entries.extend(dl.load_keys(ctx).await?);
    3744              :         }
    3745              : 
    3746              :         // The current stdlib sorting implementation is designed in a way where it is
    3747              :         // particularly fast where the slice is made up of sorted sub-ranges.
    3748            0 :         all_entries.sort_by_key(|DeltaEntry { key, lsn, .. }| (*key, *lsn));
    3749              : 
    3750            0 :         let mut writer = DeltaLayerWriter::new(
    3751            0 :             self.timeline.conf,
    3752            0 :             self.timeline.timeline_id,
    3753            0 :             self.timeline.tenant_shard_id,
    3754            0 :             key_range.start,
    3755            0 :             lsn_range.clone(),
    3756            0 :             ctx,
    3757            0 :         )
    3758            0 :         .await?;
    3759              : 
    3760            0 :         let mut dup_values = 0;
    3761            0 : 
    3762            0 :         // This iterator walks through all key-value pairs from all the layers
    3763            0 :         // we're compacting, in key, LSN order.
    3764            0 :         let mut prev: Option<(Key, Lsn)> = None;
    3765              :         for &DeltaEntry {
    3766            0 :             key, lsn, ref val, ..
    3767            0 :         } in all_entries.iter()
    3768              :         {
    3769            0 :             if prev == Some((key, lsn)) {
    3770              :                 // This is a duplicate. Skip it.
    3771              :                 //
    3772              :                 // It can happen if compaction is interrupted after writing some
    3773              :                 // layers but not all, and we are compacting the range again.
    3774              :                 // The calculations in the algorithm assume that there are no
    3775              :                 // duplicates, so the math on targeted file size is likely off,
    3776              :                 // and we will create smaller files than expected.
    3777            0 :                 dup_values += 1;
    3778            0 :                 continue;
    3779            0 :             }
    3780              : 
    3781            0 :             let value = val.load(ctx).await?;
    3782              : 
    3783            0 :             writer.put_value(key, lsn, value, ctx).await?;
    3784              : 
    3785            0 :             prev = Some((key, lsn));
    3786              :         }
    3787              : 
    3788            0 :         if dup_values > 0 {
    3789            0 :             warn!("delta layer created with {} duplicate values", dup_values);
    3790            0 :         }
    3791              : 
    3792            0 :         fail_point!("delta-layer-writer-fail-before-finish", |_| {
    3793            0 :             Err(anyhow::anyhow!(
    3794            0 :                 "failpoint delta-layer-writer-fail-before-finish"
    3795            0 :             ))
    3796            0 :         });
    3797              : 
    3798            0 :         let (desc, path) = writer.finish(prev.unwrap().0.next(), ctx).await?;
    3799            0 :         let new_delta_layer =
    3800            0 :             Layer::finish_creating(self.timeline.conf, &self.timeline, desc, &path)?;
    3801              : 
    3802            0 :         self.new_deltas.push(new_delta_layer);
    3803            0 :         Ok(())
    3804            0 :     }
    3805              : 
    3806            0 :     async fn delete_layer(
    3807            0 :         &mut self,
    3808            0 :         layer: &OwnArc<PersistentLayerDesc>,
    3809            0 :         _ctx: &RequestContext,
    3810            0 :     ) -> anyhow::Result<()> {
    3811            0 :         self.layers_to_delete.push(layer.clone().0);
    3812            0 :         Ok(())
    3813            0 :     }
    3814              : }
    3815              : 
    3816              : impl TimelineAdaptor {
    3817            0 :     async fn create_image_impl(
    3818            0 :         &mut self,
    3819            0 :         lsn: Lsn,
    3820            0 :         key_range: &Range<Key>,
    3821            0 :         ctx: &RequestContext,
    3822            0 :     ) -> Result<(), CreateImageLayersError> {
    3823            0 :         let timer = self.timeline.metrics.create_images_time_histo.start_timer();
    3824              : 
    3825            0 :         let image_layer_writer = ImageLayerWriter::new(
    3826            0 :             self.timeline.conf,
    3827            0 :             self.timeline.timeline_id,
    3828            0 :             self.timeline.tenant_shard_id,
    3829            0 :             key_range,
    3830            0 :             lsn,
    3831            0 :             ctx,
    3832            0 :         )
    3833            0 :         .await?;
    3834              : 
    3835            0 :         fail_point!("image-layer-writer-fail-before-finish", |_| {
    3836            0 :             Err(CreateImageLayersError::Other(anyhow::anyhow!(
    3837            0 :                 "failpoint image-layer-writer-fail-before-finish"
    3838            0 :             )))
    3839            0 :         });
    3840              : 
    3841            0 :         let keyspace = KeySpace {
    3842            0 :             ranges: self.get_keyspace(key_range, lsn, ctx).await?,
    3843              :         };
    3844              :         // TODO set proper (stateful) start. The create_image_layer_for_rel_blocks function mostly
    3845            0 :         let outcome = self
    3846            0 :             .timeline
    3847            0 :             .create_image_layer_for_rel_blocks(
    3848            0 :                 &keyspace,
    3849            0 :                 image_layer_writer,
    3850            0 :                 lsn,
    3851            0 :                 ctx,
    3852            0 :                 key_range.clone(),
    3853            0 :                 IoConcurrency::sequential(),
    3854            0 :             )
    3855            0 :             .await?;
    3856              : 
    3857              :         if let ImageLayerCreationOutcome::Generated {
    3858            0 :             unfinished_image_layer,
    3859            0 :         } = outcome
    3860              :         {
    3861            0 :             let (desc, path) = unfinished_image_layer.finish(ctx).await?;
    3862            0 :             let image_layer =
    3863            0 :                 Layer::finish_creating(self.timeline.conf, &self.timeline, desc, &path)?;
    3864            0 :             self.new_images.push(image_layer);
    3865            0 :         }
    3866              : 
    3867            0 :         timer.stop_and_record();
    3868            0 : 
    3869            0 :         Ok(())
    3870            0 :     }
    3871              : }
    3872              : 
    3873              : impl CompactionRequestContext for crate::context::RequestContext {}
    3874              : 
    3875              : #[derive(Debug, Clone)]
    3876              : pub struct OwnArc<T>(pub Arc<T>);
    3877              : 
    3878              : impl<T> Deref for OwnArc<T> {
    3879              :     type Target = <Arc<T> as Deref>::Target;
    3880            0 :     fn deref(&self) -> &Self::Target {
    3881            0 :         &self.0
    3882            0 :     }
    3883              : }
    3884              : 
    3885              : impl<T> AsRef<T> for OwnArc<T> {
    3886            0 :     fn as_ref(&self) -> &T {
    3887            0 :         self.0.as_ref()
    3888            0 :     }
    3889              : }
    3890              : 
    3891              : impl CompactionLayer<Key> for OwnArc<PersistentLayerDesc> {
    3892            0 :     fn key_range(&self) -> &Range<Key> {
    3893            0 :         &self.key_range
    3894            0 :     }
    3895            0 :     fn lsn_range(&self) -> &Range<Lsn> {
    3896            0 :         &self.lsn_range
    3897            0 :     }
    3898            0 :     fn file_size(&self) -> u64 {
    3899            0 :         self.file_size
    3900            0 :     }
    3901            0 :     fn short_id(&self) -> std::string::String {
    3902            0 :         self.as_ref().short_id().to_string()
    3903            0 :     }
    3904            0 :     fn is_delta(&self) -> bool {
    3905            0 :         self.as_ref().is_delta()
    3906            0 :     }
    3907              : }
    3908              : 
    3909              : impl CompactionLayer<Key> for OwnArc<DeltaLayer> {
    3910            0 :     fn key_range(&self) -> &Range<Key> {
    3911            0 :         &self.layer_desc().key_range
    3912            0 :     }
    3913            0 :     fn lsn_range(&self) -> &Range<Lsn> {
    3914            0 :         &self.layer_desc().lsn_range
    3915            0 :     }
    3916            0 :     fn file_size(&self) -> u64 {
    3917            0 :         self.layer_desc().file_size
    3918            0 :     }
    3919            0 :     fn short_id(&self) -> std::string::String {
    3920            0 :         self.layer_desc().short_id().to_string()
    3921            0 :     }
    3922            0 :     fn is_delta(&self) -> bool {
    3923            0 :         true
    3924            0 :     }
    3925              : }
    3926              : 
    3927              : impl CompactionLayer<Key> for ResidentDeltaLayer {
    3928            0 :     fn key_range(&self) -> &Range<Key> {
    3929            0 :         &self.0.layer_desc().key_range
    3930            0 :     }
    3931            0 :     fn lsn_range(&self) -> &Range<Lsn> {
    3932            0 :         &self.0.layer_desc().lsn_range
    3933            0 :     }
    3934            0 :     fn file_size(&self) -> u64 {
    3935            0 :         self.0.layer_desc().file_size
    3936            0 :     }
    3937            0 :     fn short_id(&self) -> std::string::String {
    3938            0 :         self.0.layer_desc().short_id().to_string()
    3939            0 :     }
    3940            0 :     fn is_delta(&self) -> bool {
    3941            0 :         true
    3942            0 :     }
    3943              : }
    3944              : 
    3945              : impl CompactionDeltaLayer<TimelineAdaptor> for ResidentDeltaLayer {
    3946              :     type DeltaEntry<'a> = DeltaEntry<'a>;
    3947              : 
    3948            0 :     async fn load_keys(&self, ctx: &RequestContext) -> anyhow::Result<Vec<DeltaEntry<'_>>> {
    3949            0 :         self.0.get_as_delta(ctx).await?.index_entries(ctx).await
    3950            0 :     }
    3951              : }
    3952              : 
    3953              : impl CompactionLayer<Key> for ResidentImageLayer {
    3954            0 :     fn key_range(&self) -> &Range<Key> {
    3955            0 :         &self.0.layer_desc().key_range
    3956            0 :     }
    3957            0 :     fn lsn_range(&self) -> &Range<Lsn> {
    3958            0 :         &self.0.layer_desc().lsn_range
    3959            0 :     }
    3960            0 :     fn file_size(&self) -> u64 {
    3961            0 :         self.0.layer_desc().file_size
    3962            0 :     }
    3963            0 :     fn short_id(&self) -> std::string::String {
    3964            0 :         self.0.layer_desc().short_id().to_string()
    3965            0 :     }
    3966            0 :     fn is_delta(&self) -> bool {
    3967            0 :         false
    3968            0 :     }
    3969              : }
    3970              : impl CompactionImageLayer<TimelineAdaptor> for ResidentImageLayer {}
        

Generated by: LCOV version 2.1-beta