Line data Source code
1 : //! New compaction implementation. The algorithm itself is implemented in the
2 : //! compaction crate. This file implements the callbacks and structs that allow
3 : //! the algorithm to drive the process.
4 : //!
5 : //! The old legacy algorithm is implemented directly in `timeline.rs`.
6 :
7 : use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque};
8 : use std::ops::{Deref, Range};
9 : use std::sync::Arc;
10 :
11 : use super::layer_manager::LayerManager;
12 : use super::{
13 : CompactFlags, CompactOptions, CreateImageLayersError, DurationRecorder, GetVectoredError,
14 : ImageLayerCreationMode, LastImageLayerCreationStatus, PageReconstructError, RecordedDuration,
15 : Timeline,
16 : };
17 :
18 : use anyhow::{anyhow, bail, Context};
19 : use bytes::Bytes;
20 : use enumset::EnumSet;
21 : use fail::fail_point;
22 : use itertools::Itertools;
23 : use pageserver_api::key::KEY_SIZE;
24 : use pageserver_api::keyspace::ShardedRange;
25 : use pageserver_api::models::CompactInfoResponse;
26 : use pageserver_api::shard::{ShardCount, ShardIdentity, TenantShardId};
27 : use serde::Serialize;
28 : use tokio_util::sync::CancellationToken;
29 : use tracing::{debug, error, info, info_span, trace, warn, Instrument};
30 : use utils::critical;
31 : use utils::id::TimelineId;
32 :
33 : use crate::context::{AccessStatsBehavior, RequestContext, RequestContextBuilder};
34 : use crate::page_cache;
35 : use crate::pgdatadir_mapping::CollectKeySpaceError;
36 : use crate::statvfs::Statvfs;
37 : use crate::tenant::checks::check_valid_layermap;
38 : use crate::tenant::gc_block::GcBlock;
39 : use crate::tenant::layer_map::LayerMap;
40 : use crate::tenant::remote_timeline_client::WaitCompletionError;
41 : use crate::tenant::storage_layer::batch_split_writer::{
42 : BatchWriterResult, SplitDeltaLayerWriter, SplitImageLayerWriter,
43 : };
44 : use crate::tenant::storage_layer::filter_iterator::FilterIterator;
45 : use crate::tenant::storage_layer::merge_iterator::MergeIterator;
46 : use crate::tenant::storage_layer::{
47 : AsLayerDesc, PersistentLayerDesc, PersistentLayerKey, ValueReconstructState,
48 : };
49 : use crate::tenant::timeline::{drop_rlock, DeltaLayerWriter, ImageLayerWriter};
50 : use crate::tenant::timeline::{ImageLayerCreationOutcome, IoConcurrency};
51 : use crate::tenant::timeline::{Layer, ResidentLayer};
52 : use crate::tenant::{gc_block, DeltaLayer, MaybeOffloaded};
53 : use crate::virtual_file::{MaybeFatalIo, VirtualFile};
54 : use pageserver_api::config::tenant_conf_defaults::DEFAULT_CHECKPOINT_DISTANCE;
55 :
56 : use pageserver_api::key::Key;
57 : use pageserver_api::keyspace::KeySpace;
58 : use pageserver_api::record::NeonWalRecord;
59 : use pageserver_api::value::Value;
60 :
61 : use utils::lsn::Lsn;
62 :
63 : use pageserver_compaction::helpers::{fully_contains, overlaps_with};
64 : use pageserver_compaction::interface::*;
65 :
66 : use super::CompactionError;
67 :
68 : /// Maximum number of deltas before generating an image layer in bottom-most compaction.
69 : const COMPACTION_DELTA_THRESHOLD: usize = 5;
70 :
71 : #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
72 : pub struct GcCompactionJobId(pub usize);
73 :
74 : impl std::fmt::Display for GcCompactionJobId {
75 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
76 0 : write!(f, "{}", self.0)
77 0 : }
78 : }
79 :
80 : #[derive(Debug, Clone)]
81 : pub enum GcCompactionQueueItem {
82 : Manual(CompactOptions),
83 : SubCompactionJob(CompactOptions),
84 : #[allow(dead_code)]
85 : UpdateL2Lsn(Lsn),
86 : Notify(GcCompactionJobId),
87 : }
88 :
89 : impl GcCompactionQueueItem {
90 0 : pub fn into_compact_info_resp(
91 0 : self,
92 0 : id: GcCompactionJobId,
93 0 : running: bool,
94 0 : ) -> Option<CompactInfoResponse> {
95 0 : match self {
96 0 : GcCompactionQueueItem::Manual(options) => Some(CompactInfoResponse {
97 0 : compact_key_range: options.compact_key_range,
98 0 : compact_lsn_range: options.compact_lsn_range,
99 0 : sub_compaction: options.sub_compaction,
100 0 : running,
101 0 : job_id: id.0,
102 0 : }),
103 0 : GcCompactionQueueItem::SubCompactionJob(options) => Some(CompactInfoResponse {
104 0 : compact_key_range: options.compact_key_range,
105 0 : compact_lsn_range: options.compact_lsn_range,
106 0 : sub_compaction: options.sub_compaction,
107 0 : running,
108 0 : job_id: id.0,
109 0 : }),
110 0 : GcCompactionQueueItem::UpdateL2Lsn(_) => None,
111 0 : GcCompactionQueueItem::Notify(_) => None,
112 : }
113 0 : }
114 : }
115 :
116 : struct GcCompactionQueueInner {
117 : running: Option<(GcCompactionJobId, GcCompactionQueueItem)>,
118 : queued: VecDeque<(GcCompactionJobId, GcCompactionQueueItem)>,
119 : notify: HashMap<GcCompactionJobId, tokio::sync::oneshot::Sender<()>>,
120 : gc_guards: HashMap<GcCompactionJobId, gc_block::Guard>,
121 : last_id: GcCompactionJobId,
122 : }
123 :
124 : impl GcCompactionQueueInner {
125 0 : fn next_id(&mut self) -> GcCompactionJobId {
126 0 : let id = self.last_id;
127 0 : self.last_id = GcCompactionJobId(id.0 + 1);
128 0 : id
129 0 : }
130 : }
131 :
132 : /// A structure to store gc_compaction jobs.
133 : pub struct GcCompactionQueue {
134 : /// All items in the queue, and the currently-running job.
135 : inner: std::sync::Mutex<GcCompactionQueueInner>,
136 : /// Ensure only one thread is consuming the queue.
137 : consumer_lock: tokio::sync::Mutex<()>,
138 : }
139 :
140 : impl GcCompactionQueue {
141 0 : pub fn new() -> Self {
142 0 : GcCompactionQueue {
143 0 : inner: std::sync::Mutex::new(GcCompactionQueueInner {
144 0 : running: None,
145 0 : queued: VecDeque::new(),
146 0 : notify: HashMap::new(),
147 0 : gc_guards: HashMap::new(),
148 0 : last_id: GcCompactionJobId(0),
149 0 : }),
150 0 : consumer_lock: tokio::sync::Mutex::new(()),
151 0 : }
152 0 : }
153 :
154 0 : pub fn cancel_scheduled(&self) {
155 0 : let mut guard = self.inner.lock().unwrap();
156 0 : guard.queued.clear();
157 0 : guard.notify.clear();
158 0 : guard.gc_guards.clear();
159 0 : }
160 :
161 : /// Schedule a manual compaction job.
162 0 : pub fn schedule_manual_compaction(
163 0 : &self,
164 0 : options: CompactOptions,
165 0 : notify: Option<tokio::sync::oneshot::Sender<()>>,
166 0 : ) -> GcCompactionJobId {
167 0 : let mut guard = self.inner.lock().unwrap();
168 0 : let id = guard.next_id();
169 0 : guard
170 0 : .queued
171 0 : .push_back((id, GcCompactionQueueItem::Manual(options)));
172 0 : if let Some(notify) = notify {
173 0 : guard.notify.insert(id, notify);
174 0 : }
175 0 : info!("scheduled compaction job id={}", id);
176 0 : id
177 0 : }
178 :
179 : /// Trigger an auto compaction.
180 : #[allow(dead_code)]
181 0 : pub fn trigger_auto_compaction(&self, _: &Arc<Timeline>) {}
182 :
183 : /// Notify the caller that the job has finished and unblock GC.
184 0 : fn notify_and_unblock(&self, id: GcCompactionJobId) {
185 0 : info!("compaction job id={} finished", id);
186 0 : let mut guard = self.inner.lock().unwrap();
187 0 : if let Some(blocking) = guard.gc_guards.remove(&id) {
188 0 : drop(blocking)
189 0 : }
190 0 : if let Some(tx) = guard.notify.remove(&id) {
191 0 : let _ = tx.send(());
192 0 : }
193 0 : }
194 :
195 0 : async fn handle_sub_compaction(
196 0 : &self,
197 0 : id: GcCompactionJobId,
198 0 : options: CompactOptions,
199 0 : timeline: &Arc<Timeline>,
200 0 : gc_block: &GcBlock,
201 0 : ) -> Result<(), CompactionError> {
202 0 : info!("running scheduled enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs");
203 0 : let jobs: Vec<GcCompactJob> = timeline
204 0 : .gc_compaction_split_jobs(
205 0 : GcCompactJob::from_compact_options(options.clone()),
206 0 : options.sub_compaction_max_job_size_mb,
207 0 : )
208 0 : .await
209 0 : .map_err(CompactionError::Other)?;
210 0 : if jobs.is_empty() {
211 0 : info!("no jobs to run, skipping scheduled compaction task");
212 0 : self.notify_and_unblock(id);
213 : } else {
214 0 : let gc_guard = match gc_block.start().await {
215 0 : Ok(guard) => guard,
216 0 : Err(e) => {
217 0 : return Err(CompactionError::Other(anyhow!(
218 0 : "cannot run gc-compaction because gc is blocked: {}",
219 0 : e
220 0 : )));
221 : }
222 : };
223 :
224 0 : let jobs_len = jobs.len();
225 0 : let mut pending_tasks = Vec::new();
226 0 : for job in jobs {
227 : // Unfortunately we need to convert the `GcCompactJob` back to `CompactOptions`
228 : // until we do further refactors that allow calling `compact_with_gc` directly.
229 0 : let mut flags: EnumSet<CompactFlags> = EnumSet::default();
230 0 : flags |= CompactFlags::EnhancedGcBottomMostCompaction;
231 0 : if job.dry_run {
232 0 : flags |= CompactFlags::DryRun;
233 0 : }
234 0 : let options = CompactOptions {
235 0 : flags,
236 0 : sub_compaction: false,
237 0 : compact_key_range: Some(job.compact_key_range.into()),
238 0 : compact_lsn_range: Some(job.compact_lsn_range.into()),
239 0 : sub_compaction_max_job_size_mb: None,
240 0 : };
241 0 : pending_tasks.push(GcCompactionQueueItem::SubCompactionJob(options));
242 : }
243 0 : pending_tasks.push(GcCompactionQueueItem::Notify(id));
244 0 : {
245 0 : let mut guard = self.inner.lock().unwrap();
246 0 : guard.gc_guards.insert(id, gc_guard);
247 0 : let mut tasks = Vec::new();
248 0 : for task in pending_tasks {
249 0 : let id = guard.next_id();
250 0 : tasks.push((id, task));
251 0 : }
252 0 : tasks.reverse();
253 0 : for item in tasks {
254 0 : guard.queued.push_front(item);
255 0 : }
256 : }
257 0 : info!("scheduled enhanced gc bottom-most compaction with sub-compaction, split into {} jobs", jobs_len);
258 : }
259 0 : Ok(())
260 0 : }
261 :
262 : /// Take a job from the queue and process it. Returns whether there are still pending tasks.
263 0 : pub async fn iteration(
264 0 : &self,
265 0 : cancel: &CancellationToken,
266 0 : ctx: &RequestContext,
267 0 : gc_block: &GcBlock,
268 0 : timeline: &Arc<Timeline>,
269 0 : ) -> Result<CompactionOutcome, CompactionError> {
270 0 : let _one_op_at_a_time_guard = self.consumer_lock.lock().await;
271 : let has_pending_tasks;
272 0 : let (id, item) = {
273 0 : let mut guard = self.inner.lock().unwrap();
274 0 : let Some((id, item)) = guard.queued.pop_front() else {
275 0 : return Ok(CompactionOutcome::Done);
276 : };
277 0 : guard.running = Some((id, item.clone()));
278 0 : has_pending_tasks = !guard.queued.is_empty();
279 0 : (id, item)
280 0 : };
281 0 :
282 0 : match item {
283 0 : GcCompactionQueueItem::Manual(options) => {
284 0 : if !options
285 0 : .flags
286 0 : .contains(CompactFlags::EnhancedGcBottomMostCompaction)
287 : {
288 0 : warn!("ignoring scheduled compaction task: scheduled task must be gc compaction: {:?}", options);
289 0 : } else if options.sub_compaction {
290 0 : self.handle_sub_compaction(id, options, timeline, gc_block)
291 0 : .await?;
292 : } else {
293 0 : let gc_guard = match gc_block.start().await {
294 0 : Ok(guard) => guard,
295 0 : Err(e) => {
296 0 : return Err(CompactionError::Other(anyhow!(
297 0 : "cannot run gc-compaction because gc is blocked: {}",
298 0 : e
299 0 : )));
300 : }
301 : };
302 0 : {
303 0 : let mut guard = self.inner.lock().unwrap();
304 0 : guard.gc_guards.insert(id, gc_guard);
305 0 : }
306 0 : let _ = timeline.compact_with_options(cancel, options, ctx).await?;
307 0 : self.notify_and_unblock(id);
308 : }
309 : }
310 0 : GcCompactionQueueItem::SubCompactionJob(options) => {
311 0 : let _ = timeline.compact_with_options(cancel, options, ctx).await?;
312 : }
313 0 : GcCompactionQueueItem::Notify(id) => {
314 0 : self.notify_and_unblock(id);
315 0 : }
316 : GcCompactionQueueItem::UpdateL2Lsn(_) => {
317 0 : unreachable!()
318 : }
319 : }
320 0 : {
321 0 : let mut guard = self.inner.lock().unwrap();
322 0 : guard.running = None;
323 0 : }
324 0 : Ok(if has_pending_tasks {
325 0 : CompactionOutcome::Pending
326 : } else {
327 0 : CompactionOutcome::Done
328 : })
329 0 : }
330 :
331 : #[allow(clippy::type_complexity)]
332 0 : pub fn remaining_jobs(
333 0 : &self,
334 0 : ) -> (
335 0 : Option<(GcCompactionJobId, GcCompactionQueueItem)>,
336 0 : VecDeque<(GcCompactionJobId, GcCompactionQueueItem)>,
337 0 : ) {
338 0 : let guard = self.inner.lock().unwrap();
339 0 : (guard.running.clone(), guard.queued.clone())
340 0 : }
341 :
342 : #[allow(dead_code)]
343 0 : pub fn remaining_jobs_num(&self) -> usize {
344 0 : let guard = self.inner.lock().unwrap();
345 0 : guard.queued.len() + if guard.running.is_some() { 1 } else { 0 }
346 0 : }
347 : }
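// Illustrative sketch (editor addition, not part of the original file): how a caller
// might schedule a manual gc-compaction job on the queue above and wait for it to
// complete. The function name is an assumption; the real drivers repeatedly call
// `iteration` elsewhere until the queue drains, which eventually fires the oneshot
// sender via `notify_and_unblock`.
#[allow(dead_code)]
async fn schedule_manual_gc_compaction_sketch(
    queue: &GcCompactionQueue,
    options: CompactOptions,
) -> anyhow::Result<()> {
    let (tx, rx) = tokio::sync::oneshot::channel();
    let job_id = queue.schedule_manual_compaction(options, Some(tx));
    info!("waiting for gc-compaction job id={} to finish", job_id);
    // With sub-compaction enabled, the job is split and a trailing `Notify` item is
    // queued behind the sub-jobs, so this completes only after all of them finish.
    rx.await?;
    Ok(())
}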
348 :
349 : /// A job description for the gc-compaction job. This structure describes the rectangle range that the job will
350 : /// process. The exact layers that need to be compacted/rewritten will be generated when `compact_with_gc` gets
351 : /// called.
352 : #[derive(Debug, Clone)]
353 : pub(crate) struct GcCompactJob {
354 : pub dry_run: bool,
355 : /// The key range to be compacted. The compaction algorithm will only regenerate key-value pairs within this range
356 : /// [left inclusive, right exclusive), and other pairs will be rewritten into new files if necessary.
357 : pub compact_key_range: Range<Key>,
358 : /// The LSN range to be compacted. The compaction algorithm will use this range to determine the layers to be
359 : /// selected for the compaction, and it does not guarantee the generated layers will have exactly the same LSN range
360 : /// as specified here. The true range being compacted is `min_lsn/max_lsn` in [`GcCompactionJobDescription`].
361 : /// min_lsn will always be <= the lower bound specified here, and max_lsn will always be >= the upper bound specified here.
362 : pub compact_lsn_range: Range<Lsn>,
363 : }
364 :
365 : impl GcCompactJob {
366 108 : pub fn from_compact_options(options: CompactOptions) -> Self {
367 108 : GcCompactJob {
368 108 : dry_run: options.flags.contains(CompactFlags::DryRun),
369 108 : compact_key_range: options
370 108 : .compact_key_range
371 108 : .map(|x| x.into())
372 108 : .unwrap_or(Key::MIN..Key::MAX),
373 108 : compact_lsn_range: options
374 108 : .compact_lsn_range
375 108 : .map(|x| x.into())
376 108 : .unwrap_or(Lsn::INVALID..Lsn::MAX),
377 108 : }
378 108 : }
379 : }
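// Example (editor addition, illustrative): a `CompactOptions` with neither
// `compact_key_range` nor `compact_lsn_range` set produces a `GcCompactJob` covering
// `Key::MIN..Key::MAX` and `Lsn::INVALID..Lsn::MAX`, i.e. a full gc-compaction.
// Setting only `compact_lsn_range` narrows layer selection while the key range still
// defaults to the whole key space.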
380 :
381 : /// A job description for the gc-compaction job. This structure is generated when `compact_with_gc` is called
382 : /// and contains the exact layers we want to compact.
383 : pub struct GcCompactionJobDescription {
384 : /// All layers to read in the compaction job
385 : selected_layers: Vec<Layer>,
386 : /// GC cutoff of the job. This is the lowest LSN that will be accessed by the read/GC path and we need to
387 : /// keep all deltas <= this LSN or generate an image == this LSN.
388 : gc_cutoff: Lsn,
389 : /// LSNs to retain for the job. Read path will use this LSN so we need to keep deltas <= this LSN or
390 : /// generate an image == this LSN.
391 : retain_lsns_below_horizon: Vec<Lsn>,
392 : /// Maximum layer LSN processed in this compaction, that is max(end_lsn of layers). Exclusive. All data
393 : /// \>= this LSN will be kept and will not be rewritten.
394 : max_layer_lsn: Lsn,
395 : /// Minimum layer LSN processed in this compaction, that is min(start_lsn of layers). Inclusive.
396 : /// All access below (strictly lower than, `<`) this LSN will be routed through the normal read path instead of
397 : /// k-merge within gc-compaction.
398 : min_layer_lsn: Lsn,
399 : /// Only compact layers overlapping with this range.
400 : compaction_key_range: Range<Key>,
401 : /// When partial compaction is enabled, these layers need to be rewritten to ensure no overlap.
402 : /// This field is here solely for debugging. The field will not be read once the compaction
403 : /// description is generated.
404 : rewrite_layers: Vec<Arc<PersistentLayerDesc>>,
405 : }
406 :
407 : /// The result of bottom-most compaction for a single key at each LSN.
408 : #[derive(Debug)]
409 : #[cfg_attr(test, derive(PartialEq))]
410 : pub struct KeyLogAtLsn(pub Vec<(Lsn, Value)>);
411 :
412 : /// The result of bottom-most compaction.
413 : #[derive(Debug)]
414 : #[cfg_attr(test, derive(PartialEq))]
415 : pub(crate) struct KeyHistoryRetention {
416 : /// Stores logs to reconstruct the value at the given LSN, that is to say, logs <= LSN or image == LSN.
417 : pub(crate) below_horizon: Vec<(Lsn, KeyLogAtLsn)>,
418 : /// Stores logs to reconstruct the value at any LSN above the horizon, that is to say, log > LSN.
419 : pub(crate) above_horizon: KeyLogAtLsn,
420 : }
421 :
422 : impl KeyHistoryRetention {
423 : /// Hack: skip the delta layer if we need to produce a layer with the same key-lsn range.
424 : ///
425 : /// This can happen if we have removed some deltas in "the middle" of some existing layer's key-lsn-range.
426 : /// For example, consider the case where a single delta with range [0x10,0x50) exists.
427 : /// And we have branches at LSN 0x10, 0x20, 0x30.
428 : /// Then we delete branch @ 0x20.
429 : /// Bottom-most compaction may now delete the delta [0x20,0x30).
430 : /// And that wouldn't change the shape of the layer.
431 : ///
432 : /// Note that bottom-most-gc-compaction never _adds_ new data in that case, only removes.
433 : ///
434 : /// `discard_key` will only be called when the writer reaches its target (instead of for every key), so it's fine to grab a lock inside.
435 148 : async fn discard_key(key: &PersistentLayerKey, tline: &Arc<Timeline>, dry_run: bool) -> bool {
436 148 : if dry_run {
437 0 : return true;
438 148 : }
439 148 : if LayerMap::is_l0(&key.key_range, key.is_delta) {
440 : // gc-compaction should not produce L0 deltas, otherwise it will break the layer order.
441 : // We should ignore such layers.
442 0 : return true;
443 148 : }
444 : let layer_generation;
445 : {
446 148 : let guard = tline.layers.read().await;
447 148 : if !guard.contains_key(key) {
448 104 : return false;
449 44 : }
450 44 : layer_generation = guard.get_from_key(key).metadata().generation;
451 44 : }
452 44 : if layer_generation == tline.generation {
453 44 : info!(
454 : key=%key,
455 : ?layer_generation,
456 0 : "discard layer due to duplicated layer key in the same generation",
457 : );
458 44 : true
459 : } else {
460 0 : false
461 : }
462 148 : }
463 :
464 : /// Pipe a history of a single key to the writers.
465 : ///
466 : /// If `image_writer` is `None`, the images will be placed into the delta layers.
467 : /// The delta writer will contain all images and deltas (below and above the horizon) except the bottom-most images.
468 : #[allow(clippy::too_many_arguments)]
469 1244 : async fn pipe_to(
470 1244 : self,
471 1244 : key: Key,
472 1244 : delta_writer: &mut SplitDeltaLayerWriter,
473 1244 : mut image_writer: Option<&mut SplitImageLayerWriter>,
474 1244 : stat: &mut CompactionStatistics,
475 1244 : ctx: &RequestContext,
476 1244 : ) -> anyhow::Result<()> {
477 1244 : let mut first_batch = true;
478 4024 : for (cutoff_lsn, KeyLogAtLsn(logs)) in self.below_horizon {
479 2780 : if first_batch {
480 1244 : if logs.len() == 1 && logs[0].1.is_image() {
481 1168 : let Value::Image(img) = &logs[0].1 else {
482 0 : unreachable!()
483 : };
484 1168 : stat.produce_image_key(img);
485 1168 : if let Some(image_writer) = image_writer.as_mut() {
486 1168 : image_writer.put_image(key, img.clone(), ctx).await?;
487 : } else {
488 0 : delta_writer
489 0 : .put_value(key, cutoff_lsn, Value::Image(img.clone()), ctx)
490 0 : .await?;
491 : }
492 : } else {
493 132 : for (lsn, val) in logs {
494 56 : stat.produce_key(&val);
495 56 : delta_writer.put_value(key, lsn, val, ctx).await?;
496 : }
497 : }
498 1244 : first_batch = false;
499 : } else {
500 1768 : for (lsn, val) in logs {
501 232 : stat.produce_key(&val);
502 232 : delta_writer.put_value(key, lsn, val, ctx).await?;
503 : }
504 : }
505 : }
506 1244 : let KeyLogAtLsn(above_horizon_logs) = self.above_horizon;
507 1360 : for (lsn, val) in above_horizon_logs {
508 116 : stat.produce_key(&val);
509 116 : delta_writer.put_value(key, lsn, val, ctx).await?;
510 : }
511 1244 : Ok(())
512 1244 : }
513 : }
514 :
515 : #[derive(Debug, Serialize, Default)]
516 : struct CompactionStatisticsNumSize {
517 : num: u64,
518 : size: u64,
519 : }
520 :
521 : #[derive(Debug, Serialize, Default)]
522 : pub struct CompactionStatistics {
523 : delta_layer_visited: CompactionStatisticsNumSize,
524 : image_layer_visited: CompactionStatisticsNumSize,
525 : delta_layer_produced: CompactionStatisticsNumSize,
526 : image_layer_produced: CompactionStatisticsNumSize,
527 : num_delta_layer_discarded: usize,
528 : num_image_layer_discarded: usize,
529 : num_unique_keys_visited: usize,
530 : wal_keys_visited: CompactionStatisticsNumSize,
531 : image_keys_visited: CompactionStatisticsNumSize,
532 : wal_produced: CompactionStatisticsNumSize,
533 : image_produced: CompactionStatisticsNumSize,
534 : }
535 :
536 : impl CompactionStatistics {
537 2084 : fn estimated_size_of_value(val: &Value) -> usize {
538 864 : match val {
539 1220 : Value::Image(img) => img.len(),
540 0 : Value::WalRecord(NeonWalRecord::Postgres { rec, .. }) => rec.len(),
541 864 : _ => std::mem::size_of::<NeonWalRecord>(),
542 : }
543 2084 : }
544 3272 : fn estimated_size_of_key() -> usize {
545 3272 : KEY_SIZE // TODO: distinguish image layer and delta layer (count LSN in delta layer)
546 3272 : }
547 172 : fn visit_delta_layer(&mut self, size: u64) {
548 172 : self.delta_layer_visited.num += 1;
549 172 : self.delta_layer_visited.size += size;
550 172 : }
551 132 : fn visit_image_layer(&mut self, size: u64) {
552 132 : self.image_layer_visited.num += 1;
553 132 : self.image_layer_visited.size += size;
554 132 : }
555 1244 : fn on_unique_key_visited(&mut self) {
556 1244 : self.num_unique_keys_visited += 1;
557 1244 : }
558 480 : fn visit_wal_key(&mut self, val: &Value) {
559 480 : self.wal_keys_visited.num += 1;
560 480 : self.wal_keys_visited.size +=
561 480 : Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
562 480 : }
563 1220 : fn visit_image_key(&mut self, val: &Value) {
564 1220 : self.image_keys_visited.num += 1;
565 1220 : self.image_keys_visited.size +=
566 1220 : Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
567 1220 : }
568 404 : fn produce_key(&mut self, val: &Value) {
569 404 : match val {
570 20 : Value::Image(img) => self.produce_image_key(img),
571 384 : Value::WalRecord(_) => self.produce_wal_key(val),
572 : }
573 404 : }
574 384 : fn produce_wal_key(&mut self, val: &Value) {
575 384 : self.wal_produced.num += 1;
576 384 : self.wal_produced.size +=
577 384 : Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
578 384 : }
579 1188 : fn produce_image_key(&mut self, val: &Bytes) {
580 1188 : self.image_produced.num += 1;
581 1188 : self.image_produced.size += val.len() as u64 + Self::estimated_size_of_key() as u64;
582 1188 : }
583 28 : fn discard_delta_layer(&mut self) {
584 28 : self.num_delta_layer_discarded += 1;
585 28 : }
586 16 : fn discard_image_layer(&mut self) {
587 16 : self.num_image_layer_discarded += 1;
588 16 : }
589 44 : fn produce_delta_layer(&mut self, size: u64) {
590 44 : self.delta_layer_produced.num += 1;
591 44 : self.delta_layer_produced.size += size;
592 44 : }
593 60 : fn produce_image_layer(&mut self, size: u64) {
594 60 : self.image_layer_produced.num += 1;
595 60 : self.image_layer_produced.size += size;
596 60 : }
597 : }
598 :
599 : #[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
600 : pub enum CompactionOutcome {
601 : #[default]
602 : /// No layers need to be compacted after this round. Compaction doesn't need
603 : /// to be immediately scheduled.
604 : Done,
605 : /// Still has pending layers to be compacted after this round. Ideally, the scheduler
606 : /// should immediately schedule another compaction.
607 : Pending,
608 : /// A timeline needs L0 compaction. Yield and schedule an immediate L0 compaction pass (only
609 : /// guaranteed when `compaction_l0_first` is enabled).
610 : YieldForL0,
611 : /// Compaction was skipped, because the timeline is ineligible for compaction.
612 : Skipped,
613 : }
614 :
615 : impl Timeline {
616 : /// TODO: cancellation
617 : ///
618 : /// Returns whether the compaction has pending tasks.
619 728 : pub(crate) async fn compact_legacy(
620 728 : self: &Arc<Self>,
621 728 : cancel: &CancellationToken,
622 728 : options: CompactOptions,
623 728 : ctx: &RequestContext,
624 728 : ) -> Result<CompactionOutcome, CompactionError> {
625 728 : if options
626 728 : .flags
627 728 : .contains(CompactFlags::EnhancedGcBottomMostCompaction)
628 : {
629 0 : self.compact_with_gc(cancel, options, ctx)
630 0 : .await
631 0 : .map_err(CompactionError::Other)?;
632 0 : return Ok(CompactionOutcome::Done);
633 728 : }
634 728 :
635 728 : if options.flags.contains(CompactFlags::DryRun) {
636 0 : return Err(CompactionError::Other(anyhow!(
637 0 : "dry-run mode is not supported for legacy compaction for now"
638 0 : )));
639 728 : }
640 728 :
641 728 : if options.compact_key_range.is_some() || options.compact_lsn_range.is_some() {
642 : // maybe useful in the future? could implement this at some point
643 0 : return Err(CompactionError::Other(anyhow!(
644 0 : "compaction range is not supported for legacy compaction for now"
645 0 : )));
646 728 : }
647 728 :
648 728 : // High level strategy for compaction / image creation:
649 728 : //
650 728 : // 1. First, do an L0 compaction to ensure we move the L0
651 728 : // layers into the historic layer map and get flat levels of
652 728 : // layers. If we did not compact all L0 layers, we will
653 728 : // prioritize compacting the timeline again and not do
654 728 : // any of the compactions below.
655 728 : //
656 728 : // 2. Then, calculate the desired "partitioning" of the
657 728 : // currently in-use key space. The goal is to partition the
658 728 : // key space into roughly fixed-size chunks, but also take into
659 728 : // account any existing image layers, and try to align the
660 728 : // chunk boundaries with the existing image layers to avoid
661 728 : // too much churn. Also try to align chunk boundaries with
662 728 : // relation boundaries. In principle, we don't know about
663 728 : // relation boundaries here, we just deal with key-value
664 728 : // pairs, and the code in pgdatadir_mapping.rs knows how to
665 728 : // map relations into key-value pairs. But in practice we know
666 728 : // that 'field6' is the block number, and the fields 1-5
667 728 : // identify a relation. This is just an optimization,
668 728 : // though.
669 728 : //
670 728 : // 3. Once we know the partitioning, for each partition,
671 728 : // decide if it's time to create a new image layer. The
672 728 : // criterion is: has there been too much "churn" since the last
673 728 : // image layer? "Churn" is a fuzzy concept; it's a
674 728 : // combination of too many delta files, or too much WAL in
675 728 : // total in the delta files. Or perhaps: whether creating an image
676 728 : // file would allow us to delete some older files.
677 728 : //
678 728 : // 4. In the end, if the tenant gets auto-sharded, we will run
679 728 : // a shard-ancestor compaction.
680 728 :
681 728 : // Is the timeline being deleted?
682 728 : if self.is_stopping() {
683 0 : trace!("Dropping out of compaction on timeline shutdown");
684 0 : return Err(CompactionError::ShuttingDown);
685 728 : }
686 728 :
687 728 : let target_file_size = self.get_checkpoint_distance();
688 :
689 : // Define partitioning schema if needed
690 :
691 : // 1. L0 Compact
692 728 : let l0_outcome = {
693 728 : let timer = self.metrics.compact_time_histo.start_timer();
694 728 : let l0_outcome = self
695 728 : .compact_level0(
696 728 : target_file_size,
697 728 : options.flags.contains(CompactFlags::ForceL0Compaction),
698 728 : ctx,
699 728 : )
700 728 : .await?;
701 728 : timer.stop_and_record();
702 728 : l0_outcome
703 728 : };
704 728 :
705 728 : if options.flags.contains(CompactFlags::OnlyL0Compaction) {
706 0 : return Ok(l0_outcome);
707 728 : }
708 728 :
709 728 : // Yield if we have pending L0 compaction. The scheduler will do another pass.
710 728 : if (l0_outcome == CompactionOutcome::Pending || l0_outcome == CompactionOutcome::YieldForL0)
711 0 : && !options.flags.contains(CompactFlags::NoYield)
712 : {
713 0 : info!("image/ancestor compaction yielding for L0 compaction");
714 0 : return Ok(CompactionOutcome::YieldForL0);
715 728 : }
716 728 :
717 728 : // 2. Repartition and create image layers if necessary
718 728 : match self
719 728 : .repartition(
720 728 : self.get_last_record_lsn(),
721 728 : self.get_compaction_target_size(),
722 728 : options.flags,
723 728 : ctx,
724 728 : )
725 728 : .await
726 : {
727 728 : Ok(((dense_partitioning, sparse_partitioning), lsn)) => {
728 728 : // Disables access_stats updates, so that the files we read remain candidates for eviction after we're done with them
729 728 : let image_ctx = RequestContextBuilder::extend(ctx)
730 728 : .access_stats_behavior(AccessStatsBehavior::Skip)
731 728 : .build();
732 728 :
733 728 : let mut partitioning = dense_partitioning;
734 728 : partitioning
735 728 : .parts
736 728 : .extend(sparse_partitioning.into_dense().parts);
737 :
738 : // 3. Create new image layers for partitions that have been modified "enough".
739 728 : let (image_layers, outcome) = self
740 728 : .create_image_layers(
741 728 : &partitioning,
742 728 : lsn,
743 728 : if options
744 728 : .flags
745 728 : .contains(CompactFlags::ForceImageLayerCreation)
746 : {
747 28 : ImageLayerCreationMode::Force
748 : } else {
749 700 : ImageLayerCreationMode::Try
750 : },
751 728 : &image_ctx,
752 728 : self.last_image_layer_creation_status
753 728 : .load()
754 728 : .as_ref()
755 728 : .clone(),
756 728 : !options.flags.contains(CompactFlags::NoYield),
757 728 : )
758 728 : .await
759 728 : .inspect_err(|err| {
760 : if let CreateImageLayersError::GetVectoredError(
761 : GetVectoredError::MissingKey(_),
762 0 : ) = err
763 : {
764 0 : critical!("missing key during compaction: {err:?}");
765 0 : }
766 728 : })?;
767 :
768 728 : self.last_image_layer_creation_status
769 728 : .store(Arc::new(outcome.clone()));
770 728 :
771 728 : self.upload_new_image_layers(image_layers)?;
772 728 : if let LastImageLayerCreationStatus::Incomplete { .. } = outcome {
773 : // Yield and do not do any other kind of compaction.
774 0 : info!("skipping shard ancestor compaction due to pending image layer generation tasks (preempted by L0 compaction).");
775 0 : return Ok(CompactionOutcome::YieldForL0);
776 728 : }
777 : }
778 :
779 : // Suppress errors when cancelled.
780 0 : Err(_) if self.cancel.is_cancelled() => {}
781 0 : Err(CompactionError::ShuttingDown) => {}
782 :
783 : // Alert on critical errors that indicate data corruption.
784 : Err(
785 0 : err @ CompactionError::CollectKeySpaceError(
786 0 : CollectKeySpaceError::Decode(_)
787 0 : | CollectKeySpaceError::PageRead(
788 0 : PageReconstructError::MissingKey(_) | PageReconstructError::WalRedo(_),
789 0 : ),
790 0 : ),
791 0 : ) => critical!("could not compact, repartitioning keyspace failed: {err:?}"),
792 :
793 : // Log other errors. No partitioning? This is normal, if the timeline was just created
794 : // as an empty timeline. Also in unit tests, when we use the timeline as a simple
795 : // key-value store, ignoring the datadir layout. Log the error but continue.
796 0 : Err(err) => error!("could not compact, repartitioning keyspace failed: {err:?}"),
797 : };
798 :
799 728 : let partition_count = self.partitioning.read().0 .0.parts.len();
800 728 :
801 728 : // 4. Shard ancestor compaction
802 728 :
803 728 : if self.shard_identity.count >= ShardCount::new(2) {
804 : // Limit the number of layer rewrites to the number of partitions: this means its
805 : // runtime should be comparable to a full round of image layer creations, rather than
806 : // being potentially much longer.
807 0 : let rewrite_max = partition_count;
808 0 :
809 0 : self.compact_shard_ancestors(rewrite_max, ctx).await?;
810 728 : }
811 :
812 728 : Ok(CompactionOutcome::Done)
813 728 : }
814 :
815 : /// Check for layers that are eligible to be rewritten:
816 : /// - Shard splitting: After a shard split, rewrite ancestor layers beyond pitr_interval, so that
817 : /// we don't indefinitely retain keys in this shard that aren't needed.
818 : /// - For future use: layers beyond pitr_interval that are in formats we would
819 : /// rather not maintain compatibility with indefinitely.
820 : ///
821 : /// Note: this phase may read and write many gigabytes of data: use rewrite_max to bound
822 : /// how much work it will try to do in each compaction pass.
823 0 : async fn compact_shard_ancestors(
824 0 : self: &Arc<Self>,
825 0 : rewrite_max: usize,
826 0 : ctx: &RequestContext,
827 0 : ) -> Result<(), CompactionError> {
828 0 : let mut drop_layers = Vec::new();
829 0 : let mut layers_to_rewrite: Vec<Layer> = Vec::new();
830 0 :
831 0 : // We will use the Lsn cutoff of the last GC as a threshold for rewriting layers: if a
832 0 : // layer is behind this Lsn, it indicates that the layer is being retained beyond the
833 0 : // pitr_interval, for example because a branchpoint references it.
834 0 : //
835 0 : // Holding this read guard also blocks [`Self::gc_timeline`] from entering while we
836 0 : // are rewriting layers.
837 0 : let latest_gc_cutoff = self.get_applied_gc_cutoff_lsn();
838 0 :
839 0 : tracing::info!(
840 0 : "latest_gc_cutoff: {}, pitr cutoff {}",
841 0 : *latest_gc_cutoff,
842 0 : self.gc_info.read().unwrap().cutoffs.time
843 : );
844 :
845 0 : let layers = self.layers.read().await;
846 0 : for layer_desc in layers.layer_map()?.iter_historic_layers() {
847 0 : let layer = layers.get_from_desc(&layer_desc);
848 0 : if layer.metadata().shard.shard_count == self.shard_identity.count {
849 : // This layer does not belong to a historic ancestor, no need to re-image it.
850 0 : continue;
851 0 : }
852 0 :
853 0 : // This layer was created on an ancestor shard: check if it contains any data for this shard.
854 0 : let sharded_range = ShardedRange::new(layer_desc.get_key_range(), &self.shard_identity);
855 0 : let layer_local_page_count = sharded_range.page_count();
856 0 : let layer_raw_page_count = ShardedRange::raw_size(&layer_desc.get_key_range());
857 0 : if layer_local_page_count == 0 {
858 : // This ancestral layer only covers keys that belong to other shards.
859 : // We include the full metadata in the log: if we had some critical bug that caused
860 : // us to incorrectly drop layers, this would simplify manually debugging + reinstating those layers.
861 0 : info!(%layer, old_metadata=?layer.metadata(),
862 0 : "dropping layer after shard split, contains no keys for this shard.",
863 : );
864 :
865 0 : if cfg!(debug_assertions) {
866 : // Expensive, exhaustive check of keys in this layer: this guards against ShardedRange's calculations being
867 : // wrong. If ShardedRange claims the local page count is zero, then no keys in this layer
868 : // should be !is_key_disposable()
869 0 : let range = layer_desc.get_key_range();
870 0 : let mut key = range.start;
871 0 : while key < range.end {
872 0 : debug_assert!(self.shard_identity.is_key_disposable(&key));
873 0 : key = key.next();
874 : }
875 0 : }
876 :
877 0 : drop_layers.push(layer);
878 0 : continue;
879 0 : } else if layer_local_page_count != u32::MAX
880 0 : && layer_local_page_count == layer_raw_page_count
881 : {
882 0 : debug!(%layer,
883 0 : "layer is entirely shard local ({} keys), no need to filter it",
884 : layer_local_page_count
885 : );
886 0 : continue;
887 0 : }
888 0 :
889 0 : // Don't bother re-writing a layer unless it will at least halve its size
890 0 : if layer_local_page_count != u32::MAX
891 0 : && layer_local_page_count > layer_raw_page_count / 2
892 : {
893 0 : debug!(%layer,
894 0 : "layer is already mostly local ({}/{}), not rewriting",
895 : layer_local_page_count,
896 : layer_raw_page_count
897 : );
898 0 : }
899 :
900 : // Don't bother re-writing a layer if it is within the PITR window: it will age-out eventually
901 : // without incurring the I/O cost of a rewrite.
902 0 : if layer_desc.get_lsn_range().end >= *latest_gc_cutoff {
903 0 : debug!(%layer, "Skipping rewrite of layer still in GC window ({} >= {})",
904 0 : layer_desc.get_lsn_range().end, *latest_gc_cutoff);
905 0 : continue;
906 0 : }
907 0 :
908 0 : if layer_desc.is_delta() {
909 : // We do not yet implement rewrite of delta layers
910 0 : debug!(%layer, "Skipping rewrite of delta layer");
911 0 : continue;
912 0 : }
913 0 :
914 0 : // Only rewrite layers if their generations differ. This guarantees:
915 0 : // - that local rewrite is safe, as local layer paths will differ between existing layer and rewritten one
916 0 : // - that the layer is persistent in remote storage, as we only see old-generation'd layer via loading from remote storage
917 0 : if layer.metadata().generation == self.generation {
918 0 : debug!(%layer, "Skipping rewrite, is not from old generation");
919 0 : continue;
920 0 : }
921 0 :
922 0 : if layers_to_rewrite.len() >= rewrite_max {
923 0 : tracing::info!(%layer, "Will rewrite layer on a future compaction, already rewrote {}",
924 0 : layers_to_rewrite.len()
925 : );
926 0 : continue;
927 0 : }
928 0 :
929 0 : // Fall through: all our conditions for doing a rewrite passed.
930 0 : layers_to_rewrite.push(layer);
931 : }
932 :
933 : // Drop read lock on layer map before we start doing time-consuming I/O
934 0 : drop(layers);
935 0 :
936 0 : let mut replace_image_layers = Vec::new();
937 :
938 0 : for layer in layers_to_rewrite {
939 0 : tracing::info!(layer=%layer, "Rewriting layer after shard split...");
940 0 : let mut image_layer_writer = ImageLayerWriter::new(
941 0 : self.conf,
942 0 : self.timeline_id,
943 0 : self.tenant_shard_id,
944 0 : &layer.layer_desc().key_range,
945 0 : layer.layer_desc().image_layer_lsn(),
946 0 : ctx,
947 0 : )
948 0 : .await
949 0 : .map_err(CompactionError::Other)?;
950 :
951 : // Safety of layer rewrites:
952 : // - We are writing to a different local file path than we are reading from, so the old Layer
953 : // cannot interfere with the new one.
954 : // - In the page cache, contents for a particular VirtualFile are stored with a file_id that
955 : // is different for two layers with the same name (in `ImageLayerInner::new` we always
956 : // acquire a fresh id from [`crate::page_cache::next_file_id`]. So readers do not risk
957 : // reading the index from one layer file, and then data blocks from the rewritten layer file.
958 : // - Any readers that have a reference to the old layer will keep it alive until they are done
959 : // with it. If they are trying to promote from remote storage, that will fail, but this is the same
960 : // as for compaction generally: compaction is allowed to delete layers that readers might be trying to use.
961 : // - We do not run concurrently with other kinds of compaction, so the only layer map writes we race with are:
962 : // - GC, which at worst witnesses us "undelete" a layer that they just deleted.
963 : // - ingestion, which only inserts layers, therefore cannot collide with us.
964 0 : let resident = layer.download_and_keep_resident().await?;
965 :
966 0 : let keys_written = resident
967 0 : .filter(&self.shard_identity, &mut image_layer_writer, ctx)
968 0 : .await?;
969 :
970 0 : if keys_written > 0 {
971 0 : let (desc, path) = image_layer_writer
972 0 : .finish(ctx)
973 0 : .await
974 0 : .map_err(CompactionError::Other)?;
975 0 : let new_layer = Layer::finish_creating(self.conf, self, desc, &path)
976 0 : .map_err(CompactionError::Other)?;
977 0 : tracing::info!(layer=%new_layer, "Rewrote layer, {} -> {} bytes",
978 0 : layer.metadata().file_size,
979 0 : new_layer.metadata().file_size);
980 :
981 0 : replace_image_layers.push((layer, new_layer));
982 0 : } else {
983 0 : // Drop the old layer. Usually for this case we would already have noticed that
984 0 : // the layer has no data for us with the ShardedRange check above, but
985 0 : drop_layers.push(layer);
986 0 : }
987 : }
988 :
989 : // At this point, we have replaced local layer files with their rewritten form, but not yet uploaded
990 : // metadata to reflect that. If we restart here, the replaced layer files will look invalid (size mismatch
991 : // to remote index) and be removed. This is inefficient but safe.
992 0 : fail::fail_point!("compact-shard-ancestors-localonly");
993 0 :
994 0 : // Update the LayerMap so that readers will use the new layers, and enqueue it for writing to remote storage
995 0 : self.rewrite_layers(replace_image_layers, drop_layers)
996 0 : .await?;
997 :
998 0 : fail::fail_point!("compact-shard-ancestors-enqueued");
999 0 :
1000 0 : // We wait for all uploads to complete before finishing this compaction stage. This is not
1001 0 : // necessary for correctness, but it simplifies testing, and avoids proceeding with another
1002 0 : // Timeline's compaction while this timeline's uploads may be generating lots of disk I/O
1003 0 : // load.
1004 0 : match self.remote_client.wait_completion().await {
1005 0 : Ok(()) => (),
1006 0 : Err(WaitCompletionError::NotInitialized(ni)) => return Err(CompactionError::from(ni)),
1007 : Err(WaitCompletionError::UploadQueueShutDownOrStopped) => {
1008 0 : return Err(CompactionError::ShuttingDown)
1009 : }
1010 : }
1011 :
1012 0 : fail::fail_point!("compact-shard-ancestors-persistent");
1013 0 :
1014 0 : Ok(())
1015 0 : }
1016 :
1017 : /// Update the LayerVisibilityHint of layers covered by image layers, based on whether there is
1018 : /// an image layer between them and the most recent readable LSN (branch point or tip of timeline). The
1019 : /// purpose of the visibility hint is to record which layers need to be available to service reads.
1020 : ///
1021 : /// The result may be used as an input to eviction and secondary downloads to de-prioritize layers
1022 : /// that we know won't be needed for reads.
1023 460 : pub(super) async fn update_layer_visibility(
1024 460 : &self,
1025 460 : ) -> Result<(), super::layer_manager::Shutdown> {
1026 460 : let head_lsn = self.get_last_record_lsn();
1027 :
1028 : // We will sweep through layers in reverse-LSN order. We only do historic layers. L0 deltas
1029 : // are implicitly left visible, because LayerVisibilityHint's default is Visible, and we never modify it here.
1030 : // Note that L0 deltas _can_ be covered by image layers, but we consider them 'visible' because we anticipate that
1031 : // they will be subject to L0->L1 compaction in the near future.
1032 460 : let layer_manager = self.layers.read().await;
1033 460 : let layer_map = layer_manager.layer_map()?;
1034 :
1035 460 : let readable_points = {
1036 460 : let children = self.gc_info.read().unwrap().retain_lsns.clone();
1037 460 :
1038 460 : let mut readable_points = Vec::with_capacity(children.len() + 1);
1039 460 : for (child_lsn, _child_timeline_id, is_offloaded) in &children {
1040 0 : if *is_offloaded == MaybeOffloaded::Yes {
1041 0 : continue;
1042 0 : }
1043 0 : readable_points.push(*child_lsn);
1044 : }
1045 460 : readable_points.push(head_lsn);
1046 460 : readable_points
1047 460 : };
1048 460 :
1049 460 : let (layer_visibility, covered) = layer_map.get_visibility(readable_points);
1050 1168 : for (layer_desc, visibility) in layer_visibility {
1051 708 : // FIXME: a more efficiency bulk zip() through the layers rather than NlogN getting each one
1052 708 : let layer = layer_manager.get_from_desc(&layer_desc);
1053 708 : layer.set_visibility(visibility);
1054 708 : }
1055 :
1056 : // TODO: publish our covered KeySpace to our parent, so that when they update their visibility, they can
1057 : // avoid assuming that everything at a branch point is visible.
1058 460 : drop(covered);
1059 460 : Ok(())
1060 460 : }
1061 :
1062 : /// Collect a bunch of Level 0 layer files, and compact and reshuffle them as
1063 : /// Level 1 files. Returns whether the L0 layers are fully compacted.
1064 728 : async fn compact_level0(
1065 728 : self: &Arc<Self>,
1066 728 : target_file_size: u64,
1067 728 : force_compaction_ignore_threshold: bool,
1068 728 : ctx: &RequestContext,
1069 728 : ) -> Result<CompactionOutcome, CompactionError> {
1070 : let CompactLevel0Phase1Result {
1071 728 : new_layers,
1072 728 : deltas_to_compact,
1073 728 : outcome,
1074 : } = {
1075 728 : let phase1_span = info_span!("compact_level0_phase1");
1076 728 : let ctx = ctx.attached_child();
1077 728 : let mut stats = CompactLevel0Phase1StatsBuilder {
1078 728 : version: Some(2),
1079 728 : tenant_id: Some(self.tenant_shard_id),
1080 728 : timeline_id: Some(self.timeline_id),
1081 728 : ..Default::default()
1082 728 : };
1083 728 :
1084 728 : let begin = tokio::time::Instant::now();
1085 728 : let phase1_layers_locked = self.layers.read().await;
1086 728 : let now = tokio::time::Instant::now();
1087 728 : stats.read_lock_acquisition_micros =
1088 728 : DurationRecorder::Recorded(RecordedDuration(now - begin), now);
1089 728 : self.compact_level0_phase1(
1090 728 : phase1_layers_locked,
1091 728 : stats,
1092 728 : target_file_size,
1093 728 : force_compaction_ignore_threshold,
1094 728 : &ctx,
1095 728 : )
1096 728 : .instrument(phase1_span)
1097 728 : .await?
1098 : };
1099 :
1100 728 : if new_layers.is_empty() && deltas_to_compact.is_empty() {
1101 : // nothing to do
1102 672 : return Ok(CompactionOutcome::Done);
1103 56 : }
1104 56 :
1105 56 : self.finish_compact_batch(&new_layers, &Vec::new(), &deltas_to_compact)
1106 56 : .await?;
1107 56 : Ok(outcome)
1108 728 : }
1109 :
1110 : /// First phase of Level 0 compaction, explained in the [`Self::compact_legacy`] comment.
1111 728 : async fn compact_level0_phase1<'a>(
1112 728 : self: &'a Arc<Self>,
1113 728 : guard: tokio::sync::RwLockReadGuard<'a, LayerManager>,
1114 728 : mut stats: CompactLevel0Phase1StatsBuilder,
1115 728 : target_file_size: u64,
1116 728 : force_compaction_ignore_threshold: bool,
1117 728 : ctx: &RequestContext,
1118 728 : ) -> Result<CompactLevel0Phase1Result, CompactionError> {
1119 728 : stats.read_lock_held_spawn_blocking_startup_micros =
1120 728 : stats.read_lock_acquisition_micros.till_now(); // set by caller
1121 728 : let layers = guard.layer_map()?;
1122 728 : let level0_deltas = layers.level0_deltas();
1123 728 : stats.level0_deltas_count = Some(level0_deltas.len());
1124 728 :
1125 728 : // Only compact if enough layers have accumulated.
1126 728 : let threshold = self.get_compaction_threshold();
1127 728 : if level0_deltas.is_empty() || level0_deltas.len() < threshold {
1128 672 : if force_compaction_ignore_threshold {
1129 0 : if !level0_deltas.is_empty() {
1130 0 : info!(
1131 0 : level0_deltas = level0_deltas.len(),
1132 0 : threshold, "too few deltas to compact, but forcing compaction"
1133 : );
1134 : } else {
1135 0 : info!(
1136 0 : level0_deltas = level0_deltas.len(),
1137 0 : threshold, "too few deltas to compact, cannot force compaction"
1138 : );
1139 0 : return Ok(CompactLevel0Phase1Result::default());
1140 : }
1141 : } else {
1142 672 : debug!(
1143 0 : level0_deltas = level0_deltas.len(),
1144 0 : threshold, "too few deltas to compact"
1145 : );
1146 672 : return Ok(CompactLevel0Phase1Result::default());
1147 : }
1148 56 : }
1149 :
1150 56 : let mut level0_deltas = level0_deltas
1151 56 : .iter()
1152 804 : .map(|x| guard.get_from_desc(x))
1153 56 : .collect::<Vec<_>>();
1154 56 :
1155 56 : // Gather the files to compact in this iteration.
1156 56 : //
1157 56 : // Start with the oldest Level 0 delta file, and collect any other
1158 56 : // level 0 files that form a contiguous sequence, such that the end
1159 56 : // LSN of the previous file matches the start LSN of the next file.
1160 56 : //
1161 56 : // Note that if the files don't form such a sequence, we might
1162 56 : // "compact" just a single file. That's a bit pointless, but it allows
1163 56 : // us to get rid of the level 0 file, and compact the other files on
1164 56 : // the next iteration. This could probably be made smarter, but such
1165 56 : // "gaps" in the sequence of level 0 files should only happen in case
1166 56 : // of a crash, partial download from cloud storage, or something like
1167 56 : // that, so it's not a big deal in practice.
1168 1496 : level0_deltas.sort_by_key(|l| l.layer_desc().lsn_range.start);
1169 56 : let mut level0_deltas_iter = level0_deltas.iter();
1170 56 :
1171 56 : let first_level0_delta = level0_deltas_iter.next().unwrap();
1172 56 : let mut prev_lsn_end = first_level0_delta.layer_desc().lsn_range.end;
1173 56 : let mut deltas_to_compact = Vec::with_capacity(level0_deltas.len());
1174 56 :
1175 56 : // Accumulate the size of layers in `deltas_to_compact`
1176 56 : let mut deltas_to_compact_bytes = 0;
1177 56 :
1178 56 : // Under normal circumstances, we will accumulate up to compaction_upper_limit L0s of size
1179 56 : // checkpoint_distance each. To avoid edge cases using extra system resources, bound our
1180 56 : // work in this function to only operate on this much delta data at once.
1181 56 : //
1182 56 : // In general, compaction_threshold should be <= compaction_upper_limit, but in case that
1183 56 : // the constraint is not respected, we use the larger of the two.
1184 56 : let delta_size_limit = std::cmp::max(
1185 56 : self.get_compaction_upper_limit(),
1186 56 : self.get_compaction_threshold(),
1187 56 : ) as u64
1188 56 : * std::cmp::max(self.get_checkpoint_distance(), DEFAULT_CHECKPOINT_DISTANCE);
1189 56 :
1190 56 : let mut fully_compacted = true;
1191 56 :
1192 56 : deltas_to_compact.push(first_level0_delta.download_and_keep_resident().await?);
1193 804 : for l in level0_deltas_iter {
1194 748 : let lsn_range = &l.layer_desc().lsn_range;
1195 748 :
1196 748 : if lsn_range.start != prev_lsn_end {
1197 0 : break;
1198 748 : }
1199 748 : deltas_to_compact.push(l.download_and_keep_resident().await?);
1200 748 : deltas_to_compact_bytes += l.metadata().file_size;
1201 748 : prev_lsn_end = lsn_range.end;
1202 748 :
1203 748 : if deltas_to_compact_bytes >= delta_size_limit {
1204 0 : info!(
1205 0 : l0_deltas_selected = deltas_to_compact.len(),
1206 0 : l0_deltas_total = level0_deltas.len(),
1207 0 : "L0 compaction picker hit max delta layer size limit: {}",
1208 : delta_size_limit
1209 : );
1210 0 : fully_compacted = false;
1211 0 :
1212 0 : // Proceed with compaction, but only a subset of L0s
1213 0 : break;
1214 748 : }
1215 : }
1216 56 : let lsn_range = Range {
1217 56 : start: deltas_to_compact
1218 56 : .first()
1219 56 : .unwrap()
1220 56 : .layer_desc()
1221 56 : .lsn_range
1222 56 : .start,
1223 56 : end: deltas_to_compact.last().unwrap().layer_desc().lsn_range.end,
1224 56 : };
1225 56 :
1226 56 : info!(
1227 0 : "Starting Level0 compaction in LSN range {}-{} for {} layers ({} deltas in total)",
1228 0 : lsn_range.start,
1229 0 : lsn_range.end,
1230 0 : deltas_to_compact.len(),
1231 0 : level0_deltas.len()
1232 : );
1233 :
1234 804 : for l in deltas_to_compact.iter() {
1235 804 : info!("compact includes {l}");
1236 : }
1237 :
1238 : // We don't need the original list of layers anymore. Drop it so that
1239 : // we don't accidentally use it later in the function.
1240 56 : drop(level0_deltas);
1241 56 :
1242 56 : stats.read_lock_held_prerequisites_micros = stats
1243 56 : .read_lock_held_spawn_blocking_startup_micros
1244 56 : .till_now();
1245 :
1246 : // TODO: replace with streaming k-merge
1247 56 : let all_keys = {
1248 56 : let mut all_keys = Vec::new();
1249 804 : for l in deltas_to_compact.iter() {
1250 804 : if self.cancel.is_cancelled() {
1251 0 : return Err(CompactionError::ShuttingDown);
1252 804 : }
1253 804 : let delta = l.get_as_delta(ctx).await.map_err(CompactionError::Other)?;
1254 804 : let keys = delta
1255 804 : .index_entries(ctx)
1256 804 : .await
1257 804 : .map_err(CompactionError::Other)?;
1258 804 : all_keys.extend(keys);
1259 : }
1260 : // The current stdlib sorting implementation is designed in a way where it is
1261 : // particularly fast where the slice is made up of sorted sub-ranges.
1262 8847538 : all_keys.sort_by_key(|DeltaEntry { key, lsn, .. }| (*key, *lsn));
1263 56 : all_keys
1264 56 : };
1265 56 :
1266 56 : stats.read_lock_held_key_sort_micros = stats.read_lock_held_prerequisites_micros.till_now();
1267 :
1268 : // Determine N largest holes where N is number of compacted layers. The vec is sorted by key range start.
1269 : //
1270 : // A hole is a key range for which this compaction doesn't have any WAL records.
1271 : // Our goal in this compaction iteration is to avoid creating L1s that, in terms of their key range,
1272 : // cover the hole, but actually don't contain any WAL records for that key range.
1273 : // The reason is that the mere stack of L1s (`count_deltas`) triggers image layer creation (`create_image_layers`).
1274 : // That image layer creation would be useless for a hole range covered by L1s that don't contain any WAL records.
1275 : //
1276 : // The algorithm chooses holes as follows.
1277 : // - Slide a two-key window over the keys in key order to get the hole range (= distance between two keys).
1278 : // - Filter: min threshold on range length
1279 : // - Rank: by coverage size (=number of image layers required to reconstruct each key in the range for which we have any data)
1280 : //
1281 : // For more details, intuition, and some ASCII art see https://github.com/neondatabase/neon/pull/3597#discussion_r1112704451
1282 : #[derive(PartialEq, Eq)]
1283 : struct Hole {
1284 : key_range: Range<Key>,
1285 : coverage_size: usize,
1286 : }
1287 56 : let holes: Vec<Hole> = {
1288 : use std::cmp::Ordering;
1289 : impl Ord for Hole {
1290 0 : fn cmp(&self, other: &Self) -> Ordering {
1291 0 : self.coverage_size.cmp(&other.coverage_size).reverse()
1292 0 : }
1293 : }
1294 : impl PartialOrd for Hole {
1295 0 : fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
1296 0 : Some(self.cmp(other))
1297 0 : }
1298 : }
1299 56 : let max_holes = deltas_to_compact.len();
1300 56 : let last_record_lsn = self.get_last_record_lsn();
1301 56 : let min_hole_range = (target_file_size / page_cache::PAGE_SZ as u64) as i128;
1302 56 : let min_hole_coverage_size = 3; // TODO: something more flexible?
1303 56 : // min-heap (reserve space for one more element added before eviction)
1304 56 : let mut heap: BinaryHeap<Hole> = BinaryHeap::with_capacity(max_holes + 1);
1305 56 : let mut prev: Option<Key> = None;
1306 :
1307 4128076 : for &DeltaEntry { key: next_key, .. } in all_keys.iter() {
1308 4128076 : if let Some(prev_key) = prev {
1309 : // Just a first fast filter: do not create hole entries for metadata keys. The last hole in the
1310 : // compaction is the gap between the data keys and the metadata keys.
1311 4128020 : if next_key.to_i128() - prev_key.to_i128() >= min_hole_range
1312 0 : && !Key::is_metadata_key(&prev_key)
1313 : {
1314 0 : let key_range = prev_key..next_key;
1315 0 : // Measuring a hole by simply subtracting the i128 representations of the key range boundaries
1316 0 : // does not make much sense, because the largest holes would correspond to field1/field2 changes.
1317 0 : // But we are mostly interested in eliminating holes which cause the generation of excessive image layers.
1318 0 : // That is why it is better to measure the size of a hole as the number of covering image layers.
1319 0 : let coverage_size =
1320 0 : layers.image_coverage(&key_range, last_record_lsn).len();
1321 0 : if coverage_size >= min_hole_coverage_size {
1322 0 : heap.push(Hole {
1323 0 : key_range,
1324 0 : coverage_size,
1325 0 : });
1326 0 : if heap.len() > max_holes {
1327 0 : heap.pop(); // remove smallest hole
1328 0 : }
1329 0 : }
1330 4128020 : }
1331 56 : }
1332 4128076 : prev = Some(next_key.next());
1333 : }
1334 56 : let mut holes = heap.into_vec();
1335 56 : holes.sort_unstable_by_key(|hole| hole.key_range.start);
1336 56 : holes
1337 56 : };
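// Worked example (editor addition, illustrative): assuming an 8 KiB page_cache::PAGE_SZ
// and a 128 MiB target_file_size, min_hole_range is 16384 keys. A gap between two
// consecutive keys in `all_keys` is recorded as a Hole only if it spans at least that
// many keys and at least min_hole_coverage_size (3) image layers cover it at
// last_record_lsn; the heap then keeps only the max_holes largest holes by coverage.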
1338 56 : stats.read_lock_held_compute_holes_micros = stats.read_lock_held_key_sort_micros.till_now();
1339 56 : drop_rlock(guard);
1340 56 :
1341 56 : if self.cancel.is_cancelled() {
1342 0 : return Err(CompactionError::ShuttingDown);
1343 56 : }
1344 56 :
1345 56 : stats.read_lock_drop_micros = stats.read_lock_held_compute_holes_micros.till_now();
1346 :
1347 : // This iterator walks through all key-value pairs from all the layers
1348 : // we're compacting, in key, LSN order.
1349 : // If there's both a Value::Image and Value::WalRecord for the same (key,lsn),
1350 : // then the Value::Image is ordered before Value::WalRecord.
1351 56 : let mut all_values_iter = {
1352 56 : let mut deltas = Vec::with_capacity(deltas_to_compact.len());
1353 804 : for l in deltas_to_compact.iter() {
1354 804 : let l = l.get_as_delta(ctx).await.map_err(CompactionError::Other)?;
1355 804 : deltas.push(l);
1356 : }
1357 56 : MergeIterator::create(&deltas, &[], ctx)
1358 56 : };
1359 56 :
1360 56 : // This iterator walks through all keys and is needed to calculate size used by each key
1361 56 : let mut all_keys_iter = all_keys
1362 56 : .iter()
1363 4128076 : .map(|DeltaEntry { key, lsn, size, .. }| (*key, *lsn, *size))
1364 4128020 : .coalesce(|mut prev, cur| {
1365 4128020 : // Coalesce keys that belong to the same key pair.
1366 4128020 : // This ensures that compaction doesn't put them
1367 4128020 : // into different layer files.
1368 4128020 : // Still limit this by the target file size,
1369 4128020 : // so that we keep the size of the files in
1370 4128020 : // check.
1371 4128020 : if prev.0 == cur.0 && prev.2 < target_file_size {
1372 80076 : prev.2 += cur.2;
1373 80076 : Ok(prev)
1374 : } else {
1375 4047944 : Err((prev, cur))
1376 : }
1377 4128020 : });
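// A tiny, self-contained illustration of the coalesce step above, using made-up
// (key, size) pairs instead of DeltaEntry: same-key neighbours are merged until the
// accumulated size reaches a cap, mirroring the target_file_size bound.
//
//     use itertools::Itertools;
//
//     let entries = vec![("a", 3u64), ("a", 3), ("a", 3), ("b", 1)];
//     let cap = 5u64;
//     let merged: Vec<_> = entries
//         .into_iter()
//         .coalesce(|mut prev, cur| {
//             if prev.0 == cur.0 && prev.1 < cap {
//                 prev.1 += cur.1;
//                 Ok(prev) // keep accumulating into `prev`
//             } else {
//                 Err((prev, cur)) // emit `prev`, continue from `cur`
//             }
//         })
//         .collect();
//     assert_eq!(merged, vec![("a", 6), ("a", 3), ("b", 1)]);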
1378 56 :
1379 56 : // Merge the contents of all the input delta layers into a new set
1380 56 : // of delta layers, based on the current partitioning.
1381 56 : //
1382 56 : // We split the new delta layers on the key dimension. We iterate through the key space and, for each key, check whether adding the next key to the output layer we are currently building would make the layer too large. If so, we flush the current output layer and start a new one.
1383 56 : // It's possible that there is a single key with so many page versions that storing all of them in a single layer file
1384 56 : // would be too large. In that case, we also split on the LSN dimension.
1385 56 : //
1386 56 : // LSN
1387 56 : // ^
1388 56 : // |
1389 56 : // | +-----------+ +--+--+--+--+
1390 56 : // | | | | | | | |
1391 56 : // | +-----------+ | | | | |
1392 56 : // | | | | | | | |
1393 56 : // | +-----------+ ==> | | | | |
1394 56 : // | | | | | | | |
1395 56 : // | +-----------+ | | | | |
1396 56 : // | | | | | | | |
1397 56 : // | +-----------+ +--+--+--+--+
1398 56 : // |
1399 56 : // +--------------> key
1400 56 : //
1401 56 : //
1402 56 : // If one key (X) has a lot of page versions:
1403 56 : //
1404 56 : // LSN
1405 56 : // ^
1406 56 : // | (X)
1407 56 : // | +-----------+ +--+--+--+--+
1408 56 : // | | | | | | | |
1409 56 : // | +-----------+ | | +--+ |
1410 56 : // | | | | | | | |
1411 56 : // | +-----------+ ==> | | | | |
1412 56 : // | | | | | +--+ |
1413 56 : // | +-----------+ | | | | |
1414 56 : // | | | | | | | |
1415 56 : // | +-----------+ +--+--+--+--+
1416 56 : // |
1417 56 : // +--------------> key
1418 56 : // TODO: this actually divides the layers into fixed-size chunks, not
1419 56 : // based on the partitioning.
1420 56 : //
1421 56 : // TODO: we should also opportunistically materialize and
1422 56 : // garbage collect what we can.
1423 56 : let mut new_layers = Vec::new();
1424 56 : let mut prev_key: Option<Key> = None;
1425 56 : let mut writer: Option<DeltaLayerWriter> = None;
1426 56 : let mut key_values_total_size = 0u64;
1427 56 : let mut dup_start_lsn: Lsn = Lsn::INVALID; // start LSN of layer containing values of the single key
1428 56 : let mut dup_end_lsn: Lsn = Lsn::INVALID; // end LSN of layer containing values of the single key
1429 56 : let mut next_hole = 0; // index of next hole in holes vector
1430 56 :
1431 56 : let mut keys = 0;
1432 :
1433 4128132 : while let Some((key, lsn, value)) = all_values_iter
1434 4128132 : .next()
1435 4128132 : .await
1436 4128132 : .map_err(CompactionError::Other)?
1437 : {
1438 4128076 : keys += 1;
1439 4128076 :
1440 4128076 : if keys % 32_768 == 0 && self.cancel.is_cancelled() {
1441 : // Avoid hitting the cancellation token on every key. In benches, we end up
1442 : // shuffling on the order of a million keys per layer, so this means we'll check it
1443 : // around tens of times per layer.
1444 0 : return Err(CompactionError::ShuttingDown);
1445 4128076 : }
1446 4128076 :
1447 4128076 : let same_key = prev_key == Some(key);
1448 4128076 : // We need to check key boundaries once we reach the next key or the end of a layer with the same key
1449 4128076 : if !same_key || lsn == dup_end_lsn {
1450 4048000 : let mut next_key_size = 0u64;
1451 4048000 : let is_dup_layer = dup_end_lsn.is_valid();
1452 4048000 : dup_start_lsn = Lsn::INVALID;
1453 4048000 : if !same_key {
1454 4048000 : dup_end_lsn = Lsn::INVALID;
1455 4048000 : }
1456 : // Determine the size occupied by this key. We stop at the next key or when the size becomes larger than target_file_size.
1457 4048000 : for (next_key, next_lsn, next_size) in all_keys_iter.by_ref() {
1458 4048000 : next_key_size = next_size;
1459 4048000 : if key != next_key {
1460 4047944 : if dup_end_lsn.is_valid() {
1461 0 : // We are writing a segment with duplicates:
1462 0 : // place all remaining values of this key in a separate segment
1463 0 : dup_start_lsn = dup_end_lsn; // the new segment starts where the old one stops
1464 0 : dup_end_lsn = lsn_range.end; // there are no more values of this key until the end of the LSN range
1465 4047944 : }
1466 4047944 : break;
1467 56 : }
1468 56 : key_values_total_size += next_size;
1469 56 : // Check if it is time to split the segment: when the total key size becomes larger than the target file size.
1470 56 : // We need to avoid generating empty segments if next_size > target_file_size.
1471 56 : if key_values_total_size > target_file_size && lsn != next_lsn {
1472 : // Split the key across multiple layers: such a layer can contain only a single key
1473 0 : dup_start_lsn = if dup_end_lsn.is_valid() {
1474 0 : dup_end_lsn // the new segment with duplicates starts where the old one stops
1475 : } else {
1476 0 : lsn // start with the first LSN for this key
1477 : };
1478 0 : dup_end_lsn = next_lsn; // upper LSN boundary is exclusive
1479 0 : break;
1480 56 : }
1481 : }
1482 : // Handle the case when the loop reaches the last key: in this case dup_end is valid but dup_start is not set.
1483 4048000 : if dup_end_lsn.is_valid() && !dup_start_lsn.is_valid() {
1484 0 : dup_start_lsn = dup_end_lsn;
1485 0 : dup_end_lsn = lsn_range.end;
1486 4048000 : }
1487 4048000 : if writer.is_some() {
1488 4047944 : let written_size = writer.as_mut().unwrap().size();
1489 4047944 : let contains_hole =
1490 4047944 : next_hole < holes.len() && key >= holes[next_hole].key_range.end;
1491 : // Check if the key causes the layer to overflow, or if we have crossed a hole...
1492 4047944 : if is_dup_layer
1493 4047944 : || dup_end_lsn.is_valid()
1494 4047944 : || written_size + key_values_total_size > target_file_size
1495 4047384 : || contains_hole
1496 : {
1497 : // ... if so, flush previous layer and prepare to write new one
1498 560 : let (desc, path) = writer
1499 560 : .take()
1500 560 : .unwrap()
1501 560 : .finish(prev_key.unwrap().next(), ctx)
1502 560 : .await
1503 560 : .map_err(CompactionError::Other)?;
1504 560 : let new_delta = Layer::finish_creating(self.conf, self, desc, &path)
1505 560 : .map_err(CompactionError::Other)?;
1506 :
1507 560 : new_layers.push(new_delta);
1508 560 : writer = None;
1509 560 :
1510 560 : if contains_hole {
1511 0 : // skip hole
1512 0 : next_hole += 1;
1513 560 : }
1514 4047384 : }
1515 56 : }
1516 : // Remember the size of the key value because at the next iteration we will access the next item
1517 4048000 : key_values_total_size = next_key_size;
1518 80076 : }
1519 4128076 : fail_point!("delta-layer-writer-fail-before-finish", |_| {
1520 0 : Err(CompactionError::Other(anyhow::anyhow!(
1521 0 : "failpoint delta-layer-writer-fail-before-finish"
1522 0 : )))
1523 4128076 : });
1524 :
1525 4128076 : if !self.shard_identity.is_key_disposable(&key) {
1526 4128076 : if writer.is_none() {
1527 616 : if self.cancel.is_cancelled() {
1528 : // to be somewhat responsive to cancellation, check for each new layer
1529 0 : return Err(CompactionError::ShuttingDown);
1530 616 : }
1531 : // Create the writer if it is not initialized yet
1532 616 : writer = Some(
1533 : DeltaLayerWriter::new(
1534 616 : self.conf,
1535 616 : self.timeline_id,
1536 616 : self.tenant_shard_id,
1537 616 : key,
1538 616 : if dup_end_lsn.is_valid() {
1539 : // this is a layer containing slice of values of the same key
1540 0 : debug!("Create new dup layer {}..{}", dup_start_lsn, dup_end_lsn);
1541 0 : dup_start_lsn..dup_end_lsn
1542 : } else {
1543 616 : debug!("Create new layer {}..{}", lsn_range.start, lsn_range.end);
1544 616 : lsn_range.clone()
1545 : },
1546 616 : ctx,
1547 616 : )
1548 616 : .await
1549 616 : .map_err(CompactionError::Other)?,
1550 : );
1551 :
1552 616 : keys = 0;
1553 4127460 : }
1554 :
1555 4128076 : writer
1556 4128076 : .as_mut()
1557 4128076 : .unwrap()
1558 4128076 : .put_value(key, lsn, value, ctx)
1559 4128076 : .await
1560 4128076 : .map_err(CompactionError::Other)?;
1561 : } else {
1562 0 : let owner = self.shard_identity.get_shard_number(&key);
1563 0 :
1564 0 : // This happens after a shard split, when we're compacting an L0 created by our parent shard
1565 0 : debug!("dropping key {key} during compaction (it belongs on shard {owner})");
1566 : }
1567 :
1568 4128076 : if !new_layers.is_empty() {
1569 39572 : fail_point!("after-timeline-compacted-first-L1");
1570 4088504 : }
1571 :
1572 4128076 : prev_key = Some(key);
1573 : }
1574 56 : if let Some(writer) = writer {
1575 56 : let (desc, path) = writer
1576 56 : .finish(prev_key.unwrap().next(), ctx)
1577 56 : .await
1578 56 : .map_err(CompactionError::Other)?;
1579 56 : let new_delta = Layer::finish_creating(self.conf, self, desc, &path)
1580 56 : .map_err(CompactionError::Other)?;
1581 56 : new_layers.push(new_delta);
1582 0 : }
1583 :
1584 : // Sync layers
1585 56 : if !new_layers.is_empty() {
1586 : // Print a warning if the created layer is larger than double the target size
1587 : // Add two pages for potential overhead. This should in theory be already
1588 : // accounted for in the target calculation, but for very small targets,
1589 : // we still might easily hit the limit otherwise.
1590 56 : let warn_limit = target_file_size * 2 + page_cache::PAGE_SZ as u64 * 2;
1591 616 : for layer in new_layers.iter() {
1592 616 : if layer.layer_desc().file_size > warn_limit {
1593 0 : warn!(
1594 : %layer,
1595 0 : "created delta file of size {} larger than double of target of {target_file_size}", layer.layer_desc().file_size
1596 : );
1597 616 : }
1598 : }
1599 :
1600 : // The writer.finish() above already did the fsync of the inodes.
1601 : // We just need to fsync the directory in which these inodes are linked,
1602 : // which we know to be the timeline directory.
1603 : //
1604 : // We use fatal_err() below because, after writer.finish() returns with success,
1605 : // the in-memory state of the filesystem already has the layer file in its final place,
1606 : // and subsequent pageserver code could think it's durable while it really isn't.
1607 56 : let timeline_dir = VirtualFile::open(
1608 56 : &self
1609 56 : .conf
1610 56 : .timeline_path(&self.tenant_shard_id, &self.timeline_id),
1611 56 : ctx,
1612 56 : )
1613 56 : .await
1614 56 : .fatal_err("VirtualFile::open for timeline dir fsync");
1615 56 : timeline_dir
1616 56 : .sync_all()
1617 56 : .await
1618 56 : .fatal_err("VirtualFile::sync_all timeline dir");
1619 0 : }
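// The same durability rule expressed with plain std::fs (a generic sketch, not how
// VirtualFile works internally; assumes Linux and an enclosing fn returning io::Result):
// creating a file adds a directory entry, so both the file and its parent directory
// must be fsynced before the link can be considered durable.
//
//     use std::fs::File;
//     use std::io::Write;
//
//     let mut f = File::create("/some/dir/newfile")?;
//     f.write_all(b"data")?;
//     f.sync_all()?;                        // flush the file's data and metadata
//     File::open("/some/dir")?.sync_all()?; // flush the directory entry itself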
1620 :
1621 56 : stats.write_layer_files_micros = stats.read_lock_drop_micros.till_now();
1622 56 : stats.new_deltas_count = Some(new_layers.len());
1623 616 : stats.new_deltas_size = Some(new_layers.iter().map(|l| l.layer_desc().file_size).sum());
1624 56 :
1625 56 : match TryInto::<CompactLevel0Phase1Stats>::try_into(stats)
1626 56 : .and_then(|stats| serde_json::to_string(&stats).context("serde_json::to_string"))
1627 : {
1628 56 : Ok(stats_json) => {
1629 56 : info!(
1630 0 : stats_json = stats_json.as_str(),
1631 0 : "compact_level0_phase1 stats available"
1632 : )
1633 : }
1634 0 : Err(e) => {
1635 0 : warn!("compact_level0_phase1 stats failed to serialize: {:#}", e);
1636 : }
1637 : }
1638 :
1639 : // Without this, rustc complains about deltas_to_compact still
1640 : // being borrowed when we `.into_iter()` below.
1641 56 : drop(all_values_iter);
1642 56 :
1643 56 : Ok(CompactLevel0Phase1Result {
1644 56 : new_layers,
1645 56 : deltas_to_compact: deltas_to_compact
1646 56 : .into_iter()
1647 804 : .map(|x| x.drop_eviction_guard())
1648 56 : .collect::<Vec<_>>(),
1649 56 : outcome: if fully_compacted {
1650 56 : CompactionOutcome::Done
1651 : } else {
1652 0 : CompactionOutcome::Pending
1653 : },
1654 : })
1655 728 : }
1656 : }
1657 :
1658 : #[derive(Default)]
1659 : struct CompactLevel0Phase1Result {
1660 : new_layers: Vec<ResidentLayer>,
1661 : deltas_to_compact: Vec<Layer>,
1662 : // Whether we have included all L0 layers, or selected only part of them due to the
1663 : // L0 compaction size limit.
1664 : outcome: CompactionOutcome,
1665 : }
1666 :
1667 : #[derive(Default)]
1668 : struct CompactLevel0Phase1StatsBuilder {
1669 : version: Option<u64>,
1670 : tenant_id: Option<TenantShardId>,
1671 : timeline_id: Option<TimelineId>,
1672 : read_lock_acquisition_micros: DurationRecorder,
1673 : read_lock_held_spawn_blocking_startup_micros: DurationRecorder,
1674 : read_lock_held_key_sort_micros: DurationRecorder,
1675 : read_lock_held_prerequisites_micros: DurationRecorder,
1676 : read_lock_held_compute_holes_micros: DurationRecorder,
1677 : read_lock_drop_micros: DurationRecorder,
1678 : write_layer_files_micros: DurationRecorder,
1679 : level0_deltas_count: Option<usize>,
1680 : new_deltas_count: Option<usize>,
1681 : new_deltas_size: Option<u64>,
1682 : }
1683 :
1684 : #[derive(serde::Serialize)]
1685 : struct CompactLevel0Phase1Stats {
1686 : version: u64,
1687 : tenant_id: TenantShardId,
1688 : timeline_id: TimelineId,
1689 : read_lock_acquisition_micros: RecordedDuration,
1690 : read_lock_held_spawn_blocking_startup_micros: RecordedDuration,
1691 : read_lock_held_key_sort_micros: RecordedDuration,
1692 : read_lock_held_prerequisites_micros: RecordedDuration,
1693 : read_lock_held_compute_holes_micros: RecordedDuration,
1694 : read_lock_drop_micros: RecordedDuration,
1695 : write_layer_files_micros: RecordedDuration,
1696 : level0_deltas_count: usize,
1697 : new_deltas_count: usize,
1698 : new_deltas_size: u64,
1699 : }
1700 :
1701 : impl TryFrom<CompactLevel0Phase1StatsBuilder> for CompactLevel0Phase1Stats {
1702 : type Error = anyhow::Error;
1703 :
1704 56 : fn try_from(value: CompactLevel0Phase1StatsBuilder) -> Result<Self, Self::Error> {
1705 56 : Ok(Self {
1706 56 : version: value.version.ok_or_else(|| anyhow!("version not set"))?,
1707 56 : tenant_id: value
1708 56 : .tenant_id
1709 56 : .ok_or_else(|| anyhow!("tenant_id not set"))?,
1710 56 : timeline_id: value
1711 56 : .timeline_id
1712 56 : .ok_or_else(|| anyhow!("timeline_id not set"))?,
1713 56 : read_lock_acquisition_micros: value
1714 56 : .read_lock_acquisition_micros
1715 56 : .into_recorded()
1716 56 : .ok_or_else(|| anyhow!("read_lock_acquisition_micros not set"))?,
1717 56 : read_lock_held_spawn_blocking_startup_micros: value
1718 56 : .read_lock_held_spawn_blocking_startup_micros
1719 56 : .into_recorded()
1720 56 : .ok_or_else(|| anyhow!("read_lock_held_spawn_blocking_startup_micros not set"))?,
1721 56 : read_lock_held_key_sort_micros: value
1722 56 : .read_lock_held_key_sort_micros
1723 56 : .into_recorded()
1724 56 : .ok_or_else(|| anyhow!("read_lock_held_key_sort_micros not set"))?,
1725 56 : read_lock_held_prerequisites_micros: value
1726 56 : .read_lock_held_prerequisites_micros
1727 56 : .into_recorded()
1728 56 : .ok_or_else(|| anyhow!("read_lock_held_prerequisites_micros not set"))?,
1729 56 : read_lock_held_compute_holes_micros: value
1730 56 : .read_lock_held_compute_holes_micros
1731 56 : .into_recorded()
1732 56 : .ok_or_else(|| anyhow!("read_lock_held_compute_holes_micros not set"))?,
1733 56 : read_lock_drop_micros: value
1734 56 : .read_lock_drop_micros
1735 56 : .into_recorded()
1736 56 : .ok_or_else(|| anyhow!("read_lock_drop_micros not set"))?,
1737 56 : write_layer_files_micros: value
1738 56 : .write_layer_files_micros
1739 56 : .into_recorded()
1740 56 : .ok_or_else(|| anyhow!("write_layer_files_micros not set"))?,
1741 56 : level0_deltas_count: value
1742 56 : .level0_deltas_count
1743 56 : .ok_or_else(|| anyhow!("level0_deltas_count not set"))?,
1744 56 : new_deltas_count: value
1745 56 : .new_deltas_count
1746 56 : .ok_or_else(|| anyhow!("new_deltas_count not set"))?,
1747 56 : new_deltas_size: value
1748 56 : .new_deltas_size
1749 56 : .ok_or_else(|| anyhow!("new_deltas_size not set"))?,
1750 : })
1751 56 : }
1752 : }
1753 :
1754 : impl Timeline {
1755 : /// Entry point for new tiered compaction algorithm.
1756 : ///
1757 : /// All the real work is in the implementation in the pageserver_compaction
1758 : /// crate. The code here would apply to any algorithm implemented by the
1759 : /// same interface, but tiered is the only one at the moment.
1760 : ///
1761 : /// TODO: cancellation
1762 0 : pub(crate) async fn compact_tiered(
1763 0 : self: &Arc<Self>,
1764 0 : _cancel: &CancellationToken,
1765 0 : ctx: &RequestContext,
1766 0 : ) -> Result<(), CompactionError> {
1767 0 : let fanout = self.get_compaction_threshold() as u64;
1768 0 : let target_file_size = self.get_checkpoint_distance();
1769 :
1770 : // Find the top of the historical layers
1771 0 : let end_lsn = {
1772 0 : let guard = self.layers.read().await;
1773 0 : let layers = guard.layer_map()?;
1774 :
1775 0 : let l0_deltas = layers.level0_deltas();
1776 0 :
1777 0 : // As an optimization, if we find that there are too few L0 layers,
1778 0 : // bail out early. We know that the compaction algorithm would do
1779 0 : // nothing in that case.
1780 0 : if l0_deltas.len() < fanout as usize {
1781 : // doesn't need compacting
1782 0 : return Ok(());
1783 0 : }
1784 0 : l0_deltas.iter().map(|l| l.lsn_range.end).max().unwrap()
1785 0 : };
1786 0 :
1787 0 : // Is the timeline being deleted?
1788 0 : if self.is_stopping() {
1789 0 : trace!("Dropping out of compaction on timeline shutdown");
1790 0 : return Err(CompactionError::ShuttingDown);
1791 0 : }
1792 :
1793 0 : let (dense_ks, _sparse_ks) = self.collect_keyspace(end_lsn, ctx).await?;
1794 : // TODO(chi): ignore sparse_keyspace for now, compact it in the future.
1795 0 : let mut adaptor = TimelineAdaptor::new(self, (end_lsn, dense_ks));
1796 0 :
1797 0 : pageserver_compaction::compact_tiered::compact_tiered(
1798 0 : &mut adaptor,
1799 0 : end_lsn,
1800 0 : target_file_size,
1801 0 : fanout,
1802 0 : ctx,
1803 0 : )
1804 0 : .await
1805 : // TODO: compact_tiered needs to return CompactionError
1806 0 : .map_err(CompactionError::Other)?;
1807 :
1808 0 : adaptor.flush_updates().await?;
1809 0 : Ok(())
1810 0 : }
1811 :
1812 : /// Take a list of images and deltas, produce images and deltas according to GC horizon and retain_lsns.
1813 : ///
1814 : /// It takes a key, the values of the key within the compaction process, a GC horizon, and all retain_lsns below the horizon.
1815 : /// For now, it requires the `accumulated_values` contains the full history of the key (i.e., the key with the lowest LSN is
1816 : /// an image or a WAL not requiring a base image). This restriction will be removed once we implement gc-compaction on branch.
1817 : ///
1818 : /// The function returns the deltas and the base image that need to be placed at each of the retain LSN. For example, we have:
1819 : ///
1820 : /// A@0x10, +B@0x20, +C@0x30, +D@0x40, +E@0x50, +F@0x60
1821 : /// horizon = 0x50, retain_lsn = 0x20, 0x40, delta_threshold=3
1822 : ///
1823 : /// The function will produce:
1824 : ///
1825 : /// ```plain
1826 : /// 0x20(retain_lsn) -> img=AB@0x20 always produce a single image below the lowest retain LSN
1827 : /// 0x40(retain_lsn) -> deltas=[+C@0x30, +D@0x40] two deltas since the last base image, keeping the deltas
1828 : /// 0x50(horizon) -> deltas=[ABCDE@0x50] three deltas since the last base image, generate an image but put it in the delta
1829 : /// above_horizon -> deltas=[+F@0x60] full history above the horizon
1830 : /// ```
1831 : ///
1832 : /// Note that `accumulated_values` must be sorted by LSN and should belong to a single key.
1833 1260 : pub(crate) async fn generate_key_retention(
1834 1260 : self: &Arc<Timeline>,
1835 1260 : key: Key,
1836 1260 : full_history: &[(Key, Lsn, Value)],
1837 1260 : horizon: Lsn,
1838 1260 : retain_lsn_below_horizon: &[Lsn],
1839 1260 : delta_threshold_cnt: usize,
1840 1260 : base_img_from_ancestor: Option<(Key, Lsn, Bytes)>,
1841 1260 : ) -> anyhow::Result<KeyHistoryRetention> {
1842 : // Pre-checks for the invariants
1843 :
1844 1260 : let debug_mode = cfg!(debug_assertions) || cfg!(feature = "testing");
1845 :
1846 1260 : if debug_mode {
1847 3060 : for (log_key, _, _) in full_history {
1848 1800 : assert_eq!(log_key, &key, "mismatched key");
1849 : }
1850 1260 : for i in 1..full_history.len() {
1851 540 : assert!(full_history[i - 1].1 <= full_history[i].1, "unordered LSN");
1852 540 : if full_history[i - 1].1 == full_history[i].1 {
1853 0 : assert!(
1854 0 : matches!(full_history[i - 1].2, Value::Image(_)),
1855 0 : "unordered delta/image, or duplicated delta"
1856 : );
1857 540 : }
1858 : }
1859 : // There used to be an assertion for the no-base-image case that checked whether the first
1860 : // record in the history is `will_init`, but it was removed.
1861 : // This is explained in the test cases for generate_key_retention.
1862 : // Search "incomplete history" for more information.
1863 2820 : for lsn in retain_lsn_below_horizon {
1864 1560 : assert!(lsn < &horizon, "retain lsn must be below horizon")
1865 : }
1866 1260 : for i in 1..retain_lsn_below_horizon.len() {
1867 712 : assert!(
1868 712 : retain_lsn_below_horizon[i - 1] <= retain_lsn_below_horizon[i],
1869 0 : "unordered LSN"
1870 : );
1871 : }
1872 0 : }
1873 1260 : let has_ancestor = base_img_from_ancestor.is_some();
1874 : // Step 1: split history into len(retain_lsn_below_horizon) + 2 buckets, where the last bucket is for all deltas above the horizon,
1875 : // and the second-to-last bucket is for the horizon. Each bucket contains lsn_last_bucket < deltas <= lsn_this_bucket.
1876 1260 : let (mut split_history, lsn_split_points) = {
1877 1260 : let mut split_history = Vec::new();
1878 1260 : split_history.resize_with(retain_lsn_below_horizon.len() + 2, Vec::new);
1879 1260 : let mut lsn_split_points = Vec::with_capacity(retain_lsn_below_horizon.len() + 1);
1880 2820 : for lsn in retain_lsn_below_horizon {
1881 1560 : lsn_split_points.push(*lsn);
1882 1560 : }
1883 1260 : lsn_split_points.push(horizon);
1884 1260 : let mut current_idx = 0;
1885 3060 : for item @ (_, lsn, _) in full_history {
1886 2288 : while current_idx < lsn_split_points.len() && *lsn > lsn_split_points[current_idx] {
1887 488 : current_idx += 1;
1888 488 : }
1889 1800 : split_history[current_idx].push(item);
1890 : }
1891 1260 : (split_history, lsn_split_points)
1892 : };
1893 : // Step 2: filter out duplicated records due to the k-merge of image/delta layers
1894 5340 : for split_for_lsn in &mut split_history {
1895 4080 : let mut prev_lsn = None;
1896 4080 : let mut new_split_for_lsn = Vec::with_capacity(split_for_lsn.len());
1897 4080 : for record @ (_, lsn, _) in std::mem::take(split_for_lsn) {
1898 1800 : if let Some(prev_lsn) = &prev_lsn {
1899 236 : if *prev_lsn == lsn {
1900 : // The case that we have an LSN with both data from the delta layer and the image layer. As
1901 : // `ValueWrapper` ensures that an image is ordered before a delta at the same LSN, we simply
1902 : // drop this delta and keep the image.
1903 : //
1904 : // For example, we have delta layer key1@0x10, key1@0x20, and image layer key1@0x10, we will
1905 : // keep the image for key1@0x10 and the delta for key1@0x20. key1@0x10 delta will be simply
1906 : // dropped.
1907 : //
1908 : // TODO: in case we have both delta + images for a given LSN and it does not exceed the delta
1909 : // threshold, we could have kept delta instead to save space. This is an optimization for the future.
1910 0 : continue;
1911 236 : }
1912 1564 : }
1913 1800 : prev_lsn = Some(lsn);
1914 1800 : new_split_for_lsn.push(record);
1915 : }
1916 4080 : *split_for_lsn = new_split_for_lsn;
1917 : }
1918 : // Step 3: generate images when necessary
1919 1260 : let mut retention = Vec::with_capacity(split_history.len());
1920 1260 : let mut records_since_last_image = 0;
1921 1260 : let batch_cnt = split_history.len();
1922 1260 : assert!(
1923 1260 : batch_cnt >= 2,
1924 0 : "should have at least below + above horizon batches"
1925 : );
1926 1260 : let mut replay_history: Vec<(Key, Lsn, Value)> = Vec::new();
1927 1260 : if let Some((key, lsn, img)) = base_img_from_ancestor {
1928 84 : replay_history.push((key, lsn, Value::Image(img)));
1929 1176 : }
1930 :
1931 : /// Generate debug information for the replay history
1932 0 : fn generate_history_trace(replay_history: &[(Key, Lsn, Value)]) -> String {
1933 : use std::fmt::Write;
1934 0 : let mut output = String::new();
1935 0 : if let Some((key, _, _)) = replay_history.first() {
1936 0 : write!(output, "key={} ", key).unwrap();
1937 0 : let mut cnt = 0;
1938 0 : for (_, lsn, val) in replay_history {
1939 0 : if val.is_image() {
1940 0 : write!(output, "i@{} ", lsn).unwrap();
1941 0 : } else if val.will_init() {
1942 0 : write!(output, "di@{} ", lsn).unwrap();
1943 0 : } else {
1944 0 : write!(output, "d@{} ", lsn).unwrap();
1945 0 : }
1946 0 : cnt += 1;
1947 0 : if cnt >= 128 {
1948 0 : write!(output, "... and more").unwrap();
1949 0 : break;
1950 0 : }
1951 : }
1952 0 : } else {
1953 0 : write!(output, "<no history>").unwrap();
1954 0 : }
1955 0 : output
1956 0 : }
1957 :
1958 0 : fn generate_debug_trace(
1959 0 : replay_history: Option<&[(Key, Lsn, Value)]>,
1960 0 : full_history: &[(Key, Lsn, Value)],
1961 0 : lsns: &[Lsn],
1962 0 : horizon: Lsn,
1963 0 : ) -> String {
1964 : use std::fmt::Write;
1965 0 : let mut output = String::new();
1966 0 : if let Some(replay_history) = replay_history {
1967 0 : writeln!(
1968 0 : output,
1969 0 : "replay_history: {}",
1970 0 : generate_history_trace(replay_history)
1971 0 : )
1972 0 : .unwrap();
1973 0 : } else {
1974 0 : writeln!(output, "replay_history: <disabled>",).unwrap();
1975 0 : }
1976 0 : writeln!(
1977 0 : output,
1978 0 : "full_history: {}",
1979 0 : generate_history_trace(full_history)
1980 0 : )
1981 0 : .unwrap();
1982 0 : writeln!(
1983 0 : output,
1984 0 : "when processing: [{}] horizon={}",
1985 0 : lsns.iter().map(|l| format!("{l}")).join(","),
1986 0 : horizon
1987 0 : )
1988 0 : .unwrap();
1989 0 : output
1990 0 : }
1991 :
1992 1260 : let mut key_exists = false;
1993 4080 : for (i, split_for_lsn) in split_history.into_iter().enumerate() {
1994 : // TODO: there could be image keys inside the splits, and we can compute records_since_last_image accordingly.
1995 4080 : records_since_last_image += split_for_lsn.len();
1996 : // Whether to produce an image into the final layer files
1997 4080 : let produce_image = if i == 0 && !has_ancestor {
1998 : // We always generate images for the first batch (below horizon / lowest retain_lsn)
1999 1176 : true
2000 2904 : } else if i == batch_cnt - 1 {
2001 : // Do not generate images for the last batch (above horizon)
2002 1260 : false
2003 1644 : } else if records_since_last_image == 0 {
2004 1288 : false
2005 356 : } else if records_since_last_image >= delta_threshold_cnt {
2006 : // Generate images when there are too many records
2007 12 : true
2008 : } else {
2009 344 : false
2010 : };
2011 4080 : replay_history.extend(split_for_lsn.iter().map(|x| (*x).clone()));
2012 : // Only retain the items after the last image record
2013 5028 : for idx in (0..replay_history.len()).rev() {
2014 5028 : if replay_history[idx].2.will_init() {
2015 4080 : replay_history = replay_history[idx..].to_vec();
2016 4080 : break;
2017 948 : }
2018 : }
2019 4080 : if replay_history.is_empty() && !key_exists {
2020 : // The key does not exist at earlier LSN, we can skip this iteration.
2021 0 : retention.push(Vec::new());
2022 0 : continue;
2023 4080 : } else {
2024 4080 : key_exists = true;
2025 4080 : }
2026 4080 : let Some((_, _, val)) = replay_history.first() else {
2027 0 : unreachable!("replay history should not be empty once it exists")
2028 : };
2029 4080 : if !val.will_init() {
2030 0 : return Err(anyhow::anyhow!("invalid history, no base image")).with_context(|| {
2031 0 : generate_debug_trace(
2032 0 : Some(&replay_history),
2033 0 : full_history,
2034 0 : retain_lsn_below_horizon,
2035 0 : horizon,
2036 0 : )
2037 0 : });
2038 4080 : }
2039 : // Whether to reconstruct the image. In debug mode, we will generate an image
2040 : // at every retain_lsn to ensure data is not corrupted, but we won't put the
2041 : // image into the final layer.
2042 4080 : let generate_image = produce_image || debug_mode;
2043 4080 : if produce_image {
2044 1188 : records_since_last_image = 0;
2045 2892 : }
2046 4080 : let img_and_lsn = if generate_image {
2047 4080 : let replay_history_for_debug = if debug_mode {
2048 4080 : Some(replay_history.clone())
2049 : } else {
2050 0 : None
2051 : };
2052 4080 : let replay_history_for_debug_ref = replay_history_for_debug.as_deref();
2053 4080 : let history = if produce_image {
2054 1188 : std::mem::take(&mut replay_history)
2055 : } else {
2056 2892 : replay_history.clone()
2057 : };
2058 4080 : let mut img = None;
2059 4080 : let mut records = Vec::with_capacity(history.len());
2060 4080 : if let (_, lsn, Value::Image(val)) = history.first().as_ref().unwrap() {
2061 4036 : img = Some((*lsn, val.clone()));
2062 4036 : for (_, lsn, val) in history.into_iter().skip(1) {
2063 920 : let Value::WalRecord(rec) = val else {
2064 0 : return Err(anyhow::anyhow!(
2065 0 : "invalid record, first record is image, expect walrecords"
2066 0 : ))
2067 0 : .with_context(|| {
2068 0 : generate_debug_trace(
2069 0 : replay_history_for_debug_ref,
2070 0 : full_history,
2071 0 : retain_lsn_below_horizon,
2072 0 : horizon,
2073 0 : )
2074 0 : });
2075 : };
2076 920 : records.push((lsn, rec));
2077 : }
2078 : } else {
2079 72 : for (_, lsn, val) in history.into_iter() {
2080 72 : let Value::WalRecord(rec) = val else {
2081 0 : return Err(anyhow::anyhow!("invalid record, first record is walrecord, expect rest are walrecord"))
2082 0 : .with_context(|| generate_debug_trace(
2083 0 : replay_history_for_debug_ref,
2084 0 : full_history,
2085 0 : retain_lsn_below_horizon,
2086 0 : horizon,
2087 0 : ));
2088 : };
2089 72 : records.push((lsn, rec));
2090 : }
2091 : }
2092 4080 : records.reverse();
2093 4080 : let state = ValueReconstructState { img, records };
2094 : // The last batch does not generate an image, so i is always in range, unless we force generating
2095 : // an image during testing.
2096 4080 : let request_lsn = if i >= lsn_split_points.len() {
2097 1260 : Lsn::MAX
2098 : } else {
2099 2820 : lsn_split_points[i]
2100 : };
2101 4080 : let img = self.reconstruct_value(key, request_lsn, state).await?;
2102 4080 : Some((request_lsn, img))
2103 : } else {
2104 0 : None
2105 : };
2106 4080 : if produce_image {
2107 1188 : let (request_lsn, img) = img_and_lsn.unwrap();
2108 1188 : replay_history.push((key, request_lsn, Value::Image(img.clone())));
2109 1188 : retention.push(vec![(request_lsn, Value::Image(img))]);
2110 2892 : } else {
2111 2892 : let deltas = split_for_lsn
2112 2892 : .iter()
2113 2892 : .map(|(_, lsn, value)| (*lsn, value.clone()))
2114 2892 : .collect_vec();
2115 2892 : retention.push(deltas);
2116 2892 : }
2117 : }
2118 1260 : let mut result = Vec::with_capacity(retention.len());
2119 1260 : assert_eq!(retention.len(), lsn_split_points.len() + 1);
2120 4080 : for (idx, logs) in retention.into_iter().enumerate() {
2121 4080 : if idx == lsn_split_points.len() {
2122 1260 : return Ok(KeyHistoryRetention {
2123 1260 : below_horizon: result,
2124 1260 : above_horizon: KeyLogAtLsn(logs),
2125 1260 : });
2126 2820 : } else {
2127 2820 : result.push((lsn_split_points[idx], KeyLogAtLsn(logs)));
2128 2820 : }
2129 : }
2130 0 : unreachable!("key retention is empty")
2131 1260 : }
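// Step 1 of generate_key_retention above routes each history entry into one of
// len(retain_lsn_below_horizon) + 2 buckets by walking the sorted split points. A
// minimal sketch with made-up LSNs (plain u64s instead of `Lsn`, not part of the module):
//
//     let split_points = [0x20u64, 0x40, 0x50]; // retain_lsns below horizon + horizon
//     let bucket_of = |lsn: u64| {
//         split_points
//             .iter()
//             .position(|p| lsn <= *p)
//             .unwrap_or(split_points.len()) // everything above the horizon
//     };
//     assert_eq!(bucket_of(0x10), 0); // lowest retain_lsn bucket
//     assert_eq!(bucket_of(0x45), 2); // between 0x40 and the horizon 0x50
//     assert_eq!(bucket_of(0x60), 3); // above the horizon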
2132 :
2133 : /// Check how much space is left on the disk
2134 104 : async fn check_available_space(self: &Arc<Self>) -> anyhow::Result<u64> {
2135 104 : let tenants_dir = self.conf.tenants_path();
2136 :
2137 104 : let stat = Statvfs::get(&tenants_dir, None)
2138 104 : .context("statvfs failed, presumably directory got unlinked")?;
2139 :
2140 104 : let (avail_bytes, _) = stat.get_avail_total_bytes();
2141 104 :
2142 104 : Ok(avail_bytes)
2143 104 : }
2144 :
2145 : /// Check if the compaction can proceed safely without running out of space. We assume the upper bound
2146 : /// on the size of the files produced by a compaction job is the same as the total size of all layers involved
2147 : /// in the compaction. Therefore, we need at least `2 * layers_to_be_compacted_size` of free space to do a
2148 : /// compaction.
2149 104 : async fn check_compaction_space(
2150 104 : self: &Arc<Self>,
2151 104 : layer_selection: &[Layer],
2152 104 : ) -> anyhow::Result<()> {
2153 104 : let available_space = self.check_available_space().await?;
2154 104 : let mut remote_layer_size = 0;
2155 104 : let mut all_layer_size = 0;
2156 408 : for layer in layer_selection {
2157 304 : let needs_download = layer.needs_download().await?;
2158 304 : if needs_download.is_some() {
2159 0 : remote_layer_size += layer.layer_desc().file_size;
2160 304 : }
2161 304 : all_layer_size += layer.layer_desc().file_size;
2162 : }
2163 104 : let allocated_space = (available_space as f64 * 0.8) as u64; /* reserve 20% space for other tasks */
2164 104 : if all_layer_size /* space needed for newly-generated file */ + remote_layer_size /* space for downloading layers */ > allocated_space
2165 : {
2166 0 : return Err(anyhow!("not enough space for compaction: available_space={}, allocated_space={}, all_layer_size={}, remote_layer_size={}, required_space={}",
2167 0 : available_space, allocated_space, all_layer_size, remote_layer_size, all_layer_size + remote_layer_size));
2168 104 : }
2169 104 : Ok(())
2170 104 : }
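// Worked example with made-up numbers: with 100 GiB reported as available, the check
// allows at most 80 GiB (the 20% reserve above). Compacting 30 GiB of layers of which
// 10 GiB still need downloading requires 30 GiB (new files) + 10 GiB (downloads) =
// 40 GiB <= 80 GiB, so compaction proceeds; 75 GiB of layers with the same 10 GiB of
// downloads (85 GiB > 80 GiB) would bail with the error above.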
2171 :
2172 : /// Get a watermark for gc-compaction, that is the lowest LSN that we can use as the `gc_horizon` for
2173 : /// the compaction algorithm. It is min(space_cutoff, time_cutoff, latest_gc_cutoff, standby_horizon).
2174 : /// Leases and retain_lsns are considered in the gc-compaction job itself so we don't need to account for them
2175 : /// here.
2176 108 : pub(crate) fn get_gc_compaction_watermark(self: &Arc<Self>) -> Lsn {
2177 108 : let gc_cutoff_lsn = {
2178 108 : let gc_info = self.gc_info.read().unwrap();
2179 108 : gc_info.min_cutoff()
2180 108 : };
2181 108 :
2182 108 : // TODO: standby horizon should use leases so we don't really need to consider it here.
2183 108 : // let watermark = watermark.min(self.standby_horizon.load());
2184 108 :
2185 108 : // TODO: ensure the child branches will not use anything below the watermark, or consider
2186 108 : // them when computing the watermark.
2187 108 : gc_cutoff_lsn.min(*self.get_applied_gc_cutoff_lsn())
2188 108 : }
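// For instance (hypothetical LSNs): if gc_info.min_cutoff() returns 0/50 while the
// applied gc cutoff is 0/40, the watermark is 0/40 -- gc-compaction never uses a
// horizon above what legacy GC has already applied.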
2189 :
2190 : /// Split a gc-compaction job into multiple compaction jobs. The split is based on the key range and the estimated size of the compaction job.
2191 : /// The function returns a list of compaction jobs that can be executed separately. If the upper bound of the compact LSN
2192 : /// range is not specified, we will use the latest gc_cutoff as the upper bound, so that all jobs in the jobset acts
2193 : /// range is not specified, we will use the latest gc_cutoff as the upper bound, so that all jobs in the job set act
2194 : /// like a full compaction of the specified keyspace.
2195 0 : self: &Arc<Self>,
2196 0 : job: GcCompactJob,
2197 0 : sub_compaction_max_job_size_mb: Option<u64>,
2198 0 : ) -> anyhow::Result<Vec<GcCompactJob>> {
2199 0 : let compact_below_lsn = if job.compact_lsn_range.end != Lsn::MAX {
2200 0 : job.compact_lsn_range.end
2201 : } else {
2202 0 : self.get_gc_compaction_watermark()
2203 : };
2204 :
2205 0 : if compact_below_lsn == Lsn::INVALID {
2206 0 : tracing::warn!("no layers to compact with gc: gc_cutoff not generated yet, skipping gc bottom-most compaction");
2207 0 : return Ok(vec![]);
2208 0 : }
2209 :
2210 : // Split compaction job to about 4GB each
2211 : // Split the compaction job into sub-jobs of about 4GB each
2212 0 : let sub_compaction_max_job_size_mb =
2213 0 : sub_compaction_max_job_size_mb.unwrap_or(GC_COMPACT_MAX_SIZE_MB);
2214 0 :
2215 0 : let mut compact_jobs = Vec::new();
2216 0 : // For now, we simply use the key partitioning information; we should do a more fine-grained partitioning
2217 0 : // by estimating the amount of files read for a compaction job. We should also partition on LSN.
2218 0 : let ((dense_ks, sparse_ks), _) = self.partitioning.read().as_ref().clone();
2219 : // Truncate the key range to be within user specified compaction range.
2220 0 : fn truncate_to(
2221 0 : source_start: &Key,
2222 0 : source_end: &Key,
2223 0 : target_start: &Key,
2224 0 : target_end: &Key,
2225 0 : ) -> Option<(Key, Key)> {
2226 0 : let start = source_start.max(target_start);
2227 0 : let end = source_end.min(target_end);
2228 0 : if start < end {
2229 0 : Some((*start, *end))
2230 : } else {
2231 0 : None
2232 : }
2233 0 : }
2234 0 : let mut split_key_ranges = Vec::new();
2235 0 : let ranges = dense_ks
2236 0 : .parts
2237 0 : .iter()
2238 0 : .map(|partition| partition.ranges.iter())
2239 0 : .chain(sparse_ks.parts.iter().map(|x| x.0.ranges.iter()))
2240 0 : .flatten()
2241 0 : .cloned()
2242 0 : .collect_vec();
2243 0 : for range in ranges.iter() {
2244 0 : let Some((start, end)) = truncate_to(
2245 0 : &range.start,
2246 0 : &range.end,
2247 0 : &job.compact_key_range.start,
2248 0 : &job.compact_key_range.end,
2249 0 : ) else {
2250 0 : continue;
2251 : };
2252 0 : split_key_ranges.push((start, end));
2253 : }
2254 0 : split_key_ranges.sort();
2255 0 : let all_layers = {
2256 0 : let guard = self.layers.read().await;
2257 0 : let layer_map = guard.layer_map()?;
2258 0 : layer_map.iter_historic_layers().collect_vec()
2259 0 : };
2260 0 : let mut current_start = None;
2261 0 : let ranges_num = split_key_ranges.len();
2262 0 : for (idx, (start, end)) in split_key_ranges.into_iter().enumerate() {
2263 0 : if current_start.is_none() {
2264 0 : current_start = Some(start);
2265 0 : }
2266 0 : let start = current_start.unwrap();
2267 0 : if start >= end {
2268 : // We have already processed this partition.
2269 0 : continue;
2270 0 : }
2271 0 : let overlapping_layers = {
2272 0 : let mut desc = Vec::new();
2273 0 : for layer in all_layers.iter() {
2274 0 : if overlaps_with(&layer.get_key_range(), &(start..end))
2275 0 : && layer.get_lsn_range().start <= compact_below_lsn
2276 0 : {
2277 0 : desc.push(layer.clone());
2278 0 : }
2279 : }
2280 0 : desc
2281 0 : };
2282 0 : let total_size = overlapping_layers.iter().map(|x| x.file_size).sum::<u64>();
2283 0 : if total_size > sub_compaction_max_job_size_mb * 1024 * 1024 || ranges_num == idx + 1 {
2284 : // Try to extend the compaction range so that we include at least one full layer file.
2285 0 : let extended_end = overlapping_layers
2286 0 : .iter()
2287 0 : .map(|layer| layer.key_range.end)
2288 0 : .min();
2289 : // It is possible that the search range does not contain any layer files when we reach the end of the loop.
2290 : // In this case, we simply use the specified key range end.
2291 0 : let end = if let Some(extended_end) = extended_end {
2292 0 : extended_end.max(end)
2293 : } else {
2294 0 : end
2295 : };
2296 0 : let end = if ranges_num == idx + 1 {
2297 : // extend the compaction range to the end of the key range if it's the last partition
2298 0 : end.max(job.compact_key_range.end)
2299 : } else {
2300 0 : end
2301 : };
2302 0 : info!(
2303 0 : "splitting compaction job: {}..{}, estimated_size={}",
2304 : start, end, total_size
2305 : );
2306 0 : compact_jobs.push(GcCompactJob {
2307 0 : dry_run: job.dry_run,
2308 0 : compact_key_range: start..end,
2309 0 : compact_lsn_range: job.compact_lsn_range.start..compact_below_lsn,
2310 0 : });
2311 0 : current_start = Some(end);
2312 0 : }
2313 : }
2314 0 : Ok(compact_jobs)
2315 0 : }
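// Sketch of the splitting behaviour with made-up partition sizes (cap = 4 GiB by
// default): if consecutive key ranges accumulate overlapping-layer sizes of
// 1 GiB + 2 GiB + 2 GiB, the job is closed once the 5 GiB total exceeds the cap; its
// end key may be extended to the smallest layer end key so that at least one full
// layer file is covered, the next job starts where this one ended, and the final job
// is always extended to compact_key_range.end.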
2316 :
2317 : /// An experimental compaction building block that combines compaction with garbage collection.
2318 : ///
2319 : /// The current implementation picks all delta + image layers that are below or intersecting with
2320 : /// the GC horizon without considering retain_lsns. Then, it does a full compaction over all these delta
2321 : /// layers and image layers, which generates image layers at the gc horizon, drops deltas below the gc horizon,
2322 : /// and creates delta layers with all deltas >= the gc horizon.
2323 : ///
2324 : /// If `options.compact_range` is provided, it will only compact the keys within the range, aka partial compaction.
2325 : /// Partial compaction will read and process all layers overlapping with the key range, even if it might
2326 : /// contain extra keys. After the gc-compaction phase completes, delta layers that are not fully contained
2327 : /// within the key range will be rewritten to ensure they do not overlap with the delta layers. Providing
2328 : /// Key::MIN..Key..MAX to the function indicates a full compaction, though technically, `Key::MAX` is not
2329 : /// Key::MIN..Key::MAX to the function indicates a full compaction, though technically, `Key::MAX` is not
2330 : ///
2331 : /// If `options.compact_lsn_range.end` is provided, the compaction will only compact layers below or intersect with
2332 : /// the LSN. Otherwise, it will use the gc cutoff by default.
2333 108 : pub(crate) async fn compact_with_gc(
2334 108 : self: &Arc<Self>,
2335 108 : cancel: &CancellationToken,
2336 108 : options: CompactOptions,
2337 108 : ctx: &RequestContext,
2338 108 : ) -> anyhow::Result<()> {
2339 108 : let sub_compaction = options.sub_compaction;
2340 108 : let job = GcCompactJob::from_compact_options(options.clone());
2341 108 : if sub_compaction {
2342 0 : info!("running enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs");
2343 0 : let jobs = self
2344 0 : .gc_compaction_split_jobs(job, options.sub_compaction_max_job_size_mb)
2345 0 : .await?;
2346 0 : let jobs_len = jobs.len();
2347 0 : for (idx, job) in jobs.into_iter().enumerate() {
2348 0 : info!(
2349 0 : "running enhanced gc bottom-most compaction, sub-compaction {}/{}",
2350 0 : idx + 1,
2351 : jobs_len
2352 : );
2353 0 : self.compact_with_gc_inner(cancel, job, ctx).await?;
2354 : }
2355 0 : if jobs_len == 0 {
2356 0 : info!("no jobs to run, skipping gc bottom-most compaction");
2357 0 : }
2358 0 : return Ok(());
2359 108 : }
2360 108 : self.compact_with_gc_inner(cancel, job, ctx).await
2361 108 : }
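// For example: a caller that wants the bottom-most pass split into ~2 GiB chunks would
// set `sub_compaction: true` and `sub_compaction_max_job_size_mb: Some(2048)` on the
// CompactOptions it passes in; each resulting GcCompactJob is then executed
// sequentially via compact_with_gc_inner.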
2362 :
2363 108 : async fn compact_with_gc_inner(
2364 108 : self: &Arc<Self>,
2365 108 : cancel: &CancellationToken,
2366 108 : job: GcCompactJob,
2367 108 : ctx: &RequestContext,
2368 108 : ) -> anyhow::Result<()> {
2369 108 : // Block other compaction/GC tasks from running for now. GC-compaction could run along
2370 108 : // with legacy compaction tasks in the future. Always ensure the lock order is compaction -> gc.
2371 108 : // Note that we already acquired the compaction lock when the outer `compact` function gets called.
2372 108 :
2373 108 : let gc_lock = async {
2374 108 : tokio::select! {
2375 108 : guard = self.gc_lock.lock() => Ok(guard),
2376 : // TODO: refactor to CompactionError to correctly pass cancelled error
2377 108 : _ = cancel.cancelled() => Err(anyhow!("cancelled")),
2378 : }
2379 108 : };
2380 :
2381 108 : let gc_lock = crate::timed(
2382 108 : gc_lock,
2383 108 : "acquires gc lock",
2384 108 : std::time::Duration::from_secs(5),
2385 108 : )
2386 108 : .await?;
2387 :
2388 108 : let dry_run = job.dry_run;
2389 108 : let compact_key_range = job.compact_key_range;
2390 108 : let compact_lsn_range = job.compact_lsn_range;
2391 :
2392 108 : let debug_mode = cfg!(debug_assertions) || cfg!(feature = "testing");
2393 :
2394 108 : info!("running enhanced gc bottom-most compaction, dry_run={dry_run}, compact_key_range={}..{}, compact_lsn_range={}..{}", compact_key_range.start, compact_key_range.end, compact_lsn_range.start, compact_lsn_range.end);
2395 :
2396 108 : scopeguard::defer! {
2397 108 : info!("done enhanced gc bottom-most compaction");
2398 108 : };
2399 108 :
2400 108 : let mut stat = CompactionStatistics::default();
2401 :
2402 : // Step 0: pick all delta layers + image layers below/intersect with the GC horizon.
2403 : // The layer selection has the following properties:
2404 : // 1. If a layer is in the selection, all layers below it are in the selection.
2405 : // 2. Inferred from (1), for each key in the layer selection, the value can be reconstructed only with the layers in the layer selection.
2406 104 : let job_desc = {
2407 108 : let guard = self.layers.read().await;
2408 108 : let layers = guard.layer_map()?;
2409 108 : let gc_info = self.gc_info.read().unwrap();
2410 108 : let mut retain_lsns_below_horizon = Vec::new();
2411 108 : let gc_cutoff = {
2412 : // Currently, gc-compaction only kicks in after the legacy gc has updated the gc_cutoff.
2413 : // Therefore, it can only clean up data that cannot be cleaned up with legacy gc, instead of
2414 : // cleaning everything that it theoretically could. In the future, it should use `self.gc_info`
2415 : // to get the ground-truth data.
2416 108 : let real_gc_cutoff = self.get_gc_compaction_watermark();
2417 : // The compaction algorithm will keep all keys above the gc_cutoff while keeping only necessary keys below the gc_cutoff for
2418 : // each of the retain_lsn. Therefore, if the user-provided `compact_lsn_range.end` is larger than the real gc cutoff, we will use
2419 : // the real cutoff.
2420 108 : let mut gc_cutoff = if compact_lsn_range.end == Lsn::MAX {
2421 96 : if real_gc_cutoff == Lsn::INVALID {
2422 : // If the gc_cutoff is not generated yet, we should not compact anything.
2423 0 : tracing::warn!("no layers to compact with gc: gc_cutoff not generated yet, skipping gc bottom-most compaction");
2424 0 : return Ok(());
2425 96 : }
2426 96 : real_gc_cutoff
2427 : } else {
2428 12 : compact_lsn_range.end
2429 : };
2430 108 : if gc_cutoff > real_gc_cutoff {
2431 8 : warn!("provided compact_lsn_range.end={} is larger than the real_gc_cutoff={}, using the real gc cutoff", gc_cutoff, real_gc_cutoff);
2432 8 : gc_cutoff = real_gc_cutoff;
2433 100 : }
2434 108 : gc_cutoff
2435 : };
2436 140 : for (lsn, _timeline_id, _is_offloaded) in &gc_info.retain_lsns {
2437 140 : if lsn < &gc_cutoff {
2438 140 : retain_lsns_below_horizon.push(*lsn);
2439 140 : }
2440 : }
2441 108 : for lsn in gc_info.leases.keys() {
2442 0 : if lsn < &gc_cutoff {
2443 0 : retain_lsns_below_horizon.push(*lsn);
2444 0 : }
2445 : }
2446 108 : let mut selected_layers: Vec<Layer> = Vec::new();
2447 108 : drop(gc_info);
2448 : // First, pick all the layers that intersect with or are below the gc_cutoff, and get the largest LSN among the selected layers.
2449 108 : let Some(max_layer_lsn) = layers
2450 108 : .iter_historic_layers()
2451 488 : .filter(|desc| desc.get_lsn_range().start <= gc_cutoff)
2452 416 : .map(|desc| desc.get_lsn_range().end)
2453 108 : .max()
2454 : else {
2455 0 : info!("no layers to compact with gc: no historic layers below gc_cutoff, gc_cutoff={}", gc_cutoff);
2456 0 : return Ok(());
2457 : };
2458 : // Next, if the user specifies compact_lsn_range.start, we need to filter some layers out. All the layers (strictly) below
2459 : // the min_layer_lsn computed as below will be filtered out and the data will be accessed using the normal read path, as if
2460 : // it is a branch.
2461 108 : let Some(min_layer_lsn) = layers
2462 108 : .iter_historic_layers()
2463 488 : .filter(|desc| {
2464 488 : if compact_lsn_range.start == Lsn::INVALID {
2465 396 : true // select all layers below if start == Lsn(0)
2466 : } else {
2467 92 : desc.get_lsn_range().end > compact_lsn_range.start // strictly larger than compact_above_lsn
2468 : }
2469 488 : })
2470 452 : .map(|desc| desc.get_lsn_range().start)
2471 108 : .min()
2472 : else {
2473 0 : info!("no layers to compact with gc: no historic layers above compact_above_lsn, compact_above_lsn={}", compact_lsn_range.end);
2474 0 : return Ok(());
2475 : };
2476 : // Then, pick all the layers that are below the max_layer_lsn. This is to ensure we can pick all single-key
2477 : // layers to compact.
2478 108 : let mut rewrite_layers = Vec::new();
2479 488 : for desc in layers.iter_historic_layers() {
2480 488 : if desc.get_lsn_range().end <= max_layer_lsn
2481 416 : && desc.get_lsn_range().start >= min_layer_lsn
2482 380 : && overlaps_with(&desc.get_key_range(), &compact_key_range)
2483 : {
2484 : // If the layer overlaps with the compaction key range, we need to read it to obtain all keys within the range,
2485 : // even if it might contain extra keys
2486 304 : selected_layers.push(guard.get_from_desc(&desc));
2487 304 : // If the layer is not fully contained within the key range, we need to rewrite it if it's a delta layer (it's fine
2488 304 : // to overlap image layers)
2489 304 : if desc.is_delta() && !fully_contains(&compact_key_range, &desc.get_key_range())
2490 4 : {
2491 4 : rewrite_layers.push(desc);
2492 300 : }
2493 184 : }
2494 : }
2495 108 : if selected_layers.is_empty() {
2496 4 : info!("no layers to compact with gc: no layers within the key range, gc_cutoff={}, key_range={}..{}", gc_cutoff, compact_key_range.start, compact_key_range.end);
2497 4 : return Ok(());
2498 104 : }
2499 104 : retain_lsns_below_horizon.sort();
2500 104 : GcCompactionJobDescription {
2501 104 : selected_layers,
2502 104 : gc_cutoff,
2503 104 : retain_lsns_below_horizon,
2504 104 : min_layer_lsn,
2505 104 : max_layer_lsn,
2506 104 : compaction_key_range: compact_key_range,
2507 104 : rewrite_layers,
2508 104 : }
2509 : };
2510 104 : let (has_data_below, lowest_retain_lsn) = if compact_lsn_range.start != Lsn::INVALID {
2511 : // If we only compact above some LSN, we should get the history from the current branch below the specified LSN.
2512 : // We use job_desc.min_layer_lsn as if it's the lowest branch point.
2513 16 : (true, job_desc.min_layer_lsn)
2514 88 : } else if self.ancestor_timeline.is_some() {
2515 : // In theory, we can also use min_layer_lsn here, but using ancestor LSN makes sure the delta layers cover the
2516 : // LSN ranges all the way to the ancestor timeline.
2517 4 : (true, self.ancestor_lsn)
2518 : } else {
2519 84 : let res = job_desc
2520 84 : .retain_lsns_below_horizon
2521 84 : .first()
2522 84 : .copied()
2523 84 : .unwrap_or(job_desc.gc_cutoff);
2524 84 : if debug_mode {
2525 84 : assert_eq!(
2526 84 : res,
2527 84 : job_desc
2528 84 : .retain_lsns_below_horizon
2529 84 : .iter()
2530 84 : .min()
2531 84 : .copied()
2532 84 : .unwrap_or(job_desc.gc_cutoff)
2533 84 : );
2534 0 : }
2535 84 : (false, res)
2536 : };
2537 104 : info!(
2538 0 : "picked {} layers for compaction ({} layers need rewriting) with max_layer_lsn={} min_layer_lsn={} gc_cutoff={} lowest_retain_lsn={}, key_range={}..{}, has_data_below={}",
2539 0 : job_desc.selected_layers.len(),
2540 0 : job_desc.rewrite_layers.len(),
2541 : job_desc.max_layer_lsn,
2542 : job_desc.min_layer_lsn,
2543 : job_desc.gc_cutoff,
2544 : lowest_retain_lsn,
2545 : job_desc.compaction_key_range.start,
2546 : job_desc.compaction_key_range.end,
2547 : has_data_below,
2548 : );
2549 :
2550 408 : for layer in &job_desc.selected_layers {
2551 304 : debug!("read layer: {}", layer.layer_desc().key());
2552 : }
2553 108 : for layer in &job_desc.rewrite_layers {
2554 4 : debug!("rewrite layer: {}", layer.key());
2555 : }
2556 :
2557 104 : self.check_compaction_space(&job_desc.selected_layers)
2558 104 : .await?;
2559 :
2560 : // Generate statistics for the compaction
2561 408 : for layer in &job_desc.selected_layers {
2562 304 : let desc = layer.layer_desc();
2563 304 : if desc.is_delta() {
2564 172 : stat.visit_delta_layer(desc.file_size());
2565 172 : } else {
2566 132 : stat.visit_image_layer(desc.file_size());
2567 132 : }
2568 : }
2569 :
2570 : // Step 1: construct a k-merge iterator over all layers.
2571 : // Also, verify if the layer map can be split by drawing a horizontal line at every LSN start/end split point.
2572 104 : let layer_names = job_desc
2573 104 : .selected_layers
2574 104 : .iter()
2575 304 : .map(|layer| layer.layer_desc().layer_name())
2576 104 : .collect_vec();
2577 104 : if let Some(err) = check_valid_layermap(&layer_names) {
2578 0 : bail!("gc-compaction layer map check failed because {}, cannot proceed with compaction due to potential data loss", err);
2579 104 : }
2580 104 : // The maximum LSN we are processing in this compaction loop
2581 104 : let end_lsn = job_desc
2582 104 : .selected_layers
2583 104 : .iter()
2584 304 : .map(|l| l.layer_desc().lsn_range.end)
2585 104 : .max()
2586 104 : .unwrap();
2587 104 : let mut delta_layers = Vec::new();
2588 104 : let mut image_layers = Vec::new();
2589 104 : let mut downloaded_layers = Vec::new();
2590 104 : let mut total_downloaded_size = 0;
2591 104 : let mut total_layer_size = 0;
2592 408 : for layer in &job_desc.selected_layers {
2593 304 : if layer.needs_download().await?.is_some() {
2594 0 : total_downloaded_size += layer.layer_desc().file_size;
2595 304 : }
2596 304 : total_layer_size += layer.layer_desc().file_size;
2597 304 : let resident_layer = layer.download_and_keep_resident().await?;
2598 304 : downloaded_layers.push(resident_layer);
2599 : }
2600 104 : info!(
2601 0 : "finish downloading layers, downloaded={}, total={}, ratio={:.2}",
2602 0 : total_downloaded_size,
2603 0 : total_layer_size,
2604 0 : total_downloaded_size as f64 / total_layer_size as f64
2605 : );
2606 408 : for resident_layer in &downloaded_layers {
2607 304 : if resident_layer.layer_desc().is_delta() {
2608 172 : let layer = resident_layer.get_as_delta(ctx).await?;
2609 172 : delta_layers.push(layer);
2610 : } else {
2611 132 : let layer = resident_layer.get_as_image(ctx).await?;
2612 132 : image_layers.push(layer);
2613 : }
2614 : }
2615 104 : let (dense_ks, sparse_ks) = self.collect_gc_compaction_keyspace().await?;
2616 104 : let mut merge_iter = FilterIterator::create(
2617 104 : MergeIterator::create(&delta_layers, &image_layers, ctx),
2618 104 : dense_ks,
2619 104 : sparse_ks,
2620 104 : )?;
2621 :
2622 : // Step 2: Produce images+deltas.
2623 104 : let mut accumulated_values = Vec::new();
2624 104 : let mut last_key: Option<Key> = None;
2625 :
2626 : // Only create image layers when there are no ancestor branches. TODO: create a covering image layer
2627 : // when certain conditions are met.
2628 104 : let mut image_layer_writer = if !has_data_below {
2629 : Some(
2630 84 : SplitImageLayerWriter::new(
2631 84 : self.conf,
2632 84 : self.timeline_id,
2633 84 : self.tenant_shard_id,
2634 84 : job_desc.compaction_key_range.start,
2635 84 : lowest_retain_lsn,
2636 84 : self.get_compaction_target_size(),
2637 84 : ctx,
2638 84 : )
2639 84 : .await?,
2640 : )
2641 : } else {
2642 20 : None
2643 : };
2644 :
2645 104 : let mut delta_layer_writer = SplitDeltaLayerWriter::new(
2646 104 : self.conf,
2647 104 : self.timeline_id,
2648 104 : self.tenant_shard_id,
2649 104 : lowest_retain_lsn..end_lsn,
2650 104 : self.get_compaction_target_size(),
2651 104 : )
2652 104 : .await?;
2653 :
2654 : #[derive(Default)]
2655 : struct RewritingLayers {
2656 : before: Option<DeltaLayerWriter>,
2657 : after: Option<DeltaLayerWriter>,
2658 : }
2659 104 : let mut delta_layer_rewriters = HashMap::<Arc<PersistentLayerKey>, RewritingLayers>::new();
2660 :
2661 : /// When compacting a range that is not at the bottom (= `[0,X)`) of the root branch, we "have data below" (`has_data_below=true`).
2662 : /// The two cases are compaction on ancestor branches and when `compact_lsn_range.start` is set.
2663 : /// In those cases, we need to pull up data from below the LSN range we're compacting.
2664 : ///
2665 : /// This function unifies the cases so that later code doesn't have to think about it.
2666 : ///
2667 : /// Currently, we always get the ancestor image for each key in the child branch no matter whether the image
2668 : /// is needed for reconstruction. This should be fixed in the future.
2669 : ///
2670 : /// Furthermore, we should do vectored get instead of a single get, or better, use k-merge for ancestor
2671 : /// images.
2672 1244 : async fn get_ancestor_image(
2673 1244 : this_tline: &Arc<Timeline>,
2674 1244 : key: Key,
2675 1244 : ctx: &RequestContext,
2676 1244 : has_data_below: bool,
2677 1244 : history_lsn_point: Lsn,
2678 1244 : ) -> anyhow::Result<Option<(Key, Lsn, Bytes)>> {
2679 1244 : if !has_data_below {
2680 1168 : return Ok(None);
2681 76 : };
2682 : // This function is implemented as a get of the current timeline at ancestor LSN, therefore reusing
2683 : // as much existing code as possible.
2684 76 : let img = this_tline.get(key, history_lsn_point, ctx).await?;
2685 76 : Ok(Some((key, history_lsn_point, img)))
2686 1244 : }
2687 :
2688 : // Actually, we can decide not to write to the image layer at all at this point because
2689 : // the key and LSN range are determined. However, to keep things simple here, we still
2690 : // create this writer, and discard it at the end.
2691 :
2692 1932 : while let Some(((key, lsn, val), desc)) = merge_iter.next_with_trace().await? {
2693 1828 : if cancel.is_cancelled() {
2694 0 : return Err(anyhow!("cancelled")); // TODO: refactor to CompactionError and pass cancel error
2695 1828 : }
2696 1828 : if self.shard_identity.is_key_disposable(&key) {
2697 : // If this shard does not need to store this key, simply skip it.
2698 : //
2699          : // This is not handled in the filter iterator because the owning shard is determined by a hash of the key.
2700          : // Therefore, unlike with key space (range) handling, we get no performance benefit from
2701          : // things like skipping a whole layer file.
2702 0 : if cfg!(debug_assertions) {
2703 0 : let shard = self.shard_identity.shard_index();
2704 0 : let owner = self.shard_identity.get_shard_number(&key);
2705 0 : panic!("key {key} does not belong on shard {shard}, owned by {owner}");
2706 0 : }
2707 0 : continue;
2708 1828 : }
2709 1828 : if !job_desc.compaction_key_range.contains(&key) {
2710 128 : if !desc.is_delta {
2711 120 : continue;
2712 8 : }
2713 8 : let rewriter = delta_layer_rewriters.entry(desc.clone()).or_default();
2714 8 : let rewriter = if key < job_desc.compaction_key_range.start {
2715 0 : if rewriter.before.is_none() {
2716 0 : rewriter.before = Some(
2717 0 : DeltaLayerWriter::new(
2718 0 : self.conf,
2719 0 : self.timeline_id,
2720 0 : self.tenant_shard_id,
2721 0 : desc.key_range.start,
2722 0 : desc.lsn_range.clone(),
2723 0 : ctx,
2724 0 : )
2725 0 : .await?,
2726 : );
2727 0 : }
2728 0 : rewriter.before.as_mut().unwrap()
2729 8 : } else if key >= job_desc.compaction_key_range.end {
2730 8 : if rewriter.after.is_none() {
2731 4 : rewriter.after = Some(
2732 4 : DeltaLayerWriter::new(
2733 4 : self.conf,
2734 4 : self.timeline_id,
2735 4 : self.tenant_shard_id,
2736 4 : job_desc.compaction_key_range.end,
2737 4 : desc.lsn_range.clone(),
2738 4 : ctx,
2739 4 : )
2740 4 : .await?,
2741 : );
2742 4 : }
2743 8 : rewriter.after.as_mut().unwrap()
2744 : } else {
2745 0 : unreachable!()
2746 : };
2747 8 : rewriter.put_value(key, lsn, val, ctx).await?;
2748 8 : continue;
2749 1700 : }
2750 1700 : match val {
2751 1220 : Value::Image(_) => stat.visit_image_key(&val),
2752 480 : Value::WalRecord(_) => stat.visit_wal_key(&val),
2753 : }
2754 1700 : if last_key.is_none() || last_key.as_ref() == Some(&key) {
2755 560 : if last_key.is_none() {
2756 104 : last_key = Some(key);
2757 456 : }
2758 560 : accumulated_values.push((key, lsn, val));
2759 : } else {
2760 1140 : let last_key: &mut Key = last_key.as_mut().unwrap();
2761 1140 : stat.on_unique_key_visited(); // TODO: adjust statistics for partial compaction
2762 1140 : let retention = self
2763 1140 : .generate_key_retention(
2764 1140 : *last_key,
2765 1140 : &accumulated_values,
2766 1140 : job_desc.gc_cutoff,
2767 1140 : &job_desc.retain_lsns_below_horizon,
2768 1140 : COMPACTION_DELTA_THRESHOLD,
2769 1140 : get_ancestor_image(self, *last_key, ctx, has_data_below, lowest_retain_lsn)
2770 1140 : .await?,
2771 : )
2772 1140 : .await?;
2773 1140 : retention
2774 1140 : .pipe_to(
2775 1140 : *last_key,
2776 1140 : &mut delta_layer_writer,
2777 1140 : image_layer_writer.as_mut(),
2778 1140 : &mut stat,
2779 1140 : ctx,
2780 1140 : )
2781 1140 : .await?;
2782 1140 : accumulated_values.clear();
2783 1140 : *last_key = key;
2784 1140 : accumulated_values.push((key, lsn, val));
2785 : }
2786 : }
2787 :
2788 : // TODO: move the below part to the loop body
2789 104 : let last_key = last_key.expect("no keys produced during compaction");
2790 104 : stat.on_unique_key_visited();
2791 :
2792 104 : let retention = self
2793 104 : .generate_key_retention(
2794 104 : last_key,
2795 104 : &accumulated_values,
2796 104 : job_desc.gc_cutoff,
2797 104 : &job_desc.retain_lsns_below_horizon,
2798 104 : COMPACTION_DELTA_THRESHOLD,
2799 104 : get_ancestor_image(self, last_key, ctx, has_data_below, lowest_retain_lsn).await?,
2800 : )
2801 104 : .await?;
2802 104 : retention
2803 104 : .pipe_to(
2804 104 : last_key,
2805 104 : &mut delta_layer_writer,
2806 104 : image_layer_writer.as_mut(),
2807 104 : &mut stat,
2808 104 : ctx,
2809 104 : )
2810 104 : .await?;
2811 : // end: move the above part to the loop body
2812 :
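              :         // Finalize the rewriters for delta data outside the compaction key range, producing
              :         // rewritten delta layers cut at the key range boundaries.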
2813 104 : let mut rewrote_delta_layers = Vec::new();
2814 108 : for (key, writers) in delta_layer_rewriters {
2815 4 : if let Some(delta_writer_before) = writers.before {
2816 0 : let (desc, path) = delta_writer_before
2817 0 : .finish(job_desc.compaction_key_range.start, ctx)
2818 0 : .await?;
2819 0 : let layer = Layer::finish_creating(self.conf, self, desc, &path)?;
2820 0 : rewrote_delta_layers.push(layer);
2821 4 : }
2822 4 : if let Some(delta_writer_after) = writers.after {
2823 4 : let (desc, path) = delta_writer_after.finish(key.key_range.end, ctx).await?;
2824 4 : let layer = Layer::finish_creating(self.conf, self, desc, &path)?;
2825 4 : rewrote_delta_layers.push(layer);
2826 0 : }
2827 : }
2828 :
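              :         // Closure handed to the split writers' `finish_with_discard_fn`: it asks
              :         // `KeyHistoryRetention::discard_key` whether a layer produced for this key should be
              :         // discarded (e.g. because an identical layer is already present), honoring `dry_run`.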
2829 148 : let discard = |key: &PersistentLayerKey| {
2830 148 : let key = key.clone();
2831 148 : async move { KeyHistoryRetention::discard_key(&key, self, dry_run).await }
2832 148 : };
2833 :
2834 104 : let produced_image_layers = if let Some(writer) = image_layer_writer {
2835 84 : if !dry_run {
2836 76 : let end_key = job_desc.compaction_key_range.end;
2837 76 : writer
2838 76 : .finish_with_discard_fn(self, ctx, end_key, discard)
2839 76 : .await?
2840 : } else {
2841 8 : drop(writer);
2842 8 : Vec::new()
2843 : }
2844 : } else {
2845 20 : Vec::new()
2846 : };
2847 :
2848 104 : let produced_delta_layers = if !dry_run {
2849 96 : delta_layer_writer
2850 96 : .finish_with_discard_fn(self, ctx, discard)
2851 96 : .await?
2852 : } else {
2853 8 : drop(delta_layer_writer);
2854 8 : Vec::new()
2855 : };
2856 :
2857          : // TODO: make image/delta/rewrote_delta layer generation atomic. At this point, we have already generated resident layers, and if
2858          : // compaction is cancelled now, some of those layers might not get cleaned up.
2859 104 : let mut compact_to = Vec::new();
2860 104 : let mut keep_layers = HashSet::new();
2861 104 : let produced_delta_layers_len = produced_delta_layers.len();
2862 104 : let produced_image_layers_len = produced_image_layers.len();
2863 176 : for action in produced_delta_layers {
2864 72 : match action {
2865 44 : BatchWriterResult::Produced(layer) => {
2866 44 : if cfg!(debug_assertions) {
2867 44 : info!("produced delta layer: {}", layer.layer_desc().key());
2868 0 : }
2869 44 : stat.produce_delta_layer(layer.layer_desc().file_size());
2870 44 : compact_to.push(layer);
2871 : }
2872 28 : BatchWriterResult::Discarded(l) => {
2873 28 : if cfg!(debug_assertions) {
2874 28 : info!("discarded delta layer: {}", l);
2875 0 : }
2876 28 : keep_layers.insert(l);
2877 28 : stat.discard_delta_layer();
2878 : }
2879 : }
2880 : }
2881 108 : for layer in &rewrote_delta_layers {
2882 4 : debug!(
2883 0 : "produced rewritten delta layer: {}",
2884 0 : layer.layer_desc().key()
2885 : );
2886 : }
2887 104 : compact_to.extend(rewrote_delta_layers);
2888 180 : for action in produced_image_layers {
2889 76 : match action {
2890 60 : BatchWriterResult::Produced(layer) => {
2891 60 : debug!("produced image layer: {}", layer.layer_desc().key());
2892 60 : stat.produce_image_layer(layer.layer_desc().file_size());
2893 60 : compact_to.push(layer);
2894 : }
2895 16 : BatchWriterResult::Discarded(l) => {
2896 16 : debug!("discarded image layer: {}", l);
2897 16 : keep_layers.insert(l);
2898 16 : stat.discard_image_layer();
2899 : }
2900 : }
2901 : }
2902 :
2903 104 : let mut layer_selection = job_desc.selected_layers;
2904 :
2905 : // Partial compaction might select more data than it processes, e.g., if
2906 : // the compaction_key_range only partially overlaps:
2907 : //
2908 : // [---compaction_key_range---]
2909 : // [---A----][----B----][----C----][----D----]
2910 : //
2911          : // For delta layers, we will rewrite the layers so that they are cut exactly at
2912          : // the compaction key range, so we can always discard them. However, for image
2913          : // layers, which we do not rewrite for now, we need to handle them differently.
2914 : // Assume image layers A, B, C, D are all in the `layer_selection`.
2915 : //
2916 : // The created image layers contain whatever is needed from B, C, and from
2917 : // `----]` of A, and from `[---` of D.
2918 : //
2919          : // In contrast, `[---A` and `D----]` have not been processed, so we must
2920 : // keep that data.
2921 : //
2922 : // The solution for now is to keep A and D completely if they are image layers.
2923          : // (layer_selection is what we'll remove from the layer map, so retain what
2924 : // is _not_ fully covered by compaction_key_range).
2925 408 : for layer in &layer_selection {
2926 304 : if !layer.layer_desc().is_delta() {
2927 132 : if !overlaps_with(
2928 132 : &layer.layer_desc().key_range,
2929 132 : &job_desc.compaction_key_range,
2930 132 : ) {
2931 0 : bail!("violated constraint: image layer outside of compaction key range");
2932 132 : }
2933 132 : if !fully_contains(
2934 132 : &job_desc.compaction_key_range,
2935 132 : &layer.layer_desc().key_range,
2936 132 : ) {
2937 16 : keep_layers.insert(layer.layer_desc().key());
2938 116 : }
2939 172 : }
2940 : }
2941 :
2942 304 : layer_selection.retain(|x| !keep_layers.contains(&x.layer_desc().key()));
2943 104 :
2944 104 : info!(
2945 0 : "gc-compaction statistics: {}",
2946 0 : serde_json::to_string(&stat)?
2947 : );
2948 :
2949 104 : if dry_run {
2950 8 : return Ok(());
2951 96 : }
2952 96 :
2953 96 : info!(
2954 0 : "produced {} delta layers and {} image layers, {} layers are kept",
2955 0 : produced_delta_layers_len,
2956 0 : produced_image_layers_len,
2957 0 : keep_layers.len()
2958 : );
2959 :
2960 : // Step 3: Place back to the layer map.
2961 :
2962 : // First, do a sanity check to ensure the newly-created layer map does not contain overlaps.
2963 96 : let all_layers = {
2964 96 : let guard = self.layers.read().await;
2965 96 : let layer_map = guard.layer_map()?;
2966 96 : layer_map.iter_historic_layers().collect_vec()
2967 96 : };
2968 96 :
2969 96 : let mut final_layers = all_layers
2970 96 : .iter()
2971 428 : .map(|layer| layer.layer_name())
2972 96 : .collect::<HashSet<_>>();
2973 304 : for layer in &layer_selection {
2974 208 : final_layers.remove(&layer.layer_desc().layer_name());
2975 208 : }
2976 204 : for layer in &compact_to {
2977 108 : final_layers.insert(layer.layer_desc().layer_name());
2978 108 : }
2979 96 : let final_layers = final_layers.into_iter().collect_vec();
2980 :
2981          : // TODO: move this check before we call `finish` on the image layer writers. However, that would require getting the layer names before we finish
2982          : // the writers, so we would potentially need a function like `ImageLayerBatchWriter::get_all_pending_layer_keys` to get all the keys that are
2983          : // in a writer before finalizing the persistent layers. For now, we may leave some dangling layers on disk if the check fails.
2984 96 : if let Some(err) = check_valid_layermap(&final_layers) {
2985 0 : bail!("gc-compaction layer map check failed after compaction because {}, compaction result not applied to the layer map due to potential data loss", err);
2986 96 : }
2987 :
2988 : // Between the sanity check and this compaction update, there could be new layers being flushed, but it should be fine because we only
2989 : // operate on L1 layers.
2990 : {
2991 : // Gc-compaction will rewrite the history of a key. This could happen in two ways:
2992 : //
2993 : // 1. We create an image layer to replace all the deltas below the compact LSN. In this case, assume
2994 : // we have 2 delta layers A and B, both below the compact LSN. We create an image layer I to replace
2995 : // A and B at the compact LSN. If the read path finishes reading A, yields, and now we update the layer
2996 : // map, the read path then cannot find any keys below A, reporting a missing key error, while the key
2997 : // now gets stored in I at the compact LSN.
2998 : //
2999 : // --------------- ---------------
3000 : // delta1@LSN20 image1@LSN20
3001 : // --------------- (read path collects delta@LSN20, => --------------- (read path cannot find anything
3002 : // delta1@LSN10 yields) below LSN 20)
3003 : // ---------------
3004 : //
3005          : // 2. We create a delta layer to replace all the deltas below the compact LSN, and in that delta layer,
3006          : //    we combine the history of a key into a single image. For example, say we have deltas at LSN 1, 2, 3, 4,
3007          : //    and assume one delta layer contains LSN 1, 2, 3 and the other contains LSN 4.
3008          : //
3009          : //    We let gc-compaction combine deltas 2, 3, 4 into an image at LSN 4, which produces a delta layer that
3010          : //    contains the delta at LSN 1 and the image at LSN 4. If the read path finishes reading the original delta
3011          : //    layer containing LSN 4, yields, and we then update the layer map with the new delta layer, the read path
3012          : //    collects both the image at LSN 4 and the delta at LSN 1, which is an invalid history (see below).
3012 : //
3013 : // --------------- ---------------
3014 : // delta1@LSN4 image1@LSN4
3015 : // --------------- (read path collects delta@LSN4, => --------------- (read path collects LSN4 and LSN1,
3016 : // delta1@LSN1-3 yields) delta1@LSN1 which is an invalid history)
3017 : // --------------- ---------------
3018 : //
3019 : // Therefore, the gc-compaction layer update operation should wait for all ongoing reads, block all pending reads,
3020 : // and only allow reads to continue after the update is finished.
3021 :
3022 96 : let update_guard = self.gc_compaction_layer_update_lock.write().await;
3023 : // Acquiring the update guard ensures current read operations end and new read operations are blocked.
3024 : // TODO: can we use `latest_gc_cutoff` Rcu to achieve the same effect?
3025 96 : let mut guard = self.layers.write().await;
3026 96 : guard
3027 96 : .open_mut()?
3028 96 : .finish_gc_compaction(&layer_selection, &compact_to, &self.metrics);
3029 96 : drop(update_guard); // Allow new reads to start ONLY after we finished updating the layer map.
3030 96 : };
3031 96 :
3032 96 : // Schedule an index-only upload to update the `latest_gc_cutoff` in the index_part.json.
3033 96 : // Otherwise, after restart, the index_part only contains the old `latest_gc_cutoff` and
3034 96 : // find_gc_cutoffs will try accessing things below the cutoff. TODO: ideally, this should
3035 96 : // be batched into `schedule_compaction_update`.
3036 96 : let disk_consistent_lsn = self.disk_consistent_lsn.load();
3037 96 : self.schedule_uploads(disk_consistent_lsn, None)?;
3038          : // If a layer gets rewritten during gc-compaction, we need to keep that layer only in `compact_to` instead
3039          : // of `compact_from`.
3040 96 : let compact_from = {
3041 96 : let mut compact_from = Vec::new();
3042 96 : let mut compact_to_set = HashMap::new();
3043 204 : for layer in &compact_to {
3044 108 : compact_to_set.insert(layer.layer_desc().key(), layer);
3045 108 : }
3046 304 : for layer in &layer_selection {
3047 208 : if let Some(to) = compact_to_set.get(&layer.layer_desc().key()) {
3048 0 : tracing::info!(
3049 0 : "skipping delete {} because found same layer key at different generation {}",
3050 : layer, to
3051 : );
3052 208 : } else {
3053 208 : compact_from.push(layer.clone());
3054 208 : }
3055 : }
3056 96 : compact_from
3057 96 : };
3058 96 : self.remote_client
3059 96 : .schedule_compaction_update(&compact_from, &compact_to)?;
3060 :
3061 96 : drop(gc_lock);
3062 96 :
3063 96 : Ok(())
3064 108 : }
3065 : }
3066 :
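              : /// Adapts a [`Timeline`] to the [`CompactionJobExecutor`] interface of the compaction crate,
              : /// buffering newly produced layers and pending deletions until they are applied by
              : /// `flush_updates`.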
3067 : struct TimelineAdaptor {
3068 : timeline: Arc<Timeline>,
3069 :
3070 : keyspace: (Lsn, KeySpace),
3071 :
3072 : new_deltas: Vec<ResidentLayer>,
3073 : new_images: Vec<ResidentLayer>,
3074 : layers_to_delete: Vec<Arc<PersistentLayerDesc>>,
3075 : }
3076 :
3077 : impl TimelineAdaptor {
3078 0 : pub fn new(timeline: &Arc<Timeline>, keyspace: (Lsn, KeySpace)) -> Self {
3079 0 : Self {
3080 0 : timeline: timeline.clone(),
3081 0 : keyspace,
3082 0 : new_images: Vec::new(),
3083 0 : new_deltas: Vec::new(),
3084 0 : layers_to_delete: Vec::new(),
3085 0 : }
3086 0 : }
3087 :
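              :     /// Applies the buffered changes to the timeline: finishes the compaction batch (new delta
              :     /// and image layers, layer deletions), schedules the upload of the new image layers, and
              :     /// clears the buffers.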
3088 0 : pub async fn flush_updates(&mut self) -> Result<(), CompactionError> {
3089 0 : let layers_to_delete = {
3090 0 : let guard = self.timeline.layers.read().await;
3091 0 : self.layers_to_delete
3092 0 : .iter()
3093 0 : .map(|x| guard.get_from_desc(x))
3094 0 : .collect::<Vec<Layer>>()
3095 0 : };
3096 0 : self.timeline
3097 0 : .finish_compact_batch(&self.new_deltas, &self.new_images, &layers_to_delete)
3098 0 : .await?;
3099 :
3100 0 : self.timeline
3101 0 : .upload_new_image_layers(std::mem::take(&mut self.new_images))?;
3102 :
3103 0 : self.new_deltas.clear();
3104 0 : self.layers_to_delete.clear();
3105 0 : Ok(())
3106 0 : }
3107 : }
3108 :
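              : // Thin wrappers around `ResidentLayer`, used as the delta/image layer types of the compaction
              : // interface (see the `CompactionLayer` impls below).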
3109 : #[derive(Clone)]
3110 : struct ResidentDeltaLayer(ResidentLayer);
3111 : #[derive(Clone)]
3112 : struct ResidentImageLayer(ResidentLayer);
3113 :
3114 : impl CompactionJobExecutor for TimelineAdaptor {
3115 : type Key = pageserver_api::key::Key;
3116 :
3117 : type Layer = OwnArc<PersistentLayerDesc>;
3118 : type DeltaLayer = ResidentDeltaLayer;
3119 : type ImageLayer = ResidentImageLayer;
3120 :
3121 : type RequestContext = crate::context::RequestContext;
3122 :
3123 0 : fn get_shard_identity(&self) -> &ShardIdentity {
3124 0 : self.timeline.get_shard_identity()
3125 0 : }
3126 :
3127 0 : async fn get_layers(
3128 0 : &mut self,
3129 0 : key_range: &Range<Key>,
3130 0 : lsn_range: &Range<Lsn>,
3131 0 : _ctx: &RequestContext,
3132 0 : ) -> anyhow::Result<Vec<OwnArc<PersistentLayerDesc>>> {
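              :         // Apply any buffered layer updates first so that the layer map read below reflects them.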
3133 0 : self.flush_updates().await?;
3134 :
3135 0 : let guard = self.timeline.layers.read().await;
3136 0 : let layer_map = guard.layer_map()?;
3137 :
3138 0 : let result = layer_map
3139 0 : .iter_historic_layers()
3140 0 : .filter(|l| {
3141 0 : overlaps_with(&l.lsn_range, lsn_range) && overlaps_with(&l.key_range, key_range)
3142 0 : })
3143 0 : .map(OwnArc)
3144 0 : .collect();
3145 0 : Ok(result)
3146 0 : }
3147 :
3148 0 : async fn get_keyspace(
3149 0 : &mut self,
3150 0 : key_range: &Range<Key>,
3151 0 : lsn: Lsn,
3152 0 : _ctx: &RequestContext,
3153 0 : ) -> anyhow::Result<Vec<Range<Key>>> {
3154 0 : if lsn == self.keyspace.0 {
3155 0 : Ok(pageserver_compaction::helpers::intersect_keyspace(
3156 0 : &self.keyspace.1.ranges,
3157 0 : key_range,
3158 0 : ))
3159 : } else {
3160 : // The current compaction implementation only ever requests the key space
3161 : // at the compaction end LSN.
3162 0 : anyhow::bail!("keyspace not available for requested lsn");
3163 : }
3164 0 : }
3165 :
3166 0 : async fn downcast_delta_layer(
3167 0 : &self,
3168 0 : layer: &OwnArc<PersistentLayerDesc>,
3169 0 : ) -> anyhow::Result<Option<ResidentDeltaLayer>> {
3170 0 : // this is a lot more complex than a simple downcast...
3171 0 : if layer.is_delta() {
3172 0 : let l = {
3173 0 : let guard = self.timeline.layers.read().await;
3174 0 : guard.get_from_desc(layer)
3175 : };
3176 0 : let result = l.download_and_keep_resident().await?;
3177 :
3178 0 : Ok(Some(ResidentDeltaLayer(result)))
3179 : } else {
3180 0 : Ok(None)
3181 : }
3182 0 : }
3183 :
3184 0 : async fn create_image(
3185 0 : &mut self,
3186 0 : lsn: Lsn,
3187 0 : key_range: &Range<Key>,
3188 0 : ctx: &RequestContext,
3189 0 : ) -> anyhow::Result<()> {
3190 0 : Ok(self.create_image_impl(lsn, key_range, ctx).await?)
3191 0 : }
3192 :
3193 0 : async fn create_delta(
3194 0 : &mut self,
3195 0 : lsn_range: &Range<Lsn>,
3196 0 : key_range: &Range<Key>,
3197 0 : input_layers: &[ResidentDeltaLayer],
3198 0 : ctx: &RequestContext,
3199 0 : ) -> anyhow::Result<()> {
3200 0 : debug!("Create new layer {}..{}", lsn_range.start, lsn_range.end);
3201 :
3202 0 : let mut all_entries = Vec::new();
3203 0 : for dl in input_layers.iter() {
3204 0 : all_entries.extend(dl.load_keys(ctx).await?);
3205 : }
3206 :
3207          : // The current stdlib sorting implementation is designed so that it is
3208          : // particularly fast when the slice is made up of sorted sub-ranges.
3209 0 : all_entries.sort_by_key(|DeltaEntry { key, lsn, .. }| (*key, *lsn));
3210 :
3211 0 : let mut writer = DeltaLayerWriter::new(
3212 0 : self.timeline.conf,
3213 0 : self.timeline.timeline_id,
3214 0 : self.timeline.tenant_shard_id,
3215 0 : key_range.start,
3216 0 : lsn_range.clone(),
3217 0 : ctx,
3218 0 : )
3219 0 : .await?;
3220 :
3221 0 : let mut dup_values = 0;
3222 0 :
3223 0 : // This iterator walks through all key-value pairs from all the layers
3224 0 : // we're compacting, in key, LSN order.
3225 0 : let mut prev: Option<(Key, Lsn)> = None;
3226 : for &DeltaEntry {
3227 0 : key, lsn, ref val, ..
3228 0 : } in all_entries.iter()
3229 : {
3230 0 : if prev == Some((key, lsn)) {
3231 : // This is a duplicate. Skip it.
3232 : //
3233 : // It can happen if compaction is interrupted after writing some
3234 : // layers but not all, and we are compacting the range again.
3235 : // The calculations in the algorithm assume that there are no
3236 : // duplicates, so the math on targeted file size is likely off,
3237 : // and we will create smaller files than expected.
3238 0 : dup_values += 1;
3239 0 : continue;
3240 0 : }
3241 :
3242 0 : let value = val.load(ctx).await?;
3243 :
3244 0 : writer.put_value(key, lsn, value, ctx).await?;
3245 :
3246 0 : prev = Some((key, lsn));
3247 : }
3248 :
3249 0 : if dup_values > 0 {
3250 0 : warn!("delta layer created with {} duplicate values", dup_values);
3251 0 : }
3252 :
3253 0 : fail_point!("delta-layer-writer-fail-before-finish", |_| {
3254 0 : Err(anyhow::anyhow!(
3255 0 : "failpoint delta-layer-writer-fail-before-finish"
3256 0 : ))
3257 0 : });
3258 :
3259 0 : let (desc, path) = writer.finish(prev.unwrap().0.next(), ctx).await?;
3260 0 : let new_delta_layer =
3261 0 : Layer::finish_creating(self.timeline.conf, &self.timeline, desc, &path)?;
3262 :
3263 0 : self.new_deltas.push(new_delta_layer);
3264 0 : Ok(())
3265 0 : }
3266 :
3267 0 : async fn delete_layer(
3268 0 : &mut self,
3269 0 : layer: &OwnArc<PersistentLayerDesc>,
3270 0 : _ctx: &RequestContext,
3271 0 : ) -> anyhow::Result<()> {
3272 0 : self.layers_to_delete.push(layer.clone().0);
3273 0 : Ok(())
3274 0 : }
3275 : }
3276 :
3277 : impl TimelineAdaptor {
3278 0 : async fn create_image_impl(
3279 0 : &mut self,
3280 0 : lsn: Lsn,
3281 0 : key_range: &Range<Key>,
3282 0 : ctx: &RequestContext,
3283 0 : ) -> Result<(), CreateImageLayersError> {
3284 0 : let timer = self.timeline.metrics.create_images_time_histo.start_timer();
3285 :
3286 0 : let image_layer_writer = ImageLayerWriter::new(
3287 0 : self.timeline.conf,
3288 0 : self.timeline.timeline_id,
3289 0 : self.timeline.tenant_shard_id,
3290 0 : key_range,
3291 0 : lsn,
3292 0 : ctx,
3293 0 : )
3294 0 : .await?;
3295 :
3296 0 : fail_point!("image-layer-writer-fail-before-finish", |_| {
3297 0 : Err(CreateImageLayersError::Other(anyhow::anyhow!(
3298 0 : "failpoint image-layer-writer-fail-before-finish"
3299 0 : )))
3300 0 : });
3301 :
3302 0 : let keyspace = KeySpace {
3303 0 : ranges: self.get_keyspace(key_range, lsn, ctx).await?,
3304 : };
3305 : // TODO set proper (stateful) start. The create_image_layer_for_rel_blocks function mostly
3306 0 : let outcome = self
3307 0 : .timeline
3308 0 : .create_image_layer_for_rel_blocks(
3309 0 : &keyspace,
3310 0 : image_layer_writer,
3311 0 : lsn,
3312 0 : ctx,
3313 0 : key_range.clone(),
3314 0 : IoConcurrency::sequential(),
3315 0 : )
3316 0 : .await?;
3317 :
3318 : if let ImageLayerCreationOutcome::Generated {
3319 0 : unfinished_image_layer,
3320 0 : } = outcome
3321 : {
3322 0 : let (desc, path) = unfinished_image_layer.finish(ctx).await?;
3323 0 : let image_layer =
3324 0 : Layer::finish_creating(self.timeline.conf, &self.timeline, desc, &path)?;
3325 0 : self.new_images.push(image_layer);
3326 0 : }
3327 :
3328 0 : timer.stop_and_record();
3329 0 :
3330 0 : Ok(())
3331 0 : }
3332 : }
3333 :
3334 : impl CompactionRequestContext for crate::context::RequestContext {}
3335 :
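              : /// Owned wrapper around an `Arc<T>`, used to implement the compaction interface's layer traits
              : /// on `Arc`-backed layer descriptors.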
3336 : #[derive(Debug, Clone)]
3337 : pub struct OwnArc<T>(pub Arc<T>);
3338 :
3339 : impl<T> Deref for OwnArc<T> {
3340 : type Target = <Arc<T> as Deref>::Target;
3341 0 : fn deref(&self) -> &Self::Target {
3342 0 : &self.0
3343 0 : }
3344 : }
3345 :
3346 : impl<T> AsRef<T> for OwnArc<T> {
3347 0 : fn as_ref(&self) -> &T {
3348 0 : self.0.as_ref()
3349 0 : }
3350 : }
3351 :
3352 : impl CompactionLayer<Key> for OwnArc<PersistentLayerDesc> {
3353 0 : fn key_range(&self) -> &Range<Key> {
3354 0 : &self.key_range
3355 0 : }
3356 0 : fn lsn_range(&self) -> &Range<Lsn> {
3357 0 : &self.lsn_range
3358 0 : }
3359 0 : fn file_size(&self) -> u64 {
3360 0 : self.file_size
3361 0 : }
3362 0 : fn short_id(&self) -> std::string::String {
3363 0 : self.as_ref().short_id().to_string()
3364 0 : }
3365 0 : fn is_delta(&self) -> bool {
3366 0 : self.as_ref().is_delta()
3367 0 : }
3368 : }
3369 :
3370 : impl CompactionLayer<Key> for OwnArc<DeltaLayer> {
3371 0 : fn key_range(&self) -> &Range<Key> {
3372 0 : &self.layer_desc().key_range
3373 0 : }
3374 0 : fn lsn_range(&self) -> &Range<Lsn> {
3375 0 : &self.layer_desc().lsn_range
3376 0 : }
3377 0 : fn file_size(&self) -> u64 {
3378 0 : self.layer_desc().file_size
3379 0 : }
3380 0 : fn short_id(&self) -> std::string::String {
3381 0 : self.layer_desc().short_id().to_string()
3382 0 : }
3383 0 : fn is_delta(&self) -> bool {
3384 0 : true
3385 0 : }
3386 : }
3387 :
3388 : use crate::tenant::timeline::DeltaEntry;
3389 :
3390 : impl CompactionLayer<Key> for ResidentDeltaLayer {
3391 0 : fn key_range(&self) -> &Range<Key> {
3392 0 : &self.0.layer_desc().key_range
3393 0 : }
3394 0 : fn lsn_range(&self) -> &Range<Lsn> {
3395 0 : &self.0.layer_desc().lsn_range
3396 0 : }
3397 0 : fn file_size(&self) -> u64 {
3398 0 : self.0.layer_desc().file_size
3399 0 : }
3400 0 : fn short_id(&self) -> std::string::String {
3401 0 : self.0.layer_desc().short_id().to_string()
3402 0 : }
3403 0 : fn is_delta(&self) -> bool {
3404 0 : true
3405 0 : }
3406 : }
3407 :
3408 : impl CompactionDeltaLayer<TimelineAdaptor> for ResidentDeltaLayer {
3409 : type DeltaEntry<'a> = DeltaEntry<'a>;
3410 :
3411 0 : async fn load_keys(&self, ctx: &RequestContext) -> anyhow::Result<Vec<DeltaEntry<'_>>> {
3412 0 : self.0.get_as_delta(ctx).await?.index_entries(ctx).await
3413 0 : }
3414 : }
3415 :
3416 : impl CompactionLayer<Key> for ResidentImageLayer {
3417 0 : fn key_range(&self) -> &Range<Key> {
3418 0 : &self.0.layer_desc().key_range
3419 0 : }
3420 0 : fn lsn_range(&self) -> &Range<Lsn> {
3421 0 : &self.0.layer_desc().lsn_range
3422 0 : }
3423 0 : fn file_size(&self) -> u64 {
3424 0 : self.0.layer_desc().file_size
3425 0 : }
3426 0 : fn short_id(&self) -> std::string::String {
3427 0 : self.0.layer_desc().short_id().to_string()
3428 0 : }
3429 0 : fn is_delta(&self) -> bool {
3430 0 : false
3431 0 : }
3432 : }
3433 : impl CompactionImageLayer<TimelineAdaptor> for ResidentImageLayer {}
|