Line data Source code
1 : //! New compaction implementation. The algorithm itself is implemented in the
2 : //! compaction crate. This file implements the callbacks and structs that allow
3 : //! the algorithm to drive the process.
4 : //!
5 : //! The old legacy algorithm is implemented directly in `timeline.rs`.
6 :
7 : use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque};
8 : use std::ops::{Deref, Range};
9 : use std::sync::Arc;
10 :
11 : use super::layer_manager::LayerManager;
12 : use super::{
13 : CompactFlags, CompactOptions, CreateImageLayersError, DurationRecorder, ImageLayerCreationMode,
14 : RecordedDuration, Timeline,
15 : };
16 :
17 : use anyhow::{anyhow, bail, Context};
18 : use bytes::Bytes;
19 : use enumset::EnumSet;
20 : use fail::fail_point;
21 : use itertools::Itertools;
22 : use pageserver_api::key::KEY_SIZE;
23 : use pageserver_api::keyspace::ShardedRange;
24 : use pageserver_api::models::CompactInfoResponse;
25 : use pageserver_api::shard::{ShardCount, ShardIdentity, TenantShardId};
26 : use serde::Serialize;
27 : use tokio_util::sync::CancellationToken;
28 : use tracing::{debug, info, info_span, trace, warn, Instrument};
29 : use utils::id::TimelineId;
30 :
31 : use crate::context::{AccessStatsBehavior, RequestContext, RequestContextBuilder};
32 : use crate::page_cache;
33 : use crate::statvfs::Statvfs;
34 : use crate::tenant::checks::check_valid_layermap;
35 : use crate::tenant::gc_block::GcBlock;
36 : use crate::tenant::remote_timeline_client::WaitCompletionError;
37 : use crate::tenant::storage_layer::batch_split_writer::{
38 : BatchWriterResult, SplitDeltaLayerWriter, SplitImageLayerWriter,
39 : };
40 : use crate::tenant::storage_layer::filter_iterator::FilterIterator;
41 : use crate::tenant::storage_layer::merge_iterator::MergeIterator;
42 : use crate::tenant::storage_layer::{
43 : AsLayerDesc, PersistentLayerDesc, PersistentLayerKey, ValueReconstructState,
44 : };
45 : use crate::tenant::timeline::{drop_rlock, DeltaLayerWriter, ImageLayerWriter};
46 : use crate::tenant::timeline::{ImageLayerCreationOutcome, IoConcurrency};
47 : use crate::tenant::timeline::{Layer, ResidentLayer};
48 : use crate::tenant::{gc_block, DeltaLayer, MaybeOffloaded};
49 : use crate::virtual_file::{MaybeFatalIo, VirtualFile};
50 : use pageserver_api::config::tenant_conf_defaults::DEFAULT_CHECKPOINT_DISTANCE;
51 :
52 : use pageserver_api::key::Key;
53 : use pageserver_api::keyspace::KeySpace;
54 : use pageserver_api::record::NeonWalRecord;
55 : use pageserver_api::value::Value;
56 :
57 : use utils::lsn::Lsn;
58 :
59 : use pageserver_compaction::helpers::{fully_contains, overlaps_with};
60 : use pageserver_compaction::interface::*;
61 :
62 : use super::CompactionError;
63 :
64 : /// Maximum number of deltas before generating an image layer in bottom-most compaction.
65 : const COMPACTION_DELTA_THRESHOLD: usize = 5;
66 :
67 : #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
68 : pub struct GcCompactionJobId(pub usize);
69 :
70 : impl std::fmt::Display for GcCompactionJobId {
71 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
72 0 : write!(f, "{}", self.0)
73 0 : }
74 : }
75 :
76 : #[derive(Debug, Clone)]
77 : pub enum GcCompactionQueueItem {
78 : Manual(CompactOptions),
79 : SubCompactionJob(CompactOptions),
80 : #[allow(dead_code)]
81 : UpdateL2Lsn(Lsn),
82 : Notify(GcCompactionJobId),
83 : }
84 :
85 : impl GcCompactionQueueItem {
86 0 : pub fn into_compact_info_resp(
87 0 : self,
88 0 : id: GcCompactionJobId,
89 0 : running: bool,
90 0 : ) -> Option<CompactInfoResponse> {
91 0 : match self {
92 0 : GcCompactionQueueItem::Manual(options) => Some(CompactInfoResponse {
93 0 : compact_key_range: options.compact_key_range,
94 0 : compact_lsn_range: options.compact_lsn_range,
95 0 : sub_compaction: options.sub_compaction,
96 0 : running,
97 0 : job_id: id.0,
98 0 : }),
99 0 : GcCompactionQueueItem::SubCompactionJob(options) => Some(CompactInfoResponse {
100 0 : compact_key_range: options.compact_key_range,
101 0 : compact_lsn_range: options.compact_lsn_range,
102 0 : sub_compaction: options.sub_compaction,
103 0 : running,
104 0 : job_id: id.0,
105 0 : }),
106 0 : GcCompactionQueueItem::UpdateL2Lsn(_) => None,
107 0 : GcCompactionQueueItem::Notify(_) => None,
108 : }
109 0 : }
110 : }
111 :
112 : struct GcCompactionQueueInner {
113 : running: Option<(GcCompactionJobId, GcCompactionQueueItem)>,
114 : queued: VecDeque<(GcCompactionJobId, GcCompactionQueueItem)>,
115 : notify: HashMap<GcCompactionJobId, tokio::sync::oneshot::Sender<()>>,
116 : gc_guards: HashMap<GcCompactionJobId, gc_block::Guard>,
117 : last_id: GcCompactionJobId,
118 : }
119 :
120 : impl GcCompactionQueueInner {
121 0 : fn next_id(&mut self) -> GcCompactionJobId {
122 0 : let id = self.last_id;
123 0 : self.last_id = GcCompactionJobId(id.0 + 1);
124 0 : id
125 0 : }
126 : }
127 :
128 : /// A structure to store gc_compaction jobs.
129 : pub struct GcCompactionQueue {
130 : /// All items in the queue, and the currently-running job.
131 : inner: std::sync::Mutex<GcCompactionQueueInner>,
132 : /// Ensure only one thread is consuming the queue.
133 : consumer_lock: tokio::sync::Mutex<()>,
134 : }
135 :
136 : impl GcCompactionQueue {
137 0 : pub fn new() -> Self {
138 0 : GcCompactionQueue {
139 0 : inner: std::sync::Mutex::new(GcCompactionQueueInner {
140 0 : running: None,
141 0 : queued: VecDeque::new(),
142 0 : notify: HashMap::new(),
143 0 : gc_guards: HashMap::new(),
144 0 : last_id: GcCompactionJobId(0),
145 0 : }),
146 0 : consumer_lock: tokio::sync::Mutex::new(()),
147 0 : }
148 0 : }
149 :
150 0 : pub fn cancel_scheduled(&self) {
151 0 : let mut guard = self.inner.lock().unwrap();
152 0 : guard.queued.clear();
153 0 : guard.notify.clear();
154 0 : guard.gc_guards.clear();
155 0 : }
156 :
157 : /// Schedule a manual compaction job.
158 0 : pub fn schedule_manual_compaction(
159 0 : &self,
160 0 : options: CompactOptions,
161 0 : notify: Option<tokio::sync::oneshot::Sender<()>>,
162 0 : ) -> GcCompactionJobId {
163 0 : let mut guard = self.inner.lock().unwrap();
164 0 : let id = guard.next_id();
165 0 : guard
166 0 : .queued
167 0 : .push_back((id, GcCompactionQueueItem::Manual(options)));
168 0 : if let Some(notify) = notify {
169 0 : guard.notify.insert(id, notify);
170 0 : }
171 0 : info!("scheduled compaction job id={}", id);
172 0 : id
173 0 : }
174 :
175 : /// Trigger an auto compaction.
176 : #[allow(dead_code)]
177 0 : pub fn trigger_auto_compaction(&self, _: &Arc<Timeline>) {}
178 :
179 : /// Notify the caller the job has finished and unblock GC.
180 0 : fn notify_and_unblock(&self, id: GcCompactionJobId) {
181 0 : info!("compaction job id={} finished", id);
182 0 : let mut guard = self.inner.lock().unwrap();
183 0 : if let Some(blocking) = guard.gc_guards.remove(&id) {
184 0 : drop(blocking)
185 0 : }
186 0 : if let Some(tx) = guard.notify.remove(&id) {
187 0 : let _ = tx.send(());
188 0 : }
189 0 : }
190 :
191 0 : async fn handle_sub_compaction(
192 0 : &self,
193 0 : id: GcCompactionJobId,
194 0 : options: CompactOptions,
195 0 : timeline: &Arc<Timeline>,
196 0 : gc_block: &GcBlock,
197 0 : ) -> Result<(), CompactionError> {
198 0 : info!("running scheduled enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs");
199 0 : let jobs: Vec<GcCompactJob> = timeline
200 0 : .gc_compaction_split_jobs(
201 0 : GcCompactJob::from_compact_options(options.clone()),
202 0 : options.sub_compaction_max_job_size_mb,
203 0 : )
204 0 : .await
205 0 : .map_err(CompactionError::Other)?;
206 0 : if jobs.is_empty() {
207 0 : info!("no jobs to run, skipping scheduled compaction task");
208 0 : self.notify_and_unblock(id);
209 : } else {
210 0 : let gc_guard = match gc_block.start().await {
211 0 : Ok(guard) => guard,
212 0 : Err(e) => {
213 0 : return Err(CompactionError::Other(anyhow!(
214 0 : "cannot run gc-compaction because gc is blocked: {}",
215 0 : e
216 0 : )));
217 : }
218 : };
219 :
220 0 : let jobs_len = jobs.len();
221 0 : let mut pending_tasks = Vec::new();
222 0 : for job in jobs {
223 : // Unfortunately we need to convert the `GcCompactJob` back to `CompactOptions`
224 : // until further refactoring allows us to call `compact_with_gc` directly.
225 0 : let mut flags: EnumSet<CompactFlags> = EnumSet::default();
226 0 : flags |= CompactFlags::EnhancedGcBottomMostCompaction;
227 0 : if job.dry_run {
228 0 : flags |= CompactFlags::DryRun;
229 0 : }
230 0 : let options = CompactOptions {
231 0 : flags,
232 0 : sub_compaction: false,
233 0 : compact_key_range: Some(job.compact_key_range.into()),
234 0 : compact_lsn_range: Some(job.compact_lsn_range.into()),
235 0 : sub_compaction_max_job_size_mb: None,
236 0 : };
237 0 : pending_tasks.push(GcCompactionQueueItem::SubCompactionJob(options));
238 : }
239 0 : pending_tasks.push(GcCompactionQueueItem::Notify(id));
240 0 : {
241 0 : let mut guard = self.inner.lock().unwrap();
242 0 : guard.gc_guards.insert(id, gc_guard);
243 0 : let mut tasks = Vec::new();
244 0 : for task in pending_tasks {
245 0 : let id = guard.next_id();
246 0 : tasks.push((id, task));
247 0 : }
248 0 : tasks.reverse();
249 0 : for item in tasks {
250 0 : guard.queued.push_front(item);
251 0 : }
252 : }
253 0 : info!("scheduled enhanced gc bottom-most compaction with sub-compaction, split into {} jobs", jobs_len);
254 : }
255 0 : Ok(())
256 0 : }
257 :
258 : /// Take a job from the queue and process it. Returns whether there are still pending tasks.
259 0 : pub async fn iteration(
260 0 : &self,
261 0 : cancel: &CancellationToken,
262 0 : ctx: &RequestContext,
263 0 : gc_block: &GcBlock,
264 0 : timeline: &Arc<Timeline>,
265 0 : ) -> Result<bool, CompactionError> {
266 0 : let _one_op_at_a_time_guard = self.consumer_lock.lock().await;
267 : let has_pending_tasks;
268 0 : let (id, item) = {
269 0 : let mut guard = self.inner.lock().unwrap();
270 0 : let Some((id, item)) = guard.queued.pop_front() else {
271 0 : return Ok(false);
272 : };
273 0 : guard.running = Some((id, item.clone()));
274 0 : has_pending_tasks = !guard.queued.is_empty();
275 0 : (id, item)
276 0 : };
277 0 :
278 0 : match item {
279 0 : GcCompactionQueueItem::Manual(options) => {
280 0 : if !options
281 0 : .flags
282 0 : .contains(CompactFlags::EnhancedGcBottomMostCompaction)
283 : {
284 0 : warn!("ignoring scheduled compaction task: scheduled task must be gc compaction: {:?}", options);
285 0 : } else if options.sub_compaction {
286 0 : self.handle_sub_compaction(id, options, timeline, gc_block)
287 0 : .await?;
288 : } else {
289 0 : let gc_guard = match gc_block.start().await {
290 0 : Ok(guard) => guard,
291 0 : Err(e) => {
292 0 : return Err(CompactionError::Other(anyhow!(
293 0 : "cannot run gc-compaction because gc is blocked: {}",
294 0 : e
295 0 : )));
296 : }
297 : };
298 0 : {
299 0 : let mut guard = self.inner.lock().unwrap();
300 0 : guard.gc_guards.insert(id, gc_guard);
301 0 : }
302 0 : let _ = timeline
303 0 : .compact_with_options(cancel, options, ctx)
304 0 : .instrument(info_span!("scheduled_compact_timeline", %timeline.timeline_id))
305 0 : .await?;
306 0 : self.notify_and_unblock(id);
307 : }
308 : }
309 0 : GcCompactionQueueItem::SubCompactionJob(options) => {
310 0 : let _ = timeline
311 0 : .compact_with_options(cancel, options, ctx)
312 0 : .instrument(info_span!("scheduled_compact_timeline", %timeline.timeline_id))
313 0 : .await?;
314 : }
315 0 : GcCompactionQueueItem::Notify(id) => {
316 0 : self.notify_and_unblock(id);
317 0 : }
318 : GcCompactionQueueItem::UpdateL2Lsn(_) => {
319 0 : unreachable!()
320 : }
321 : }
322 0 : {
323 0 : let mut guard = self.inner.lock().unwrap();
324 0 : guard.running = None;
325 0 : }
326 0 : Ok(has_pending_tasks)
327 0 : }
328 :
329 : #[allow(clippy::type_complexity)]
330 0 : pub fn remaining_jobs(
331 0 : &self,
332 0 : ) -> (
333 0 : Option<(GcCompactionJobId, GcCompactionQueueItem)>,
334 0 : VecDeque<(GcCompactionJobId, GcCompactionQueueItem)>,
335 0 : ) {
336 0 : let guard = self.inner.lock().unwrap();
337 0 : (guard.running.clone(), guard.queued.clone())
338 0 : }
339 :
340 : #[allow(dead_code)]
341 0 : pub fn remaining_jobs_num(&self) -> usize {
342 0 : let guard = self.inner.lock().unwrap();
343 0 : guard.queued.len() + if guard.running.is_some() { 1 } else { 0 }
344 0 : }
345 : }
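// For intuition, a minimal sketch of how a caller could drive this queue: schedule one manual
// gc-compaction job, then call `iteration` repeatedly until it reports that nothing is pending.
// The helper name below is hypothetical and not part of the production control flow.
#[allow(dead_code)]
async fn gc_compaction_queue_usage_sketch(
    queue: &GcCompactionQueue,
    cancel: &CancellationToken,
    ctx: &RequestContext,
    gc_block: &GcBlock,
    timeline: &Arc<Timeline>,
) -> Result<(), CompactionError> {
    // Scheduled tasks must be gc-compaction, otherwise `iteration` warns and ignores them.
    let mut flags: EnumSet<CompactFlags> = EnumSet::default();
    flags |= CompactFlags::EnhancedGcBottomMostCompaction;
    let options = CompactOptions {
        flags,
        sub_compaction: false,
        compact_key_range: None,
        compact_lsn_range: None,
        sub_compaction_max_job_size_mb: None,
    };
    let (tx, rx) = tokio::sync::oneshot::channel();
    let _job_id = queue.schedule_manual_compaction(options, Some(tx));
    // `iteration` pops one item, runs it, and returns whether more items remain queued.
    while queue.iteration(cancel, ctx, gc_block, timeline).await? {}
    let _ = rx.await; // completion signal sent by `notify_and_unblock`
    Ok(())
}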
346 :
347 : /// A job description for the gc-compaction job. This structure describes the rectangle range that the job will
348 : /// process. The exact layers that need to be compacted/rewritten will be generated when `compact_with_gc` gets
349 : /// called.
350 : #[derive(Debug, Clone)]
351 : pub(crate) struct GcCompactJob {
352 : pub dry_run: bool,
353 : /// The key range to be compacted. The compaction algorithm will only regenerate key-value pairs within this range
354 : /// [left inclusive, right exclusive), and other pairs will be rewritten into new files if necessary.
355 : pub compact_key_range: Range<Key>,
356 : /// The LSN range to be compacted. The compaction algorithm will use this range to determine the layers to be
357 : /// selected for the compaction, and it does not guarantee the generated layers will have exactly the same LSN range
358 : /// as specified here. The true range being compacted is `min_lsn/max_lsn` in [`GcCompactionJobDescription`].
359 : /// min_lsn will always be <= the lower bound specified here, and max_lsn will always be >= the upper bound specified here.
360 : pub compact_lsn_range: Range<Lsn>,
361 : }
362 :
363 : impl GcCompactJob {
364 108 : pub fn from_compact_options(options: CompactOptions) -> Self {
365 108 : GcCompactJob {
366 108 : dry_run: options.flags.contains(CompactFlags::DryRun),
367 108 : compact_key_range: options
368 108 : .compact_key_range
369 108 : .map(|x| x.into())
370 108 : .unwrap_or(Key::MIN..Key::MAX),
371 108 : compact_lsn_range: options
372 108 : .compact_lsn_range
373 108 : .map(|x| x.into())
374 108 : .unwrap_or(Lsn::INVALID..Lsn::MAX),
375 108 : }
376 108 : }
377 : }
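// For example (a sketch): with `compact_key_range` and `compact_lsn_range` left unset, the
// resulting job covers the full keyspace and the full LSN range:
//
//     let job = GcCompactJob::from_compact_options(options);
//     // job.compact_key_range == Key::MIN..Key::MAX
//     // job.compact_lsn_range == Lsn::INVALID..Lsn::MAX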
378 :
379 : /// A job description for the gc-compaction job. This structure is generated when `compact_with_gc` is called
380 : /// and contains the exact layers we want to compact.
381 : pub struct GcCompactionJobDescription {
382 : /// All layers to read in the compaction job
383 : selected_layers: Vec<Layer>,
384 : /// GC cutoff of the job. This is the lowest LSN that will be accessed by the read/GC path and we need to
385 : /// keep all deltas <= this LSN or generate an image == this LSN.
386 : gc_cutoff: Lsn,
387 : /// LSNs to retain for the job. Read path will use this LSN so we need to keep deltas <= this LSN or
388 : /// generate an image == this LSN.
389 : retain_lsns_below_horizon: Vec<Lsn>,
390 : /// Maximum layer LSN processed in this compaction, that is max(end_lsn of layers). Exclusive. All data
391 : /// \>= this LSN will be kept and will not be rewritten.
392 : max_layer_lsn: Lsn,
393 : /// Minimum layer LSN processed in this compaction, that is min(start_lsn of layers). Inclusive.
394 : /// All accesses below (strictly lower than, `<`) this LSN will be routed through the normal read path instead of
395 : /// k-merge within gc-compaction.
396 : min_layer_lsn: Lsn,
397 : /// Only compact layers overlapping with this range.
398 : compaction_key_range: Range<Key>,
399 : /// When partial compaction is enabled, these layers need to be rewritten to ensure no overlap.
400 : /// This field is here solely for debugging. The field will not be read once the compaction
401 : /// description is generated.
402 : rewrite_layers: Vec<Arc<PersistentLayerDesc>>,
403 : }
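// A worked example of the LSN bounds above (a sketch with made-up LSNs): if the selected layers
// span LSNs [0x10, 0x50), then min_layer_lsn = 0x10 and max_layer_lsn = 0x50. With
// gc_cutoff = 0x40 and retain_lsns_below_horizon = [0x20, 0x30], the job must keep enough
// history to reconstruct every key at 0x20, 0x30 and 0x40 (deltas <= that LSN, or an image at
// exactly that LSN); accesses below 0x10 go through the normal read path, and data at
// LSN >= 0x50 is kept and not rewritten.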
404 :
405 : /// The result of bottom-most compaction for a single key at each LSN.
406 : #[derive(Debug)]
407 : #[cfg_attr(test, derive(PartialEq))]
408 : pub struct KeyLogAtLsn(pub Vec<(Lsn, Value)>);
409 :
410 : /// The result of bottom-most compaction.
411 : #[derive(Debug)]
412 : #[cfg_attr(test, derive(PartialEq))]
413 : pub(crate) struct KeyHistoryRetention {
414 : /// Stores logs to reconstruct the value at the given LSN, that is to say, logs <= LSN or image == LSN.
415 : pub(crate) below_horizon: Vec<(Lsn, KeyLogAtLsn)>,
416 : /// Stores logs to reconstruct the value at any LSN above the horizon, that is to say, logs > LSN.
417 : pub(crate) above_horizon: KeyLogAtLsn,
418 : }
419 :
420 : impl KeyHistoryRetention {
421 : /// Hack: skip the delta layer if we need to produce a layer with the same key-lsn.
422 : ///
423 : /// This can happen if we have removed some deltas in "the middle" of some existing layer's key-lsn-range.
424 : /// For example, consider the case where a single delta with range [0x10,0x50) exists.
425 : /// And we have branches at LSN 0x10, 0x20, 0x30.
426 : /// Then we delete branch @ 0x20.
427 : /// Bottom-most compaction may now delete the delta [0x20,0x30).
428 : /// And that wouldn't change the shape of the layer.
429 : ///
430 : /// Note that bottom-most-gc-compaction never _adds_ new data in that case, only removes.
431 : ///
432 : /// `discard_key` will only be called when the writer reaches its target (instead of for every key), so it's fine to grab a lock inside.
433 148 : async fn discard_key(key: &PersistentLayerKey, tline: &Arc<Timeline>, dry_run: bool) -> bool {
434 148 : if dry_run {
435 0 : return true;
436 148 : }
437 : let layer_generation;
438 : {
439 148 : let guard = tline.layers.read().await;
440 148 : if !guard.contains_key(key) {
441 104 : return false;
442 44 : }
443 44 : layer_generation = guard.get_from_key(key).metadata().generation;
444 44 : }
445 44 : if layer_generation == tline.generation {
446 44 : info!(
447 : key=%key,
448 : ?layer_generation,
449 0 : "discard layer due to duplicated layer key in the same generation",
450 : );
451 44 : true
452 : } else {
453 0 : false
454 : }
455 148 : }
456 :
457 : /// Pipe a history of a single key to the writers.
458 : ///
459 : /// If `image_writer` is none, the images will be placed into the delta layers.
460 : /// The delta writer will contain all images and deltas (below and above the horizon) except the bottom-most images.
461 : #[allow(clippy::too_many_arguments)]
462 1244 : async fn pipe_to(
463 1244 : self,
464 1244 : key: Key,
465 1244 : delta_writer: &mut SplitDeltaLayerWriter,
466 1244 : mut image_writer: Option<&mut SplitImageLayerWriter>,
467 1244 : stat: &mut CompactionStatistics,
468 1244 : ctx: &RequestContext,
469 1244 : ) -> anyhow::Result<()> {
470 1244 : let mut first_batch = true;
471 4024 : for (cutoff_lsn, KeyLogAtLsn(logs)) in self.below_horizon {
472 2780 : if first_batch {
473 1244 : if logs.len() == 1 && logs[0].1.is_image() {
474 1168 : let Value::Image(img) = &logs[0].1 else {
475 0 : unreachable!()
476 : };
477 1168 : stat.produce_image_key(img);
478 1168 : if let Some(image_writer) = image_writer.as_mut() {
479 1168 : image_writer.put_image(key, img.clone(), ctx).await?;
480 : } else {
481 0 : delta_writer
482 0 : .put_value(key, cutoff_lsn, Value::Image(img.clone()), ctx)
483 0 : .await?;
484 : }
485 : } else {
486 132 : for (lsn, val) in logs {
487 56 : stat.produce_key(&val);
488 56 : delta_writer.put_value(key, lsn, val, ctx).await?;
489 : }
490 : }
491 1244 : first_batch = false;
492 : } else {
493 1768 : for (lsn, val) in logs {
494 232 : stat.produce_key(&val);
495 232 : delta_writer.put_value(key, lsn, val, ctx).await?;
496 : }
497 : }
498 : }
499 1244 : let KeyLogAtLsn(above_horizon_logs) = self.above_horizon;
500 1360 : for (lsn, val) in above_horizon_logs {
501 116 : stat.produce_key(&val);
502 116 : delta_writer.put_value(key, lsn, val, ctx).await?;
503 : }
504 1244 : Ok(())
505 1244 : }
506 : }
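// A worked example of how `pipe_to` routes one key's history (a sketch with made-up LSNs):
//
//     below_horizon = [
//         (0x20, [(0x20, Value::Image(A))]),                         // first batch: a lone image
//         (0x30, [(0x28, Value::WalRecord(d1)), (0x30, Value::WalRecord(d2))]),
//     ]
//     above_horizon = [(0x40, Value::WalRecord(d3))]
//
// The lone image in the first batch goes to the image writer (or to the delta writer at LSN 0x20
// if no image writer was supplied). Everything else, the second batch and the above-horizon logs,
// goes to the delta writer at its own LSN. Only the first below-horizon batch is eligible for the
// image writer.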
507 :
508 : #[derive(Debug, Serialize, Default)]
509 : struct CompactionStatisticsNumSize {
510 : num: u64,
511 : size: u64,
512 : }
513 :
514 : #[derive(Debug, Serialize, Default)]
515 : pub struct CompactionStatistics {
516 : delta_layer_visited: CompactionStatisticsNumSize,
517 : image_layer_visited: CompactionStatisticsNumSize,
518 : delta_layer_produced: CompactionStatisticsNumSize,
519 : image_layer_produced: CompactionStatisticsNumSize,
520 : num_delta_layer_discarded: usize,
521 : num_image_layer_discarded: usize,
522 : num_unique_keys_visited: usize,
523 : wal_keys_visited: CompactionStatisticsNumSize,
524 : image_keys_visited: CompactionStatisticsNumSize,
525 : wal_produced: CompactionStatisticsNumSize,
526 : image_produced: CompactionStatisticsNumSize,
527 : }
528 :
529 : impl CompactionStatistics {
530 2084 : fn estimated_size_of_value(val: &Value) -> usize {
531 864 : match val {
532 1220 : Value::Image(img) => img.len(),
533 0 : Value::WalRecord(NeonWalRecord::Postgres { rec, .. }) => rec.len(),
534 864 : _ => std::mem::size_of::<NeonWalRecord>(),
535 : }
536 2084 : }
537 3272 : fn estimated_size_of_key() -> usize {
538 3272 : KEY_SIZE // TODO: distinguish image layer and delta layer (count LSN in delta layer)
539 3272 : }
540 172 : fn visit_delta_layer(&mut self, size: u64) {
541 172 : self.delta_layer_visited.num += 1;
542 172 : self.delta_layer_visited.size += size;
543 172 : }
544 132 : fn visit_image_layer(&mut self, size: u64) {
545 132 : self.image_layer_visited.num += 1;
546 132 : self.image_layer_visited.size += size;
547 132 : }
548 1244 : fn on_unique_key_visited(&mut self) {
549 1244 : self.num_unique_keys_visited += 1;
550 1244 : }
551 480 : fn visit_wal_key(&mut self, val: &Value) {
552 480 : self.wal_keys_visited.num += 1;
553 480 : self.wal_keys_visited.size +=
554 480 : Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
555 480 : }
556 1220 : fn visit_image_key(&mut self, val: &Value) {
557 1220 : self.image_keys_visited.num += 1;
558 1220 : self.image_keys_visited.size +=
559 1220 : Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
560 1220 : }
561 404 : fn produce_key(&mut self, val: &Value) {
562 404 : match val {
563 20 : Value::Image(img) => self.produce_image_key(img),
564 384 : Value::WalRecord(_) => self.produce_wal_key(val),
565 : }
566 404 : }
567 384 : fn produce_wal_key(&mut self, val: &Value) {
568 384 : self.wal_produced.num += 1;
569 384 : self.wal_produced.size +=
570 384 : Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
571 384 : }
572 1188 : fn produce_image_key(&mut self, val: &Bytes) {
573 1188 : self.image_produced.num += 1;
574 1188 : self.image_produced.size += val.len() as u64 + Self::estimated_size_of_key() as u64;
575 1188 : }
576 28 : fn discard_delta_layer(&mut self) {
577 28 : self.num_delta_layer_discarded += 1;
578 28 : }
579 16 : fn discard_image_layer(&mut self) {
580 16 : self.num_image_layer_discarded += 1;
581 16 : }
582 44 : fn produce_delta_layer(&mut self, size: u64) {
583 44 : self.delta_layer_produced.num += 1;
584 44 : self.delta_layer_produced.size += size;
585 44 : }
586 60 : fn produce_image_layer(&mut self, size: u64) {
587 60 : self.image_layer_produced.num += 1;
588 60 : self.image_layer_produced.size += size;
589 60 : }
590 : }
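// For example (a sketch of the accounting above): visiting an 8 KiB image value for one key adds
// 8192 + KEY_SIZE bytes to `image_keys_visited.size` and bumps `image_keys_visited.num`, while a
// WAL record is accounted under `wal_keys_visited` using `size_of::<NeonWalRecord>()` as the
// estimate, unless it is a `NeonWalRecord::Postgres` record, in which case the actual record
// length is used.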
591 :
592 : impl Timeline {
593 : /// TODO: cancellation
594 : ///
595 : /// Returns whether the compaction has pending tasks.
596 728 : pub(crate) async fn compact_legacy(
597 728 : self: &Arc<Self>,
598 728 : cancel: &CancellationToken,
599 728 : options: CompactOptions,
600 728 : ctx: &RequestContext,
601 728 : ) -> Result<bool, CompactionError> {
602 728 : if options
603 728 : .flags
604 728 : .contains(CompactFlags::EnhancedGcBottomMostCompaction)
605 : {
606 0 : self.compact_with_gc(cancel, options, ctx)
607 0 : .await
608 0 : .map_err(CompactionError::Other)?;
609 0 : return Ok(false);
610 728 : }
611 728 :
612 728 : if options.flags.contains(CompactFlags::DryRun) {
613 0 : return Err(CompactionError::Other(anyhow!(
614 0 : "dry-run mode is not supported for legacy compaction for now"
615 0 : )));
616 728 : }
617 728 :
618 728 : if options.compact_key_range.is_some() || options.compact_lsn_range.is_some() {
619 : // maybe useful in the future? could implement this at some point
620 0 : return Err(CompactionError::Other(anyhow!(
621 0 : "compaction range is not supported for legacy compaction for now"
622 0 : )));
623 728 : }
624 728 :
625 728 : // High level strategy for compaction / image creation:
626 728 : //
627 728 : // 1. First, do an L0 compaction to move the L0
628 728 : // layers into the historic layer map and get flat levels of
629 728 : // layers. If we did not compact all L0 layers, we will
630 728 : // prioritize compacting the timeline again and not do
631 728 : // any of the compactions below.
632 728 : //
633 728 : // 2. Then, calculate the desired "partitioning" of the
634 728 : // currently in-use key space. The goal is to partition the
635 728 : // key space into roughly fixed-size chunks, but also take into
636 728 : // account any existing image layers, and try to align the
637 728 : // chunk boundaries with the existing image layers to avoid
638 728 : // too much churn. Also try to align chunk boundaries with
639 728 : // relation boundaries. In principle, we don't know about
640 728 : // relation boundaries here, we just deal with key-value
641 728 : // pairs, and the code in pgdatadir_mapping.rs knows how to
642 728 : // map relations into key-value pairs. But in practice we know
643 728 : // that 'field6' is the block number, and the fields 1-5
644 728 : // identify a relation. This is just an optimization,
645 728 : // though.
646 728 : //
647 728 : // 3. Once we know the partitioning, for each partition,
648 728 : // decide if it's time to create a new image layer. The
649 728 : // criterion is: has there been too much "churn" since the last
650 728 : // image layer? "Churn" is a fuzzy concept; it's a
651 728 : // combination of too many delta files, or too much WAL in
652 728 : // total in the delta files. Or perhaps: whether creating an image
653 728 : // file would allow us to delete some older files.
654 728 : //
655 728 : // 4. In the end, if the tenant gets auto-sharded, we will run
656 728 : // a shard-ancestor compaction.
657 728 :
658 728 : // Is the timeline being deleted?
659 728 : if self.is_stopping() {
660 0 : trace!("Dropping out of compaction on timeline shutdown");
661 0 : return Err(CompactionError::ShuttingDown);
662 728 : }
663 728 :
664 728 : let target_file_size = self.get_checkpoint_distance();
665 :
666 : // Define partitioning schema if needed
667 :
668 : // 1. L0 Compact
669 728 : let fully_compacted = {
670 728 : let timer = self.metrics.compact_time_histo.start_timer();
671 728 : let fully_compacted = self
672 728 : .compact_level0(
673 728 : target_file_size,
674 728 : options.flags.contains(CompactFlags::ForceL0Compaction),
675 728 : ctx,
676 728 : )
677 728 : .await?;
678 728 : timer.stop_and_record();
679 728 : fully_compacted
680 728 : };
681 728 :
682 728 : if !fully_compacted {
683 : // Yield and do not do any other kind of compaction. True means
684 : // that we have pending L0 compaction tasks and the compaction scheduler
685 : // will prioritize compacting this tenant/timeline again.
686 0 : info!("skipping image layer generation and shard ancestor compaction due to L0 compaction did not include all layers.");
687 0 : return Ok(true);
688 728 : }
689 :
690 : // 2. Repartition and create image layers if necessary
691 728 : let partition_count = match self
692 728 : .repartition(
693 728 : self.get_last_record_lsn(), // TODO: use L0-L1 boundary
694 728 : self.get_compaction_target_size(),
695 728 : options.flags,
696 728 : ctx,
697 728 : )
698 728 : .await
699 : {
700 728 : Ok(((dense_partitioning, sparse_partitioning), lsn)) => {
701 728 : // Disables access_stats updates, so that the files we read remain candidates for eviction after we're done with them
702 728 : let image_ctx = RequestContextBuilder::extend(ctx)
703 728 : .access_stats_behavior(AccessStatsBehavior::Skip)
704 728 : .build();
705 728 :
706 728 : let mut partitioning = dense_partitioning;
707 728 : partitioning
708 728 : .parts
709 728 : .extend(sparse_partitioning.into_dense().parts);
710 :
711 : // 3. Create new image layers for partitions that have been modified "enough".
712 728 : let image_layers = self
713 728 : .create_image_layers(
714 728 : &partitioning,
715 728 : lsn,
716 728 : if options
717 728 : .flags
718 728 : .contains(CompactFlags::ForceImageLayerCreation)
719 : {
720 28 : ImageLayerCreationMode::Force
721 : } else {
722 700 : ImageLayerCreationMode::Try
723 : },
724 728 : &image_ctx,
725 728 : )
726 728 : .await?;
727 :
728 728 : self.upload_new_image_layers(image_layers)?;
729 728 : partitioning.parts.len()
730 : }
731 0 : Err(err) => {
732 0 : // no partitioning? This is normal, if the timeline was just created
733 0 : // as an empty timeline. Also in unit tests, when we use the timeline
734 0 : // as a simple key-value store, ignoring the datadir layout. Log the
735 0 : // error but continue.
736 0 : //
737 0 : // Suppress error when it's due to cancellation
738 0 : if !self.cancel.is_cancelled() && !err.is_cancelled() {
739 0 : tracing::error!("could not compact, repartitioning keyspace failed: {err:?}");
740 0 : }
741 0 : 1
742 : }
743 : };
744 :
745 : // 4. Shard ancestor compaction
746 :
747 728 : if self.shard_identity.count >= ShardCount::new(2) {
748 : // Limit the number of layer rewrites to the number of partitions: this means its
749 : // runtime should be comparable to a full round of image layer creations, rather than
750 : // being potentially much longer.
751 0 : let rewrite_max = partition_count;
752 0 :
753 0 : self.compact_shard_ancestors(rewrite_max, ctx).await?;
754 728 : }
755 :
756 728 : Ok(false)
757 728 : }
758 :
759 : /// Check for layers that are eligible to be rewritten:
760 : /// - Shard splitting: After a shard split, ancestor layers beyond pitr_interval, so that
761 : /// we don't indefinitely retain keys in this shard that aren't needed.
762 : /// - For future use: layers beyond pitr_interval that are in formats we would
763 : /// rather not maintain compatibility with indefinitely.
764 : ///
765 : /// Note: this phase may read and write many gigabytes of data: use rewrite_max to bound
766 : /// how much work it will try to do in each compaction pass.
767 0 : async fn compact_shard_ancestors(
768 0 : self: &Arc<Self>,
769 0 : rewrite_max: usize,
770 0 : ctx: &RequestContext,
771 0 : ) -> Result<(), CompactionError> {
772 0 : let mut drop_layers = Vec::new();
773 0 : let mut layers_to_rewrite: Vec<Layer> = Vec::new();
774 0 :
775 0 : // We will use the Lsn cutoff of the last GC as a threshold for rewriting layers: if a
776 0 : // layer is behind this Lsn, it indicates that the layer is being retained beyond the
777 0 : // pitr_interval, for example because a branchpoint references it.
778 0 : //
779 0 : // Holding this read guard also blocks [`Self::gc_timeline`] from entering while we
780 0 : // are rewriting layers.
781 0 : let latest_gc_cutoff = self.get_latest_gc_cutoff_lsn();
782 0 :
783 0 : tracing::info!(
784 0 : "latest_gc_cutoff: {}, pitr cutoff {}",
785 0 : *latest_gc_cutoff,
786 0 : self.gc_info.read().unwrap().cutoffs.time
787 : );
788 :
789 0 : let layers = self.layers.read().await;
790 0 : for layer_desc in layers.layer_map()?.iter_historic_layers() {
791 0 : let layer = layers.get_from_desc(&layer_desc);
792 0 : if layer.metadata().shard.shard_count == self.shard_identity.count {
793 : // This layer does not belong to a historic ancestor, no need to re-image it.
794 0 : continue;
795 0 : }
796 0 :
797 0 : // This layer was created on an ancestor shard: check if it contains any data for this shard.
798 0 : let sharded_range = ShardedRange::new(layer_desc.get_key_range(), &self.shard_identity);
799 0 : let layer_local_page_count = sharded_range.page_count();
800 0 : let layer_raw_page_count = ShardedRange::raw_size(&layer_desc.get_key_range());
801 0 : if layer_local_page_count == 0 {
802 : // This ancestral layer only covers keys that belong to other shards.
803 : // We include the full metadata in the log: if we had some critical bug that caused
804 : // us to incorrectly drop layers, this would simplify manually debugging + reinstating those layers.
805 0 : info!(%layer, old_metadata=?layer.metadata(),
806 0 : "dropping layer after shard split, contains no keys for this shard.",
807 : );
808 :
809 0 : if cfg!(debug_assertions) {
810 : // Expensive, exhaustive check of keys in this layer: this guards against ShardedRange's calculations being
811 : // wrong. If ShardedRange claims the local page count is zero, then no keys in this layer
812 : // should be !is_key_disposable()
813 0 : let range = layer_desc.get_key_range();
814 0 : let mut key = range.start;
815 0 : while key < range.end {
816 0 : debug_assert!(self.shard_identity.is_key_disposable(&key));
817 0 : key = key.next();
818 : }
819 0 : }
820 :
821 0 : drop_layers.push(layer);
822 0 : continue;
823 0 : } else if layer_local_page_count != u32::MAX
824 0 : && layer_local_page_count == layer_raw_page_count
825 : {
826 0 : debug!(%layer,
827 0 : "layer is entirely shard local ({} keys), no need to filter it",
828 : layer_local_page_count
829 : );
830 0 : continue;
831 0 : }
832 0 :
833 0 : // Don't bother re-writing a layer unless it will at least halve its size
834 0 : if layer_local_page_count != u32::MAX
835 0 : && layer_local_page_count > layer_raw_page_count / 2
836 : {
837 0 : debug!(%layer,
838 0 : "layer is already mostly local ({}/{}), not rewriting",
839 : layer_local_page_count,
840 : layer_raw_page_count
841 : );
842 0 : }
843 :
844 : // Don't bother re-writing a layer if it is within the PITR window: it will age-out eventually
845 : // without incurring the I/O cost of a rewrite.
846 0 : if layer_desc.get_lsn_range().end >= *latest_gc_cutoff {
847 0 : debug!(%layer, "Skipping rewrite of layer still in GC window ({} >= {})",
848 0 : layer_desc.get_lsn_range().end, *latest_gc_cutoff);
849 0 : continue;
850 0 : }
851 0 :
852 0 : if layer_desc.is_delta() {
853 : // We do not yet implement rewrite of delta layers
854 0 : debug!(%layer, "Skipping rewrite of delta layer");
855 0 : continue;
856 0 : }
857 0 :
858 0 : // Only rewrite layers if their generations differ. This guarantees:
859 0 : // - that local rewrite is safe, as local layer paths will differ between existing layer and rewritten one
860 0 : // - that the layer is persistent in remote storage, as we only see old-generation'd layers via loading from remote storage
861 0 : if layer.metadata().generation == self.generation {
862 0 : debug!(%layer, "Skipping rewrite, is not from old generation");
863 0 : continue;
864 0 : }
865 0 :
866 0 : if layers_to_rewrite.len() >= rewrite_max {
867 0 : tracing::info!(%layer, "Will rewrite layer on a future compaction, already rewrote {}",
868 0 : layers_to_rewrite.len()
869 : );
870 0 : continue;
871 0 : }
872 0 :
873 0 : // Fall through: all our conditions for doing a rewrite passed.
874 0 : layers_to_rewrite.push(layer);
875 : }
876 :
877 : // Drop read lock on layer map before we start doing time-consuming I/O
878 0 : drop(layers);
879 0 :
880 0 : let mut replace_image_layers = Vec::new();
881 :
882 0 : for layer in layers_to_rewrite {
883 0 : tracing::info!(layer=%layer, "Rewriting layer after shard split...");
884 0 : let mut image_layer_writer = ImageLayerWriter::new(
885 0 : self.conf,
886 0 : self.timeline_id,
887 0 : self.tenant_shard_id,
888 0 : &layer.layer_desc().key_range,
889 0 : layer.layer_desc().image_layer_lsn(),
890 0 : ctx,
891 0 : )
892 0 : .await
893 0 : .map_err(CompactionError::Other)?;
894 :
895 : // Safety of layer rewrites:
896 : // - We are writing to a different local file path than we are reading from, so the old Layer
897 : // cannot interfere with the new one.
898 : // - In the page cache, contents for a particular VirtualFile are stored with a file_id that
899 : // is different for two layers with the same name (in `ImageLayerInner::new` we always
900 : // acquire a fresh id from [`crate::page_cache::next_file_id`]. So readers do not risk
901 : // reading the index from one layer file, and then data blocks from the rewritten layer file.
902 : // - Any readers that have a reference to the old layer will keep it alive until they are done
903 : // with it. If they are trying to promote from remote storage, that will fail, but this is the same
904 : // as for compaction generally: compaction is allowed to delete layers that readers might be trying to use.
905 : // - We do not run concurrently with other kinds of compaction, so the only layer map writes we race with are:
906 : // - GC, which at worst witnesses us "undelete" a layer that they just deleted.
907 : // - ingestion, which only inserts layers, therefore cannot collide with us.
908 0 : let resident = layer.download_and_keep_resident().await?;
909 :
910 0 : let keys_written = resident
911 0 : .filter(&self.shard_identity, &mut image_layer_writer, ctx)
912 0 : .await?;
913 :
914 0 : if keys_written > 0 {
915 0 : let (desc, path) = image_layer_writer
916 0 : .finish(ctx)
917 0 : .await
918 0 : .map_err(CompactionError::Other)?;
919 0 : let new_layer = Layer::finish_creating(self.conf, self, desc, &path)
920 0 : .map_err(CompactionError::Other)?;
921 0 : tracing::info!(layer=%new_layer, "Rewrote layer, {} -> {} bytes",
922 0 : layer.metadata().file_size,
923 0 : new_layer.metadata().file_size);
924 :
925 0 : replace_image_layers.push((layer, new_layer));
926 0 : } else {
927 0 : // Drop the old layer. Usually for this case we would already have noticed that
928 0 : // the layer has no data for us with the ShardedRange check above, but handle it here as well.
929 0 : drop_layers.push(layer);
930 0 : }
931 : }
932 :
933 : // At this point, we have replaced local layer files with their rewritten form, but not yet uploaded
934 : // metadata to reflect that. If we restart here, the replaced layer files will look invalid (size mismatch
935 : // to remote index) and be removed. This is inefficient but safe.
936 0 : fail::fail_point!("compact-shard-ancestors-localonly");
937 0 :
938 0 : // Update the LayerMap so that readers will use the new layers, and enqueue it for writing to remote storage
939 0 : self.rewrite_layers(replace_image_layers, drop_layers)
940 0 : .await?;
941 :
942 0 : fail::fail_point!("compact-shard-ancestors-enqueued");
943 0 :
944 0 : // We wait for all uploads to complete before finishing this compaction stage. This is not
945 0 : // necessary for correctness, but it simplifies testing, and avoids proceeding with another
946 0 : // Timeline's compaction while this timeline's uploads may be generating lots of disk I/O
947 0 : // load.
948 0 : match self.remote_client.wait_completion().await {
949 0 : Ok(()) => (),
950 0 : Err(WaitCompletionError::NotInitialized(ni)) => return Err(CompactionError::from(ni)),
951 : Err(WaitCompletionError::UploadQueueShutDownOrStopped) => {
952 0 : return Err(CompactionError::ShuttingDown)
953 : }
954 : }
955 :
956 0 : fail::fail_point!("compact-shard-ancestors-persistent");
957 0 :
958 0 : Ok(())
959 0 : }
960 :
961 : /// Update the LayerVisibilityHint of layers covered by image layers, based on whether there is
962 : /// an image layer between them and the most recent readable LSN (branch point or tip of timeline). The
963 : /// purpose of the visibility hint is to record which layers need to be available to service reads.
964 : ///
965 : /// The result may be used as an input to eviction and secondary downloads to de-prioritize layers
966 : /// that we know won't be needed for reads.
967 452 : pub(super) async fn update_layer_visibility(
968 452 : &self,
969 452 : ) -> Result<(), super::layer_manager::Shutdown> {
970 452 : let head_lsn = self.get_last_record_lsn();
971 :
972 : // We will sweep through layers in reverse-LSN order. We only do historic layers. L0 deltas
973 : // are implicitly left visible, because LayerVisibilityHint's default is Visible, and we never modify it here.
974 : // Note that L0 deltas _can_ be covered by image layers, but we consider them 'visible' because we anticipate that
975 : // they will be subject to L0->L1 compaction in the near future.
976 452 : let layer_manager = self.layers.read().await;
977 452 : let layer_map = layer_manager.layer_map()?;
978 :
979 452 : let readable_points = {
980 452 : let children = self.gc_info.read().unwrap().retain_lsns.clone();
981 452 :
982 452 : let mut readable_points = Vec::with_capacity(children.len() + 1);
983 452 : for (child_lsn, _child_timeline_id, is_offloaded) in &children {
984 0 : if *is_offloaded == MaybeOffloaded::Yes {
985 0 : continue;
986 0 : }
987 0 : readable_points.push(*child_lsn);
988 : }
989 452 : readable_points.push(head_lsn);
990 452 : readable_points
991 452 : };
992 452 :
993 452 : let (layer_visibility, covered) = layer_map.get_visibility(readable_points);
994 1144 : for (layer_desc, visibility) in layer_visibility {
995 692 : // FIXME: a more efficiency bulk zip() through the layers rather than NlogN getting each one
996 692 : let layer = layer_manager.get_from_desc(&layer_desc);
997 692 : layer.set_visibility(visibility);
998 692 : }
999 :
1000 : // TODO: publish our covered KeySpace to our parent, so that when they update their visibility, they can
1001 : // avoid assuming that everything at a branch point is visible.
1002 452 : drop(covered);
1003 452 : Ok(())
1004 452 : }
1005 :
1006 : /// Collect a bunch of Level 0 layer files, and compact and reshuffle them as
1007 : /// Level 1 files. Returns whether the L0 layers are fully compacted.
1008 728 : async fn compact_level0(
1009 728 : self: &Arc<Self>,
1010 728 : target_file_size: u64,
1011 728 : force_compaction_ignore_threshold: bool,
1012 728 : ctx: &RequestContext,
1013 728 : ) -> Result<bool, CompactionError> {
1014 : let CompactLevel0Phase1Result {
1015 728 : new_layers,
1016 728 : deltas_to_compact,
1017 728 : fully_compacted,
1018 : } = {
1019 728 : let phase1_span = info_span!("compact_level0_phase1");
1020 728 : let ctx = ctx.attached_child();
1021 728 : let mut stats = CompactLevel0Phase1StatsBuilder {
1022 728 : version: Some(2),
1023 728 : tenant_id: Some(self.tenant_shard_id),
1024 728 : timeline_id: Some(self.timeline_id),
1025 728 : ..Default::default()
1026 728 : };
1027 728 :
1028 728 : let begin = tokio::time::Instant::now();
1029 728 : let phase1_layers_locked = self.layers.read().await;
1030 728 : let now = tokio::time::Instant::now();
1031 728 : stats.read_lock_acquisition_micros =
1032 728 : DurationRecorder::Recorded(RecordedDuration(now - begin), now);
1033 728 : self.compact_level0_phase1(
1034 728 : phase1_layers_locked,
1035 728 : stats,
1036 728 : target_file_size,
1037 728 : force_compaction_ignore_threshold,
1038 728 : &ctx,
1039 728 : )
1040 728 : .instrument(phase1_span)
1041 728 : .await?
1042 : };
1043 :
1044 728 : if new_layers.is_empty() && deltas_to_compact.is_empty() {
1045 : // nothing to do
1046 672 : return Ok(true);
1047 56 : }
1048 56 :
1049 56 : self.finish_compact_batch(&new_layers, &Vec::new(), &deltas_to_compact)
1050 56 : .await?;
1051 56 : Ok(fully_compacted)
1052 728 : }
1053 :
1054 : /// Level0 files first phase of compaction, explained in the [`Self::compact_legacy`] comment.
1055 728 : async fn compact_level0_phase1<'a>(
1056 728 : self: &'a Arc<Self>,
1057 728 : guard: tokio::sync::RwLockReadGuard<'a, LayerManager>,
1058 728 : mut stats: CompactLevel0Phase1StatsBuilder,
1059 728 : target_file_size: u64,
1060 728 : force_compaction_ignore_threshold: bool,
1061 728 : ctx: &RequestContext,
1062 728 : ) -> Result<CompactLevel0Phase1Result, CompactionError> {
1063 728 : stats.read_lock_held_spawn_blocking_startup_micros =
1064 728 : stats.read_lock_acquisition_micros.till_now(); // set by caller
1065 728 : let layers = guard.layer_map()?;
1066 728 : let level0_deltas = layers.level0_deltas();
1067 728 : stats.level0_deltas_count = Some(level0_deltas.len());
1068 728 :
1069 728 : // Only compact if enough layers have accumulated.
1070 728 : let threshold = self.get_compaction_threshold();
1071 728 : if level0_deltas.is_empty() || level0_deltas.len() < threshold {
1072 672 : if force_compaction_ignore_threshold {
1073 0 : if !level0_deltas.is_empty() {
1074 0 : info!(
1075 0 : level0_deltas = level0_deltas.len(),
1076 0 : threshold, "too few deltas to compact, but forcing compaction"
1077 : );
1078 : } else {
1079 0 : info!(
1080 0 : level0_deltas = level0_deltas.len(),
1081 0 : threshold, "too few deltas to compact, cannot force compaction"
1082 : );
1083 0 : return Ok(CompactLevel0Phase1Result::default());
1084 : }
1085 : } else {
1086 672 : debug!(
1087 0 : level0_deltas = level0_deltas.len(),
1088 0 : threshold, "too few deltas to compact"
1089 : );
1090 672 : return Ok(CompactLevel0Phase1Result::default());
1091 : }
1092 56 : }
1093 :
1094 56 : let mut level0_deltas = level0_deltas
1095 56 : .iter()
1096 804 : .map(|x| guard.get_from_desc(x))
1097 56 : .collect::<Vec<_>>();
1098 56 :
1099 56 : // Gather the files to compact in this iteration.
1100 56 : //
1101 56 : // Start with the oldest Level 0 delta file, and collect any other
1102 56 : // level 0 files that form a contiguous sequence, such that the end
1103 56 : // LSN of the previous file matches the start LSN of the next file.
1104 56 : //
1105 56 : // Note that if the files don't form such a sequence, we might
1106 56 : // "compact" just a single file. That's a bit pointless, but it allows
1107 56 : // us to get rid of the level 0 file, and compact the other files on
1108 56 : // the next iteration. This could probably made smarter, but such
1109 56 : // "gaps" in the sequence of level 0 files should only happen in case
1110 56 : // of a crash, partial download from cloud storage, or something like
1111 56 : // that, so it's not a big deal in practice.
1112 1496 : level0_deltas.sort_by_key(|l| l.layer_desc().lsn_range.start);
1113 56 : let mut level0_deltas_iter = level0_deltas.iter();
1114 56 :
1115 56 : let first_level0_delta = level0_deltas_iter.next().unwrap();
1116 56 : let mut prev_lsn_end = first_level0_delta.layer_desc().lsn_range.end;
1117 56 : let mut deltas_to_compact = Vec::with_capacity(level0_deltas.len());
1118 56 :
1119 56 : // Accumulate the size of layers in `deltas_to_compact`
1120 56 : let mut deltas_to_compact_bytes = 0;
1121 56 :
1122 56 : // Under normal circumstances, we will accumulate up to compaction_upper_limit L0s of size
1123 56 : // checkpoint_distance each. To avoid edge cases using extra system resources, bound our
1124 56 : // work in this function to only operate on this much delta data at once.
1125 56 : //
1126 56 : // In general, compaction_threshold should be <= compaction_upper_limit, but in case that
1127 56 : // the constraint is not respected, we use the larger of the two.
1128 56 : let delta_size_limit = std::cmp::max(
1129 56 : self.get_compaction_upper_limit(),
1130 56 : self.get_compaction_threshold(),
1131 56 : ) as u64
1132 56 : * std::cmp::max(self.get_checkpoint_distance(), DEFAULT_CHECKPOINT_DISTANCE);
1133 56 :
1134 56 : let mut fully_compacted = true;
1135 56 :
1136 56 : deltas_to_compact.push(first_level0_delta.download_and_keep_resident().await?);
1137 804 : for l in level0_deltas_iter {
1138 748 : let lsn_range = &l.layer_desc().lsn_range;
1139 748 :
1140 748 : if lsn_range.start != prev_lsn_end {
1141 0 : break;
1142 748 : }
1143 748 : deltas_to_compact.push(l.download_and_keep_resident().await?);
1144 748 : deltas_to_compact_bytes += l.metadata().file_size;
1145 748 : prev_lsn_end = lsn_range.end;
1146 748 :
1147 748 : if deltas_to_compact_bytes >= delta_size_limit {
1148 0 : info!(
1149 0 : l0_deltas_selected = deltas_to_compact.len(),
1150 0 : l0_deltas_total = level0_deltas.len(),
1151 0 : "L0 compaction picker hit max delta layer size limit: {}",
1152 : delta_size_limit
1153 : );
1154 0 : fully_compacted = false;
1155 0 :
1156 0 : // Proceed with compaction, but only a subset of L0s
1157 0 : break;
1158 748 : }
1159 : }
1160 56 : let lsn_range = Range {
1161 56 : start: deltas_to_compact
1162 56 : .first()
1163 56 : .unwrap()
1164 56 : .layer_desc()
1165 56 : .lsn_range
1166 56 : .start,
1167 56 : end: deltas_to_compact.last().unwrap().layer_desc().lsn_range.end,
1168 56 : };
1169 56 :
1170 56 : info!(
1171 0 : "Starting Level0 compaction in LSN range {}-{} for {} layers ({} deltas in total)",
1172 0 : lsn_range.start,
1173 0 : lsn_range.end,
1174 0 : deltas_to_compact.len(),
1175 0 : level0_deltas.len()
1176 : );
1177 :
1178 804 : for l in deltas_to_compact.iter() {
1179 804 : info!("compact includes {l}");
1180 : }
1181 :
1182 : // We don't need the original list of layers anymore. Drop it so that
1183 : // we don't accidentally use it later in the function.
1184 56 : drop(level0_deltas);
1185 56 :
1186 56 : stats.read_lock_held_prerequisites_micros = stats
1187 56 : .read_lock_held_spawn_blocking_startup_micros
1188 56 : .till_now();
1189 :
1190 : // TODO: replace with streaming k-merge
1191 56 : let all_keys = {
1192 56 : let mut all_keys = Vec::new();
1193 804 : for l in deltas_to_compact.iter() {
1194 804 : if self.cancel.is_cancelled() {
1195 0 : return Err(CompactionError::ShuttingDown);
1196 804 : }
1197 804 : let delta = l.get_as_delta(ctx).await.map_err(CompactionError::Other)?;
1198 804 : let keys = delta
1199 804 : .index_entries(ctx)
1200 804 : .await
1201 804 : .map_err(CompactionError::Other)?;
1202 804 : all_keys.extend(keys);
1203 : }
1204 : // The current stdlib sorting implementation is designed such that it is
1205 : // particularly fast when the slice is made up of sorted sub-ranges.
1206 8847612 : all_keys.sort_by_key(|DeltaEntry { key, lsn, .. }| (*key, *lsn));
1207 56 : all_keys
1208 56 : };
1209 56 :
1210 56 : stats.read_lock_held_key_sort_micros = stats.read_lock_held_prerequisites_micros.till_now();
1211 :
1212 : // Determine N largest holes where N is number of compacted layers. The vec is sorted by key range start.
1213 : //
1214 : // A hole is a key range for which this compaction doesn't have any WAL records.
1215 : // Our goal in this compaction iteration is to avoid creating L1s that, in terms of their key range,
1216 : // cover the hole, but actually don't contain any WAL records for that key range.
1217 : // The reason is that the mere stack of L1s (`count_deltas`) triggers image layer creation (`create_image_layers`).
1218 : // That image layer creation would be useless for a hole range covered by L1s that don't contain any WAL records.
1219 : //
1220 : // The algorithm chooses holes as follows.
1221 : // - Slide a 2-key window over the keys in key order to get the hole range (= distance between two adjacent keys).
1222 : // - Filter: min threshold on range length
1223 : // - Rank: by coverage size (=number of image layers required to reconstruct each key in the range for which we have any data)
1224 : //
1225 : // For more details, intuition, and some ASCII art see https://github.com/neondatabase/neon/pull/3597#discussion_r1112704451
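// Implementation note on the selection below: `Hole::cmp` reverses the comparison on
// `coverage_size`, which makes the std `BinaryHeap` (a max-heap) behave as a min-heap by
// coverage size. Every candidate hole is pushed, and whenever the heap grows past `max_holes`
// the smallest hole is popped, so the heap always retains the `max_holes` largest holes.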
1226 : #[derive(PartialEq, Eq)]
1227 : struct Hole {
1228 : key_range: Range<Key>,
1229 : coverage_size: usize,
1230 : }
1231 56 : let holes: Vec<Hole> = {
1232 : use std::cmp::Ordering;
1233 : impl Ord for Hole {
1234 0 : fn cmp(&self, other: &Self) -> Ordering {
1235 0 : self.coverage_size.cmp(&other.coverage_size).reverse()
1236 0 : }
1237 : }
1238 : impl PartialOrd for Hole {
1239 0 : fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
1240 0 : Some(self.cmp(other))
1241 0 : }
1242 : }
1243 56 : let max_holes = deltas_to_compact.len();
1244 56 : let last_record_lsn = self.get_last_record_lsn();
1245 56 : let min_hole_range = (target_file_size / page_cache::PAGE_SZ as u64) as i128;
1246 56 : let min_hole_coverage_size = 3; // TODO: something more flexible?
1247 56 : // min-heap (reserve space for one more element added before eviction)
1248 56 : let mut heap: BinaryHeap<Hole> = BinaryHeap::with_capacity(max_holes + 1);
1249 56 : let mut prev: Option<Key> = None;
1250 :
1251 4128076 : for &DeltaEntry { key: next_key, .. } in all_keys.iter() {
1252 4128076 : if let Some(prev_key) = prev {
1253 : // just a first fast filter; do not create hole entries for metadata keys. The last hole in the
1254 : // compaction is the gap between data keys and metadata keys.
1255 4128020 : if next_key.to_i128() - prev_key.to_i128() >= min_hole_range
1256 0 : && !Key::is_metadata_key(&prev_key)
1257 : {
1258 0 : let key_range = prev_key..next_key;
1259 0 : // Measuring a hole just by subtracting the i128 representations of the key range boundaries
1260 0 : // doesn't make much sense, because the largest holes would correspond to field1/field2 changes.
1261 0 : // But we are mostly interested in eliminating holes which cause generation of excessive image layers.
1262 0 : // That is why it is better to measure the size of a hole as the number of covering image layers.
1263 0 : let coverage_size =
1264 0 : layers.image_coverage(&key_range, last_record_lsn).len();
1265 0 : if coverage_size >= min_hole_coverage_size {
1266 0 : heap.push(Hole {
1267 0 : key_range,
1268 0 : coverage_size,
1269 0 : });
1270 0 : if heap.len() > max_holes {
1271 0 : heap.pop(); // remove smallest hole
1272 0 : }
1273 0 : }
1274 4128020 : }
1275 56 : }
1276 4128076 : prev = Some(next_key.next());
1277 : }
1278 56 : let mut holes = heap.into_vec();
1279 56 : holes.sort_unstable_by_key(|hole| hole.key_range.start);
1280 56 : holes
1281 56 : };
1282 56 : stats.read_lock_held_compute_holes_micros = stats.read_lock_held_key_sort_micros.till_now();
1283 56 : drop_rlock(guard);
1284 56 :
1285 56 : if self.cancel.is_cancelled() {
1286 0 : return Err(CompactionError::ShuttingDown);
1287 56 : }
1288 56 :
1289 56 : stats.read_lock_drop_micros = stats.read_lock_held_compute_holes_micros.till_now();
1290 :
1291 : // This iterator walks through all key-value pairs from all the layers
1292 : // we're compacting, in key, LSN order.
1293 : // If there's both a Value::Image and Value::WalRecord for the same (key,lsn),
1294 : // then the Value::Image is ordered before Value::WalRecord.
1295 56 : let mut all_values_iter = {
1296 56 : let mut deltas = Vec::with_capacity(deltas_to_compact.len());
1297 804 : for l in deltas_to_compact.iter() {
1298 804 : let l = l.get_as_delta(ctx).await.map_err(CompactionError::Other)?;
1299 804 : deltas.push(l);
1300 : }
1301 56 : MergeIterator::create(&deltas, &[], ctx)
1302 56 : };
1303 56 :
1304 56 : // This iterator walks through all keys and is needed to calculate size used by each key
1305 56 : let mut all_keys_iter = all_keys
1306 56 : .iter()
1307 4128076 : .map(|DeltaEntry { key, lsn, size, .. }| (*key, *lsn, *size))
1308 4128020 : .coalesce(|mut prev, cur| {
1309 4128020 : // Coalesce entries that belong to the same key.
1310 4128020 : // This ensures that compaction doesn't put them
1311 4128020 : // into different layer files.
1312 4128020 : // Still limit this by the target file size,
1313 4128020 : // so that we keep the size of the files in
1314 4128020 : // check.
1315 4128020 : if prev.0 == cur.0 && prev.2 < target_file_size {
1316 80076 : prev.2 += cur.2;
1317 80076 : Ok(prev)
1318 : } else {
1319 4047944 : Err((prev, cur))
1320 : }
1321 4128020 : });
1322 56 :
1323 56 : // Merge the contents of all the input delta layers into a new set
1324 56 : // of delta layers, based on the current partitioning.
1325 56 : //
1326 56 : // We split the new delta layers on the key dimension. We iterate through the key space, and for each key, check whether adding the next key to the current output layer we're building would cause the layer to become too large. If so, dump the current output layer and start a new one.
1327 56 : // It's possible that there is a single key with so many page versions that storing all of them in a single layer file
1328 56 : // would be too large. In that case, we also split on the LSN dimension.
1329 56 : //
1330 56 : // LSN
1331 56 : // ^
1332 56 : // |
1333 56 : // | +-----------+ +--+--+--+--+
1334 56 : // | | | | | | | |
1335 56 : // | +-----------+ | | | | |
1336 56 : // | | | | | | | |
1337 56 : // | +-----------+ ==> | | | | |
1338 56 : // | | | | | | | |
1339 56 : // | +-----------+ | | | | |
1340 56 : // | | | | | | | |
1341 56 : // | +-----------+ +--+--+--+--+
1342 56 : // |
1343 56 : // +--------------> key
1344 56 : //
1345 56 : //
1346 56 : // If one key (X) has a lot of page versions:
1347 56 : //
1348 56 : // LSN
1349 56 : // ^
1350 56 : // | (X)
1351 56 : // | +-----------+ +--+--+--+--+
1352 56 : // | | | | | | | |
1353 56 : // | +-----------+ | | +--+ |
1354 56 : // | | | | | | | |
1355 56 : // | +-----------+ ==> | | | | |
1356 56 : // | | | | | +--+ |
1357 56 : // | +-----------+ | | | | |
1358 56 : // | | | | | | | |
1359 56 : // | +-----------+ +--+--+--+--+
1360 56 : // |
1361 56 : // +--------------> key
1362 56 : // TODO: this actually divides the layers into fixed-size chunks, not
1363 56 : // based on the partitioning.
1364 56 : //
1365 56 : // TODO: we should also opportunistically materialize and
1366 56 : // garbage collect what we can.
1367 56 : let mut new_layers = Vec::new();
1368 56 : let mut prev_key: Option<Key> = None;
1369 56 : let mut writer: Option<DeltaLayerWriter> = None;
1370 56 : let mut key_values_total_size = 0u64;
1371 56 : let mut dup_start_lsn: Lsn = Lsn::INVALID; // start LSN of layer containing values of the single key
1372 56 : let mut dup_end_lsn: Lsn = Lsn::INVALID; // end LSN of layer containing values of the single key
1373 56 : let mut next_hole = 0; // index of next hole in holes vector
1374 56 :
1375 56 : let mut keys = 0;
1376 :
1377 4128132 : while let Some((key, lsn, value)) = all_values_iter
1378 4128132 : .next()
1379 4128132 : .await
1380 4128132 : .map_err(CompactionError::Other)?
1381 : {
1382 4128076 : keys += 1;
1383 4128076 :
1384 4128076 : if keys % 32_768 == 0 && self.cancel.is_cancelled() {
1385 : // avoid hitting the cancellation token on every key. in benches, we end up
1386 : // shuffling an order of million keys per layer, this means we'll check it
1387 : // around tens of times per layer.
1388 0 : return Err(CompactionError::ShuttingDown);
1389 4128076 : }
1390 4128076 :
1391 4128076 : let same_key = prev_key == Some(key);
1392 4128076 : // We need to check key boundaries once we reach the next key or the end of a layer containing the same key
1393 4128076 : if !same_key || lsn == dup_end_lsn {
1394 4048000 : let mut next_key_size = 0u64;
1395 4048000 : let is_dup_layer = dup_end_lsn.is_valid();
1396 4048000 : dup_start_lsn = Lsn::INVALID;
1397 4048000 : if !same_key {
1398 4048000 : dup_end_lsn = Lsn::INVALID;
1399 4048000 : }
1400 : // Determine the size occupied by this key. We stop at the next key or when the size becomes larger than target_file_size
1401 4048000 : for (next_key, next_lsn, next_size) in all_keys_iter.by_ref() {
1402 4048000 : next_key_size = next_size;
1403 4048000 : if key != next_key {
1404 4047944 : if dup_end_lsn.is_valid() {
1405 0 : // We are writing a segment with duplicates:
1406 0 : // place all remaining values of this key in a separate segment
1407 0 : dup_start_lsn = dup_end_lsn; // the new segment starts where the old one stops
1408 0 : dup_end_lsn = lsn_range.end; // there are no more values of this key till end of LSN range
1409 4047944 : }
1410 4047944 : break;
1411 56 : }
1412 56 : key_values_total_size += next_size;
1413 56 : // Check if it is time to split the segment: i.e., if the total size of the key's values is larger than the target file size.
1414 56 : // We need to avoid generating empty segments if next_size > target_file_size.
1415 56 : if key_values_total_size > target_file_size && lsn != next_lsn {
1416 : // Split the key between multiple layers: such a layer can contain only a single key
1417 0 : dup_start_lsn = if dup_end_lsn.is_valid() {
1418 0 : dup_end_lsn // new segment with duplicates starts where old one stops
1419 : } else {
1420 0 : lsn // start with the first LSN for this key
1421 : };
1422 0 : dup_end_lsn = next_lsn; // upper LSN boundary is exclusive
1423 0 : break;
1424 56 : }
1425 : }
1426 : // Handle the case when the loop reaches the last key: dup_end is set but dup_start is not.
1427 4048000 : if dup_end_lsn.is_valid() && !dup_start_lsn.is_valid() {
1428 0 : dup_start_lsn = dup_end_lsn;
1429 0 : dup_end_lsn = lsn_range.end;
1430 4048000 : }
1431 4048000 : if writer.is_some() {
1432 4047944 : let written_size = writer.as_mut().unwrap().size();
1433 4047944 : let contains_hole =
1434 4047944 : next_hole < holes.len() && key >= holes[next_hole].key_range.end;
1435 : // check if key cause layer overflow or contains hole...
1436 4047944 : if is_dup_layer
1437 4047944 : || dup_end_lsn.is_valid()
1438 4047944 : || written_size + key_values_total_size > target_file_size
1439 4047384 : || contains_hole
1440 : {
1441 : // ... if so, flush the previous layer and prepare to write a new one
1442 560 : let (desc, path) = writer
1443 560 : .take()
1444 560 : .unwrap()
1445 560 : .finish(prev_key.unwrap().next(), ctx)
1446 560 : .await
1447 560 : .map_err(CompactionError::Other)?;
1448 560 : let new_delta = Layer::finish_creating(self.conf, self, desc, &path)
1449 560 : .map_err(CompactionError::Other)?;
1450 :
1451 560 : new_layers.push(new_delta);
1452 560 : writer = None;
1453 560 :
1454 560 : if contains_hole {
1455 0 : // skip hole
1456 0 : next_hole += 1;
1457 560 : }
1458 4047384 : }
1459 56 : }
1460 : // Remember the size of this key's values, because at the next iteration we will already be looking at the next item
1461 4048000 : key_values_total_size = next_key_size;
1462 80076 : }
1463 4128076 : fail_point!("delta-layer-writer-fail-before-finish", |_| {
1464 0 : Err(CompactionError::Other(anyhow::anyhow!(
1465 0 : "failpoint delta-layer-writer-fail-before-finish"
1466 0 : )))
1467 4128076 : });
1468 :
1469 4128076 : if !self.shard_identity.is_key_disposable(&key) {
1470 4128076 : if writer.is_none() {
1471 616 : if self.cancel.is_cancelled() {
1472 : // to be somewhat responsive to cancellation, check for each new layer
1473 0 : return Err(CompactionError::ShuttingDown);
1474 616 : }
1475 : // Create a writer if not initialized yet
1476 616 : writer = Some(
1477 : DeltaLayerWriter::new(
1478 616 : self.conf,
1479 616 : self.timeline_id,
1480 616 : self.tenant_shard_id,
1481 616 : key,
1482 616 : if dup_end_lsn.is_valid() {
1483 : // this is a layer containing slice of values of the same key
1484 0 : debug!("Create new dup layer {}..{}", dup_start_lsn, dup_end_lsn);
1485 0 : dup_start_lsn..dup_end_lsn
1486 : } else {
1487 616 : debug!("Create new layer {}..{}", lsn_range.start, lsn_range.end);
1488 616 : lsn_range.clone()
1489 : },
1490 616 : ctx,
1491 616 : )
1492 616 : .await
1493 616 : .map_err(CompactionError::Other)?,
1494 : );
1495 :
1496 616 : keys = 0;
1497 4127460 : }
1498 :
1499 4128076 : writer
1500 4128076 : .as_mut()
1501 4128076 : .unwrap()
1502 4128076 : .put_value(key, lsn, value, ctx)
1503 4128076 : .await
1504 4128076 : .map_err(CompactionError::Other)?;
1505 : } else {
1506 0 : let shard = self.shard_identity.shard_index();
1507 0 : let owner = self.shard_identity.get_shard_number(&key);
1508 0 : if cfg!(debug_assertions) {
1509 0 : panic!("key {key} does not belong on shard {shard}, owned by {owner}");
1510 0 : }
1511 0 : debug!("dropping key {key} during compaction (it belongs on shard {owner})");
1512 : }
1513 :
1514 4128076 : if !new_layers.is_empty() {
1515 39572 : fail_point!("after-timeline-compacted-first-L1");
1516 4088504 : }
1517 :
1518 4128076 : prev_key = Some(key);
1519 : }
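 : // Flush the last, partially filled layer, if any.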
1520 56 : if let Some(writer) = writer {
1521 56 : let (desc, path) = writer
1522 56 : .finish(prev_key.unwrap().next(), ctx)
1523 56 : .await
1524 56 : .map_err(CompactionError::Other)?;
1525 56 : let new_delta = Layer::finish_creating(self.conf, self, desc, &path)
1526 56 : .map_err(CompactionError::Other)?;
1527 56 : new_layers.push(new_delta);
1528 0 : }
1529 :
1530 : // Sync layers
1531 56 : if !new_layers.is_empty() {
1532 : // Print a warning if the created layer is larger than double the target size.
1533 : // Add two pages for potential overhead. This should in theory already be
1534 : // accounted for in the target calculation, but for very small targets,
1535 : // we still might easily hit the limit otherwise.
1536 56 : let warn_limit = target_file_size * 2 + page_cache::PAGE_SZ as u64 * 2;
1537 616 : for layer in new_layers.iter() {
1538 616 : if layer.layer_desc().file_size > warn_limit {
1539 0 : warn!(
1540 : %layer,
1541 0 : "created delta file of size {} larger than double of target of {target_file_size}", layer.layer_desc().file_size
1542 : );
1543 616 : }
1544 : }
1545 :
1546 : // The writer.finish() above already did the fsync of the inodes.
1547 : // We just need to fsync the directory in which these inodes are linked,
1548 : // which we know to be the timeline directory.
1549 : //
1550 : // We use fatal_err() below because, after writer.finish() returns with success,
1551 : // the in-memory state of the filesystem already has the layer file in its final place,
1552 : // and subsequent pageserver code could think it's durable while it really isn't.
1553 56 : let timeline_dir = VirtualFile::open(
1554 56 : &self
1555 56 : .conf
1556 56 : .timeline_path(&self.tenant_shard_id, &self.timeline_id),
1557 56 : ctx,
1558 56 : )
1559 56 : .await
1560 56 : .fatal_err("VirtualFile::open for timeline dir fsync");
1561 56 : timeline_dir
1562 56 : .sync_all()
1563 56 : .await
1564 56 : .fatal_err("VirtualFile::sync_all timeline dir");
1565 0 : }
1566 :
1567 56 : stats.write_layer_files_micros = stats.read_lock_drop_micros.till_now();
1568 56 : stats.new_deltas_count = Some(new_layers.len());
1569 616 : stats.new_deltas_size = Some(new_layers.iter().map(|l| l.layer_desc().file_size).sum());
1570 56 :
1571 56 : match TryInto::<CompactLevel0Phase1Stats>::try_into(stats)
1572 56 : .and_then(|stats| serde_json::to_string(&stats).context("serde_json::to_string"))
1573 : {
1574 56 : Ok(stats_json) => {
1575 56 : info!(
1576 0 : stats_json = stats_json.as_str(),
1577 0 : "compact_level0_phase1 stats available"
1578 : )
1579 : }
1580 0 : Err(e) => {
1581 0 : warn!("compact_level0_phase1 stats failed to serialize: {:#}", e);
1582 : }
1583 : }
1584 :
1585 : // Without this, rustc complains about deltas_to_compact still
1586 : // being borrowed when we `.into_iter()` below.
1587 56 : drop(all_values_iter);
1588 56 :
1589 56 : Ok(CompactLevel0Phase1Result {
1590 56 : new_layers,
1591 56 : deltas_to_compact: deltas_to_compact
1592 56 : .into_iter()
1593 804 : .map(|x| x.drop_eviction_guard())
1594 56 : .collect::<Vec<_>>(),
1595 56 : fully_compacted,
1596 56 : })
1597 728 : }
1598 : }
1599 :
1600 : #[derive(Default)]
1601 : struct CompactLevel0Phase1Result {
1602 : new_layers: Vec<ResidentLayer>,
1603 : deltas_to_compact: Vec<Layer>,
1604 : // Whether we have included all L0 layers, or selected only part of them due to the
1605 : // L0 compaction size limit.
1606 : fully_compacted: bool,
1607 : }
1608 :
1609 : #[derive(Default)]
1610 : struct CompactLevel0Phase1StatsBuilder {
1611 : version: Option<u64>,
1612 : tenant_id: Option<TenantShardId>,
1613 : timeline_id: Option<TimelineId>,
1614 : read_lock_acquisition_micros: DurationRecorder,
1615 : read_lock_held_spawn_blocking_startup_micros: DurationRecorder,
1616 : read_lock_held_key_sort_micros: DurationRecorder,
1617 : read_lock_held_prerequisites_micros: DurationRecorder,
1618 : read_lock_held_compute_holes_micros: DurationRecorder,
1619 : read_lock_drop_micros: DurationRecorder,
1620 : write_layer_files_micros: DurationRecorder,
1621 : level0_deltas_count: Option<usize>,
1622 : new_deltas_count: Option<usize>,
1623 : new_deltas_size: Option<u64>,
1624 : }
1625 :
1626 : #[derive(serde::Serialize)]
1627 : struct CompactLevel0Phase1Stats {
1628 : version: u64,
1629 : tenant_id: TenantShardId,
1630 : timeline_id: TimelineId,
1631 : read_lock_acquisition_micros: RecordedDuration,
1632 : read_lock_held_spawn_blocking_startup_micros: RecordedDuration,
1633 : read_lock_held_key_sort_micros: RecordedDuration,
1634 : read_lock_held_prerequisites_micros: RecordedDuration,
1635 : read_lock_held_compute_holes_micros: RecordedDuration,
1636 : read_lock_drop_micros: RecordedDuration,
1637 : write_layer_files_micros: RecordedDuration,
1638 : level0_deltas_count: usize,
1639 : new_deltas_count: usize,
1640 : new_deltas_size: u64,
1641 : }
1642 :
1643 : impl TryFrom<CompactLevel0Phase1StatsBuilder> for CompactLevel0Phase1Stats {
1644 : type Error = anyhow::Error;
1645 :
1646 56 : fn try_from(value: CompactLevel0Phase1StatsBuilder) -> Result<Self, Self::Error> {
1647 56 : Ok(Self {
1648 56 : version: value.version.ok_or_else(|| anyhow!("version not set"))?,
1649 56 : tenant_id: value
1650 56 : .tenant_id
1651 56 : .ok_or_else(|| anyhow!("tenant_id not set"))?,
1652 56 : timeline_id: value
1653 56 : .timeline_id
1654 56 : .ok_or_else(|| anyhow!("timeline_id not set"))?,
1655 56 : read_lock_acquisition_micros: value
1656 56 : .read_lock_acquisition_micros
1657 56 : .into_recorded()
1658 56 : .ok_or_else(|| anyhow!("read_lock_acquisition_micros not set"))?,
1659 56 : read_lock_held_spawn_blocking_startup_micros: value
1660 56 : .read_lock_held_spawn_blocking_startup_micros
1661 56 : .into_recorded()
1662 56 : .ok_or_else(|| anyhow!("read_lock_held_spawn_blocking_startup_micros not set"))?,
1663 56 : read_lock_held_key_sort_micros: value
1664 56 : .read_lock_held_key_sort_micros
1665 56 : .into_recorded()
1666 56 : .ok_or_else(|| anyhow!("read_lock_held_key_sort_micros not set"))?,
1667 56 : read_lock_held_prerequisites_micros: value
1668 56 : .read_lock_held_prerequisites_micros
1669 56 : .into_recorded()
1670 56 : .ok_or_else(|| anyhow!("read_lock_held_prerequisites_micros not set"))?,
1671 56 : read_lock_held_compute_holes_micros: value
1672 56 : .read_lock_held_compute_holes_micros
1673 56 : .into_recorded()
1674 56 : .ok_or_else(|| anyhow!("read_lock_held_compute_holes_micros not set"))?,
1675 56 : read_lock_drop_micros: value
1676 56 : .read_lock_drop_micros
1677 56 : .into_recorded()
1678 56 : .ok_or_else(|| anyhow!("read_lock_drop_micros not set"))?,
1679 56 : write_layer_files_micros: value
1680 56 : .write_layer_files_micros
1681 56 : .into_recorded()
1682 56 : .ok_or_else(|| anyhow!("write_layer_files_micros not set"))?,
1683 56 : level0_deltas_count: value
1684 56 : .level0_deltas_count
1685 56 : .ok_or_else(|| anyhow!("level0_deltas_count not set"))?,
1686 56 : new_deltas_count: value
1687 56 : .new_deltas_count
1688 56 : .ok_or_else(|| anyhow!("new_deltas_count not set"))?,
1689 56 : new_deltas_size: value
1690 56 : .new_deltas_size
1691 56 : .ok_or_else(|| anyhow!("new_deltas_size not set"))?,
1692 : })
1693 56 : }
1694 : }
1695 :
1696 : impl Timeline {
1697 : /// Entry point for new tiered compaction algorithm.
1698 : ///
1699 : /// All the real work is in the implementation in the pageserver_compaction
1700 : /// crate. The code here would apply to any algorithm implemented by the
1701 : /// same interface, but tiered is the only one at the moment.
1702 : ///
1703 : /// TODO: cancellation
1704 0 : pub(crate) async fn compact_tiered(
1705 0 : self: &Arc<Self>,
1706 0 : _cancel: &CancellationToken,
1707 0 : ctx: &RequestContext,
1708 0 : ) -> Result<(), CompactionError> {
1709 0 : let fanout = self.get_compaction_threshold() as u64;
1710 0 : let target_file_size = self.get_checkpoint_distance();
1711 :
1712 : // Find the top of the historical layers
1713 0 : let end_lsn = {
1714 0 : let guard = self.layers.read().await;
1715 0 : let layers = guard.layer_map()?;
1716 :
1717 0 : let l0_deltas = layers.level0_deltas();
1718 0 :
1719 0 : // As an optimization, if we find that there are too few L0 layers,
1720 0 : // bail out early. We know that the compaction algorithm would do
1721 0 : // nothing in that case.
1722 0 : if l0_deltas.len() < fanout as usize {
1723 : // doesn't need compacting
1724 0 : return Ok(());
1725 0 : }
1726 0 : l0_deltas.iter().map(|l| l.lsn_range.end).max().unwrap()
1727 0 : };
1728 0 :
1729 0 : // Is the timeline being deleted?
1730 0 : if self.is_stopping() {
1731 0 : trace!("Dropping out of compaction on timeline shutdown");
1732 0 : return Err(CompactionError::ShuttingDown);
1733 0 : }
1734 :
1735 0 : let (dense_ks, _sparse_ks) = self.collect_keyspace(end_lsn, ctx).await?;
1736 : // TODO(chi): ignore sparse_keyspace for now, compact it in the future.
1737 0 : let mut adaptor = TimelineAdaptor::new(self, (end_lsn, dense_ks));
1738 0 :
1739 0 : pageserver_compaction::compact_tiered::compact_tiered(
1740 0 : &mut adaptor,
1741 0 : end_lsn,
1742 0 : target_file_size,
1743 0 : fanout,
1744 0 : ctx,
1745 0 : )
1746 0 : .await
1747 : // TODO: compact_tiered needs to return CompactionError
1748 0 : .map_err(CompactionError::Other)?;
1749 :
1750 0 : adaptor.flush_updates().await?;
1751 0 : Ok(())
1752 0 : }
1753 :
1754 : /// Take a list of images and deltas, produce images and deltas according to GC horizon and retain_lsns.
1755 : ///
1756 : /// It takes a key, the values of the key within the compaction process, a GC horizon, and all retain_lsns below the horizon.
1757 : /// For now, it requires the `accumulated_values` contains the full history of the key (i.e., the key with the lowest LSN is
1758 : /// an image or a WAL not requiring a base image). This restriction will be removed once we implement gc-compaction on branch.
1759 : ///
1760 : /// The function returns the deltas and the base image that need to be placed at each of the retain LSN. For example, we have:
1761 : ///
1762 : /// A@0x10, +B@0x20, +C@0x30, +D@0x40, +E@0x50, +F@0x60
1763 : /// horizon = 0x50, retain_lsn = 0x20, 0x40, delta_threshold=3
1764 : ///
1765 : /// The function will produce:
1766 : ///
1767 : /// ```plain
1768 : /// 0x20(retain_lsn) -> img=AB@0x20 always produce a single image below the lowest retain LSN
1769 : /// 0x40(retain_lsn) -> deltas=[+C@0x30, +D@0x40] two deltas since the last base image, keeping the deltas
1770 : /// 0x50(horizon) -> deltas=[ABCDE@0x50] three deltas since the last base image, generate an image but put it in the delta
1771 : /// above_horizon -> deltas=[+F@0x60] full history above the horizon
1772 : /// ```
1773 : ///
1774 : /// Note that `accumulated_values` must be sorted by LSN and should belong to a single key.
1775 1260 : pub(crate) async fn generate_key_retention(
1776 1260 : self: &Arc<Timeline>,
1777 1260 : key: Key,
1778 1260 : full_history: &[(Key, Lsn, Value)],
1779 1260 : horizon: Lsn,
1780 1260 : retain_lsn_below_horizon: &[Lsn],
1781 1260 : delta_threshold_cnt: usize,
1782 1260 : base_img_from_ancestor: Option<(Key, Lsn, Bytes)>,
1783 1260 : ) -> anyhow::Result<KeyHistoryRetention> {
1784 : // Pre-checks for the invariants
1785 :
1786 1260 : let debug_mode = cfg!(debug_assertions) || cfg!(feature = "testing");
1787 :
1788 1260 : if debug_mode {
1789 3060 : for (log_key, _, _) in full_history {
1790 1800 : assert_eq!(log_key, &key, "mismatched key");
1791 : }
1792 1260 : for i in 1..full_history.len() {
1793 540 : assert!(full_history[i - 1].1 <= full_history[i].1, "unordered LSN");
1794 540 : if full_history[i - 1].1 == full_history[i].1 {
1795 0 : assert!(
1796 0 : matches!(full_history[i - 1].2, Value::Image(_)),
1797 0 : "unordered delta/image, or duplicated delta"
1798 : );
1799 540 : }
1800 : }
1801 : // There used to be an assertion that, when there is no base image, the first
1802 : // record in the history is `will_init`, but it was removed.
1803 : // This is explained in the test cases for generate_key_retention.
1804 : // Search "incomplete history" for more information.
1805 2820 : for lsn in retain_lsn_below_horizon {
1806 1560 : assert!(lsn < &horizon, "retain lsn must be below horizon")
1807 : }
1808 1260 : for i in 1..retain_lsn_below_horizon.len() {
1809 712 : assert!(
1810 712 : retain_lsn_below_horizon[i - 1] <= retain_lsn_below_horizon[i],
1811 0 : "unordered LSN"
1812 : );
1813 : }
1814 0 : }
1815 1260 : let has_ancestor = base_img_from_ancestor.is_some();
1816 : // Step 1: split history into len(retain_lsn_below_horizon) + 2 buckets, where the last bucket is for all deltas above the horizon,
1817 : // and the second-to-last bucket is for the horizon. Each bucket contains lsn_last_bucket < deltas <= lsn_this_bucket.
1818 1260 : let (mut split_history, lsn_split_points) = {
1819 1260 : let mut split_history = Vec::new();
1820 1260 : split_history.resize_with(retain_lsn_below_horizon.len() + 2, Vec::new);
1821 1260 : let mut lsn_split_points = Vec::with_capacity(retain_lsn_below_horizon.len() + 1);
1822 2820 : for lsn in retain_lsn_below_horizon {
1823 1560 : lsn_split_points.push(*lsn);
1824 1560 : }
1825 1260 : lsn_split_points.push(horizon);
1826 1260 : let mut current_idx = 0;
1827 3060 : for item @ (_, lsn, _) in full_history {
1828 2288 : while current_idx < lsn_split_points.len() && *lsn > lsn_split_points[current_idx] {
1829 488 : current_idx += 1;
1830 488 : }
1831 1800 : split_history[current_idx].push(item);
1832 : }
1833 1260 : (split_history, lsn_split_points)
1834 : };
1835 : // Step 2: filter out duplicated records due to the k-merge of image/delta layers
1836 5340 : for split_for_lsn in &mut split_history {
1837 4080 : let mut prev_lsn = None;
1838 4080 : let mut new_split_for_lsn = Vec::with_capacity(split_for_lsn.len());
1839 4080 : for record @ (_, lsn, _) in std::mem::take(split_for_lsn) {
1840 1800 : if let Some(prev_lsn) = &prev_lsn {
1841 236 : if *prev_lsn == lsn {
1842 : // The case that we have an LSN with both data from the delta layer and the image layer. As
1843 : // `ValueWrapper` ensures that an image is ordered before a delta at the same LSN, we simply
1844 : // drop this delta and keep the image.
1845 : //
1846 : // For example, we have delta layer key1@0x10, key1@0x20, and image layer key1@0x10, we will
1847 : // keep the image for key1@0x10 and the delta for key1@0x20. key1@0x10 delta will be simply
1848 : // dropped.
1849 : //
1850 : // TODO: in case we have both delta + images for a given LSN and it does not exceed the delta
1851 : // threshold, we could have kept delta instead to save space. This is an optimization for the future.
1852 0 : continue;
1853 236 : }
1854 1564 : }
1855 1800 : prev_lsn = Some(lsn);
1856 1800 : new_split_for_lsn.push(record);
1857 : }
1858 4080 : *split_for_lsn = new_split_for_lsn;
1859 : }
1860 : // Step 3: generate images when necessary
1861 1260 : let mut retention = Vec::with_capacity(split_history.len());
1862 1260 : let mut records_since_last_image = 0;
1863 1260 : let batch_cnt = split_history.len();
1864 1260 : assert!(
1865 1260 : batch_cnt >= 2,
1866 0 : "should have at least below + above horizon batches"
1867 : );
1868 1260 : let mut replay_history: Vec<(Key, Lsn, Value)> = Vec::new();
1869 1260 : if let Some((key, lsn, img)) = base_img_from_ancestor {
1870 84 : replay_history.push((key, lsn, Value::Image(img)));
1871 1176 : }
1872 :
1873 : /// Generate debug information for the replay history
1874 0 : fn generate_history_trace(replay_history: &[(Key, Lsn, Value)]) -> String {
1875 : use std::fmt::Write;
1876 0 : let mut output = String::new();
1877 0 : if let Some((key, _, _)) = replay_history.first() {
1878 0 : write!(output, "key={} ", key).unwrap();
1879 0 : let mut cnt = 0;
1880 0 : for (_, lsn, val) in replay_history {
1881 0 : if val.is_image() {
1882 0 : write!(output, "i@{} ", lsn).unwrap();
1883 0 : } else if val.will_init() {
1884 0 : write!(output, "di@{} ", lsn).unwrap();
1885 0 : } else {
1886 0 : write!(output, "d@{} ", lsn).unwrap();
1887 0 : }
1888 0 : cnt += 1;
1889 0 : if cnt >= 128 {
1890 0 : write!(output, "... and more").unwrap();
1891 0 : break;
1892 0 : }
1893 : }
1894 0 : } else {
1895 0 : write!(output, "<no history>").unwrap();
1896 0 : }
1897 0 : output
1898 0 : }
1899 :
1900 0 : fn generate_debug_trace(
1901 0 : replay_history: Option<&[(Key, Lsn, Value)]>,
1902 0 : full_history: &[(Key, Lsn, Value)],
1903 0 : lsns: &[Lsn],
1904 0 : horizon: Lsn,
1905 0 : ) -> String {
1906 : use std::fmt::Write;
1907 0 : let mut output = String::new();
1908 0 : if let Some(replay_history) = replay_history {
1909 0 : writeln!(
1910 0 : output,
1911 0 : "replay_history: {}",
1912 0 : generate_history_trace(replay_history)
1913 0 : )
1914 0 : .unwrap();
1915 0 : } else {
1916 0 : writeln!(output, "replay_history: <disabled>",).unwrap();
1917 0 : }
1918 0 : writeln!(
1919 0 : output,
1920 0 : "full_history: {}",
1921 0 : generate_history_trace(full_history)
1922 0 : )
1923 0 : .unwrap();
1924 0 : writeln!(
1925 0 : output,
1926 0 : "when processing: [{}] horizon={}",
1927 0 : lsns.iter().map(|l| format!("{l}")).join(","),
1928 0 : horizon
1929 0 : )
1930 0 : .unwrap();
1931 0 : output
1932 0 : }
1933 :
1934 1260 : let mut key_exists = false;
1935 4080 : for (i, split_for_lsn) in split_history.into_iter().enumerate() {
1936 : // TODO: there could be image keys inside the splits, and we can compute records_since_last_image accordingly.
1937 4080 : records_since_last_image += split_for_lsn.len();
1938 : // Whether to produce an image into the final layer files
1939 4080 : let produce_image = if i == 0 && !has_ancestor {
1940 : // We always generate images for the first batch (below horizon / lowest retain_lsn)
1941 1176 : true
1942 2904 : } else if i == batch_cnt - 1 {
1943 : // Do not generate images for the last batch (above horizon)
1944 1260 : false
1945 1644 : } else if records_since_last_image == 0 {
1946 1288 : false
1947 356 : } else if records_since_last_image >= delta_threshold_cnt {
1948 : // Generate images when there are too many records
1949 12 : true
1950 : } else {
1951 344 : false
1952 : };
1953 4080 : replay_history.extend(split_for_lsn.iter().map(|x| (*x).clone()));
1954 : // Only retain the items after the last image record
1955 5028 : for idx in (0..replay_history.len()).rev() {
1956 5028 : if replay_history[idx].2.will_init() {
1957 4080 : replay_history = replay_history[idx..].to_vec();
1958 4080 : break;
1959 948 : }
1960 : }
1961 4080 : if replay_history.is_empty() && !key_exists {
1962 : // The key does not exist at earlier LSN, we can skip this iteration.
1963 0 : retention.push(Vec::new());
1964 0 : continue;
1965 4080 : } else {
1966 4080 : key_exists = true;
1967 4080 : }
1968 4080 : let Some((_, _, val)) = replay_history.first() else {
1969 0 : unreachable!("replay history should not be empty once it exists")
1970 : };
1971 4080 : if !val.will_init() {
1972 0 : return Err(anyhow::anyhow!("invalid history, no base image")).with_context(|| {
1973 0 : generate_debug_trace(
1974 0 : Some(&replay_history),
1975 0 : full_history,
1976 0 : retain_lsn_below_horizon,
1977 0 : horizon,
1978 0 : )
1979 0 : });
1980 4080 : }
1981 : // Whether to reconstruct the image. In debug mode, we will generate an image
1982 : // at every retain_lsn to ensure data is not corrupted, but we won't put the
1983 : // image into the final layer.
1984 4080 : let generate_image = produce_image || debug_mode;
1985 4080 : if produce_image {
1986 1188 : records_since_last_image = 0;
1987 2892 : }
1988 4080 : let img_and_lsn = if generate_image {
1989 4080 : let replay_history_for_debug = if debug_mode {
1990 4080 : Some(replay_history.clone())
1991 : } else {
1992 0 : None
1993 : };
1994 4080 : let replay_history_for_debug_ref = replay_history_for_debug.as_deref();
1995 4080 : let history = if produce_image {
1996 1188 : std::mem::take(&mut replay_history)
1997 : } else {
1998 2892 : replay_history.clone()
1999 : };
2000 4080 : let mut img = None;
2001 4080 : let mut records = Vec::with_capacity(history.len());
2002 4080 : if let (_, lsn, Value::Image(val)) = history.first().as_ref().unwrap() {
2003 4036 : img = Some((*lsn, val.clone()));
2004 4036 : for (_, lsn, val) in history.into_iter().skip(1) {
2005 920 : let Value::WalRecord(rec) = val else {
2006 0 : return Err(anyhow::anyhow!(
2007 0 : "invalid record, first record is image, expect walrecords"
2008 0 : ))
2009 0 : .with_context(|| {
2010 0 : generate_debug_trace(
2011 0 : replay_history_for_debug_ref,
2012 0 : full_history,
2013 0 : retain_lsn_below_horizon,
2014 0 : horizon,
2015 0 : )
2016 0 : });
2017 : };
2018 920 : records.push((lsn, rec));
2019 : }
2020 : } else {
2021 72 : for (_, lsn, val) in history.into_iter() {
2022 72 : let Value::WalRecord(rec) = val else {
2023 0 : return Err(anyhow::anyhow!("invalid record, first record is walrecord, expect rest are walrecord"))
2024 0 : .with_context(|| generate_debug_trace(
2025 0 : replay_history_for_debug_ref,
2026 0 : full_history,
2027 0 : retain_lsn_below_horizon,
2028 0 : horizon,
2029 0 : ));
2030 : };
2031 72 : records.push((lsn, rec));
2032 : }
2033 : }
2034 4080 : records.reverse();
2035 4080 : let state = ValueReconstructState { img, records };
2036 : // The last batch does not generate an image, so i is always in range, unless we force-generate
2037 : // an image during testing
2038 4080 : let request_lsn = if i >= lsn_split_points.len() {
2039 1260 : Lsn::MAX
2040 : } else {
2041 2820 : lsn_split_points[i]
2042 : };
2043 4080 : let img = self.reconstruct_value(key, request_lsn, state).await?;
2044 4080 : Some((request_lsn, img))
2045 : } else {
2046 0 : None
2047 : };
2048 4080 : if produce_image {
2049 1188 : let (request_lsn, img) = img_and_lsn.unwrap();
2050 1188 : replay_history.push((key, request_lsn, Value::Image(img.clone())));
2051 1188 : retention.push(vec![(request_lsn, Value::Image(img))]);
2052 2892 : } else {
2053 2892 : let deltas = split_for_lsn
2054 2892 : .iter()
2055 2892 : .map(|(_, lsn, value)| (*lsn, value.clone()))
2056 2892 : .collect_vec();
2057 2892 : retention.push(deltas);
2058 2892 : }
2059 : }
2060 1260 : let mut result = Vec::with_capacity(retention.len());
2061 1260 : assert_eq!(retention.len(), lsn_split_points.len() + 1);
2062 4080 : for (idx, logs) in retention.into_iter().enumerate() {
2063 4080 : if idx == lsn_split_points.len() {
2064 1260 : return Ok(KeyHistoryRetention {
2065 1260 : below_horizon: result,
2066 1260 : above_horizon: KeyLogAtLsn(logs),
2067 1260 : });
2068 2820 : } else {
2069 2820 : result.push((lsn_split_points[idx], KeyLogAtLsn(logs)));
2070 2820 : }
2071 : }
2072 0 : unreachable!("key retention is empty")
2073 1260 : }
2074 :
2075 : /// Check how much space is left on the disk
2076 104 : async fn check_available_space(self: &Arc<Self>) -> anyhow::Result<u64> {
2077 104 : let tenants_dir = self.conf.tenants_path();
2078 :
2079 104 : let stat = Statvfs::get(&tenants_dir, None)
2080 104 : .context("statvfs failed, presumably directory got unlinked")?;
2081 :
2082 104 : let (avail_bytes, _) = stat.get_avail_total_bytes();
2083 104 :
2084 104 : Ok(avail_bytes)
2085 104 : }
2086 :
2087 : /// Check if the compaction can proceed safely without running out of space. We assume the size
2088 : /// upper bound of the produced files of a compaction job is the same as all layers involved in
2089 : /// the compaction. Therefore, we need `2 * layers_to_be_compacted_size` at least to do a
2090 : /// compaction.
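 : ///
 : /// For example (illustrative numbers only): with 100 GiB available we treat 80 GiB (80%)
 : /// as usable; a job whose selected layers total 30 GiB, of which 5 GiB still need to be
 : /// downloaded, requires 30 + 5 = 35 GiB and is therefore allowed to proceed.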
2091 104 : async fn check_compaction_space(
2092 104 : self: &Arc<Self>,
2093 104 : layer_selection: &[Layer],
2094 104 : ) -> anyhow::Result<()> {
2095 104 : let available_space = self.check_available_space().await?;
2096 104 : let mut remote_layer_size = 0;
2097 104 : let mut all_layer_size = 0;
2098 408 : for layer in layer_selection {
2099 304 : let needs_download = layer.needs_download().await?;
2100 304 : if needs_download.is_some() {
2101 0 : remote_layer_size += layer.layer_desc().file_size;
2102 304 : }
2103 304 : all_layer_size += layer.layer_desc().file_size;
2104 : }
2105 104 : let allocated_space = (available_space as f64 * 0.8) as u64; /* reserve 20% space for other tasks */
2106 104 : if all_layer_size /* space needed for newly-generated file */ + remote_layer_size /* space for downloading layers */ > allocated_space
2107 : {
2108 0 : return Err(anyhow!("not enough space for compaction: available_space={}, allocated_space={}, all_layer_size={}, remote_layer_size={}, required_space={}",
2109 0 : available_space, allocated_space, all_layer_size, remote_layer_size, all_layer_size + remote_layer_size));
2110 104 : }
2111 104 : Ok(())
2112 104 : }
2113 :
2114 : /// Get a watermark for gc-compaction, that is the lowest LSN that we can use as the `gc_horizon` for
2115 : /// the compaction algorithm. It is min(space_cutoff, time_cutoff, latest_gc_cutoff, standby_horizon).
2116 : /// Leases and retain_lsns are considered in the gc-compaction job itself so we don't need to account for them
2117 : /// here.
2118 108 : pub(crate) fn get_gc_compaction_watermark(self: &Arc<Self>) -> Lsn {
2119 108 : let gc_cutoff_lsn = {
2120 108 : let gc_info = self.gc_info.read().unwrap();
2121 108 : gc_info.min_cutoff()
2122 108 : };
2123 108 :
2124 108 : // TODO: standby horizon should use leases so we don't really need to consider it here.
2125 108 : // let watermark = watermark.min(self.standby_horizon.load());
2126 108 :
2127 108 : // TODO: ensure the child branches will not use anything below the watermark, or consider
2128 108 : // them when computing the watermark.
2129 108 : gc_cutoff_lsn.min(*self.get_latest_gc_cutoff_lsn())
2130 108 : }
2131 :
2132 : /// Split a gc-compaction job into multiple compaction jobs. The split is based on the key range and the estimated size of the compaction job.
2133 : /// The function returns a list of compaction jobs that can be executed separately. If the upper bound of the compact LSN
2134 : /// range is not specified, we will use the latest gc_cutoff as the upper bound, so that all jobs in the jobset act
2135 : /// like a full compaction of the specified keyspace.
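 : ///
 : /// For example (illustrative only): with the default ~4GB sub-job size, a key range whose
 : /// layers add up to roughly 10GB would typically be split into three sub-jobs, each covering
 : /// a contiguous slice of the key range and all sharing the same LSN upper bound.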
2136 0 : pub(crate) async fn gc_compaction_split_jobs(
2137 0 : self: &Arc<Self>,
2138 0 : job: GcCompactJob,
2139 0 : sub_compaction_max_job_size_mb: Option<u64>,
2140 0 : ) -> anyhow::Result<Vec<GcCompactJob>> {
2141 0 : let compact_below_lsn = if job.compact_lsn_range.end != Lsn::MAX {
2142 0 : job.compact_lsn_range.end
2143 : } else {
2144 0 : self.get_gc_compaction_watermark()
2145 : };
2146 :
2147 0 : if compact_below_lsn == Lsn::INVALID {
2148 0 : tracing::warn!("no layers to compact with gc: gc_cutoff not generated yet, skipping gc bottom-most compaction");
2149 0 : return Ok(vec![]);
2150 0 : }
2151 :
2152 : // Split the compaction job into sub-jobs of about 4GB each
2153 : const GC_COMPACT_MAX_SIZE_MB: u64 = 4 * 1024;
2154 0 : let sub_compaction_max_job_size_mb =
2155 0 : sub_compaction_max_job_size_mb.unwrap_or(GC_COMPACT_MAX_SIZE_MB);
2156 0 :
2157 0 : let mut compact_jobs = Vec::new();
2158 0 : // For now, we simply use the key partitioning information; we should do a more fine-grained partitioning
2159 0 : // by estimating the amount of files read for a compaction job. We should also partition on LSN.
2160 0 : let ((dense_ks, sparse_ks), _) = self.partitioning.read().as_ref().clone();
2161 : // Truncate the key range to be within user specified compaction range.
2162 0 : fn truncate_to(
2163 0 : source_start: &Key,
2164 0 : source_end: &Key,
2165 0 : target_start: &Key,
2166 0 : target_end: &Key,
2167 0 : ) -> Option<(Key, Key)> {
2168 0 : let start = source_start.max(target_start);
2169 0 : let end = source_end.min(target_end);
2170 0 : if start < end {
2171 0 : Some((*start, *end))
2172 : } else {
2173 0 : None
2174 : }
2175 0 : }
2176 0 : let mut split_key_ranges = Vec::new();
2177 0 : let ranges = dense_ks
2178 0 : .parts
2179 0 : .iter()
2180 0 : .map(|partition| partition.ranges.iter())
2181 0 : .chain(sparse_ks.parts.iter().map(|x| x.0.ranges.iter()))
2182 0 : .flatten()
2183 0 : .cloned()
2184 0 : .collect_vec();
2185 0 : for range in ranges.iter() {
2186 0 : let Some((start, end)) = truncate_to(
2187 0 : &range.start,
2188 0 : &range.end,
2189 0 : &job.compact_key_range.start,
2190 0 : &job.compact_key_range.end,
2191 0 : ) else {
2192 0 : continue;
2193 : };
2194 0 : split_key_ranges.push((start, end));
2195 : }
2196 0 : split_key_ranges.sort();
2197 0 : let guard = self.layers.read().await;
2198 0 : let layer_map = guard.layer_map()?;
2199 0 : let mut current_start = None;
2200 0 : let ranges_num = split_key_ranges.len();
2201 0 : for (idx, (start, end)) in split_key_ranges.into_iter().enumerate() {
2202 0 : if current_start.is_none() {
2203 0 : current_start = Some(start);
2204 0 : }
2205 0 : let start = current_start.unwrap();
2206 0 : if start >= end {
2207 : // We have already processed this partition.
2208 0 : continue;
2209 0 : }
2210 0 : let res = layer_map.range_search(start..end, compact_below_lsn);
2211 0 : let total_size = res.found.keys().map(|x| x.layer.file_size()).sum::<u64>();
2212 0 : if total_size > sub_compaction_max_job_size_mb * 1024 * 1024 || ranges_num == idx + 1 {
2213 : // Try to extend the compaction range so that we include at least one full layer file.
2214 0 : let extended_end = res
2215 0 : .found
2216 0 : .keys()
2217 0 : .map(|layer| layer.layer.key_range.end)
2218 0 : .min();
2219 : // It is possible that the search range does not contain any layer files when we reach the end of the loop.
2220 : // In this case, we simply use the specified key range end.
2221 0 : let end = if let Some(extended_end) = extended_end {
2222 0 : extended_end.max(end)
2223 : } else {
2224 0 : end
2225 : };
2226 0 : let end = if ranges_num == idx + 1 {
2227 : // extend the compaction range to the end of the key range if it's the last partition
2228 0 : end.max(job.compact_key_range.end)
2229 : } else {
2230 0 : end
2231 : };
2232 0 : info!(
2233 0 : "splitting compaction job: {}..{}, estimated_size={}",
2234 : start, end, total_size
2235 : );
2236 0 : compact_jobs.push(GcCompactJob {
2237 0 : dry_run: job.dry_run,
2238 0 : compact_key_range: start..end,
2239 0 : compact_lsn_range: job.compact_lsn_range.start..compact_below_lsn,
2240 0 : });
2241 0 : current_start = Some(end);
2242 0 : }
2243 : }
2244 0 : drop(guard);
2245 0 : Ok(compact_jobs)
2246 0 : }
2247 :
2248 : /// An experimental compaction building block that combines compaction with garbage collection.
2249 : ///
2250 : /// The current implementation picks all delta + image layers that are below or intersecting with
2251 : /// the GC horizon without considering retain_lsns. Then, it does a full compaction over all these delta
2252 : /// layers and image layers, which generates image layers on the gc horizon, drop deltas below gc horizon,
2253 : /// and create delta layers with all deltas >= gc horizon.
2254 : ///
2255 : /// If `options.compact_range` is provided, it will only compact the keys within the range, aka partial compaction.
2256 : /// Partial compaction will read and process all layers overlapping with the key range, even if it might
2257 : /// contain extra keys. After the gc-compaction phase completes, delta layers that are not fully contained
2258 : /// within the key range will be rewritten to ensure they do not overlap with the delta layers. Providing
2259 : /// `Key::MIN..Key::MAX` to the function indicates a full compaction, though technically, `Key::MAX` is not
2260 : /// part of the range.
2261 : ///
2262 : /// If `options.compact_lsn_range.end` is provided, the compaction will only compact layers below or intersect with
2263 : /// the LSN. Otherwise, it will use the gc cutoff by default.
2264 108 : pub(crate) async fn compact_with_gc(
2265 108 : self: &Arc<Self>,
2266 108 : cancel: &CancellationToken,
2267 108 : options: CompactOptions,
2268 108 : ctx: &RequestContext,
2269 108 : ) -> anyhow::Result<()> {
2270 108 : let sub_compaction = options.sub_compaction;
2271 108 : let job = GcCompactJob::from_compact_options(options.clone());
2272 108 : if sub_compaction {
2273 0 : info!("running enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs");
2274 0 : let jobs = self
2275 0 : .gc_compaction_split_jobs(job, options.sub_compaction_max_job_size_mb)
2276 0 : .await?;
2277 0 : let jobs_len = jobs.len();
2278 0 : for (idx, job) in jobs.into_iter().enumerate() {
2279 0 : info!(
2280 0 : "running enhanced gc bottom-most compaction, sub-compaction {}/{}",
2281 0 : idx + 1,
2282 : jobs_len
2283 : );
2284 0 : self.compact_with_gc_inner(cancel, job, ctx).await?;
2285 : }
2286 0 : if jobs_len == 0 {
2287 0 : info!("no jobs to run, skipping gc bottom-most compaction");
2288 0 : }
2289 0 : return Ok(());
2290 108 : }
2291 108 : self.compact_with_gc_inner(cancel, job, ctx).await
2292 108 : }
2293 :
2294 108 : async fn compact_with_gc_inner(
2295 108 : self: &Arc<Self>,
2296 108 : cancel: &CancellationToken,
2297 108 : job: GcCompactJob,
2298 108 : ctx: &RequestContext,
2299 108 : ) -> anyhow::Result<()> {
2300 108 : // Block other compaction/GC tasks from running for now. GC-compaction could run along
2301 108 : // with legacy compaction tasks in the future. Always ensure the lock order is compaction -> gc.
2302 108 : // Note that we already acquired the compaction lock when the outer `compact` function gets called.
2303 108 :
2304 108 : let gc_lock = async {
2305 108 : tokio::select! {
2306 108 : guard = self.gc_lock.lock() => Ok(guard),
2307 : // TODO: refactor to CompactionError to correctly pass cancelled error
2308 108 : _ = cancel.cancelled() => Err(anyhow!("cancelled")),
2309 : }
2310 108 : };
2311 :
2312 108 : let gc_lock = crate::timed(
2313 108 : gc_lock,
2314 108 : "acquires gc lock",
2315 108 : std::time::Duration::from_secs(5),
2316 108 : )
2317 108 : .await?;
2318 :
2319 108 : let dry_run = job.dry_run;
2320 108 : let compact_key_range = job.compact_key_range;
2321 108 : let compact_lsn_range = job.compact_lsn_range;
2322 :
2323 108 : let debug_mode = cfg!(debug_assertions) || cfg!(feature = "testing");
2324 :
2325 108 : info!("running enhanced gc bottom-most compaction, dry_run={dry_run}, compact_key_range={}..{}, compact_lsn_range={}..{}", compact_key_range.start, compact_key_range.end, compact_lsn_range.start, compact_lsn_range.end);
2326 :
2327 108 : scopeguard::defer! {
2328 108 : info!("done enhanced gc bottom-most compaction");
2329 108 : };
2330 108 :
2331 108 : let mut stat = CompactionStatistics::default();
2332 :
2333 : // Step 0: pick all delta layers + image layers below/intersect with the GC horizon.
2334 : // The layer selection has the following properties:
2335 : // 1. If a layer is in the selection, all layers below it are in the selection.
2336 : // 2. Inferred from (1), for each key in the layer selection, the value can be reconstructed only with the layers in the layer selection.
2337 104 : let job_desc = {
2338 108 : let guard = self.layers.read().await;
2339 108 : let layers = guard.layer_map()?;
2340 108 : let gc_info = self.gc_info.read().unwrap();
2341 108 : let mut retain_lsns_below_horizon = Vec::new();
2342 108 : let gc_cutoff = {
2343 : // Currently, gc-compaction only kicks in after the legacy gc has updated the gc_cutoff.
2344 : // Therefore, it can only clean up data that cannot be cleaned up with legacy gc, instead of
2345 : // cleaning everything that it theoretically could. In the future, it should use `self.gc_info`
2346 : // as the source of truth.
2347 108 : let real_gc_cutoff = self.get_gc_compaction_watermark();
2348 : // The compaction algorithm will keep all keys above the gc_cutoff while keeping only necessary keys below the gc_cutoff for
2349 : // each of the retain_lsn. Therefore, if the user-provided `compact_lsn_range.end` is larger than the real gc cutoff, we will use
2350 : // the real cutoff.
2351 108 : let mut gc_cutoff = if compact_lsn_range.end == Lsn::MAX {
2352 96 : if real_gc_cutoff == Lsn::INVALID {
2353 : // If the gc_cutoff is not generated yet, we should not compact anything.
2354 0 : tracing::warn!("no layers to compact with gc: gc_cutoff not generated yet, skipping gc bottom-most compaction");
2355 0 : return Ok(());
2356 96 : }
2357 96 : real_gc_cutoff
2358 : } else {
2359 12 : compact_lsn_range.end
2360 : };
2361 108 : if gc_cutoff > real_gc_cutoff {
2362 8 : warn!("provided compact_lsn_range.end={} is larger than the real_gc_cutoff={}, using the real gc cutoff", gc_cutoff, real_gc_cutoff);
2363 8 : gc_cutoff = real_gc_cutoff;
2364 100 : }
2365 108 : gc_cutoff
2366 : };
2367 140 : for (lsn, _timeline_id, _is_offloaded) in &gc_info.retain_lsns {
2368 140 : if lsn < &gc_cutoff {
2369 140 : retain_lsns_below_horizon.push(*lsn);
2370 140 : }
2371 : }
2372 108 : for lsn in gc_info.leases.keys() {
2373 0 : if lsn < &gc_cutoff {
2374 0 : retain_lsns_below_horizon.push(*lsn);
2375 0 : }
2376 : }
2377 108 : let mut selected_layers: Vec<Layer> = Vec::new();
2378 108 : drop(gc_info);
2379 : // Firstly, pick all the layers intersect or below the gc_cutoff, get the largest LSN in the selected layers.
2380 108 : let Some(max_layer_lsn) = layers
2381 108 : .iter_historic_layers()
2382 488 : .filter(|desc| desc.get_lsn_range().start <= gc_cutoff)
2383 416 : .map(|desc| desc.get_lsn_range().end)
2384 108 : .max()
2385 : else {
2386 0 : info!("no layers to compact with gc: no historic layers below gc_cutoff, gc_cutoff={}", gc_cutoff);
2387 0 : return Ok(());
2388 : };
2389 : // Next, if the user specifies compact_lsn_range.start, we need to filter some layers out. All the layers (strictly) below
2390 : // the min_layer_lsn computed as below will be filtered out and the data will be accessed using the normal read path, as if
2391 : // it is a branch.
2392 108 : let Some(min_layer_lsn) = layers
2393 108 : .iter_historic_layers()
2394 488 : .filter(|desc| {
2395 488 : if compact_lsn_range.start == Lsn::INVALID {
2396 396 : true // select all layers below if start == Lsn(0)
2397 : } else {
2398 92 : desc.get_lsn_range().end > compact_lsn_range.start // strictly larger than compact_above_lsn
2399 : }
2400 488 : })
2401 452 : .map(|desc| desc.get_lsn_range().start)
2402 108 : .min()
2403 : else {
2404 0 : info!("no layers to compact with gc: no historic layers above compact_above_lsn, compact_above_lsn={}", compact_lsn_range.end);
2405 0 : return Ok(());
2406 : };
2407 : // Then, pick all the layers that are below the max_layer_lsn. This is to ensure we can pick all single-key
2408 : // layers to compact.
2409 108 : let mut rewrite_layers = Vec::new();
2410 488 : for desc in layers.iter_historic_layers() {
2411 488 : if desc.get_lsn_range().end <= max_layer_lsn
2412 416 : && desc.get_lsn_range().start >= min_layer_lsn
2413 380 : && overlaps_with(&desc.get_key_range(), &compact_key_range)
2414 : {
2415 : // If the layer overlaps with the compaction key range, we need to read it to obtain all keys within the range,
2416 : // even if it might contain extra keys
2417 304 : selected_layers.push(guard.get_from_desc(&desc));
2418 304 : // If the layer is not fully contained within the key range, we need to rewrite it if it's a delta layer (it's fine
2419 304 : // to overlap image layers)
2420 304 : if desc.is_delta() && !fully_contains(&compact_key_range, &desc.get_key_range())
2421 4 : {
2422 4 : rewrite_layers.push(desc);
2423 300 : }
2424 184 : }
2425 : }
2426 108 : if selected_layers.is_empty() {
2427 4 : info!("no layers to compact with gc: no layers within the key range, gc_cutoff={}, key_range={}..{}", gc_cutoff, compact_key_range.start, compact_key_range.end);
2428 4 : return Ok(());
2429 104 : }
2430 104 : retain_lsns_below_horizon.sort();
2431 104 : GcCompactionJobDescription {
2432 104 : selected_layers,
2433 104 : gc_cutoff,
2434 104 : retain_lsns_below_horizon,
2435 104 : min_layer_lsn,
2436 104 : max_layer_lsn,
2437 104 : compaction_key_range: compact_key_range,
2438 104 : rewrite_layers,
2439 104 : }
2440 : };
2441 104 : let (has_data_below, lowest_retain_lsn) = if compact_lsn_range.start != Lsn::INVALID {
2442 : // If we only compact above some LSN, we should get the history from the current branch below the specified LSN.
2443 : // We use job_desc.min_layer_lsn as if it's the lowest branch point.
2444 16 : (true, job_desc.min_layer_lsn)
2445 88 : } else if self.ancestor_timeline.is_some() {
2446 : // In theory, we can also use min_layer_lsn here, but using ancestor LSN makes sure the delta layers cover the
2447 : // LSN ranges all the way to the ancestor timeline.
2448 4 : (true, self.ancestor_lsn)
2449 : } else {
2450 84 : let res = job_desc
2451 84 : .retain_lsns_below_horizon
2452 84 : .first()
2453 84 : .copied()
2454 84 : .unwrap_or(job_desc.gc_cutoff);
2455 84 : if debug_mode {
2456 84 : assert_eq!(
2457 84 : res,
2458 84 : job_desc
2459 84 : .retain_lsns_below_horizon
2460 84 : .iter()
2461 84 : .min()
2462 84 : .copied()
2463 84 : .unwrap_or(job_desc.gc_cutoff)
2464 84 : );
2465 0 : }
2466 84 : (false, res)
2467 : };
2468 104 : info!(
2469 0 : "picked {} layers for compaction ({} layers need rewriting) with max_layer_lsn={} min_layer_lsn={} gc_cutoff={} lowest_retain_lsn={}, key_range={}..{}, has_data_below={}",
2470 0 : job_desc.selected_layers.len(),
2471 0 : job_desc.rewrite_layers.len(),
2472 : job_desc.max_layer_lsn,
2473 : job_desc.min_layer_lsn,
2474 : job_desc.gc_cutoff,
2475 : lowest_retain_lsn,
2476 : job_desc.compaction_key_range.start,
2477 : job_desc.compaction_key_range.end,
2478 : has_data_below,
2479 : );
2480 :
2481 408 : for layer in &job_desc.selected_layers {
2482 304 : debug!("read layer: {}", layer.layer_desc().key());
2483 : }
2484 108 : for layer in &job_desc.rewrite_layers {
2485 4 : debug!("rewrite layer: {}", layer.key());
2486 : }
2487 :
2488 104 : self.check_compaction_space(&job_desc.selected_layers)
2489 104 : .await?;
2490 :
2491 : // Generate statistics for the compaction
2492 408 : for layer in &job_desc.selected_layers {
2493 304 : let desc = layer.layer_desc();
2494 304 : if desc.is_delta() {
2495 172 : stat.visit_delta_layer(desc.file_size());
2496 172 : } else {
2497 132 : stat.visit_image_layer(desc.file_size());
2498 132 : }
2499 : }
2500 :
2501 : // Step 1: construct a k-merge iterator over all layers.
2502 : // Also, verify if the layer map can be split by drawing a horizontal line at every LSN start/end split point.
2503 104 : let layer_names = job_desc
2504 104 : .selected_layers
2505 104 : .iter()
2506 304 : .map(|layer| layer.layer_desc().layer_name())
2507 104 : .collect_vec();
2508 104 : if let Some(err) = check_valid_layermap(&layer_names) {
2509 0 : bail!("gc-compaction layer map check failed because {}, cannot proceed with compaction due to potential data loss", err);
2510 104 : }
2511 104 : // The maximum LSN we are processing in this compaction loop
2512 104 : let end_lsn = job_desc
2513 104 : .selected_layers
2514 104 : .iter()
2515 304 : .map(|l| l.layer_desc().lsn_range.end)
2516 104 : .max()
2517 104 : .unwrap();
2518 104 : let mut delta_layers = Vec::new();
2519 104 : let mut image_layers = Vec::new();
2520 104 : let mut downloaded_layers = Vec::new();
2521 104 : let mut total_downloaded_size = 0;
2522 104 : let mut total_layer_size = 0;
2523 408 : for layer in &job_desc.selected_layers {
2524 304 : if layer.needs_download().await?.is_some() {
2525 0 : total_downloaded_size += layer.layer_desc().file_size;
2526 304 : }
2527 304 : total_layer_size += layer.layer_desc().file_size;
2528 304 : let resident_layer = layer.download_and_keep_resident().await?;
2529 304 : downloaded_layers.push(resident_layer);
2530 : }
2531 104 : info!(
2532 0 : "finish downloading layers, downloaded={}, total={}, ratio={:.2}",
2533 0 : total_downloaded_size,
2534 0 : total_layer_size,
2535 0 : total_downloaded_size as f64 / total_layer_size as f64
2536 : );
2537 408 : for resident_layer in &downloaded_layers {
2538 304 : if resident_layer.layer_desc().is_delta() {
2539 172 : let layer = resident_layer.get_as_delta(ctx).await?;
2540 172 : delta_layers.push(layer);
2541 : } else {
2542 132 : let layer = resident_layer.get_as_image(ctx).await?;
2543 132 : image_layers.push(layer);
2544 : }
2545 : }
2546 104 : let (dense_ks, sparse_ks) = self.collect_gc_compaction_keyspace().await?;
2547 104 : let mut merge_iter = FilterIterator::create(
2548 104 : MergeIterator::create(&delta_layers, &image_layers, ctx),
2549 104 : dense_ks,
2550 104 : sparse_ks,
2551 104 : )?;
2552 :
2553 : // Step 2: Produce images+deltas.
2554 104 : let mut accumulated_values = Vec::new();
2555 104 : let mut last_key: Option<Key> = None;
2556 :
2557 : // Only create image layers when there are no ancestor branches. TODO: create a covering image layer
2558 : // when some conditions are met.
2559 104 : let mut image_layer_writer = if !has_data_below {
2560 : Some(
2561 84 : SplitImageLayerWriter::new(
2562 84 : self.conf,
2563 84 : self.timeline_id,
2564 84 : self.tenant_shard_id,
2565 84 : job_desc.compaction_key_range.start,
2566 84 : lowest_retain_lsn,
2567 84 : self.get_compaction_target_size(),
2568 84 : ctx,
2569 84 : )
2570 84 : .await?,
2571 : )
2572 : } else {
2573 20 : None
2574 : };
2575 :
2576 104 : let mut delta_layer_writer = SplitDeltaLayerWriter::new(
2577 104 : self.conf,
2578 104 : self.timeline_id,
2579 104 : self.tenant_shard_id,
2580 104 : lowest_retain_lsn..end_lsn,
2581 104 : self.get_compaction_target_size(),
2582 104 : )
2583 104 : .await?;
2584 :
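 : // Writers for the portions of a partially overlapping delta layer that fall outside the
 : // compaction key range: `before` covers keys below the range, `after` covers keys above it.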
2585 : #[derive(Default)]
2586 : struct RewritingLayers {
2587 : before: Option<DeltaLayerWriter>,
2588 : after: Option<DeltaLayerWriter>,
2589 : }
2590 104 : let mut delta_layer_rewriters = HashMap::<Arc<PersistentLayerKey>, RewritingLayers>::new();
2591 :
2592 : /// When we are not compacting the bottom range (=`[0,X)`) of the root branch, we "have data below" (`has_data_below=true`).
2593 : /// The two cases are compaction in ancestor branches and when `compact_lsn_range.start` is set.
2594 : /// In those cases, we need to pull up data from below the LSN range we're compacting.
2595 : ///
2596 : /// This function unifies the cases so that later code doesn't have to think about it.
2597 : ///
2598 : /// Currently, we always get the ancestor image for each key in the child branch, regardless of whether the image
2599 : /// is needed for reconstruction. This should be fixed in the future.
2600 : ///
2601 : /// Furthermore, we should do vectored get instead of a single get, or better, use k-merge for ancestor
2602 : /// images.
2603 1244 : async fn get_ancestor_image(
2604 1244 : this_tline: &Arc<Timeline>,
2605 1244 : key: Key,
2606 1244 : ctx: &RequestContext,
2607 1244 : has_data_below: bool,
2608 1244 : history_lsn_point: Lsn,
2609 1244 : ) -> anyhow::Result<Option<(Key, Lsn, Bytes)>> {
2610 1244 : if !has_data_below {
2611 1168 : return Ok(None);
2612 76 : };
2613 : // This function is implemented as a get of the current timeline at ancestor LSN, therefore reusing
2614 : // as much existing code as possible.
2615 76 : let img = this_tline.get(key, history_lsn_point, ctx).await?;
2616 76 : Ok(Some((key, history_lsn_point, img)))
2617 1244 : }
2618 :
2619 : // Actually, we can decide not to write to the image layer at all at this point because
2620 : // the key and LSN range are determined. However, to keep things simple here, we still
2621 : // create this writer and discard it at the end.
2622 :
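 : // Main gc-compaction loop: drain the k-merge iterator, buffer all values of the current key
 : // in `accumulated_values`, and, once the key changes, compute its retention and pipe the
 : // result into the image/delta writers.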
2623 1932 : while let Some(((key, lsn, val), desc)) = merge_iter.next_with_trace().await? {
2624 1828 : if cancel.is_cancelled() {
2625 0 : return Err(anyhow!("cancelled")); // TODO: refactor to CompactionError and pass cancel error
2626 1828 : }
2627 1828 : if self.shard_identity.is_key_disposable(&key) {
2628 : // If this shard does not need to store this key, simply skip it.
2629 : //
2630 : // This is not handled in the filter iterator because shard is determined by hash.
2631 : // Therefore, it does not give us any performance benefit to do things like skip
2632 : // a whole layer file as handling key spaces (ranges).
2633 0 : if cfg!(debug_assertions) {
2634 0 : let shard = self.shard_identity.shard_index();
2635 0 : let owner = self.shard_identity.get_shard_number(&key);
2636 0 : panic!("key {key} does not belong on shard {shard}, owned by {owner}");
2637 0 : }
2638 0 : continue;
2639 1828 : }
2640 1828 : if !job_desc.compaction_key_range.contains(&key) {
2641 128 : if !desc.is_delta {
2642 120 : continue;
2643 8 : }
2644 8 : let rewriter = delta_layer_rewriters.entry(desc.clone()).or_default();
2645 8 : let rewriter = if key < job_desc.compaction_key_range.start {
2646 0 : if rewriter.before.is_none() {
2647 0 : rewriter.before = Some(
2648 0 : DeltaLayerWriter::new(
2649 0 : self.conf,
2650 0 : self.timeline_id,
2651 0 : self.tenant_shard_id,
2652 0 : desc.key_range.start,
2653 0 : desc.lsn_range.clone(),
2654 0 : ctx,
2655 0 : )
2656 0 : .await?,
2657 : );
2658 0 : }
2659 0 : rewriter.before.as_mut().unwrap()
2660 8 : } else if key >= job_desc.compaction_key_range.end {
2661 8 : if rewriter.after.is_none() {
2662 4 : rewriter.after = Some(
2663 4 : DeltaLayerWriter::new(
2664 4 : self.conf,
2665 4 : self.timeline_id,
2666 4 : self.tenant_shard_id,
2667 4 : job_desc.compaction_key_range.end,
2668 4 : desc.lsn_range.clone(),
2669 4 : ctx,
2670 4 : )
2671 4 : .await?,
2672 : );
2673 4 : }
2674 8 : rewriter.after.as_mut().unwrap()
2675 : } else {
2676 0 : unreachable!()
2677 : };
2678 8 : rewriter.put_value(key, lsn, val, ctx).await?;
2679 8 : continue;
2680 1700 : }
2681 1700 : match val {
2682 1220 : Value::Image(_) => stat.visit_image_key(&val),
2683 480 : Value::WalRecord(_) => stat.visit_wal_key(&val),
2684 : }
2685 1700 : if last_key.is_none() || last_key.as_ref() == Some(&key) {
2686 560 : if last_key.is_none() {
2687 104 : last_key = Some(key);
2688 456 : }
2689 560 : accumulated_values.push((key, lsn, val));
2690 : } else {
2691 1140 : let last_key: &mut Key = last_key.as_mut().unwrap();
2692 1140 : stat.on_unique_key_visited(); // TODO: adjust statistics for partial compaction
2693 1140 : let retention = self
2694 1140 : .generate_key_retention(
2695 1140 : *last_key,
2696 1140 : &accumulated_values,
2697 1140 : job_desc.gc_cutoff,
2698 1140 : &job_desc.retain_lsns_below_horizon,
2699 1140 : COMPACTION_DELTA_THRESHOLD,
2700 1140 : get_ancestor_image(self, *last_key, ctx, has_data_below, lowest_retain_lsn)
2701 1140 : .await?,
2702 : )
2703 1140 : .await?;
2704 1140 : retention
2705 1140 : .pipe_to(
2706 1140 : *last_key,
2707 1140 : &mut delta_layer_writer,
2708 1140 : image_layer_writer.as_mut(),
2709 1140 : &mut stat,
2710 1140 : ctx,
2711 1140 : )
2712 1140 : .await?;
2713 1140 : accumulated_values.clear();
2714 1140 : *last_key = key;
2715 1140 : accumulated_values.push((key, lsn, val));
2716 : }
2717 : }
2718 :
2719 : // TODO: move the below part to the loop body
2720 104 : let last_key = last_key.expect("no keys produced during compaction");
2721 104 : stat.on_unique_key_visited();
2722 :
2723 104 : let retention = self
2724 104 : .generate_key_retention(
2725 104 : last_key,
2726 104 : &accumulated_values,
2727 104 : job_desc.gc_cutoff,
2728 104 : &job_desc.retain_lsns_below_horizon,
2729 104 : COMPACTION_DELTA_THRESHOLD,
2730 104 : get_ancestor_image(self, last_key, ctx, has_data_below, lowest_retain_lsn).await?,
2731 : )
2732 104 : .await?;
2733 104 : retention
2734 104 : .pipe_to(
2735 104 : last_key,
2736 104 : &mut delta_layer_writer,
2737 104 : image_layer_writer.as_mut(),
2738 104 : &mut stat,
2739 104 : ctx,
2740 104 : )
2741 104 : .await?;
2742 : // end: move the above part to the loop body
2743 :
2744 104 : let mut rewrote_delta_layers = Vec::new();
2745 108 : for (key, writers) in delta_layer_rewriters {
2746 4 : if let Some(delta_writer_before) = writers.before {
2747 0 : let (desc, path) = delta_writer_before
2748 0 : .finish(job_desc.compaction_key_range.start, ctx)
2749 0 : .await?;
2750 0 : let layer = Layer::finish_creating(self.conf, self, desc, &path)?;
2751 0 : rewrote_delta_layers.push(layer);
2752 4 : }
2753 4 : if let Some(delta_writer_after) = writers.after {
2754 4 : let (desc, path) = delta_writer_after.finish(key.key_range.end, ctx).await?;
2755 4 : let layer = Layer::finish_creating(self.conf, self, desc, &path)?;
2756 4 : rewrote_delta_layers.push(layer);
2757 0 : }
2758 : }
2759 :
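     : // Closure handed to the batch writers: for each layer they are about to produce, decide
     : // whether it should be discarded (e.g. because an identical layer already exists).
     : // Discarded layers surface below as `BatchWriterResult::Discarded`.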
2760 148 : let discard = |key: &PersistentLayerKey| {
2761 148 : let key = key.clone();
2762 148 : async move { KeyHistoryRetention::discard_key(&key, self, dry_run).await }
2763 148 : };
2764 :
2765 104 : let produced_image_layers = if let Some(writer) = image_layer_writer {
2766 84 : if !dry_run {
2767 76 : let end_key = job_desc.compaction_key_range.end;
2768 76 : writer
2769 76 : .finish_with_discard_fn(self, ctx, end_key, discard)
2770 76 : .await?
2771 : } else {
2772 8 : drop(writer);
2773 8 : Vec::new()
2774 : }
2775 : } else {
2776 20 : Vec::new()
2777 : };
2778 :
2779 104 : let produced_delta_layers = if !dry_run {
2780 96 : delta_layer_writer
2781 96 : .finish_with_discard_fn(self, ctx, discard)
2782 96 : .await?
2783 : } else {
2784 8 : drop(delta_layer_writer);
2785 8 : Vec::new()
2786 : };
2787 :
2788 : // TODO: make generation of image/delta/rewrote_delta layers atomic. We have already created resident layers at this
2789 : // point, so if compaction is cancelled now, some of them might never be cleaned up.
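     : // `compact_to` collects the newly produced layers to be inserted into the layer map;
     : // `keep_layers` collects keys of selected layers that must survive the compaction
     : // (discarded duplicates and partially covered image layers).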
2790 104 : let mut compact_to = Vec::new();
2791 104 : let mut keep_layers = HashSet::new();
2792 104 : let produced_delta_layers_len = produced_delta_layers.len();
2793 104 : let produced_image_layers_len = produced_image_layers.len();
2794 176 : for action in produced_delta_layers {
2795 72 : match action {
2796 44 : BatchWriterResult::Produced(layer) => {
2797 44 : if cfg!(debug_assertions) {
2798 44 : info!("produced delta layer: {}", layer.layer_desc().key());
2799 0 : }
2800 44 : stat.produce_delta_layer(layer.layer_desc().file_size());
2801 44 : compact_to.push(layer);
2802 : }
2803 28 : BatchWriterResult::Discarded(l) => {
2804 28 : if cfg!(debug_assertions) {
2805 28 : info!("discarded delta layer: {}", l);
2806 0 : }
2807 28 : keep_layers.insert(l);
2808 28 : stat.discard_delta_layer();
2809 : }
2810 : }
2811 : }
2812 108 : for layer in &rewrote_delta_layers {
2813 4 : debug!(
2814 0 : "produced rewritten delta layer: {}",
2815 0 : layer.layer_desc().key()
2816 : );
2817 : }
2818 104 : compact_to.extend(rewrote_delta_layers);
2819 180 : for action in produced_image_layers {
2820 76 : match action {
2821 60 : BatchWriterResult::Produced(layer) => {
2822 60 : debug!("produced image layer: {}", layer.layer_desc().key());
2823 60 : stat.produce_image_layer(layer.layer_desc().file_size());
2824 60 : compact_to.push(layer);
2825 : }
2826 16 : BatchWriterResult::Discarded(l) => {
2827 16 : debug!("discarded image layer: {}", l);
2828 16 : keep_layers.insert(l);
2829 16 : stat.discard_image_layer();
2830 : }
2831 : }
2832 : }
2833 :
2834 104 : let mut layer_selection = job_desc.selected_layers;
2835 :
2836 : // Partial compaction might select more data than it processes, e.g., if
2837 : // the compaction_key_range only partially overlaps:
2838 : //
2839 : // [---compaction_key_range---]
2840 : // [---A----][----B----][----C----][----D----]
2841 : //
2842 : // For delta layers, we rewrite them so that they are cut exactly at the
2843 : // compaction key range boundary, so the originals can always be discarded. However, for image
2844 : // layers, as we do not rewrite them for now, we need to handle them differently.
2845 : // Assume image layers A, B, C, D are all in the `layer_selection`.
2846 : //
2847 : // The created image layers contain whatever is needed from B, C, and from
2848 : // `----]` of A, and from `[---` of D.
2849 : //
2850 : // In contrast, `[---A` and `D----]` have not been processed, so we must
2851 : // keep that data.
2852 : //
2853 : // The solution for now is to keep A and D completely if they are image layers.
2854 : // (layer_selection is what we'll remove from the layer map, so, retain what
2855 : // is _not_ fully covered by compaction_key_range).
2856 408 : for layer in &layer_selection {
2857 304 : if !layer.layer_desc().is_delta() {
2858 132 : if !overlaps_with(
2859 132 : &layer.layer_desc().key_range,
2860 132 : &job_desc.compaction_key_range,
2861 132 : ) {
2862 0 : bail!("violated constraint: image layer outside of compaction key range");
2863 132 : }
2864 132 : if !fully_contains(
2865 132 : &job_desc.compaction_key_range,
2866 132 : &layer.layer_desc().key_range,
2867 132 : ) {
2868 16 : keep_layers.insert(layer.layer_desc().key());
2869 116 : }
2870 172 : }
2871 : }
2872 :
2873 304 : layer_selection.retain(|x| !keep_layers.contains(&x.layer_desc().key()));
2874 104 :
2875 104 : info!(
2876 0 : "gc-compaction statistics: {}",
2877 0 : serde_json::to_string(&stat)?
2878 : );
2879 :
2880 104 : if dry_run {
2881 8 : return Ok(());
2882 96 : }
2883 96 :
2884 96 : info!(
2885 0 : "produced {} delta layers and {} image layers, {} layers are kept",
2886 0 : produced_delta_layers_len,
2887 0 : produced_image_layers_len,
2888 0 : keep_layers.len()
2889 : );
2890 :
2891 : // Step 3: Place back to the layer map.
2892 :
2893 : // First, do a sanity check to ensure the newly-created layer map does not contain overlaps.
2894 96 : let all_layers = {
2895 96 : let guard = self.layers.read().await;
2896 96 : let layer_map = guard.layer_map()?;
2897 96 : layer_map.iter_historic_layers().collect_vec()
2898 96 : };
2899 96 :
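     : // Compute the expected post-compaction set of layer names: the current layers, minus the
     : // ones being removed, plus the ones we produced.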
2900 96 : let mut final_layers = all_layers
2901 96 : .iter()
2902 428 : .map(|layer| layer.layer_name())
2903 96 : .collect::<HashSet<_>>();
2904 304 : for layer in &layer_selection {
2905 208 : final_layers.remove(&layer.layer_desc().layer_name());
2906 208 : }
2907 204 : for layer in &compact_to {
2908 108 : final_layers.insert(layer.layer_desc().layer_name());
2909 108 : }
2910 96 : let final_layers = final_layers.into_iter().collect_vec();
2911 :
2912 : // TODO: move this check before we call `finish` on the image layer writers. However, this requires knowing the layer names before the
2913 : // writers are finished, so we would potentially need something like `ImageLayerBatchWriter::get_all_pending_layer_keys` to obtain all the keys
2914 : // held in a writer before finalizing the persistent layers. As it stands, we leave some dangling layers on disk if the check fails.
2915 96 : if let Some(err) = check_valid_layermap(&final_layers) {
2916 0 : bail!("gc-compaction layer map check failed after compaction: {}; not applying the compaction result to the layer map to avoid potential data loss", err);
2917 96 : }
2918 :
2919 : // Between the sanity check and this compaction update, there could be new layers being flushed, but it should be fine because we only
2920 : // operate on L1 layers.
2921 : {
2922 96 : let mut guard = self.layers.write().await;
2923 96 : guard
2924 96 : .open_mut()?
2925 96 : .finish_gc_compaction(&layer_selection, &compact_to, &self.metrics)
2926 96 : };
2927 96 :
2928 96 : // Schedule an index-only upload to update the `latest_gc_cutoff` in the index_part.json.
2929 96 : // Otherwise, after restart, the index_part only contains the old `latest_gc_cutoff` and
2930 96 : // find_gc_cutoffs will try accessing things below the cutoff. TODO: ideally, this should
2931 96 : // be batched into `schedule_compaction_update`.
2932 96 : let disk_consistent_lsn = self.disk_consistent_lsn.load();
2933 96 : self.schedule_uploads(disk_consistent_lsn, None)?;
2934 : // If a layer was rewritten during gc-compaction, it must appear only in `compact_to`,
2935 : // not in `compact_from`.
2936 96 : let compact_from = {
2937 96 : let mut compact_from = Vec::new();
2938 96 : let mut compact_to_set = HashMap::new();
2939 204 : for layer in &compact_to {
2940 108 : compact_to_set.insert(layer.layer_desc().key(), layer);
2941 108 : }
2942 304 : for layer in &layer_selection {
2943 208 : if let Some(to) = compact_to_set.get(&layer.layer_desc().key()) {
2944 0 : tracing::info!(
2945 0 : "skipping delete {} because found same layer key at different generation {}",
2946 : layer, to
2947 : );
2948 208 : } else {
2949 208 : compact_from.push(layer.clone());
2950 208 : }
2951 : }
2952 96 : compact_from
2953 96 : };
2954 96 : self.remote_client
2955 96 : .schedule_compaction_update(&compact_from, &compact_to)?;
2956 :
2957 96 : drop(gc_lock);
2958 96 :
2959 96 : Ok(())
2960 108 : }
2961 : }
2962 :
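     : /// Adapter that implements `CompactionJobExecutor` for a `Timeline`, buffering newly created
     : /// delta/image layers and pending deletions until they are applied by `flush_updates`.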
2963 : struct TimelineAdaptor {
2964 : timeline: Arc<Timeline>,
2965 :
2966 : keyspace: (Lsn, KeySpace),
2967 :
2968 : new_deltas: Vec<ResidentLayer>,
2969 : new_images: Vec<ResidentLayer>,
2970 : layers_to_delete: Vec<Arc<PersistentLayerDesc>>,
2971 : }
2972 :
2973 : impl TimelineAdaptor {
2974 0 : pub fn new(timeline: &Arc<Timeline>, keyspace: (Lsn, KeySpace)) -> Self {
2975 0 : Self {
2976 0 : timeline: timeline.clone(),
2977 0 : keyspace,
2978 0 : new_images: Vec::new(),
2979 0 : new_deltas: Vec::new(),
2980 0 : layers_to_delete: Vec::new(),
2981 0 : }
2982 0 : }
2983 :
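     : /// Apply the buffered changes: finish the compaction batch (replacing `layers_to_delete`
     : /// with the new delta/image layers in the layer map) and schedule uploads of the new
     : /// image layers.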
2984 0 : pub async fn flush_updates(&mut self) -> Result<(), CompactionError> {
2985 0 : let layers_to_delete = {
2986 0 : let guard = self.timeline.layers.read().await;
2987 0 : self.layers_to_delete
2988 0 : .iter()
2989 0 : .map(|x| guard.get_from_desc(x))
2990 0 : .collect::<Vec<Layer>>()
2991 0 : };
2992 0 : self.timeline
2993 0 : .finish_compact_batch(&self.new_deltas, &self.new_images, &layers_to_delete)
2994 0 : .await?;
2995 :
2996 0 : self.timeline
2997 0 : .upload_new_image_layers(std::mem::take(&mut self.new_images))?;
2998 :
2999 0 : self.new_deltas.clear();
3000 0 : self.layers_to_delete.clear();
3001 0 : Ok(())
3002 0 : }
3003 : }
3004 :
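     : // Newtype wrappers that keep the underlying layer resident on local disk while the
     : // compaction algorithm holds a reference to it.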
3005 : #[derive(Clone)]
3006 : struct ResidentDeltaLayer(ResidentLayer);
3007 : #[derive(Clone)]
3008 : struct ResidentImageLayer(ResidentLayer);
3009 :
3010 : impl CompactionJobExecutor for TimelineAdaptor {
3011 : type Key = pageserver_api::key::Key;
3012 :
3013 : type Layer = OwnArc<PersistentLayerDesc>;
3014 : type DeltaLayer = ResidentDeltaLayer;
3015 : type ImageLayer = ResidentImageLayer;
3016 :
3017 : type RequestContext = crate::context::RequestContext;
3018 :
3019 0 : fn get_shard_identity(&self) -> &ShardIdentity {
3020 0 : self.timeline.get_shard_identity()
3021 0 : }
3022 :
3023 0 : async fn get_layers(
3024 0 : &mut self,
3025 0 : key_range: &Range<Key>,
3026 0 : lsn_range: &Range<Lsn>,
3027 0 : _ctx: &RequestContext,
3028 0 : ) -> anyhow::Result<Vec<OwnArc<PersistentLayerDesc>>> {
3029 0 : self.flush_updates().await?;
3030 :
3031 0 : let guard = self.timeline.layers.read().await;
3032 0 : let layer_map = guard.layer_map()?;
3033 :
3034 0 : let result = layer_map
3035 0 : .iter_historic_layers()
3036 0 : .filter(|l| {
3037 0 : overlaps_with(&l.lsn_range, lsn_range) && overlaps_with(&l.key_range, key_range)
3038 0 : })
3039 0 : .map(OwnArc)
3040 0 : .collect();
3041 0 : Ok(result)
3042 0 : }
3043 :
3044 0 : async fn get_keyspace(
3045 0 : &mut self,
3046 0 : key_range: &Range<Key>,
3047 0 : lsn: Lsn,
3048 0 : _ctx: &RequestContext,
3049 0 : ) -> anyhow::Result<Vec<Range<Key>>> {
3050 0 : if lsn == self.keyspace.0 {
3051 0 : Ok(pageserver_compaction::helpers::intersect_keyspace(
3052 0 : &self.keyspace.1.ranges,
3053 0 : key_range,
3054 0 : ))
3055 : } else {
3056 : // The current compaction implementation only ever requests the key space
3057 : // at the compaction end LSN.
3058 0 : anyhow::bail!("keyspace not available for requested lsn");
3059 : }
3060 0 : }
3061 :
3062 0 : async fn downcast_delta_layer(
3063 0 : &self,
3064 0 : layer: &OwnArc<PersistentLayerDesc>,
3065 0 : ) -> anyhow::Result<Option<ResidentDeltaLayer>> {
3066 0 : // this is a lot more complex than a simple downcast...
3067 0 : if layer.is_delta() {
3068 0 : let l = {
3069 0 : let guard = self.timeline.layers.read().await;
3070 0 : guard.get_from_desc(layer)
3071 : };
3072 0 : let result = l.download_and_keep_resident().await?;
3073 :
3074 0 : Ok(Some(ResidentDeltaLayer(result)))
3075 : } else {
3076 0 : Ok(None)
3077 : }
3078 0 : }
3079 :
3080 0 : async fn create_image(
3081 0 : &mut self,
3082 0 : lsn: Lsn,
3083 0 : key_range: &Range<Key>,
3084 0 : ctx: &RequestContext,
3085 0 : ) -> anyhow::Result<()> {
3086 0 : Ok(self.create_image_impl(lsn, key_range, ctx).await?)
3087 0 : }
3088 :
3089 0 : async fn create_delta(
3090 0 : &mut self,
3091 0 : lsn_range: &Range<Lsn>,
3092 0 : key_range: &Range<Key>,
3093 0 : input_layers: &[ResidentDeltaLayer],
3094 0 : ctx: &RequestContext,
3095 0 : ) -> anyhow::Result<()> {
3096 0 : debug!("Create new layer {}..{}", lsn_range.start, lsn_range.end);
3097 :
3098 0 : let mut all_entries = Vec::new();
3099 0 : for dl in input_layers.iter() {
3100 0 : all_entries.extend(dl.load_keys(ctx).await?);
3101 : }
3102 :
3103 : // The stdlib sort implementation is designed to be particularly fast when the slice
3104 : // is made up of already-sorted sub-ranges, as is the case here.
3105 0 : all_entries.sort_by_key(|DeltaEntry { key, lsn, .. }| (*key, *lsn));
3106 :
3107 0 : let mut writer = DeltaLayerWriter::new(
3108 0 : self.timeline.conf,
3109 0 : self.timeline.timeline_id,
3110 0 : self.timeline.tenant_shard_id,
3111 0 : key_range.start,
3112 0 : lsn_range.clone(),
3113 0 : ctx,
3114 0 : )
3115 0 : .await?;
3116 :
3117 0 : let mut dup_values = 0;
3118 0 :
3119 0 : // This iterator walks through all key-value pairs from all the layers
3120 0 : // we're compacting, in key, LSN order.
3121 0 : let mut prev: Option<(Key, Lsn)> = None;
3122 : for &DeltaEntry {
3123 0 : key, lsn, ref val, ..
3124 0 : } in all_entries.iter()
3125 : {
3126 0 : if prev == Some((key, lsn)) {
3127 : // This is a duplicate. Skip it.
3128 : //
3129 : // It can happen if compaction is interrupted after writing some
3130 : // layers but not all, and we are compacting the range again.
3131 : // The calculations in the algorithm assume that there are no
3132 : // duplicates, so the math on targeted file size is likely off,
3133 : // and we will create smaller files than expected.
3134 0 : dup_values += 1;
3135 0 : continue;
3136 0 : }
3137 :
3138 0 : let value = val.load(ctx).await?;
3139 :
3140 0 : writer.put_value(key, lsn, value, ctx).await?;
3141 :
3142 0 : prev = Some((key, lsn));
3143 : }
3144 :
3145 0 : if dup_values > 0 {
3146 0 : warn!("delta layer created with {} duplicate values", dup_values);
3147 0 : }
3148 :
3149 0 : fail_point!("delta-layer-writer-fail-before-finish", |_| {
3150 0 : Err(anyhow::anyhow!(
3151 0 : "failpoint delta-layer-writer-fail-before-finish"
3152 0 : ))
3153 0 : });
3154 :
3155 0 : let (desc, path) = writer.finish(prev.unwrap().0.next(), ctx).await?;
3156 0 : let new_delta_layer =
3157 0 : Layer::finish_creating(self.timeline.conf, &self.timeline, desc, &path)?;
3158 :
3159 0 : self.new_deltas.push(new_delta_layer);
3160 0 : Ok(())
3161 0 : }
3162 :
3163 0 : async fn delete_layer(
3164 0 : &mut self,
3165 0 : layer: &OwnArc<PersistentLayerDesc>,
3166 0 : _ctx: &RequestContext,
3167 0 : ) -> anyhow::Result<()> {
3168 0 : self.layers_to_delete.push(layer.clone().0);
3169 0 : Ok(())
3170 0 : }
3171 : }
3172 :
3173 : impl TimelineAdaptor {
3174 0 : async fn create_image_impl(
3175 0 : &mut self,
3176 0 : lsn: Lsn,
3177 0 : key_range: &Range<Key>,
3178 0 : ctx: &RequestContext,
3179 0 : ) -> Result<(), CreateImageLayersError> {
3180 0 : let timer = self.timeline.metrics.create_images_time_histo.start_timer();
3181 :
3182 0 : let image_layer_writer = ImageLayerWriter::new(
3183 0 : self.timeline.conf,
3184 0 : self.timeline.timeline_id,
3185 0 : self.timeline.tenant_shard_id,
3186 0 : key_range,
3187 0 : lsn,
3188 0 : ctx,
3189 0 : )
3190 0 : .await?;
3191 :
3192 0 : fail_point!("image-layer-writer-fail-before-finish", |_| {
3193 0 : Err(CreateImageLayersError::Other(anyhow::anyhow!(
3194 0 : "failpoint image-layer-writer-fail-before-finish"
3195 0 : )))
3196 0 : });
3197 :
3198 0 : let keyspace = KeySpace {
3199 0 : ranges: self.get_keyspace(key_range, lsn, ctx).await?,
3200 : };
3201 : // TODO: set a proper (stateful) start key; for now create_image_layer_for_rel_blocks is always called with Key::MIN.
3202 0 : let start = Key::MIN;
3203 : let ImageLayerCreationOutcome {
3204 0 : unfinished_image_layer,
3205 : next_start_key: _,
3206 0 : } = self
3207 0 : .timeline
3208 0 : .create_image_layer_for_rel_blocks(
3209 0 : &keyspace,
3210 0 : image_layer_writer,
3211 0 : lsn,
3212 0 : ctx,
3213 0 : key_range.clone(),
3214 0 : start,
3215 0 : IoConcurrency::sequential(),
3216 0 : )
3217 0 : .await?;
3218 :
3219 0 : if let Some(image_layer_writer) = unfinished_image_layer {
3220 0 : let (desc, path) = image_layer_writer.finish(ctx).await?;
3221 0 : let image_layer =
3222 0 : Layer::finish_creating(self.timeline.conf, &self.timeline, desc, &path)?;
3223 0 : self.new_images.push(image_layer);
3224 0 : }
3225 :
3226 0 : timer.stop_and_record();
3227 0 :
3228 0 : Ok(())
3229 0 : }
3230 : }
3231 :
3232 : impl CompactionRequestContext for crate::context::RequestContext {}
3233 :
3234 : #[derive(Debug, Clone)]
3235 : pub struct OwnArc<T>(pub Arc<T>);
3236 :
3237 : impl<T> Deref for OwnArc<T> {
3238 : type Target = <Arc<T> as Deref>::Target;
3239 0 : fn deref(&self) -> &Self::Target {
3240 0 : &self.0
3241 0 : }
3242 : }
3243 :
3244 : impl<T> AsRef<T> for OwnArc<T> {
3245 0 : fn as_ref(&self) -> &T {
3246 0 : self.0.as_ref()
3247 0 : }
3248 : }
3249 :
3250 : impl CompactionLayer<Key> for OwnArc<PersistentLayerDesc> {
3251 0 : fn key_range(&self) -> &Range<Key> {
3252 0 : &self.key_range
3253 0 : }
3254 0 : fn lsn_range(&self) -> &Range<Lsn> {
3255 0 : &self.lsn_range
3256 0 : }
3257 0 : fn file_size(&self) -> u64 {
3258 0 : self.file_size
3259 0 : }
3260 0 : fn short_id(&self) -> std::string::String {
3261 0 : self.as_ref().short_id().to_string()
3262 0 : }
3263 0 : fn is_delta(&self) -> bool {
3264 0 : self.as_ref().is_delta()
3265 0 : }
3266 : }
3267 :
3268 : impl CompactionLayer<Key> for OwnArc<DeltaLayer> {
3269 0 : fn key_range(&self) -> &Range<Key> {
3270 0 : &self.layer_desc().key_range
3271 0 : }
3272 0 : fn lsn_range(&self) -> &Range<Lsn> {
3273 0 : &self.layer_desc().lsn_range
3274 0 : }
3275 0 : fn file_size(&self) -> u64 {
3276 0 : self.layer_desc().file_size
3277 0 : }
3278 0 : fn short_id(&self) -> std::string::String {
3279 0 : self.layer_desc().short_id().to_string()
3280 0 : }
3281 0 : fn is_delta(&self) -> bool {
3282 0 : true
3283 0 : }
3284 : }
3285 :
3286 : use crate::tenant::timeline::DeltaEntry;
3287 :
3288 : impl CompactionLayer<Key> for ResidentDeltaLayer {
3289 0 : fn key_range(&self) -> &Range<Key> {
3290 0 : &self.0.layer_desc().key_range
3291 0 : }
3292 0 : fn lsn_range(&self) -> &Range<Lsn> {
3293 0 : &self.0.layer_desc().lsn_range
3294 0 : }
3295 0 : fn file_size(&self) -> u64 {
3296 0 : self.0.layer_desc().file_size
3297 0 : }
3298 0 : fn short_id(&self) -> std::string::String {
3299 0 : self.0.layer_desc().short_id().to_string()
3300 0 : }
3301 0 : fn is_delta(&self) -> bool {
3302 0 : true
3303 0 : }
3304 : }
3305 :
3306 : impl CompactionDeltaLayer<TimelineAdaptor> for ResidentDeltaLayer {
3307 : type DeltaEntry<'a> = DeltaEntry<'a>;
3308 :
3309 0 : async fn load_keys(&self, ctx: &RequestContext) -> anyhow::Result<Vec<DeltaEntry<'_>>> {
3310 0 : self.0.get_as_delta(ctx).await?.index_entries(ctx).await
3311 0 : }
3312 : }
3313 :
3314 : impl CompactionLayer<Key> for ResidentImageLayer {
3315 0 : fn key_range(&self) -> &Range<Key> {
3316 0 : &self.0.layer_desc().key_range
3317 0 : }
3318 0 : fn lsn_range(&self) -> &Range<Lsn> {
3319 0 : &self.0.layer_desc().lsn_range
3320 0 : }
3321 0 : fn file_size(&self) -> u64 {
3322 0 : self.0.layer_desc().file_size
3323 0 : }
3324 0 : fn short_id(&self) -> std::string::String {
3325 0 : self.0.layer_desc().short_id().to_string()
3326 0 : }
3327 0 : fn is_delta(&self) -> bool {
3328 0 : false
3329 0 : }
3330 : }
3331 : impl CompactionImageLayer<TimelineAdaptor> for ResidentImageLayer {}
|