Line data Source code
1 : //! New compaction implementation. The algorithm itself is implemented in the
2 : //! compaction crate. This file implements the callbacks and structs that allow
3 : //! the algorithm to drive the process.
4 : //!
5 : //! The old legacy algorithm is implemented directly in `timeline.rs`.
6 :
7 : use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque};
8 : use std::ops::{Deref, Range};
9 : use std::sync::Arc;
10 :
11 : use super::layer_manager::LayerManager;
12 : use super::{
13 : CompactFlags, CompactOptions, CreateImageLayersError, DurationRecorder, ImageLayerCreationMode,
14 : RecordedDuration, Timeline,
15 : };
16 :
17 : use anyhow::{anyhow, bail, Context};
18 : use bytes::Bytes;
19 : use enumset::EnumSet;
20 : use fail::fail_point;
21 : use itertools::Itertools;
22 : use pageserver_api::key::KEY_SIZE;
23 : use pageserver_api::keyspace::ShardedRange;
24 : use pageserver_api::models::CompactInfoResponse;
25 : use pageserver_api::shard::{ShardCount, ShardIdentity, TenantShardId};
26 : use serde::Serialize;
27 : use tokio_util::sync::CancellationToken;
28 : use tracing::{debug, info, info_span, trace, warn, Instrument};
29 : use utils::id::TimelineId;
30 :
31 : use crate::context::{AccessStatsBehavior, RequestContext, RequestContextBuilder};
32 : use crate::page_cache;
33 : use crate::statvfs::Statvfs;
34 : use crate::tenant::checks::check_valid_layermap;
35 : use crate::tenant::gc_block::GcBlock;
36 : use crate::tenant::remote_timeline_client::WaitCompletionError;
37 : use crate::tenant::storage_layer::batch_split_writer::{
38 : BatchWriterResult, SplitDeltaLayerWriter, SplitImageLayerWriter,
39 : };
40 : use crate::tenant::storage_layer::filter_iterator::FilterIterator;
41 : use crate::tenant::storage_layer::merge_iterator::MergeIterator;
42 : use crate::tenant::storage_layer::{
43 : AsLayerDesc, PersistentLayerDesc, PersistentLayerKey, ValueReconstructState,
44 : };
45 : use crate::tenant::timeline::{drop_rlock, DeltaLayerWriter, ImageLayerWriter};
46 : use crate::tenant::timeline::{ImageLayerCreationOutcome, IoConcurrency};
47 : use crate::tenant::timeline::{Layer, ResidentLayer};
48 : use crate::tenant::{gc_block, DeltaLayer, MaybeOffloaded};
49 : use crate::virtual_file::{MaybeFatalIo, VirtualFile};
50 : use pageserver_api::config::tenant_conf_defaults::{
51 : DEFAULT_CHECKPOINT_DISTANCE, DEFAULT_COMPACTION_THRESHOLD,
52 : };
53 :
54 : use pageserver_api::key::Key;
55 : use pageserver_api::keyspace::KeySpace;
56 : use pageserver_api::record::NeonWalRecord;
57 : use pageserver_api::value::Value;
58 :
59 : use utils::lsn::Lsn;
60 :
61 : use pageserver_compaction::helpers::{fully_contains, overlaps_with};
62 : use pageserver_compaction::interface::*;
63 :
64 : use super::CompactionError;
65 :
66 : /// Maximum number of deltas before generating an image layer in bottom-most compaction.
67 : const COMPACTION_DELTA_THRESHOLD: usize = 5;
68 :
69 : #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
70 : pub struct GcCompactionJobId(pub usize);
71 :
72 : impl std::fmt::Display for GcCompactionJobId {
73 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
74 0 : write!(f, "{}", self.0)
75 0 : }
76 : }
77 :
78 : #[derive(Debug, Clone)]
79 : pub enum GcCompactionQueueItem {
80 : Manual(CompactOptions),
81 : SubCompactionJob(CompactOptions),
82 : #[allow(dead_code)]
83 : UpdateL2Lsn(Lsn),
84 : Notify(GcCompactionJobId),
85 : }
86 :
87 : impl GcCompactionQueueItem {
88 0 : pub fn into_compact_info_resp(
89 0 : self,
90 0 : id: GcCompactionJobId,
91 0 : running: bool,
92 0 : ) -> Option<CompactInfoResponse> {
93 0 : match self {
94 0 : GcCompactionQueueItem::Manual(options) => Some(CompactInfoResponse {
95 0 : compact_key_range: options.compact_key_range,
96 0 : compact_lsn_range: options.compact_lsn_range,
97 0 : sub_compaction: options.sub_compaction,
98 0 : running,
99 0 : job_id: id.0,
100 0 : }),
101 0 : GcCompactionQueueItem::SubCompactionJob(options) => Some(CompactInfoResponse {
102 0 : compact_key_range: options.compact_key_range,
103 0 : compact_lsn_range: options.compact_lsn_range,
104 0 : sub_compaction: options.sub_compaction,
105 0 : running,
106 0 : job_id: id.0,
107 0 : }),
108 0 : GcCompactionQueueItem::UpdateL2Lsn(_) => None,
109 0 : GcCompactionQueueItem::Notify(_) => None,
110 : }
111 0 : }
112 : }
113 :
114 : struct GcCompactionQueueInner {
115 : running: Option<(GcCompactionJobId, GcCompactionQueueItem)>,
116 : queued: VecDeque<(GcCompactionJobId, GcCompactionQueueItem)>,
117 : notify: HashMap<GcCompactionJobId, tokio::sync::oneshot::Sender<()>>,
118 : gc_guards: HashMap<GcCompactionJobId, gc_block::Guard>,
119 : last_id: GcCompactionJobId,
120 : }
121 :
122 : impl GcCompactionQueueInner {
123 0 : fn next_id(&mut self) -> GcCompactionJobId {
124 0 : let id = self.last_id;
125 0 : self.last_id = GcCompactionJobId(id.0 + 1);
126 0 : id
127 0 : }
128 : }
129 :
130 : /// A structure to store gc_compaction jobs.
131 : pub struct GcCompactionQueue {
132 : /// All items in the queue, and the currently-running job.
133 : inner: std::sync::Mutex<GcCompactionQueueInner>,
134 : /// Ensure only one thread is consuming the queue.
135 : consumer_lock: tokio::sync::Mutex<()>,
136 : }
137 :
138 : impl GcCompactionQueue {
139 0 : pub fn new() -> Self {
140 0 : GcCompactionQueue {
141 0 : inner: std::sync::Mutex::new(GcCompactionQueueInner {
142 0 : running: None,
143 0 : queued: VecDeque::new(),
144 0 : notify: HashMap::new(),
145 0 : gc_guards: HashMap::new(),
146 0 : last_id: GcCompactionJobId(0),
147 0 : }),
148 0 : consumer_lock: tokio::sync::Mutex::new(()),
149 0 : }
150 0 : }
151 :
152 0 : pub fn cancel_scheduled(&self) {
153 0 : let mut guard = self.inner.lock().unwrap();
154 0 : guard.queued.clear();
155 0 : guard.notify.clear();
156 0 : guard.gc_guards.clear();
157 0 : }
158 :
159 : /// Schedule a manual compaction job.
160 0 : pub fn schedule_manual_compaction(
161 0 : &self,
162 0 : options: CompactOptions,
163 0 : notify: Option<tokio::sync::oneshot::Sender<()>>,
164 0 : ) -> GcCompactionJobId {
165 0 : let mut guard = self.inner.lock().unwrap();
166 0 : let id = guard.next_id();
167 0 : guard
168 0 : .queued
169 0 : .push_back((id, GcCompactionQueueItem::Manual(options)));
170 0 : if let Some(notify) = notify {
171 0 : guard.notify.insert(id, notify);
172 0 : }
173 0 : info!("scheduled compaction job id={}", id);
174 0 : id
175 0 : }
176 :
177 : /// Trigger an auto compaction.
178 : #[allow(dead_code)]
179 0 : pub fn trigger_auto_compaction(&self, _: &Arc<Timeline>) {}
180 :
181 : /// Notify the caller the job has finished and unblock GC.
182 0 : fn notify_and_unblock(&self, id: GcCompactionJobId) {
183 0 : info!("compaction job id={} finished", id);
184 0 : let mut guard = self.inner.lock().unwrap();
185 0 : if let Some(blocking) = guard.gc_guards.remove(&id) {
186 0 : drop(blocking)
187 0 : }
188 0 : if let Some(tx) = guard.notify.remove(&id) {
189 0 : let _ = tx.send(());
190 0 : }
191 0 : }
192 :
193 0 : async fn handle_sub_compaction(
194 0 : &self,
195 0 : id: GcCompactionJobId,
196 0 : options: CompactOptions,
197 0 : timeline: &Arc<Timeline>,
198 0 : gc_block: &GcBlock,
199 0 : ) -> Result<(), CompactionError> {
200 0 : info!("running scheduled enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs");
201 0 : let jobs: Vec<GcCompactJob> = timeline
202 0 : .gc_compaction_split_jobs(
203 0 : GcCompactJob::from_compact_options(options.clone()),
204 0 : options.sub_compaction_max_job_size_mb,
205 0 : )
206 0 : .await
207 0 : .map_err(CompactionError::Other)?;
208 0 : if jobs.is_empty() {
209 0 : info!("no jobs to run, skipping scheduled compaction task");
210 0 : self.notify_and_unblock(id);
211 : } else {
212 0 : let gc_guard = match gc_block.start().await {
213 0 : Ok(guard) => guard,
214 0 : Err(e) => {
215 0 : return Err(CompactionError::Other(anyhow!(
216 0 : "cannot run gc-compaction because gc is blocked: {}",
217 0 : e
218 0 : )));
219 : }
220 : };
221 :
222 0 : let jobs_len = jobs.len();
223 0 : let mut pending_tasks = Vec::new();
224 0 : for job in jobs {
225 : // Unfortunately we need to convert the `GcCompactJob` back to `CompactOptions`
226 : // until we do further refactoring to allow calling `compact_with_gc` directly.
227 0 : let mut flags: EnumSet<CompactFlags> = EnumSet::default();
228 0 : flags |= CompactFlags::EnhancedGcBottomMostCompaction;
229 0 : if job.dry_run {
230 0 : flags |= CompactFlags::DryRun;
231 0 : }
232 0 : let options = CompactOptions {
233 0 : flags,
234 0 : sub_compaction: false,
235 0 : compact_key_range: Some(job.compact_key_range.into()),
236 0 : compact_lsn_range: Some(job.compact_lsn_range.into()),
237 0 : sub_compaction_max_job_size_mb: None,
238 0 : };
239 0 : pending_tasks.push(GcCompactionQueueItem::SubCompactionJob(options));
240 : }
241 0 : pending_tasks.push(GcCompactionQueueItem::Notify(id));
242 0 : {
243 0 : let mut guard = self.inner.lock().unwrap();
244 0 : guard.gc_guards.insert(id, gc_guard);
245 0 : let mut tasks = Vec::new();
246 0 : for task in pending_tasks {
247 0 : let id = guard.next_id();
248 0 : tasks.push((id, task));
249 0 : }
250 0 : tasks.reverse();
251 0 : for item in tasks {
252 0 : guard.queued.push_front(item);
253 0 : }
254 : }
255 0 : info!("scheduled enhanced gc bottom-most compaction with sub-compaction, split into {} jobs", jobs_len);
256 : }
257 0 : Ok(())
258 0 : }
259 :
260 : /// Take a job from the queue and process it. Returns whether there are still pending tasks.
261 0 : pub async fn iteration(
262 0 : &self,
263 0 : cancel: &CancellationToken,
264 0 : ctx: &RequestContext,
265 0 : gc_block: &GcBlock,
266 0 : timeline: &Arc<Timeline>,
267 0 : ) -> Result<bool, CompactionError> {
268 0 : let _one_op_at_a_time_guard = self.consumer_lock.lock().await;
269 : let has_pending_tasks;
270 0 : let (id, item) = {
271 0 : let mut guard = self.inner.lock().unwrap();
272 0 : let Some((id, item)) = guard.queued.pop_front() else {
273 0 : return Ok(false);
274 : };
275 0 : guard.running = Some((id, item.clone()));
276 0 : has_pending_tasks = !guard.queued.is_empty();
277 0 : (id, item)
278 0 : };
279 0 :
280 0 : match item {
281 0 : GcCompactionQueueItem::Manual(options) => {
282 0 : if !options
283 0 : .flags
284 0 : .contains(CompactFlags::EnhancedGcBottomMostCompaction)
285 : {
286 0 : warn!("ignoring scheduled compaction task: scheduled task must be gc compaction: {:?}", options);
287 0 : } else if options.sub_compaction {
288 0 : self.handle_sub_compaction(id, options, timeline, gc_block)
289 0 : .await?;
290 : } else {
291 0 : let gc_guard = match gc_block.start().await {
292 0 : Ok(guard) => guard,
293 0 : Err(e) => {
294 0 : return Err(CompactionError::Other(anyhow!(
295 0 : "cannot run gc-compaction because gc is blocked: {}",
296 0 : e
297 0 : )));
298 : }
299 : };
300 0 : {
301 0 : let mut guard = self.inner.lock().unwrap();
302 0 : guard.gc_guards.insert(id, gc_guard);
303 0 : }
304 0 : let _ = timeline
305 0 : .compact_with_options(cancel, options, ctx)
306 0 : .instrument(info_span!("scheduled_compact_timeline", %timeline.timeline_id))
307 0 : .await?;
308 0 : self.notify_and_unblock(id);
309 : }
310 : }
311 0 : GcCompactionQueueItem::SubCompactionJob(options) => {
312 0 : let _ = timeline
313 0 : .compact_with_options(cancel, options, ctx)
314 0 : .instrument(info_span!("scheduled_compact_timeline", %timeline.timeline_id))
315 0 : .await?;
316 : }
317 0 : GcCompactionQueueItem::Notify(id) => {
318 0 : self.notify_and_unblock(id);
319 0 : }
320 : GcCompactionQueueItem::UpdateL2Lsn(_) => {
321 0 : unreachable!()
322 : }
323 : }
324 0 : {
325 0 : let mut guard = self.inner.lock().unwrap();
326 0 : guard.running = None;
327 0 : }
328 0 : Ok(has_pending_tasks)
329 0 : }
330 :
331 : #[allow(clippy::type_complexity)]
332 0 : pub fn remaining_jobs(
333 0 : &self,
334 0 : ) -> (
335 0 : Option<(GcCompactionJobId, GcCompactionQueueItem)>,
336 0 : VecDeque<(GcCompactionJobId, GcCompactionQueueItem)>,
337 0 : ) {
338 0 : let guard = self.inner.lock().unwrap();
339 0 : (guard.running.clone(), guard.queued.clone())
340 0 : }
341 :
342 : #[allow(dead_code)]
343 0 : pub fn remaining_jobs_num(&self) -> usize {
344 0 : let guard = self.inner.lock().unwrap();
345 0 : guard.queued.len() + if guard.running.is_some() { 1 } else { 0 }
346 0 : }
347 : }
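// Illustrative usage sketch, not part of the original source: one way a caller could
// drive this queue. The `options`, `cancel`, `ctx`, `gc_block`, and `timeline` values
// are assumed to be supplied by the surrounding tenant code; the oneshot receiver
// resolves once `notify_and_unblock` runs for the scheduled job.
//
//     let queue = GcCompactionQueue::new();
//     let (tx, rx) = tokio::sync::oneshot::channel();
//     let job_id = queue.schedule_manual_compaction(options, Some(tx));
//     // Consumer side: keep calling `iteration` while it reports pending tasks.
//     while queue.iteration(&cancel, &ctx, &gc_block, &timeline).await? {}
//     let _ = rx.await; // job `job_id` has finished and its gc guard was dropped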
348 :
349 : /// A job description for the gc-compaction job. This structure describes the rectangle range that the job will
350 : /// process. The exact layers that need to be compacted/rewritten will be generated when `compact_with_gc` gets
351 : /// called.
352 : #[derive(Debug, Clone)]
353 : pub(crate) struct GcCompactJob {
354 : pub dry_run: bool,
355 : /// The key range to be compacted. The compaction algorithm will only regenerate key-value pairs within this range
356 : /// [left inclusive, right exclusive), and other pairs will be rewritten into new files if necessary.
357 : pub compact_key_range: Range<Key>,
358 : /// The LSN range to be compacted. The compaction algorithm will use this range to determine the layers to be
359 : /// selected for the compaction, and it does not guarantee the generated layers will have exactly the same LSN range
360 : /// as specified here. The true range being compacted is `min_lsn/max_lsn` in [`GcCompactionJobDescription`].
361 : /// min_lsn will always be <= the lower bound specified here, and max_lsn will always be >= the upper bound specified here.
362 : pub compact_lsn_range: Range<Lsn>,
363 : }
364 :
365 : impl GcCompactJob {
366 108 : pub fn from_compact_options(options: CompactOptions) -> Self {
367 108 : GcCompactJob {
368 108 : dry_run: options.flags.contains(CompactFlags::DryRun),
369 108 : compact_key_range: options
370 108 : .compact_key_range
371 108 : .map(|x| x.into())
372 108 : .unwrap_or(Key::MIN..Key::MAX),
373 108 : compact_lsn_range: options
374 108 : .compact_lsn_range
375 108 : .map(|x| x.into())
376 108 : .unwrap_or(Lsn::INVALID..Lsn::MAX),
377 108 : }
378 108 : }
379 : }
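// Illustrative sketch, not part of the original source: how the defaults above play
// out. With a `CompactOptions` that sets neither `compact_key_range` nor
// `compact_lsn_range` (field names as constructed earlier in this file), the
// resulting job covers the full key space and all LSNs:
//
//     let options = CompactOptions {
//         flags: EnumSet::default(),
//         sub_compaction: false,
//         compact_key_range: None,
//         compact_lsn_range: None,
//         sub_compaction_max_job_size_mb: None,
//     };
//     let job = GcCompactJob::from_compact_options(options);
//     assert_eq!(job.compact_key_range, Key::MIN..Key::MAX);
//     assert_eq!(job.compact_lsn_range, Lsn::INVALID..Lsn::MAX);
//     assert!(!job.dry_run); // the DryRun flag was not set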
380 :
381 : /// A job description for the gc-compaction job. This structure is generated when `compact_with_gc` is called
382 : /// and contains the exact layers we want to compact.
383 : pub struct GcCompactionJobDescription {
384 : /// All layers to read in the compaction job
385 : selected_layers: Vec<Layer>,
386 : /// GC cutoff of the job. This is the lowest LSN that will be accessed by the read/GC path and we need to
387 : /// keep all deltas <= this LSN or generate an image == this LSN.
388 : gc_cutoff: Lsn,
389 : /// LSNs to retain for the job. Read path will use this LSN so we need to keep deltas <= this LSN or
390 : /// generate an image == this LSN.
391 : retain_lsns_below_horizon: Vec<Lsn>,
392 : /// Maximum layer LSN processed in this compaction, that is max(end_lsn of layers). Exclusive. All data
393 : /// \>= this LSN will be kept and will not be rewritten.
394 : max_layer_lsn: Lsn,
395 : /// Minimum layer LSN processed in this compaction, that is min(start_lsn of layers). Inclusive.
396 : /// All access below (strict lower than `<`) this LSN will be routed through the normal read path instead of
397 : /// k-merge within gc-compaction.
398 : min_layer_lsn: Lsn,
399 : /// Only compact layers overlapping with this range.
400 : compaction_key_range: Range<Key>,
401 : /// When partial compaction is enabled, these layers need to be rewritten to ensure no overlap.
402 : /// This field is here solely for debugging. The field will not be read once the compaction
403 : /// description is generated.
404 : rewrite_layers: Vec<Arc<PersistentLayerDesc>>,
405 : }
406 :
407 : /// The result of bottom-most compaction for a single key at each LSN.
408 : #[derive(Debug)]
409 : #[cfg_attr(test, derive(PartialEq))]
410 : pub struct KeyLogAtLsn(pub Vec<(Lsn, Value)>);
411 :
412 : /// The result of bottom-most compaction.
413 : #[derive(Debug)]
414 : #[cfg_attr(test, derive(PartialEq))]
415 : pub(crate) struct KeyHistoryRetention {
416 : /// Stores logs to reconstruct the value at the given LSN, that is to say, logs <= LSN or image == LSN.
417 : pub(crate) below_horizon: Vec<(Lsn, KeyLogAtLsn)>,
418 : /// Stores logs to reconstruct the value at any LSN above the horizon, that is to say, log > LSN.
419 : pub(crate) above_horizon: KeyLogAtLsn,
420 : }
421 :
422 : impl KeyHistoryRetention {
423 : /// Hack: skip the delta layer if we need to produce a layer with the same key-lsn range as an existing one.
424 : ///
425 : /// This can happen if we have removed some deltas in "the middle" of some existing layer's key-lsn-range.
426 : /// For example, consider the case where a single delta with range [0x10,0x50) exists.
427 : /// And we have branches at LSN 0x10, 0x20, 0x30.
428 : /// Then we delete branch @ 0x20.
429 : /// Bottom-most compaction may now delete the delta [0x20,0x30).
430 : /// And that wouldnt' change the shape of the layer.
431 : ///
432 : /// Note that bottom-most-gc-compaction never _adds_ new data in that case, only removes.
433 : ///
434 : /// `discard_key` will only be called when the writer reaches its target (instead of for every key), so it's fine to grab a lock inside.
435 148 : async fn discard_key(key: &PersistentLayerKey, tline: &Arc<Timeline>, dry_run: bool) -> bool {
436 148 : if dry_run {
437 0 : return true;
438 148 : }
439 : let layer_generation;
440 : {
441 148 : let guard = tline.layers.read().await;
442 148 : if !guard.contains_key(key) {
443 104 : return false;
444 44 : }
445 44 : layer_generation = guard.get_from_key(key).metadata().generation;
446 44 : }
447 44 : if layer_generation == tline.generation {
448 44 : info!(
449 : key=%key,
450 : ?layer_generation,
451 0 : "discard layer due to duplicated layer key in the same generation",
452 : );
453 44 : true
454 : } else {
455 0 : false
456 : }
457 148 : }
458 :
459 : /// Pipe a history of a single key to the writers.
460 : ///
461 : /// If `image_writer` is none, the images will be placed into the delta layers.
462 : /// The delta writer will contain all images and deltas (below and above the horizon) except the bottom-most images.
463 : #[allow(clippy::too_many_arguments)]
464 1244 : async fn pipe_to(
465 1244 : self,
466 1244 : key: Key,
467 1244 : delta_writer: &mut SplitDeltaLayerWriter,
468 1244 : mut image_writer: Option<&mut SplitImageLayerWriter>,
469 1244 : stat: &mut CompactionStatistics,
470 1244 : ctx: &RequestContext,
471 1244 : ) -> anyhow::Result<()> {
472 1244 : let mut first_batch = true;
473 4024 : for (cutoff_lsn, KeyLogAtLsn(logs)) in self.below_horizon {
474 2780 : if first_batch {
475 1244 : if logs.len() == 1 && logs[0].1.is_image() {
476 1168 : let Value::Image(img) = &logs[0].1 else {
477 0 : unreachable!()
478 : };
479 1168 : stat.produce_image_key(img);
480 1168 : if let Some(image_writer) = image_writer.as_mut() {
481 1168 : image_writer.put_image(key, img.clone(), ctx).await?;
482 : } else {
483 0 : delta_writer
484 0 : .put_value(key, cutoff_lsn, Value::Image(img.clone()), ctx)
485 0 : .await?;
486 : }
487 : } else {
488 132 : for (lsn, val) in logs {
489 56 : stat.produce_key(&val);
490 56 : delta_writer.put_value(key, lsn, val, ctx).await?;
491 : }
492 : }
493 1244 : first_batch = false;
494 : } else {
495 1768 : for (lsn, val) in logs {
496 232 : stat.produce_key(&val);
497 232 : delta_writer.put_value(key, lsn, val, ctx).await?;
498 : }
499 : }
500 : }
501 1244 : let KeyLogAtLsn(above_horizon_logs) = self.above_horizon;
502 1360 : for (lsn, val) in above_horizon_logs {
503 116 : stat.produce_key(&val);
504 116 : delta_writer.put_value(key, lsn, val, ctx).await?;
505 : }
506 1244 : Ok(())
507 1244 : }
508 : }
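// Illustrative sketch, not part of the original source: what `pipe_to` does with a
// typical retention. `key`, `cutoff_lsn`, `later_lsn`, `img`, `rec`, and the writers
// are hypothetical values supplied by the caller. The bottom-most image below the
// horizon goes to the image writer (when one is provided); every other record,
// including everything above the horizon, goes to the delta writer.
//
//     let retention = KeyHistoryRetention {
//         below_horizon: vec![(cutoff_lsn, KeyLogAtLsn(vec![(cutoff_lsn, Value::Image(img))]))],
//         above_horizon: KeyLogAtLsn(vec![(later_lsn, Value::WalRecord(rec))]),
//     };
//     retention
//         .pipe_to(key, &mut delta_writer, Some(&mut image_writer), &mut stat, ctx)
//         .await?;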
509 :
510 : #[derive(Debug, Serialize, Default)]
511 : struct CompactionStatisticsNumSize {
512 : num: u64,
513 : size: u64,
514 : }
515 :
516 : #[derive(Debug, Serialize, Default)]
517 : pub struct CompactionStatistics {
518 : delta_layer_visited: CompactionStatisticsNumSize,
519 : image_layer_visited: CompactionStatisticsNumSize,
520 : delta_layer_produced: CompactionStatisticsNumSize,
521 : image_layer_produced: CompactionStatisticsNumSize,
522 : num_delta_layer_discarded: usize,
523 : num_image_layer_discarded: usize,
524 : num_unique_keys_visited: usize,
525 : wal_keys_visited: CompactionStatisticsNumSize,
526 : image_keys_visited: CompactionStatisticsNumSize,
527 : wal_produced: CompactionStatisticsNumSize,
528 : image_produced: CompactionStatisticsNumSize,
529 : }
530 :
531 : impl CompactionStatistics {
532 2084 : fn estimated_size_of_value(val: &Value) -> usize {
533 864 : match val {
534 1220 : Value::Image(img) => img.len(),
535 0 : Value::WalRecord(NeonWalRecord::Postgres { rec, .. }) => rec.len(),
536 864 : _ => std::mem::size_of::<NeonWalRecord>(),
537 : }
538 2084 : }
539 3272 : fn estimated_size_of_key() -> usize {
540 3272 : KEY_SIZE // TODO: distinguish image layer and delta layer (count LSN in delta layer)
541 3272 : }
542 172 : fn visit_delta_layer(&mut self, size: u64) {
543 172 : self.delta_layer_visited.num += 1;
544 172 : self.delta_layer_visited.size += size;
545 172 : }
546 132 : fn visit_image_layer(&mut self, size: u64) {
547 132 : self.image_layer_visited.num += 1;
548 132 : self.image_layer_visited.size += size;
549 132 : }
550 1244 : fn on_unique_key_visited(&mut self) {
551 1244 : self.num_unique_keys_visited += 1;
552 1244 : }
553 480 : fn visit_wal_key(&mut self, val: &Value) {
554 480 : self.wal_keys_visited.num += 1;
555 480 : self.wal_keys_visited.size +=
556 480 : Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
557 480 : }
558 1220 : fn visit_image_key(&mut self, val: &Value) {
559 1220 : self.image_keys_visited.num += 1;
560 1220 : self.image_keys_visited.size +=
561 1220 : Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
562 1220 : }
563 404 : fn produce_key(&mut self, val: &Value) {
564 404 : match val {
565 20 : Value::Image(img) => self.produce_image_key(img),
566 384 : Value::WalRecord(_) => self.produce_wal_key(val),
567 : }
568 404 : }
569 384 : fn produce_wal_key(&mut self, val: &Value) {
570 384 : self.wal_produced.num += 1;
571 384 : self.wal_produced.size +=
572 384 : Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
573 384 : }
574 1188 : fn produce_image_key(&mut self, val: &Bytes) {
575 1188 : self.image_produced.num += 1;
576 1188 : self.image_produced.size += val.len() as u64 + Self::estimated_size_of_key() as u64;
577 1188 : }
578 28 : fn discard_delta_layer(&mut self) {
579 28 : self.num_delta_layer_discarded += 1;
580 28 : }
581 16 : fn discard_image_layer(&mut self) {
582 16 : self.num_image_layer_discarded += 1;
583 16 : }
584 44 : fn produce_delta_layer(&mut self, size: u64) {
585 44 : self.delta_layer_produced.num += 1;
586 44 : self.delta_layer_produced.size += size;
587 44 : }
588 60 : fn produce_image_layer(&mut self, size: u64) {
589 60 : self.image_layer_produced.num += 1;
590 60 : self.image_layer_produced.size += size;
591 60 : }
592 : }
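// Illustrative test sketch, not part of the original source: the size accounting above
// charges each visited image key its payload length plus KEY_SIZE bytes. Module and
// test names are hypothetical.
#[cfg(test)]
mod compaction_statistics_estimate_tests {
    use super::*;

    #[test]
    fn image_key_visit_adds_value_and_key_size() {
        let mut stats = CompactionStatistics::default();
        let img = Bytes::from_static(b"12345678"); // 8-byte image payload
        stats.visit_image_key(&Value::Image(img));
        assert_eq!(stats.image_keys_visited.num, 1);
        assert_eq!(stats.image_keys_visited.size, 8 + KEY_SIZE as u64);
    }
}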
593 :
594 : impl Timeline {
595 : /// TODO: cancellation
596 : ///
597 : /// Returns whether the compaction has pending tasks.
598 728 : pub(crate) async fn compact_legacy(
599 728 : self: &Arc<Self>,
600 728 : cancel: &CancellationToken,
601 728 : options: CompactOptions,
602 728 : ctx: &RequestContext,
603 728 : ) -> Result<bool, CompactionError> {
604 728 : if options
605 728 : .flags
606 728 : .contains(CompactFlags::EnhancedGcBottomMostCompaction)
607 : {
608 0 : self.compact_with_gc(cancel, options, ctx)
609 0 : .await
610 0 : .map_err(CompactionError::Other)?;
611 0 : return Ok(false);
612 728 : }
613 728 :
614 728 : if options.flags.contains(CompactFlags::DryRun) {
615 0 : return Err(CompactionError::Other(anyhow!(
616 0 : "dry-run mode is not supported for legacy compaction for now"
617 0 : )));
618 728 : }
619 728 :
620 728 : if options.compact_key_range.is_some() || options.compact_lsn_range.is_some() {
621 : // maybe useful in the future? could implement this at some point
622 0 : return Err(CompactionError::Other(anyhow!(
623 0 : "compaction range is not supported for legacy compaction for now"
624 0 : )));
625 728 : }
626 728 :
627 728 : // High level strategy for compaction / image creation:
628 728 : //
629 728 : // 1. First, calculate the desired "partitioning" of the
630 728 : // currently in-use key space. The goal is to partition the
631 728 : // key space into roughly fixed-size chunks, but also take into
632 728 : // account any existing image layers, and try to align the
633 728 : // chunk boundaries with the existing image layers to avoid
634 728 : // too much churn. Also try to align chunk boundaries with
635 728 : // relation boundaries. In principle, we don't know about
636 728 : // relation boundaries here, we just deal with key-value
637 728 : // pairs, and the code in pgdatadir_mapping.rs knows how to
638 728 : // map relations into key-value pairs. But in practice we know
639 728 : // that 'field6' is the block number, and the fields 1-5
640 728 : // identify a relation. This is just an optimization,
641 728 : // though.
642 728 : //
643 728 : // 2. Once we know the partitioning, for each partition,
644 728 : // decide if it's time to create a new image layer. The
645 728 : // criterion is: has there been too much "churn" since the last
646 728 : // image layer? "Churn" is a fuzzy concept: it's a
647 728 : // combination of too many delta files, or too much WAL in
648 728 : // total in the delta files. Or perhaps: would creating an image
649 728 : // file allow us to delete some older files?
650 728 : //
651 728 : // 3. After that, we compact all level0 delta files if there
652 728 : // are too many of them. While compacting, we also garbage
653 728 : // collect any page versions that are no longer needed because
654 728 : // of the new image layers we created in step 2.
655 728 : //
656 728 : // TODO: This high level strategy hasn't been implemented yet.
657 728 : // Below are functions compact_level0() and create_image_layers()
658 728 : // but they are a bit ad hoc and don't quite work as explained
659 728 : // above. Rewrite it.
660 728 :
661 728 : // Is the timeline being deleted?
662 728 : if self.is_stopping() {
663 0 : trace!("Dropping out of compaction on timeline shutdown");
664 0 : return Err(CompactionError::ShuttingDown);
665 728 : }
666 728 :
667 728 : let target_file_size = self.get_checkpoint_distance();
668 :
669 : // Define partitioning schema if needed
670 :
671 : // FIXME: the match should only cover repartitioning, not the next steps
672 728 : let (partition_count, has_pending_tasks) = match self
673 728 : .repartition(
674 728 : self.get_last_record_lsn(),
675 728 : self.get_compaction_target_size(),
676 728 : options.flags,
677 728 : ctx,
678 728 : )
679 728 : .await
680 : {
681 728 : Ok(((dense_partitioning, sparse_partitioning), lsn)) => {
682 728 : // Disables access_stats updates, so that the files we read remain candidates for eviction after we're done with them
683 728 : let image_ctx = RequestContextBuilder::extend(ctx)
684 728 : .access_stats_behavior(AccessStatsBehavior::Skip)
685 728 : .build();
686 728 :
687 728 : // 2. Compact
688 728 : let timer = self.metrics.compact_time_histo.start_timer();
689 728 : let fully_compacted = self
690 728 : .compact_level0(
691 728 : target_file_size,
692 728 : options.flags.contains(CompactFlags::ForceL0Compaction),
693 728 : ctx,
694 728 : )
695 728 : .await?;
696 728 : timer.stop_and_record();
697 728 :
698 728 : let mut partitioning = dense_partitioning;
699 728 : partitioning
700 728 : .parts
701 728 : .extend(sparse_partitioning.into_dense().parts);
702 728 :
703 728 : // 3. Create new image layers for partitions that have been modified
704 728 : // "enough". Skip image layer creation if L0 compaction cannot keep up.
705 728 : if fully_compacted {
706 728 : let image_layers = self
707 728 : .create_image_layers(
708 728 : &partitioning,
709 728 : lsn,
710 728 : if options
711 728 : .flags
712 728 : .contains(CompactFlags::ForceImageLayerCreation)
713 : {
714 28 : ImageLayerCreationMode::Force
715 : } else {
716 700 : ImageLayerCreationMode::Try
717 : },
718 728 : &image_ctx,
719 728 : )
720 728 : .await?;
721 :
722 728 : self.upload_new_image_layers(image_layers)?;
723 : } else {
724 0 : info!("skipping image layer generation because L0 compaction did not include all layers.");
725 : }
726 728 : (partitioning.parts.len(), !fully_compacted)
727 : }
728 0 : Err(err) => {
729 0 : // no partitioning? This is normal, if the timeline was just created
730 0 : // as an empty timeline. Also in unit tests, when we use the timeline
731 0 : // as a simple key-value store, ignoring the datadir layout. Log the
732 0 : // error but continue.
733 0 : //
734 0 : // Suppress error when it's due to cancellation
735 0 : if !self.cancel.is_cancelled() && !err.is_cancelled() {
736 0 : tracing::error!("could not compact, repartitioning keyspace failed: {err:?}");
737 0 : }
738 0 : (1, false)
739 : }
740 : };
741 :
742 728 : if self.shard_identity.count >= ShardCount::new(2) {
743 : // Limit the number of layer rewrites to the number of partitions: this means its
744 : // runtime should be comparable to a full round of image layer creations, rather than
745 : // being potentially much longer.
746 0 : let rewrite_max = partition_count;
747 0 :
748 0 : self.compact_shard_ancestors(rewrite_max, ctx).await?;
749 728 : }
750 :
751 728 : Ok(has_pending_tasks)
752 728 : }
753 :
754 : /// Check for layers that are eligible to be rewritten:
755 : /// - Shard splitting: after a shard split, rewrite ancestor layers beyond pitr_interval, so that
756 : /// we don't indefinitely retain keys in this shard that aren't needed.
757 : /// - For future use: layers beyond pitr_interval that are in formats we would
758 : /// rather not maintain compatibility with indefinitely.
759 : ///
760 : /// Note: this phase may read and write many gigabytes of data: use rewrite_max to bound
761 : /// how much work it will try to do in each compaction pass.
762 0 : async fn compact_shard_ancestors(
763 0 : self: &Arc<Self>,
764 0 : rewrite_max: usize,
765 0 : ctx: &RequestContext,
766 0 : ) -> Result<(), CompactionError> {
767 0 : let mut drop_layers = Vec::new();
768 0 : let mut layers_to_rewrite: Vec<Layer> = Vec::new();
769 0 :
770 0 : // We will use the Lsn cutoff of the last GC as a threshold for rewriting layers: if a
771 0 : // layer is behind this Lsn, it indicates that the layer is being retained beyond the
772 0 : // pitr_interval, for example because a branchpoint references it.
773 0 : //
774 0 : // Holding this read guard also blocks [`Self::gc_timeline`] from entering while we
775 0 : // are rewriting layers.
776 0 : let latest_gc_cutoff = self.get_latest_gc_cutoff_lsn();
777 0 :
778 0 : tracing::info!(
779 0 : "latest_gc_cutoff: {}, pitr cutoff {}",
780 0 : *latest_gc_cutoff,
781 0 : self.gc_info.read().unwrap().cutoffs.time
782 : );
783 :
784 0 : let layers = self.layers.read().await;
785 0 : for layer_desc in layers.layer_map()?.iter_historic_layers() {
786 0 : let layer = layers.get_from_desc(&layer_desc);
787 0 : if layer.metadata().shard.shard_count == self.shard_identity.count {
788 : // This layer does not belong to a historic ancestor, no need to re-image it.
789 0 : continue;
790 0 : }
791 0 :
792 0 : // This layer was created on an ancestor shard: check if it contains any data for this shard.
793 0 : let sharded_range = ShardedRange::new(layer_desc.get_key_range(), &self.shard_identity);
794 0 : let layer_local_page_count = sharded_range.page_count();
795 0 : let layer_raw_page_count = ShardedRange::raw_size(&layer_desc.get_key_range());
796 0 : if layer_local_page_count == 0 {
797 : // This ancestral layer only covers keys that belong to other shards.
798 : // We include the full metadata in the log: if we had some critical bug that caused
799 : // us to incorrectly drop layers, this would simplify manually debugging + reinstating those layers.
800 0 : info!(%layer, old_metadata=?layer.metadata(),
801 0 : "dropping layer after shard split, contains no keys for this shard.",
802 : );
803 :
804 0 : if cfg!(debug_assertions) {
805 : // Expensive, exhaustive check of keys in this layer: this guards against ShardedRange's calculations being
806 : // wrong. If ShardedRange claims the local page count is zero, then no keys in this layer
807 : // should be !is_key_disposable()
808 0 : let range = layer_desc.get_key_range();
809 0 : let mut key = range.start;
810 0 : while key < range.end {
811 0 : debug_assert!(self.shard_identity.is_key_disposable(&key));
812 0 : key = key.next();
813 : }
814 0 : }
815 :
816 0 : drop_layers.push(layer);
817 0 : continue;
818 0 : } else if layer_local_page_count != u32::MAX
819 0 : && layer_local_page_count == layer_raw_page_count
820 : {
821 0 : debug!(%layer,
822 0 : "layer is entirely shard local ({} keys), no need to filter it",
823 : layer_local_page_count
824 : );
825 0 : continue;
826 0 : }
827 0 :
828 0 : // Don't bother re-writing a layer unless it will at least halve its size
829 0 : if layer_local_page_count != u32::MAX
830 0 : && layer_local_page_count > layer_raw_page_count / 2
831 : {
832 0 : debug!(%layer,
833 0 : "layer is already mostly local ({}/{}), not rewriting",
834 : layer_local_page_count,
835 : layer_raw_page_count
836 : );
837 0 : }
838 :
839 : // Don't bother re-writing a layer if it is within the PITR window: it will age-out eventually
840 : // without incurring the I/O cost of a rewrite.
841 0 : if layer_desc.get_lsn_range().end >= *latest_gc_cutoff {
842 0 : debug!(%layer, "Skipping rewrite of layer still in GC window ({} >= {})",
843 0 : layer_desc.get_lsn_range().end, *latest_gc_cutoff);
844 0 : continue;
845 0 : }
846 0 :
847 0 : if layer_desc.is_delta() {
848 : // We do not yet implement rewrite of delta layers
849 0 : debug!(%layer, "Skipping rewrite of delta layer");
850 0 : continue;
851 0 : }
852 0 :
853 0 : // Only rewrite layers if their generations differ. This guarantees:
854 0 : // - that local rewrite is safe, as local layer paths will differ between existing layer and rewritten one
855 0 : // - that the layer is persistent in remote storage, as we only see old-generation'd layer via loading from remote storage
856 0 : if layer.metadata().generation == self.generation {
857 0 : debug!(%layer, "Skipping rewrite, is not from old generation");
858 0 : continue;
859 0 : }
860 0 :
861 0 : if layers_to_rewrite.len() >= rewrite_max {
862 0 : tracing::info!(%layer, "Will rewrite layer on a future compaction, already rewrote {}",
863 0 : layers_to_rewrite.len()
864 : );
865 0 : continue;
866 0 : }
867 0 :
868 0 : // Fall through: all our conditions for doing a rewrite passed.
869 0 : layers_to_rewrite.push(layer);
870 : }
871 :
872 : // Drop read lock on layer map before we start doing time-consuming I/O
873 0 : drop(layers);
874 0 :
875 0 : let mut replace_image_layers = Vec::new();
876 :
877 0 : for layer in layers_to_rewrite {
878 0 : tracing::info!(layer=%layer, "Rewriting layer after shard split...");
879 0 : let mut image_layer_writer = ImageLayerWriter::new(
880 0 : self.conf,
881 0 : self.timeline_id,
882 0 : self.tenant_shard_id,
883 0 : &layer.layer_desc().key_range,
884 0 : layer.layer_desc().image_layer_lsn(),
885 0 : ctx,
886 0 : )
887 0 : .await
888 0 : .map_err(CompactionError::Other)?;
889 :
890 : // Safety of layer rewrites:
891 : // - We are writing to a different local file path than we are reading from, so the old Layer
892 : // cannot interfere with the new one.
893 : // - In the page cache, contents for a particular VirtualFile are stored with a file_id that
894 : // is different for two layers with the same name (in `ImageLayerInner::new` we always
895 : // acquire a fresh id from [`crate::page_cache::next_file_id`]). So readers do not risk
896 : // reading the index from one layer file, and then data blocks from the rewritten layer file.
897 : // - Any readers that have a reference to the old layer will keep it alive until they are done
898 : // with it. If they are trying to promote from remote storage, that will fail, but this is the same
899 : // as for compaction generally: compaction is allowed to delete layers that readers might be trying to use.
900 : // - We do not run concurrently with other kinds of compaction, so the only layer map writes we race with are:
901 : // - GC, which at worst witnesses us "undelete" a layer that they just deleted.
902 : // - ingestion, which only inserts layers, therefore cannot collide with us.
903 0 : let resident = layer.download_and_keep_resident().await?;
904 :
905 0 : let keys_written = resident
906 0 : .filter(&self.shard_identity, &mut image_layer_writer, ctx)
907 0 : .await?;
908 :
909 0 : if keys_written > 0 {
910 0 : let (desc, path) = image_layer_writer
911 0 : .finish(ctx)
912 0 : .await
913 0 : .map_err(CompactionError::Other)?;
914 0 : let new_layer = Layer::finish_creating(self.conf, self, desc, &path)
915 0 : .map_err(CompactionError::Other)?;
916 0 : tracing::info!(layer=%new_layer, "Rewrote layer, {} -> {} bytes",
917 0 : layer.metadata().file_size,
918 0 : new_layer.metadata().file_size);
919 :
920 0 : replace_image_layers.push((layer, new_layer));
921 0 : } else {
922 0 : // Drop the old layer. Usually for this case we would already have noticed that
923 0 : // the layer has no data for us with the ShardedRange check above, but
924 0 : drop_layers.push(layer);
925 0 : }
926 : }
927 :
928 : // At this point, we have replaced local layer files with their rewritten form, but not yet uploaded
929 : // metadata to reflect that. If we restart here, the replaced layer files will look invalid (size mismatch
930 : // to remote index) and be removed. This is inefficient but safe.
931 0 : fail::fail_point!("compact-shard-ancestors-localonly");
932 0 :
933 0 : // Update the LayerMap so that readers will use the new layers, and enqueue it for writing to remote storage
934 0 : self.rewrite_layers(replace_image_layers, drop_layers)
935 0 : .await?;
936 :
937 0 : fail::fail_point!("compact-shard-ancestors-enqueued");
938 0 :
939 0 : // We wait for all uploads to complete before finishing this compaction stage. This is not
940 0 : // necessary for correctness, but it simplifies testing, and avoids proceeding with another
941 0 : // Timeline's compaction while this timeline's uploads may be generating lots of disk I/O
942 0 : // load.
943 0 : match self.remote_client.wait_completion().await {
944 0 : Ok(()) => (),
945 0 : Err(WaitCompletionError::NotInitialized(ni)) => return Err(CompactionError::from(ni)),
946 : Err(WaitCompletionError::UploadQueueShutDownOrStopped) => {
947 0 : return Err(CompactionError::ShuttingDown)
948 : }
949 : }
950 :
951 0 : fail::fail_point!("compact-shard-ancestors-persistent");
952 0 :
953 0 : Ok(())
954 0 : }
955 :
956 : /// Update the LayerVisibilityHint of layers covered by image layers, based on whether there is
957 : /// an image layer between them and the most recent readable LSN (branch point or tip of timeline). The
958 : /// purpose of the visibility hint is to record which layers need to be available to service reads.
959 : ///
960 : /// The result may be used as an input to eviction and secondary downloads to de-prioritize layers
961 : /// that we know won't be needed for reads.
962 452 : pub(super) async fn update_layer_visibility(
963 452 : &self,
964 452 : ) -> Result<(), super::layer_manager::Shutdown> {
965 452 : let head_lsn = self.get_last_record_lsn();
966 :
967 : // We will sweep through layers in reverse-LSN order. We only do historic layers. L0 deltas
968 : // are implicitly left visible, because LayerVisibilityHint's default is Visible, and we never modify it here.
969 : // Note that L0 deltas _can_ be covered by image layers, but we consider them 'visible' because we anticipate that
970 : // they will be subject to L0->L1 compaction in the near future.
971 452 : let layer_manager = self.layers.read().await;
972 452 : let layer_map = layer_manager.layer_map()?;
973 :
974 452 : let readable_points = {
975 452 : let children = self.gc_info.read().unwrap().retain_lsns.clone();
976 452 :
977 452 : let mut readable_points = Vec::with_capacity(children.len() + 1);
978 452 : for (child_lsn, _child_timeline_id, is_offloaded) in &children {
979 0 : if *is_offloaded == MaybeOffloaded::Yes {
980 0 : continue;
981 0 : }
982 0 : readable_points.push(*child_lsn);
983 : }
984 452 : readable_points.push(head_lsn);
985 452 : readable_points
986 452 : };
987 452 :
988 452 : let (layer_visibility, covered) = layer_map.get_visibility(readable_points);
989 1144 : for (layer_desc, visibility) in layer_visibility {
990 692 : // FIXME: a more efficient bulk zip() through the layers rather than NlogN getting each one
991 692 : let layer = layer_manager.get_from_desc(&layer_desc);
992 692 : layer.set_visibility(visibility);
993 692 : }
994 :
995 : // TODO: publish our covered KeySpace to our parent, so that when they update their visibility, they can
996 : // avoid assuming that everything at a branch point is visible.
997 452 : drop(covered);
998 452 : Ok(())
999 452 : }
1000 :
1001 : /// Collect a bunch of Level 0 layer files, and compact and reshuffle them as
1002 : /// Level 1 files. Returns whether the L0 layers are fully compacted.
1003 728 : async fn compact_level0(
1004 728 : self: &Arc<Self>,
1005 728 : target_file_size: u64,
1006 728 : force_compaction_ignore_threshold: bool,
1007 728 : ctx: &RequestContext,
1008 728 : ) -> Result<bool, CompactionError> {
1009 : let CompactLevel0Phase1Result {
1010 728 : new_layers,
1011 728 : deltas_to_compact,
1012 728 : fully_compacted,
1013 : } = {
1014 728 : let phase1_span = info_span!("compact_level0_phase1");
1015 728 : let ctx = ctx.attached_child();
1016 728 : let mut stats = CompactLevel0Phase1StatsBuilder {
1017 728 : version: Some(2),
1018 728 : tenant_id: Some(self.tenant_shard_id),
1019 728 : timeline_id: Some(self.timeline_id),
1020 728 : ..Default::default()
1021 728 : };
1022 728 :
1023 728 : let begin = tokio::time::Instant::now();
1024 728 : let phase1_layers_locked = self.layers.read().await;
1025 728 : let now = tokio::time::Instant::now();
1026 728 : stats.read_lock_acquisition_micros =
1027 728 : DurationRecorder::Recorded(RecordedDuration(now - begin), now);
1028 728 : self.compact_level0_phase1(
1029 728 : phase1_layers_locked,
1030 728 : stats,
1031 728 : target_file_size,
1032 728 : force_compaction_ignore_threshold,
1033 728 : &ctx,
1034 728 : )
1035 728 : .instrument(phase1_span)
1036 728 : .await?
1037 : };
1038 :
1039 728 : if new_layers.is_empty() && deltas_to_compact.is_empty() {
1040 : // nothing to do
1041 672 : return Ok(true);
1042 56 : }
1043 56 :
1044 56 : self.finish_compact_batch(&new_layers, &Vec::new(), &deltas_to_compact)
1045 56 : .await?;
1046 56 : Ok(fully_compacted)
1047 728 : }
1048 :
1049 : /// Level0 files first phase of compaction, explained in the [`Self::compact_legacy`] comment.
1050 728 : async fn compact_level0_phase1<'a>(
1051 728 : self: &'a Arc<Self>,
1052 728 : guard: tokio::sync::RwLockReadGuard<'a, LayerManager>,
1053 728 : mut stats: CompactLevel0Phase1StatsBuilder,
1054 728 : target_file_size: u64,
1055 728 : force_compaction_ignore_threshold: bool,
1056 728 : ctx: &RequestContext,
1057 728 : ) -> Result<CompactLevel0Phase1Result, CompactionError> {
1058 728 : stats.read_lock_held_spawn_blocking_startup_micros =
1059 728 : stats.read_lock_acquisition_micros.till_now(); // set by caller
1060 728 : let layers = guard.layer_map()?;
1061 728 : let level0_deltas = layers.level0_deltas();
1062 728 : stats.level0_deltas_count = Some(level0_deltas.len());
1063 728 :
1064 728 : // Only compact if enough layers have accumulated.
1065 728 : let threshold = self.get_compaction_threshold();
1066 728 : if level0_deltas.is_empty() || level0_deltas.len() < threshold {
1067 672 : if force_compaction_ignore_threshold {
1068 0 : if !level0_deltas.is_empty() {
1069 0 : info!(
1070 0 : level0_deltas = level0_deltas.len(),
1071 0 : threshold, "too few deltas to compact, but forcing compaction"
1072 : );
1073 : } else {
1074 0 : info!(
1075 0 : level0_deltas = level0_deltas.len(),
1076 0 : threshold, "too few deltas to compact, cannot force compaction"
1077 : );
1078 0 : return Ok(CompactLevel0Phase1Result::default());
1079 : }
1080 : } else {
1081 672 : debug!(
1082 0 : level0_deltas = level0_deltas.len(),
1083 0 : threshold, "too few deltas to compact"
1084 : );
1085 672 : return Ok(CompactLevel0Phase1Result::default());
1086 : }
1087 56 : }
1088 :
1089 56 : let mut level0_deltas = level0_deltas
1090 56 : .iter()
1091 804 : .map(|x| guard.get_from_desc(x))
1092 56 : .collect::<Vec<_>>();
1093 56 :
1094 56 : // Gather the files to compact in this iteration.
1095 56 : //
1096 56 : // Start with the oldest Level 0 delta file, and collect any other
1097 56 : // level 0 files that form a contiguous sequence, such that the end
1098 56 : // LSN of previous file matches the start LSN of the next file.
1099 56 : //
1100 56 : // Note that if the files don't form such a sequence, we might
1101 56 : // "compact" just a single file. That's a bit pointless, but it allows
1102 56 : // us to get rid of the level 0 file, and compact the other files on
1103 56 : // the next iteration. This could probably made smarter, but such
1104 56 : // "gaps" in the sequence of level 0 files should only happen in case
1105 56 : // of a crash, partial download from cloud storage, or something like
1106 56 : // that, so it's not a big deal in practice.
1107 1496 : level0_deltas.sort_by_key(|l| l.layer_desc().lsn_range.start);
1108 56 : let mut level0_deltas_iter = level0_deltas.iter();
1109 56 :
1110 56 : let first_level0_delta = level0_deltas_iter.next().unwrap();
1111 56 : let mut prev_lsn_end = first_level0_delta.layer_desc().lsn_range.end;
1112 56 : let mut deltas_to_compact = Vec::with_capacity(level0_deltas.len());
1113 56 :
1114 56 : // Accumulate the size of layers in `deltas_to_compact`
1115 56 : let mut deltas_to_compact_bytes = 0;
1116 56 :
1117 56 : // Under normal circumstances, we will accumulate up to compaction_interval L0s of size
1118 56 : // checkpoint_distance each. To avoid edge cases using extra system resources, bound our
1119 56 : // work in this function to only operate on this much delta data at once.
1120 56 : //
1121 56 : // Take the max of the configured value & the default, so that tests that configure tiny values
1122 56 : // can still use a sensible amount of memory, but if a deployed system configures bigger values we
1123 56 : // still let them compact a full stack of L0s in one go.
1124 56 : let delta_size_limit = std::cmp::max(
1125 56 : self.get_compaction_threshold(),
1126 56 : DEFAULT_COMPACTION_THRESHOLD,
1127 56 : ) as u64
1128 56 : * std::cmp::max(self.get_checkpoint_distance(), DEFAULT_CHECKPOINT_DISTANCE);
1129 56 :
1130 56 : let mut fully_compacted = true;
1131 56 :
1132 56 : deltas_to_compact.push(first_level0_delta.download_and_keep_resident().await?);
1133 804 : for l in level0_deltas_iter {
1134 748 : let lsn_range = &l.layer_desc().lsn_range;
1135 748 :
1136 748 : if lsn_range.start != prev_lsn_end {
1137 0 : break;
1138 748 : }
1139 748 : deltas_to_compact.push(l.download_and_keep_resident().await?);
1140 748 : deltas_to_compact_bytes += l.metadata().file_size;
1141 748 : prev_lsn_end = lsn_range.end;
1142 748 :
1143 748 : if deltas_to_compact_bytes >= delta_size_limit {
1144 0 : info!(
1145 0 : l0_deltas_selected = deltas_to_compact.len(),
1146 0 : l0_deltas_total = level0_deltas.len(),
1147 0 : "L0 compaction picker hit max delta layer size limit: {}",
1148 : delta_size_limit
1149 : );
1150 0 : fully_compacted = false;
1151 0 :
1152 0 : // Proceed with compaction, but only a subset of L0s
1153 0 : break;
1154 748 : }
1155 : }
1156 56 : let lsn_range = Range {
1157 56 : start: deltas_to_compact
1158 56 : .first()
1159 56 : .unwrap()
1160 56 : .layer_desc()
1161 56 : .lsn_range
1162 56 : .start,
1163 56 : end: deltas_to_compact.last().unwrap().layer_desc().lsn_range.end,
1164 56 : };
1165 56 :
1166 56 : info!(
1167 0 : "Starting Level0 compaction in LSN range {}-{} for {} layers ({} deltas in total)",
1168 0 : lsn_range.start,
1169 0 : lsn_range.end,
1170 0 : deltas_to_compact.len(),
1171 0 : level0_deltas.len()
1172 : );
1173 :
1174 804 : for l in deltas_to_compact.iter() {
1175 804 : info!("compact includes {l}");
1176 : }
1177 :
1178 : // We don't need the original list of layers anymore. Drop it so that
1179 : // we don't accidentally use it later in the function.
1180 56 : drop(level0_deltas);
1181 56 :
1182 56 : stats.read_lock_held_prerequisites_micros = stats
1183 56 : .read_lock_held_spawn_blocking_startup_micros
1184 56 : .till_now();
1185 :
1186 : // TODO: replace with streaming k-merge
1187 56 : let all_keys = {
1188 56 : let mut all_keys = Vec::new();
1189 804 : for l in deltas_to_compact.iter() {
1190 804 : if self.cancel.is_cancelled() {
1191 0 : return Err(CompactionError::ShuttingDown);
1192 804 : }
1193 804 : let delta = l.get_as_delta(ctx).await.map_err(CompactionError::Other)?;
1194 804 : let keys = delta
1195 804 : .index_entries(ctx)
1196 804 : .await
1197 804 : .map_err(CompactionError::Other)?;
1198 804 : all_keys.extend(keys);
1199 : }
1200 : // The current stdlib sorting implementation is designed in a way where it is
1201 : // particularly fast when the slice is made up of sorted sub-ranges.
1202 8847542 : all_keys.sort_by_key(|DeltaEntry { key, lsn, .. }| (*key, *lsn));
1203 56 : all_keys
1204 56 : };
1205 56 :
1206 56 : stats.read_lock_held_key_sort_micros = stats.read_lock_held_prerequisites_micros.till_now();
1207 :
1208 : // Determine N largest holes where N is number of compacted layers. The vec is sorted by key range start.
1209 : //
1210 : // A hole is a key range for which this compaction doesn't have any WAL records.
1211 : // Our goal in this compaction iteration is to avoid creating L1s that, in terms of their key range,
1212 : // cover the hole, but actually don't contain any WAL records for that key range.
1213 : // The reason is that the mere stack of L1s (`count_deltas`) triggers image layer creation (`create_image_layers`).
1214 : // That image layer creation would be useless for a hole range covered by L1s that don't contain any WAL records.
1215 : //
1216 : // The algorithm chooses holes as follows.
1217 : // - Slide a 2-window over the keys in key order to get the hole range (=distance between two keys).
1218 : // - Filter: min threshold on range length
1219 : // - Rank: by coverage size (=number of image layers required to reconstruct each key in the range for which we have any data)
1220 : //
1221 : // For more details, intuition, and some ASCII art see https://github.com/neondatabase/neon/pull/3597#discussion_r1112704451
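// Worked example (illustrative, not in the original source): with an 8 KiB page size
// and a 256 MiB target file size, min_hole_range below is 32768 keys. If consecutive
// data keys jump from K to K' with K'.to_i128() - K.to_i128() >= that threshold, the
// range K..K' becomes a candidate hole; it is kept only if at least
// min_hole_coverage_size (3) image layers cover it at last_record_lsn, and only the
// `deltas_to_compact.len()` largest candidates survive the min-heap.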
1222 : #[derive(PartialEq, Eq)]
1223 : struct Hole {
1224 : key_range: Range<Key>,
1225 : coverage_size: usize,
1226 : }
1227 56 : let holes: Vec<Hole> = {
1228 : use std::cmp::Ordering;
1229 : impl Ord for Hole {
1230 0 : fn cmp(&self, other: &Self) -> Ordering {
1231 0 : self.coverage_size.cmp(&other.coverage_size).reverse()
1232 0 : }
1233 : }
1234 : impl PartialOrd for Hole {
1235 0 : fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
1236 0 : Some(self.cmp(other))
1237 0 : }
1238 : }
1239 56 : let max_holes = deltas_to_compact.len();
1240 56 : let last_record_lsn = self.get_last_record_lsn();
1241 56 : let min_hole_range = (target_file_size / page_cache::PAGE_SZ as u64) as i128;
1242 56 : let min_hole_coverage_size = 3; // TODO: something more flexible?
1243 56 : // min-heap (reserve space for one more element added before eviction)
1244 56 : let mut heap: BinaryHeap<Hole> = BinaryHeap::with_capacity(max_holes + 1);
1245 56 : let mut prev: Option<Key> = None;
1246 :
1247 4128076 : for &DeltaEntry { key: next_key, .. } in all_keys.iter() {
1248 4128076 : if let Some(prev_key) = prev {
1249 : // just a first fast filter: do not create hole entries for metadata keys. The last hole in the
1250 : // compaction is the gap between data keys and metadata keys.
1251 4128020 : if next_key.to_i128() - prev_key.to_i128() >= min_hole_range
1252 0 : && !Key::is_metadata_key(&prev_key)
1253 : {
1254 0 : let key_range = prev_key..next_key;
1255 0 : // Measuring a hole by just subtracting the i128 representations of the key range boundaries
1256 0 : // doesn't make much sense, because the largest holes will correspond to field1/field2 changes.
1257 0 : // But we are mostly interested in eliminating holes which cause generation of excessive image layers.
1258 0 : // That is why it is better to measure the size of a hole as the number of covering image layers.
1259 0 : let coverage_size =
1260 0 : layers.image_coverage(&key_range, last_record_lsn).len();
1261 0 : if coverage_size >= min_hole_coverage_size {
1262 0 : heap.push(Hole {
1263 0 : key_range,
1264 0 : coverage_size,
1265 0 : });
1266 0 : if heap.len() > max_holes {
1267 0 : heap.pop(); // remove smallest hole
1268 0 : }
1269 0 : }
1270 4128020 : }
1271 56 : }
1272 4128076 : prev = Some(next_key.next());
1273 : }
1274 56 : let mut holes = heap.into_vec();
1275 56 : holes.sort_unstable_by_key(|hole| hole.key_range.start);
1276 56 : holes
1277 56 : };
1278 56 : stats.read_lock_held_compute_holes_micros = stats.read_lock_held_key_sort_micros.till_now();
1279 56 : drop_rlock(guard);
1280 56 :
1281 56 : if self.cancel.is_cancelled() {
1282 0 : return Err(CompactionError::ShuttingDown);
1283 56 : }
1284 56 :
1285 56 : stats.read_lock_drop_micros = stats.read_lock_held_compute_holes_micros.till_now();
1286 :
1287 : // This iterator walks through all key-value pairs from all the layers
1288 : // we're compacting, in key, LSN order.
1289 : // If there's both a Value::Image and Value::WalRecord for the same (key,lsn),
1290 : // then the Value::Image is ordered before Value::WalRecord.
1291 56 : let mut all_values_iter = {
1292 56 : let mut deltas = Vec::with_capacity(deltas_to_compact.len());
1293 804 : for l in deltas_to_compact.iter() {
1294 804 : let l = l.get_as_delta(ctx).await.map_err(CompactionError::Other)?;
1295 804 : deltas.push(l);
1296 : }
1297 56 : MergeIterator::create(&deltas, &[], ctx)
1298 56 : };
1299 56 :
1300 56 : // This iterator walks through all keys and is needed to calculate size used by each key
1301 56 : let mut all_keys_iter = all_keys
1302 56 : .iter()
1303 4128076 : .map(|DeltaEntry { key, lsn, size, .. }| (*key, *lsn, *size))
1304 4128020 : .coalesce(|mut prev, cur| {
1305 4128020 : // Coalesce keys that belong to the same key pair.
1306 4128020 : // This ensures that compaction doesn't put them
1307 4128020 : // into different layer files.
1308 4128020 : // Still limit this by the target file size,
1309 4128020 : // so that we keep the size of the files in
1310 4128020 : // check.
1311 4128020 : if prev.0 == cur.0 && prev.2 < target_file_size {
1312 80076 : prev.2 += cur.2;
1313 80076 : Ok(prev)
1314 : } else {
1315 4047944 : Err((prev, cur))
1316 : }
1317 4128020 : });
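// Example of the coalescing above (illustrative, not in the original source): two index
// entries (K, lsn1, 4096) and (K, lsn2, 4096) for the same key K merge into a single
// (K, lsn1, 8192) tuple as long as the size accumulated so far stays below
// target_file_size, so both versions are treated as one unit when choosing layer cut
// points below.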
1318 56 :
1319 56 : // Merge the contents of all the input delta layers into a new set
1320 56 : // of delta layers, based on the current partitioning.
1321 56 : //
1322 56 : // We split the new delta layers on the key dimension. We iterate through the key space, and for each key, check whether adding the next key to the current output layer we're building would cause the layer to become too large. If so, dump the current output layer and start a new one.
1323 56 : // It's possible that there is a single key with so many page versions that storing all of them in a single layer file
1324 56 : // would be too large. In that case, we also split on the LSN dimension.
1325 56 : //
1326 56 : // LSN
1327 56 : // ^
1328 56 : // |
1329 56 : // | +-----------+ +--+--+--+--+
1330 56 : // | | | | | | | |
1331 56 : // | +-----------+ | | | | |
1332 56 : // | | | | | | | |
1333 56 : // | +-----------+ ==> | | | | |
1334 56 : // | | | | | | | |
1335 56 : // | +-----------+ | | | | |
1336 56 : // | | | | | | | |
1337 56 : // | +-----------+ +--+--+--+--+
1338 56 : // |
1339 56 : // +--------------> key
1340 56 : //
1341 56 : //
1342 56 : // If one key (X) has a lot of page versions:
1343 56 : //
1344 56 : // LSN
1345 56 : // ^
1346 56 : // | (X)
1347 56 : // | +-----------+ +--+--+--+--+
1348 56 : // | | | | | | | |
1349 56 : // | +-----------+ | | +--+ |
1350 56 : // | | | | | | | |
1351 56 : // | +-----------+ ==> | | | | |
1352 56 : // | | | | | +--+ |
1353 56 : // | +-----------+ | | | | |
1354 56 : // | | | | | | | |
1355 56 : // | +-----------+ +--+--+--+--+
1356 56 : // |
1357 56 : // +--------------> key
1358 56 : // TODO: this actually divides the layers into fixed-size chunks, not
1359 56 : // based on the partitioning.
1360 56 : //
1361 56 : // TODO: we should also opportunistically materialize and
1362 56 : // garbage collect what we can.
1363 56 : let mut new_layers = Vec::new();
1364 56 : let mut prev_key: Option<Key> = None;
1365 56 : let mut writer: Option<DeltaLayerWriter> = None;
1366 56 : let mut key_values_total_size = 0u64;
1367 56 : let mut dup_start_lsn: Lsn = Lsn::INVALID; // start LSN of layer containing values of the single key
1368 56 : let mut dup_end_lsn: Lsn = Lsn::INVALID; // end LSN of layer containing values of the single key
1369 56 : let mut next_hole = 0; // index of next hole in holes vector
1370 56 :
1371 56 : let mut keys = 0;
1372 :
1373 4128132 : while let Some((key, lsn, value)) = all_values_iter
1374 4128132 : .next()
1375 4128132 : .await
1376 4128132 : .map_err(CompactionError::Other)?
1377 : {
1378 4128076 : keys += 1;
1379 4128076 :
1380 4128076 : if keys % 32_768 == 0 && self.cancel.is_cancelled() {
1381 : // Avoid hitting the cancellation token on every key. In benches, we end up
1382 : // shuffling on the order of a million keys per layer, so we'll check it
1383 : // around tens of times per layer.
1384 0 : return Err(CompactionError::ShuttingDown);
1385 4128076 : }
1386 4128076 :
1387 4128076 : let same_key = prev_key == Some(key);
1388 4128076 : // We need to check key boundaries once we reach next key or end of layer with the same key
1389 4128076 : if !same_key || lsn == dup_end_lsn {
1390 4048000 : let mut next_key_size = 0u64;
1391 4048000 : let is_dup_layer = dup_end_lsn.is_valid();
1392 4048000 : dup_start_lsn = Lsn::INVALID;
1393 4048000 : if !same_key {
1394 4048000 : dup_end_lsn = Lsn::INVALID;
1395 4048000 : }
1396 : // Determine size occupied by this key. We stop at next key or when size becomes larger than target_file_size
1397 4048000 : for (next_key, next_lsn, next_size) in all_keys_iter.by_ref() {
1398 4048000 : next_key_size = next_size;
1399 4048000 : if key != next_key {
1400 4047944 : if dup_end_lsn.is_valid() {
1401 0 : // We are writing a segment with duplicates:
1402 0 : // place all remaining values of this key in a separate segment
1403 0 : dup_start_lsn = dup_end_lsn; // the new segment starts where the old one stops
1404 0 : dup_end_lsn = lsn_range.end; // there are no more values of this key till the end of the LSN range
1405 4047944 : }
1406 4047944 : break;
1407 56 : }
1408 56 : key_values_total_size += next_size;
1409 56 : // Check if it is time to split the segment: if the total size of the keys is larger than the target file size.
1410 56 : // We need to avoid generating empty segments if next_size > target_file_size.
1411 56 : if key_values_total_size > target_file_size && lsn != next_lsn {
1412 : // Split the key between multiple layers: such a layer can contain only a single key
1413 0 : dup_start_lsn = if dup_end_lsn.is_valid() {
1414 0 : dup_end_lsn // new segment with duplicates starts where old one stops
1415 : } else {
1416 0 : lsn // start with the first LSN for this key
1417 : };
1418 0 : dup_end_lsn = next_lsn; // upper LSN boundary is exclusive
1419 0 : break;
1420 56 : }
1421 : }
1422 : // handle case when loop reaches last key: in this case dup_end is non-zero but dup_start is not set.
1423 4048000 : if dup_end_lsn.is_valid() && !dup_start_lsn.is_valid() {
1424 0 : dup_start_lsn = dup_end_lsn;
1425 0 : dup_end_lsn = lsn_range.end;
1426 4048000 : }
1427 4048000 : if writer.is_some() {
1428 4047944 : let written_size = writer.as_mut().unwrap().size();
1429 4047944 : let contains_hole =
1430 4047944 : next_hole < holes.len() && key >= holes[next_hole].key_range.end;
1431 : // check if this key causes the layer to overflow or the layer would contain a hole...
1432 4047944 : if is_dup_layer
1433 4047944 : || dup_end_lsn.is_valid()
1434 4047944 : || written_size + key_values_total_size > target_file_size
1435 4047384 : || contains_hole
1436 : {
1437 : // ... if so, flush previous layer and prepare to write new one
1438 560 : let (desc, path) = writer
1439 560 : .take()
1440 560 : .unwrap()
1441 560 : .finish(prev_key.unwrap().next(), ctx)
1442 560 : .await
1443 560 : .map_err(CompactionError::Other)?;
1444 560 : let new_delta = Layer::finish_creating(self.conf, self, desc, &path)
1445 560 : .map_err(CompactionError::Other)?;
1446 :
1447 560 : new_layers.push(new_delta);
1448 560 : writer = None;
1449 560 :
1450 560 : if contains_hole {
1451 0 : // skip hole
1452 0 : next_hole += 1;
1453 560 : }
1454 4047384 : }
1455 56 : }
1456 : // Remember the size of the key value because at the next iteration we will access the next item
1457 4048000 : key_values_total_size = next_key_size;
1458 80076 : }
1459 4128076 : fail_point!("delta-layer-writer-fail-before-finish", |_| {
1460 0 : Err(CompactionError::Other(anyhow::anyhow!(
1461 0 : "failpoint delta-layer-writer-fail-before-finish"
1462 0 : )))
1463 4128076 : });
1464 :
1465 4128076 : if !self.shard_identity.is_key_disposable(&key) {
1466 4128076 : if writer.is_none() {
1467 616 : if self.cancel.is_cancelled() {
1468 : // to be somewhat responsive to cancellation, check for each new layer
1469 0 : return Err(CompactionError::ShuttingDown);
1470 616 : }
1471 : // Create writer if not initiaized yet
1472 616 : writer = Some(
1473 : DeltaLayerWriter::new(
1474 616 : self.conf,
1475 616 : self.timeline_id,
1476 616 : self.tenant_shard_id,
1477 616 : key,
1478 616 : if dup_end_lsn.is_valid() {
1479 : // this is a layer containing a slice of values of the same key
1480 0 : debug!("Create new dup layer {}..{}", dup_start_lsn, dup_end_lsn);
1481 0 : dup_start_lsn..dup_end_lsn
1482 : } else {
1483 616 : debug!("Create new layer {}..{}", lsn_range.start, lsn_range.end);
1484 616 : lsn_range.clone()
1485 : },
1486 616 : ctx,
1487 616 : )
1488 616 : .await
1489 616 : .map_err(CompactionError::Other)?,
1490 : );
1491 :
1492 616 : keys = 0;
1493 4127460 : }
1494 :
1495 4128076 : writer
1496 4128076 : .as_mut()
1497 4128076 : .unwrap()
1498 4128076 : .put_value(key, lsn, value, ctx)
1499 4128076 : .await
1500 4128076 : .map_err(CompactionError::Other)?;
1501 : } else {
1502 0 : let shard = self.shard_identity.shard_index();
1503 0 : let owner = self.shard_identity.get_shard_number(&key);
1504 0 : if cfg!(debug_assertions) {
1505 0 : panic!("key {key} does not belong on shard {shard}, owned by {owner}");
1506 0 : }
1507 0 : debug!("dropping key {key} during compaction (it belongs on shard {owner})");
1508 : }
1509 :
1510 4128076 : if !new_layers.is_empty() {
1511 39572 : fail_point!("after-timeline-compacted-first-L1");
1512 4088504 : }
1513 :
1514 4128076 : prev_key = Some(key);
1515 : }
1516 56 : if let Some(writer) = writer {
1517 56 : let (desc, path) = writer
1518 56 : .finish(prev_key.unwrap().next(), ctx)
1519 56 : .await
1520 56 : .map_err(CompactionError::Other)?;
1521 56 : let new_delta = Layer::finish_creating(self.conf, self, desc, &path)
1522 56 : .map_err(CompactionError::Other)?;
1523 56 : new_layers.push(new_delta);
1524 0 : }
1525 :
1526 : // Sync layers
1527 56 : if !new_layers.is_empty() {
1528 : // Print a warning if the created layer is larger than double the target size
1529 : // Add two pages for potential overhead. This should in theory be already
1530 : // accounted for in the target calculation, but for very small targets,
1531 : // we still might easily hit the limit otherwise.
1532 56 : let warn_limit = target_file_size * 2 + page_cache::PAGE_SZ as u64 * 2;
1533 616 : for layer in new_layers.iter() {
1534 616 : if layer.layer_desc().file_size > warn_limit {
1535 0 : warn!(
1536 : %layer,
1537 0 : "created delta file of size {} larger than double of target of {target_file_size}", layer.layer_desc().file_size
1538 : );
1539 616 : }
1540 : }
1541 :
1542 : // The writer.finish() above already did the fsync of the inodes.
1543 : // We just need to fsync the directory in which these inodes are linked,
1544 : // which we know to be the timeline directory.
1545 : //
1546 : // We use fatal_err() below because after writer.finish() returns with success,
1547 : // the in-memory state of the filesystem already has the layer file in its final place,
1548 : // and subsequent pageserver code could think it's durable while it really isn't.
1549 56 : let timeline_dir = VirtualFile::open(
1550 56 : &self
1551 56 : .conf
1552 56 : .timeline_path(&self.tenant_shard_id, &self.timeline_id),
1553 56 : ctx,
1554 56 : )
1555 56 : .await
1556 56 : .fatal_err("VirtualFile::open for timeline dir fsync");
1557 56 : timeline_dir
1558 56 : .sync_all()
1559 56 : .await
1560 56 : .fatal_err("VirtualFile::sync_all timeline dir");
1561 0 : }
1562 :
1563 56 : stats.write_layer_files_micros = stats.read_lock_drop_micros.till_now();
1564 56 : stats.new_deltas_count = Some(new_layers.len());
1565 616 : stats.new_deltas_size = Some(new_layers.iter().map(|l| l.layer_desc().file_size).sum());
1566 56 :
1567 56 : match TryInto::<CompactLevel0Phase1Stats>::try_into(stats)
1568 56 : .and_then(|stats| serde_json::to_string(&stats).context("serde_json::to_string"))
1569 : {
1570 56 : Ok(stats_json) => {
1571 56 : info!(
1572 0 : stats_json = stats_json.as_str(),
1573 0 : "compact_level0_phase1 stats available"
1574 : )
1575 : }
1576 0 : Err(e) => {
1577 0 : warn!("compact_level0_phase1 stats failed to serialize: {:#}", e);
1578 : }
1579 : }
1580 :
1581 : // Without this, rustc complains about deltas_to_compact still
1582 : // being borrowed when we `.into_iter()` below.
1583 56 : drop(all_values_iter);
1584 56 :
1585 56 : Ok(CompactLevel0Phase1Result {
1586 56 : new_layers,
1587 56 : deltas_to_compact: deltas_to_compact
1588 56 : .into_iter()
1589 804 : .map(|x| x.drop_eviction_guard())
1590 56 : .collect::<Vec<_>>(),
1591 56 : fully_compacted,
1592 56 : })
1593 728 : }
1594 : }
1595 :
1596 : #[derive(Default)]
1597 : struct CompactLevel0Phase1Result {
1598 : new_layers: Vec<ResidentLayer>,
1599 : deltas_to_compact: Vec<Layer>,
1600 : // Whether we have included all L0 layers, or selected only part of them due to the
1601 : // L0 compaction size limit.
1602 : fully_compacted: bool,
1603 : }
1604 :
1605 : #[derive(Default)]
1606 : struct CompactLevel0Phase1StatsBuilder {
1607 : version: Option<u64>,
1608 : tenant_id: Option<TenantShardId>,
1609 : timeline_id: Option<TimelineId>,
1610 : read_lock_acquisition_micros: DurationRecorder,
1611 : read_lock_held_spawn_blocking_startup_micros: DurationRecorder,
1612 : read_lock_held_key_sort_micros: DurationRecorder,
1613 : read_lock_held_prerequisites_micros: DurationRecorder,
1614 : read_lock_held_compute_holes_micros: DurationRecorder,
1615 : read_lock_drop_micros: DurationRecorder,
1616 : write_layer_files_micros: DurationRecorder,
1617 : level0_deltas_count: Option<usize>,
1618 : new_deltas_count: Option<usize>,
1619 : new_deltas_size: Option<u64>,
1620 : }
1621 :
1622 : #[derive(serde::Serialize)]
1623 : struct CompactLevel0Phase1Stats {
1624 : version: u64,
1625 : tenant_id: TenantShardId,
1626 : timeline_id: TimelineId,
1627 : read_lock_acquisition_micros: RecordedDuration,
1628 : read_lock_held_spawn_blocking_startup_micros: RecordedDuration,
1629 : read_lock_held_key_sort_micros: RecordedDuration,
1630 : read_lock_held_prerequisites_micros: RecordedDuration,
1631 : read_lock_held_compute_holes_micros: RecordedDuration,
1632 : read_lock_drop_micros: RecordedDuration,
1633 : write_layer_files_micros: RecordedDuration,
1634 : level0_deltas_count: usize,
1635 : new_deltas_count: usize,
1636 : new_deltas_size: u64,
1637 : }
1638 :
1639 : impl TryFrom<CompactLevel0Phase1StatsBuilder> for CompactLevel0Phase1Stats {
1640 : type Error = anyhow::Error;
1641 :
1642 56 : fn try_from(value: CompactLevel0Phase1StatsBuilder) -> Result<Self, Self::Error> {
1643 56 : Ok(Self {
1644 56 : version: value.version.ok_or_else(|| anyhow!("version not set"))?,
1645 56 : tenant_id: value
1646 56 : .tenant_id
1647 56 : .ok_or_else(|| anyhow!("tenant_id not set"))?,
1648 56 : timeline_id: value
1649 56 : .timeline_id
1650 56 : .ok_or_else(|| anyhow!("timeline_id not set"))?,
1651 56 : read_lock_acquisition_micros: value
1652 56 : .read_lock_acquisition_micros
1653 56 : .into_recorded()
1654 56 : .ok_or_else(|| anyhow!("read_lock_acquisition_micros not set"))?,
1655 56 : read_lock_held_spawn_blocking_startup_micros: value
1656 56 : .read_lock_held_spawn_blocking_startup_micros
1657 56 : .into_recorded()
1658 56 : .ok_or_else(|| anyhow!("read_lock_held_spawn_blocking_startup_micros not set"))?,
1659 56 : read_lock_held_key_sort_micros: value
1660 56 : .read_lock_held_key_sort_micros
1661 56 : .into_recorded()
1662 56 : .ok_or_else(|| anyhow!("read_lock_held_key_sort_micros not set"))?,
1663 56 : read_lock_held_prerequisites_micros: value
1664 56 : .read_lock_held_prerequisites_micros
1665 56 : .into_recorded()
1666 56 : .ok_or_else(|| anyhow!("read_lock_held_prerequisites_micros not set"))?,
1667 56 : read_lock_held_compute_holes_micros: value
1668 56 : .read_lock_held_compute_holes_micros
1669 56 : .into_recorded()
1670 56 : .ok_or_else(|| anyhow!("read_lock_held_compute_holes_micros not set"))?,
1671 56 : read_lock_drop_micros: value
1672 56 : .read_lock_drop_micros
1673 56 : .into_recorded()
1674 56 : .ok_or_else(|| anyhow!("read_lock_drop_micros not set"))?,
1675 56 : write_layer_files_micros: value
1676 56 : .write_layer_files_micros
1677 56 : .into_recorded()
1678 56 : .ok_or_else(|| anyhow!("write_layer_files_micros not set"))?,
1679 56 : level0_deltas_count: value
1680 56 : .level0_deltas_count
1681 56 : .ok_or_else(|| anyhow!("level0_deltas_count not set"))?,
1682 56 : new_deltas_count: value
1683 56 : .new_deltas_count
1684 56 : .ok_or_else(|| anyhow!("new_deltas_count not set"))?,
1685 56 : new_deltas_size: value
1686 56 : .new_deltas_size
1687 56 : .ok_or_else(|| anyhow!("new_deltas_size not set"))?,
1688 : })
1689 56 : }
1690 : }
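// --- Editorial illustration (not part of the original source) ---
// A minimal sketch of how an incomplete stats builder surfaces as an error:
// every field is checked with `ok_or_else`, so the first missing field names
// itself in the error message. The test function and its values are hypothetical.
#[cfg(test)]
#[test]
fn incomplete_phase1_stats_builder_fails_with_field_name() {
    let builder = CompactLevel0Phase1StatsBuilder {
        version: Some(2),
        ..Default::default()
    };
    let err = CompactLevel0Phase1Stats::try_from(builder)
        .err()
        .expect("conversion of an incomplete builder should fail");
    // `version` is set, so the conversion fails on the next unset field.
    assert!(err.to_string().contains("tenant_id not set"));
}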
1691 :
1692 : impl Timeline {
1693 : /// Entry point for new tiered compaction algorithm.
1694 : ///
1695 : /// All the real work is in the implementation in the pageserver_compaction
1696 : /// crate. The code here would apply to any algorithm implemented by the
1697 : /// same interface, but tiered is the only one at the moment.
1698 : ///
1699 : /// TODO: cancellation
1700 0 : pub(crate) async fn compact_tiered(
1701 0 : self: &Arc<Self>,
1702 0 : _cancel: &CancellationToken,
1703 0 : ctx: &RequestContext,
1704 0 : ) -> Result<(), CompactionError> {
1705 0 : let fanout = self.get_compaction_threshold() as u64;
1706 0 : let target_file_size = self.get_checkpoint_distance();
1707 :
1708 : // Find the top of the historical layers
1709 0 : let end_lsn = {
1710 0 : let guard = self.layers.read().await;
1711 0 : let layers = guard.layer_map()?;
1712 :
1713 0 : let l0_deltas = layers.level0_deltas();
1714 0 :
1715 0 : // As an optimization, if we find that there are too few L0 layers,
1716 0 : // bail out early. We know that the compaction algorithm would do
1717 0 : // nothing in that case.
1718 0 : if l0_deltas.len() < fanout as usize {
1719 : // doesn't need compacting
1720 0 : return Ok(());
1721 0 : }
1722 0 : l0_deltas.iter().map(|l| l.lsn_range.end).max().unwrap()
1723 0 : };
1724 0 :
1725 0 : // Is the timeline being deleted?
1726 0 : if self.is_stopping() {
1727 0 : trace!("Dropping out of compaction on timeline shutdown");
1728 0 : return Err(CompactionError::ShuttingDown);
1729 0 : }
1730 :
1731 0 : let (dense_ks, _sparse_ks) = self.collect_keyspace(end_lsn, ctx).await?;
1732 : // TODO(chi): ignore sparse_keyspace for now, compact it in the future.
1733 0 : let mut adaptor = TimelineAdaptor::new(self, (end_lsn, dense_ks));
1734 0 :
1735 0 : pageserver_compaction::compact_tiered::compact_tiered(
1736 0 : &mut adaptor,
1737 0 : end_lsn,
1738 0 : target_file_size,
1739 0 : fanout,
1740 0 : ctx,
1741 0 : )
1742 0 : .await
1743 : // TODO: compact_tiered needs to return CompactionError
1744 0 : .map_err(CompactionError::Other)?;
1745 :
1746 0 : adaptor.flush_updates().await?;
1747 0 : Ok(())
1748 0 : }
1749 :
1750 : /// Take a list of images and deltas, produce images and deltas according to GC horizon and retain_lsns.
1751 : ///
1752 : /// It takes a key, the values of the key within the compaction process, a GC horizon, and all retain_lsns below the horizon.
1753 : /// For now, it requires the `accumulated_values` contains the full history of the key (i.e., the key with the lowest LSN is
1754 : /// an image or a WAL not requiring a base image). This restriction will be removed once we implement gc-compaction on branch.
1755 : ///
1756 : /// The function returns the deltas and the base image that need to be placed at each of the retain LSN. For example, we have:
1757 : ///
1758 : /// A@0x10, +B@0x20, +C@0x30, +D@0x40, +E@0x50, +F@0x60
1759 : /// horizon = 0x50, retain_lsn = 0x20, 0x40, delta_threshold=3
1760 : ///
1761 : /// The function will produce:
1762 : ///
1763 : /// ```plain
1764 : /// 0x20(retain_lsn) -> img=AB@0x20 always produce a single image below the lowest retain LSN
1765 : /// 0x40(retain_lsn) -> deltas=[+C@0x30, +D@0x40] two deltas since the last base image, keeping the deltas
1766 : /// 0x50(horizon) -> deltas=[ABCDE@0x50] three deltas since the last base image, generate an image but put it in the delta
1767 : /// above_horizon -> deltas=[+F@0x60] full history above the horizon
1768 : /// ```
1769 : ///
1770 : /// Note that `accumulated_values` must be sorted by LSN and should belong to a single key.
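/// As an editorial illustration (not from the original source), the example above is
/// first split into per-LSN buckets before any images are generated; with
/// retain_lsns = 0x20, 0x40 and horizon = 0x50, the buckets would be:
///
/// ```plain
/// <= 0x20 (first retain_lsn)  -> A@0x10, +B@0x20
/// <= 0x40 (second retain_lsn) -> +C@0x30, +D@0x40
/// <= 0x50 (horizon)           -> +E@0x50
/// above horizon               -> +F@0x60
/// ```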
1771 1260 : pub(crate) async fn generate_key_retention(
1772 1260 : self: &Arc<Timeline>,
1773 1260 : key: Key,
1774 1260 : full_history: &[(Key, Lsn, Value)],
1775 1260 : horizon: Lsn,
1776 1260 : retain_lsn_below_horizon: &[Lsn],
1777 1260 : delta_threshold_cnt: usize,
1778 1260 : base_img_from_ancestor: Option<(Key, Lsn, Bytes)>,
1779 1260 : ) -> anyhow::Result<KeyHistoryRetention> {
1780 : // Pre-checks for the invariants
1781 :
1782 1260 : let debug_mode = cfg!(debug_assertions) || cfg!(feature = "testing");
1783 :
1784 1260 : if debug_mode {
1785 3060 : for (log_key, _, _) in full_history {
1786 1800 : assert_eq!(log_key, &key, "mismatched key");
1787 : }
1788 1260 : for i in 1..full_history.len() {
1789 540 : assert!(full_history[i - 1].1 <= full_history[i].1, "unordered LSN");
1790 540 : if full_history[i - 1].1 == full_history[i].1 {
1791 0 : assert!(
1792 0 : matches!(full_history[i - 1].2, Value::Image(_)),
1793 0 : "unordered delta/image, or duplicated delta"
1794 : );
1795 540 : }
1796 : }
1797 : // There used to be an assertion that, when there is no base image, checked
1798 : // whether the first record in the history is `will_init`, but it was removed.
1799 : // This is explained in the test cases for generate_key_retention.
1800 : // Search "incomplete history" for more information.
1801 2820 : for lsn in retain_lsn_below_horizon {
1802 1560 : assert!(lsn < &horizon, "retain lsn must be below horizon")
1803 : }
1804 1260 : for i in 1..retain_lsn_below_horizon.len() {
1805 712 : assert!(
1806 712 : retain_lsn_below_horizon[i - 1] <= retain_lsn_below_horizon[i],
1807 0 : "unordered LSN"
1808 : );
1809 : }
1810 0 : }
1811 1260 : let has_ancestor = base_img_from_ancestor.is_some();
1812 : // Step 1: split history into len(retain_lsn_below_horizon) + 2 buckets, where the last bucket is for all deltas above the horizon,
1813 : // and the second-to-last bucket is for the horizon. Each bucket contains lsn_last_bucket < deltas <= lsn_this_bucket.
1814 1260 : let (mut split_history, lsn_split_points) = {
1815 1260 : let mut split_history = Vec::new();
1816 1260 : split_history.resize_with(retain_lsn_below_horizon.len() + 2, Vec::new);
1817 1260 : let mut lsn_split_points = Vec::with_capacity(retain_lsn_below_horizon.len() + 1);
1818 2820 : for lsn in retain_lsn_below_horizon {
1819 1560 : lsn_split_points.push(*lsn);
1820 1560 : }
1821 1260 : lsn_split_points.push(horizon);
1822 1260 : let mut current_idx = 0;
1823 3060 : for item @ (_, lsn, _) in full_history {
1824 2288 : while current_idx < lsn_split_points.len() && *lsn > lsn_split_points[current_idx] {
1825 488 : current_idx += 1;
1826 488 : }
1827 1800 : split_history[current_idx].push(item);
1828 : }
1829 1260 : (split_history, lsn_split_points)
1830 : };
1831 : // Step 2: filter out duplicated records due to the k-merge of image/delta layers
1832 5340 : for split_for_lsn in &mut split_history {
1833 4080 : let mut prev_lsn = None;
1834 4080 : let mut new_split_for_lsn = Vec::with_capacity(split_for_lsn.len());
1835 4080 : for record @ (_, lsn, _) in std::mem::take(split_for_lsn) {
1836 1800 : if let Some(prev_lsn) = &prev_lsn {
1837 236 : if *prev_lsn == lsn {
1838 : // This is the case where an LSN has data from both the delta layer and the image layer. As
1839 : // `ValueWrapper` ensures that an image is ordered before a delta at the same LSN, we simply
1840 : // drop this delta and keep the image.
1841 : //
1842 : // For example, we have delta layer key1@0x10, key1@0x20, and image layer key1@0x10, we will
1843 : // keep the image for key1@0x10 and the delta for key1@0x20. key1@0x10 delta will be simply
1844 : // dropped.
1845 : //
1846 : // TODO: in case we have both delta + images for a given LSN and it does not exceed the delta
1847 : // threshold, we could have kept delta instead to save space. This is an optimization for the future.
1848 0 : continue;
1849 236 : }
1850 1564 : }
1851 1800 : prev_lsn = Some(lsn);
1852 1800 : new_split_for_lsn.push(record);
1853 : }
1854 4080 : *split_for_lsn = new_split_for_lsn;
1855 : }
1856 : // Step 3: generate images when necessary
1857 1260 : let mut retention = Vec::with_capacity(split_history.len());
1858 1260 : let mut records_since_last_image = 0;
1859 1260 : let batch_cnt = split_history.len();
1860 1260 : assert!(
1861 1260 : batch_cnt >= 2,
1862 0 : "should have at least below + above horizon batches"
1863 : );
1864 1260 : let mut replay_history: Vec<(Key, Lsn, Value)> = Vec::new();
1865 1260 : if let Some((key, lsn, img)) = base_img_from_ancestor {
1866 84 : replay_history.push((key, lsn, Value::Image(img)));
1867 1176 : }
1868 :
1869 : /// Generate debug information for the replay history
1870 0 : fn generate_history_trace(replay_history: &[(Key, Lsn, Value)]) -> String {
1871 : use std::fmt::Write;
1872 0 : let mut output = String::new();
1873 0 : if let Some((key, _, _)) = replay_history.first() {
1874 0 : write!(output, "key={} ", key).unwrap();
1875 0 : let mut cnt = 0;
1876 0 : for (_, lsn, val) in replay_history {
1877 0 : if val.is_image() {
1878 0 : write!(output, "i@{} ", lsn).unwrap();
1879 0 : } else if val.will_init() {
1880 0 : write!(output, "di@{} ", lsn).unwrap();
1881 0 : } else {
1882 0 : write!(output, "d@{} ", lsn).unwrap();
1883 0 : }
1884 0 : cnt += 1;
1885 0 : if cnt >= 128 {
1886 0 : write!(output, "... and more").unwrap();
1887 0 : break;
1888 0 : }
1889 : }
1890 0 : } else {
1891 0 : write!(output, "<no history>").unwrap();
1892 0 : }
1893 0 : output
1894 0 : }
1895 :
1896 0 : fn generate_debug_trace(
1897 0 : replay_history: Option<&[(Key, Lsn, Value)]>,
1898 0 : full_history: &[(Key, Lsn, Value)],
1899 0 : lsns: &[Lsn],
1900 0 : horizon: Lsn,
1901 0 : ) -> String {
1902 : use std::fmt::Write;
1903 0 : let mut output = String::new();
1904 0 : if let Some(replay_history) = replay_history {
1905 0 : writeln!(
1906 0 : output,
1907 0 : "replay_history: {}",
1908 0 : generate_history_trace(replay_history)
1909 0 : )
1910 0 : .unwrap();
1911 0 : } else {
1912 0 : writeln!(output, "replay_history: <disabled>",).unwrap();
1913 0 : }
1914 0 : writeln!(
1915 0 : output,
1916 0 : "full_history: {}",
1917 0 : generate_history_trace(full_history)
1918 0 : )
1919 0 : .unwrap();
1920 0 : writeln!(
1921 0 : output,
1922 0 : "when processing: [{}] horizon={}",
1923 0 : lsns.iter().map(|l| format!("{l}")).join(","),
1924 0 : horizon
1925 0 : )
1926 0 : .unwrap();
1927 0 : output
1928 0 : }
1929 :
1930 1260 : let mut key_exists = false;
1931 4080 : for (i, split_for_lsn) in split_history.into_iter().enumerate() {
1932 : // TODO: there could be image keys inside the splits, and we can compute records_since_last_image accordingly.
1933 4080 : records_since_last_image += split_for_lsn.len();
1934 : // Whether to produce an image into the final layer files
1935 4080 : let produce_image = if i == 0 && !has_ancestor {
1936 : // We always generate images for the first batch (below horizon / lowest retain_lsn)
1937 1176 : true
1938 2904 : } else if i == batch_cnt - 1 {
1939 : // Do not generate images for the last batch (above horizon)
1940 1260 : false
1941 1644 : } else if records_since_last_image == 0 {
1942 1288 : false
1943 356 : } else if records_since_last_image >= delta_threshold_cnt {
1944 : // Generate images when there are too many records
1945 12 : true
1946 : } else {
1947 344 : false
1948 : };
1949 4080 : replay_history.extend(split_for_lsn.iter().map(|x| (*x).clone()));
1950 : // Only retain the items after the last image record
1951 5028 : for idx in (0..replay_history.len()).rev() {
1952 5028 : if replay_history[idx].2.will_init() {
1953 4080 : replay_history = replay_history[idx..].to_vec();
1954 4080 : break;
1955 948 : }
1956 : }
1957 4080 : if replay_history.is_empty() && !key_exists {
1958 : // The key does not exist at earlier LSN, we can skip this iteration.
1959 0 : retention.push(Vec::new());
1960 0 : continue;
1961 4080 : } else {
1962 4080 : key_exists = true;
1963 4080 : }
1964 4080 : let Some((_, _, val)) = replay_history.first() else {
1965 0 : unreachable!("replay history should not be empty once it exists")
1966 : };
1967 4080 : if !val.will_init() {
1968 0 : return Err(anyhow::anyhow!("invalid history, no base image")).with_context(|| {
1969 0 : generate_debug_trace(
1970 0 : Some(&replay_history),
1971 0 : full_history,
1972 0 : retain_lsn_below_horizon,
1973 0 : horizon,
1974 0 : )
1975 0 : });
1976 4080 : }
1977 : // Whether to reconstruct the image. In debug mode, we will generate an image
1978 : // at every retain_lsn to ensure data is not corrupted, but we won't put the
1979 : // image into the final layer.
1980 4080 : let generate_image = produce_image || debug_mode;
1981 4080 : if produce_image {
1982 1188 : records_since_last_image = 0;
1983 2892 : }
1984 4080 : let img_and_lsn = if generate_image {
1985 4080 : let replay_history_for_debug = if debug_mode {
1986 4080 : Some(replay_history.clone())
1987 : } else {
1988 0 : None
1989 : };
1990 4080 : let replay_history_for_debug_ref = replay_history_for_debug.as_deref();
1991 4080 : let history = if produce_image {
1992 1188 : std::mem::take(&mut replay_history)
1993 : } else {
1994 2892 : replay_history.clone()
1995 : };
1996 4080 : let mut img = None;
1997 4080 : let mut records = Vec::with_capacity(history.len());
1998 4080 : if let (_, lsn, Value::Image(val)) = history.first().as_ref().unwrap() {
1999 4036 : img = Some((*lsn, val.clone()));
2000 4036 : for (_, lsn, val) in history.into_iter().skip(1) {
2001 920 : let Value::WalRecord(rec) = val else {
2002 0 : return Err(anyhow::anyhow!(
2003 0 : "invalid record, first record is image, expect walrecords"
2004 0 : ))
2005 0 : .with_context(|| {
2006 0 : generate_debug_trace(
2007 0 : replay_history_for_debug_ref,
2008 0 : full_history,
2009 0 : retain_lsn_below_horizon,
2010 0 : horizon,
2011 0 : )
2012 0 : });
2013 : };
2014 920 : records.push((lsn, rec));
2015 : }
2016 : } else {
2017 72 : for (_, lsn, val) in history.into_iter() {
2018 72 : let Value::WalRecord(rec) = val else {
2019 0 : return Err(anyhow::anyhow!("invalid record, first record is walrecord, expect rest are walrecord"))
2020 0 : .with_context(|| generate_debug_trace(
2021 0 : replay_history_for_debug_ref,
2022 0 : full_history,
2023 0 : retain_lsn_below_horizon,
2024 0 : horizon,
2025 0 : ));
2026 : };
2027 72 : records.push((lsn, rec));
2028 : }
2029 : }
2030 4080 : records.reverse();
2031 4080 : let state = ValueReconstructState { img, records };
2032 : // The last batch does not generate an image, so i is always in range, unless we force generating
2033 : // an image during testing.
2034 4080 : let request_lsn = if i >= lsn_split_points.len() {
2035 1260 : Lsn::MAX
2036 : } else {
2037 2820 : lsn_split_points[i]
2038 : };
2039 4080 : let img = self.reconstruct_value(key, request_lsn, state).await?;
2040 4080 : Some((request_lsn, img))
2041 : } else {
2042 0 : None
2043 : };
2044 4080 : if produce_image {
2045 1188 : let (request_lsn, img) = img_and_lsn.unwrap();
2046 1188 : replay_history.push((key, request_lsn, Value::Image(img.clone())));
2047 1188 : retention.push(vec![(request_lsn, Value::Image(img))]);
2048 2892 : } else {
2049 2892 : let deltas = split_for_lsn
2050 2892 : .iter()
2051 2892 : .map(|(_, lsn, value)| (*lsn, value.clone()))
2052 2892 : .collect_vec();
2053 2892 : retention.push(deltas);
2054 2892 : }
2055 : }
2056 1260 : let mut result = Vec::with_capacity(retention.len());
2057 1260 : assert_eq!(retention.len(), lsn_split_points.len() + 1);
2058 4080 : for (idx, logs) in retention.into_iter().enumerate() {
2059 4080 : if idx == lsn_split_points.len() {
2060 1260 : return Ok(KeyHistoryRetention {
2061 1260 : below_horizon: result,
2062 1260 : above_horizon: KeyLogAtLsn(logs),
2063 1260 : });
2064 2820 : } else {
2065 2820 : result.push((lsn_split_points[idx], KeyLogAtLsn(logs)));
2066 2820 : }
2067 : }
2068 0 : unreachable!("key retention is empty")
2069 1260 : }
2070 :
2071 : /// Check how much space is left on the disk
2072 104 : async fn check_available_space(self: &Arc<Self>) -> anyhow::Result<u64> {
2073 104 : let tenants_dir = self.conf.tenants_path();
2074 :
2075 104 : let stat = Statvfs::get(&tenants_dir, None)
2076 104 : .context("statvfs failed, presumably directory got unlinked")?;
2077 :
2078 104 : let (avail_bytes, _) = stat.get_avail_total_bytes();
2079 104 :
2080 104 : Ok(avail_bytes)
2081 104 : }
2082 :
2083 : /// Check if the compaction can proceed safely without running out of space. We assume the size
2084 : /// upper bound of the produced files of a compaction job is the same as all layers involved in
2085 : /// the compaction. Therefore, we need at least `2 * layers_to_be_compacted_size` of free space
2086 : /// to do a compaction.
2087 104 : async fn check_compaction_space(
2088 104 : self: &Arc<Self>,
2089 104 : layer_selection: &[Layer],
2090 104 : ) -> anyhow::Result<()> {
2091 104 : let available_space = self.check_available_space().await?;
2092 104 : let mut remote_layer_size = 0;
2093 104 : let mut all_layer_size = 0;
2094 408 : for layer in layer_selection {
2095 304 : let needs_download = layer.needs_download().await?;
2096 304 : if needs_download.is_some() {
2097 0 : remote_layer_size += layer.layer_desc().file_size;
2098 304 : }
2099 304 : all_layer_size += layer.layer_desc().file_size;
2100 : }
2101 104 : let allocated_space = (available_space as f64 * 0.8) as u64; /* reserve 20% space for other tasks */
2102 104 : if all_layer_size /* space needed for newly-generated file */ + remote_layer_size /* space for downloading layers */ > allocated_space
2103 : {
2104 0 : return Err(anyhow!("not enough space for compaction: available_space={}, allocated_space={}, all_layer_size={}, remote_layer_size={}, required_space={}",
2105 0 : available_space, allocated_space, all_layer_size, remote_layer_size, all_layer_size + remote_layer_size));
2106 104 : }
2107 104 : Ok(())
2108 104 : }
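// --- Editorial illustration (not part of the original source) ---
// A minimal sketch of the space check above as a pure function over byte sizes:
// 20% of the available space is reserved for other tasks, and the job must fit
// the size of all selected layers plus the layers that still need downloading.
fn has_enough_space_for_compaction(
    available_space: u64,
    all_layer_size: u64,
    remote_layer_size: u64,
) -> bool {
    let allocated_space = (available_space as f64 * 0.8) as u64;
    all_layer_size + remote_layer_size <= allocated_space
}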
2109 :
2110 : /// Get a watermark for gc-compaction, that is the lowest LSN that we can use as the `gc_horizon` for
2111 : /// the compaction algorithm. It is min(space_cutoff, time_cutoff, latest_gc_cutoff, standby_horizon).
2112 : /// Leases and retain_lsns are considered in the gc-compaction job itself so we don't need to account for them
2113 : /// here.
2114 108 : pub(crate) fn get_gc_compaction_watermark(self: &Arc<Self>) -> Lsn {
2115 108 : let gc_cutoff_lsn = {
2116 108 : let gc_info = self.gc_info.read().unwrap();
2117 108 : gc_info.min_cutoff()
2118 108 : };
2119 108 :
2120 108 : // TODO: standby horizon should use leases so we don't really need to consider it here.
2121 108 : // let watermark = watermark.min(self.standby_horizon.load());
2122 108 :
2123 108 : // TODO: ensure the child branches will not use anything below the watermark, or consider
2124 108 : // them when computing the watermark.
2125 108 : gc_cutoff_lsn.min(*self.get_latest_gc_cutoff_lsn())
2126 108 : }
2127 :
2128 : /// Split a gc-compaction job into multiple compaction jobs. The split is based on the key range and the estimated size of the compaction job.
2129 : /// The function returns a list of compaction jobs that can be executed separately. If the upper bound of the compact LSN
2130 : /// range is not specified, we will use the latest gc_cutoff as the upper bound, so that all jobs in the jobset act
2131 : /// like a full compaction of the specified keyspace.
2132 0 : pub(crate) async fn gc_compaction_split_jobs(
2133 0 : self: &Arc<Self>,
2134 0 : job: GcCompactJob,
2135 0 : sub_compaction_max_job_size_mb: Option<u64>,
2136 0 : ) -> anyhow::Result<Vec<GcCompactJob>> {
2137 0 : let compact_below_lsn = if job.compact_lsn_range.end != Lsn::MAX {
2138 0 : job.compact_lsn_range.end
2139 : } else {
2140 0 : self.get_gc_compaction_watermark()
2141 : };
2142 :
2143 0 : if compact_below_lsn == Lsn::INVALID {
2144 0 : tracing::warn!("no layers to compact with gc: gc_cutoff not generated yet, skipping gc bottom-most compaction");
2145 0 : return Ok(vec![]);
2146 0 : }
2147 :
2148 : // Split compaction job to about 4GB each
2149 : const GC_COMPACT_MAX_SIZE_MB: u64 = 4 * 1024;
2150 0 : let sub_compaction_max_job_size_mb =
2151 0 : sub_compaction_max_job_size_mb.unwrap_or(GC_COMPACT_MAX_SIZE_MB);
2152 0 :
2153 0 : let mut compact_jobs = Vec::new();
2154 0 : // For now, we simply use the key partitioning information; we should do a more fine-grained partitioning
2155 0 : // by estimating the number of files read for a compaction job. We should also partition on LSN.
2156 0 : let ((dense_ks, sparse_ks), _) = self.partitioning.read().as_ref().clone();
2157 : // Truncate the key range to be within user specified compaction range.
2158 0 : fn truncate_to(
2159 0 : source_start: &Key,
2160 0 : source_end: &Key,
2161 0 : target_start: &Key,
2162 0 : target_end: &Key,
2163 0 : ) -> Option<(Key, Key)> {
2164 0 : let start = source_start.max(target_start);
2165 0 : let end = source_end.min(target_end);
2166 0 : if start < end {
2167 0 : Some((*start, *end))
2168 : } else {
2169 0 : None
2170 : }
2171 0 : }
2172 0 : let mut split_key_ranges = Vec::new();
2173 0 : let ranges = dense_ks
2174 0 : .parts
2175 0 : .iter()
2176 0 : .map(|partition| partition.ranges.iter())
2177 0 : .chain(sparse_ks.parts.iter().map(|x| x.0.ranges.iter()))
2178 0 : .flatten()
2179 0 : .cloned()
2180 0 : .collect_vec();
2181 0 : for range in ranges.iter() {
2182 0 : let Some((start, end)) = truncate_to(
2183 0 : &range.start,
2184 0 : &range.end,
2185 0 : &job.compact_key_range.start,
2186 0 : &job.compact_key_range.end,
2187 0 : ) else {
2188 0 : continue;
2189 : };
2190 0 : split_key_ranges.push((start, end));
2191 : }
2192 0 : split_key_ranges.sort();
2193 0 : let guard = self.layers.read().await;
2194 0 : let layer_map = guard.layer_map()?;
2195 0 : let mut current_start = None;
2196 0 : let ranges_num = split_key_ranges.len();
2197 0 : for (idx, (start, end)) in split_key_ranges.into_iter().enumerate() {
2198 0 : if current_start.is_none() {
2199 0 : current_start = Some(start);
2200 0 : }
2201 0 : let start = current_start.unwrap();
2202 0 : if start >= end {
2203 : // We have already processed this partition.
2204 0 : continue;
2205 0 : }
2206 0 : let res = layer_map.range_search(start..end, compact_below_lsn);
2207 0 : let total_size = res.found.keys().map(|x| x.layer.file_size()).sum::<u64>();
2208 0 : if total_size > sub_compaction_max_job_size_mb * 1024 * 1024 || ranges_num == idx + 1 {
2209 : // Try to extend the compaction range so that we include at least one full layer file.
2210 0 : let extended_end = res
2211 0 : .found
2212 0 : .keys()
2213 0 : .map(|layer| layer.layer.key_range.end)
2214 0 : .min();
2215 : // It is possible that the search range does not contain any layer files when we reach the end of the loop.
2216 : // In this case, we simply use the specified key range end.
2217 0 : let end = if let Some(extended_end) = extended_end {
2218 0 : extended_end.max(end)
2219 : } else {
2220 0 : end
2221 : };
2222 0 : let end = if ranges_num == idx + 1 {
2223 : // extend the compaction range to the end of the key range if it's the last partition
2224 0 : end.max(job.compact_key_range.end)
2225 : } else {
2226 0 : end
2227 : };
2228 0 : info!(
2229 0 : "splitting compaction job: {}..{}, estimated_size={}",
2230 : start, end, total_size
2231 : );
2232 0 : compact_jobs.push(GcCompactJob {
2233 0 : dry_run: job.dry_run,
2234 0 : compact_key_range: start..end,
2235 0 : compact_lsn_range: job.compact_lsn_range.start..compact_below_lsn,
2236 0 : });
2237 0 : current_start = Some(end);
2238 0 : }
2239 : }
2240 0 : drop(guard);
2241 0 : Ok(compact_jobs)
2242 0 : }
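// --- Editorial illustration (not part of the original source) ---
// A minimal sketch of the job-splitting loop above, with key ranges reduced to
// integers and the layer-map lookup abstracted into a `span_size` callback.
// It omits the "extend to a full layer file" and "extend the last job to the
// end of the key range" adjustments that the real code performs.
fn split_jobs_by_size_sketch(
    split_key_ranges: &[(u64, u64)],     // sorted, truncated partitions
    span_size: impl Fn(u64, u64) -> u64, // estimated layer bytes in start..end
    max_job_size_bytes: u64,
) -> Vec<(u64, u64)> {
    let mut jobs = Vec::new();
    let mut current_start: Option<u64> = None;
    let ranges_num = split_key_ranges.len();
    for (idx, &(start, end)) in split_key_ranges.iter().enumerate() {
        let start = *current_start.get_or_insert(start);
        if start >= end {
            continue; // already covered by a previously emitted job
        }
        if span_size(start, end) > max_job_size_bytes || idx + 1 == ranges_num {
            jobs.push((start, end));
            current_start = Some(end);
        }
    }
    jobs
}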
2243 :
2244 : /// An experimental compaction building block that combines compaction with garbage collection.
2245 : ///
2246 : /// The current implementation picks all delta + image layers that are below or intersecting with
2247 : /// the GC horizon without considering retain_lsns. Then, it does a full compaction over all these delta
2248 : /// layers and image layers, which generates image layers on the gc horizon, drop deltas below gc horizon,
2249 : /// and create delta layers with all deltas >= gc horizon.
2250 : ///
2251 : /// If `options.compact_range` is provided, it will only compact the keys within the range, aka partial compaction.
2252 : /// Partial compaction will read and process all layers overlapping with the key range, even if it might
2253 : /// contain extra keys. After the gc-compaction phase completes, delta layers that are not fully contained
2254 : /// within the key range will be rewritten to ensure they do not overlap with the delta layers. Providing
2255 : /// `Key::MIN..Key::MAX` to the function indicates a full compaction, though technically, `Key::MAX` is not
2256 : /// part of the range.
2257 : ///
2258 : /// If `options.compact_lsn_range.end` is provided, the compaction will only compact layers below or intersect with
2259 : /// the LSN. Otherwise, it will use the gc cutoff by default.
2260 108 : pub(crate) async fn compact_with_gc(
2261 108 : self: &Arc<Self>,
2262 108 : cancel: &CancellationToken,
2263 108 : options: CompactOptions,
2264 108 : ctx: &RequestContext,
2265 108 : ) -> anyhow::Result<()> {
2266 108 : let sub_compaction = options.sub_compaction;
2267 108 : let job = GcCompactJob::from_compact_options(options.clone());
2268 108 : if sub_compaction {
2269 0 : info!("running enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs");
2270 0 : let jobs = self
2271 0 : .gc_compaction_split_jobs(job, options.sub_compaction_max_job_size_mb)
2272 0 : .await?;
2273 0 : let jobs_len = jobs.len();
2274 0 : for (idx, job) in jobs.into_iter().enumerate() {
2275 0 : info!(
2276 0 : "running enhanced gc bottom-most compaction, sub-compaction {}/{}",
2277 0 : idx + 1,
2278 : jobs_len
2279 : );
2280 0 : self.compact_with_gc_inner(cancel, job, ctx).await?;
2281 : }
2282 0 : if jobs_len == 0 {
2283 0 : info!("no jobs to run, skipping gc bottom-most compaction");
2284 0 : }
2285 0 : return Ok(());
2286 108 : }
2287 108 : self.compact_with_gc_inner(cancel, job, ctx).await
2288 108 : }
2289 :
2290 108 : async fn compact_with_gc_inner(
2291 108 : self: &Arc<Self>,
2292 108 : cancel: &CancellationToken,
2293 108 : job: GcCompactJob,
2294 108 : ctx: &RequestContext,
2295 108 : ) -> anyhow::Result<()> {
2296 108 : // Block other compaction/GC tasks from running for now. GC-compaction could run along
2297 108 : // with legacy compaction tasks in the future. Always ensure the lock order is compaction -> gc.
2298 108 : // Note that we already acquired the compaction lock when the outer `compact` function gets called.
2299 108 :
2300 108 : let gc_lock = async {
2301 108 : tokio::select! {
2302 108 : guard = self.gc_lock.lock() => Ok(guard),
2303 : // TODO: refactor to CompactionError to correctly pass cancelled error
2304 108 : _ = cancel.cancelled() => Err(anyhow!("cancelled")),
2305 : }
2306 108 : };
2307 :
2308 108 : let gc_lock = crate::timed(
2309 108 : gc_lock,
2310 108 : "acquires gc lock",
2311 108 : std::time::Duration::from_secs(5),
2312 108 : )
2313 108 : .await?;
2314 :
2315 108 : let dry_run = job.dry_run;
2316 108 : let compact_key_range = job.compact_key_range;
2317 108 : let compact_lsn_range = job.compact_lsn_range;
2318 :
2319 108 : let debug_mode = cfg!(debug_assertions) || cfg!(feature = "testing");
2320 :
2321 108 : info!("running enhanced gc bottom-most compaction, dry_run={dry_run}, compact_key_range={}..{}, compact_lsn_range={}..{}", compact_key_range.start, compact_key_range.end, compact_lsn_range.start, compact_lsn_range.end);
2322 :
2323 108 : scopeguard::defer! {
2324 108 : info!("done enhanced gc bottom-most compaction");
2325 108 : };
2326 108 :
2327 108 : let mut stat = CompactionStatistics::default();
2328 :
2329 : // Step 0: pick all delta layers + image layers below/intersect with the GC horizon.
2330 : // The layer selection has the following properties:
2331 : // 1. If a layer is in the selection, all layers below it are in the selection.
2332 : // 2. Inferred from (1), for each key in the layer selection, the value can be reconstructed only with the layers in the layer selection.
2333 104 : let job_desc = {
2334 108 : let guard = self.layers.read().await;
2335 108 : let layers = guard.layer_map()?;
2336 108 : let gc_info = self.gc_info.read().unwrap();
2337 108 : let mut retain_lsns_below_horizon = Vec::new();
2338 108 : let gc_cutoff = {
2339 : // Currently, gc-compaction only kicks in after the legacy gc has updated the gc_cutoff.
2340 : // Therefore, it can only clean up data that cannot be cleaned up with legacy gc, instead of
2341 : // cleaning up everything that it theoretically could. In the future, it should use `self.gc_info`
2342 : // to get the truth data.
2343 108 : let real_gc_cutoff = self.get_gc_compaction_watermark();
2344 : // The compaction algorithm will keep all keys above the gc_cutoff while keeping only necessary keys below the gc_cutoff for
2345 : // each of the retain_lsn. Therefore, if the user-provided `compact_lsn_range.end` is larger than the real gc cutoff, we will use
2346 : // the real cutoff.
2347 108 : let mut gc_cutoff = if compact_lsn_range.end == Lsn::MAX {
2348 96 : if real_gc_cutoff == Lsn::INVALID {
2349 : // If the gc_cutoff is not generated yet, we should not compact anything.
2350 0 : tracing::warn!("no layers to compact with gc: gc_cutoff not generated yet, skipping gc bottom-most compaction");
2351 0 : return Ok(());
2352 96 : }
2353 96 : real_gc_cutoff
2354 : } else {
2355 12 : compact_lsn_range.end
2356 : };
2357 108 : if gc_cutoff > real_gc_cutoff {
2358 8 : warn!("provided compact_lsn_range.end={} is larger than the real_gc_cutoff={}, using the real gc cutoff", gc_cutoff, real_gc_cutoff);
2359 8 : gc_cutoff = real_gc_cutoff;
2360 100 : }
2361 108 : gc_cutoff
2362 : };
2363 140 : for (lsn, _timeline_id, _is_offloaded) in &gc_info.retain_lsns {
2364 140 : if lsn < &gc_cutoff {
2365 140 : retain_lsns_below_horizon.push(*lsn);
2366 140 : }
2367 : }
2368 108 : for lsn in gc_info.leases.keys() {
2369 0 : if lsn < &gc_cutoff {
2370 0 : retain_lsns_below_horizon.push(*lsn);
2371 0 : }
2372 : }
2373 108 : let mut selected_layers: Vec<Layer> = Vec::new();
2374 108 : drop(gc_info);
2375 : // First, pick all the layers that intersect with or are below the gc_cutoff, and get the largest LSN among the selected layers.
2376 108 : let Some(max_layer_lsn) = layers
2377 108 : .iter_historic_layers()
2378 488 : .filter(|desc| desc.get_lsn_range().start <= gc_cutoff)
2379 416 : .map(|desc| desc.get_lsn_range().end)
2380 108 : .max()
2381 : else {
2382 0 : info!("no layers to compact with gc: no historic layers below gc_cutoff, gc_cutoff={}", gc_cutoff);
2383 0 : return Ok(());
2384 : };
2385 : // Next, if the user specifies compact_lsn_range.start, we need to filter some layers out. All the layers (strictly) below
2386 : // the min_layer_lsn computed as below will be filtered out and the data will be accessed using the normal read path, as if
2387 : // it is a branch.
2388 108 : let Some(min_layer_lsn) = layers
2389 108 : .iter_historic_layers()
2390 488 : .filter(|desc| {
2391 488 : if compact_lsn_range.start == Lsn::INVALID {
2392 396 : true // select all layers below if start == Lsn(0)
2393 : } else {
2394 92 : desc.get_lsn_range().end > compact_lsn_range.start // strictly larger than compact_above_lsn
2395 : }
2396 488 : })
2397 452 : .map(|desc| desc.get_lsn_range().start)
2398 108 : .min()
2399 : else {
2400 0 : info!("no layers to compact with gc: no historic layers above compact_above_lsn, compact_above_lsn={}", compact_lsn_range.end);
2401 0 : return Ok(());
2402 : };
2403 : // Then, pick all the layers that are below the max_layer_lsn. This is to ensure we can pick all single-key
2404 : // layers to compact.
2405 108 : let mut rewrite_layers = Vec::new();
2406 488 : for desc in layers.iter_historic_layers() {
2407 488 : if desc.get_lsn_range().end <= max_layer_lsn
2408 416 : && desc.get_lsn_range().start >= min_layer_lsn
2409 380 : && overlaps_with(&desc.get_key_range(), &compact_key_range)
2410 : {
2411 : // If the layer overlaps with the compaction key range, we need to read it to obtain all keys within the range,
2412 : // even if it might contain extra keys
2413 304 : selected_layers.push(guard.get_from_desc(&desc));
2414 304 : // If the layer is not fully contained within the key range, we need to rewrite it if it's a delta layer (it's fine
2415 304 : // to overlap image layers)
2416 304 : if desc.is_delta() && !fully_contains(&compact_key_range, &desc.get_key_range())
2417 4 : {
2418 4 : rewrite_layers.push(desc);
2419 300 : }
2420 184 : }
2421 : }
2422 108 : if selected_layers.is_empty() {
2423 4 : info!("no layers to compact with gc: no layers within the key range, gc_cutoff={}, key_range={}..{}", gc_cutoff, compact_key_range.start, compact_key_range.end);
2424 4 : return Ok(());
2425 104 : }
2426 104 : retain_lsns_below_horizon.sort();
2427 104 : GcCompactionJobDescription {
2428 104 : selected_layers,
2429 104 : gc_cutoff,
2430 104 : retain_lsns_below_horizon,
2431 104 : min_layer_lsn,
2432 104 : max_layer_lsn,
2433 104 : compaction_key_range: compact_key_range,
2434 104 : rewrite_layers,
2435 104 : }
2436 : };
2437 104 : let (has_data_below, lowest_retain_lsn) = if compact_lsn_range.start != Lsn::INVALID {
2438 : // If we only compact above some LSN, we should get the history from the current branch below the specified LSN.
2439 : // We use job_desc.min_layer_lsn as if it's the lowest branch point.
2440 16 : (true, job_desc.min_layer_lsn)
2441 88 : } else if self.ancestor_timeline.is_some() {
2442 : // In theory, we can also use min_layer_lsn here, but using ancestor LSN makes sure the delta layers cover the
2443 : // LSN ranges all the way to the ancestor timeline.
2444 4 : (true, self.ancestor_lsn)
2445 : } else {
2446 84 : let res = job_desc
2447 84 : .retain_lsns_below_horizon
2448 84 : .first()
2449 84 : .copied()
2450 84 : .unwrap_or(job_desc.gc_cutoff);
2451 84 : if debug_mode {
2452 84 : assert_eq!(
2453 84 : res,
2454 84 : job_desc
2455 84 : .retain_lsns_below_horizon
2456 84 : .iter()
2457 84 : .min()
2458 84 : .copied()
2459 84 : .unwrap_or(job_desc.gc_cutoff)
2460 84 : );
2461 0 : }
2462 84 : (false, res)
2463 : };
2464 104 : info!(
2465 0 : "picked {} layers for compaction ({} layers need rewriting) with max_layer_lsn={} min_layer_lsn={} gc_cutoff={} lowest_retain_lsn={}, key_range={}..{}, has_data_below={}",
2466 0 : job_desc.selected_layers.len(),
2467 0 : job_desc.rewrite_layers.len(),
2468 : job_desc.max_layer_lsn,
2469 : job_desc.min_layer_lsn,
2470 : job_desc.gc_cutoff,
2471 : lowest_retain_lsn,
2472 : job_desc.compaction_key_range.start,
2473 : job_desc.compaction_key_range.end,
2474 : has_data_below,
2475 : );
2476 :
2477 408 : for layer in &job_desc.selected_layers {
2478 304 : debug!("read layer: {}", layer.layer_desc().key());
2479 : }
2480 108 : for layer in &job_desc.rewrite_layers {
2481 4 : debug!("rewrite layer: {}", layer.key());
2482 : }
2483 :
2484 104 : self.check_compaction_space(&job_desc.selected_layers)
2485 104 : .await?;
2486 :
2487 : // Generate statistics for the compaction
2488 408 : for layer in &job_desc.selected_layers {
2489 304 : let desc = layer.layer_desc();
2490 304 : if desc.is_delta() {
2491 172 : stat.visit_delta_layer(desc.file_size());
2492 172 : } else {
2493 132 : stat.visit_image_layer(desc.file_size());
2494 132 : }
2495 : }
2496 :
2497 : // Step 1: construct a k-merge iterator over all layers.
2498 : // Also, verify if the layer map can be split by drawing a horizontal line at every LSN start/end split point.
2499 104 : let layer_names = job_desc
2500 104 : .selected_layers
2501 104 : .iter()
2502 304 : .map(|layer| layer.layer_desc().layer_name())
2503 104 : .collect_vec();
2504 104 : if let Some(err) = check_valid_layermap(&layer_names) {
2505 0 : bail!("gc-compaction layer map check failed because {}, cannot proceed with compaction due to potential data loss", err);
2506 104 : }
2507 104 : // The maximum LSN we are processing in this compaction loop
2508 104 : let end_lsn = job_desc
2509 104 : .selected_layers
2510 104 : .iter()
2511 304 : .map(|l| l.layer_desc().lsn_range.end)
2512 104 : .max()
2513 104 : .unwrap();
2514 104 : let mut delta_layers = Vec::new();
2515 104 : let mut image_layers = Vec::new();
2516 104 : let mut downloaded_layers = Vec::new();
2517 104 : let mut total_downloaded_size = 0;
2518 104 : let mut total_layer_size = 0;
2519 408 : for layer in &job_desc.selected_layers {
2520 304 : if layer.needs_download().await?.is_some() {
2521 0 : total_downloaded_size += layer.layer_desc().file_size;
2522 304 : }
2523 304 : total_layer_size += layer.layer_desc().file_size;
2524 304 : let resident_layer = layer.download_and_keep_resident().await?;
2525 304 : downloaded_layers.push(resident_layer);
2526 : }
2527 104 : info!(
2528 0 : "finish downloading layers, downloaded={}, total={}, ratio={:.2}",
2529 0 : total_downloaded_size,
2530 0 : total_layer_size,
2531 0 : total_downloaded_size as f64 / total_layer_size as f64
2532 : );
2533 408 : for resident_layer in &downloaded_layers {
2534 304 : if resident_layer.layer_desc().is_delta() {
2535 172 : let layer = resident_layer.get_as_delta(ctx).await?;
2536 172 : delta_layers.push(layer);
2537 : } else {
2538 132 : let layer = resident_layer.get_as_image(ctx).await?;
2539 132 : image_layers.push(layer);
2540 : }
2541 : }
2542 104 : let (dense_ks, sparse_ks) = self.collect_gc_compaction_keyspace().await?;
2543 104 : let mut merge_iter = FilterIterator::create(
2544 104 : MergeIterator::create(&delta_layers, &image_layers, ctx),
2545 104 : dense_ks,
2546 104 : sparse_ks,
2547 104 : )?;
2548 :
2549 : // Step 2: Produce images+deltas.
2550 104 : let mut accumulated_values = Vec::new();
2551 104 : let mut last_key: Option<Key> = None;
2552 :
2553 : // Only create image layers when there are no ancestor branches. TODO: create a covering image layer
2554 : // when certain conditions are met.
2555 104 : let mut image_layer_writer = if !has_data_below {
2556 : Some(
2557 84 : SplitImageLayerWriter::new(
2558 84 : self.conf,
2559 84 : self.timeline_id,
2560 84 : self.tenant_shard_id,
2561 84 : job_desc.compaction_key_range.start,
2562 84 : lowest_retain_lsn,
2563 84 : self.get_compaction_target_size(),
2564 84 : ctx,
2565 84 : )
2566 84 : .await?,
2567 : )
2568 : } else {
2569 20 : None
2570 : };
2571 :
2572 104 : let mut delta_layer_writer = SplitDeltaLayerWriter::new(
2573 104 : self.conf,
2574 104 : self.timeline_id,
2575 104 : self.tenant_shard_id,
2576 104 : lowest_retain_lsn..end_lsn,
2577 104 : self.get_compaction_target_size(),
2578 104 : )
2579 104 : .await?;
2580 :
2581 : #[derive(Default)]
2582 : struct RewritingLayers {
2583 : before: Option<DeltaLayerWriter>,
2584 : after: Option<DeltaLayerWriter>,
2585 : }
2586 104 : let mut delta_layer_rewriters = HashMap::<Arc<PersistentLayerKey>, RewritingLayers>::new();
2587 :
2588 : /// When compacting not at a bottom range (=`[0,X)`) of the root branch, we "have data below" (`has_data_below=true`).
2589 : /// The two cases are compaction in ancestor branches and when `compact_lsn_range.start` is set.
2590 : /// In those cases, we need to pull up data from below the LSN range we're compaction.
2591 : ///
2592 : /// This function unifies the cases so that later code doesn't have to think about it.
2593 : ///
2594 : /// Currently, we always get the ancestor image for each key in the child branch no matter whether the image
2595 : /// is needed for reconstruction. This should be fixed in the future.
2596 : ///
2597 : /// Furthermore, we should do vectored get instead of a single get, or better, use k-merge for ancestor
2598 : /// images.
2599 1244 : async fn get_ancestor_image(
2600 1244 : this_tline: &Arc<Timeline>,
2601 1244 : key: Key,
2602 1244 : ctx: &RequestContext,
2603 1244 : has_data_below: bool,
2604 1244 : history_lsn_point: Lsn,
2605 1244 : ) -> anyhow::Result<Option<(Key, Lsn, Bytes)>> {
2606 1244 : if !has_data_below {
2607 1168 : return Ok(None);
2608 76 : };
2609 : // This function is implemented as a get of the current timeline at ancestor LSN, therefore reusing
2610 : // as much existing code as possible.
2611 76 : let img = this_tline.get(key, history_lsn_point, ctx).await?;
2612 76 : Ok(Some((key, history_lsn_point, img)))
2613 1244 : }
2614 :
2615 : // Actually, we can decide not to write to the image layer at all at this point because
2616 : // the key and LSN range are determined. However, to keep things simple here, we still
2617 : // create this writer, and discard the writer in the end.
2618 :
2619 1932 : while let Some(((key, lsn, val), desc)) = merge_iter.next_with_trace().await? {
2620 1828 : if cancel.is_cancelled() {
2621 0 : return Err(anyhow!("cancelled")); // TODO: refactor to CompactionError and pass cancel error
2622 1828 : }
2623 1828 : if self.shard_identity.is_key_disposable(&key) {
2624 : // If this shard does not need to store this key, simply skip it.
2625 : //
2626 : // This is not handled in the filter iterator because shard is determined by hash.
2627 : // Therefore, it does not give us any performance benefit to do things like skip
2628 : // a whole layer file as handling key spaces (ranges).
2629 0 : if cfg!(debug_assertions) {
2630 0 : let shard = self.shard_identity.shard_index();
2631 0 : let owner = self.shard_identity.get_shard_number(&key);
2632 0 : panic!("key {key} does not belong on shard {shard}, owned by {owner}");
2633 0 : }
2634 0 : continue;
2635 1828 : }
2636 1828 : if !job_desc.compaction_key_range.contains(&key) {
2637 128 : if !desc.is_delta {
2638 120 : continue;
2639 8 : }
2640 8 : let rewriter = delta_layer_rewriters.entry(desc.clone()).or_default();
2641 8 : let rewriter = if key < job_desc.compaction_key_range.start {
2642 0 : if rewriter.before.is_none() {
2643 0 : rewriter.before = Some(
2644 0 : DeltaLayerWriter::new(
2645 0 : self.conf,
2646 0 : self.timeline_id,
2647 0 : self.tenant_shard_id,
2648 0 : desc.key_range.start,
2649 0 : desc.lsn_range.clone(),
2650 0 : ctx,
2651 0 : )
2652 0 : .await?,
2653 : );
2654 0 : }
2655 0 : rewriter.before.as_mut().unwrap()
2656 8 : } else if key >= job_desc.compaction_key_range.end {
2657 8 : if rewriter.after.is_none() {
2658 4 : rewriter.after = Some(
2659 4 : DeltaLayerWriter::new(
2660 4 : self.conf,
2661 4 : self.timeline_id,
2662 4 : self.tenant_shard_id,
2663 4 : job_desc.compaction_key_range.end,
2664 4 : desc.lsn_range.clone(),
2665 4 : ctx,
2666 4 : )
2667 4 : .await?,
2668 : );
2669 4 : }
2670 8 : rewriter.after.as_mut().unwrap()
2671 : } else {
2672 0 : unreachable!()
2673 : };
2674 8 : rewriter.put_value(key, lsn, val, ctx).await?;
2675 8 : continue;
2676 1700 : }
2677 1700 : match val {
2678 1220 : Value::Image(_) => stat.visit_image_key(&val),
2679 480 : Value::WalRecord(_) => stat.visit_wal_key(&val),
2680 : }
2681 1700 : if last_key.is_none() || last_key.as_ref() == Some(&key) {
2682 560 : if last_key.is_none() {
2683 104 : last_key = Some(key);
2684 456 : }
2685 560 : accumulated_values.push((key, lsn, val));
2686 : } else {
2687 1140 : let last_key: &mut Key = last_key.as_mut().unwrap();
2688 1140 : stat.on_unique_key_visited(); // TODO: adjust statistics for partial compaction
2689 1140 : let retention = self
2690 1140 : .generate_key_retention(
2691 1140 : *last_key,
2692 1140 : &accumulated_values,
2693 1140 : job_desc.gc_cutoff,
2694 1140 : &job_desc.retain_lsns_below_horizon,
2695 1140 : COMPACTION_DELTA_THRESHOLD,
2696 1140 : get_ancestor_image(self, *last_key, ctx, has_data_below, lowest_retain_lsn)
2697 1140 : .await?,
2698 : )
2699 1140 : .await?;
2700 1140 : retention
2701 1140 : .pipe_to(
2702 1140 : *last_key,
2703 1140 : &mut delta_layer_writer,
2704 1140 : image_layer_writer.as_mut(),
2705 1140 : &mut stat,
2706 1140 : ctx,
2707 1140 : )
2708 1140 : .await?;
2709 1140 : accumulated_values.clear();
2710 1140 : *last_key = key;
2711 1140 : accumulated_values.push((key, lsn, val));
2712 : }
2713 : }
2714 :
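// A minimal sketch of the grouping pattern used above, with a hypothetical `flush` helper standing
// in for `generate_key_retention` + `pipe_to` and simplified types: entries arrive sorted by
// (key, lsn), values for the current key accumulate in a batch and are flushed as soon as the next
// key shows up, and the last key's batch is flushed once more after the loop (which is what the
// TODO below refers to).
//
//     let mut batch = Vec::new();
//     let mut current: Option<Key> = None;
//     for (key, lsn, val) in sorted_entries {
//         if current.is_some() && current != Some(key) {
//             flush(current.take().unwrap(), std::mem::take(&mut batch)).await?;
//         }
//         current = Some(key);
//         batch.push((key, lsn, val));
//     }
//     if let Some(key) = current {
//         flush(key, batch).await?;
//     }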
2715 : // TODO: move the below part to the loop body
2716 104 : let last_key = last_key.expect("no keys produced during compaction");
2717 104 : stat.on_unique_key_visited();
2718 :
2719 104 : let retention = self
2720 104 : .generate_key_retention(
2721 104 : last_key,
2722 104 : &accumulated_values,
2723 104 : job_desc.gc_cutoff,
2724 104 : &job_desc.retain_lsns_below_horizon,
2725 104 : COMPACTION_DELTA_THRESHOLD,
2726 104 : get_ancestor_image(self, last_key, ctx, has_data_below, lowest_retain_lsn).await?,
2727 : )
2728 104 : .await?;
2729 104 : retention
2730 104 : .pipe_to(
2731 104 : last_key,
2732 104 : &mut delta_layer_writer,
2733 104 : image_layer_writer.as_mut(),
2734 104 : &mut stat,
2735 104 : ctx,
2736 104 : )
2737 104 : .await?;
2738 : // end: move the above part to the loop body
2739 :
2740 104 : let mut rewrote_delta_layers = Vec::new();
2741 108 : for (key, writers) in delta_layer_rewriters {
2742 4 : if let Some(delta_writer_before) = writers.before {
2743 0 : let (desc, path) = delta_writer_before
2744 0 : .finish(job_desc.compaction_key_range.start, ctx)
2745 0 : .await?;
2746 0 : let layer = Layer::finish_creating(self.conf, self, desc, &path)?;
2747 0 : rewrote_delta_layers.push(layer);
2748 4 : }
2749 4 : if let Some(delta_writer_after) = writers.after {
2750 4 : let (desc, path) = delta_writer_after.finish(key.key_range.end, ctx).await?;
2751 4 : let layer = Layer::finish_creating(self.conf, self, desc, &path)?;
2752 4 : rewrote_delta_layers.push(layer);
2753 0 : }
2754 : }
2755 :
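// The discard callback passed to the split writers below: for each output layer key it decides
// whether the layer should actually be materialized or discarded, e.g. because an identical
// layer already exists in the layer map, or because this is a dry run.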
2756 148 : let discard = |key: &PersistentLayerKey| {
2757 148 : let key = key.clone();
2758 148 : async move { KeyHistoryRetention::discard_key(&key, self, dry_run).await }
2759 148 : };
2760 :
2761 104 : let produced_image_layers = if let Some(writer) = image_layer_writer {
2762 84 : if !dry_run {
2763 76 : let end_key = job_desc.compaction_key_range.end;
2764 76 : writer
2765 76 : .finish_with_discard_fn(self, ctx, end_key, discard)
2766 76 : .await?
2767 : } else {
2768 8 : drop(writer);
2769 8 : Vec::new()
2770 : }
2771 : } else {
2772 20 : Vec::new()
2773 : };
2774 :
2775 104 : let produced_delta_layers = if !dry_run {
2776 96 : delta_layer_writer
2777 96 : .finish_with_discard_fn(self, ctx, discard)
2778 96 : .await?
2779 : } else {
2780 8 : drop(delta_layer_writer);
2781 8 : Vec::new()
2782 : };
2783 :
2784 : // TODO: make image/delta/rewrote_delta layer generation atomic. We have already generated resident layers at this
2785 : // point, and if compaction is cancelled now, some of them may be left behind without being cleaned up.
2786 104 : let mut compact_to = Vec::new();
2787 104 : let mut keep_layers = HashSet::new();
2788 104 : let produced_delta_layers_len = produced_delta_layers.len();
2789 104 : let produced_image_layers_len = produced_image_layers.len();
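// Each writer result is either `Produced` (a new layer file was written and goes into
// `compact_to`) or `Discarded` (the discard callback decided the layer was unnecessary, so its
// key goes into `keep_layers` and the existing layer stays in the layer map).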
2790 176 : for action in produced_delta_layers {
2791 72 : match action {
2792 44 : BatchWriterResult::Produced(layer) => {
2793 44 : if cfg!(debug_assertions) {
2794 44 : info!("produced delta layer: {}", layer.layer_desc().key());
2795 0 : }
2796 44 : stat.produce_delta_layer(layer.layer_desc().file_size());
2797 44 : compact_to.push(layer);
2798 : }
2799 28 : BatchWriterResult::Discarded(l) => {
2800 28 : if cfg!(debug_assertions) {
2801 28 : info!("discarded delta layer: {}", l);
2802 0 : }
2803 28 : keep_layers.insert(l);
2804 28 : stat.discard_delta_layer();
2805 : }
2806 : }
2807 : }
2808 108 : for layer in &rewrote_delta_layers {
2809 4 : debug!(
2810 0 : "produced rewritten delta layer: {}",
2811 0 : layer.layer_desc().key()
2812 : );
2813 : }
2814 104 : compact_to.extend(rewrote_delta_layers);
2815 180 : for action in produced_image_layers {
2816 76 : match action {
2817 60 : BatchWriterResult::Produced(layer) => {
2818 60 : debug!("produced image layer: {}", layer.layer_desc().key());
2819 60 : stat.produce_image_layer(layer.layer_desc().file_size());
2820 60 : compact_to.push(layer);
2821 : }
2822 16 : BatchWriterResult::Discarded(l) => {
2823 16 : debug!("discarded image layer: {}", l);
2824 16 : keep_layers.insert(l);
2825 16 : stat.discard_image_layer();
2826 : }
2827 : }
2828 : }
2829 :
2830 104 : let mut layer_selection = job_desc.selected_layers;
2831 :
2832 : // Partial compaction might select more data than it processes, e.g., if
2833 : // the compaction_key_range only partially overlaps:
2834 : //
2835 : // [---compaction_key_range---]
2836 : // [---A----][----B----][----C----][----D----]
2837 : //
2838 : // For delta layers, we rewrite them so that they are cut exactly at
2839 : // the compaction key range, so we can always discard them. However, image
2840 : // layers are not rewritten for now, so we need to handle them differently.
2841 : // Assume image layers A, B, C, D are all in the `layer_selection`.
2842 : //
2843 : // The created image layers contain whatever is needed from B, C, and from
2844 : // `----]` of A, and from `[---` of D.
2845 : //
2846 : // In contrast, `[---A` and `D----]` have not been processed, so we must
2847 : // keep that data.
2848 : //
2849 : // The solution for now is to keep A and D completely if they are image layers.
2850 : // (layer_selection is what we'll remove from the layer map, so retain what
2851 : // is _not_ fully covered by compaction_key_range).
2852 408 : for layer in &layer_selection {
2853 304 : if !layer.layer_desc().is_delta() {
2854 132 : if !overlaps_with(
2855 132 : &layer.layer_desc().key_range,
2856 132 : &job_desc.compaction_key_range,
2857 132 : ) {
2858 0 : bail!("violated constraint: image layer outside of compaction key range");
2859 132 : }
2860 132 : if !fully_contains(
2861 132 : &job_desc.compaction_key_range,
2862 132 : &layer.layer_desc().key_range,
2863 132 : ) {
2864 16 : keep_layers.insert(layer.layer_desc().key());
2865 116 : }
2866 172 : }
2867 : }
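// A concrete (illustrative) example of the rule above: with compaction_key_range = 10..30 and
// image layers A = 05..15, B = 15..25, C = 25..35, layers A and C overlap the range but are not
// fully contained in it, so their keys go into `keep_layers` and they survive in the layer map;
// B is fully contained and can be dropped once the newly produced image layers are in place.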
2868 :
2869 304 : layer_selection.retain(|x| !keep_layers.contains(&x.layer_desc().key()));
2870 104 :
2871 104 : info!(
2872 0 : "gc-compaction statistics: {}",
2873 0 : serde_json::to_string(&stat)?
2874 : );
2875 :
2876 104 : if dry_run {
2877 8 : return Ok(());
2878 96 : }
2879 96 :
2880 96 : info!(
2881 0 : "produced {} delta layers and {} image layers, {} layers are kept",
2882 0 : produced_delta_layers_len,
2883 0 : produced_image_layers_len,
2884 0 : keep_layers.len()
2885 : );
2886 :
2887 : // Step 3: Place the new layers back into the layer map.
2888 :
2889 : // First, do a sanity check to ensure the newly-created layer map does not contain overlaps.
2890 96 : let all_layers = {
2891 96 : let guard = self.layers.read().await;
2892 96 : let layer_map = guard.layer_map()?;
2893 96 : layer_map.iter_historic_layers().collect_vec()
2894 96 : };
2895 96 :
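// Simulate the post-compaction layer map: start from every layer currently in the map, remove
// the layers this compaction consumed, add the layers it produced, then validate the result.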
2896 96 : let mut final_layers = all_layers
2897 96 : .iter()
2898 428 : .map(|layer| layer.layer_name())
2899 96 : .collect::<HashSet<_>>();
2900 304 : for layer in &layer_selection {
2901 208 : final_layers.remove(&layer.layer_desc().layer_name());
2902 208 : }
2903 204 : for layer in &compact_to {
2904 108 : final_layers.insert(layer.layer_desc().layer_name());
2905 108 : }
2906 96 : let final_layers = final_layers.into_iter().collect_vec();
2907 :
2908 : // TODO: move this check before we call `finish` on the image layer writers. However, that requires getting the layer names before the writers
2909 : // are finished, so we would potentially need a function like `ImageLayerBatchWriter::get_all_pending_layer_keys` to get all the keys that are
2910 : // in a writer before finalizing the persistent layers. As it stands, we may leave some dangling layers on disk if the check fails.
2911 96 : if let Some(err) = check_valid_layermap(&final_layers) {
2912 0 : bail!("gc-compaction layer map check failed after compaction because {}, compaction result not applied to the layer map due to potential data loss", err);
2913 96 : }
2914 :
2915 : // Between the sanity check and this compaction update, new layers may get flushed, but that is fine because we only
2916 : // operate on L1 layers.
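// Scope the write guard so it is dropped as soon as the layer-map update is applied.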
2917 : {
2918 96 : let mut guard = self.layers.write().await;
2919 96 : guard
2920 96 : .open_mut()?
2921 96 : .finish_gc_compaction(&layer_selection, &compact_to, &self.metrics)
2922 96 : };
2923 96 :
2924 96 : // Schedule an index-only upload to update the `latest_gc_cutoff` in the index_part.json.
2925 96 : // Otherwise, after restart, the index_part only contains the old `latest_gc_cutoff` and
2926 96 : // find_gc_cutoffs will try accessing things below the cutoff. TODO: ideally, this should
2927 96 : // be batched into `schedule_compaction_update`.
2928 96 : let disk_consistent_lsn = self.disk_consistent_lsn.load();
2929 96 : self.schedule_uploads(disk_consistent_lsn, None)?;
2930 : // If a layer gets rewritten during gc-compaction, we need to keep that layer only in `compact_to`, not in
2931 : // `compact_from`.
2932 96 : let compact_from = {
2933 96 : let mut compact_from = Vec::new();
2934 96 : let mut compact_to_set = HashMap::new();
2935 204 : for layer in &compact_to {
2936 108 : compact_to_set.insert(layer.layer_desc().key(), layer);
2937 108 : }
2938 304 : for layer in &layer_selection {
2939 208 : if let Some(to) = compact_to_set.get(&layer.layer_desc().key()) {
2940 0 : tracing::info!(
2941 0 : "skipping delete {} because found same layer key at different generation {}",
2942 : layer, to
2943 : );
2944 208 : } else {
2945 208 : compact_from.push(layer.clone());
2946 208 : }
2947 : }
2948 96 : compact_from
2949 96 : };
2950 96 : self.remote_client
2951 96 : .schedule_compaction_update(&compact_from, &compact_to)?;
2952 :
2953 96 : drop(gc_lock);
2954 96 :
2955 96 : Ok(())
2956 108 : }
2957 : }
2958 :
2959 : struct TimelineAdaptor {
2960 : timeline: Arc<Timeline>,
2961 :
2962 : keyspace: (Lsn, KeySpace),
2963 :
2964 : new_deltas: Vec<ResidentLayer>,
2965 : new_images: Vec<ResidentLayer>,
2966 : layers_to_delete: Vec<Arc<PersistentLayerDesc>>,
2967 : }
2968 :
2969 : impl TimelineAdaptor {
2970 0 : pub fn new(timeline: &Arc<Timeline>, keyspace: (Lsn, KeySpace)) -> Self {
2971 0 : Self {
2972 0 : timeline: timeline.clone(),
2973 0 : keyspace,
2974 0 : new_images: Vec::new(),
2975 0 : new_deltas: Vec::new(),
2976 0 : layers_to_delete: Vec::new(),
2977 0 : }
2978 0 : }
2979 :
2980 0 : pub async fn flush_updates(&mut self) -> Result<(), CompactionError> {
2981 0 : let layers_to_delete = {
2982 0 : let guard = self.timeline.layers.read().await;
2983 0 : self.layers_to_delete
2984 0 : .iter()
2985 0 : .map(|x| guard.get_from_desc(x))
2986 0 : .collect::<Vec<Layer>>()
2987 0 : };
2988 0 : self.timeline
2989 0 : .finish_compact_batch(&self.new_deltas, &self.new_images, &layers_to_delete)
2990 0 : .await?;
2991 :
2992 0 : self.timeline
2993 0 : .upload_new_image_layers(std::mem::take(&mut self.new_images))?;
2994 :
2995 0 : self.new_deltas.clear();
2996 0 : self.layers_to_delete.clear();
2997 0 : Ok(())
2998 0 : }
2999 : }
3000 :
3001 : #[derive(Clone)]
3002 : struct ResidentDeltaLayer(ResidentLayer);
3003 : #[derive(Clone)]
3004 : struct ResidentImageLayer(ResidentLayer);
3005 :
3006 : impl CompactionJobExecutor for TimelineAdaptor {
3007 : type Key = pageserver_api::key::Key;
3008 :
3009 : type Layer = OwnArc<PersistentLayerDesc>;
3010 : type DeltaLayer = ResidentDeltaLayer;
3011 : type ImageLayer = ResidentImageLayer;
3012 :
3013 : type RequestContext = crate::context::RequestContext;
3014 :
3015 0 : fn get_shard_identity(&self) -> &ShardIdentity {
3016 0 : self.timeline.get_shard_identity()
3017 0 : }
3018 :
3019 0 : async fn get_layers(
3020 0 : &mut self,
3021 0 : key_range: &Range<Key>,
3022 0 : lsn_range: &Range<Lsn>,
3023 0 : _ctx: &RequestContext,
3024 0 : ) -> anyhow::Result<Vec<OwnArc<PersistentLayerDesc>>> {
3025 0 : self.flush_updates().await?;
3026 :
3027 0 : let guard = self.timeline.layers.read().await;
3028 0 : let layer_map = guard.layer_map()?;
3029 :
3030 0 : let result = layer_map
3031 0 : .iter_historic_layers()
3032 0 : .filter(|l| {
3033 0 : overlaps_with(&l.lsn_range, lsn_range) && overlaps_with(&l.key_range, key_range)
3034 0 : })
3035 0 : .map(OwnArc)
3036 0 : .collect();
3037 0 : Ok(result)
3038 0 : }
3039 :
3040 0 : async fn get_keyspace(
3041 0 : &mut self,
3042 0 : key_range: &Range<Key>,
3043 0 : lsn: Lsn,
3044 0 : _ctx: &RequestContext,
3045 0 : ) -> anyhow::Result<Vec<Range<Key>>> {
3046 0 : if lsn == self.keyspace.0 {
3047 0 : Ok(pageserver_compaction::helpers::intersect_keyspace(
3048 0 : &self.keyspace.1.ranges,
3049 0 : key_range,
3050 0 : ))
3051 : } else {
3052 : // The current compaction implementation only ever requests the key space
3053 : // at the compaction end LSN.
3054 0 : anyhow::bail!("keyspace not available for requested lsn");
3055 : }
3056 0 : }
3057 :
3058 0 : async fn downcast_delta_layer(
3059 0 : &self,
3060 0 : layer: &OwnArc<PersistentLayerDesc>,
3061 0 : ) -> anyhow::Result<Option<ResidentDeltaLayer>> {
3062 0 : // this is a lot more complex than a simple downcast...
3063 0 : if layer.is_delta() {
3064 0 : let l = {
3065 0 : let guard = self.timeline.layers.read().await;
3066 0 : guard.get_from_desc(layer)
3067 : };
3068 0 : let result = l.download_and_keep_resident().await?;
3069 :
3070 0 : Ok(Some(ResidentDeltaLayer(result)))
3071 : } else {
3072 0 : Ok(None)
3073 : }
3074 0 : }
3075 :
3076 0 : async fn create_image(
3077 0 : &mut self,
3078 0 : lsn: Lsn,
3079 0 : key_range: &Range<Key>,
3080 0 : ctx: &RequestContext,
3081 0 : ) -> anyhow::Result<()> {
3082 0 : Ok(self.create_image_impl(lsn, key_range, ctx).await?)
3083 0 : }
3084 :
3085 0 : async fn create_delta(
3086 0 : &mut self,
3087 0 : lsn_range: &Range<Lsn>,
3088 0 : key_range: &Range<Key>,
3089 0 : input_layers: &[ResidentDeltaLayer],
3090 0 : ctx: &RequestContext,
3091 0 : ) -> anyhow::Result<()> {
3092 0 : debug!("Create new layer {}..{}", lsn_range.start, lsn_range.end);
3093 :
3094 0 : let mut all_entries = Vec::new();
3095 0 : for dl in input_layers.iter() {
3096 0 : all_entries.extend(dl.load_keys(ctx).await?);
3097 : }
3098 :
3099 : // The current stdlib sorting implementation is designed so that it is
3100 : // particularly fast when the slice is made up of already-sorted sub-ranges.
3101 0 : all_entries.sort_by_key(|DeltaEntry { key, lsn, .. }| (*key, *lsn));
3102 :
3103 0 : let mut writer = DeltaLayerWriter::new(
3104 0 : self.timeline.conf,
3105 0 : self.timeline.timeline_id,
3106 0 : self.timeline.tenant_shard_id,
3107 0 : key_range.start,
3108 0 : lsn_range.clone(),
3109 0 : ctx,
3110 0 : )
3111 0 : .await?;
3112 :
3113 0 : let mut dup_values = 0;
3114 0 :
3115 0 : // This iterator walks through all key-value pairs from all the layers
3116 0 : // we're compacting, in key, LSN order.
3117 0 : let mut prev: Option<(Key, Lsn)> = None;
3118 : for &DeltaEntry {
3119 0 : key, lsn, ref val, ..
3120 0 : } in all_entries.iter()
3121 : {
3122 0 : if prev == Some((key, lsn)) {
3123 : // This is a duplicate. Skip it.
3124 : //
3125 : // It can happen if compaction is interrupted after writing some
3126 : // layers but not all, and we are compacting the range again.
3127 : // The calculations in the algorithm assume that there are no
3128 : // duplicates, so the math on targeted file size is likely off,
3129 : // and we will create smaller files than expected.
3130 0 : dup_values += 1;
3131 0 : continue;
3132 0 : }
3133 :
3134 0 : let value = val.load(ctx).await?;
3135 :
3136 0 : writer.put_value(key, lsn, value, ctx).await?;
3137 :
3138 0 : prev = Some((key, lsn));
3139 : }
3140 :
3141 0 : if dup_values > 0 {
3142 0 : warn!("delta layer created with {} duplicate values", dup_values);
3143 0 : }
3144 :
3145 0 : fail_point!("delta-layer-writer-fail-before-finish", |_| {
3146 0 : Err(anyhow::anyhow!(
3147 0 : "failpoint delta-layer-writer-fail-before-finish"
3148 0 : ))
3149 0 : });
3150 :
3151 0 : let (desc, path) = writer.finish(prev.unwrap().0.next(), ctx).await?;
3152 0 : let new_delta_layer =
3153 0 : Layer::finish_creating(self.timeline.conf, &self.timeline, desc, &path)?;
3154 :
3155 0 : self.new_deltas.push(new_delta_layer);
3156 0 : Ok(())
3157 0 : }
3158 :
3159 0 : async fn delete_layer(
3160 0 : &mut self,
3161 0 : layer: &OwnArc<PersistentLayerDesc>,
3162 0 : _ctx: &RequestContext,
3163 0 : ) -> anyhow::Result<()> {
3164 0 : self.layers_to_delete.push(layer.clone().0);
3165 0 : Ok(())
3166 0 : }
3167 : }
3168 :
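// A rough sketch of how a compaction driver can exercise this adaptor (illustrative only; the
// real driver lives in the `pageserver_compaction` crate and its control flow is more involved,
// and the key/LSN bounds below are placeholders):
//
//     async fn drive(exec: &mut TimelineAdaptor, ctx: &RequestContext) -> anyhow::Result<()> {
//         // Discover candidate layers in some key/LSN window.
//         let layers = exec
//             .get_layers(&(Key::MIN..Key::MAX), &(Lsn(0)..Lsn(u64::MAX)), ctx)
//             .await?;
//         // Pick out the delta layers that need to be merged.
//         let mut deltas = Vec::new();
//         for layer in &layers {
//             if let Some(delta) = exec.downcast_delta_layer(layer).await? {
//                 deltas.push(delta);
//             }
//         }
//         // The deltas would then be fed to create_delta()/create_image() to stage new layers;
//         // delete_layer() queues removals and flush_updates() applies both to the layer map
//         // and schedules the uploads.
//         exec.flush_updates().await?;
//         Ok(())
//     }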
3169 : impl TimelineAdaptor {
3170 0 : async fn create_image_impl(
3171 0 : &mut self,
3172 0 : lsn: Lsn,
3173 0 : key_range: &Range<Key>,
3174 0 : ctx: &RequestContext,
3175 0 : ) -> Result<(), CreateImageLayersError> {
3176 0 : let timer = self.timeline.metrics.create_images_time_histo.start_timer();
3177 :
3178 0 : let image_layer_writer = ImageLayerWriter::new(
3179 0 : self.timeline.conf,
3180 0 : self.timeline.timeline_id,
3181 0 : self.timeline.tenant_shard_id,
3182 0 : key_range,
3183 0 : lsn,
3184 0 : ctx,
3185 0 : )
3186 0 : .await?;
3187 :
3188 0 : fail_point!("image-layer-writer-fail-before-finish", |_| {
3189 0 : Err(CreateImageLayersError::Other(anyhow::anyhow!(
3190 0 : "failpoint image-layer-writer-fail-before-finish"
3191 0 : )))
3192 0 : });
3193 :
3194 0 : let keyspace = KeySpace {
3195 0 : ranges: self.get_keyspace(key_range, lsn, ctx).await?,
3196 : };
3197 : // TODO set proper (stateful) start. The create_image_layer_for_rel_blocks function mostly
3198 0 : let start = Key::MIN;
3199 : let ImageLayerCreationOutcome {
3200 0 : image,
3201 : next_start_key: _,
3202 0 : } = self
3203 0 : .timeline
3204 0 : .create_image_layer_for_rel_blocks(
3205 0 : &keyspace,
3206 0 : image_layer_writer,
3207 0 : lsn,
3208 0 : ctx,
3209 0 : key_range.clone(),
3210 0 : start,
3211 0 : IoConcurrency::sequential(),
3212 0 : )
3213 0 : .await?;
3214 :
3215 0 : if let Some(image_layer) = image {
3216 0 : self.new_images.push(image_layer);
3217 0 : }
3218 :
3219 0 : timer.stop_and_record();
3220 0 :
3221 0 : Ok(())
3222 0 : }
3223 : }
3224 :
3225 : impl CompactionRequestContext for crate::context::RequestContext {}
3226 :
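/// A thin newtype around `Arc<T>`, used so that the `pageserver_compaction` traits (such as
/// `CompactionLayer`) can be implemented for `Arc<PersistentLayerDesc>` in this crate; a direct
/// impl on `Arc<PersistentLayerDesc>` would be rejected by the orphan rule.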
3227 : #[derive(Debug, Clone)]
3228 : pub struct OwnArc<T>(pub Arc<T>);
3229 :
3230 : impl<T> Deref for OwnArc<T> {
3231 : type Target = <Arc<T> as Deref>::Target;
3232 0 : fn deref(&self) -> &Self::Target {
3233 0 : &self.0
3234 0 : }
3235 : }
3236 :
3237 : impl<T> AsRef<T> for OwnArc<T> {
3238 0 : fn as_ref(&self) -> &T {
3239 0 : self.0.as_ref()
3240 0 : }
3241 : }
3242 :
3243 : impl CompactionLayer<Key> for OwnArc<PersistentLayerDesc> {
3244 0 : fn key_range(&self) -> &Range<Key> {
3245 0 : &self.key_range
3246 0 : }
3247 0 : fn lsn_range(&self) -> &Range<Lsn> {
3248 0 : &self.lsn_range
3249 0 : }
3250 0 : fn file_size(&self) -> u64 {
3251 0 : self.file_size
3252 0 : }
3253 0 : fn short_id(&self) -> std::string::String {
3254 0 : self.as_ref().short_id().to_string()
3255 0 : }
3256 0 : fn is_delta(&self) -> bool {
3257 0 : self.as_ref().is_delta()
3258 0 : }
3259 : }
3260 :
3261 : impl CompactionLayer<Key> for OwnArc<DeltaLayer> {
3262 0 : fn key_range(&self) -> &Range<Key> {
3263 0 : &self.layer_desc().key_range
3264 0 : }
3265 0 : fn lsn_range(&self) -> &Range<Lsn> {
3266 0 : &self.layer_desc().lsn_range
3267 0 : }
3268 0 : fn file_size(&self) -> u64 {
3269 0 : self.layer_desc().file_size
3270 0 : }
3271 0 : fn short_id(&self) -> std::string::String {
3272 0 : self.layer_desc().short_id().to_string()
3273 0 : }
3274 0 : fn is_delta(&self) -> bool {
3275 0 : true
3276 0 : }
3277 : }
3278 :
3279 : use crate::tenant::timeline::DeltaEntry;
3280 :
3281 : impl CompactionLayer<Key> for ResidentDeltaLayer {
3282 0 : fn key_range(&self) -> &Range<Key> {
3283 0 : &self.0.layer_desc().key_range
3284 0 : }
3285 0 : fn lsn_range(&self) -> &Range<Lsn> {
3286 0 : &self.0.layer_desc().lsn_range
3287 0 : }
3288 0 : fn file_size(&self) -> u64 {
3289 0 : self.0.layer_desc().file_size
3290 0 : }
3291 0 : fn short_id(&self) -> std::string::String {
3292 0 : self.0.layer_desc().short_id().to_string()
3293 0 : }
3294 0 : fn is_delta(&self) -> bool {
3295 0 : true
3296 0 : }
3297 : }
3298 :
3299 : impl CompactionDeltaLayer<TimelineAdaptor> for ResidentDeltaLayer {
3300 : type DeltaEntry<'a> = DeltaEntry<'a>;
3301 :
3302 0 : async fn load_keys(&self, ctx: &RequestContext) -> anyhow::Result<Vec<DeltaEntry<'_>>> {
3303 0 : self.0.get_as_delta(ctx).await?.index_entries(ctx).await
3304 0 : }
3305 : }
3306 :
3307 : impl CompactionLayer<Key> for ResidentImageLayer {
3308 0 : fn key_range(&self) -> &Range<Key> {
3309 0 : &self.0.layer_desc().key_range
3310 0 : }
3311 0 : fn lsn_range(&self) -> &Range<Lsn> {
3312 0 : &self.0.layer_desc().lsn_range
3313 0 : }
3314 0 : fn file_size(&self) -> u64 {
3315 0 : self.0.layer_desc().file_size
3316 0 : }
3317 0 : fn short_id(&self) -> std::string::String {
3318 0 : self.0.layer_desc().short_id().to_string()
3319 0 : }
3320 0 : fn is_delta(&self) -> bool {
3321 0 : false
3322 0 : }
3323 : }
3324 : impl CompactionImageLayer<TimelineAdaptor> for ResidentImageLayer {}
|