Line data Source code
1 : //! New compaction implementation. The algorithm itself is implemented in the
2 : //! compaction crate. This file implements the callbacks and structs that allow
3 : //! the algorithm to drive the process.
4 : //!
5 : //! The old legacy algorithm is implemented directly in `timeline.rs`.
6 :
7 : use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque};
8 : use std::ops::{Deref, Range};
9 : use std::sync::Arc;
10 : use std::time::{Duration, Instant};
11 :
12 : use super::layer_manager::LayerManagerLockHolder;
13 : use super::{
14 : CompactFlags, CompactOptions, CompactionError, CreateImageLayersError, DurationRecorder,
15 : GetVectoredError, ImageLayerCreationMode, LastImageLayerCreationStatus, RecordedDuration,
16 : Timeline,
17 : };
18 :
19 : use crate::pgdatadir_mapping::CollectKeySpaceError;
20 : use crate::tenant::timeline::{DeltaEntry, RepartitionError};
21 : use crate::walredo::RedoAttemptType;
22 : use anyhow::{Context, anyhow};
23 : use bytes::Bytes;
24 : use enumset::EnumSet;
25 : use fail::fail_point;
26 : use futures::FutureExt;
27 : use itertools::Itertools;
28 : use once_cell::sync::Lazy;
29 : use pageserver_api::config::tenant_conf_defaults::DEFAULT_CHECKPOINT_DISTANCE;
30 : use pageserver_api::key::{KEY_SIZE, Key};
31 : use pageserver_api::keyspace::{KeySpace, ShardedRange};
32 : use pageserver_api::models::{CompactInfoResponse, CompactKeyRange};
33 : use pageserver_api::shard::{ShardCount, ShardIdentity, TenantShardId};
34 : use pageserver_compaction::helpers::{fully_contains, overlaps_with};
35 : use pageserver_compaction::interface::*;
36 : use serde::Serialize;
37 : use tokio::sync::{OwnedSemaphorePermit, Semaphore};
38 : use tokio_util::sync::CancellationToken;
39 : use tracing::{Instrument, debug, error, info, info_span, trace, warn};
40 : use utils::critical_timeline;
41 : use utils::id::TimelineId;
42 : use utils::lsn::Lsn;
43 : use wal_decoder::models::record::NeonWalRecord;
44 : use wal_decoder::models::value::Value;
45 :
46 : use crate::context::{AccessStatsBehavior, RequestContext, RequestContextBuilder};
47 : use crate::page_cache;
48 : use crate::statvfs::Statvfs;
49 : use crate::tenant::checks::check_valid_layermap;
50 : use crate::tenant::gc_block::GcBlock;
51 : use crate::tenant::layer_map::LayerMap;
52 : use crate::tenant::remote_timeline_client::WaitCompletionError;
53 : use crate::tenant::remote_timeline_client::index::GcCompactionState;
54 : use crate::tenant::storage_layer::batch_split_writer::{
55 : BatchWriterResult, SplitDeltaLayerWriter, SplitImageLayerWriter,
56 : };
57 : use crate::tenant::storage_layer::filter_iterator::FilterIterator;
58 : use crate::tenant::storage_layer::merge_iterator::MergeIterator;
59 : use crate::tenant::storage_layer::{
60 : AsLayerDesc, LayerVisibilityHint, PersistentLayerDesc, PersistentLayerKey,
61 : ValueReconstructState,
62 : };
63 : use crate::tenant::tasks::log_compaction_error;
64 : use crate::tenant::timeline::{
65 : DeltaLayerWriter, ImageLayerCreationOutcome, ImageLayerWriter, IoConcurrency, Layer,
66 : ResidentLayer, drop_layer_manager_rlock,
67 : };
68 : use crate::tenant::{DeltaLayer, MaybeOffloaded, PageReconstructError};
69 : use crate::virtual_file::{MaybeFatalIo, VirtualFile};
70 :
71 : /// Maximum number of deltas before generating an image layer in bottom-most compaction.
72 : const COMPACTION_DELTA_THRESHOLD: usize = 5;
73 :
74 : /// Ratio of shard-local pages below which we trigger shard ancestor layer rewrites. 0.3 means that
75 : /// a layer is only rewritten when <= 30% of its pages belong to the descendant shard.
76 : ///
77 : /// We choose a value < 0.5 to avoid rewriting all visible layers every time we do a power-of-two
78 : /// shard split, which gets expensive for large tenants.
79 : const ANCESTOR_COMPACTION_REWRITE_THRESHOLD: f64 = 0.3;
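//
// Illustrative sketch only (the actual call site lives further down in `compact_shard_ancestors`):
// a descendant shard would rewrite an ancestor layer roughly when
//
//     layer_local_page_count > 0
//         && (layer_local_page_count as f64 / layer_raw_page_count as f64)
//             <= ANCESTOR_COMPACTION_REWRITE_THRESHOLD
//
// i.e. when at most ~30% of the layer's pages still belong to this shard, so rewriting reclaims
// most of the layer's space.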
80 :
81 : #[derive(Default, Debug, Clone, Copy, Hash, PartialEq, Eq, Serialize)]
82 : pub struct GcCompactionJobId(pub usize);
83 :
84 : impl std::fmt::Display for GcCompactionJobId {
85 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
86 0 : write!(f, "{}", self.0)
87 0 : }
88 : }
89 :
90 : pub struct GcCompactionCombinedSettings {
91 : pub gc_compaction_enabled: bool,
92 : pub gc_compaction_verification: bool,
93 : pub gc_compaction_initial_threshold_kb: u64,
94 : pub gc_compaction_ratio_percent: u64,
95 : }
96 :
97 : #[derive(Debug, Clone)]
98 : pub enum GcCompactionQueueItem {
99 : MetaJob {
100 : /// Compaction options
101 : options: CompactOptions,
102 : /// Whether the compaction is triggered automatically (determines whether we need to update L2 LSN)
103 : auto: bool,
104 : },
105 : SubCompactionJob {
106 : i: usize,
107 : total: usize,
108 : options: CompactOptions,
109 : },
110 : Notify(GcCompactionJobId, Option<Lsn>),
111 : }
112 :
113 : /// Statistics for a gc-compaction meta job, which contains several sub-compaction jobs.
114 : #[derive(Debug, Clone, Serialize, Default)]
115 : pub struct GcCompactionMetaStatistics {
116 : /// The total number of sub compaction jobs.
117 : pub total_sub_compaction_jobs: usize,
118 : /// The total number of sub compaction jobs that failed.
119 : pub failed_sub_compaction_jobs: usize,
120 : /// The total number of sub compaction jobs that succeeded.
121 : pub succeeded_sub_compaction_jobs: usize,
122 : /// The layer size before compaction.
123 : pub before_compaction_layer_size: u64,
124 : /// The layer size after compaction.
125 : pub after_compaction_layer_size: u64,
126 : /// The start time of the meta job.
127 : pub start_time: Option<chrono::DateTime<chrono::Utc>>,
128 : /// The end time of the meta job.
129 : pub end_time: Option<chrono::DateTime<chrono::Utc>>,
130 : /// The duration of the meta job.
131 : pub duration_secs: f64,
132 : /// The id of the meta job.
133 : pub meta_job_id: GcCompactionJobId,
134 : /// The LSN below which the layers are compacted, used to compute the statistics.
135 : pub below_lsn: Lsn,
136 : /// The retention ratio of the meta job (after_compaction_layer_size / before_compaction_layer_size)
137 : pub retention_ratio: f64,
138 : }
139 :
140 : impl GcCompactionMetaStatistics {
141 0 : fn finalize(&mut self) {
142 0 : let end_time = chrono::Utc::now();
143 0 : if let Some(start_time) = self.start_time {
144 0 : if end_time > start_time {
145 0 : let delta = end_time - start_time;
146 0 : if let Ok(std_dur) = delta.to_std() {
147 0 : self.duration_secs = std_dur.as_secs_f64();
148 0 : }
149 0 : }
150 0 : }
151 0 : self.retention_ratio = self.after_compaction_layer_size as f64
152 0 : / (self.before_compaction_layer_size as f64 + 1.0);
153 0 : self.end_time = Some(end_time);
154 0 : }
155 : }
156 :
157 : impl GcCompactionQueueItem {
158 0 : pub fn into_compact_info_resp(
159 0 : self,
160 0 : id: GcCompactionJobId,
161 0 : running: bool,
162 0 : ) -> Option<CompactInfoResponse> {
163 0 : match self {
164 0 : GcCompactionQueueItem::MetaJob { options, .. } => Some(CompactInfoResponse {
165 0 : compact_key_range: options.compact_key_range,
166 0 : compact_lsn_range: options.compact_lsn_range,
167 0 : sub_compaction: options.sub_compaction,
168 0 : running,
169 0 : job_id: id.0,
170 0 : }),
171 0 : GcCompactionQueueItem::SubCompactionJob { options, .. } => Some(CompactInfoResponse {
172 0 : compact_key_range: options.compact_key_range,
173 0 : compact_lsn_range: options.compact_lsn_range,
174 0 : sub_compaction: options.sub_compaction,
175 0 : running,
176 0 : job_id: id.0,
177 0 : }),
178 0 : GcCompactionQueueItem::Notify(_, _) => None,
179 : }
180 0 : }
181 : }
182 :
183 : #[derive(Default)]
184 : struct GcCompactionGuardItems {
185 : notify: Option<tokio::sync::oneshot::Sender<()>>,
186 : permit: Option<OwnedSemaphorePermit>,
187 : }
188 :
189 : struct GcCompactionQueueInner {
190 : running: Option<(GcCompactionJobId, GcCompactionQueueItem)>,
191 : queued: VecDeque<(GcCompactionJobId, GcCompactionQueueItem)>,
192 : guards: HashMap<GcCompactionJobId, GcCompactionGuardItems>,
193 : last_id: GcCompactionJobId,
194 : meta_statistics: Option<GcCompactionMetaStatistics>,
195 : }
196 :
197 : impl GcCompactionQueueInner {
198 0 : fn next_id(&mut self) -> GcCompactionJobId {
199 0 : let id = self.last_id;
200 0 : self.last_id = GcCompactionJobId(id.0 + 1);
201 0 : id
202 0 : }
203 : }
204 :
205 : /// A structure to store gc_compaction jobs.
206 : pub struct GcCompactionQueue {
207 : /// All items in the queue, and the currently-running job.
208 : inner: std::sync::Mutex<GcCompactionQueueInner>,
209 : /// Ensure only one thread is consuming the queue.
210 : consumer_lock: tokio::sync::Mutex<()>,
211 : }
212 :
213 0 : static CONCURRENT_GC_COMPACTION_TASKS: Lazy<Arc<Semaphore>> = Lazy::new(|| {
214 : // Only allow one timeline on one pageserver to run gc compaction at a time.
215 0 : Arc::new(Semaphore::new(1))
216 0 : });
217 :
218 : impl GcCompactionQueue {
219 0 : pub fn new() -> Self {
220 0 : GcCompactionQueue {
221 0 : inner: std::sync::Mutex::new(GcCompactionQueueInner {
222 0 : running: None,
223 0 : queued: VecDeque::new(),
224 0 : guards: HashMap::new(),
225 0 : last_id: GcCompactionJobId(0),
226 0 : meta_statistics: None,
227 0 : }),
228 0 : consumer_lock: tokio::sync::Mutex::new(()),
229 0 : }
230 0 : }
231 :
232 0 : pub fn cancel_scheduled(&self) {
233 0 : let mut guard = self.inner.lock().unwrap();
234 0 : guard.queued.clear();
235 : // TODO: if there is a running job, we should keep the gc guard. However, currently, the cancel
236 : // API is only used for testing purposes, so we can drop everything here.
237 0 : guard.guards.clear();
238 0 : }
239 :
240 : /// Schedule a manual compaction job.
241 0 : pub fn schedule_manual_compaction(
242 0 : &self,
243 0 : options: CompactOptions,
244 0 : notify: Option<tokio::sync::oneshot::Sender<()>>,
245 0 : ) -> GcCompactionJobId {
246 0 : let mut guard = self.inner.lock().unwrap();
247 0 : let id = guard.next_id();
248 0 : guard.queued.push_back((
249 0 : id,
250 0 : GcCompactionQueueItem::MetaJob {
251 0 : options,
252 0 : auto: false,
253 0 : },
254 0 : ));
255 0 : guard.guards.entry(id).or_default().notify = notify;
256 0 : info!("scheduled compaction job id={}", id);
257 0 : id
258 0 : }
259 :
260 : /// Schedule an auto compaction job.
261 0 : fn schedule_auto_compaction(
262 0 : &self,
263 0 : options: CompactOptions,
264 0 : permit: OwnedSemaphorePermit,
265 0 : ) -> GcCompactionJobId {
266 0 : let mut guard = self.inner.lock().unwrap();
267 0 : let id = guard.next_id();
268 0 : guard.queued.push_back((
269 0 : id,
270 0 : GcCompactionQueueItem::MetaJob {
271 0 : options,
272 0 : auto: true,
273 0 : },
274 0 : ));
275 0 : guard.guards.entry(id).or_default().permit = Some(permit);
276 0 : id
277 0 : }
278 :
279 : /// Trigger an auto compaction.
280 0 : pub async fn trigger_auto_compaction(
281 0 : &self,
282 0 : timeline: &Arc<Timeline>,
283 0 : ) -> Result<(), CompactionError> {
284 : let GcCompactionCombinedSettings {
285 0 : gc_compaction_enabled,
286 0 : gc_compaction_initial_threshold_kb,
287 0 : gc_compaction_ratio_percent,
288 : ..
289 0 : } = timeline.get_gc_compaction_settings();
290 0 : if !gc_compaction_enabled {
291 0 : return Ok(());
292 0 : }
293 0 : if self.remaining_jobs_num() > 0 {
294 : // Only schedule auto compaction when the queue is empty
295 0 : return Ok(());
296 0 : }
297 0 : if timeline.ancestor_timeline().is_some() {
298 : // Do not trigger auto compaction for child timelines. We haven't tested
299 : // it enough in staging yet.
300 0 : return Ok(());
301 0 : }
302 0 : if timeline.get_gc_compaction_watermark() == Lsn::INVALID {
303 : // If the gc watermark is not set, we don't need to trigger auto compaction.
304 : // This check is the same as in `gc_compaction_split_jobs` but we don't log
305 : // here and we can also skip the computation of the trigger condition earlier.
306 0 : return Ok(());
307 0 : }
308 :
309 0 : let Ok(permit) = CONCURRENT_GC_COMPACTION_TASKS.clone().try_acquire_owned() else {
310 : // Only allow one compaction run at a time. TODO: As we do `try_acquire_owned`, we cannot ensure
311 : // the fairness of the lock across timelines. We should listen for both `acquire` and `l0_compaction_trigger`
312 : // to ensure fairness while avoiding starvation of other tasks.
313 0 : return Ok(());
314 : };
315 :
316 0 : let gc_compaction_state = timeline.get_gc_compaction_state();
317 0 : let l2_lsn = gc_compaction_state
318 0 : .map(|x| x.last_completed_lsn)
319 0 : .unwrap_or(Lsn::INVALID);
320 :
321 0 : let layers = {
322 0 : let guard = timeline
323 0 : .layers
324 0 : .read(LayerManagerLockHolder::GetLayerMapInfo)
325 0 : .await;
326 0 : let layer_map = guard.layer_map()?;
327 0 : layer_map.iter_historic_layers().collect_vec()
328 : };
329 0 : let mut l2_size: u64 = 0;
330 0 : let mut l1_size = 0;
331 0 : let gc_cutoff = *timeline.get_applied_gc_cutoff_lsn();
332 0 : for layer in layers {
333 0 : if layer.lsn_range.start <= l2_lsn {
334 0 : l2_size += layer.file_size();
335 0 : } else if layer.lsn_range.start <= gc_cutoff {
336 0 : l1_size += layer.file_size();
337 0 : }
338 : }
339 :
340 0 : fn trigger_compaction(
341 0 : l1_size: u64,
342 0 : l2_size: u64,
343 0 : gc_compaction_initial_threshold_kb: u64,
344 0 : gc_compaction_ratio_percent: u64,
345 0 : ) -> bool {
346 : const AUTO_TRIGGER_LIMIT: u64 = 150 * 1024 * 1024 * 1024; // 150GB
347 0 : if l1_size + l2_size >= AUTO_TRIGGER_LIMIT {
348 : // Do not auto-trigger when physical size >= 150GB
349 0 : return false;
350 0 : }
351 : // initial trigger
352 0 : if l2_size == 0 && l1_size >= gc_compaction_initial_threshold_kb * 1024 {
353 0 : info!(
354 0 : "trigger auto-compaction because l1_size={} >= gc_compaction_initial_threshold_kb={}",
355 : l1_size, gc_compaction_initial_threshold_kb
356 : );
357 0 : return true;
358 0 : }
359 : // size ratio trigger
360 0 : if l2_size == 0 {
361 0 : return false;
362 0 : }
363 0 : if l1_size as f64 / l2_size as f64 >= (gc_compaction_ratio_percent as f64 / 100.0) {
364 0 : info!(
365 0 : "trigger auto-compaction because l1_size={} / l2_size={} > gc_compaction_ratio_percent={}",
366 : l1_size, l2_size, gc_compaction_ratio_percent
367 : );
368 0 : return true;
369 0 : }
370 0 : false
371 0 : }
372 :
373 0 : if trigger_compaction(
374 0 : l1_size,
375 0 : l2_size,
376 0 : gc_compaction_initial_threshold_kb,
377 0 : gc_compaction_ratio_percent,
378 : ) {
379 0 : self.schedule_auto_compaction(
380 : CompactOptions {
381 : flags: {
382 0 : let mut flags = EnumSet::new();
383 0 : flags |= CompactFlags::EnhancedGcBottomMostCompaction;
384 0 : if timeline.get_compaction_l0_first() {
385 0 : flags |= CompactFlags::YieldForL0;
386 0 : }
387 0 : flags
388 : },
389 : sub_compaction: true,
390 : // Only auto-trigger gc-compaction over the data keyspace due to concerns in
391 : // https://github.com/neondatabase/neon/issues/11318.
392 0 : compact_key_range: Some(CompactKeyRange {
393 0 : start: Key::MIN,
394 0 : end: Key::metadata_key_range().start,
395 0 : }),
396 0 : compact_lsn_range: None,
397 0 : sub_compaction_max_job_size_mb: None,
398 : },
399 0 : permit,
400 : );
401 0 : info!(
402 0 : "scheduled auto gc-compaction: l1_size={}, l2_size={}, l2_lsn={}, gc_cutoff={}",
403 : l1_size, l2_size, l2_lsn, gc_cutoff
404 : );
405 : } else {
406 0 : debug!(
407 0 : "did not trigger auto gc-compaction: l1_size={}, l2_size={}, l2_lsn={}, gc_cutoff={}",
408 : l1_size, l2_size, l2_lsn, gc_cutoff
409 : );
410 : }
411 0 : Ok(())
412 0 : }
413 :
414 0 : async fn collect_layer_below_lsn(
415 0 : &self,
416 0 : timeline: &Arc<Timeline>,
417 0 : lsn: Lsn,
418 0 : ) -> Result<u64, CompactionError> {
419 0 : let guard = timeline
420 0 : .layers
421 0 : .read(LayerManagerLockHolder::GetLayerMapInfo)
422 0 : .await;
423 0 : let layer_map = guard.layer_map()?;
424 0 : let layers = layer_map.iter_historic_layers().collect_vec();
425 0 : let mut size = 0;
426 0 : for layer in layers {
427 0 : if layer.lsn_range.start <= lsn {
428 0 : size += layer.file_size();
429 0 : }
430 : }
431 0 : Ok(size)
432 0 : }
433 :
434 : /// Notify the caller that the job has finished and unblock GC.
435 0 : fn notify_and_unblock(&self, id: GcCompactionJobId) {
436 0 : info!("compaction job id={} finished", id);
437 0 : let mut guard = self.inner.lock().unwrap();
438 0 : if let Some(items) = guard.guards.remove(&id) {
439 0 : if let Some(tx) = items.notify {
440 0 : let _ = tx.send(());
441 0 : }
442 0 : }
443 0 : if let Some(ref meta_statistics) = guard.meta_statistics {
444 0 : if meta_statistics.meta_job_id == id {
445 0 : if let Ok(stats) = serde_json::to_string(&meta_statistics) {
446 0 : info!(
447 0 : "gc-compaction meta statistics for job id = {}: {}",
448 : id, stats
449 : );
450 0 : }
451 0 : }
452 0 : }
453 0 : }
454 :
455 0 : fn clear_running_job(&self) {
456 0 : let mut guard = self.inner.lock().unwrap();
457 0 : guard.running = None;
458 0 : }
459 :
460 0 : async fn handle_sub_compaction(
461 0 : &self,
462 0 : id: GcCompactionJobId,
463 0 : options: CompactOptions,
464 0 : timeline: &Arc<Timeline>,
465 0 : auto: bool,
466 0 : ) -> Result<(), CompactionError> {
467 0 : info!(
468 0 : "running scheduled enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs"
469 : );
470 0 : let res = timeline
471 0 : .gc_compaction_split_jobs(
472 0 : GcCompactJob::from_compact_options(options.clone()),
473 0 : options.sub_compaction_max_job_size_mb,
474 0 : )
475 0 : .await;
476 0 : let jobs = match res {
477 0 : Ok(jobs) => jobs,
478 0 : Err(err) => {
479 0 : warn!("cannot split gc-compaction jobs: {}, unblocked gc", err);
480 0 : self.notify_and_unblock(id);
481 0 : return Err(err);
482 : }
483 : };
484 0 : if jobs.is_empty() {
485 0 : info!("no jobs to run, skipping scheduled compaction task");
486 0 : self.notify_and_unblock(id);
487 : } else {
488 0 : let jobs_len = jobs.len();
489 0 : let mut pending_tasks = Vec::new();
490 : // gc-compaction might pick more layers or fewer layers to compact. The L2 LSN does not need to be accurate.
491 : // And therefore, we simply assume the maximum LSN of all jobs is the expected L2 LSN.
492 0 : let expected_l2_lsn = jobs
493 0 : .iter()
494 0 : .map(|job| job.compact_lsn_range.end)
495 0 : .max()
496 0 : .unwrap();
497 0 : for (i, job) in jobs.into_iter().enumerate() {
498 : // Unfortunately we need to convert the `GcCompactJob` back to `CompactionOptions`
499 : // until we do further refactoring to allow calling `compact_with_gc` directly.
500 0 : let mut flags: EnumSet<CompactFlags> = EnumSet::default();
501 0 : flags |= CompactFlags::EnhancedGcBottomMostCompaction;
502 0 : if job.dry_run {
503 0 : flags |= CompactFlags::DryRun;
504 0 : }
505 0 : if options.flags.contains(CompactFlags::YieldForL0) {
506 0 : flags |= CompactFlags::YieldForL0;
507 0 : }
508 0 : let options = CompactOptions {
509 0 : flags,
510 0 : sub_compaction: false,
511 0 : compact_key_range: Some(job.compact_key_range.into()),
512 0 : compact_lsn_range: Some(job.compact_lsn_range.into()),
513 0 : sub_compaction_max_job_size_mb: None,
514 0 : };
515 0 : pending_tasks.push(GcCompactionQueueItem::SubCompactionJob {
516 0 : options,
517 0 : i,
518 0 : total: jobs_len,
519 0 : });
520 : }
521 :
522 0 : if !auto {
523 0 : pending_tasks.push(GcCompactionQueueItem::Notify(id, None));
524 0 : } else {
525 0 : pending_tasks.push(GcCompactionQueueItem::Notify(id, Some(expected_l2_lsn)));
526 0 : }
527 :
528 0 : let layer_size = self
529 0 : .collect_layer_below_lsn(timeline, expected_l2_lsn)
530 0 : .await?;
531 :
532 : {
533 0 : let mut guard = self.inner.lock().unwrap();
534 0 : let mut tasks = Vec::new();
535 0 : for task in pending_tasks {
536 0 : let id = guard.next_id();
537 0 : tasks.push((id, task));
538 0 : }
539 0 : tasks.reverse();
540 0 : for item in tasks {
541 0 : guard.queued.push_front(item);
542 0 : }
543 0 : guard.meta_statistics = Some(GcCompactionMetaStatistics {
544 0 : meta_job_id: id,
545 0 : start_time: Some(chrono::Utc::now()),
546 0 : before_compaction_layer_size: layer_size,
547 0 : below_lsn: expected_l2_lsn,
548 0 : total_sub_compaction_jobs: jobs_len,
549 0 : ..Default::default()
550 0 : });
551 : }
552 :
553 0 : info!(
554 0 : "scheduled enhanced gc bottom-most compaction with sub-compaction, split into {} jobs",
555 : jobs_len
556 : );
557 : }
558 0 : Ok(())
559 0 : }
560 :
561 : /// Take a job from the queue and process it. Returns whether there are still pending tasks.
562 0 : pub async fn iteration(
563 0 : &self,
564 0 : cancel: &CancellationToken,
565 0 : ctx: &RequestContext,
566 0 : gc_block: &GcBlock,
567 0 : timeline: &Arc<Timeline>,
568 0 : ) -> Result<CompactionOutcome, CompactionError> {
569 0 : let res = self.iteration_inner(cancel, ctx, gc_block, timeline).await;
570 0 : if let Err(err) = &res {
571 0 : log_compaction_error(err, None, cancel.is_cancelled(), true);
572 0 : }
573 0 : match res {
574 0 : Ok(res) => Ok(res),
575 0 : Err(CompactionError::ShuttingDown) => Err(CompactionError::ShuttingDown),
576 : Err(CompactionError::Other(_)) => {
577 : // There are some cases where traditional gc might collect some layer
578 : // files, leaving gc-compaction unable to read the full history of the key.
579 : // This needs to be resolved in the long-term by improving the compaction
580 : // process. For now, let's simply avoid letting such errors trigger the
581 : // circuit breaker.
582 0 : Ok(CompactionOutcome::Skipped)
583 : }
584 : }
585 0 : }
586 :
587 0 : async fn iteration_inner(
588 0 : &self,
589 0 : cancel: &CancellationToken,
590 0 : ctx: &RequestContext,
591 0 : gc_block: &GcBlock,
592 0 : timeline: &Arc<Timeline>,
593 0 : ) -> Result<CompactionOutcome, CompactionError> {
594 0 : let Ok(_one_op_at_a_time_guard) = self.consumer_lock.try_lock() else {
595 0 : return Err(CompactionError::Other(anyhow::anyhow!(
596 0 : "cannot run gc-compaction because another gc-compaction is running. This should not happen because we only call this function from the gc-compaction queue."
597 0 : )));
598 : };
599 : let has_pending_tasks;
600 0 : let mut yield_for_l0 = false;
601 0 : let Some((id, item)) = ({
602 0 : let mut guard = self.inner.lock().unwrap();
603 0 : if let Some((id, item)) = guard.queued.pop_front() {
604 0 : guard.running = Some((id, item.clone()));
605 0 : has_pending_tasks = !guard.queued.is_empty();
606 0 : Some((id, item))
607 : } else {
608 0 : has_pending_tasks = false;
609 0 : None
610 : }
611 : }) else {
612 0 : self.trigger_auto_compaction(timeline).await?;
613 : // Always yield after triggering auto-compaction. Gc-compaction is a low-priority task and we
614 : // have not implemented a preemption mechanism yet. We always want to yield to more important
615 : // tasks if there are any.
616 0 : return Ok(CompactionOutcome::Done);
617 : };
618 0 : match item {
619 0 : GcCompactionQueueItem::MetaJob { options, auto } => {
620 0 : if !options
621 0 : .flags
622 0 : .contains(CompactFlags::EnhancedGcBottomMostCompaction)
623 : {
624 0 : warn!(
625 0 : "ignoring scheduled compaction task: scheduled task must be gc compaction: {:?}",
626 : options
627 : );
628 0 : } else if options.sub_compaction {
629 0 : info!(
630 0 : "running scheduled enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs"
631 : );
632 0 : self.handle_sub_compaction(id, options, timeline, auto)
633 0 : .await?;
634 : } else {
635 : // Auto compaction always enables sub-compaction so we don't need to handle update_l2_lsn
636 : // in this branch.
637 0 : let _gc_guard = match gc_block.start().await {
638 0 : Ok(guard) => guard,
639 0 : Err(e) => {
640 0 : self.notify_and_unblock(id);
641 0 : self.clear_running_job();
642 0 : return Err(CompactionError::Other(anyhow!(
643 0 : "cannot run gc-compaction because gc is blocked: {}",
644 0 : e
645 0 : )));
646 : }
647 : };
648 0 : let res = timeline.compact_with_options(cancel, options, ctx).await;
649 0 : let compaction_result = match res {
650 0 : Ok(res) => res,
651 0 : Err(err) => {
652 0 : warn!(%err, "failed to run gc-compaction");
653 0 : self.notify_and_unblock(id);
654 0 : self.clear_running_job();
655 0 : return Err(err);
656 : }
657 : };
658 0 : if compaction_result == CompactionOutcome::YieldForL0 {
659 0 : yield_for_l0 = true;
660 0 : }
661 : }
662 : }
663 0 : GcCompactionQueueItem::SubCompactionJob { options, i, total } => {
664 : // TODO: error handling, clear the queue if any task fails?
665 0 : let _gc_guard = match gc_block.start().await {
666 0 : Ok(guard) => guard,
667 0 : Err(e) => {
668 0 : self.clear_running_job();
669 0 : return Err(CompactionError::Other(anyhow!(
670 0 : "cannot run gc-compaction because gc is blocked: {}",
671 0 : e
672 0 : )));
673 : }
674 : };
675 0 : info!("running gc-compaction subcompaction job {}/{}", i, total);
676 0 : let res = timeline.compact_with_options(cancel, options, ctx).await;
677 0 : let compaction_result = match res {
678 0 : Ok(res) => res,
679 0 : Err(err) => {
680 0 : warn!(%err, "failed to run gc-compaction subcompaction job");
681 0 : self.clear_running_job();
682 0 : let mut guard = self.inner.lock().unwrap();
683 0 : if let Some(ref mut meta_statistics) = guard.meta_statistics {
684 0 : meta_statistics.failed_sub_compaction_jobs += 1;
685 0 : }
686 0 : return Err(err);
687 : }
688 : };
689 0 : if compaction_result == CompactionOutcome::YieldForL0 {
690 0 : // We will permanently give up a task if we yield for L0 compaction: the preempted subcompaction job won't be running
691 0 : // again. This ensures that we don't keep doing duplicated work within gc-compaction. Not directly returning here because
692 0 : // we need to clean things up before returning from the function.
693 0 : yield_for_l0 = true;
694 0 : }
695 : {
696 0 : let mut guard = self.inner.lock().unwrap();
697 0 : if let Some(ref mut meta_statistics) = guard.meta_statistics {
698 0 : meta_statistics.succeeded_sub_compaction_jobs += 1;
699 0 : }
700 : }
701 : }
702 0 : GcCompactionQueueItem::Notify(id, l2_lsn) => {
703 0 : let below_lsn = {
704 0 : let mut guard = self.inner.lock().unwrap();
705 0 : if let Some(ref mut meta_statistics) = guard.meta_statistics {
706 0 : meta_statistics.below_lsn
707 : } else {
708 0 : Lsn::INVALID
709 : }
710 : };
711 0 : let layer_size = if below_lsn != Lsn::INVALID {
712 0 : self.collect_layer_below_lsn(timeline, below_lsn).await?
713 : } else {
714 0 : 0
715 : };
716 : {
717 0 : let mut guard = self.inner.lock().unwrap();
718 0 : if let Some(ref mut meta_statistics) = guard.meta_statistics {
719 0 : meta_statistics.after_compaction_layer_size = layer_size;
720 0 : meta_statistics.finalize();
721 0 : }
722 : }
723 0 : self.notify_and_unblock(id);
724 0 : if let Some(l2_lsn) = l2_lsn {
725 0 : let current_l2_lsn = timeline
726 0 : .get_gc_compaction_state()
727 0 : .map(|x| x.last_completed_lsn)
728 0 : .unwrap_or(Lsn::INVALID);
729 0 : if l2_lsn >= current_l2_lsn {
730 0 : info!("l2_lsn updated to {}", l2_lsn);
731 0 : timeline
732 0 : .update_gc_compaction_state(GcCompactionState {
733 0 : last_completed_lsn: l2_lsn,
734 0 : })
735 0 : .map_err(CompactionError::Other)?;
736 : } else {
737 0 : warn!(
738 0 : "l2_lsn updated to {} but it is less than the current l2_lsn {}",
739 : l2_lsn, current_l2_lsn
740 : );
741 : }
742 0 : }
743 : }
744 : }
745 0 : self.clear_running_job();
746 0 : Ok(if yield_for_l0 {
747 0 : tracing::info!("give up gc-compaction: yield for L0 compaction");
748 0 : CompactionOutcome::YieldForL0
749 0 : } else if has_pending_tasks {
750 0 : CompactionOutcome::Pending
751 : } else {
752 0 : CompactionOutcome::Done
753 : })
754 0 : }
755 :
756 : #[allow(clippy::type_complexity)]
757 0 : pub fn remaining_jobs(
758 0 : &self,
759 0 : ) -> (
760 0 : Option<(GcCompactionJobId, GcCompactionQueueItem)>,
761 0 : VecDeque<(GcCompactionJobId, GcCompactionQueueItem)>,
762 0 : ) {
763 0 : let guard = self.inner.lock().unwrap();
764 0 : (guard.running.clone(), guard.queued.clone())
765 0 : }
766 :
767 0 : pub fn remaining_jobs_num(&self) -> usize {
768 0 : let guard = self.inner.lock().unwrap();
769 0 : guard.queued.len() + if guard.running.is_some() { 1 } else { 0 }
770 0 : }
771 : }
772 :
773 : /// A job description for the gc-compaction job. This structure describes the rectangle range that the job will
774 : /// process. The exact layers that need to be compacted/rewritten will be generated when `compact_with_gc` gets
775 : /// called.
776 : #[derive(Debug, Clone)]
777 : pub(crate) struct GcCompactJob {
778 : pub dry_run: bool,
779 : /// The key range to be compacted. The compaction algorithm will only regenerate key-value pairs within this range
780 : /// [left inclusive, right exclusive), and other pairs will be rewritten into new files if necessary.
781 : pub compact_key_range: Range<Key>,
782 : /// The LSN range to be compacted. The compaction algorithm will use this range to determine the layers to be
783 : /// selected for the compaction, and it does not guarantee the generated layers will have exactly the same LSN range
784 : /// as specified here. The true range being compacted is `min_lsn/max_lsn` in [`GcCompactionJobDescription`].
785 : /// min_lsn will always be <= the lower bound specified here, and max_lsn will always be >= the upper bound specified here.
786 : pub compact_lsn_range: Range<Lsn>,
787 : }
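// For illustration only (a hypothetical value; real jobs are built via `from_compact_options`
// below): the auto-trigger above effectively schedules a job whose "rectangle" is roughly
//
//     GcCompactJob {
//         dry_run: false,
//         compact_key_range: Key::MIN..Key::metadata_key_range().start,
//         compact_lsn_range: Lsn::INVALID..Lsn::MAX,
//     }
//
// i.e. the whole data keyspace across all LSNs, which is then split into sub-compaction jobs.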
788 :
789 : impl GcCompactJob {
790 28 : pub fn from_compact_options(options: CompactOptions) -> Self {
791 : GcCompactJob {
792 28 : dry_run: options.flags.contains(CompactFlags::DryRun),
793 28 : compact_key_range: options
794 28 : .compact_key_range
795 28 : .map(|x| x.into())
796 28 : .unwrap_or(Key::MIN..Key::MAX),
797 28 : compact_lsn_range: options
798 28 : .compact_lsn_range
799 28 : .map(|x| x.into())
800 28 : .unwrap_or(Lsn::INVALID..Lsn::MAX),
801 : }
802 28 : }
803 : }
804 :
805 : /// A job description for the gc-compaction job. This structure is generated when `compact_with_gc` is called
806 : /// and contains the exact layers we want to compact.
807 : pub struct GcCompactionJobDescription {
808 : /// All layers to read in the compaction job
809 : selected_layers: Vec<Layer>,
810 : /// GC cutoff of the job. This is the lowest LSN that will be accessed by the read/GC path and we need to
811 : /// keep all deltas <= this LSN or generate an image == this LSN.
812 : gc_cutoff: Lsn,
813 : /// LSNs to retain for the job. Read path will use this LSN so we need to keep deltas <= this LSN or
814 : /// generate an image == this LSN.
815 : retain_lsns_below_horizon: Vec<Lsn>,
816 : /// Maximum layer LSN processed in this compaction, that is max(end_lsn of layers). Exclusive. All data
817 : /// \>= this LSN will be kept and will not be rewritten.
818 : max_layer_lsn: Lsn,
819 : /// Minimum layer LSN processed in this compaction, that is min(start_lsn of layers). Inclusive.
820 : /// All access below (strictly lower than, `<`) this LSN will be routed through the normal read path instead of
821 : /// k-merge within gc-compaction.
822 : min_layer_lsn: Lsn,
823 : /// Only compact layers overlapping with this range.
824 : compaction_key_range: Range<Key>,
825 : /// When partial compaction is enabled, these layers need to be rewritten to ensure no overlap.
826 : /// This field is here solely for debugging. The field will not be read once the compaction
827 : /// description is generated.
828 : rewrite_layers: Vec<Arc<PersistentLayerDesc>>,
829 : }
830 :
831 : /// The result of bottom-most compaction for a single key at each LSN.
832 : #[derive(Debug)]
833 : #[cfg_attr(test, derive(PartialEq))]
834 : pub struct KeyLogAtLsn(pub Vec<(Lsn, Value)>);
835 :
836 : /// The result of bottom-most compaction.
837 : #[derive(Debug)]
838 : #[cfg_attr(test, derive(PartialEq))]
839 : pub(crate) struct KeyHistoryRetention {
840 : /// Stores logs to reconstruct the value at the given LSN, that is to say, logs <= LSN or image == LSN.
841 : pub(crate) below_horizon: Vec<(Lsn, KeyLogAtLsn)>,
842 : /// Stores logs to reconstruct the value at any LSN above the horizon, that is to say, log > LSN.
843 : pub(crate) above_horizon: KeyLogAtLsn,
844 : }
845 :
846 : impl KeyHistoryRetention {
847 : /// Hack: skip the delta layer if we would produce a layer with the same key-lsn range.
848 : ///
849 : /// This can happen if we have removed some deltas in "the middle" of some existing layer's key-lsn-range.
850 : /// For example, consider the case where a single delta with range [0x10,0x50) exists.
851 : /// And we have branches at LSN 0x10, 0x20, 0x30.
852 : /// Then we delete branch @ 0x20.
853 : /// Bottom-most compaction may now delete the delta [0x20,0x30).
854 : /// And that wouldn't change the shape of the layer.
855 : ///
856 : /// Note that bottom-most-gc-compaction never _adds_ new data in that case, only removes.
857 : ///
858 : /// `discard_key` will only be called when the writer reaches its target (instead of for every key), so it's fine to grab a lock inside.
859 37 : async fn discard_key(key: &PersistentLayerKey, tline: &Arc<Timeline>, dry_run: bool) -> bool {
860 37 : if dry_run {
861 0 : return true;
862 37 : }
863 37 : if LayerMap::is_l0(&key.key_range, key.is_delta) {
864 : // gc-compaction should not produce L0 deltas, otherwise it will break the layer order.
865 : // We should ignore such layers.
866 0 : return true;
867 37 : }
868 : let layer_generation;
869 : {
870 37 : let guard = tline.layers.read(LayerManagerLockHolder::Compaction).await;
871 37 : if !guard.contains_key(key) {
872 26 : return false;
873 11 : }
874 11 : layer_generation = guard.get_from_key(key).metadata().generation;
875 : }
876 11 : if layer_generation == tline.generation {
877 11 : info!(
878 : key=%key,
879 : ?layer_generation,
880 0 : "discard layer due to duplicated layer key in the same generation",
881 : );
882 11 : true
883 : } else {
884 0 : false
885 : }
886 37 : }
887 :
888 : /// Pipe a history of a single key to the writers.
889 : ///
890 : /// If `image_writer` is `None`, the images will be placed into the delta layers.
891 : /// The delta writer will contain all images and deltas (below and above the horizon) except the bottom-most images.
892 : #[allow(clippy::too_many_arguments)]
893 319 : async fn pipe_to(
894 319 : self,
895 319 : key: Key,
896 319 : delta_writer: &mut SplitDeltaLayerWriter<'_>,
897 319 : mut image_writer: Option<&mut SplitImageLayerWriter<'_>>,
898 319 : stat: &mut CompactionStatistics,
899 319 : ctx: &RequestContext,
900 319 : ) -> anyhow::Result<()> {
901 319 : let mut first_batch = true;
902 1022 : for (cutoff_lsn, KeyLogAtLsn(logs)) in self.below_horizon {
903 703 : if first_batch {
904 319 : if logs.len() == 1 && logs[0].1.is_image() {
905 300 : let Value::Image(img) = &logs[0].1 else {
906 0 : unreachable!()
907 : };
908 300 : stat.produce_image_key(img);
909 300 : if let Some(image_writer) = image_writer.as_mut() {
910 300 : image_writer.put_image(key, img.clone(), ctx).await?;
911 : } else {
912 0 : delta_writer
913 0 : .put_value(key, cutoff_lsn, Value::Image(img.clone()), ctx)
914 0 : .await?;
915 : }
916 : } else {
917 33 : for (lsn, val) in logs {
918 14 : stat.produce_key(&val);
919 14 : delta_writer.put_value(key, lsn, val, ctx).await?;
920 : }
921 : }
922 319 : first_batch = false;
923 : } else {
924 442 : for (lsn, val) in logs {
925 58 : stat.produce_key(&val);
926 58 : delta_writer.put_value(key, lsn, val, ctx).await?;
927 : }
928 : }
929 : }
930 319 : let KeyLogAtLsn(above_horizon_logs) = self.above_horizon;
931 348 : for (lsn, val) in above_horizon_logs {
932 29 : stat.produce_key(&val);
933 29 : delta_writer.put_value(key, lsn, val, ctx).await?;
934 : }
935 319 : Ok(())
936 319 : }
937 :
938 : /// Verify that every key in the retention is readable by replaying the logs.
939 323 : async fn verify(
940 323 : &self,
941 323 : key: Key,
942 323 : base_img_from_ancestor: &Option<(Key, Lsn, Bytes)>,
943 323 : full_history: &[(Key, Lsn, Value)],
944 323 : tline: &Arc<Timeline>,
945 323 : ) -> anyhow::Result<()> {
946 : // Usually the min_lsn should be the first record but we do a full iteration to be safe.
947 323 : let Some(min_lsn) = full_history.iter().map(|(_, lsn, _)| *lsn).min() else {
948 : // This should never happen b/c if we don't have any history of a key, we won't even do `generate_key_retention`.
949 0 : return Ok(());
950 : };
951 323 : let Some(max_lsn) = full_history.iter().map(|(_, lsn, _)| *lsn).max() else {
952 : // This should never happen b/c if we don't have any history of a key, we won't even do `generate_key_retention`.
953 0 : return Ok(());
954 : };
955 323 : let mut base_img = base_img_from_ancestor
956 323 : .as_ref()
957 323 : .map(|(_, lsn, img)| (*lsn, img));
958 323 : let mut history = Vec::new();
959 :
960 1027 : async fn collect_and_verify(
961 1027 : key: Key,
962 1027 : lsn: Lsn,
963 1027 : base_img: &Option<(Lsn, &Bytes)>,
964 1027 : history: &[(Lsn, &NeonWalRecord)],
965 1027 : tline: &Arc<Timeline>,
966 1027 : skip_empty: bool,
967 1027 : ) -> anyhow::Result<()> {
968 1027 : if base_img.is_none() && history.is_empty() {
969 0 : if skip_empty {
970 0 : return Ok(());
971 0 : }
972 0 : anyhow::bail!("verification failed: key {} has no history at {}", key, lsn);
973 1027 : };
974 :
975 1027 : let mut records = history
976 1027 : .iter()
977 1027 : .map(|(lsn, val)| (*lsn, (*val).clone()))
978 1027 : .collect::<Vec<_>>();
979 :
980 : // WAL redo requires records in the reverse LSN order
981 1027 : records.reverse();
982 1027 : let data = ValueReconstructState {
983 1027 : img: base_img.as_ref().map(|(lsn, img)| (*lsn, (*img).clone())),
984 1027 : records,
985 : };
986 :
987 1027 : tline
988 1027 : .reconstruct_value(key, lsn, data, RedoAttemptType::GcCompaction)
989 1027 : .await
990 1027 : .with_context(|| format!("verification failed for key {key} at lsn {lsn}"))?;
991 :
992 1027 : Ok(())
993 1027 : }
994 :
995 1036 : for (retain_lsn, KeyLogAtLsn(logs)) in &self.below_horizon {
996 1096 : for (lsn, val) in logs {
997 76 : match val {
998 307 : Value::Image(img) => {
999 307 : base_img = Some((*lsn, img));
1000 307 : history.clear();
1001 307 : }
1002 76 : Value::WalRecord(rec) if val.will_init() => {
1003 0 : base_img = None;
1004 0 : history.clear();
1005 0 : history.push((*lsn, rec));
1006 0 : }
1007 76 : Value::WalRecord(rec) => {
1008 76 : history.push((*lsn, rec));
1009 76 : }
1010 : }
1011 : }
1012 713 : if *retain_lsn >= min_lsn {
1013 : // Only verify after the key appears in the full history for the first time.
1014 :
1015 : // We don't modify history: in theory, we could replace the history with a single
1016 : // image as in `generate_key_retention` to make redos at later LSNs faster. But we
1017 : // want to verify everything as if it were read from the real layer map.
1018 699 : collect_and_verify(key, *retain_lsn, &base_img, &history, tline, false)
1019 699 : .await
1020 699 : .context("below horizon retain_lsn")?;
1021 14 : }
1022 : }
1023 :
1024 360 : for (lsn, val) in &self.above_horizon.0 {
1025 32 : match val {
1026 5 : Value::Image(img) => {
1027 : // Above the GC horizon, we verify every time we see an image.
1028 5 : collect_and_verify(key, *lsn, &base_img, &history, tline, true)
1029 5 : .await
1030 5 : .context("above horizon full image")?;
1031 5 : base_img = Some((*lsn, img));
1032 5 : history.clear();
1033 : }
1034 32 : Value::WalRecord(rec) if val.will_init() => {
1035 : // Above the GC horizon, we verify every time we see an init record.
1036 0 : collect_and_verify(key, *lsn, &base_img, &history, tline, true)
1037 0 : .await
1038 0 : .context("above horizon init record")?;
1039 0 : base_img = None;
1040 0 : history.clear();
1041 0 : history.push((*lsn, rec));
1042 : }
1043 32 : Value::WalRecord(rec) => {
1044 32 : history.push((*lsn, rec));
1045 32 : }
1046 : }
1047 : }
1048 : // Ensure the latest record is readable.
1049 323 : collect_and_verify(key, max_lsn, &base_img, &history, tline, false)
1050 323 : .await
1051 323 : .context("latest record")?;
1052 323 : Ok(())
1053 323 : }
1054 : }
1055 :
1056 : #[derive(Debug, Serialize, Default)]
1057 : struct CompactionStatisticsNumSize {
1058 : num: u64,
1059 : size: u64,
1060 : }
1061 :
1062 : #[derive(Debug, Serialize, Default)]
1063 : pub struct CompactionStatistics {
1064 : /// Delta layer visited (maybe compressed, physical size)
1065 : delta_layer_visited: CompactionStatisticsNumSize,
1066 : /// Image layer visited (maybe compressed, physical size)
1067 : image_layer_visited: CompactionStatisticsNumSize,
1068 : /// Delta layer produced (maybe compressed, physical size)
1069 : delta_layer_produced: CompactionStatisticsNumSize,
1070 : /// Image layer produced (maybe compressed, physical size)
1071 : image_layer_produced: CompactionStatisticsNumSize,
1072 : /// Delta layer discarded (maybe compressed, physical size of the layer being discarded instead of the original layer)
1073 : delta_layer_discarded: CompactionStatisticsNumSize,
1074 : /// Image layer discarded (maybe compressed, physical size of the layer being discarded instead of the original layer)
1075 : image_layer_discarded: CompactionStatisticsNumSize,
1076 : num_unique_keys_visited: usize,
1077 : /// Delta visited (uncompressed, original size)
1078 : wal_keys_visited: CompactionStatisticsNumSize,
1079 : /// Image visited (uncompressed, original size)
1080 : image_keys_visited: CompactionStatisticsNumSize,
1081 : /// Delta produced (uncompressed, original size)
1082 : wal_produced: CompactionStatisticsNumSize,
1083 : /// Image produced (uncompressed, original size)
1084 : image_produced: CompactionStatisticsNumSize,
1085 :
1086 : // Time spent in each phase
1087 : time_acquire_lock_secs: f64,
1088 : time_analyze_secs: f64,
1089 : time_download_layer_secs: f64,
1090 : time_to_first_kv_pair_secs: f64,
1091 : time_main_loop_secs: f64,
1092 : time_final_phase_secs: f64,
1093 : time_total_secs: f64,
1094 :
1095 : // Summary
1096 : /// Ratio of the key-value size after/before gc-compaction.
1097 : uncompressed_retention_ratio: f64,
1098 : /// Ratio of the physical size after/before gc-compaction.
1099 : compressed_retention_ratio: f64,
1100 : }
1101 :
1102 : impl CompactionStatistics {
1103 534 : fn estimated_size_of_value(val: &Value) -> usize {
1104 219 : match val {
1105 315 : Value::Image(img) => img.len(),
1106 0 : Value::WalRecord(NeonWalRecord::Postgres { rec, .. }) => rec.len(),
1107 219 : _ => std::mem::size_of::<NeonWalRecord>(),
1108 : }
1109 534 : }
1110 839 : fn estimated_size_of_key() -> usize {
1111 839 : KEY_SIZE // TODO: distinguish image layer and delta layer (count LSN in delta layer)
1112 839 : }
1113 44 : fn visit_delta_layer(&mut self, size: u64) {
1114 44 : self.delta_layer_visited.num += 1;
1115 44 : self.delta_layer_visited.size += size;
1116 44 : }
1117 35 : fn visit_image_layer(&mut self, size: u64) {
1118 35 : self.image_layer_visited.num += 1;
1119 35 : self.image_layer_visited.size += size;
1120 35 : }
1121 320 : fn on_unique_key_visited(&mut self) {
1122 320 : self.num_unique_keys_visited += 1;
1123 320 : }
1124 123 : fn visit_wal_key(&mut self, val: &Value) {
1125 123 : self.wal_keys_visited.num += 1;
1126 123 : self.wal_keys_visited.size +=
1127 123 : Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
1128 123 : }
1129 315 : fn visit_image_key(&mut self, val: &Value) {
1130 315 : self.image_keys_visited.num += 1;
1131 315 : self.image_keys_visited.size +=
1132 315 : Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
1133 315 : }
1134 101 : fn produce_key(&mut self, val: &Value) {
1135 101 : match val {
1136 5 : Value::Image(img) => self.produce_image_key(img),
1137 96 : Value::WalRecord(_) => self.produce_wal_key(val),
1138 : }
1139 101 : }
1140 96 : fn produce_wal_key(&mut self, val: &Value) {
1141 96 : self.wal_produced.num += 1;
1142 96 : self.wal_produced.size +=
1143 96 : Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
1144 96 : }
1145 305 : fn produce_image_key(&mut self, val: &Bytes) {
1146 305 : self.image_produced.num += 1;
1147 305 : self.image_produced.size += val.len() as u64 + Self::estimated_size_of_key() as u64;
1148 305 : }
1149 7 : fn discard_delta_layer(&mut self, original_size: u64) {
1150 7 : self.delta_layer_discarded.num += 1;
1151 7 : self.delta_layer_discarded.size += original_size;
1152 7 : }
1153 4 : fn discard_image_layer(&mut self, original_size: u64) {
1154 4 : self.image_layer_discarded.num += 1;
1155 4 : self.image_layer_discarded.size += original_size;
1156 4 : }
1157 12 : fn produce_delta_layer(&mut self, size: u64) {
1158 12 : self.delta_layer_produced.num += 1;
1159 12 : self.delta_layer_produced.size += size;
1160 12 : }
1161 15 : fn produce_image_layer(&mut self, size: u64) {
1162 15 : self.image_layer_produced.num += 1;
1163 15 : self.image_layer_produced.size += size;
1164 15 : }
1165 26 : fn finalize(&mut self) {
1166 26 : let original_key_value_size = self.image_keys_visited.size + self.wal_keys_visited.size;
1167 26 : let produced_key_value_size = self.image_produced.size + self.wal_produced.size;
1168 26 : self.uncompressed_retention_ratio =
1169 26 : produced_key_value_size as f64 / (original_key_value_size as f64 + 1.0); // avoid div by 0
1170 26 : let original_physical_size = self.image_layer_visited.size + self.delta_layer_visited.size;
1171 26 : let produced_physical_size = self.image_layer_produced.size
1172 26 : + self.delta_layer_produced.size
1173 26 : + self.image_layer_discarded.size
1174 26 : + self.delta_layer_discarded.size; // Also include the discarded layers to make the ratio accurate
1175 26 : self.compressed_retention_ratio =
1176 26 : produced_physical_size as f64 / (original_physical_size as f64 + 1.0); // avoid div by 0
1177 26 : }
1178 : }
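// Worked example (illustrative numbers): if gc-compaction visits 100 MiB of key-value data
// (`image_keys_visited.size + wal_keys_visited.size`) and produces 10 MiB, then
// `uncompressed_retention_ratio` comes out to about 10 MiB / (100 MiB + 1) ~ 0.1, i.e. roughly
// 90% of the visited history was dropped. `compressed_retention_ratio` is computed the same way
// from physical layer sizes, counting discarded layers on the "produced" side.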
1179 :
1180 : #[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
1181 : pub enum CompactionOutcome {
1182 : #[default]
1183 : /// No layers need to be compacted after this round. Compaction doesn't need
1184 : /// to be immediately scheduled.
1185 : Done,
1186 : /// Still has pending layers to be compacted after this round. Ideally, the scheduler
1187 : /// should immediately schedule another compaction.
1188 : Pending,
1189 : /// A timeline needs L0 compaction. Yield and schedule an immediate L0 compaction pass (only
1190 : /// guaranteed when `compaction_l0_first` is enabled).
1191 : YieldForL0,
1192 : /// Compaction was skipped, because the timeline is ineligible for compaction.
1193 : Skipped,
1194 : }
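// A minimal sketch of how a caller might react to these outcomes (hypothetical caller, not part
// of this file):
//
//     match timeline.compact_with_options(cancel, options, ctx).await? {
//         CompactionOutcome::Done | CompactionOutcome::Skipped => { /* nothing more to do now */ }
//         CompactionOutcome::Pending => { /* schedule another compaction pass soon */ }
//         CompactionOutcome::YieldForL0 => { /* run an L0-only compaction pass first */ }
//     }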
1195 :
1196 : impl Timeline {
1197 : /// TODO: cancellation
1198 : ///
1199 : /// Returns whether the compaction has pending tasks.
1200 192 : pub(crate) async fn compact_legacy(
1201 192 : self: &Arc<Self>,
1202 192 : cancel: &CancellationToken,
1203 192 : options: CompactOptions,
1204 192 : ctx: &RequestContext,
1205 192 : ) -> Result<CompactionOutcome, CompactionError> {
1206 192 : if options
1207 192 : .flags
1208 192 : .contains(CompactFlags::EnhancedGcBottomMostCompaction)
1209 : {
1210 0 : self.compact_with_gc(cancel, options, ctx).await?;
1211 0 : return Ok(CompactionOutcome::Done);
1212 192 : }
1213 :
1214 192 : if options.flags.contains(CompactFlags::DryRun) {
1215 0 : return Err(CompactionError::Other(anyhow!(
1216 0 : "dry-run mode is not supported for legacy compaction for now"
1217 0 : )));
1218 192 : }
1219 :
1220 192 : if options.compact_key_range.is_some() || options.compact_lsn_range.is_some() {
1221 : // maybe useful in the future? could implement this at some point
1222 0 : return Err(CompactionError::Other(anyhow!(
1223 0 : "compaction range is not supported for legacy compaction for now"
1224 0 : )));
1225 192 : }
1226 :
1227 : // High level strategy for compaction / image creation:
1228 : //
1229 : // 1. First, do an L0 compaction to move the L0
1230 : // layers into the historic layer map and get flat levels of
1231 : // layers. If we did not compact all L0 layers, we will
1232 : // prioritize compacting the timeline again and not do
1233 : // any of the compactions below.
1234 : //
1235 : // 2. Then, calculate the desired "partitioning" of the
1236 : // currently in-use key space. The goal is to partition the
1237 : // key space into roughly fixed-size chunks, but also take into
1238 : // account any existing image layers, and try to align the
1239 : // chunk boundaries with the existing image layers to avoid
1240 : // too much churn. Also try to align chunk boundaries with
1241 : // relation boundaries. In principle, we don't know about
1242 : // relation boundaries here, we just deal with key-value
1243 : // pairs, and the code in pgdatadir_mapping.rs knows how to
1244 : // map relations into key-value pairs. But in practice we know
1245 : // that 'field6' is the block number, and the fields 1-5
1246 : // identify a relation. This is just an optimization,
1247 : // though.
1248 : //
1249 : // 3. Once we know the partitioning, for each partition,
1250 : // decide if it's time to create a new image layer. The
1251 : // criterion is: has there been too much "churn" since the last
1252 : // image layer? "Churn" is a fuzzy concept; it's a
1253 : // combination of too many delta files, or too much WAL in
1254 : // total in the delta file. Or perhaps: if creating an image
1255 : // file would allow us to delete some older files.
1256 : //
1257 : // 4. In the end, if the tenant gets auto-sharded, we will run
1258 : // a shard-ancestor compaction.
1259 :
1260 : // Is the timeline being deleted?
1261 192 : if self.is_stopping() {
1262 0 : trace!("Dropping out of compaction on timeline shutdown");
1263 0 : return Err(CompactionError::ShuttingDown);
1264 192 : }
1265 :
1266 192 : let target_file_size = self.get_checkpoint_distance();
1267 :
1268 : // Define partitioning schema if needed
1269 :
1270 : // 1. L0 Compact
1271 192 : let l0_outcome = {
1272 192 : let timer = self.metrics.compact_time_histo.start_timer();
1273 192 : let l0_outcome = self
1274 192 : .compact_level0(
1275 192 : target_file_size,
1276 192 : options.flags.contains(CompactFlags::ForceL0Compaction),
1277 192 : ctx,
1278 192 : )
1279 192 : .await?;
1280 192 : timer.stop_and_record();
1281 192 : l0_outcome
1282 : };
1283 :
1284 192 : if options.flags.contains(CompactFlags::OnlyL0Compaction) {
1285 0 : return Ok(l0_outcome);
1286 192 : }
1287 :
1288 : // Yield if we have pending L0 compaction. The scheduler will do another pass.
1289 192 : if (l0_outcome == CompactionOutcome::Pending || l0_outcome == CompactionOutcome::YieldForL0)
1290 0 : && options.flags.contains(CompactFlags::YieldForL0)
1291 : {
1292 0 : info!("image/ancestor compaction yielding for L0 compaction");
1293 0 : return Ok(CompactionOutcome::YieldForL0);
1294 192 : }
1295 :
1296 192 : let gc_cutoff = *self.applied_gc_cutoff_lsn.read();
1297 192 : let l0_l1_boundary_lsn = {
1298 : // We do the repartition on the L0-L1 boundary. All data below the boundary
1299 : // are compacted by L0 with low read amplification, thus making the `repartition`
1300 : // function run fast.
1301 192 : let guard = self
1302 192 : .layers
1303 192 : .read(LayerManagerLockHolder::GetLayerMapInfo)
1304 192 : .await;
1305 192 : guard
1306 192 : .all_persistent_layers()
1307 192 : .iter()
1308 1219 : .map(|x| {
1309 : // Use the end LSN of delta layers OR the start LSN of image layers.
1310 1219 : if x.is_delta {
1311 1035 : x.lsn_range.end
1312 : } else {
1313 184 : x.lsn_range.start
1314 : }
1315 1219 : })
1316 192 : .max()
1317 : };
1318 :
1319 192 : let (partition_mode, partition_lsn) = if cfg!(test)
1320 0 : || cfg!(feature = "testing")
1321 0 : || self
1322 0 : .feature_resolver
1323 0 : .evaluate_boolean("image-compaction-boundary")
1324 0 : .is_ok()
1325 : {
1326 192 : let last_repartition_lsn = self.partitioning.read().1;
1327 192 : let lsn = match l0_l1_boundary_lsn {
1328 192 : Some(boundary) => gc_cutoff
1329 192 : .max(boundary)
1330 192 : .max(last_repartition_lsn)
1331 192 : .max(self.initdb_lsn)
1332 192 : .max(self.ancestor_lsn),
1333 0 : None => self.get_last_record_lsn(),
1334 : };
1335 192 : if lsn <= self.initdb_lsn || lsn <= self.ancestor_lsn {
1336 : // Do not attempt to create image layers below the initdb or ancestor LSN -- no data below it
1337 0 : ("l0_l1_boundary", self.get_last_record_lsn())
1338 : } else {
1339 192 : ("l0_l1_boundary", lsn)
1340 : }
1341 : } else {
1342 0 : ("latest_record", self.get_last_record_lsn())
1343 : };
1344 :
1345 : // 2. Repartition and create image layers if necessary
1346 192 : match self
1347 192 : .repartition(
1348 192 : partition_lsn,
1349 192 : self.get_compaction_target_size(),
1350 192 : options.flags,
1351 192 : ctx,
1352 192 : )
1353 192 : .await
1354 : {
1355 192 : Ok(((dense_partitioning, sparse_partitioning), lsn)) if lsn >= gc_cutoff => {
1356 : // Disables access_stats updates, so that the files we read remain candidates for eviction after we're done with them
1357 80 : let image_ctx = RequestContextBuilder::from(ctx)
1358 80 : .access_stats_behavior(AccessStatsBehavior::Skip)
1359 80 : .attached_child();
1360 :
1361 80 : let mut partitioning = dense_partitioning;
1362 80 : partitioning
1363 80 : .parts
1364 80 : .extend(sparse_partitioning.into_dense().parts);
1365 :
1366 : // 3. Create new image layers for partitions that have been modified "enough".
1367 80 : let mode = if options
1368 80 : .flags
1369 80 : .contains(CompactFlags::ForceImageLayerCreation)
1370 : {
1371 7 : ImageLayerCreationMode::Force
1372 : } else {
1373 73 : ImageLayerCreationMode::Try
1374 : };
1375 80 : let (image_layers, outcome) = self
1376 80 : .create_image_layers(
1377 80 : &partitioning,
1378 80 : lsn,
1379 80 : mode,
1380 80 : &image_ctx,
1381 80 : self.last_image_layer_creation_status
1382 80 : .load()
1383 80 : .as_ref()
1384 80 : .clone(),
1385 80 : options.flags.contains(CompactFlags::YieldForL0),
1386 : )
1387 80 : .instrument(info_span!("create_image_layers", mode = %mode, partition_mode = %partition_mode, lsn = %lsn))
1388 80 : .await
1389 80 : .inspect_err(|err| {
1390 : if let CreateImageLayersError::GetVectoredError(
1391 : GetVectoredError::MissingKey(_),
1392 0 : ) = err
1393 : {
1394 0 : critical_timeline!(
1395 0 : self.tenant_shard_id,
1396 0 : self.timeline_id,
1397 0 : "missing key during compaction: {err:?}"
1398 : );
1399 0 : }
1400 0 : })?;
1401 :
1402 80 : self.last_image_layer_creation_status
1403 80 : .store(Arc::new(outcome.clone()));
1404 :
1405 80 : self.upload_new_image_layers(image_layers)?;
1406 80 : if let LastImageLayerCreationStatus::Incomplete { .. } = outcome {
1407 : // Yield and do not do any other kind of compaction.
1408 0 : info!(
1409 0 : "skipping shard ancestor compaction due to pending image layer generation tasks (preempted by L0 compaction)."
1410 : );
1411 0 : return Ok(CompactionOutcome::YieldForL0);
1412 80 : }
1413 : }
1414 :
1415 : Ok(_) => {
1416 : // This happens very frequently so we don't want to log it.
1417 112 : debug!("skipping repartitioning due to image compaction LSN being below GC cutoff");
1418 : }
1419 :
1420 : // Suppress errors when cancelled.
1421 : //
1422 : // Log other errors but continue. Failure to repartition is normal if the timeline was just created
1423 : // as an empty timeline, or in unit tests where we use the timeline as a simple
1424 : // key-value store, ignoring the datadir layout.
1425 : //
1426 : // TODO:
1427 : // 1. shouldn't we return early here if we observe cancellation
1428 : // 2. Experiment: can we stop checking self.cancel here?
1429 0 : Err(_) if self.cancel.is_cancelled() => {} // TODO: try how we fare removing this branch
1430 0 : Err(err) if err.is_cancel() => {}
1431 : Err(RepartitionError::CollectKeyspace(
1432 0 : e @ CollectKeySpaceError::Decode(_)
1433 0 : | e @ CollectKeySpaceError::PageRead(
1434 : PageReconstructError::MissingKey(_) | PageReconstructError::WalRedo(_),
1435 : ),
1436 : )) => {
1437 : // Alert on critical errors that indicate data corruption.
1438 0 : critical_timeline!(
1439 0 : self.tenant_shard_id,
1440 0 : self.timeline_id,
1441 0 : "could not compact, repartitioning keyspace failed: {e:?}"
1442 : );
1443 : }
1444 0 : Err(e) => error!(
1445 0 : "could not compact, repartitioning keyspace failed: {:?}",
1446 0 : e.into_anyhow()
1447 : ),
1448 : };
1449 :
1450 192 : let partition_count = self.partitioning.read().0.0.parts.len();
1451 :
1452 : // 4. Shard ancestor compaction
1453 192 : if self.get_compaction_shard_ancestor() && self.shard_identity.count >= ShardCount::new(2) {
1454 : // Limit the number of layer rewrites to the number of partitions: this means its
1455 : // runtime should be comparable to a full round of image layer creations, rather than
1456 : // being potentially much longer.
1457 0 : let rewrite_max = partition_count;
1458 :
1459 0 : let outcome = self
1460 0 : .compact_shard_ancestors(
1461 0 : rewrite_max,
1462 0 : options.flags.contains(CompactFlags::YieldForL0),
1463 0 : ctx,
1464 0 : )
1465 0 : .await?;
1466 0 : match outcome {
1467 0 : CompactionOutcome::Pending | CompactionOutcome::YieldForL0 => return Ok(outcome),
1468 0 : CompactionOutcome::Done | CompactionOutcome::Skipped => {}
1469 : }
1470 192 : }
1471 :
1472 192 : Ok(CompactionOutcome::Done)
1473 192 : }
1474 :
1475 : /// Check for layers that are eligible to be rewritten:
1476 : /// - Shard splitting: after a shard split, rewrite ancestor layers beyond pitr_interval, so that
1477 : /// we don't indefinitely retain keys in this shard that aren't needed.
1478 : /// - For future use: layers beyond pitr_interval that are in formats we would
1479 : /// rather not maintain compatibility with indefinitely.
1480 : ///
1481 : /// Note: this phase may read and write many gigabytes of data: use rewrite_max to bound
1482 : /// how much work it will try to do in each compaction pass.
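 : ///
 : /// As a rough sketch (not the exact code below), the per-layer rewrite decision boils down to
 : /// comparing the share of shard-local pages against `ANCESTOR_COMPACTION_REWRITE_THRESHOLD`;
 : /// the real loop also applies further filters (ancestor-shard check, GC cutoff, delta layers,
 : /// visibility, generation):
 : ///
 : /// ```ignore
 : /// // Hypothetical helper for illustration only; `worth_rewriting` is not part of this file.
 : /// fn worth_rewriting(local_pages: u32, raw_pages: u32) -> bool {
 : ///     local_pages != 0                  // zero local pages means the layer is dropped instead
 : ///         && local_pages != u32::MAX    // unknown counts are never rewritten on this basis
 : ///         && (local_pages as f64 / raw_pages as f64) <= ANCESTOR_COMPACTION_REWRITE_THRESHOLD
 : /// }
 : /// ```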
1483 0 : async fn compact_shard_ancestors(
1484 0 : self: &Arc<Self>,
1485 0 : rewrite_max: usize,
1486 0 : yield_for_l0: bool,
1487 0 : ctx: &RequestContext,
1488 0 : ) -> Result<CompactionOutcome, CompactionError> {
1489 0 : let mut outcome = CompactionOutcome::Done;
1490 0 : let mut drop_layers = Vec::new();
1491 0 : let mut layers_to_rewrite: Vec<Layer> = Vec::new();
1492 :
1493 : // We will use the Lsn cutoff of the last GC as a threshold for rewriting layers: if a
1494 : // layer is behind this Lsn, it indicates that the layer is being retained beyond the
1495 : // pitr_interval, for example because a branchpoint references it.
1496 : //
1497 : // Holding this read guard also blocks [`Self::gc_timeline`] from entering while we
1498 : // are rewriting layers.
1499 0 : let latest_gc_cutoff = self.get_applied_gc_cutoff_lsn();
1500 0 : let pitr_cutoff = self.gc_info.read().unwrap().cutoffs.time;
1501 :
1502 0 : let layers = self.layers.read(LayerManagerLockHolder::Compaction).await;
1503 0 : let layers_iter = layers.layer_map()?.iter_historic_layers();
1504 0 : let (layers_total, mut layers_checked) = (layers_iter.len(), 0);
1505 0 : for layer_desc in layers_iter {
1506 0 : layers_checked += 1;
1507 0 : let layer = layers.get_from_desc(&layer_desc);
1508 0 : if layer.metadata().shard.shard_count == self.shard_identity.count {
1509 : // This layer does not belong to a historic ancestor, no need to re-image it.
1510 0 : continue;
1511 0 : }
1512 :
1513 : // This layer was created on an ancestor shard: check if it contains any data for this shard.
1514 0 : let sharded_range = ShardedRange::new(layer_desc.get_key_range(), &self.shard_identity);
1515 0 : let layer_local_page_count = sharded_range.page_count();
1516 0 : let layer_raw_page_count = ShardedRange::raw_size(&layer_desc.get_key_range());
1517 0 : if layer_local_page_count == 0 {
1518 : // This ancestral layer only covers keys that belong to other shards.
1519 : // We include the full metadata in the log: if we had some critical bug that caused
1520 : // us to incorrectly drop layers, this would simplify manually debugging + reinstating those layers.
1521 0 : debug!(%layer, old_metadata=?layer.metadata(),
1522 0 : "dropping layer after shard split, contains no keys for this shard",
1523 : );
1524 :
1525 0 : if cfg!(debug_assertions) {
1526 : // Expensive, exhaustive check of keys in this layer: this guards against ShardedRange's calculations being
1527 : // wrong. If ShardedRange claims the local page count is zero, then no keys in this layer
1528 : // should be !is_key_disposable()
1529 : // TODO: exclude sparse keyspace from this check, otherwise it will infinitely loop.
1530 0 : let range = layer_desc.get_key_range();
1531 0 : let mut key = range.start;
1532 0 : while key < range.end {
1533 0 : debug_assert!(self.shard_identity.is_key_disposable(&key));
1534 0 : key = key.next();
1535 : }
1536 0 : }
1537 :
1538 0 : drop_layers.push(layer);
1539 0 : continue;
1540 0 : } else if layer_local_page_count != u32::MAX
1541 0 : && layer_local_page_count == layer_raw_page_count
1542 : {
1543 0 : debug!(%layer,
1544 0 : "layer is entirely shard local ({} keys), no need to filter it",
1545 : layer_local_page_count
1546 : );
1547 0 : continue;
1548 0 : }
1549 :
1550 : // Only rewrite a layer if we can reclaim significant space: skip layers whose pages are mostly local to this shard already.
1551 0 : if layer_local_page_count != u32::MAX
1552 0 : && layer_local_page_count as f64 / layer_raw_page_count as f64
1553 0 : > ANCESTOR_COMPACTION_REWRITE_THRESHOLD
1554 : {
1555 0 : debug!(%layer,
1556 0 : "layer has a large share of local pages \
1557 0 : ({layer_local_page_count}/{layer_raw_page_count} > \
1558 0 : {ANCESTOR_COMPACTION_REWRITE_THRESHOLD}), not rewriting",
1559 : );
 : continue;
1560 0 : }
1561 :
1562 : // Don't bother re-writing a layer if it is within the PITR window: it will age-out eventually
1563 : // without incurring the I/O cost of a rewrite.
1564 0 : if layer_desc.get_lsn_range().end >= *latest_gc_cutoff {
1565 0 : debug!(%layer, "Skipping rewrite of layer still in GC window ({} >= {})",
1566 0 : layer_desc.get_lsn_range().end, *latest_gc_cutoff);
1567 0 : continue;
1568 0 : }
1569 :
1570 : // We do not yet implement rewrite of delta layers.
1571 0 : if layer_desc.is_delta() {
1572 0 : debug!(%layer, "Skipping rewrite of delta layer");
1573 0 : continue;
1574 0 : }
1575 :
1576 : // We don't bother rewriting layers that aren't visible, since these won't be needed by
1577 : // reads and will likely be garbage collected soon.
1578 0 : if layer.visibility() != LayerVisibilityHint::Visible {
1579 0 : debug!(%layer, "Skipping rewrite of invisible layer");
1580 0 : continue;
1581 0 : }
1582 :
1583 : // Only rewrite layers if their generations differ. This guarantees:
1584 : // - that local rewrite is safe, as local layer paths will differ between existing layer and rewritten one
1585 : // - that the layer is persistent in remote storage, as we only see an old-generation layer by loading it from remote storage
1586 0 : if layer.metadata().generation == self.generation {
1587 0 : debug!(%layer, "Skipping rewrite, is not from old generation");
1588 0 : continue;
1589 0 : }
1590 :
1591 0 : if layers_to_rewrite.len() >= rewrite_max {
1592 0 : debug!(%layer, "Will rewrite layer on a future compaction, already rewrote {}",
1593 0 : layers_to_rewrite.len()
1594 : );
1595 0 : outcome = CompactionOutcome::Pending;
1596 0 : break;
1597 0 : }
1598 :
1599 : // Fall through: all our conditions for doing a rewrite passed.
1600 0 : layers_to_rewrite.push(layer);
1601 : }
1602 :
1603 : // Drop read lock on layer map before we start doing time-consuming I/O.
1604 0 : drop(layers);
1605 :
1606 : // Drop out early if there's nothing to do.
1607 0 : if layers_to_rewrite.is_empty() && drop_layers.is_empty() {
1608 0 : return Ok(CompactionOutcome::Done);
1609 0 : }
1610 :
1611 0 : info!(
1612 0 : "starting shard ancestor compaction, rewriting {} layers and dropping {} layers, \
1613 0 : checked {layers_checked}/{layers_total} layers \
1614 0 : (latest_gc_cutoff={} pitr_cutoff={:?})",
1615 0 : layers_to_rewrite.len(),
1616 0 : drop_layers.len(),
1617 0 : *latest_gc_cutoff,
1618 : pitr_cutoff,
1619 : );
1620 0 : let started = Instant::now();
1621 :
1622 0 : let mut replace_image_layers = Vec::new();
1623 0 : let total = layers_to_rewrite.len();
1624 :
1625 0 : for (i, layer) in layers_to_rewrite.into_iter().enumerate() {
1626 0 : if self.cancel.is_cancelled() {
1627 0 : return Err(CompactionError::ShuttingDown);
1628 0 : }
1629 :
1630 0 : info!(layer=%layer, "rewriting layer after shard split: {}/{}", i, total);
1631 :
1632 0 : let mut image_layer_writer = ImageLayerWriter::new(
1633 0 : self.conf,
1634 0 : self.timeline_id,
1635 0 : self.tenant_shard_id,
1636 0 : &layer.layer_desc().key_range,
1637 0 : layer.layer_desc().image_layer_lsn(),
1638 0 : &self.gate,
1639 0 : self.cancel.clone(),
1640 0 : ctx,
1641 0 : )
1642 0 : .await
1643 0 : .map_err(CompactionError::Other)?;
1644 :
1645 : // Safety of layer rewrites:
1646 : // - We are writing to a different local file path than we are reading from, so the old Layer
1647 : // cannot interfere with the new one.
1648 : // - In the page cache, contents for a particular VirtualFile are stored with a file_id that
1649 : // is different for two layers with the same name (in `ImageLayerInner::new` we always
1650 : // acquire a fresh id from [`crate::page_cache::next_file_id`]. So readers do not risk
1651 : // reading the index from one layer file, and then data blocks from the rewritten layer file.
1652 : // - Any readers that have a reference to the old layer will keep it alive until they are done
1653 : // with it. If they are trying to promote from remote storage, that will fail, but this is the same
1654 : // as for compaction generally: compaction is allowed to delete layers that readers might be trying to use.
1655 : // - We do not run concurrently with other kinds of compaction, so the only layer map writes we race with are:
1656 : // - GC, which at worst witnesses us "undelete" a layer that they just deleted.
1657 : // - ingestion, which only inserts layers, therefore cannot collide with us.
1658 0 : let resident = layer.download_and_keep_resident(ctx).await?;
1659 :
1660 0 : let keys_written = resident
1661 0 : .filter(&self.shard_identity, &mut image_layer_writer, ctx)
1662 0 : .await?;
1663 :
1664 0 : if keys_written > 0 {
1665 0 : let (desc, path) = image_layer_writer
1666 0 : .finish(ctx)
1667 0 : .await
1668 0 : .map_err(CompactionError::Other)?;
1669 0 : let new_layer = Layer::finish_creating(self.conf, self, desc, &path)
1670 0 : .map_err(CompactionError::Other)?;
1671 0 : info!(layer=%new_layer, "rewrote layer, {} -> {} bytes",
1672 0 : layer.metadata().file_size,
1673 0 : new_layer.metadata().file_size);
1674 :
1675 0 : replace_image_layers.push((layer, new_layer));
1676 0 : } else {
1677 0 : // Drop the old layer. Usually for this case we would already have noticed that
1678 0 : // the layer has no data for us with the ShardedRange check above, but if we get here the key filter wrote nothing, so the layer can be dropped anyway.
1679 0 : drop_layers.push(layer);
1680 0 : }
1681 :
1682 : // Yield for L0 compaction if necessary, but make sure we update the layer map below
1683 : // with the work we've already done.
1684 0 : if yield_for_l0
1685 0 : && self
1686 0 : .l0_compaction_trigger
1687 0 : .notified()
1688 0 : .now_or_never()
1689 0 : .is_some()
1690 : {
1691 0 : info!("shard ancestor compaction yielding for L0 compaction");
1692 0 : outcome = CompactionOutcome::YieldForL0;
1693 0 : break;
1694 0 : }
1695 : }
1696 :
1697 0 : for layer in &drop_layers {
1698 0 : info!(%layer, old_metadata=?layer.metadata(),
1699 0 : "dropping layer after shard split (no keys for this shard)",
1700 : );
1701 : }
1702 :
1703 : // At this point, we have replaced local layer files with their rewritten form, but not yet uploaded
1704 : // metadata to reflect that. If we restart here, the replaced layer files will look invalid (size mismatch
1705 : // to remote index) and be removed. This is inefficient but safe.
1706 0 : fail::fail_point!("compact-shard-ancestors-localonly");
1707 :
1708 : // Update the LayerMap so that readers will use the new layers, and enqueue it for writing to remote storage
1709 0 : self.rewrite_layers(replace_image_layers, drop_layers)
1710 0 : .await?;
1711 :
1712 0 : fail::fail_point!("compact-shard-ancestors-enqueued");
1713 :
1714 : // We wait for all uploads to complete before finishing this compaction stage. This is not
1715 : // necessary for correctness, but it simplifies testing, and avoids proceeding with another
1716 : // Timeline's compaction while this timeline's uploads may be generating lots of disk I/O
1717 : // load.
1718 0 : if outcome != CompactionOutcome::YieldForL0 {
1719 0 : info!("shard ancestor compaction waiting for uploads");
1720 0 : tokio::select! {
1721 0 : result = self.remote_client.wait_completion() => match result {
1722 0 : Ok(()) => {},
1723 0 : Err(WaitCompletionError::NotInitialized(ni)) => return Err(CompactionError::from(ni)),
1724 : Err(WaitCompletionError::UploadQueueShutDownOrStopped) => {
1725 0 : return Err(CompactionError::ShuttingDown);
1726 : }
1727 : },
1728 : // Don't wait if there's L0 compaction to do. We don't need to update the outcome
1729 : // here, because we've already done the actual work.
1730 0 : _ = self.l0_compaction_trigger.notified(), if yield_for_l0 => {},
1731 : }
1732 0 : }
1733 :
1734 0 : info!(
1735 0 : "shard ancestor compaction done in {:.3}s{}",
1736 0 : started.elapsed().as_secs_f64(),
1737 0 : match outcome {
1738 : CompactionOutcome::Pending =>
1739 0 : format!(", with pending work (rewrite_max={rewrite_max})"),
1740 0 : CompactionOutcome::YieldForL0 => String::from(", yielding for L0 compaction"),
1741 0 : CompactionOutcome::Skipped | CompactionOutcome::Done => String::new(),
1742 : }
1743 : );
1744 :
1745 0 : fail::fail_point!("compact-shard-ancestors-persistent");
1746 :
1747 0 : Ok(outcome)
1748 0 : }
1749 :
1750 : /// Update the LayerVisibilityHint of layers covered by image layers, based on whether there is
1751 : /// an image layer between them and the most recent readable LSN (branch point or tip of timeline). The
1752 : /// purpose of the visibility hint is to record which layers need to be available to service reads.
1753 : ///
1754 : /// The result may be used as an input to eviction and secondary downloads to de-prioritize layers
1755 : /// that we know won't be needed for reads.
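 : ///
 : /// Roughly speaking (illustrative example): with a branch point at LSN 0x100 and the timeline
 : /// tip at 0x200, the readable points are [0x100, 0x200]; a historic layer whose whole key range
 : /// is shadowed by image layers between it and every readable point is marked Covered, and
 : /// everything else stays Visible.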
1756 122 : pub(crate) async fn update_layer_visibility(
1757 122 : &self,
1758 122 : ) -> Result<(), super::layer_manager::Shutdown> {
1759 122 : let head_lsn = self.get_last_record_lsn();
1760 :
1761 : // We will sweep through layers in reverse-LSN order. We only do historic layers. L0 deltas
1762 : // are implicitly left visible, because LayerVisibilityHint's default is Visible, and we never modify it here.
1763 : // Note that L0 deltas _can_ be covered by image layers, but we consider them 'visible' because we anticipate that
1764 : // they will be subject to L0->L1 compaction in the near future.
1765 122 : let layer_manager = self
1766 122 : .layers
1767 122 : .read(LayerManagerLockHolder::GetLayerMapInfo)
1768 122 : .await;
1769 122 : let layer_map = layer_manager.layer_map()?;
1770 :
1771 122 : let readable_points = {
1772 122 : let children = self.gc_info.read().unwrap().retain_lsns.clone();
1773 :
1774 122 : let mut readable_points = Vec::with_capacity(children.len() + 1);
1775 123 : for (child_lsn, _child_timeline_id, is_offloaded) in &children {
1776 1 : if *is_offloaded == MaybeOffloaded::Yes {
1777 0 : continue;
1778 1 : }
1779 1 : readable_points.push(*child_lsn);
1780 : }
1781 122 : readable_points.push(head_lsn);
1782 122 : readable_points
1783 : };
1784 :
1785 122 : let (layer_visibility, covered) = layer_map.get_visibility(readable_points);
1786 311 : for (layer_desc, visibility) in layer_visibility {
1787 189 : // FIXME: a more efficiency bulk zip() through the layers rather than NlogN getting each one
1788 189 : let layer = layer_manager.get_from_desc(&layer_desc);
1789 189 : layer.set_visibility(visibility);
1790 189 : }
1791 :
1792 : // TODO: publish our covered KeySpace to our parent, so that when they update their visibility, they can
1793 : // avoid assuming that everything at a branch point is visible.
1794 122 : drop(covered);
1795 122 : Ok(())
1796 122 : }
1797 :
1798 : /// Collect a bunch of Level 0 layer files, and compact and reshuffle them as
1799 : /// Level 1 files. Returns whether the L0 layers are fully compacted.
1800 192 : async fn compact_level0(
1801 192 : self: &Arc<Self>,
1802 192 : target_file_size: u64,
1803 192 : force_compaction_ignore_threshold: bool,
1804 192 : ctx: &RequestContext,
1805 192 : ) -> Result<CompactionOutcome, CompactionError> {
1806 : let CompactLevel0Phase1Result {
1807 192 : new_layers,
1808 192 : deltas_to_compact,
1809 192 : outcome,
1810 : } = {
1811 192 : let phase1_span = info_span!("compact_level0_phase1");
1812 192 : let ctx = ctx.attached_child();
1813 192 : let stats = CompactLevel0Phase1StatsBuilder {
1814 192 : version: Some(2),
1815 192 : tenant_id: Some(self.tenant_shard_id),
1816 192 : timeline_id: Some(self.timeline_id),
1817 192 : ..Default::default()
1818 192 : };
1819 :
1820 192 : self.compact_level0_phase1(
1821 192 : stats,
1822 192 : target_file_size,
1823 192 : force_compaction_ignore_threshold,
1824 192 : &ctx,
1825 192 : )
1826 192 : .instrument(phase1_span)
1827 192 : .await?
1828 : };
1829 :
1830 192 : if new_layers.is_empty() && deltas_to_compact.is_empty() {
1831 : // nothing to do
1832 169 : return Ok(CompactionOutcome::Done);
1833 23 : }
1834 :
1835 23 : self.finish_compact_batch(&new_layers, &Vec::new(), &deltas_to_compact)
1836 23 : .await?;
1837 23 : Ok(outcome)
1838 192 : }
1839 :
1840 : /// Level0 files first phase of compaction, explained in the [`Self::compact_legacy`] comment.
1841 192 : async fn compact_level0_phase1(
1842 192 : self: &Arc<Self>,
1843 192 : mut stats: CompactLevel0Phase1StatsBuilder,
1844 192 : target_file_size: u64,
1845 192 : force_compaction_ignore_threshold: bool,
1846 192 : ctx: &RequestContext,
1847 192 : ) -> Result<CompactLevel0Phase1Result, CompactionError> {
1848 192 : let begin = tokio::time::Instant::now();
1849 192 : let guard = self.layers.read(LayerManagerLockHolder::Compaction).await;
1850 192 : let now = tokio::time::Instant::now();
1851 192 : stats.read_lock_acquisition_micros =
1852 192 : DurationRecorder::Recorded(RecordedDuration(now - begin), now);
1853 :
1854 192 : let layers = guard.layer_map()?;
1855 192 : let level0_deltas = layers.level0_deltas();
1856 192 : stats.level0_deltas_count = Some(level0_deltas.len());
1857 :
1858 : // Only compact if enough layers have accumulated.
1859 192 : let threshold = self.get_compaction_threshold();
1860 192 : if level0_deltas.is_empty() || level0_deltas.len() < threshold {
1861 179 : if force_compaction_ignore_threshold {
1862 12 : if !level0_deltas.is_empty() {
1863 10 : info!(
1864 0 : level0_deltas = level0_deltas.len(),
1865 0 : threshold, "too few deltas to compact, but forcing compaction"
1866 : );
1867 : } else {
1868 2 : info!(
1869 0 : level0_deltas = level0_deltas.len(),
1870 0 : threshold, "too few deltas to compact, cannot force compaction"
1871 : );
1872 2 : return Ok(CompactLevel0Phase1Result::default());
1873 : }
1874 : } else {
1875 167 : debug!(
1876 0 : level0_deltas = level0_deltas.len(),
1877 0 : threshold, "too few deltas to compact"
1878 : );
1879 167 : return Ok(CompactLevel0Phase1Result::default());
1880 : }
1881 13 : }
1882 :
1883 23 : let mut level0_deltas = level0_deltas
1884 23 : .iter()
1885 201 : .map(|x| guard.get_from_desc(x))
1886 23 : .collect::<Vec<_>>();
1887 :
1888 23 : drop_layer_manager_rlock(guard);
1889 :
1890 : // This is the last LSN that we have seen for L0 compaction in the timeline. This LSN might be updated
1891 : // by the time we finish the compaction. So we need to get it here.
1892 23 : let l0_last_record_lsn = self.get_last_record_lsn();
1893 :
1894 : // Gather the files to compact in this iteration.
1895 : //
1896 : // Start with the oldest Level 0 delta file, and collect any other
1897 : // level 0 files that form a contiguous sequence, such that the end
1898 : // LSN of previous file matches the start LSN of the next file.
1899 : //
1900 : // Note that if the files don't form such a sequence, we might
1901 : // "compact" just a single file. That's a bit pointless, but it allows
1902 : // us to get rid of the level 0 file, and compact the other files on
1903 : // the next iteration. This could probably be made smarter, but such
1904 : // "gaps" in the sequence of level 0 files should only happen in case
1905 : // of a crash, partial download from cloud storage, or something like
1906 : // that, so it's not a big deal in practice.
1907 356 : level0_deltas.sort_by_key(|l| l.layer_desc().lsn_range.start);
1908 23 : let mut level0_deltas_iter = level0_deltas.iter();
1909 :
1910 23 : let first_level0_delta = level0_deltas_iter.next().unwrap();
1911 23 : let mut prev_lsn_end = first_level0_delta.layer_desc().lsn_range.end;
1912 23 : let mut deltas_to_compact = Vec::with_capacity(level0_deltas.len());
1913 :
1914 : // Accumulate the size of layers in `deltas_to_compact`
1915 23 : let mut deltas_to_compact_bytes = 0;
1916 :
1917 : // Under normal circumstances, we will accumulate up to compaction_upper_limit L0s of size
1918 : // checkpoint_distance each. To avoid edge cases using extra system resources, bound our
1919 : // work in this function to only operate on this much delta data at once.
1920 : //
1921 : // In general, compaction_threshold should be <= compaction_upper_limit, but in case that
1922 : // the constraint is not respected, we use the larger of the two.
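 : //
 : // For example (illustrative numbers, not necessarily the configured defaults): with an upper
 : // limit of 20 L0 layers and a 256 MiB checkpoint distance, delta_size_limit works out to
 : // 20 * 256 MiB = 5 GiB of delta data per compaction pass.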
1923 23 : let delta_size_limit = std::cmp::max(
1924 23 : self.get_compaction_upper_limit(),
1925 23 : self.get_compaction_threshold(),
1926 23 : ) as u64
1927 23 : * std::cmp::max(self.get_checkpoint_distance(), DEFAULT_CHECKPOINT_DISTANCE);
1928 :
1929 23 : let mut fully_compacted = true;
1930 :
1931 23 : deltas_to_compact.push(first_level0_delta.download_and_keep_resident(ctx).await?);
1932 201 : for l in level0_deltas_iter {
1933 178 : let lsn_range = &l.layer_desc().lsn_range;
1934 :
1935 178 : if lsn_range.start != prev_lsn_end {
1936 0 : break;
1937 178 : }
1938 178 : deltas_to_compact.push(l.download_and_keep_resident(ctx).await?);
1939 178 : deltas_to_compact_bytes += l.metadata().file_size;
1940 178 : prev_lsn_end = lsn_range.end;
1941 :
1942 178 : if deltas_to_compact_bytes >= delta_size_limit {
1943 0 : info!(
1944 0 : l0_deltas_selected = deltas_to_compact.len(),
1945 0 : l0_deltas_total = level0_deltas.len(),
1946 0 : "L0 compaction picker hit max delta layer size limit: {}",
1947 : delta_size_limit
1948 : );
1949 0 : fully_compacted = false;
1950 :
1951 : // Proceed with compaction, but only a subset of L0s
1952 0 : break;
1953 178 : }
1954 : }
1955 23 : let lsn_range = Range {
1956 23 : start: deltas_to_compact
1957 23 : .first()
1958 23 : .unwrap()
1959 23 : .layer_desc()
1960 23 : .lsn_range
1961 23 : .start,
1962 23 : end: deltas_to_compact.last().unwrap().layer_desc().lsn_range.end,
1963 23 : };
1964 :
1965 23 : info!(
1966 0 : "Starting Level0 compaction in LSN range {}-{} for {} layers ({} deltas in total)",
1967 : lsn_range.start,
1968 : lsn_range.end,
1969 0 : deltas_to_compact.len(),
1970 0 : level0_deltas.len()
1971 : );
1972 :
1973 201 : for l in deltas_to_compact.iter() {
1974 201 : info!("compact includes {l}");
1975 : }
1976 :
1977 : // We don't need the original list of layers anymore. Drop it so that
1978 : // we don't accidentally use it later in the function.
1979 23 : drop(level0_deltas);
1980 :
1981 23 : stats.compaction_prerequisites_micros = stats.read_lock_acquisition_micros.till_now();
1982 :
1983 : // TODO: replace with streaming k-merge
1984 23 : let all_keys = {
1985 23 : let mut all_keys = Vec::new();
1986 201 : for l in deltas_to_compact.iter() {
1987 201 : if self.cancel.is_cancelled() {
1988 0 : return Err(CompactionError::ShuttingDown);
1989 201 : }
1990 201 : let delta = l.get_as_delta(ctx).await.map_err(CompactionError::Other)?;
1991 201 : let keys = delta
1992 201 : .index_entries(ctx)
1993 201 : .await
1994 201 : .map_err(CompactionError::Other)?;
1995 201 : all_keys.extend(keys);
1996 : }
1997 : // The current stdlib sorting implementation is designed in a way where it is
1998 : // particularly fast when the slice is made up of sorted sub-ranges.
1999 2137942 : all_keys.sort_by_key(|DeltaEntry { key, lsn, .. }| (*key, *lsn));
2000 23 : all_keys
2001 : };
2002 :
2003 23 : stats.read_lock_held_key_sort_micros = stats.compaction_prerequisites_micros.till_now();
2004 :
2005 : // Determine N largest holes where N is number of compacted layers. The vec is sorted by key range start.
2006 : //
2007 : // A hole is a key range for which this compaction doesn't have any WAL records.
2008 : // Our goal in this compaction iteration is to avoid creating L1s that, in terms of their key range,
2009 : // cover the hole, but actually don't contain any WAL records for that key range.
2010 : // The reason is that the mere stack of L1s (`count_deltas`) triggers image layer creation (`create_image_layers`).
2011 : // That image layer creation would be useless for a hole range covered by L1s that don't contain any WAL records.
2012 : //
2013 : // The algorithm chooses holes as follows.
2014 : // - Slide a 2-window over the keys in key orde to get the hole range (=distance between two keys).
2015 : // - Filter: min threshold on range length
2016 : // - Rank: by coverage size (=number of image layers required to reconstruct each key in the range for which we have any data)
2017 : //
2018 : // For more details, intuition, and some ASCII art see https://github.com/neondatabase/neon/pull/3597#discussion_r1112704451
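 : //
 : // As a rough illustration (hypothetical numbers): with a 128 MiB target file size and 8 KiB
 : // pages, min_hole_range is 128 MiB / 8 KiB = 16384 key positions, so a gap between consecutive
 : // keys only becomes a candidate hole if it spans at least that many positions and is covered by
 : // at least min_hole_coverage_size (3) image layers.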
2019 : #[derive(PartialEq, Eq)]
2020 : struct Hole {
2021 : key_range: Range<Key>,
2022 : coverage_size: usize,
2023 : }
2024 23 : let holes: Vec<Hole> = {
2025 : use std::cmp::Ordering;
2026 : impl Ord for Hole {
2027 0 : fn cmp(&self, other: &Self) -> Ordering {
2028 0 : self.coverage_size.cmp(&other.coverage_size).reverse()
2029 0 : }
2030 : }
2031 : impl PartialOrd for Hole {
2032 0 : fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2033 0 : Some(self.cmp(other))
2034 0 : }
2035 : }
2036 23 : let max_holes = deltas_to_compact.len();
2037 23 : let min_hole_range = (target_file_size / page_cache::PAGE_SZ as u64) as i128;
2038 23 : let min_hole_coverage_size = 3; // TODO: something more flexible?
2039 : // min-heap (reserve space for one more element added before eviction)
2040 23 : let mut heap: BinaryHeap<Hole> = BinaryHeap::with_capacity(max_holes + 1);
2041 23 : let mut prev: Option<Key> = None;
2042 :
2043 1032019 : for &DeltaEntry { key: next_key, .. } in all_keys.iter() {
2044 1032019 : if let Some(prev_key) = prev {
2045 : // Just a first, fast filter: do not create hole entries for metadata keys. The last hole in the
2046 : // compaction is the gap between the data keys and the metadata keys.
2047 1031996 : if next_key.to_i128() - prev_key.to_i128() >= min_hole_range
2048 274 : && !Key::is_metadata_key(&prev_key)
2049 : {
2050 0 : let key_range = prev_key..next_key;
2051 : // Measuring a hole by simply subtracting the i128 representations of the key range boundaries
2052 : // doesn't make much sense, because the largest holes would correspond to field1/field2 changes.
2053 : // We are mostly interested in eliminating holes which cause generation of excessive image layers.
2054 : // That is why it is better to measure the size of a hole as the number of covering image layers.
2055 0 : let coverage_size = {
2056 : // TODO: optimize this with copy-on-write layer map.
2057 0 : let guard = self.layers.read(LayerManagerLockHolder::Compaction).await;
2058 0 : let layers = guard.layer_map()?;
2059 0 : layers.image_coverage(&key_range, l0_last_record_lsn).len()
2060 : };
2061 0 : if coverage_size >= min_hole_coverage_size {
2062 0 : heap.push(Hole {
2063 0 : key_range,
2064 0 : coverage_size,
2065 0 : });
2066 0 : if heap.len() > max_holes {
2067 0 : heap.pop(); // remove smallest hole
2068 0 : }
2069 0 : }
2070 1031996 : }
2071 23 : }
2072 1032019 : prev = Some(next_key.next());
2073 : }
2074 23 : let mut holes = heap.into_vec();
2075 23 : holes.sort_unstable_by_key(|hole| hole.key_range.start);
2076 23 : holes
2077 : };
2078 23 : stats.read_lock_held_compute_holes_micros = stats.read_lock_held_key_sort_micros.till_now();
2079 :
2080 23 : if self.cancel.is_cancelled() {
2081 0 : return Err(CompactionError::ShuttingDown);
2082 23 : }
2083 :
2084 23 : stats.read_lock_drop_micros = stats.read_lock_held_compute_holes_micros.till_now();
2085 :
2086 : // This iterator walks through all key-value pairs from all the layers
2087 : // we're compacting, in key, LSN order.
2088 : // If there's both a Value::Image and Value::WalRecord for the same (key,lsn),
2089 : // then the Value::Image is ordered before Value::WalRecord.
2090 23 : let mut all_values_iter = {
2091 23 : let mut deltas = Vec::with_capacity(deltas_to_compact.len());
2092 201 : for l in deltas_to_compact.iter() {
2093 201 : let l = l.get_as_delta(ctx).await.map_err(CompactionError::Other)?;
2094 201 : deltas.push(l);
2095 : }
2096 23 : MergeIterator::create_with_options(
2097 23 : &deltas,
2098 23 : &[],
2099 23 : ctx,
2100 23 : 1024 * 8192, /* 8 MiB buffer per layer iterator */
2101 : 1024,
2102 : )
2103 : };
2104 :
2105 : // This iterator walks through all keys and is needed to calculate size used by each key
2106 23 : let mut all_keys_iter = all_keys
2107 23 : .iter()
2108 1032019 : .map(|DeltaEntry { key, lsn, size, .. }| (*key, *lsn, *size))
2109 1031996 : .coalesce(|mut prev, cur| {
2110 : // Coalesce (key, lsn, size) entries that belong to the same key.
2111 : // This ensures that compaction doesn't put them
2112 : // into different layer files.
2113 : // Still limit this by the target file size,
2114 : // so that we keep the size of the files in
2115 : // check.
2116 1031996 : if prev.0 == cur.0 && prev.2 < target_file_size {
2117 14310 : prev.2 += cur.2;
2118 14310 : Ok(prev)
2119 : } else {
2120 1017686 : Err((prev, cur))
2121 : }
2122 1031996 : });
2123 :
2124 : // Merge the contents of all the input delta layers into a new set
2125 : // of delta layers, based on the current partitioning.
2126 : //
2127 : // We split the new delta layers on the key dimension. We iterate through the key space, and for each key, check if including the next key in the current output layer we're building would cause the layer to become too large. If so, we flush the current output layer and start a new one.
2128 : // It's possible that there is a single key with so many page versions that storing all of them in a single layer file
2129 : // would be too large. In that case, we also split on the LSN dimension.
2130 : //
2131 : // LSN
2132 : // ^
2133 : // |
2134 : // | +-----------+ +--+--+--+--+
2135 : // | | | | | | | |
2136 : // | +-----------+ | | | | |
2137 : // | | | | | | | |
2138 : // | +-----------+ ==> | | | | |
2139 : // | | | | | | | |
2140 : // | +-----------+ | | | | |
2141 : // | | | | | | | |
2142 : // | +-----------+ +--+--+--+--+
2143 : // |
2144 : // +--------------> key
2145 : //
2146 : //
2147 : // If one key (X) has a lot of page versions:
2148 : //
2149 : // LSN
2150 : // ^
2151 : // | (X)
2152 : // | +-----------+ +--+--+--+--+
2153 : // | | | | | | | |
2154 : // | +-----------+ | | +--+ |
2155 : // | | | | | | | |
2156 : // | +-----------+ ==> | | | | |
2157 : // | | | | | +--+ |
2158 : // | +-----------+ | | | | |
2159 : // | | | | | | | |
2160 : // | +-----------+ +--+--+--+--+
2161 : // |
2162 : // +--------------> key
2163 : // TODO: this actually divides the layers into fixed-size chunks, not
2164 : // based on the partitioning.
2165 : //
2166 : // TODO: we should also opportunistically materialize and
2167 : // garbage collect what we can.
2168 23 : let mut new_layers = Vec::new();
2169 23 : let mut prev_key: Option<Key> = None;
2170 23 : let mut writer: Option<DeltaLayerWriter> = None;
2171 23 : let mut key_values_total_size = 0u64;
2172 23 : let mut dup_start_lsn: Lsn = Lsn::INVALID; // start LSN of layer containing values of the single key
2173 23 : let mut dup_end_lsn: Lsn = Lsn::INVALID; // end LSN of layer containing values of the single key
2174 23 : let mut next_hole = 0; // index of next hole in holes vector
2175 :
2176 23 : let mut keys = 0;
2177 :
2178 1032042 : while let Some((key, lsn, value)) = all_values_iter
2179 1032042 : .next()
2180 1032042 : .await
2181 1032042 : .map_err(CompactionError::Other)?
2182 : {
2183 1032019 : keys += 1;
2184 :
2185 1032019 : if keys % 32_768 == 0 && self.cancel.is_cancelled() {
2186 : // avoid hitting the cancellation token on every key. In benches, we end up
2187 : // shuffling on the order of a million keys per layer, so this means we'll check it
2188 : // around tens of times per layer.
2189 0 : return Err(CompactionError::ShuttingDown);
2190 1032019 : }
2191 :
2192 1032019 : let same_key = prev_key == Some(key);
2193 : // We need to check key boundaries once we reach next key or end of layer with the same key
2194 1032019 : if !same_key || lsn == dup_end_lsn {
2195 1017709 : let mut next_key_size = 0u64;
2196 1017709 : let is_dup_layer = dup_end_lsn.is_valid();
2197 1017709 : dup_start_lsn = Lsn::INVALID;
2198 1017709 : if !same_key {
2199 1017709 : dup_end_lsn = Lsn::INVALID;
2200 1017709 : }
2201 : // Determine size occupied by this key. We stop at next key or when size becomes larger than target_file_size
2202 1017709 : for (next_key, next_lsn, next_size) in all_keys_iter.by_ref() {
2203 1017709 : next_key_size = next_size;
2204 1017709 : if key != next_key {
2205 1017686 : if dup_end_lsn.is_valid() {
2206 0 : // We are writing a segment with duplicates:
2207 0 : // place all remaining values of this key in a separate segment
2208 0 : dup_start_lsn = dup_end_lsn; // the new segment starts where the old one stops
2209 0 : dup_end_lsn = lsn_range.end; // there are no more values of this key till end of LSN range
2210 1017686 : }
2211 1017686 : break;
2212 23 : }
2213 23 : key_values_total_size += next_size;
2214 : // Check if it is time to split the segment: when the total size of the key's values is larger than the target file size.
2215 : // We need to avoid generation of empty segments if next_size > target_file_size.
2216 23 : if key_values_total_size > target_file_size && lsn != next_lsn {
2217 : // Split the key between multiple layers: such a layer can contain only a single key
2218 0 : dup_start_lsn = if dup_end_lsn.is_valid() {
2219 0 : dup_end_lsn // new segment with duplicates starts where old one stops
2220 : } else {
2221 0 : lsn // start with the first LSN for this key
2222 : };
2223 0 : dup_end_lsn = next_lsn; // upper LSN boundary is exclusive
2224 0 : break;
2225 23 : }
2226 : }
2227 : // handle case when loop reaches last key: in this case dup_end is non-zero but dup_start is not set.
2228 1017709 : if dup_end_lsn.is_valid() && !dup_start_lsn.is_valid() {
2229 0 : dup_start_lsn = dup_end_lsn;
2230 0 : dup_end_lsn = lsn_range.end;
2231 1017709 : }
2232 1017709 : if writer.is_some() {
2233 1017686 : let written_size = writer.as_mut().unwrap().size();
2234 1017686 : let contains_hole =
2235 1017686 : next_hole < holes.len() && key >= holes[next_hole].key_range.end;
2236 : // check if key cause layer overflow or contains hole...
2237 1017686 : if is_dup_layer
2238 1017686 : || dup_end_lsn.is_valid()
2239 1017686 : || written_size + key_values_total_size > target_file_size
2240 1017546 : || contains_hole
2241 : {
2242 : // ... if so, flush previous layer and prepare to write new one
2243 140 : let (desc, path) = writer
2244 140 : .take()
2245 140 : .unwrap()
2246 140 : .finish(prev_key.unwrap().next(), ctx)
2247 140 : .await
2248 140 : .map_err(CompactionError::Other)?;
2249 140 : let new_delta = Layer::finish_creating(self.conf, self, desc, &path)
2250 140 : .map_err(CompactionError::Other)?;
2251 :
2252 140 : new_layers.push(new_delta);
2253 140 : writer = None;
2254 :
2255 140 : if contains_hole {
2256 0 : // skip hole
2257 0 : next_hole += 1;
2258 140 : }
2259 1017546 : }
2260 23 : }
2261 : // Remember size of key value because at next iteration we will access next item
2262 1017709 : key_values_total_size = next_key_size;
2263 14310 : }
2264 1032019 : fail_point!("delta-layer-writer-fail-before-finish", |_| {
2265 0 : Err(CompactionError::Other(anyhow::anyhow!(
2266 0 : "failpoint delta-layer-writer-fail-before-finish"
2267 0 : )))
2268 0 : });
2269 :
2270 1032019 : if !self.shard_identity.is_key_disposable(&key) {
2271 1032019 : if writer.is_none() {
2272 163 : if self.cancel.is_cancelled() {
2273 : // to be somewhat responsive to cancellation, check for each new layer
2274 0 : return Err(CompactionError::ShuttingDown);
2275 163 : }
2276 : // Create a writer if not initialized yet
2277 163 : writer = Some(
2278 163 : DeltaLayerWriter::new(
2279 163 : self.conf,
2280 163 : self.timeline_id,
2281 163 : self.tenant_shard_id,
2282 163 : key,
2283 163 : if dup_end_lsn.is_valid() {
2284 : // this is a layer containing slice of values of the same key
2285 0 : debug!("Create new dup layer {}..{}", dup_start_lsn, dup_end_lsn);
2286 0 : dup_start_lsn..dup_end_lsn
2287 : } else {
2288 163 : debug!("Create new layer {}..{}", lsn_range.start, lsn_range.end);
2289 163 : lsn_range.clone()
2290 : },
2291 163 : &self.gate,
2292 163 : self.cancel.clone(),
2293 163 : ctx,
2294 : )
2295 163 : .await
2296 163 : .map_err(CompactionError::Other)?,
2297 : );
2298 :
2299 163 : keys = 0;
2300 1031856 : }
2301 :
2302 1032019 : writer
2303 1032019 : .as_mut()
2304 1032019 : .unwrap()
2305 1032019 : .put_value(key, lsn, value, ctx)
2306 1032019 : .await?;
2307 : } else {
2308 0 : let owner = self.shard_identity.get_shard_number(&key);
2309 :
2310 : // This happens after a shard split, when we're compacting an L0 created by our parent shard
2311 0 : debug!("dropping key {key} during compaction (it belongs on shard {owner})");
2312 : }
2313 :
2314 1032019 : if !new_layers.is_empty() {
2315 9893 : fail_point!("after-timeline-compacted-first-L1");
2316 1022126 : }
2317 :
2318 1032019 : prev_key = Some(key);
2319 : }
2320 23 : if let Some(writer) = writer {
2321 23 : let (desc, path) = writer
2322 23 : .finish(prev_key.unwrap().next(), ctx)
2323 23 : .await
2324 23 : .map_err(CompactionError::Other)?;
2325 23 : let new_delta = Layer::finish_creating(self.conf, self, desc, &path)
2326 23 : .map_err(CompactionError::Other)?;
2327 23 : new_layers.push(new_delta);
2328 0 : }
2329 :
2330 : // Sync layers
2331 23 : if !new_layers.is_empty() {
2332 : // Print a warning if the created layer is larger than double the target size
2333 : // Add two pages for potential overhead. This should in theory be already
2334 : // accounted for in the target calculation, but for very small targets,
2335 : // we still might easily hit the limit otherwise.
2336 23 : let warn_limit = target_file_size * 2 + page_cache::PAGE_SZ as u64 * 2;
2337 163 : for layer in new_layers.iter() {
2338 163 : if layer.layer_desc().file_size > warn_limit {
2339 0 : warn!(
2340 : %layer,
2341 0 : "created delta file of size {} larger than double of target of {target_file_size}", layer.layer_desc().file_size
2342 : );
2343 163 : }
2344 : }
2345 :
2346 : // The writer.finish() above already did the fsync of the inodes.
2347 : // We just need to fsync the directory in which these inodes are linked,
2348 : // which we know to be the timeline directory.
2349 : //
2350 : // We use fatal_err() below because after writer.finish() returns with success,
2351 : // the in-memory state of the filesystem already has the layer file in its final place,
2352 : // and subsequent pageserver code could think it's durable while it really isn't.
2353 23 : let timeline_dir = VirtualFile::open(
2354 23 : &self
2355 23 : .conf
2356 23 : .timeline_path(&self.tenant_shard_id, &self.timeline_id),
2357 23 : ctx,
2358 23 : )
2359 23 : .await
2360 23 : .fatal_err("VirtualFile::open for timeline dir fsync");
2361 23 : timeline_dir
2362 23 : .sync_all()
2363 23 : .await
2364 23 : .fatal_err("VirtualFile::sync_all timeline dir");
2365 0 : }
2366 :
2367 23 : stats.write_layer_files_micros = stats.read_lock_drop_micros.till_now();
2368 23 : stats.new_deltas_count = Some(new_layers.len());
2369 163 : stats.new_deltas_size = Some(new_layers.iter().map(|l| l.layer_desc().file_size).sum());
2370 :
2371 23 : match TryInto::<CompactLevel0Phase1Stats>::try_into(stats)
2372 23 : .and_then(|stats| serde_json::to_string(&stats).context("serde_json::to_string"))
2373 : {
2374 23 : Ok(stats_json) => {
2375 23 : info!(
2376 0 : stats_json = stats_json.as_str(),
2377 0 : "compact_level0_phase1 stats available"
2378 : )
2379 : }
2380 0 : Err(e) => {
2381 0 : warn!("compact_level0_phase1 stats failed to serialize: {:#}", e);
2382 : }
2383 : }
2384 :
2385 : // Without this, rustc complains about deltas_to_compact still
2386 : // being borrowed when we `.into_iter()` below.
2387 23 : drop(all_values_iter);
2388 :
2389 : Ok(CompactLevel0Phase1Result {
2390 23 : new_layers,
2391 23 : deltas_to_compact: deltas_to_compact
2392 23 : .into_iter()
2393 201 : .map(|x| x.drop_eviction_guard())
2394 23 : .collect::<Vec<_>>(),
2395 23 : outcome: if fully_compacted {
2396 23 : CompactionOutcome::Done
2397 : } else {
2398 0 : CompactionOutcome::Pending
2399 : },
2400 : })
2401 192 : }
2402 : }
2403 :
2404 : #[derive(Default)]
2405 : struct CompactLevel0Phase1Result {
2406 : new_layers: Vec<ResidentLayer>,
2407 : deltas_to_compact: Vec<Layer>,
2408 : // Whether we have included all L0 layers, or selected only part of them due to the
2409 : // L0 compaction size limit.
2410 : outcome: CompactionOutcome,
2411 : }
2412 :
2413 : #[derive(Default)]
2414 : struct CompactLevel0Phase1StatsBuilder {
2415 : version: Option<u64>,
2416 : tenant_id: Option<TenantShardId>,
2417 : timeline_id: Option<TimelineId>,
2418 : read_lock_acquisition_micros: DurationRecorder,
2419 : read_lock_held_key_sort_micros: DurationRecorder,
2420 : compaction_prerequisites_micros: DurationRecorder,
2421 : read_lock_held_compute_holes_micros: DurationRecorder,
2422 : read_lock_drop_micros: DurationRecorder,
2423 : write_layer_files_micros: DurationRecorder,
2424 : level0_deltas_count: Option<usize>,
2425 : new_deltas_count: Option<usize>,
2426 : new_deltas_size: Option<u64>,
2427 : }
2428 :
2429 : #[derive(serde::Serialize)]
2430 : struct CompactLevel0Phase1Stats {
2431 : version: u64,
2432 : tenant_id: TenantShardId,
2433 : timeline_id: TimelineId,
2434 : read_lock_acquisition_micros: RecordedDuration,
2435 : read_lock_held_key_sort_micros: RecordedDuration,
2436 : compaction_prerequisites_micros: RecordedDuration,
2437 : read_lock_held_compute_holes_micros: RecordedDuration,
2438 : read_lock_drop_micros: RecordedDuration,
2439 : write_layer_files_micros: RecordedDuration,
2440 : level0_deltas_count: usize,
2441 : new_deltas_count: usize,
2442 : new_deltas_size: u64,
2443 : }
2444 :
2445 : impl TryFrom<CompactLevel0Phase1StatsBuilder> for CompactLevel0Phase1Stats {
2446 : type Error = anyhow::Error;
2447 :
2448 23 : fn try_from(value: CompactLevel0Phase1StatsBuilder) -> Result<Self, Self::Error> {
2449 : Ok(Self {
2450 23 : version: value.version.ok_or_else(|| anyhow!("version not set"))?,
2451 23 : tenant_id: value
2452 23 : .tenant_id
2453 23 : .ok_or_else(|| anyhow!("tenant_id not set"))?,
2454 23 : timeline_id: value
2455 23 : .timeline_id
2456 23 : .ok_or_else(|| anyhow!("timeline_id not set"))?,
2457 23 : read_lock_acquisition_micros: value
2458 23 : .read_lock_acquisition_micros
2459 23 : .into_recorded()
2460 23 : .ok_or_else(|| anyhow!("read_lock_acquisition_micros not set"))?,
2461 23 : read_lock_held_key_sort_micros: value
2462 23 : .read_lock_held_key_sort_micros
2463 23 : .into_recorded()
2464 23 : .ok_or_else(|| anyhow!("read_lock_held_key_sort_micros not set"))?,
2465 23 : compaction_prerequisites_micros: value
2466 23 : .compaction_prerequisites_micros
2467 23 : .into_recorded()
2468 23 : .ok_or_else(|| anyhow!("read_lock_held_prerequisites_micros not set"))?,
2469 23 : read_lock_held_compute_holes_micros: value
2470 23 : .read_lock_held_compute_holes_micros
2471 23 : .into_recorded()
2472 23 : .ok_or_else(|| anyhow!("read_lock_held_compute_holes_micros not set"))?,
2473 23 : read_lock_drop_micros: value
2474 23 : .read_lock_drop_micros
2475 23 : .into_recorded()
2476 23 : .ok_or_else(|| anyhow!("read_lock_drop_micros not set"))?,
2477 23 : write_layer_files_micros: value
2478 23 : .write_layer_files_micros
2479 23 : .into_recorded()
2480 23 : .ok_or_else(|| anyhow!("write_layer_files_micros not set"))?,
2481 23 : level0_deltas_count: value
2482 23 : .level0_deltas_count
2483 23 : .ok_or_else(|| anyhow!("level0_deltas_count not set"))?,
2484 23 : new_deltas_count: value
2485 23 : .new_deltas_count
2486 23 : .ok_or_else(|| anyhow!("new_deltas_count not set"))?,
2487 23 : new_deltas_size: value
2488 23 : .new_deltas_size
2489 23 : .ok_or_else(|| anyhow!("new_deltas_size not set"))?,
2490 : })
2491 23 : }
2492 : }
2493 :
2494 : impl Timeline {
2495 : /// Entry point for new tiered compaction algorithm.
2496 : ///
2497 : /// All the real work is in the implementation in the pageserver_compaction
2498 : /// crate. The code here would apply to any algorithm implemented by the
2499 : /// same interface, but tiered is the only one at the moment.
2500 : ///
2501 : /// TODO: cancellation
2502 0 : pub(crate) async fn compact_tiered(
2503 0 : self: &Arc<Self>,
2504 0 : _cancel: &CancellationToken,
2505 0 : ctx: &RequestContext,
2506 0 : ) -> Result<(), CompactionError> {
2507 0 : let fanout = self.get_compaction_threshold() as u64;
2508 0 : let target_file_size = self.get_checkpoint_distance();
2509 :
2510 : // Find the top of the historical layers
2511 0 : let end_lsn = {
2512 0 : let guard = self.layers.read(LayerManagerLockHolder::Compaction).await;
2513 0 : let layers = guard.layer_map()?;
2514 :
2515 0 : let l0_deltas = layers.level0_deltas();
2516 :
2517 : // As an optimization, if we find that there are too few L0 layers,
2518 : // bail out early. We know that the compaction algorithm would do
2519 : // nothing in that case.
2520 0 : if l0_deltas.len() < fanout as usize {
2521 : // doesn't need compacting
2522 0 : return Ok(());
2523 0 : }
2524 0 : l0_deltas.iter().map(|l| l.lsn_range.end).max().unwrap()
2525 : };
2526 :
2527 : // Is the timeline being deleted?
2528 0 : if self.is_stopping() {
2529 0 : trace!("Dropping out of compaction on timeline shutdown");
2530 0 : return Err(CompactionError::ShuttingDown);
2531 0 : }
2532 :
2533 0 : let (dense_ks, _sparse_ks) = self
2534 0 : .collect_keyspace(end_lsn, ctx)
2535 0 : .await
2536 0 : .map_err(CompactionError::from_collect_keyspace)?;
2537 : // TODO(chi): ignore sparse_keyspace for now, compact it in the future.
2538 0 : let mut adaptor = TimelineAdaptor::new(self, (end_lsn, dense_ks));
2539 :
2540 0 : pageserver_compaction::compact_tiered::compact_tiered(
2541 0 : &mut adaptor,
2542 0 : end_lsn,
2543 0 : target_file_size,
2544 0 : fanout,
2545 0 : ctx,
2546 0 : )
2547 0 : .await
2548 : // TODO: compact_tiered needs to return CompactionError
2549 0 : .map_err(CompactionError::Other)?;
2550 :
2551 0 : adaptor.flush_updates().await?;
2552 0 : Ok(())
2553 0 : }
2554 :
2555 : /// Take a list of images and deltas, produce images and deltas according to GC horizon and retain_lsns.
2556 : ///
2557 : /// It takes a key, the values of the key within the compaction process, a GC horizon, and all retain_lsns below the horizon.
2558 : /// For now, it requires that `accumulated_values` contain the full history of the key (i.e., the record with the lowest LSN is
2559 : /// an image or a WAL record not requiring a base image). This restriction will be removed once we implement gc-compaction on branch.
2560 : ///
2561 : /// The function returns the deltas and the base image that need to be placed at each of the retain LSN. For example, we have:
2562 : ///
2563 : /// A@0x10, +B@0x20, +C@0x30, +D@0x40, +E@0x50, +F@0x60
2564 : /// horizon = 0x50, retain_lsn = 0x20, 0x40, delta_threshold=3
2565 : ///
2566 : /// The function will produce:
2567 : ///
2568 : /// ```plain
2569 : /// 0x20(retain_lsn) -> img=AB@0x20 always produce a single image below the lowest retain LSN
2570 : /// 0x40(retain_lsn) -> deltas=[+C@0x30, +D@0x40] two deltas since the last base image, keeping the deltas
2571 : /// 0x50(horizon) -> deltas=[ABCDE@0x50] three deltas since the last base image, generate an image but put it in the delta
2572 : /// above_horizon -> deltas=[+F@0x60] full history above the horizon
2573 : /// ```
2574 : ///
2575 : /// Note that `accumulated_values` must be sorted by LSN and should belong to a single key.
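 : ///
 : /// To make the bucketing in step 1 of the implementation concrete for the example above: the LSN
 : /// split points are [0x20, 0x40, 0x50], and each record goes into the first bucket whose split
 : /// point is >= its LSN (each bucket holds lsn_prev < lsn <= lsn_split):
 : ///
 : /// ```plain
 : /// bucket <= 0x20 : A@0x10, +B@0x20
 : /// bucket <= 0x40 : +C@0x30, +D@0x40
 : /// bucket <= 0x50 : +E@0x50
 : /// above horizon  : +F@0x60
 : /// ```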
2576 : #[allow(clippy::too_many_arguments)]
2577 324 : pub(crate) async fn generate_key_retention(
2578 324 : self: &Arc<Timeline>,
2579 324 : key: Key,
2580 324 : full_history: &[(Key, Lsn, Value)],
2581 324 : horizon: Lsn,
2582 324 : retain_lsn_below_horizon: &[Lsn],
2583 324 : delta_threshold_cnt: usize,
2584 324 : base_img_from_ancestor: Option<(Key, Lsn, Bytes)>,
2585 324 : verification: bool,
2586 324 : ) -> anyhow::Result<KeyHistoryRetention> {
2587 : // Pre-checks for the invariants
2588 :
2589 324 : let debug_mode = cfg!(debug_assertions) || cfg!(feature = "testing");
2590 :
2591 324 : if debug_mode {
2592 786 : for (log_key, _, _) in full_history {
2593 462 : assert_eq!(log_key, &key, "mismatched key");
2594 : }
2595 324 : for i in 1..full_history.len() {
2596 138 : assert!(full_history[i - 1].1 <= full_history[i].1, "unordered LSN");
2597 138 : if full_history[i - 1].1 == full_history[i].1 {
2598 0 : assert!(
2599 0 : matches!(full_history[i - 1].2, Value::Image(_)),
2600 0 : "unordered delta/image, or duplicated delta"
2601 : );
2602 138 : }
2603 : }
2604 : // There used to be an assertion here that, when there is no base image, the first
2605 : // record in the history is `will_init`, but it was removed.
2606 : // This is explained in the test cases for generate_key_retention.
2607 : // Search "incomplete history" for more information.
2608 714 : for lsn in retain_lsn_below_horizon {
2609 390 : assert!(lsn < &horizon, "retain lsn must be below horizon")
2610 : }
2611 324 : for i in 1..retain_lsn_below_horizon.len() {
2612 178 : assert!(
2613 178 : retain_lsn_below_horizon[i - 1] <= retain_lsn_below_horizon[i],
2614 0 : "unordered LSN"
2615 : );
2616 : }
2617 0 : }
2618 324 : let has_ancestor = base_img_from_ancestor.is_some();
2619 : // Step 1: split history into len(retain_lsn_below_horizon) + 2 buckets, where the last bucket is for all deltas above the horizon,
2620 : // and the second-to-last bucket is for the horizon. Each bucket contains lsn_last_bucket < deltas <= lsn_this_bucket.
2621 324 : let (mut split_history, lsn_split_points) = {
2622 324 : let mut split_history = Vec::new();
2623 324 : split_history.resize_with(retain_lsn_below_horizon.len() + 2, Vec::new);
2624 324 : let mut lsn_split_points = Vec::with_capacity(retain_lsn_below_horizon.len() + 1);
2625 714 : for lsn in retain_lsn_below_horizon {
2626 390 : lsn_split_points.push(*lsn);
2627 390 : }
2628 324 : lsn_split_points.push(horizon);
2629 324 : let mut current_idx = 0;
2630 786 : for item @ (_, lsn, _) in full_history {
2631 584 : while current_idx < lsn_split_points.len() && *lsn > lsn_split_points[current_idx] {
2632 122 : current_idx += 1;
2633 122 : }
2634 462 : split_history[current_idx].push(item);
2635 : }
2636 324 : (split_history, lsn_split_points)
2637 : };
2638 : // Step 2: filter out duplicated records due to the k-merge of image/delta layers
2639 1362 : for split_for_lsn in &mut split_history {
2640 1038 : let mut prev_lsn = None;
2641 1038 : let mut new_split_for_lsn = Vec::with_capacity(split_for_lsn.len());
2642 1038 : for record @ (_, lsn, _) in std::mem::take(split_for_lsn) {
2643 462 : if let Some(prev_lsn) = &prev_lsn {
2644 62 : if *prev_lsn == lsn {
2645 : // The case that we have an LSN with both data from the delta layer and the image layer. As
2646 : // `ValueWrapper` ensures that an image is ordered before a delta at the same LSN, we simply
2647 : // drop this delta and keep the image.
2648 : //
2649 : // For example, we have delta layer key1@0x10, key1@0x20, and image layer key1@0x10, we will
2650 : // keep the image for key1@0x10 and the delta for key1@0x20. key1@0x10 delta will be simply
2651 : // dropped.
2652 : //
2653 : // TODO: in case we have both delta + images for a given LSN and it does not exceed the delta
2654 : // threshold, we could have kept delta instead to save space. This is an optimization for the future.
2655 0 : continue;
2656 62 : }
2657 400 : }
2658 462 : prev_lsn = Some(lsn);
2659 462 : new_split_for_lsn.push(record);
2660 : }
2661 1038 : *split_for_lsn = new_split_for_lsn;
2662 : }
2663 : // Step 3: generate images when necessary
2664 324 : let mut retention = Vec::with_capacity(split_history.len());
2665 324 : let mut records_since_last_image = 0;
2666 324 : let batch_cnt = split_history.len();
2667 324 : assert!(
2668 324 : batch_cnt >= 2,
2669 0 : "should have at least below + above horizon batches"
2670 : );
2671 324 : let mut replay_history: Vec<(Key, Lsn, Value)> = Vec::new();
2672 324 : if let Some((key, lsn, ref img)) = base_img_from_ancestor {
2673 21 : replay_history.push((key, lsn, Value::Image(img.clone())));
2674 303 : }
2675 :
2676 : /// Generate debug information for the replay history
2677 0 : fn generate_history_trace(replay_history: &[(Key, Lsn, Value)]) -> String {
2678 : use std::fmt::Write;
2679 0 : let mut output = String::new();
2680 0 : if let Some((key, _, _)) = replay_history.first() {
2681 0 : write!(output, "key={key} ").unwrap();
2682 0 : let mut cnt = 0;
2683 0 : for (_, lsn, val) in replay_history {
2684 0 : if val.is_image() {
2685 0 : write!(output, "i@{lsn} ").unwrap();
2686 0 : } else if val.will_init() {
2687 0 : write!(output, "di@{lsn} ").unwrap();
2688 0 : } else {
2689 0 : write!(output, "d@{lsn} ").unwrap();
2690 0 : }
2691 0 : cnt += 1;
2692 0 : if cnt >= 128 {
2693 0 : write!(output, "... and more").unwrap();
2694 0 : break;
2695 0 : }
2696 : }
2697 0 : } else {
2698 0 : write!(output, "<no history>").unwrap();
2699 0 : }
2700 0 : output
2701 0 : }
2702 :
2703 0 : fn generate_debug_trace(
2704 0 : replay_history: Option<&[(Key, Lsn, Value)]>,
2705 0 : full_history: &[(Key, Lsn, Value)],
2706 0 : lsns: &[Lsn],
2707 0 : horizon: Lsn,
2708 0 : ) -> String {
2709 : use std::fmt::Write;
2710 0 : let mut output = String::new();
2711 0 : if let Some(replay_history) = replay_history {
2712 0 : writeln!(
2713 0 : output,
2714 0 : "replay_history: {}",
2715 0 : generate_history_trace(replay_history)
2716 0 : )
2717 0 : .unwrap();
2718 0 : } else {
2719 0 : writeln!(output, "replay_history: <disabled>",).unwrap();
2720 0 : }
2721 0 : writeln!(
2722 0 : output,
2723 0 : "full_history: {}",
2724 0 : generate_history_trace(full_history)
2725 : )
2726 0 : .unwrap();
2727 0 : writeln!(
2728 0 : output,
2729 0 : "when processing: [{}] horizon={}",
2730 0 : lsns.iter().map(|l| format!("{l}")).join(","),
2731 : horizon
2732 : )
2733 0 : .unwrap();
2734 0 : output
2735 0 : }
2736 :
2737 324 : let mut key_exists = false;
2738 1037 : for (i, split_for_lsn) in split_history.into_iter().enumerate() {
2739 : // TODO: there could be image keys inside the splits, and we can compute records_since_last_image accordingly.
2740 1037 : records_since_last_image += split_for_lsn.len();
2741 : // Whether to produce an image into the final layer files
2742 1037 : let produce_image = if i == 0 && !has_ancestor {
2743 : // We always generate images for the first batch (below horizon / lowest retain_lsn)
2744 303 : true
2745 734 : } else if i == batch_cnt - 1 {
2746 : // Do not generate images for the last batch (above horizon)
2747 323 : false
2748 411 : } else if records_since_last_image == 0 {
2749 322 : false
2750 89 : } else if records_since_last_image >= delta_threshold_cnt {
2751 : // Generate images when there are too many records
2752 3 : true
2753 : } else {
2754 86 : false
2755 : };
2756 1037 : replay_history.extend(split_for_lsn.iter().map(|x| (*x).clone()));
2757 : // Only retain the items after the last image record
2758 1277 : for idx in (0..replay_history.len()).rev() {
2759 1277 : if replay_history[idx].2.will_init() {
2760 1037 : replay_history = replay_history[idx..].to_vec();
2761 1037 : break;
2762 240 : }
2763 : }
2764 1037 : if replay_history.is_empty() && !key_exists {
2765 : // The key does not exist at earlier LSN, we can skip this iteration.
2766 0 : retention.push(Vec::new());
2767 0 : continue;
2768 1037 : } else {
2769 1037 : key_exists = true;
2770 1037 : }
2771 1037 : let Some((_, _, val)) = replay_history.first() else {
2772 0 : unreachable!("replay history should not be empty once it exists")
2773 : };
2774 1037 : if !val.will_init() {
2775 0 : return Err(anyhow::anyhow!("invalid history, no base image")).with_context(|| {
2776 0 : generate_debug_trace(
2777 0 : Some(&replay_history),
2778 0 : full_history,
2779 0 : retain_lsn_below_horizon,
2780 0 : horizon,
2781 : )
2782 0 : });
2783 1037 : }
2784 : // Whether to reconstruct the image. In debug mode, we will generate an image
2785 : // at every retain_lsn to ensure data is not corrupted, but we won't put the
2786 : // image into the final layer.
2787 1037 : let img_and_lsn = if produce_image {
2788 306 : records_since_last_image = 0;
2789 306 : let replay_history_for_debug = if debug_mode {
2790 306 : Some(replay_history.clone())
2791 : } else {
2792 0 : None
2793 : };
2794 306 : let replay_history_for_debug_ref = replay_history_for_debug.as_deref();
2795 306 : let history = std::mem::take(&mut replay_history);
2796 306 : let mut img = None;
2797 306 : let mut records = Vec::with_capacity(history.len());
2798 306 : if let (_, lsn, Value::Image(val)) = history.first().as_ref().unwrap() {
2799 295 : img = Some((*lsn, val.clone()));
2800 295 : for (_, lsn, val) in history.into_iter().skip(1) {
2801 20 : let Value::WalRecord(rec) = val else {
2802 0 : return Err(anyhow::anyhow!(
2803 0 :                         "invalid history: first record is an image, expected the rest to be WAL records"
2804 0 : ))
2805 0 : .with_context(|| {
2806 0 : generate_debug_trace(
2807 0 : replay_history_for_debug_ref,
2808 0 : full_history,
2809 0 : retain_lsn_below_horizon,
2810 0 : horizon,
2811 : )
2812 0 : });
2813 : };
2814 20 : records.push((lsn, rec));
2815 : }
2816 : } else {
2817 18 : for (_, lsn, val) in history.into_iter() {
2818 18 : let Value::WalRecord(rec) = val else {
2819 0 :                         return Err(anyhow::anyhow!("invalid history: first record is a WAL record, expected the rest to be WAL records"))
2820 0 : .with_context(|| generate_debug_trace(
2821 0 : replay_history_for_debug_ref,
2822 0 : full_history,
2823 0 : retain_lsn_below_horizon,
2824 0 : horizon,
2825 : ));
2826 : };
2827 18 : records.push((lsn, rec));
2828 : }
2829 : }
2830 : // WAL redo requires records in the reverse LSN order
2831 306 : records.reverse();
2832 306 : let state = ValueReconstructState { img, records };
2833 :             // The last batch does not generate an image, so `i` is always in range, unless we force-generate
2834 :             // an image during testing.
2835 306 : let request_lsn = if i >= lsn_split_points.len() {
2836 0 : Lsn::MAX
2837 : } else {
2838 306 : lsn_split_points[i]
2839 : };
2840 306 : let img = self
2841 306 : .reconstruct_value(key, request_lsn, state, RedoAttemptType::GcCompaction)
2842 306 : .await?;
2843 305 : Some((request_lsn, img))
2844 : } else {
2845 731 : None
2846 : };
2847 1036 : if produce_image {
2848 305 : let (request_lsn, img) = img_and_lsn.unwrap();
2849 305 : replay_history.push((key, request_lsn, Value::Image(img.clone())));
2850 305 : retention.push(vec![(request_lsn, Value::Image(img))]);
2851 305 : } else {
2852 731 : let deltas = split_for_lsn
2853 731 : .iter()
2854 731 : .map(|(_, lsn, value)| (*lsn, value.clone()))
2855 731 : .collect_vec();
2856 731 : retention.push(deltas);
2857 : }
2858 : }
2859 323 : let mut result = Vec::with_capacity(retention.len());
2860 323 : assert_eq!(retention.len(), lsn_split_points.len() + 1);
2861 1036 : for (idx, logs) in retention.into_iter().enumerate() {
2862 1036 : if idx == lsn_split_points.len() {
2863 323 : let retention = KeyHistoryRetention {
2864 323 : below_horizon: result,
2865 323 : above_horizon: KeyLogAtLsn(logs),
2866 323 : };
2867 323 : if verification {
2868 323 : retention
2869 323 : .verify(key, &base_img_from_ancestor, full_history, self)
2870 323 : .await?;
2871 0 : }
2872 323 : return Ok(retention);
2873 713 : } else {
2874 713 : result.push((lsn_split_points[idx], KeyLogAtLsn(logs)));
2875 713 : }
2876 : }
2877 0 : unreachable!("key retention is empty")
2878 324 : }
2879 :
2880 : /// Check how much space is left on the disk
2881 27 : async fn check_available_space(self: &Arc<Self>) -> anyhow::Result<u64> {
2882 27 : let tenants_dir = self.conf.tenants_path();
2883 :
2884 27 : let stat = Statvfs::get(&tenants_dir, None)
2885 27 : .context("statvfs failed, presumably directory got unlinked")?;
2886 :
2887 27 : let (avail_bytes, _) = stat.get_avail_total_bytes();
2888 :
2889 27 : Ok(avail_bytes)
2890 27 : }
2891 :
2892 :     /// Check if the compaction can proceed safely without running out of space. We assume the upper
2893 :     /// bound on the total size of the files produced by a compaction job is the same as the total size
2894 :     /// of all layers involved in the compaction. Therefore, we need at least
2895 :     /// `2 * layers_to_be_compacted_size` of space to do a compaction.
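          :     // Illustrative example (made-up numbers): with 100 GiB available, the budget below is 80 GiB (20% is
          :     // reserved for other tasks); compacting 30 GiB of layers of which 5 GiB still need to be downloaded
          :     // requires 30 GiB for new files plus 5 GiB for downloads, and 35 GiB <= 80 GiB, so the check passes.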
2896 27 : async fn check_compaction_space(
2897 27 : self: &Arc<Self>,
2898 27 : layer_selection: &[Layer],
2899 27 : ) -> Result<(), CompactionError> {
2900 27 : let available_space = self
2901 27 : .check_available_space()
2902 27 : .await
2903 27 : .map_err(CompactionError::Other)?;
2904 27 : let mut remote_layer_size = 0;
2905 27 : let mut all_layer_size = 0;
2906 106 : for layer in layer_selection {
2907 79 : let needs_download = layer
2908 79 : .needs_download()
2909 79 : .await
2910 79 : .context("failed to check if layer needs download")
2911 79 : .map_err(CompactionError::Other)?;
2912 79 : if needs_download.is_some() {
2913 0 : remote_layer_size += layer.layer_desc().file_size;
2914 79 : }
2915 79 : all_layer_size += layer.layer_desc().file_size;
2916 : }
2917 27 : let allocated_space = (available_space as f64 * 0.8) as u64; /* reserve 20% space for other tasks */
2918 27 : if all_layer_size /* space needed for newly-generated file */ + remote_layer_size /* space for downloading layers */ > allocated_space
2919 : {
2920 0 : return Err(CompactionError::Other(anyhow!(
2921 0 : "not enough space for compaction: available_space={}, allocated_space={}, all_layer_size={}, remote_layer_size={}, required_space={}",
2922 0 : available_space,
2923 0 : allocated_space,
2924 0 : all_layer_size,
2925 0 : remote_layer_size,
2926 0 : all_layer_size + remote_layer_size
2927 0 : )));
2928 27 : }
2929 27 : Ok(())
2930 27 : }
2931 :
2932 :     /// Check whether to bail out of gc-compaction early because it would use too much memory.
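          :     // Illustrative example (made-up numbers): a 256 MiB delta layer adds roughly 3 MB to the estimate and
          :     // a 256 MiB image layer roughly 5 MB; the job bails out once the total estimate exceeds 1024 MB.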
2933 27 : async fn check_memory_usage(
2934 27 : self: &Arc<Self>,
2935 27 : layer_selection: &[Layer],
2936 27 : ) -> Result<(), CompactionError> {
2937 27 : let mut estimated_memory_usage_mb = 0.0;
2938 27 : let mut num_image_layers = 0;
2939 27 : let mut num_delta_layers = 0;
2940 27 : let target_layer_size_bytes = 256 * 1024 * 1024;
2941 106 : for layer in layer_selection {
2942 79 : let layer_desc = layer.layer_desc();
2943 79 : if layer_desc.is_delta() {
2944 44 :                 // Delta layers have at most a 1MB buffer; use 3x to be safe (there are deltas as large as 16KB).
2945 44 :                 // Scale it by target_layer_size_bytes so that tests can pass (some tests, e.g., `test_pageserver_gc_compaction_preempt`,
2946 44 :                 // use a 3MB layer size and we need to account for that).
2947 44 : estimated_memory_usage_mb +=
2948 44 : 3.0 * (layer_desc.file_size / target_layer_size_bytes) as f64;
2949 44 : num_delta_layers += 1;
2950 44 : } else {
2951 35 :                 // Image layers have at most a 1MB buffer, but the data might be compressed; assume a 5x compression ratio.
2952 35 : estimated_memory_usage_mb +=
2953 35 : 5.0 * (layer_desc.file_size / target_layer_size_bytes) as f64;
2954 35 : num_image_layers += 1;
2955 35 : }
2956 : }
2957 27 : if estimated_memory_usage_mb > 1024.0 {
2958 0 : return Err(CompactionError::Other(anyhow!(
2959 0 : "estimated memory usage is too high: {}MB, giving up compaction; num_image_layers={}, num_delta_layers={}",
2960 0 : estimated_memory_usage_mb,
2961 0 : num_image_layers,
2962 0 : num_delta_layers
2963 0 : )));
2964 27 : }
2965 27 : Ok(())
2966 27 : }
2967 :
2968 :     /// Get a watermark for gc-compaction, that is, the lowest LSN that we can use as the `gc_horizon` for
2969 : /// the compaction algorithm. It is min(space_cutoff, time_cutoff, latest_gc_cutoff, standby_horizon).
2970 : /// Leases and retain_lsns are considered in the gc-compaction job itself so we don't need to account for them
2971 : /// here.
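          :     // Illustrative example (hypothetical LSNs): if the cutoffs give min_cutoff = 0/50 but the applied gc
          :     // cutoff is still 0/40, the watermark is min(0/50, 0/40) = 0/40, i.e. we never compact above what the
          :     // legacy gc has already applied.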
2972 28 : pub(crate) fn get_gc_compaction_watermark(self: &Arc<Self>) -> Lsn {
2973 28 : let gc_cutoff_lsn = {
2974 28 : let gc_info = self.gc_info.read().unwrap();
2975 28 : gc_info.min_cutoff()
2976 : };
2977 :
2978 : // TODO: standby horizon should use leases so we don't really need to consider it here.
2979 : // let watermark = watermark.min(self.standby_horizon.load());
2980 :
2981 : // TODO: ensure the child branches will not use anything below the watermark, or consider
2982 : // them when computing the watermark.
2983 28 : gc_cutoff_lsn.min(*self.get_applied_gc_cutoff_lsn())
2984 28 : }
2985 :
2986 : /// Split a gc-compaction job into multiple compaction jobs. The split is based on the key range and the estimated size of the compaction job.
2987 : /// The function returns a list of compaction jobs that can be executed separately. If the upper bound of the compact LSN
2988 : /// range is not specified, we will use the latest gc_cutoff as the upper bound, so that all jobs in the jobset acts
2989 :     /// range is not specified, we will use the latest gc_cutoff as the upper bound, so that all jobs in the job set together act
2990 :     /// like a full compaction of the specified keyspace.
2991 0 : self: &Arc<Self>,
2992 0 : job: GcCompactJob,
2993 0 : sub_compaction_max_job_size_mb: Option<u64>,
2994 0 : ) -> Result<Vec<GcCompactJob>, CompactionError> {
2995 0 : let compact_below_lsn = if job.compact_lsn_range.end != Lsn::MAX {
2996 0 : job.compact_lsn_range.end
2997 : } else {
2998 0 : self.get_gc_compaction_watermark()
2999 : };
3000 :
3001 0 : if compact_below_lsn == Lsn::INVALID {
3002 0 : tracing::warn!(
3003 0 : "no layers to compact with gc: gc_cutoff not generated yet, skipping gc bottom-most compaction"
3004 : );
3005 0 : return Ok(vec![]);
3006 0 : }
3007 :
3008 : // Split compaction job to about 4GB each
3009 : const GC_COMPACT_MAX_SIZE_MB: u64 = 4 * 1024;
3010 0 : let sub_compaction_max_job_size_mb =
3011 0 : sub_compaction_max_job_size_mb.unwrap_or(GC_COMPACT_MAX_SIZE_MB);
3012 :
3013 0 : let mut compact_jobs = Vec::<GcCompactJob>::new();
3014 : // For now, we simply use the key partitioning information; we should do a more fine-grained partitioning
3015 :         // by estimating the number of files read for a compaction job. We should also partition on LSN.
3016 0 : let ((dense_ks, sparse_ks), _) = self.partitioning.read().as_ref().clone();
3017 :         // Truncate the key range to be within the user-specified compaction range.
3018 0 : fn truncate_to(
3019 0 : source_start: &Key,
3020 0 : source_end: &Key,
3021 0 : target_start: &Key,
3022 0 : target_end: &Key,
3023 0 : ) -> Option<(Key, Key)> {
3024 0 : let start = source_start.max(target_start);
3025 0 : let end = source_end.min(target_end);
3026 0 : if start < end {
3027 0 : Some((*start, *end))
3028 : } else {
3029 0 : None
3030 : }
3031 0 : }
3032 0 : let mut split_key_ranges = Vec::new();
3033 0 : let ranges = dense_ks
3034 0 : .parts
3035 0 : .iter()
3036 0 : .map(|partition| partition.ranges.iter())
3037 0 : .chain(sparse_ks.parts.iter().map(|x| x.0.ranges.iter()))
3038 0 : .flatten()
3039 0 : .cloned()
3040 0 : .collect_vec();
3041 0 : for range in ranges.iter() {
3042 0 : let Some((start, end)) = truncate_to(
3043 0 : &range.start,
3044 0 : &range.end,
3045 0 : &job.compact_key_range.start,
3046 0 : &job.compact_key_range.end,
3047 0 : ) else {
3048 0 : continue;
3049 : };
3050 0 : split_key_ranges.push((start, end));
3051 : }
3052 0 : split_key_ranges.sort();
3053 0 : let all_layers = {
3054 0 : let guard = self.layers.read(LayerManagerLockHolder::Compaction).await;
3055 0 : let layer_map = guard.layer_map()?;
3056 0 : layer_map.iter_historic_layers().collect_vec()
3057 : };
3058 0 : let mut current_start = None;
3059 0 : let ranges_num = split_key_ranges.len();
3060 0 : for (idx, (start, end)) in split_key_ranges.into_iter().enumerate() {
3061 0 : if current_start.is_none() {
3062 0 : current_start = Some(start);
3063 0 : }
3064 0 : let start = current_start.unwrap();
3065 0 : if start >= end {
3066 : // We have already processed this partition.
3067 0 : continue;
3068 0 : }
3069 0 : let overlapping_layers = {
3070 0 : let mut desc = Vec::new();
3071 0 : for layer in all_layers.iter() {
3072 0 : if overlaps_with(&layer.get_key_range(), &(start..end))
3073 0 : && layer.get_lsn_range().start <= compact_below_lsn
3074 0 : {
3075 0 : desc.push(layer.clone());
3076 0 : }
3077 : }
3078 0 : desc
3079 : };
3080 0 : let total_size = overlapping_layers.iter().map(|x| x.file_size).sum::<u64>();
3081 0 : if total_size > sub_compaction_max_job_size_mb * 1024 * 1024 || ranges_num == idx + 1 {
3082 : // Try to extend the compaction range so that we include at least one full layer file.
3083 0 : let extended_end = overlapping_layers
3084 0 : .iter()
3085 0 : .map(|layer| layer.key_range.end)
3086 0 : .min();
3087 : // It is possible that the search range does not contain any layer files when we reach the end of the loop.
3088 : // In this case, we simply use the specified key range end.
3089 0 : let end = if let Some(extended_end) = extended_end {
3090 0 : extended_end.max(end)
3091 : } else {
3092 0 : end
3093 : };
3094 0 : let end = if ranges_num == idx + 1 {
3095 : // extend the compaction range to the end of the key range if it's the last partition
3096 0 : end.max(job.compact_key_range.end)
3097 : } else {
3098 0 : end
3099 : };
3100 0 : if total_size == 0 && !compact_jobs.is_empty() {
3101 0 : info!(
3102 0 : "splitting compaction job: {}..{}, estimated_size={}, extending the previous job",
3103 : start, end, total_size
3104 : );
3105 0 : compact_jobs.last_mut().unwrap().compact_key_range.end = end;
3106 0 : current_start = Some(end);
3107 : } else {
3108 0 : info!(
3109 0 : "splitting compaction job: {}..{}, estimated_size={}",
3110 : start, end, total_size
3111 : );
3112 0 : compact_jobs.push(GcCompactJob {
3113 0 : dry_run: job.dry_run,
3114 0 : compact_key_range: start..end,
3115 0 : compact_lsn_range: job.compact_lsn_range.start..compact_below_lsn,
3116 0 : });
3117 0 : current_start = Some(end);
3118 : }
3119 0 : }
3120 : }
3121 0 : Ok(compact_jobs)
3122 0 : }
3123 :
3124 : /// An experimental compaction building block that combines compaction with garbage collection.
3125 : ///
3126 : /// The current implementation picks all delta + image layers that are below or intersecting with
3127 : /// the GC horizon without considering retain_lsns. Then, it does a full compaction over all these delta
3128 :     /// layers and image layers, which generates image layers at the gc horizon, drops deltas below the gc horizon,
3129 :     /// and creates delta layers with all deltas >= the gc horizon.
3130 : ///
3131 : /// If `options.compact_range` is provided, it will only compact the keys within the range, aka partial compaction.
3132 : /// Partial compaction will read and process all layers overlapping with the key range, even if it might
3133 : /// contain extra keys. After the gc-compaction phase completes, delta layers that are not fully contained
3134 :     /// within the key range will be rewritten to ensure they do not overlap with the newly produced delta layers. Providing
3135 :     /// Key::MIN..Key::MAX to the function indicates a full compaction, though technically, `Key::MAX` is not
3136 : /// part of the range.
3137 : ///
3138 : /// If `options.compact_lsn_range.end` is provided, the compaction will only compact layers below or intersect with
3139 : /// the LSN. Otherwise, it will use the gc cutoff by default.
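          :     // Illustrative example (hypothetical ranges): a partial run over keys K1..K2 with
          :     // compact_lsn_range.end = 0/80 reads every layer overlapping K1..K2 that lies below or intersects 0/80,
          :     // rewrites the history of those keys (images below the horizon for each retain_lsn, deltas above it),
          :     // and splits any delta layer straddling K1 or K2 so that the parts outside the range are kept.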
3140 28 : pub(crate) async fn compact_with_gc(
3141 28 : self: &Arc<Self>,
3142 28 : cancel: &CancellationToken,
3143 28 : options: CompactOptions,
3144 28 : ctx: &RequestContext,
3145 28 : ) -> Result<CompactionOutcome, CompactionError> {
3146 28 : let sub_compaction = options.sub_compaction;
3147 28 : let job = GcCompactJob::from_compact_options(options.clone());
3148 28 : let yield_for_l0 = options.flags.contains(CompactFlags::YieldForL0);
3149 28 : if sub_compaction {
3150 0 : info!(
3151 0 : "running enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs"
3152 : );
3153 0 : let jobs = self
3154 0 : .gc_compaction_split_jobs(job, options.sub_compaction_max_job_size_mb)
3155 0 : .await?;
3156 0 : let jobs_len = jobs.len();
3157 0 : for (idx, job) in jobs.into_iter().enumerate() {
3158 0 : let sub_compaction_progress = format!("{}/{}", idx + 1, jobs_len);
3159 0 : self.compact_with_gc_inner(cancel, job, ctx, yield_for_l0)
3160 0 : .instrument(info_span!(
3161 : "sub_compaction",
3162 : sub_compaction_progress = sub_compaction_progress
3163 : ))
3164 0 : .await?;
3165 : }
3166 0 : if jobs_len == 0 {
3167 0 : info!("no jobs to run, skipping gc bottom-most compaction");
3168 0 : }
3169 0 : return Ok(CompactionOutcome::Done);
3170 28 : }
3171 28 : self.compact_with_gc_inner(cancel, job, ctx, yield_for_l0)
3172 28 : .await
3173 28 : }
3174 :
3175 28 : async fn compact_with_gc_inner(
3176 28 : self: &Arc<Self>,
3177 28 : cancel: &CancellationToken,
3178 28 : job: GcCompactJob,
3179 28 : ctx: &RequestContext,
3180 28 : yield_for_l0: bool,
3181 28 : ) -> Result<CompactionOutcome, CompactionError> {
3182 : // Block other compaction/GC tasks from running for now. GC-compaction could run along
3183 : // with legacy compaction tasks in the future. Always ensure the lock order is compaction -> gc.
3184 : // Note that we already acquired the compaction lock when the outer `compact` function gets called.
3185 :
3186 28 : let timer = Instant::now();
3187 28 : let begin_timer = timer;
3188 :
3189 28 : let gc_lock = async {
3190 28 : tokio::select! {
3191 28 : guard = self.gc_lock.lock() => Ok(guard),
3192 28 : _ = cancel.cancelled() => Err(CompactionError::ShuttingDown),
3193 : }
3194 28 : };
3195 :
3196 28 : let time_acquire_lock = timer.elapsed();
3197 28 : let timer = Instant::now();
3198 :
3199 28 : let gc_lock = crate::timed(
3200 28 : gc_lock,
3201 28 : "acquires gc lock",
3202 28 : std::time::Duration::from_secs(5),
3203 28 : )
3204 28 : .await?;
3205 :
3206 28 : let dry_run = job.dry_run;
3207 28 : let compact_key_range = job.compact_key_range;
3208 28 : let compact_lsn_range = job.compact_lsn_range;
3209 :
3210 28 : let debug_mode = cfg!(debug_assertions) || cfg!(feature = "testing");
3211 :
3212 28 : info!(
3213 0 : "running enhanced gc bottom-most compaction, dry_run={dry_run}, compact_key_range={}..{}, compact_lsn_range={}..{}",
3214 : compact_key_range.start,
3215 : compact_key_range.end,
3216 : compact_lsn_range.start,
3217 : compact_lsn_range.end
3218 : );
3219 :
3220 28 : scopeguard::defer! {
3221 : info!("done enhanced gc bottom-most compaction");
3222 : };
3223 :
3224 28 : let mut stat = CompactionStatistics::default();
3225 :
3226 : // Step 0: pick all delta layers + image layers below/intersect with the GC horizon.
3227 : // The layer selection has the following properties:
3228 : // 1. If a layer is in the selection, all layers below it are in the selection.
3229 :         // 2. It follows from (1) that, for each key covered by the layer selection, the value can be reconstructed using only the layers in the selection.
3230 27 : let job_desc = {
3231 28 : let guard = self
3232 28 : .layers
3233 28 : .read(LayerManagerLockHolder::GarbageCollection)
3234 28 : .await;
3235 28 : let layers = guard.layer_map()?;
3236 28 : let gc_info = self.gc_info.read().unwrap();
3237 28 : let mut retain_lsns_below_horizon = Vec::new();
3238 28 : let gc_cutoff = {
3239 : // Currently, gc-compaction only kicks in after the legacy gc has updated the gc_cutoff.
3240 : // Therefore, it can only clean up data that cannot be cleaned up with legacy gc, instead of
3241 :                 // cleaning everything that it theoretically could. In the future, it should use `self.gc_info`
3242 :                 // to get the ground-truth data.
3243 28 : let real_gc_cutoff = self.get_gc_compaction_watermark();
3244 : // The compaction algorithm will keep all keys above the gc_cutoff while keeping only necessary keys below the gc_cutoff for
3245 : // each of the retain_lsn. Therefore, if the user-provided `compact_lsn_range.end` is larger than the real gc cutoff, we will use
3246 : // the real cutoff.
3247 28 : let mut gc_cutoff = if compact_lsn_range.end == Lsn::MAX {
3248 25 : if real_gc_cutoff == Lsn::INVALID {
3249 : // If the gc_cutoff is not generated yet, we should not compact anything.
3250 0 : tracing::warn!(
3251 0 : "no layers to compact with gc: gc_cutoff not generated yet, skipping gc bottom-most compaction"
3252 : );
3253 0 : return Ok(CompactionOutcome::Skipped);
3254 25 : }
3255 25 : real_gc_cutoff
3256 : } else {
3257 3 : compact_lsn_range.end
3258 : };
3259 28 : if gc_cutoff > real_gc_cutoff {
3260 2 : warn!(
3261 0 : "provided compact_lsn_range.end={} is larger than the real_gc_cutoff={}, using the real gc cutoff",
3262 : gc_cutoff, real_gc_cutoff
3263 : );
3264 2 : gc_cutoff = real_gc_cutoff;
3265 26 : }
3266 28 : gc_cutoff
3267 : };
3268 35 : for (lsn, _timeline_id, _is_offloaded) in &gc_info.retain_lsns {
3269 35 : if lsn < &gc_cutoff {
3270 35 : retain_lsns_below_horizon.push(*lsn);
3271 35 : }
3272 : }
3273 28 : for lsn in gc_info.leases.keys() {
3274 0 : if lsn < &gc_cutoff {
3275 0 : retain_lsns_below_horizon.push(*lsn);
3276 0 : }
3277 : }
3278 28 : let mut selected_layers: Vec<Layer> = Vec::new();
3279 28 : drop(gc_info);
3280 :             // First, pick all the layers that intersect with or are below the gc_cutoff, and take the largest LSN among the selected layers.
3281 28 : let Some(max_layer_lsn) = layers
3282 28 : .iter_historic_layers()
3283 125 : .filter(|desc| desc.get_lsn_range().start <= gc_cutoff)
3284 107 : .map(|desc| desc.get_lsn_range().end)
3285 28 : .max()
3286 : else {
3287 0 : info!(
3288 0 : "no layers to compact with gc: no historic layers below gc_cutoff, gc_cutoff={}",
3289 : gc_cutoff
3290 : );
3291 0 : return Ok(CompactionOutcome::Done);
3292 : };
3293 : // Next, if the user specifies compact_lsn_range.start, we need to filter some layers out. All the layers (strictly) below
3294 : // the min_layer_lsn computed as below will be filtered out and the data will be accessed using the normal read path, as if
3295 : // it is a branch.
3296 28 : let Some(min_layer_lsn) = layers
3297 28 : .iter_historic_layers()
3298 125 : .filter(|desc| {
3299 125 : if compact_lsn_range.start == Lsn::INVALID {
3300 102 : true // select all layers below if start == Lsn(0)
3301 : } else {
3302 23 : desc.get_lsn_range().end > compact_lsn_range.start // strictly larger than compact_above_lsn
3303 : }
3304 125 : })
3305 116 : .map(|desc| desc.get_lsn_range().start)
3306 28 : .min()
3307 : else {
3308 0 : info!(
3309 0 : "no layers to compact with gc: no historic layers above compact_above_lsn, compact_above_lsn={}",
3310 : compact_lsn_range.end
3311 : );
3312 0 : return Ok(CompactionOutcome::Done);
3313 : };
3314 : // Then, pick all the layers that are below the max_layer_lsn. This is to ensure we can pick all single-key
3315 : // layers to compact.
3316 28 : let mut rewrite_layers = Vec::new();
3317 125 : for desc in layers.iter_historic_layers() {
3318 125 : if desc.get_lsn_range().end <= max_layer_lsn
3319 107 : && desc.get_lsn_range().start >= min_layer_lsn
3320 98 : && overlaps_with(&desc.get_key_range(), &compact_key_range)
3321 : {
3322 : // If the layer overlaps with the compaction key range, we need to read it to obtain all keys within the range,
3323 : // even if it might contain extra keys
3324 79 : selected_layers.push(guard.get_from_desc(&desc));
3325 : // If the layer is not fully contained within the key range, we need to rewrite it if it's a delta layer (it's fine
3326 : // to overlap image layers)
3327 79 : if desc.is_delta() && !fully_contains(&compact_key_range, &desc.get_key_range())
3328 1 : {
3329 1 : rewrite_layers.push(desc);
3330 78 : }
3331 46 : }
3332 : }
3333 28 : if selected_layers.is_empty() {
3334 1 : info!(
3335 0 : "no layers to compact with gc: no layers within the key range, gc_cutoff={}, key_range={}..{}",
3336 : gc_cutoff, compact_key_range.start, compact_key_range.end
3337 : );
3338 1 : return Ok(CompactionOutcome::Done);
3339 27 : }
3340 27 : retain_lsns_below_horizon.sort();
3341 27 : GcCompactionJobDescription {
3342 27 : selected_layers,
3343 27 : gc_cutoff,
3344 27 : retain_lsns_below_horizon,
3345 27 : min_layer_lsn,
3346 27 : max_layer_lsn,
3347 27 : compaction_key_range: compact_key_range,
3348 27 : rewrite_layers,
3349 27 : }
3350 : };
3351 27 : let (has_data_below, lowest_retain_lsn) = if compact_lsn_range.start != Lsn::INVALID {
3352 : // If we only compact above some LSN, we should get the history from the current branch below the specified LSN.
3353 : // We use job_desc.min_layer_lsn as if it's the lowest branch point.
3354 4 : (true, job_desc.min_layer_lsn)
3355 23 : } else if self.ancestor_timeline.is_some() {
3356 : // In theory, we can also use min_layer_lsn here, but using ancestor LSN makes sure the delta layers cover the
3357 : // LSN ranges all the way to the ancestor timeline.
3358 1 : (true, self.ancestor_lsn)
3359 : } else {
3360 22 : let res = job_desc
3361 22 : .retain_lsns_below_horizon
3362 22 : .first()
3363 22 : .copied()
3364 22 : .unwrap_or(job_desc.gc_cutoff);
3365 22 : if debug_mode {
3366 22 : assert_eq!(
3367 : res,
3368 22 : job_desc
3369 22 : .retain_lsns_below_horizon
3370 22 : .iter()
3371 22 : .min()
3372 22 : .copied()
3373 22 : .unwrap_or(job_desc.gc_cutoff)
3374 : );
3375 0 : }
3376 22 : (false, res)
3377 : };
3378 :
3379 27 : let verification = self.get_gc_compaction_settings().gc_compaction_verification;
3380 :
3381 27 : info!(
3382 0 : "picked {} layers for compaction ({} layers need rewriting) with max_layer_lsn={} min_layer_lsn={} gc_cutoff={} lowest_retain_lsn={}, key_range={}..{}, has_data_below={}",
3383 0 : job_desc.selected_layers.len(),
3384 0 : job_desc.rewrite_layers.len(),
3385 : job_desc.max_layer_lsn,
3386 : job_desc.min_layer_lsn,
3387 : job_desc.gc_cutoff,
3388 : lowest_retain_lsn,
3389 : job_desc.compaction_key_range.start,
3390 : job_desc.compaction_key_range.end,
3391 : has_data_below,
3392 : );
3393 :
3394 27 : let time_analyze = timer.elapsed();
3395 27 : let timer = Instant::now();
3396 :
3397 106 : for layer in &job_desc.selected_layers {
3398 79 : debug!("read layer: {}", layer.layer_desc().key());
3399 : }
3400 28 : for layer in &job_desc.rewrite_layers {
3401 1 : debug!("rewrite layer: {}", layer.key());
3402 : }
3403 :
3404 27 : self.check_compaction_space(&job_desc.selected_layers)
3405 27 : .await?;
3406 :
3407 27 : self.check_memory_usage(&job_desc.selected_layers).await?;
3408 27 : if job_desc.selected_layers.len() > 100
3409 0 : && job_desc.rewrite_layers.len() as f64 >= job_desc.selected_layers.len() as f64 * 0.7
3410 : {
3411 0 : return Err(CompactionError::Other(anyhow!(
3412 0 : "too many layers to rewrite: {} / {}, giving up compaction",
3413 0 : job_desc.rewrite_layers.len(),
3414 0 : job_desc.selected_layers.len()
3415 0 : )));
3416 27 : }
3417 :
3418 : // Generate statistics for the compaction
3419 106 : for layer in &job_desc.selected_layers {
3420 79 : let desc = layer.layer_desc();
3421 79 : if desc.is_delta() {
3422 44 : stat.visit_delta_layer(desc.file_size());
3423 44 : } else {
3424 35 : stat.visit_image_layer(desc.file_size());
3425 35 : }
3426 : }
3427 :
3428 : // Step 1: construct a k-merge iterator over all layers.
3429 : // Also, verify if the layer map can be split by drawing a horizontal line at every LSN start/end split point.
3430 27 : let layer_names = job_desc
3431 27 : .selected_layers
3432 27 : .iter()
3433 79 : .map(|layer| layer.layer_desc().layer_name())
3434 27 : .collect_vec();
3435 27 : if let Some(err) = check_valid_layermap(&layer_names) {
3436 0 : return Err(CompactionError::Other(anyhow!(
3437 0 : "gc-compaction layer map check failed because {}, cannot proceed with compaction due to potential data loss",
3438 0 : err
3439 0 : )));
3440 27 : }
3441 : // The maximum LSN we are processing in this compaction loop
3442 27 : let end_lsn = job_desc
3443 27 : .selected_layers
3444 27 : .iter()
3445 79 : .map(|l| l.layer_desc().lsn_range.end)
3446 27 : .max()
3447 27 : .unwrap();
3448 27 : let mut delta_layers = Vec::new();
3449 27 : let mut image_layers = Vec::new();
3450 27 : let mut downloaded_layers = Vec::new();
3451 27 : let mut total_downloaded_size = 0;
3452 27 : let mut total_layer_size = 0;
3453 106 : for layer in &job_desc.selected_layers {
3454 79 : if layer
3455 79 : .needs_download()
3456 79 : .await
3457 79 : .context("failed to check if layer needs download")
3458 79 : .map_err(CompactionError::Other)?
3459 79 : .is_some()
3460 0 : {
3461 0 : total_downloaded_size += layer.layer_desc().file_size;
3462 79 : }
3463 79 : total_layer_size += layer.layer_desc().file_size;
3464 79 : if cancel.is_cancelled() {
3465 0 : return Err(CompactionError::ShuttingDown);
3466 79 : }
3467 79 : let should_yield = yield_for_l0
3468 0 : && self
3469 0 : .l0_compaction_trigger
3470 0 : .notified()
3471 0 : .now_or_never()
3472 0 : .is_some();
3473 79 : if should_yield {
3474 0 : tracing::info!("preempt gc-compaction when downloading layers: too many L0 layers");
3475 0 : return Ok(CompactionOutcome::YieldForL0);
3476 79 : }
3477 79 : let resident_layer = layer
3478 79 : .download_and_keep_resident(ctx)
3479 79 : .await
3480 79 : .context("failed to download and keep resident layer")
3481 79 : .map_err(CompactionError::Other)?;
3482 79 : downloaded_layers.push(resident_layer);
3483 : }
3484 27 : info!(
3485 0 : "finish downloading layers, downloaded={}, total={}, ratio={:.2}",
3486 : total_downloaded_size,
3487 : total_layer_size,
3488 0 : total_downloaded_size as f64 / total_layer_size as f64
3489 : );
3490 106 : for resident_layer in &downloaded_layers {
3491 79 : if resident_layer.layer_desc().is_delta() {
3492 44 : let layer = resident_layer
3493 44 : .get_as_delta(ctx)
3494 44 : .await
3495 44 : .context("failed to get delta layer")
3496 44 : .map_err(CompactionError::Other)?;
3497 44 : delta_layers.push(layer);
3498 : } else {
3499 35 : let layer = resident_layer
3500 35 : .get_as_image(ctx)
3501 35 : .await
3502 35 : .context("failed to get image layer")
3503 35 : .map_err(CompactionError::Other)?;
3504 35 : image_layers.push(layer);
3505 : }
3506 : }
3507 27 : let (dense_ks, sparse_ks) = self
3508 27 : .collect_gc_compaction_keyspace()
3509 27 : .await
3510 27 : .context("failed to collect gc compaction keyspace")
3511 27 : .map_err(CompactionError::Other)?;
3512 27 : let mut merge_iter = FilterIterator::create(
3513 27 : MergeIterator::create_with_options(
3514 27 : &delta_layers,
3515 27 : &image_layers,
3516 27 : ctx,
3517 27 : 128 * 8192, /* 1MB buffer for each of the inner iterators */
3518 : 128,
3519 : ),
3520 27 : dense_ks,
3521 27 : sparse_ks,
3522 : )
3523 27 : .context("failed to create filter iterator")
3524 27 : .map_err(CompactionError::Other)?;
3525 :
3526 27 : let time_download_layer = timer.elapsed();
3527 27 : let mut timer = Instant::now();
3528 :
3529 : // Step 2: Produce images+deltas.
3530 27 : let mut accumulated_values = Vec::new();
3531 27 : let mut accumulated_values_estimated_size = 0;
3532 27 : let mut last_key: Option<Key> = None;
3533 :
3534 :         // Only create image layers when there are no ancestor branches. TODO: create a covering image layer
3535 :         // when certain conditions are met.
3536 27 : let mut image_layer_writer = if !has_data_below {
3537 22 : Some(SplitImageLayerWriter::new(
3538 22 : self.conf,
3539 22 : self.timeline_id,
3540 22 : self.tenant_shard_id,
3541 22 : job_desc.compaction_key_range.start,
3542 22 : lowest_retain_lsn,
3543 22 : self.get_compaction_target_size(),
3544 22 : &self.gate,
3545 22 : self.cancel.clone(),
3546 22 : ))
3547 : } else {
3548 5 : None
3549 : };
3550 :
3551 27 : let mut delta_layer_writer = SplitDeltaLayerWriter::new(
3552 27 : self.conf,
3553 27 : self.timeline_id,
3554 27 : self.tenant_shard_id,
3555 27 : lowest_retain_lsn..end_lsn,
3556 27 : self.get_compaction_target_size(),
3557 27 : &self.gate,
3558 27 : self.cancel.clone(),
3559 : );
3560 :
3561 : #[derive(Default)]
3562 : struct RewritingLayers {
3563 : before: Option<DeltaLayerWriter>,
3564 : after: Option<DeltaLayerWriter>,
3565 : }
3566 27 : let mut delta_layer_rewriters = HashMap::<Arc<PersistentLayerKey>, RewritingLayers>::new();
3567 :
3568 :         /// When we are not compacting a bottom range (i.e. `[0,X)`) of the root branch, we "have data below" (`has_data_below=true`).
3569 :         /// The two cases are compaction on ancestor branches and when `compact_lsn_range.start` is set.
3570 :         /// In those cases, we need to pull up data from below the LSN range we're compacting.
3571 : ///
3572 : /// This function unifies the cases so that later code doesn't have to think about it.
3573 : ///
3574 : /// Currently, we always get the ancestor image for each key in the child branch no matter whether the image
3575 : /// is needed for reconstruction. This should be fixed in the future.
3576 : ///
3577 : /// Furthermore, we should do vectored get instead of a single get, or better, use k-merge for ancestor
3578 : /// images.
3579 320 : async fn get_ancestor_image(
3580 320 : this_tline: &Arc<Timeline>,
3581 320 : key: Key,
3582 320 : ctx: &RequestContext,
3583 320 : has_data_below: bool,
3584 320 : history_lsn_point: Lsn,
3585 320 : ) -> anyhow::Result<Option<(Key, Lsn, Bytes)>> {
3586 320 : if !has_data_below {
3587 301 : return Ok(None);
3588 19 : };
3589 : // This function is implemented as a get of the current timeline at ancestor LSN, therefore reusing
3590 : // as much existing code as possible.
3591 19 : let img = this_tline.get(key, history_lsn_point, ctx).await?;
3592 19 : Ok(Some((key, history_lsn_point, img)))
3593 320 : }
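          :         // Illustrative example (hypothetical LSNs): when compacting a child branch with ancestor_lsn = 0/40,
          :         // get_ancestor_image(key, .., true, 0/40) reads the page at 0/40 through the normal read path, so the
          :         // rewritten history for that key still has a base image even though the ancestor's layers were not
          :         // selected for this job.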
3594 :
3595 :         // Actually, we could decide not to write to the image layer at all at this point because
3596 :         // the key and LSN ranges are already determined. However, to keep things simple here, we still
3597 :         // create this writer and discard it in the end.
3598 27 : let mut time_to_first_kv_pair = None;
3599 :
3600 496 : while let Some(((key, lsn, val), desc)) = merge_iter
3601 496 : .next_with_trace()
3602 496 : .await
3603 496 : .context("failed to get next key-value pair")
3604 496 : .map_err(CompactionError::Other)?
3605 : {
3606 470 : if time_to_first_kv_pair.is_none() {
3607 27 : time_to_first_kv_pair = Some(timer.elapsed());
3608 27 : timer = Instant::now();
3609 443 : }
3610 :
3611 470 : if cancel.is_cancelled() {
3612 0 : return Err(CompactionError::ShuttingDown);
3613 470 : }
3614 :
3615 470 : let should_yield = yield_for_l0
3616 0 : && self
3617 0 : .l0_compaction_trigger
3618 0 : .notified()
3619 0 : .now_or_never()
3620 0 : .is_some();
3621 470 : if should_yield {
3622 0 : tracing::info!("preempt gc-compaction in the main loop: too many L0 layers");
3623 0 : return Ok(CompactionOutcome::YieldForL0);
3624 470 : }
3625 470 : if self.shard_identity.is_key_disposable(&key) {
3626 : // If this shard does not need to store this key, simply skip it.
3627 : //
3628 :                 // This is not handled in the filter iterator because shard ownership is determined by hash.
3629 :                 // Therefore, it would not give us any performance benefit to do things like skipping
3630 :                 // a whole layer file, as we can when handling key spaces (ranges).
3631 0 : if cfg!(debug_assertions) {
3632 0 : let shard = self.shard_identity.shard_index();
3633 0 : let owner = self.shard_identity.get_shard_number(&key);
3634 0 : panic!("key {key} does not belong on shard {shard}, owned by {owner}");
3635 0 : }
3636 0 : continue;
3637 470 : }
3638 470 : if !job_desc.compaction_key_range.contains(&key) {
3639 32 : if !desc.is_delta {
3640 30 : continue;
3641 2 : }
3642 2 : let rewriter = delta_layer_rewriters.entry(desc.clone()).or_default();
3643 2 : let rewriter = if key < job_desc.compaction_key_range.start {
3644 0 : if rewriter.before.is_none() {
3645 0 : rewriter.before = Some(
3646 0 : DeltaLayerWriter::new(
3647 0 : self.conf,
3648 0 : self.timeline_id,
3649 0 : self.tenant_shard_id,
3650 0 : desc.key_range.start,
3651 0 : desc.lsn_range.clone(),
3652 0 : &self.gate,
3653 0 : self.cancel.clone(),
3654 0 : ctx,
3655 0 : )
3656 0 : .await
3657 0 : .context("failed to create delta layer writer")
3658 0 : .map_err(CompactionError::Other)?,
3659 : );
3660 0 : }
3661 0 : rewriter.before.as_mut().unwrap()
3662 2 : } else if key >= job_desc.compaction_key_range.end {
3663 2 : if rewriter.after.is_none() {
3664 1 : rewriter.after = Some(
3665 1 : DeltaLayerWriter::new(
3666 1 : self.conf,
3667 1 : self.timeline_id,
3668 1 : self.tenant_shard_id,
3669 1 : job_desc.compaction_key_range.end,
3670 1 : desc.lsn_range.clone(),
3671 1 : &self.gate,
3672 1 : self.cancel.clone(),
3673 1 : ctx,
3674 1 : )
3675 1 : .await
3676 1 : .context("failed to create delta layer writer")
3677 1 : .map_err(CompactionError::Other)?,
3678 : );
3679 1 : }
3680 2 : rewriter.after.as_mut().unwrap()
3681 : } else {
3682 0 : unreachable!()
3683 : };
3684 2 : rewriter
3685 2 : .put_value(key, lsn, val, ctx)
3686 2 : .await
3687 2 : .context("failed to put value")
3688 2 : .map_err(CompactionError::Other)?;
3689 2 : continue;
3690 438 : }
3691 438 : match val {
3692 315 : Value::Image(_) => stat.visit_image_key(&val),
3693 123 : Value::WalRecord(_) => stat.visit_wal_key(&val),
3694 : }
3695 438 : if last_key.is_none() || last_key.as_ref() == Some(&key) {
3696 144 : if last_key.is_none() {
3697 27 : last_key = Some(key);
3698 117 : }
3699 144 : accumulated_values_estimated_size += val.estimated_size();
3700 144 : accumulated_values.push((key, lsn, val));
3701 :
3702 : // Accumulated values should never exceed 512MB.
3703 144 : if accumulated_values_estimated_size >= 1024 * 1024 * 512 {
3704 0 : return Err(CompactionError::Other(anyhow!(
3705 0 : "too many values for a single key: {} for key {}, {} items",
3706 0 : accumulated_values_estimated_size,
3707 0 : key,
3708 0 : accumulated_values.len()
3709 0 : )));
3710 144 : }
3711 : } else {
3712 294 : let last_key: &mut Key = last_key.as_mut().unwrap();
3713 294 : stat.on_unique_key_visited(); // TODO: adjust statistics for partial compaction
3714 294 : let retention = self
3715 294 : .generate_key_retention(
3716 294 : *last_key,
3717 294 : &accumulated_values,
3718 294 : job_desc.gc_cutoff,
3719 294 : &job_desc.retain_lsns_below_horizon,
3720 : COMPACTION_DELTA_THRESHOLD,
3721 294 : get_ancestor_image(self, *last_key, ctx, has_data_below, lowest_retain_lsn)
3722 294 : .await
3723 294 : .context("failed to get ancestor image")
3724 294 : .map_err(CompactionError::Other)?,
3725 294 : verification,
3726 : )
3727 294 : .await
3728 294 : .context("failed to generate key retention")
3729 294 : .map_err(CompactionError::Other)?;
3730 293 : retention
3731 293 : .pipe_to(
3732 293 : *last_key,
3733 293 : &mut delta_layer_writer,
3734 293 : image_layer_writer.as_mut(),
3735 293 : &mut stat,
3736 293 : ctx,
3737 293 : )
3738 293 : .await
3739 293 : .context("failed to pipe to delta layer writer")
3740 293 : .map_err(CompactionError::Other)?;
3741 293 : accumulated_values.clear();
3742 293 : *last_key = key;
3743 293 : accumulated_values_estimated_size = val.estimated_size();
3744 293 : accumulated_values.push((key, lsn, val));
3745 : }
3746 : }
3747 :
3748 : // TODO: move the below part to the loop body
3749 26 : let Some(last_key) = last_key else {
3750 0 : return Err(CompactionError::Other(anyhow!(
3751 0 : "no keys produced during compaction"
3752 0 : )));
3753 : };
3754 26 : stat.on_unique_key_visited();
3755 :
3756 26 : let retention = self
3757 26 : .generate_key_retention(
3758 26 : last_key,
3759 26 : &accumulated_values,
3760 26 : job_desc.gc_cutoff,
3761 26 : &job_desc.retain_lsns_below_horizon,
3762 : COMPACTION_DELTA_THRESHOLD,
3763 26 : get_ancestor_image(self, last_key, ctx, has_data_below, lowest_retain_lsn)
3764 26 : .await
3765 26 : .context("failed to get ancestor image")
3766 26 : .map_err(CompactionError::Other)?,
3767 26 : verification,
3768 : )
3769 26 : .await
3770 26 : .context("failed to generate key retention")
3771 26 : .map_err(CompactionError::Other)?;
3772 26 : retention
3773 26 : .pipe_to(
3774 26 : last_key,
3775 26 : &mut delta_layer_writer,
3776 26 : image_layer_writer.as_mut(),
3777 26 : &mut stat,
3778 26 : ctx,
3779 26 : )
3780 26 : .await
3781 26 : .context("failed to pipe to delta layer writer")
3782 26 : .map_err(CompactionError::Other)?;
3783 : // end: move the above part to the loop body
3784 :
3785 26 : let time_main_loop = timer.elapsed();
3786 26 : let timer = Instant::now();
3787 :
3788 26 : let mut rewrote_delta_layers = Vec::new();
3789 27 : for (key, writers) in delta_layer_rewriters {
3790 1 : if let Some(delta_writer_before) = writers.before {
3791 0 : let (desc, path) = delta_writer_before
3792 0 : .finish(job_desc.compaction_key_range.start, ctx)
3793 0 : .await
3794 0 : .context("failed to finish delta layer writer")
3795 0 : .map_err(CompactionError::Other)?;
3796 0 : let layer = Layer::finish_creating(self.conf, self, desc, &path)
3797 0 : .context("failed to finish creating delta layer")
3798 0 : .map_err(CompactionError::Other)?;
3799 0 : rewrote_delta_layers.push(layer);
3800 1 : }
3801 1 : if let Some(delta_writer_after) = writers.after {
3802 1 : let (desc, path) = delta_writer_after
3803 1 : .finish(key.key_range.end, ctx)
3804 1 : .await
3805 1 : .context("failed to finish delta layer writer")
3806 1 : .map_err(CompactionError::Other)?;
3807 1 : let layer = Layer::finish_creating(self.conf, self, desc, &path)
3808 1 : .context("failed to finish creating delta layer")
3809 1 : .map_err(CompactionError::Other)?;
3810 1 : rewrote_delta_layers.push(layer);
3811 0 : }
3812 : }
3813 :
3814 37 : let discard = |key: &PersistentLayerKey| {
3815 37 : let key = key.clone();
3816 37 : async move { KeyHistoryRetention::discard_key(&key, self, dry_run).await }
3817 37 : };
3818 :
3819 26 : let produced_image_layers = if let Some(writer) = image_layer_writer {
3820 21 : if !dry_run {
3821 19 : let end_key = job_desc.compaction_key_range.end;
3822 19 : writer
3823 19 : .finish_with_discard_fn(self, ctx, end_key, discard)
3824 19 : .await
3825 19 : .context("failed to finish image layer writer")
3826 19 : .map_err(CompactionError::Other)?
3827 : } else {
3828 2 : drop(writer);
3829 2 : Vec::new()
3830 : }
3831 : } else {
3832 5 : Vec::new()
3833 : };
3834 :
3835 26 : let produced_delta_layers = if !dry_run {
3836 24 : delta_layer_writer
3837 24 : .finish_with_discard_fn(self, ctx, discard)
3838 24 : .await
3839 24 : .context("failed to finish delta layer writer")
3840 24 : .map_err(CompactionError::Other)?
3841 : } else {
3842 2 : drop(delta_layer_writer);
3843 2 : Vec::new()
3844 : };
3845 :
3846 : // TODO: make image/delta/rewrote_delta layers generation atomic. At this point, we already generated resident layers, and if
3847 : // compaction is cancelled at this point, we might have some layers that are not cleaned up.
3848 26 : let mut compact_to = Vec::new();
3849 26 : let mut keep_layers = HashSet::new();
3850 26 : let produced_delta_layers_len = produced_delta_layers.len();
3851 26 : let produced_image_layers_len = produced_image_layers.len();
3852 :
3853 26 : let layer_selection_by_key = job_desc
3854 26 : .selected_layers
3855 26 : .iter()
3856 76 : .map(|l| (l.layer_desc().key(), l.layer_desc().clone()))
3857 26 : .collect::<HashMap<_, _>>();
3858 :
3859 44 : for action in produced_delta_layers {
3860 18 : match action {
3861 11 : BatchWriterResult::Produced(layer) => {
3862 11 : if cfg!(debug_assertions) {
3863 11 : info!("produced delta layer: {}", layer.layer_desc().key());
3864 0 : }
3865 11 : stat.produce_delta_layer(layer.layer_desc().file_size());
3866 11 : compact_to.push(layer);
3867 : }
3868 7 : BatchWriterResult::Discarded(l) => {
3869 7 : if cfg!(debug_assertions) {
3870 7 : info!("discarded delta layer: {}", l);
3871 0 : }
3872 7 : if let Some(layer_desc) = layer_selection_by_key.get(&l) {
3873 7 : stat.discard_delta_layer(layer_desc.file_size());
3874 7 : } else {
3875 0 : tracing::warn!(
3876 0 : "discarded delta layer not in layer_selection: {}, produced a layer outside of the compaction key range?",
3877 : l
3878 : );
3879 0 : stat.discard_delta_layer(0);
3880 : }
3881 7 : keep_layers.insert(l);
3882 : }
3883 : }
3884 : }
3885 27 : for layer in &rewrote_delta_layers {
3886 1 : debug!(
3887 0 : "produced rewritten delta layer: {}",
3888 0 : layer.layer_desc().key()
3889 : );
3890 : // For now, we include rewritten delta layer size in the "produce_delta_layer". We could
3891 : // make it a separate statistics in the future.
3892 1 : stat.produce_delta_layer(layer.layer_desc().file_size());
3893 : }
3894 26 : compact_to.extend(rewrote_delta_layers);
3895 45 : for action in produced_image_layers {
3896 19 : match action {
3897 15 : BatchWriterResult::Produced(layer) => {
3898 15 : debug!("produced image layer: {}", layer.layer_desc().key());
3899 15 : stat.produce_image_layer(layer.layer_desc().file_size());
3900 15 : compact_to.push(layer);
3901 : }
3902 4 : BatchWriterResult::Discarded(l) => {
3903 4 : debug!("discarded image layer: {}", l);
3904 4 : if let Some(layer_desc) = layer_selection_by_key.get(&l) {
3905 4 : stat.discard_image_layer(layer_desc.file_size());
3906 4 : } else {
3907 0 : tracing::warn!(
3908 0 : "discarded image layer not in layer_selection: {}, produced a layer outside of the compaction key range?",
3909 : l
3910 : );
3911 0 : stat.discard_image_layer(0);
3912 : }
3913 4 : keep_layers.insert(l);
3914 : }
3915 : }
3916 : }
3917 :
3918 26 : let mut layer_selection = job_desc.selected_layers;
3919 :
3920 : // Partial compaction might select more data than it processes, e.g., if
3921 : // the compaction_key_range only partially overlaps:
3922 : //
3923 : // [---compaction_key_range---]
3924 : // [---A----][----B----][----C----][----D----]
3925 : //
3926 :         // For delta layers, we will rewrite the layers so that they are cut exactly at
3927 : // the compaction key range, so we can always discard them. However, for image
3928 : // layers, as we do not rewrite them for now, we need to handle them differently.
3929 : // Assume image layers A, B, C, D are all in the `layer_selection`.
3930 : //
3931 : // The created image layers contain whatever is needed from B, C, and from
3932 : // `----]` of A, and from `[---` of D.
3933 : //
3934 : // In contrast, `[---A` and `D----]` have not been processed, so, we must
3935 : // keep that data.
3936 : //
3937 : // The solution for now is to keep A and D completely if they are image layers.
3938 : // (layer_selection is what we'll remove from the layer map, so, retain what
3939 : // is _not_ fully covered by compaction_key_range).
3940 102 : for layer in &layer_selection {
3941 76 : if !layer.layer_desc().is_delta() {
3942 33 : if !overlaps_with(
3943 33 : &layer.layer_desc().key_range,
3944 33 : &job_desc.compaction_key_range,
3945 33 : ) {
3946 0 : return Err(CompactionError::Other(anyhow!(
3947 0 : "violated constraint: image layer outside of compaction key range"
3948 0 : )));
3949 33 : }
3950 33 : if !fully_contains(
3951 33 : &job_desc.compaction_key_range,
3952 33 : &layer.layer_desc().key_range,
3953 33 : ) {
3954 4 : keep_layers.insert(layer.layer_desc().key());
3955 29 : }
3956 43 : }
3957 : }
3958 :
3959 76 : layer_selection.retain(|x| !keep_layers.contains(&x.layer_desc().key()));
3960 :
3961 26 : let time_final_phase = timer.elapsed();
3962 :
3963 26 : stat.time_final_phase_secs = time_final_phase.as_secs_f64();
3964 26 : stat.time_to_first_kv_pair_secs = time_to_first_kv_pair
3965 26 : .unwrap_or(Duration::ZERO)
3966 26 : .as_secs_f64();
3967 26 : stat.time_main_loop_secs = time_main_loop.as_secs_f64();
3968 26 : stat.time_acquire_lock_secs = time_acquire_lock.as_secs_f64();
3969 26 : stat.time_download_layer_secs = time_download_layer.as_secs_f64();
3970 26 : stat.time_analyze_secs = time_analyze.as_secs_f64();
3971 26 : stat.time_total_secs = begin_timer.elapsed().as_secs_f64();
3972 26 : stat.finalize();
3973 :
3974 26 : info!(
3975 0 : "gc-compaction statistics: {}",
3976 0 : serde_json::to_string(&stat)
3977 0 : .context("failed to serialize gc-compaction statistics")
3978 0 : .map_err(CompactionError::Other)?
3979 : );
3980 :
3981 26 : if dry_run {
3982 2 : return Ok(CompactionOutcome::Done);
3983 24 : }
3984 :
3985 24 : info!(
3986 0 : "produced {} delta layers and {} image layers, {} layers are kept",
3987 : produced_delta_layers_len,
3988 : produced_image_layers_len,
3989 0 : keep_layers.len()
3990 : );
3991 :
3992 : // Step 3: Place back to the layer map.
3993 :
3994 : // First, do a sanity check to ensure the newly-created layer map does not contain overlaps.
3995 24 : let all_layers = {
3996 24 : let guard = self
3997 24 : .layers
3998 24 : .read(LayerManagerLockHolder::GarbageCollection)
3999 24 : .await;
4000 24 : let layer_map = guard.layer_map()?;
4001 24 : layer_map.iter_historic_layers().collect_vec()
4002 : };
4003 :
4004 24 : let mut final_layers = all_layers
4005 24 : .iter()
4006 107 : .map(|layer| layer.layer_name())
4007 24 : .collect::<HashSet<_>>();
4008 76 : for layer in &layer_selection {
4009 52 : final_layers.remove(&layer.layer_desc().layer_name());
4010 52 : }
4011 51 : for layer in &compact_to {
4012 27 : final_layers.insert(layer.layer_desc().layer_name());
4013 27 : }
4014 24 : let final_layers = final_layers.into_iter().collect_vec();
4015 :
4016 : // TODO: move this check before we call `finish` on image layer writers. However, this will require us to get the layer name before we finish
4017 : // the writer, so potentially, we will need a function like `ImageLayerBatchWriter::get_all_pending_layer_keys` to get all the keys that are
4018 :         // in the writer before finalizing the persistent layers. Currently, we would leave some dangling layers on disk if the check fails.
4019 24 : if let Some(err) = check_valid_layermap(&final_layers) {
4020 0 : return Err(CompactionError::Other(anyhow!(
4021 0 : "gc-compaction layer map check failed after compaction because {}, compaction result not applied to the layer map due to potential data loss",
4022 0 : err
4023 0 : )));
4024 24 : }
4025 :
4026 : // Between the sanity check and this compaction update, there could be new layers being flushed, but it should be fine because we only
4027 : // operate on L1 layers.
4028 : {
4029 : // Gc-compaction will rewrite the history of a key. This could happen in two ways:
4030 : //
4031 : // 1. We create an image layer to replace all the deltas below the compact LSN. In this case, assume
4032 : // we have 2 delta layers A and B, both below the compact LSN. We create an image layer I to replace
4033 : // A and B at the compact LSN. If the read path finishes reading A, yields, and now we update the layer
4034 : // map, the read path then cannot find any keys below A, reporting a missing key error, while the key
4035 : // now gets stored in I at the compact LSN.
4036 : //
4037 : // --------------- ---------------
4038 : // delta1@LSN20 image1@LSN20
4039 : // --------------- (read path collects delta@LSN20, => --------------- (read path cannot find anything
4040 : // delta1@LSN10 yields) below LSN 20)
4041 : // ---------------
4042 : //
4043 : // 2. We create a delta layer to replace all the deltas below the compact LSN, and in the delta layers,
4044 :             // we combine the history of a key into a single image. For example, we have deltas at LSN 1, 2, 3, 4.
4045 :             // Assume one delta layer contains LSN 1, 2, 3 and the other contains LSN 4.
4046 : //
4047 : // We let gc-compaction combine delta 2, 3, 4 into an image at LSN 4, which produces a delta layer that
4048 :             // contains the delta at LSN 1 and the image at LSN 4. If the read path finishes reading the original delta
4049 :             // layer containing LSN 4, yields, and we then update the layer map to install the new layer, the read path collects an invalid history:
4050 : //
4051 : // --------------- ---------------
4052 : // delta1@LSN4 image1@LSN4
4053 : // --------------- (read path collects delta@LSN4, => --------------- (read path collects LSN4 and LSN1,
4054 : // delta1@LSN1-3 yields) delta1@LSN1 which is an invalid history)
4055 : // --------------- ---------------
4056 : //
4057 : // Therefore, the gc-compaction layer update operation should wait for all ongoing reads, block all pending reads,
4058 : // and only allow reads to continue after the update is finished.
4059 :
4060 24 : let update_guard = self.gc_compaction_layer_update_lock.write().await;
4061 : // Acquiring the update guard ensures current read operations end and new read operations are blocked.
4062 : // TODO: can we use `latest_gc_cutoff` Rcu to achieve the same effect?
4063 24 : let mut guard = self
4064 24 : .layers
4065 24 : .write(LayerManagerLockHolder::GarbageCollection)
4066 24 : .await;
4067 24 : guard
4068 24 : .open_mut()?
4069 24 : .finish_gc_compaction(&layer_selection, &compact_to, &self.metrics);
4070 24 : drop(update_guard); // Allow new reads to start ONLY after we finished updating the layer map.
4071 : };
4072 :
4073 : // Schedule an index-only upload to update the `latest_gc_cutoff` in the index_part.json.
4074 : // Otherwise, after restart, the index_part only contains the old `latest_gc_cutoff` and
4075 : // find_gc_cutoffs will try accessing things below the cutoff. TODO: ideally, this should
4076 : // be batched into `schedule_compaction_update`.
4077 24 : let disk_consistent_lsn = self.disk_consistent_lsn.load();
4078 24 : self.schedule_uploads(disk_consistent_lsn, None)
4079 24 : .context("failed to schedule uploads")
4080 24 : .map_err(CompactionError::Other)?;
4081 :         // If a layer gets rewritten during gc-compaction, we need to keep that layer only in `compact_to` instead
4082 : // of `compact_from`.
4083 24 : let compact_from = {
4084 24 : let mut compact_from = Vec::new();
4085 24 : let mut compact_to_set = HashMap::new();
4086 51 : for layer in &compact_to {
4087 27 : compact_to_set.insert(layer.layer_desc().key(), layer);
4088 27 : }
4089 76 : for layer in &layer_selection {
4090 52 : if let Some(to) = compact_to_set.get(&layer.layer_desc().key()) {
4091 0 : tracing::info!(
4092 0 : "skipping delete {} because found same layer key at different generation {}",
4093 : layer,
4094 : to
4095 : );
4096 52 : } else {
4097 52 : compact_from.push(layer.clone());
4098 52 : }
4099 : }
4100 24 : compact_from
4101 : };
4102 24 : self.remote_client
4103 24 : .schedule_compaction_update(&compact_from, &compact_to)?;
4104 :
4105 24 : drop(gc_lock);
4106 :
4107 24 : Ok(CompactionOutcome::Done)
4108 28 : }
4109 : }
4110 :
4111 : struct TimelineAdaptor {
4112 : timeline: Arc<Timeline>,
4113 :
4114 : keyspace: (Lsn, KeySpace),
4115 :
4116 : new_deltas: Vec<ResidentLayer>,
4117 : new_images: Vec<ResidentLayer>,
4118 : layers_to_delete: Vec<Arc<PersistentLayerDesc>>,
4119 : }
4120 :
4121 : impl TimelineAdaptor {
4122 0 : pub fn new(timeline: &Arc<Timeline>, keyspace: (Lsn, KeySpace)) -> Self {
4123 0 : Self {
4124 0 : timeline: timeline.clone(),
4125 0 : keyspace,
4126 0 : new_images: Vec::new(),
4127 0 : new_deltas: Vec::new(),
4128 0 : layers_to_delete: Vec::new(),
4129 0 : }
4130 0 : }
4131 :
4132 0 : pub async fn flush_updates(&mut self) -> Result<(), CompactionError> {
4133 0 : let layers_to_delete = {
4134 0 : let guard = self
4135 0 : .timeline
4136 0 : .layers
4137 0 : .read(LayerManagerLockHolder::Compaction)
4138 0 : .await;
4139 0 : self.layers_to_delete
4140 0 : .iter()
4141 0 : .map(|x| guard.get_from_desc(x))
4142 0 : .collect::<Vec<Layer>>()
4143 : };
4144 0 : self.timeline
4145 0 : .finish_compact_batch(&self.new_deltas, &self.new_images, &layers_to_delete)
4146 0 : .await?;
4147 :
4148 0 : self.timeline
4149 0 : .upload_new_image_layers(std::mem::take(&mut self.new_images))?;
4150 :
4151 0 : self.new_deltas.clear();
4152 0 : self.layers_to_delete.clear();
4153 0 : Ok(())
4154 0 : }
4155 : }
4156 :
4157 : #[derive(Clone)]
4158 : struct ResidentDeltaLayer(ResidentLayer);
4159 : #[derive(Clone)]
4160 : struct ResidentImageLayer(ResidentLayer);
4161 :
4162 : impl CompactionJobExecutor for TimelineAdaptor {
4163 : type Key = pageserver_api::key::Key;
4164 :
4165 : type Layer = OwnArc<PersistentLayerDesc>;
4166 : type DeltaLayer = ResidentDeltaLayer;
4167 : type ImageLayer = ResidentImageLayer;
4168 :
4169 : type RequestContext = crate::context::RequestContext;
4170 :
4171 0 : fn get_shard_identity(&self) -> &ShardIdentity {
4172 0 : self.timeline.get_shard_identity()
4173 0 : }
4174 :
4175 0 : async fn get_layers(
4176 0 : &mut self,
4177 0 : key_range: &Range<Key>,
4178 0 : lsn_range: &Range<Lsn>,
4179 0 : _ctx: &RequestContext,
4180 0 : ) -> anyhow::Result<Vec<OwnArc<PersistentLayerDesc>>> {
4181 0 : self.flush_updates().await?;
4182 :
4183 0 : let guard = self
4184 0 : .timeline
4185 0 : .layers
4186 0 : .read(LayerManagerLockHolder::Compaction)
4187 0 : .await;
4188 0 : let layer_map = guard.layer_map()?;
4189 :
4190 0 : let result = layer_map
4191 0 : .iter_historic_layers()
4192 0 : .filter(|l| {
4193 0 : overlaps_with(&l.lsn_range, lsn_range) && overlaps_with(&l.key_range, key_range)
4194 0 : })
4195 0 : .map(OwnArc)
4196 0 : .collect();
4197 0 : Ok(result)
4198 0 : }
4199 :
4200 0 : async fn get_keyspace(
4201 0 : &mut self,
4202 0 : key_range: &Range<Key>,
4203 0 : lsn: Lsn,
4204 0 : _ctx: &RequestContext,
4205 0 : ) -> anyhow::Result<Vec<Range<Key>>> {
4206 0 : if lsn == self.keyspace.0 {
4207 0 : Ok(pageserver_compaction::helpers::intersect_keyspace(
4208 0 : &self.keyspace.1.ranges,
4209 0 : key_range,
4210 0 : ))
4211 : } else {
4212 : // The current compaction implementation only ever requests the key space
4213 : // at the compaction end LSN.
4214 0 : anyhow::bail!("keyspace not available for requested lsn");
4215 : }
4216 0 : }
4217 :
4218 0 : async fn downcast_delta_layer(
4219 0 : &self,
4220 0 : layer: &OwnArc<PersistentLayerDesc>,
4221 0 : ctx: &RequestContext,
4222 0 : ) -> anyhow::Result<Option<ResidentDeltaLayer>> {
4223 : // this is a lot more complex than a simple downcast...
4224 0 : if layer.is_delta() {
4225 0 : let l = {
4226 0 : let guard = self
4227 0 : .timeline
4228 0 : .layers
4229 0 : .read(LayerManagerLockHolder::Compaction)
4230 0 : .await;
4231 0 : guard.get_from_desc(layer)
4232 : };
4233 0 : let result = l.download_and_keep_resident(ctx).await?;
4234 :
4235 0 : Ok(Some(ResidentDeltaLayer(result)))
4236 : } else {
4237 0 : Ok(None)
4238 : }
4239 0 : }
4240 :
4241 0 : async fn create_image(
4242 0 : &mut self,
4243 0 : lsn: Lsn,
4244 0 : key_range: &Range<Key>,
4245 0 : ctx: &RequestContext,
4246 0 : ) -> anyhow::Result<()> {
4247 0 : Ok(self.create_image_impl(lsn, key_range, ctx).await?)
4248 0 : }
4249 :
4250 0 : async fn create_delta(
4251 0 : &mut self,
4252 0 : lsn_range: &Range<Lsn>,
4253 0 : key_range: &Range<Key>,
4254 0 : input_layers: &[ResidentDeltaLayer],
4255 0 : ctx: &RequestContext,
4256 0 : ) -> anyhow::Result<()> {
4257 0 : debug!("Create new layer {}..{}", lsn_range.start, lsn_range.end);
4258 :
4259 0 : let mut all_entries = Vec::new();
4260 0 : for dl in input_layers.iter() {
4261 0 : all_entries.extend(dl.load_keys(ctx).await?);
4262 : }
4263 :
4264 : // The stdlib sort is adaptive: it is particularly fast when the slice
4265 : // is made up of already-sorted sub-ranges, as is the case here.
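: // e.g. two already-sorted runs [(k1, l1), (k3, l1)] and [(k2, l2)],
: // concatenated, are merged with few comparisons.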
4266 0 : all_entries.sort_by_key(|DeltaEntry { key, lsn, .. }| (*key, *lsn));
4267 :
4268 0 : let mut writer = DeltaLayerWriter::new(
4269 0 : self.timeline.conf,
4270 0 : self.timeline.timeline_id,
4271 0 : self.timeline.tenant_shard_id,
4272 0 : key_range.start,
4273 0 : lsn_range.clone(),
4274 0 : &self.timeline.gate,
4275 0 : self.timeline.cancel.clone(),
4276 0 : ctx,
4277 0 : )
4278 0 : .await?;
4279 :
4280 0 : let mut dup_values = 0;
4281 :
4282 : // Walk through all key-value pairs from all the layers we're
4283 : // compacting, in (key, LSN) order.
4284 0 : let mut prev: Option<(Key, Lsn)> = None;
4285 : for &DeltaEntry {
4286 0 : key, lsn, ref val, ..
4287 0 : } in all_entries.iter()
4288 : {
4289 0 : if prev == Some((key, lsn)) {
4290 : // This is a duplicate. Skip it.
4291 : //
4292 : // This can happen if compaction is interrupted after writing some
4293 : // layers but not all, and the same range is compacted again.
4294 : // The algorithm's calculations assume there are no duplicates, so
4295 : // the target file size math is likely off, and we will create
4296 : // smaller files than expected.
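: // e.g. for input entries [(k1, l1), (k1, l1), (k2, l2)] only two
: // values are written.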
4297 0 : dup_values += 1;
4298 0 : continue;
4299 0 : }
4300 :
4301 0 : let value = val.load(ctx).await?;
4302 :
4303 0 : writer.put_value(key, lsn, value, ctx).await?;
4304 :
4305 0 : prev = Some((key, lsn));
4306 : }
4307 :
4308 0 : if dup_values > 0 {
4309 0 : warn!("delta layer created with {} duplicate values", dup_values);
4310 0 : }
4311 :
4312 0 : fail_point!("delta-layer-writer-fail-before-finish", |_| {
4313 0 : Err(anyhow::anyhow!(
4314 0 : "failpoint delta-layer-writer-fail-before-finish"
4315 0 : ))
4316 0 : });
4317 :
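: // `prev` is `Some` here as long as the input layers contained at least
: // one entry; the `unwrap` below relies on that invariant.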
4318 0 : let (desc, path) = writer.finish(prev.unwrap().0.next(), ctx).await?;
4319 0 : let new_delta_layer =
4320 0 : Layer::finish_creating(self.timeline.conf, &self.timeline, desc, &path)?;
4321 :
4322 0 : self.new_deltas.push(new_delta_layer);
4323 0 : Ok(())
4324 0 : }
4325 :
4326 0 : async fn delete_layer(
4327 0 : &mut self,
4328 0 : layer: &OwnArc<PersistentLayerDesc>,
4329 0 : _ctx: &RequestContext,
4330 0 : ) -> anyhow::Result<()> {
4331 0 : self.layers_to_delete.push(layer.clone().0);
4332 0 : Ok(())
4333 0 : }
4334 : }
4335 :
4336 : impl TimelineAdaptor {
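: /// Build a single image layer covering `key_range` at `lsn`:
: /// reconstruct the page images for the keyspace, write them out, and
: /// queue the finished layer to be published on the next `flush_updates`.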
4337 0 : async fn create_image_impl(
4338 0 : &mut self,
4339 0 : lsn: Lsn,
4340 0 : key_range: &Range<Key>,
4341 0 : ctx: &RequestContext,
4342 0 : ) -> Result<(), CreateImageLayersError> {
4343 0 : let timer = self.timeline.metrics.create_images_time_histo.start_timer();
4344 :
4345 0 : let image_layer_writer = ImageLayerWriter::new(
4346 0 : self.timeline.conf,
4347 0 : self.timeline.timeline_id,
4348 0 : self.timeline.tenant_shard_id,
4349 0 : key_range,
4350 0 : lsn,
4351 0 : &self.timeline.gate,
4352 0 : self.timeline.cancel.clone(),
4353 0 : ctx,
4354 0 : )
4355 0 : .await
4356 0 : .map_err(CreateImageLayersError::Other)?;
4357 :
4358 0 : fail_point!("image-layer-writer-fail-before-finish", |_| {
4359 0 : Err(CreateImageLayersError::Other(anyhow::anyhow!(
4360 0 : "failpoint image-layer-writer-fail-before-finish"
4361 0 : )))
4362 0 : });
4363 :
4364 0 : let keyspace = KeySpace {
4365 0 : ranges: self
4366 0 : .get_keyspace(key_range, lsn, ctx)
4367 0 : .await
4368 0 : .map_err(CreateImageLayersError::Other)?,
4369 : };
4370 : // TODO set proper (stateful) start. The create_image_layer_for_rel_blocks function mostly
4371 0 : let outcome = self
4372 0 : .timeline
4373 0 : .create_image_layer_for_rel_blocks(
4374 0 : &keyspace,
4375 0 : image_layer_writer,
4376 0 : lsn,
4377 0 : ctx,
4378 0 : key_range.clone(),
4379 0 : IoConcurrency::sequential(),
4380 0 : None,
4381 0 : )
4382 0 : .await?;
4383 :
4384 : if let ImageLayerCreationOutcome::Generated {
4385 0 : unfinished_image_layer,
4386 0 : } = outcome
4387 : {
4388 0 : let (desc, path) = unfinished_image_layer
4389 0 : .finish(ctx)
4390 0 : .await
4391 0 : .map_err(CreateImageLayersError::Other)?;
4392 0 : let image_layer =
4393 0 : Layer::finish_creating(self.timeline.conf, &self.timeline, desc, &path)
4394 0 : .map_err(CreateImageLayersError::Other)?;
4395 0 : self.new_images.push(image_layer);
4396 0 : }
4397 :
4398 0 : timer.stop_and_record();
4399 :
4400 0 : Ok(())
4401 0 : }
4402 : }
4403 :
4404 : impl CompactionRequestContext for crate::context::RequestContext {}
4405 :
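: // A newtype around `Arc<T>`. Wrapping the `Arc` in a local type allows
: // the foreign `CompactionLayer` trait to be implemented for it (the
: // orphan rule would reject an impl on `Arc<PersistentLayerDesc>`
: // directly), while `Deref` and `AsRef` keep call sites ergonomic.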
4406 : #[derive(Debug, Clone)]
4407 : pub struct OwnArc<T>(pub Arc<T>);
4408 :
4409 : impl<T> Deref for OwnArc<T> {
4410 : type Target = <Arc<T> as Deref>::Target;
4411 0 : fn deref(&self) -> &Self::Target {
4412 0 : &self.0
4413 0 : }
4414 : }
4415 :
4416 : impl<T> AsRef<T> for OwnArc<T> {
4417 0 : fn as_ref(&self) -> &T {
4418 0 : self.0.as_ref()
4419 0 : }
4420 : }
4421 :
4422 : impl CompactionLayer<Key> for OwnArc<PersistentLayerDesc> {
4423 0 : fn key_range(&self) -> &Range<Key> {
4424 0 : &self.key_range
4425 0 : }
4426 0 : fn lsn_range(&self) -> &Range<Lsn> {
4427 0 : &self.lsn_range
4428 0 : }
4429 0 : fn file_size(&self) -> u64 {
4430 0 : self.file_size
4431 0 : }
4432 0 : fn short_id(&self) -> std::string::String {
4433 0 : self.as_ref().short_id().to_string()
4434 0 : }
4435 0 : fn is_delta(&self) -> bool {
4436 0 : self.as_ref().is_delta()
4437 0 : }
4438 : }
4439 :
4440 : impl CompactionLayer<Key> for OwnArc<DeltaLayer> {
4441 0 : fn key_range(&self) -> &Range<Key> {
4442 0 : &self.layer_desc().key_range
4443 0 : }
4444 0 : fn lsn_range(&self) -> &Range<Lsn> {
4445 0 : &self.layer_desc().lsn_range
4446 0 : }
4447 0 : fn file_size(&self) -> u64 {
4448 0 : self.layer_desc().file_size
4449 0 : }
4450 0 : fn short_id(&self) -> std::string::String {
4451 0 : self.layer_desc().short_id().to_string()
4452 0 : }
4453 0 : fn is_delta(&self) -> bool {
4454 0 : true
4455 0 : }
4456 : }
4457 :
4458 : impl CompactionLayer<Key> for ResidentDeltaLayer {
4459 0 : fn key_range(&self) -> &Range<Key> {
4460 0 : &self.0.layer_desc().key_range
4461 0 : }
4462 0 : fn lsn_range(&self) -> &Range<Lsn> {
4463 0 : &self.0.layer_desc().lsn_range
4464 0 : }
4465 0 : fn file_size(&self) -> u64 {
4466 0 : self.0.layer_desc().file_size
4467 0 : }
4468 0 : fn short_id(&self) -> std::string::String {
4469 0 : self.0.layer_desc().short_id().to_string()
4470 0 : }
4471 0 : fn is_delta(&self) -> bool {
4472 0 : true
4473 0 : }
4474 : }
4475 :
4476 : impl CompactionDeltaLayer<TimelineAdaptor> for ResidentDeltaLayer {
4477 : type DeltaEntry<'a> = DeltaEntry<'a>;
4478 :
4479 0 : async fn load_keys(&self, ctx: &RequestContext) -> anyhow::Result<Vec<DeltaEntry<'_>>> {
4480 0 : self.0.get_as_delta(ctx).await?.index_entries(ctx).await
4481 0 : }
4482 : }
4483 :
4484 : impl CompactionLayer<Key> for ResidentImageLayer {
4485 0 : fn key_range(&self) -> &Range<Key> {
4486 0 : &self.0.layer_desc().key_range
4487 0 : }
4488 0 : fn lsn_range(&self) -> &Range<Lsn> {
4489 0 : &self.0.layer_desc().lsn_range
4490 0 : }
4491 0 : fn file_size(&self) -> u64 {
4492 0 : self.0.layer_desc().file_size
4493 0 : }
4494 0 : fn short_id(&self) -> std::string::String {
4495 0 : self.0.layer_desc().short_id().to_string()
4496 0 : }
4497 0 : fn is_delta(&self) -> bool {
4498 0 : false
4499 0 : }
4500 : }
4501 : impl CompactionImageLayer<TimelineAdaptor> for ResidentImageLayer {}
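: // Marker impl: the trait adds no required methods beyond
: // `CompactionLayer` for this adaptor.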