Line data Source code
1 : //! New compaction implementation. The algorithm itself is implemented in the
2 : //! compaction crate. This file implements the callbacks and structs that allow
3 : //! the algorithm to drive the process.
4 : //!
5 : //! The old legacy algorithm is implemented directly in `timeline.rs`.
6 :
7 : use std::cmp::min;
8 : use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque};
9 : use std::ops::{Deref, Range};
10 : use std::sync::Arc;
11 : use std::time::{Duration, Instant};
12 :
13 : use super::layer_manager::LayerManagerLockHolder;
14 : use super::{
15 : CompactFlags, CompactOptions, CompactionError, CreateImageLayersError, DurationRecorder,
16 : GetVectoredError, ImageLayerCreationMode, LastImageLayerCreationStatus, RecordedDuration,
17 : Timeline,
18 : };
19 :
20 : use crate::pgdatadir_mapping::CollectKeySpaceError;
21 : use crate::tenant::timeline::{DeltaEntry, RepartitionError};
22 : use crate::walredo::RedoAttemptType;
23 : use anyhow::{Context, anyhow};
24 : use bytes::Bytes;
25 : use enumset::EnumSet;
26 : use fail::fail_point;
27 : use futures::FutureExt;
28 : use itertools::Itertools;
29 : use once_cell::sync::Lazy;
30 : use pageserver_api::config::tenant_conf_defaults::DEFAULT_CHECKPOINT_DISTANCE;
31 : use pageserver_api::key::{KEY_SIZE, Key};
32 : use pageserver_api::keyspace::{KeySpace, ShardedRange};
33 : use pageserver_api::models::{CompactInfoResponse, CompactKeyRange};
34 : use pageserver_api::shard::{ShardCount, ShardIdentity, TenantShardId};
35 : use pageserver_compaction::helpers::{fully_contains, overlaps_with};
36 : use pageserver_compaction::interface::*;
37 : use serde::Serialize;
38 : use tokio::sync::{OwnedSemaphorePermit, Semaphore};
39 : use tokio_util::sync::CancellationToken;
40 : use tracing::{Instrument, debug, error, info, info_span, trace, warn};
41 : use utils::critical_timeline;
42 : use utils::id::TimelineId;
43 : use utils::lsn::Lsn;
44 : use wal_decoder::models::record::NeonWalRecord;
45 : use wal_decoder::models::value::Value;
46 :
47 : use crate::context::{AccessStatsBehavior, RequestContext, RequestContextBuilder};
48 : use crate::page_cache;
49 : use crate::statvfs::Statvfs;
50 : use crate::tenant::checks::check_valid_layermap;
51 : use crate::tenant::gc_block::GcBlock;
52 : use crate::tenant::layer_map::LayerMap;
53 : use crate::tenant::remote_timeline_client::WaitCompletionError;
54 : use crate::tenant::remote_timeline_client::index::GcCompactionState;
55 : use crate::tenant::storage_layer::batch_split_writer::{
56 : BatchWriterResult, SplitDeltaLayerWriter, SplitImageLayerWriter,
57 : };
58 : use crate::tenant::storage_layer::filter_iterator::FilterIterator;
59 : use crate::tenant::storage_layer::merge_iterator::MergeIterator;
60 : use crate::tenant::storage_layer::{
61 : AsLayerDesc, LayerVisibilityHint, PersistentLayerDesc, PersistentLayerKey,
62 : ValueReconstructState,
63 : };
64 : use crate::tenant::tasks::log_compaction_error;
65 : use crate::tenant::timeline::{
66 : DeltaLayerWriter, ImageLayerCreationOutcome, ImageLayerWriter, IoConcurrency, Layer,
67 : ResidentLayer, drop_layer_manager_rlock,
68 : };
69 : use crate::tenant::{DeltaLayer, MaybeOffloaded, PageReconstructError};
70 : use crate::virtual_file::{MaybeFatalIo, VirtualFile};
71 :
72 : /// Maximum number of deltas before generating an image layer in bottom-most compaction.
73 : const COMPACTION_DELTA_THRESHOLD: usize = 5;
74 :
75 : /// Ratio of shard-local pages below which we trigger shard ancestor layer rewrites. 0.3 means that
76 : /// a layer is rewritten when <= 30% of its pages belong to the descendant shard.
77 : ///
78 : /// We choose a value < 0.5 to avoid rewriting all visible layers every time we do a power-of-two
79 : /// shard split, which gets expensive for large tenants.
80 : const ANCESTOR_COMPACTION_REWRITE_THRESHOLD: f64 = 0.3;
81 :
82 : #[derive(Default, Debug, Clone, Copy, Hash, PartialEq, Eq, Serialize)]
83 : pub struct GcCompactionJobId(pub usize);
84 :
85 : impl std::fmt::Display for GcCompactionJobId {
86 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
87 0 : write!(f, "{}", self.0)
88 0 : }
89 : }
90 :
91 : pub struct GcCompactionCombinedSettings {
92 : pub gc_compaction_enabled: bool,
93 : pub gc_compaction_verification: bool,
94 : pub gc_compaction_initial_threshold_kb: u64,
95 : pub gc_compaction_ratio_percent: u64,
96 : }
97 :
98 : #[derive(Debug, Clone)]
99 : pub enum GcCompactionQueueItem {
100 : MetaJob {
101 : /// Compaction options
102 : options: CompactOptions,
103 : /// Whether the compaction is triggered automatically (determines whether we need to update L2 LSN)
104 : auto: bool,
105 : },
106 : SubCompactionJob {
107 : i: usize,
108 : total: usize,
109 : options: CompactOptions,
110 : },
111 : Notify(GcCompactionJobId, Option<Lsn>),
112 : }
113 :
114 : /// Statistics for gc-compaction meta jobs, each of which contains several sub compaction jobs.
115 : #[derive(Debug, Clone, Serialize, Default)]
116 : pub struct GcCompactionMetaStatistics {
117 : /// The total number of sub compaction jobs.
118 : pub total_sub_compaction_jobs: usize,
119 : /// The total number of sub compaction jobs that failed.
120 : pub failed_sub_compaction_jobs: usize,
121 : /// The total number of sub compaction jobs that succeeded.
122 : pub succeeded_sub_compaction_jobs: usize,
123 : /// The layer size before compaction.
124 : pub before_compaction_layer_size: u64,
125 : /// The layer size after compaction.
126 : pub after_compaction_layer_size: u64,
127 : /// The start time of the meta job.
128 : pub start_time: Option<chrono::DateTime<chrono::Utc>>,
129 : /// The end time of the meta job.
130 : pub end_time: Option<chrono::DateTime<chrono::Utc>>,
131 : /// The duration of the meta job.
132 : pub duration_secs: f64,
133 : /// The id of the meta job.
134 : pub meta_job_id: GcCompactionJobId,
135 : /// The LSN below which the layers are compacted, used to compute the statistics.
136 : pub below_lsn: Lsn,
137 : /// The retention ratio of the meta job (after_compaction_layer_size / before_compaction_layer_size)
138 : pub retention_ratio: f64,
139 : }
140 :
141 : impl GcCompactionMetaStatistics {
142 0 : fn finalize(&mut self) {
143 0 : let end_time = chrono::Utc::now();
144 0 : if let Some(start_time) = self.start_time {
145 0 : if end_time > start_time {
146 0 : let delta = end_time - start_time;
147 0 : if let Ok(std_dur) = delta.to_std() {
148 0 : self.duration_secs = std_dur.as_secs_f64();
149 0 : }
150 0 : }
151 0 : }
152 0 : self.retention_ratio = self.after_compaction_layer_size as f64
153 0 : / (self.before_compaction_layer_size as f64 + 1.0);
154 0 : self.end_time = Some(end_time);
155 0 : }
156 : }
157 :
158 : impl GcCompactionQueueItem {
159 0 : pub fn into_compact_info_resp(
160 0 : self,
161 0 : id: GcCompactionJobId,
162 0 : running: bool,
163 0 : ) -> Option<CompactInfoResponse> {
164 0 : match self {
165 0 : GcCompactionQueueItem::MetaJob { options, .. } => Some(CompactInfoResponse {
166 0 : compact_key_range: options.compact_key_range,
167 0 : compact_lsn_range: options.compact_lsn_range,
168 0 : sub_compaction: options.sub_compaction,
169 0 : running,
170 0 : job_id: id.0,
171 0 : }),
172 0 : GcCompactionQueueItem::SubCompactionJob { options, .. } => Some(CompactInfoResponse {
173 0 : compact_key_range: options.compact_key_range,
174 0 : compact_lsn_range: options.compact_lsn_range,
175 0 : sub_compaction: options.sub_compaction,
176 0 : running,
177 0 : job_id: id.0,
178 0 : }),
179 0 : GcCompactionQueueItem::Notify(_, _) => None,
180 : }
181 0 : }
182 : }
183 :
184 : #[derive(Default)]
185 : struct GcCompactionGuardItems {
186 : notify: Option<tokio::sync::oneshot::Sender<()>>,
187 : permit: Option<OwnedSemaphorePermit>,
188 : }
189 :
190 : struct GcCompactionQueueInner {
191 : running: Option<(GcCompactionJobId, GcCompactionQueueItem)>,
192 : queued: VecDeque<(GcCompactionJobId, GcCompactionQueueItem)>,
193 : guards: HashMap<GcCompactionJobId, GcCompactionGuardItems>,
194 : last_id: GcCompactionJobId,
195 : meta_statistics: Option<GcCompactionMetaStatistics>,
196 : }
197 :
198 : impl GcCompactionQueueInner {
199 0 : fn next_id(&mut self) -> GcCompactionJobId {
200 0 : let id = self.last_id;
201 0 : self.last_id = GcCompactionJobId(id.0 + 1);
202 0 : id
203 0 : }
204 : }
205 :
206 : /// A structure to store gc_compaction jobs.
207 : pub struct GcCompactionQueue {
208 : /// All items in the queue, and the currently-running job.
209 : inner: std::sync::Mutex<GcCompactionQueueInner>,
210 : /// Ensure only one thread is consuming the queue.
211 : consumer_lock: tokio::sync::Mutex<()>,
212 : }
213 :
214 0 : static CONCURRENT_GC_COMPACTION_TASKS: Lazy<Arc<Semaphore>> = Lazy::new(|| {
215 : // Only allow one timeline on one pageserver to run gc compaction at a time.
216 0 : Arc::new(Semaphore::new(1))
217 0 : });
218 :
219 : impl GcCompactionQueue {
220 0 : pub fn new() -> Self {
221 0 : GcCompactionQueue {
222 0 : inner: std::sync::Mutex::new(GcCompactionQueueInner {
223 0 : running: None,
224 0 : queued: VecDeque::new(),
225 0 : guards: HashMap::new(),
226 0 : last_id: GcCompactionJobId(0),
227 0 : meta_statistics: None,
228 0 : }),
229 0 : consumer_lock: tokio::sync::Mutex::new(()),
230 0 : }
231 0 : }
232 :
233 0 : pub fn cancel_scheduled(&self) {
234 0 : let mut guard = self.inner.lock().unwrap();
235 0 : guard.queued.clear();
236 : // TODO: if there is a running job, we should keep the gc guard. However, currently, the cancel
237 : // API is only used for testing purposes, so we can drop everything here.
238 0 : guard.guards.clear();
239 0 : }
240 :
241 : /// Schedule a manual compaction job.
242 0 : pub fn schedule_manual_compaction(
243 0 : &self,
244 0 : options: CompactOptions,
245 0 : notify: Option<tokio::sync::oneshot::Sender<()>>,
246 0 : ) -> GcCompactionJobId {
247 0 : let mut guard = self.inner.lock().unwrap();
248 0 : let id = guard.next_id();
249 0 : guard.queued.push_back((
250 0 : id,
251 0 : GcCompactionQueueItem::MetaJob {
252 0 : options,
253 0 : auto: false,
254 0 : },
255 0 : ));
256 0 : guard.guards.entry(id).or_default().notify = notify;
257 0 : info!("scheduled compaction job id={}", id);
258 0 : id
259 0 : }
260 :
261 : /// Schedule an auto compaction job.
262 0 : fn schedule_auto_compaction(
263 0 : &self,
264 0 : options: CompactOptions,
265 0 : permit: OwnedSemaphorePermit,
266 0 : ) -> GcCompactionJobId {
267 0 : let mut guard = self.inner.lock().unwrap();
268 0 : let id = guard.next_id();
269 0 : guard.queued.push_back((
270 0 : id,
271 0 : GcCompactionQueueItem::MetaJob {
272 0 : options,
273 0 : auto: true,
274 0 : },
275 0 : ));
276 0 : guard.guards.entry(id).or_default().permit = Some(permit);
277 0 : id
278 0 : }
279 :
280 : /// Trigger an auto compaction.
281 0 : pub async fn trigger_auto_compaction(
282 0 : &self,
283 0 : timeline: &Arc<Timeline>,
284 0 : ) -> Result<(), CompactionError> {
285 : let GcCompactionCombinedSettings {
286 0 : gc_compaction_enabled,
287 0 : gc_compaction_initial_threshold_kb,
288 0 : gc_compaction_ratio_percent,
289 : ..
290 0 : } = timeline.get_gc_compaction_settings();
291 0 : if !gc_compaction_enabled {
292 0 : return Ok(());
293 0 : }
294 0 : if self.remaining_jobs_num() > 0 {
295 : // Only schedule auto compaction when the queue is empty
296 0 : return Ok(());
297 0 : }
298 0 : if timeline.ancestor_timeline().is_some() {
299 : // Do not trigger auto compaction for child timelines. We haven't tested
300 : // it enough in staging yet.
301 0 : return Ok(());
302 0 : }
303 0 : if timeline.get_gc_compaction_watermark() == Lsn::INVALID {
304 : // If the gc watermark is not set, we don't need to trigger auto compaction.
305 : // This check is the same as in `gc_compaction_split_jobs` but we don't log
306 : // here and we can also skip the computation of the trigger condition earlier.
307 0 : return Ok(());
308 0 : }
309 :
310 0 : let Ok(permit) = CONCURRENT_GC_COMPACTION_TASKS.clone().try_acquire_owned() else {
311 : // Only allow one compaction run at a time. TODO: As we do `try_acquire_owned`, we cannot ensure
312 : // the fairness of the lock across timelines. We should listen for both `acquire` and `l0_compaction_trigger`
313 0 : // to ensure fairness while avoiding starving other tasks.
314 0 : return Ok(());
315 : };
316 :
317 0 : let gc_compaction_state = timeline.get_gc_compaction_state();
318 0 : let l2_lsn = gc_compaction_state
319 0 : .map(|x| x.last_completed_lsn)
320 0 : .unwrap_or(Lsn::INVALID);
321 :
322 0 : let layers = {
323 0 : let guard = timeline
324 0 : .layers
325 0 : .read(LayerManagerLockHolder::GetLayerMapInfo)
326 0 : .await;
327 0 : let layer_map = guard.layer_map()?;
328 0 : layer_map.iter_historic_layers().collect_vec()
329 : };
330 0 : let mut l2_size: u64 = 0;
331 0 : let mut l1_size = 0;
332 0 : let gc_cutoff = *timeline.get_applied_gc_cutoff_lsn();
333 0 : for layer in layers {
334 0 : if layer.lsn_range.start <= l2_lsn {
335 0 : l2_size += layer.file_size();
336 0 : } else if layer.lsn_range.start <= gc_cutoff {
337 0 : l1_size += layer.file_size();
338 0 : }
339 : }
340 :
341 0 : fn trigger_compaction(
342 0 : l1_size: u64,
343 0 : l2_size: u64,
344 0 : gc_compaction_initial_threshold_kb: u64,
345 0 : gc_compaction_ratio_percent: u64,
346 0 : ) -> bool {
347 : const AUTO_TRIGGER_LIMIT: u64 = 150 * 1024 * 1024 * 1024; // 150GB
348 0 : if l1_size + l2_size >= AUTO_TRIGGER_LIMIT {
349 : // Do not auto-trigger when physical size >= 150GB
350 0 : return false;
351 0 : }
352 : // initial trigger
353 0 : if l2_size == 0 && l1_size >= gc_compaction_initial_threshold_kb * 1024 {
354 0 : info!(
355 0 : "trigger auto-compaction because l1_size={} >= gc_compaction_initial_threshold_kb={}",
356 : l1_size, gc_compaction_initial_threshold_kb
357 : );
358 0 : return true;
359 0 : }
360 : // size ratio trigger
361 0 : if l2_size == 0 {
362 0 : return false;
363 0 : }
364 0 : if l1_size as f64 / l2_size as f64 >= (gc_compaction_ratio_percent as f64 / 100.0) {
365 0 : info!(
366 0 : "trigger auto-compaction because l1_size={} / l2_size={} > gc_compaction_ratio_percent={}",
367 : l1_size, l2_size, gc_compaction_ratio_percent
368 : );
369 0 : return true;
370 0 : }
371 0 : false
372 0 : }
373 :
374 0 : if trigger_compaction(
375 0 : l1_size,
376 0 : l2_size,
377 0 : gc_compaction_initial_threshold_kb,
378 0 : gc_compaction_ratio_percent,
379 : ) {
380 0 : self.schedule_auto_compaction(
381 : CompactOptions {
382 : flags: {
383 0 : let mut flags = EnumSet::new();
384 0 : flags |= CompactFlags::EnhancedGcBottomMostCompaction;
385 0 : if timeline.get_compaction_l0_first() {
386 0 : flags |= CompactFlags::YieldForL0;
387 0 : }
388 0 : flags
389 : },
390 : sub_compaction: true,
391 : // Only auto-trigger gc-compaction over the data keyspace due to concerns in
392 : // https://github.com/neondatabase/neon/issues/11318.
393 0 : compact_key_range: Some(CompactKeyRange {
394 0 : start: Key::MIN,
395 0 : end: Key::metadata_key_range().start,
396 0 : }),
397 0 : compact_lsn_range: None,
398 0 : sub_compaction_max_job_size_mb: None,
399 : },
400 0 : permit,
401 : );
402 0 : info!(
403 0 : "scheduled auto gc-compaction: l1_size={}, l2_size={}, l2_lsn={}, gc_cutoff={}",
404 : l1_size, l2_size, l2_lsn, gc_cutoff
405 : );
406 : } else {
407 0 : debug!(
408 0 : "did not trigger auto gc-compaction: l1_size={}, l2_size={}, l2_lsn={}, gc_cutoff={}",
409 : l1_size, l2_size, l2_lsn, gc_cutoff
410 : );
411 : }
412 0 : Ok(())
413 0 : }
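    // Worked example of the two trigger conditions above, with made-up settings (not
    // production defaults): assume gc_compaction_initial_threshold_kb = 5 * 1024 * 1024
    // (i.e. 5 GiB once multiplied by 1024) and gc_compaction_ratio_percent = 100. Then:
    //   - l1_size = 6 GiB, l2_size = 0       -> initial trigger fires (6 GiB >= 5 GiB)
    //   - l1_size = 3 GiB, l2_size = 4 GiB   -> ratio 0.75 < 1.0, no trigger
    //   - l1_size = 5 GiB, l2_size = 4 GiB   -> ratio 1.25 >= 1.0, trigger fires
    //   - l1_size + l2_size >= 150 GiB       -> auto-trigger is always suppressed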
414 :
415 0 : async fn collect_layer_below_lsn(
416 0 : &self,
417 0 : timeline: &Arc<Timeline>,
418 0 : lsn: Lsn,
419 0 : ) -> Result<u64, CompactionError> {
420 0 : let guard = timeline
421 0 : .layers
422 0 : .read(LayerManagerLockHolder::GetLayerMapInfo)
423 0 : .await;
424 0 : let layer_map = guard.layer_map()?;
425 0 : let layers = layer_map.iter_historic_layers().collect_vec();
426 0 : let mut size = 0;
427 0 : for layer in layers {
428 0 : if layer.lsn_range.start <= lsn {
429 0 : size += layer.file_size();
430 0 : }
431 : }
432 0 : Ok(size)
433 0 : }
434 :
435 : /// Notify the caller the job has finished and unblock GC.
436 0 : fn notify_and_unblock(&self, id: GcCompactionJobId) {
437 0 : info!("compaction job id={} finished", id);
438 0 : let mut guard = self.inner.lock().unwrap();
439 0 : if let Some(items) = guard.guards.remove(&id) {
440 0 : if let Some(tx) = items.notify {
441 0 : let _ = tx.send(());
442 0 : }
443 0 : }
444 0 : if let Some(ref meta_statistics) = guard.meta_statistics {
445 0 : if meta_statistics.meta_job_id == id {
446 0 : if let Ok(stats) = serde_json::to_string(&meta_statistics) {
447 0 : info!(
448 0 : "gc-compaction meta statistics for job id = {}: {}",
449 : id, stats
450 : );
451 0 : }
452 0 : }
453 0 : }
454 0 : }
455 :
456 0 : fn clear_running_job(&self) {
457 0 : let mut guard = self.inner.lock().unwrap();
458 0 : guard.running = None;
459 0 : }
460 :
461 0 : async fn handle_sub_compaction(
462 0 : &self,
463 0 : id: GcCompactionJobId,
464 0 : options: CompactOptions,
465 0 : timeline: &Arc<Timeline>,
466 0 : auto: bool,
467 0 : ) -> Result<(), CompactionError> {
468 0 : info!(
469 0 : "running scheduled enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs"
470 : );
471 0 : let res = timeline
472 0 : .gc_compaction_split_jobs(
473 0 : GcCompactJob::from_compact_options(options.clone()),
474 0 : options.sub_compaction_max_job_size_mb,
475 0 : )
476 0 : .await;
477 0 : let jobs = match res {
478 0 : Ok(jobs) => jobs,
479 0 : Err(err) => {
480 0 : warn!("cannot split gc-compaction jobs: {}, unblocked gc", err);
481 0 : self.notify_and_unblock(id);
482 0 : return Err(err);
483 : }
484 : };
485 0 : if jobs.is_empty() {
486 0 : info!("no jobs to run, skipping scheduled compaction task");
487 0 : self.notify_and_unblock(id);
488 : } else {
489 0 : let jobs_len = jobs.len();
490 0 : let mut pending_tasks = Vec::new();
491 : // gc-compaction might pick more layers or fewer layers to compact. The L2 LSN does not need to be accurate.
492 : // And therefore, we simply assume the maximum LSN of all jobs is the expected L2 LSN.
493 0 : let expected_l2_lsn = jobs
494 0 : .iter()
495 0 : .map(|job| job.compact_lsn_range.end)
496 0 : .max()
497 0 : .unwrap();
498 0 : for (i, job) in jobs.into_iter().enumerate() {
499 : // Unfortunately we need to convert the `GcCompactJob` back to `CompactOptions`
500 : // until we do further refactoring to allow calling `compact_with_gc` directly.
501 0 : let mut flags: EnumSet<CompactFlags> = EnumSet::default();
502 0 : flags |= CompactFlags::EnhancedGcBottomMostCompaction;
503 0 : if job.dry_run {
504 0 : flags |= CompactFlags::DryRun;
505 0 : }
506 0 : if options.flags.contains(CompactFlags::YieldForL0) {
507 0 : flags |= CompactFlags::YieldForL0;
508 0 : }
509 0 : let options = CompactOptions {
510 0 : flags,
511 0 : sub_compaction: false,
512 0 : compact_key_range: Some(job.compact_key_range.into()),
513 0 : compact_lsn_range: Some(job.compact_lsn_range.into()),
514 0 : sub_compaction_max_job_size_mb: None,
515 0 : };
516 0 : pending_tasks.push(GcCompactionQueueItem::SubCompactionJob {
517 0 : options,
518 0 : i,
519 0 : total: jobs_len,
520 0 : });
521 : }
522 :
523 0 : if !auto {
524 0 : pending_tasks.push(GcCompactionQueueItem::Notify(id, None));
525 0 : } else {
526 0 : pending_tasks.push(GcCompactionQueueItem::Notify(id, Some(expected_l2_lsn)));
527 0 : }
528 :
529 0 : let layer_size = self
530 0 : .collect_layer_below_lsn(timeline, expected_l2_lsn)
531 0 : .await?;
532 :
533 : {
534 0 : let mut guard = self.inner.lock().unwrap();
535 0 : let mut tasks = Vec::new();
536 0 : for task in pending_tasks {
537 0 : let id = guard.next_id();
538 0 : tasks.push((id, task));
539 0 : }
540 0 : tasks.reverse();
541 0 : for item in tasks {
542 0 : guard.queued.push_front(item);
543 0 : }
544 0 : guard.meta_statistics = Some(GcCompactionMetaStatistics {
545 0 : meta_job_id: id,
546 0 : start_time: Some(chrono::Utc::now()),
547 0 : before_compaction_layer_size: layer_size,
548 0 : below_lsn: expected_l2_lsn,
549 0 : total_sub_compaction_jobs: jobs_len,
550 0 : ..Default::default()
551 0 : });
552 : }
553 :
554 0 : info!(
555 0 : "scheduled enhanced gc bottom-most compaction with sub-compaction, split into {} jobs",
556 : jobs_len
557 : );
558 : }
559 0 : Ok(())
560 0 : }
561 :
562 : /// Take a job from the queue and process it. Returns whether there are still pending tasks.
563 0 : pub async fn iteration(
564 0 : &self,
565 0 : cancel: &CancellationToken,
566 0 : ctx: &RequestContext,
567 0 : gc_block: &GcBlock,
568 0 : timeline: &Arc<Timeline>,
569 0 : ) -> Result<CompactionOutcome, CompactionError> {
570 0 : let res = self.iteration_inner(cancel, ctx, gc_block, timeline).await;
571 0 : if let Err(err) = &res {
572 0 : log_compaction_error(err, None, cancel.is_cancelled(), true);
573 0 : }
574 0 : match res {
575 0 : Ok(res) => Ok(res),
576 0 : Err(e) if e.is_cancel() => Err(e),
577 : Err(_) => {
578 : // There are some cases where traditional gc might collect some layer files,
579 : // leaving gc-compaction unable to read the full history of the key.
580 : // This needs to be resolved in the long term by improving the compaction
581 : // process. For now, let's simply prevent such errors from tripping the
582 : // circuit breaker.
583 0 : Ok(CompactionOutcome::Skipped)
584 : }
585 : }
586 0 : }
587 :
588 0 : async fn iteration_inner(
589 0 : &self,
590 0 : cancel: &CancellationToken,
591 0 : ctx: &RequestContext,
592 0 : gc_block: &GcBlock,
593 0 : timeline: &Arc<Timeline>,
594 0 : ) -> Result<CompactionOutcome, CompactionError> {
595 0 : let Ok(_one_op_at_a_time_guard) = self.consumer_lock.try_lock() else {
596 0 : return Err(CompactionError::Other(anyhow::anyhow!(
597 0 : "cannot run gc-compaction because another gc-compaction is running. This should not happen because we only call this function from the gc-compaction queue."
598 0 : )));
599 : };
600 : let has_pending_tasks;
601 0 : let mut yield_for_l0 = false;
602 0 : let Some((id, item)) = ({
603 0 : let mut guard = self.inner.lock().unwrap();
604 0 : if let Some((id, item)) = guard.queued.pop_front() {
605 0 : guard.running = Some((id, item.clone()));
606 0 : has_pending_tasks = !guard.queued.is_empty();
607 0 : Some((id, item))
608 : } else {
609 0 : has_pending_tasks = false;
610 0 : None
611 : }
612 : }) else {
613 0 : self.trigger_auto_compaction(timeline).await?;
614 : // Always yield after triggering auto-compaction. Gc-compaction is a low-priority task and we
615 : // have not implemented a preemption mechanism yet. We always want to yield to more important
616 : // tasks if there are any.
617 0 : return Ok(CompactionOutcome::Done);
618 : };
619 0 : match item {
620 0 : GcCompactionQueueItem::MetaJob { options, auto } => {
621 0 : if !options
622 0 : .flags
623 0 : .contains(CompactFlags::EnhancedGcBottomMostCompaction)
624 : {
625 0 : warn!(
626 0 : "ignoring scheduled compaction task: scheduled task must be gc compaction: {:?}",
627 : options
628 : );
629 0 : } else if options.sub_compaction {
630 0 : info!(
631 0 : "running scheduled enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs"
632 : );
633 0 : self.handle_sub_compaction(id, options, timeline, auto)
634 0 : .await?;
635 : } else {
636 : // Auto compaction always enables sub-compaction so we don't need to handle update_l2_lsn
637 : // in this branch.
638 0 : let _gc_guard = match gc_block.start().await {
639 0 : Ok(guard) => guard,
640 0 : Err(e) => {
641 0 : self.notify_and_unblock(id);
642 0 : self.clear_running_job();
643 0 : return Err(CompactionError::Other(anyhow!(
644 0 : "cannot run gc-compaction because gc is blocked: {}",
645 0 : e
646 0 : )));
647 : }
648 : };
649 0 : let res = timeline.compact_with_options(cancel, options, ctx).await;
650 0 : let compaction_result = match res {
651 0 : Ok(res) => res,
652 0 : Err(err) => {
653 0 : warn!(%err, "failed to run gc-compaction");
654 0 : self.notify_and_unblock(id);
655 0 : self.clear_running_job();
656 0 : return Err(err);
657 : }
658 : };
659 0 : if compaction_result == CompactionOutcome::YieldForL0 {
660 0 : yield_for_l0 = true;
661 0 : }
662 : }
663 : }
664 0 : GcCompactionQueueItem::SubCompactionJob { options, i, total } => {
665 : // TODO: error handling, clear the queue if any task fails?
666 0 : let _gc_guard = match gc_block.start().await {
667 0 : Ok(guard) => guard,
668 0 : Err(e) => {
669 0 : self.clear_running_job();
670 0 : return Err(CompactionError::Other(anyhow!(
671 0 : "cannot run gc-compaction because gc is blocked: {}",
672 0 : e
673 0 : )));
674 : }
675 : };
676 0 : info!("running gc-compaction subcompaction job {}/{}", i, total);
677 0 : let res = timeline.compact_with_options(cancel, options, ctx).await;
678 0 : let compaction_result = match res {
679 0 : Ok(res) => res,
680 0 : Err(err) => {
681 0 : warn!(%err, "failed to run gc-compaction subcompaction job");
682 0 : self.clear_running_job();
683 0 : let mut guard = self.inner.lock().unwrap();
684 0 : if let Some(ref mut meta_statistics) = guard.meta_statistics {
685 0 : meta_statistics.failed_sub_compaction_jobs += 1;
686 0 : }
687 0 : return Err(err);
688 : }
689 : };
690 0 : if compaction_result == CompactionOutcome::YieldForL0 {
691 0 : // We permanently give up a task if we yield for L0 compaction: the preempted subcompaction job won't be run
692 0 : // again. This ensures that we don't keep doing duplicated work within gc-compaction. We don't return directly here
693 0 : // because we need to clean things up before returning from the function.
694 0 : yield_for_l0 = true;
695 0 : }
696 : {
697 0 : let mut guard = self.inner.lock().unwrap();
698 0 : if let Some(ref mut meta_statistics) = guard.meta_statistics {
699 0 : meta_statistics.succeeded_sub_compaction_jobs += 1;
700 0 : }
701 : }
702 : }
703 0 : GcCompactionQueueItem::Notify(id, l2_lsn) => {
704 0 : let below_lsn = {
705 0 : let mut guard = self.inner.lock().unwrap();
706 0 : if let Some(ref mut meta_statistics) = guard.meta_statistics {
707 0 : meta_statistics.below_lsn
708 : } else {
709 0 : Lsn::INVALID
710 : }
711 : };
712 0 : let layer_size = if below_lsn != Lsn::INVALID {
713 0 : self.collect_layer_below_lsn(timeline, below_lsn).await?
714 : } else {
715 0 : 0
716 : };
717 : {
718 0 : let mut guard = self.inner.lock().unwrap();
719 0 : if let Some(ref mut meta_statistics) = guard.meta_statistics {
720 0 : meta_statistics.after_compaction_layer_size = layer_size;
721 0 : meta_statistics.finalize();
722 0 : }
723 : }
724 0 : self.notify_and_unblock(id);
725 0 : if let Some(l2_lsn) = l2_lsn {
726 0 : let current_l2_lsn = timeline
727 0 : .get_gc_compaction_state()
728 0 : .map(|x| x.last_completed_lsn)
729 0 : .unwrap_or(Lsn::INVALID);
730 0 : if l2_lsn >= current_l2_lsn {
731 0 : info!("l2_lsn updated to {}", l2_lsn);
732 0 : timeline
733 0 : .update_gc_compaction_state(GcCompactionState {
734 0 : last_completed_lsn: l2_lsn,
735 0 : })
736 0 : .map_err(CompactionError::Other)?;
737 : } else {
738 0 : warn!(
739 0 : "l2_lsn updated to {} but it is less than the current l2_lsn {}",
740 : l2_lsn, current_l2_lsn
741 : );
742 : }
743 0 : }
744 : }
745 : }
746 0 : self.clear_running_job();
747 0 : Ok(if yield_for_l0 {
748 0 : tracing::info!("give up gc-compaction: yield for L0 compaction");
749 0 : CompactionOutcome::YieldForL0
750 0 : } else if has_pending_tasks {
751 0 : CompactionOutcome::Pending
752 : } else {
753 0 : CompactionOutcome::Done
754 : })
755 0 : }
756 :
757 : #[allow(clippy::type_complexity)]
758 0 : pub fn remaining_jobs(
759 0 : &self,
760 0 : ) -> (
761 0 : Option<(GcCompactionJobId, GcCompactionQueueItem)>,
762 0 : VecDeque<(GcCompactionJobId, GcCompactionQueueItem)>,
763 0 : ) {
764 0 : let guard = self.inner.lock().unwrap();
765 0 : (guard.running.clone(), guard.queued.clone())
766 0 : }
767 :
768 0 : pub fn remaining_jobs_num(&self) -> usize {
769 0 : let guard = self.inner.lock().unwrap();
770 0 : guard.queued.len() + if guard.running.is_some() { 1 } else { 0 }
771 0 : }
772 : }
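// A minimal usage sketch of the queue API above. It assumes the caller already has a
// `GcCompactionQueue` and a `CompactOptions` with `EnhancedGcBottomMostCompaction` set,
// and that the background compaction loop keeps driving `iteration()`; the function name
// is illustrative and not part of the production flow. The oneshot `notify` channel
// passed to `schedule_manual_compaction` is signalled from `notify_and_unblock` once the
// job has been fully processed.
async fn example_schedule_and_wait(queue: &GcCompactionQueue, options: CompactOptions) {
    let (tx, rx) = tokio::sync::oneshot::channel();
    let job_id = queue.schedule_manual_compaction(options, Some(tx));
    // Wait until the queue processes the job and fires the completion notification.
    let _ = rx.await;
    tracing::info!("gc-compaction job {} finished", job_id);
}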
773 :
774 : /// A job description for the gc-compaction job. This structure describes the rectangle range that the job will
775 : /// process. The exact layers that need to be compacted/rewritten will be generated when `compact_with_gc` gets
776 : /// called.
777 : #[derive(Debug, Clone)]
778 : pub(crate) struct GcCompactJob {
779 : pub dry_run: bool,
780 : /// The key range to be compacted. The compaction algorithm will only regenerate key-value pairs within this range
781 : /// [left inclusive, right exclusive), and other pairs will be rewritten into new files if necessary.
782 : pub compact_key_range: Range<Key>,
783 : /// The LSN range to be compacted. The compaction algorithm will use this range to determine the layers to be
784 : /// selected for the compaction, and it does not guarantee the generated layers will have exactly the same LSN range
785 : /// as specified here. The true range being compacted is `min_lsn/max_lsn` in [`GcCompactionJobDescription`].
786 : /// min_lsn will always be <= the lower bound specified here, and max_lsn will always be >= the upper bound specified here.
787 : pub compact_lsn_range: Range<Lsn>,
788 : }
789 :
790 : impl GcCompactJob {
791 28 : pub fn from_compact_options(options: CompactOptions) -> Self {
792 : GcCompactJob {
793 28 : dry_run: options.flags.contains(CompactFlags::DryRun),
794 28 : compact_key_range: options
795 28 : .compact_key_range
796 28 : .map(|x| x.into())
797 28 : .unwrap_or(Key::MIN..Key::MAX),
798 28 : compact_lsn_range: options
799 28 : .compact_lsn_range
800 28 : .map(|x| x.into())
801 28 : .unwrap_or(Lsn::INVALID..Lsn::MAX),
802 : }
803 28 : }
804 : }
805 :
806 : /// A job description for the gc-compaction job. This structure is generated when `compact_with_gc` is called
807 : /// and contains the exact layers we want to compact.
808 : pub struct GcCompactionJobDescription {
809 : /// All layers to read in the compaction job
810 : selected_layers: Vec<Layer>,
811 : /// GC cutoff of the job. This is the lowest LSN that will be accessed by the read/GC path and we need to
812 : /// keep all deltas <= this LSN or generate an image == this LSN.
813 : gc_cutoff: Lsn,
814 : /// LSNs to retain for the job. Read path will use this LSN so we need to keep deltas <= this LSN or
815 : /// generate an image == this LSN.
816 : retain_lsns_below_horizon: Vec<Lsn>,
817 : /// Maximum layer LSN processed in this compaction, that is max(end_lsn of layers). Exclusive. All data
818 : /// \>= this LSN will be kept and will not be rewritten.
819 : max_layer_lsn: Lsn,
820 : /// Minimum layer LSN processed in this compaction, that is min(start_lsn of layers). Inclusive.
821 : /// All access below (strict lower than `<`) this LSN will be routed through the normal read path instead of
822 : /// k-merge within gc-compaction.
823 : min_layer_lsn: Lsn,
824 : /// Only compact layers overlapping with this range.
825 : compaction_key_range: Range<Key>,
826 : /// When partial compaction is enabled, these layers need to be rewritten to ensure no overlap.
827 : /// This field is here solely for debugging. The field will not be read once the compaction
828 : /// description is generated.
829 : rewrite_layers: Vec<Arc<PersistentLayerDesc>>,
830 : }
831 :
832 : /// The result of bottom-most compaction for a single key at each LSN.
833 : #[derive(Debug)]
834 : #[cfg_attr(test, derive(PartialEq))]
835 : pub struct KeyLogAtLsn(pub Vec<(Lsn, Value)>);
836 :
837 : /// The result of bottom-most compaction.
838 : #[derive(Debug)]
839 : #[cfg_attr(test, derive(PartialEq))]
840 : pub(crate) struct KeyHistoryRetention {
841 : /// Stores logs to reconstruct the value at the given LSN, that is to say, logs <= LSN or image == LSN.
842 : pub(crate) below_horizon: Vec<(Lsn, KeyLogAtLsn)>,
843 : /// Stores logs to reconstruct the value at any LSN above the horizon, that is to say, log > LSN.
844 : pub(crate) above_horizon: KeyLogAtLsn,
845 : }
846 :
847 : impl KeyHistoryRetention {
848 : /// Hack: skip the delta layer if we need to produce a layer with the same key-lsn range.
849 : ///
850 : /// This can happen if we have removed some deltas in "the middle" of some existing layer's key-lsn-range.
851 : /// For example, consider the case where a single delta with range [0x10,0x50) exists.
852 : /// And we have branches at LSN 0x10, 0x20, 0x30.
853 : /// Then we delete branch @ 0x20.
854 : /// Bottom-most compaction may now delete the delta [0x20,0x30).
855 : /// And that wouldn't change the shape of the layer.
856 : ///
857 : /// Note that bottom-most-gc-compaction never _adds_ new data in that case, only removes.
858 : ///
859 : /// `discard_key` will only be called when the writer reaches its target (instead of for every key), so it's fine to grab a lock inside.
860 37 : async fn discard_key(key: &PersistentLayerKey, tline: &Arc<Timeline>, dry_run: bool) -> bool {
861 37 : if dry_run {
862 0 : return true;
863 37 : }
864 37 : if LayerMap::is_l0(&key.key_range, key.is_delta) {
865 : // gc-compaction should not produce L0 deltas, otherwise it will break the layer order.
866 : // We should ignore such layers.
867 0 : return true;
868 37 : }
869 : let layer_generation;
870 : {
871 37 : let guard = tline.layers.read(LayerManagerLockHolder::Compaction).await;
872 37 : if !guard.contains_key(key) {
873 26 : return false;
874 11 : }
875 11 : layer_generation = guard.get_from_key(key).metadata().generation;
876 : }
877 11 : if layer_generation == tline.generation {
878 11 : info!(
879 : key=%key,
880 : ?layer_generation,
881 0 : "discard layer due to duplicated layer key in the same generation",
882 : );
883 11 : true
884 : } else {
885 0 : false
886 : }
887 37 : }
888 :
889 : /// Pipe a history of a single key to the writers.
890 : ///
891 : /// If `image_writer` is `None`, the images will be placed into the delta layers.
892 : /// The delta writer will contain all images and deltas (below and above the horizon) except the bottom-most images.
893 : #[allow(clippy::too_many_arguments)]
894 319 : async fn pipe_to(
895 319 : self,
896 319 : key: Key,
897 319 : delta_writer: &mut SplitDeltaLayerWriter<'_>,
898 319 : mut image_writer: Option<&mut SplitImageLayerWriter<'_>>,
899 319 : stat: &mut CompactionStatistics,
900 319 : ctx: &RequestContext,
901 319 : ) -> anyhow::Result<()> {
902 319 : let mut first_batch = true;
903 1022 : for (cutoff_lsn, KeyLogAtLsn(logs)) in self.below_horizon {
904 703 : if first_batch {
905 319 : if logs.len() == 1 && logs[0].1.is_image() {
906 300 : let Value::Image(img) = &logs[0].1 else {
907 0 : unreachable!()
908 : };
909 300 : stat.produce_image_key(img);
910 300 : if let Some(image_writer) = image_writer.as_mut() {
911 300 : image_writer.put_image(key, img.clone(), ctx).await?;
912 : } else {
913 0 : delta_writer
914 0 : .put_value(key, cutoff_lsn, Value::Image(img.clone()), ctx)
915 0 : .await?;
916 : }
917 : } else {
918 33 : for (lsn, val) in logs {
919 14 : stat.produce_key(&val);
920 14 : delta_writer.put_value(key, lsn, val, ctx).await?;
921 : }
922 : }
923 319 : first_batch = false;
924 : } else {
925 442 : for (lsn, val) in logs {
926 58 : stat.produce_key(&val);
927 58 : delta_writer.put_value(key, lsn, val, ctx).await?;
928 : }
929 : }
930 : }
931 319 : let KeyLogAtLsn(above_horizon_logs) = self.above_horizon;
932 348 : for (lsn, val) in above_horizon_logs {
933 29 : stat.produce_key(&val);
934 29 : delta_writer.put_value(key, lsn, val, ctx).await?;
935 : }
936 319 : Ok(())
937 319 : }
938 :
939 : /// Verify if every key in the retention is readable by replaying the logs.
940 323 : async fn verify(
941 323 : &self,
942 323 : key: Key,
943 323 : base_img_from_ancestor: &Option<(Key, Lsn, Bytes)>,
944 323 : full_history: &[(Key, Lsn, Value)],
945 323 : tline: &Arc<Timeline>,
946 323 : ) -> anyhow::Result<()> {
947 : // Usually the min_lsn should be the first record but we do a full iteration to be safe.
948 323 : let Some(min_lsn) = full_history.iter().map(|(_, lsn, _)| *lsn).min() else {
949 : // This should never happen b/c if we don't have any history of a key, we won't even do `generate_key_retention`.
950 0 : return Ok(());
951 : };
952 323 : let Some(max_lsn) = full_history.iter().map(|(_, lsn, _)| *lsn).max() else {
953 : // This should never happen b/c if we don't have any history of a key, we won't even do `generate_key_retention`.
954 0 : return Ok(());
955 : };
956 323 : let mut base_img = base_img_from_ancestor
957 323 : .as_ref()
958 323 : .map(|(_, lsn, img)| (*lsn, img));
959 323 : let mut history = Vec::new();
960 :
961 1027 : async fn collect_and_verify(
962 1027 : key: Key,
963 1027 : lsn: Lsn,
964 1027 : base_img: &Option<(Lsn, &Bytes)>,
965 1027 : history: &[(Lsn, &NeonWalRecord)],
966 1027 : tline: &Arc<Timeline>,
967 1027 : skip_empty: bool,
968 1027 : ) -> anyhow::Result<()> {
969 1027 : if base_img.is_none() && history.is_empty() {
970 0 : if skip_empty {
971 0 : return Ok(());
972 0 : }
973 0 : anyhow::bail!("verification failed: key {} has no history at {}", key, lsn);
974 1027 : };
975 :
976 1027 : let mut records = history
977 1027 : .iter()
978 1027 : .map(|(lsn, val)| (*lsn, (*val).clone()))
979 1027 : .collect::<Vec<_>>();
980 :
981 : // WAL redo requires records in the reverse LSN order
982 1027 : records.reverse();
983 1027 : let data = ValueReconstructState {
984 1027 : img: base_img.as_ref().map(|(lsn, img)| (*lsn, (*img).clone())),
985 1027 : records,
986 : };
987 :
988 1027 : tline
989 1027 : .reconstruct_value(key, lsn, data, RedoAttemptType::GcCompaction)
990 1027 : .await
991 1027 : .with_context(|| format!("verification failed for key {key} at lsn {lsn}"))?;
992 :
993 1027 : Ok(())
994 1027 : }
995 :
996 1036 : for (retain_lsn, KeyLogAtLsn(logs)) in &self.below_horizon {
997 1096 : for (lsn, val) in logs {
998 76 : match val {
999 307 : Value::Image(img) => {
1000 307 : base_img = Some((*lsn, img));
1001 307 : history.clear();
1002 307 : }
1003 76 : Value::WalRecord(rec) if val.will_init() => {
1004 0 : base_img = None;
1005 0 : history.clear();
1006 0 : history.push((*lsn, rec));
1007 0 : }
1008 76 : Value::WalRecord(rec) => {
1009 76 : history.push((*lsn, rec));
1010 76 : }
1011 : }
1012 : }
1013 713 : if *retain_lsn >= min_lsn {
1014 : // Only verify after the key appears in the full history for the first time.
1015 :
1016 : // We don't modify history: in theory, we could replace the history with a single
1017 : // image as in `generate_key_retention` to make redos at later LSNs faster. But we
1018 : // want to verify everything as if they are read from the real layer map.
1019 699 : collect_and_verify(key, *retain_lsn, &base_img, &history, tline, false)
1020 699 : .await
1021 699 : .context("below horizon retain_lsn")?;
1022 14 : }
1023 : }
1024 :
1025 360 : for (lsn, val) in &self.above_horizon.0 {
1026 32 : match val {
1027 5 : Value::Image(img) => {
1028 : // Above the GC horizon, we verify every time we see an image.
1029 5 : collect_and_verify(key, *lsn, &base_img, &history, tline, true)
1030 5 : .await
1031 5 : .context("above horizon full image")?;
1032 5 : base_img = Some((*lsn, img));
1033 5 : history.clear();
1034 : }
1035 32 : Value::WalRecord(rec) if val.will_init() => {
1036 : // Above the GC horizon, we verify every time we see an init record.
1037 0 : collect_and_verify(key, *lsn, &base_img, &history, tline, true)
1038 0 : .await
1039 0 : .context("above horizon init record")?;
1040 0 : base_img = None;
1041 0 : history.clear();
1042 0 : history.push((*lsn, rec));
1043 : }
1044 32 : Value::WalRecord(rec) => {
1045 32 : history.push((*lsn, rec));
1046 32 : }
1047 : }
1048 : }
1049 : // Ensure the latest record is readable.
1050 323 : collect_and_verify(key, max_lsn, &base_img, &history, tline, false)
1051 323 : .await
1052 323 : .context("latest record")?;
1053 323 : Ok(())
1054 323 : }
1055 : }
1056 :
1057 : #[derive(Debug, Serialize, Default)]
1058 : struct CompactionStatisticsNumSize {
1059 : num: u64,
1060 : size: u64,
1061 : }
1062 :
1063 : #[derive(Debug, Serialize, Default)]
1064 : pub struct CompactionStatistics {
1065 : /// Delta layer visited (maybe compressed, physical size)
1066 : delta_layer_visited: CompactionStatisticsNumSize,
1067 : /// Image layer visited (maybe compressed, physical size)
1068 : image_layer_visited: CompactionStatisticsNumSize,
1069 : /// Delta layer produced (maybe compressed, physical size)
1070 : delta_layer_produced: CompactionStatisticsNumSize,
1071 : /// Image layer produced (maybe compressed, physical size)
1072 : image_layer_produced: CompactionStatisticsNumSize,
1073 : /// Delta layer discarded (maybe compressed, physical size of the layer being discarded instead of the original layer)
1074 : delta_layer_discarded: CompactionStatisticsNumSize,
1075 : /// Image layer discarded (maybe compressed, physical size of the layer being discarded instead of the original layer)
1076 : image_layer_discarded: CompactionStatisticsNumSize,
1077 : num_unique_keys_visited: usize,
1078 : /// Delta visited (uncompressed, original size)
1079 : wal_keys_visited: CompactionStatisticsNumSize,
1080 : /// Image visited (uncompressed, original size)
1081 : image_keys_visited: CompactionStatisticsNumSize,
1082 : /// Delta produced (uncompressed, original size)
1083 : wal_produced: CompactionStatisticsNumSize,
1084 : /// Image produced (uncompressed, original size)
1085 : image_produced: CompactionStatisticsNumSize,
1086 :
1087 : // Time spent in each phase
1088 : time_acquire_lock_secs: f64,
1089 : time_analyze_secs: f64,
1090 : time_download_layer_secs: f64,
1091 : time_to_first_kv_pair_secs: f64,
1092 : time_main_loop_secs: f64,
1093 : time_final_phase_secs: f64,
1094 : time_total_secs: f64,
1095 :
1096 : // Summary
1097 : /// Ratio of the key-value size after/before gc-compaction.
1098 : uncompressed_retention_ratio: f64,
1099 : /// Ratio of the physical size after/before gc-compaction.
1100 : compressed_retention_ratio: f64,
1101 : }
1102 :
1103 : impl CompactionStatistics {
1104 534 : fn estimated_size_of_value(val: &Value) -> usize {
1105 219 : match val {
1106 315 : Value::Image(img) => img.len(),
1107 0 : Value::WalRecord(NeonWalRecord::Postgres { rec, .. }) => rec.len(),
1108 219 : _ => std::mem::size_of::<NeonWalRecord>(),
1109 : }
1110 534 : }
1111 839 : fn estimated_size_of_key() -> usize {
1112 839 : KEY_SIZE // TODO: distinguish image layer and delta layer (count LSN in delta layer)
1113 839 : }
1114 44 : fn visit_delta_layer(&mut self, size: u64) {
1115 44 : self.delta_layer_visited.num += 1;
1116 44 : self.delta_layer_visited.size += size;
1117 44 : }
1118 35 : fn visit_image_layer(&mut self, size: u64) {
1119 35 : self.image_layer_visited.num += 1;
1120 35 : self.image_layer_visited.size += size;
1121 35 : }
1122 320 : fn on_unique_key_visited(&mut self) {
1123 320 : self.num_unique_keys_visited += 1;
1124 320 : }
1125 123 : fn visit_wal_key(&mut self, val: &Value) {
1126 123 : self.wal_keys_visited.num += 1;
1127 123 : self.wal_keys_visited.size +=
1128 123 : Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
1129 123 : }
1130 315 : fn visit_image_key(&mut self, val: &Value) {
1131 315 : self.image_keys_visited.num += 1;
1132 315 : self.image_keys_visited.size +=
1133 315 : Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
1134 315 : }
1135 101 : fn produce_key(&mut self, val: &Value) {
1136 101 : match val {
1137 5 : Value::Image(img) => self.produce_image_key(img),
1138 96 : Value::WalRecord(_) => self.produce_wal_key(val),
1139 : }
1140 101 : }
1141 96 : fn produce_wal_key(&mut self, val: &Value) {
1142 96 : self.wal_produced.num += 1;
1143 96 : self.wal_produced.size +=
1144 96 : Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
1145 96 : }
1146 305 : fn produce_image_key(&mut self, val: &Bytes) {
1147 305 : self.image_produced.num += 1;
1148 305 : self.image_produced.size += val.len() as u64 + Self::estimated_size_of_key() as u64;
1149 305 : }
1150 7 : fn discard_delta_layer(&mut self, original_size: u64) {
1151 7 : self.delta_layer_discarded.num += 1;
1152 7 : self.delta_layer_discarded.size += original_size;
1153 7 : }
1154 4 : fn discard_image_layer(&mut self, original_size: u64) {
1155 4 : self.image_layer_discarded.num += 1;
1156 4 : self.image_layer_discarded.size += original_size;
1157 4 : }
1158 12 : fn produce_delta_layer(&mut self, size: u64) {
1159 12 : self.delta_layer_produced.num += 1;
1160 12 : self.delta_layer_produced.size += size;
1161 12 : }
1162 15 : fn produce_image_layer(&mut self, size: u64) {
1163 15 : self.image_layer_produced.num += 1;
1164 15 : self.image_layer_produced.size += size;
1165 15 : }
1166 26 : fn finalize(&mut self) {
1167 26 : let original_key_value_size = self.image_keys_visited.size + self.wal_keys_visited.size;
1168 26 : let produced_key_value_size = self.image_produced.size + self.wal_produced.size;
1169 26 : self.uncompressed_retention_ratio =
1170 26 : produced_key_value_size as f64 / (original_key_value_size as f64 + 1.0); // avoid div by 0
1171 26 : let original_physical_size = self.image_layer_visited.size + self.delta_layer_visited.size;
1172 26 : let produced_physical_size = self.image_layer_produced.size
1173 26 : + self.delta_layer_produced.size
1174 26 : + self.image_layer_discarded.size
1175 26 : + self.delta_layer_discarded.size; // Also include the discarded layers to make the ratio accurate
1176 26 : self.compressed_retention_ratio =
1177 26 : produced_physical_size as f64 / (original_physical_size as f64 + 1.0); // avoid div by 0
1178 26 : }
1179 : }
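// Worked example of the summary ratios computed in `finalize()`, with made-up sizes: if
// a compaction visited 10 GiB of key-value data and produced 2 GiB, the
// uncompressed_retention_ratio is ~0.2; if it visited 8 GiB of physical layers and
// produced 1.5 GiB of new layers plus 0.5 GiB of discarded layers (counted so the ratio
// stays accurate, per the comment above), the compressed_retention_ratio is ~0.25.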
1180 :
1181 : #[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
1182 : pub enum CompactionOutcome {
1183 : #[default]
1184 : /// No layers need to be compacted after this round. Compaction doesn't need
1185 : /// to be immediately scheduled.
1186 : Done,
1187 : /// Still has pending layers to be compacted after this round. Ideally, the scheduler
1188 : /// should immediately schedule another compaction.
1189 : Pending,
1190 : /// A timeline needs L0 compaction. Yield and schedule an immediate L0 compaction pass (only
1191 : /// guaranteed when `compaction_l0_first` is enabled).
1192 : YieldForL0,
1193 : /// Compaction was skipped, because the timeline is ineligible for compaction.
1194 : Skipped,
1195 : }
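// A sketch of how a scheduler might act on each outcome, mirroring the variant
// documentation above. The function and the action strings are illustrative only and not
// part of the real compaction scheduler.
fn example_next_action(outcome: CompactionOutcome) -> &'static str {
    match outcome {
        CompactionOutcome::Done => "wait for the next compaction period",
        CompactionOutcome::Pending => "schedule another compaction pass immediately",
        CompactionOutcome::YieldForL0 => "run an L0-only compaction pass, then retry",
        CompactionOutcome::Skipped => "skip this timeline",
    }
}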
1196 :
1197 : impl Timeline {
1198 : /// TODO: cancellation
1199 : ///
1200 : /// Returns whether the compaction has pending tasks.
1201 192 : pub(crate) async fn compact_legacy(
1202 192 : self: &Arc<Self>,
1203 192 : cancel: &CancellationToken,
1204 192 : options: CompactOptions,
1205 192 : ctx: &RequestContext,
1206 192 : ) -> Result<CompactionOutcome, CompactionError> {
1207 192 : if options
1208 192 : .flags
1209 192 : .contains(CompactFlags::EnhancedGcBottomMostCompaction)
1210 : {
1211 0 : self.compact_with_gc(cancel, options, ctx).await?;
1212 0 : return Ok(CompactionOutcome::Done);
1213 192 : }
1214 :
1215 192 : if options.flags.contains(CompactFlags::DryRun) {
1216 0 : return Err(CompactionError::Other(anyhow!(
1217 0 : "dry-run mode is not supported for legacy compaction for now"
1218 0 : )));
1219 192 : }
1220 :
1221 192 : if options.compact_key_range.is_some() || options.compact_lsn_range.is_some() {
1222 : // maybe useful in the future? could implement this at some point
1223 0 : return Err(CompactionError::Other(anyhow!(
1224 0 : "compaction range is not supported for legacy compaction for now"
1225 0 : )));
1226 192 : }
1227 :
1228 : // High level strategy for compaction / image creation:
1229 : //
1230 : // 1. First, do an L0 compaction to ensure we move the L0
1231 : // layers into the historic layer map and get flat levels of
1232 : // layers. If we did not compact all L0 layers, we will
1233 : // prioritize compacting the timeline again and not do
1234 : // any of the compactions below.
1235 : //
1236 : // 2. Then, calculate the desired "partitioning" of the
1237 : // currently in-use key space. The goal is to partition the
1238 : // key space into roughly fixed-size chunks, but also take into
1239 : // account any existing image layers, and try to align the
1240 : // chunk boundaries with the existing image layers to avoid
1241 : // too much churn. Also try to align chunk boundaries with
1242 : // relation boundaries. In principle, we don't know about
1243 : // relation boundaries here, we just deal with key-value
1244 : // pairs, and the code in pgdatadir_mapping.rs knows how to
1245 : // map relations into key-value pairs. But in practice we know
1246 : // that 'field6' is the block number, and the fields 1-5
1247 : // identify a relation. This is just an optimization,
1248 : // though.
1249 : //
1250 : // 3. Once we know the partitioning, for each partition,
1251 : // decide if it's time to create a new image layer. The
1252 : // criterion is: has there been too much "churn" since the last
1253 : // image layer? "Churn" is a fuzzy concept; it's a
1254 : // combination of too many delta files, or too much WAL in
1255 : // total in the delta file. Or perhaps: if creating an image
1256 : // file would allow us to delete some older files.
1257 : //
1258 : // 4. In the end, if the tenant gets auto-sharded, we will run
1259 : // a shard-ancestor compaction.
1260 :
1261 : // Is the timeline being deleted?
1262 192 : if self.is_stopping() {
1263 0 : trace!("Dropping out of compaction on timeline shutdown");
1264 0 : return Err(CompactionError::new_cancelled());
1265 192 : }
1266 :
1267 192 : let target_file_size = self.get_checkpoint_distance();
1268 :
1269 : // Define partitioning schema if needed
1270 :
1271 : // HADRON
1272 192 : let force_image_creation_lsn = self.get_force_image_creation_lsn();
1273 :
1274 : // 1. L0 Compact
1275 192 : let l0_outcome = {
1276 192 : let timer = self.metrics.compact_time_histo.start_timer();
1277 192 : let l0_outcome = self
1278 192 : .compact_level0(
1279 192 : target_file_size,
1280 192 : options.flags.contains(CompactFlags::ForceL0Compaction),
1281 192 : force_image_creation_lsn,
1282 192 : ctx,
1283 192 : )
1284 192 : .await?;
1285 192 : timer.stop_and_record();
1286 192 : l0_outcome
1287 : };
1288 :
1289 192 : if options.flags.contains(CompactFlags::OnlyL0Compaction) {
1290 0 : return Ok(l0_outcome);
1291 192 : }
1292 :
1293 : // Yield if we have pending L0 compaction. The scheduler will do another pass.
1294 192 : if (l0_outcome == CompactionOutcome::Pending || l0_outcome == CompactionOutcome::YieldForL0)
1295 0 : && options.flags.contains(CompactFlags::YieldForL0)
1296 : {
1297 0 : info!("image/ancestor compaction yielding for L0 compaction");
1298 0 : return Ok(CompactionOutcome::YieldForL0);
1299 192 : }
1300 :
1301 192 : let gc_cutoff = *self.applied_gc_cutoff_lsn.read();
1302 192 : let l0_l1_boundary_lsn = {
1303 : // We do the repartition on the L0-L1 boundary. All data below the boundary
1304 : // are compacted by L0 with low read amplification, thus making the `repartition`
1305 : // function run fast.
1306 192 : let guard = self
1307 192 : .layers
1308 192 : .read(LayerManagerLockHolder::GetLayerMapInfo)
1309 192 : .await;
1310 192 : guard
1311 192 : .all_persistent_layers()
1312 192 : .iter()
1313 1219 : .map(|x| {
1314 : // Use the end LSN of delta layers OR the start LSN of image layers.
1315 1219 : if x.is_delta {
1316 1035 : x.lsn_range.end
1317 : } else {
1318 184 : x.lsn_range.start
1319 : }
1320 1219 : })
1321 192 : .max()
1322 : };
1323 :
1324 192 : let (partition_mode, partition_lsn) = if cfg!(test)
1325 0 : || cfg!(feature = "testing")
1326 0 : || self
1327 0 : .feature_resolver
1328 0 : .evaluate_boolean("image-compaction-boundary")
1329 0 : .is_ok()
1330 : {
1331 192 : let last_repartition_lsn = self.partitioning.read().1;
1332 192 : let lsn = match l0_l1_boundary_lsn {
1333 192 : Some(boundary) => gc_cutoff
1334 192 : .max(boundary)
1335 192 : .max(last_repartition_lsn)
1336 192 : .max(self.initdb_lsn)
1337 192 : .max(self.ancestor_lsn),
1338 0 : None => self.get_last_record_lsn(),
1339 : };
1340 192 : if lsn <= self.initdb_lsn || lsn <= self.ancestor_lsn {
1341 : // Do not attempt to create image layers below the initdb or ancestor LSN -- no data below it
1342 0 : ("l0_l1_boundary", self.get_last_record_lsn())
1343 : } else {
1344 192 : ("l0_l1_boundary", lsn)
1345 : }
1346 : } else {
1347 0 : ("latest_record", self.get_last_record_lsn())
1348 : };
1349 :
1350 : // 2. Repartition and create image layers if necessary
1351 192 : match self
1352 192 : .repartition(
1353 192 : partition_lsn,
1354 192 : self.get_compaction_target_size(),
1355 192 : options.flags,
1356 192 : ctx,
1357 192 : )
1358 192 : .await
1359 : {
1360 192 : Ok(((dense_partitioning, sparse_partitioning), lsn)) if lsn >= gc_cutoff => {
1361 : // Disables access_stats updates, so that the files we read remain candidates for eviction after we're done with them
1362 80 : let image_ctx = RequestContextBuilder::from(ctx)
1363 80 : .access_stats_behavior(AccessStatsBehavior::Skip)
1364 80 : .attached_child();
1365 :
1366 80 : let mut partitioning = dense_partitioning;
1367 80 : partitioning
1368 80 : .parts
1369 80 : .extend(sparse_partitioning.into_dense().parts);
1370 :
1371 : // 3. Create new image layers for partitions that have been modified "enough".
1372 80 : let mode = if options
1373 80 : .flags
1374 80 : .contains(CompactFlags::ForceImageLayerCreation)
1375 : {
1376 7 : ImageLayerCreationMode::Force
1377 : } else {
1378 73 : ImageLayerCreationMode::Try
1379 : };
1380 80 : let (image_layers, outcome) = self
1381 80 : .create_image_layers(
1382 80 : &partitioning,
1383 80 : lsn,
1384 80 : force_image_creation_lsn,
1385 80 : mode,
1386 80 : &image_ctx,
1387 80 : self.last_image_layer_creation_status
1388 80 : .load()
1389 80 : .as_ref()
1390 80 : .clone(),
1391 80 : options.flags.contains(CompactFlags::YieldForL0),
1392 : )
1393 80 : .instrument(info_span!("create_image_layers", mode = %mode, partition_mode = %partition_mode, lsn = %lsn))
1394 80 : .await
1395 80 : .inspect_err(|err| {
1396 : if let CreateImageLayersError::GetVectoredError(
1397 : GetVectoredError::MissingKey(_),
1398 0 : ) = err
1399 : {
1400 0 : critical_timeline!(
1401 0 : self.tenant_shard_id,
1402 0 : self.timeline_id,
1403 0 : "missing key during compaction: {err:?}"
1404 : );
1405 0 : }
1406 0 : })?;
1407 :
1408 80 : self.last_image_layer_creation_status
1409 80 : .store(Arc::new(outcome.clone()));
1410 :
1411 80 : self.upload_new_image_layers(image_layers)?;
1412 80 : if let LastImageLayerCreationStatus::Incomplete { .. } = outcome {
1413 : // Yield and do not do any other kind of compaction.
1414 0 : info!(
1415 0 : "skipping shard ancestor compaction due to pending image layer generation tasks (preempted by L0 compaction)."
1416 : );
1417 0 : return Ok(CompactionOutcome::YieldForL0);
1418 80 : }
1419 : }
1420 :
1421 : Ok(_) => {
1422 : // This happens very frequently so we don't want to log it.
1423 112 : debug!("skipping repartitioning due to image compaction LSN being below GC cutoff");
1424 : }
1425 :
1426 : // Suppress errors when cancelled.
1427 : //
1428 : // Log other errors but continue. Failure to repartition is normal if the timeline was just created
1429 : // as an empty timeline, or in unit tests where we use the timeline as a simple
1430 : // key-value store, ignoring the datadir layout.
1431 : //
1432 : // TODO:
1433 : // 1. shouldn't we return early here if we observe cancellation
1434 : // 2. Experiment: can we stop checking self.cancel here?
1435 0 : Err(_) if self.cancel.is_cancelled() => {} // TODO: try how we fare removing this branch
1436 0 : Err(err) if err.is_cancel() => {}
1437 : Err(RepartitionError::CollectKeyspace(
1438 0 : e @ CollectKeySpaceError::Decode(_)
1439 0 : | e @ CollectKeySpaceError::PageRead(
1440 : PageReconstructError::MissingKey(_) | PageReconstructError::WalRedo(_),
1441 : ),
1442 : )) => {
1443 : // Alert on critical errors that indicate data corruption.
1444 0 : critical_timeline!(
1445 0 : self.tenant_shard_id,
1446 0 : self.timeline_id,
1447 0 : "could not compact, repartitioning keyspace failed: {e:?}"
1448 : );
1449 : }
1450 0 : Err(e) => error!(
1451 0 : "could not compact, repartitioning keyspace failed: {:?}",
1452 0 : e.into_anyhow()
1453 : ),
1454 : };
1455 :
1456 192 : let partition_count = self.partitioning.read().0.0.parts.len();
1457 :
1458 : // 4. Shard ancestor compaction
1459 192 : if self.get_compaction_shard_ancestor() && self.shard_identity.count >= ShardCount::new(2) {
1460 : // Limit the number of layer rewrites to the number of partitions: this means its
1461 : // runtime should be comparable to a full round of image layer creations, rather than
1462 : // being potentially much longer.
1463 0 : let rewrite_max = partition_count;
1464 :
1465 0 : let outcome = self
1466 0 : .compact_shard_ancestors(
1467 0 : rewrite_max,
1468 0 : options.flags.contains(CompactFlags::YieldForL0),
1469 0 : ctx,
1470 0 : )
1471 0 : .await?;
1472 0 : match outcome {
1473 0 : CompactionOutcome::Pending | CompactionOutcome::YieldForL0 => return Ok(outcome),
1474 0 : CompactionOutcome::Done | CompactionOutcome::Skipped => {}
1475 : }
1476 192 : }
1477 :
1478 192 : Ok(CompactionOutcome::Done)
1479 192 : }
1480 :
1481 : /* BEGIN_HADRON */
1482 : // Get the force image creation LSN based on gc_cutoff_lsn.
1483 : // Note that this is an estimation and the workload rate may suddenly change. When that happens,
1484 : // the force image creation may be too early or too late, but eventually it should be able to catch up.
1485 193 : pub(crate) fn get_force_image_creation_lsn(self: &Arc<Self>) -> Option<Lsn> {
1486 193 : let image_creation_period = self.get_image_layer_force_creation_period()?;
1487 1 : let current_lsn = self.get_last_record_lsn();
1488 1 : let pitr_lsn = self.gc_info.read().unwrap().cutoffs.time?;
1489 1 : let pitr_interval = self.get_pitr_interval();
1490 1 : if pitr_lsn == Lsn::INVALID || pitr_interval.is_zero() {
1491 0 : tracing::warn!(
1492 0 : "pitr LSN/interval not found, skipping force image creation LSN calculation"
1493 : );
1494 0 : return None;
1495 1 : }
1496 :
1497 1 : let delta_lsn = current_lsn.checked_sub(pitr_lsn).unwrap().0
1498 1 : * image_creation_period.as_secs()
1499 1 : / pitr_interval.as_secs();
1500 1 : let force_image_creation_lsn = current_lsn.checked_sub(delta_lsn).unwrap_or(Lsn(0));
1501 :
1502 1 : tracing::info!(
1503 0 : "Tenant shard {} computed force_image_creation_lsn: {}. Current lsn: {}, image_layer_force_creation_period: {:?}, GC cutoff: {}, PITR interval: {:?}",
1504 0 : self.tenant_shard_id,
1505 : force_image_creation_lsn,
1506 : current_lsn,
1507 : image_creation_period,
1508 : pitr_lsn,
1509 : pitr_interval
1510 : );
1511 :
1512 1 : Some(force_image_creation_lsn)
1513 193 : }
1514 : /* END_HADRON */
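The interpolation above scales the LSN distance covered by the PITR window down to the force-creation period: the shorter the period relative to the PITR interval, the closer the forced LSN sits to the current head. Below is a minimal standalone sketch of the same arithmetic on plain `u64` LSNs; the example values (1 GiB of WAL in the window, a 6-hour period, a 7-day PITR interval) are made up for illustration, not defaults.

```rust
use std::time::Duration;

/// Same interpolation as `get_force_image_creation_lsn`, on plain u64 LSNs.
/// Assumes `pitr_interval` is non-zero and `current_lsn >= pitr_lsn`, which
/// the real function checks before getting here.
fn force_image_creation_lsn(
    current_lsn: u64,
    pitr_lsn: u64,
    image_creation_period: Duration,
    pitr_interval: Duration,
) -> u64 {
    // LSN distance spanned by the PITR window, scaled down to the period.
    let delta_lsn =
        (current_lsn - pitr_lsn) * image_creation_period.as_secs() / pitr_interval.as_secs();
    // Saturate at zero, like the `unwrap_or(Lsn(0))` above.
    current_lsn.saturating_sub(delta_lsn)
}

fn main() {
    let lsn = force_image_creation_lsn(
        0x5000_0000,
        0x1000_0000,
        Duration::from_secs(6 * 3600),
        Duration::from_secs(7 * 24 * 3600),
    );
    // delta = 0x4000_0000 * 21600 / 604800 = 0x249_2492, so the forced LSN
    // sits that far behind the current head.
    assert_eq!(lsn, 0x5000_0000 - 0x249_2492);
    println!("force image creation at LSN {lsn:#x}");
}
```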
1515 :
 1516 : /// Check for layers that are eligible to be rewritten:
 1517 : /// - Shard splitting: after a shard split, rewrite ancestor layers beyond pitr_interval, so that
 1518 : /// we don't indefinitely retain keys in this shard that aren't needed.
1519 : /// - For future use: layers beyond pitr_interval that are in formats we would
1520 : /// rather not maintain compatibility with indefinitely.
1521 : ///
1522 : /// Note: this phase may read and write many gigabytes of data: use rewrite_max to bound
1523 : /// how much work it will try to do in each compaction pass.
1524 0 : async fn compact_shard_ancestors(
1525 0 : self: &Arc<Self>,
1526 0 : rewrite_max: usize,
1527 0 : yield_for_l0: bool,
1528 0 : ctx: &RequestContext,
1529 0 : ) -> Result<CompactionOutcome, CompactionError> {
1530 0 : let mut outcome = CompactionOutcome::Done;
1531 0 : let mut drop_layers = Vec::new();
1532 0 : let mut layers_to_rewrite: Vec<Layer> = Vec::new();
1533 :
1534 : // We will use the Lsn cutoff of the last GC as a threshold for rewriting layers: if a
1535 : // layer is behind this Lsn, it indicates that the layer is being retained beyond the
1536 : // pitr_interval, for example because a branchpoint references it.
1537 : //
1538 : // Holding this read guard also blocks [`Self::gc_timeline`] from entering while we
1539 : // are rewriting layers.
1540 0 : let latest_gc_cutoff = self.get_applied_gc_cutoff_lsn();
1541 0 : let pitr_cutoff = self.gc_info.read().unwrap().cutoffs.time;
1542 :
1543 0 : let layers = self.layers.read(LayerManagerLockHolder::Compaction).await;
1544 0 : let layers_iter = layers.layer_map()?.iter_historic_layers();
1545 0 : let (layers_total, mut layers_checked) = (layers_iter.len(), 0);
1546 0 : for layer_desc in layers_iter {
1547 0 : layers_checked += 1;
1548 0 : let layer = layers.get_from_desc(&layer_desc);
1549 0 : if layer.metadata().shard.shard_count == self.shard_identity.count {
1550 : // This layer does not belong to a historic ancestor, no need to re-image it.
1551 0 : continue;
1552 0 : }
1553 :
1554 : // This layer was created on an ancestor shard: check if it contains any data for this shard.
1555 0 : let sharded_range = ShardedRange::new(layer_desc.get_key_range(), &self.shard_identity);
1556 0 : let layer_local_page_count = sharded_range.page_count();
1557 0 : let layer_raw_page_count = ShardedRange::raw_size(&layer_desc.get_key_range());
1558 0 : if layer_local_page_count == 0 {
1559 : // This ancestral layer only covers keys that belong to other shards.
1560 : // We include the full metadata in the log: if we had some critical bug that caused
1561 : // us to incorrectly drop layers, this would simplify manually debugging + reinstating those layers.
1562 0 : debug!(%layer, old_metadata=?layer.metadata(),
1563 0 : "dropping layer after shard split, contains no keys for this shard",
1564 : );
1565 :
1566 0 : if cfg!(debug_assertions) {
1567 : // Expensive, exhaustive check of keys in this layer: this guards against ShardedRange's calculations being
1568 : // wrong. If ShardedRange claims the local page count is zero, then no keys in this layer
1569 : // should be !is_key_disposable()
1570 : // TODO: exclude sparse keyspace from this check, otherwise it will infinitely loop.
1571 0 : let range = layer_desc.get_key_range();
1572 0 : let mut key = range.start;
1573 0 : while key < range.end {
1574 0 : debug_assert!(self.shard_identity.is_key_disposable(&key));
1575 0 : key = key.next();
1576 : }
1577 0 : }
1578 :
1579 0 : drop_layers.push(layer);
1580 0 : continue;
1581 0 : } else if layer_local_page_count != u32::MAX
1582 0 : && layer_local_page_count == layer_raw_page_count
1583 : {
1584 0 : debug!(%layer,
1585 0 : "layer is entirely shard local ({} keys), no need to filter it",
1586 : layer_local_page_count
1587 : );
1588 0 : continue;
1589 0 : }
1590 :
1591 : // Only rewrite a layer if we can reclaim significant space.
 1592 0 : if layer_local_page_count != u32::MAX
 1593 0 : && layer_local_page_count as f64 / layer_raw_page_count as f64
 1594 0 : > ANCESTOR_COMPACTION_REWRITE_THRESHOLD
 1595 : {
 1596 0 : debug!(%layer,
 1597 0 : "layer has a large share of local pages \
 1598 0 : ({layer_local_page_count}/{layer_raw_page_count} > {ANCESTOR_COMPACTION_REWRITE_THRESHOLD}), not rewriting",
 1599 : );
 1600 0 : continue;
 1601 0 : }
1602 :
1603 : // Don't bother re-writing a layer if it is within the PITR window: it will age-out eventually
1604 : // without incurring the I/O cost of a rewrite.
1605 0 : if layer_desc.get_lsn_range().end >= *latest_gc_cutoff {
1606 0 : debug!(%layer, "Skipping rewrite of layer still in GC window ({} >= {})",
1607 0 : layer_desc.get_lsn_range().end, *latest_gc_cutoff);
1608 0 : continue;
1609 0 : }
1610 :
1611 : // We do not yet implement rewrite of delta layers.
1612 0 : if layer_desc.is_delta() {
1613 0 : debug!(%layer, "Skipping rewrite of delta layer");
1614 0 : continue;
1615 0 : }
1616 :
1617 : // We don't bother rewriting layers that aren't visible, since these won't be needed by
1618 : // reads and will likely be garbage collected soon.
1619 0 : if layer.visibility() != LayerVisibilityHint::Visible {
1620 0 : debug!(%layer, "Skipping rewrite of invisible layer");
1621 0 : continue;
1622 0 : }
1623 :
1624 : // Only rewrite layers if their generations differ. This guarantees:
1625 : // - that local rewrite is safe, as local layer paths will differ between existing layer and rewritten one
1626 : // - that the layer is persistent in remote storage, as we only see old-generation'd layer via loading from remote storage
1627 0 : if layer.metadata().generation == self.generation {
1628 0 : debug!(%layer, "Skipping rewrite, is not from old generation");
1629 0 : continue;
1630 0 : }
1631 :
1632 0 : if layers_to_rewrite.len() >= rewrite_max {
1633 0 : debug!(%layer, "Will rewrite layer on a future compaction, already rewrote {}",
1634 0 : layers_to_rewrite.len()
1635 : );
1636 0 : outcome = CompactionOutcome::Pending;
1637 0 : break;
1638 0 : }
1639 :
1640 : // Fall through: all our conditions for doing a rewrite passed.
1641 0 : layers_to_rewrite.push(layer);
1642 : }
1643 :
1644 : // Drop read lock on layer map before we start doing time-consuming I/O.
1645 0 : drop(layers);
1646 :
1647 : // Drop out early if there's nothing to do.
1648 0 : if layers_to_rewrite.is_empty() && drop_layers.is_empty() {
1649 0 : return Ok(CompactionOutcome::Done);
1650 0 : }
1651 :
1652 0 : info!(
1653 0 : "starting shard ancestor compaction, rewriting {} layers and dropping {} layers, \
1654 0 : checked {layers_checked}/{layers_total} layers \
1655 0 : (latest_gc_cutoff={} pitr_cutoff={:?})",
1656 0 : layers_to_rewrite.len(),
1657 0 : drop_layers.len(),
1658 0 : *latest_gc_cutoff,
1659 : pitr_cutoff,
1660 : );
1661 0 : let started = Instant::now();
1662 :
1663 0 : let mut replace_image_layers = Vec::new();
1664 0 : let total = layers_to_rewrite.len();
1665 :
1666 0 : for (i, layer) in layers_to_rewrite.into_iter().enumerate() {
1667 0 : if self.cancel.is_cancelled() {
1668 0 : return Err(CompactionError::new_cancelled());
1669 0 : }
1670 :
1671 0 : info!(layer=%layer, "rewriting layer after shard split: {}/{}", i, total);
1672 :
1673 0 : let mut image_layer_writer = ImageLayerWriter::new(
1674 0 : self.conf,
1675 0 : self.timeline_id,
1676 0 : self.tenant_shard_id,
1677 0 : &layer.layer_desc().key_range,
1678 0 : layer.layer_desc().image_layer_lsn(),
1679 0 : &self.gate,
1680 0 : self.cancel.clone(),
1681 0 : ctx,
1682 0 : )
1683 0 : .await
1684 0 : .map_err(CompactionError::Other)?;
1685 :
1686 : // Safety of layer rewrites:
1687 : // - We are writing to a different local file path than we are reading from, so the old Layer
1688 : // cannot interfere with the new one.
1689 : // - In the page cache, contents for a particular VirtualFile are stored with a file_id that
1690 : // is different for two layers with the same name (in `ImageLayerInner::new` we always
 1691 0 : // acquire a fresh id from [`crate::page_cache::next_file_id`]). So readers do not risk
1692 : // reading the index from one layer file, and then data blocks from the rewritten layer file.
1693 : // - Any readers that have a reference to the old layer will keep it alive until they are done
1694 : // with it. If they are trying to promote from remote storage, that will fail, but this is the same
1695 : // as for compaction generally: compaction is allowed to delete layers that readers might be trying to use.
1696 : // - We do not run concurrently with other kinds of compaction, so the only layer map writes we race with are:
1697 : // - GC, which at worst witnesses us "undelete" a layer that they just deleted.
1698 : // - ingestion, which only inserts layers, therefore cannot collide with us.
1699 0 : let resident = layer.download_and_keep_resident(ctx).await?;
1700 :
1701 0 : let keys_written = resident
1702 0 : .filter(&self.shard_identity, &mut image_layer_writer, ctx)
1703 0 : .await?;
1704 :
1705 0 : if keys_written > 0 {
1706 0 : let (desc, path) = image_layer_writer
1707 0 : .finish(ctx)
1708 0 : .await
1709 0 : .map_err(CompactionError::Other)?;
1710 0 : let new_layer = Layer::finish_creating(self.conf, self, desc, &path)
1711 0 : .map_err(CompactionError::Other)?;
1712 0 : info!(layer=%new_layer, "rewrote layer, {} -> {} bytes",
1713 0 : layer.metadata().file_size,
1714 0 : new_layer.metadata().file_size);
1715 :
1716 0 : replace_image_layers.push((layer, new_layer));
1717 0 : } else {
1718 0 : // Drop the old layer. Usually for this case we would already have noticed that
 1719 0 : // the layer has no data for us with the ShardedRange check above, but that check is an estimate: the exact filter may still find nothing to keep.
1720 0 : drop_layers.push(layer);
1721 0 : }
1722 :
1723 : // Yield for L0 compaction if necessary, but make sure we update the layer map below
1724 : // with the work we've already done.
1725 0 : if yield_for_l0
1726 0 : && self
1727 0 : .l0_compaction_trigger
1728 0 : .notified()
1729 0 : .now_or_never()
1730 0 : .is_some()
1731 : {
1732 0 : info!("shard ancestor compaction yielding for L0 compaction");
1733 0 : outcome = CompactionOutcome::YieldForL0;
1734 0 : break;
1735 0 : }
1736 : }
1737 :
1738 0 : for layer in &drop_layers {
1739 0 : info!(%layer, old_metadata=?layer.metadata(),
1740 0 : "dropping layer after shard split (no keys for this shard)",
1741 : );
1742 : }
1743 :
1744 : // At this point, we have replaced local layer files with their rewritten form, but not yet uploaded
1745 : // metadata to reflect that. If we restart here, the replaced layer files will look invalid (size mismatch
1746 : // to remote index) and be removed. This is inefficient but safe.
1747 0 : fail::fail_point!("compact-shard-ancestors-localonly");
1748 :
1749 : // Update the LayerMap so that readers will use the new layers, and enqueue it for writing to remote storage
1750 0 : self.rewrite_layers(replace_image_layers, drop_layers)
1751 0 : .await?;
1752 :
1753 0 : fail::fail_point!("compact-shard-ancestors-enqueued");
1754 :
1755 : // We wait for all uploads to complete before finishing this compaction stage. This is not
1756 : // necessary for correctness, but it simplifies testing, and avoids proceeding with another
1757 : // Timeline's compaction while this timeline's uploads may be generating lots of disk I/O
1758 : // load.
1759 0 : if outcome != CompactionOutcome::YieldForL0 {
1760 0 : info!("shard ancestor compaction waiting for uploads");
1761 0 : tokio::select! {
1762 0 : result = self.remote_client.wait_completion() => match result {
1763 0 : Ok(()) => {},
1764 0 : Err(WaitCompletionError::NotInitialized(ni)) => return Err(CompactionError::from(ni)),
1765 : Err(WaitCompletionError::UploadQueueShutDownOrStopped) => {
1766 0 : return Err(CompactionError::new_cancelled());
1767 : }
1768 : },
1769 : // Don't wait if there's L0 compaction to do. We don't need to update the outcome
1770 : // here, because we've already done the actual work.
1771 0 : _ = self.l0_compaction_trigger.notified(), if yield_for_l0 => {},
1772 : }
1773 0 : }
1774 :
1775 0 : info!(
1776 0 : "shard ancestor compaction done in {:.3}s{}",
1777 0 : started.elapsed().as_secs_f64(),
1778 0 : match outcome {
1779 : CompactionOutcome::Pending =>
1780 0 : format!(", with pending work (rewrite_max={rewrite_max})"),
1781 0 : CompactionOutcome::YieldForL0 => String::from(", yielding for L0 compaction"),
1782 0 : CompactionOutcome::Skipped | CompactionOutcome::Done => String::new(),
1783 : }
1784 : );
1785 :
1786 0 : fail::fail_point!("compact-shard-ancestors-persistent");
1787 :
1788 0 : Ok(outcome)
1789 0 : }
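For reference, the per-layer decision made by the selection loop above boils down to a classification over the ShardedRange page counts. The sketch below is simplified: it ignores the `u32::MAX` "approximate count" case as well as the later GC-cutoff, delta-layer, visibility, and generation checks, and `REWRITE_THRESHOLD` stands in for `ANCESTOR_COMPACTION_REWRITE_THRESHOLD`.

```rust
/// Simplified decision for a layer inherited from an ancestor shard,
/// based only on how many of its pages belong to this shard.
#[derive(Debug, PartialEq)]
enum AncestorLayerAction {
    Drop,    // no keys for this shard: drop the layer outright
    Keep,    // entirely shard-local, or not enough space to reclaim
    Rewrite, // small local share: rewriting reclaims significant space
}

const REWRITE_THRESHOLD: f64 = 0.3; // stand-in for ANCESTOR_COMPACTION_REWRITE_THRESHOLD

fn classify(local_pages: u32, raw_pages: u32) -> AncestorLayerAction {
    if local_pages == 0 {
        AncestorLayerAction::Drop
    } else if local_pages == raw_pages {
        AncestorLayerAction::Keep
    } else if local_pages as f64 / raw_pages as f64 <= REWRITE_THRESHOLD {
        AncestorLayerAction::Rewrite
    } else {
        AncestorLayerAction::Keep
    }
}

fn main() {
    assert_eq!(classify(0, 1000), AncestorLayerAction::Drop);
    assert_eq!(classify(1000, 1000), AncestorLayerAction::Keep);
    assert_eq!(classify(250, 1000), AncestorLayerAction::Rewrite);
    assert_eq!(classify(900, 1000), AncestorLayerAction::Keep);
}
```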
1790 :
1791 : /// Update the LayerVisibilityHint of layers covered by image layers, based on whether there is
1792 : /// an image layer between them and the most recent readable LSN (branch point or tip of timeline). The
1793 : /// purpose of the visibility hint is to record which layers need to be available to service reads.
1794 : ///
1795 : /// The result may be used as an input to eviction and secondary downloads to de-prioritize layers
1796 : /// that we know won't be needed for reads.
1797 123 : pub(crate) async fn update_layer_visibility(
1798 123 : &self,
1799 123 : ) -> Result<(), super::layer_manager::Shutdown> {
1800 123 : let head_lsn = self.get_last_record_lsn();
1801 :
1802 : // We will sweep through layers in reverse-LSN order. We only do historic layers. L0 deltas
1803 : // are implicitly left visible, because LayerVisibilityHint's default is Visible, and we never modify it here.
1804 : // Note that L0 deltas _can_ be covered by image layers, but we consider them 'visible' because we anticipate that
1805 : // they will be subject to L0->L1 compaction in the near future.
1806 123 : let layer_manager = self
1807 123 : .layers
1808 123 : .read(LayerManagerLockHolder::GetLayerMapInfo)
1809 123 : .await;
1810 123 : let layer_map = layer_manager.layer_map()?;
1811 :
1812 123 : let readable_points = {
1813 123 : let children = self.gc_info.read().unwrap().retain_lsns.clone();
1814 :
1815 123 : let mut readable_points = Vec::with_capacity(children.len() + 1);
1816 124 : for (child_lsn, _child_timeline_id, is_offloaded) in &children {
1817 1 : if *is_offloaded == MaybeOffloaded::Yes {
1818 0 : continue;
1819 1 : }
1820 1 : readable_points.push(*child_lsn);
1821 : }
1822 123 : readable_points.push(head_lsn);
1823 123 : readable_points
1824 : };
1825 :
1826 123 : let (layer_visibility, covered) = layer_map.get_visibility(readable_points);
1827 313 : for (layer_desc, visibility) in layer_visibility {
1828 190 : // FIXME: a more efficiency bulk zip() through the layers rather than NlogN getting each one
1829 190 : let layer = layer_manager.get_from_desc(&layer_desc);
1830 190 : layer.set_visibility(visibility);
1831 190 : }
1832 :
1833 : // TODO: publish our covered KeySpace to our parent, so that when they update their visibility, they can
1834 : // avoid assuming that everything at a branch point is visible.
1835 123 : drop(covered);
1836 123 : Ok(())
1837 123 : }
1838 :
1839 : /// Collect a bunch of Level 0 layer files, and compact and reshuffle them as
 1840 : /// Level 1 files. Returns whether the L0 layers are fully compacted.
1841 192 : async fn compact_level0(
1842 192 : self: &Arc<Self>,
1843 192 : target_file_size: u64,
1844 192 : force_compaction_ignore_threshold: bool,
1845 192 : force_compaction_lsn: Option<Lsn>,
1846 192 : ctx: &RequestContext,
1847 192 : ) -> Result<CompactionOutcome, CompactionError> {
1848 : let CompactLevel0Phase1Result {
1849 192 : new_layers,
1850 192 : deltas_to_compact,
1851 192 : outcome,
1852 : } = {
1853 192 : let phase1_span = info_span!("compact_level0_phase1");
1854 192 : let ctx = ctx.attached_child();
1855 192 : let stats = CompactLevel0Phase1StatsBuilder {
1856 192 : version: Some(2),
1857 192 : tenant_id: Some(self.tenant_shard_id),
1858 192 : timeline_id: Some(self.timeline_id),
1859 192 : ..Default::default()
1860 192 : };
1861 :
1862 192 : self.compact_level0_phase1(
1863 192 : stats,
1864 192 : target_file_size,
1865 192 : force_compaction_ignore_threshold,
1866 192 : force_compaction_lsn,
1867 192 : &ctx,
1868 192 : )
1869 192 : .instrument(phase1_span)
1870 192 : .await?
1871 : };
1872 :
1873 192 : if new_layers.is_empty() && deltas_to_compact.is_empty() {
1874 : // nothing to do
1875 169 : return Ok(CompactionOutcome::Done);
1876 23 : }
1877 :
1878 23 : self.finish_compact_batch(&new_layers, &Vec::new(), &deltas_to_compact)
1879 23 : .await?;
1880 23 : Ok(outcome)
1881 192 : }
1882 :
1883 : /// Level0 files first phase of compaction, explained in the [`Self::compact_legacy`] comment.
1884 192 : async fn compact_level0_phase1(
1885 192 : self: &Arc<Self>,
1886 192 : mut stats: CompactLevel0Phase1StatsBuilder,
1887 192 : target_file_size: u64,
1888 192 : force_compaction_ignore_threshold: bool,
1889 192 : force_compaction_lsn: Option<Lsn>,
1890 192 : ctx: &RequestContext,
1891 192 : ) -> Result<CompactLevel0Phase1Result, CompactionError> {
1892 192 : let begin = tokio::time::Instant::now();
1893 192 : let guard = self.layers.read(LayerManagerLockHolder::Compaction).await;
1894 192 : let now = tokio::time::Instant::now();
1895 192 : stats.read_lock_acquisition_micros =
1896 192 : DurationRecorder::Recorded(RecordedDuration(now - begin), now);
1897 :
1898 192 : let layers = guard.layer_map()?;
1899 192 : let level0_deltas = layers.level0_deltas();
1900 192 : stats.level0_deltas_count = Some(level0_deltas.len());
1901 :
1902 : // Only compact if enough layers have accumulated.
1903 192 : let threshold = self.get_compaction_threshold();
1904 192 : if level0_deltas.is_empty() || level0_deltas.len() < threshold {
1905 179 : if force_compaction_ignore_threshold {
1906 12 : if !level0_deltas.is_empty() {
1907 10 : info!(
1908 0 : level0_deltas = level0_deltas.len(),
1909 0 : threshold, "too few deltas to compact, but forcing compaction"
1910 : );
1911 : } else {
1912 2 : info!(
1913 0 : level0_deltas = level0_deltas.len(),
1914 0 : threshold, "too few deltas to compact, cannot force compaction"
1915 : );
1916 2 : return Ok(CompactLevel0Phase1Result::default());
1917 : }
1918 : } else {
1919 : // HADRON
1920 167 : let min_lsn = level0_deltas
1921 167 : .iter()
1922 602 : .map(|a| a.get_lsn_range().start)
1923 167 : .reduce(min);
1924 167 : if force_compaction_lsn.is_some()
1925 0 : && min_lsn.is_some()
1926 0 : && min_lsn.unwrap() < force_compaction_lsn.unwrap()
1927 : {
1928 0 : info!(
1929 0 : "forcing L0 compaction of {} L0 deltas. Min lsn: {}, force compaction lsn: {}",
1930 0 : level0_deltas.len(),
1931 0 : min_lsn.unwrap(),
1932 0 : force_compaction_lsn.unwrap()
1933 : );
1934 : } else {
1935 167 : debug!(
1936 0 : level0_deltas = level0_deltas.len(),
1937 0 : threshold, "too few deltas to compact"
1938 : );
1939 167 : return Ok(CompactLevel0Phase1Result::default());
1940 : }
1941 : }
1942 13 : }
1943 :
1944 23 : let mut level0_deltas = level0_deltas
1945 23 : .iter()
1946 201 : .map(|x| guard.get_from_desc(x))
1947 23 : .collect::<Vec<_>>();
1948 :
1949 23 : drop_layer_manager_rlock(guard);
1950 :
 1951 : // This is the last record LSN of the timeline as seen at the start of L0 compaction. It might advance
 1952 : // by the time we finish the compaction, so we capture it here.
1953 23 : let l0_last_record_lsn = self.get_last_record_lsn();
1954 :
1955 : // Gather the files to compact in this iteration.
1956 : //
1957 : // Start with the oldest Level 0 delta file, and collect any other
1958 : // level 0 files that form a contiguous sequence, such that the end
1959 : // LSN of previous file matches the start LSN of the next file.
1960 : //
1961 : // Note that if the files don't form such a sequence, we might
1962 : // "compact" just a single file. That's a bit pointless, but it allows
1963 : // us to get rid of the level 0 file, and compact the other files on
 1964 : // the next iteration. This could probably be made smarter, but such
1965 : // "gaps" in the sequence of level 0 files should only happen in case
1966 : // of a crash, partial download from cloud storage, or something like
1967 : // that, so it's not a big deal in practice.
1968 356 : level0_deltas.sort_by_key(|l| l.layer_desc().lsn_range.start);
1969 23 : let mut level0_deltas_iter = level0_deltas.iter();
1970 :
1971 23 : let first_level0_delta = level0_deltas_iter.next().unwrap();
1972 23 : let mut prev_lsn_end = first_level0_delta.layer_desc().lsn_range.end;
1973 23 : let mut deltas_to_compact = Vec::with_capacity(level0_deltas.len());
1974 :
1975 : // Accumulate the size of layers in `deltas_to_compact`
1976 23 : let mut deltas_to_compact_bytes = 0;
1977 :
1978 : // Under normal circumstances, we will accumulate up to compaction_upper_limit L0s of size
1979 : // checkpoint_distance each. To avoid edge cases using extra system resources, bound our
1980 : // work in this function to only operate on this much delta data at once.
1981 : //
1982 : // In general, compaction_threshold should be <= compaction_upper_limit, but in case that
1983 : // the constraint is not respected, we use the larger of the two.
1984 23 : let delta_size_limit = std::cmp::max(
1985 23 : self.get_compaction_upper_limit(),
1986 23 : self.get_compaction_threshold(),
1987 23 : ) as u64
1988 23 : * std::cmp::max(self.get_checkpoint_distance(), DEFAULT_CHECKPOINT_DISTANCE);
1989 :
1990 23 : let mut fully_compacted = true;
1991 :
1992 23 : deltas_to_compact.push(first_level0_delta.download_and_keep_resident(ctx).await?);
1993 201 : for l in level0_deltas_iter {
1994 178 : let lsn_range = &l.layer_desc().lsn_range;
1995 :
1996 178 : if lsn_range.start != prev_lsn_end {
1997 0 : break;
1998 178 : }
1999 178 : deltas_to_compact.push(l.download_and_keep_resident(ctx).await?);
2000 178 : deltas_to_compact_bytes += l.metadata().file_size;
2001 178 : prev_lsn_end = lsn_range.end;
2002 :
2003 178 : if deltas_to_compact_bytes >= delta_size_limit {
2004 0 : info!(
2005 0 : l0_deltas_selected = deltas_to_compact.len(),
2006 0 : l0_deltas_total = level0_deltas.len(),
2007 0 : "L0 compaction picker hit max delta layer size limit: {}",
2008 : delta_size_limit
2009 : );
2010 0 : fully_compacted = false;
2011 :
2012 : // Proceed with compaction, but only a subset of L0s
2013 0 : break;
2014 178 : }
2015 : }
2016 23 : let lsn_range = Range {
2017 23 : start: deltas_to_compact
2018 23 : .first()
2019 23 : .unwrap()
2020 23 : .layer_desc()
2021 23 : .lsn_range
2022 23 : .start,
2023 23 : end: deltas_to_compact.last().unwrap().layer_desc().lsn_range.end,
2024 23 : };
2025 :
2026 23 : info!(
2027 0 : "Starting Level0 compaction in LSN range {}-{} for {} layers ({} deltas in total)",
2028 : lsn_range.start,
2029 : lsn_range.end,
2030 0 : deltas_to_compact.len(),
2031 0 : level0_deltas.len()
2032 : );
2033 :
2034 201 : for l in deltas_to_compact.iter() {
2035 201 : info!("compact includes {l}");
2036 : }
2037 :
2038 : // We don't need the original list of layers anymore. Drop it so that
2039 : // we don't accidentally use it later in the function.
2040 23 : drop(level0_deltas);
2041 :
2042 23 : stats.compaction_prerequisites_micros = stats.read_lock_acquisition_micros.till_now();
2043 :
2044 : // TODO: replace with streaming k-merge
2045 23 : let all_keys = {
2046 23 : let mut all_keys = Vec::new();
2047 201 : for l in deltas_to_compact.iter() {
2048 201 : if self.cancel.is_cancelled() {
2049 0 : return Err(CompactionError::new_cancelled());
2050 201 : }
2051 201 : let delta = l.get_as_delta(ctx).await.map_err(CompactionError::Other)?;
2052 201 : let keys = delta
2053 201 : .index_entries(ctx)
2054 201 : .await
2055 201 : .map_err(CompactionError::Other)?;
2056 201 : all_keys.extend(keys);
2057 : }
 2058 : // The current stdlib sorting implementation is particularly fast when the
 2059 : // slice is made up of already-sorted sub-ranges.
2060 2137944 : all_keys.sort_by_key(|DeltaEntry { key, lsn, .. }| (*key, *lsn));
2061 23 : all_keys
2062 : };
2063 :
2064 23 : stats.read_lock_held_key_sort_micros = stats.compaction_prerequisites_micros.till_now();
2065 :
2066 : // Determine N largest holes where N is number of compacted layers. The vec is sorted by key range start.
2067 : //
2068 : // A hole is a key range for which this compaction doesn't have any WAL records.
2069 : // Our goal in this compaction iteration is to avoid creating L1s that, in terms of their key range,
2070 : // cover the hole, but actually don't contain any WAL records for that key range.
2071 : // The reason is that the mere stack of L1s (`count_deltas`) triggers image layer creation (`create_image_layers`).
2072 : // That image layer creation would be useless for a hole range covered by L1s that don't contain any WAL records.
2073 : //
2074 : // The algorithm chooses holes as follows.
 2075 : // - Slide a 2-window over the keys in key order to get the hole range (=distance between two keys).
2076 : // - Filter: min threshold on range length
2077 : // - Rank: by coverage size (=number of image layers required to reconstruct each key in the range for which we have any data)
2078 : //
2079 : // For more details, intuition, and some ASCII art see https://github.com/neondatabase/neon/pull/3597#discussion_r1112704451
2080 : #[derive(PartialEq, Eq)]
2081 : struct Hole {
2082 : key_range: Range<Key>,
2083 : coverage_size: usize,
2084 : }
2085 23 : let holes: Vec<Hole> = {
2086 : use std::cmp::Ordering;
2087 : impl Ord for Hole {
2088 0 : fn cmp(&self, other: &Self) -> Ordering {
2089 0 : self.coverage_size.cmp(&other.coverage_size).reverse()
2090 0 : }
2091 : }
2092 : impl PartialOrd for Hole {
2093 0 : fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2094 0 : Some(self.cmp(other))
2095 0 : }
2096 : }
2097 23 : let max_holes = deltas_to_compact.len();
2098 23 : let min_hole_range = (target_file_size / page_cache::PAGE_SZ as u64) as i128;
2099 23 : let min_hole_coverage_size = 3; // TODO: something more flexible?
2100 : // min-heap (reserve space for one more element added before eviction)
2101 23 : let mut heap: BinaryHeap<Hole> = BinaryHeap::with_capacity(max_holes + 1);
2102 23 : let mut prev: Option<Key> = None;
2103 :
2104 1032019 : for &DeltaEntry { key: next_key, .. } in all_keys.iter() {
2105 1032019 : if let Some(prev_key) = prev {
 2106 : // Just a first, fast filter: do not create hole entries for metadata keys. Otherwise the last hole
 2107 : // in the compaction would be the gap between the data keys and the metadata keys.
2108 1031996 : if next_key.to_i128() - prev_key.to_i128() >= min_hole_range
2109 285 : && !Key::is_metadata_key(&prev_key)
2110 : {
2111 0 : let key_range = prev_key..next_key;
 2112 : // Measuring a hole by simple subtraction of the i128 representations of the key range boundaries
 2113 : // doesn't make much sense, because the largest holes correspond to field1/field2 changes.
 2114 : // But we are mostly interested in eliminating holes which cause generation of excessive image layers.
 2115 : // That is why it is better to measure the size of a hole as the number of covering image layers.
2116 0 : let coverage_size = {
2117 : // TODO: optimize this with copy-on-write layer map.
2118 0 : let guard = self.layers.read(LayerManagerLockHolder::Compaction).await;
2119 0 : let layers = guard.layer_map()?;
2120 0 : layers.image_coverage(&key_range, l0_last_record_lsn).len()
2121 : };
2122 0 : if coverage_size >= min_hole_coverage_size {
2123 0 : heap.push(Hole {
2124 0 : key_range,
2125 0 : coverage_size,
2126 0 : });
2127 0 : if heap.len() > max_holes {
2128 0 : heap.pop(); // remove smallest hole
2129 0 : }
2130 0 : }
2131 1031996 : }
2132 23 : }
2133 1032019 : prev = Some(next_key.next());
2134 : }
2135 23 : let mut holes = heap.into_vec();
2136 23 : holes.sort_unstable_by_key(|hole| hole.key_range.start);
2137 23 : holes
2138 : };
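The `Hole` heap above works because its `Ord` is deliberately reversed: `BinaryHeap` is a max-heap, so the reversed comparison keeps the smallest candidate at the top, and popping once the heap grows past `max_holes` evicts it. A self-contained sketch of the same "keep the N largest" pattern, using `std::cmp::Reverse` instead of a custom `Ord`:

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

/// Keep only the `n` largest values seen, in O(len * log n).
fn n_largest(values: &[u64], n: usize) -> Vec<u64> {
    // Reverse turns the max-heap into a min-heap, so the smallest kept value
    // sits at the top and is evicted first.
    let mut heap: BinaryHeap<Reverse<u64>> = BinaryHeap::with_capacity(n + 1);
    for &v in values {
        heap.push(Reverse(v));
        if heap.len() > n {
            heap.pop(); // drop the smallest of the candidates
        }
    }
    heap.into_iter().map(|Reverse(v)| v).collect()
}

fn main() {
    let mut top = n_largest(&[5, 42, 7, 19, 3, 28], 3);
    top.sort_unstable();
    assert_eq!(top, vec![19, 28, 42]);
}
```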
2139 23 : stats.read_lock_held_compute_holes_micros = stats.read_lock_held_key_sort_micros.till_now();
2140 :
2141 23 : if self.cancel.is_cancelled() {
2142 0 : return Err(CompactionError::new_cancelled());
2143 23 : }
2144 :
2145 23 : stats.read_lock_drop_micros = stats.read_lock_held_compute_holes_micros.till_now();
2146 :
2147 : // This iterator walks through all key-value pairs from all the layers
2148 : // we're compacting, in key, LSN order.
2149 : // If there's both a Value::Image and Value::WalRecord for the same (key,lsn),
2150 : // then the Value::Image is ordered before Value::WalRecord.
2151 23 : let mut all_values_iter = {
2152 23 : let mut deltas = Vec::with_capacity(deltas_to_compact.len());
2153 201 : for l in deltas_to_compact.iter() {
2154 201 : let l = l.get_as_delta(ctx).await.map_err(CompactionError::Other)?;
2155 201 : deltas.push(l);
2156 : }
2157 23 : MergeIterator::create_with_options(
2158 23 : &deltas,
2159 23 : &[],
2160 23 : ctx,
2161 23 : 1024 * 8192, /* 8 MiB buffer per layer iterator */
2162 : 1024,
2163 : )
2164 : };
2165 :
2166 : // This iterator walks through all keys and is needed to calculate size used by each key
2167 23 : let mut all_keys_iter = all_keys
2168 23 : .iter()
2169 1032019 : .map(|DeltaEntry { key, lsn, size, .. }| (*key, *lsn, *size))
2170 1031996 : .coalesce(|mut prev, cur| {
2171 : // Coalesce keys that belong to the same key pair.
2172 : // This ensures that compaction doesn't put them
2173 : // into different layer files.
2174 : // Still limit this by the target file size,
2175 : // so that we keep the size of the files in
2176 : // check.
2177 1031996 : if prev.0 == cur.0 && prev.2 < target_file_size {
2178 14316 : prev.2 += cur.2;
2179 14316 : Ok(prev)
2180 : } else {
2181 1017680 : Err((prev, cur))
2182 : }
2183 1031996 : });
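`Itertools::coalesce` merges adjacent items while the closure returns `Ok` and emits the left item when it returns `Err`, which is how the closure above accumulates per-key sizes. A standalone sketch of the same merge rule on `(key, size)` pairs, with a made-up `TARGET` standing in for `target_file_size`:

```rust
use itertools::Itertools;

fn main() {
    const TARGET: u64 = 100; // stand-in for target_file_size

    let entries = [("a", 40u64), ("a", 50), ("a", 30), ("b", 10), ("b", 5)];
    let coalesced: Vec<(&str, u64)> = entries
        .into_iter()
        .coalesce(|mut prev, cur| {
            // Merge sizes of adjacent entries for the same key, but stop
            // accumulating once the running size reaches the target.
            if prev.0 == cur.0 && prev.1 < TARGET {
                prev.1 += cur.1;
                Ok(prev)
            } else {
                Err((prev, cur))
            }
        })
        .collect();

    // The "a" entries merge into ("a", 120) because the running sizes 40 and 90
    // are both below TARGET; the "b" entries merge into ("b", 15).
    assert_eq!(coalesced, vec![("a", 120), ("b", 15)]);
}
```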
2184 :
2185 : // Merge the contents of all the input delta layers into a new set
2186 : // of delta layers, based on the current partitioning.
2187 : //
2188 : // We split the new delta layers on the key dimension. We iterate through the key space, and for each key, check if including the next key to the current output layer we're building would cause the layer to become too large. If so, dump the current output layer and start new one.
2189 : // It's possible that there is a single key with so many page versions that storing all of them in a single layer file
2190 : // would be too large. In that case, we also split on the LSN dimension.
2191 : //
2192 : // LSN
2193 : // ^
2194 : // |
2195 : // | +-----------+ +--+--+--+--+
2196 : // | | | | | | | |
2197 : // | +-----------+ | | | | |
2198 : // | | | | | | | |
2199 : // | +-----------+ ==> | | | | |
2200 : // | | | | | | | |
2201 : // | +-----------+ | | | | |
2202 : // | | | | | | | |
2203 : // | +-----------+ +--+--+--+--+
2204 : // |
2205 : // +--------------> key
2206 : //
2207 : //
2208 : // If one key (X) has a lot of page versions:
2209 : //
2210 : // LSN
2211 : // ^
2212 : // | (X)
2213 : // | +-----------+ +--+--+--+--+
2214 : // | | | | | | | |
2215 : // | +-----------+ | | +--+ |
2216 : // | | | | | | | |
2217 : // | +-----------+ ==> | | | | |
2218 : // | | | | | +--+ |
2219 : // | +-----------+ | | | | |
2220 : // | | | | | | | |
2221 : // | +-----------+ +--+--+--+--+
2222 : // |
2223 : // +--------------> key
2224 : // TODO: this actually divides the layers into fixed-size chunks, not
2225 : // based on the partitioning.
2226 : //
2227 : // TODO: we should also opportunistically materialize and
2228 : // garbage collect what we can.
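The loop below implements this splitting. As a hedged restatement of just the roll-over rule (the real loop additionally tracks per-key LSN splitting via `dup_start_lsn`/`dup_end_lsn` and skips keys that are disposable on this shard): a new output delta layer is started whenever the next key would push the current file past the target size, or the next key crosses one of the pre-computed holes.

```rust
/// Should the current output delta layer be flushed before writing `next_key_size`
/// bytes for a new key? (Sketch: ignores the dup-layer / per-key LSN splitting.)
fn should_roll_over(
    written_size: u64,   // bytes already written to the open layer
    next_key_size: u64,  // total size of the values for the next key
    target_file_size: u64,
    next_key_crosses_hole: bool,
) -> bool {
    written_size + next_key_size > target_file_size || next_key_crosses_hole
}

fn main() {
    assert!(!should_roll_over(10 << 20, 1 << 20, 128 << 20, false));
    assert!(should_roll_over(127 << 20, 2 << 20, 128 << 20, false));
    assert!(should_roll_over(10 << 20, 1 << 20, 128 << 20, true));
}
```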
2229 23 : let mut new_layers = Vec::new();
2230 23 : let mut prev_key: Option<Key> = None;
2231 23 : let mut writer: Option<DeltaLayerWriter> = None;
2232 23 : let mut key_values_total_size = 0u64;
2233 23 : let mut dup_start_lsn: Lsn = Lsn::INVALID; // start LSN of layer containing values of the single key
2234 23 : let mut dup_end_lsn: Lsn = Lsn::INVALID; // end LSN of layer containing values of the single key
2235 23 : let mut next_hole = 0; // index of next hole in holes vector
2236 :
2237 23 : let mut keys = 0;
2238 :
2239 1032042 : while let Some((key, lsn, value)) = all_values_iter
2240 1032042 : .next()
2241 1032042 : .await
2242 1032042 : .map_err(CompactionError::Other)?
2243 : {
2244 1032019 : keys += 1;
2245 :
2246 1032019 : if keys % 32_768 == 0 && self.cancel.is_cancelled() {
 2247 : // avoid hitting the cancellation token on every key. In benches, we end up
 2248 : // shuffling on the order of a million keys per layer, which means we'll check it
 2249 : // around tens of times per layer.
2250 0 : return Err(CompactionError::new_cancelled());
2251 1032019 : }
2252 :
2253 1032019 : let same_key = prev_key == Some(key);
2254 : // We need to check key boundaries once we reach next key or end of layer with the same key
2255 1032019 : if !same_key || lsn == dup_end_lsn {
2256 1017703 : let mut next_key_size = 0u64;
2257 1017703 : let is_dup_layer = dup_end_lsn.is_valid();
2258 1017703 : dup_start_lsn = Lsn::INVALID;
2259 1017703 : if !same_key {
2260 1017703 : dup_end_lsn = Lsn::INVALID;
2261 1017703 : }
2262 : // Determine size occupied by this key. We stop at next key or when size becomes larger than target_file_size
2263 1017703 : for (next_key, next_lsn, next_size) in all_keys_iter.by_ref() {
2264 1017703 : next_key_size = next_size;
2265 1017703 : if key != next_key {
2266 1017680 : if dup_end_lsn.is_valid() {
2267 0 : // We are writting segment with duplicates:
2268 0 : // place all remaining values of this key in separate segment
2269 0 : dup_start_lsn = dup_end_lsn; // new segments starts where old stops
2270 0 : dup_end_lsn = lsn_range.end; // there are no more values of this key till end of LSN range
2271 1017680 : }
2272 1017680 : break;
2273 23 : }
2274 23 : key_values_total_size += next_size;
 2275 : // Check if it is time to split the segment: the accumulated size of this key's values exceeds the target file size.
 2276 : // We need to avoid generating empty segments if next_size > target_file_size.
2277 23 : if key_values_total_size > target_file_size && lsn != next_lsn {
2278 : // Split key between multiple layers: such layer can contain only single key
2279 0 : dup_start_lsn = if dup_end_lsn.is_valid() {
2280 0 : dup_end_lsn // new segment with duplicates starts where old one stops
2281 : } else {
2282 0 : lsn // start with the first LSN for this key
2283 : };
2284 0 : dup_end_lsn = next_lsn; // upper LSN boundary is exclusive
2285 0 : break;
2286 23 : }
2287 : }
 2288 : // Handle the case when the loop reaches the last key: dup_end_lsn is valid but dup_start_lsn is not set.
2289 1017703 : if dup_end_lsn.is_valid() && !dup_start_lsn.is_valid() {
2290 0 : dup_start_lsn = dup_end_lsn;
2291 0 : dup_end_lsn = lsn_range.end;
2292 1017703 : }
2293 1017703 : if writer.is_some() {
2294 1017680 : let written_size = writer.as_mut().unwrap().size();
2295 1017680 : let contains_hole =
2296 1017680 : next_hole < holes.len() && key >= holes[next_hole].key_range.end;
2297 : // check if key cause layer overflow or contains hole...
2298 1017680 : if is_dup_layer
2299 1017680 : || dup_end_lsn.is_valid()
2300 1017680 : || written_size + key_values_total_size > target_file_size
2301 1017540 : || contains_hole
2302 : {
2303 : // ... if so, flush previous layer and prepare to write new one
2304 140 : let (desc, path) = writer
2305 140 : .take()
2306 140 : .unwrap()
2307 140 : .finish(prev_key.unwrap().next(), ctx)
2308 140 : .await
2309 140 : .map_err(CompactionError::Other)?;
2310 140 : let new_delta = Layer::finish_creating(self.conf, self, desc, &path)
2311 140 : .map_err(CompactionError::Other)?;
2312 :
2313 140 : new_layers.push(new_delta);
2314 140 : writer = None;
2315 :
2316 140 : if contains_hole {
2317 0 : // skip hole
2318 0 : next_hole += 1;
2319 140 : }
2320 1017540 : }
2321 23 : }
2322 : // Remember size of key value because at next iteration we will access next item
2323 1017703 : key_values_total_size = next_key_size;
2324 14316 : }
2325 1032019 : fail_point!("delta-layer-writer-fail-before-finish", |_| {
2326 0 : Err(CompactionError::Other(anyhow::anyhow!(
2327 0 : "failpoint delta-layer-writer-fail-before-finish"
2328 0 : )))
2329 0 : });
2330 :
2331 1032019 : if !self.shard_identity.is_key_disposable(&key) {
2332 1032019 : if writer.is_none() {
2333 163 : if self.cancel.is_cancelled() {
2334 : // to be somewhat responsive to cancellation, check for each new layer
2335 0 : return Err(CompactionError::new_cancelled());
2336 163 : }
 2337 : // Create the writer if not initialized yet
2338 163 : writer = Some(
2339 163 : DeltaLayerWriter::new(
2340 163 : self.conf,
2341 163 : self.timeline_id,
2342 163 : self.tenant_shard_id,
2343 163 : key,
2344 163 : if dup_end_lsn.is_valid() {
2345 : // this is a layer containing slice of values of the same key
2346 0 : debug!("Create new dup layer {}..{}", dup_start_lsn, dup_end_lsn);
2347 0 : dup_start_lsn..dup_end_lsn
2348 : } else {
2349 163 : debug!("Create new layer {}..{}", lsn_range.start, lsn_range.end);
2350 163 : lsn_range.clone()
2351 : },
2352 163 : &self.gate,
2353 163 : self.cancel.clone(),
2354 163 : ctx,
2355 : )
2356 163 : .await
2357 163 : .map_err(CompactionError::Other)?,
2358 : );
2359 :
2360 163 : keys = 0;
2361 1031856 : }
2362 :
2363 1032019 : writer
2364 1032019 : .as_mut()
2365 1032019 : .unwrap()
2366 1032019 : .put_value(key, lsn, value, ctx)
2367 1032019 : .await?;
2368 : } else {
2369 0 : let owner = self.shard_identity.get_shard_number(&key);
2370 :
2371 : // This happens after a shard split, when we're compacting an L0 created by our parent shard
2372 0 : debug!("dropping key {key} during compaction (it belongs on shard {owner})");
2373 : }
2374 :
2375 1032019 : if !new_layers.is_empty() {
2376 9893 : fail_point!("after-timeline-compacted-first-L1");
2377 1022126 : }
2378 :
2379 1032019 : prev_key = Some(key);
2380 : }
2381 23 : if let Some(writer) = writer {
2382 23 : let (desc, path) = writer
2383 23 : .finish(prev_key.unwrap().next(), ctx)
2384 23 : .await
2385 23 : .map_err(CompactionError::Other)?;
2386 23 : let new_delta = Layer::finish_creating(self.conf, self, desc, &path)
2387 23 : .map_err(CompactionError::Other)?;
2388 23 : new_layers.push(new_delta);
2389 0 : }
2390 :
2391 : // Sync layers
2392 23 : if !new_layers.is_empty() {
2393 : // Print a warning if the created layer is larger than double the target size
2394 : // Add two pages for potential overhead. This should in theory be already
2395 : // accounted for in the target calculation, but for very small targets,
2396 : // we still might easily hit the limit otherwise.
2397 23 : let warn_limit = target_file_size * 2 + page_cache::PAGE_SZ as u64 * 2;
2398 163 : for layer in new_layers.iter() {
2399 163 : if layer.layer_desc().file_size > warn_limit {
2400 0 : warn!(
2401 : %layer,
2402 0 : "created delta file of size {} larger than double of target of {target_file_size}", layer.layer_desc().file_size
2403 : );
2404 163 : }
2405 : }
2406 :
2407 : // The writer.finish() above already did the fsync of the inodes.
2408 : // We just need to fsync the directory in which these inodes are linked,
2409 : // which we know to be the timeline directory.
2410 : //
 2411 : // We use fatal_err() below because after writer.finish() returns with success,
2412 : // the in-memory state of the filesystem already has the layer file in its final place,
2413 : // and subsequent pageserver code could think it's durable while it really isn't.
2414 23 : let timeline_dir = VirtualFile::open(
2415 23 : &self
2416 23 : .conf
2417 23 : .timeline_path(&self.tenant_shard_id, &self.timeline_id),
2418 23 : ctx,
2419 23 : )
2420 23 : .await
2421 23 : .fatal_err("VirtualFile::open for timeline dir fsync");
2422 23 : timeline_dir
2423 23 : .sync_all()
2424 23 : .await
2425 23 : .fatal_err("VirtualFile::sync_all timeline dir");
2426 0 : }
2427 :
2428 23 : stats.write_layer_files_micros = stats.read_lock_drop_micros.till_now();
2429 23 : stats.new_deltas_count = Some(new_layers.len());
2430 163 : stats.new_deltas_size = Some(new_layers.iter().map(|l| l.layer_desc().file_size).sum());
2431 :
2432 23 : match TryInto::<CompactLevel0Phase1Stats>::try_into(stats)
2433 23 : .and_then(|stats| serde_json::to_string(&stats).context("serde_json::to_string"))
2434 : {
2435 23 : Ok(stats_json) => {
2436 23 : info!(
2437 0 : stats_json = stats_json.as_str(),
2438 0 : "compact_level0_phase1 stats available"
2439 : )
2440 : }
2441 0 : Err(e) => {
2442 0 : warn!("compact_level0_phase1 stats failed to serialize: {:#}", e);
2443 : }
2444 : }
2445 :
2446 : // Without this, rustc complains about deltas_to_compact still
2447 : // being borrowed when we `.into_iter()` below.
2448 23 : drop(all_values_iter);
2449 :
2450 : Ok(CompactLevel0Phase1Result {
2451 23 : new_layers,
2452 23 : deltas_to_compact: deltas_to_compact
2453 23 : .into_iter()
2454 201 : .map(|x| x.drop_eviction_guard())
2455 23 : .collect::<Vec<_>>(),
2456 23 : outcome: if fully_compacted {
2457 23 : CompactionOutcome::Done
2458 : } else {
2459 0 : CompactionOutcome::Pending
2460 : },
2461 : })
2462 192 : }
2463 : }
2464 :
2465 : #[derive(Default)]
2466 : struct CompactLevel0Phase1Result {
2467 : new_layers: Vec<ResidentLayer>,
2468 : deltas_to_compact: Vec<Layer>,
2469 : // Whether we have included all L0 layers, or selected only part of them due to the
2470 : // L0 compaction size limit.
2471 : outcome: CompactionOutcome,
2472 : }
2473 :
2474 : #[derive(Default)]
2475 : struct CompactLevel0Phase1StatsBuilder {
2476 : version: Option<u64>,
2477 : tenant_id: Option<TenantShardId>,
2478 : timeline_id: Option<TimelineId>,
2479 : read_lock_acquisition_micros: DurationRecorder,
2480 : read_lock_held_key_sort_micros: DurationRecorder,
2481 : compaction_prerequisites_micros: DurationRecorder,
2482 : read_lock_held_compute_holes_micros: DurationRecorder,
2483 : read_lock_drop_micros: DurationRecorder,
2484 : write_layer_files_micros: DurationRecorder,
2485 : level0_deltas_count: Option<usize>,
2486 : new_deltas_count: Option<usize>,
2487 : new_deltas_size: Option<u64>,
2488 : }
2489 :
2490 : #[derive(serde::Serialize)]
2491 : struct CompactLevel0Phase1Stats {
2492 : version: u64,
2493 : tenant_id: TenantShardId,
2494 : timeline_id: TimelineId,
2495 : read_lock_acquisition_micros: RecordedDuration,
2496 : read_lock_held_key_sort_micros: RecordedDuration,
2497 : compaction_prerequisites_micros: RecordedDuration,
2498 : read_lock_held_compute_holes_micros: RecordedDuration,
2499 : read_lock_drop_micros: RecordedDuration,
2500 : write_layer_files_micros: RecordedDuration,
2501 : level0_deltas_count: usize,
2502 : new_deltas_count: usize,
2503 : new_deltas_size: u64,
2504 : }
2505 :
2506 : impl TryFrom<CompactLevel0Phase1StatsBuilder> for CompactLevel0Phase1Stats {
2507 : type Error = anyhow::Error;
2508 :
2509 23 : fn try_from(value: CompactLevel0Phase1StatsBuilder) -> Result<Self, Self::Error> {
2510 : Ok(Self {
2511 23 : version: value.version.ok_or_else(|| anyhow!("version not set"))?,
2512 23 : tenant_id: value
2513 23 : .tenant_id
2514 23 : .ok_or_else(|| anyhow!("tenant_id not set"))?,
2515 23 : timeline_id: value
2516 23 : .timeline_id
2517 23 : .ok_or_else(|| anyhow!("timeline_id not set"))?,
2518 23 : read_lock_acquisition_micros: value
2519 23 : .read_lock_acquisition_micros
2520 23 : .into_recorded()
2521 23 : .ok_or_else(|| anyhow!("read_lock_acquisition_micros not set"))?,
2522 23 : read_lock_held_key_sort_micros: value
2523 23 : .read_lock_held_key_sort_micros
2524 23 : .into_recorded()
2525 23 : .ok_or_else(|| anyhow!("read_lock_held_key_sort_micros not set"))?,
2526 23 : compaction_prerequisites_micros: value
2527 23 : .compaction_prerequisites_micros
2528 23 : .into_recorded()
 2529 23 : .ok_or_else(|| anyhow!("compaction_prerequisites_micros not set"))?,
2530 23 : read_lock_held_compute_holes_micros: value
2531 23 : .read_lock_held_compute_holes_micros
2532 23 : .into_recorded()
2533 23 : .ok_or_else(|| anyhow!("read_lock_held_compute_holes_micros not set"))?,
2534 23 : read_lock_drop_micros: value
2535 23 : .read_lock_drop_micros
2536 23 : .into_recorded()
2537 23 : .ok_or_else(|| anyhow!("read_lock_drop_micros not set"))?,
2538 23 : write_layer_files_micros: value
2539 23 : .write_layer_files_micros
2540 23 : .into_recorded()
2541 23 : .ok_or_else(|| anyhow!("write_layer_files_micros not set"))?,
2542 23 : level0_deltas_count: value
2543 23 : .level0_deltas_count
2544 23 : .ok_or_else(|| anyhow!("level0_deltas_count not set"))?,
2545 23 : new_deltas_count: value
2546 23 : .new_deltas_count
2547 23 : .ok_or_else(|| anyhow!("new_deltas_count not set"))?,
2548 23 : new_deltas_size: value
2549 23 : .new_deltas_size
2550 23 : .ok_or_else(|| anyhow!("new_deltas_size not set"))?,
2551 : })
2552 23 : }
2553 : }
2554 :
2555 : impl Timeline {
2556 : /// Entry point for new tiered compaction algorithm.
2557 : ///
2558 : /// All the real work is in the implementation in the pageserver_compaction
2559 : /// crate. The code here would apply to any algorithm implemented by the
2560 : /// same interface, but tiered is the only one at the moment.
2561 : ///
2562 : /// TODO: cancellation
2563 0 : pub(crate) async fn compact_tiered(
2564 0 : self: &Arc<Self>,
2565 0 : _cancel: &CancellationToken,
2566 0 : ctx: &RequestContext,
2567 0 : ) -> Result<(), CompactionError> {
2568 0 : let fanout = self.get_compaction_threshold() as u64;
2569 0 : let target_file_size = self.get_checkpoint_distance();
2570 :
2571 : // Find the top of the historical layers
2572 0 : let end_lsn = {
2573 0 : let guard = self.layers.read(LayerManagerLockHolder::Compaction).await;
2574 0 : let layers = guard.layer_map()?;
2575 :
2576 0 : let l0_deltas = layers.level0_deltas();
2577 :
2578 : // As an optimization, if we find that there are too few L0 layers,
2579 : // bail out early. We know that the compaction algorithm would do
2580 : // nothing in that case.
2581 0 : if l0_deltas.len() < fanout as usize {
2582 : // doesn't need compacting
2583 0 : return Ok(());
2584 0 : }
2585 0 : l0_deltas.iter().map(|l| l.lsn_range.end).max().unwrap()
2586 : };
2587 :
2588 : // Is the timeline being deleted?
2589 0 : if self.is_stopping() {
2590 0 : trace!("Dropping out of compaction on timeline shutdown");
2591 0 : return Err(CompactionError::new_cancelled());
2592 0 : }
2593 :
2594 0 : let (dense_ks, _sparse_ks) = self
2595 0 : .collect_keyspace(end_lsn, ctx)
2596 0 : .await
2597 0 : .map_err(CompactionError::from_collect_keyspace)?;
2598 : // TODO(chi): ignore sparse_keyspace for now, compact it in the future.
2599 0 : let mut adaptor = TimelineAdaptor::new(self, (end_lsn, dense_ks));
2600 :
2601 0 : pageserver_compaction::compact_tiered::compact_tiered(
2602 0 : &mut adaptor,
2603 0 : end_lsn,
2604 0 : target_file_size,
2605 0 : fanout,
2606 0 : ctx,
2607 0 : )
2608 0 : .await
2609 : // TODO: compact_tiered needs to return CompactionError
2610 0 : .map_err(CompactionError::Other)?;
2611 :
2612 0 : adaptor.flush_updates().await?;
2613 0 : Ok(())
2614 0 : }
2615 :
2616 : /// Take a list of images and deltas, produce images and deltas according to GC horizon and retain_lsns.
2617 : ///
2618 : /// It takes a key, the values of the key within the compaction process, a GC horizon, and all retain_lsns below the horizon.
 2619 : /// For now, it requires that `accumulated_values` contain the full history of the key (i.e., the key with the lowest LSN is
2620 : /// an image or a WAL not requiring a base image). This restriction will be removed once we implement gc-compaction on branch.
2621 : ///
 2622 : /// The function returns the deltas and the base image that need to be placed at each of the retain LSNs. For example, we have:
2623 : ///
2624 : /// A@0x10, +B@0x20, +C@0x30, +D@0x40, +E@0x50, +F@0x60
2625 : /// horizon = 0x50, retain_lsn = 0x20, 0x40, delta_threshold=3
2626 : ///
2627 : /// The function will produce:
2628 : ///
2629 : /// ```plain
2630 : /// 0x20(retain_lsn) -> img=AB@0x20 always produce a single image below the lowest retain LSN
2631 : /// 0x40(retain_lsn) -> deltas=[+C@0x30, +D@0x40] two deltas since the last base image, keeping the deltas
2632 : /// 0x50(horizon) -> deltas=[ABCDE@0x50] three deltas since the last base image, generate an image but put it in the delta
2633 : /// above_horizon -> deltas=[+F@0x60] full history above the horizon
2634 : /// ```
2635 : ///
2636 : /// Note that `accumulated_values` must be sorted by LSN and should belong to a single key.
2637 : #[allow(clippy::too_many_arguments)]
2638 324 : pub(crate) async fn generate_key_retention(
2639 324 : self: &Arc<Timeline>,
2640 324 : key: Key,
2641 324 : full_history: &[(Key, Lsn, Value)],
2642 324 : horizon: Lsn,
2643 324 : retain_lsn_below_horizon: &[Lsn],
2644 324 : delta_threshold_cnt: usize,
2645 324 : base_img_from_ancestor: Option<(Key, Lsn, Bytes)>,
2646 324 : verification: bool,
2647 324 : ) -> anyhow::Result<KeyHistoryRetention> {
2648 : // Pre-checks for the invariants
2649 :
2650 324 : let debug_mode = cfg!(debug_assertions) || cfg!(feature = "testing");
2651 :
2652 324 : if debug_mode {
2653 786 : for (log_key, _, _) in full_history {
2654 462 : assert_eq!(log_key, &key, "mismatched key");
2655 : }
2656 324 : for i in 1..full_history.len() {
2657 138 : assert!(full_history[i - 1].1 <= full_history[i].1, "unordered LSN");
2658 138 : if full_history[i - 1].1 == full_history[i].1 {
2659 0 : assert!(
2660 0 : matches!(full_history[i - 1].2, Value::Image(_)),
2661 0 : "unordered delta/image, or duplicated delta"
2662 : );
2663 138 : }
2664 : }
2665 : // There was an assertion for no base image that checks if the first
2666 : // record in the history is `will_init` before, but it was removed.
2667 : // This is explained in the test cases for generate_key_retention.
2668 : // Search "incomplete history" for more information.
2669 714 : for lsn in retain_lsn_below_horizon {
2670 390 : assert!(lsn < &horizon, "retain lsn must be below horizon")
2671 : }
2672 324 : for i in 1..retain_lsn_below_horizon.len() {
2673 178 : assert!(
2674 178 : retain_lsn_below_horizon[i - 1] <= retain_lsn_below_horizon[i],
2675 0 : "unordered LSN"
2676 : );
2677 : }
2678 0 : }
2679 324 : let has_ancestor = base_img_from_ancestor.is_some();
2680 : // Step 1: split history into len(retain_lsn_below_horizon) + 2 buckets, where the last bucket is for all deltas above the horizon,
2681 : // and the second-to-last bucket is for the horizon. Each bucket contains lsn_last_bucket < deltas <= lsn_this_bucket.
2682 324 : let (mut split_history, lsn_split_points) = {
2683 324 : let mut split_history = Vec::new();
2684 324 : split_history.resize_with(retain_lsn_below_horizon.len() + 2, Vec::new);
2685 324 : let mut lsn_split_points = Vec::with_capacity(retain_lsn_below_horizon.len() + 1);
2686 714 : for lsn in retain_lsn_below_horizon {
2687 390 : lsn_split_points.push(*lsn);
2688 390 : }
2689 324 : lsn_split_points.push(horizon);
2690 324 : let mut current_idx = 0;
2691 786 : for item @ (_, lsn, _) in full_history {
2692 584 : while current_idx < lsn_split_points.len() && *lsn > lsn_split_points[current_idx] {
2693 122 : current_idx += 1;
2694 122 : }
2695 462 : split_history[current_idx].push(item);
2696 : }
2697 324 : (split_history, lsn_split_points)
2698 : };
2699 : // Step 2: filter out duplicated records due to the k-merge of image/delta layers
2700 1362 : for split_for_lsn in &mut split_history {
2701 1038 : let mut prev_lsn = None;
2702 1038 : let mut new_split_for_lsn = Vec::with_capacity(split_for_lsn.len());
2703 1038 : for record @ (_, lsn, _) in std::mem::take(split_for_lsn) {
2704 462 : if let Some(prev_lsn) = &prev_lsn {
2705 62 : if *prev_lsn == lsn {
2706 : // The case that we have an LSN with both data from the delta layer and the image layer. As
2707 : // `ValueWrapper` ensures that an image is ordered before a delta at the same LSN, we simply
2708 : // drop this delta and keep the image.
2709 : //
2710 : // For example, we have delta layer key1@0x10, key1@0x20, and image layer key1@0x10, we will
2711 : // keep the image for key1@0x10 and the delta for key1@0x20. key1@0x10 delta will be simply
2712 : // dropped.
2713 : //
2714 : // TODO: in case we have both delta + images for a given LSN and it does not exceed the delta
2715 : // threshold, we could have kept delta instead to save space. This is an optimization for the future.
2716 0 : continue;
2717 62 : }
2718 400 : }
2719 462 : prev_lsn = Some(lsn);
2720 462 : new_split_for_lsn.push(record);
2721 : }
2722 1038 : *split_for_lsn = new_split_for_lsn;
2723 : }
2724 : // Step 3: generate images when necessary
2725 324 : let mut retention = Vec::with_capacity(split_history.len());
2726 324 : let mut records_since_last_image = 0;
2727 324 : let batch_cnt = split_history.len();
2728 324 : assert!(
2729 324 : batch_cnt >= 2,
2730 0 : "should have at least below + above horizon batches"
2731 : );
2732 324 : let mut replay_history: Vec<(Key, Lsn, Value)> = Vec::new();
2733 324 : if let Some((key, lsn, ref img)) = base_img_from_ancestor {
2734 21 : replay_history.push((key, lsn, Value::Image(img.clone())));
2735 303 : }
2736 :
2737 : /// Generate debug information for the replay history
2738 0 : fn generate_history_trace(replay_history: &[(Key, Lsn, Value)]) -> String {
2739 : use std::fmt::Write;
2740 0 : let mut output = String::new();
2741 0 : if let Some((key, _, _)) = replay_history.first() {
2742 0 : write!(output, "key={key} ").unwrap();
2743 0 : let mut cnt = 0;
2744 0 : for (_, lsn, val) in replay_history {
2745 0 : if val.is_image() {
2746 0 : write!(output, "i@{lsn} ").unwrap();
2747 0 : } else if val.will_init() {
2748 0 : write!(output, "di@{lsn} ").unwrap();
2749 0 : } else {
2750 0 : write!(output, "d@{lsn} ").unwrap();
2751 0 : }
2752 0 : cnt += 1;
2753 0 : if cnt >= 128 {
2754 0 : write!(output, "... and more").unwrap();
2755 0 : break;
2756 0 : }
2757 : }
2758 0 : } else {
2759 0 : write!(output, "<no history>").unwrap();
2760 0 : }
2761 0 : output
2762 0 : }
2763 :
2764 0 : fn generate_debug_trace(
2765 0 : replay_history: Option<&[(Key, Lsn, Value)]>,
2766 0 : full_history: &[(Key, Lsn, Value)],
2767 0 : lsns: &[Lsn],
2768 0 : horizon: Lsn,
2769 0 : ) -> String {
2770 : use std::fmt::Write;
2771 0 : let mut output = String::new();
2772 0 : if let Some(replay_history) = replay_history {
2773 0 : writeln!(
2774 0 : output,
2775 0 : "replay_history: {}",
2776 0 : generate_history_trace(replay_history)
2777 0 : )
2778 0 : .unwrap();
2779 0 : } else {
2780 0 : writeln!(output, "replay_history: <disabled>",).unwrap();
2781 0 : }
2782 0 : writeln!(
2783 0 : output,
2784 0 : "full_history: {}",
2785 0 : generate_history_trace(full_history)
2786 : )
2787 0 : .unwrap();
2788 0 : writeln!(
2789 0 : output,
2790 0 : "when processing: [{}] horizon={}",
2791 0 : lsns.iter().map(|l| format!("{l}")).join(","),
2792 : horizon
2793 : )
2794 0 : .unwrap();
2795 0 : output
2796 0 : }
2797 :
2798 324 : let mut key_exists = false;
2799 1037 : for (i, split_for_lsn) in split_history.into_iter().enumerate() {
2800 : // TODO: there could be image keys inside the splits, and we can compute records_since_last_image accordingly.
2801 1037 : records_since_last_image += split_for_lsn.len();
2802 : // Whether to produce an image into the final layer files
2803 1037 : let produce_image = if i == 0 && !has_ancestor {
2804 : // We always generate images for the first batch (below horizon / lowest retain_lsn)
2805 303 : true
2806 734 : } else if i == batch_cnt - 1 {
2807 : // Do not generate images for the last batch (above horizon)
2808 323 : false
2809 411 : } else if records_since_last_image == 0 {
2810 322 : false
2811 89 : } else if records_since_last_image >= delta_threshold_cnt {
2812 : // Generate images when there are too many records
2813 3 : true
2814 : } else {
2815 86 : false
2816 : };
2817 1037 : replay_history.extend(split_for_lsn.iter().map(|x| (*x).clone()));
2818 : // Only retain the items after the last image record
2819 1277 : for idx in (0..replay_history.len()).rev() {
2820 1277 : if replay_history[idx].2.will_init() {
2821 1037 : replay_history = replay_history[idx..].to_vec();
2822 1037 : break;
2823 240 : }
2824 : }
2825 1037 : if replay_history.is_empty() && !key_exists {
2826 : // The key does not exist at an earlier LSN, so we can skip this iteration.
2827 0 : retention.push(Vec::new());
2828 0 : continue;
2829 1037 : } else {
2830 1037 : key_exists = true;
2831 1037 : }
2832 1037 : let Some((_, _, val)) = replay_history.first() else {
2833 0 : unreachable!("replay history should not be empty once it exists")
2834 : };
2835 1037 : if !val.will_init() {
2836 0 : return Err(anyhow::anyhow!("invalid history, no base image")).with_context(|| {
2837 0 : generate_debug_trace(
2838 0 : Some(&replay_history),
2839 0 : full_history,
2840 0 : retain_lsn_below_horizon,
2841 0 : horizon,
2842 : )
2843 0 : });
2844 1037 : }
2845 : // Whether to reconstruct the image. In debug mode, we will generate an image
2846 : // at every retain_lsn to ensure data is not corrupted, but we won't put the
2847 : // image into the final layer.
2848 1037 : let img_and_lsn = if produce_image {
2849 306 : records_since_last_image = 0;
2850 306 : let replay_history_for_debug = if debug_mode {
2851 306 : Some(replay_history.clone())
2852 : } else {
2853 0 : None
2854 : };
2855 306 : let replay_history_for_debug_ref = replay_history_for_debug.as_deref();
2856 306 : let history = std::mem::take(&mut replay_history);
2857 306 : let mut img = None;
2858 306 : let mut records = Vec::with_capacity(history.len());
2859 306 : if let (_, lsn, Value::Image(val)) = history.first().as_ref().unwrap() {
2860 295 : img = Some((*lsn, val.clone()));
2861 295 : for (_, lsn, val) in history.into_iter().skip(1) {
2862 20 : let Value::WalRecord(rec) = val else {
2863 0 : return Err(anyhow::anyhow!(
2864 0 : "invalid record, first record is image, expect walrecords"
2865 0 : ))
2866 0 : .with_context(|| {
2867 0 : generate_debug_trace(
2868 0 : replay_history_for_debug_ref,
2869 0 : full_history,
2870 0 : retain_lsn_below_horizon,
2871 0 : horizon,
2872 : )
2873 0 : });
2874 : };
2875 20 : records.push((lsn, rec));
2876 : }
2877 : } else {
2878 18 : for (_, lsn, val) in history.into_iter() {
2879 18 : let Value::WalRecord(rec) = val else {
2880 0 : return Err(anyhow::anyhow!("invalid record, first record is walrecord, expect rest are walrecord"))
2881 0 : .with_context(|| generate_debug_trace(
2882 0 : replay_history_for_debug_ref,
2883 0 : full_history,
2884 0 : retain_lsn_below_horizon,
2885 0 : horizon,
2886 : ));
2887 : };
2888 18 : records.push((lsn, rec));
2889 : }
2890 : }
2891 : // WAL redo requires records in the reverse LSN order
2892 306 : records.reverse();
2893 306 : let state = ValueReconstructState { img, records };
2894 : // The last batch does not generate an image, so `i` is always in range, unless we force-generate
2895 : // an image during testing.
2896 306 : let request_lsn = if i >= lsn_split_points.len() {
2897 0 : Lsn::MAX
2898 : } else {
2899 306 : lsn_split_points[i]
2900 : };
2901 306 : let img = self
2902 306 : .reconstruct_value(key, request_lsn, state, RedoAttemptType::GcCompaction)
2903 306 : .await?;
2904 305 : Some((request_lsn, img))
2905 : } else {
2906 731 : None
2907 : };
2908 1036 : if produce_image {
2909 305 : let (request_lsn, img) = img_and_lsn.unwrap();
2910 305 : replay_history.push((key, request_lsn, Value::Image(img.clone())));
2911 305 : retention.push(vec![(request_lsn, Value::Image(img))]);
2912 305 : } else {
2913 731 : let deltas = split_for_lsn
2914 731 : .iter()
2915 731 : .map(|(_, lsn, value)| (*lsn, value.clone()))
2916 731 : .collect_vec();
2917 731 : retention.push(deltas);
2918 : }
2919 : }
2920 323 : let mut result = Vec::with_capacity(retention.len());
2921 323 : assert_eq!(retention.len(), lsn_split_points.len() + 1);
2922 1036 : for (idx, logs) in retention.into_iter().enumerate() {
2923 1036 : if idx == lsn_split_points.len() {
2924 323 : let retention = KeyHistoryRetention {
2925 323 : below_horizon: result,
2926 323 : above_horizon: KeyLogAtLsn(logs),
2927 323 : };
2928 323 : if verification {
2929 323 : retention
2930 323 : .verify(key, &base_img_from_ancestor, full_history, self)
2931 323 : .await?;
2932 0 : }
2933 323 : return Ok(retention);
2934 713 : } else {
2935 713 : result.push((lsn_split_points[idx], KeyLogAtLsn(logs)));
2936 713 : }
2937 : }
2938 0 : unreachable!("key retention is empty")
2939 324 : }
2940 :
2941 : /// Check how much space is left on the disk
2942 27 : async fn check_available_space(self: &Arc<Self>) -> anyhow::Result<u64> {
2943 27 : let tenants_dir = self.conf.tenants_path();
2944 :
2945 27 : let stat = Statvfs::get(&tenants_dir, None)
2946 27 : .context("statvfs failed, presumably directory got unlinked")?;
2947 :
2948 27 : let (avail_bytes, _) = stat.get_avail_total_bytes();
2949 :
2950 27 : Ok(avail_bytes)
2951 27 : }
2952 :
2953 : /// Check if the compaction can proceed safely without running out of space. We assume the total size
2954 : /// of the files produced by a compaction job is bounded by the total size of all layers involved in
2955 : /// the compaction. Therefore, we need at least `2 * layers_to_be_compacted_size` of free space to do a
2956 : /// compaction.
2957 27 : async fn check_compaction_space(
2958 27 : self: &Arc<Self>,
2959 27 : layer_selection: &[Layer],
2960 27 : ) -> Result<(), CompactionError> {
2961 27 : let available_space = self
2962 27 : .check_available_space()
2963 27 : .await
2964 27 : .map_err(CompactionError::Other)?;
2965 27 : let mut remote_layer_size = 0;
2966 27 : let mut all_layer_size = 0;
2967 106 : for layer in layer_selection {
2968 79 : let needs_download = layer
2969 79 : .needs_download()
2970 79 : .await
2971 79 : .context("failed to check if layer needs download")
2972 79 : .map_err(CompactionError::Other)?;
2973 79 : if needs_download.is_some() {
2974 0 : remote_layer_size += layer.layer_desc().file_size;
2975 79 : }
2976 79 : all_layer_size += layer.layer_desc().file_size;
2977 : }
2978 27 : let allocated_space = (available_space as f64 * 0.8) as u64; /* reserve 20% space for other tasks */
2979 27 : if all_layer_size /* space needed for newly-generated file */ + remote_layer_size /* space for downloading layers */ > allocated_space
2980 : {
2981 0 : return Err(CompactionError::Other(anyhow!(
2982 0 : "not enough space for compaction: available_space={}, allocated_space={}, all_layer_size={}, remote_layer_size={}, required_space={}",
2983 0 : available_space,
2984 0 : allocated_space,
2985 0 : all_layer_size,
2986 0 : remote_layer_size,
2987 0 : all_layer_size + remote_layer_size
2988 0 : )));
2989 27 : }
2990 27 : Ok(())
2991 27 : }
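    // [Editorial sketch, not part of the pageserver source] The space check above boils down to
    // simple arithmetic: reserve 20% of the currently available bytes for other tasks, and require
    // that the estimated output size (the sum of all selected layer sizes) plus the bytes that
    // still need to be downloaded fit into the remainder. A minimal standalone version, with plain
    // u64 inputs standing in for the values gathered from `Statvfs` and the layer descriptors:
    fn _sketch_has_enough_space_for_compaction(
        available_bytes: u64,
        all_layer_size: u64,
        remote_layer_size: u64,
    ) -> bool {
        // Keep 20% headroom for other tasks, mirroring the 0.8 factor above.
        let allocated = (available_bytes as f64 * 0.8) as u64;
        all_layer_size + remote_layer_size <= allocated
    }
    // E.g. with 100 GiB free, compacting 50 GiB of layers of which 10 GiB still need downloading
    // passes the check: 50 + 10 <= 80.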
2992 :
2993 : /// Check whether to bail out of gc-compaction early because it would use too much memory.
2994 27 : async fn check_memory_usage(
2995 27 : self: &Arc<Self>,
2996 27 : layer_selection: &[Layer],
2997 27 : ) -> Result<(), CompactionError> {
2998 27 : let mut estimated_memory_usage_mb = 0.0;
2999 27 : let mut num_image_layers = 0;
3000 27 : let mut num_delta_layers = 0;
3001 27 : let target_layer_size_bytes = 256 * 1024 * 1024;
3002 106 : for layer in layer_selection {
3003 79 : let layer_desc = layer.layer_desc();
3004 79 : if layer_desc.is_delta() {
3005 44 : // Delta layers have at most a 1MB buffer; use 3x to be safe (there are deltas as large as 16KB).
3006 44 : // Scale it by target_layer_size_bytes so that tests can pass (some tests, e.g., `test_pageserver_gc_compaction_preempt`,
3007 44 : // use a 3MB layer size and we need to account for that).
3008 44 : estimated_memory_usage_mb +=
3009 44 : 3.0 * (layer_desc.file_size / target_layer_size_bytes) as f64;
3010 44 : num_delta_layers += 1;
3011 44 : } else {
3012 35 : // Image layers at most have 1MB buffer but it might be compressed; assume 5x compression ratio.
3013 35 : estimated_memory_usage_mb +=
3014 35 : 5.0 * (layer_desc.file_size / target_layer_size_bytes) as f64;
3015 35 : num_image_layers += 1;
3016 35 : }
3017 : }
3018 27 : if estimated_memory_usage_mb > 1024.0 {
3019 0 : return Err(CompactionError::Other(anyhow!(
3020 0 : "estimated memory usage is too high: {}MB, giving up compaction; num_image_layers={}, num_delta_layers={}",
3021 0 : estimated_memory_usage_mb,
3022 0 : num_image_layers,
3023 0 : num_delta_layers
3024 0 : )));
3025 27 : }
3026 27 : Ok(())
3027 27 : }
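    // [Editorial sketch, not part of the pageserver source] The memory check above charges roughly
    // 3 MB of estimated usage per target-sized delta layer and 5 MB per target-sized image layer
    // (image layers are compressed on disk, so they expand more in memory), and bails out above
    // 1024 MB. Note that `file_size / target_layer_size_bytes` is integer division, so a layer
    // smaller than the 256 MiB target contributes nothing to the estimate. A standalone version
    // over `(is_delta, file_size)` pairs:
    fn _sketch_estimated_memory_mb(layers: &[(bool, u64)]) -> f64 {
        const TARGET: u64 = 256 * 1024 * 1024;
        layers
            .iter()
            .map(|&(is_delta, file_size)| {
                // Integer division, as in the check above.
                let units = (file_size / TARGET) as f64;
                if is_delta { 3.0 * units } else { 5.0 * units }
            })
            .sum()
    }
    // E.g. four 256 MiB delta layers and two 512 MiB image layers estimate to
    // 4 * 3.0 + 2 * 5.0 * 2.0 = 32.0 MB, far below the 1024.0 MB limit.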
3028 :
3029 : /// Get a watermark for gc-compaction, that is the lowest LSN that we can use as the `gc_horizon` for
3030 : /// the compaction algorithm. It is min(space_cutoff, time_cutoff, latest_gc_cutoff, standby_horizon).
3031 : /// Leases and retain_lsns are considered in the gc-compaction job itself so we don't need to account for them
3032 : /// here.
3033 28 : pub(crate) fn get_gc_compaction_watermark(self: &Arc<Self>) -> Lsn {
3034 28 : let gc_cutoff_lsn = {
3035 28 : let gc_info = self.gc_info.read().unwrap();
3036 28 : gc_info.min_cutoff()
3037 : };
3038 :
3039 : // TODO: standby horizon should use leases so we don't really need to consider it here.
3040 : // let watermark = watermark.min(self.standby_horizon.load());
3041 :
3042 : // TODO: ensure the child branches will not use anything below the watermark, or consider
3043 : // them when computing the watermark.
3044 28 : gc_cutoff_lsn.min(*self.get_applied_gc_cutoff_lsn())
3045 28 : }
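    // [Editorial sketch, not part of the pageserver source] As the doc comment above says, the
    // watermark is just the minimum of cutoffs that are tracked elsewhere (assuming
    // `GcInfo::min_cutoff()` yields the smaller of the space and time cutoffs); leases and
    // retain_lsns are handled inside the gc-compaction job itself. With plain u64s standing in
    // for `Lsn`:
    fn _sketch_gc_compaction_watermark(
        space_cutoff: u64,
        time_cutoff: u64,
        applied_gc_cutoff: u64,
    ) -> u64 {
        space_cutoff.min(time_cutoff).min(applied_gc_cutoff)
    }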
3046 :
3047 : /// Split a gc-compaction job into multiple compaction jobs. The split is based on the key range and the estimated size of the compaction job.
3048 : /// The function returns a list of compaction jobs that can be executed separately. If the upper bound of the compact LSN
3049 : /// range is not specified, we will use the latest gc_cutoff as the upper bound, so that all jobs in the jobset act
3050 : /// like a full compaction of the specified keyspace.
3051 0 : pub(crate) async fn gc_compaction_split_jobs(
3052 0 : self: &Arc<Self>,
3053 0 : job: GcCompactJob,
3054 0 : sub_compaction_max_job_size_mb: Option<u64>,
3055 0 : ) -> Result<Vec<GcCompactJob>, CompactionError> {
3056 0 : let compact_below_lsn = if job.compact_lsn_range.end != Lsn::MAX {
3057 0 : job.compact_lsn_range.end
3058 : } else {
3059 0 : self.get_gc_compaction_watermark()
3060 : };
3061 :
3062 0 : if compact_below_lsn == Lsn::INVALID {
3063 0 : tracing::warn!(
3064 0 : "no layers to compact with gc: gc_cutoff not generated yet, skipping gc bottom-most compaction"
3065 : );
3066 0 : return Ok(vec![]);
3067 0 : }
3068 :
3069 : // Split the compaction job into sub-jobs of about 4GB each
3070 : const GC_COMPACT_MAX_SIZE_MB: u64 = 4 * 1024;
3071 0 : let sub_compaction_max_job_size_mb =
3072 0 : sub_compaction_max_job_size_mb.unwrap_or(GC_COMPACT_MAX_SIZE_MB);
3073 :
3074 0 : let mut compact_jobs = Vec::<GcCompactJob>::new();
3075 : // For now, we simply use the key partitioning information; we should do a more fine-grained partitioning
3076 : // by estimating the amount of files read for a compaction job. We should also partition on LSN.
3077 0 : let ((dense_ks, sparse_ks), _) = self.partitioning.read().as_ref().clone();
3078 : // Truncate the key range to be within user specified compaction range.
3079 0 : fn truncate_to(
3080 0 : source_start: &Key,
3081 0 : source_end: &Key,
3082 0 : target_start: &Key,
3083 0 : target_end: &Key,
3084 0 : ) -> Option<(Key, Key)> {
3085 0 : let start = source_start.max(target_start);
3086 0 : let end = source_end.min(target_end);
3087 0 : if start < end {
3088 0 : Some((*start, *end))
3089 : } else {
3090 0 : None
3091 : }
3092 0 : }
3093 0 : let mut split_key_ranges = Vec::new();
3094 0 : let ranges = dense_ks
3095 0 : .parts
3096 0 : .iter()
3097 0 : .map(|partition| partition.ranges.iter())
3098 0 : .chain(sparse_ks.parts.iter().map(|x| x.0.ranges.iter()))
3099 0 : .flatten()
3100 0 : .cloned()
3101 0 : .collect_vec();
3102 0 : for range in ranges.iter() {
3103 0 : let Some((start, end)) = truncate_to(
3104 0 : &range.start,
3105 0 : &range.end,
3106 0 : &job.compact_key_range.start,
3107 0 : &job.compact_key_range.end,
3108 0 : ) else {
3109 0 : continue;
3110 : };
3111 0 : split_key_ranges.push((start, end));
3112 : }
3113 0 : split_key_ranges.sort();
3114 0 : let all_layers = {
3115 0 : let guard = self.layers.read(LayerManagerLockHolder::Compaction).await;
3116 0 : let layer_map = guard.layer_map()?;
3117 0 : layer_map.iter_historic_layers().collect_vec()
3118 : };
3119 0 : let mut current_start = None;
3120 0 : let ranges_num = split_key_ranges.len();
3121 0 : for (idx, (start, end)) in split_key_ranges.into_iter().enumerate() {
3122 0 : if current_start.is_none() {
3123 0 : current_start = Some(start);
3124 0 : }
3125 0 : let start = current_start.unwrap();
3126 0 : if start >= end {
3127 : // We have already processed this partition.
3128 0 : continue;
3129 0 : }
3130 0 : let overlapping_layers = {
3131 0 : let mut desc = Vec::new();
3132 0 : for layer in all_layers.iter() {
3133 0 : if overlaps_with(&layer.get_key_range(), &(start..end))
3134 0 : && layer.get_lsn_range().start <= compact_below_lsn
3135 0 : {
3136 0 : desc.push(layer.clone());
3137 0 : }
3138 : }
3139 0 : desc
3140 : };
3141 0 : let total_size = overlapping_layers.iter().map(|x| x.file_size).sum::<u64>();
3142 0 : if total_size > sub_compaction_max_job_size_mb * 1024 * 1024 || ranges_num == idx + 1 {
3143 : // Try to extend the compaction range so that we include at least one full layer file.
3144 0 : let extended_end = overlapping_layers
3145 0 : .iter()
3146 0 : .map(|layer| layer.key_range.end)
3147 0 : .min();
3148 : // It is possible that the search range does not contain any layer files when we reach the end of the loop.
3149 : // In this case, we simply use the specified key range end.
3150 0 : let end = if let Some(extended_end) = extended_end {
3151 0 : extended_end.max(end)
3152 : } else {
3153 0 : end
3154 : };
3155 0 : let end = if ranges_num == idx + 1 {
3156 : // extend the compaction range to the end of the key range if it's the last partition
3157 0 : end.max(job.compact_key_range.end)
3158 : } else {
3159 0 : end
3160 : };
3161 0 : if total_size == 0 && !compact_jobs.is_empty() {
3162 0 : info!(
3163 0 : "splitting compaction job: {}..{}, estimated_size={}, extending the previous job",
3164 : start, end, total_size
3165 : );
3166 0 : compact_jobs.last_mut().unwrap().compact_key_range.end = end;
3167 0 : current_start = Some(end);
3168 : } else {
3169 0 : info!(
3170 0 : "splitting compaction job: {}..{}, estimated_size={}",
3171 : start, end, total_size
3172 : );
3173 0 : compact_jobs.push(GcCompactJob {
3174 0 : dry_run: job.dry_run,
3175 0 : compact_key_range: start..end,
3176 0 : compact_lsn_range: job.compact_lsn_range.start..compact_below_lsn,
3177 0 : });
3178 0 : current_start = Some(end);
3179 : }
3180 0 : }
3181 : }
3182 0 : Ok(compact_jobs)
3183 0 : }
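    // [Editorial sketch, not part of the pageserver source] The job splitting above walks the key
    // partitioning, clips each partition to the requested compaction key range, and cuts a new
    // sub-job whenever the layers overlapping the accumulated range exceed the ~4 GiB cap (or the
    // partitions run out). A much simplified standalone version that uses `Range<u64>` as a
    // stand-in for key ranges and a pre-computed size per clipped partition instead of scanning
    // the layer map:
    fn _sketch_split_jobs(
        partitions: &[(std::ops::Range<u64>, u64)], // (clipped key range, estimated size in bytes)
        max_job_size: u64,
    ) -> Vec<std::ops::Range<u64>> {
        let mut jobs = Vec::new();
        let mut job_start: Option<u64> = None;
        let mut acc = 0u64;
        for (i, (range, size)) in partitions.iter().enumerate() {
            let start = *job_start.get_or_insert(range.start);
            acc += size;
            if acc > max_job_size || i + 1 == partitions.len() {
                jobs.push(start..range.end);
                job_start = None;
                acc = 0;
            }
        }
        jobs
    }
    // E.g. with a 4 GiB cap and partitions of 3 GiB, 2 GiB and 1 GiB, this yields two jobs:
    // one covering the first two partitions, one covering the last.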
3184 :
3185 : /// An experimental compaction building block that combines compaction with garbage collection.
3186 : ///
3187 : /// The current implementation picks all delta + image layers that are below or intersecting with
3188 : /// the GC horizon without considering retain_lsns. Then, it does a full compaction over all these delta
3189 : /// layers and image layers, which generates image layers at the gc horizon, drops deltas below the gc horizon,
3190 : /// and creates delta layers with all deltas >= gc horizon.
3191 : ///
3192 : /// If `options.compact_range` is provided, it will only compact the keys within the range, aka partial compaction.
3193 : /// Partial compaction will read and process all layers overlapping with the key range, even if they might
3194 : /// contain extra keys. After the gc-compaction phase completes, delta layers that are not fully contained
3195 : /// within the key range will be rewritten to ensure they do not overlap with the delta layers. Providing
3196 : /// Key::MIN..Key::MAX to the function indicates a full compaction, though technically, `Key::MAX` is not
3197 : /// part of the range.
3198 : ///
3199 : /// If `options.compact_lsn_range.end` is provided, the compaction will only compact layers below or intersect with
3200 : /// the LSN. Otherwise, it will use the gc cutoff by default.
3201 28 : pub(crate) async fn compact_with_gc(
3202 28 : self: &Arc<Self>,
3203 28 : cancel: &CancellationToken,
3204 28 : options: CompactOptions,
3205 28 : ctx: &RequestContext,
3206 28 : ) -> Result<CompactionOutcome, CompactionError> {
3207 28 : let sub_compaction = options.sub_compaction;
3208 28 : let job = GcCompactJob::from_compact_options(options.clone());
3209 28 : let yield_for_l0 = options.flags.contains(CompactFlags::YieldForL0);
3210 28 : if sub_compaction {
3211 0 : info!(
3212 0 : "running enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs"
3213 : );
3214 0 : let jobs = self
3215 0 : .gc_compaction_split_jobs(job, options.sub_compaction_max_job_size_mb)
3216 0 : .await?;
3217 0 : let jobs_len = jobs.len();
3218 0 : for (idx, job) in jobs.into_iter().enumerate() {
3219 0 : let sub_compaction_progress = format!("{}/{}", idx + 1, jobs_len);
3220 0 : self.compact_with_gc_inner(cancel, job, ctx, yield_for_l0)
3221 0 : .instrument(info_span!(
3222 : "sub_compaction",
3223 : sub_compaction_progress = sub_compaction_progress
3224 : ))
3225 0 : .await?;
3226 : }
3227 0 : if jobs_len == 0 {
3228 0 : info!("no jobs to run, skipping gc bottom-most compaction");
3229 0 : }
3230 0 : return Ok(CompactionOutcome::Done);
3231 28 : }
3232 28 : self.compact_with_gc_inner(cancel, job, ctx, yield_for_l0)
3233 28 : .await
3234 28 : }
3235 :
3236 28 : async fn compact_with_gc_inner(
3237 28 : self: &Arc<Self>,
3238 28 : cancel: &CancellationToken,
3239 28 : job: GcCompactJob,
3240 28 : ctx: &RequestContext,
3241 28 : yield_for_l0: bool,
3242 28 : ) -> Result<CompactionOutcome, CompactionError> {
3243 : // Block other compaction/GC tasks from running for now. GC-compaction could run along
3244 : // with legacy compaction tasks in the future. Always ensure the lock order is compaction -> gc.
3245 : // Note that we already acquired the compaction lock when the outer `compact` function gets called.
3246 :
3247 28 : let timer = Instant::now();
3248 28 : let begin_timer = timer;
3249 :
3250 28 : let gc_lock = async {
3251 28 : tokio::select! {
3252 28 : guard = self.gc_lock.lock() => Ok(guard),
3253 28 : _ = cancel.cancelled() => Err(CompactionError::new_cancelled()),
3254 : }
3255 28 : };
3256 :
3257 28 : let time_acquire_lock = timer.elapsed();
3258 28 : let timer = Instant::now();
3259 :
3260 28 : let gc_lock = crate::timed(
3261 28 : gc_lock,
3262 28 : "acquires gc lock",
3263 28 : std::time::Duration::from_secs(5),
3264 28 : )
3265 28 : .await?;
3266 :
3267 28 : let dry_run = job.dry_run;
3268 28 : let compact_key_range = job.compact_key_range;
3269 28 : let compact_lsn_range = job.compact_lsn_range;
3270 :
3271 28 : let debug_mode = cfg!(debug_assertions) || cfg!(feature = "testing");
3272 :
3273 28 : info!(
3274 0 : "running enhanced gc bottom-most compaction, dry_run={dry_run}, compact_key_range={}..{}, compact_lsn_range={}..{}",
3275 : compact_key_range.start,
3276 : compact_key_range.end,
3277 : compact_lsn_range.start,
3278 : compact_lsn_range.end
3279 : );
3280 :
3281 28 : scopeguard::defer! {
3282 : info!("done enhanced gc bottom-most compaction");
3283 : };
3284 :
3285 28 : let mut stat = CompactionStatistics::default();
3286 :
3287 : // Step 0: pick all delta layers + image layers below/intersect with the GC horizon.
3288 : // The layer selection has the following properties:
3289 : // 1. If a layer is in the selection, all layers below it are in the selection.
3290 : // 2. Inferred from (1), for each key in the layer selection, the value can be reconstructed only with the layers in the layer selection.
3291 27 : let job_desc = {
3292 28 : let guard = self
3293 28 : .layers
3294 28 : .read(LayerManagerLockHolder::GarbageCollection)
3295 28 : .await;
3296 28 : let layers = guard.layer_map()?;
3297 28 : let gc_info = self.gc_info.read().unwrap();
3298 28 : let mut retain_lsns_below_horizon = Vec::new();
3299 28 : let gc_cutoff = {
3300 : // Currently, gc-compaction only kicks in after the legacy gc has updated the gc_cutoff.
3301 : // Therefore, it can only clean up data that cannot be cleaned up with legacy gc, instead of
3302 : // cleaning everything that theoritically it could. In the future, it should use `self.gc_info`
3303 : // to get the truth data.
3304 28 : let real_gc_cutoff = self.get_gc_compaction_watermark();
3305 : // The compaction algorithm will keep all keys above the gc_cutoff while keeping only necessary keys below the gc_cutoff for
3306 : // each of the retain_lsn. Therefore, if the user-provided `compact_lsn_range.end` is larger than the real gc cutoff, we will use
3307 : // the real cutoff.
3308 28 : let mut gc_cutoff = if compact_lsn_range.end == Lsn::MAX {
3309 25 : if real_gc_cutoff == Lsn::INVALID {
3310 : // If the gc_cutoff is not generated yet, we should not compact anything.
3311 0 : tracing::warn!(
3312 0 : "no layers to compact with gc: gc_cutoff not generated yet, skipping gc bottom-most compaction"
3313 : );
3314 0 : return Ok(CompactionOutcome::Skipped);
3315 25 : }
3316 25 : real_gc_cutoff
3317 : } else {
3318 3 : compact_lsn_range.end
3319 : };
3320 28 : if gc_cutoff > real_gc_cutoff {
3321 2 : warn!(
3322 0 : "provided compact_lsn_range.end={} is larger than the real_gc_cutoff={}, using the real gc cutoff",
3323 : gc_cutoff, real_gc_cutoff
3324 : );
3325 2 : gc_cutoff = real_gc_cutoff;
3326 26 : }
3327 28 : gc_cutoff
3328 : };
3329 35 : for (lsn, _timeline_id, _is_offloaded) in &gc_info.retain_lsns {
3330 35 : if lsn < &gc_cutoff {
3331 35 : retain_lsns_below_horizon.push(*lsn);
3332 35 : }
3333 : }
3334 28 : for lsn in gc_info.leases.keys() {
3335 0 : if lsn < &gc_cutoff {
3336 0 : retain_lsns_below_horizon.push(*lsn);
3337 0 : }
3338 : }
3339 28 : let mut selected_layers: Vec<Layer> = Vec::new();
3340 28 : drop(gc_info);
3341 : // First, pick all the layers that intersect with or are below the gc_cutoff, and get the largest LSN among the selected layers.
3342 28 : let Some(max_layer_lsn) = layers
3343 28 : .iter_historic_layers()
3344 125 : .filter(|desc| desc.get_lsn_range().start <= gc_cutoff)
3345 107 : .map(|desc| desc.get_lsn_range().end)
3346 28 : .max()
3347 : else {
3348 0 : info!(
3349 0 : "no layers to compact with gc: no historic layers below gc_cutoff, gc_cutoff={}",
3350 : gc_cutoff
3351 : );
3352 0 : return Ok(CompactionOutcome::Done);
3353 : };
3354 : // Next, if the user specifies compact_lsn_range.start, we need to filter some layers out. All the layers (strictly) below
3355 : // the min_layer_lsn computed as below will be filtered out and the data will be accessed using the normal read path, as if
3356 : // it is a branch.
3357 28 : let Some(min_layer_lsn) = layers
3358 28 : .iter_historic_layers()
3359 125 : .filter(|desc| {
3360 125 : if compact_lsn_range.start == Lsn::INVALID {
3361 102 : true // select all layers below if start == Lsn(0)
3362 : } else {
3363 23 : desc.get_lsn_range().end > compact_lsn_range.start // strictly larger than compact_above_lsn
3364 : }
3365 125 : })
3366 116 : .map(|desc| desc.get_lsn_range().start)
3367 28 : .min()
3368 : else {
3369 0 : info!(
3370 0 : "no layers to compact with gc: no historic layers above compact_above_lsn, compact_above_lsn={}",
3371 : compact_lsn_range.end
3372 : );
3373 0 : return Ok(CompactionOutcome::Done);
3374 : };
3375 : // Then, pick all the layers that are below the max_layer_lsn. This is to ensure we can pick all single-key
3376 : // layers to compact.
3377 28 : let mut rewrite_layers = Vec::new();
3378 125 : for desc in layers.iter_historic_layers() {
3379 125 : if desc.get_lsn_range().end <= max_layer_lsn
3380 107 : && desc.get_lsn_range().start >= min_layer_lsn
3381 98 : && overlaps_with(&desc.get_key_range(), &compact_key_range)
3382 : {
3383 : // If the layer overlaps with the compaction key range, we need to read it to obtain all keys within the range,
3384 : // even if it might contain extra keys
3385 79 : selected_layers.push(guard.get_from_desc(&desc));
3386 : // If the layer is not fully contained within the key range, we need to rewrite it if it's a delta layer (it's fine
3387 : // to overlap image layers)
3388 79 : if desc.is_delta() && !fully_contains(&compact_key_range, &desc.get_key_range())
3389 1 : {
3390 1 : rewrite_layers.push(desc);
3391 78 : }
3392 46 : }
3393 : }
3394 28 : if selected_layers.is_empty() {
3395 1 : info!(
3396 0 : "no layers to compact with gc: no layers within the key range, gc_cutoff={}, key_range={}..{}",
3397 : gc_cutoff, compact_key_range.start, compact_key_range.end
3398 : );
3399 1 : return Ok(CompactionOutcome::Done);
3400 27 : }
3401 27 : retain_lsns_below_horizon.sort();
3402 27 : GcCompactionJobDescription {
3403 27 : selected_layers,
3404 27 : gc_cutoff,
3405 27 : retain_lsns_below_horizon,
3406 27 : min_layer_lsn,
3407 27 : max_layer_lsn,
3408 27 : compaction_key_range: compact_key_range,
3409 27 : rewrite_layers,
3410 27 : }
3411 : };
3412 27 : let (has_data_below, lowest_retain_lsn) = if compact_lsn_range.start != Lsn::INVALID {
3413 : // If we only compact above some LSN, we should get the history from the current branch below the specified LSN.
3414 : // We use job_desc.min_layer_lsn as if it's the lowest branch point.
3415 4 : (true, job_desc.min_layer_lsn)
3416 23 : } else if self.ancestor_timeline.is_some() {
3417 : // In theory, we can also use min_layer_lsn here, but using ancestor LSN makes sure the delta layers cover the
3418 : // LSN ranges all the way to the ancestor timeline.
3419 1 : (true, self.ancestor_lsn)
3420 : } else {
3421 22 : let res = job_desc
3422 22 : .retain_lsns_below_horizon
3423 22 : .first()
3424 22 : .copied()
3425 22 : .unwrap_or(job_desc.gc_cutoff);
3426 22 : if debug_mode {
3427 22 : assert_eq!(
3428 : res,
3429 22 : job_desc
3430 22 : .retain_lsns_below_horizon
3431 22 : .iter()
3432 22 : .min()
3433 22 : .copied()
3434 22 : .unwrap_or(job_desc.gc_cutoff)
3435 : );
3436 0 : }
3437 22 : (false, res)
3438 : };
3439 :
3440 27 : let verification = self.get_gc_compaction_settings().gc_compaction_verification;
3441 :
3442 27 : info!(
3443 0 : "picked {} layers for compaction ({} layers need rewriting) with max_layer_lsn={} min_layer_lsn={} gc_cutoff={} lowest_retain_lsn={}, key_range={}..{}, has_data_below={}",
3444 0 : job_desc.selected_layers.len(),
3445 0 : job_desc.rewrite_layers.len(),
3446 : job_desc.max_layer_lsn,
3447 : job_desc.min_layer_lsn,
3448 : job_desc.gc_cutoff,
3449 : lowest_retain_lsn,
3450 : job_desc.compaction_key_range.start,
3451 : job_desc.compaction_key_range.end,
3452 : has_data_below,
3453 : );
3454 :
3455 27 : let time_analyze = timer.elapsed();
3456 27 : let timer = Instant::now();
3457 :
3458 106 : for layer in &job_desc.selected_layers {
3459 79 : debug!("read layer: {}", layer.layer_desc().key());
3460 : }
3461 28 : for layer in &job_desc.rewrite_layers {
3462 1 : debug!("rewrite layer: {}", layer.key());
3463 : }
3464 :
3465 27 : self.check_compaction_space(&job_desc.selected_layers)
3466 27 : .await?;
3467 :
3468 27 : self.check_memory_usage(&job_desc.selected_layers).await?;
3469 27 : if job_desc.selected_layers.len() > 100
3470 0 : && job_desc.rewrite_layers.len() as f64 >= job_desc.selected_layers.len() as f64 * 0.7
3471 : {
3472 0 : return Err(CompactionError::Other(anyhow!(
3473 0 : "too many layers to rewrite: {} / {}, giving up compaction",
3474 0 : job_desc.rewrite_layers.len(),
3475 0 : job_desc.selected_layers.len()
3476 0 : )));
3477 27 : }
3478 :
3479 : // Generate statistics for the compaction
3480 106 : for layer in &job_desc.selected_layers {
3481 79 : let desc = layer.layer_desc();
3482 79 : if desc.is_delta() {
3483 44 : stat.visit_delta_layer(desc.file_size());
3484 44 : } else {
3485 35 : stat.visit_image_layer(desc.file_size());
3486 35 : }
3487 : }
3488 :
3489 : // Step 1: construct a k-merge iterator over all layers.
3490 : // Also, verify if the layer map can be split by drawing a horizontal line at every LSN start/end split point.
3491 27 : let layer_names = job_desc
3492 27 : .selected_layers
3493 27 : .iter()
3494 79 : .map(|layer| layer.layer_desc().layer_name())
3495 27 : .collect_vec();
3496 27 : if let Some(err) = check_valid_layermap(&layer_names) {
3497 0 : return Err(CompactionError::Other(anyhow!(
3498 0 : "gc-compaction layer map check failed because {}, cannot proceed with compaction due to potential data loss",
3499 0 : err
3500 0 : )));
3501 27 : }
3502 : // The maximum LSN we are processing in this compaction loop
3503 27 : let end_lsn = job_desc
3504 27 : .selected_layers
3505 27 : .iter()
3506 79 : .map(|l| l.layer_desc().lsn_range.end)
3507 27 : .max()
3508 27 : .unwrap();
3509 27 : let mut delta_layers = Vec::new();
3510 27 : let mut image_layers = Vec::new();
3511 27 : let mut downloaded_layers = Vec::new();
3512 27 : let mut total_downloaded_size = 0;
3513 27 : let mut total_layer_size = 0;
3514 106 : for layer in &job_desc.selected_layers {
3515 79 : if layer
3516 79 : .needs_download()
3517 79 : .await
3518 79 : .context("failed to check if layer needs download")
3519 79 : .map_err(CompactionError::Other)?
3520 79 : .is_some()
3521 0 : {
3522 0 : total_downloaded_size += layer.layer_desc().file_size;
3523 79 : }
3524 79 : total_layer_size += layer.layer_desc().file_size;
3525 79 : if cancel.is_cancelled() {
3526 0 : return Err(CompactionError::new_cancelled());
3527 79 : }
3528 79 : let should_yield = yield_for_l0
3529 0 : && self
3530 0 : .l0_compaction_trigger
3531 0 : .notified()
3532 0 : .now_or_never()
3533 0 : .is_some();
3534 79 : if should_yield {
3535 0 : tracing::info!("preempt gc-compaction when downloading layers: too many L0 layers");
3536 0 : return Ok(CompactionOutcome::YieldForL0);
3537 79 : }
3538 79 : let resident_layer = layer
3539 79 : .download_and_keep_resident(ctx)
3540 79 : .await
3541 79 : .context("failed to download and keep resident layer")
3542 79 : .map_err(CompactionError::Other)?;
3543 79 : downloaded_layers.push(resident_layer);
3544 : }
3545 27 : info!(
3546 0 : "finish downloading layers, downloaded={}, total={}, ratio={:.2}",
3547 : total_downloaded_size,
3548 : total_layer_size,
3549 0 : total_downloaded_size as f64 / total_layer_size as f64
3550 : );
3551 106 : for resident_layer in &downloaded_layers {
3552 79 : if resident_layer.layer_desc().is_delta() {
3553 44 : let layer = resident_layer
3554 44 : .get_as_delta(ctx)
3555 44 : .await
3556 44 : .context("failed to get delta layer")
3557 44 : .map_err(CompactionError::Other)?;
3558 44 : delta_layers.push(layer);
3559 : } else {
3560 35 : let layer = resident_layer
3561 35 : .get_as_image(ctx)
3562 35 : .await
3563 35 : .context("failed to get image layer")
3564 35 : .map_err(CompactionError::Other)?;
3565 35 : image_layers.push(layer);
3566 : }
3567 : }
3568 27 : let (dense_ks, sparse_ks) = self
3569 27 : .collect_gc_compaction_keyspace()
3570 27 : .await
3571 27 : .context("failed to collect gc compaction keyspace")
3572 27 : .map_err(CompactionError::Other)?;
3573 27 : let mut merge_iter = FilterIterator::create(
3574 27 : MergeIterator::create_with_options(
3575 27 : &delta_layers,
3576 27 : &image_layers,
3577 27 : ctx,
3578 27 : 128 * 8192, /* 1MB buffer for each of the inner iterators */
3579 : 128,
3580 : ),
3581 27 : dense_ks,
3582 27 : sparse_ks,
3583 : )
3584 27 : .context("failed to create filter iterator")
3585 27 : .map_err(CompactionError::Other)?;
3586 :
3587 27 : let time_download_layer = timer.elapsed();
3588 27 : let mut timer = Instant::now();
3589 :
3590 : // Step 2: Produce images+deltas.
3591 27 : let mut accumulated_values = Vec::new();
3592 27 : let mut accumulated_values_estimated_size = 0;
3593 27 : let mut last_key: Option<Key> = None;
3594 :
3595 : // Only create image layers when there are no ancestor branches. TODO: create a covering image layer
3596 : // when certain conditions are met.
3597 27 : let mut image_layer_writer = if !has_data_below {
3598 22 : Some(SplitImageLayerWriter::new(
3599 22 : self.conf,
3600 22 : self.timeline_id,
3601 22 : self.tenant_shard_id,
3602 22 : job_desc.compaction_key_range.start,
3603 22 : lowest_retain_lsn,
3604 22 : self.get_compaction_target_size(),
3605 22 : &self.gate,
3606 22 : self.cancel.clone(),
3607 22 : ))
3608 : } else {
3609 5 : None
3610 : };
3611 :
3612 27 : let mut delta_layer_writer = SplitDeltaLayerWriter::new(
3613 27 : self.conf,
3614 27 : self.timeline_id,
3615 27 : self.tenant_shard_id,
3616 27 : lowest_retain_lsn..end_lsn,
3617 27 : self.get_compaction_target_size(),
3618 27 : &self.gate,
3619 27 : self.cancel.clone(),
3620 : );
3621 :
3622 : #[derive(Default)]
3623 : struct RewritingLayers {
3624 : before: Option<DeltaLayerWriter>,
3625 : after: Option<DeltaLayerWriter>,
3626 : }
3627 27 : let mut delta_layer_rewriters = HashMap::<Arc<PersistentLayerKey>, RewritingLayers>::new();
3628 :
3629 : /// When compacting not at a bottom range (=`[0,X)`) of the root branch, we "have data below" (`has_data_below=true`).
3630 : /// The two cases are compaction in ancestor branches and when `compact_lsn_range.start` is set.
3631 : /// In those cases, we need to pull up data from below the LSN range we're compacting.
3632 : ///
3633 : /// This function unifies the cases so that later code doesn't have to think about it.
3634 : ///
3635 : /// Currently, we always get the ancestor image for each key in the child branch no matter whether the image
3636 : /// is needed for reconstruction. This should be fixed in the future.
3637 : ///
3638 : /// Furthermore, we should do vectored get instead of a single get, or better, use k-merge for ancestor
3639 : /// images.
3640 320 : async fn get_ancestor_image(
3641 320 : this_tline: &Arc<Timeline>,
3642 320 : key: Key,
3643 320 : ctx: &RequestContext,
3644 320 : has_data_below: bool,
3645 320 : history_lsn_point: Lsn,
3646 320 : ) -> anyhow::Result<Option<(Key, Lsn, Bytes)>> {
3647 320 : if !has_data_below {
3648 301 : return Ok(None);
3649 19 : };
3650 : // This function is implemented as a get of the current timeline at ancestor LSN, therefore reusing
3651 : // as much existing code as possible.
3652 19 : let img = this_tline.get(key, history_lsn_point, ctx).await?;
3653 19 : Ok(Some((key, history_lsn_point, img)))
3654 320 : }
3655 :
3656 : // Actually, we can decide not to write to the image layer at all at this point because
3657 : // the key and LSN range are determined. However, to keep things simple here, we still
3658 : // create this writer, and discard the writer in the end.
3659 27 : let mut time_to_first_kv_pair = None;
3660 :
3661 496 : while let Some(((key, lsn, val), desc)) = merge_iter
3662 496 : .next_with_trace()
3663 496 : .await
3664 496 : .context("failed to get next key-value pair")
3665 496 : .map_err(CompactionError::Other)?
3666 : {
3667 470 : if time_to_first_kv_pair.is_none() {
3668 27 : time_to_first_kv_pair = Some(timer.elapsed());
3669 27 : timer = Instant::now();
3670 443 : }
3671 :
3672 470 : if cancel.is_cancelled() {
3673 0 : return Err(CompactionError::new_cancelled());
3674 470 : }
3675 :
3676 470 : let should_yield = yield_for_l0
3677 0 : && self
3678 0 : .l0_compaction_trigger
3679 0 : .notified()
3680 0 : .now_or_never()
3681 0 : .is_some();
3682 470 : if should_yield {
3683 0 : tracing::info!("preempt gc-compaction in the main loop: too many L0 layers");
3684 0 : return Ok(CompactionOutcome::YieldForL0);
3685 470 : }
3686 470 : if self.shard_identity.is_key_disposable(&key) {
3687 : // If this shard does not need to store this key, simply skip it.
3688 : //
3689 : // This is not handled in the filter iterator because shard ownership is determined by hash.
3690 : // Therefore, unlike handling key spaces (ranges), it does not give us any performance benefit
3691 : // such as skipping a whole layer file.
3692 0 : if cfg!(debug_assertions) {
3693 0 : let shard = self.shard_identity.shard_index();
3694 0 : let owner = self.shard_identity.get_shard_number(&key);
3695 0 : panic!("key {key} does not belong on shard {shard}, owned by {owner}");
3696 0 : }
3697 0 : continue;
3698 470 : }
3699 470 : if !job_desc.compaction_key_range.contains(&key) {
3700 32 : if !desc.is_delta {
3701 30 : continue;
3702 2 : }
3703 2 : let rewriter = delta_layer_rewriters.entry(desc.clone()).or_default();
3704 2 : let rewriter = if key < job_desc.compaction_key_range.start {
3705 0 : if rewriter.before.is_none() {
3706 0 : rewriter.before = Some(
3707 0 : DeltaLayerWriter::new(
3708 0 : self.conf,
3709 0 : self.timeline_id,
3710 0 : self.tenant_shard_id,
3711 0 : desc.key_range.start,
3712 0 : desc.lsn_range.clone(),
3713 0 : &self.gate,
3714 0 : self.cancel.clone(),
3715 0 : ctx,
3716 0 : )
3717 0 : .await
3718 0 : .context("failed to create delta layer writer")
3719 0 : .map_err(CompactionError::Other)?,
3720 : );
3721 0 : }
3722 0 : rewriter.before.as_mut().unwrap()
3723 2 : } else if key >= job_desc.compaction_key_range.end {
3724 2 : if rewriter.after.is_none() {
3725 1 : rewriter.after = Some(
3726 1 : DeltaLayerWriter::new(
3727 1 : self.conf,
3728 1 : self.timeline_id,
3729 1 : self.tenant_shard_id,
3730 1 : job_desc.compaction_key_range.end,
3731 1 : desc.lsn_range.clone(),
3732 1 : &self.gate,
3733 1 : self.cancel.clone(),
3734 1 : ctx,
3735 1 : )
3736 1 : .await
3737 1 : .context("failed to create delta layer writer")
3738 1 : .map_err(CompactionError::Other)?,
3739 : );
3740 1 : }
3741 2 : rewriter.after.as_mut().unwrap()
3742 : } else {
3743 0 : unreachable!()
3744 : };
3745 2 : rewriter
3746 2 : .put_value(key, lsn, val, ctx)
3747 2 : .await
3748 2 : .context("failed to put value")
3749 2 : .map_err(CompactionError::Other)?;
3750 2 : continue;
3751 438 : }
3752 438 : match val {
3753 315 : Value::Image(_) => stat.visit_image_key(&val),
3754 123 : Value::WalRecord(_) => stat.visit_wal_key(&val),
3755 : }
3756 438 : if last_key.is_none() || last_key.as_ref() == Some(&key) {
3757 144 : if last_key.is_none() {
3758 27 : last_key = Some(key);
3759 117 : }
3760 144 : accumulated_values_estimated_size += val.estimated_size();
3761 144 : accumulated_values.push((key, lsn, val));
3762 :
3763 : // Accumulated values should never exceed 512MB.
3764 144 : if accumulated_values_estimated_size >= 1024 * 1024 * 512 {
3765 0 : return Err(CompactionError::Other(anyhow!(
3766 0 : "too many values for a single key: {} for key {}, {} items",
3767 0 : accumulated_values_estimated_size,
3768 0 : key,
3769 0 : accumulated_values.len()
3770 0 : )));
3771 144 : }
3772 : } else {
3773 294 : let last_key: &mut Key = last_key.as_mut().unwrap();
3774 294 : stat.on_unique_key_visited(); // TODO: adjust statistics for partial compaction
3775 294 : let retention = self
3776 294 : .generate_key_retention(
3777 294 : *last_key,
3778 294 : &accumulated_values,
3779 294 : job_desc.gc_cutoff,
3780 294 : &job_desc.retain_lsns_below_horizon,
3781 : COMPACTION_DELTA_THRESHOLD,
3782 294 : get_ancestor_image(self, *last_key, ctx, has_data_below, lowest_retain_lsn)
3783 294 : .await
3784 294 : .context("failed to get ancestor image")
3785 294 : .map_err(CompactionError::Other)?,
3786 294 : verification,
3787 : )
3788 294 : .await
3789 294 : .context("failed to generate key retention")
3790 294 : .map_err(CompactionError::Other)?;
3791 293 : retention
3792 293 : .pipe_to(
3793 293 : *last_key,
3794 293 : &mut delta_layer_writer,
3795 293 : image_layer_writer.as_mut(),
3796 293 : &mut stat,
3797 293 : ctx,
3798 293 : )
3799 293 : .await
3800 293 : .context("failed to pipe to delta layer writer")
3801 293 : .map_err(CompactionError::Other)?;
3802 293 : accumulated_values.clear();
3803 293 : *last_key = key;
3804 293 : accumulated_values_estimated_size = val.estimated_size();
3805 293 : accumulated_values.push((key, lsn, val));
3806 : }
3807 : }
3808 :
3809 : // TODO: move the below part to the loop body
3810 26 : let Some(last_key) = last_key else {
3811 0 : return Err(CompactionError::Other(anyhow!(
3812 0 : "no keys produced during compaction"
3813 0 : )));
3814 : };
3815 26 : stat.on_unique_key_visited();
3816 :
3817 26 : let retention = self
3818 26 : .generate_key_retention(
3819 26 : last_key,
3820 26 : &accumulated_values,
3821 26 : job_desc.gc_cutoff,
3822 26 : &job_desc.retain_lsns_below_horizon,
3823 : COMPACTION_DELTA_THRESHOLD,
3824 26 : get_ancestor_image(self, last_key, ctx, has_data_below, lowest_retain_lsn)
3825 26 : .await
3826 26 : .context("failed to get ancestor image")
3827 26 : .map_err(CompactionError::Other)?,
3828 26 : verification,
3829 : )
3830 26 : .await
3831 26 : .context("failed to generate key retention")
3832 26 : .map_err(CompactionError::Other)?;
3833 26 : retention
3834 26 : .pipe_to(
3835 26 : last_key,
3836 26 : &mut delta_layer_writer,
3837 26 : image_layer_writer.as_mut(),
3838 26 : &mut stat,
3839 26 : ctx,
3840 26 : )
3841 26 : .await
3842 26 : .context("failed to pipe to delta layer writer")
3843 26 : .map_err(CompactionError::Other)?;
3844 : // end: move the above part to the loop body
3845 :
3846 26 : let time_main_loop = timer.elapsed();
3847 26 : let timer = Instant::now();
3848 :
3849 26 : let mut rewrote_delta_layers = Vec::new();
3850 27 : for (key, writers) in delta_layer_rewriters {
3851 1 : if let Some(delta_writer_before) = writers.before {
3852 0 : let (desc, path) = delta_writer_before
3853 0 : .finish(job_desc.compaction_key_range.start, ctx)
3854 0 : .await
3855 0 : .context("failed to finish delta layer writer")
3856 0 : .map_err(CompactionError::Other)?;
3857 0 : let layer = Layer::finish_creating(self.conf, self, desc, &path)
3858 0 : .context("failed to finish creating delta layer")
3859 0 : .map_err(CompactionError::Other)?;
3860 0 : rewrote_delta_layers.push(layer);
3861 1 : }
3862 1 : if let Some(delta_writer_after) = writers.after {
3863 1 : let (desc, path) = delta_writer_after
3864 1 : .finish(key.key_range.end, ctx)
3865 1 : .await
3866 1 : .context("failed to finish delta layer writer")
3867 1 : .map_err(CompactionError::Other)?;
3868 1 : let layer = Layer::finish_creating(self.conf, self, desc, &path)
3869 1 : .context("failed to finish creating delta layer")
3870 1 : .map_err(CompactionError::Other)?;
3871 1 : rewrote_delta_layers.push(layer);
3872 0 : }
3873 : }
3874 :
3875 37 : let discard = |key: &PersistentLayerKey| {
3876 37 : let key = key.clone();
3877 37 : async move { KeyHistoryRetention::discard_key(&key, self, dry_run).await }
3878 37 : };
3879 :
3880 26 : let produced_image_layers = if let Some(writer) = image_layer_writer {
3881 21 : if !dry_run {
3882 19 : let end_key = job_desc.compaction_key_range.end;
3883 19 : writer
3884 19 : .finish_with_discard_fn(self, ctx, end_key, discard)
3885 19 : .await
3886 19 : .context("failed to finish image layer writer")
3887 19 : .map_err(CompactionError::Other)?
3888 : } else {
3889 2 : drop(writer);
3890 2 : Vec::new()
3891 : }
3892 : } else {
3893 5 : Vec::new()
3894 : };
3895 :
3896 26 : let produced_delta_layers = if !dry_run {
3897 24 : delta_layer_writer
3898 24 : .finish_with_discard_fn(self, ctx, discard)
3899 24 : .await
3900 24 : .context("failed to finish delta layer writer")
3901 24 : .map_err(CompactionError::Other)?
3902 : } else {
3903 2 : drop(delta_layer_writer);
3904 2 : Vec::new()
3905 : };
3906 :
3907 : // TODO: make image/delta/rewrote_delta layer generation atomic. We have already generated resident layers here, and if
3908 : // compaction is cancelled at this point, some of them might not be cleaned up.
3909 26 : let mut compact_to = Vec::new();
3910 26 : let mut keep_layers = HashSet::new();
3911 26 : let produced_delta_layers_len = produced_delta_layers.len();
3912 26 : let produced_image_layers_len = produced_image_layers.len();
3913 :
3914 26 : let layer_selection_by_key = job_desc
3915 26 : .selected_layers
3916 26 : .iter()
3917 76 : .map(|l| (l.layer_desc().key(), l.layer_desc().clone()))
3918 26 : .collect::<HashMap<_, _>>();
3919 :
3920 44 : for action in produced_delta_layers {
3921 18 : match action {
3922 11 : BatchWriterResult::Produced(layer) => {
3923 11 : if cfg!(debug_assertions) {
3924 11 : info!("produced delta layer: {}", layer.layer_desc().key());
3925 0 : }
3926 11 : stat.produce_delta_layer(layer.layer_desc().file_size());
3927 11 : compact_to.push(layer);
3928 : }
3929 7 : BatchWriterResult::Discarded(l) => {
3930 7 : if cfg!(debug_assertions) {
3931 7 : info!("discarded delta layer: {}", l);
3932 0 : }
3933 7 : if let Some(layer_desc) = layer_selection_by_key.get(&l) {
3934 7 : stat.discard_delta_layer(layer_desc.file_size());
3935 7 : } else {
3936 0 : tracing::warn!(
3937 0 : "discarded delta layer not in layer_selection: {}, produced a layer outside of the compaction key range?",
3938 : l
3939 : );
3940 0 : stat.discard_delta_layer(0);
3941 : }
3942 7 : keep_layers.insert(l);
3943 : }
3944 : }
3945 : }
3946 27 : for layer in &rewrote_delta_layers {
3947 1 : debug!(
3948 0 : "produced rewritten delta layer: {}",
3949 0 : layer.layer_desc().key()
3950 : );
3951 : // For now, we include rewritten delta layer size in the "produce_delta_layer". We could
3952 : // make it a separate statistic in the future.
3953 1 : stat.produce_delta_layer(layer.layer_desc().file_size());
3954 : }
3955 26 : compact_to.extend(rewrote_delta_layers);
3956 45 : for action in produced_image_layers {
3957 19 : match action {
3958 15 : BatchWriterResult::Produced(layer) => {
3959 15 : debug!("produced image layer: {}", layer.layer_desc().key());
3960 15 : stat.produce_image_layer(layer.layer_desc().file_size());
3961 15 : compact_to.push(layer);
3962 : }
3963 4 : BatchWriterResult::Discarded(l) => {
3964 4 : debug!("discarded image layer: {}", l);
3965 4 : if let Some(layer_desc) = layer_selection_by_key.get(&l) {
3966 4 : stat.discard_image_layer(layer_desc.file_size());
3967 4 : } else {
3968 0 : tracing::warn!(
3969 0 : "discarded image layer not in layer_selection: {}, produced a layer outside of the compaction key range?",
3970 : l
3971 : );
3972 0 : stat.discard_image_layer(0);
3973 : }
3974 4 : keep_layers.insert(l);
3975 : }
3976 : }
3977 : }
3978 :
3979 26 : let mut layer_selection = job_desc.selected_layers;
3980 :
3981 : // Partial compaction might select more data than it processes, e.g., if
3982 : // the compaction_key_range only partially overlaps:
3983 : //
3984 : // [---compaction_key_range---]
3985 : // [---A----][----B----][----C----][----D----]
3986 : //
3987 : // For delta layers, we will rewrite the layers so that it is cut exactly at
3988 : // the compaction key range, so we can always discard them. However, for image
3989 : // layers, as we do not rewrite them for now, we need to handle them differently.
3990 : // Assume image layers A, B, C, D are all in the `layer_selection`.
3991 : //
3992 : // The created image layers contain whatever is needed from B, C, and from
3993 : // `----]` of A, and from `[---` of D.
3994 : //
3995 : // In contrast, `[---A` and `D----]` have not been processed, so, we must
3996 : // keep that data.
3997 : //
3998 : // The solution for now is to keep A and D completely if they are image layers.
3999 : // (layer_selection is what we'll remove from the layer map, so, retain what
4000 : // is _not_ fully covered by compaction_key_range).
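 : // [Editorial worked example] Suppose compaction_key_range = 10..30 and the selection contains
 : // image layers A: 5..15, B: 15..20, C: 20..28, D: 28..35 (numbers are illustrative stand-ins
 : // for `Key`s). B and C are fully contained in 10..30, so they can be dropped once the new image
 : // layers are written, while A and D only partially overlap the range; the loop below inserts
 : // them into `keep_layers` so they stay in the layer map.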
4001 102 : for layer in &layer_selection {
4002 76 : if !layer.layer_desc().is_delta() {
4003 33 : if !overlaps_with(
4004 33 : &layer.layer_desc().key_range,
4005 33 : &job_desc.compaction_key_range,
4006 33 : ) {
4007 0 : return Err(CompactionError::Other(anyhow!(
4008 0 : "violated constraint: image layer outside of compaction key range"
4009 0 : )));
4010 33 : }
4011 33 : if !fully_contains(
4012 33 : &job_desc.compaction_key_range,
4013 33 : &layer.layer_desc().key_range,
4014 33 : ) {
4015 4 : keep_layers.insert(layer.layer_desc().key());
4016 29 : }
4017 43 : }
4018 : }
4019 :
4020 76 : layer_selection.retain(|x| !keep_layers.contains(&x.layer_desc().key()));
4021 :
4022 26 : let time_final_phase = timer.elapsed();
4023 :
4024 26 : stat.time_final_phase_secs = time_final_phase.as_secs_f64();
4025 26 : stat.time_to_first_kv_pair_secs = time_to_first_kv_pair
4026 26 : .unwrap_or(Duration::ZERO)
4027 26 : .as_secs_f64();
4028 26 : stat.time_main_loop_secs = time_main_loop.as_secs_f64();
4029 26 : stat.time_acquire_lock_secs = time_acquire_lock.as_secs_f64();
4030 26 : stat.time_download_layer_secs = time_download_layer.as_secs_f64();
4031 26 : stat.time_analyze_secs = time_analyze.as_secs_f64();
4032 26 : stat.time_total_secs = begin_timer.elapsed().as_secs_f64();
4033 26 : stat.finalize();
4034 :
4035 26 : info!(
4036 0 : "gc-compaction statistics: {}",
4037 0 : serde_json::to_string(&stat)
4038 0 : .context("failed to serialize gc-compaction statistics")
4039 0 : .map_err(CompactionError::Other)?
4040 : );
4041 :
4042 26 : if dry_run {
4043 2 : return Ok(CompactionOutcome::Done);
4044 24 : }
4045 :
4046 24 : info!(
4047 0 : "produced {} delta layers and {} image layers, {} layers are kept",
4048 : produced_delta_layers_len,
4049 : produced_image_layers_len,
4050 0 : keep_layers.len()
4051 : );
4052 :
4053 : // Step 3: Place back to the layer map.
4054 :
4055 : // First, do a sanity check to ensure the newly-created layer map does not contain overlaps.
4056 24 : let all_layers = {
4057 24 : let guard = self
4058 24 : .layers
4059 24 : .read(LayerManagerLockHolder::GarbageCollection)
4060 24 : .await;
4061 24 : let layer_map = guard.layer_map()?;
4062 24 : layer_map.iter_historic_layers().collect_vec()
4063 : };
4064 :
4065 24 : let mut final_layers = all_layers
4066 24 : .iter()
4067 107 : .map(|layer| layer.layer_name())
4068 24 : .collect::<HashSet<_>>();
4069 76 : for layer in &layer_selection {
4070 52 : final_layers.remove(&layer.layer_desc().layer_name());
4071 52 : }
4072 51 : for layer in &compact_to {
4073 27 : final_layers.insert(layer.layer_desc().layer_name());
4074 27 : }
4075 24 : let final_layers = final_layers.into_iter().collect_vec();
4076 :
4077 : // TODO: move this check before we call `finish` on image layer writers. However, this will require us to get the layer name before we finish
4078 : // the writer, so potentially, we will need a function like `ImageLayerBatchWriter::get_all_pending_layer_keys` to get all the keys that are
4079 : // in the writer before finalizing the persistent layers. Now we would leave some dangling layers on the disk if the check fails.
4080 24 : if let Some(err) = check_valid_layermap(&final_layers) {
4081 0 : return Err(CompactionError::Other(anyhow!(
4082 0 : "gc-compaction layer map check failed after compaction because {}, compaction result not applied to the layer map due to potential data loss",
4083 0 : err
4084 0 : )));
4085 24 : }
4086 :
4087 : // Between the sanity check and this compaction update, there could be new layers being flushed, but it should be fine because we only
4088 : // operate on L1 layers.
4089 : {
4090 : // Gc-compaction will rewrite the history of a key. This could happen in two ways:
4091 : //
4092 : // 1. We create an image layer to replace all the deltas below the compact LSN. In this case, assume
4093 : // we have 2 delta layers A and B, both below the compact LSN. We create an image layer I to replace
4094 : // A and B at the compact LSN. If the read path finishes reading A, yields, and now we update the layer
4095 : // map, the read path then cannot find any keys below A, reporting a missing key error, while the key
4096 : // now gets stored in I at the compact LSN.
4097 : //
4098 : // --------------- ---------------
4099 : // delta1@LSN20 image1@LSN20
4100 : // --------------- (read path collects delta@LSN20, => --------------- (read path cannot find anything
4101 : // delta1@LSN10 yields) below LSN 20)
4102 : // ---------------
4103 : //
4104 : // 2. We create a delta layer to replace all the deltas below the compact LSN, and in the delta layers,
4105 : // we combines the history of a key into a single image. For example, we have deltas at LSN 1, 2, 3, 4,
4106 : // Assume one delta layer contains LSN 1, 2, 3 and the other contains LSN 4.
4107 : //
4108 : // We let gc-compaction combine delta 2, 3, 4 into an image at LSN 4, which produces a delta layer that
4109 : // contains the delta at LSN 1, the image at LSN 4. If the read path finishes reading the original delta
4110 : // layer containing 4, yields, and we update the layer map to put the delta layer.
4111 : //
4112 : // --------------- ---------------
4113 : // delta1@LSN4 image1@LSN4
4114 : // --------------- (read path collects delta@LSN4, => --------------- (read path collects LSN4 and LSN1,
4115 : // delta1@LSN1-3 yields) delta1@LSN1 which is an invalid history)
4116 : // --------------- ---------------
4117 : //
4118 : // Therefore, the gc-compaction layer update operation should wait for all ongoing reads, block all pending reads,
4119 : // and only allow reads to continue after the update is finished.
4120 :
4121 24 : let update_guard = self.gc_compaction_layer_update_lock.write().await;
4122 : // Acquiring the update guard ensures current read operations end and new read operations are blocked.
4123 : // TODO: can we use `latest_gc_cutoff` Rcu to achieve the same effect?
4124 24 : let mut guard = self
4125 24 : .layers
4126 24 : .write(LayerManagerLockHolder::GarbageCollection)
4127 24 : .await;
4128 24 : guard
4129 24 : .open_mut()?
4130 24 : .finish_gc_compaction(&layer_selection, &compact_to, &self.metrics);
4131 24 : drop(update_guard); // Allow new reads to start ONLY after we finished updating the layer map.
4132 : };
4133 :
4134 : // Schedule an index-only upload to update the `latest_gc_cutoff` in the index_part.json.
4135 : // Otherwise, after restart, the index_part only contains the old `latest_gc_cutoff` and
4136 : // find_gc_cutoffs will try accessing things below the cutoff. TODO: ideally, this should
4137 : // be batched into `schedule_compaction_update`.
4138 24 : let disk_consistent_lsn = self.disk_consistent_lsn.load();
4139 24 : self.schedule_uploads(disk_consistent_lsn, None)
4140 24 : .context("failed to schedule uploads")
4141 24 : .map_err(CompactionError::Other)?;
4142 : // If a layer gets rewritten throughout gc-compaction, we need to keep that layer only in `compact_to` instead
4143 : // of `compact_from`.
4144 24 : let compact_from = {
4145 24 : let mut compact_from = Vec::new();
4146 24 : let mut compact_to_set = HashMap::new();
4147 51 : for layer in &compact_to {
4148 27 : compact_to_set.insert(layer.layer_desc().key(), layer);
4149 27 : }
4150 76 : for layer in &layer_selection {
4151 52 : if let Some(to) = compact_to_set.get(&layer.layer_desc().key()) {
4152 0 : tracing::info!(
4153 0 : "skipping delete {} because found same layer key at different generation {}",
4154 : layer,
4155 : to
4156 : );
4157 52 : } else {
4158 52 : compact_from.push(layer.clone());
4159 52 : }
4160 : }
4161 24 : compact_from
4162 : };
4163 24 : self.remote_client
4164 24 : .schedule_compaction_update(&compact_from, &compact_to)?;
4165 :
4166 24 : drop(gc_lock);
4167 :
4168 24 : Ok(CompactionOutcome::Done)
4169 28 : }
4170 : }
4171 :
4172 : struct TimelineAdaptor {
4173 : timeline: Arc<Timeline>,
4174 :
4175 : keyspace: (Lsn, KeySpace),
4176 :
4177 : new_deltas: Vec<ResidentLayer>,
4178 : new_images: Vec<ResidentLayer>,
4179 : layers_to_delete: Vec<Arc<PersistentLayerDesc>>,
4180 : }
4181 :
4182 : impl TimelineAdaptor {
4183 0 : pub fn new(timeline: &Arc<Timeline>, keyspace: (Lsn, KeySpace)) -> Self {
4184 0 : Self {
4185 0 : timeline: timeline.clone(),
4186 0 : keyspace,
4187 0 : new_images: Vec::new(),
4188 0 : new_deltas: Vec::new(),
4189 0 : layers_to_delete: Vec::new(),
4190 0 : }
4191 0 : }
4192 :
4193 0 : pub async fn flush_updates(&mut self) -> Result<(), CompactionError> {
4194 0 : let layers_to_delete = {
4195 0 : let guard = self
4196 0 : .timeline
4197 0 : .layers
4198 0 : .read(LayerManagerLockHolder::Compaction)
4199 0 : .await;
4200 0 : self.layers_to_delete
4201 0 : .iter()
4202 0 : .map(|x| guard.get_from_desc(x))
4203 0 : .collect::<Vec<Layer>>()
4204 : };
4205 0 : self.timeline
4206 0 : .finish_compact_batch(&self.new_deltas, &self.new_images, &layers_to_delete)
4207 0 : .await?;
4208 :
4209 0 : self.timeline
4210 0 : .upload_new_image_layers(std::mem::take(&mut self.new_images))?;
4211 :
4212 0 : self.new_deltas.clear();
4213 0 : self.layers_to_delete.clear();
4214 0 : Ok(())
4215 0 : }
4216 : }
4217 :
4218 : #[derive(Clone)]
4219 : struct ResidentDeltaLayer(ResidentLayer);
4220 : #[derive(Clone)]
4221 : struct ResidentImageLayer(ResidentLayer);
4222 :
4223 : impl CompactionJobExecutor for TimelineAdaptor {
4224 : type Key = pageserver_api::key::Key;
4225 :
4226 : type Layer = OwnArc<PersistentLayerDesc>;
4227 : type DeltaLayer = ResidentDeltaLayer;
4228 : type ImageLayer = ResidentImageLayer;
4229 :
4230 : type RequestContext = crate::context::RequestContext;
4231 :
4232 0 : fn get_shard_identity(&self) -> &ShardIdentity {
4233 0 : self.timeline.get_shard_identity()
4234 0 : }
4235 :
4236 0 : async fn get_layers(
4237 0 : &mut self,
4238 0 : key_range: &Range<Key>,
4239 0 : lsn_range: &Range<Lsn>,
4240 0 : _ctx: &RequestContext,
4241 0 : ) -> anyhow::Result<Vec<OwnArc<PersistentLayerDesc>>> {
4242 0 : self.flush_updates().await?;
4243 :
4244 0 : let guard = self
4245 0 : .timeline
4246 0 : .layers
4247 0 : .read(LayerManagerLockHolder::Compaction)
4248 0 : .await;
4249 0 : let layer_map = guard.layer_map()?;
4250 :
4251 0 : let result = layer_map
4252 0 : .iter_historic_layers()
4253 0 : .filter(|l| {
4254 0 : overlaps_with(&l.lsn_range, lsn_range) && overlaps_with(&l.key_range, key_range)
4255 0 : })
4256 0 : .map(OwnArc)
4257 0 : .collect();
4258 0 : Ok(result)
4259 0 : }
4260 :
4261 0 : async fn get_keyspace(
4262 0 : &mut self,
4263 0 : key_range: &Range<Key>,
4264 0 : lsn: Lsn,
4265 0 : _ctx: &RequestContext,
4266 0 : ) -> anyhow::Result<Vec<Range<Key>>> {
4267 0 : if lsn == self.keyspace.0 {
4268 0 : Ok(pageserver_compaction::helpers::intersect_keyspace(
4269 0 : &self.keyspace.1.ranges,
4270 0 : key_range,
4271 0 : ))
4272 : } else {
4273 : // The current compaction implementation only ever requests the key space
4274 : // at the compaction end LSN.
4275 0 : anyhow::bail!("keyspace not available for requested lsn");
4276 : }
4277 0 : }
4278 :
4279 0 : async fn downcast_delta_layer(
4280 0 : &self,
4281 0 : layer: &OwnArc<PersistentLayerDesc>,
4282 0 : ctx: &RequestContext,
4283 0 : ) -> anyhow::Result<Option<ResidentDeltaLayer>> {
4284 : // this is a lot more complex than a simple downcast...
4285 0 : if layer.is_delta() {
4286 0 : let l = {
4287 0 : let guard = self
4288 0 : .timeline
4289 0 : .layers
4290 0 : .read(LayerManagerLockHolder::Compaction)
4291 0 : .await;
4292 0 : guard.get_from_desc(layer)
4293 : };
4294 0 : let result = l.download_and_keep_resident(ctx).await?;
4295 :
4296 0 : Ok(Some(ResidentDeltaLayer(result)))
4297 : } else {
4298 0 : Ok(None)
4299 : }
4300 0 : }
4301 :
4302 0 : async fn create_image(
4303 0 : &mut self,
4304 0 : lsn: Lsn,
4305 0 : key_range: &Range<Key>,
4306 0 : ctx: &RequestContext,
4307 0 : ) -> anyhow::Result<()> {
4308 0 : Ok(self.create_image_impl(lsn, key_range, ctx).await?)
4309 0 : }
4310 :
4311 0 : async fn create_delta(
4312 0 : &mut self,
4313 0 : lsn_range: &Range<Lsn>,
4314 0 : key_range: &Range<Key>,
4315 0 : input_layers: &[ResidentDeltaLayer],
4316 0 : ctx: &RequestContext,
4317 0 : ) -> anyhow::Result<()> {
4318 0 : debug!("Create new layer {}..{}", lsn_range.start, lsn_range.end);
4319 :
4320 0 : let mut all_entries = Vec::new();
4321 0 : for dl in input_layers.iter() {
4322 0 : all_entries.extend(dl.load_keys(ctx).await?);
4323 : }
4324 :
4325 : // The current stdlib sort implementation is particularly fast when the slice is made up
4326 : // of already-sorted sub-ranges, which is the case here: each input layer yields its entries in order.
4327 0 : all_entries.sort_by_key(|DeltaEntry { key, lsn, .. }| (*key, *lsn));
4328 :
4329 0 : let mut writer = DeltaLayerWriter::new(
4330 0 : self.timeline.conf,
4331 0 : self.timeline.timeline_id,
4332 0 : self.timeline.tenant_shard_id,
4333 0 : key_range.start,
4334 0 : lsn_range.clone(),
4335 0 : &self.timeline.gate,
4336 0 : self.timeline.cancel.clone(),
4337 0 : ctx,
4338 0 : )
4339 0 : .await?;
4340 :
4341 0 : let mut dup_values = 0;
4342 :
4343 : // This loop walks through all key-value pairs from all the layers
4344 : // we're compacting, in (key, LSN) order.
4345 0 : let mut prev: Option<(Key, Lsn)> = None;
4346 : for &DeltaEntry {
4347 0 : key, lsn, ref val, ..
4348 0 : } in all_entries.iter()
4349 : {
4350 0 : if prev == Some((key, lsn)) {
4351 : // This is a duplicate. Skip it.
4352 : //
4353 : // It can happen if compaction is interrupted after writing some
4354 : // layers but not all, and we are compacting the range again.
4355 : // The calculations in the algorithm assume that there are no
4356 : // duplicates, so the math on targeted file size is likely off,
4357 : // and we will create smaller files than expected.
4358 0 : dup_values += 1;
4359 0 : continue;
4360 0 : }
4361 :
4362 0 : let value = val.load(ctx).await?;
4363 :
4364 0 : writer.put_value(key, lsn, value, ctx).await?;
4365 :
4366 0 : prev = Some((key, lsn));
4367 : }
4368 :
4369 0 : if dup_values > 0 {
4370 0 : warn!("delta layer created with {} duplicate values", dup_values);
4371 0 : }
4372 :
4373 0 : fail_point!("delta-layer-writer-fail-before-finish", |_| {
4374 0 : Err(anyhow::anyhow!(
4375 0 : "failpoint delta-layer-writer-fail-before-finish"
4376 0 : ))
4377 0 : });
4378 :
4379 0 : let (desc, path) = writer.finish(prev.unwrap().0.next(), ctx).await?;
4380 0 : let new_delta_layer =
4381 0 : Layer::finish_creating(self.timeline.conf, &self.timeline, desc, &path)?;
4382 :
4383 0 : self.new_deltas.push(new_delta_layer);
4384 0 : Ok(())
4385 0 : }
4386 :
4387 0 : async fn delete_layer(
4388 0 : &mut self,
4389 0 : layer: &OwnArc<PersistentLayerDesc>,
4390 0 : _ctx: &RequestContext,
4391 0 : ) -> anyhow::Result<()> {
4392 0 : self.layers_to_delete.push(layer.clone().0);
4393 0 : Ok(())
4394 0 : }
4395 : }
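// Editor's note: an illustrative driver showing the intended call order of the
// CompactionJobExecutor callbacks implemented above (discover layers, downcast to
// deltas, merge into a new delta, delete the inputs). The real algorithm lives in the
// `pageserver_compaction` crate and is considerably more involved; the function below
// is a hypothetical sketch, assuming the trait's method signatures match the impl
// above, and is not how the crate actually drives this adaptor.
#[allow(dead_code)]
async fn toy_compact_range<E: CompactionJobExecutor>(
    executor: &mut E,
    key_range: Range<E::Key>,
    lsn_range: Range<Lsn>,
    ctx: &E::RequestContext,
) -> anyhow::Result<()> {
    // 1. Which layers overlap the target key/LSN rectangle?
    let layers = executor.get_layers(&key_range, &lsn_range, ctx).await?;

    // 2. Keep only the delta layers; image layers are left alone in this sketch.
    let mut input_deltas = Vec::new();
    let mut input_descs = Vec::new();
    for layer in &layers {
        if let Some(delta) = executor.downcast_delta_layer(layer, ctx).await? {
            input_deltas.push(delta);
            input_descs.push(layer);
        }
    }
    if input_deltas.len() < 2 {
        return Ok(()); // nothing worth merging
    }

    // 3. Merge all inputs into one new delta layer covering the same rectangle.
    executor
        .create_delta(&lsn_range, &key_range, &input_deltas, ctx)
        .await?;

    // 4. The inputs are now redundant; let the executor schedule their deletion.
    for layer in input_descs {
        executor.delete_layer(layer, ctx).await?;
    }
    Ok(())
}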
4396 :
4397 : impl TimelineAdaptor {
4398 0 : async fn create_image_impl(
4399 0 : &mut self,
4400 0 : lsn: Lsn,
4401 0 : key_range: &Range<Key>,
4402 0 : ctx: &RequestContext,
4403 0 : ) -> Result<(), CreateImageLayersError> {
4404 0 : let timer = self.timeline.metrics.create_images_time_histo.start_timer();
4405 :
4406 0 : let image_layer_writer = ImageLayerWriter::new(
4407 0 : self.timeline.conf,
4408 0 : self.timeline.timeline_id,
4409 0 : self.timeline.tenant_shard_id,
4410 0 : key_range,
4411 0 : lsn,
4412 0 : &self.timeline.gate,
4413 0 : self.timeline.cancel.clone(),
4414 0 : ctx,
4415 0 : )
4416 0 : .await
4417 0 : .map_err(CreateImageLayersError::Other)?;
4418 :
4419 0 : fail_point!("image-layer-writer-fail-before-finish", |_| {
4420 0 : Err(CreateImageLayersError::Other(anyhow::anyhow!(
4421 0 : "failpoint image-layer-writer-fail-before-finish"
4422 0 : )))
4423 0 : });
4424 :
4425 0 : let keyspace = KeySpace {
4426 0 : ranges: self
4427 0 : .get_keyspace(key_range, lsn, ctx)
4428 0 : .await
4429 0 : .map_err(CreateImageLayersError::Other)?,
4430 : };
4431 : // TODO set proper (stateful) start. The create_image_layer_for_rel_blocks function mostly
4432 0 : let outcome = self
4433 0 : .timeline
4434 0 : .create_image_layer_for_rel_blocks(
4435 0 : &keyspace,
4436 0 : image_layer_writer,
4437 0 : lsn,
4438 0 : ctx,
4439 0 : key_range.clone(),
4440 0 : IoConcurrency::sequential(),
4441 0 : None,
4442 0 : )
4443 0 : .await?;
4444 :
4445 : if let ImageLayerCreationOutcome::Generated {
4446 0 : unfinished_image_layer,
4447 0 : } = outcome
4448 : {
4449 0 : let (desc, path) = unfinished_image_layer
4450 0 : .finish(ctx)
4451 0 : .await
4452 0 : .map_err(CreateImageLayersError::Other)?;
4453 0 : let image_layer =
4454 0 : Layer::finish_creating(self.timeline.conf, &self.timeline, desc, &path)
4455 0 : .map_err(CreateImageLayersError::Other)?;
4456 0 : self.new_images.push(image_layer);
4457 0 : }
4458 :
4459 0 : timer.stop_and_record();
4460 :
4461 0 : Ok(())
4462 0 : }
4463 : }
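// Editor's note on the `fail_point!` calls above (`delta-layer-writer-fail-before-finish`
// and `image-layer-writer-fail-before-finish`): they are inert unless the failpoint is
// armed, either through the FAILPOINTS environment variable (when failpoints are set up
// at process start) or programmatically via the `fail` crate. A sketch of arming one in a
// test; the function name below is hypothetical:
#[allow(dead_code)]
fn arm_delta_writer_failpoint_for_test() {
    // Make the delta layer writer error out just before finish(), exercising the
    // error path handled in create_delta() above.
    fail::cfg("delta-layer-writer-fail-before-finish", "return").unwrap();
    // ... run the compaction under test and assert that it surfaces the failure ...
    fail::remove("delta-layer-writer-fail-before-finish");
}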
4464 :
4465 : impl CompactionRequestContext for crate::context::RequestContext {}
4466 :
4467 : #[derive(Debug, Clone)]
4468 : pub struct OwnArc<T>(pub Arc<T>);
4469 :
4470 : impl<T> Deref for OwnArc<T> {
4471 : type Target = <Arc<T> as Deref>::Target;
4472 0 : fn deref(&self) -> &Self::Target {
4473 0 : &self.0
4474 0 : }
4475 : }
4476 :
4477 : impl<T> AsRef<T> for OwnArc<T> {
4478 0 : fn as_ref(&self) -> &T {
4479 0 : self.0.as_ref()
4480 0 : }
4481 : }
4482 :
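// Editor's note: `OwnArc` appears to exist to satisfy Rust's orphan rule. Both
// `CompactionLayer` (from the compaction crate) and `Arc` (from std) are foreign here,
// so `impl CompactionLayer<Key> for Arc<PersistentLayerDesc>` would be rejected (E0117);
// wrapping the Arc in a local newtype makes the impls below legal, with Deref/AsRef
// forwarding to the inner descriptor. A minimal, self-contained analogue with
// hypothetical names:
#[allow(dead_code)]
mod own_arc_orphan_rule_demo {
    use std::fmt;
    use std::sync::Arc;

    struct LayerInfo {
        name: String,
        bytes: u64,
    }

    // `impl fmt::Display for Arc<LayerInfo>` would not compile: the trait and the
    // outer type are both defined outside this crate. A local wrapper fixes that.
    struct Own(Arc<LayerInfo>);

    impl fmt::Display for Own {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "{} ({} bytes)", self.0.name, self.0.bytes)
        }
    }
}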
4483 : impl CompactionLayer<Key> for OwnArc<PersistentLayerDesc> {
4484 0 : fn key_range(&self) -> &Range<Key> {
4485 0 : &self.key_range
4486 0 : }
4487 0 : fn lsn_range(&self) -> &Range<Lsn> {
4488 0 : &self.lsn_range
4489 0 : }
4490 0 : fn file_size(&self) -> u64 {
4491 0 : self.file_size
4492 0 : }
4493 0 : fn short_id(&self) -> std::string::String {
4494 0 : self.as_ref().short_id().to_string()
4495 0 : }
4496 0 : fn is_delta(&self) -> bool {
4497 0 : self.as_ref().is_delta()
4498 0 : }
4499 : }
4500 :
4501 : impl CompactionLayer<Key> for OwnArc<DeltaLayer> {
4502 0 : fn key_range(&self) -> &Range<Key> {
4503 0 : &self.layer_desc().key_range
4504 0 : }
4505 0 : fn lsn_range(&self) -> &Range<Lsn> {
4506 0 : &self.layer_desc().lsn_range
4507 0 : }
4508 0 : fn file_size(&self) -> u64 {
4509 0 : self.layer_desc().file_size
4510 0 : }
4511 0 : fn short_id(&self) -> std::string::String {
4512 0 : self.layer_desc().short_id().to_string()
4513 0 : }
4514 0 : fn is_delta(&self) -> bool {
4515 0 : true
4516 0 : }
4517 : }
4518 :
4519 : impl CompactionLayer<Key> for ResidentDeltaLayer {
4520 0 : fn key_range(&self) -> &Range<Key> {
4521 0 : &self.0.layer_desc().key_range
4522 0 : }
4523 0 : fn lsn_range(&self) -> &Range<Lsn> {
4524 0 : &self.0.layer_desc().lsn_range
4525 0 : }
4526 0 : fn file_size(&self) -> u64 {
4527 0 : self.0.layer_desc().file_size
4528 0 : }
4529 0 : fn short_id(&self) -> std::string::String {
4530 0 : self.0.layer_desc().short_id().to_string()
4531 0 : }
4532 0 : fn is_delta(&self) -> bool {
4533 0 : true
4534 0 : }
4535 : }
4536 :
4537 : impl CompactionDeltaLayer<TimelineAdaptor> for ResidentDeltaLayer {
4538 : type DeltaEntry<'a> = DeltaEntry<'a>;
4539 :
4540 0 : async fn load_keys(&self, ctx: &RequestContext) -> anyhow::Result<Vec<DeltaEntry<'_>>> {
4541 0 : self.0.get_as_delta(ctx).await?.index_entries(ctx).await
4542 0 : }
4543 : }
4544 :
4545 : impl CompactionLayer<Key> for ResidentImageLayer {
4546 0 : fn key_range(&self) -> &Range<Key> {
4547 0 : &self.0.layer_desc().key_range
4548 0 : }
4549 0 : fn lsn_range(&self) -> &Range<Lsn> {
4550 0 : &self.0.layer_desc().lsn_range
4551 0 : }
4552 0 : fn file_size(&self) -> u64 {
4553 0 : self.0.layer_desc().file_size
4554 0 : }
4555 0 : fn short_id(&self) -> std::string::String {
4556 0 : self.0.layer_desc().short_id().to_string()
4557 0 : }
4558 0 : fn is_delta(&self) -> bool {
4559 0 : false
4560 0 : }
4561 : }
4562 : impl CompactionImageLayer<TimelineAdaptor> for ResidentImageLayer {}