Line data Source code
1 : //! New compaction implementation. The algorithm itself is implemented in the
2 : //! compaction crate. This file implements the callbacks and structs that allow
3 : //! the algorithm to drive the process.
4 : //!
5 : //! The old legacy algorithm is implemented directly in `timeline.rs`.
6 :
7 : use std::collections::{BinaryHeap, HashSet};
8 : use std::ops::{Deref, Range};
9 : use std::sync::Arc;
10 :
11 : use super::layer_manager::LayerManager;
12 : use super::{
13 : CompactFlags, CreateImageLayersError, DurationRecorder, ImageLayerCreationMode,
14 : RecordedDuration, Timeline,
15 : };
16 :
17 : use anyhow::{anyhow, bail, Context};
18 : use bytes::Bytes;
19 : use enumset::EnumSet;
20 : use fail::fail_point;
21 : use itertools::Itertools;
22 : use pageserver_api::key::KEY_SIZE;
23 : use pageserver_api::keyspace::ShardedRange;
24 : use pageserver_api::shard::{ShardCount, ShardIdentity, TenantShardId};
25 : use serde::Serialize;
26 : use tokio_util::sync::CancellationToken;
27 : use tracing::{debug, info, info_span, trace, warn, Instrument};
28 : use utils::id::TimelineId;
29 :
30 : use crate::context::{AccessStatsBehavior, RequestContext, RequestContextBuilder};
31 : use crate::page_cache;
32 : use crate::tenant::checks::check_valid_layermap;
33 : use crate::tenant::remote_timeline_client::WaitCompletionError;
34 : use crate::tenant::storage_layer::filter_iterator::FilterIterator;
35 : use crate::tenant::storage_layer::merge_iterator::MergeIterator;
36 : use crate::tenant::storage_layer::split_writer::{
37 : SplitDeltaLayerWriter, SplitImageLayerWriter, SplitWriterResult,
38 : };
39 : use crate::tenant::storage_layer::{
40 : AsLayerDesc, PersistentLayerDesc, PersistentLayerKey, ValueReconstructState,
41 : };
42 : use crate::tenant::timeline::ImageLayerCreationOutcome;
43 : use crate::tenant::timeline::{drop_rlock, DeltaLayerWriter, ImageLayerWriter};
44 : use crate::tenant::timeline::{Layer, ResidentLayer};
45 : use crate::tenant::DeltaLayer;
46 : use crate::virtual_file::{MaybeFatalIo, VirtualFile};
47 : use pageserver_api::config::tenant_conf_defaults::{
48 : DEFAULT_CHECKPOINT_DISTANCE, DEFAULT_COMPACTION_THRESHOLD,
49 : };
50 :
51 : use crate::keyspace::KeySpace;
52 : use crate::repository::{Key, Value};
53 : use crate::walrecord::NeonWalRecord;
54 :
55 : use utils::lsn::Lsn;
56 :
57 : use pageserver_compaction::helpers::overlaps_with;
58 : use pageserver_compaction::interface::*;
59 :
60 : use super::CompactionError;
61 :
62 : /// Maximum number of deltas before generating an image layer in bottom-most compaction.
63 : const COMPACTION_DELTA_THRESHOLD: usize = 5;
64 :
65 : /// The result of bottom-most compaction for a single key at each LSN.
66 : #[derive(Debug)]
67 : #[cfg_attr(test, derive(PartialEq))]
68 : pub struct KeyLogAtLsn(pub Vec<(Lsn, Value)>);
69 :
70 : /// The result of bottom-most compaction.
71 : #[derive(Debug)]
72 : #[cfg_attr(test, derive(PartialEq))]
73 : pub(crate) struct KeyHistoryRetention {
74 : /// Stores logs to reconstruct the value at the given LSN, that is to say, logs <= LSN or image == LSN.
75 : pub(crate) below_horizon: Vec<(Lsn, KeyLogAtLsn)>,
76 : /// Stores logs to reconstruct the value at any LSN above the horizon, that is to say, log > LSN.
77 : pub(crate) above_horizon: KeyLogAtLsn,
78 : }
79 :
80 : impl KeyHistoryRetention {
81 : /// Hack: skip the delta layer if we need to produce a layer with the same key-lsn range.
82 : ///
83 : /// This can happen if we have removed some deltas in "the middle" of some existing layer's key-lsn-range.
84 : /// For example, consider the case where a single delta with range [0x10,0x50) exists.
85 : /// And we have branches at LSN 0x10, 0x20, 0x30.
86 : /// Then we delete branch @ 0x20.
87 : /// Bottom-most compaction may now delete the delta [0x20,0x30).
88 : /// And that wouldn't change the shape of the layer.
89 : ///
90 : /// Note that bottom-most-gc-compaction never _adds_ new data in that case, only removes.
91 : ///
92 : /// `discard_key` will only be called when the writer reaches its target (instead of for every key), so it's fine to grab a lock inside.
93 114 : async fn discard_key(key: &PersistentLayerKey, tline: &Arc<Timeline>, dry_run: bool) -> bool {
94 114 : if dry_run {
95 0 : return true;
96 114 : }
97 114 : let guard = tline.layers.read().await;
98 114 : if !guard.contains_key(key) {
99 66 : return false;
100 48 : }
101 48 : let layer_generation = guard.get_from_key(key).metadata().generation;
102 48 : drop(guard);
103 48 : if layer_generation == tline.generation {
104 48 : info!(
105 : key=%key,
106 : ?layer_generation,
107 0 : "discard layer due to duplicated layer key in the same generation",
108 : );
109 48 : true
110 : } else {
111 0 : false
112 : }
113 114 : }
114 :
115 : /// Pipe a history of a single key to the writers.
116 : ///
117 : /// If `image_writer` is `None`, the images will be placed into the delta layers.
118 : /// The delta writer will contain all images and deltas (below and above the horizon) except the bottom-most images.
119 : #[allow(clippy::too_many_arguments)]
120 1266 : async fn pipe_to(
121 1266 : self,
122 1266 : key: Key,
123 1266 : tline: &Arc<Timeline>,
124 1266 : delta_writer: &mut SplitDeltaLayerWriter,
125 1266 : mut image_writer: Option<&mut SplitImageLayerWriter>,
126 1266 : stat: &mut CompactionStatistics,
127 1266 : dry_run: bool,
128 1266 : ctx: &RequestContext,
129 1266 : ) -> anyhow::Result<()> {
130 1266 : let mut first_batch = true;
131 1266 : let discard = |key: &PersistentLayerKey| {
132 0 : let key = key.clone();
133 0 : async move { Self::discard_key(&key, tline, dry_run).await }
134 0 : };
135 4206 : for (cutoff_lsn, KeyLogAtLsn(logs)) in self.below_horizon {
136 2940 : if first_batch {
137 1266 : if logs.len() == 1 && logs[0].1.is_image() {
138 1224 : let Value::Image(img) = &logs[0].1 else {
139 0 : unreachable!()
140 : };
141 1224 : stat.produce_image_key(img);
142 1224 : if let Some(image_writer) = image_writer.as_mut() {
143 1224 : image_writer
144 1224 : .put_image_with_discard_fn(key, img.clone(), tline, ctx, discard)
145 1242 : .await?;
146 : } else {
147 0 : delta_writer
148 0 : .put_value_with_discard_fn(
149 0 : key,
150 0 : cutoff_lsn,
151 0 : Value::Image(img.clone()),
152 0 : tline,
153 0 : ctx,
154 0 : discard,
155 0 : )
156 0 : .await?;
157 : }
158 : } else {
159 84 : for (lsn, val) in logs {
160 42 : stat.produce_key(&val);
161 42 : delta_writer
162 42 : .put_value_with_discard_fn(key, lsn, val, tline, ctx, discard)
163 3 : .await?;
164 : }
165 : }
166 1266 : first_batch = false;
167 : } else {
168 1920 : for (lsn, val) in logs {
169 246 : stat.produce_key(&val);
170 246 : delta_writer
171 246 : .put_value_with_discard_fn(key, lsn, val, tline, ctx, discard)
172 24 : .await?;
173 : }
174 : }
175 : }
176 1266 : let KeyLogAtLsn(above_horizon_logs) = self.above_horizon;
177 1362 : for (lsn, val) in above_horizon_logs {
178 96 : stat.produce_key(&val);
179 96 : delta_writer
180 96 : .put_value_with_discard_fn(key, lsn, val, tline, ctx, discard)
181 6 : .await?;
182 : }
183 1266 : Ok(())
184 1266 : }
185 : }
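To make the `below_horizon` / `above_horizon` split concrete, here is a hedged sketch (not part of the pageserver source) of the retention that the worked example in the `generate_key_retention` docs would produce: history A@0x10 .. +F@0x60, horizon 0x50, retain LSNs 0x20 and 0x40, delta threshold 3. The record payloads are placeholders; real entries come out of `generate_key_retention`.

```rust
// Illustrative only. Relies on this module's existing imports (Bytes, Lsn, Value, NeonWalRecord).
fn example_retention() -> KeyHistoryRetention {
    // Stand-in delta record; real records carry actual WAL payloads.
    let delta = |rec: &'static [u8]| {
        Value::WalRecord(NeonWalRecord::Postgres {
            will_init: false,
            rec: Bytes::from_static(rec),
        })
    };
    KeyHistoryRetention {
        below_horizon: vec![
            // the lowest retain LSN always gets a single image
            (Lsn(0x20), KeyLogAtLsn(vec![(Lsn(0x20), Value::Image(Bytes::from_static(b"AB")))])),
            // only two deltas since the last image: keep them as deltas
            (Lsn(0x40), KeyLogAtLsn(vec![(Lsn(0x30), delta(b"+C")), (Lsn(0x40), delta(b"+D"))])),
            // at the horizon an image is generated but placed into the delta layer
            (Lsn(0x50), KeyLogAtLsn(vec![(Lsn(0x50), Value::Image(Bytes::from_static(b"ABCDE")))])),
        ],
        // the full history above the horizon is kept as deltas
        above_horizon: KeyLogAtLsn(vec![(Lsn(0x60), delta(b"+F"))]),
    }
}
```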
186 :
187 : #[derive(Debug, Serialize, Default)]
188 : struct CompactionStatisticsNumSize {
189 : num: u64,
190 : size: u64,
191 : }
192 :
193 : #[derive(Debug, Serialize, Default)]
194 : pub struct CompactionStatistics {
195 : delta_layer_visited: CompactionStatisticsNumSize,
196 : image_layer_visited: CompactionStatisticsNumSize,
197 : delta_layer_produced: CompactionStatisticsNumSize,
198 : image_layer_produced: CompactionStatisticsNumSize,
199 : num_delta_layer_discarded: usize,
200 : num_image_layer_discarded: usize,
201 : num_unique_keys_visited: usize,
202 : wal_keys_visited: CompactionStatisticsNumSize,
203 : image_keys_visited: CompactionStatisticsNumSize,
204 : wal_produced: CompactionStatisticsNumSize,
205 : image_produced: CompactionStatisticsNumSize,
206 : }
207 :
208 : impl CompactionStatistics {
209 2058 : fn estimated_size_of_value(val: &Value) -> usize {
210 798 : match val {
211 1260 : Value::Image(img) => img.len(),
212 0 : Value::WalRecord(NeonWalRecord::Postgres { rec, .. }) => rec.len(),
213 798 : _ => std::mem::size_of::<NeonWalRecord>(),
214 : }
215 2058 : }
216 3288 : fn estimated_size_of_key() -> usize {
217 3288 : KEY_SIZE // TODO: distinguish image layer and delta layer (count LSN in delta layer)
218 3288 : }
219 138 : fn visit_delta_layer(&mut self, size: u64) {
220 138 : self.delta_layer_visited.num += 1;
221 138 : self.delta_layer_visited.size += size;
222 138 : }
223 108 : fn visit_image_layer(&mut self, size: u64) {
224 108 : self.image_layer_visited.num += 1;
225 108 : self.image_layer_visited.size += size;
226 108 : }
227 1266 : fn on_unique_key_visited(&mut self) {
228 1266 : self.num_unique_keys_visited += 1;
229 1266 : }
230 420 : fn visit_wal_key(&mut self, val: &Value) {
231 420 : self.wal_keys_visited.num += 1;
232 420 : self.wal_keys_visited.size +=
233 420 : Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
234 420 : }
235 1260 : fn visit_image_key(&mut self, val: &Value) {
236 1260 : self.image_keys_visited.num += 1;
237 1260 : self.image_keys_visited.size +=
238 1260 : Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
239 1260 : }
240 384 : fn produce_key(&mut self, val: &Value) {
241 384 : match val {
242 6 : Value::Image(img) => self.produce_image_key(img),
243 378 : Value::WalRecord(_) => self.produce_wal_key(val),
244 : }
245 384 : }
246 378 : fn produce_wal_key(&mut self, val: &Value) {
247 378 : self.wal_produced.num += 1;
248 378 : self.wal_produced.size +=
249 378 : Self::estimated_size_of_value(val) as u64 + Self::estimated_size_of_key() as u64;
250 378 : }
251 1230 : fn produce_image_key(&mut self, val: &Bytes) {
252 1230 : self.image_produced.num += 1;
253 1230 : self.image_produced.size += val.len() as u64 + Self::estimated_size_of_key() as u64;
254 1230 : }
255 24 : fn discard_delta_layer(&mut self) {
256 24 : self.num_delta_layer_discarded += 1;
257 24 : }
258 24 : fn discard_image_layer(&mut self) {
259 24 : self.num_image_layer_discarded += 1;
260 24 : }
261 30 : fn produce_delta_layer(&mut self, size: u64) {
262 30 : self.delta_layer_produced.num += 1;
263 30 : self.delta_layer_produced.size += size;
264 30 : }
265 36 : fn produce_image_layer(&mut self, size: u64) {
266 36 : self.image_layer_produced.num += 1;
267 36 : self.image_layer_produced.size += size;
268 36 : }
269 : }
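As a small hedged check of the accounting above: a produced image key is charged its payload length plus the fixed `KEY_SIZE` overhead, while produced WAL keys get an estimated record size plus the same overhead. A minimal sketch using the module-private fields:

```rust
// Sketch; only callable from within this module since the fields are module-visible.
fn stats_accounting_example() {
    let img = Bytes::from_static(b"page image bytes");
    let mut stat = CompactionStatistics::default();
    stat.produce_image_key(&img);
    assert_eq!(stat.image_produced.num, 1);
    // size = payload length + fixed per-key overhead
    assert_eq!(
        stat.image_produced.size,
        img.len() as u64 + CompactionStatistics::estimated_size_of_key() as u64
    );
}
```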
270 :
271 : impl Timeline {
272 : /// TODO: cancellation
273 : ///
274 : /// Returns whether the compaction has pending tasks.
275 1092 : pub(crate) async fn compact_legacy(
276 1092 : self: &Arc<Self>,
277 1092 : cancel: &CancellationToken,
278 1092 : flags: EnumSet<CompactFlags>,
279 1092 : ctx: &RequestContext,
280 1092 : ) -> Result<bool, CompactionError> {
281 1092 : if flags.contains(CompactFlags::EnhancedGcBottomMostCompaction) {
282 0 : self.compact_with_gc(cancel, flags, ctx)
283 0 : .await
284 0 : .map_err(CompactionError::Other)?;
285 0 : return Ok(false);
286 1092 : }
287 1092 :
288 1092 : if flags.contains(CompactFlags::DryRun) {
289 0 : return Err(CompactionError::Other(anyhow!(
290 0 : "dry-run mode is not supported for legacy compaction for now"
291 0 : )));
292 1092 : }
293 1092 :
294 1092 : // High level strategy for compaction / image creation:
295 1092 : //
296 1092 : // 1. First, calculate the desired "partitioning" of the
297 1092 : // currently in-use key space. The goal is to partition the
298 1092 : // key space into roughly fixed-size chunks, but also take into
299 1092 : // account any existing image layers, and try to align the
300 1092 : // chunk boundaries with the existing image layers to avoid
301 1092 : // too much churn. Also try to align chunk boundaries with
302 1092 : // relation boundaries. In principle, we don't know about
303 1092 : // relation boundaries here, we just deal with key-value
304 1092 : // pairs, and the code in pgdatadir_mapping.rs knows how to
305 1092 : // map relations into key-value pairs. But in practice we know
306 1092 : // that 'field6' is the block number, and the fields 1-5
307 1092 : // identify a relation. This is just an optimization,
308 1092 : // though.
309 1092 : //
310 1092 : // 2. Once we know the partitioning, for each partition,
311 1092 : // decide if it's time to create a new image layer. The
312 1092 : // criteria is: there has been too much "churn" since the last
313 1092 : // image layer? The "churn" is fuzzy concept, it's a
314 1092 : // combination of too many delta files, or too much WAL in
315 1092 : // total in the delta file. Or perhaps: if creating an image
316 1092 : // file would allow to delete some older files.
317 1092 : //
318 1092 : // 3. After that, we compact all level0 delta files if there
319 1092 : // are too many of them. While compacting, we also garbage
320 1092 : // collect any page versions that are no longer needed because
321 1092 : // of the new image layers we created in step 2.
322 1092 : //
323 1092 : // TODO: This high level strategy hasn't been implemented yet.
324 1092 : // Below are functions compact_level0() and create_image_layers()
325 1092 : // but they are a bit ad hoc and don't quite work like it's explained
326 1092 : // above. Rewrite it.
327 1092 :
328 1092 : // Is the timeline being deleted?
329 1092 : if self.is_stopping() {
330 0 : trace!("Dropping out of compaction on timeline shutdown");
331 0 : return Err(CompactionError::ShuttingDown);
332 1092 : }
333 1092 :
334 1092 : let target_file_size = self.get_checkpoint_distance();
335 :
336 : // Define partitioning schema if needed
337 :
338 : // FIXME: the match should only cover repartitioning, not the next steps
339 1092 : let (partition_count, has_pending_tasks) = match self
340 1092 : .repartition(
341 1092 : self.get_last_record_lsn(),
342 1092 : self.get_compaction_target_size(),
343 1092 : flags,
344 1092 : ctx,
345 1092 : )
346 48122 : .await
347 : {
348 1092 : Ok(((dense_partitioning, sparse_partitioning), lsn)) => {
349 1092 : // Disables access_stats updates, so that the files we read remain candidates for eviction after we're done with them
350 1092 : let image_ctx = RequestContextBuilder::extend(ctx)
351 1092 : .access_stats_behavior(AccessStatsBehavior::Skip)
352 1092 : .build();
353 1092 :
354 1092 : // 2. Compact
355 1092 : let timer = self.metrics.compact_time_histo.start_timer();
356 29656 : let fully_compacted = self.compact_level0(target_file_size, ctx).await?;
357 1092 : timer.stop_and_record();
358 1092 :
359 1092 : let mut partitioning = dense_partitioning;
360 1092 : partitioning
361 1092 : .parts
362 1092 : .extend(sparse_partitioning.into_dense().parts);
363 1092 :
364 1092 : // 3. Create new image layers for partitions that have been modified
365 1092 : // "enough". Skip image layer creation if L0 compaction cannot keep up.
366 1092 : if fully_compacted {
367 1092 : let image_layers = self
368 1092 : .create_image_layers(
369 1092 : &partitioning,
370 1092 : lsn,
371 1092 : if flags.contains(CompactFlags::ForceImageLayerCreation) {
372 42 : ImageLayerCreationMode::Force
373 : } else {
374 1050 : ImageLayerCreationMode::Try
375 : },
376 1092 : &image_ctx,
377 : )
378 40943 : .await?;
379 :
380 1092 : self.upload_new_image_layers(image_layers)?;
381 : } else {
382 0 : info!("skipping image layer generation due to L0 compaction did not include all layers.");
383 : }
384 1092 : (partitioning.parts.len(), !fully_compacted)
385 : }
386 0 : Err(err) => {
387 0 : // no partitioning? This is normal, if the timeline was just created
388 0 : // as an empty timeline. Also in unit tests, when we use the timeline
389 0 : // as a simple key-value store, ignoring the datadir layout. Log the
390 0 : // error but continue.
391 0 : //
392 0 : // Suppress error when it's due to cancellation
393 0 : if !self.cancel.is_cancelled() {
394 0 : tracing::error!("could not compact, repartitioning keyspace failed: {err:?}");
395 0 : }
396 0 : (1, false)
397 : }
398 : };
399 :
400 1092 : if self.shard_identity.count >= ShardCount::new(2) {
401 : // Limit the number of layer rewrites to the number of partitions: this means its
402 : // runtime should be comparable to a full round of image layer creations, rather than
403 : // being potentially much longer.
404 0 : let rewrite_max = partition_count;
405 0 :
406 0 : self.compact_shard_ancestors(rewrite_max, ctx).await?;
407 1092 : }
408 :
409 1092 : Ok(has_pending_tasks)
410 1092 : }
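Because `compact_legacy` returns whether work remains (it reports `!fully_compacted` when the L0 size limit truncated the pass), a caller can simply loop on the return value. A minimal sketch, assuming a `Timeline`, cancellation token, and request context are already at hand; this is not the actual background-task driver:

```rust
// Hypothetical driver loop; real scheduling lives in the compaction background task.
async fn drive_compaction(
    timeline: &std::sync::Arc<Timeline>,
    cancel: &CancellationToken,
    ctx: &RequestContext,
) -> Result<(), CompactionError> {
    loop {
        let pending = timeline.compact_legacy(cancel, EnumSet::empty(), ctx).await?;
        if !pending {
            break; // every L0 layer was included in the last pass
        }
        // The pass was truncated by the L0 size limit; run another one right away.
    }
    Ok(())
}
```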
411 :
412 : /// Check for layers that are eligible to be rewritten:
413 : /// - Shard splitting: after a shard split, rewrite ancestor layers beyond pitr_interval so that
414 : /// we don't indefinitely retain keys in this shard that aren't needed.
415 : /// - For future use: layers beyond pitr_interval that are in formats we would
416 : /// rather not maintain compatibility with indefinitely.
417 : ///
418 : /// Note: this phase may read and write many gigabytes of data: use rewrite_max to bound
419 : /// how much work it will try to do in each compaction pass.
420 0 : async fn compact_shard_ancestors(
421 0 : self: &Arc<Self>,
422 0 : rewrite_max: usize,
423 0 : ctx: &RequestContext,
424 0 : ) -> Result<(), CompactionError> {
425 0 : let mut drop_layers = Vec::new();
426 0 : let mut layers_to_rewrite: Vec<Layer> = Vec::new();
427 0 :
428 0 : // We will use the Lsn cutoff of the last GC as a threshold for rewriting layers: if a
429 0 : // layer is behind this Lsn, it indicates that the layer is being retained beyond the
430 0 : // pitr_interval, for example because a branchpoint references it.
431 0 : //
432 0 : // Holding this read guard also blocks [`Self::gc_timeline`] from entering while we
433 0 : // are rewriting layers.
434 0 : let latest_gc_cutoff = self.get_latest_gc_cutoff_lsn();
435 0 :
436 0 : tracing::info!(
437 0 : "latest_gc_cutoff: {}, pitr cutoff {}",
438 0 : *latest_gc_cutoff,
439 0 : self.gc_info.read().unwrap().cutoffs.time
440 : );
441 :
442 0 : let layers = self.layers.read().await;
443 0 : for layer_desc in layers.layer_map()?.iter_historic_layers() {
444 0 : let layer = layers.get_from_desc(&layer_desc);
445 0 : if layer.metadata().shard.shard_count == self.shard_identity.count {
446 : // This layer does not belong to a historic ancestor, no need to re-image it.
447 0 : continue;
448 0 : }
449 0 :
450 0 : // This layer was created on an ancestor shard: check if it contains any data for this shard.
451 0 : let sharded_range = ShardedRange::new(layer_desc.get_key_range(), &self.shard_identity);
452 0 : let layer_local_page_count = sharded_range.page_count();
453 0 : let layer_raw_page_count = ShardedRange::raw_size(&layer_desc.get_key_range());
454 0 : if layer_local_page_count == 0 {
455 : // This ancestral layer only covers keys that belong to other shards.
456 : // We include the full metadata in the log: if we had some critical bug that caused
457 : // us to incorrectly drop layers, this would simplify manually debugging + reinstating those layers.
458 0 : info!(%layer, old_metadata=?layer.metadata(),
459 0 : "dropping layer after shard split, contains no keys for this shard.",
460 : );
461 :
462 0 : if cfg!(debug_assertions) {
463 : // Expensive, exhaustive check of keys in this layer: this guards against ShardedRange's calculations being
464 : // wrong. If ShardedRange claims the local page count is zero, then no keys in this layer
465 : // should be !is_key_disposable()
466 0 : let range = layer_desc.get_key_range();
467 0 : let mut key = range.start;
468 0 : while key < range.end {
469 0 : debug_assert!(self.shard_identity.is_key_disposable(&key));
470 0 : key = key.next();
471 : }
472 0 : }
473 :
474 0 : drop_layers.push(layer);
475 0 : continue;
476 0 : } else if layer_local_page_count != u32::MAX
477 0 : && layer_local_page_count == layer_raw_page_count
478 : {
479 0 : debug!(%layer,
480 0 : "layer is entirely shard local ({} keys), no need to filter it",
481 : layer_local_page_count
482 : );
483 0 : continue;
484 0 : }
485 0 :
486 0 : // Don't bother re-writing a layer unless it will at least halve its size
487 0 : if layer_local_page_count != u32::MAX
488 0 : && layer_local_page_count > layer_raw_page_count / 2
489 : {
490 0 : debug!(%layer,
491 0 : "layer is already mostly local ({}/{}), not rewriting",
492 : layer_local_page_count,
493 : layer_raw_page_count
494 : );
495 0 : }
496 :
497 : // Don't bother re-writing a layer if it is within the PITR window: it will age-out eventually
498 : // without incurring the I/O cost of a rewrite.
499 0 : if layer_desc.get_lsn_range().end >= *latest_gc_cutoff {
500 0 : debug!(%layer, "Skipping rewrite of layer still in GC window ({} >= {})",
501 0 : layer_desc.get_lsn_range().end, *latest_gc_cutoff);
502 0 : continue;
503 0 : }
504 0 :
505 0 : if layer_desc.is_delta() {
506 : // We do not yet implement rewrite of delta layers
507 0 : debug!(%layer, "Skipping rewrite of delta layer");
508 0 : continue;
509 0 : }
510 0 :
511 0 : // Only rewrite layers if their generations differ. This guarantees:
512 0 : // - that local rewrite is safe, as local layer paths will differ between existing layer and rewritten one
513 0 : // - that the layer is persistent in remote storage, as we only see old-generation'd layer via loading from remote storage
514 0 : if layer.metadata().generation == self.generation {
515 0 : debug!(%layer, "Skipping rewrite, is not from old generation");
516 0 : continue;
517 0 : }
518 0 :
519 0 : if layers_to_rewrite.len() >= rewrite_max {
520 0 : tracing::info!(%layer, "Will rewrite layer on a future compaction, already rewrote {}",
521 0 : layers_to_rewrite.len()
522 : );
523 0 : continue;
524 0 : }
525 0 :
526 0 : // Fall through: all our conditions for doing a rewrite passed.
527 0 : layers_to_rewrite.push(layer);
528 : }
529 :
530 : // Drop read lock on layer map before we start doing time-consuming I/O
531 0 : drop(layers);
532 0 :
533 0 : let mut replace_image_layers = Vec::new();
534 :
535 0 : for layer in layers_to_rewrite {
536 0 : tracing::info!(layer=%layer, "Rewriting layer after shard split...");
537 0 : let mut image_layer_writer = ImageLayerWriter::new(
538 0 : self.conf,
539 0 : self.timeline_id,
540 0 : self.tenant_shard_id,
541 0 : &layer.layer_desc().key_range,
542 0 : layer.layer_desc().image_layer_lsn(),
543 0 : ctx,
544 0 : )
545 0 : .await
546 0 : .map_err(CompactionError::Other)?;
547 :
548 : // Safety of layer rewrites:
549 : // - We are writing to a different local file path than we are reading from, so the old Layer
550 : // cannot interfere with the new one.
551 : // - In the page cache, contents for a particular VirtualFile are stored with a file_id that
552 : // is different for two layers with the same name (in `ImageLayerInner::new` we always
553 : // acquire a fresh id from [`crate::page_cache::next_file_id`]. So readers do not risk
554 : // reading the index from one layer file, and then data blocks from the rewritten layer file.
555 : // - Any readers that have a reference to the old layer will keep it alive until they are done
556 : // with it. If they are trying to promote from remote storage, that will fail, but this is the same
557 : // as for compaction generally: compaction is allowed to delete layers that readers might be trying to use.
558 : // - We do not run concurrently with other kinds of compaction, so the only layer map writes we race with are:
559 : // - GC, which at worst witnesses us "undelete" a layer that they just deleted.
560 : // - ingestion, which only inserts layers, therefore cannot collide with us.
561 0 : let resident = layer.download_and_keep_resident().await?;
562 :
563 0 : let keys_written = resident
564 0 : .filter(&self.shard_identity, &mut image_layer_writer, ctx)
565 0 : .await?;
566 :
567 0 : if keys_written > 0 {
568 0 : let (desc, path) = image_layer_writer
569 0 : .finish(ctx)
570 0 : .await
571 0 : .map_err(CompactionError::Other)?;
572 0 : let new_layer = Layer::finish_creating(self.conf, self, desc, &path)
573 0 : .map_err(CompactionError::Other)?;
574 0 : tracing::info!(layer=%new_layer, "Rewrote layer, {} -> {} bytes",
575 0 : layer.metadata().file_size,
576 0 : new_layer.metadata().file_size);
577 :
578 0 : replace_image_layers.push((layer, new_layer));
579 0 : } else {
580 0 : // Drop the old layer. Usually for this case we would already have noticed that
581 0 : // the layer has no data for us with the ShardedRange check above, but the key-by-key filtering can also discover that nothing in the layer belongs to this shard.
582 0 : drop_layers.push(layer);
583 0 : }
584 : }
585 :
586 : // At this point, we have replaced local layer files with their rewritten form, but not yet uploaded
587 : // metadata to reflect that. If we restart here, the replaced layer files will look invalid (size mismatch
588 : // to remote index) and be removed. This is inefficient but safe.
589 0 : fail::fail_point!("compact-shard-ancestors-localonly");
590 0 :
591 0 : // Update the LayerMap so that readers will use the new layers, and enqueue it for writing to remote storage
592 0 : self.rewrite_layers(replace_image_layers, drop_layers)
593 0 : .await?;
594 :
595 0 : fail::fail_point!("compact-shard-ancestors-enqueued");
596 0 :
597 0 : // We wait for all uploads to complete before finishing this compaction stage. This is not
598 0 : // necessary for correctness, but it simplifies testing, and avoids proceeding with another
599 0 : // Timeline's compaction while this timeline's uploads may be generating lots of disk I/O
600 0 : // load.
601 0 : match self.remote_client.wait_completion().await {
602 0 : Ok(()) => (),
603 0 : Err(WaitCompletionError::NotInitialized(ni)) => return Err(CompactionError::from(ni)),
604 : Err(WaitCompletionError::UploadQueueShutDownOrStopped) => {
605 0 : return Err(CompactionError::ShuttingDown)
606 : }
607 : }
608 :
609 0 : fail::fail_point!("compact-shard-ancestors-persistent");
610 0 :
611 0 : Ok(())
612 0 : }
613 :
614 : /// Update the LayerVisibilityHint of layers covered by image layers, based on whether there is
615 : /// an image layer between them and the most recent readable LSN (branch point or tip of timeline). The
616 : /// purpose of the visibility hint is to record which layers need to be available to service reads.
617 : ///
618 : /// The result may be used as an input to eviction and secondary downloads to de-prioritize layers
619 : /// that we know won't be needed for reads.
620 594 : pub(super) async fn update_layer_visibility(
621 594 : &self,
622 594 : ) -> Result<(), super::layer_manager::Shutdown> {
623 594 : let head_lsn = self.get_last_record_lsn();
624 :
625 : // We will sweep through layers in reverse-LSN order. We only do historic layers. L0 deltas
626 : // are implicitly left visible, because LayerVisibilityHint's default is Visible, and we never modify it here.
627 : // Note that L0 deltas _can_ be covered by image layers, but we consider them 'visible' because we anticipate that
628 : // they will be subject to L0->L1 compaction in the near future.
629 594 : let layer_manager = self.layers.read().await;
630 594 : let layer_map = layer_manager.layer_map()?;
631 :
632 594 : let readable_points = {
633 594 : let children = self.gc_info.read().unwrap().retain_lsns.clone();
634 594 :
635 594 : let mut readable_points = Vec::with_capacity(children.len() + 1);
636 594 : for (child_lsn, _child_timeline_id) in &children {
637 0 : readable_points.push(*child_lsn);
638 0 : }
639 594 : readable_points.push(head_lsn);
640 594 : readable_points
641 594 : };
642 594 :
643 594 : let (layer_visibility, covered) = layer_map.get_visibility(readable_points);
644 1524 : for (layer_desc, visibility) in layer_visibility {
645 930 : // FIXME: a more efficiency bulk zip() through the layers rather than NlogN getting each one
646 930 : let layer = layer_manager.get_from_desc(&layer_desc);
647 930 : layer.set_visibility(visibility);
648 930 : }
649 :
650 : // TODO: publish our covered KeySpace to our parent, so that when they update their visibility, they can
651 : // avoid assuming that everything at a branch point is visible.
652 594 : drop(covered);
653 594 : Ok(())
654 594 : }
655 :
656 : /// Collect a bunch of Level 0 layer files, and compact and reshuffle them
657 : /// into Level 1 files. Returns whether the L0 layers are fully compacted.
658 1092 : async fn compact_level0(
659 1092 : self: &Arc<Self>,
660 1092 : target_file_size: u64,
661 1092 : ctx: &RequestContext,
662 1092 : ) -> Result<bool, CompactionError> {
663 : let CompactLevel0Phase1Result {
664 1092 : new_layers,
665 1092 : deltas_to_compact,
666 1092 : fully_compacted,
667 : } = {
668 1092 : let phase1_span = info_span!("compact_level0_phase1");
669 1092 : let ctx = ctx.attached_child();
670 1092 : let mut stats = CompactLevel0Phase1StatsBuilder {
671 1092 : version: Some(2),
672 1092 : tenant_id: Some(self.tenant_shard_id),
673 1092 : timeline_id: Some(self.timeline_id),
674 1092 : ..Default::default()
675 1092 : };
676 1092 :
677 1092 : let begin = tokio::time::Instant::now();
678 1092 : let phase1_layers_locked = self.layers.read().await;
679 1092 : let now = tokio::time::Instant::now();
680 1092 : stats.read_lock_acquisition_micros =
681 1092 : DurationRecorder::Recorded(RecordedDuration(now - begin), now);
682 1092 : self.compact_level0_phase1(phase1_layers_locked, stats, target_file_size, &ctx)
683 1092 : .instrument(phase1_span)
684 29652 : .await?
685 : };
686 :
687 1092 : if new_layers.is_empty() && deltas_to_compact.is_empty() {
688 : // nothing to do
689 1008 : return Ok(true);
690 84 : }
691 84 :
692 84 : self.finish_compact_batch(&new_layers, &Vec::new(), &deltas_to_compact)
693 3 : .await?;
694 84 : Ok(fully_compacted)
695 1092 : }
696 :
697 : /// Level0 files first phase of compaction, explained in the [`Self::compact_legacy`] comment.
698 1092 : async fn compact_level0_phase1<'a>(
699 1092 : self: &'a Arc<Self>,
700 1092 : guard: tokio::sync::RwLockReadGuard<'a, LayerManager>,
701 1092 : mut stats: CompactLevel0Phase1StatsBuilder,
702 1092 : target_file_size: u64,
703 1092 : ctx: &RequestContext,
704 1092 : ) -> Result<CompactLevel0Phase1Result, CompactionError> {
705 1092 : stats.read_lock_held_spawn_blocking_startup_micros =
706 1092 : stats.read_lock_acquisition_micros.till_now(); // set by caller
707 1092 : let layers = guard.layer_map()?;
708 1092 : let level0_deltas = layers.level0_deltas();
709 1092 : stats.level0_deltas_count = Some(level0_deltas.len());
710 1092 :
711 1092 : // Only compact if enough layers have accumulated.
712 1092 : let threshold = self.get_compaction_threshold();
713 1092 : if level0_deltas.is_empty() || level0_deltas.len() < threshold {
714 1008 : debug!(
715 0 : level0_deltas = level0_deltas.len(),
716 0 : threshold, "too few deltas to compact"
717 : );
718 1008 : return Ok(CompactLevel0Phase1Result::default());
719 84 : }
720 84 :
721 84 : let mut level0_deltas = level0_deltas
722 84 : .iter()
723 1206 : .map(|x| guard.get_from_desc(x))
724 84 : .collect::<Vec<_>>();
725 84 :
726 84 : // Gather the files to compact in this iteration.
727 84 : //
728 84 : // Start with the oldest Level 0 delta file, and collect any other
729 84 : // level 0 files that form a contiguous sequence, such that the end
730 84 : // LSN of previous file matches the start LSN of the next file.
731 84 : //
732 84 : // Note that if the files don't form such a sequence, we might
733 84 : // "compact" just a single file. That's a bit pointless, but it allows
734 84 : // us to get rid of the level 0 file, and compact the other files on
735 84 : // the next iteration. This could probably be made smarter, but such
736 84 : // "gaps" in the sequence of level 0 files should only happen in case
737 84 : // of a crash, partial download from cloud storage, or something like
738 84 : // that, so it's not a big deal in practice.
739 2244 : level0_deltas.sort_by_key(|l| l.layer_desc().lsn_range.start);
740 84 : let mut level0_deltas_iter = level0_deltas.iter();
741 84 :
742 84 : let first_level0_delta = level0_deltas_iter.next().unwrap();
743 84 : let mut prev_lsn_end = first_level0_delta.layer_desc().lsn_range.end;
744 84 : let mut deltas_to_compact = Vec::with_capacity(level0_deltas.len());
745 84 :
746 84 : // Accumulate the size of layers in `deltas_to_compact`
747 84 : let mut deltas_to_compact_bytes = 0;
748 84 :
749 84 : // Under normal circumstances, we will accumulate up to compaction_interval L0s of size
750 84 : // checkpoint_distance each. To avoid edge cases using extra system resources, bound our
751 84 : // work in this function to only operate on this much delta data at once.
752 84 : //
753 84 : // Take the max of the configured value & the default, so that tests that configure tiny values
754 84 : // can still use a sensible amount of memory, but if a deployed system configures bigger values we
755 84 : // still let them compact a full stack of L0s in one go.
756 84 : let delta_size_limit = std::cmp::max(
757 84 : self.get_compaction_threshold(),
758 84 : DEFAULT_COMPACTION_THRESHOLD,
759 84 : ) as u64
760 84 : * std::cmp::max(self.get_checkpoint_distance(), DEFAULT_CHECKPOINT_DISTANCE);
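// Worked example (assuming the stock defaults: a compaction threshold of 10 L0 layers
// and a checkpoint distance of 256 MiB): delta_size_limit = 10 * 256 MiB = 2.5 GiB of
// L0 delta data considered per compaction pass.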
761 84 :
762 84 : let mut fully_compacted = true;
763 84 :
764 84 : deltas_to_compact.push(first_level0_delta.download_and_keep_resident().await?);
765 1206 : for l in level0_deltas_iter {
766 1122 : let lsn_range = &l.layer_desc().lsn_range;
767 1122 :
768 1122 : if lsn_range.start != prev_lsn_end {
769 0 : break;
770 1122 : }
771 1122 : deltas_to_compact.push(l.download_and_keep_resident().await?);
772 1122 : deltas_to_compact_bytes += l.metadata().file_size;
773 1122 : prev_lsn_end = lsn_range.end;
774 1122 :
775 1122 : if deltas_to_compact_bytes >= delta_size_limit {
776 0 : info!(
777 0 : l0_deltas_selected = deltas_to_compact.len(),
778 0 : l0_deltas_total = level0_deltas.len(),
779 0 : "L0 compaction picker hit max delta layer size limit: {}",
780 : delta_size_limit
781 : );
782 0 : fully_compacted = false;
783 0 :
784 0 : // Proceed with compaction, but only a subset of L0s
785 0 : break;
786 1122 : }
787 : }
788 84 : let lsn_range = Range {
789 84 : start: deltas_to_compact
790 84 : .first()
791 84 : .unwrap()
792 84 : .layer_desc()
793 84 : .lsn_range
794 84 : .start,
795 84 : end: deltas_to_compact.last().unwrap().layer_desc().lsn_range.end,
796 84 : };
797 84 :
798 84 : info!(
799 0 : "Starting Level0 compaction in LSN range {}-{} for {} layers ({} deltas in total)",
800 0 : lsn_range.start,
801 0 : lsn_range.end,
802 0 : deltas_to_compact.len(),
803 0 : level0_deltas.len()
804 : );
805 :
806 1206 : for l in deltas_to_compact.iter() {
807 1206 : info!("compact includes {l}");
808 : }
809 :
810 : // We don't need the original list of layers anymore. Drop it so that
811 : // we don't accidentally use it later in the function.
812 84 : drop(level0_deltas);
813 84 :
814 84 : stats.read_lock_held_prerequisites_micros = stats
815 84 : .read_lock_held_spawn_blocking_startup_micros
816 84 : .till_now();
817 :
818 : // TODO: replace with streaming k-merge
819 84 : let all_keys = {
820 84 : let mut all_keys = Vec::new();
821 1206 : for l in deltas_to_compact.iter() {
822 1206 : if self.cancel.is_cancelled() {
823 0 : return Err(CompactionError::ShuttingDown);
824 1206 : }
825 7083 : all_keys.extend(l.load_keys(ctx).await.map_err(CompactionError::Other)?);
826 : }
827 : // The current stdlib sorting implementation is designed in a way where it is
828 : // particularly fast where the slice is made up of sorted sub-ranges.
829 13271312 : all_keys.sort_by_key(|DeltaEntry { key, lsn, .. }| (*key, *lsn));
830 84 : all_keys
831 84 : };
832 84 :
833 84 : stats.read_lock_held_key_sort_micros = stats.read_lock_held_prerequisites_micros.till_now();
834 :
835 : // Determine N largest holes where N is number of compacted layers. The vec is sorted by key range start.
836 : //
837 : // A hole is a key range for which this compaction doesn't have any WAL records.
838 : // Our goal in this compaction iteration is to avoid creating L1s that, in terms of their key range,
839 : // cover the hole, but actually don't contain any WAL records for that key range.
840 : // The reason is that the mere stack of L1s (`count_deltas`) triggers image layer creation (`create_image_layers`).
841 : // That image layer creation would be useless for a hole range covered by L1s that don't contain any WAL records.
842 : //
843 : // The algorithm chooses holes as follows.
844 : // - Slide a two-key window over the keys in key order to get the hole range (= distance between two keys).
845 : // - Filter: min threshold on range length
846 : // - Rank: by coverage size (=number of image layers required to reconstruct each key in the range for which we have any data)
847 : //
848 : // For more details, intuition, and some ASCII art see https://github.com/neondatabase/neon/pull/3597#discussion_r1112704451
849 : #[derive(PartialEq, Eq)]
850 : struct Hole {
851 : key_range: Range<Key>,
852 : coverage_size: usize,
853 : }
854 84 : let holes: Vec<Hole> = {
855 : use std::cmp::Ordering;
856 : impl Ord for Hole {
857 0 : fn cmp(&self, other: &Self) -> Ordering {
858 0 : self.coverage_size.cmp(&other.coverage_size).reverse()
859 0 : }
860 : }
861 : impl PartialOrd for Hole {
862 0 : fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
863 0 : Some(self.cmp(other))
864 0 : }
865 : }
866 84 : let max_holes = deltas_to_compact.len();
867 84 : let last_record_lsn = self.get_last_record_lsn();
868 84 : let min_hole_range = (target_file_size / page_cache::PAGE_SZ as u64) as i128;
869 84 : let min_hole_coverage_size = 3; // TODO: something more flexible?
870 84 : // min-heap (reserve space for one more element added before eviction)
871 84 : let mut heap: BinaryHeap<Hole> = BinaryHeap::with_capacity(max_holes + 1);
872 84 : let mut prev: Option<Key> = None;
873 :
874 6192114 : for &DeltaEntry { key: next_key, .. } in all_keys.iter() {
875 6192114 : if let Some(prev_key) = prev {
876 : // just a first fast filter: do not create hole entries for metadata keys. The last hole in the
877 : // compaction is the gap between data keys and metadata keys.
878 6192030 : if next_key.to_i128() - prev_key.to_i128() >= min_hole_range
879 0 : && !Key::is_metadata_key(&prev_key)
880 : {
881 0 : let key_range = prev_key..next_key;
882 0 : // Measuring a hole by simply subtracting the i128 representations of its key range boundaries
883 0 : // doesn't make much sense, because the largest holes would correspond to field1/field2 changes.
884 0 : // But we are mostly interested in eliminating holes which cause generation of excessive image layers.
885 0 : // That is why it is better to measure the size of a hole as the number of covering image layers.
886 0 : let coverage_size =
887 0 : layers.image_coverage(&key_range, last_record_lsn).len();
888 0 : if coverage_size >= min_hole_coverage_size {
889 0 : heap.push(Hole {
890 0 : key_range,
891 0 : coverage_size,
892 0 : });
893 0 : if heap.len() > max_holes {
894 0 : heap.pop(); // remove smallest hole
895 0 : }
896 0 : }
897 6192030 : }
898 84 : }
899 6192114 : prev = Some(next_key.next());
900 : }
901 84 : let mut holes = heap.into_vec();
902 84 : holes.sort_unstable_by_key(|hole| hole.key_range.start);
903 84 : holes
904 84 : };
905 84 : stats.read_lock_held_compute_holes_micros = stats.read_lock_held_key_sort_micros.till_now();
906 84 : drop_rlock(guard);
907 84 :
908 84 : if self.cancel.is_cancelled() {
909 0 : return Err(CompactionError::ShuttingDown);
910 84 : }
911 84 :
912 84 : stats.read_lock_drop_micros = stats.read_lock_held_compute_holes_micros.till_now();
913 :
914 : // This iterator walks through all key-value pairs from all the layers
915 : // we're compacting, in key, LSN order.
916 : // If there's both a Value::Image and Value::WalRecord for the same (key,lsn),
917 : // then the Value::Image is ordered before Value::WalRecord.
918 84 : let mut all_values_iter = {
919 84 : let mut deltas = Vec::with_capacity(deltas_to_compact.len());
920 1206 : for l in deltas_to_compact.iter() {
921 1206 : let l = l.get_as_delta(ctx).await.map_err(CompactionError::Other)?;
922 1206 : deltas.push(l);
923 : }
924 84 : MergeIterator::create(&deltas, &[], ctx)
925 84 : };
926 84 :
927 84 : // This iterator walks through all keys and is needed to calculate size used by each key
928 84 : let mut all_keys_iter = all_keys
929 84 : .iter()
930 6192114 : .map(|DeltaEntry { key, lsn, size, .. }| (*key, *lsn, *size))
931 6192030 : .coalesce(|mut prev, cur| {
932 6192030 : // Coalesce keys that belong to the same key pair.
933 6192030 : // This ensures that compaction doesn't put them
934 6192030 : // into different layer files.
935 6192030 : // Still limit this by the target file size,
936 6192030 : // so that we keep the size of the files in
937 6192030 : // check.
938 6192030 : if prev.0 == cur.0 && prev.2 < target_file_size {
939 120114 : prev.2 += cur.2;
940 120114 : Ok(prev)
941 : } else {
942 6071916 : Err((prev, cur))
943 : }
944 6192030 : });
945 84 :
946 84 : // Merge the contents of all the input delta layers into a new set
947 84 : // of delta layers, based on the current partitioning.
948 84 : //
949 84 : // We split the new delta layers on the key dimension. We iterate through the key space, and for each key, check if including the next key to the current output layer we're building would cause the layer to become too large. If so, dump the current output layer and start new one.
950 84 : // It's possible that there is a single key with so many page versions that storing all of them in a single layer file
951 84 : // would be too large. In that case, we also split on the LSN dimension.
952 84 : //
953 84 : // LSN
954 84 : // ^
955 84 : // |
956 84 : // | +-----------+ +--+--+--+--+
957 84 : // | | | | | | | |
958 84 : // | +-----------+ | | | | |
959 84 : // | | | | | | | |
960 84 : // | +-----------+ ==> | | | | |
961 84 : // | | | | | | | |
962 84 : // | +-----------+ | | | | |
963 84 : // | | | | | | | |
964 84 : // | +-----------+ +--+--+--+--+
965 84 : // |
966 84 : // +--------------> key
967 84 : //
968 84 : //
969 84 : // If one key (X) has a lot of page versions:
970 84 : //
971 84 : // LSN
972 84 : // ^
973 84 : // | (X)
974 84 : // | +-----------+ +--+--+--+--+
975 84 : // | | | | | | | |
976 84 : // | +-----------+ | | +--+ |
977 84 : // | | | | | | | |
978 84 : // | +-----------+ ==> | | | | |
979 84 : // | | | | | +--+ |
980 84 : // | +-----------+ | | | | |
981 84 : // | | | | | | | |
982 84 : // | +-----------+ +--+--+--+--+
983 84 : // |
984 84 : // +--------------> key
985 84 : // TODO: this actually divides the layers into fixed-size chunks, not
986 84 : // based on the partitioning.
987 84 : //
988 84 : // TODO: we should also opportunistically materialize and
989 84 : // garbage collect what we can.
990 84 : let mut new_layers = Vec::new();
991 84 : let mut prev_key: Option<Key> = None;
992 84 : let mut writer: Option<DeltaLayerWriter> = None;
993 84 : let mut key_values_total_size = 0u64;
994 84 : let mut dup_start_lsn: Lsn = Lsn::INVALID; // start LSN of layer containing values of the single key
995 84 : let mut dup_end_lsn: Lsn = Lsn::INVALID; // end LSN of layer containing values of the single key
996 84 : let mut next_hole = 0; // index of next hole in holes vector
997 84 :
998 84 : let mut keys = 0;
999 :
1000 6192198 : while let Some((key, lsn, value)) = all_values_iter
1001 6192198 : .next()
1002 10249 : .await
1003 6192198 : .map_err(CompactionError::Other)?
1004 : {
1005 6192114 : keys += 1;
1006 6192114 :
1007 6192114 : if keys % 32_768 == 0 && self.cancel.is_cancelled() {
1008 : // Avoid hitting the cancellation token on every key. In benches, we end up
1009 : // shuffling on the order of a million keys per layer, which means we'll check it
1010 : // around tens of times per layer.
1011 0 : return Err(CompactionError::ShuttingDown);
1012 6192114 : }
1013 6192114 :
1014 6192114 : let same_key = prev_key.map_or(false, |prev_key| prev_key == key);
1015 6192114 : // We need to check key boundaries once we reach next key or end of layer with the same key
1016 6192114 : if !same_key || lsn == dup_end_lsn {
1017 6072000 : let mut next_key_size = 0u64;
1018 6072000 : let is_dup_layer = dup_end_lsn.is_valid();
1019 6072000 : dup_start_lsn = Lsn::INVALID;
1020 6072000 : if !same_key {
1021 6072000 : dup_end_lsn = Lsn::INVALID;
1022 6072000 : }
1023 : // Determine size occupied by this key. We stop at next key or when size becomes larger than target_file_size
1024 6072000 : for (next_key, next_lsn, next_size) in all_keys_iter.by_ref() {
1025 6072000 : next_key_size = next_size;
1026 6072000 : if key != next_key {
1027 6071916 : if dup_end_lsn.is_valid() {
1028 0 : // We are writting segment with duplicates:
1029 0 : // place all remaining values of this key in separate segment
1030 0 : dup_start_lsn = dup_end_lsn; // new segments starts where old stops
1031 0 : dup_end_lsn = lsn_range.end; // there are no more values of this key till end of LSN range
1032 6071916 : }
1033 6071916 : break;
1034 84 : }
1035 84 : key_values_total_size += next_size;
1036 84 : // Check if it is time to split segment: if total keys size is larger than target file size.
1037 84 : // We need to avoid generation of empty segments if next_size > target_file_size.
1038 84 : if key_values_total_size > target_file_size && lsn != next_lsn {
1039 : // Split the key across multiple layers: such a layer can contain only a single key
1040 0 : dup_start_lsn = if dup_end_lsn.is_valid() {
1041 0 : dup_end_lsn // new segment with duplicates starts where old one stops
1042 : } else {
1043 0 : lsn // start with the first LSN for this key
1044 : };
1045 0 : dup_end_lsn = next_lsn; // upper LSN boundary is exclusive
1046 0 : break;
1047 84 : }
1048 : }
1049 : // handle case when loop reaches last key: in this case dup_end is non-zero but dup_start is not set.
1050 6072000 : if dup_end_lsn.is_valid() && !dup_start_lsn.is_valid() {
1051 0 : dup_start_lsn = dup_end_lsn;
1052 0 : dup_end_lsn = lsn_range.end;
1053 6072000 : }
1054 6072000 : if writer.is_some() {
1055 6071916 : let written_size = writer.as_mut().unwrap().size();
1056 6071916 : let contains_hole =
1057 6071916 : next_hole < holes.len() && key >= holes[next_hole].key_range.end;
1058 : // check if key cause layer overflow or contains hole...
1059 6071916 : if is_dup_layer
1060 6071916 : || dup_end_lsn.is_valid()
1061 6071916 : || written_size + key_values_total_size > target_file_size
1062 6071076 : || contains_hole
1063 : {
1064 : // ... if so, flush previous layer and prepare to write new one
1065 840 : let (desc, path) = writer
1066 840 : .take()
1067 840 : .unwrap()
1068 840 : .finish(prev_key.unwrap().next(), ctx)
1069 2130 : .await
1070 840 : .map_err(CompactionError::Other)?;
1071 840 : let new_delta = Layer::finish_creating(self.conf, self, desc, &path)
1072 840 : .map_err(CompactionError::Other)?;
1073 :
1074 840 : new_layers.push(new_delta);
1075 840 : writer = None;
1076 840 :
1077 840 : if contains_hole {
1078 0 : // skip hole
1079 0 : next_hole += 1;
1080 840 : }
1081 6071076 : }
1082 84 : }
1083 : // Remember size of key value because at next iteration we will access next item
1084 6072000 : key_values_total_size = next_key_size;
1085 120114 : }
1086 6192114 : fail_point!("delta-layer-writer-fail-before-finish", |_| {
1087 0 : Err(CompactionError::Other(anyhow::anyhow!(
1088 0 : "failpoint delta-layer-writer-fail-before-finish"
1089 0 : )))
1090 6192114 : });
1091 :
1092 6192114 : if !self.shard_identity.is_key_disposable(&key) {
1093 6192114 : if writer.is_none() {
1094 924 : if self.cancel.is_cancelled() {
1095 : // to be somewhat responsive to cancellation, check for each new layer
1096 0 : return Err(CompactionError::ShuttingDown);
1097 924 : }
1098 : // Create the writer if not initialized yet
1099 924 : writer = Some(
1100 : DeltaLayerWriter::new(
1101 924 : self.conf,
1102 924 : self.timeline_id,
1103 924 : self.tenant_shard_id,
1104 924 : key,
1105 924 : if dup_end_lsn.is_valid() {
1106 : // this is a layer containing a slice of values of the same key
1107 0 : debug!("Create new dup layer {}..{}", dup_start_lsn, dup_end_lsn);
1108 0 : dup_start_lsn..dup_end_lsn
1109 : } else {
1110 924 : debug!("Create new layer {}..{}", lsn_range.start, lsn_range.end);
1111 924 : lsn_range.clone()
1112 : },
1113 924 : ctx,
1114 : )
1115 462 : .await
1116 924 : .map_err(CompactionError::Other)?,
1117 : );
1118 :
1119 924 : keys = 0;
1120 6191190 : }
1121 :
1122 6192114 : writer
1123 6192114 : .as_mut()
1124 6192114 : .unwrap()
1125 6192114 : .put_value(key, lsn, value, ctx)
1126 3675 : .await
1127 6192114 : .map_err(CompactionError::Other)?;
1128 : } else {
1129 0 : debug!(
1130 0 : "Dropping key {} during compaction (it belongs on shard {:?})",
1131 0 : key,
1132 0 : self.shard_identity.get_shard_number(&key)
1133 : );
1134 : }
1135 :
1136 6192114 : if !new_layers.is_empty() {
1137 59358 : fail_point!("after-timeline-compacted-first-L1");
1138 6132756 : }
1139 :
1140 6192114 : prev_key = Some(key);
1141 : }
1142 84 : if let Some(writer) = writer {
1143 84 : let (desc, path) = writer
1144 84 : .finish(prev_key.unwrap().next(), ctx)
1145 5969 : .await
1146 84 : .map_err(CompactionError::Other)?;
1147 84 : let new_delta = Layer::finish_creating(self.conf, self, desc, &path)
1148 84 : .map_err(CompactionError::Other)?;
1149 84 : new_layers.push(new_delta);
1150 0 : }
1151 :
1152 : // Sync layers
1153 84 : if !new_layers.is_empty() {
1154 : // Print a warning if the created layer is larger than double the target size
1155 : // Add two pages for potential overhead. This should in theory be already
1156 : // accounted for in the target calculation, but for very small targets,
1157 : // we still might easily hit the limit otherwise.
1158 84 : let warn_limit = target_file_size * 2 + page_cache::PAGE_SZ as u64 * 2;
1159 924 : for layer in new_layers.iter() {
1160 924 : if layer.layer_desc().file_size > warn_limit {
1161 0 : warn!(
1162 : %layer,
1163 0 : "created delta file of size {} larger than double of target of {target_file_size}", layer.layer_desc().file_size
1164 : );
1165 924 : }
1166 : }
1167 :
1168 : // The writer.finish() above already did the fsync of the inodes.
1169 : // We just need to fsync the directory in which these inodes are linked,
1170 : // which we know to be the timeline directory.
1171 : //
1172 : // We use fatal_err() below because, after writer.finish() returns with success,
1173 : // the in-memory state of the filesystem already has the layer file in its final place,
1174 : // and subsequent pageserver code could think it's durable while it really isn't.
1175 84 : let timeline_dir = VirtualFile::open(
1176 84 : &self
1177 84 : .conf
1178 84 : .timeline_path(&self.tenant_shard_id, &self.timeline_id),
1179 84 : ctx,
1180 84 : )
1181 42 : .await
1182 84 : .fatal_err("VirtualFile::open for timeline dir fsync");
1183 84 : timeline_dir
1184 84 : .sync_all()
1185 42 : .await
1186 84 : .fatal_err("VirtualFile::sync_all timeline dir");
1187 0 : }
1188 :
1189 84 : stats.write_layer_files_micros = stats.read_lock_drop_micros.till_now();
1190 84 : stats.new_deltas_count = Some(new_layers.len());
1191 924 : stats.new_deltas_size = Some(new_layers.iter().map(|l| l.layer_desc().file_size).sum());
1192 84 :
1193 84 : match TryInto::<CompactLevel0Phase1Stats>::try_into(stats)
1194 84 : .and_then(|stats| serde_json::to_string(&stats).context("serde_json::to_string"))
1195 : {
1196 84 : Ok(stats_json) => {
1197 84 : info!(
1198 0 : stats_json = stats_json.as_str(),
1199 0 : "compact_level0_phase1 stats available"
1200 : )
1201 : }
1202 0 : Err(e) => {
1203 0 : warn!("compact_level0_phase1 stats failed to serialize: {:#}", e);
1204 : }
1205 : }
1206 :
1207 : // Without this, rustc complains about deltas_to_compact still
1208 : // being borrowed when we `.into_iter()` below.
1209 84 : drop(all_values_iter);
1210 84 :
1211 84 : Ok(CompactLevel0Phase1Result {
1212 84 : new_layers,
1213 84 : deltas_to_compact: deltas_to_compact
1214 84 : .into_iter()
1215 1206 : .map(|x| x.drop_eviction_guard())
1216 84 : .collect::<Vec<_>>(),
1217 84 : fully_compacted,
1218 84 : })
1219 1092 : }
1220 : }
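The hole-detection pass in `compact_level0_phase1` keeps only the N widest gaps, ranked by image-layer coverage, using a capped heap. A standalone hedged sketch of that heap pattern (simplified to rank holes by gap length instead of coverage):

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

// Keep the `max_holes` largest gaps between consecutive sorted keys that are at least
// `min_len` wide. The real code ranks holes by image-layer coverage rather than length,
// and works on Key/i128 representations taken from the layer map.
fn largest_holes(sorted_keys: &[i128], min_len: i128, max_holes: usize) -> Vec<(i128, i128)> {
    let mut heap: BinaryHeap<Reverse<(i128, i128, i128)>> = BinaryHeap::new();
    for pair in sorted_keys.windows(2) {
        let (prev, next) = (pair[0], pair[1]);
        if next - prev >= min_len {
            heap.push(Reverse((next - prev, prev, next)));
            if heap.len() > max_holes {
                heap.pop(); // Reverse turns the max-heap into a min-heap: evict the smallest hole
            }
        }
    }
    let mut holes: Vec<_> = heap.into_iter().map(|Reverse((_, start, end))| (start, end)).collect();
    holes.sort_unstable_by_key(|&(start, _)| start);
    holes
}
```

For example, with `sorted_keys = [0, 1, 100, 101, 300]`, `min_len = 50`, and `max_holes = 1`, this returns `[(101, 300)]`, the single widest gap.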
1221 :
1222 : #[derive(Default)]
1223 : struct CompactLevel0Phase1Result {
1224 : new_layers: Vec<ResidentLayer>,
1225 : deltas_to_compact: Vec<Layer>,
1226 : // Whether we have included all L0 layers, or selected only part of them due to the
1227 : // L0 compaction size limit.
1228 : fully_compacted: bool,
1229 : }
1230 :
1231 : #[derive(Default)]
1232 : struct CompactLevel0Phase1StatsBuilder {
1233 : version: Option<u64>,
1234 : tenant_id: Option<TenantShardId>,
1235 : timeline_id: Option<TimelineId>,
1236 : read_lock_acquisition_micros: DurationRecorder,
1237 : read_lock_held_spawn_blocking_startup_micros: DurationRecorder,
1238 : read_lock_held_key_sort_micros: DurationRecorder,
1239 : read_lock_held_prerequisites_micros: DurationRecorder,
1240 : read_lock_held_compute_holes_micros: DurationRecorder,
1241 : read_lock_drop_micros: DurationRecorder,
1242 : write_layer_files_micros: DurationRecorder,
1243 : level0_deltas_count: Option<usize>,
1244 : new_deltas_count: Option<usize>,
1245 : new_deltas_size: Option<u64>,
1246 : }
1247 :
1248 : #[derive(serde::Serialize)]
1249 : struct CompactLevel0Phase1Stats {
1250 : version: u64,
1251 : tenant_id: TenantShardId,
1252 : timeline_id: TimelineId,
1253 : read_lock_acquisition_micros: RecordedDuration,
1254 : read_lock_held_spawn_blocking_startup_micros: RecordedDuration,
1255 : read_lock_held_key_sort_micros: RecordedDuration,
1256 : read_lock_held_prerequisites_micros: RecordedDuration,
1257 : read_lock_held_compute_holes_micros: RecordedDuration,
1258 : read_lock_drop_micros: RecordedDuration,
1259 : write_layer_files_micros: RecordedDuration,
1260 : level0_deltas_count: usize,
1261 : new_deltas_count: usize,
1262 : new_deltas_size: u64,
1263 : }
1264 :
1265 : impl TryFrom<CompactLevel0Phase1StatsBuilder> for CompactLevel0Phase1Stats {
1266 : type Error = anyhow::Error;
1267 :
1268 84 : fn try_from(value: CompactLevel0Phase1StatsBuilder) -> Result<Self, Self::Error> {
1269 84 : Ok(Self {
1270 84 : version: value.version.ok_or_else(|| anyhow!("version not set"))?,
1271 84 : tenant_id: value
1272 84 : .tenant_id
1273 84 : .ok_or_else(|| anyhow!("tenant_id not set"))?,
1274 84 : timeline_id: value
1275 84 : .timeline_id
1276 84 : .ok_or_else(|| anyhow!("timeline_id not set"))?,
1277 84 : read_lock_acquisition_micros: value
1278 84 : .read_lock_acquisition_micros
1279 84 : .into_recorded()
1280 84 : .ok_or_else(|| anyhow!("read_lock_acquisition_micros not set"))?,
1281 84 : read_lock_held_spawn_blocking_startup_micros: value
1282 84 : .read_lock_held_spawn_blocking_startup_micros
1283 84 : .into_recorded()
1284 84 : .ok_or_else(|| anyhow!("read_lock_held_spawn_blocking_startup_micros not set"))?,
1285 84 : read_lock_held_key_sort_micros: value
1286 84 : .read_lock_held_key_sort_micros
1287 84 : .into_recorded()
1288 84 : .ok_or_else(|| anyhow!("read_lock_held_key_sort_micros not set"))?,
1289 84 : read_lock_held_prerequisites_micros: value
1290 84 : .read_lock_held_prerequisites_micros
1291 84 : .into_recorded()
1292 84 : .ok_or_else(|| anyhow!("read_lock_held_prerequisites_micros not set"))?,
1293 84 : read_lock_held_compute_holes_micros: value
1294 84 : .read_lock_held_compute_holes_micros
1295 84 : .into_recorded()
1296 84 : .ok_or_else(|| anyhow!("read_lock_held_compute_holes_micros not set"))?,
1297 84 : read_lock_drop_micros: value
1298 84 : .read_lock_drop_micros
1299 84 : .into_recorded()
1300 84 : .ok_or_else(|| anyhow!("read_lock_drop_micros not set"))?,
1301 84 : write_layer_files_micros: value
1302 84 : .write_layer_files_micros
1303 84 : .into_recorded()
1304 84 : .ok_or_else(|| anyhow!("write_layer_files_micros not set"))?,
1305 84 : level0_deltas_count: value
1306 84 : .level0_deltas_count
1307 84 : .ok_or_else(|| anyhow!("level0_deltas_count not set"))?,
1308 84 : new_deltas_count: value
1309 84 : .new_deltas_count
1310 84 : .ok_or_else(|| anyhow!("new_deltas_count not set"))?,
1311 84 : new_deltas_size: value
1312 84 : .new_deltas_size
1313 84 : .ok_or_else(|| anyhow!("new_deltas_size not set"))?,
1314 : })
1315 84 : }
1316 : }
1317 :
1318 : impl Timeline {
1319 : /// Entry point for new tiered compaction algorithm.
1320 : ///
1321 : /// All the real work is in the implementation in the pageserver_compaction
1322 : /// crate. The code here would apply to any algorithm implemented by the
1323 : /// same interface, but tiered is the only one at the moment.
1324 : ///
1325 : /// TODO: cancellation
1326 0 : pub(crate) async fn compact_tiered(
1327 0 : self: &Arc<Self>,
1328 0 : _cancel: &CancellationToken,
1329 0 : ctx: &RequestContext,
1330 0 : ) -> Result<(), CompactionError> {
1331 0 : let fanout = self.get_compaction_threshold() as u64;
1332 0 : let target_file_size = self.get_checkpoint_distance();
1333 :
1334 : // Find the top of the historical layers
1335 0 : let end_lsn = {
1336 0 : let guard = self.layers.read().await;
1337 0 : let layers = guard.layer_map()?;
1338 :
1339 0 : let l0_deltas = layers.level0_deltas();
1340 0 :
1341 0 : // As an optimization, if we find that there are too few L0 layers,
1342 0 : // bail out early. We know that the compaction algorithm would do
1343 0 : // nothing in that case.
1344 0 : if l0_deltas.len() < fanout as usize {
1345 : // doesn't need compacting
1346 0 : return Ok(());
1347 0 : }
1348 0 : l0_deltas.iter().map(|l| l.lsn_range.end).max().unwrap()
1349 0 : };
1350 0 :
1351 0 : // Is the timeline being deleted?
1352 0 : if self.is_stopping() {
1353 0 : trace!("Dropping out of compaction on timeline shutdown");
1354 0 : return Err(CompactionError::ShuttingDown);
1355 0 : }
1356 :
1357 0 : let (dense_ks, _sparse_ks) = self.collect_keyspace(end_lsn, ctx).await?;
1358 : // TODO(chi): ignore sparse_keyspace for now, compact it in the future.
1359 0 : let mut adaptor = TimelineAdaptor::new(self, (end_lsn, dense_ks));
1360 0 :
1361 0 : pageserver_compaction::compact_tiered::compact_tiered(
1362 0 : &mut adaptor,
1363 0 : end_lsn,
1364 0 : target_file_size,
1365 0 : fanout,
1366 0 : ctx,
1367 0 : )
1368 0 : .await
1369 : // TODO: compact_tiered needs to return CompactionError
1370 0 : .map_err(CompactionError::Other)?;
1371 :
1372 0 : adaptor.flush_updates().await?;
1373 0 : Ok(())
1374 0 : }
1375 :
1376 : /// Take a list of images and deltas, produce images and deltas according to GC horizon and retain_lsns.
1377 : ///
1378 : /// It takes a key, the values of the key within the compaction process, a GC horizon, and all retain_lsns below the horizon.
1379 : /// For now, it requires that `accumulated_values` contain the full history of the key (i.e., the key with the lowest LSN is
1380 : /// an image or a WAL record not requiring a base image). This restriction will be removed once we implement gc-compaction on branches.
1381 : ///
1382 : /// The function returns the deltas and the base image that need to be placed at each of the retain LSNs. For example, given:
1383 : ///
1384 : /// A@0x10, +B@0x20, +C@0x30, +D@0x40, +E@0x50, +F@0x60
1385 : /// horizon = 0x50, retain_lsn = 0x20, 0x40, delta_threshold=3
1386 : ///
1387 : /// The function will produce:
1388 : ///
1389 : /// ```plain
1390 : /// 0x20(retain_lsn) -> img=AB@0x20 always produce a single image below the lowest retain LSN
1391 : /// 0x40(retain_lsn) -> deltas=[+C@0x30, +D@0x40] two deltas since the last base image, keeping the deltas
1392 : /// 0x50(horizon) -> deltas=[ABCDE@0x50] three deltas since the last base image, generate an image but put it in the delta
1393 : /// above_horizon -> deltas=[+F@0x60] full history above the horizon
1394 : /// ```
1395 : ///
1396 : /// Note that `accumulated_values` must be sorted by LSN and should belong to a single key.
1397 1290 : pub(crate) async fn generate_key_retention(
1398 1290 : self: &Arc<Timeline>,
1399 1290 : key: Key,
1400 1290 : full_history: &[(Key, Lsn, Value)],
1401 1290 : horizon: Lsn,
1402 1290 : retain_lsn_below_horizon: &[Lsn],
1403 1290 : delta_threshold_cnt: usize,
1404 1290 : base_img_from_ancestor: Option<(Key, Lsn, Bytes)>,
1405 1290 : ) -> anyhow::Result<KeyHistoryRetention> {
1406 1290 : // Pre-checks for the invariants
1407 1290 : if cfg!(debug_assertions) {
1408 3120 : for (log_key, _, _) in full_history {
1409 1830 : assert_eq!(log_key, &key, "mismatched key");
1410 : }
1411 1290 : for i in 1..full_history.len() {
1412 540 : assert!(full_history[i - 1].1 <= full_history[i].1, "unordered LSN");
1413 540 : if full_history[i - 1].1 == full_history[i].1 {
1414 0 : assert!(
1415 0 : matches!(full_history[i - 1].2, Value::Image(_)),
1416 0 : "unordered delta/image, or duplicated delta"
1417 : );
1418 540 : }
1419 : }
1420 : // There was previously an assertion that, when there is no base image,
1421 : // checked whether the first record in the history is `will_init`, but it was removed.
1422 : // This is explained in the test cases for generate_key_retention.
1423 : // Search "incomplete history" for more information.
1424 3000 : for lsn in retain_lsn_below_horizon {
1425 1710 : assert!(lsn < &horizon, "retain lsn must be below horizon")
1426 : }
1427 1290 : for i in 1..retain_lsn_below_horizon.len() {
1428 834 : assert!(
1429 834 : retain_lsn_below_horizon[i - 1] <= retain_lsn_below_horizon[i],
1430 0 : "unordered LSN"
1431 : );
1432 : }
1433 0 : }
1434 1290 : let has_ancestor = base_img_from_ancestor.is_some();
1435 : // Step 1: split history into len(retain_lsn_below_horizon) + 2 buckets, where the last bucket is for all deltas above the horizon,
1436 : // and the second-to-last bucket is for the horizon. Each bucket contains deltas with lsn_prev_bucket < lsn <= lsn_this_bucket.
1437 1290 : let (mut split_history, lsn_split_points) = {
1438 1290 : let mut split_history = Vec::new();
1439 1290 : split_history.resize_with(retain_lsn_below_horizon.len() + 2, Vec::new);
1440 1290 : let mut lsn_split_points = Vec::with_capacity(retain_lsn_below_horizon.len() + 1);
1441 3000 : for lsn in retain_lsn_below_horizon {
1442 1710 : lsn_split_points.push(*lsn);
1443 1710 : }
1444 1290 : lsn_split_points.push(horizon);
1445 1290 : let mut current_idx = 0;
1446 3120 : for item @ (_, lsn, _) in full_history {
1447 2316 : while current_idx < lsn_split_points.len() && *lsn > lsn_split_points[current_idx] {
1448 486 : current_idx += 1;
1449 486 : }
1450 1830 : split_history[current_idx].push(item);
1451 : }
1452 1290 : (split_history, lsn_split_points)
1453 : };
1454 : // Step 2: filter out duplicated records due to the k-merge of image/delta layers
1455 5580 : for split_for_lsn in &mut split_history {
1456 4290 : let mut prev_lsn = None;
1457 4290 : let mut new_split_for_lsn = Vec::with_capacity(split_for_lsn.len());
1458 4290 : for record @ (_, lsn, _) in std::mem::take(split_for_lsn) {
1459 1830 : if let Some(prev_lsn) = &prev_lsn {
1460 198 : if *prev_lsn == lsn {
1461 : // The case that we have an LSN with both data from the delta layer and the image layer. As
1462 : // `ValueWrapper` ensures that an image is ordered before a delta at the same LSN, we simply
1463 : // drop this delta and keep the image.
1464 : //
1465 : // For example, we have delta layer key1@0x10, key1@0x20, and image layer key1@0x10, we will
1466 : // keep the image for key1@0x10 and the delta for key1@0x20. key1@0x10 delta will be simply
1467 : // dropped.
1468 : //
1469 : // TODO: in case we have both delta + images for a given LSN and it does not exceed the delta
1470 : // threshold, we could have kept delta instead to save space. This is an optimization for the future.
1471 0 : continue;
1472 198 : }
1473 1632 : }
1474 1830 : prev_lsn = Some(lsn);
1475 1830 : new_split_for_lsn.push(record);
1476 : }
1477 4290 : *split_for_lsn = new_split_for_lsn;
1478 : }
1479 : // Step 3: generate images when necessary
1480 1290 : let mut retention = Vec::with_capacity(split_history.len());
1481 1290 : let mut records_since_last_image = 0;
1482 1290 : let batch_cnt = split_history.len();
1483 1290 : assert!(
1484 1290 : batch_cnt >= 2,
1485 0 : "should have at least below + above horizon batches"
1486 : );
1487 1290 : let mut replay_history: Vec<(Key, Lsn, Value)> = Vec::new();
1488 1290 : if let Some((key, lsn, img)) = base_img_from_ancestor {
1489 54 : replay_history.push((key, lsn, Value::Image(img)));
1490 1236 : }
1491 :
1492 : /// Generate debug information for the replay history
1493 0 : fn generate_history_trace(replay_history: &[(Key, Lsn, Value)]) -> String {
1494 : use std::fmt::Write;
1495 0 : let mut output = String::new();
1496 0 : if let Some((key, _, _)) = replay_history.first() {
1497 0 : write!(output, "key={} ", key).unwrap();
1498 0 : let mut cnt = 0;
1499 0 : for (_, lsn, val) in replay_history {
1500 0 : if val.is_image() {
1501 0 : write!(output, "i@{} ", lsn).unwrap();
1502 0 : } else if val.will_init() {
1503 0 : write!(output, "di@{} ", lsn).unwrap();
1504 0 : } else {
1505 0 : write!(output, "d@{} ", lsn).unwrap();
1506 0 : }
1507 0 : cnt += 1;
1508 0 : if cnt >= 128 {
1509 0 : write!(output, "... and more").unwrap();
1510 0 : break;
1511 0 : }
1512 : }
1513 0 : } else {
1514 0 : write!(output, "<no history>").unwrap();
1515 0 : }
1516 0 : output
1517 0 : }
1518 :
1519 0 : fn generate_debug_trace(
1520 0 : replay_history: Option<&[(Key, Lsn, Value)]>,
1521 0 : full_history: &[(Key, Lsn, Value)],
1522 0 : lsns: &[Lsn],
1523 0 : horizon: Lsn,
1524 0 : ) -> String {
1525 : use std::fmt::Write;
1526 0 : let mut output = String::new();
1527 0 : if let Some(replay_history) = replay_history {
1528 0 : writeln!(
1529 0 : output,
1530 0 : "replay_history: {}",
1531 0 : generate_history_trace(replay_history)
1532 0 : )
1533 0 : .unwrap();
1534 0 : } else {
1535 0 : writeln!(output, "replay_history: <disabled>",).unwrap();
1536 0 : }
1537 0 : writeln!(
1538 0 : output,
1539 0 : "full_history: {}",
1540 0 : generate_history_trace(full_history)
1541 0 : )
1542 0 : .unwrap();
1543 0 : writeln!(
1544 0 : output,
1545 0 : "when processing: [{}] horizon={}",
1546 0 : lsns.iter().map(|l| format!("{l}")).join(","),
1547 0 : horizon
1548 0 : )
1549 0 : .unwrap();
1550 0 : output
1551 0 : }
1552 :
1553 4290 : for (i, split_for_lsn) in split_history.into_iter().enumerate() {
1554 : // TODO: there could be image keys inside the splits, and we can compute records_since_last_image accordingly.
1555 4290 : records_since_last_image += split_for_lsn.len();
1556 4290 : let generate_image = if i == 0 && !has_ancestor {
1557 : // We always generate images for the first batch (below horizon / lowest retain_lsn)
1558 1236 : true
1559 3054 : } else if i == batch_cnt - 1 {
1560 : // Do not generate images for the last batch (above horizon)
1561 1290 : false
1562 1764 : } else if records_since_last_image >= delta_threshold_cnt {
1563 : // Generate images when there are too many records
1564 18 : true
1565 : } else {
1566 1746 : false
1567 : };
1568 4290 : replay_history.extend(split_for_lsn.iter().map(|x| (*x).clone()));
1569 : // Only retain the items after the last image record
1570 5274 : for idx in (0..replay_history.len()).rev() {
1571 5274 : if replay_history[idx].2.will_init() {
1572 4290 : replay_history = replay_history[idx..].to_vec();
1573 4290 : break;
1574 984 : }
1575 : }
1576 4290 : if let Some((_, _, val)) = replay_history.first() {
1577 4290 : if !val.will_init() {
1578 0 : return Err(anyhow::anyhow!("invalid history, no base image")).with_context(
1579 0 : || {
1580 0 : generate_debug_trace(
1581 0 : Some(&replay_history),
1582 0 : full_history,
1583 0 : retain_lsn_below_horizon,
1584 0 : horizon,
1585 0 : )
1586 0 : },
1587 0 : );
1588 4290 : }
1589 0 : }
1590 4290 : if generate_image && records_since_last_image > 0 {
1591 1254 : records_since_last_image = 0;
1592 1254 : let replay_history_for_debug = if cfg!(debug_assertions) {
1593 1254 : Some(replay_history.clone())
1594 : } else {
1595 0 : None
1596 : };
1597 1254 : let replay_history_for_debug_ref = replay_history_for_debug.as_deref();
1598 1254 : let history = std::mem::take(&mut replay_history);
1599 1254 : let mut img = None;
1600 1254 : let mut records = Vec::with_capacity(history.len());
1601 1254 : if let (_, lsn, Value::Image(val)) = history.first().as_ref().unwrap() {
1602 1254 : img = Some((*lsn, val.clone()));
1603 1254 : for (_, lsn, val) in history.into_iter().skip(1) {
1604 102 : let Value::WalRecord(rec) = val else {
1605 0 : return Err(anyhow::anyhow!(
1606 0 : "invalid record, first record is image, expect walrecords"
1607 0 : ))
1608 0 : .with_context(|| {
1609 0 : generate_debug_trace(
1610 0 : replay_history_for_debug_ref,
1611 0 : full_history,
1612 0 : retain_lsn_below_horizon,
1613 0 : horizon,
1614 0 : )
1615 0 : });
1616 : };
1617 102 : records.push((lsn, rec));
1618 : }
1619 : } else {
1620 0 : for (_, lsn, val) in history.into_iter() {
1621 0 : let Value::WalRecord(rec) = val else {
1622 0 : return Err(anyhow::anyhow!("invalid record, first record is walrecord, expect rest are walrecord"))
1623 0 : .with_context(|| generate_debug_trace(
1624 0 : replay_history_for_debug_ref,
1625 0 : full_history,
1626 0 : retain_lsn_below_horizon,
1627 0 : horizon,
1628 0 : ));
1629 : };
1630 0 : records.push((lsn, rec));
1631 : }
1632 : }
1633 1254 : records.reverse();
1634 1254 : let state = ValueReconstructState { img, records };
1635 1254 : let request_lsn = lsn_split_points[i]; // the last batch does not generate an image, so i is always in range
1636 1254 : let img = self.reconstruct_value(key, request_lsn, state).await?;
1637 1254 : replay_history.push((key, request_lsn, Value::Image(img.clone())));
1638 1254 : retention.push(vec![(request_lsn, Value::Image(img))]);
1639 3036 : } else {
1640 3036 : let deltas = split_for_lsn
1641 3036 : .iter()
1642 3036 : .map(|(_, lsn, value)| (*lsn, value.clone()))
1643 3036 : .collect_vec();
1644 3036 : retention.push(deltas);
1645 3036 : }
1646 : }
1647 1290 : let mut result = Vec::with_capacity(retention.len());
1648 1290 : assert_eq!(retention.len(), lsn_split_points.len() + 1);
1649 4290 : for (idx, logs) in retention.into_iter().enumerate() {
1650 4290 : if idx == lsn_split_points.len() {
1651 1290 : return Ok(KeyHistoryRetention {
1652 1290 : below_horizon: result,
1653 1290 : above_horizon: KeyLogAtLsn(logs),
1654 1290 : });
1655 3000 : } else {
1656 3000 : result.push((lsn_split_points[idx], KeyLogAtLsn(logs)));
1657 3000 : }
1658 : }
1659 0 : unreachable!("key retention is empty")
1660 1290 : }
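The doc comment above walks through how a single key's history is split by the retain LSNs and the GC horizon. The following is a simplified, standalone sketch of that bucketing step (Step 1 of `generate_key_retention`), using plain `u64` LSNs and string records instead of the pageserver's `Key`/`Lsn`/`Value` types; it reproduces the A@0x10 .. +F@0x60 example from the doc comment and is only an illustration, not the actual implementation.

```rust
// Simplified sketch: split a single key's history into buckets delimited by
// the retain LSNs and the GC horizon. Entries with
// prev_split_point < lsn <= this_split_point land in the corresponding bucket;
// everything above the horizon goes into the last bucket.
fn split_history(
    history: &[(u64, &'static str)], // (lsn, record), sorted by LSN
    retain_lsns: &[u64],             // retain LSNs below the horizon, sorted
    horizon: u64,
) -> Vec<Vec<(u64, &'static str)>> {
    let mut split_points: Vec<u64> = retain_lsns.to_vec();
    split_points.push(horizon);
    // One bucket per split point, plus one for records above the horizon.
    let mut buckets = vec![Vec::new(); split_points.len() + 1];
    let mut idx = 0;
    for &(lsn, rec) in history {
        while idx < split_points.len() && lsn > split_points[idx] {
            idx += 1;
        }
        buckets[idx].push((lsn, rec));
    }
    buckets
}

fn main() {
    // The doc-comment example: horizon = 0x50, retain_lsns = 0x20 and 0x40.
    let history = [
        (0x10, "A"), (0x20, "+B"), (0x30, "+C"),
        (0x40, "+D"), (0x50, "+E"), (0x60, "+F"),
    ];
    let buckets = split_history(&history, &[0x20, 0x40], 0x50);
    assert_eq!(buckets[0], vec![(0x10, "A"), (0x20, "+B")]); // image AB@0x20
    assert_eq!(buckets[1], vec![(0x30, "+C"), (0x40, "+D")]); // deltas kept
    assert_eq!(buckets[2], vec![(0x50, "+E")]); // horizon bucket
    assert_eq!(buckets[3], vec![(0x60, "+F")]); // above the horizon
}
```

Bucket 0 is where the single image at the lowest retain LSN is generated; the last bucket holds the full history kept above the horizon.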
1661 :
1662 : /// An experimental compaction building block that combines compaction with garbage collection.
1663 : ///
1664 : /// The current implementation picks all delta + image layers that are below or intersecting with
1665 : /// the GC horizon without considering retain_lsns. Then, it does a full compaction over all these delta
1666 : /// layers and image layers, which generates image layers on the gc horizon, drop deltas below gc horizon,
1667 : /// and create delta layers with all deltas >= gc horizon.
1668 78 : pub(crate) async fn compact_with_gc(
1669 78 : self: &Arc<Self>,
1670 78 : cancel: &CancellationToken,
1671 78 : flags: EnumSet<CompactFlags>,
1672 78 : ctx: &RequestContext,
1673 78 : ) -> anyhow::Result<()> {
1674 : use std::collections::BTreeSet;
1675 :
1676 : // Block other compaction/GC tasks from running for now. GC-compaction could run along
1677 : // with legacy compaction tasks in the future. Always ensure the lock order is compaction -> gc.
1678 : // Note that we already acquired the compaction lock when the outer `compact` function gets called.
1679 :
1680 78 : let gc_lock = async {
1681 78 : tokio::select! {
1682 78 : guard = self.gc_lock.lock() => Ok(guard),
1683 : // TODO: refactor to CompactionError to correctly pass cancelled error
1684 78 : _ = cancel.cancelled() => Err(anyhow!("cancelled")),
1685 : }
1686 78 : };
1687 :
1688 78 : let gc_lock = crate::timed(
1689 78 : gc_lock,
1690 78 : "acquires gc lock",
1691 78 : std::time::Duration::from_secs(5),
1692 78 : )
1693 3 : .await?;
1694 :
1695 78 : let dry_run = flags.contains(CompactFlags::DryRun);
1696 78 :
1697 78 : info!("running enhanced gc bottom-most compaction, dry_run={dry_run}");
1698 :
1699 78 : scopeguard::defer! {
1700 78 : info!("done enhanced gc bottom-most compaction");
1701 78 : };
1702 78 :
1703 78 : let mut stat = CompactionStatistics::default();
1704 :
1705 : // Step 0: pick all delta layers + image layers below or intersecting the GC horizon.
1706 : // The layer selection has the following properties:
1707 : // 1. If a layer is in the selection, all layers below it are in the selection.
1708 : // 2. Inferred from (1), for each key in the layer selection, the value can be reconstructed only with the layers in the layer selection.
1709 78 : let (layer_selection, gc_cutoff, retain_lsns_below_horizon) = {
1710 78 : let guard = self.layers.read().await;
1711 78 : let layers = guard.layer_map()?;
1712 78 : let gc_info = self.gc_info.read().unwrap();
1713 78 : let mut retain_lsns_below_horizon = Vec::new();
1714 78 : let gc_cutoff = gc_info.cutoffs.select_min();
1715 102 : for (lsn, _timeline_id) in &gc_info.retain_lsns {
1716 102 : if lsn < &gc_cutoff {
1717 102 : retain_lsns_below_horizon.push(*lsn);
1718 102 : }
1719 : }
1720 78 : for lsn in gc_info.leases.keys() {
1721 0 : if lsn < &gc_cutoff {
1722 0 : retain_lsns_below_horizon.push(*lsn);
1723 0 : }
1724 : }
1725 78 : let mut selected_layers = Vec::new();
1726 78 : drop(gc_info);
1727 : // Pick all the layers that intersect or are below the gc_cutoff, and get the largest LSN in the selected layers.
1728 78 : let Some(max_layer_lsn) = layers
1729 78 : .iter_historic_layers()
1730 300 : .filter(|desc| desc.get_lsn_range().start <= gc_cutoff)
1731 246 : .map(|desc| desc.get_lsn_range().end)
1732 78 : .max()
1733 : else {
1734 0 : info!("no layers to compact with gc");
1735 0 : return Ok(());
1736 : };
1737 : // Then, pick all the layers that are below the max_layer_lsn. This is to ensure we can pick all single-key
1738 : // layers to compact.
1739 300 : for desc in layers.iter_historic_layers() {
1740 300 : if desc.get_lsn_range().end <= max_layer_lsn {
1741 246 : selected_layers.push(guard.get_from_desc(&desc));
1742 246 : }
1743 : }
1744 78 : if selected_layers.is_empty() {
1745 0 : info!("no layers to compact with gc");
1746 0 : return Ok(());
1747 78 : }
1748 78 : retain_lsns_below_horizon.sort();
1749 78 : (selected_layers, gc_cutoff, retain_lsns_below_horizon)
1750 : };
1751 78 : let lowest_retain_lsn = if self.ancestor_timeline.is_some() {
1752 6 : Lsn(self.ancestor_lsn.0 + 1)
1753 : } else {
1754 72 : let res = retain_lsns_below_horizon
1755 72 : .first()
1756 72 : .copied()
1757 72 : .unwrap_or(gc_cutoff);
1758 72 : if cfg!(debug_assertions) {
1759 72 : assert_eq!(
1760 72 : res,
1761 72 : retain_lsns_below_horizon
1762 72 : .iter()
1763 72 : .min()
1764 72 : .copied()
1765 72 : .unwrap_or(gc_cutoff)
1766 72 : );
1767 0 : }
1768 72 : res
1769 : };
1770 78 : info!(
1771 0 : "picked {} layers for compaction with gc_cutoff={} lowest_retain_lsn={}",
1772 0 : layer_selection.len(),
1773 : gc_cutoff,
1774 : lowest_retain_lsn
1775 : );
1776 :
1777 : // Step 1: (In the future) construct a k-merge iterator over all layers. For now, simply collect all keys + LSNs.
1778 : // Also, verify if the layer map can be split by drawing a horizontal line at every LSN start/end split point.
1779 78 : let mut lsn_split_point = BTreeSet::new(); // TODO: use a better data structure (range tree / range set?)
1780 324 : for layer in &layer_selection {
1781 246 : let desc = layer.layer_desc();
1782 246 : if desc.is_delta() {
1783 : // ignore single-key layer files
1784 138 : if desc.key_range.start.next() != desc.key_range.end {
1785 102 : let lsn_range = &desc.lsn_range;
1786 102 : lsn_split_point.insert(lsn_range.start);
1787 102 : lsn_split_point.insert(lsn_range.end);
1788 102 : }
1789 138 : stat.visit_delta_layer(desc.file_size());
1790 108 : } else {
1791 108 : stat.visit_image_layer(desc.file_size());
1792 108 : }
1793 : }
1794 78 : let layer_names: Vec<crate::tenant::storage_layer::LayerName> = layer_selection
1795 78 : .iter()
1796 246 : .map(|layer| layer.layer_desc().layer_name())
1797 78 : .collect_vec();
1798 78 : if let Some(err) = check_valid_layermap(&layer_names) {
1799 0 : bail!("cannot run gc-compaction because {}", err);
1800 78 : }
1801 78 : // The maximum LSN we are processing in this compaction loop
1802 78 : let end_lsn = layer_selection
1803 78 : .iter()
1804 246 : .map(|l| l.layer_desc().lsn_range.end)
1805 78 : .max()
1806 78 : .unwrap();
1807 78 : // We don't want any of the produced layers to cover the full key range (i.e., MIN..MAX) b/c it will then be recognized
1808 78 : // as an L0 layer.
1809 78 : let mut delta_layers = Vec::new();
1810 78 : let mut image_layers = Vec::new();
1811 78 : let mut downloaded_layers = Vec::new();
1812 324 : for layer in &layer_selection {
1813 246 : let resident_layer = layer.download_and_keep_resident().await?;
1814 246 : downloaded_layers.push(resident_layer);
1815 : }
1816 324 : for resident_layer in &downloaded_layers {
1817 246 : if resident_layer.layer_desc().is_delta() {
1818 138 : let layer = resident_layer.get_as_delta(ctx).await?;
1819 138 : delta_layers.push(layer);
1820 : } else {
1821 108 : let layer = resident_layer.get_as_image(ctx).await?;
1822 108 : image_layers.push(layer);
1823 : }
1824 : }
1825 78 : let (dense_ks, sparse_ks) = self.collect_gc_compaction_keyspace().await?;
1826 78 : let mut merge_iter = FilterIterator::create(
1827 78 : MergeIterator::create(&delta_layers, &image_layers, ctx),
1828 78 : dense_ks,
1829 78 : sparse_ks,
1830 78 : )?;
1831 : // Step 2: Produce images+deltas. TODO: ensure newly-produced delta does not overlap with other deltas.
1832 : // Data of the same key.
1833 78 : let mut accumulated_values = Vec::new();
1834 78 : let mut last_key: Option<Key> = None;
1835 :
1836 : // Only create image layers when there is no ancestor branch. TODO: create a covering image layer
1837 : // when certain conditions are met.
1838 78 : let mut image_layer_writer = if self.ancestor_timeline.is_none() {
1839 : Some(
1840 72 : SplitImageLayerWriter::new(
1841 72 : self.conf,
1842 72 : self.timeline_id,
1843 72 : self.tenant_shard_id,
1844 72 : Key::MIN,
1845 72 : lowest_retain_lsn,
1846 72 : self.get_compaction_target_size(),
1847 72 : ctx,
1848 72 : )
1849 36 : .await?,
1850 : )
1851 : } else {
1852 6 : None
1853 : };
1854 :
1855 78 : let mut delta_layer_writer = SplitDeltaLayerWriter::new(
1856 78 : self.conf,
1857 78 : self.timeline_id,
1858 78 : self.tenant_shard_id,
1859 78 : lowest_retain_lsn..end_lsn,
1860 78 : self.get_compaction_target_size(),
1861 78 : )
1862 0 : .await?;
1863 :
1864 : /// Returns None if there is no ancestor branch. Throw an error when the key is not found.
1865 : ///
1866 : /// Currently, we always get the ancestor image for each key in the child branch no matter whether the image
1867 : /// is needed for reconstruction. This should be fixed in the future.
1868 : ///
1869 : /// Furthermore, we should do vectored get instead of a single get, or better, use k-merge for ancestor
1870 : /// images.
1871 1266 : async fn get_ancestor_image(
1872 1266 : tline: &Arc<Timeline>,
1873 1266 : key: Key,
1874 1266 : ctx: &RequestContext,
1875 1266 : ) -> anyhow::Result<Option<(Key, Lsn, Bytes)>> {
1876 1266 : if tline.ancestor_timeline.is_none() {
1877 1224 : return Ok(None);
1878 42 : };
1879 : // This function is implemented as a get of the current timeline at ancestor LSN, therefore reusing
1880 : // as much existing code as possible.
1881 42 : let img = tline.get(key, tline.ancestor_lsn, ctx).await?;
1882 42 : Ok(Some((key, tline.ancestor_lsn, img)))
1883 1266 : }
1884 :
1885 : // Actually, we can decide not to write to the image layer at all at this point because
1886 : // the key and LSN range are determined. However, to keep things simple here, we still
1887 : // create this writer and discard it at the end.
1888 :
1889 1758 : while let Some((key, lsn, val)) = merge_iter.next().await? {
1890 1680 : if cancel.is_cancelled() {
1891 0 : return Err(anyhow!("cancelled")); // TODO: refactor to CompactionError and pass cancel error
1892 1680 : }
1893 1680 : match val {
1894 1260 : Value::Image(_) => stat.visit_image_key(&val),
1895 420 : Value::WalRecord(_) => stat.visit_wal_key(&val),
1896 : }
1897 1680 : if last_key.is_none() || last_key.as_ref() == Some(&key) {
1898 492 : if last_key.is_none() {
1899 78 : last_key = Some(key);
1900 414 : }
1901 492 : accumulated_values.push((key, lsn, val));
1902 : } else {
1903 1188 : let last_key = last_key.as_mut().unwrap();
1904 1188 : stat.on_unique_key_visited();
1905 1188 : let retention = self
1906 1188 : .generate_key_retention(
1907 1188 : *last_key,
1908 1188 : &accumulated_values,
1909 1188 : gc_cutoff,
1910 1188 : &retain_lsns_below_horizon,
1911 1188 : COMPACTION_DELTA_THRESHOLD,
1912 1188 : get_ancestor_image(self, *last_key, ctx).await?,
1913 : )
1914 0 : .await?;
1915 : // Put the image into the image layer. Currently we have a single big layer for the compaction.
1916 1188 : retention
1917 1188 : .pipe_to(
1918 1188 : *last_key,
1919 1188 : self,
1920 1188 : &mut delta_layer_writer,
1921 1188 : image_layer_writer.as_mut(),
1922 1188 : &mut stat,
1923 1188 : dry_run,
1924 1188 : ctx,
1925 1188 : )
1926 1203 : .await?;
1927 1188 : accumulated_values.clear();
1928 1188 : *last_key = key;
1929 1188 : accumulated_values.push((key, lsn, val));
1930 : }
1931 : }
1932 :
1933 78 : let last_key = last_key.expect("no keys produced during compaction");
1934 78 : // TODO: move this part to the loop body
1935 78 : stat.on_unique_key_visited();
1936 78 : let retention = self
1937 78 : .generate_key_retention(
1938 78 : last_key,
1939 78 : &accumulated_values,
1940 78 : gc_cutoff,
1941 78 : &retain_lsns_below_horizon,
1942 78 : COMPACTION_DELTA_THRESHOLD,
1943 78 : get_ancestor_image(self, last_key, ctx).await?,
1944 : )
1945 0 : .await?;
1946 : // Put the image into the image layer. Currently we have a single big layer for the compaction.
1947 78 : retention
1948 78 : .pipe_to(
1949 78 : last_key,
1950 78 : self,
1951 78 : &mut delta_layer_writer,
1952 78 : image_layer_writer.as_mut(),
1953 78 : &mut stat,
1954 78 : dry_run,
1955 78 : ctx,
1956 78 : )
1957 72 : .await?;
1958 :
1959 114 : let discard = |key: &PersistentLayerKey| {
1960 114 : let key = key.clone();
1961 114 : async move { KeyHistoryRetention::discard_key(&key, self, dry_run).await }
1962 114 : };
1963 :
1964 78 : let produced_image_layers = if let Some(writer) = image_layer_writer {
1965 72 : if !dry_run {
1966 60 : writer
1967 60 : .finish_with_discard_fn(self, ctx, Key::MAX, discard)
1968 72 : .await?
1969 : } else {
1970 12 : let (layers, _) = writer.take()?;
1971 12 : assert!(layers.is_empty(), "image layers produced in dry run mode?");
1972 12 : Vec::new()
1973 : }
1974 : } else {
1975 6 : Vec::new()
1976 : };
1977 :
1978 78 : let produced_delta_layers = if !dry_run {
1979 66 : delta_layer_writer
1980 66 : .finish_with_discard_fn(self, ctx, discard)
1981 78 : .await?
1982 : } else {
1983 12 : let (layers, _) = delta_layer_writer.take()?;
1984 12 : assert!(layers.is_empty(), "delta layers produced in dry run mode?");
1985 12 : Vec::new()
1986 : };
1987 :
1988 78 : let mut compact_to = Vec::new();
1989 78 : let mut keep_layers = HashSet::new();
1990 78 : let produced_delta_layers_len = produced_delta_layers.len();
1991 78 : let produced_image_layers_len = produced_image_layers.len();
1992 132 : for action in produced_delta_layers {
1993 54 : match action {
1994 30 : SplitWriterResult::Produced(layer) => {
1995 30 : stat.produce_delta_layer(layer.layer_desc().file_size());
1996 30 : compact_to.push(layer);
1997 30 : }
1998 24 : SplitWriterResult::Discarded(l) => {
1999 24 : keep_layers.insert(l);
2000 24 : stat.discard_delta_layer();
2001 24 : }
2002 : }
2003 : }
2004 138 : for action in produced_image_layers {
2005 60 : match action {
2006 36 : SplitWriterResult::Produced(layer) => {
2007 36 : stat.produce_image_layer(layer.layer_desc().file_size());
2008 36 : compact_to.push(layer);
2009 36 : }
2010 24 : SplitWriterResult::Discarded(l) => {
2011 24 : keep_layers.insert(l);
2012 24 : stat.discard_image_layer();
2013 24 : }
2014 : }
2015 : }
2016 78 : let mut layer_selection = layer_selection;
2017 246 : layer_selection.retain(|x| !keep_layers.contains(&x.layer_desc().key()));
2018 78 :
2019 78 : info!(
2020 0 : "gc-compaction statistics: {}",
2021 0 : serde_json::to_string(&stat)?
2022 : );
2023 :
2024 78 : if dry_run {
2025 12 : return Ok(());
2026 66 : }
2027 66 :
2028 66 : info!(
2029 0 : "produced {} delta layers and {} image layers, {} layers are kept",
2030 0 : produced_delta_layers_len,
2031 0 : produced_image_layers_len,
2032 0 : layer_selection.len()
2033 : );
2034 :
2035 : // Step 3: Place back to the layer map.
2036 : {
2037 66 : let mut guard = self.layers.write().await;
2038 66 : guard
2039 66 : .open_mut()?
2040 66 : .finish_gc_compaction(&layer_selection, &compact_to, &self.metrics)
2041 66 : };
2042 66 : self.remote_client
2043 66 : .schedule_compaction_update(&layer_selection, &compact_to)?;
2044 :
2045 66 : drop(gc_lock);
2046 66 :
2047 66 : Ok(())
2048 78 : }
2049 : }
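Step 0 of `compact_with_gc` above selects layers in two passes: it first finds the largest `lsn_range.end` among layers whose `lsn_range.start` is at or below the gc_cutoff, and then takes every layer whose `lsn_range.end` is at or below that maximum. A minimal, self-contained sketch of that selection rule follows; `LayerDesc` here is a hypothetical stand-in with only an `lsn_range`, not the real `PersistentLayerDesc`.

```rust
use std::ops::Range;

#[derive(Debug, Clone, PartialEq)]
struct LayerDesc {
    lsn_range: Range<u64>,
}

// Two-pass selection mirroring Step 0: find the max end LSN among layers that
// start at or below the cutoff, then keep every layer ending at or below it.
fn select_layers(layers: &[LayerDesc], gc_cutoff: u64) -> Vec<LayerDesc> {
    let Some(max_layer_lsn) = layers
        .iter()
        .filter(|l| l.lsn_range.start <= gc_cutoff)
        .map(|l| l.lsn_range.end)
        .max()
    else {
        return Vec::new(); // nothing intersects or sits below the cutoff
    };
    layers
        .iter()
        .filter(|l| l.lsn_range.end <= max_layer_lsn)
        .cloned()
        .collect()
}

fn main() {
    let layers = vec![
        LayerDesc { lsn_range: 0x10..0x20 },
        LayerDesc { lsn_range: 0x20..0x60 }, // intersects the cutoff
        LayerDesc { lsn_range: 0x60..0x80 }, // entirely above, not selected
    ];
    let selected = select_layers(&layers, 0x50);
    assert_eq!(selected.len(), 2);
    println!("{selected:?}");
}
```

As the source comment notes, the second pass is what pulls in all layers (including single-key layers) that end at or below the maximum, even when they do not themselves start below the cutoff.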
2050 :
2051 : struct TimelineAdaptor {
2052 : timeline: Arc<Timeline>,
2053 :
2054 : keyspace: (Lsn, KeySpace),
2055 :
2056 : new_deltas: Vec<ResidentLayer>,
2057 : new_images: Vec<ResidentLayer>,
2058 : layers_to_delete: Vec<Arc<PersistentLayerDesc>>,
2059 : }
2060 :
2061 : impl TimelineAdaptor {
2062 0 : pub fn new(timeline: &Arc<Timeline>, keyspace: (Lsn, KeySpace)) -> Self {
2063 0 : Self {
2064 0 : timeline: timeline.clone(),
2065 0 : keyspace,
2066 0 : new_images: Vec::new(),
2067 0 : new_deltas: Vec::new(),
2068 0 : layers_to_delete: Vec::new(),
2069 0 : }
2070 0 : }
2071 :
2072 0 : pub async fn flush_updates(&mut self) -> Result<(), CompactionError> {
2073 0 : let layers_to_delete = {
2074 0 : let guard = self.timeline.layers.read().await;
2075 0 : self.layers_to_delete
2076 0 : .iter()
2077 0 : .map(|x| guard.get_from_desc(x))
2078 0 : .collect::<Vec<Layer>>()
2079 0 : };
2080 0 : self.timeline
2081 0 : .finish_compact_batch(&self.new_deltas, &self.new_images, &layers_to_delete)
2082 0 : .await?;
2083 :
2084 0 : self.timeline
2085 0 : .upload_new_image_layers(std::mem::take(&mut self.new_images))?;
2086 :
2087 0 : self.new_deltas.clear();
2088 0 : self.layers_to_delete.clear();
2089 0 : Ok(())
2090 0 : }
2091 : }
2092 :
2093 : #[derive(Clone)]
2094 : struct ResidentDeltaLayer(ResidentLayer);
2095 : #[derive(Clone)]
2096 : struct ResidentImageLayer(ResidentLayer);
2097 :
2098 : impl CompactionJobExecutor for TimelineAdaptor {
2099 : type Key = crate::repository::Key;
2100 :
2101 : type Layer = OwnArc<PersistentLayerDesc>;
2102 : type DeltaLayer = ResidentDeltaLayer;
2103 : type ImageLayer = ResidentImageLayer;
2104 :
2105 : type RequestContext = crate::context::RequestContext;
2106 :
2107 0 : fn get_shard_identity(&self) -> &ShardIdentity {
2108 0 : self.timeline.get_shard_identity()
2109 0 : }
2110 :
2111 0 : async fn get_layers(
2112 0 : &mut self,
2113 0 : key_range: &Range<Key>,
2114 0 : lsn_range: &Range<Lsn>,
2115 0 : _ctx: &RequestContext,
2116 0 : ) -> anyhow::Result<Vec<OwnArc<PersistentLayerDesc>>> {
2117 0 : self.flush_updates().await?;
2118 :
2119 0 : let guard = self.timeline.layers.read().await;
2120 0 : let layer_map = guard.layer_map()?;
2121 :
2122 0 : let result = layer_map
2123 0 : .iter_historic_layers()
2124 0 : .filter(|l| {
2125 0 : overlaps_with(&l.lsn_range, lsn_range) && overlaps_with(&l.key_range, key_range)
2126 0 : })
2127 0 : .map(OwnArc)
2128 0 : .collect();
2129 0 : Ok(result)
2130 0 : }
2131 :
2132 0 : async fn get_keyspace(
2133 0 : &mut self,
2134 0 : key_range: &Range<Key>,
2135 0 : lsn: Lsn,
2136 0 : _ctx: &RequestContext,
2137 0 : ) -> anyhow::Result<Vec<Range<Key>>> {
2138 0 : if lsn == self.keyspace.0 {
2139 0 : Ok(pageserver_compaction::helpers::intersect_keyspace(
2140 0 : &self.keyspace.1.ranges,
2141 0 : key_range,
2142 0 : ))
2143 : } else {
2144 : // The current compaction implementation only ever requests the key space
2145 : // at the compaction end LSN.
2146 0 : anyhow::bail!("keyspace not available for requested lsn");
2147 : }
2148 0 : }
2149 :
2150 0 : async fn downcast_delta_layer(
2151 0 : &self,
2152 0 : layer: &OwnArc<PersistentLayerDesc>,
2153 0 : ) -> anyhow::Result<Option<ResidentDeltaLayer>> {
2154 0 : // this is a lot more complex than a simple downcast...
2155 0 : if layer.is_delta() {
2156 0 : let l = {
2157 0 : let guard = self.timeline.layers.read().await;
2158 0 : guard.get_from_desc(layer)
2159 : };
2160 0 : let result = l.download_and_keep_resident().await?;
2161 :
2162 0 : Ok(Some(ResidentDeltaLayer(result)))
2163 : } else {
2164 0 : Ok(None)
2165 : }
2166 0 : }
2167 :
2168 0 : async fn create_image(
2169 0 : &mut self,
2170 0 : lsn: Lsn,
2171 0 : key_range: &Range<Key>,
2172 0 : ctx: &RequestContext,
2173 0 : ) -> anyhow::Result<()> {
2174 0 : Ok(self.create_image_impl(lsn, key_range, ctx).await?)
2175 0 : }
2176 :
2177 0 : async fn create_delta(
2178 0 : &mut self,
2179 0 : lsn_range: &Range<Lsn>,
2180 0 : key_range: &Range<Key>,
2181 0 : input_layers: &[ResidentDeltaLayer],
2182 0 : ctx: &RequestContext,
2183 0 : ) -> anyhow::Result<()> {
2184 0 : debug!("Create new layer {}..{}", lsn_range.start, lsn_range.end);
2185 :
2186 0 : let mut all_entries = Vec::new();
2187 0 : for dl in input_layers.iter() {
2188 0 : all_entries.extend(dl.load_keys(ctx).await?);
2189 : }
2190 :
2191 : // The current stdlib sorting implementation is designed such that it is
2192 : // particularly fast when the slice is made up of sorted sub-ranges.
2193 0 : all_entries.sort_by_key(|DeltaEntry { key, lsn, .. }| (*key, *lsn));
2194 :
2195 0 : let mut writer = DeltaLayerWriter::new(
2196 0 : self.timeline.conf,
2197 0 : self.timeline.timeline_id,
2198 0 : self.timeline.tenant_shard_id,
2199 0 : key_range.start,
2200 0 : lsn_range.clone(),
2201 0 : ctx,
2202 0 : )
2203 0 : .await?;
2204 :
2205 0 : let mut dup_values = 0;
2206 0 :
2207 0 : // This iterator walks through all key-value pairs from all the layers
2208 0 : // we're compacting, in key, LSN order.
2209 0 : let mut prev: Option<(Key, Lsn)> = None;
2210 : for &DeltaEntry {
2211 0 : key, lsn, ref val, ..
2212 0 : } in all_entries.iter()
2213 : {
2214 0 : if prev == Some((key, lsn)) {
2215 : // This is a duplicate. Skip it.
2216 : //
2217 : // It can happen if compaction is interrupted after writing some
2218 : // layers but not all, and we are compacting the range again.
2219 : // The calculations in the algorithm assume that there are no
2220 : // duplicates, so the math on targeted file size is likely off,
2221 : // and we will create smaller files than expected.
2222 0 : dup_values += 1;
2223 0 : continue;
2224 0 : }
2225 :
2226 0 : let value = val.load(ctx).await?;
2227 :
2228 0 : writer.put_value(key, lsn, value, ctx).await?;
2229 :
2230 0 : prev = Some((key, lsn));
2231 : }
2232 :
2233 0 : if dup_values > 0 {
2234 0 : warn!("delta layer created with {} duplicate values", dup_values);
2235 0 : }
2236 :
2237 0 : fail_point!("delta-layer-writer-fail-before-finish", |_| {
2238 0 : Err(anyhow::anyhow!(
2239 0 : "failpoint delta-layer-writer-fail-before-finish"
2240 0 : ))
2241 0 : });
2242 :
2243 0 : let (desc, path) = writer.finish(prev.unwrap().0.next(), ctx).await?;
2244 0 : let new_delta_layer =
2245 0 : Layer::finish_creating(self.timeline.conf, &self.timeline, desc, &path)?;
2246 :
2247 0 : self.new_deltas.push(new_delta_layer);
2248 0 : Ok(())
2249 0 : }
2250 :
2251 0 : async fn delete_layer(
2252 0 : &mut self,
2253 0 : layer: &OwnArc<PersistentLayerDesc>,
2254 0 : _ctx: &RequestContext,
2255 0 : ) -> anyhow::Result<()> {
2256 0 : self.layers_to_delete.push(layer.clone().0);
2257 0 : Ok(())
2258 0 : }
2259 : }
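`create_delta` above skips entries whose (key, lsn) equals the previous one, since such duplicates can appear when a compaction was interrupted and the range is compacted again. Below is a small standalone sketch of that deduplication over entries sorted by (key, lsn), using plain tuples rather than the real `DeltaEntry` type.

```rust
// Sketch: drop consecutive duplicates from a (key, lsn)-sorted list of entries,
// counting how many were skipped (mirroring the `dup_values` counter above).
fn dedup_sorted(entries: &mut Vec<(u64, u64)>) -> usize {
    entries.sort_by_key(|&(key, lsn)| (key, lsn));
    let mut dup_values = 0;
    let mut prev: Option<(u64, u64)> = None;
    entries.retain(|&entry| {
        if prev == Some(entry) {
            dup_values += 1; // duplicate left over from an interrupted compaction
            false
        } else {
            prev = Some(entry);
            true
        }
    });
    dup_values
}

fn main() {
    let mut entries = vec![(1, 10), (1, 10), (1, 20), (2, 10)];
    let dups = dedup_sorted(&mut entries);
    assert_eq!(dups, 1);
    assert_eq!(entries, vec![(1, 10), (1, 20), (2, 10)]);
}
```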
2260 :
2261 : impl TimelineAdaptor {
2262 0 : async fn create_image_impl(
2263 0 : &mut self,
2264 0 : lsn: Lsn,
2265 0 : key_range: &Range<Key>,
2266 0 : ctx: &RequestContext,
2267 0 : ) -> Result<(), CreateImageLayersError> {
2268 0 : let timer = self.timeline.metrics.create_images_time_histo.start_timer();
2269 :
2270 0 : let image_layer_writer = ImageLayerWriter::new(
2271 0 : self.timeline.conf,
2272 0 : self.timeline.timeline_id,
2273 0 : self.timeline.tenant_shard_id,
2274 0 : key_range,
2275 0 : lsn,
2276 0 : ctx,
2277 0 : )
2278 0 : .await?;
2279 :
2280 0 : fail_point!("image-layer-writer-fail-before-finish", |_| {
2281 0 : Err(CreateImageLayersError::Other(anyhow::anyhow!(
2282 0 : "failpoint image-layer-writer-fail-before-finish"
2283 0 : )))
2284 0 : });
2285 :
2286 0 : let keyspace = KeySpace {
2287 0 : ranges: self.get_keyspace(key_range, lsn, ctx).await?,
2288 : };
2289 : // TODO set proper (stateful) start. The create_image_layer_for_rel_blocks function mostly
2290 0 : let start = Key::MIN;
2291 : let ImageLayerCreationOutcome {
2292 0 : image,
2293 : next_start_key: _,
2294 0 : } = self
2295 0 : .timeline
2296 0 : .create_image_layer_for_rel_blocks(
2297 0 : &keyspace,
2298 0 : image_layer_writer,
2299 0 : lsn,
2300 0 : ctx,
2301 0 : key_range.clone(),
2302 0 : start,
2303 0 : )
2304 0 : .await?;
2305 :
2306 0 : if let Some(image_layer) = image {
2307 0 : self.new_images.push(image_layer);
2308 0 : }
2309 :
2310 0 : timer.stop_and_record();
2311 0 :
2312 0 : Ok(())
2313 0 : }
2314 : }
2315 :
2316 : impl CompactionRequestContext for crate::context::RequestContext {}
2317 :
2318 : #[derive(Debug, Clone)]
2319 : pub struct OwnArc<T>(pub Arc<T>);
2320 :
2321 : impl<T> Deref for OwnArc<T> {
2322 : type Target = <Arc<T> as Deref>::Target;
2323 0 : fn deref(&self) -> &Self::Target {
2324 0 : &self.0
2325 0 : }
2326 : }
2327 :
2328 : impl<T> AsRef<T> for OwnArc<T> {
2329 0 : fn as_ref(&self) -> &T {
2330 0 : self.0.as_ref()
2331 0 : }
2332 : }
2333 :
2334 : impl CompactionLayer<Key> for OwnArc<PersistentLayerDesc> {
2335 0 : fn key_range(&self) -> &Range<Key> {
2336 0 : &self.key_range
2337 0 : }
2338 0 : fn lsn_range(&self) -> &Range<Lsn> {
2339 0 : &self.lsn_range
2340 0 : }
2341 0 : fn file_size(&self) -> u64 {
2342 0 : self.file_size
2343 0 : }
2344 0 : fn short_id(&self) -> std::string::String {
2345 0 : self.as_ref().short_id().to_string()
2346 0 : }
2347 0 : fn is_delta(&self) -> bool {
2348 0 : self.as_ref().is_delta()
2349 0 : }
2350 : }
2351 :
2352 : impl CompactionLayer<Key> for OwnArc<DeltaLayer> {
2353 0 : fn key_range(&self) -> &Range<Key> {
2354 0 : &self.layer_desc().key_range
2355 0 : }
2356 0 : fn lsn_range(&self) -> &Range<Lsn> {
2357 0 : &self.layer_desc().lsn_range
2358 0 : }
2359 0 : fn file_size(&self) -> u64 {
2360 0 : self.layer_desc().file_size
2361 0 : }
2362 0 : fn short_id(&self) -> std::string::String {
2363 0 : self.layer_desc().short_id().to_string()
2364 0 : }
2365 0 : fn is_delta(&self) -> bool {
2366 0 : true
2367 0 : }
2368 : }
2369 :
2370 : use crate::tenant::timeline::DeltaEntry;
2371 :
2372 : impl CompactionLayer<Key> for ResidentDeltaLayer {
2373 0 : fn key_range(&self) -> &Range<Key> {
2374 0 : &self.0.layer_desc().key_range
2375 0 : }
2376 0 : fn lsn_range(&self) -> &Range<Lsn> {
2377 0 : &self.0.layer_desc().lsn_range
2378 0 : }
2379 0 : fn file_size(&self) -> u64 {
2380 0 : self.0.layer_desc().file_size
2381 0 : }
2382 0 : fn short_id(&self) -> std::string::String {
2383 0 : self.0.layer_desc().short_id().to_string()
2384 0 : }
2385 0 : fn is_delta(&self) -> bool {
2386 0 : true
2387 0 : }
2388 : }
2389 :
2390 : impl CompactionDeltaLayer<TimelineAdaptor> for ResidentDeltaLayer {
2391 : type DeltaEntry<'a> = DeltaEntry<'a>;
2392 :
2393 0 : async fn load_keys<'a>(&self, ctx: &RequestContext) -> anyhow::Result<Vec<DeltaEntry<'_>>> {
2394 0 : self.0.load_keys(ctx).await
2395 0 : }
2396 : }
2397 :
2398 : impl CompactionLayer<Key> for ResidentImageLayer {
2399 0 : fn key_range(&self) -> &Range<Key> {
2400 0 : &self.0.layer_desc().key_range
2401 0 : }
2402 0 : fn lsn_range(&self) -> &Range<Lsn> {
2403 0 : &self.0.layer_desc().lsn_range
2404 0 : }
2405 0 : fn file_size(&self) -> u64 {
2406 0 : self.0.layer_desc().file_size
2407 0 : }
2408 0 : fn short_id(&self) -> std::string::String {
2409 0 : self.0.layer_desc().short_id().to_string()
2410 0 : }
2411 0 : fn is_delta(&self) -> bool {
2412 0 : false
2413 0 : }
2414 : }
2415 : impl CompactionImageLayer<TimelineAdaptor> for ResidentImageLayer {}