Line data Source code
1 : pub(crate) mod analysis;
2 : pub(crate) mod compaction;
3 : pub mod delete;
4 : pub(crate) mod detach_ancestor;
5 : mod eviction_task;
6 : pub(crate) mod handle;
7 : mod heatmap_layers_downloader;
8 : pub(crate) mod import_pgdata;
9 : mod init;
10 : pub mod layer_manager;
11 : pub(crate) mod logical_size;
12 : pub mod offload;
13 : pub mod span;
14 : pub mod uninit;
15 : mod walreceiver;
16 :
17 : use std::array;
18 : use std::cmp::{max, min};
19 : use std::collections::btree_map::Entry;
20 : use std::collections::{BTreeMap, HashMap, HashSet};
21 : use std::ops::{ControlFlow, Deref, Range};
22 : use std::sync::atomic::{AtomicBool, AtomicU64, Ordering as AtomicOrdering};
23 : use std::sync::{Arc, Mutex, OnceLock, RwLock, Weak};
24 : use std::time::{Duration, Instant, SystemTime};
25 :
26 : use crate::PERF_TRACE_TARGET;
27 : use crate::walredo::RedoAttemptType;
28 : use anyhow::{Context, Result, anyhow, bail, ensure};
29 : use arc_swap::{ArcSwap, ArcSwapOption};
30 : use bytes::Bytes;
31 : use camino::Utf8Path;
32 : use chrono::{DateTime, Utc};
33 : use compaction::{CompactionOutcome, GcCompactionCombinedSettings};
34 : use enumset::EnumSet;
35 : use fail::fail_point;
36 : use futures::stream::FuturesUnordered;
37 : use futures::{FutureExt, StreamExt};
38 : use handle::ShardTimelineId;
39 : use layer_manager::Shutdown;
40 : use offload::OffloadError;
41 : use once_cell::sync::Lazy;
42 : use pageserver_api::config::tenant_conf_defaults::DEFAULT_PITR_INTERVAL;
43 : use pageserver_api::key::{
44 : KEY_SIZE, Key, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX, NON_INHERITED_RANGE,
45 : SPARSE_RANGE,
46 : };
47 : use pageserver_api::keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning};
48 : use pageserver_api::models::{
49 : CompactKeyRange, CompactLsnRange, CompactionAlgorithm, CompactionAlgorithmSettings,
50 : DetachBehavior, DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest,
51 : EvictionPolicy, InMemoryLayerInfo, LayerMapInfo, LsnLease, PageTraceEvent, RelSizeMigration,
52 : TimelineState,
53 : };
54 : use pageserver_api::reltag::{BlockNumber, RelTag};
55 : use pageserver_api::shard::{ShardIdentity, ShardIndex, ShardNumber, TenantShardId};
56 : #[cfg(test)]
57 : use pageserver_api::value::Value;
58 : use postgres_connection::PgConnectionConfig;
59 : use postgres_ffi::v14::xlog_utils;
60 : use postgres_ffi::{WAL_SEGMENT_SIZE, to_pg_timestamp};
61 : use rand::Rng;
62 : use remote_storage::DownloadError;
63 : use serde_with::serde_as;
64 : use storage_broker::BrokerClientChannel;
65 : use tokio::runtime::Handle;
66 : use tokio::sync::mpsc::Sender;
67 : use tokio::sync::{Notify, oneshot, watch};
68 : use tokio_util::sync::CancellationToken;
69 : use tracing::*;
70 : use utils::generation::Generation;
71 : use utils::guard_arc_swap::GuardArcSwap;
72 : use utils::id::TimelineId;
73 : use utils::logging::{MonitorSlowFutureCallback, monitor_slow_future};
74 : use utils::lsn::{AtomicLsn, Lsn, RecordLsn};
75 : use utils::postgres_client::PostgresClientProtocol;
76 : use utils::rate_limit::RateLimit;
77 : use utils::seqwait::SeqWait;
78 : use utils::simple_rcu::{Rcu, RcuReadGuard};
79 : use utils::sync::gate::{Gate, GateGuard};
80 : use utils::{completion, critical, fs_ext, pausable_failpoint};
81 : use wal_decoder::serialized_batch::{SerializedValueBatch, ValueMeta};
82 :
83 : use self::delete::DeleteTimelineFlow;
84 : pub(super) use self::eviction_task::EvictionTaskTenantState;
85 : use self::eviction_task::EvictionTaskTimelineState;
86 : use self::layer_manager::LayerManager;
87 : use self::logical_size::LogicalSize;
88 : use self::walreceiver::{WalReceiver, WalReceiverConf};
89 : use super::remote_timeline_client::RemoteTimelineClient;
90 : use super::remote_timeline_client::index::{GcCompactionState, IndexPart};
91 : use super::secondary::heatmap::HeatMapLayer;
92 : use super::storage_layer::{LayerFringe, LayerVisibilityHint, ReadableLayer};
93 : use super::tasks::log_compaction_error;
94 : use super::upload_queue::NotInitialized;
95 : use super::{
96 : AttachedTenantConf, GcError, HeatMapTimeline, MaybeOffloaded,
97 : debug_assert_current_span_has_tenant_and_timeline_id,
98 : };
99 : use crate::aux_file::AuxFileSizeEstimator;
100 : use crate::config::PageServerConf;
101 : use crate::context::{
102 : DownloadBehavior, PerfInstrumentFutureExt, RequestContext, RequestContextBuilder,
103 : };
104 : use crate::disk_usage_eviction_task::{DiskUsageEvictionInfo, EvictionCandidate, finite_f32};
105 : use crate::keyspace::{KeyPartitioning, KeySpace};
106 : use crate::l0_flush::{self, L0FlushGlobalState};
107 : use crate::metrics::{
108 : DELTAS_PER_READ_GLOBAL, LAYERS_PER_READ_AMORTIZED_GLOBAL, LAYERS_PER_READ_BATCH_GLOBAL,
109 : LAYERS_PER_READ_GLOBAL, ScanLatencyOngoingRecording, TimelineMetrics,
110 : };
111 : use crate::page_service::TenantManagerTypes;
112 : use crate::pgdatadir_mapping::{
113 : CalculateLogicalSizeError, CollectKeySpaceError, DirectoryKind, LsnForTimestamp,
114 : MAX_AUX_FILE_V2_DELTAS, MetricsUpdate,
115 : };
116 : use crate::task_mgr::TaskKind;
117 : use crate::tenant::config::AttachmentMode;
118 : use crate::tenant::gc_result::GcResult;
119 : use crate::tenant::layer_map::LayerMap;
120 : use crate::tenant::metadata::TimelineMetadata;
121 : use crate::tenant::storage_layer::delta_layer::DeltaEntry;
122 : use crate::tenant::storage_layer::inmemory_layer::IndexEntry;
123 : use crate::tenant::storage_layer::{
124 : AsLayerDesc, BatchLayerWriter, DeltaLayerWriter, EvictionError, ImageLayerName,
125 : ImageLayerWriter, InMemoryLayer, IoConcurrency, Layer, LayerAccessStatsReset, LayerName,
126 : PersistentLayerDesc, PersistentLayerKey, ResidentLayer, ValueReconstructSituation,
127 : ValueReconstructState, ValuesReconstructState,
128 : };
129 : use crate::tenant::tasks::BackgroundLoopKind;
130 : use crate::tenant::timeline::logical_size::CurrentLogicalSize;
131 : use crate::virtual_file::{MaybeFatalIo, VirtualFile};
132 : use crate::walingest::WalLagCooldown;
133 : use crate::{ZERO_PAGE, task_mgr, walredo};
134 :
135 : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
136 : pub(crate) enum FlushLoopState {
137 : NotStarted,
138 : Running {
139 : #[cfg(test)]
140 : expect_initdb_optimization: bool,
141 : #[cfg(test)]
142 : initdb_optimization_count: usize,
143 : },
144 : Exited,
145 : }
146 :
147 : #[derive(Debug, Copy, Clone, PartialEq, Eq)]
148 : pub enum ImageLayerCreationMode {
149 : /// Try to create image layers based on `time_for_new_image_layer`. Used in compaction code path.
150 : Try,
151 : /// Force creating the image layers if possible. For now, no image layers will be created
152 : /// for metadata keys. Used in compaction code path with force flag enabled.
153 : Force,
154 : /// Initial ingestion of the data, and no data should be dropped in this function. This
155 : /// means that no metadata keys should be included in the partitions. Used in flush frozen layer
156 : /// code path.
157 : Initial,
158 : }
159 :
160 : #[derive(Clone, Debug, Default)]
161 : pub enum LastImageLayerCreationStatus {
162 : Incomplete {
163 : /// The last key of the partition (exclusive) that was processed in the last
164 : /// image layer creation attempt. We will continue from this key in the next
165 : /// attempt.
166 : last_key: Key,
167 : },
168 : Complete,
169 : #[default]
170 : Initial,
171 : }
172 :
173 : impl std::fmt::Display for ImageLayerCreationMode {
174 3480 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
175 3480 : write!(f, "{:?}", self)
176 3480 : }
177 : }
178 :
179 : /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things.
180 : /// Can be removed after all refactors are done.
181 168 : fn drop_rlock<T>(rlock: tokio::sync::RwLockReadGuard<T>) {
182 168 : drop(rlock)
183 168 : }
184 :
185 : /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things.
186 : /// Can be removed after all refactors are done.
187 3648 : fn drop_wlock<T>(rlock: tokio::sync::RwLockWriteGuard<'_, T>) {
188 3648 : drop(rlock)
189 3648 : }
190 :
191 : /// The outward-facing resources required to build a Timeline
192 : pub struct TimelineResources {
193 : pub remote_client: RemoteTimelineClient,
194 : pub pagestream_throttle: Arc<crate::tenant::throttle::Throttle>,
195 : pub pagestream_throttle_metrics: Arc<crate::metrics::tenant_throttling::Pagestream>,
196 : pub l0_compaction_trigger: Arc<Notify>,
197 : pub l0_flush_global_state: l0_flush::L0FlushGlobalState,
198 : }
199 :
200 : /// The relation size cache caches relation sizes at the end of the timeline. It speeds up WAL
201 : /// ingestion considerably, because WAL ingestion needs to check on most records if the record
202 : /// implicitly extends the relation. At startup, `complete_as_of` is initialized to the current end
203 : /// of the timeline (disk_consistent_lsn). It's used on reads of relation sizes to check if the
204 : /// value can be used to also update the cache, see [`Timeline::update_cached_rel_size`].
205 : pub(crate) struct RelSizeCache {
206 : pub(crate) complete_as_of: Lsn,
207 : pub(crate) map: HashMap<RelTag, (Lsn, BlockNumber)>,
208 : }
209 :
210 : pub struct Timeline {
211 : pub(crate) conf: &'static PageServerConf,
212 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
213 :
214 : myself: Weak<Self>,
215 :
216 : pub(crate) tenant_shard_id: TenantShardId,
217 : pub timeline_id: TimelineId,
218 :
219 : /// The generation of the tenant that instantiated us: this is used for safety when writing remote objects.
220 : /// Never changes for the lifetime of this [`Timeline`] object.
221 : ///
222 : /// This duplicates the generation stored in LocationConf, but that structure is mutable:
223 : /// this copy enforces the invariant that generatio doesn't change during a Tenant's lifetime.
224 : pub(crate) generation: Generation,
225 :
226 : /// The detailed sharding information from our parent Tenant. This enables us to map keys
227 : /// to shards, and is constant through the lifetime of this Timeline.
228 : shard_identity: ShardIdentity,
229 :
230 : pub pg_version: u32,
231 :
232 : /// The tuple has two elements.
233 : /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote).
234 : /// 2. `LayerMap`, the acceleration data structure for `get_reconstruct_data`.
235 : ///
236 : /// `LayerMap` maps out the `(PAGE,LSN) / (KEY,LSN)` space, which is composed of `(KeyRange, LsnRange)` rectangles.
237 : /// We describe these rectangles through the `PersistentLayerDesc` struct.
238 : ///
239 : /// When we want to reconstruct a page, we first find the `PersistentLayerDesc`'s that we need for page reconstruction,
240 : /// using `LayerMap`. Then, we use `LayerFileManager` to get the `PersistentLayer`'s that correspond to the
241 : /// `PersistentLayerDesc`'s.
242 : ///
243 : /// Hence, it's important to keep things coherent. The `LayerFileManager` must always have an entry for all
244 : /// `PersistentLayerDesc`'s in the `LayerMap`. If it doesn't, `LayerFileManager::get_from_desc` will panic at
245 : /// runtime, e.g., during page reconstruction.
246 : ///
247 : /// In the future, we'll be able to split up the tuple of LayerMap and `LayerFileManager`,
248 : /// so that e.g. on-demand-download/eviction, and layer spreading, can operate just on `LayerFileManager`.
249 : pub(crate) layers: tokio::sync::RwLock<LayerManager>,
250 :
251 : last_freeze_at: AtomicLsn,
252 : // Atomic would be more appropriate here.
253 : last_freeze_ts: RwLock<Instant>,
254 :
255 : pub(crate) standby_horizon: AtomicLsn,
256 :
257 : // WAL redo manager. `None` only for broken tenants.
258 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
259 :
260 : /// Remote storage client.
261 : /// See [`remote_timeline_client`](super::remote_timeline_client) module comment for details.
262 : pub(crate) remote_client: Arc<RemoteTimelineClient>,
263 :
264 : // What page versions do we hold in the repository? If we get a
265 : // request > last_record_lsn, we need to wait until we receive all
266 : // the WAL up to the request. The SeqWait provides functions for
267 : // that. TODO: If we get a request for an old LSN, such that the
268 : // versions have already been garbage collected away, we should
269 : // throw an error, but we don't track that currently.
270 : //
271 : // last_record_lsn.load().last points to the end of last processed WAL record.
272 : //
273 : // We also remember the starting point of the previous record in
274 : // 'last_record_lsn.load().prev'. It's used to set the xl_prev pointer of the
275 : // first WAL record when the node is started up. But here, we just
276 : // keep track of it.
277 : last_record_lsn: SeqWait<RecordLsn, Lsn>,
278 :
279 : // All WAL records have been processed and stored durably on files on
280 : // local disk, up to this LSN. On crash and restart, we need to re-process
281 : // the WAL starting from this point.
282 : //
283 : // Some later WAL records might have been processed and also flushed to disk
284 : // already, so don't be surprised to see some, but there's no guarantee on
285 : // them yet.
286 : disk_consistent_lsn: AtomicLsn,
287 :
288 : // Parent timeline that this timeline was branched from, and the LSN
289 : // of the branch point.
290 : ancestor_timeline: Option<Arc<Timeline>>,
291 : ancestor_lsn: Lsn,
292 :
293 : // The LSN of gc-compaction that was last applied to this timeline.
294 : gc_compaction_state: ArcSwap<Option<GcCompactionState>>,
295 :
296 : pub(crate) metrics: Arc<TimelineMetrics>,
297 :
298 : // `Timeline` doesn't write these metrics itself, but it manages the lifetime. Code
299 : // in `crate::page_service` writes these metrics.
300 : pub(crate) query_metrics: crate::metrics::SmgrQueryTimePerTimeline,
301 :
302 : directory_metrics_inited: [AtomicBool; DirectoryKind::KINDS_NUM],
303 : directory_metrics: [AtomicU64; DirectoryKind::KINDS_NUM],
304 :
305 : /// Ensures layers aren't frozen by checkpointer between
306 : /// [`Timeline::get_layer_for_write`] and layer reads.
307 : /// Locked automatically by [`TimelineWriter`] and checkpointer.
308 : /// Must always be acquired before the layer map/individual layer lock
309 : /// to avoid deadlock.
310 : ///
311 : /// The state is cleared upon freezing.
312 : write_lock: tokio::sync::Mutex<Option<TimelineWriterState>>,
313 :
314 : /// Used to avoid multiple `flush_loop` tasks running
315 : pub(super) flush_loop_state: Mutex<FlushLoopState>,
316 :
317 : /// layer_flush_start_tx can be used to wake up the layer-flushing task.
318 : /// - The u64 value is a counter, incremented every time a new flush cycle is requested.
319 : /// The flush cycle counter is sent back on the layer_flush_done channel when
320 : /// the flush finishes. You can use that to wait for the flush to finish.
321 : /// - The LSN is updated to max() of its current value and the latest disk_consistent_lsn
322 : /// read by whoever sends an update
323 : layer_flush_start_tx: tokio::sync::watch::Sender<(u64, Lsn)>,
324 : /// to be notified when layer flushing has finished, subscribe to the layer_flush_done channel
325 : layer_flush_done_tx: tokio::sync::watch::Sender<(u64, Result<(), FlushLayerError>)>,
326 :
327 : // The LSN at which we have executed GC: whereas [`Self::gc_info`] records the LSN at which
328 : // we _intend_ to GC (i.e. the PITR cutoff), this LSN records where we actually last did it.
329 : // Because PITR interval is mutable, it's possible for this LSN to be earlier or later than
330 : // the planned GC cutoff.
331 : pub applied_gc_cutoff_lsn: Rcu<Lsn>,
332 :
333 : pub(crate) gc_compaction_layer_update_lock: tokio::sync::RwLock<()>,
334 :
335 : // List of child timelines and their branch points. This is needed to avoid
336 : // garbage collecting data that is still needed by the child timelines.
337 : pub(crate) gc_info: std::sync::RwLock<GcInfo>,
338 :
339 : pub(crate) last_image_layer_creation_status: ArcSwap<LastImageLayerCreationStatus>,
340 :
341 : // It may change across major versions so for simplicity
342 : // keep it after running initdb for a timeline.
343 : // It is needed in checks when we want to error on some operations
344 : // when they are requested for pre-initdb lsn.
345 : // It can be unified with latest_gc_cutoff_lsn under some "first_valid_lsn",
346 : // though let's keep them both for better error visibility.
347 : pub initdb_lsn: Lsn,
348 :
349 : /// The repartitioning result. Allows a single writer and multiple readers.
350 : pub(crate) partitioning: GuardArcSwap<((KeyPartitioning, SparseKeyPartitioning), Lsn)>,
351 :
352 : /// Configuration: how often should the partitioning be recalculated.
353 : repartition_threshold: u64,
354 :
355 : last_image_layer_creation_check_at: AtomicLsn,
356 : last_image_layer_creation_check_instant: std::sync::Mutex<Option<Instant>>,
357 :
358 : /// Current logical size of the "datadir", at the last LSN.
359 : current_logical_size: LogicalSize,
360 :
361 : /// Information about the last processed message by the WAL receiver,
362 : /// or None if WAL receiver has not received anything for this timeline
363 : /// yet.
364 : pub last_received_wal: Mutex<Option<WalReceiverInfo>>,
365 : pub walreceiver: Mutex<Option<WalReceiver>>,
366 :
367 : /// Relation size cache
368 : pub(crate) rel_size_cache: RwLock<RelSizeCache>,
369 :
370 : download_all_remote_layers_task_info: RwLock<Option<DownloadRemoteLayersTaskInfo>>,
371 :
372 : state: watch::Sender<TimelineState>,
373 :
374 : /// Prevent two tasks from deleting the timeline at the same time. If held, the
375 : /// timeline is being deleted. If 'true', the timeline has already been deleted.
376 : pub delete_progress: TimelineDeleteProgress,
377 :
378 : eviction_task_timeline_state: tokio::sync::Mutex<EvictionTaskTimelineState>,
379 :
380 : /// Load or creation time information about the disk_consistent_lsn and when the loading
381 : /// happened. Used for consumption metrics.
382 : pub(crate) loaded_at: (Lsn, SystemTime),
383 :
384 : /// Gate to prevent shutdown completing while I/O is still happening to this timeline's data
385 : pub(crate) gate: Gate,
386 :
387 : /// Cancellation token scoped to this timeline: anything doing long-running work relating
388 : /// to the timeline should drop out when this token fires.
389 : pub(crate) cancel: CancellationToken,
390 :
391 : /// Make sure we only have one running compaction at a time in tests.
392 : ///
393 : /// Must only be taken in two places:
394 : /// - [`Timeline::compact`] (this file)
395 : /// - [`delete::delete_local_timeline_directory`]
396 : ///
397 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
398 : compaction_lock: tokio::sync::Mutex<()>,
399 :
400 : /// If true, the last compaction failed.
401 : compaction_failed: AtomicBool,
402 :
403 : /// Notifies the tenant compaction loop that there is pending L0 compaction work.
404 : l0_compaction_trigger: Arc<Notify>,
405 :
406 : /// Make sure we only have one running gc at a time.
407 : ///
408 : /// Must only be taken in two places:
409 : /// - [`Timeline::gc`] (this file)
410 : /// - [`delete::delete_local_timeline_directory`]
411 : ///
412 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
413 : gc_lock: tokio::sync::Mutex<()>,
414 :
415 : /// Cloned from [`super::TenantShard::pagestream_throttle`] on construction.
416 : pub(crate) pagestream_throttle: Arc<crate::tenant::throttle::Throttle>,
417 :
418 : /// Size estimator for aux file v2
419 : pub(crate) aux_file_size_estimator: AuxFileSizeEstimator,
420 :
421 : /// Some test cases directly place keys into the timeline without actually modifying the directory
422 : /// keys (i.e., DB_DIR). The test cases creating such keys will put the keyspaces here, so that
423 : /// these keys won't get garbage-collected during compaction/GC. This field only modifies the dense
424 : /// keyspace return value of `collect_keyspace`. For sparse keyspaces, use AUX keys for testing, and
425 : /// in the future, add `extra_test_sparse_keyspace` if necessary.
426 : #[cfg(test)]
427 : pub(crate) extra_test_dense_keyspace: ArcSwap<KeySpace>,
428 :
429 : pub(crate) l0_flush_global_state: L0FlushGlobalState,
430 :
431 : pub(crate) handles: handle::PerTimelineState<TenantManagerTypes>,
432 :
433 : pub(crate) attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
434 :
435 : /// Cf. [`crate::tenant::CreateTimelineIdempotency`].
436 : pub(crate) create_idempotency: crate::tenant::CreateTimelineIdempotency,
437 :
438 : /// If Some, collects GetPage metadata for an ongoing PageTrace.
439 : pub(crate) page_trace: ArcSwapOption<Sender<PageTraceEvent>>,
440 :
441 : pub(super) previous_heatmap: ArcSwapOption<PreviousHeatmap>,
442 :
443 : /// May host a background Tokio task which downloads all the layers from the current
444 : /// heatmap on demand.
445 : heatmap_layers_downloader: Mutex<Option<heatmap_layers_downloader::HeatmapLayersDownloader>>,
446 :
447 : pub(crate) rel_size_v2_status: ArcSwapOption<RelSizeMigration>,
448 :
449 : wait_lsn_log_slow: tokio::sync::Semaphore,
450 : }
451 :
452 : pub(crate) enum PreviousHeatmap {
453 : Active {
454 : heatmap: HeatMapTimeline,
455 : read_at: std::time::Instant,
456 : // End LSN covered by the heatmap if known
457 : end_lsn: Option<Lsn>,
458 : },
459 : Obsolete,
460 : }
461 :
462 : pub type TimelineDeleteProgress = Arc<tokio::sync::Mutex<DeleteTimelineFlow>>;
463 :
464 : pub struct WalReceiverInfo {
465 : pub wal_source_connconf: PgConnectionConfig,
466 : pub last_received_msg_lsn: Lsn,
467 : pub last_received_msg_ts: u128,
468 : }
469 :
470 : /// Information about how much history needs to be retained, needed by
471 : /// Garbage Collection.
472 : #[derive(Default)]
473 : pub(crate) struct GcInfo {
474 : /// Specific LSNs that are needed.
475 : ///
476 : /// Currently, this includes all points where child branches have
477 : /// been forked off from. In the future, could also include
478 : /// explicit user-defined snapshot points.
479 : pub(crate) retain_lsns: Vec<(Lsn, TimelineId, MaybeOffloaded)>,
480 :
481 : /// The cutoff coordinates, which are combined by selecting the minimum.
482 : pub(crate) cutoffs: GcCutoffs,
483 :
484 : /// Leases granted to particular LSNs.
485 : pub(crate) leases: BTreeMap<Lsn, LsnLease>,
486 :
487 : /// Whether our branch point is within our ancestor's PITR interval (for cost estimation)
488 : pub(crate) within_ancestor_pitr: bool,
489 : }
490 :
491 : impl GcInfo {
492 1848 : pub(crate) fn min_cutoff(&self) -> Lsn {
493 1848 : self.cutoffs.select_min()
494 1848 : }
495 :
496 1428 : pub(super) fn insert_child(
497 1428 : &mut self,
498 1428 : child_id: TimelineId,
499 1428 : child_lsn: Lsn,
500 1428 : is_offloaded: MaybeOffloaded,
501 1428 : ) {
502 1428 : self.retain_lsns.push((child_lsn, child_id, is_offloaded));
503 1428 : self.retain_lsns.sort_by_key(|i| i.0);
504 1428 : }
505 :
506 24 : pub(super) fn remove_child_maybe_offloaded(
507 24 : &mut self,
508 24 : child_id: TimelineId,
509 24 : maybe_offloaded: MaybeOffloaded,
510 24 : ) -> bool {
511 24 : // Remove at most one element. Needed for correctness if there is two live `Timeline` objects referencing
512 24 : // the same timeline. Shouldn't but maybe can occur when Arc's live longer than intended.
513 24 : let mut removed = false;
514 36 : self.retain_lsns.retain(|i| {
515 36 : if removed {
516 12 : return true;
517 24 : }
518 24 : let remove = i.1 == child_id && i.2 == maybe_offloaded;
519 24 : removed |= remove;
520 24 : !remove
521 36 : });
522 24 : removed
523 24 : }
524 :
525 24 : pub(super) fn remove_child_not_offloaded(&mut self, child_id: TimelineId) -> bool {
526 24 : self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::No)
527 24 : }
528 :
529 0 : pub(super) fn remove_child_offloaded(&mut self, child_id: TimelineId) -> bool {
530 0 : self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::Yes)
531 0 : }
532 1428 : pub(crate) fn lsn_covered_by_lease(&self, lsn: Lsn) -> bool {
533 1428 : self.leases.contains_key(&lsn)
534 1428 : }
535 : }
536 :
537 : /// The `GcInfo` component describing which Lsns need to be retained. Functionally, this
538 : /// is a single number (the oldest LSN which we must retain), but it internally distinguishes
539 : /// between time-based and space-based retention for observability and consumption metrics purposes.
540 : #[derive(Debug, Clone)]
541 : pub(crate) struct GcCutoffs {
542 : /// Calculated from the [`pageserver_api::models::TenantConfig::gc_horizon`], this LSN indicates how much
543 : /// history we must keep to retain a specified number of bytes of WAL.
544 : pub(crate) space: Lsn,
545 :
546 : /// Calculated from [`pageserver_api::models::TenantConfig::pitr_interval`], this LSN indicates how much
547 : /// history we must keep to enable reading back at least the PITR interval duration.
548 : pub(crate) time: Lsn,
549 : }
550 :
551 : impl Default for GcCutoffs {
552 2784 : fn default() -> Self {
553 2784 : Self {
554 2784 : space: Lsn::INVALID,
555 2784 : time: Lsn::INVALID,
556 2784 : }
557 2784 : }
558 : }
559 :
560 : impl GcCutoffs {
561 1848 : fn select_min(&self) -> Lsn {
562 1848 : std::cmp::min(self.space, self.time)
563 1848 : }
564 : }
565 :
566 : pub(crate) struct TimelineVisitOutcome {
567 : completed_keyspace: KeySpace,
568 : image_covered_keyspace: KeySpace,
569 : }
570 :
571 : /// An error happened in a get() operation.
572 : #[derive(thiserror::Error, Debug)]
573 : pub(crate) enum PageReconstructError {
574 : #[error(transparent)]
575 : Other(anyhow::Error),
576 :
577 : #[error("Ancestor LSN wait error: {0}")]
578 : AncestorLsnTimeout(WaitLsnError),
579 :
580 : #[error("timeline shutting down")]
581 : Cancelled,
582 :
583 : /// An error happened replaying WAL records
584 : #[error(transparent)]
585 : WalRedo(anyhow::Error),
586 :
587 : #[error("{0}")]
588 : MissingKey(Box<MissingKeyError>),
589 : }
590 :
591 : impl From<anyhow::Error> for PageReconstructError {
592 0 : fn from(value: anyhow::Error) -> Self {
593 0 : // with walingest.rs many PageReconstructError are wrapped in as anyhow::Error
594 0 : match value.downcast::<PageReconstructError>() {
595 0 : Ok(pre) => pre,
596 0 : Err(other) => PageReconstructError::Other(other),
597 : }
598 0 : }
599 : }
600 :
601 : impl From<utils::bin_ser::DeserializeError> for PageReconstructError {
602 0 : fn from(value: utils::bin_ser::DeserializeError) -> Self {
603 0 : PageReconstructError::Other(anyhow::Error::new(value).context("deserialization failure"))
604 0 : }
605 : }
606 :
607 : impl From<layer_manager::Shutdown> for PageReconstructError {
608 0 : fn from(_: layer_manager::Shutdown) -> Self {
609 0 : PageReconstructError::Cancelled
610 0 : }
611 : }
612 :
613 : impl GetVectoredError {
614 : #[cfg(test)]
615 36 : pub(crate) fn is_missing_key_error(&self) -> bool {
616 36 : matches!(self, Self::MissingKey(_))
617 36 : }
618 : }
619 :
620 : impl From<layer_manager::Shutdown> for GetVectoredError {
621 0 : fn from(_: layer_manager::Shutdown) -> Self {
622 0 : GetVectoredError::Cancelled
623 0 : }
624 : }
625 :
626 : /// A layer identifier when used in the [`ReadPath`] structure. This enum is for observability purposes
627 : /// only and not used by the "real read path".
628 : pub enum ReadPathLayerId {
629 : PersistentLayer(PersistentLayerKey),
630 : InMemoryLayer(Range<Lsn>),
631 : }
632 :
633 : impl std::fmt::Display for ReadPathLayerId {
634 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
635 0 : match self {
636 0 : ReadPathLayerId::PersistentLayer(key) => write!(f, "{}", key),
637 0 : ReadPathLayerId::InMemoryLayer(range) => {
638 0 : write!(f, "in-mem {}..{}", range.start, range.end)
639 : }
640 : }
641 0 : }
642 : }
643 : pub struct ReadPath {
644 : keyspace: KeySpace,
645 : lsn: Lsn,
646 : path: Vec<(ReadPathLayerId, KeySpace, Range<Lsn>)>,
647 : }
648 :
649 : impl ReadPath {
650 3778701 : pub fn new(keyspace: KeySpace, lsn: Lsn) -> Self {
651 3778701 : Self {
652 3778701 : keyspace,
653 3778701 : lsn,
654 3778701 : path: Vec::new(),
655 3778701 : }
656 3778701 : }
657 :
658 5314966 : pub fn record_layer_visit(
659 5314966 : &mut self,
660 5314966 : layer_to_read: &ReadableLayer,
661 5314966 : keyspace_to_read: &KeySpace,
662 5314966 : lsn_range: &Range<Lsn>,
663 5314966 : ) {
664 5314966 : let id = match layer_to_read {
665 1597760 : ReadableLayer::PersistentLayer(layer) => {
666 1597760 : ReadPathLayerId::PersistentLayer(layer.layer_desc().key())
667 : }
668 3717206 : ReadableLayer::InMemoryLayer(layer) => {
669 3717206 : ReadPathLayerId::InMemoryLayer(layer.get_lsn_range())
670 : }
671 : };
672 5314966 : self.path
673 5314966 : .push((id, keyspace_to_read.clone(), lsn_range.clone()));
674 5314966 : }
675 : }
676 :
677 : impl std::fmt::Display for ReadPath {
678 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
679 0 : writeln!(f, "Read path for {} at lsn {}:", self.keyspace, self.lsn)?;
680 0 : for (idx, (layer_id, keyspace, lsn_range)) in self.path.iter().enumerate() {
681 0 : writeln!(
682 0 : f,
683 0 : "{}: {} {}..{} {}",
684 0 : idx, layer_id, lsn_range.start, lsn_range.end, keyspace
685 0 : )?;
686 : }
687 0 : Ok(())
688 0 : }
689 : }
690 :
691 : #[derive(thiserror::Error)]
692 : pub struct MissingKeyError {
693 : keyspace: KeySpace,
694 : shard: ShardNumber,
695 : query: Option<VersionedKeySpaceQuery>,
696 : // This is largest request LSN from the get page request batch
697 : original_hwm_lsn: Lsn,
698 : ancestor_lsn: Option<Lsn>,
699 : /// Debug information about the read path if there's an error
700 : read_path: Option<ReadPath>,
701 : backtrace: Option<std::backtrace::Backtrace>,
702 : }
703 :
704 : impl MissingKeyError {
705 84 : fn enrich(&mut self, query: VersionedKeySpaceQuery) {
706 84 : self.query = Some(query);
707 84 : }
708 : }
709 :
710 : impl std::fmt::Debug for MissingKeyError {
711 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
712 0 : write!(f, "{}", self)
713 0 : }
714 : }
715 :
716 : impl std::fmt::Display for MissingKeyError {
717 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
718 0 : write!(
719 0 : f,
720 0 : "could not find data for key {} (shard {:?}), original HWM LSN {}",
721 0 : self.keyspace, self.shard, self.original_hwm_lsn
722 0 : )?;
723 :
724 0 : if let Some(ref ancestor_lsn) = self.ancestor_lsn {
725 0 : write!(f, ", ancestor {}", ancestor_lsn)?;
726 0 : }
727 :
728 0 : if let Some(ref query) = self.query {
729 0 : write!(f, ", query {}", query)?;
730 0 : }
731 :
732 0 : if let Some(ref read_path) = self.read_path {
733 0 : write!(f, "\n{}", read_path)?;
734 0 : }
735 :
736 0 : if let Some(ref backtrace) = self.backtrace {
737 0 : write!(f, "\n{}", backtrace)?;
738 0 : }
739 :
740 0 : Ok(())
741 0 : }
742 : }
743 :
744 : impl PageReconstructError {
745 : /// Returns true if this error indicates a tenant/timeline shutdown alike situation
746 0 : pub(crate) fn is_stopping(&self) -> bool {
747 : use PageReconstructError::*;
748 0 : match self {
749 0 : Cancelled => true,
750 0 : Other(_) | AncestorLsnTimeout(_) | WalRedo(_) | MissingKey(_) => false,
751 : }
752 0 : }
753 : }
754 :
755 : #[derive(thiserror::Error, Debug)]
756 : pub(crate) enum CreateImageLayersError {
757 : #[error("timeline shutting down")]
758 : Cancelled,
759 :
760 : #[error("read failed")]
761 : GetVectoredError(#[source] GetVectoredError),
762 :
763 : #[error("reconstruction failed")]
764 : PageReconstructError(#[source] PageReconstructError),
765 :
766 : #[error(transparent)]
767 : Other(#[from] anyhow::Error),
768 : }
769 :
770 : impl From<layer_manager::Shutdown> for CreateImageLayersError {
771 0 : fn from(_: layer_manager::Shutdown) -> Self {
772 0 : CreateImageLayersError::Cancelled
773 0 : }
774 : }
775 :
776 : #[derive(thiserror::Error, Debug, Clone)]
777 : pub(crate) enum FlushLayerError {
778 : /// Timeline cancellation token was cancelled
779 : #[error("timeline shutting down")]
780 : Cancelled,
781 :
782 : /// We tried to flush a layer while the Timeline is in an unexpected state
783 : #[error("cannot flush frozen layers when flush_loop is not running, state is {0:?}")]
784 : NotRunning(FlushLoopState),
785 :
786 : // Arc<> the following non-clonable error types: we must be Clone-able because the flush error is propagated from the flush
787 : // loop via a watch channel, where we can only borrow it.
788 : #[error("create image layers (shared)")]
789 : CreateImageLayersError(Arc<CreateImageLayersError>),
790 :
791 : #[error("other (shared)")]
792 : Other(#[from] Arc<anyhow::Error>),
793 : }
794 :
795 : impl FlushLayerError {
796 : // When crossing from generic anyhow errors to this error type, we explicitly check
797 : // for timeline cancellation to avoid logging inoffensive shutdown errors as warn/err.
798 0 : fn from_anyhow(timeline: &Timeline, err: anyhow::Error) -> Self {
799 0 : let cancelled = timeline.cancel.is_cancelled()
800 : // The upload queue might have been shut down before the official cancellation of the timeline.
801 0 : || err
802 0 : .downcast_ref::<NotInitialized>()
803 0 : .map(NotInitialized::is_stopping)
804 0 : .unwrap_or_default();
805 0 : if cancelled {
806 0 : Self::Cancelled
807 : } else {
808 0 : Self::Other(Arc::new(err))
809 : }
810 0 : }
811 : }
812 :
813 : impl From<layer_manager::Shutdown> for FlushLayerError {
814 0 : fn from(_: layer_manager::Shutdown) -> Self {
815 0 : FlushLayerError::Cancelled
816 0 : }
817 : }
818 :
819 : #[derive(thiserror::Error, Debug)]
820 : pub(crate) enum GetVectoredError {
821 : #[error("timeline shutting down")]
822 : Cancelled,
823 :
824 : #[error("requested too many keys: {0} > {}", Timeline::MAX_GET_VECTORED_KEYS)]
825 : Oversized(u64),
826 :
827 : #[error("requested at invalid LSN: {0}")]
828 : InvalidLsn(Lsn),
829 :
830 : #[error("requested key not found: {0}")]
831 : MissingKey(Box<MissingKeyError>),
832 :
833 : #[error("ancestry walk")]
834 : GetReadyAncestorError(#[source] GetReadyAncestorError),
835 :
836 : #[error(transparent)]
837 : Other(#[from] anyhow::Error),
838 : }
839 :
840 : impl From<GetReadyAncestorError> for GetVectoredError {
841 12 : fn from(value: GetReadyAncestorError) -> Self {
842 : use GetReadyAncestorError::*;
843 12 : match value {
844 0 : Cancelled => GetVectoredError::Cancelled,
845 : AncestorLsnTimeout(_) | BadState { .. } => {
846 12 : GetVectoredError::GetReadyAncestorError(value)
847 : }
848 : }
849 12 : }
850 : }
851 :
852 : #[derive(thiserror::Error, Debug)]
853 : pub(crate) enum GetReadyAncestorError {
854 : #[error("ancestor LSN wait error")]
855 : AncestorLsnTimeout(#[from] WaitLsnError),
856 :
857 : #[error("bad state on timeline {timeline_id}: {state:?}")]
858 : BadState {
859 : timeline_id: TimelineId,
860 : state: TimelineState,
861 : },
862 :
863 : #[error("cancelled")]
864 : Cancelled,
865 : }
866 :
867 : #[derive(Clone, Copy)]
868 : pub enum LogicalSizeCalculationCause {
869 : Initial,
870 : ConsumptionMetricsSyntheticSize,
871 : EvictionTaskImitation,
872 : TenantSizeHandler,
873 : }
874 :
875 : pub enum GetLogicalSizePriority {
876 : User,
877 : Background,
878 : }
879 :
880 0 : #[derive(Debug, enumset::EnumSetType)]
881 : pub(crate) enum CompactFlags {
882 : ForceRepartition,
883 : ForceImageLayerCreation,
884 : ForceL0Compaction,
885 : OnlyL0Compaction,
886 : EnhancedGcBottomMostCompaction,
887 : DryRun,
888 : /// Makes image compaction yield if there's pending L0 compaction. This should always be used in
889 : /// the background compaction task, since we want to aggressively compact down L0 to bound
890 : /// read amplification.
891 : ///
892 : /// It only makes sense to use this when `compaction_l0_first` is enabled (such that we yield to
893 : /// an L0 compaction pass), and without `OnlyL0Compaction` (L0 compaction shouldn't yield for L0
894 : /// compaction).
895 : YieldForL0,
896 : }
897 :
898 : #[serde_with::serde_as]
899 0 : #[derive(Debug, Clone, serde::Deserialize)]
900 : pub(crate) struct CompactRequest {
901 : pub compact_key_range: Option<CompactKeyRange>,
902 : pub compact_lsn_range: Option<CompactLsnRange>,
903 : /// Whether the compaction job should be scheduled.
904 : #[serde(default)]
905 : pub scheduled: bool,
906 : /// Whether the compaction job should be split across key ranges.
907 : #[serde(default)]
908 : pub sub_compaction: bool,
909 : /// Max job size for each subcompaction job.
910 : pub sub_compaction_max_job_size_mb: Option<u64>,
911 : }
912 :
913 0 : #[derive(Debug, Clone, serde::Deserialize)]
914 : pub(crate) struct MarkInvisibleRequest {
915 : #[serde(default)]
916 : pub is_visible: Option<bool>,
917 : }
918 :
919 : #[derive(Debug, Clone, Default)]
920 : pub(crate) struct CompactOptions {
921 : pub flags: EnumSet<CompactFlags>,
922 : /// If set, the compaction will only compact the key range specified by this option.
923 : /// This option is only used by GC compaction. For the full explanation, see [`compaction::GcCompactJob`].
924 : pub compact_key_range: Option<CompactKeyRange>,
925 : /// If set, the compaction will only compact the LSN within this value.
926 : /// This option is only used by GC compaction. For the full explanation, see [`compaction::GcCompactJob`].
927 : pub compact_lsn_range: Option<CompactLsnRange>,
928 : /// Enable sub-compaction (split compaction job across key ranges).
929 : /// This option is only used by GC compaction.
930 : pub sub_compaction: bool,
931 : /// Set job size for the GC compaction.
932 : /// This option is only used by GC compaction.
933 : pub sub_compaction_max_job_size_mb: Option<u64>,
934 : }
935 :
936 : impl std::fmt::Debug for Timeline {
937 0 : fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
938 0 : write!(f, "Timeline<{}>", self.timeline_id)
939 0 : }
940 : }
941 :
942 : #[derive(thiserror::Error, Debug, Clone)]
943 : pub(crate) enum WaitLsnError {
944 : // Called on a timeline which is shutting down
945 : #[error("Shutdown")]
946 : Shutdown,
947 :
948 : // Called on an timeline not in active state or shutting down
949 : #[error("Bad timeline state: {0:?}")]
950 : BadState(TimelineState),
951 :
952 : // Timeout expired while waiting for LSN to catch up with goal.
953 : #[error("{0}")]
954 : Timeout(String),
955 : }
956 :
957 : // The impls below achieve cancellation mapping for errors.
958 : // Perhaps there's a way of achieving this with less cruft.
959 :
960 : impl From<CreateImageLayersError> for CompactionError {
961 0 : fn from(e: CreateImageLayersError) -> Self {
962 0 : match e {
963 0 : CreateImageLayersError::Cancelled => CompactionError::ShuttingDown,
964 0 : CreateImageLayersError::Other(e) => {
965 0 : CompactionError::Other(e.context("create image layers"))
966 : }
967 0 : _ => CompactionError::Other(e.into()),
968 : }
969 0 : }
970 : }
971 :
972 : impl From<CreateImageLayersError> for FlushLayerError {
973 0 : fn from(e: CreateImageLayersError) -> Self {
974 0 : match e {
975 0 : CreateImageLayersError::Cancelled => FlushLayerError::Cancelled,
976 0 : any => FlushLayerError::CreateImageLayersError(Arc::new(any)),
977 : }
978 0 : }
979 : }
980 :
981 : impl From<PageReconstructError> for CreateImageLayersError {
982 0 : fn from(e: PageReconstructError) -> Self {
983 0 : match e {
984 0 : PageReconstructError::Cancelled => CreateImageLayersError::Cancelled,
985 0 : _ => CreateImageLayersError::PageReconstructError(e),
986 : }
987 0 : }
988 : }
989 :
990 : impl From<GetVectoredError> for CreateImageLayersError {
991 0 : fn from(e: GetVectoredError) -> Self {
992 0 : match e {
993 0 : GetVectoredError::Cancelled => CreateImageLayersError::Cancelled,
994 0 : _ => CreateImageLayersError::GetVectoredError(e),
995 : }
996 0 : }
997 : }
998 :
999 : impl From<GetVectoredError> for PageReconstructError {
1000 36 : fn from(e: GetVectoredError) -> Self {
1001 36 : match e {
1002 0 : GetVectoredError::Cancelled => PageReconstructError::Cancelled,
1003 0 : GetVectoredError::InvalidLsn(_) => PageReconstructError::Other(anyhow!("Invalid LSN")),
1004 0 : err @ GetVectoredError::Oversized(_) => PageReconstructError::Other(err.into()),
1005 24 : GetVectoredError::MissingKey(err) => PageReconstructError::MissingKey(err),
1006 12 : GetVectoredError::GetReadyAncestorError(err) => PageReconstructError::from(err),
1007 0 : GetVectoredError::Other(err) => PageReconstructError::Other(err),
1008 : }
1009 36 : }
1010 : }
1011 :
1012 : impl From<GetReadyAncestorError> for PageReconstructError {
1013 12 : fn from(e: GetReadyAncestorError) -> Self {
1014 : use GetReadyAncestorError::*;
1015 12 : match e {
1016 0 : AncestorLsnTimeout(wait_err) => PageReconstructError::AncestorLsnTimeout(wait_err),
1017 12 : bad_state @ BadState { .. } => PageReconstructError::Other(anyhow::anyhow!(bad_state)),
1018 0 : Cancelled => PageReconstructError::Cancelled,
1019 : }
1020 12 : }
1021 : }
1022 :
1023 : pub(crate) enum WaitLsnTimeout {
1024 : Custom(Duration),
1025 : // Use the [`PageServerConf::wait_lsn_timeout`] default
1026 : Default,
1027 : }
1028 :
1029 : pub(crate) enum WaitLsnWaiter<'a> {
1030 : Timeline(&'a Timeline),
1031 : Tenant,
1032 : PageService,
1033 : HttpEndpoint,
1034 : }
1035 :
1036 : /// Argument to [`Timeline::shutdown`].
1037 : #[derive(Debug, Clone, Copy)]
1038 : pub(crate) enum ShutdownMode {
1039 : /// Graceful shutdown, may do a lot of I/O as we flush any open layers to disk and then
1040 : /// also to remote storage. This method can easily take multiple seconds for a busy timeline.
1041 : ///
1042 : /// While we are flushing, we continue to accept read I/O for LSNs ingested before
1043 : /// the call to [`Timeline::shutdown`].
1044 : FreezeAndFlush,
1045 : /// Only flush the layers to the remote storage without freezing any open layers. Flush the deletion
1046 : /// queue. This is the mode used by ancestor detach and any other operations that reloads a tenant
1047 : /// but not increasing the generation number. Note that this mode cannot be used at tenant shutdown,
1048 : /// as flushing the deletion queue at that time will cause shutdown-in-progress errors.
1049 : Reload,
1050 : /// Shut down immediately, without waiting for any open layers to flush.
1051 : Hard,
1052 : }
1053 :
1054 : #[allow(clippy::large_enum_variant, reason = "TODO")]
1055 : enum ImageLayerCreationOutcome {
1056 : /// We generated an image layer
1057 : Generated {
1058 : unfinished_image_layer: ImageLayerWriter,
1059 : },
1060 : /// The key range is empty
1061 : Empty,
1062 : /// (Only used in metadata image layer creation), after reading the metadata keys, we decide to skip
1063 : /// the image layer creation.
1064 : Skip,
1065 : }
1066 :
1067 : /// Public interface functions
1068 : impl Timeline {
1069 : /// Get the LSN where this branch was created
1070 264 : pub(crate) fn get_ancestor_lsn(&self) -> Lsn {
1071 264 : self.ancestor_lsn
1072 264 : }
1073 :
1074 : /// Get the ancestor's timeline id
1075 456 : pub(crate) fn get_ancestor_timeline_id(&self) -> Option<TimelineId> {
1076 456 : self.ancestor_timeline
1077 456 : .as_ref()
1078 456 : .map(|ancestor| ancestor.timeline_id)
1079 456 : }
1080 :
1081 : /// Get the ancestor timeline
1082 12 : pub(crate) fn ancestor_timeline(&self) -> Option<&Arc<Timeline>> {
1083 12 : self.ancestor_timeline.as_ref()
1084 12 : }
1085 :
1086 : /// Get the bytes written since the PITR cutoff on this branch, and
1087 : /// whether this branch's ancestor_lsn is within its parent's PITR.
1088 0 : pub(crate) fn get_pitr_history_stats(&self) -> (u64, bool) {
1089 0 : let gc_info = self.gc_info.read().unwrap();
1090 0 : let history = self
1091 0 : .get_last_record_lsn()
1092 0 : .checked_sub(gc_info.cutoffs.time)
1093 0 : .unwrap_or(Lsn(0))
1094 0 : .0;
1095 0 : (history, gc_info.within_ancestor_pitr)
1096 0 : }
1097 :
1098 : /// Read timeline's GC cutoff: this is the LSN at which GC has started to happen
1099 5132417 : pub(crate) fn get_applied_gc_cutoff_lsn(&self) -> RcuReadGuard<Lsn> {
1100 5132417 : self.applied_gc_cutoff_lsn.read()
1101 5132417 : }
1102 :
1103 : /// Read timeline's planned GC cutoff: this is the logical end of history that users
1104 : /// are allowed to read (based on configured PITR), even if physically we have more history.
1105 0 : pub(crate) fn get_gc_cutoff_lsn(&self) -> Lsn {
1106 0 : self.gc_info.read().unwrap().cutoffs.time
1107 0 : }
1108 :
1109 : /// Look up given page version.
1110 : ///
1111 : /// If a remote layer file is needed, it is downloaded as part of this
1112 : /// call.
1113 : ///
1114 : /// This method enforces [`Self::pagestream_throttle`] internally.
1115 : ///
1116 : /// NOTE: It is considered an error to 'get' a key that doesn't exist. The
1117 : /// abstraction above this needs to store suitable metadata to track what
1118 : /// data exists with what keys, in separate metadata entries. If a
1119 : /// non-existent key is requested, we may incorrectly return a value from
1120 : /// an ancestor branch, for example, or waste a lot of cycles chasing the
1121 : /// non-existing key.
1122 : ///
1123 : /// # Cancel-Safety
1124 : ///
1125 : /// This method is cancellation-safe.
1126 : #[inline(always)]
1127 3645921 : pub(crate) async fn get(
1128 3645921 : &self,
1129 3645921 : key: Key,
1130 3645921 : lsn: Lsn,
1131 3645921 : ctx: &RequestContext,
1132 3645921 : ) -> Result<Bytes, PageReconstructError> {
1133 3645921 : if !lsn.is_valid() {
1134 0 : return Err(PageReconstructError::Other(anyhow::anyhow!("Invalid LSN")));
1135 3645921 : }
1136 3645921 :
1137 3645921 : // This check is debug-only because of the cost of hashing, and because it's a double-check: we
1138 3645921 : // already checked the key against the shard_identity when looking up the Timeline from
1139 3645921 : // page_service.
1140 3645921 : debug_assert!(!self.shard_identity.is_key_disposable(&key));
1141 :
1142 3645921 : let mut reconstruct_state = ValuesReconstructState::new(IoConcurrency::sequential());
1143 3645921 :
1144 3645921 : let query = VersionedKeySpaceQuery::uniform(KeySpace::single(key..key.next()), lsn);
1145 :
1146 3645921 : let vectored_res = self
1147 3645921 : .get_vectored_impl(query, &mut reconstruct_state, ctx)
1148 3645921 : .await;
1149 :
1150 3645921 : let key_value = vectored_res?.pop_first();
1151 3645885 : match key_value {
1152 3645813 : Some((got_key, value)) => {
1153 3645813 : if got_key != key {
1154 0 : error!(
1155 0 : "Expected {}, but singular vectored get returned {}",
1156 : key, got_key
1157 : );
1158 0 : Err(PageReconstructError::Other(anyhow!(
1159 0 : "Singular vectored get returned wrong key"
1160 0 : )))
1161 : } else {
1162 3645813 : value
1163 : }
1164 : }
1165 72 : None => Err(PageReconstructError::MissingKey(Box::new(
1166 72 : MissingKeyError {
1167 72 : keyspace: KeySpace::single(key..key.next()),
1168 72 : shard: self.shard_identity.get_shard_number(&key),
1169 72 : original_hwm_lsn: lsn,
1170 72 : ancestor_lsn: None,
1171 72 : backtrace: None,
1172 72 : read_path: None,
1173 72 : query: None,
1174 72 : },
1175 72 : ))),
1176 : }
1177 3645921 : }
1178 :
1179 : pub(crate) const MAX_GET_VECTORED_KEYS: u64 = 32;
1180 : pub(crate) const LAYERS_VISITED_WARN_THRESHOLD: u32 = 100;
1181 :
1182 : /// Look up multiple page versions at a given LSN
1183 : ///
1184 : /// This naive implementation will be replaced with a more efficient one
1185 : /// which actually vectorizes the read path.
1186 130512 : pub(crate) async fn get_vectored(
1187 130512 : &self,
1188 130512 : query: VersionedKeySpaceQuery,
1189 130512 : io_concurrency: super::storage_layer::IoConcurrency,
1190 130512 : ctx: &RequestContext,
1191 130512 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1192 130512 : let total_keyspace = query.total_keyspace();
1193 130512 :
1194 130512 : let key_count = total_keyspace.total_raw_size().try_into().unwrap();
1195 130512 : if key_count > Timeline::MAX_GET_VECTORED_KEYS {
1196 0 : return Err(GetVectoredError::Oversized(key_count));
1197 130512 : }
1198 :
1199 410052 : for range in &total_keyspace.ranges {
1200 279540 : let mut key = range.start;
1201 783636 : while key != range.end {
1202 504096 : assert!(!self.shard_identity.is_key_disposable(&key));
1203 504096 : key = key.next();
1204 : }
1205 : }
1206 :
1207 130512 : trace!(
1208 0 : "get vectored query {} from task kind {:?}",
1209 0 : query,
1210 0 : ctx.task_kind(),
1211 : );
1212 :
1213 130512 : let start = crate::metrics::GET_VECTORED_LATENCY
1214 130512 : .for_task_kind(ctx.task_kind())
1215 130512 : .map(|metric| (metric, Instant::now()));
1216 :
1217 130512 : let res = self
1218 130512 : .get_vectored_impl(query, &mut ValuesReconstructState::new(io_concurrency), ctx)
1219 130512 : .await;
1220 :
1221 130512 : if let Some((metric, start)) = start {
1222 0 : let elapsed = start.elapsed();
1223 0 : metric.observe(elapsed.as_secs_f64());
1224 130512 : }
1225 :
1226 130512 : res
1227 130512 : }
1228 :
1229 : /// Scan the keyspace and return all existing key-values in the keyspace. This currently uses vectored
1230 : /// get underlying. Normal vectored get would throw an error when a key in the keyspace is not found
1231 : /// during the search, but for the scan interface, it returns all existing key-value pairs, and does
1232 : /// not expect each single key in the key space will be found. The semantics is closer to the RocksDB
1233 : /// scan iterator interface. We could optimize this interface later to avoid some checks in the vectored
1234 : /// get path to maintain and split the probing and to-be-probe keyspace. We also need to ensure that
1235 : /// the scan operation will not cause OOM in the future.
1236 72 : pub(crate) async fn scan(
1237 72 : &self,
1238 72 : keyspace: KeySpace,
1239 72 : lsn: Lsn,
1240 72 : ctx: &RequestContext,
1241 72 : io_concurrency: super::storage_layer::IoConcurrency,
1242 72 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1243 72 : if !lsn.is_valid() {
1244 0 : return Err(GetVectoredError::InvalidLsn(lsn));
1245 72 : }
1246 72 :
1247 72 : trace!(
1248 0 : "key-value scan request for {:?}@{} from task kind {:?}",
1249 0 : keyspace,
1250 0 : lsn,
1251 0 : ctx.task_kind()
1252 : );
1253 :
1254 : // We should generalize this into Keyspace::contains in the future.
1255 144 : for range in &keyspace.ranges {
1256 72 : if range.start.field1 < METADATA_KEY_BEGIN_PREFIX
1257 72 : || range.end.field1 > METADATA_KEY_END_PREFIX
1258 : {
1259 0 : return Err(GetVectoredError::Other(anyhow::anyhow!(
1260 0 : "only metadata keyspace can be scanned"
1261 0 : )));
1262 72 : }
1263 : }
1264 :
1265 72 : let start = crate::metrics::SCAN_LATENCY
1266 72 : .for_task_kind(ctx.task_kind())
1267 72 : .map(ScanLatencyOngoingRecording::start_recording);
1268 72 :
1269 72 : let query = VersionedKeySpaceQuery::uniform(keyspace, lsn);
1270 :
1271 72 : let vectored_res = self
1272 72 : .get_vectored_impl(query, &mut ValuesReconstructState::new(io_concurrency), ctx)
1273 72 : .await;
1274 :
1275 72 : if let Some(recording) = start {
1276 0 : recording.observe();
1277 72 : }
1278 :
1279 72 : vectored_res
1280 72 : }
1281 :
1282 3778701 : pub(super) async fn get_vectored_impl(
1283 3778701 : &self,
1284 3778701 : query: VersionedKeySpaceQuery,
1285 3778701 : reconstruct_state: &mut ValuesReconstructState,
1286 3778701 : ctx: &RequestContext,
1287 3778701 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1288 3778701 : let read_path = if self.conf.enable_read_path_debugging || ctx.read_path_debug() {
1289 : Some(ReadPath::new(
1290 3778701 : query.total_keyspace(),
1291 3778701 : query.high_watermark_lsn()?,
1292 : ))
1293 : } else {
1294 0 : None
1295 : };
1296 :
1297 3778701 : reconstruct_state.read_path = read_path;
1298 :
1299 3778701 : let redo_attempt_type = if ctx.task_kind() == TaskKind::Compaction {
1300 0 : RedoAttemptType::LegacyCompaction
1301 : } else {
1302 3778701 : RedoAttemptType::ReadPage
1303 : };
1304 :
1305 3778701 : let traversal_res: Result<(), _> = {
1306 3778701 : let ctx = RequestContextBuilder::from(ctx)
1307 3778701 : .perf_span(|crnt_perf_span| {
1308 0 : info_span!(
1309 : target: PERF_TRACE_TARGET,
1310 0 : parent: crnt_perf_span,
1311 : "PLAN_IO",
1312 : )
1313 3778701 : })
1314 3778701 : .attached_child();
1315 3778701 :
1316 3778701 : self.get_vectored_reconstruct_data(query.clone(), reconstruct_state, &ctx)
1317 3778701 : .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone())
1318 3778701 : .await
1319 : };
1320 :
1321 3778701 : if let Err(err) = traversal_res {
1322 : // Wait for all the spawned IOs to complete.
1323 : // See comments on `spawn_io` inside `storage_layer` for more details.
1324 96 : let mut collect_futs = std::mem::take(&mut reconstruct_state.keys)
1325 96 : .into_values()
1326 96 : .map(|state| state.collect_pending_ios())
1327 96 : .collect::<FuturesUnordered<_>>();
1328 96 : while collect_futs.next().await.is_some() {}
1329 :
1330 : // Enrich the missing key error with the original query.
1331 96 : if let GetVectoredError::MissingKey(mut missing_err) = err {
1332 84 : missing_err.enrich(query.clone());
1333 84 : return Err(GetVectoredError::MissingKey(missing_err));
1334 12 : }
1335 12 :
1336 12 : return Err(err);
1337 3778605 : };
1338 3778605 :
1339 3778605 : let layers_visited = reconstruct_state.get_layers_visited();
1340 3778605 :
1341 3778605 : let ctx = RequestContextBuilder::from(ctx)
1342 3778605 : .perf_span(|crnt_perf_span| {
1343 0 : info_span!(
1344 : target: PERF_TRACE_TARGET,
1345 0 : parent: crnt_perf_span,
1346 : "RECONSTRUCT",
1347 : )
1348 3778605 : })
1349 3778605 : .attached_child();
1350 3778605 :
1351 3778605 : let futs = FuturesUnordered::new();
1352 4392717 : for (key, state) in std::mem::take(&mut reconstruct_state.keys) {
1353 4392717 : let req_lsn_for_key = query.map_key_to_lsn(&key);
1354 4392717 :
1355 4392717 : futs.push({
1356 4392717 : let walredo_self = self.myself.upgrade().expect("&self method holds the arc");
1357 4392717 : let ctx = RequestContextBuilder::from(&ctx)
1358 4392717 : .perf_span(|crnt_perf_span| {
1359 0 : info_span!(
1360 : target: PERF_TRACE_TARGET,
1361 0 : parent: crnt_perf_span,
1362 : "RECONSTRUCT_KEY",
1363 : key = %key,
1364 : )
1365 4392717 : })
1366 4392717 : .attached_child();
1367 4392717 :
1368 4392717 : async move {
1369 4392717 : assert_eq!(state.situation, ValueReconstructSituation::Complete);
1370 :
1371 4392717 : let res = state
1372 4392717 : .collect_pending_ios()
1373 4392717 : .maybe_perf_instrument(&ctx, |crnt_perf_span| {
1374 0 : info_span!(
1375 : target: PERF_TRACE_TARGET,
1376 0 : parent: crnt_perf_span,
1377 : "WAIT_FOR_IO_COMPLETIONS",
1378 : )
1379 4392717 : })
1380 4392717 : .await;
1381 :
1382 4392717 : let converted = match res {
1383 4392717 : Ok(ok) => ok,
1384 0 : Err(err) => {
1385 0 : return (key, Err(err));
1386 : }
1387 : };
1388 4392717 : DELTAS_PER_READ_GLOBAL.observe(converted.num_deltas() as f64);
1389 4392717 :
1390 4392717 : // The walredo module expects the records to be descending in terms of Lsn.
1391 4392717 : // And we submit the IOs in that order, so, there shuold be no need to sort here.
1392 4392717 : debug_assert!(
1393 4392717 : converted
1394 4392717 : .records
1395 16839048 : .is_sorted_by_key(|(lsn, _)| std::cmp::Reverse(*lsn)),
1396 0 : "{converted:?}"
1397 : );
1398 :
1399 4392717 : let walredo_deltas = converted.num_deltas();
1400 4392717 : let walredo_res = walredo_self
1401 4392717 : .reconstruct_value(key, req_lsn_for_key, converted, redo_attempt_type)
1402 4392717 : .maybe_perf_instrument(&ctx, |crnt_perf_span| {
1403 0 : info_span!(
1404 : target: PERF_TRACE_TARGET,
1405 0 : parent: crnt_perf_span,
1406 : "WALREDO",
1407 : deltas = %walredo_deltas,
1408 : )
1409 4392717 : })
1410 4392717 : .await;
1411 :
1412 4392717 : (key, walredo_res)
1413 4392717 : }
1414 4392717 : });
1415 4392717 : }
1416 :
1417 3778605 : let results = futs
1418 3778605 : .collect::<BTreeMap<Key, Result<Bytes, PageReconstructError>>>()
1419 3778605 : .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone())
1420 3778605 : .await;
1421 :
1422 : // For aux file keys (v1 or v2) the vectored read path does not return an error
1423 : // when they're missing. Instead they are omitted from the resulting btree
1424 : // (this is a requirement, not a bug). Skip updating the metric in these cases
1425 : // to avoid infinite results.
1426 3778605 : if !results.is_empty() {
1427 3777093 : if layers_visited >= Self::LAYERS_VISITED_WARN_THRESHOLD {
1428 0 : let total_keyspace = query.total_keyspace();
1429 0 : let max_request_lsn = query.high_watermark_lsn().expect("Validated previously");
1430 0 :
1431 0 : static LOG_PACER: Lazy<Mutex<RateLimit>> =
1432 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(60))));
1433 0 : LOG_PACER.lock().unwrap().call(|| {
1434 0 : let num_keys = total_keyspace.total_raw_size();
1435 0 : let num_pages = results.len();
1436 0 : tracing::info!(
1437 0 : shard_id = %self.tenant_shard_id.shard_slug(),
1438 0 : lsn = %max_request_lsn,
1439 0 : "Vectored read for {total_keyspace} visited {layers_visited} layers. Returned {num_pages}/{num_keys} pages.",
1440 : );
1441 0 : });
1442 3777093 : }
1443 :
1444 : // Records the number of layers visited in a few different ways:
1445 : //
1446 : // * LAYERS_PER_READ: all layers count towards every read in the batch, because each
1447 : // layer directly affects its observed latency.
1448 : //
1449 : // * LAYERS_PER_READ_BATCH: all layers count towards each batch, to get the per-batch
1450 : // layer visits and access cost.
1451 : //
1452 : // * LAYERS_PER_READ_AMORTIZED: the average layer count per read, to get the amortized
1453 : // read amplification after batching.
1454 3777093 : let layers_visited = layers_visited as f64;
1455 3777093 : let avg_layers_visited = layers_visited / results.len() as f64;
1456 3777093 : LAYERS_PER_READ_BATCH_GLOBAL.observe(layers_visited);
1457 8169810 : for _ in &results {
1458 4392717 : self.metrics.layers_per_read.observe(layers_visited);
1459 4392717 : LAYERS_PER_READ_GLOBAL.observe(layers_visited);
1460 4392717 : LAYERS_PER_READ_AMORTIZED_GLOBAL.observe(avg_layers_visited);
1461 4392717 : }
1462 1512 : }
1463 :
1464 3778605 : Ok(results)
1465 3778701 : }
1466 :
1467 : /// Get last or prev record separately. Same as get_last_record_rlsn().last/prev.
1468 1647420 : pub(crate) fn get_last_record_lsn(&self) -> Lsn {
1469 1647420 : self.last_record_lsn.load().last
1470 1647420 : }
1471 :
1472 0 : pub(crate) fn get_prev_record_lsn(&self) -> Lsn {
1473 0 : self.last_record_lsn.load().prev
1474 0 : }
1475 :
1476 : /// Atomically get both last and prev.
1477 1404 : pub(crate) fn get_last_record_rlsn(&self) -> RecordLsn {
1478 1404 : self.last_record_lsn.load()
1479 1404 : }
1480 :
1481 : /// Subscribe to callers of wait_lsn(). The value of the channel is None if there are no
1482 : /// wait_lsn() calls in progress, and Some(Lsn) if there is an active waiter for wait_lsn().
1483 0 : pub(crate) fn subscribe_for_wait_lsn_updates(&self) -> watch::Receiver<Option<Lsn>> {
1484 0 : self.last_record_lsn.status_receiver()
1485 0 : }
1486 :
1487 2760 : pub(crate) fn get_disk_consistent_lsn(&self) -> Lsn {
1488 2760 : self.disk_consistent_lsn.load()
1489 2760 : }
1490 :
1491 : /// remote_consistent_lsn from the perspective of the tenant's current generation,
1492 : /// not validated with control plane yet.
1493 : /// See [`Self::get_remote_consistent_lsn_visible`].
1494 0 : pub(crate) fn get_remote_consistent_lsn_projected(&self) -> Option<Lsn> {
1495 0 : self.remote_client.remote_consistent_lsn_projected()
1496 0 : }
1497 :
1498 : /// remote_consistent_lsn which the tenant is guaranteed not to go backward from,
1499 : /// i.e. a value of remote_consistent_lsn_projected which has undergone
1500 : /// generation validation in the deletion queue.
1501 0 : pub(crate) fn get_remote_consistent_lsn_visible(&self) -> Option<Lsn> {
1502 0 : self.remote_client.remote_consistent_lsn_visible()
1503 0 : }
1504 :
1505 : /// The sum of the file size of all historic layers in the layer map.
1506 : /// This method makes no distinction between local and remote layers.
1507 : /// Hence, the result **does not represent local filesystem usage**.
1508 0 : pub(crate) async fn layer_size_sum(&self) -> u64 {
1509 0 : let guard = self.layers.read().await;
1510 0 : guard.layer_size_sum()
1511 0 : }
1512 :
1513 0 : pub(crate) fn resident_physical_size(&self) -> u64 {
1514 0 : self.metrics.resident_physical_size_get()
1515 0 : }
1516 :
1517 0 : pub(crate) fn get_directory_metrics(&self) -> [u64; DirectoryKind::KINDS_NUM] {
1518 0 : array::from_fn(|idx| self.directory_metrics[idx].load(AtomicOrdering::Relaxed))
1519 0 : }
1520 :
1521 : ///
1522 : /// Wait until WAL has been received and processed up to this LSN.
1523 : ///
1524 : /// You should call this before any of the other get_* or list_* functions. Calling
1525 : /// those functions with an LSN that has been processed yet is an error.
1526 : ///
1527 1351760 : pub(crate) async fn wait_lsn(
1528 1351760 : &self,
1529 1351760 : lsn: Lsn,
1530 1351760 : who_is_waiting: WaitLsnWaiter<'_>,
1531 1351760 : timeout: WaitLsnTimeout,
1532 1351760 : ctx: &RequestContext, /* Prepare for use by cancellation */
1533 1351760 : ) -> Result<(), WaitLsnError> {
1534 1351760 : let state = self.current_state();
1535 1351760 : if self.cancel.is_cancelled() || matches!(state, TimelineState::Stopping) {
1536 0 : return Err(WaitLsnError::Shutdown);
1537 1351760 : } else if !matches!(state, TimelineState::Active) {
1538 0 : return Err(WaitLsnError::BadState(state));
1539 1351760 : }
1540 1351760 :
1541 1351760 : if cfg!(debug_assertions) {
1542 1351760 : match ctx.task_kind() {
1543 : TaskKind::WalReceiverManager
1544 : | TaskKind::WalReceiverConnectionHandler
1545 : | TaskKind::WalReceiverConnectionPoller => {
1546 0 : let is_myself = match who_is_waiting {
1547 0 : WaitLsnWaiter::Timeline(waiter) => {
1548 0 : Weak::ptr_eq(&waiter.myself, &self.myself)
1549 : }
1550 : WaitLsnWaiter::Tenant
1551 : | WaitLsnWaiter::PageService
1552 0 : | WaitLsnWaiter::HttpEndpoint => unreachable!(
1553 0 : "tenant or page_service context are not expected to have task kind {:?}",
1554 0 : ctx.task_kind()
1555 0 : ),
1556 : };
1557 0 : if is_myself {
1558 0 : if let Err(current) = self.last_record_lsn.would_wait_for(lsn) {
1559 : // walingest is the only one that can advance last_record_lsn; it should make sure to never reach here
1560 0 : panic!(
1561 0 : "this timeline's walingest task is calling wait_lsn({lsn}) but we only have last_record_lsn={current}; would deadlock"
1562 0 : );
1563 0 : }
1564 0 : } else {
1565 0 : // if another timeline's is waiting for us, there's no deadlock risk because
1566 0 : // our walreceiver task can make progress independent of theirs
1567 0 : }
1568 : }
1569 1351760 : _ => {}
1570 : }
1571 0 : }
1572 :
1573 1351760 : let timeout = match timeout {
1574 0 : WaitLsnTimeout::Custom(t) => t,
1575 1351760 : WaitLsnTimeout::Default => self.conf.wait_lsn_timeout,
1576 : };
1577 :
1578 1351760 : let timer = crate::metrics::WAIT_LSN_TIME.start_timer();
1579 1351760 : let start_finish_counterpair_guard = self.metrics.wait_lsn_start_finish_counterpair.guard();
1580 1351760 :
1581 1351760 : let wait_for_timeout = self.last_record_lsn.wait_for_timeout(lsn, timeout);
1582 1351760 : let wait_for_timeout = std::pin::pin!(wait_for_timeout);
1583 1351760 : // Use threshold of 1 because even 1 second of wait for ingest is very much abnormal.
1584 1351760 : let log_slow_threshold = Duration::from_secs(1);
1585 1351760 : // Use period of 10 to avoid flooding logs during an outage that affects all timelines.
1586 1351760 : let log_slow_period = Duration::from_secs(10);
1587 1351760 : let mut logging_permit = None;
1588 1351760 : let wait_for_timeout = monitor_slow_future(
1589 1351760 : log_slow_threshold,
1590 1351760 : log_slow_period,
1591 1351760 : wait_for_timeout,
1592 1351760 : |MonitorSlowFutureCallback {
1593 : ready,
1594 : is_slow,
1595 : elapsed_total,
1596 : elapsed_since_last_callback,
1597 1351760 : }| {
1598 1351760 : self.metrics
1599 1351760 : .wait_lsn_in_progress_micros
1600 1351760 : .inc_by(u64::try_from(elapsed_since_last_callback.as_micros()).unwrap());
1601 1351760 : if !is_slow {
1602 1351760 : return;
1603 0 : }
1604 0 : // It's slow, see if we should log it.
1605 0 : // (We limit the logging to one per invocation per timeline to avoid excessive
1606 0 : // logging during an extended broker / networking outage that affects all timelines.)
1607 0 : if logging_permit.is_none() {
1608 0 : logging_permit = self.wait_lsn_log_slow.try_acquire().ok();
1609 0 : }
1610 0 : if logging_permit.is_none() {
1611 0 : return;
1612 0 : }
1613 0 : // We log it.
1614 0 : if ready {
1615 0 : info!(
1616 0 : "slow wait_lsn completed after {:.3}s",
1617 0 : elapsed_total.as_secs_f64()
1618 : );
1619 : } else {
1620 0 : info!(
1621 0 : "slow wait_lsn still running for {:.3}s",
1622 0 : elapsed_total.as_secs_f64()
1623 : );
1624 : }
1625 1351760 : },
1626 1351760 : );
1627 1351760 : let res = wait_for_timeout.await;
1628 : // don't count the time spent waiting for lock below, and also in walreceiver.status(), towards the wait_lsn_time_histo
1629 1351760 : drop(logging_permit);
1630 1351760 : drop(start_finish_counterpair_guard);
1631 1351760 : drop(timer);
1632 1351760 : match res {
1633 1351760 : Ok(()) => Ok(()),
1634 0 : Err(e) => {
1635 : use utils::seqwait::SeqWaitError::*;
1636 0 : match e {
1637 0 : Shutdown => Err(WaitLsnError::Shutdown),
1638 : Timeout => {
1639 0 : let walreceiver_status = self.walreceiver_status();
1640 0 : Err(WaitLsnError::Timeout(format!(
1641 0 : "Timed out while waiting for WAL record at LSN {} to arrive, last_record_lsn {} disk consistent LSN={}, WalReceiver status: {}",
1642 0 : lsn,
1643 0 : self.get_last_record_lsn(),
1644 0 : self.get_disk_consistent_lsn(),
1645 0 : walreceiver_status,
1646 0 : )))
1647 : }
1648 : }
1649 : }
1650 : }
1651 1351760 : }
1652 :
1653 0 : pub(crate) fn walreceiver_status(&self) -> String {
1654 0 : match &*self.walreceiver.lock().unwrap() {
1655 0 : None => "stopping or stopped".to_string(),
1656 0 : Some(walreceiver) => match walreceiver.status() {
1657 0 : Some(status) => status.to_human_readable_string(),
1658 0 : None => "Not active".to_string(),
1659 : },
1660 : }
1661 0 : }
1662 :
1663 : /// Check that it is valid to request operations with that lsn.
1664 1428 : pub(crate) fn check_lsn_is_in_scope(
1665 1428 : &self,
1666 1428 : lsn: Lsn,
1667 1428 : latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
1668 1428 : ) -> anyhow::Result<()> {
1669 1428 : ensure!(
1670 1428 : lsn >= **latest_gc_cutoff_lsn,
1671 24 : "LSN {} is earlier than latest GC cutoff {} (we might've already garbage collected needed data)",
1672 24 : lsn,
1673 24 : **latest_gc_cutoff_lsn,
1674 : );
1675 1404 : Ok(())
1676 1428 : }
1677 :
1678 : /// Initializes an LSN lease. The function will return an error if the requested LSN is less than the `latest_gc_cutoff_lsn`.
1679 60 : pub(crate) fn init_lsn_lease(
1680 60 : &self,
1681 60 : lsn: Lsn,
1682 60 : length: Duration,
1683 60 : ctx: &RequestContext,
1684 60 : ) -> anyhow::Result<LsnLease> {
1685 60 : self.make_lsn_lease(lsn, length, true, ctx)
1686 60 : }
1687 :
1688 : /// Renews a lease at a particular LSN. The requested LSN is not validated against the `latest_gc_cutoff_lsn` when we are in the grace period.
1689 24 : pub(crate) fn renew_lsn_lease(
1690 24 : &self,
1691 24 : lsn: Lsn,
1692 24 : length: Duration,
1693 24 : ctx: &RequestContext,
1694 24 : ) -> anyhow::Result<LsnLease> {
1695 24 : self.make_lsn_lease(lsn, length, false, ctx)
1696 24 : }
1697 :
1698 : /// Obtains a temporary lease blocking garbage collection for the given LSN.
1699 : ///
1700 : /// If we are in `AttachedSingle` mode and is not blocked by the lsn lease deadline, this function will error
1701 : /// if the requesting LSN is less than the `latest_gc_cutoff_lsn` and there is no existing request present.
1702 : ///
1703 : /// If there is an existing lease in the map, the lease will be renewed only if the request extends the lease.
1704 : /// The returned lease is therefore the maximum between the existing lease and the requesting lease.
1705 84 : fn make_lsn_lease(
1706 84 : &self,
1707 84 : lsn: Lsn,
1708 84 : length: Duration,
1709 84 : init: bool,
1710 84 : _ctx: &RequestContext,
1711 84 : ) -> anyhow::Result<LsnLease> {
1712 72 : let lease = {
1713 : // Normalize the requested LSN to be aligned, and move to the first record
1714 : // if it points to the beginning of the page (header).
1715 84 : let lsn = xlog_utils::normalize_lsn(lsn, WAL_SEGMENT_SIZE);
1716 84 :
1717 84 : let mut gc_info = self.gc_info.write().unwrap();
1718 84 : let planned_cutoff = gc_info.min_cutoff();
1719 84 :
1720 84 : let valid_until = SystemTime::now() + length;
1721 84 :
1722 84 : let entry = gc_info.leases.entry(lsn);
1723 84 :
1724 84 : match entry {
1725 36 : Entry::Occupied(mut occupied) => {
1726 36 : let existing_lease = occupied.get_mut();
1727 36 : if valid_until > existing_lease.valid_until {
1728 12 : existing_lease.valid_until = valid_until;
1729 12 : let dt: DateTime<Utc> = valid_until.into();
1730 12 : info!("lease extended to {}", dt);
1731 : } else {
1732 24 : let dt: DateTime<Utc> = existing_lease.valid_until.into();
1733 24 : info!("existing lease covers greater length, valid until {}", dt);
1734 : }
1735 :
1736 36 : existing_lease.clone()
1737 : }
1738 48 : Entry::Vacant(vacant) => {
1739 : // Reject already GC-ed LSN if we are in AttachedSingle and
1740 : // not blocked by the lsn lease deadline.
1741 48 : let validate = {
1742 48 : let conf = self.tenant_conf.load();
1743 48 : conf.location.attach_mode == AttachmentMode::Single
1744 48 : && !conf.is_gc_blocked_by_lsn_lease_deadline()
1745 : };
1746 :
1747 48 : if init || validate {
1748 48 : let latest_gc_cutoff_lsn = self.get_applied_gc_cutoff_lsn();
1749 48 : if lsn < *latest_gc_cutoff_lsn {
1750 12 : bail!(
1751 12 : "tried to request an lsn lease for an lsn below the latest gc cutoff. requested at {} gc cutoff {}",
1752 12 : lsn,
1753 12 : *latest_gc_cutoff_lsn
1754 12 : );
1755 36 : }
1756 36 : if lsn < planned_cutoff {
1757 0 : bail!(
1758 0 : "tried to request an lsn lease for an lsn below the planned gc cutoff. requested at {} planned gc cutoff {}",
1759 0 : lsn,
1760 0 : planned_cutoff
1761 0 : );
1762 36 : }
1763 0 : }
1764 :
1765 36 : let dt: DateTime<Utc> = valid_until.into();
1766 36 : info!("lease created, valid until {}", dt);
1767 36 : vacant.insert(LsnLease { valid_until }).clone()
1768 : }
1769 : }
1770 : };
1771 :
1772 72 : Ok(lease)
1773 84 : }
1774 :
1775 : /// Freeze the current open in-memory layer. It will be written to disk on next iteration.
1776 : /// Returns the flush request ID which can be awaited with wait_flush_completion().
1777 : #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
1778 : pub(crate) async fn freeze(&self) -> Result<u64, FlushLayerError> {
1779 : self.freeze0().await
1780 : }
1781 :
1782 : /// Freeze and flush the open in-memory layer, waiting for it to be written to disk.
1783 : #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
1784 : pub(crate) async fn freeze_and_flush(&self) -> Result<(), FlushLayerError> {
1785 : self.freeze_and_flush0().await
1786 : }
1787 :
1788 : /// Freeze the current open in-memory layer. It will be written to disk on next iteration.
1789 : /// Returns the flush request ID which can be awaited with wait_flush_completion().
1790 6792 : pub(crate) async fn freeze0(&self) -> Result<u64, FlushLayerError> {
1791 6792 : let mut g = self.write_lock.lock().await;
1792 6792 : let to_lsn = self.get_last_record_lsn();
1793 6792 : self.freeze_inmem_layer_at(to_lsn, &mut g).await
1794 6792 : }
1795 :
1796 : // This exists to provide a non-span creating version of `freeze_and_flush` we can call without
1797 : // polluting the span hierarchy.
1798 6792 : pub(crate) async fn freeze_and_flush0(&self) -> Result<(), FlushLayerError> {
1799 6792 : let token = self.freeze0().await?;
1800 6792 : self.wait_flush_completion(token).await
1801 6792 : }
1802 :
1803 : // Check if an open ephemeral layer should be closed: this provides
1804 : // background enforcement of checkpoint interval if there is no active WAL receiver, to avoid keeping
1805 : // an ephemeral layer open forever when idle. It also freezes layers if the global limit on
1806 : // ephemeral layer bytes has been breached.
1807 0 : pub(super) async fn maybe_freeze_ephemeral_layer(&self) {
1808 0 : let Ok(mut write_guard) = self.write_lock.try_lock() else {
1809 : // If the write lock is held, there is an active wal receiver: rolling open layers
1810 : // is their responsibility while they hold this lock.
1811 0 : return;
1812 : };
1813 :
1814 : // FIXME: why not early exit? because before #7927 the state would had been cleared every
1815 : // time, and this was missed.
1816 : // if write_guard.is_none() { return; }
1817 :
1818 0 : let Ok(layers_guard) = self.layers.try_read() else {
1819 : // Don't block if the layer lock is busy
1820 0 : return;
1821 : };
1822 :
1823 0 : let Ok(lm) = layers_guard.layer_map() else {
1824 0 : return;
1825 : };
1826 :
1827 0 : let Some(open_layer) = &lm.open_layer else {
1828 : // If there is no open layer, we have no layer freezing to do. However, we might need to generate
1829 : // some updates to disk_consistent_lsn and remote_consistent_lsn, in case we ingested some WAL regions
1830 : // that didn't result in writes to this shard.
1831 :
1832 : // Must not hold the layers lock while waiting for a flush.
1833 0 : drop(layers_guard);
1834 0 :
1835 0 : let last_record_lsn = self.get_last_record_lsn();
1836 0 : let disk_consistent_lsn = self.get_disk_consistent_lsn();
1837 0 : if last_record_lsn > disk_consistent_lsn {
1838 : // We have no open layer, but disk_consistent_lsn is behind the last record: this indicates
1839 : // we are a sharded tenant and have skipped some WAL
1840 0 : let last_freeze_ts = *self.last_freeze_ts.read().unwrap();
1841 0 : if last_freeze_ts.elapsed() >= self.get_checkpoint_timeout() {
1842 : // Only do this if have been layer-less longer than get_checkpoint_timeout, so that a shard
1843 : // without any data ingested (yet) doesn't write a remote index as soon as it
1844 : // sees its LSN advance: we only do this if we've been layer-less
1845 : // for some time.
1846 0 : tracing::debug!(
1847 0 : "Advancing disk_consistent_lsn past WAL ingest gap {} -> {}",
1848 : disk_consistent_lsn,
1849 : last_record_lsn
1850 : );
1851 :
1852 : // The flush loop will update remote consistent LSN as well as disk consistent LSN.
1853 : // We know there is no open layer, so we can request freezing without actually
1854 : // freezing anything. This is true even if we have dropped the layers_guard, we
1855 : // still hold the write_guard.
1856 0 : let _ = async {
1857 0 : let token = self
1858 0 : .freeze_inmem_layer_at(last_record_lsn, &mut write_guard)
1859 0 : .await?;
1860 0 : self.wait_flush_completion(token).await
1861 0 : }
1862 0 : .await;
1863 0 : }
1864 0 : }
1865 :
1866 0 : return;
1867 : };
1868 :
1869 0 : let Some(current_size) = open_layer.try_len() else {
1870 : // Unexpected: since we hold the write guard, nobody else should be writing to this layer, so
1871 : // read lock to get size should always succeed.
1872 0 : tracing::warn!("Lock conflict while reading size of open layer");
1873 0 : return;
1874 : };
1875 :
1876 0 : let current_lsn = self.get_last_record_lsn();
1877 :
1878 0 : let checkpoint_distance_override = open_layer.tick().await;
1879 :
1880 0 : if let Some(size_override) = checkpoint_distance_override {
1881 0 : if current_size > size_override {
1882 : // This is not harmful, but it only happens in relatively rare cases where
1883 : // time-based checkpoints are not happening fast enough to keep the amount of
1884 : // ephemeral data within configured limits. It's a sign of stress on the system.
1885 0 : tracing::info!(
1886 0 : "Early-rolling open layer at size {current_size} (limit {size_override}) due to dirty data pressure"
1887 : );
1888 0 : }
1889 0 : }
1890 :
1891 0 : let checkpoint_distance =
1892 0 : checkpoint_distance_override.unwrap_or(self.get_checkpoint_distance());
1893 0 :
1894 0 : if self.should_roll(
1895 0 : current_size,
1896 0 : current_size,
1897 0 : checkpoint_distance,
1898 0 : self.get_last_record_lsn(),
1899 0 : self.last_freeze_at.load(),
1900 0 : open_layer.get_opened_at(),
1901 0 : ) {
1902 0 : match open_layer.info() {
1903 0 : InMemoryLayerInfo::Frozen { lsn_start, lsn_end } => {
1904 0 : // We may reach this point if the layer was already frozen by not yet flushed: flushing
1905 0 : // happens asynchronously in the background.
1906 0 : tracing::debug!(
1907 0 : "Not freezing open layer, it's already frozen ({lsn_start}..{lsn_end})"
1908 : );
1909 : }
1910 : InMemoryLayerInfo::Open { .. } => {
1911 : // Upgrade to a write lock and freeze the layer
1912 0 : drop(layers_guard);
1913 0 : let res = self
1914 0 : .freeze_inmem_layer_at(current_lsn, &mut write_guard)
1915 0 : .await;
1916 :
1917 0 : if let Err(e) = res {
1918 0 : tracing::info!(
1919 0 : "failed to flush frozen layer after background freeze: {e:#}"
1920 : );
1921 0 : }
1922 : }
1923 : }
1924 0 : }
1925 0 : }
1926 :
1927 : /// Checks if the internal state of the timeline is consistent with it being able to be offloaded.
1928 : ///
1929 : /// This is neccessary but not sufficient for offloading of the timeline as it might have
1930 : /// child timelines that are not offloaded yet.
1931 0 : pub(crate) fn can_offload(&self) -> (bool, &'static str) {
1932 0 : if self.remote_client.is_archived() != Some(true) {
1933 0 : return (false, "the timeline is not archived");
1934 0 : }
1935 0 : if !self.remote_client.no_pending_work() {
1936 : // if the remote client is still processing some work, we can't offload
1937 0 : return (false, "the upload queue is not drained yet");
1938 0 : }
1939 0 :
1940 0 : (true, "ok")
1941 0 : }
1942 :
1943 : /// Outermost timeline compaction operation; downloads needed layers. Returns whether we have pending
1944 : /// compaction tasks.
1945 2184 : pub(crate) async fn compact(
1946 2184 : self: &Arc<Self>,
1947 2184 : cancel: &CancellationToken,
1948 2184 : flags: EnumSet<CompactFlags>,
1949 2184 : ctx: &RequestContext,
1950 2184 : ) -> Result<CompactionOutcome, CompactionError> {
1951 2184 : let res = self
1952 2184 : .compact_with_options(
1953 2184 : cancel,
1954 2184 : CompactOptions {
1955 2184 : flags,
1956 2184 : compact_key_range: None,
1957 2184 : compact_lsn_range: None,
1958 2184 : sub_compaction: false,
1959 2184 : sub_compaction_max_job_size_mb: None,
1960 2184 : },
1961 2184 : ctx,
1962 2184 : )
1963 2184 : .await;
1964 2184 : if let Err(err) = &res {
1965 0 : log_compaction_error(err, None, cancel.is_cancelled(), false);
1966 2184 : }
1967 2184 : res
1968 2184 : }
1969 :
1970 : /// Outermost timeline compaction operation; downloads needed layers.
1971 : ///
1972 : /// NB: the cancellation token is usually from a background task, but can also come from a
1973 : /// request task.
1974 2184 : pub(crate) async fn compact_with_options(
1975 2184 : self: &Arc<Self>,
1976 2184 : cancel: &CancellationToken,
1977 2184 : options: CompactOptions,
1978 2184 : ctx: &RequestContext,
1979 2184 : ) -> Result<CompactionOutcome, CompactionError> {
1980 2184 : // Acquire the compaction lock and task semaphore.
1981 2184 : //
1982 2184 : // L0-only compaction uses a separate semaphore (if enabled) to make sure it isn't starved
1983 2184 : // out by other background tasks (including image compaction). We request this via
1984 2184 : // `BackgroundLoopKind::L0Compaction`.
1985 2184 : //
1986 2184 : // Yield for pending L0 compaction while waiting for the semaphore.
1987 2184 : let is_l0_only = options.flags.contains(CompactFlags::OnlyL0Compaction);
1988 2184 : let semaphore_kind = match is_l0_only && self.get_compaction_l0_semaphore() {
1989 0 : true => BackgroundLoopKind::L0Compaction,
1990 2184 : false => BackgroundLoopKind::Compaction,
1991 : };
1992 2184 : let yield_for_l0 = options.flags.contains(CompactFlags::YieldForL0);
1993 2184 : if yield_for_l0 {
1994 : // If this is an L0 pass, it doesn't make sense to yield for L0.
1995 0 : debug_assert!(!is_l0_only, "YieldForL0 during L0 pass");
1996 : // If `compaction_l0_first` is disabled, there's no point yielding.
1997 0 : debug_assert!(self.get_compaction_l0_first(), "YieldForL0 without L0 pass");
1998 2184 : }
1999 :
2000 2184 : let acquire = async move {
2001 2184 : let guard = self.compaction_lock.lock().await;
2002 2184 : let permit = super::tasks::acquire_concurrency_permit(semaphore_kind, ctx).await;
2003 2184 : (guard, permit)
2004 2184 : };
2005 :
2006 2184 : let (_guard, _permit) = tokio::select! {
2007 2184 : (guard, permit) = acquire => (guard, permit),
2008 2184 : _ = self.l0_compaction_trigger.notified(), if yield_for_l0 => {
2009 0 : return Ok(CompactionOutcome::YieldForL0);
2010 : }
2011 2184 : _ = self.cancel.cancelled() => return Ok(CompactionOutcome::Skipped),
2012 2184 : _ = cancel.cancelled() => return Ok(CompactionOutcome::Skipped),
2013 : };
2014 :
2015 2184 : let last_record_lsn = self.get_last_record_lsn();
2016 2184 :
2017 2184 : // Last record Lsn could be zero in case the timeline was just created
2018 2184 : if !last_record_lsn.is_valid() {
2019 0 : warn!(
2020 0 : "Skipping compaction for potentially just initialized timeline, it has invalid last record lsn: {last_record_lsn}"
2021 : );
2022 0 : return Ok(CompactionOutcome::Skipped);
2023 2184 : }
2024 :
2025 2184 : let result = match self.get_compaction_algorithm_settings().kind {
2026 : CompactionAlgorithm::Tiered => {
2027 0 : self.compact_tiered(cancel, ctx).await?;
2028 0 : Ok(CompactionOutcome::Done)
2029 : }
2030 2184 : CompactionAlgorithm::Legacy => self.compact_legacy(cancel, options, ctx).await,
2031 : };
2032 :
2033 : // Signal compaction failure to avoid L0 flush stalls when it's broken.
2034 0 : match &result {
2035 2184 : Ok(_) => self.compaction_failed.store(false, AtomicOrdering::Relaxed),
2036 0 : Err(e) if e.is_cancel() => {}
2037 0 : Err(CompactionError::ShuttingDown) => {
2038 0 : // Covered by the `Err(e) if e.is_cancel()` branch.
2039 0 : }
2040 0 : Err(CompactionError::AlreadyRunning(_)) => {
2041 0 : // Covered by the `Err(e) if e.is_cancel()` branch.
2042 0 : }
2043 : Err(CompactionError::Other(_)) => {
2044 0 : self.compaction_failed.store(true, AtomicOrdering::Relaxed)
2045 : }
2046 : Err(CompactionError::CollectKeySpaceError(_)) => {
2047 : // Cancelled errors are covered by the `Err(e) if e.is_cancel()` branch.
2048 0 : self.compaction_failed.store(true, AtomicOrdering::Relaxed)
2049 : }
2050 : // Don't change the current value on offload failure or shutdown. We don't want to
2051 : // abruptly stall nor resume L0 flushes in these cases.
2052 0 : Err(CompactionError::Offload(_)) => {}
2053 : };
2054 :
2055 2184 : result
2056 2184 : }
2057 :
2058 : /// Mutate the timeline with a [`TimelineWriter`].
2059 30799212 : pub(crate) async fn writer(&self) -> TimelineWriter<'_> {
2060 30799212 : TimelineWriter {
2061 30799212 : tl: self,
2062 30799212 : write_guard: self.write_lock.lock().await,
2063 : }
2064 30799212 : }
2065 :
2066 0 : pub(crate) fn activate(
2067 0 : self: &Arc<Self>,
2068 0 : parent: Arc<crate::tenant::TenantShard>,
2069 0 : broker_client: BrokerClientChannel,
2070 0 : background_jobs_can_start: Option<&completion::Barrier>,
2071 0 : ctx: &RequestContext,
2072 0 : ) {
2073 0 : if self.tenant_shard_id.is_shard_zero() {
2074 0 : // Logical size is only maintained accurately on shard zero.
2075 0 : self.spawn_initial_logical_size_computation_task(ctx);
2076 0 : }
2077 0 : self.launch_wal_receiver(ctx, broker_client);
2078 0 : self.set_state(TimelineState::Active);
2079 0 : self.launch_eviction_task(parent, background_jobs_can_start);
2080 0 : }
2081 :
2082 : /// After this function returns, there are no timeline-scoped tasks are left running.
2083 : ///
2084 : /// The preferred pattern for is:
2085 : /// - in any spawned tasks, keep Timeline::guard open + Timeline::cancel / child token
2086 : /// - if early shutdown (not just cancellation) of a sub-tree of tasks is required,
2087 : /// go the extra mile and keep track of JoinHandles
2088 : /// - Keep track of JoinHandles using a passed-down `Arc<Mutex<Option<JoinSet>>>` or similar,
2089 : /// instead of spawning directly on a runtime. It is a more composable / testable pattern.
2090 : ///
2091 : /// For legacy reasons, we still have multiple tasks spawned using
2092 : /// `task_mgr::spawn(X, Some(tenant_id), Some(timeline_id))`.
2093 : /// We refer to these as "timeline-scoped task_mgr tasks".
2094 : /// Some of these tasks are already sensitive to Timeline::cancel while others are
2095 : /// not sensitive to Timeline::cancel and instead respect [`task_mgr::shutdown_token`]
2096 : /// or [`task_mgr::shutdown_watcher`].
2097 : /// We want to gradually convert the code base away from these.
2098 : ///
2099 : /// Here is an inventory of timeline-scoped task_mgr tasks that are still sensitive to
2100 : /// `task_mgr::shutdown_{token,watcher}` (there are also tenant-scoped and global-scoped
2101 : /// ones that aren't mentioned here):
2102 : /// - [`TaskKind::TimelineDeletionWorker`]
2103 : /// - NB: also used for tenant deletion
2104 : /// - [`TaskKind::RemoteUploadTask`]`
2105 : /// - [`TaskKind::InitialLogicalSizeCalculation`]
2106 : /// - [`TaskKind::DownloadAllRemoteLayers`] (can we get rid of it?)
2107 : // Inventory of timeline-scoped task_mgr tasks that use spawn but aren't sensitive:
2108 : /// - [`TaskKind::Eviction`]
2109 : /// - [`TaskKind::LayerFlushTask`]
2110 : /// - [`TaskKind::OndemandLogicalSizeCalculation`]
2111 : /// - [`TaskKind::GarbageCollector`] (immediate_gc is timeline-scoped)
2112 60 : pub(crate) async fn shutdown(&self, mode: ShutdownMode) {
2113 60 : debug_assert_current_span_has_tenant_and_timeline_id();
2114 60 :
2115 60 : // Regardless of whether we're going to try_freeze_and_flush
2116 60 : // or not, stop ingesting any more data. Walreceiver only provides
2117 60 : // cancellation but no "wait until gone", because it uses the Timeline::gate.
2118 60 : // So, only after the self.gate.close() below will we know for sure that
2119 60 : // no walreceiver tasks are left.
2120 60 : // For `try_freeze_and_flush=true`, this means that we might still be ingesting
2121 60 : // data during the call to `self.freeze_and_flush()` below.
2122 60 : // That's not ideal, but, we don't have the concept of a ChildGuard,
2123 60 : // which is what we'd need to properly model early shutdown of the walreceiver
2124 60 : // task sub-tree before the other Timeline task sub-trees.
2125 60 : let walreceiver = self.walreceiver.lock().unwrap().take();
2126 60 : tracing::debug!(
2127 0 : is_some = walreceiver.is_some(),
2128 0 : "Waiting for WalReceiverManager..."
2129 : );
2130 60 : if let Some(walreceiver) = walreceiver {
2131 0 : walreceiver.cancel();
2132 60 : }
2133 : // ... and inform any waiters for newer LSNs that there won't be any.
2134 60 : self.last_record_lsn.shutdown();
2135 60 :
2136 60 : if let ShutdownMode::FreezeAndFlush = mode {
2137 36 : let do_flush = if let Some((open, frozen)) = self
2138 36 : .layers
2139 36 : .read()
2140 36 : .await
2141 36 : .layer_map()
2142 36 : .map(|lm| (lm.open_layer.is_some(), lm.frozen_layers.len()))
2143 36 : .ok()
2144 36 : .filter(|(open, frozen)| *open || *frozen > 0)
2145 : {
2146 0 : if self.remote_client.is_archived() == Some(true) {
2147 : // No point flushing on shutdown for an archived timeline: it is not important
2148 : // to have it nice and fresh after our restart, and trying to flush here might
2149 : // race with trying to offload it (which also stops the flush loop)
2150 0 : false
2151 : } else {
2152 0 : tracing::info!(?open, frozen, "flushing and freezing on shutdown");
2153 0 : true
2154 : }
2155 : } else {
2156 : // this is double-shutdown, it'll be a no-op
2157 36 : true
2158 : };
2159 :
2160 : // we shut down walreceiver above, so, we won't add anything more
2161 : // to the InMemoryLayer; freeze it and wait for all frozen layers
2162 : // to reach the disk & upload queue, then shut the upload queue and
2163 : // wait for it to drain.
2164 36 : if do_flush {
2165 36 : match self.freeze_and_flush().await {
2166 : Ok(_) => {
2167 : // drain the upload queue
2168 : // if we did not wait for completion here, it might be our shutdown process
2169 : // didn't wait for remote uploads to complete at all, as new tasks can forever
2170 : // be spawned.
2171 : //
2172 : // what is problematic is the shutting down of RemoteTimelineClient, because
2173 : // obviously it does not make sense to stop while we wait for it, but what
2174 : // about corner cases like s3 suddenly hanging up?
2175 36 : self.remote_client.shutdown().await;
2176 : }
2177 : Err(FlushLayerError::Cancelled) => {
2178 : // this is likely the second shutdown, ignore silently.
2179 : // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080
2180 0 : debug_assert!(self.cancel.is_cancelled());
2181 : }
2182 0 : Err(e) => {
2183 0 : // Non-fatal. Shutdown is infallible. Failures to flush just mean that
2184 0 : // we have some extra WAL replay to do next time the timeline starts.
2185 0 : warn!("failed to freeze and flush: {e:#}");
2186 : }
2187 : }
2188 :
2189 : // `self.remote_client.shutdown().await` above should have already flushed everything from the queue, but
2190 : // we also do a final check here to ensure that the queue is empty.
2191 36 : if !self.remote_client.no_pending_work() {
2192 0 : warn!(
2193 0 : "still have pending work in remote upload queue, but continuing shutting down anyways"
2194 : );
2195 36 : }
2196 0 : }
2197 24 : }
2198 :
2199 60 : if let ShutdownMode::Reload = mode {
2200 : // drain the upload queue
2201 12 : self.remote_client.shutdown().await;
2202 12 : if !self.remote_client.no_pending_work() {
2203 0 : warn!(
2204 0 : "still have pending work in remote upload queue, but continuing shutting down anyways"
2205 : );
2206 12 : }
2207 48 : }
2208 :
2209 : // Signal any subscribers to our cancellation token to drop out
2210 60 : tracing::debug!("Cancelling CancellationToken");
2211 60 : self.cancel.cancel();
2212 60 :
2213 60 : // If we have a background task downloading heatmap layers stop it.
2214 60 : // The background downloads are sensitive to timeline cancellation (done above),
2215 60 : // so the drain will be immediate.
2216 60 : self.stop_and_drain_heatmap_layers_download().await;
2217 :
2218 : // Ensure Prevent new page service requests from starting.
2219 60 : self.handles.shutdown();
2220 60 :
2221 60 : // Transition the remote_client into a state where it's only useful for timeline deletion.
2222 60 : // (The deletion use case is why we can't just hook up remote_client to Self::cancel).)
2223 60 : self.remote_client.stop();
2224 60 :
2225 60 : // As documented in remote_client.stop()'s doc comment, it's our responsibility
2226 60 : // to shut down the upload queue tasks.
2227 60 : // TODO: fix that, task management should be encapsulated inside remote_client.
2228 60 : task_mgr::shutdown_tasks(
2229 60 : Some(TaskKind::RemoteUploadTask),
2230 60 : Some(self.tenant_shard_id),
2231 60 : Some(self.timeline_id),
2232 60 : )
2233 60 : .await;
2234 :
2235 : // TODO: work toward making this a no-op. See this function's doc comment for more context.
2236 60 : tracing::debug!("Waiting for tasks...");
2237 60 : task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), Some(self.timeline_id)).await;
2238 :
2239 : {
2240 : // Allow any remaining in-memory layers to do cleanup -- until that, they hold the gate
2241 : // open.
2242 60 : let mut write_guard = self.write_lock.lock().await;
2243 60 : self.layers.write().await.shutdown(&mut write_guard);
2244 60 : }
2245 60 :
2246 60 : // Finally wait until any gate-holders are complete.
2247 60 : //
2248 60 : // TODO: once above shutdown_tasks is a no-op, we can close the gate before calling shutdown_tasks
2249 60 : // and use a TBD variant of shutdown_tasks that asserts that there were no tasks left.
2250 60 : self.gate.close().await;
2251 :
2252 60 : self.metrics.shutdown();
2253 60 : }
2254 :
2255 2796 : pub(crate) fn set_state(&self, new_state: TimelineState) {
2256 2796 : match (self.current_state(), new_state) {
2257 2796 : (equal_state_1, equal_state_2) if equal_state_1 == equal_state_2 => {
2258 12 : info!("Ignoring new state, equal to the existing one: {equal_state_2:?}");
2259 : }
2260 0 : (st, TimelineState::Loading) => {
2261 0 : error!("ignoring transition from {st:?} into Loading state");
2262 : }
2263 0 : (TimelineState::Broken { .. }, new_state) => {
2264 0 : error!("Ignoring state update {new_state:?} for broken timeline");
2265 : }
2266 : (TimelineState::Stopping, TimelineState::Active) => {
2267 0 : error!("Not activating a Stopping timeline");
2268 : }
2269 2784 : (_, new_state) => {
2270 2784 : self.state.send_replace(new_state);
2271 2784 : }
2272 : }
2273 2796 : }
2274 :
2275 12 : pub(crate) fn set_broken(&self, reason: String) {
2276 12 : let backtrace_str: String = format!("{}", std::backtrace::Backtrace::force_capture());
2277 12 : let broken_state = TimelineState::Broken {
2278 12 : reason,
2279 12 : backtrace: backtrace_str,
2280 12 : };
2281 12 : self.set_state(broken_state);
2282 12 :
2283 12 : // Although the Broken state is not equivalent to shutdown() (shutdown will be called
2284 12 : // later when this tenant is detach or the process shuts down), firing the cancellation token
2285 12 : // here avoids the need for other tasks to watch for the Broken state explicitly.
2286 12 : self.cancel.cancel();
2287 12 : }
2288 :
2289 1358276 : pub(crate) fn current_state(&self) -> TimelineState {
2290 1358276 : self.state.borrow().clone()
2291 1358276 : }
2292 :
2293 36 : pub(crate) fn is_broken(&self) -> bool {
2294 36 : matches!(&*self.state.borrow(), TimelineState::Broken { .. })
2295 36 : }
2296 :
2297 1512 : pub(crate) fn is_active(&self) -> bool {
2298 1512 : self.current_state() == TimelineState::Active
2299 1512 : }
2300 :
2301 96 : pub(crate) fn is_archived(&self) -> Option<bool> {
2302 96 : self.remote_client.is_archived()
2303 96 : }
2304 :
2305 96 : pub(crate) fn is_invisible(&self) -> Option<bool> {
2306 96 : self.remote_client.is_invisible()
2307 96 : }
2308 :
2309 2208 : pub(crate) fn is_stopping(&self) -> bool {
2310 2208 : self.current_state() == TimelineState::Stopping
2311 2208 : }
2312 :
2313 0 : pub(crate) fn subscribe_for_state_updates(&self) -> watch::Receiver<TimelineState> {
2314 0 : self.state.subscribe()
2315 0 : }
2316 :
2317 1351772 : pub(crate) async fn wait_to_become_active(
2318 1351772 : &self,
2319 1351772 : _ctx: &RequestContext, // Prepare for use by cancellation
2320 1351772 : ) -> Result<(), TimelineState> {
2321 1351772 : let mut receiver = self.state.subscribe();
2322 : loop {
2323 1351772 : let current_state = receiver.borrow().clone();
2324 1351772 : match current_state {
2325 : TimelineState::Loading => {
2326 0 : receiver
2327 0 : .changed()
2328 0 : .await
2329 0 : .expect("holding a reference to self");
2330 : }
2331 : TimelineState::Active => {
2332 1351760 : return Ok(());
2333 : }
2334 : TimelineState::Broken { .. } | TimelineState::Stopping => {
2335 : // There's no chance the timeline can transition back into ::Active
2336 12 : return Err(current_state);
2337 : }
2338 : }
2339 : }
2340 1351772 : }
2341 :
2342 0 : pub(crate) async fn layer_map_info(
2343 0 : &self,
2344 0 : reset: LayerAccessStatsReset,
2345 0 : ) -> Result<LayerMapInfo, layer_manager::Shutdown> {
2346 0 : let guard = self.layers.read().await;
2347 0 : let layer_map = guard.layer_map()?;
2348 0 : let mut in_memory_layers = Vec::with_capacity(layer_map.frozen_layers.len() + 1);
2349 0 : if let Some(open_layer) = &layer_map.open_layer {
2350 0 : in_memory_layers.push(open_layer.info());
2351 0 : }
2352 0 : for frozen_layer in &layer_map.frozen_layers {
2353 0 : in_memory_layers.push(frozen_layer.info());
2354 0 : }
2355 :
2356 0 : let historic_layers = layer_map
2357 0 : .iter_historic_layers()
2358 0 : .map(|desc| guard.get_from_desc(&desc).info(reset))
2359 0 : .collect();
2360 0 :
2361 0 : Ok(LayerMapInfo {
2362 0 : in_memory_layers,
2363 0 : historic_layers,
2364 0 : })
2365 0 : }
2366 :
2367 : #[instrument(skip_all, fields(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))]
2368 : pub(crate) async fn download_layer(
2369 : &self,
2370 : layer_file_name: &LayerName,
2371 : ctx: &RequestContext,
2372 : ) -> Result<Option<bool>, super::storage_layer::layer::DownloadError> {
2373 : let Some(layer) = self
2374 : .find_layer(layer_file_name)
2375 : .await
2376 0 : .map_err(|e| match e {
2377 0 : layer_manager::Shutdown => {
2378 0 : super::storage_layer::layer::DownloadError::TimelineShutdown
2379 0 : }
2380 0 : })?
2381 : else {
2382 : return Ok(None);
2383 : };
2384 :
2385 : layer.download(ctx).await?;
2386 :
2387 : Ok(Some(true))
2388 : }
2389 :
2390 : /// Evict just one layer.
2391 : ///
2392 : /// Returns `Ok(None)` in the case where the layer could not be found by its `layer_file_name`.
2393 0 : pub(crate) async fn evict_layer(
2394 0 : &self,
2395 0 : layer_file_name: &LayerName,
2396 0 : ) -> anyhow::Result<Option<bool>> {
2397 0 : let _gate = self
2398 0 : .gate
2399 0 : .enter()
2400 0 : .map_err(|_| anyhow::anyhow!("Shutting down"))?;
2401 :
2402 0 : let Some(local_layer) = self.find_layer(layer_file_name).await? else {
2403 0 : return Ok(None);
2404 : };
2405 :
2406 : // curl has this by default
2407 0 : let timeout = std::time::Duration::from_secs(120);
2408 0 :
2409 0 : match local_layer.evict_and_wait(timeout).await {
2410 0 : Ok(()) => Ok(Some(true)),
2411 0 : Err(EvictionError::NotFound) => Ok(Some(false)),
2412 0 : Err(EvictionError::Downloaded) => Ok(Some(false)),
2413 0 : Err(EvictionError::Timeout) => Ok(Some(false)),
2414 : }
2415 0 : }
2416 :
2417 28818060 : fn should_roll(
2418 28818060 : &self,
2419 28818060 : layer_size: u64,
2420 28818060 : projected_layer_size: u64,
2421 28818060 : checkpoint_distance: u64,
2422 28818060 : projected_lsn: Lsn,
2423 28818060 : last_freeze_at: Lsn,
2424 28818060 : opened_at: Instant,
2425 28818060 : ) -> bool {
2426 28818060 : let distance = projected_lsn.widening_sub(last_freeze_at);
2427 28818060 :
2428 28818060 : // Rolling the open layer can be triggered by:
2429 28818060 : // 1. The distance from the last LSN we rolled at. This bounds the amount of WAL that
2430 28818060 : // the safekeepers need to store. For sharded tenants, we multiply by shard count to
2431 28818060 : // account for how writes are distributed across shards: we expect each node to consume
2432 28818060 : // 1/count of the LSN on average.
2433 28818060 : // 2. The size of the currently open layer.
2434 28818060 : // 3. The time since the last roll. It helps safekeepers to regard pageserver as caught
2435 28818060 : // up and suspend activity.
2436 28818060 : if distance >= checkpoint_distance as i128 * self.shard_identity.count.count() as i128 {
2437 0 : info!(
2438 0 : "Will roll layer at {} with layer size {} due to LSN distance ({})",
2439 : projected_lsn, layer_size, distance
2440 : );
2441 :
2442 0 : true
2443 28818060 : } else if projected_layer_size >= checkpoint_distance {
2444 : // NB: this check is relied upon by:
2445 480 : let _ = IndexEntry::validate_checkpoint_distance;
2446 480 : info!(
2447 0 : "Will roll layer at {} with layer size {} due to layer size ({})",
2448 : projected_lsn, layer_size, projected_layer_size
2449 : );
2450 :
2451 480 : true
2452 28817580 : } else if distance > 0 && opened_at.elapsed() >= self.get_checkpoint_timeout() {
2453 0 : info!(
2454 0 : "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})",
2455 0 : projected_lsn,
2456 0 : layer_size,
2457 0 : opened_at.elapsed()
2458 : );
2459 :
2460 0 : true
2461 : } else {
2462 28817580 : false
2463 : }
2464 28818060 : }
2465 : }
2466 :
2467 : /// Number of times we will compute partition within a checkpoint distance.
2468 : const REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE: u64 = 10;
2469 :
2470 : // Private functions
2471 : impl Timeline {
2472 72 : pub(crate) fn get_lsn_lease_length(&self) -> Duration {
2473 72 : let tenant_conf = self.tenant_conf.load();
2474 72 : tenant_conf
2475 72 : .tenant_conf
2476 72 : .lsn_lease_length
2477 72 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
2478 72 : }
2479 :
2480 0 : pub(crate) fn get_lsn_lease_length_for_ts(&self) -> Duration {
2481 0 : let tenant_conf = self.tenant_conf.load();
2482 0 : tenant_conf
2483 0 : .tenant_conf
2484 0 : .lsn_lease_length_for_ts
2485 0 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length_for_ts)
2486 0 : }
2487 :
2488 0 : pub(crate) fn is_gc_blocked_by_lsn_lease_deadline(&self) -> bool {
2489 0 : let tenant_conf = self.tenant_conf.load();
2490 0 : tenant_conf.is_gc_blocked_by_lsn_lease_deadline()
2491 0 : }
2492 :
2493 0 : pub(crate) fn get_lazy_slru_download(&self) -> bool {
2494 0 : let tenant_conf = self.tenant_conf.load();
2495 0 : tenant_conf
2496 0 : .tenant_conf
2497 0 : .lazy_slru_download
2498 0 : .unwrap_or(self.conf.default_tenant_conf.lazy_slru_download)
2499 0 : }
2500 :
2501 : /// Checks if a get page request should get perf tracing
2502 : ///
2503 : /// The configuration priority is: tenant config override, default tenant config,
2504 : /// pageserver config.
2505 0 : pub(crate) fn is_get_page_request_sampled(&self) -> bool {
2506 0 : let tenant_conf = self.tenant_conf.load();
2507 0 : let ratio = tenant_conf
2508 0 : .tenant_conf
2509 0 : .sampling_ratio
2510 0 : .flatten()
2511 0 : .or(self.conf.default_tenant_conf.sampling_ratio)
2512 0 : .or(self.conf.tracing.as_ref().map(|t| t.sampling_ratio));
2513 0 :
2514 0 : match ratio {
2515 0 : Some(r) => {
2516 0 : if r.numerator == 0 {
2517 0 : false
2518 : } else {
2519 0 : rand::thread_rng().gen_range(0..r.denominator) < r.numerator
2520 : }
2521 : }
2522 0 : None => false,
2523 : }
2524 0 : }
2525 :
2526 28827156 : fn get_checkpoint_distance(&self) -> u64 {
2527 28827156 : let tenant_conf = self.tenant_conf.load();
2528 28827156 : tenant_conf
2529 28827156 : .tenant_conf
2530 28827156 : .checkpoint_distance
2531 28827156 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
2532 28827156 : }
2533 :
2534 28817580 : fn get_checkpoint_timeout(&self) -> Duration {
2535 28817580 : let tenant_conf = self.tenant_conf.load();
2536 28817580 : tenant_conf
2537 28817580 : .tenant_conf
2538 28817580 : .checkpoint_timeout
2539 28817580 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
2540 28817580 : }
2541 :
2542 15168 : fn get_compaction_period(&self) -> Duration {
2543 15168 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2544 15168 : tenant_conf
2545 15168 : .compaction_period
2546 15168 : .unwrap_or(self.conf.default_tenant_conf.compaction_period)
2547 15168 : }
2548 :
2549 4068 : fn get_compaction_target_size(&self) -> u64 {
2550 4068 : let tenant_conf = self.tenant_conf.load();
2551 4068 : tenant_conf
2552 4068 : .tenant_conf
2553 4068 : .compaction_target_size
2554 4068 : .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
2555 4068 : }
2556 :
2557 9480 : fn get_compaction_threshold(&self) -> usize {
2558 9480 : let tenant_conf = self.tenant_conf.load();
2559 9480 : tenant_conf
2560 9480 : .tenant_conf
2561 9480 : .compaction_threshold
2562 9480 : .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
2563 9480 : }
2564 :
2565 : /// Returns `true` if the rel_size_v2 config is enabled. NOTE: the write path and read path
2566 : /// should look at `get_rel_size_v2_status()` to get the actual status of the timeline. It is
2567 : /// possible that the index part persists the state while the config doesn't get persisted.
2568 11676 : pub(crate) fn get_rel_size_v2_enabled(&self) -> bool {
2569 11676 : let tenant_conf = self.tenant_conf.load();
2570 11676 : tenant_conf
2571 11676 : .tenant_conf
2572 11676 : .rel_size_v2_enabled
2573 11676 : .unwrap_or(self.conf.default_tenant_conf.rel_size_v2_enabled)
2574 11676 : }
2575 :
2576 13188 : pub(crate) fn get_rel_size_v2_status(&self) -> RelSizeMigration {
2577 13188 : self.rel_size_v2_status
2578 13188 : .load()
2579 13188 : .as_ref()
2580 13188 : .map(|s| s.as_ref().clone())
2581 13188 : .unwrap_or(RelSizeMigration::Legacy)
2582 13188 : }
2583 :
2584 168 : fn get_compaction_upper_limit(&self) -> usize {
2585 168 : let tenant_conf = self.tenant_conf.load();
2586 168 : tenant_conf
2587 168 : .tenant_conf
2588 168 : .compaction_upper_limit
2589 168 : .unwrap_or(self.conf.default_tenant_conf.compaction_upper_limit)
2590 168 : }
2591 :
2592 0 : pub fn get_compaction_l0_first(&self) -> bool {
2593 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2594 0 : tenant_conf
2595 0 : .compaction_l0_first
2596 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_l0_first)
2597 0 : }
2598 :
2599 0 : pub fn get_compaction_l0_semaphore(&self) -> bool {
2600 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2601 0 : tenant_conf
2602 0 : .compaction_l0_semaphore
2603 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_l0_semaphore)
2604 0 : }
2605 :
2606 7584 : fn get_l0_flush_delay_threshold(&self) -> Option<usize> {
2607 : // By default, delay L0 flushes at 3x the compaction threshold. The compaction threshold
2608 : // defaults to 10, and L0 compaction is generally able to keep L0 counts below 30.
2609 : const DEFAULT_L0_FLUSH_DELAY_FACTOR: usize = 3;
2610 :
2611 : // If compaction is disabled, don't delay.
2612 7584 : if self.get_compaction_period() == Duration::ZERO {
2613 7572 : return None;
2614 12 : }
2615 12 :
2616 12 : let compaction_threshold = self.get_compaction_threshold();
2617 12 : let tenant_conf = self.tenant_conf.load();
2618 12 : let l0_flush_delay_threshold = tenant_conf
2619 12 : .tenant_conf
2620 12 : .l0_flush_delay_threshold
2621 12 : .or(self.conf.default_tenant_conf.l0_flush_delay_threshold)
2622 12 : .unwrap_or(DEFAULT_L0_FLUSH_DELAY_FACTOR * compaction_threshold);
2623 12 :
2624 12 : // 0 disables backpressure.
2625 12 : if l0_flush_delay_threshold == 0 {
2626 0 : return None;
2627 12 : }
2628 12 :
2629 12 : // Clamp the flush delay threshold to the compaction threshold; it doesn't make sense to
2630 12 : // backpressure flushes below this.
2631 12 : // TODO: the tenant config should have validation to prevent this instead.
2632 12 : debug_assert!(l0_flush_delay_threshold >= compaction_threshold);
2633 12 : Some(max(l0_flush_delay_threshold, compaction_threshold))
2634 7584 : }
2635 :
2636 7584 : fn get_l0_flush_stall_threshold(&self) -> Option<usize> {
2637 : // Disable L0 stalls by default. Stalling can cause unavailability if L0 compaction isn't
2638 : // responsive, and it can e.g. block on other compaction via the compaction semaphore or
2639 : // sibling timelines. We need more confidence before enabling this.
2640 : const DEFAULT_L0_FLUSH_STALL_FACTOR: usize = 0; // TODO: default to e.g. 5
2641 :
2642 : // If compaction is disabled, don't stall.
2643 7584 : if self.get_compaction_period() == Duration::ZERO {
2644 7572 : return None;
2645 12 : }
2646 12 :
2647 12 : // If compaction is failing, don't stall and try to keep the tenant alive. This may not be a
2648 12 : // good idea: read amp can grow unbounded, leading to terrible performance, and we may take
2649 12 : // on unbounded compaction debt that can take a long time to fix once compaction comes back
2650 12 : // online. At least we'll delay flushes, slowing down the growth and buying some time.
2651 12 : if self.compaction_failed.load(AtomicOrdering::Relaxed) {
2652 0 : return None;
2653 12 : }
2654 12 :
2655 12 : let compaction_threshold = self.get_compaction_threshold();
2656 12 : let tenant_conf = self.tenant_conf.load();
2657 12 : let l0_flush_stall_threshold = tenant_conf
2658 12 : .tenant_conf
2659 12 : .l0_flush_stall_threshold
2660 12 : .or(self.conf.default_tenant_conf.l0_flush_stall_threshold);
2661 12 :
2662 12 : // Tests sometimes set compaction_threshold=1 to generate lots of layer files, and don't
2663 12 : // handle the 20-second compaction delay. Some (e.g. `test_backward_compatibility`) can't
2664 12 : // easily adjust the L0 backpressure settings, so just disable stalls in this case.
2665 12 : if cfg!(feature = "testing")
2666 12 : && compaction_threshold == 1
2667 0 : && l0_flush_stall_threshold.is_none()
2668 : {
2669 0 : return None;
2670 12 : }
2671 12 :
2672 12 : let l0_flush_stall_threshold = l0_flush_stall_threshold
2673 12 : .unwrap_or(DEFAULT_L0_FLUSH_STALL_FACTOR * compaction_threshold);
2674 12 :
2675 12 : // 0 disables backpressure.
2676 12 : if l0_flush_stall_threshold == 0 {
2677 12 : return None;
2678 0 : }
2679 0 :
2680 0 : // Clamp the flush stall threshold to the compaction threshold; it doesn't make sense to
2681 0 : // backpressure flushes below this.
2682 0 : // TODO: the tenant config should have validation to prevent this instead.
2683 0 : debug_assert!(l0_flush_stall_threshold >= compaction_threshold);
2684 0 : Some(max(l0_flush_stall_threshold, compaction_threshold))
2685 7584 : }
2686 :
2687 84 : fn get_image_creation_threshold(&self) -> usize {
2688 84 : let tenant_conf = self.tenant_conf.load();
2689 84 : tenant_conf
2690 84 : .tenant_conf
2691 84 : .image_creation_threshold
2692 84 : .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
2693 84 : }
2694 :
2695 2184 : fn get_compaction_algorithm_settings(&self) -> CompactionAlgorithmSettings {
2696 2184 : let tenant_conf = &self.tenant_conf.load();
2697 2184 : tenant_conf
2698 2184 : .tenant_conf
2699 2184 : .compaction_algorithm
2700 2184 : .as_ref()
2701 2184 : .unwrap_or(&self.conf.default_tenant_conf.compaction_algorithm)
2702 2184 : .clone()
2703 2184 : }
2704 :
2705 2184 : pub fn get_compaction_shard_ancestor(&self) -> bool {
2706 2184 : let tenant_conf = self.tenant_conf.load();
2707 2184 : tenant_conf
2708 2184 : .tenant_conf
2709 2184 : .compaction_shard_ancestor
2710 2184 : .unwrap_or(self.conf.default_tenant_conf.compaction_shard_ancestor)
2711 2184 : }
2712 :
2713 0 : fn get_eviction_policy(&self) -> EvictionPolicy {
2714 0 : let tenant_conf = self.tenant_conf.load();
2715 0 : tenant_conf
2716 0 : .tenant_conf
2717 0 : .eviction_policy
2718 0 : .unwrap_or(self.conf.default_tenant_conf.eviction_policy)
2719 0 : }
2720 :
2721 2784 : fn get_evictions_low_residence_duration_metric_threshold(
2722 2784 : tenant_conf: &pageserver_api::models::TenantConfig,
2723 2784 : default_tenant_conf: &pageserver_api::config::TenantConfigToml,
2724 2784 : ) -> Duration {
2725 2784 : tenant_conf
2726 2784 : .evictions_low_residence_duration_metric_threshold
2727 2784 : .unwrap_or(default_tenant_conf.evictions_low_residence_duration_metric_threshold)
2728 2784 : }
2729 :
2730 3480 : fn get_image_layer_creation_check_threshold(&self) -> u8 {
2731 3480 : let tenant_conf = self.tenant_conf.load();
2732 3480 : tenant_conf
2733 3480 : .tenant_conf
2734 3480 : .image_layer_creation_check_threshold
2735 3480 : .unwrap_or(
2736 3480 : self.conf
2737 3480 : .default_tenant_conf
2738 3480 : .image_layer_creation_check_threshold,
2739 3480 : )
2740 3480 : }
2741 :
2742 324 : fn get_gc_compaction_settings(&self) -> GcCompactionCombinedSettings {
2743 324 : let tenant_conf = &self.tenant_conf.load();
2744 324 : let gc_compaction_enabled = tenant_conf
2745 324 : .tenant_conf
2746 324 : .gc_compaction_enabled
2747 324 : .unwrap_or(self.conf.default_tenant_conf.gc_compaction_enabled);
2748 324 : let gc_compaction_verification = tenant_conf
2749 324 : .tenant_conf
2750 324 : .gc_compaction_verification
2751 324 : .unwrap_or(self.conf.default_tenant_conf.gc_compaction_verification);
2752 324 : let gc_compaction_initial_threshold_kb = tenant_conf
2753 324 : .tenant_conf
2754 324 : .gc_compaction_initial_threshold_kb
2755 324 : .unwrap_or(
2756 324 : self.conf
2757 324 : .default_tenant_conf
2758 324 : .gc_compaction_initial_threshold_kb,
2759 324 : );
2760 324 : let gc_compaction_ratio_percent = tenant_conf
2761 324 : .tenant_conf
2762 324 : .gc_compaction_ratio_percent
2763 324 : .unwrap_or(self.conf.default_tenant_conf.gc_compaction_ratio_percent);
2764 324 : GcCompactionCombinedSettings {
2765 324 : gc_compaction_enabled,
2766 324 : gc_compaction_verification,
2767 324 : gc_compaction_initial_threshold_kb,
2768 324 : gc_compaction_ratio_percent,
2769 324 : }
2770 324 : }
2771 :
2772 0 : fn get_image_creation_preempt_threshold(&self) -> usize {
2773 0 : let tenant_conf = self.tenant_conf.load();
2774 0 : tenant_conf
2775 0 : .tenant_conf
2776 0 : .image_creation_preempt_threshold
2777 0 : .unwrap_or(
2778 0 : self.conf
2779 0 : .default_tenant_conf
2780 0 : .image_creation_preempt_threshold,
2781 0 : )
2782 0 : }
2783 :
2784 : /// Resolve the effective WAL receiver protocol to use for this tenant.
2785 : ///
2786 : /// Priority order is:
2787 : /// 1. Tenant config override
2788 : /// 2. Default value for tenant config override
2789 : /// 3. Pageserver config override
2790 : /// 4. Pageserver config default
2791 0 : pub fn resolve_wal_receiver_protocol(&self) -> PostgresClientProtocol {
2792 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2793 0 : tenant_conf
2794 0 : .wal_receiver_protocol_override
2795 0 : .or(self.conf.default_tenant_conf.wal_receiver_protocol_override)
2796 0 : .unwrap_or(self.conf.wal_receiver_protocol)
2797 0 : }
2798 :
2799 0 : pub(super) fn tenant_conf_updated(&self, new_conf: &AttachedTenantConf) {
2800 0 : // NB: Most tenant conf options are read by background loops, so,
2801 0 : // changes will automatically be picked up.
2802 0 :
2803 0 : // The threshold is embedded in the metric. So, we need to update it.
2804 0 : {
2805 0 : let new_threshold = Self::get_evictions_low_residence_duration_metric_threshold(
2806 0 : &new_conf.tenant_conf,
2807 0 : &self.conf.default_tenant_conf,
2808 0 : );
2809 0 :
2810 0 : let tenant_id_str = self.tenant_shard_id.tenant_id.to_string();
2811 0 : let shard_id_str = format!("{}", self.tenant_shard_id.shard_slug());
2812 0 :
2813 0 : let timeline_id_str = self.timeline_id.to_string();
2814 0 :
2815 0 : self.remote_client.update_config(&new_conf.location);
2816 0 :
2817 0 : self.metrics
2818 0 : .evictions_with_low_residence_duration
2819 0 : .write()
2820 0 : .unwrap()
2821 0 : .change_threshold(
2822 0 : &tenant_id_str,
2823 0 : &shard_id_str,
2824 0 : &timeline_id_str,
2825 0 : new_threshold,
2826 0 : );
2827 0 : }
2828 0 : }
2829 :
2830 : /// Open a Timeline handle.
2831 : ///
2832 : /// Loads the metadata for the timeline into memory, but not the layer map.
2833 : #[allow(clippy::too_many_arguments)]
2834 2784 : pub(super) fn new(
2835 2784 : conf: &'static PageServerConf,
2836 2784 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
2837 2784 : metadata: &TimelineMetadata,
2838 2784 : previous_heatmap: Option<PreviousHeatmap>,
2839 2784 : ancestor: Option<Arc<Timeline>>,
2840 2784 : timeline_id: TimelineId,
2841 2784 : tenant_shard_id: TenantShardId,
2842 2784 : generation: Generation,
2843 2784 : shard_identity: ShardIdentity,
2844 2784 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
2845 2784 : resources: TimelineResources,
2846 2784 : pg_version: u32,
2847 2784 : state: TimelineState,
2848 2784 : attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
2849 2784 : create_idempotency: crate::tenant::CreateTimelineIdempotency,
2850 2784 : gc_compaction_state: Option<GcCompactionState>,
2851 2784 : rel_size_v2_status: Option<RelSizeMigration>,
2852 2784 : cancel: CancellationToken,
2853 2784 : ) -> Arc<Self> {
2854 2784 : let disk_consistent_lsn = metadata.disk_consistent_lsn();
2855 2784 : let (state, _) = watch::channel(state);
2856 2784 :
2857 2784 : let (layer_flush_start_tx, _) = tokio::sync::watch::channel((0, disk_consistent_lsn));
2858 2784 : let (layer_flush_done_tx, _) = tokio::sync::watch::channel((0, Ok(())));
2859 2784 :
2860 2784 : let evictions_low_residence_duration_metric_threshold = {
2861 2784 : let loaded_tenant_conf = tenant_conf.load();
2862 2784 : Self::get_evictions_low_residence_duration_metric_threshold(
2863 2784 : &loaded_tenant_conf.tenant_conf,
2864 2784 : &conf.default_tenant_conf,
2865 2784 : )
2866 : };
2867 :
2868 2784 : if let Some(ancestor) = &ancestor {
2869 1416 : let mut ancestor_gc_info = ancestor.gc_info.write().unwrap();
2870 1416 : // If we construct an explicit timeline object, it's obviously not offloaded
2871 1416 : let is_offloaded = MaybeOffloaded::No;
2872 1416 : ancestor_gc_info.insert_child(timeline_id, metadata.ancestor_lsn(), is_offloaded);
2873 1416 : }
2874 :
2875 2784 : Arc::new_cyclic(|myself| {
2876 2784 : let metrics = Arc::new(TimelineMetrics::new(
2877 2784 : &tenant_shard_id,
2878 2784 : &timeline_id,
2879 2784 : crate::metrics::EvictionsWithLowResidenceDurationBuilder::new(
2880 2784 : "mtime",
2881 2784 : evictions_low_residence_duration_metric_threshold,
2882 2784 : ),
2883 2784 : ));
2884 2784 : let aux_file_metrics = metrics.aux_file_size_gauge.clone();
2885 :
2886 2784 : let mut result = Timeline {
2887 2784 : conf,
2888 2784 : tenant_conf,
2889 2784 : myself: myself.clone(),
2890 2784 : timeline_id,
2891 2784 : tenant_shard_id,
2892 2784 : generation,
2893 2784 : shard_identity,
2894 2784 : pg_version,
2895 2784 : layers: Default::default(),
2896 2784 : gc_compaction_layer_update_lock: tokio::sync::RwLock::new(()),
2897 2784 :
2898 2784 : walredo_mgr,
2899 2784 : walreceiver: Mutex::new(None),
2900 2784 :
2901 2784 : remote_client: Arc::new(resources.remote_client),
2902 2784 :
2903 2784 : // initialize in-memory 'last_record_lsn' from 'disk_consistent_lsn'.
2904 2784 : last_record_lsn: SeqWait::new(RecordLsn {
2905 2784 : last: disk_consistent_lsn,
2906 2784 : prev: metadata.prev_record_lsn().unwrap_or(Lsn(0)),
2907 2784 : }),
2908 2784 : disk_consistent_lsn: AtomicLsn::new(disk_consistent_lsn.0),
2909 2784 :
2910 2784 : gc_compaction_state: ArcSwap::new(Arc::new(gc_compaction_state)),
2911 2784 :
2912 2784 : last_freeze_at: AtomicLsn::new(disk_consistent_lsn.0),
2913 2784 : last_freeze_ts: RwLock::new(Instant::now()),
2914 2784 :
2915 2784 : loaded_at: (disk_consistent_lsn, SystemTime::now()),
2916 2784 :
2917 2784 : ancestor_timeline: ancestor,
2918 2784 : ancestor_lsn: metadata.ancestor_lsn(),
2919 2784 :
2920 2784 : metrics,
2921 2784 :
2922 2784 : query_metrics: crate::metrics::SmgrQueryTimePerTimeline::new(
2923 2784 : &tenant_shard_id,
2924 2784 : &timeline_id,
2925 2784 : resources.pagestream_throttle_metrics,
2926 2784 : ),
2927 2784 :
2928 22272 : directory_metrics: array::from_fn(|_| AtomicU64::new(0)),
2929 22272 : directory_metrics_inited: array::from_fn(|_| AtomicBool::new(false)),
2930 2784 :
2931 2784 : flush_loop_state: Mutex::new(FlushLoopState::NotStarted),
2932 2784 :
2933 2784 : layer_flush_start_tx,
2934 2784 : layer_flush_done_tx,
2935 2784 :
2936 2784 : write_lock: tokio::sync::Mutex::new(None),
2937 2784 :
2938 2784 : gc_info: std::sync::RwLock::new(GcInfo::default()),
2939 2784 :
2940 2784 : last_image_layer_creation_status: ArcSwap::new(Arc::new(
2941 2784 : LastImageLayerCreationStatus::default(),
2942 2784 : )),
2943 2784 :
2944 2784 : applied_gc_cutoff_lsn: Rcu::new(metadata.latest_gc_cutoff_lsn()),
2945 2784 : initdb_lsn: metadata.initdb_lsn(),
2946 2784 :
2947 2784 : current_logical_size: if disk_consistent_lsn.is_valid() {
2948 : // we're creating timeline data with some layer files existing locally,
2949 : // need to recalculate timeline's logical size based on data in the layers.
2950 1440 : LogicalSize::deferred_initial(disk_consistent_lsn)
2951 : } else {
2952 : // we're creating timeline data without any layers existing locally,
2953 : // initial logical size is 0.
2954 1344 : LogicalSize::empty_initial()
2955 : },
2956 :
2957 2784 : partitioning: GuardArcSwap::new((
2958 2784 : (KeyPartitioning::new(), KeyPartitioning::new().into_sparse()),
2959 2784 : Lsn(0),
2960 2784 : )),
2961 2784 : repartition_threshold: 0,
2962 2784 : last_image_layer_creation_check_at: AtomicLsn::new(0),
2963 2784 : last_image_layer_creation_check_instant: Mutex::new(None),
2964 2784 :
2965 2784 : last_received_wal: Mutex::new(None),
2966 2784 : rel_size_cache: RwLock::new(RelSizeCache {
2967 2784 : complete_as_of: disk_consistent_lsn,
2968 2784 : map: HashMap::new(),
2969 2784 : }),
2970 2784 :
2971 2784 : download_all_remote_layers_task_info: RwLock::new(None),
2972 2784 :
2973 2784 : state,
2974 2784 :
2975 2784 : eviction_task_timeline_state: tokio::sync::Mutex::new(
2976 2784 : EvictionTaskTimelineState::default(),
2977 2784 : ),
2978 2784 : delete_progress: TimelineDeleteProgress::default(),
2979 2784 :
2980 2784 : cancel,
2981 2784 : gate: Gate::default(),
2982 2784 :
2983 2784 : compaction_lock: tokio::sync::Mutex::default(),
2984 2784 : compaction_failed: AtomicBool::default(),
2985 2784 : l0_compaction_trigger: resources.l0_compaction_trigger,
2986 2784 : gc_lock: tokio::sync::Mutex::default(),
2987 2784 :
2988 2784 : standby_horizon: AtomicLsn::new(0),
2989 2784 :
2990 2784 : pagestream_throttle: resources.pagestream_throttle,
2991 2784 :
2992 2784 : aux_file_size_estimator: AuxFileSizeEstimator::new(aux_file_metrics),
2993 2784 :
2994 2784 : #[cfg(test)]
2995 2784 : extra_test_dense_keyspace: ArcSwap::new(Arc::new(KeySpace::default())),
2996 2784 :
2997 2784 : l0_flush_global_state: resources.l0_flush_global_state,
2998 2784 :
2999 2784 : handles: Default::default(),
3000 2784 :
3001 2784 : attach_wal_lag_cooldown,
3002 2784 :
3003 2784 : create_idempotency,
3004 2784 :
3005 2784 : page_trace: Default::default(),
3006 2784 :
3007 2784 : previous_heatmap: ArcSwapOption::from_pointee(previous_heatmap),
3008 2784 :
3009 2784 : heatmap_layers_downloader: Mutex::new(None),
3010 2784 :
3011 2784 : rel_size_v2_status: ArcSwapOption::from_pointee(rel_size_v2_status),
3012 2784 :
3013 2784 : wait_lsn_log_slow: tokio::sync::Semaphore::new(1),
3014 2784 : };
3015 2784 :
3016 2784 : result.repartition_threshold =
3017 2784 : result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE;
3018 2784 :
3019 2784 : result
3020 2784 : .metrics
3021 2784 : .last_record_lsn_gauge
3022 2784 : .set(disk_consistent_lsn.0 as i64);
3023 2784 : result
3024 2784 : })
3025 2784 : }
3026 :
3027 4044 : pub(super) fn maybe_spawn_flush_loop(self: &Arc<Self>) {
3028 4044 : let Ok(guard) = self.gate.enter() else {
3029 0 : info!("cannot start flush loop when the timeline gate has already been closed");
3030 0 : return;
3031 : };
3032 4044 : let mut flush_loop_state = self.flush_loop_state.lock().unwrap();
3033 4044 : match *flush_loop_state {
3034 2748 : FlushLoopState::NotStarted => (),
3035 : FlushLoopState::Running { .. } => {
3036 1296 : info!(
3037 0 : "skipping attempt to start flush_loop twice {}/{}",
3038 0 : self.tenant_shard_id, self.timeline_id
3039 : );
3040 1296 : return;
3041 : }
3042 : FlushLoopState::Exited => {
3043 0 : info!(
3044 0 : "ignoring attempt to restart exited flush_loop {}/{}",
3045 0 : self.tenant_shard_id, self.timeline_id
3046 : );
3047 0 : return;
3048 : }
3049 : }
3050 :
3051 2748 : let layer_flush_start_rx = self.layer_flush_start_tx.subscribe();
3052 2748 : let self_clone = Arc::clone(self);
3053 2748 :
3054 2748 : debug!("spawning flush loop");
3055 2748 : *flush_loop_state = FlushLoopState::Running {
3056 2748 : #[cfg(test)]
3057 2748 : expect_initdb_optimization: false,
3058 2748 : #[cfg(test)]
3059 2748 : initdb_optimization_count: 0,
3060 2748 : };
3061 2748 : task_mgr::spawn(
3062 2748 : task_mgr::BACKGROUND_RUNTIME.handle(),
3063 2748 : task_mgr::TaskKind::LayerFlushTask,
3064 2748 : self.tenant_shard_id,
3065 2748 : Some(self.timeline_id),
3066 2748 : "layer flush task",
3067 2748 : async move {
3068 2748 : let _guard = guard;
3069 2748 : let background_ctx = RequestContext::todo_child(TaskKind::LayerFlushTask, DownloadBehavior::Error).with_scope_timeline(&self_clone);
3070 2748 : self_clone.flush_loop(layer_flush_start_rx, &background_ctx).await;
3071 60 : let mut flush_loop_state = self_clone.flush_loop_state.lock().unwrap();
3072 60 : assert!(matches!(*flush_loop_state, FlushLoopState::Running{..}));
3073 60 : *flush_loop_state = FlushLoopState::Exited;
3074 60 : Ok(())
3075 60 : }
3076 2748 : .instrument(info_span!(parent: None, "layer flush task", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
3077 : );
3078 4044 : }
3079 :
3080 0 : pub(crate) fn update_gc_compaction_state(
3081 0 : &self,
3082 0 : gc_compaction_state: GcCompactionState,
3083 0 : ) -> anyhow::Result<()> {
3084 0 : self.gc_compaction_state
3085 0 : .store(Arc::new(Some(gc_compaction_state.clone())));
3086 0 : self.remote_client
3087 0 : .schedule_index_upload_for_gc_compaction_state_update(gc_compaction_state)
3088 0 : }
3089 :
3090 0 : pub(crate) fn update_rel_size_v2_status(
3091 0 : &self,
3092 0 : rel_size_v2_status: RelSizeMigration,
3093 0 : ) -> anyhow::Result<()> {
3094 0 : self.rel_size_v2_status
3095 0 : .store(Some(Arc::new(rel_size_v2_status.clone())));
3096 0 : self.remote_client
3097 0 : .schedule_index_upload_for_rel_size_v2_status_update(rel_size_v2_status)
3098 0 : }
3099 :
3100 0 : pub(crate) fn get_gc_compaction_state(&self) -> Option<GcCompactionState> {
3101 0 : self.gc_compaction_state.load_full().as_ref().clone()
3102 0 : }
3103 :
3104 : /// Creates and starts the wal receiver.
3105 : ///
3106 : /// This function is expected to be called at most once per Timeline's lifecycle
3107 : /// when the timeline is activated.
3108 0 : fn launch_wal_receiver(
3109 0 : self: &Arc<Self>,
3110 0 : ctx: &RequestContext,
3111 0 : broker_client: BrokerClientChannel,
3112 0 : ) {
3113 0 : info!(
3114 0 : "launching WAL receiver for timeline {} of tenant {}",
3115 0 : self.timeline_id, self.tenant_shard_id
3116 : );
3117 :
3118 0 : let tenant_conf = self.tenant_conf.load();
3119 0 : let wal_connect_timeout = tenant_conf
3120 0 : .tenant_conf
3121 0 : .walreceiver_connect_timeout
3122 0 : .unwrap_or(self.conf.default_tenant_conf.walreceiver_connect_timeout);
3123 0 : let lagging_wal_timeout = tenant_conf
3124 0 : .tenant_conf
3125 0 : .lagging_wal_timeout
3126 0 : .unwrap_or(self.conf.default_tenant_conf.lagging_wal_timeout);
3127 0 : let max_lsn_wal_lag = tenant_conf
3128 0 : .tenant_conf
3129 0 : .max_lsn_wal_lag
3130 0 : .unwrap_or(self.conf.default_tenant_conf.max_lsn_wal_lag);
3131 0 :
3132 0 : let mut guard = self.walreceiver.lock().unwrap();
3133 0 : assert!(
3134 0 : guard.is_none(),
3135 0 : "multiple launches / re-launches of WAL receiver are not supported"
3136 : );
3137 0 : *guard = Some(WalReceiver::start(
3138 0 : Arc::clone(self),
3139 0 : WalReceiverConf {
3140 0 : protocol: self.resolve_wal_receiver_protocol(),
3141 0 : wal_connect_timeout,
3142 0 : lagging_wal_timeout,
3143 0 : max_lsn_wal_lag,
3144 0 : auth_token: crate::config::SAFEKEEPER_AUTH_TOKEN.get().cloned(),
3145 0 : availability_zone: self.conf.availability_zone.clone(),
3146 0 : ingest_batch_size: self.conf.ingest_batch_size,
3147 0 : validate_wal_contiguity: self.conf.validate_wal_contiguity,
3148 0 : },
3149 0 : broker_client,
3150 0 : ctx,
3151 0 : ));
3152 0 : }
3153 :
3154 : /// Initialize with an empty layer map. Used when creating a new timeline.
3155 2748 : pub(super) fn init_empty_layer_map(&self, start_lsn: Lsn) {
3156 2748 : let mut layers = self.layers.try_write().expect(
3157 2748 : "in the context where we call this function, no other task has access to the object",
3158 2748 : );
3159 2748 : layers
3160 2748 : .open_mut()
3161 2748 : .expect("in this context the LayerManager must still be open")
3162 2748 : .initialize_empty(Lsn(start_lsn.0));
3163 2748 : }
3164 :
3165 : /// Scan the timeline directory, cleanup, populate the layer map, and schedule uploads for local-only
3166 : /// files.
3167 36 : pub(super) async fn load_layer_map(
3168 36 : &self,
3169 36 : disk_consistent_lsn: Lsn,
3170 36 : index_part: IndexPart,
3171 36 : ) -> anyhow::Result<()> {
3172 : use LayerName::*;
3173 : use init::Decision::*;
3174 : use init::{Discovered, DismissedLayer};
3175 :
3176 36 : let mut guard = self.layers.write().await;
3177 :
3178 36 : let timer = self.metrics.load_layer_map_histo.start_timer();
3179 36 :
3180 36 : // Scan timeline directory and create ImageLayerName and DeltaFilename
3181 36 : // structs representing all files on disk
3182 36 : let timeline_path = self
3183 36 : .conf
3184 36 : .timeline_path(&self.tenant_shard_id, &self.timeline_id);
3185 36 : let conf = self.conf;
3186 36 : let span = tracing::Span::current();
3187 36 :
3188 36 : // Copy to move into the task we're about to spawn
3189 36 : let this = self.myself.upgrade().expect("&self method holds the arc");
3190 :
3191 36 : let (loaded_layers, needs_cleanup, total_physical_size) = tokio::task::spawn_blocking({
3192 36 : move || {
3193 36 : let _g = span.entered();
3194 36 : let discovered = init::scan_timeline_dir(&timeline_path)?;
3195 36 : let mut discovered_layers = Vec::with_capacity(discovered.len());
3196 36 : let mut unrecognized_files = Vec::new();
3197 36 :
3198 36 : let mut path = timeline_path;
3199 :
3200 132 : for discovered in discovered {
3201 96 : let (name, kind) = match discovered {
3202 96 : Discovered::Layer(layer_file_name, local_metadata) => {
3203 96 : discovered_layers.push((layer_file_name, local_metadata));
3204 96 : continue;
3205 : }
3206 0 : Discovered::IgnoredBackup(path) => {
3207 0 : std::fs::remove_file(path)
3208 0 : .or_else(fs_ext::ignore_not_found)
3209 0 : .fatal_err("Removing .old file");
3210 0 : continue;
3211 : }
3212 0 : Discovered::Unknown(file_name) => {
3213 0 : // we will later error if there are any
3214 0 : unrecognized_files.push(file_name);
3215 0 : continue;
3216 : }
3217 0 : Discovered::Ephemeral(name) => (name, "old ephemeral file"),
3218 0 : Discovered::Temporary(name) => (name, "temporary timeline file"),
3219 0 : Discovered::TemporaryDownload(name) => (name, "temporary download"),
3220 : };
3221 0 : path.push(Utf8Path::new(&name));
3222 0 : init::cleanup(&path, kind)?;
3223 0 : path.pop();
3224 : }
3225 :
3226 36 : if !unrecognized_files.is_empty() {
3227 : // assume that if there are any there are many many.
3228 0 : let n = unrecognized_files.len();
3229 0 : let first = &unrecognized_files[..n.min(10)];
3230 0 : anyhow::bail!(
3231 0 : "unrecognized files in timeline dir (total {n}), first 10: {first:?}"
3232 0 : );
3233 36 : }
3234 36 :
3235 36 : let decided = init::reconcile(discovered_layers, &index_part, disk_consistent_lsn);
3236 36 :
3237 36 : let mut loaded_layers = Vec::new();
3238 36 : let mut needs_cleanup = Vec::new();
3239 36 : let mut total_physical_size = 0;
3240 :
3241 132 : for (name, decision) in decided {
3242 96 : let decision = match decision {
3243 96 : Ok(decision) => decision,
3244 0 : Err(DismissedLayer::Future { local }) => {
3245 0 : if let Some(local) = local {
3246 0 : init::cleanup_future_layer(
3247 0 : &local.local_path,
3248 0 : &name,
3249 0 : disk_consistent_lsn,
3250 0 : )?;
3251 0 : }
3252 0 : needs_cleanup.push(name);
3253 0 : continue;
3254 : }
3255 0 : Err(DismissedLayer::LocalOnly(local)) => {
3256 0 : init::cleanup_local_only_file(&name, &local)?;
3257 : // this file never existed remotely, we will have to do rework
3258 0 : continue;
3259 : }
3260 0 : Err(DismissedLayer::BadMetadata(local)) => {
3261 0 : init::cleanup_local_file_for_remote(&local)?;
3262 : // this file never existed remotely, we will have to do rework
3263 0 : continue;
3264 : }
3265 : };
3266 :
3267 96 : match &name {
3268 72 : Delta(d) => assert!(d.lsn_range.end <= disk_consistent_lsn + 1),
3269 24 : Image(i) => assert!(i.lsn <= disk_consistent_lsn),
3270 : }
3271 :
3272 96 : tracing::debug!(layer=%name, ?decision, "applied");
3273 :
3274 96 : let layer = match decision {
3275 96 : Resident { local, remote } => {
3276 96 : total_physical_size += local.file_size;
3277 96 : Layer::for_resident(conf, &this, local.local_path, name, remote)
3278 96 : .drop_eviction_guard()
3279 : }
3280 0 : Evicted(remote) => Layer::for_evicted(conf, &this, name, remote),
3281 : };
3282 :
3283 96 : loaded_layers.push(layer);
3284 : }
3285 36 : Ok((loaded_layers, needs_cleanup, total_physical_size))
3286 36 : }
3287 36 : })
3288 36 : .await
3289 36 : .map_err(anyhow::Error::new)
3290 36 : .and_then(|x| x)?;
3291 :
3292 36 : let num_layers = loaded_layers.len();
3293 36 :
3294 36 : guard
3295 36 : .open_mut()
3296 36 : .expect("layermanager must be open during init")
3297 36 : .initialize_local_layers(loaded_layers, disk_consistent_lsn + 1);
3298 36 :
3299 36 : self.remote_client
3300 36 : .schedule_layer_file_deletion(&needs_cleanup)?;
3301 36 : self.remote_client
3302 36 : .schedule_index_upload_for_file_changes()?;
3303 : // This barrier orders above DELETEs before any later operations.
3304 : // This is critical because code executing after the barrier might
3305 : // create again objects with the same key that we just scheduled for deletion.
3306 : // For example, if we just scheduled deletion of an image layer "from the future",
3307 : // later compaction might run again and re-create the same image layer.
3308 : // "from the future" here means an image layer whose LSN is > IndexPart::disk_consistent_lsn.
3309 : // "same" here means same key range and LSN.
3310 : //
3311 : // Without a barrier between above DELETEs and the re-creation's PUTs,
3312 : // the upload queue may execute the PUT first, then the DELETE.
3313 : // In our example, we will end up with an IndexPart referencing a non-existent object.
3314 : //
3315 : // 1. a future image layer is created and uploaded
3316 : // 2. ps restart
3317 : // 3. the future layer from (1) is deleted during load layer map
3318 : // 4. image layer is re-created and uploaded
3319 : // 5. deletion queue would like to delete (1) but actually deletes (4)
3320 : // 6. delete by name works as expected, but it now deletes the wrong (later) version
3321 : //
3322 : // See https://github.com/neondatabase/neon/issues/5878
3323 : //
3324 : // NB: generation numbers naturally protect against this because they disambiguate
3325 : // (1) and (4)
3326 : // TODO: this is basically a no-op now, should we remove it?
3327 36 : self.remote_client.schedule_barrier()?;
3328 : // TenantShard::create_timeline will wait for these uploads to happen before returning, or
3329 : // on retry.
3330 :
3331 : // Now that we have the full layer map, we may calculate the visibility of layers within it (a global scan)
3332 36 : drop(guard); // drop write lock, update_layer_visibility will take a read lock.
3333 36 : self.update_layer_visibility().await?;
3334 :
3335 36 : info!(
3336 0 : "loaded layer map with {} layers at {}, total physical size: {}",
3337 : num_layers, disk_consistent_lsn, total_physical_size
3338 : );
3339 :
3340 36 : timer.stop_and_record();
3341 36 : Ok(())
3342 36 : }
3343 :
3344 : /// Retrieve current logical size of the timeline.
3345 : ///
3346 : /// The size could be lagging behind the actual number, in case
3347 : /// the initial size calculation has not been run (gets triggered on the first size access).
3348 : ///
3349 : /// return size and boolean flag that shows if the size is exact
3350 0 : pub(crate) fn get_current_logical_size(
3351 0 : self: &Arc<Self>,
3352 0 : priority: GetLogicalSizePriority,
3353 0 : ctx: &RequestContext,
3354 0 : ) -> logical_size::CurrentLogicalSize {
3355 0 : if !self.tenant_shard_id.is_shard_zero() {
3356 : // Logical size is only accurately maintained on shard zero: when called elsewhere, for example
3357 : // when HTTP API is serving a GET for timeline zero, return zero
3358 0 : return logical_size::CurrentLogicalSize::Approximate(logical_size::Approximate::zero());
3359 0 : }
3360 0 :
3361 0 : let current_size = self.current_logical_size.current_size();
3362 0 : debug!("Current size: {current_size:?}");
3363 :
3364 0 : match (current_size.accuracy(), priority) {
3365 0 : (logical_size::Accuracy::Exact, _) => (), // nothing to do
3366 0 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::Background) => {
3367 0 : // background task will eventually deliver an exact value, we're in no rush
3368 0 : }
3369 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::User) => {
3370 : // background task is not ready, but user is asking for it now;
3371 : // => make the background task skip the line
3372 : // (The alternative would be to calculate the size here, but,
3373 : // it can actually take a long time if the user has a lot of rels.
3374 : // And we'll inevitable need it again; So, let the background task do the work.)
3375 0 : match self
3376 0 : .current_logical_size
3377 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
3378 0 : .get()
3379 : {
3380 0 : Some(cancel) => cancel.cancel(),
3381 : None => {
3382 0 : match self.current_state() {
3383 0 : TimelineState::Broken { .. } | TimelineState::Stopping => {
3384 0 : // Can happen when timeline detail endpoint is used when deletion is ongoing (or its broken).
3385 0 : // Don't make noise.
3386 0 : }
3387 : TimelineState::Loading => {
3388 : // Import does not return an activated timeline.
3389 0 : info!(
3390 0 : "discarding priority boost for logical size calculation because timeline is not yet active"
3391 : );
3392 : }
3393 : TimelineState::Active => {
3394 : // activation should be setting the once cell
3395 0 : warn!(
3396 0 : "unexpected: cancel_wait_for_background_loop_concurrency_limit_semaphore not set, priority-boosting of logical size calculation will not work"
3397 : );
3398 0 : debug_assert!(false);
3399 : }
3400 : }
3401 : }
3402 : }
3403 : }
3404 : }
3405 :
3406 0 : if let CurrentLogicalSize::Approximate(_) = ¤t_size {
3407 0 : if ctx.task_kind() == TaskKind::WalReceiverConnectionHandler {
3408 0 : let first = self
3409 0 : .current_logical_size
3410 0 : .did_return_approximate_to_walreceiver
3411 0 : .compare_exchange(
3412 0 : false,
3413 0 : true,
3414 0 : AtomicOrdering::Relaxed,
3415 0 : AtomicOrdering::Relaxed,
3416 0 : )
3417 0 : .is_ok();
3418 0 : if first {
3419 0 : crate::metrics::initial_logical_size::TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE.inc();
3420 0 : }
3421 0 : }
3422 0 : }
3423 :
3424 0 : current_size
3425 0 : }
3426 :
3427 0 : fn spawn_initial_logical_size_computation_task(self: &Arc<Self>, ctx: &RequestContext) {
3428 0 : let Some(initial_part_end) = self.current_logical_size.initial_part_end else {
3429 : // nothing to do for freshly created timelines;
3430 0 : assert_eq!(
3431 0 : self.current_logical_size.current_size().accuracy(),
3432 0 : logical_size::Accuracy::Exact,
3433 0 : );
3434 0 : self.current_logical_size.initialized.add_permits(1);
3435 0 : return;
3436 : };
3437 :
3438 0 : let cancel_wait_for_background_loop_concurrency_limit_semaphore = CancellationToken::new();
3439 0 : let token = cancel_wait_for_background_loop_concurrency_limit_semaphore.clone();
3440 0 : self.current_logical_size
3441 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore.set(token)
3442 0 : .expect("initial logical size calculation task must be spawned exactly once per Timeline object");
3443 0 :
3444 0 : let self_clone = Arc::clone(self);
3445 0 : let background_ctx = ctx.detached_child(
3446 0 : TaskKind::InitialLogicalSizeCalculation,
3447 0 : DownloadBehavior::Download,
3448 0 : );
3449 0 : task_mgr::spawn(
3450 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
3451 0 : task_mgr::TaskKind::InitialLogicalSizeCalculation,
3452 0 : self.tenant_shard_id,
3453 0 : Some(self.timeline_id),
3454 0 : "initial size calculation",
3455 : // NB: don't log errors here, task_mgr will do that.
3456 0 : async move {
3457 0 : self_clone
3458 0 : .initial_logical_size_calculation_task(
3459 0 : initial_part_end,
3460 0 : cancel_wait_for_background_loop_concurrency_limit_semaphore,
3461 0 : background_ctx,
3462 0 : )
3463 0 : .await;
3464 0 : Ok(())
3465 0 : }
3466 0 : .instrument(info_span!(parent: None, "initial_size_calculation", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id)),
3467 : );
3468 0 : }
3469 :
3470 : /// # Cancellation
3471 : ///
3472 : /// This method is sensitive to `Timeline::cancel`.
3473 : ///
3474 : /// It is _not_ sensitive to task_mgr::shutdown_token().
3475 : ///
3476 : /// # Cancel-Safety
3477 : ///
3478 : /// It does Timeline IO, hence this should be polled to completion because
3479 : /// we could be leaving in-flight IOs behind, which is safe, but annoying
3480 : /// to reason about.
3481 0 : async fn initial_logical_size_calculation_task(
3482 0 : self: Arc<Self>,
3483 0 : initial_part_end: Lsn,
3484 0 : skip_concurrency_limiter: CancellationToken,
3485 0 : background_ctx: RequestContext,
3486 0 : ) {
3487 0 : scopeguard::defer! {
3488 0 : // Irrespective of the outcome of this operation, we should unblock anyone waiting for it.
3489 0 : self.current_logical_size.initialized.add_permits(1);
3490 0 : }
3491 0 :
3492 0 : let try_once = |attempt: usize| {
3493 0 : let background_ctx = &background_ctx;
3494 0 : let self_ref = &self;
3495 0 : let skip_concurrency_limiter = &skip_concurrency_limiter;
3496 0 : async move {
3497 0 : let wait_for_permit = super::tasks::acquire_concurrency_permit(
3498 0 : BackgroundLoopKind::InitialLogicalSizeCalculation,
3499 0 : background_ctx,
3500 0 : );
3501 :
3502 : use crate::metrics::initial_logical_size::StartCircumstances;
3503 0 : let (_maybe_permit, circumstances) = tokio::select! {
3504 0 : permit = wait_for_permit => {
3505 0 : (Some(permit), StartCircumstances::AfterBackgroundTasksRateLimit)
3506 : }
3507 0 : _ = self_ref.cancel.cancelled() => {
3508 0 : return Err(CalculateLogicalSizeError::Cancelled);
3509 : }
3510 0 : () = skip_concurrency_limiter.cancelled() => {
3511 : // Some action that is part of a end user interaction requested logical size
3512 : // => break out of the rate limit
3513 : // TODO: ideally we'd not run on BackgroundRuntime but the requester's runtime;
3514 : // but then again what happens if they cancel; also, we should just be using
3515 : // one runtime across the entire process, so, let's leave this for now.
3516 0 : (None, StartCircumstances::SkippedConcurrencyLimiter)
3517 : }
3518 : };
3519 :
3520 0 : let metrics_guard = if attempt == 1 {
3521 0 : crate::metrics::initial_logical_size::START_CALCULATION.first(circumstances)
3522 : } else {
3523 0 : crate::metrics::initial_logical_size::START_CALCULATION.retry(circumstances)
3524 : };
3525 :
3526 0 : let io_concurrency = IoConcurrency::spawn_from_conf(
3527 0 : self_ref.conf,
3528 0 : self_ref
3529 0 : .gate
3530 0 : .enter()
3531 0 : .map_err(|_| CalculateLogicalSizeError::Cancelled)?,
3532 : );
3533 :
3534 0 : let calculated_size = self_ref
3535 0 : .logical_size_calculation_task(
3536 0 : initial_part_end,
3537 0 : LogicalSizeCalculationCause::Initial,
3538 0 : background_ctx,
3539 0 : )
3540 0 : .await?;
3541 :
3542 0 : self_ref
3543 0 : .trigger_aux_file_size_computation(
3544 0 : initial_part_end,
3545 0 : background_ctx,
3546 0 : io_concurrency,
3547 0 : )
3548 0 : .await?;
3549 :
3550 : // TODO: add aux file size to logical size
3551 :
3552 0 : Ok((calculated_size, metrics_guard))
3553 0 : }
3554 0 : };
3555 :
3556 0 : let retrying = async {
3557 0 : let mut attempt = 0;
3558 : loop {
3559 0 : attempt += 1;
3560 0 :
3561 0 : match try_once(attempt).await {
3562 0 : Ok(res) => return ControlFlow::Continue(res),
3563 0 : Err(CalculateLogicalSizeError::Cancelled) => return ControlFlow::Break(()),
3564 : Err(
3565 0 : e @ (CalculateLogicalSizeError::Decode(_)
3566 0 : | CalculateLogicalSizeError::PageRead(_)),
3567 0 : ) => {
3568 0 : warn!(attempt, "initial size calculation failed: {e:?}");
3569 : // exponential back-off doesn't make sense at these long intervals;
3570 : // use fixed retry interval with generous jitter instead
3571 0 : let sleep_duration = Duration::from_secs(
3572 0 : u64::try_from(
3573 0 : // 1hour base
3574 0 : (60_i64 * 60_i64)
3575 0 : // 10min jitter
3576 0 : + rand::thread_rng().gen_range(-10 * 60..10 * 60),
3577 0 : )
3578 0 : .expect("10min < 1hour"),
3579 0 : );
3580 0 : tokio::select! {
3581 0 : _ = tokio::time::sleep(sleep_duration) => {}
3582 0 : _ = self.cancel.cancelled() => return ControlFlow::Break(()),
3583 : }
3584 : }
3585 : }
3586 : }
3587 0 : };
3588 :
3589 0 : let (calculated_size, metrics_guard) = match retrying.await {
3590 0 : ControlFlow::Continue(calculated_size) => calculated_size,
3591 0 : ControlFlow::Break(()) => return,
3592 : };
3593 :
3594 : // we cannot query current_logical_size.current_size() to know the current
3595 : // *negative* value, only truncated to u64.
3596 0 : let added = self
3597 0 : .current_logical_size
3598 0 : .size_added_after_initial
3599 0 : .load(AtomicOrdering::Relaxed);
3600 0 :
3601 0 : let sum = calculated_size.saturating_add_signed(added);
3602 0 :
3603 0 : // set the gauge value before it can be set in `update_current_logical_size`.
3604 0 : self.metrics.current_logical_size_gauge.set(sum);
3605 0 :
3606 0 : self.current_logical_size
3607 0 : .initial_logical_size
3608 0 : .set((calculated_size, metrics_guard.calculation_result_saved()))
3609 0 : .ok()
3610 0 : .expect("only this task sets it");
3611 0 : }
3612 :
3613 84 : pub(crate) fn spawn_ondemand_logical_size_calculation(
3614 84 : self: &Arc<Self>,
3615 84 : lsn: Lsn,
3616 84 : cause: LogicalSizeCalculationCause,
3617 84 : ctx: RequestContext,
3618 84 : ) -> oneshot::Receiver<Result<u64, CalculateLogicalSizeError>> {
3619 84 : let (sender, receiver) = oneshot::channel();
3620 84 : let self_clone = Arc::clone(self);
3621 84 : // XXX if our caller loses interest, i.e., ctx is cancelled,
3622 84 : // we should stop the size calculation work and return an error.
3623 84 : // That would require restructuring this function's API to
3624 84 : // return the result directly, instead of a Receiver for the result.
3625 84 : let ctx = ctx.detached_child(
3626 84 : TaskKind::OndemandLogicalSizeCalculation,
3627 84 : DownloadBehavior::Download,
3628 84 : );
3629 84 : task_mgr::spawn(
3630 84 : task_mgr::BACKGROUND_RUNTIME.handle(),
3631 84 : task_mgr::TaskKind::OndemandLogicalSizeCalculation,
3632 84 : self.tenant_shard_id,
3633 84 : Some(self.timeline_id),
3634 84 : "ondemand logical size calculation",
3635 84 : async move {
3636 84 : let res = self_clone
3637 84 : .logical_size_calculation_task(lsn, cause, &ctx)
3638 84 : .await;
3639 84 : let _ = sender.send(res).ok();
3640 84 : Ok(()) // Receiver is responsible for handling errors
3641 84 : }
3642 84 : .in_current_span(),
3643 84 : );
3644 84 : receiver
3645 84 : }
3646 :
3647 : #[instrument(skip_all)]
3648 : async fn logical_size_calculation_task(
3649 : self: &Arc<Self>,
3650 : lsn: Lsn,
3651 : cause: LogicalSizeCalculationCause,
3652 : ctx: &RequestContext,
3653 : ) -> Result<u64, CalculateLogicalSizeError> {
3654 : crate::span::debug_assert_current_span_has_tenant_and_timeline_id();
3655 : // We should never be calculating logical sizes on shard !=0, because these shards do not have
3656 : // accurate relation sizes, and they do not emit consumption metrics.
3657 : debug_assert!(self.tenant_shard_id.is_shard_zero());
3658 :
3659 : let guard = self
3660 : .gate
3661 : .enter()
3662 0 : .map_err(|_| CalculateLogicalSizeError::Cancelled)?;
3663 :
3664 : self.calculate_logical_size(lsn, cause, &guard, ctx).await
3665 : }
3666 :
3667 : /// Calculate the logical size of the database at the latest LSN.
3668 : ///
3669 : /// NOTE: counted incrementally, includes ancestors. This can be a slow operation,
3670 : /// especially if we need to download remote layers.
3671 84 : async fn calculate_logical_size(
3672 84 : &self,
3673 84 : up_to_lsn: Lsn,
3674 84 : cause: LogicalSizeCalculationCause,
3675 84 : _guard: &GateGuard,
3676 84 : ctx: &RequestContext,
3677 84 : ) -> Result<u64, CalculateLogicalSizeError> {
3678 84 : info!(
3679 0 : "Calculating logical size for timeline {} at {}",
3680 : self.timeline_id, up_to_lsn
3681 : );
3682 :
3683 84 : if let Err(()) = pausable_failpoint!("timeline-calculate-logical-size-pause", &self.cancel)
3684 : {
3685 0 : return Err(CalculateLogicalSizeError::Cancelled);
3686 84 : }
3687 :
3688 : // See if we've already done the work for initial size calculation.
3689 : // This is a short-cut for timelines that are mostly unused.
3690 84 : if let Some(size) = self.current_logical_size.initialized_size(up_to_lsn) {
3691 0 : return Ok(size);
3692 84 : }
3693 84 : let storage_time_metrics = match cause {
3694 : LogicalSizeCalculationCause::Initial
3695 : | LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize
3696 0 : | LogicalSizeCalculationCause::TenantSizeHandler => &self.metrics.logical_size_histo,
3697 : LogicalSizeCalculationCause::EvictionTaskImitation => {
3698 84 : &self.metrics.imitate_logical_size_histo
3699 : }
3700 : };
3701 84 : let timer = storage_time_metrics.start_timer();
3702 84 : let logical_size = self
3703 84 : .get_current_logical_size_non_incremental(up_to_lsn, ctx)
3704 84 : .await?;
3705 84 : debug!("calculated logical size: {logical_size}");
3706 84 : timer.stop_and_record();
3707 84 : Ok(logical_size)
3708 84 : }
3709 :
3710 : /// Update current logical size, adding `delta' to the old value.
3711 1623420 : fn update_current_logical_size(&self, delta: i64) {
3712 1623420 : let logical_size = &self.current_logical_size;
3713 1623420 : logical_size.increment_size(delta);
3714 1623420 :
3715 1623420 : // Also set the value in the prometheus gauge. Note that
3716 1623420 : // there is a race condition here: if this is is called by two
3717 1623420 : // threads concurrently, the prometheus gauge might be set to
3718 1623420 : // one value while current_logical_size is set to the
3719 1623420 : // other.
3720 1623420 : match logical_size.current_size() {
3721 1623420 : CurrentLogicalSize::Exact(ref new_current_size) => self
3722 1623420 : .metrics
3723 1623420 : .current_logical_size_gauge
3724 1623420 : .set(new_current_size.into()),
3725 0 : CurrentLogicalSize::Approximate(_) => {
3726 0 : // don't update the gauge yet, this allows us not to update the gauge back and
3727 0 : // forth between the initial size calculation task.
3728 0 : }
3729 : }
3730 1623420 : }
3731 :
3732 18144 : pub(crate) fn update_directory_entries_count(&self, kind: DirectoryKind, count: MetricsUpdate) {
3733 18144 : // TODO: this directory metrics is not correct -- we could have multiple reldirs in the system
3734 18144 : // for each of the database, but we only store one value, and therefore each pgdirmodification
3735 18144 : // would overwrite the previous value if they modify different databases.
3736 18144 :
3737 18144 : match count {
3738 6612 : MetricsUpdate::Set(count) => {
3739 6612 : self.directory_metrics[kind.offset()].store(count, AtomicOrdering::Relaxed);
3740 6612 : self.directory_metrics_inited[kind.offset()].store(true, AtomicOrdering::Relaxed);
3741 6612 : }
3742 11520 : MetricsUpdate::Add(count) => {
3743 11520 : // TODO: these operations are not atomic; but we only have one writer to the metrics, so
3744 11520 : // it's fine.
3745 11520 : if self.directory_metrics_inited[kind.offset()].load(AtomicOrdering::Relaxed) {
3746 11520 : // The metrics has been initialized with `MetricsUpdate::Set` before, so we can add/sub
3747 11520 : // the value reliably.
3748 11520 : self.directory_metrics[kind.offset()].fetch_add(count, AtomicOrdering::Relaxed);
3749 11520 : }
3750 : // Otherwise, ignore this update
3751 : }
3752 12 : MetricsUpdate::Sub(count) => {
3753 12 : // TODO: these operations are not atomic; but we only have one writer to the metrics, so
3754 12 : // it's fine.
3755 12 : if self.directory_metrics_inited[kind.offset()].load(AtomicOrdering::Relaxed) {
3756 12 : // The metrics has been initialized with `MetricsUpdate::Set` before.
3757 12 : // The operation could overflow so we need to normalize the value.
3758 12 : let prev_val =
3759 12 : self.directory_metrics[kind.offset()].load(AtomicOrdering::Relaxed);
3760 12 : let res = prev_val.saturating_sub(count);
3761 12 : self.directory_metrics[kind.offset()].store(res, AtomicOrdering::Relaxed);
3762 12 : }
3763 : // Otherwise, ignore this update
3764 : }
3765 : };
3766 :
3767 : // TODO: remove this, there's no place in the code that updates this aux metrics.
3768 18144 : let aux_metric =
3769 18144 : self.directory_metrics[DirectoryKind::AuxFiles.offset()].load(AtomicOrdering::Relaxed);
3770 18144 :
3771 18144 : let sum_of_entries = self
3772 18144 : .directory_metrics
3773 18144 : .iter()
3774 145152 : .map(|v| v.load(AtomicOrdering::Relaxed))
3775 18144 : .sum();
3776 : // Set a high general threshold and a lower threshold for the auxiliary files,
3777 : // as we can have large numbers of relations in the db directory.
3778 : const SUM_THRESHOLD: u64 = 5000;
3779 : const AUX_THRESHOLD: u64 = 1000;
3780 18144 : if sum_of_entries >= SUM_THRESHOLD || aux_metric >= AUX_THRESHOLD {
3781 0 : self.metrics
3782 0 : .directory_entries_count_gauge
3783 0 : .set(sum_of_entries);
3784 18144 : } else if let Some(metric) = Lazy::get(&self.metrics.directory_entries_count_gauge) {
3785 0 : metric.set(sum_of_entries);
3786 18144 : }
3787 18144 : }
3788 :
3789 0 : async fn find_layer(
3790 0 : &self,
3791 0 : layer_name: &LayerName,
3792 0 : ) -> Result<Option<Layer>, layer_manager::Shutdown> {
3793 0 : let guard = self.layers.read().await;
3794 0 : let layer = guard
3795 0 : .layer_map()?
3796 0 : .iter_historic_layers()
3797 0 : .find(|l| &l.layer_name() == layer_name)
3798 0 : .map(|found| guard.get_from_desc(&found));
3799 0 : Ok(layer)
3800 0 : }
3801 :
3802 0 : pub(super) fn should_keep_previous_heatmap(&self, new_heatmap_end_lsn: Lsn) -> bool {
3803 0 : let crnt = self.previous_heatmap.load();
3804 0 : match crnt.as_deref() {
3805 0 : Some(PreviousHeatmap::Active { end_lsn, .. }) => match end_lsn {
3806 0 : Some(crnt_end_lsn) => *crnt_end_lsn > new_heatmap_end_lsn,
3807 0 : None => true,
3808 : },
3809 0 : Some(PreviousHeatmap::Obsolete) => false,
3810 0 : None => false,
3811 : }
3812 0 : }
3813 :
3814 : /// The timeline heatmap is a hint to secondary locations from the primary location,
3815 : /// indicating which layers are currently on-disk on the primary.
3816 : ///
3817 : /// None is returned if the Timeline is in a state where uploading a heatmap
3818 : /// doesn't make sense, such as shutting down or initializing. The caller
3819 : /// should treat this as a cue to simply skip doing any heatmap uploading
3820 : /// for this timeline.
3821 96 : pub(crate) async fn generate_heatmap(&self) -> Option<HeatMapTimeline> {
3822 96 : if !self.is_active() {
3823 0 : return None;
3824 96 : }
3825 :
3826 96 : let guard = self.layers.read().await;
3827 :
3828 : // Firstly, if there's any heatmap left over from when this location
3829 : // was a secondary, take that into account. Keep layers that are:
3830 : // * present in the layer map
3831 : // * visible
3832 : // * non-resident
3833 : // * not evicted since we read the heatmap
3834 : //
3835 : // Without this, a new cold, attached location would clobber the previous
3836 : // heatamp.
3837 96 : let previous_heatmap = self.previous_heatmap.load();
3838 96 : let visible_non_resident = match previous_heatmap.as_deref() {
3839 : Some(PreviousHeatmap::Active {
3840 72 : heatmap, read_at, ..
3841 276 : }) => Some(heatmap.all_layers().filter_map(|hl| {
3842 276 : let desc: PersistentLayerDesc = hl.name.clone().into();
3843 276 : let layer = guard.try_get_from_key(&desc.key())?;
3844 :
3845 276 : if layer.visibility() == LayerVisibilityHint::Covered {
3846 0 : return None;
3847 276 : }
3848 276 :
3849 276 : if layer.is_likely_resident() {
3850 120 : return None;
3851 156 : }
3852 156 :
3853 156 : if layer.last_evicted_at().happened_after(*read_at) {
3854 36 : return None;
3855 120 : }
3856 120 :
3857 120 : Some((desc, hl.metadata.clone(), hl.access_time, hl.cold))
3858 276 : })),
3859 0 : Some(PreviousHeatmap::Obsolete) => None,
3860 24 : None => None,
3861 : };
3862 :
3863 : // Secondly, all currently visible, resident layers are included.
3864 216 : let resident = guard.likely_resident_layers().filter_map(|layer| {
3865 216 : match layer.visibility() {
3866 : LayerVisibilityHint::Visible => {
3867 : // Layer is visible to one or more read LSNs: elegible for inclusion in layer map
3868 204 : let last_activity_ts = layer.latest_activity();
3869 204 : Some((
3870 204 : layer.layer_desc().clone(),
3871 204 : layer.metadata(),
3872 204 : last_activity_ts,
3873 204 : false, // these layers are not cold
3874 204 : ))
3875 : }
3876 : LayerVisibilityHint::Covered => {
3877 : // Layer is resident but unlikely to be read: not elegible for inclusion in heatmap.
3878 12 : None
3879 : }
3880 : }
3881 216 : });
3882 :
3883 96 : let mut layers = match visible_non_resident {
3884 72 : Some(non_resident) => {
3885 72 : let mut non_resident = non_resident.peekable();
3886 72 : if non_resident.peek().is_none() {
3887 24 : tracing::info!(timeline_id=%self.timeline_id, "Previous heatmap now obsolete");
3888 24 : self.previous_heatmap
3889 24 : .store(Some(PreviousHeatmap::Obsolete.into()));
3890 48 : }
3891 :
3892 72 : non_resident.chain(resident).collect::<Vec<_>>()
3893 : }
3894 24 : None => resident.collect::<Vec<_>>(),
3895 : };
3896 :
3897 : // Sort layers in order of which to download first. For a large set of layers to download, we
3898 : // want to prioritize those layers which are most likely to still be in the resident many minutes
3899 : // or hours later:
3900 : // - Cold layers go last for convenience when a human inspects the heatmap.
3901 : // - Download L0s last, because they churn the fastest: L0s on a fast-writing tenant might
3902 : // only exist for a few minutes before being compacted into L1s.
3903 : // - For L1 & image layers, download most recent LSNs first: the older the LSN, the sooner
3904 : // the layer is likely to be covered by an image layer during compaction.
3905 696 : layers.sort_by_key(|(desc, _meta, _atime, cold)| {
3906 696 : std::cmp::Reverse((
3907 696 : *cold,
3908 696 : !LayerMap::is_l0(&desc.key_range, desc.is_delta),
3909 696 : desc.lsn_range.end,
3910 696 : ))
3911 696 : });
3912 96 :
3913 96 : let layers = layers
3914 96 : .into_iter()
3915 324 : .map(|(desc, meta, atime, cold)| {
3916 324 : HeatMapLayer::new(desc.layer_name(), meta, atime, cold)
3917 324 : })
3918 96 : .collect();
3919 96 :
3920 96 : Some(HeatMapTimeline::new(self.timeline_id, layers))
3921 96 : }
3922 :
3923 0 : pub(super) async fn generate_unarchival_heatmap(&self, end_lsn: Lsn) -> PreviousHeatmap {
3924 0 : let guard = self.layers.read().await;
3925 :
3926 0 : let now = SystemTime::now();
3927 0 : let mut heatmap_layers = Vec::default();
3928 0 : for vl in guard.visible_layers() {
3929 0 : if vl.layer_desc().get_lsn_range().start >= end_lsn {
3930 0 : continue;
3931 0 : }
3932 0 :
3933 0 : let hl = HeatMapLayer {
3934 0 : name: vl.layer_desc().layer_name(),
3935 0 : metadata: vl.metadata(),
3936 0 : access_time: now,
3937 0 : cold: true,
3938 0 : };
3939 0 : heatmap_layers.push(hl);
3940 : }
3941 :
3942 0 : tracing::info!(
3943 0 : "Generating unarchival heatmap with {} layers",
3944 0 : heatmap_layers.len()
3945 : );
3946 :
3947 0 : let heatmap = HeatMapTimeline::new(self.timeline_id, heatmap_layers);
3948 0 : PreviousHeatmap::Active {
3949 0 : heatmap,
3950 0 : read_at: Instant::now(),
3951 0 : end_lsn: Some(end_lsn),
3952 0 : }
3953 0 : }
3954 :
3955 : /// Returns true if the given lsn is or was an ancestor branchpoint.
3956 0 : pub(crate) fn is_ancestor_lsn(&self, lsn: Lsn) -> bool {
3957 0 : // upon timeline detach, we set the ancestor_lsn to Lsn::INVALID and the store the original
3958 0 : // branchpoint in the value in IndexPart::lineage
3959 0 : self.ancestor_lsn == lsn
3960 0 : || (self.ancestor_lsn == Lsn::INVALID
3961 0 : && self.remote_client.is_previous_ancestor_lsn(lsn))
3962 0 : }
3963 : }
3964 :
3965 : #[derive(Clone)]
3966 : /// Type representing a query in the ([`Lsn`], [`Key`]) space.
3967 : /// In other words, a set of segments in a 2D space.
3968 : ///
3969 : /// This representation has the advatange of avoiding hash map
3970 : /// allocations for uniform queries.
3971 : pub(crate) enum VersionedKeySpaceQuery {
3972 : /// Variant for queries at a single [`Lsn`]
3973 : Uniform { keyspace: KeySpace, lsn: Lsn },
3974 : /// Variant for queries at multiple [`Lsn`]s
3975 : Scattered {
3976 : keyspaces_at_lsn: Vec<(Lsn, KeySpace)>,
3977 : },
3978 : }
3979 :
3980 : impl VersionedKeySpaceQuery {
3981 3656397 : pub(crate) fn uniform(keyspace: KeySpace, lsn: Lsn) -> Self {
3982 3656397 : Self::Uniform { keyspace, lsn }
3983 3656397 : }
3984 :
3985 122304 : pub(crate) fn scattered(keyspaces_at_lsn: Vec<(Lsn, KeySpace)>) -> Self {
3986 122304 : Self::Scattered { keyspaces_at_lsn }
3987 122304 : }
3988 :
3989 : /// Returns the most recent (largest) LSN included in the query.
3990 : /// If any of the LSNs included in the query are invalid, returns
3991 : /// an error instead.
3992 7557402 : fn high_watermark_lsn(&self) -> Result<Lsn, GetVectoredError> {
3993 7557402 : match self {
3994 7312794 : Self::Uniform { lsn, .. } => {
3995 7312794 : if !lsn.is_valid() {
3996 0 : return Err(GetVectoredError::InvalidLsn(*lsn));
3997 7312794 : }
3998 7312794 :
3999 7312794 : Ok(*lsn)
4000 : }
4001 244608 : Self::Scattered { keyspaces_at_lsn } => {
4002 244608 : let mut max_lsn = None;
4003 506616 : for (lsn, _keyspace) in keyspaces_at_lsn.iter() {
4004 506616 : if !lsn.is_valid() {
4005 0 : return Err(GetVectoredError::InvalidLsn(*lsn));
4006 506616 : }
4007 506616 : max_lsn = std::cmp::max(max_lsn, Some(lsn));
4008 : }
4009 :
4010 244608 : if let Some(computed) = max_lsn {
4011 244608 : Ok(*computed)
4012 : } else {
4013 0 : Err(GetVectoredError::Other(anyhow!("empty input")))
4014 : }
4015 : }
4016 : }
4017 7557402 : }
4018 :
4019 : /// Returns the total keyspace being queried: the result of projecting
4020 : /// everything in the key dimensions onto the key axis.
4021 3910773 : fn total_keyspace(&self) -> KeySpace {
4022 3910773 : match self {
4023 3666165 : Self::Uniform { keyspace, .. } => keyspace.clone(),
4024 244608 : Self::Scattered { keyspaces_at_lsn } => keyspaces_at_lsn
4025 244608 : .iter()
4026 506616 : .map(|(_lsn, keyspace)| keyspace)
4027 506616 : .fold(KeySpace::default(), |mut acc, v| {
4028 506616 : acc.merge(v);
4029 506616 : acc
4030 506616 : }),
4031 : }
4032 3910773 : }
4033 :
4034 : /// Returns LSN for a specific key.
4035 : ///
4036 : /// Invariant: requested key must be part of [`Self::total_keyspace`]
4037 4776717 : pub(super) fn map_key_to_lsn(&self, key: &Key) -> Lsn {
4038 4776717 : match self {
4039 3898413 : Self::Uniform { lsn, .. } => *lsn,
4040 878304 : Self::Scattered { keyspaces_at_lsn } => {
4041 878304 : keyspaces_at_lsn
4042 878304 : .iter()
4043 5037290 : .find(|(_lsn, keyspace)| keyspace.contains(key))
4044 878304 : .expect("Returned key was requested")
4045 878304 : .0
4046 : }
4047 : }
4048 4776717 : }
4049 :
4050 : /// Remove any parts of the query (segments) which overlap with the provided
4051 : /// key space (also segments).
4052 11612694 : fn remove_overlapping_with(&mut self, to_remove: &KeySpace) -> KeySpace {
4053 11612694 : match self {
4054 11368086 : Self::Uniform { keyspace, .. } => keyspace.remove_overlapping_with(to_remove),
4055 244608 : Self::Scattered { keyspaces_at_lsn } => {
4056 244608 : let mut removed_accum = KeySpaceRandomAccum::new();
4057 506616 : keyspaces_at_lsn.iter_mut().for_each(|(_lsn, keyspace)| {
4058 506616 : let removed = keyspace.remove_overlapping_with(to_remove);
4059 506616 : removed_accum.add_keyspace(removed);
4060 506616 : });
4061 244608 :
4062 244608 : removed_accum.to_keyspace()
4063 : }
4064 : }
4065 11612694 : }
4066 :
4067 5130461 : fn is_empty(&self) -> bool {
4068 5130461 : match self {
4069 5008157 : Self::Uniform { keyspace, .. } => keyspace.is_empty(),
4070 122304 : Self::Scattered { keyspaces_at_lsn } => keyspaces_at_lsn
4071 122304 : .iter()
4072 253308 : .all(|(_lsn, keyspace)| keyspace.is_empty()),
4073 : }
4074 5130461 : }
4075 :
4076 : /// "Lower" the query on the LSN dimension
4077 1351772 : fn lower(&mut self, to: Lsn) {
4078 1351772 : match self {
4079 1351772 : Self::Uniform { lsn, .. } => {
4080 1351772 : // If the originally requested LSN is smaller than the starting
4081 1351772 : // LSN of the ancestor we are descending into, we need to respect that.
4082 1351772 : // Hence the min.
4083 1351772 : *lsn = std::cmp::min(*lsn, to);
4084 1351772 : }
4085 0 : Self::Scattered { keyspaces_at_lsn } => {
4086 0 : keyspaces_at_lsn.iter_mut().for_each(|(lsn, _keyspace)| {
4087 0 : *lsn = std::cmp::min(*lsn, to);
4088 0 : });
4089 0 : }
4090 : }
4091 1351772 : }
4092 : }
4093 :
4094 : impl std::fmt::Display for VersionedKeySpaceQuery {
4095 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
4096 0 : write!(f, "[")?;
4097 :
4098 0 : match self {
4099 0 : VersionedKeySpaceQuery::Uniform { keyspace, lsn } => {
4100 0 : write!(f, "{keyspace} @ {lsn}")?;
4101 : }
4102 0 : VersionedKeySpaceQuery::Scattered { keyspaces_at_lsn } => {
4103 0 : for (lsn, keyspace) in keyspaces_at_lsn.iter() {
4104 0 : write!(f, "{keyspace} @ {lsn},")?;
4105 : }
4106 : }
4107 : }
4108 :
4109 0 : write!(f, "]")
4110 0 : }
4111 : }
4112 :
4113 : impl Timeline {
4114 : #[allow(clippy::doc_lazy_continuation)]
4115 : /// Get the data needed to reconstruct all keys in the provided keyspace
4116 : ///
4117 : /// The algorithm is as follows:
4118 : /// 1. While some keys are still not done and there's a timeline to visit:
4119 : /// 2. Visit the timeline (see [`Timeline::get_vectored_reconstruct_data_timeline`]:
4120 : /// 2.1: Build the fringe for the current keyspace
4121 : /// 2.2 Visit the newest layer from the fringe to collect all values for the range it
4122 : /// intersects
4123 : /// 2.3. Pop the timeline from the fringe
4124 : /// 2.4. If the fringe is empty, go back to 1
4125 3778701 : async fn get_vectored_reconstruct_data(
4126 3778701 : &self,
4127 3778701 : mut query: VersionedKeySpaceQuery,
4128 3778701 : reconstruct_state: &mut ValuesReconstructState,
4129 3778701 : ctx: &RequestContext,
4130 3778701 : ) -> Result<(), GetVectoredError> {
4131 3778701 : let original_hwm_lsn = query.high_watermark_lsn().unwrap();
4132 3778701 :
4133 3778701 : let mut timeline_owned: Arc<Timeline>;
4134 3778701 : let mut timeline = self;
4135 :
4136 3778689 : let missing_keyspace = loop {
4137 5130461 : if self.cancel.is_cancelled() {
4138 0 : return Err(GetVectoredError::Cancelled);
4139 5130461 : }
4140 :
4141 : let TimelineVisitOutcome {
4142 5130461 : completed_keyspace: completed,
4143 5130461 : image_covered_keyspace,
4144 : } = {
4145 5130461 : let ctx = RequestContextBuilder::from(ctx)
4146 5130461 : .perf_span(|crnt_perf_span| {
4147 0 : info_span!(
4148 : target: PERF_TRACE_TARGET,
4149 0 : parent: crnt_perf_span,
4150 : "PLAN_IO_TIMELINE",
4151 : timeline = %timeline.timeline_id,
4152 0 : high_watermark_lsn = %query.high_watermark_lsn().unwrap(),
4153 : )
4154 5130461 : })
4155 5130461 : .attached_child();
4156 5130461 :
4157 5130461 : Self::get_vectored_reconstruct_data_timeline(
4158 5130461 : timeline,
4159 5130461 : &query,
4160 5130461 : reconstruct_state,
4161 5130461 : &self.cancel,
4162 5130461 : &ctx,
4163 5130461 : )
4164 5130461 : .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone())
4165 5130461 : .await?
4166 : };
4167 :
4168 5130461 : query.remove_overlapping_with(&completed);
4169 5130461 :
4170 5130461 : // Do not descend into the ancestor timeline for aux files.
4171 5130461 : // We don't return a blanket [`GetVectoredError::MissingKey`] to avoid
4172 5130461 : // stalling compaction.
4173 5130461 : query.remove_overlapping_with(&KeySpace {
4174 5130461 : ranges: vec![NON_INHERITED_RANGE, Key::sparse_non_inherited_keyspace()],
4175 5130461 : });
4176 5130461 :
4177 5130461 : // Keyspace is fully retrieved
4178 5130461 : if query.is_empty() {
4179 3777129 : break None;
4180 1353332 : }
4181 :
4182 1353332 : let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() else {
4183 : // Not fully retrieved but no ancestor timeline.
4184 1560 : break Some(query.total_keyspace());
4185 : };
4186 :
4187 : // Now we see if there are keys covered by the image layer but does not exist in the
4188 : // image layer, which means that the key does not exist.
4189 :
4190 : // The block below will stop the vectored search if any of the keys encountered an image layer
4191 : // which did not contain a snapshot for said key. Since we have already removed all completed
4192 : // keys from `keyspace`, we expect there to be no overlap between it and the image covered key
4193 : // space. If that's not the case, we had at least one key encounter a gap in the image layer
4194 : // and stop the search as a result of that.
4195 1351772 : let mut removed = query.remove_overlapping_with(&image_covered_keyspace);
4196 1351772 : // Do not fire missing key error and end early for sparse keys. Note that we hava already removed
4197 1351772 : // non-inherited keyspaces before, so we can safely do a full `SPARSE_RANGE` remove instead of
4198 1351772 : // figuring out what is the inherited key range and do a fine-grained pruning.
4199 1351772 : removed.remove_overlapping_with(&KeySpace {
4200 1351772 : ranges: vec![SPARSE_RANGE],
4201 1351772 : });
4202 1351772 : if !removed.is_empty() {
4203 0 : break Some(removed);
4204 1351772 : }
4205 1351772 :
4206 1351772 : // Each key range in the original query is at some point in the LSN space.
4207 1351772 : // When descending into the ancestor, lower all ranges in the LSN space
4208 1351772 : // such that new changes on the parent timeline are not visible.
4209 1351772 : query.lower(timeline.ancestor_lsn);
4210 1351772 :
4211 1351772 : let ctx = RequestContextBuilder::from(ctx)
4212 1351772 : .perf_span(|crnt_perf_span| {
4213 0 : info_span!(
4214 : target: PERF_TRACE_TARGET,
4215 0 : parent: crnt_perf_span,
4216 : "GET_ANCESTOR",
4217 : timeline = %timeline.timeline_id,
4218 0 : ancestor = %ancestor_timeline.timeline_id,
4219 : ancestor_lsn = %timeline.ancestor_lsn
4220 : )
4221 1351772 : })
4222 1351772 : .attached_child();
4223 :
4224 1351772 : timeline_owned = timeline
4225 1351772 : .get_ready_ancestor_timeline(ancestor_timeline, &ctx)
4226 1351772 : .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone())
4227 1351772 : .await?;
4228 1351760 : timeline = &*timeline_owned;
4229 : };
4230 :
4231 : // Remove sparse keys from the keyspace so that it doesn't fire errors.
4232 3778689 : let missing_keyspace = if let Some(missing_keyspace) = missing_keyspace {
4233 1560 : let mut missing_keyspace = missing_keyspace;
4234 1560 : missing_keyspace.remove_overlapping_with(&KeySpace {
4235 1560 : ranges: vec![SPARSE_RANGE],
4236 1560 : });
4237 1560 : if missing_keyspace.is_empty() {
4238 1476 : None
4239 : } else {
4240 84 : Some(missing_keyspace)
4241 : }
4242 : } else {
4243 3777129 : None
4244 : };
4245 :
4246 3778689 : if let Some(missing_keyspace) = missing_keyspace {
4247 84 : return Err(GetVectoredError::MissingKey(Box::new(MissingKeyError {
4248 84 : keyspace: missing_keyspace, /* better if we can store the full keyspace */
4249 84 : shard: self.shard_identity.number,
4250 84 : original_hwm_lsn,
4251 84 : ancestor_lsn: Some(timeline.ancestor_lsn),
4252 84 : backtrace: None,
4253 84 : read_path: std::mem::take(&mut reconstruct_state.read_path),
4254 84 : query: None,
4255 84 : })));
4256 3778605 : }
4257 3778605 :
4258 3778605 : Ok(())
4259 3778701 : }
4260 :
4261 5130461 : async fn get_vectored_init_fringe(
4262 5130461 : &self,
4263 5130461 : query: &VersionedKeySpaceQuery,
4264 5130461 : ) -> Result<LayerFringe, GetVectoredError> {
4265 5130461 : let mut fringe = LayerFringe::new();
4266 5130461 : let guard = self.layers.read().await;
4267 :
4268 5130461 : match query {
4269 5008157 : VersionedKeySpaceQuery::Uniform { keyspace, lsn } => {
4270 5008157 : // LSNs requested by the compute or determined by the pageserver
4271 5008157 : // are inclusive. Queries to the layer map use exclusive LSNs.
4272 5008157 : // Hence, bump the value before the query - same in the other
4273 5008157 : // match arm.
4274 5008157 : let cont_lsn = Lsn(lsn.0 + 1);
4275 5008157 : guard.update_search_fringe(keyspace, cont_lsn, &mut fringe)?;
4276 : }
4277 122304 : VersionedKeySpaceQuery::Scattered { keyspaces_at_lsn } => {
4278 253308 : for (lsn, keyspace) in keyspaces_at_lsn.iter() {
4279 253308 : let cont_lsn_for_keyspace = Lsn(lsn.0 + 1);
4280 253308 : guard.update_search_fringe(keyspace, cont_lsn_for_keyspace, &mut fringe)?;
4281 : }
4282 : }
4283 : }
4284 :
4285 5130461 : Ok(fringe)
4286 5130461 : }
4287 :
4288 : /// Collect the reconstruct data for a keyspace from the specified timeline.
4289 : ///
4290 : /// Maintain a fringe [`LayerFringe`] which tracks all the layers that intersect
4291 : /// the current keyspace. The current keyspace of the search at any given timeline
4292 : /// is the original keyspace minus all the keys that have been completed minus
4293 : /// any keys for which we couldn't find an intersecting layer. It's not tracked explicitly,
4294 : /// but if you merge all the keyspaces in the fringe, you get the "current keyspace".
4295 : ///
4296 : /// This is basically a depth-first search visitor implementation where a vertex
4297 : /// is the (layer, lsn range, key space) tuple. The fringe acts as the stack.
4298 : ///
4299 : /// At each iteration pop the top of the fringe (the layer with the highest Lsn)
4300 : /// and get all the required reconstruct data from the layer in one go.
4301 : ///
4302 : /// Returns the completed keyspace and the keyspaces with image coverage. The caller
4303 : /// decides how to deal with these two keyspaces.
4304 5130461 : async fn get_vectored_reconstruct_data_timeline(
4305 5130461 : timeline: &Timeline,
4306 5130461 : query: &VersionedKeySpaceQuery,
4307 5130461 : reconstruct_state: &mut ValuesReconstructState,
4308 5130461 : cancel: &CancellationToken,
4309 5130461 : ctx: &RequestContext,
4310 5130461 : ) -> Result<TimelineVisitOutcome, GetVectoredError> {
4311 5130461 : // Prevent GC from progressing while visiting the current timeline.
4312 5130461 : // If we are GC-ing because a new image layer was added while traversing
4313 5130461 : // the timeline, then it will remove layers that are required for fulfilling
4314 5130461 : // the current get request (read-path cannot "look back" and notice the new
4315 5130461 : // image layer).
4316 5130461 : let _gc_cutoff_holder = timeline.get_applied_gc_cutoff_lsn();
4317 :
4318 : // See `compaction::compact_with_gc` for why we need this.
4319 5130461 : let _guard = timeline.gc_compaction_layer_update_lock.read().await;
4320 :
4321 : // Initialize the fringe
4322 5130461 : let mut fringe = timeline.get_vectored_init_fringe(query).await?;
4323 :
4324 5130461 : let mut completed_keyspace = KeySpace::default();
4325 5130461 : let mut image_covered_keyspace = KeySpaceRandomAccum::new();
4326 :
4327 10445427 : while let Some((layer_to_read, keyspace_to_read, lsn_range)) = fringe.next_layer() {
4328 5314966 : if cancel.is_cancelled() {
4329 0 : return Err(GetVectoredError::Cancelled);
4330 5314966 : }
4331 :
4332 5314966 : if let Some(ref mut read_path) = reconstruct_state.read_path {
4333 5314966 : read_path.record_layer_visit(&layer_to_read, &keyspace_to_read, &lsn_range);
4334 5314966 : }
4335 :
4336 : // Visit the layer and plan IOs for it
4337 5314966 : let next_cont_lsn = lsn_range.start;
4338 5314966 : layer_to_read
4339 5314966 : .get_values_reconstruct_data(
4340 5314966 : keyspace_to_read.clone(),
4341 5314966 : lsn_range,
4342 5314966 : reconstruct_state,
4343 5314966 : ctx,
4344 5314966 : )
4345 5314966 : .await?;
4346 :
4347 5314966 : let mut unmapped_keyspace = keyspace_to_read;
4348 5314966 : let cont_lsn = next_cont_lsn;
4349 5314966 :
4350 5314966 : reconstruct_state.on_layer_visited(&layer_to_read);
4351 5314966 :
4352 5314966 : let (keys_done_last_step, keys_with_image_coverage) =
4353 5314966 : reconstruct_state.consume_done_keys();
4354 5314966 : unmapped_keyspace.remove_overlapping_with(&keys_done_last_step);
4355 5314966 : completed_keyspace.merge(&keys_done_last_step);
4356 5314966 : if let Some(keys_with_image_coverage) = keys_with_image_coverage {
4357 181440 : unmapped_keyspace
4358 181440 : .remove_overlapping_with(&KeySpace::single(keys_with_image_coverage.clone()));
4359 181440 : image_covered_keyspace.add_range(keys_with_image_coverage);
4360 5133526 : }
4361 :
4362 : // Query the layer map for the next layers to read.
4363 : //
4364 : // Do not descent any further if the last layer we visited
4365 : // completed all keys in the keyspace it inspected. This is not
4366 : // required for correctness, but avoids visiting extra layers
4367 : // which turns out to be a perf bottleneck in some cases.
4368 5314966 : if !unmapped_keyspace.is_empty() {
4369 1472389 : let guard = timeline.layers.read().await;
4370 1472389 : guard.update_search_fringe(&unmapped_keyspace, cont_lsn, &mut fringe)?;
4371 :
4372 : // It's safe to drop the layer map lock after planning the next round of reads.
4373 : // The fringe keeps readable handles for the layers which are safe to read even
4374 : // if layers were compacted or flushed.
4375 : //
4376 : // The more interesting consideration is: "Why is the read algorithm still correct
4377 : // if the layer map changes while it is operating?". Doing a vectored read on a
4378 : // timeline boils down to pushing an imaginary lsn boundary downwards for each range
4379 : // covered by the read. The layer map tells us how to move the lsn downwards for a
4380 : // range at *a particular point in time*. It is fine for the answer to be different
4381 : // at two different time points.
4382 1472389 : drop(guard);
4383 3842577 : }
4384 : }
4385 :
4386 5130461 : Ok(TimelineVisitOutcome {
4387 5130461 : completed_keyspace,
4388 5130461 : image_covered_keyspace: image_covered_keyspace.consume_keyspace(),
4389 5130461 : })
4390 5130461 : }
4391 :
4392 1351772 : async fn get_ready_ancestor_timeline(
4393 1351772 : &self,
4394 1351772 : ancestor: &Arc<Timeline>,
4395 1351772 : ctx: &RequestContext,
4396 1351772 : ) -> Result<Arc<Timeline>, GetReadyAncestorError> {
4397 1351772 : // It's possible that the ancestor timeline isn't active yet, or
4398 1351772 : // is active but hasn't yet caught up to the branch point. Wait
4399 1351772 : // for it.
4400 1351772 : //
4401 1351772 : // This cannot happen while the pageserver is running normally,
4402 1351772 : // because you cannot create a branch from a point that isn't
4403 1351772 : // present in the pageserver yet. However, we don't wait for the
4404 1351772 : // branch point to be uploaded to cloud storage before creating
4405 1351772 : // a branch. I.e., the branch LSN need not be remote consistent
4406 1351772 : // for the branching operation to succeed.
4407 1351772 : //
4408 1351772 : // Hence, if we try to load a tenant in such a state where
4409 1351772 : // 1. the existence of the branch was persisted (in IndexPart and/or locally)
4410 1351772 : // 2. but the ancestor state is behind branch_lsn because it was not yet persisted
4411 1351772 : // then we will need to wait for the ancestor timeline to
4412 1351772 : // re-stream WAL up to branch_lsn before we access it.
4413 1351772 : //
4414 1351772 : // How can a tenant get in such a state?
4415 1351772 : // - ungraceful pageserver process exit
4416 1351772 : // - detach+attach => this is a bug, https://github.com/neondatabase/neon/issues/4219
4417 1351772 : //
4418 1351772 : // NB: this could be avoided by requiring
4419 1351772 : // branch_lsn >= remote_consistent_lsn
4420 1351772 : // during branch creation.
4421 1351772 : match ancestor.wait_to_become_active(ctx).await {
4422 1351760 : Ok(()) => {}
4423 : Err(TimelineState::Stopping) => {
4424 : // If an ancestor is stopping, it means the tenant is stopping: handle this the same as if this timeline was stopping.
4425 0 : return Err(GetReadyAncestorError::Cancelled);
4426 : }
4427 12 : Err(state) => {
4428 12 : return Err(GetReadyAncestorError::BadState {
4429 12 : timeline_id: ancestor.timeline_id,
4430 12 : state,
4431 12 : });
4432 : }
4433 : }
4434 1351760 : ancestor
4435 1351760 : .wait_lsn(
4436 1351760 : self.ancestor_lsn,
4437 1351760 : WaitLsnWaiter::Timeline(self),
4438 1351760 : WaitLsnTimeout::Default,
4439 1351760 : ctx,
4440 1351760 : )
4441 1351760 : .await
4442 1351760 : .map_err(|e| match e {
4443 0 : e @ WaitLsnError::Timeout(_) => GetReadyAncestorError::AncestorLsnTimeout(e),
4444 0 : WaitLsnError::Shutdown => GetReadyAncestorError::Cancelled,
4445 0 : WaitLsnError::BadState(state) => GetReadyAncestorError::BadState {
4446 0 : timeline_id: ancestor.timeline_id,
4447 0 : state,
4448 0 : },
4449 1351760 : })?;
4450 :
4451 1351760 : Ok(ancestor.clone())
4452 1351772 : }
4453 :
4454 1782936 : pub(crate) fn get_shard_identity(&self) -> &ShardIdentity {
4455 1782936 : &self.shard_identity
4456 1782936 : }
4457 :
4458 : #[inline(always)]
4459 0 : pub(crate) fn shard_timeline_id(&self) -> ShardTimelineId {
4460 0 : ShardTimelineId {
4461 0 : shard_index: ShardIndex {
4462 0 : shard_number: self.shard_identity.number,
4463 0 : shard_count: self.shard_identity.count,
4464 0 : },
4465 0 : timeline_id: self.timeline_id,
4466 0 : }
4467 0 : }
4468 :
4469 : /// Returns a non-frozen open in-memory layer for ingestion.
4470 : ///
4471 : /// Takes a witness of timeline writer state lock being held, because it makes no sense to call
4472 : /// this function without holding the mutex.
4473 7848 : async fn get_layer_for_write(
4474 7848 : &self,
4475 7848 : lsn: Lsn,
4476 7848 : _guard: &tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
4477 7848 : ctx: &RequestContext,
4478 7848 : ) -> anyhow::Result<Arc<InMemoryLayer>> {
4479 7848 : let mut guard = self.layers.write().await;
4480 :
4481 7848 : let last_record_lsn = self.get_last_record_lsn();
4482 7848 : ensure!(
4483 7848 : lsn > last_record_lsn,
4484 0 : "cannot modify relation after advancing last_record_lsn (incoming_lsn={}, last_record_lsn={})",
4485 : lsn,
4486 : last_record_lsn,
4487 : );
4488 :
4489 7848 : let layer = guard
4490 7848 : .open_mut()?
4491 7848 : .get_layer_for_write(
4492 7848 : lsn,
4493 7848 : self.conf,
4494 7848 : self.timeline_id,
4495 7848 : self.tenant_shard_id,
4496 7848 : &self.gate,
4497 7848 : &self.cancel,
4498 7848 : ctx,
4499 7848 : )
4500 7848 : .await?;
4501 7848 : Ok(layer)
4502 7848 : }
4503 :
4504 31674624 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
4505 31674624 : assert!(new_lsn.is_aligned());
4506 :
4507 31674624 : self.metrics.last_record_lsn_gauge.set(new_lsn.0 as i64);
4508 31674624 : self.last_record_lsn.advance(new_lsn);
4509 31674624 : }
4510 :
4511 : /// Freeze any existing open in-memory layer and unconditionally notify the flush loop.
4512 : ///
4513 : /// Unconditional flush loop notification is given because in sharded cases we will want to
4514 : /// leave an Lsn gap. Unsharded tenants do not have Lsn gaps.
4515 7272 : async fn freeze_inmem_layer_at(
4516 7272 : &self,
4517 7272 : at: Lsn,
4518 7272 : write_lock: &mut tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
4519 7272 : ) -> Result<u64, FlushLayerError> {
4520 7272 : let frozen = {
4521 7272 : let mut guard = self.layers.write().await;
4522 7272 : guard
4523 7272 : .open_mut()?
4524 7272 : .try_freeze_in_memory_layer(at, &self.last_freeze_at, write_lock, &self.metrics)
4525 7272 : .await
4526 : };
4527 :
4528 7272 : if frozen {
4529 7104 : let now = Instant::now();
4530 7104 : *(self.last_freeze_ts.write().unwrap()) = now;
4531 7104 : }
4532 :
4533 : // Increment the flush cycle counter and wake up the flush task.
4534 : // Remember the new value, so that when we listen for the flush
4535 : // to finish, we know when the flush that we initiated has
4536 : // finished, instead of some other flush that was started earlier.
4537 7272 : let mut my_flush_request = 0;
4538 7272 :
4539 7272 : let flush_loop_state = { *self.flush_loop_state.lock().unwrap() };
4540 7272 : if !matches!(flush_loop_state, FlushLoopState::Running { .. }) {
4541 0 : return Err(FlushLayerError::NotRunning(flush_loop_state));
4542 7272 : }
4543 7272 :
4544 7272 : self.layer_flush_start_tx.send_modify(|(counter, lsn)| {
4545 7272 : my_flush_request = *counter + 1;
4546 7272 : *counter = my_flush_request;
4547 7272 : *lsn = std::cmp::max(at, *lsn);
4548 7272 : });
4549 7272 :
4550 7272 : assert_ne!(my_flush_request, 0);
4551 :
4552 7272 : Ok(my_flush_request)
4553 7272 : }
4554 :
4555 : /// Layer flusher task's main loop.
4556 2748 : async fn flush_loop(
4557 2748 : self: &Arc<Self>,
4558 2748 : mut layer_flush_start_rx: tokio::sync::watch::Receiver<(u64, Lsn)>,
4559 2748 : ctx: &RequestContext,
4560 2748 : ) {
4561 : // Subscribe to L0 delta layer updates, for compaction backpressure.
4562 2748 : let mut watch_l0 = match self.layers.read().await.layer_map() {
4563 2748 : Ok(lm) => lm.watch_level0_deltas(),
4564 0 : Err(Shutdown) => return,
4565 : };
4566 :
4567 2748 : info!("started flush loop");
4568 : loop {
4569 9910 : tokio::select! {
4570 9910 : _ = self.cancel.cancelled() => {
4571 60 : info!("shutting down layer flush task due to Timeline::cancel");
4572 60 : break;
4573 : },
4574 9910 : _ = layer_flush_start_rx.changed() => {}
4575 7162 : }
4576 7162 : trace!("waking up");
4577 7162 : let (flush_counter, frozen_to_lsn) = *layer_flush_start_rx.borrow();
4578 7162 :
4579 7162 : // The highest LSN to which we flushed in the loop over frozen layers
4580 7162 : let mut flushed_to_lsn = Lsn(0);
4581 :
4582 7162 : let result = loop {
4583 14266 : if self.cancel.is_cancelled() {
4584 0 : info!("dropping out of flush loop for timeline shutdown");
4585 : // Note: we do not bother transmitting into [`layer_flush_done_tx`], because
4586 : // anyone waiting on that will respect self.cancel as well: they will stop
4587 : // waiting at the same time we as drop out of this loop.
4588 0 : return;
4589 14266 : }
4590 14266 :
4591 14266 : // Break to notify potential waiters as soon as we've flushed the requested LSN. If
4592 14266 : // more requests have arrived in the meanwhile, we'll resume flushing afterwards.
4593 14266 : if flushed_to_lsn >= frozen_to_lsn {
4594 6994 : break Ok(());
4595 7272 : }
4596 :
4597 : // Fetch the next layer to flush, if any.
4598 7272 : let (layer, l0_count, frozen_count, frozen_size) = {
4599 7272 : let layers = self.layers.read().await;
4600 7272 : let Ok(lm) = layers.layer_map() else {
4601 0 : info!("dropping out of flush loop for timeline shutdown");
4602 0 : return;
4603 : };
4604 7272 : let l0_count = lm.level0_deltas().len();
4605 7272 : let frozen_count = lm.frozen_layers.len();
4606 7272 : let frozen_size: u64 = lm
4607 7272 : .frozen_layers
4608 7272 : .iter()
4609 7272 : .map(|l| l.estimated_in_mem_size())
4610 7272 : .sum();
4611 7272 : let layer = lm.frozen_layers.front().cloned();
4612 7272 : (layer, l0_count, frozen_count, frozen_size)
4613 7272 : // drop 'layers' lock
4614 7272 : };
4615 7272 : let Some(layer) = layer else {
4616 168 : break Ok(());
4617 : };
4618 :
4619 : // Stall flushes to backpressure if compaction can't keep up. This is propagated up
4620 : // to WAL ingestion by having ephemeral layer rolls wait for flushes.
4621 7104 : if let Some(stall_threshold) = self.get_l0_flush_stall_threshold() {
4622 0 : if l0_count >= stall_threshold {
4623 0 : warn!(
4624 0 : "stalling layer flushes for compaction backpressure at {l0_count} \
4625 0 : L0 layers ({frozen_count} frozen layers with {frozen_size} bytes)"
4626 : );
4627 0 : let stall_timer = self
4628 0 : .metrics
4629 0 : .flush_delay_histo
4630 0 : .start_timer()
4631 0 : .record_on_drop();
4632 0 : tokio::select! {
4633 0 : result = watch_l0.wait_for(|l0| *l0 < stall_threshold) => {
4634 0 : if let Ok(l0) = result.as_deref() {
4635 0 : let delay = stall_timer.elapsed().as_secs_f64();
4636 0 : info!("resuming layer flushes at {l0} L0 layers after {delay:.3}s");
4637 0 : }
4638 : },
4639 0 : _ = self.cancel.cancelled() => {},
4640 : }
4641 0 : continue; // check again
4642 0 : }
4643 7104 : }
4644 :
4645 : // Flush the layer.
4646 7104 : let flush_timer = self.metrics.flush_time_histo.start_timer();
4647 7104 : match self.flush_frozen_layer(layer, ctx).await {
4648 7104 : Ok(layer_lsn) => flushed_to_lsn = max(flushed_to_lsn, layer_lsn),
4649 : Err(FlushLayerError::Cancelled) => {
4650 0 : info!("dropping out of flush loop for timeline shutdown");
4651 0 : return;
4652 : }
4653 0 : err @ Err(
4654 0 : FlushLayerError::NotRunning(_)
4655 0 : | FlushLayerError::Other(_)
4656 0 : | FlushLayerError::CreateImageLayersError(_),
4657 0 : ) => {
4658 0 : error!("could not flush frozen layer: {err:?}");
4659 0 : break err.map(|_| ());
4660 : }
4661 : }
4662 7104 : let flush_duration = flush_timer.stop_and_record();
4663 7104 :
4664 7104 : // Notify the tenant compaction loop if L0 compaction is needed.
4665 7104 : let l0_count = *watch_l0.borrow();
4666 7104 : if l0_count >= self.get_compaction_threshold() {
4667 2868 : self.l0_compaction_trigger.notify_one();
4668 4236 : }
4669 :
4670 : // Delay the next flush to backpressure if compaction can't keep up. We delay by the
4671 : // flush duration such that the flush takes 2x as long. This is propagated up to WAL
4672 : // ingestion by having ephemeral layer rolls wait for flushes.
4673 7104 : if let Some(delay_threshold) = self.get_l0_flush_delay_threshold() {
4674 12 : if l0_count >= delay_threshold {
4675 0 : let delay = flush_duration.as_secs_f64();
4676 0 : info!(
4677 0 : "delaying layer flush by {delay:.3}s for compaction backpressure at \
4678 0 : {l0_count} L0 layers ({frozen_count} frozen layers with {frozen_size} bytes)"
4679 : );
4680 0 : let _delay_timer = self
4681 0 : .metrics
4682 0 : .flush_delay_histo
4683 0 : .start_timer()
4684 0 : .record_on_drop();
4685 0 : tokio::select! {
4686 0 : _ = tokio::time::sleep(flush_duration) => {},
4687 0 : _ = watch_l0.wait_for(|l0| *l0 < delay_threshold) => {},
4688 0 : _ = self.cancel.cancelled() => {},
4689 : }
4690 12 : }
4691 7092 : }
4692 : };
4693 :
4694 : // Unsharded tenants should never advance their LSN beyond the end of the
4695 : // highest layer they write: such gaps between layer data and the frozen LSN
4696 : // are only legal on sharded tenants.
4697 7162 : debug_assert!(
4698 7162 : self.shard_identity.count.count() > 1
4699 7162 : || flushed_to_lsn >= frozen_to_lsn
4700 168 : || !flushed_to_lsn.is_valid()
4701 : );
4702 :
4703 7162 : if flushed_to_lsn < frozen_to_lsn && self.shard_identity.count.count() > 1 {
4704 : // If our layer flushes didn't carry disk_consistent_lsn up to the `to_lsn` advertised
4705 : // to us via layer_flush_start_rx, then advance it here.
4706 : //
4707 : // This path is only taken for tenants with multiple shards: single sharded tenants should
4708 : // never encounter a gap in the wal.
4709 0 : let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
4710 0 : tracing::debug!(
4711 0 : "Advancing disk_consistent_lsn across layer gap {old_disk_consistent_lsn}->{frozen_to_lsn}"
4712 : );
4713 0 : if self.set_disk_consistent_lsn(frozen_to_lsn) {
4714 0 : if let Err(e) = self.schedule_uploads(frozen_to_lsn, vec![]) {
4715 0 : tracing::warn!(
4716 0 : "Failed to schedule metadata upload after updating disk_consistent_lsn: {e}"
4717 : );
4718 0 : }
4719 0 : }
4720 7162 : }
4721 :
4722 : // Notify any listeners that we're done
4723 7162 : let _ = self
4724 7162 : .layer_flush_done_tx
4725 7162 : .send_replace((flush_counter, result));
4726 : }
4727 60 : }
4728 :
4729 : /// Waits any flush request created by [`Self::freeze_inmem_layer_at`] to complete.
4730 6792 : async fn wait_flush_completion(&self, request: u64) -> Result<(), FlushLayerError> {
4731 6792 : let mut rx = self.layer_flush_done_tx.subscribe();
4732 : loop {
4733 : {
4734 13783 : let (last_result_counter, last_result) = &*rx.borrow();
4735 13783 : if *last_result_counter >= request {
4736 6792 : if let Err(err) = last_result {
4737 : // We already logged the original error in
4738 : // flush_loop. We cannot propagate it to the caller
4739 : // here, because it might not be Cloneable
4740 0 : return Err(err.clone());
4741 : } else {
4742 6792 : return Ok(());
4743 : }
4744 6991 : }
4745 6991 : }
4746 6991 : trace!("waiting for flush to complete");
4747 6991 : tokio::select! {
4748 6991 : rx_e = rx.changed() => {
4749 6991 : rx_e.map_err(|_| FlushLayerError::NotRunning(*self.flush_loop_state.lock().unwrap()))?;
4750 : },
4751 : // Cancellation safety: we are not leaving an I/O in-flight for the flush, we're just ignoring
4752 : // the notification from [`flush_loop`] that it completed.
4753 6991 : _ = self.cancel.cancelled() => {
4754 0 : tracing::info!("Cancelled layer flush due on timeline shutdown");
4755 0 : return Ok(())
4756 : }
4757 : };
4758 6991 : trace!("done")
4759 : }
4760 6792 : }
4761 :
4762 : /// Flush one frozen in-memory layer to disk, as a new delta layer.
4763 : ///
4764 : /// Return value is the last lsn (inclusive) of the layer that was frozen.
4765 : #[instrument(skip_all, fields(layer=%frozen_layer))]
4766 : async fn flush_frozen_layer(
4767 : self: &Arc<Self>,
4768 : frozen_layer: Arc<InMemoryLayer>,
4769 : ctx: &RequestContext,
4770 : ) -> Result<Lsn, FlushLayerError> {
4771 : debug_assert_current_span_has_tenant_and_timeline_id();
4772 :
4773 : // As a special case, when we have just imported an image into the repository,
4774 : // instead of writing out a L0 delta layer, we directly write out image layer
4775 : // files instead. This is possible as long as *all* the data imported into the
4776 : // repository have the same LSN.
4777 : let lsn_range = frozen_layer.get_lsn_range();
4778 :
4779 : // Whether to directly create image layers for this flush, or flush them as delta layers
4780 : let create_image_layer =
4781 : lsn_range.start == self.initdb_lsn && lsn_range.end == Lsn(self.initdb_lsn.0 + 1);
4782 :
4783 : #[cfg(test)]
4784 : {
4785 : match &mut *self.flush_loop_state.lock().unwrap() {
4786 : FlushLoopState::NotStarted | FlushLoopState::Exited => {
4787 : panic!("flush loop not running")
4788 : }
4789 : FlushLoopState::Running {
4790 : expect_initdb_optimization,
4791 : initdb_optimization_count,
4792 : ..
4793 : } => {
4794 : if create_image_layer {
4795 : *initdb_optimization_count += 1;
4796 : } else {
4797 : assert!(!*expect_initdb_optimization, "expected initdb optimization");
4798 : }
4799 : }
4800 : }
4801 : }
4802 :
4803 : let (layers_to_upload, delta_layer_to_add) = if create_image_layer {
4804 : // Note: The 'ctx' in use here has DownloadBehavior::Error. We should not
4805 : // require downloading anything during initial import.
4806 : let ((rel_partition, metadata_partition), _lsn) = self
4807 : .repartition(
4808 : self.initdb_lsn,
4809 : self.get_compaction_target_size(),
4810 : EnumSet::empty(),
4811 : ctx,
4812 : )
4813 : .await
4814 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e.into()))?;
4815 :
4816 : if self.cancel.is_cancelled() {
4817 : return Err(FlushLayerError::Cancelled);
4818 : }
4819 :
4820 : // Ensure that we have a single call to `create_image_layers` with a combined dense keyspace.
4821 : // So that the key ranges don't overlap.
4822 : let mut partitions = KeyPartitioning::default();
4823 : partitions.parts.extend(rel_partition.parts);
4824 : if !metadata_partition.parts.is_empty() {
4825 : assert_eq!(
4826 : metadata_partition.parts.len(),
4827 : 1,
4828 : "currently sparse keyspace should only contain a single metadata keyspace"
4829 : );
4830 : // Safety: create_image_layers treat sparse keyspaces differently that it does not scan
4831 : // every single key within the keyspace, and therefore, it's safe to force converting it
4832 : // into a dense keyspace before calling this function.
4833 : partitions
4834 : .parts
4835 : .extend(metadata_partition.into_dense().parts);
4836 : }
4837 :
4838 : let mut layers_to_upload = Vec::new();
4839 : let (generated_image_layers, is_complete) = self
4840 : .create_image_layers(
4841 : &partitions,
4842 : self.initdb_lsn,
4843 : ImageLayerCreationMode::Initial,
4844 : ctx,
4845 : LastImageLayerCreationStatus::Initial,
4846 : false, // don't yield for L0, we're flushing L0
4847 : )
4848 : .await?;
4849 : debug_assert!(
4850 : matches!(is_complete, LastImageLayerCreationStatus::Complete),
4851 : "init image generation mode must fully cover the keyspace"
4852 : );
4853 : layers_to_upload.extend(generated_image_layers);
4854 :
4855 : (layers_to_upload, None)
4856 : } else {
4857 : // Normal case, write out a L0 delta layer file.
4858 : // `create_delta_layer` will not modify the layer map.
4859 : // We will remove frozen layer and add delta layer in one atomic operation later.
4860 : let Some(layer) = self
4861 : .create_delta_layer(&frozen_layer, None, ctx)
4862 : .await
4863 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?
4864 : else {
4865 : panic!("delta layer cannot be empty if no filter is applied");
4866 : };
4867 : (
4868 : // FIXME: even though we have a single image and single delta layer assumption
4869 : // we push them to vec
4870 : vec![layer.clone()],
4871 : Some(layer),
4872 : )
4873 : };
4874 :
4875 : pausable_failpoint!("flush-layer-cancel-after-writing-layer-out-pausable");
4876 :
4877 : if self.cancel.is_cancelled() {
4878 : return Err(FlushLayerError::Cancelled);
4879 : }
4880 :
4881 : let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1);
4882 :
4883 : // The new on-disk layers are now in the layer map. We can remove the
4884 : // in-memory layer from the map now. The flushed layer is stored in
4885 : // the mapping in `create_delta_layer`.
4886 : {
4887 : let mut guard = self.layers.write().await;
4888 :
4889 : guard.open_mut()?.finish_flush_l0_layer(
4890 : delta_layer_to_add.as_ref(),
4891 : &frozen_layer,
4892 : &self.metrics,
4893 : );
4894 :
4895 : if self.set_disk_consistent_lsn(disk_consistent_lsn) {
4896 : // Schedule remote uploads that will reflect our new disk_consistent_lsn
4897 : self.schedule_uploads(disk_consistent_lsn, layers_to_upload)
4898 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?;
4899 : }
4900 : // release lock on 'layers'
4901 : };
4902 :
4903 : // FIXME: between create_delta_layer and the scheduling of the upload in `update_metadata_file`,
4904 : // a compaction can delete the file and then it won't be available for uploads any more.
4905 : // We still schedule the upload, resulting in an error, but ideally we'd somehow avoid this
4906 : // race situation.
4907 : // See https://github.com/neondatabase/neon/issues/4526
4908 : pausable_failpoint!("flush-frozen-pausable");
4909 :
4910 : // This failpoint is used by another test case `test_pageserver_recovery`.
4911 : fail_point!("flush-frozen-exit");
4912 :
4913 : Ok(Lsn(lsn_range.end.0 - 1))
4914 : }
4915 :
4916 : /// Return true if the value changed
4917 : ///
4918 : /// This function must only be used from the layer flush task.
4919 7104 : fn set_disk_consistent_lsn(&self, new_value: Lsn) -> bool {
4920 7104 : let old_value = self.disk_consistent_lsn.fetch_max(new_value);
4921 7104 : assert!(
4922 7104 : new_value >= old_value,
4923 0 : "disk_consistent_lsn must be growing monotonously at runtime; current {old_value}, offered {new_value}"
4924 : );
4925 :
4926 7104 : self.metrics
4927 7104 : .disk_consistent_lsn_gauge
4928 7104 : .set(new_value.0 as i64);
4929 7104 : new_value != old_value
4930 7104 : }
4931 :
4932 : /// Update metadata file
4933 7404 : fn schedule_uploads(
4934 7404 : &self,
4935 7404 : disk_consistent_lsn: Lsn,
4936 7404 : layers_to_upload: impl IntoIterator<Item = ResidentLayer>,
4937 7404 : ) -> anyhow::Result<()> {
4938 7404 : // We can only save a valid 'prev_record_lsn' value on disk if we
4939 7404 : // flushed *all* in-memory changes to disk. We only track
4940 7404 : // 'prev_record_lsn' in memory for the latest processed record, so we
4941 7404 : // don't remember what the correct value that corresponds to some old
4942 7404 : // LSN is. But if we flush everything, then the value corresponding
4943 7404 : // current 'last_record_lsn' is correct and we can store it on disk.
4944 7404 : let RecordLsn {
4945 7404 : last: last_record_lsn,
4946 7404 : prev: prev_record_lsn,
4947 7404 : } = self.last_record_lsn.load();
4948 7404 : let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
4949 6643 : Some(prev_record_lsn)
4950 : } else {
4951 761 : None
4952 : };
4953 :
4954 7404 : let update = crate::tenant::metadata::MetadataUpdate::new(
4955 7404 : disk_consistent_lsn,
4956 7404 : ondisk_prev_record_lsn,
4957 7404 : *self.applied_gc_cutoff_lsn.read(),
4958 7404 : );
4959 7404 :
4960 7404 : fail_point!("checkpoint-before-saving-metadata", |x| bail!(
4961 0 : "{}",
4962 0 : x.unwrap()
4963 7404 : ));
4964 :
4965 14580 : for layer in layers_to_upload {
4966 7176 : self.remote_client.schedule_layer_file_upload(layer)?;
4967 : }
4968 7404 : self.remote_client
4969 7404 : .schedule_index_upload_for_metadata_update(&update)?;
4970 :
4971 7404 : Ok(())
4972 7404 : }
4973 :
4974 0 : pub(crate) async fn preserve_initdb_archive(&self) -> anyhow::Result<()> {
4975 0 : self.remote_client
4976 0 : .preserve_initdb_archive(
4977 0 : &self.tenant_shard_id.tenant_id,
4978 0 : &self.timeline_id,
4979 0 : &self.cancel,
4980 0 : )
4981 0 : .await
4982 0 : }
4983 :
4984 : // Write out the given frozen in-memory layer as a new L0 delta file. This L0 file will not be tracked
4985 : // in layer map immediately. The caller is responsible to put it into the layer map.
4986 5808 : async fn create_delta_layer(
4987 5808 : self: &Arc<Self>,
4988 5808 : frozen_layer: &Arc<InMemoryLayer>,
4989 5808 : key_range: Option<Range<Key>>,
4990 5808 : ctx: &RequestContext,
4991 5808 : ) -> anyhow::Result<Option<ResidentLayer>> {
4992 5808 : let self_clone = Arc::clone(self);
4993 5808 : let frozen_layer = Arc::clone(frozen_layer);
4994 5808 : let ctx = ctx.attached_child();
4995 5808 : let work = async move {
4996 5808 : let Some((desc, path)) = frozen_layer
4997 5808 : .write_to_disk(
4998 5808 : &ctx,
4999 5808 : key_range,
5000 5808 : self_clone.l0_flush_global_state.inner(),
5001 5808 : &self_clone.gate,
5002 5808 : self_clone.cancel.clone(),
5003 5808 : )
5004 5808 : .await?
5005 : else {
5006 0 : return Ok(None);
5007 : };
5008 5808 : let new_delta = Layer::finish_creating(self_clone.conf, &self_clone, desc, &path)?;
5009 :
5010 : // The write_to_disk() above calls writer.finish() which already did the fsync of the inodes.
5011 : // We just need to fsync the directory in which these inodes are linked,
5012 : // which we know to be the timeline directory.
5013 : //
5014 : // We use fatal_err() below because the after write_to_disk returns with success,
5015 : // the in-memory state of the filesystem already has the layer file in its final place,
5016 : // and subsequent pageserver code could think it's durable while it really isn't.
5017 5808 : let timeline_dir = VirtualFile::open(
5018 5808 : &self_clone
5019 5808 : .conf
5020 5808 : .timeline_path(&self_clone.tenant_shard_id, &self_clone.timeline_id),
5021 5808 : &ctx,
5022 5808 : )
5023 5808 : .await
5024 5808 : .fatal_err("VirtualFile::open for timeline dir fsync");
5025 5808 : timeline_dir
5026 5808 : .sync_all()
5027 5808 : .await
5028 5808 : .fatal_err("VirtualFile::sync_all timeline dir");
5029 5808 : anyhow::Ok(Some(new_delta))
5030 5808 : };
5031 : // Before tokio-epoll-uring, we ran write_to_disk & the sync_all inside spawn_blocking.
5032 : // Preserve that behavior to maintain the same behavior for `virtual_file_io_engine=std-fs`.
5033 : use crate::virtual_file::io_engine::IoEngine;
5034 5808 : match crate::virtual_file::io_engine::get() {
5035 0 : IoEngine::NotSet => panic!("io engine not set"),
5036 : IoEngine::StdFs => {
5037 2904 : let span = tracing::info_span!("blocking");
5038 2904 : tokio::task::spawn_blocking({
5039 2904 : move || Handle::current().block_on(work.instrument(span))
5040 2904 : })
5041 2904 : .await
5042 2904 : .context("spawn_blocking")
5043 2904 : .and_then(|x| x)
5044 : }
5045 : #[cfg(target_os = "linux")]
5046 2904 : IoEngine::TokioEpollUring => work.await,
5047 : }
5048 5808 : }
5049 :
5050 3480 : async fn repartition(
5051 3480 : &self,
5052 3480 : lsn: Lsn,
5053 3480 : partition_size: u64,
5054 3480 : flags: EnumSet<CompactFlags>,
5055 3480 : ctx: &RequestContext,
5056 3480 : ) -> Result<((KeyPartitioning, SparseKeyPartitioning), Lsn), CompactionError> {
5057 3480 : let Ok(mut guard) = self.partitioning.try_write_guard() else {
5058 : // NB: there are two callers, one is the compaction task, of which there is only one per struct Tenant and hence Timeline.
5059 : // The other is the initdb optimization in flush_frozen_layer, used by `boostrap_timeline`, which runs before `.activate()`
5060 : // and hence before the compaction task starts.
5061 0 : return Err(CompactionError::Other(anyhow!(
5062 0 : "repartition() called concurrently"
5063 0 : )));
5064 : };
5065 3480 : let ((dense_partition, sparse_partition), partition_lsn) = &*guard.read();
5066 3480 : if lsn < *partition_lsn {
5067 0 : return Err(CompactionError::Other(anyhow!(
5068 0 : "repartition() called with LSN going backwards, this should not happen"
5069 0 : )));
5070 3480 : }
5071 3480 :
5072 3480 : let distance = lsn.0 - partition_lsn.0;
5073 3480 : if *partition_lsn != Lsn(0)
5074 1572 : && distance <= self.repartition_threshold
5075 1572 : && !flags.contains(CompactFlags::ForceRepartition)
5076 : {
5077 1488 : debug!(
5078 : distance,
5079 : threshold = self.repartition_threshold,
5080 0 : "no repartitioning needed"
5081 : );
5082 1488 : return Ok((
5083 1488 : (dense_partition.clone(), sparse_partition.clone()),
5084 1488 : *partition_lsn,
5085 1488 : ));
5086 1992 : }
5087 :
5088 1992 : let (dense_ks, sparse_ks) = self.collect_keyspace(lsn, ctx).await?;
5089 1992 : let dense_partitioning = dense_ks.partition(&self.shard_identity, partition_size);
5090 1992 : let sparse_partitioning = SparseKeyPartitioning {
5091 1992 : parts: vec![sparse_ks],
5092 1992 : }; // no partitioning for metadata keys for now
5093 1992 : let result = ((dense_partitioning, sparse_partitioning), lsn);
5094 1992 : guard.write(result.clone());
5095 1992 : Ok(result)
5096 3480 : }
5097 :
5098 : // Is it time to create a new image layer for the given partition? True if we want to generate.
5099 84 : async fn time_for_new_image_layer(&self, partition: &KeySpace, lsn: Lsn) -> bool {
5100 84 : let threshold = self.get_image_creation_threshold();
5101 :
5102 84 : let guard = self.layers.read().await;
5103 84 : let Ok(layers) = guard.layer_map() else {
5104 0 : return false;
5105 : };
5106 :
5107 84 : let mut max_deltas = 0;
5108 168 : for part_range in &partition.ranges {
5109 84 : let image_coverage = layers.image_coverage(part_range, lsn);
5110 168 : for (img_range, last_img) in image_coverage {
5111 84 : let img_lsn = if let Some(last_img) = last_img {
5112 0 : last_img.get_lsn_range().end
5113 : } else {
5114 84 : Lsn(0)
5115 : };
5116 : // Let's consider an example:
5117 : //
5118 : // delta layer with LSN range 71-81
5119 : // delta layer with LSN range 81-91
5120 : // delta layer with LSN range 91-101
5121 : // image layer at LSN 100
5122 : //
5123 : // If 'lsn' is still 100, i.e. no new WAL has been processed since the last image layer,
5124 : // there's no need to create a new one. We check this case explicitly, to avoid passing
5125 : // a bogus range to count_deltas below, with start > end. It's even possible that there
5126 : // are some delta layers *later* than current 'lsn', if more WAL was processed and flushed
5127 : // after we read last_record_lsn, which is passed here in the 'lsn' argument.
5128 84 : if img_lsn < lsn {
5129 84 : let num_deltas =
5130 84 : layers.count_deltas(&img_range, &(img_lsn..lsn), Some(threshold));
5131 84 :
5132 84 : max_deltas = max_deltas.max(num_deltas);
5133 84 : if num_deltas >= threshold {
5134 0 : debug!(
5135 0 : "key range {}-{}, has {} deltas on this timeline in LSN range {}..{}",
5136 : img_range.start, img_range.end, num_deltas, img_lsn, lsn
5137 : );
5138 0 : return true;
5139 84 : }
5140 0 : }
5141 : }
5142 : }
5143 :
5144 84 : debug!(
5145 : max_deltas,
5146 0 : "none of the partitioned ranges had >= {threshold} deltas"
5147 : );
5148 84 : false
5149 84 : }
5150 :
5151 : /// Create image layers for Postgres data. Assumes the caller passes a partition that is not too large,
5152 : /// so that at most one image layer will be produced from this function.
5153 : #[allow(clippy::too_many_arguments)]
5154 1452 : async fn create_image_layer_for_rel_blocks(
5155 1452 : self: &Arc<Self>,
5156 1452 : partition: &KeySpace,
5157 1452 : mut image_layer_writer: ImageLayerWriter,
5158 1452 : lsn: Lsn,
5159 1452 : ctx: &RequestContext,
5160 1452 : img_range: Range<Key>,
5161 1452 : io_concurrency: IoConcurrency,
5162 1452 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
5163 1452 : let mut wrote_keys = false;
5164 1452 :
5165 1452 : let mut key_request_accum = KeySpaceAccum::new();
5166 9636 : for range in &partition.ranges {
5167 8184 : let mut key = range.start;
5168 17736 : while key < range.end {
5169 : // Decide whether to retain this key: usually we do, but sharded tenants may
5170 : // need to drop keys that don't belong to them. If we retain the key, add it
5171 : // to `key_request_accum` for later issuing a vectored get
5172 9552 : if self.shard_identity.is_key_disposable(&key) {
5173 0 : debug!(
5174 0 : "Dropping key {} during compaction (it belongs on shard {:?})",
5175 0 : key,
5176 0 : self.shard_identity.get_shard_number(&key)
5177 : );
5178 9552 : } else {
5179 9552 : key_request_accum.add_key(key);
5180 9552 : }
5181 :
5182 9552 : let last_key_in_range = key.next() == range.end;
5183 9552 : key = key.next();
5184 9552 :
5185 9552 : // Maybe flush `key_rest_accum`
5186 9552 : if key_request_accum.raw_size() >= Timeline::MAX_GET_VECTORED_KEYS
5187 9552 : || (last_key_in_range && key_request_accum.raw_size() > 0)
5188 : {
5189 8184 : let query =
5190 8184 : VersionedKeySpaceQuery::uniform(key_request_accum.consume_keyspace(), lsn);
5191 :
5192 8184 : let results = self
5193 8184 : .get_vectored(query, io_concurrency.clone(), ctx)
5194 8184 : .await?;
5195 :
5196 8184 : if self.cancel.is_cancelled() {
5197 0 : return Err(CreateImageLayersError::Cancelled);
5198 8184 : }
5199 :
5200 17736 : for (img_key, img) in results {
5201 9552 : let img = match img {
5202 9552 : Ok(img) => img,
5203 0 : Err(err) => {
5204 0 : // If we fail to reconstruct a VM or FSM page, we can zero the
5205 0 : // page without losing any actual user data. That seems better
5206 0 : // than failing repeatedly and getting stuck.
5207 0 : //
5208 0 : // We had a bug at one point, where we truncated the FSM and VM
5209 0 : // in the pageserver, but the Postgres didn't know about that
5210 0 : // and continued to generate incremental WAL records for pages
5211 0 : // that didn't exist in the pageserver. Trying to replay those
5212 0 : // WAL records failed to find the previous image of the page.
5213 0 : // This special case allows us to recover from that situation.
5214 0 : // See https://github.com/neondatabase/neon/issues/2601.
5215 0 : //
5216 0 : // Unfortunately we cannot do this for the main fork, or for
5217 0 : // any metadata keys, keys, as that would lead to actual data
5218 0 : // loss.
5219 0 : if img_key.is_rel_fsm_block_key() || img_key.is_rel_vm_block_key() {
5220 0 : warn!(
5221 0 : "could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}"
5222 : );
5223 0 : ZERO_PAGE.clone()
5224 : } else {
5225 0 : return Err(CreateImageLayersError::from(err));
5226 : }
5227 : }
5228 : };
5229 :
5230 : // Write all the keys we just read into our new image layer.
5231 9552 : image_layer_writer.put_image(img_key, img, ctx).await?;
5232 9552 : wrote_keys = true;
5233 : }
5234 1368 : }
5235 : }
5236 : }
5237 :
5238 1452 : if wrote_keys {
5239 : // Normal path: we have written some data into the new image layer for this
5240 : // partition, so flush it to disk.
5241 1452 : info!(
5242 0 : "produced image layer for rel {}",
5243 0 : ImageLayerName {
5244 0 : key_range: img_range.clone(),
5245 0 : lsn
5246 0 : },
5247 : );
5248 1452 : Ok(ImageLayerCreationOutcome::Generated {
5249 1452 : unfinished_image_layer: image_layer_writer,
5250 1452 : })
5251 : } else {
5252 0 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
5253 0 : Ok(ImageLayerCreationOutcome::Empty)
5254 : }
5255 1452 : }
5256 :
5257 : /// Create an image layer for metadata keys. This function produces one image layer for all metadata
5258 : /// keys for now. Because metadata keys cannot exceed basebackup size limit, the image layer for it
5259 : /// would not be too large to fit in a single image layer.
5260 : ///
5261 : /// Creating image layers for metadata keys are different from relational keys. Firstly, instead of
5262 : /// iterating each key and get an image for each of them, we do a `vectored_get` scan over the sparse
5263 : /// keyspace to get all images in one run. Secondly, we use a different image layer generation metrics
5264 : /// for metadata keys than relational keys, which is the number of delta files visited during the scan.
5265 : #[allow(clippy::too_many_arguments)]
5266 1392 : async fn create_image_layer_for_metadata_keys(
5267 1392 : self: &Arc<Self>,
5268 1392 : partition: &KeySpace,
5269 1392 : mut image_layer_writer: ImageLayerWriter,
5270 1392 : lsn: Lsn,
5271 1392 : ctx: &RequestContext,
5272 1392 : img_range: Range<Key>,
5273 1392 : mode: ImageLayerCreationMode,
5274 1392 : io_concurrency: IoConcurrency,
5275 1392 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
5276 1392 : // Metadata keys image layer creation.
5277 1392 : let mut reconstruct_state = ValuesReconstructState::new(io_concurrency);
5278 1392 : let begin = Instant::now();
5279 : // Directly use `get_vectored_impl` to skip the max_vectored_read_key limit check. Note that the keyspace should
5280 : // not contain too many keys, otherwise this takes a lot of memory.
5281 1392 : let data = self
5282 1392 : .get_vectored_impl(
5283 1392 : VersionedKeySpaceQuery::uniform(partition.clone(), lsn),
5284 1392 : &mut reconstruct_state,
5285 1392 : ctx,
5286 1392 : )
5287 1392 : .await?;
5288 1392 : let (data, total_kb_retrieved, total_keys_retrieved) = {
5289 1392 : let mut new_data = BTreeMap::new();
5290 1392 : let mut total_kb_retrieved = 0;
5291 1392 : let mut total_keys_retrieved = 0;
5292 61464 : for (k, v) in data {
5293 60072 : let v = v?;
5294 60072 : total_kb_retrieved += KEY_SIZE + v.len();
5295 60072 : total_keys_retrieved += 1;
5296 60072 : new_data.insert(k, v);
5297 : }
5298 1392 : (new_data, total_kb_retrieved / 1024, total_keys_retrieved)
5299 1392 : };
5300 1392 : let delta_files_accessed = reconstruct_state.get_delta_layers_visited();
5301 1392 : let elapsed = begin.elapsed();
5302 1392 :
5303 1392 : let trigger_generation = delta_files_accessed as usize >= MAX_AUX_FILE_V2_DELTAS;
5304 1392 : info!(
5305 0 : "metadata key compaction: trigger_generation={trigger_generation}, delta_files_accessed={delta_files_accessed}, total_kb_retrieved={total_kb_retrieved}, total_keys_retrieved={total_keys_retrieved}, read_time={}s",
5306 0 : elapsed.as_secs_f64()
5307 : );
5308 :
5309 1392 : if !trigger_generation && mode == ImageLayerCreationMode::Try {
5310 12 : return Ok(ImageLayerCreationOutcome::Skip);
5311 1380 : }
5312 1380 : if self.cancel.is_cancelled() {
5313 0 : return Err(CreateImageLayersError::Cancelled);
5314 1380 : }
5315 1380 : let mut wrote_any_image = false;
5316 61452 : for (k, v) in data {
5317 60072 : if v.is_empty() {
5318 : // the key has been deleted, it does not need an image
5319 : // in metadata keyspace, an empty image == tombstone
5320 48 : continue;
5321 60024 : }
5322 60024 : wrote_any_image = true;
5323 60024 :
5324 60024 : // No need to handle sharding b/c metadata keys are always on the 0-th shard.
5325 60024 :
5326 60024 : // TODO: split image layers to avoid too large layer files. Too large image files are not handled
5327 60024 : // on the normal data path either.
5328 60024 : image_layer_writer.put_image(k, v, ctx).await?;
5329 : }
5330 :
5331 1380 : if wrote_any_image {
5332 : // Normal path: we have written some data into the new image layer for this
5333 : // partition, so flush it to disk.
5334 72 : info!(
5335 0 : "created image layer for metadata {}",
5336 0 : ImageLayerName {
5337 0 : key_range: img_range.clone(),
5338 0 : lsn
5339 0 : }
5340 : );
5341 72 : Ok(ImageLayerCreationOutcome::Generated {
5342 72 : unfinished_image_layer: image_layer_writer,
5343 72 : })
5344 : } else {
5345 1308 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
5346 1308 : Ok(ImageLayerCreationOutcome::Empty)
5347 : }
5348 1392 : }
5349 :
5350 : /// Predicate function which indicates whether we should check if new image layers
5351 : /// are required. Since checking if new image layers are required is expensive in
5352 : /// terms of CPU, we only do it in the following cases:
5353 : /// 1. If the timeline has ingested sufficient WAL to justify the cost
5354 : /// 2. If enough time has passed since the last check:
5355 : /// 1. For large tenants, we wish to perform the check more often since they
5356 : /// suffer from the lack of image layers
5357 : /// 2. For small tenants (that can mostly fit in RAM), we use a much longer interval
5358 3480 : fn should_check_if_image_layers_required(self: &Arc<Timeline>, lsn: Lsn) -> bool {
5359 : const LARGE_TENANT_THRESHOLD: u64 = 2 * 1024 * 1024 * 1024;
5360 :
5361 3480 : let last_checks_at = self.last_image_layer_creation_check_at.load();
5362 3480 : let distance = lsn
5363 3480 : .checked_sub(last_checks_at)
5364 3480 : .expect("Attempt to compact with LSN going backwards");
5365 3480 : let min_distance =
5366 3480 : self.get_image_layer_creation_check_threshold() as u64 * self.get_checkpoint_distance();
5367 3480 :
5368 3480 : let distance_based_decision = distance.0 >= min_distance;
5369 3480 :
5370 3480 : let mut time_based_decision = false;
5371 3480 : let mut last_check_instant = self.last_image_layer_creation_check_instant.lock().unwrap();
5372 3480 : if let CurrentLogicalSize::Exact(logical_size) = self.current_logical_size.current_size() {
5373 2868 : let check_required_after = if Into::<u64>::into(&logical_size) >= LARGE_TENANT_THRESHOLD
5374 : {
5375 0 : self.get_checkpoint_timeout()
5376 : } else {
5377 2868 : Duration::from_secs(3600 * 48)
5378 : };
5379 :
5380 2868 : time_based_decision = match *last_check_instant {
5381 1572 : Some(last_check) => {
5382 1572 : let elapsed = last_check.elapsed();
5383 1572 : elapsed >= check_required_after
5384 : }
5385 1296 : None => true,
5386 : };
5387 612 : }
5388 :
5389 : // Do the expensive delta layer counting only if this timeline has ingested sufficient
5390 : // WAL since the last check or a checkpoint timeout interval has elapsed since the last
5391 : // check.
5392 3480 : let decision = distance_based_decision || time_based_decision;
5393 :
5394 3480 : if decision {
5395 1308 : self.last_image_layer_creation_check_at.store(lsn);
5396 1308 : *last_check_instant = Some(Instant::now());
5397 2172 : }
5398 :
5399 3480 : decision
5400 3480 : }
5401 :
5402 : /// Returns the image layers generated and an enum indicating whether the process is fully completed.
5403 : /// true = we have generate all image layers, false = we preempt the process for L0 compaction.
5404 : #[tracing::instrument(skip_all, fields(%lsn, %mode))]
5405 : async fn create_image_layers(
5406 : self: &Arc<Timeline>,
5407 : partitioning: &KeyPartitioning,
5408 : lsn: Lsn,
5409 : mode: ImageLayerCreationMode,
5410 : ctx: &RequestContext,
5411 : last_status: LastImageLayerCreationStatus,
5412 : yield_for_l0: bool,
5413 : ) -> Result<(Vec<ResidentLayer>, LastImageLayerCreationStatus), CreateImageLayersError> {
5414 : let timer = self.metrics.create_images_time_histo.start_timer();
5415 :
5416 : if partitioning.parts.is_empty() {
5417 : warn!("no partitions to create image layers for");
5418 : return Ok((vec![], LastImageLayerCreationStatus::Complete));
5419 : }
5420 :
5421 : // We need to avoid holes between generated image layers.
5422 : // Otherwise LayerMap::image_layer_exists will return false if key range of some layer is covered by more than one
5423 : // image layer with hole between them. In this case such layer can not be utilized by GC.
5424 : //
5425 : // How such hole between partitions can appear?
5426 : // if we have relation with relid=1 and size 100 and relation with relid=2 with size 200 then result of
5427 : // KeySpace::partition may contain partitions <100000000..100000099> and <200000000..200000199>.
5428 : // If there is delta layer <100000000..300000000> then it never be garbage collected because
5429 : // image layers <100000000..100000099> and <200000000..200000199> are not completely covering it.
5430 : let mut start = Key::MIN;
5431 :
5432 : let check_for_image_layers =
5433 : if let LastImageLayerCreationStatus::Incomplete { last_key } = last_status {
5434 : info!(
5435 : "resuming image layer creation: last_status=incomplete, continue from {}",
5436 : last_key
5437 : );
5438 : true
5439 : } else {
5440 : self.should_check_if_image_layers_required(lsn)
5441 : };
5442 :
5443 : let mut batch_image_writer = BatchLayerWriter::new(self.conf).await?;
5444 :
5445 : let mut all_generated = true;
5446 :
5447 : let mut partition_processed = 0;
5448 : let mut total_partitions = partitioning.parts.len();
5449 : let mut last_partition_processed = None;
5450 : let mut partition_parts = partitioning.parts.clone();
5451 :
5452 : if let LastImageLayerCreationStatus::Incomplete { last_key } = last_status {
5453 : // We need to skip the partitions that have already been processed.
5454 : let mut found = false;
5455 : for (i, partition) in partition_parts.iter().enumerate() {
5456 : if last_key <= partition.end().unwrap() {
5457 : // ```plain
5458 : // |------|--------|----------|------|
5459 : // ^last_key
5460 : // ^start from this partition
5461 : // ```
5462 : // Why `i+1` instead of `i`?
5463 : // It is possible that the user did some writes after the previous image layer creation attempt so that
5464 : // a relation grows in size, and the last_key is now in the middle of the partition. In this case, we
5465 : // still want to skip this partition, so that we can make progress and avoid generating image layers over
5466 : // the same partition. Doing a mod to ensure we don't end up with an empty vec.
5467 : if i + 1 >= total_partitions {
5468 : // In general, this case should not happen -- if last_key is on the last partition, the previous
5469 : // iteration of image layer creation should return a complete status.
5470 : break; // with found=false
5471 : }
5472 : partition_parts = partition_parts.split_off(i + 1); // Remove the first i + 1 elements
5473 : total_partitions = partition_parts.len();
5474 : // Update the start key to the partition start.
5475 : start = partition_parts[0].start().unwrap();
5476 : found = true;
5477 : break;
5478 : }
5479 : }
5480 : if !found {
5481 : // Last key is within the last partition, or larger than all partitions.
5482 : return Ok((vec![], LastImageLayerCreationStatus::Complete));
5483 : }
5484 : }
5485 :
5486 : for partition in partition_parts.iter() {
5487 : if self.cancel.is_cancelled() {
5488 : return Err(CreateImageLayersError::Cancelled);
5489 : }
5490 : partition_processed += 1;
5491 : let img_range = start..partition.ranges.last().unwrap().end;
5492 : let compact_metadata = partition.overlaps(&Key::metadata_key_range());
5493 : if compact_metadata {
5494 : for range in &partition.ranges {
5495 : assert!(
5496 : range.start.field1 >= METADATA_KEY_BEGIN_PREFIX
5497 : && range.end.field1 <= METADATA_KEY_END_PREFIX,
5498 : "metadata keys must be partitioned separately"
5499 : );
5500 : }
5501 : if mode == ImageLayerCreationMode::Try && !check_for_image_layers {
5502 : // Skip compaction if there are not enough updates. Metadata compaction will do a scan and
5503 : // might mess up with evictions.
5504 : start = img_range.end;
5505 : continue;
5506 : }
5507 : // For initial and force modes, we always generate image layers for metadata keys.
5508 : } else if let ImageLayerCreationMode::Try = mode {
5509 : // check_for_image_layers = false -> skip
5510 : // check_for_image_layers = true -> check time_for_new_image_layer -> skip/generate
5511 : if !check_for_image_layers || !self.time_for_new_image_layer(partition, lsn).await {
5512 : start = img_range.end;
5513 : continue;
5514 : }
5515 : }
5516 : if let ImageLayerCreationMode::Force = mode {
5517 : // When forced to create image layers, we might try and create them where they already
5518 : // exist. This mode is only used in tests/debug.
5519 : let layers = self.layers.read().await;
5520 : if layers.contains_key(&PersistentLayerKey {
5521 : key_range: img_range.clone(),
5522 : lsn_range: PersistentLayerDesc::image_layer_lsn_range(lsn),
5523 : is_delta: false,
5524 : }) {
5525 : // TODO: this can be processed with the BatchLayerWriter::finish_with_discard
5526 : // in the future.
5527 : tracing::info!(
5528 : "Skipping image layer at {lsn} {}..{}, already exists",
5529 : img_range.start,
5530 : img_range.end
5531 : );
5532 : start = img_range.end;
5533 : continue;
5534 : }
5535 : }
5536 :
5537 : let image_layer_writer = ImageLayerWriter::new(
5538 : self.conf,
5539 : self.timeline_id,
5540 : self.tenant_shard_id,
5541 : &img_range,
5542 : lsn,
5543 : &self.gate,
5544 : self.cancel.clone(),
5545 : ctx,
5546 : )
5547 : .await?;
5548 :
5549 0 : fail_point!("image-layer-writer-fail-before-finish", |_| {
5550 0 : Err(CreateImageLayersError::Other(anyhow::anyhow!(
5551 0 : "failpoint image-layer-writer-fail-before-finish"
5552 0 : )))
5553 0 : });
5554 :
5555 : let io_concurrency = IoConcurrency::spawn_from_conf(
5556 : self.conf,
5557 : self.gate
5558 : .enter()
5559 0 : .map_err(|_| CreateImageLayersError::Cancelled)?,
5560 : );
5561 :
5562 : let outcome = if !compact_metadata {
5563 : self.create_image_layer_for_rel_blocks(
5564 : partition,
5565 : image_layer_writer,
5566 : lsn,
5567 : ctx,
5568 : img_range.clone(),
5569 : io_concurrency,
5570 : )
5571 : .await?
5572 : } else {
5573 : self.create_image_layer_for_metadata_keys(
5574 : partition,
5575 : image_layer_writer,
5576 : lsn,
5577 : ctx,
5578 : img_range.clone(),
5579 : mode,
5580 : io_concurrency,
5581 : )
5582 : .await?
5583 : };
5584 : match outcome {
5585 : ImageLayerCreationOutcome::Empty => {
5586 : // No data in this partition, so we don't need to create an image layer (for now).
5587 : // The next image layer should cover this key range, so we don't advance the `start`
5588 : // key.
5589 : }
5590 : ImageLayerCreationOutcome::Generated {
5591 : unfinished_image_layer,
5592 : } => {
5593 : batch_image_writer.add_unfinished_image_writer(
5594 : unfinished_image_layer,
5595 : img_range.clone(),
5596 : lsn,
5597 : );
5598 : // The next image layer should be generated right after this one.
5599 : start = img_range.end;
5600 : }
5601 : ImageLayerCreationOutcome::Skip => {
5602 : // We don't need to create an image layer for this partition.
5603 : // The next image layer should NOT cover this range, otherwise
5604 : // the keyspace becomes empty (reads don't go past image layers).
5605 : start = img_range.end;
5606 : }
5607 : }
5608 :
5609 : if let ImageLayerCreationMode::Try = mode {
5610 : // We have at least made some progress
5611 : if yield_for_l0 && batch_image_writer.pending_layer_num() >= 1 {
5612 : // The `Try` mode is currently only used on the compaction path. We want to avoid
5613 : // image layer generation taking too long time and blocking L0 compaction. So in this
5614 : // mode, we also inspect the current number of L0 layers and skip image layer generation
5615 : // if there are too many of them.
5616 : let image_preempt_threshold = self.get_image_creation_preempt_threshold()
5617 : * self.get_compaction_threshold();
5618 : // TODO: currently we do not respect `get_image_creation_preempt_threshold` and always yield
5619 : // when there is a single timeline with more than L0 threshold L0 layers. As long as the
5620 : // `get_image_creation_preempt_threshold` is set to a value greater than 0, we will yield for L0 compaction.
5621 : if image_preempt_threshold != 0 {
5622 : let should_yield = self
5623 : .l0_compaction_trigger
5624 : .notified()
5625 : .now_or_never()
5626 : .is_some();
5627 : if should_yield {
5628 : tracing::info!(
5629 : "preempt image layer generation at {lsn} when processing partition {}..{}: too many L0 layers",
5630 : partition.start().unwrap(),
5631 : partition.end().unwrap()
5632 : );
5633 : last_partition_processed = Some(partition.clone());
5634 : all_generated = false;
5635 : break;
5636 : }
5637 : }
5638 : }
5639 : }
5640 : }
5641 :
5642 : let image_layers = batch_image_writer.finish(self, ctx).await?;
5643 :
5644 : let mut guard = self.layers.write().await;
5645 :
5646 : // FIXME: we could add the images to be uploaded *before* returning from here, but right
5647 : // now they are being scheduled outside of write lock; current way is inconsistent with
5648 : // compaction lock order.
5649 : guard
5650 : .open_mut()?
5651 : .track_new_image_layers(&image_layers, &self.metrics);
5652 : drop_wlock(guard);
5653 : let duration = timer.stop_and_record();
5654 :
5655 : // Creating image layers may have caused some previously visible layers to be covered
5656 : if !image_layers.is_empty() {
5657 : self.update_layer_visibility().await?;
5658 : }
5659 :
5660 : let total_layer_size = image_layers
5661 : .iter()
5662 1524 : .map(|l| l.metadata().file_size)
5663 : .sum::<u64>();
5664 :
5665 : if !image_layers.is_empty() {
5666 : info!(
5667 : "created {} image layers ({} bytes) in {}s, processed {} out of {} partitions",
5668 : image_layers.len(),
5669 : total_layer_size,
5670 : duration.as_secs_f64(),
5671 : partition_processed,
5672 : total_partitions
5673 : );
5674 : }
5675 :
5676 : Ok((
5677 : image_layers,
5678 : if all_generated {
5679 : LastImageLayerCreationStatus::Complete
5680 : } else {
5681 : LastImageLayerCreationStatus::Incomplete {
5682 : last_key: if let Some(last_partition_processed) = last_partition_processed {
5683 : last_partition_processed.end().unwrap_or(Key::MIN)
5684 : } else {
5685 : // This branch should be unreachable, but in case it happens, we can just return the start key.
5686 : Key::MIN
5687 : },
5688 : }
5689 : },
5690 : ))
5691 : }
5692 :
5693 : /// Wait until the background initial logical size calculation is complete, or
5694 : /// this Timeline is shut down. Calling this function will cause the initial
5695 : /// logical size calculation to skip waiting for the background jobs barrier.
5696 0 : pub(crate) async fn await_initial_logical_size(self: Arc<Self>) {
5697 0 : if !self.shard_identity.is_shard_zero() {
5698 : // We don't populate logical size on shard >0: skip waiting for it.
5699 0 : return;
5700 0 : }
5701 0 :
5702 0 : if self.remote_client.is_deleting() {
5703 : // The timeline was created in a deletion-resume state, we don't expect logical size to be populated
5704 0 : return;
5705 0 : }
5706 0 :
5707 0 : if self.current_logical_size.current_size().is_exact() {
5708 : // root timelines are initialized with exact count, but never start the background
5709 : // calculation
5710 0 : return;
5711 0 : }
5712 0 :
5713 0 : if self.cancel.is_cancelled() {
5714 : // We already requested stopping the tenant, so we cannot wait for the logical size
5715 : // calculation to complete given the task might have been already cancelled.
5716 0 : return;
5717 0 : }
5718 :
5719 0 : if let Some(await_bg_cancel) = self
5720 0 : .current_logical_size
5721 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
5722 0 : .get()
5723 0 : {
5724 0 : await_bg_cancel.cancel();
5725 0 : } else {
5726 : // We should not wait if we were not able to explicitly instruct
5727 : // the logical size cancellation to skip the concurrency limit semaphore.
5728 : // TODO: this is an unexpected case. We should restructure so that it
5729 : // can't happen.
5730 0 : tracing::warn!(
5731 0 : "await_initial_logical_size: can't get semaphore cancel token, skipping"
5732 : );
5733 0 : debug_assert!(false);
5734 : }
5735 :
5736 0 : tokio::select!(
5737 0 : _ = self.current_logical_size.initialized.acquire() => {},
5738 0 : _ = self.cancel.cancelled() => {}
5739 : )
5740 0 : }
5741 :
5742 : /// Detach this timeline from its ancestor by copying all of ancestors layers as this
5743 : /// Timelines layers up to the ancestor_lsn.
5744 : ///
5745 : /// Requires a timeline that:
5746 : /// - has an ancestor to detach from
5747 : /// - the ancestor does not have an ancestor -- follows from the original RFC limitations, not
5748 : /// a technical requirement
5749 : ///
5750 : /// After the operation has been started, it cannot be canceled. Upon restart it needs to be
5751 : /// polled again until completion.
5752 : ///
5753 : /// During the operation all timelines sharing the data with this timeline will be reparented
5754 : /// from our ancestor to be branches of this timeline.
5755 0 : pub(crate) async fn prepare_to_detach_from_ancestor(
5756 0 : self: &Arc<Timeline>,
5757 0 : tenant: &crate::tenant::TenantShard,
5758 0 : options: detach_ancestor::Options,
5759 0 : behavior: DetachBehavior,
5760 0 : ctx: &RequestContext,
5761 0 : ) -> Result<detach_ancestor::Progress, detach_ancestor::Error> {
5762 0 : detach_ancestor::prepare(self, tenant, behavior, options, ctx).await
5763 0 : }
5764 :
5765 : /// Second step of detach from ancestor; detaches the `self` from it's current ancestor and
5766 : /// reparents any reparentable children of previous ancestor.
5767 : ///
5768 : /// This method is to be called while holding the TenantManager's tenant slot, so during this
5769 : /// method we cannot be deleted nor can any timeline be deleted. After this method returns
5770 : /// successfully, tenant must be reloaded.
5771 : ///
5772 : /// Final step will be to [`Self::complete_detaching_timeline_ancestor`] after optionally
5773 : /// resetting the tenant.
5774 0 : pub(crate) async fn detach_from_ancestor_and_reparent(
5775 0 : self: &Arc<Timeline>,
5776 0 : tenant: &crate::tenant::TenantShard,
5777 0 : prepared: detach_ancestor::PreparedTimelineDetach,
5778 0 : ancestor_timeline_id: TimelineId,
5779 0 : ancestor_lsn: Lsn,
5780 0 : behavior: DetachBehavior,
5781 0 : ctx: &RequestContext,
5782 0 : ) -> Result<detach_ancestor::DetachingAndReparenting, detach_ancestor::Error> {
5783 0 : detach_ancestor::detach_and_reparent(
5784 0 : self,
5785 0 : tenant,
5786 0 : prepared,
5787 0 : ancestor_timeline_id,
5788 0 : ancestor_lsn,
5789 0 : behavior,
5790 0 : ctx,
5791 0 : )
5792 0 : .await
5793 0 : }
5794 :
5795 : /// Final step which unblocks the GC.
5796 : ///
5797 : /// The tenant must've been reset if ancestry was modified previously (in tenant manager).
5798 0 : pub(crate) async fn complete_detaching_timeline_ancestor(
5799 0 : self: &Arc<Timeline>,
5800 0 : tenant: &crate::tenant::TenantShard,
5801 0 : attempt: detach_ancestor::Attempt,
5802 0 : ctx: &RequestContext,
5803 0 : ) -> Result<(), detach_ancestor::Error> {
5804 0 : detach_ancestor::complete(self, tenant, attempt, ctx).await
5805 0 : }
5806 : }
5807 :
5808 : impl Drop for Timeline {
5809 60 : fn drop(&mut self) {
5810 60 : if let Some(ancestor) = &self.ancestor_timeline {
5811 : // This lock should never be poisoned, but in case it is we do a .map() instead of
5812 : // an unwrap(), to avoid panicking in a destructor and thereby aborting the process.
5813 24 : if let Ok(mut gc_info) = ancestor.gc_info.write() {
5814 24 : if !gc_info.remove_child_not_offloaded(self.timeline_id) {
5815 0 : tracing::error!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id,
5816 0 : "Couldn't remove retain_lsn entry from offloaded timeline's parent: already removed");
5817 24 : }
5818 0 : }
5819 36 : }
5820 60 : info!(
5821 0 : "Timeline {} for tenant {} is being dropped",
5822 : self.timeline_id, self.tenant_shard_id.tenant_id
5823 : );
5824 60 : }
5825 : }
5826 :
5827 : /// Top-level failure to compact.
5828 : #[derive(Debug, thiserror::Error)]
5829 : pub(crate) enum CompactionError {
5830 : #[error("The timeline or pageserver is shutting down")]
5831 : ShuttingDown,
5832 : /// Compaction tried to offload a timeline and failed
5833 : #[error("Failed to offload timeline: {0}")]
5834 : Offload(OffloadError),
5835 : /// Compaction cannot be done right now; page reconstruction and so on.
5836 : #[error("Failed to collect keyspace: {0}")]
5837 : CollectKeySpaceError(#[from] CollectKeySpaceError),
5838 : #[error(transparent)]
5839 : Other(anyhow::Error),
5840 : #[error("Compaction already running: {0}")]
5841 : AlreadyRunning(&'static str),
5842 : }
5843 :
5844 : impl CompactionError {
5845 : /// Errors that can be ignored, i.e., cancel and shutdown.
5846 0 : pub fn is_cancel(&self) -> bool {
5847 0 : matches!(
5848 0 : self,
5849 : Self::ShuttingDown
5850 : | Self::AlreadyRunning(_)
5851 : | Self::CollectKeySpaceError(CollectKeySpaceError::Cancelled)
5852 : | Self::CollectKeySpaceError(CollectKeySpaceError::PageRead(
5853 : PageReconstructError::Cancelled
5854 : ))
5855 : | Self::Offload(OffloadError::Cancelled)
5856 : )
5857 0 : }
5858 :
5859 : /// Critical errors that indicate data corruption.
5860 0 : pub fn is_critical(&self) -> bool {
5861 0 : matches!(
5862 0 : self,
5863 : Self::CollectKeySpaceError(
5864 : CollectKeySpaceError::Decode(_)
5865 : | CollectKeySpaceError::PageRead(
5866 : PageReconstructError::MissingKey(_) | PageReconstructError::WalRedo(_),
5867 : )
5868 : )
5869 : )
5870 0 : }
5871 : }
5872 :
5873 : impl From<OffloadError> for CompactionError {
5874 0 : fn from(e: OffloadError) -> Self {
5875 0 : match e {
5876 0 : OffloadError::Cancelled => Self::ShuttingDown,
5877 0 : _ => Self::Offload(e),
5878 : }
5879 0 : }
5880 : }
5881 :
5882 : impl From<super::upload_queue::NotInitialized> for CompactionError {
5883 0 : fn from(value: super::upload_queue::NotInitialized) -> Self {
5884 0 : match value {
5885 : super::upload_queue::NotInitialized::Uninitialized => {
5886 0 : CompactionError::Other(anyhow::anyhow!(value))
5887 : }
5888 : super::upload_queue::NotInitialized::ShuttingDown
5889 0 : | super::upload_queue::NotInitialized::Stopped => CompactionError::ShuttingDown,
5890 : }
5891 0 : }
5892 : }
5893 :
5894 : impl From<super::storage_layer::layer::DownloadError> for CompactionError {
5895 0 : fn from(e: super::storage_layer::layer::DownloadError) -> Self {
5896 0 : match e {
5897 : super::storage_layer::layer::DownloadError::TimelineShutdown
5898 : | super::storage_layer::layer::DownloadError::DownloadCancelled => {
5899 0 : CompactionError::ShuttingDown
5900 : }
5901 : super::storage_layer::layer::DownloadError::ContextAndConfigReallyDeniesDownloads
5902 : | super::storage_layer::layer::DownloadError::DownloadRequired
5903 : | super::storage_layer::layer::DownloadError::NotFile(_)
5904 : | super::storage_layer::layer::DownloadError::DownloadFailed
5905 : | super::storage_layer::layer::DownloadError::PreStatFailed(_) => {
5906 0 : CompactionError::Other(anyhow::anyhow!(e))
5907 : }
5908 : #[cfg(test)]
5909 : super::storage_layer::layer::DownloadError::Failpoint(_) => {
5910 0 : CompactionError::Other(anyhow::anyhow!(e))
5911 : }
5912 : }
5913 0 : }
5914 : }
5915 :
5916 : impl From<layer_manager::Shutdown> for CompactionError {
5917 0 : fn from(_: layer_manager::Shutdown) -> Self {
5918 0 : CompactionError::ShuttingDown
5919 0 : }
5920 : }
5921 :
5922 : #[serde_as]
5923 1176 : #[derive(serde::Serialize)]
5924 : struct RecordedDuration(#[serde_as(as = "serde_with::DurationMicroSeconds")] Duration);
5925 :
5926 : #[derive(Default)]
5927 : enum DurationRecorder {
5928 : #[default]
5929 : NotStarted,
5930 : Recorded(RecordedDuration, tokio::time::Instant),
5931 : }
5932 :
5933 : impl DurationRecorder {
5934 3024 : fn till_now(&self) -> DurationRecorder {
5935 3024 : match self {
5936 : DurationRecorder::NotStarted => {
5937 0 : panic!("must only call on recorded measurements")
5938 : }
5939 3024 : DurationRecorder::Recorded(_, ended) => {
5940 3024 : let now = tokio::time::Instant::now();
5941 3024 : DurationRecorder::Recorded(RecordedDuration(now - *ended), now)
5942 3024 : }
5943 3024 : }
5944 3024 : }
5945 1176 : fn into_recorded(self) -> Option<RecordedDuration> {
5946 1176 : match self {
5947 0 : DurationRecorder::NotStarted => None,
5948 1176 : DurationRecorder::Recorded(recorded, _) => Some(recorded),
5949 : }
5950 1176 : }
5951 : }
5952 :
5953 : /// Descriptor for a delta layer used in testing infra. The start/end key/lsn range of the
5954 : /// delta layer might be different from the min/max key/lsn in the delta layer. Therefore,
5955 : /// the layer descriptor requires the user to provide the ranges, which should cover all
5956 : /// keys specified in the `data` field.
5957 : #[cfg(test)]
5958 : #[derive(Clone)]
5959 : pub struct DeltaLayerTestDesc {
5960 : pub lsn_range: Range<Lsn>,
5961 : pub key_range: Range<Key>,
5962 : pub data: Vec<(Key, Lsn, Value)>,
5963 : }
5964 :
5965 : #[cfg(test)]
5966 : #[derive(Clone)]
5967 : pub struct InMemoryLayerTestDesc {
5968 : pub lsn_range: Range<Lsn>,
5969 : pub data: Vec<(Key, Lsn, Value)>,
5970 : pub is_open: bool,
5971 : }
5972 :
5973 : #[cfg(test)]
5974 : impl DeltaLayerTestDesc {
5975 24 : pub fn new(lsn_range: Range<Lsn>, key_range: Range<Key>, data: Vec<(Key, Lsn, Value)>) -> Self {
5976 24 : Self {
5977 24 : lsn_range,
5978 24 : key_range,
5979 24 : data,
5980 24 : }
5981 24 : }
5982 :
5983 540 : pub fn new_with_inferred_key_range(
5984 540 : lsn_range: Range<Lsn>,
5985 540 : data: Vec<(Key, Lsn, Value)>,
5986 540 : ) -> Self {
5987 1392 : let key_min = data.iter().map(|(key, _, _)| key).min().unwrap();
5988 1392 : let key_max = data.iter().map(|(key, _, _)| key).max().unwrap();
5989 540 : Self {
5990 540 : key_range: (*key_min)..(key_max.next()),
5991 540 : lsn_range,
5992 540 : data,
5993 540 : }
5994 540 : }
5995 :
5996 60 : pub(crate) fn layer_name(&self) -> LayerName {
5997 60 : LayerName::Delta(super::storage_layer::DeltaLayerName {
5998 60 : key_range: self.key_range.clone(),
5999 60 : lsn_range: self.lsn_range.clone(),
6000 60 : })
6001 60 : }
6002 : }
6003 :
6004 : impl Timeline {
6005 168 : async fn finish_compact_batch(
6006 168 : self: &Arc<Self>,
6007 168 : new_deltas: &[ResidentLayer],
6008 168 : new_images: &[ResidentLayer],
6009 168 : layers_to_remove: &[Layer],
6010 168 : ) -> Result<(), CompactionError> {
6011 168 : let mut guard = tokio::select! {
6012 168 : guard = self.layers.write() => guard,
6013 168 : _ = self.cancel.cancelled() => {
6014 0 : return Err(CompactionError::ShuttingDown);
6015 : }
6016 : };
6017 :
6018 168 : let mut duplicated_layers = HashSet::new();
6019 168 :
6020 168 : let mut insert_layers = Vec::with_capacity(new_deltas.len());
6021 :
6022 2016 : for l in new_deltas {
6023 1848 : if guard.contains(l.as_ref()) {
6024 : // expected in tests
6025 0 : tracing::error!(layer=%l, "duplicated L1 layer");
6026 :
6027 : // good ways to cause a duplicate: we repeatedly error after taking the writelock
6028 : // `guard` on self.layers. as of writing this, there are no error returns except
6029 : // for compact_level0_phase1 creating an L0, which does not happen in practice
6030 : // because we have not implemented L0 => L0 compaction.
6031 0 : duplicated_layers.insert(l.layer_desc().key());
6032 1848 : } else if LayerMap::is_l0(&l.layer_desc().key_range, l.layer_desc().is_delta) {
6033 0 : return Err(CompactionError::Other(anyhow::anyhow!(
6034 0 : "compaction generates a L0 layer file as output, which will cause infinite compaction."
6035 0 : )));
6036 1848 : } else {
6037 1848 : insert_layers.push(l.clone());
6038 1848 : }
6039 : }
6040 :
6041 : // only remove those inputs which were not outputs
6042 168 : let remove_layers: Vec<Layer> = layers_to_remove
6043 168 : .iter()
6044 2412 : .filter(|l| !duplicated_layers.contains(&l.layer_desc().key()))
6045 168 : .cloned()
6046 168 : .collect();
6047 168 :
6048 168 : if !new_images.is_empty() {
6049 0 : guard
6050 0 : .open_mut()?
6051 0 : .track_new_image_layers(new_images, &self.metrics);
6052 168 : }
6053 :
6054 168 : guard
6055 168 : .open_mut()?
6056 168 : .finish_compact_l0(&remove_layers, &insert_layers, &self.metrics);
6057 168 :
6058 168 : self.remote_client
6059 168 : .schedule_compaction_update(&remove_layers, new_deltas)?;
6060 :
6061 168 : drop_wlock(guard);
6062 168 :
6063 168 : Ok(())
6064 168 : }
6065 :
6066 0 : async fn rewrite_layers(
6067 0 : self: &Arc<Self>,
6068 0 : mut replace_layers: Vec<(Layer, ResidentLayer)>,
6069 0 : mut drop_layers: Vec<Layer>,
6070 0 : ) -> Result<(), CompactionError> {
6071 0 : let mut guard = self.layers.write().await;
6072 :
6073 : // Trim our lists in case our caller (compaction) raced with someone else (GC) removing layers: we want
6074 : // to avoid double-removing, and avoid rewriting something that was removed.
6075 0 : replace_layers.retain(|(l, _)| guard.contains(l));
6076 0 : drop_layers.retain(|l| guard.contains(l));
6077 0 :
6078 0 : guard
6079 0 : .open_mut()?
6080 0 : .rewrite_layers(&replace_layers, &drop_layers, &self.metrics);
6081 0 :
6082 0 : let upload_layers: Vec<_> = replace_layers.into_iter().map(|r| r.1).collect();
6083 0 :
6084 0 : self.remote_client
6085 0 : .schedule_compaction_update(&drop_layers, &upload_layers)?;
6086 :
6087 0 : Ok(())
6088 0 : }
6089 :
6090 : /// Schedules the uploads of the given image layers
6091 2184 : fn upload_new_image_layers(
6092 2184 : self: &Arc<Self>,
6093 2184 : new_images: impl IntoIterator<Item = ResidentLayer>,
6094 2184 : ) -> Result<(), super::upload_queue::NotInitialized> {
6095 2340 : for layer in new_images {
6096 156 : self.remote_client.schedule_layer_file_upload(layer)?;
6097 : }
6098 : // should any new image layer been created, not uploading index_part will
6099 : // result in a mismatch between remote_physical_size and layermap calculated
6100 : // size, which will fail some tests, but should not be an issue otherwise.
6101 2184 : self.remote_client
6102 2184 : .schedule_index_upload_for_file_changes()?;
6103 2184 : Ok(())
6104 2184 : }
6105 :
6106 0 : async fn find_gc_time_cutoff(
6107 0 : &self,
6108 0 : now: SystemTime,
6109 0 : pitr: Duration,
6110 0 : cancel: &CancellationToken,
6111 0 : ctx: &RequestContext,
6112 0 : ) -> Result<Option<Lsn>, PageReconstructError> {
6113 0 : debug_assert_current_span_has_tenant_and_timeline_id();
6114 0 : if self.shard_identity.is_shard_zero() {
6115 : // Shard Zero has SLRU data and can calculate the PITR time -> LSN mapping itself
6116 0 : let time_range = if pitr == Duration::ZERO {
6117 0 : humantime::parse_duration(DEFAULT_PITR_INTERVAL).expect("constant is invalid")
6118 : } else {
6119 0 : pitr
6120 : };
6121 :
6122 : // If PITR is so large or `now` is so small that this underflows, we will retain no history (highly unexpected case)
6123 0 : let time_cutoff = now.checked_sub(time_range).unwrap_or(now);
6124 0 : let timestamp = to_pg_timestamp(time_cutoff);
6125 :
6126 0 : let time_cutoff = match self.find_lsn_for_timestamp(timestamp, cancel, ctx).await? {
6127 0 : LsnForTimestamp::Present(lsn) => Some(lsn),
6128 0 : LsnForTimestamp::Future(lsn) => {
6129 0 : // The timestamp is in the future. That sounds impossible,
6130 0 : // but what it really means is that there hasn't been
6131 0 : // any commits since the cutoff timestamp.
6132 0 : //
6133 0 : // In this case we should use the LSN of the most recent commit,
6134 0 : // which is implicitly the last LSN in the log.
6135 0 : debug!("future({})", lsn);
6136 0 : Some(self.get_last_record_lsn())
6137 : }
6138 0 : LsnForTimestamp::Past(lsn) => {
6139 0 : debug!("past({})", lsn);
6140 0 : None
6141 : }
6142 0 : LsnForTimestamp::NoData(lsn) => {
6143 0 : debug!("nodata({})", lsn);
6144 0 : None
6145 : }
6146 : };
6147 0 : Ok(time_cutoff)
6148 : } else {
6149 : // Shards other than shard zero cannot do timestamp->lsn lookups, and must instead learn their GC cutoff
6150 : // from shard zero's index. The index doesn't explicitly tell us the time cutoff, but we may assume that
6151 : // the point up to which shard zero's last_gc_cutoff has advanced will either be the time cutoff, or a
6152 : // space cutoff that we would also have respected ourselves.
6153 0 : match self
6154 0 : .remote_client
6155 0 : .download_foreign_index(ShardNumber(0), cancel)
6156 0 : .await
6157 : {
6158 0 : Ok((index_part, index_generation, _index_mtime)) => {
6159 0 : tracing::info!(
6160 0 : "GC loaded shard zero metadata (gen {index_generation:?}): latest_gc_cutoff_lsn: {}",
6161 0 : index_part.metadata.latest_gc_cutoff_lsn()
6162 : );
6163 0 : Ok(Some(index_part.metadata.latest_gc_cutoff_lsn()))
6164 : }
6165 : Err(DownloadError::NotFound) => {
6166 : // This is unexpected, because during timeline creations shard zero persists to remote
6167 : // storage before other shards are called, and during timeline deletion non-zeroth shards are
6168 : // deleted before the zeroth one. However, it should be harmless: if we somehow end up in this
6169 : // state, then shard zero should _eventually_ write an index when it GCs.
6170 0 : tracing::warn!("GC couldn't find shard zero's index for timeline");
6171 0 : Ok(None)
6172 : }
6173 0 : Err(e) => {
6174 0 : // TODO: this function should return a different error type than page reconstruct error
6175 0 : Err(PageReconstructError::Other(anyhow::anyhow!(e)))
6176 : }
6177 : }
6178 :
6179 : // TODO: after reading shard zero's GC cutoff, we should validate its generation with the storage
6180 : // controller. Otherwise, it is possible that we see the GC cutoff go backwards while shard zero
6181 : // is going through a migration if we read the old location's index and it has GC'd ahead of the
6182 : // new location. This is legal in principle, but problematic in practice because it might result
6183 : // in a timeline creation succeeding on shard zero ('s new location) but then failing on other shards
6184 : // because they have GC'd past the branch point.
6185 : }
6186 0 : }
6187 :
6188 : /// Find the Lsns above which layer files need to be retained on
6189 : /// garbage collection.
6190 : ///
6191 : /// We calculate two cutoffs, one based on time and one based on WAL size. `pitr`
6192 : /// controls the time cutoff (or ZERO to disable time-based retention), and `space_cutoff` controls
6193 : /// the space-based retention.
6194 : ///
6195 : /// This function doesn't simply to calculate time & space based retention: it treats time-based
6196 : /// retention as authoritative if enabled, and falls back to space-based retention if calculating
6197 : /// the LSN for a time point isn't possible. Therefore the GcCutoffs::horizon in the response might
6198 : /// be different to the `space_cutoff` input. Callers should treat the min() of the two cutoffs
6199 : /// in the response as the GC cutoff point for the timeline.
6200 : #[instrument(skip_all, fields(timeline_id=%self.timeline_id))]
6201 : pub(super) async fn find_gc_cutoffs(
6202 : &self,
6203 : now: SystemTime,
6204 : space_cutoff: Lsn,
6205 : pitr: Duration,
6206 : cancel: &CancellationToken,
6207 : ctx: &RequestContext,
6208 : ) -> Result<GcCutoffs, PageReconstructError> {
6209 : let _timer = self
6210 : .metrics
6211 : .find_gc_cutoffs_histo
6212 : .start_timer()
6213 : .record_on_drop();
6214 :
6215 : pausable_failpoint!("Timeline::find_gc_cutoffs-pausable");
6216 :
6217 : if cfg!(test) {
6218 : // Unit tests which specify zero PITR interval expect to avoid doing any I/O for timestamp lookup
6219 : if pitr == Duration::ZERO {
6220 : return Ok(GcCutoffs {
6221 : time: self.get_last_record_lsn(),
6222 : space: space_cutoff,
6223 : });
6224 : }
6225 : }
6226 :
6227 : // Calculate a time-based limit on how much to retain:
6228 : // - if PITR interval is set, then this is our cutoff.
6229 : // - if PITR interval is not set, then we do a lookup
6230 : // based on DEFAULT_PITR_INTERVAL, so that size-based retention does not result in keeping history around permanently on idle databases.
6231 : let time_cutoff = self.find_gc_time_cutoff(now, pitr, cancel, ctx).await?;
6232 :
6233 : Ok(match (pitr, time_cutoff) {
6234 : (Duration::ZERO, Some(time_cutoff)) => {
6235 : // PITR is not set. Retain the size-based limit, or the default time retention,
6236 : // whichever requires less data.
6237 : GcCutoffs {
6238 : time: self.get_last_record_lsn(),
6239 : space: std::cmp::max(time_cutoff, space_cutoff),
6240 : }
6241 : }
6242 : (Duration::ZERO, None) => {
6243 : // PITR is not set, and time lookup failed
6244 : GcCutoffs {
6245 : time: self.get_last_record_lsn(),
6246 : space: space_cutoff,
6247 : }
6248 : }
6249 : (_, None) => {
6250 : // PITR interval is set & we didn't look up a timestamp successfully. Conservatively assume PITR
6251 : // cannot advance beyond what was already GC'd, and respect space-based retention
6252 : GcCutoffs {
6253 : time: *self.get_applied_gc_cutoff_lsn(),
6254 : space: space_cutoff,
6255 : }
6256 : }
6257 : (_, Some(time_cutoff)) => {
6258 : // PITR interval is set and we looked up timestamp successfully. Ignore
6259 : // size based retention and make time cutoff authoritative
6260 : GcCutoffs {
6261 : time: time_cutoff,
6262 : space: time_cutoff,
6263 : }
6264 : }
6265 : })
6266 : }
6267 :
6268 : /// Garbage collect layer files on a timeline that are no longer needed.
6269 : ///
6270 : /// Currently, we don't make any attempt at removing unneeded page versions
6271 : /// within a layer file. We can only remove the whole file if it's fully
6272 : /// obsolete.
6273 24 : pub(super) async fn gc(&self) -> Result<GcResult, GcError> {
6274 : // this is most likely the background tasks, but it might be the spawned task from
6275 : // immediate_gc
6276 24 : let _g = tokio::select! {
6277 24 : guard = self.gc_lock.lock() => guard,
6278 24 : _ = self.cancel.cancelled() => return Ok(GcResult::default()),
6279 : };
6280 24 : let timer = self.metrics.garbage_collect_histo.start_timer();
6281 24 :
6282 24 : fail_point!("before-timeline-gc");
6283 24 :
6284 24 : // Is the timeline being deleted?
6285 24 : if self.is_stopping() {
6286 0 : return Err(GcError::TimelineCancelled);
6287 24 : }
6288 24 :
6289 24 : let (space_cutoff, time_cutoff, retain_lsns, max_lsn_with_valid_lease) = {
6290 24 : let gc_info = self.gc_info.read().unwrap();
6291 24 :
6292 24 : let space_cutoff = min(gc_info.cutoffs.space, self.get_disk_consistent_lsn());
6293 24 : let time_cutoff = gc_info.cutoffs.time;
6294 24 : let retain_lsns = gc_info
6295 24 : .retain_lsns
6296 24 : .iter()
6297 24 : .map(|(lsn, _child_id, _is_offloaded)| *lsn)
6298 24 : .collect();
6299 24 :
6300 24 : // Gets the maximum LSN that holds the valid lease.
6301 24 : //
6302 24 : // Caveat: `refresh_gc_info` is in charged of updating the lease map.
6303 24 : // Here, we do not check for stale leases again.
6304 24 : let max_lsn_with_valid_lease = gc_info.leases.last_key_value().map(|(lsn, _)| *lsn);
6305 24 :
6306 24 : (
6307 24 : space_cutoff,
6308 24 : time_cutoff,
6309 24 : retain_lsns,
6310 24 : max_lsn_with_valid_lease,
6311 24 : )
6312 24 : };
6313 24 :
6314 24 : let mut new_gc_cutoff = Lsn::min(space_cutoff, time_cutoff);
6315 24 : let standby_horizon = self.standby_horizon.load();
6316 24 : // Hold GC for the standby, but as a safety guard do it only within some
6317 24 : // reasonable lag.
6318 24 : if standby_horizon != Lsn::INVALID {
6319 0 : if let Some(standby_lag) = new_gc_cutoff.checked_sub(standby_horizon) {
6320 : const MAX_ALLOWED_STANDBY_LAG: u64 = 10u64 << 30; // 10 GB
6321 0 : if standby_lag.0 < MAX_ALLOWED_STANDBY_LAG {
6322 0 : new_gc_cutoff = Lsn::min(standby_horizon, new_gc_cutoff);
6323 0 : trace!("holding off GC for standby apply LSN {}", standby_horizon);
6324 : } else {
6325 0 : warn!(
6326 0 : "standby is lagging for more than {}MB, not holding gc for it",
6327 0 : MAX_ALLOWED_STANDBY_LAG / 1024 / 1024
6328 : )
6329 : }
6330 0 : }
6331 24 : }
6332 :
6333 : // Reset standby horizon to ignore it if it is not updated till next GC.
6334 : // It is an easy way to unset it when standby disappears without adding
6335 : // more conf options.
6336 24 : self.standby_horizon.store(Lsn::INVALID);
6337 24 : self.metrics
6338 24 : .standby_horizon_gauge
6339 24 : .set(Lsn::INVALID.0 as i64);
6340 :
6341 24 : let res = self
6342 24 : .gc_timeline(
6343 24 : space_cutoff,
6344 24 : time_cutoff,
6345 24 : retain_lsns,
6346 24 : max_lsn_with_valid_lease,
6347 24 : new_gc_cutoff,
6348 24 : )
6349 24 : .instrument(
6350 24 : info_span!("gc_timeline", timeline_id = %self.timeline_id, cutoff = %new_gc_cutoff),
6351 : )
6352 24 : .await?;
6353 :
6354 : // only record successes
6355 24 : timer.stop_and_record();
6356 24 :
6357 24 : Ok(res)
6358 24 : }
6359 :
6360 24 : async fn gc_timeline(
6361 24 : &self,
6362 24 : space_cutoff: Lsn,
6363 24 : time_cutoff: Lsn,
6364 24 : retain_lsns: Vec<Lsn>,
6365 24 : max_lsn_with_valid_lease: Option<Lsn>,
6366 24 : new_gc_cutoff: Lsn,
6367 24 : ) -> Result<GcResult, GcError> {
6368 24 : // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc
6369 24 :
6370 24 : let now = SystemTime::now();
6371 24 : let mut result: GcResult = GcResult::default();
6372 24 :
6373 24 : // Nothing to GC. Return early.
6374 24 : let latest_gc_cutoff = *self.get_applied_gc_cutoff_lsn();
6375 24 : if latest_gc_cutoff >= new_gc_cutoff {
6376 0 : info!(
6377 0 : "Nothing to GC: new_gc_cutoff_lsn {new_gc_cutoff}, latest_gc_cutoff_lsn {latest_gc_cutoff}",
6378 : );
6379 0 : return Ok(result);
6380 24 : }
6381 :
6382 : // We need to ensure that no one tries to read page versions or create
6383 : // branches at a point before latest_gc_cutoff_lsn. See branch_timeline()
6384 : // for details. This will block until the old value is no longer in use.
6385 : //
6386 : // The GC cutoff should only ever move forwards.
6387 24 : let waitlist = {
6388 24 : let write_guard = self.applied_gc_cutoff_lsn.lock_for_write();
6389 24 : if *write_guard > new_gc_cutoff {
6390 0 : return Err(GcError::BadLsn {
6391 0 : why: format!(
6392 0 : "Cannot move GC cutoff LSN backwards (was {}, new {})",
6393 0 : *write_guard, new_gc_cutoff
6394 0 : ),
6395 0 : });
6396 24 : }
6397 24 :
6398 24 : write_guard.store_and_unlock(new_gc_cutoff)
6399 24 : };
6400 24 : waitlist.wait().await;
6401 :
6402 24 : info!("GC starting");
6403 :
6404 24 : debug!("retain_lsns: {:?}", retain_lsns);
6405 :
6406 24 : let mut layers_to_remove = Vec::new();
6407 :
6408 : // Scan all layers in the timeline (remote or on-disk).
6409 : //
6410 : // Garbage collect the layer if all conditions are satisfied:
6411 : // 1. it is older than cutoff LSN;
6412 : // 2. it is older than PITR interval;
6413 : // 3. it doesn't need to be retained for 'retain_lsns';
6414 : // 4. it does not need to be kept for LSNs holding valid leases.
6415 : // 5. newer on-disk image layers cover the layer's whole key range
6416 : //
6417 : // TODO holding a write lock is too agressive and avoidable
6418 24 : let mut guard = self.layers.write().await;
6419 24 : let layers = guard.layer_map()?;
6420 144 : 'outer: for l in layers.iter_historic_layers() {
6421 144 : result.layers_total += 1;
6422 144 :
6423 144 : // 1. Is it newer than GC horizon cutoff point?
6424 144 : if l.get_lsn_range().end > space_cutoff {
6425 12 : info!(
6426 0 : "keeping {} because it's newer than space_cutoff {}",
6427 0 : l.layer_name(),
6428 : space_cutoff,
6429 : );
6430 12 : result.layers_needed_by_cutoff += 1;
6431 12 : continue 'outer;
6432 132 : }
6433 132 :
6434 132 : // 2. It is newer than PiTR cutoff point?
6435 132 : if l.get_lsn_range().end > time_cutoff {
6436 0 : info!(
6437 0 : "keeping {} because it's newer than time_cutoff {}",
6438 0 : l.layer_name(),
6439 : time_cutoff,
6440 : );
6441 0 : result.layers_needed_by_pitr += 1;
6442 0 : continue 'outer;
6443 132 : }
6444 :
6445 : // 3. Is it needed by a child branch?
6446 : // NOTE With that we would keep data that
6447 : // might be referenced by child branches forever.
6448 : // We can track this in child timeline GC and delete parent layers when
6449 : // they are no longer needed. This might be complicated with long inheritance chains.
6450 : //
6451 : // TODO Vec is not a great choice for `retain_lsns`
6452 132 : for retain_lsn in &retain_lsns {
6453 : // start_lsn is inclusive
6454 0 : if &l.get_lsn_range().start <= retain_lsn {
6455 0 : info!(
6456 0 : "keeping {} because it's still might be referenced by child branch forked at {} is_dropped: xx is_incremental: {}",
6457 0 : l.layer_name(),
6458 0 : retain_lsn,
6459 0 : l.is_incremental(),
6460 : );
6461 0 : result.layers_needed_by_branches += 1;
6462 0 : continue 'outer;
6463 0 : }
6464 : }
6465 :
6466 : // 4. Is there a valid lease that requires us to keep this layer?
6467 132 : if let Some(lsn) = &max_lsn_with_valid_lease {
6468 : // keep if layer start <= any of the lease
6469 108 : if &l.get_lsn_range().start <= lsn {
6470 84 : info!(
6471 0 : "keeping {} because there is a valid lease preventing GC at {}",
6472 0 : l.layer_name(),
6473 : lsn,
6474 : );
6475 84 : result.layers_needed_by_leases += 1;
6476 84 : continue 'outer;
6477 24 : }
6478 24 : }
6479 :
6480 : // 5. Is there a later on-disk layer for this relation?
6481 : //
6482 : // The end-LSN is exclusive, while disk_consistent_lsn is
6483 : // inclusive. For example, if disk_consistent_lsn is 100, it is
6484 : // OK for a delta layer to have end LSN 101, but if the end LSN
6485 : // is 102, then it might not have been fully flushed to disk
6486 : // before crash.
6487 : //
6488 : // For example, imagine that the following layers exist:
6489 : //
6490 : // 1000 - image (A)
6491 : // 1000-2000 - delta (B)
6492 : // 2000 - image (C)
6493 : // 2000-3000 - delta (D)
6494 : // 3000 - image (E)
6495 : //
6496 : // If GC horizon is at 2500, we can remove layers A and B, but
6497 : // we cannot remove C, even though it's older than 2500, because
6498 : // the delta layer 2000-3000 depends on it.
6499 48 : if !layers
6500 48 : .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff))
6501 : {
6502 36 : info!("keeping {} because it is the latest layer", l.layer_name());
6503 36 : result.layers_not_updated += 1;
6504 36 : continue 'outer;
6505 12 : }
6506 12 :
6507 12 : // We didn't find any reason to keep this file, so remove it.
6508 12 : info!(
6509 0 : "garbage collecting {} is_dropped: xx is_incremental: {}",
6510 0 : l.layer_name(),
6511 0 : l.is_incremental(),
6512 : );
6513 12 : layers_to_remove.push(l);
6514 : }
6515 :
6516 24 : if !layers_to_remove.is_empty() {
6517 : // Persist the new GC cutoff value before we actually remove anything.
6518 : // This unconditionally schedules also an index_part.json update, even though, we will
6519 : // be doing one a bit later with the unlinked gc'd layers.
6520 12 : let disk_consistent_lsn = self.disk_consistent_lsn.load();
6521 12 : self.schedule_uploads(disk_consistent_lsn, None)
6522 12 : .map_err(|e| {
6523 0 : if self.cancel.is_cancelled() {
6524 0 : GcError::TimelineCancelled
6525 : } else {
6526 0 : GcError::Remote(e)
6527 : }
6528 12 : })?;
6529 :
6530 12 : let gc_layers = layers_to_remove
6531 12 : .iter()
6532 12 : .map(|x| guard.get_from_desc(x))
6533 12 : .collect::<Vec<Layer>>();
6534 12 :
6535 12 : result.layers_removed = gc_layers.len() as u64;
6536 12 :
6537 12 : self.remote_client.schedule_gc_update(&gc_layers)?;
6538 :
6539 12 : guard.open_mut()?.finish_gc_timeline(&gc_layers);
6540 12 :
6541 12 : #[cfg(feature = "testing")]
6542 12 : {
6543 12 : result.doomed_layers = gc_layers;
6544 12 : }
6545 12 : }
6546 :
6547 24 : info!(
6548 0 : "GC completed removing {} layers, cutoff {}",
6549 : result.layers_removed, new_gc_cutoff
6550 : );
6551 :
6552 24 : result.elapsed = now.elapsed().unwrap_or(Duration::ZERO);
6553 24 : Ok(result)
6554 24 : }
6555 :
6556 : /// Reconstruct a value, using the given base image and WAL records in 'data'.
6557 4408713 : async fn reconstruct_value(
6558 4408713 : &self,
6559 4408713 : key: Key,
6560 4408713 : request_lsn: Lsn,
6561 4408713 : mut data: ValueReconstructState,
6562 4408713 : redo_attempt_type: RedoAttemptType,
6563 4408713 : ) -> Result<Bytes, PageReconstructError> {
6564 4408713 : // Perform WAL redo if needed
6565 4408713 : data.records.reverse();
6566 :
6567 4408713 : let fire_critical_error = match redo_attempt_type {
6568 4392717 : RedoAttemptType::ReadPage => true,
6569 0 : RedoAttemptType::LegacyCompaction => true,
6570 15996 : RedoAttemptType::GcCompaction => false,
6571 : };
6572 :
6573 : // If we have a page image, and no WAL, we're all set
6574 4408713 : if data.records.is_empty() {
6575 4087425 : if let Some((img_lsn, img)) = &data.img {
6576 4087425 : trace!(
6577 0 : "found page image for key {} at {}, no WAL redo required, req LSN {}",
6578 : key, img_lsn, request_lsn,
6579 : );
6580 4087425 : Ok(img.clone())
6581 : } else {
6582 0 : Err(PageReconstructError::from(anyhow!(
6583 0 : "base image for {key} at {request_lsn} not found"
6584 0 : )))
6585 : }
6586 : } else {
6587 : // We need to do WAL redo.
6588 : //
6589 : // If we don't have a base image, then the oldest WAL record better initialize
6590 : // the page
6591 321288 : if data.img.is_none() && !data.records.first().unwrap().1.will_init() {
6592 0 : Err(PageReconstructError::from(anyhow!(
6593 0 : "Base image for {} at {} not found, but got {} WAL records",
6594 0 : key,
6595 0 : request_lsn,
6596 0 : data.records.len()
6597 0 : )))
6598 : } else {
6599 321288 : if data.img.is_some() {
6600 156348 : trace!(
6601 0 : "found {} WAL records and a base image for {} at {}, performing WAL redo",
6602 0 : data.records.len(),
6603 : key,
6604 : request_lsn
6605 : );
6606 : } else {
6607 164940 : trace!(
6608 0 : "found {} WAL records that will init the page for {} at {}, performing WAL redo",
6609 0 : data.records.len(),
6610 : key,
6611 : request_lsn
6612 : );
6613 : };
6614 321288 : let res = self
6615 321288 : .walredo_mgr
6616 321288 : .as_ref()
6617 321288 : .context("timeline has no walredo manager")
6618 321288 : .map_err(PageReconstructError::WalRedo)?
6619 321288 : .request_redo(
6620 321288 : key,
6621 321288 : request_lsn,
6622 321288 : data.img,
6623 321288 : data.records,
6624 321288 : self.pg_version,
6625 321288 : redo_attempt_type,
6626 321288 : )
6627 321288 : .await;
6628 321276 : let img = match res {
6629 321276 : Ok(img) => img,
6630 0 : Err(walredo::Error::Cancelled) => return Err(PageReconstructError::Cancelled),
6631 12 : Err(walredo::Error::Other(err)) => {
6632 12 : if fire_critical_error {
6633 0 : critical!("walredo failure during page reconstruction: {err:?}");
6634 12 : }
6635 12 : return Err(PageReconstructError::WalRedo(
6636 12 : err.context("reconstruct a page image"),
6637 12 : ));
6638 : }
6639 : };
6640 321276 : Ok(img)
6641 : }
6642 : }
6643 4408713 : }
6644 :
6645 0 : pub(crate) async fn spawn_download_all_remote_layers(
6646 0 : self: Arc<Self>,
6647 0 : request: DownloadRemoteLayersTaskSpawnRequest,
6648 0 : ctx: &RequestContext,
6649 0 : ) -> Result<DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskInfo> {
6650 : use pageserver_api::models::DownloadRemoteLayersTaskState;
6651 :
6652 : // this is not really needed anymore; it has tests which really check the return value from
6653 : // http api. it would be better not to maintain this anymore.
6654 :
6655 0 : let mut status_guard = self.download_all_remote_layers_task_info.write().unwrap();
6656 0 : if let Some(st) = &*status_guard {
6657 0 : match &st.state {
6658 : DownloadRemoteLayersTaskState::Running => {
6659 0 : return Err(st.clone());
6660 : }
6661 : DownloadRemoteLayersTaskState::ShutDown
6662 0 : | DownloadRemoteLayersTaskState::Completed => {
6663 0 : *status_guard = None;
6664 0 : }
6665 : }
6666 0 : }
6667 :
6668 0 : let self_clone = Arc::clone(&self);
6669 0 : let task_ctx = ctx.detached_child(
6670 0 : TaskKind::DownloadAllRemoteLayers,
6671 0 : DownloadBehavior::Download,
6672 0 : );
6673 0 : let task_id = task_mgr::spawn(
6674 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
6675 0 : task_mgr::TaskKind::DownloadAllRemoteLayers,
6676 0 : self.tenant_shard_id,
6677 0 : Some(self.timeline_id),
6678 0 : "download all remote layers task",
6679 0 : async move {
6680 0 : self_clone.download_all_remote_layers(request, &task_ctx).await;
6681 0 : let mut status_guard = self_clone.download_all_remote_layers_task_info.write().unwrap();
6682 0 : match &mut *status_guard {
6683 : None => {
6684 0 : warn!("tasks status is supposed to be Some(), since we are running");
6685 : }
6686 0 : Some(st) => {
6687 0 : let exp_task_id = format!("{}", task_mgr::current_task_id().unwrap());
6688 0 : if st.task_id != exp_task_id {
6689 0 : warn!("task id changed while we were still running, expecting {} but have {}", exp_task_id, st.task_id);
6690 0 : } else {
6691 0 : st.state = DownloadRemoteLayersTaskState::Completed;
6692 0 : }
6693 : }
6694 : };
6695 0 : Ok(())
6696 0 : }
6697 0 : .instrument(info_span!(parent: None, "download_all_remote_layers", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
6698 : );
6699 :
6700 0 : let initial_info = DownloadRemoteLayersTaskInfo {
6701 0 : task_id: format!("{task_id}"),
6702 0 : state: DownloadRemoteLayersTaskState::Running,
6703 0 : total_layer_count: 0,
6704 0 : successful_download_count: 0,
6705 0 : failed_download_count: 0,
6706 0 : };
6707 0 : *status_guard = Some(initial_info.clone());
6708 0 :
6709 0 : Ok(initial_info)
6710 0 : }
6711 :
6712 0 : async fn download_all_remote_layers(
6713 0 : self: &Arc<Self>,
6714 0 : request: DownloadRemoteLayersTaskSpawnRequest,
6715 0 : ctx: &RequestContext,
6716 0 : ) {
6717 : use pageserver_api::models::DownloadRemoteLayersTaskState;
6718 :
6719 0 : let remaining = {
6720 0 : let guard = self.layers.read().await;
6721 0 : let Ok(lm) = guard.layer_map() else {
6722 : // technically here we could look into iterating accessible layers, but downloading
6723 : // all layers of a shutdown timeline makes no sense regardless.
6724 0 : tracing::info!("attempted to download all layers of shutdown timeline");
6725 0 : return;
6726 : };
6727 0 : lm.iter_historic_layers()
6728 0 : .map(|desc| guard.get_from_desc(&desc))
6729 0 : .collect::<Vec<_>>()
6730 0 : };
6731 0 : let total_layer_count = remaining.len();
6732 :
6733 : macro_rules! lock_status {
6734 : ($st:ident) => {
6735 : let mut st = self.download_all_remote_layers_task_info.write().unwrap();
6736 : let st = st
6737 : .as_mut()
6738 : .expect("this function is only called after the task has been spawned");
6739 : assert_eq!(
6740 : st.task_id,
6741 : format!(
6742 : "{}",
6743 : task_mgr::current_task_id().expect("we run inside a task_mgr task")
6744 : )
6745 : );
6746 : let $st = st;
6747 : };
6748 : }
6749 :
6750 : {
6751 0 : lock_status!(st);
6752 0 : st.total_layer_count = total_layer_count as u64;
6753 0 : }
6754 0 :
6755 0 : let mut remaining = remaining.into_iter();
6756 0 : let mut have_remaining = true;
6757 0 : let mut js = tokio::task::JoinSet::new();
6758 0 :
6759 0 : let cancel = task_mgr::shutdown_token();
6760 0 :
6761 0 : let limit = request.max_concurrent_downloads;
6762 :
6763 : loop {
6764 0 : while js.len() < limit.get() && have_remaining && !cancel.is_cancelled() {
6765 0 : let Some(next) = remaining.next() else {
6766 0 : have_remaining = false;
6767 0 : break;
6768 : };
6769 :
6770 0 : let span = tracing::info_span!("download", layer = %next);
6771 :
6772 0 : let ctx = ctx.attached_child();
6773 0 : js.spawn(
6774 0 : async move {
6775 0 : let res = next.download(&ctx).await;
6776 0 : (next, res)
6777 0 : }
6778 0 : .instrument(span),
6779 0 : );
6780 0 : }
6781 :
6782 0 : while let Some(res) = js.join_next().await {
6783 0 : match res {
6784 : Ok((_, Ok(_))) => {
6785 0 : lock_status!(st);
6786 0 : st.successful_download_count += 1;
6787 : }
6788 0 : Ok((layer, Err(e))) => {
6789 0 : tracing::error!(%layer, "download failed: {e:#}");
6790 0 : lock_status!(st);
6791 0 : st.failed_download_count += 1;
6792 : }
6793 0 : Err(je) if je.is_cancelled() => unreachable!("not used here"),
6794 0 : Err(je) if je.is_panic() => {
6795 0 : lock_status!(st);
6796 0 : st.failed_download_count += 1;
6797 : }
6798 0 : Err(je) => tracing::warn!("unknown joinerror: {je:?}"),
6799 : }
6800 : }
6801 :
6802 0 : if js.is_empty() && (!have_remaining || cancel.is_cancelled()) {
6803 0 : break;
6804 0 : }
6805 : }
6806 :
6807 : {
6808 0 : lock_status!(st);
6809 0 : st.state = DownloadRemoteLayersTaskState::Completed;
6810 : }
6811 0 : }
6812 :
6813 0 : pub(crate) fn get_download_all_remote_layers_task_info(
6814 0 : &self,
6815 0 : ) -> Option<DownloadRemoteLayersTaskInfo> {
6816 0 : self.download_all_remote_layers_task_info
6817 0 : .read()
6818 0 : .unwrap()
6819 0 : .clone()
6820 0 : }
6821 : }
6822 :
6823 : impl Timeline {
6824 : /// Returns non-remote layers for eviction.
6825 0 : pub(crate) async fn get_local_layers_for_disk_usage_eviction(&self) -> DiskUsageEvictionInfo {
6826 0 : let guard = self.layers.read().await;
6827 0 : let mut max_layer_size: Option<u64> = None;
6828 0 :
6829 0 : let resident_layers = guard
6830 0 : .likely_resident_layers()
6831 0 : .map(|layer| {
6832 0 : let file_size = layer.layer_desc().file_size;
6833 0 : max_layer_size = max_layer_size.map_or(Some(file_size), |m| Some(m.max(file_size)));
6834 0 :
6835 0 : let last_activity_ts = layer.latest_activity();
6836 0 :
6837 0 : EvictionCandidate {
6838 0 : layer: layer.to_owned().into(),
6839 0 : last_activity_ts,
6840 0 : relative_last_activity: finite_f32::FiniteF32::ZERO,
6841 0 : visibility: layer.visibility(),
6842 0 : }
6843 0 : })
6844 0 : .collect();
6845 0 :
6846 0 : DiskUsageEvictionInfo {
6847 0 : max_layer_size,
6848 0 : resident_layers,
6849 0 : }
6850 0 : }
6851 :
6852 11376 : pub(crate) fn get_shard_index(&self) -> ShardIndex {
6853 11376 : ShardIndex {
6854 11376 : shard_number: self.tenant_shard_id.shard_number,
6855 11376 : shard_count: self.tenant_shard_id.shard_count,
6856 11376 : }
6857 11376 : }
6858 :
6859 : /// Persistently blocks gc for `Manual` reason.
6860 : ///
6861 : /// Returns true if no such block existed before, false otherwise.
6862 0 : pub(crate) async fn block_gc(&self, tenant: &super::TenantShard) -> anyhow::Result<bool> {
6863 : use crate::tenant::remote_timeline_client::index::GcBlockingReason;
6864 0 : assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
6865 0 : tenant.gc_block.insert(self, GcBlockingReason::Manual).await
6866 0 : }
6867 :
6868 : /// Persistently unblocks gc for `Manual` reason.
6869 0 : pub(crate) async fn unblock_gc(&self, tenant: &super::TenantShard) -> anyhow::Result<()> {
6870 : use crate::tenant::remote_timeline_client::index::GcBlockingReason;
6871 0 : assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
6872 0 : tenant.gc_block.remove(self, GcBlockingReason::Manual).await
6873 0 : }
6874 :
6875 : #[cfg(test)]
6876 372 : pub(super) fn force_advance_lsn(self: &Arc<Timeline>, new_lsn: Lsn) {
6877 372 : self.last_record_lsn.advance(new_lsn);
6878 372 : }
6879 :
6880 : #[cfg(test)]
6881 12 : pub(super) fn force_set_disk_consistent_lsn(&self, new_value: Lsn) {
6882 12 : self.disk_consistent_lsn.store(new_value);
6883 12 : }
6884 :
6885 : /// Force create an image layer and place it into the layer map.
6886 : ///
6887 : /// DO NOT use this function directly. Use [`TenantShard::branch_timeline_test_with_layers`]
6888 : /// or [`TenantShard::create_test_timeline_with_layers`] to ensure all these layers are
6889 : /// placed into the layer map in one run AND be validated.
6890 : #[cfg(test)]
6891 432 : pub(super) async fn force_create_image_layer(
6892 432 : self: &Arc<Timeline>,
6893 432 : lsn: Lsn,
6894 432 : mut images: Vec<(Key, Bytes)>,
6895 432 : check_start_lsn: Option<Lsn>,
6896 432 : ctx: &RequestContext,
6897 432 : ) -> anyhow::Result<()> {
6898 432 : let last_record_lsn = self.get_last_record_lsn();
6899 432 : assert!(
6900 432 : lsn <= last_record_lsn,
6901 0 : "advance last record lsn before inserting a layer, lsn={lsn}, last_record_lsn={last_record_lsn}"
6902 : );
6903 432 : if let Some(check_start_lsn) = check_start_lsn {
6904 432 : assert!(lsn >= check_start_lsn);
6905 0 : }
6906 2880 : images.sort_unstable_by(|(ka, _), (kb, _)| ka.cmp(kb));
6907 432 : let min_key = *images.first().map(|(k, _)| k).unwrap();
6908 432 : let end_key = images.last().map(|(k, _)| k).unwrap().next();
6909 432 : let mut image_layer_writer = ImageLayerWriter::new(
6910 432 : self.conf,
6911 432 : self.timeline_id,
6912 432 : self.tenant_shard_id,
6913 432 : &(min_key..end_key),
6914 432 : lsn,
6915 432 : &self.gate,
6916 432 : self.cancel.clone(),
6917 432 : ctx,
6918 432 : )
6919 432 : .await?;
6920 3744 : for (key, img) in images {
6921 3312 : image_layer_writer.put_image(key, img, ctx).await?;
6922 : }
6923 432 : let (desc, path) = image_layer_writer.finish(ctx).await?;
6924 432 : let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
6925 432 : info!("force created image layer {}", image_layer.local_path());
6926 : {
6927 432 : let mut guard = self.layers.write().await;
6928 432 : guard
6929 432 : .open_mut()
6930 432 : .unwrap()
6931 432 : .force_insert_layer(image_layer.clone());
6932 432 : }
6933 432 :
6934 432 : // Update remote_timeline_client state to reflect existence of this layer
6935 432 : self.remote_client
6936 432 : .schedule_layer_file_upload(image_layer)
6937 432 : .unwrap();
6938 432 :
6939 432 : Ok(())
6940 432 : }
6941 :
6942 : /// Force create a delta layer and place it into the layer map.
6943 : ///
6944 : /// DO NOT use this function directly. Use [`TenantShard::branch_timeline_test_with_layers`]
6945 : /// or [`TenantShard::create_test_timeline_with_layers`] to ensure all these layers are
6946 : /// placed into the layer map in one run AND be validated.
6947 : #[cfg(test)]
6948 600 : pub(super) async fn force_create_delta_layer(
6949 600 : self: &Arc<Timeline>,
6950 600 : mut deltas: DeltaLayerTestDesc,
6951 600 : check_start_lsn: Option<Lsn>,
6952 600 : ctx: &RequestContext,
6953 600 : ) -> anyhow::Result<()> {
6954 600 : let last_record_lsn = self.get_last_record_lsn();
6955 600 : deltas
6956 600 : .data
6957 1492368 : .sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb)));
6958 600 : assert!(deltas.data.first().unwrap().0 >= deltas.key_range.start);
6959 600 : assert!(deltas.data.last().unwrap().0 < deltas.key_range.end);
6960 125568 : for (_, lsn, _) in &deltas.data {
6961 124968 : assert!(deltas.lsn_range.start <= *lsn && *lsn < deltas.lsn_range.end);
6962 : }
6963 600 : assert!(
6964 600 : deltas.lsn_range.end <= last_record_lsn,
6965 0 : "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}",
6966 : deltas.lsn_range.end,
6967 : last_record_lsn
6968 : );
6969 600 : if let Some(check_start_lsn) = check_start_lsn {
6970 600 : assert!(deltas.lsn_range.start >= check_start_lsn);
6971 0 : }
6972 600 : let mut delta_layer_writer = DeltaLayerWriter::new(
6973 600 : self.conf,
6974 600 : self.timeline_id,
6975 600 : self.tenant_shard_id,
6976 600 : deltas.key_range.start,
6977 600 : deltas.lsn_range,
6978 600 : &self.gate,
6979 600 : self.cancel.clone(),
6980 600 : ctx,
6981 600 : )
6982 600 : .await?;
6983 125568 : for (key, lsn, val) in deltas.data {
6984 124968 : delta_layer_writer.put_value(key, lsn, val, ctx).await?;
6985 : }
6986 600 : let (desc, path) = delta_layer_writer.finish(deltas.key_range.end, ctx).await?;
6987 600 : let delta_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
6988 600 : info!("force created delta layer {}", delta_layer.local_path());
6989 : {
6990 600 : let mut guard = self.layers.write().await;
6991 600 : guard
6992 600 : .open_mut()
6993 600 : .unwrap()
6994 600 : .force_insert_layer(delta_layer.clone());
6995 600 : }
6996 600 :
6997 600 : // Update remote_timeline_client state to reflect existence of this layer
6998 600 : self.remote_client
6999 600 : .schedule_layer_file_upload(delta_layer)
7000 600 : .unwrap();
7001 600 :
7002 600 : Ok(())
7003 600 : }
7004 :
7005 : /// Force create an in-memory layer and place them into the layer map.
7006 : #[cfg(test)]
7007 48 : pub(super) async fn force_create_in_memory_layer(
7008 48 : self: &Arc<Timeline>,
7009 48 : mut in_memory: InMemoryLayerTestDesc,
7010 48 : check_start_lsn: Option<Lsn>,
7011 48 : ctx: &RequestContext,
7012 48 : ) -> anyhow::Result<()> {
7013 : use utils::bin_ser::BeSer;
7014 :
7015 : // Validate LSNs
7016 48 : if let Some(check_start_lsn) = check_start_lsn {
7017 48 : assert!(in_memory.lsn_range.start >= check_start_lsn);
7018 0 : }
7019 :
7020 48 : let last_record_lsn = self.get_last_record_lsn();
7021 48 : let layer_end_lsn = if in_memory.is_open {
7022 12 : in_memory
7023 12 : .data
7024 12 : .iter()
7025 120 : .map(|(_key, lsn, _value)| lsn)
7026 12 : .max()
7027 12 : .cloned()
7028 : } else {
7029 36 : Some(in_memory.lsn_range.end)
7030 : };
7031 :
7032 48 : if let Some(end) = layer_end_lsn {
7033 48 : assert!(
7034 48 : end <= last_record_lsn,
7035 0 : "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}",
7036 : end,
7037 : last_record_lsn,
7038 : );
7039 0 : }
7040 :
7041 237840 : in_memory.data.iter().for_each(|(_key, lsn, _value)| {
7042 237840 : assert!(*lsn >= in_memory.lsn_range.start);
7043 237840 : assert!(*lsn < in_memory.lsn_range.end);
7044 237840 : });
7045 48 :
7046 48 : // Build the batch
7047 48 : in_memory
7048 48 : .data
7049 3280608 : .sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb)));
7050 48 :
7051 48 : let data = in_memory
7052 48 : .data
7053 48 : .into_iter()
7054 237840 : .map(|(key, lsn, value)| {
7055 237840 : let value_size = value.serialized_size().unwrap() as usize;
7056 237840 : (key.to_compact(), lsn, value_size, value)
7057 237840 : })
7058 48 : .collect::<Vec<_>>();
7059 48 :
7060 48 : let batch = SerializedValueBatch::from_values(data);
7061 :
7062 : // Create the in-memory layer and write the batch into it
7063 48 : let layer = InMemoryLayer::create(
7064 48 : self.conf,
7065 48 : self.timeline_id,
7066 48 : self.tenant_shard_id,
7067 48 : in_memory.lsn_range.start,
7068 48 : &self.gate,
7069 48 : // TODO: if we ever use this function in production code, we need to pass the real cancellation token
7070 48 : &CancellationToken::new(),
7071 48 : ctx,
7072 48 : )
7073 48 : .await
7074 48 : .unwrap();
7075 48 :
7076 48 : layer.put_batch(batch, ctx).await.unwrap();
7077 48 : if !in_memory.is_open {
7078 36 : layer.freeze(in_memory.lsn_range.end).await;
7079 12 : }
7080 :
7081 48 : info!("force created in-memory layer {:?}", in_memory.lsn_range);
7082 :
7083 : // Link the layer to the layer map
7084 : {
7085 48 : let mut guard = self.layers.write().await;
7086 48 : let layer_map = guard.open_mut().unwrap();
7087 48 : layer_map.force_insert_in_memory_layer(Arc::new(layer));
7088 48 : }
7089 48 :
7090 48 : Ok(())
7091 48 : }
7092 :
7093 : /// Return all keys at the LSN in the image layers
7094 : #[cfg(test)]
7095 36 : pub(crate) async fn inspect_image_layers(
7096 36 : self: &Arc<Timeline>,
7097 36 : lsn: Lsn,
7098 36 : ctx: &RequestContext,
7099 36 : io_concurrency: IoConcurrency,
7100 36 : ) -> anyhow::Result<Vec<(Key, Bytes)>> {
7101 36 : let mut all_data = Vec::new();
7102 36 : let guard = self.layers.read().await;
7103 204 : for layer in guard.layer_map()?.iter_historic_layers() {
7104 204 : if !layer.is_delta() && layer.image_layer_lsn() == lsn {
7105 48 : let layer = guard.get_from_desc(&layer);
7106 48 : let mut reconstruct_data = ValuesReconstructState::new(io_concurrency.clone());
7107 48 : layer
7108 48 : .get_values_reconstruct_data(
7109 48 : KeySpace::single(Key::MIN..Key::MAX),
7110 48 : lsn..Lsn(lsn.0 + 1),
7111 48 : &mut reconstruct_data,
7112 48 : ctx,
7113 48 : )
7114 48 : .await?;
7115 396 : for (k, v) in std::mem::take(&mut reconstruct_data.keys) {
7116 396 : let v = v.collect_pending_ios().await?;
7117 396 : all_data.push((k, v.img.unwrap().1));
7118 : }
7119 156 : }
7120 : }
7121 36 : all_data.sort();
7122 36 : Ok(all_data)
7123 36 : }
7124 :
7125 : /// Get all historic layer descriptors in the layer map
7126 : #[cfg(test)]
7127 144 : pub(crate) async fn inspect_historic_layers(
7128 144 : self: &Arc<Timeline>,
7129 144 : ) -> anyhow::Result<Vec<super::storage_layer::PersistentLayerKey>> {
7130 144 : let mut layers = Vec::new();
7131 144 : let guard = self.layers.read().await;
7132 684 : for layer in guard.layer_map()?.iter_historic_layers() {
7133 684 : layers.push(layer.key());
7134 684 : }
7135 144 : Ok(layers)
7136 144 : }
7137 :
7138 : #[cfg(test)]
7139 60 : pub(crate) fn add_extra_test_dense_keyspace(&self, ks: KeySpace) {
7140 60 : let mut keyspace = self.extra_test_dense_keyspace.load().as_ref().clone();
7141 60 : keyspace.merge(&ks);
7142 60 : self.extra_test_dense_keyspace.store(Arc::new(keyspace));
7143 60 : }
7144 : }
7145 :
7146 : /// Tracking writes ingestion does to a particular in-memory layer.
7147 : ///
7148 : /// Cleared upon freezing a layer.
7149 : pub(crate) struct TimelineWriterState {
7150 : open_layer: Arc<InMemoryLayer>,
7151 : current_size: u64,
7152 : // Previous Lsn which passed through
7153 : prev_lsn: Option<Lsn>,
7154 : // Largest Lsn which passed through the current writer
7155 : max_lsn: Option<Lsn>,
7156 : // Cached details of the last freeze. Avoids going trough the atomic/lock on every put.
7157 : cached_last_freeze_at: Lsn,
7158 : }
7159 :
7160 : impl TimelineWriterState {
7161 7848 : fn new(open_layer: Arc<InMemoryLayer>, current_size: u64, last_freeze_at: Lsn) -> Self {
7162 7848 : Self {
7163 7848 : open_layer,
7164 7848 : current_size,
7165 7848 : prev_lsn: None,
7166 7848 : max_lsn: None,
7167 7848 : cached_last_freeze_at: last_freeze_at,
7168 7848 : }
7169 7848 : }
7170 : }
7171 :
7172 : /// Various functions to mutate the timeline.
7173 : // TODO Currently, Deref is used to allow easy access to read methods from this trait.
7174 : // This is probably considered a bad practice in Rust and should be fixed eventually,
7175 : // but will cause large code changes.
7176 : pub(crate) struct TimelineWriter<'a> {
7177 : tl: &'a Timeline,
7178 : write_guard: tokio::sync::MutexGuard<'a, Option<TimelineWriterState>>,
7179 : }
7180 :
7181 : impl Deref for TimelineWriter<'_> {
7182 : type Target = Timeline;
7183 :
7184 59389992 : fn deref(&self) -> &Self::Target {
7185 59389992 : self.tl
7186 59389992 : }
7187 : }
7188 :
7189 : #[derive(PartialEq)]
7190 : enum OpenLayerAction {
7191 : Roll,
7192 : Open,
7193 : None,
7194 : }
7195 :
7196 : impl TimelineWriter<'_> {
7197 28825464 : async fn handle_open_layer_action(
7198 28825464 : &mut self,
7199 28825464 : at: Lsn,
7200 28825464 : action: OpenLayerAction,
7201 28825464 : ctx: &RequestContext,
7202 28825464 : ) -> anyhow::Result<&Arc<InMemoryLayer>> {
7203 28825464 : match action {
7204 : OpenLayerAction::Roll => {
7205 480 : let freeze_at = self.write_guard.as_ref().unwrap().max_lsn.unwrap();
7206 480 : self.roll_layer(freeze_at).await?;
7207 480 : self.open_layer(at, ctx).await?;
7208 : }
7209 7368 : OpenLayerAction::Open => self.open_layer(at, ctx).await?,
7210 : OpenLayerAction::None => {
7211 28817616 : assert!(self.write_guard.is_some());
7212 : }
7213 : }
7214 :
7215 28825464 : Ok(&self.write_guard.as_ref().unwrap().open_layer)
7216 28825464 : }
7217 :
7218 7848 : async fn open_layer(&mut self, at: Lsn, ctx: &RequestContext) -> anyhow::Result<()> {
7219 7848 : let layer = self
7220 7848 : .tl
7221 7848 : .get_layer_for_write(at, &self.write_guard, ctx)
7222 7848 : .await?;
7223 7848 : let initial_size = layer.size().await?;
7224 :
7225 7848 : let last_freeze_at = self.last_freeze_at.load();
7226 7848 : self.write_guard.replace(TimelineWriterState::new(
7227 7848 : layer,
7228 7848 : initial_size,
7229 7848 : last_freeze_at,
7230 7848 : ));
7231 7848 :
7232 7848 : Ok(())
7233 7848 : }
7234 :
7235 480 : async fn roll_layer(&mut self, freeze_at: Lsn) -> Result<(), FlushLayerError> {
7236 480 : let current_size = self.write_guard.as_ref().unwrap().current_size;
7237 :
7238 : // If layer flushes are backpressured due to compaction not keeping up, wait for the flush
7239 : // to propagate the backpressure up into WAL ingestion.
7240 480 : let l0_count = self
7241 480 : .tl
7242 480 : .layers
7243 480 : .read()
7244 480 : .await
7245 480 : .layer_map()?
7246 480 : .level0_deltas()
7247 480 : .len();
7248 480 : let wait_thresholds = [
7249 480 : self.get_l0_flush_delay_threshold(),
7250 480 : self.get_l0_flush_stall_threshold(),
7251 480 : ];
7252 480 : let wait_threshold = wait_thresholds.into_iter().flatten().min();
7253 :
7254 : // self.write_guard will be taken by the freezing
7255 480 : let flush_id = self
7256 480 : .tl
7257 480 : .freeze_inmem_layer_at(freeze_at, &mut self.write_guard)
7258 480 : .await?;
7259 :
7260 480 : assert!(self.write_guard.is_none());
7261 :
7262 480 : if let Some(wait_threshold) = wait_threshold {
7263 0 : if l0_count >= wait_threshold {
7264 0 : debug!(
7265 0 : "layer roll waiting for flush due to compaction backpressure at {l0_count} L0 layers"
7266 : );
7267 0 : self.tl.wait_flush_completion(flush_id).await?;
7268 0 : }
7269 480 : }
7270 :
7271 480 : if current_size >= self.get_checkpoint_distance() * 2 {
7272 0 : warn!("Flushed oversized open layer with size {}", current_size)
7273 480 : }
7274 :
7275 480 : Ok(())
7276 480 : }
7277 :
7278 28825464 : fn get_open_layer_action(&self, lsn: Lsn, new_value_size: u64) -> OpenLayerAction {
7279 28825464 : let state = &*self.write_guard;
7280 28825464 : let Some(state) = &state else {
7281 7368 : return OpenLayerAction::Open;
7282 : };
7283 :
7284 : #[cfg(feature = "testing")]
7285 28818096 : if state.cached_last_freeze_at < self.tl.last_freeze_at.load() {
7286 : // this check and assertion are not really needed because
7287 : // LayerManager::try_freeze_in_memory_layer will always clear out the
7288 : // TimelineWriterState if something is frozen. however, we can advance last_freeze_at when there
7289 : // is no TimelineWriterState.
7290 0 : assert!(
7291 0 : state.open_layer.end_lsn.get().is_some(),
7292 0 : "our open_layer must be outdated"
7293 : );
7294 :
7295 : // this would be a memory leak waiting to happen because the in-memory layer always has
7296 : // an index
7297 0 : panic!("BUG: TimelineWriterState held on to frozen in-memory layer.");
7298 28818096 : }
7299 28818096 :
7300 28818096 : if state.prev_lsn == Some(lsn) {
7301 : // Rolling mid LSN is not supported by [downstream code].
7302 : // Hence, only roll at LSN boundaries.
7303 : //
7304 : // [downstream code]: https://github.com/neondatabase/neon/pull/7993#discussion_r1633345422
7305 36 : return OpenLayerAction::None;
7306 28818060 : }
7307 28818060 :
7308 28818060 : if state.current_size == 0 {
7309 : // Don't roll empty layers
7310 0 : return OpenLayerAction::None;
7311 28818060 : }
7312 28818060 :
7313 28818060 : if self.tl.should_roll(
7314 28818060 : state.current_size,
7315 28818060 : state.current_size + new_value_size,
7316 28818060 : self.get_checkpoint_distance(),
7317 28818060 : lsn,
7318 28818060 : state.cached_last_freeze_at,
7319 28818060 : state.open_layer.get_opened_at(),
7320 28818060 : ) {
7321 480 : OpenLayerAction::Roll
7322 : } else {
7323 28817580 : OpenLayerAction::None
7324 : }
7325 28825464 : }
7326 :
7327 : /// Put a batch of keys at the specified Lsns.
7328 28825452 : pub(crate) async fn put_batch(
7329 28825452 : &mut self,
7330 28825452 : batch: SerializedValueBatch,
7331 28825452 : ctx: &RequestContext,
7332 28825452 : ) -> anyhow::Result<()> {
7333 28825452 : if !batch.has_data() {
7334 0 : return Ok(());
7335 28825452 : }
7336 28825452 :
7337 28825452 : // In debug builds, assert that we don't write any keys that don't belong to this shard.
7338 28825452 : // We don't assert this in release builds, since key ownership policies may change over
7339 28825452 : // time. Stray keys will be removed during compaction.
7340 28825452 : if cfg!(debug_assertions) {
7341 59369952 : for metadata in &batch.metadata {
7342 30544500 : if let ValueMeta::Serialized(metadata) = metadata {
7343 30544500 : let key = Key::from_compact(metadata.key);
7344 30544500 : assert!(
7345 30544500 : self.shard_identity.is_key_local(&key)
7346 0 : || self.shard_identity.is_key_global(&key),
7347 0 : "key {key} does not belong on shard {}",
7348 0 : self.shard_identity.shard_index()
7349 : );
7350 0 : }
7351 : }
7352 0 : }
7353 :
7354 28825452 : let batch_max_lsn = batch.max_lsn;
7355 28825452 : let buf_size: u64 = batch.buffer_size() as u64;
7356 28825452 :
7357 28825452 : let action = self.get_open_layer_action(batch_max_lsn, buf_size);
7358 28825452 : let layer = self
7359 28825452 : .handle_open_layer_action(batch_max_lsn, action, ctx)
7360 28825452 : .await?;
7361 :
7362 28825452 : let res = layer.put_batch(batch, ctx).await;
7363 :
7364 28825452 : if res.is_ok() {
7365 28825452 : // Update the current size only when the entire write was ok.
7366 28825452 : // In case of failures, we may have had partial writes which
7367 28825452 : // render the size tracking out of sync. That's ok because
7368 28825452 : // the checkpoint distance should be significantly smaller
7369 28825452 : // than the S3 single shot upload limit of 5GiB.
7370 28825452 : let state = self.write_guard.as_mut().unwrap();
7371 28825452 :
7372 28825452 : state.current_size += buf_size;
7373 28825452 : state.prev_lsn = Some(batch_max_lsn);
7374 28825452 : state.max_lsn = std::cmp::max(state.max_lsn, Some(batch_max_lsn));
7375 28825452 : }
7376 :
7377 28825452 : res
7378 28825452 : }
7379 :
7380 : #[cfg(test)]
7381 : /// Test helper, for tests that would like to poke individual values without composing a batch
7382 26340924 : pub(crate) async fn put(
7383 26340924 : &mut self,
7384 26340924 : key: Key,
7385 26340924 : lsn: Lsn,
7386 26340924 : value: &Value,
7387 26340924 : ctx: &RequestContext,
7388 26340924 : ) -> anyhow::Result<()> {
7389 : use utils::bin_ser::BeSer;
7390 26340924 : if !key.is_valid_key_on_write_path() {
7391 0 : bail!(
7392 0 : "the request contains data not supported by pageserver at TimelineWriter::put: {}",
7393 0 : key
7394 0 : );
7395 26340924 : }
7396 26340924 : let val_ser_size = value.serialized_size().unwrap() as usize;
7397 26340924 : let batch = SerializedValueBatch::from_values(vec![(
7398 26340924 : key.to_compact(),
7399 26340924 : lsn,
7400 26340924 : val_ser_size,
7401 26340924 : value.clone(),
7402 26340924 : )]);
7403 26340924 :
7404 26340924 : self.put_batch(batch, ctx).await
7405 26340924 : }
7406 :
7407 12 : pub(crate) async fn delete_batch(
7408 12 : &mut self,
7409 12 : batch: &[(Range<Key>, Lsn)],
7410 12 : ctx: &RequestContext,
7411 12 : ) -> anyhow::Result<()> {
7412 12 : if let Some((_, lsn)) = batch.first() {
7413 12 : let action = self.get_open_layer_action(*lsn, 0);
7414 12 : let layer = self.handle_open_layer_action(*lsn, action, ctx).await?;
7415 12 : layer.put_tombstones(batch).await?;
7416 0 : }
7417 :
7418 12 : Ok(())
7419 12 : }
7420 :
7421 : /// Track the end of the latest digested WAL record.
7422 : /// Remember the (end of) last valid WAL record remembered in the timeline.
7423 : ///
7424 : /// Call this after you have finished writing all the WAL up to 'lsn'.
7425 : ///
7426 : /// 'lsn' must be aligned. This wakes up any wait_lsn() callers waiting for
7427 : /// the 'lsn' or anything older. The previous last record LSN is stored alongside
7428 : /// the latest and can be read.
7429 31674624 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
7430 31674624 : self.tl.finish_write(new_lsn);
7431 31674624 : }
7432 :
7433 1623420 : pub(crate) fn update_current_logical_size(&self, delta: i64) {
7434 1623420 : self.tl.update_current_logical_size(delta)
7435 1623420 : }
7436 : }
7437 :
7438 : // We need TimelineWriter to be send in upcoming conversion of
7439 : // Timeline::layers to tokio::sync::RwLock.
7440 : #[test]
7441 12 : fn is_send() {
7442 12 : fn _assert_send<T: Send>() {}
7443 12 : _assert_send::<TimelineWriter<'_>>();
7444 12 : }
7445 :
7446 : #[cfg(test)]
7447 : mod tests {
7448 : use std::sync::Arc;
7449 :
7450 : use pageserver_api::key::Key;
7451 : use pageserver_api::value::Value;
7452 : use std::iter::Iterator;
7453 : use tracing::Instrument;
7454 : use utils::id::TimelineId;
7455 : use utils::lsn::Lsn;
7456 :
7457 : use super::HeatMapTimeline;
7458 : use crate::context::RequestContextBuilder;
7459 : use crate::tenant::harness::{TenantHarness, test_img};
7460 : use crate::tenant::layer_map::LayerMap;
7461 : use crate::tenant::storage_layer::{Layer, LayerName, LayerVisibilityHint};
7462 : use crate::tenant::timeline::{DeltaLayerTestDesc, EvictionError};
7463 : use crate::tenant::{PreviousHeatmap, Timeline};
7464 :
7465 60 : fn assert_heatmaps_have_same_layers(lhs: &HeatMapTimeline, rhs: &HeatMapTimeline) {
7466 60 : assert_eq!(lhs.all_layers().count(), rhs.all_layers().count());
7467 60 : let lhs_rhs = lhs.all_layers().zip(rhs.all_layers());
7468 300 : for (l, r) in lhs_rhs {
7469 240 : assert_eq!(l.name, r.name);
7470 240 : assert_eq!(l.metadata, r.metadata);
7471 : }
7472 60 : }
7473 :
7474 : #[tokio::test]
7475 12 : async fn test_heatmap_generation() {
7476 12 : let harness = TenantHarness::create("heatmap_generation").await.unwrap();
7477 12 :
7478 12 : let covered_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
7479 12 : Lsn(0x10)..Lsn(0x20),
7480 12 : vec![(
7481 12 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
7482 12 : Lsn(0x11),
7483 12 : Value::Image(test_img("foo")),
7484 12 : )],
7485 12 : );
7486 12 : let visible_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
7487 12 : Lsn(0x10)..Lsn(0x20),
7488 12 : vec![(
7489 12 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
7490 12 : Lsn(0x11),
7491 12 : Value::Image(test_img("foo")),
7492 12 : )],
7493 12 : );
7494 12 : let l0_delta = DeltaLayerTestDesc::new(
7495 12 : Lsn(0x20)..Lsn(0x30),
7496 12 : Key::from_hex("000000000000000000000000000000000000").unwrap()
7497 12 : ..Key::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(),
7498 12 : vec![(
7499 12 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
7500 12 : Lsn(0x25),
7501 12 : Value::Image(test_img("foo")),
7502 12 : )],
7503 12 : );
7504 12 : let delta_layers = vec![
7505 12 : covered_delta.clone(),
7506 12 : visible_delta.clone(),
7507 12 : l0_delta.clone(),
7508 12 : ];
7509 12 :
7510 12 : let image_layer = (
7511 12 : Lsn(0x40),
7512 12 : vec![(
7513 12 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
7514 12 : test_img("bar"),
7515 12 : )],
7516 12 : );
7517 12 : let image_layers = vec![image_layer];
7518 12 :
7519 12 : let (tenant, ctx) = harness.load().await;
7520 12 : let timeline = tenant
7521 12 : .create_test_timeline_with_layers(
7522 12 : TimelineId::generate(),
7523 12 : Lsn(0x10),
7524 12 : 14,
7525 12 : &ctx,
7526 12 : Vec::new(), // in-memory layers
7527 12 : delta_layers,
7528 12 : image_layers,
7529 12 : Lsn(0x100),
7530 12 : )
7531 12 : .await
7532 12 : .unwrap();
7533 12 : let ctx = &ctx.with_scope_timeline(&timeline);
7534 12 :
7535 12 : // Layer visibility is an input to heatmap generation, so refresh it first
7536 12 : timeline.update_layer_visibility().await.unwrap();
7537 12 :
7538 12 : let heatmap = timeline
7539 12 : .generate_heatmap()
7540 12 : .await
7541 12 : .expect("Infallible while timeline is not shut down");
7542 12 :
7543 12 : assert_eq!(heatmap.timeline_id, timeline.timeline_id);
7544 12 :
7545 12 : // L0 should come last
7546 12 : let heatmap_layers = heatmap.all_layers().collect::<Vec<_>>();
7547 12 : assert_eq!(heatmap_layers.last().unwrap().name, l0_delta.layer_name());
7548 12 :
7549 12 : let mut last_lsn = Lsn::MAX;
7550 60 : for layer in heatmap_layers {
7551 12 : // Covered layer should be omitted
7552 48 : assert!(layer.name != covered_delta.layer_name());
7553 12 :
7554 48 : let layer_lsn = match &layer.name {
7555 24 : LayerName::Delta(d) => d.lsn_range.end,
7556 24 : LayerName::Image(i) => i.lsn,
7557 12 : };
7558 12 :
7559 12 : // Apart from L0s, newest Layers should come first
7560 48 : if !LayerMap::is_l0(layer.name.key_range(), layer.name.is_delta()) {
7561 36 : assert!(layer_lsn <= last_lsn);
7562 36 : last_lsn = layer_lsn;
7563 12 : }
7564 12 : }
7565 12 :
7566 12 : // Evict all the layers and stash the old heatmap in the timeline.
7567 12 : // This simulates a migration to a cold secondary location.
7568 12 :
7569 12 : let guard = timeline.layers.read().await;
7570 12 : let mut all_layers = Vec::new();
7571 12 : let forever = std::time::Duration::from_secs(120);
7572 60 : for layer in guard.likely_resident_layers() {
7573 60 : all_layers.push(layer.clone());
7574 60 : layer.evict_and_wait(forever).await.unwrap();
7575 12 : }
7576 12 : drop(guard);
7577 12 :
7578 12 : timeline
7579 12 : .previous_heatmap
7580 12 : .store(Some(Arc::new(PreviousHeatmap::Active {
7581 12 : heatmap: heatmap.clone(),
7582 12 : read_at: std::time::Instant::now(),
7583 12 : end_lsn: None,
7584 12 : })));
7585 12 :
7586 12 : // Generate a new heatmap and assert that it contains the same layers as the old one.
7587 12 : let post_migration_heatmap = timeline.generate_heatmap().await.unwrap();
7588 12 : assert_heatmaps_have_same_layers(&heatmap, &post_migration_heatmap);
7589 12 :
7590 12 : // Download each layer one by one. Generate the heatmap at each step and check
7591 12 : // that it's stable.
7592 72 : for layer in all_layers {
7593 60 : if layer.visibility() == LayerVisibilityHint::Covered {
7594 12 : continue;
7595 48 : }
7596 48 :
7597 48 : eprintln!("Downloading {layer} and re-generating heatmap");
7598 48 :
7599 48 : let ctx = &RequestContextBuilder::from(ctx)
7600 48 : .download_behavior(crate::context::DownloadBehavior::Download)
7601 48 : .attached_child();
7602 12 :
7603 48 : let _resident = layer
7604 48 : .download_and_keep_resident(ctx)
7605 48 : .instrument(tracing::info_span!(
7606 48 : parent: None,
7607 12 : "download_layer",
7608 12 : tenant_id = %timeline.tenant_shard_id.tenant_id,
7609 0 : shard_id = %timeline.tenant_shard_id.shard_slug(),
7610 0 : timeline_id = %timeline.timeline_id
7611 12 : ))
7612 48 : .await
7613 48 : .unwrap();
7614 12 :
7615 48 : let post_download_heatmap = timeline.generate_heatmap().await.unwrap();
7616 48 : assert_heatmaps_have_same_layers(&heatmap, &post_download_heatmap);
7617 12 : }
7618 12 :
7619 12 : // Everything from the post-migration heatmap is now resident.
7620 12 : // Check that we drop it from memory.
7621 12 : assert!(matches!(
7622 12 : timeline.previous_heatmap.load().as_deref(),
7623 12 : Some(PreviousHeatmap::Obsolete)
7624 12 : ));
7625 12 : }
7626 :
7627 : #[tokio::test]
7628 12 : async fn test_previous_heatmap_obsoletion() {
7629 12 : let harness = TenantHarness::create("heatmap_previous_heatmap_obsoletion")
7630 12 : .await
7631 12 : .unwrap();
7632 12 :
7633 12 : let l0_delta = DeltaLayerTestDesc::new(
7634 12 : Lsn(0x20)..Lsn(0x30),
7635 12 : Key::from_hex("000000000000000000000000000000000000").unwrap()
7636 12 : ..Key::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(),
7637 12 : vec![(
7638 12 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
7639 12 : Lsn(0x25),
7640 12 : Value::Image(test_img("foo")),
7641 12 : )],
7642 12 : );
7643 12 :
7644 12 : let image_layer = (
7645 12 : Lsn(0x40),
7646 12 : vec![(
7647 12 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
7648 12 : test_img("bar"),
7649 12 : )],
7650 12 : );
7651 12 :
7652 12 : let delta_layers = vec![l0_delta];
7653 12 : let image_layers = vec![image_layer];
7654 12 :
7655 12 : let (tenant, ctx) = harness.load().await;
7656 12 : let timeline = tenant
7657 12 : .create_test_timeline_with_layers(
7658 12 : TimelineId::generate(),
7659 12 : Lsn(0x10),
7660 12 : 14,
7661 12 : &ctx,
7662 12 : Vec::new(), // in-memory layers
7663 12 : delta_layers,
7664 12 : image_layers,
7665 12 : Lsn(0x100),
7666 12 : )
7667 12 : .await
7668 12 : .unwrap();
7669 12 :
7670 12 : // Layer visibility is an input to heatmap generation, so refresh it first
7671 12 : timeline.update_layer_visibility().await.unwrap();
7672 12 :
7673 12 : let heatmap = timeline
7674 12 : .generate_heatmap()
7675 12 : .await
7676 12 : .expect("Infallible while timeline is not shut down");
7677 12 :
7678 12 : // Both layers should be in the heatmap
7679 12 : assert!(heatmap.all_layers().count() > 0);
7680 12 :
7681 12 : // Now simulate a migration.
7682 12 : timeline
7683 12 : .previous_heatmap
7684 12 : .store(Some(Arc::new(PreviousHeatmap::Active {
7685 12 : heatmap: heatmap.clone(),
7686 12 : read_at: std::time::Instant::now(),
7687 12 : end_lsn: None,
7688 12 : })));
7689 12 :
7690 12 : // Evict all the layers in the previous heatmap
7691 12 : let guard = timeline.layers.read().await;
7692 12 : let forever = std::time::Duration::from_secs(120);
7693 36 : for layer in guard.likely_resident_layers() {
7694 36 : layer.evict_and_wait(forever).await.unwrap();
7695 12 : }
7696 12 : drop(guard);
7697 12 :
7698 12 : // Generate a new heatmap and check that the previous heatmap
7699 12 : // has been marked obsolete.
7700 12 : let post_eviction_heatmap = timeline
7701 12 : .generate_heatmap()
7702 12 : .await
7703 12 : .expect("Infallible while timeline is not shut down");
7704 12 :
7705 12 : assert_eq!(post_eviction_heatmap.all_layers().count(), 0);
7706 12 : assert!(matches!(
7707 12 : timeline.previous_heatmap.load().as_deref(),
7708 12 : Some(PreviousHeatmap::Obsolete)
7709 12 : ));
7710 12 : }
7711 :
7712 : #[tokio::test]
7713 12 : async fn two_layer_eviction_attempts_at_the_same_time() {
7714 12 : let harness = TenantHarness::create("two_layer_eviction_attempts_at_the_same_time")
7715 12 : .await
7716 12 : .unwrap();
7717 12 :
7718 12 : let (tenant, ctx) = harness.load().await;
7719 12 : let timeline = tenant
7720 12 : .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
7721 12 : .await
7722 12 : .unwrap();
7723 12 :
7724 12 : let layer = find_some_layer(&timeline).await;
7725 12 : let layer = layer
7726 12 : .keep_resident()
7727 12 : .await
7728 12 : .expect("no download => no downloading errors")
7729 12 : .drop_eviction_guard();
7730 12 :
7731 12 : let forever = std::time::Duration::from_secs(120);
7732 12 :
7733 12 : let first = layer.evict_and_wait(forever);
7734 12 : let second = layer.evict_and_wait(forever);
7735 12 :
7736 12 : let (first, second) = tokio::join!(first, second);
7737 12 :
7738 12 : let res = layer.keep_resident().await;
7739 12 : assert!(res.is_none(), "{res:?}");
7740 12 :
7741 12 : match (first, second) {
7742 12 : (Ok(()), Ok(())) => {
7743 12 : // because there are no more timeline locks being taken on eviction path, we can
7744 12 : // witness all three outcomes here.
7745 12 : }
7746 12 : (Ok(()), Err(EvictionError::NotFound)) | (Err(EvictionError::NotFound), Ok(())) => {
7747 0 : // if one completes before the other, this is fine just as well.
7748 0 : }
7749 12 : other => unreachable!("unexpected {:?}", other),
7750 12 : }
7751 12 : }
7752 :
7753 12 : async fn find_some_layer(timeline: &Timeline) -> Layer {
7754 12 : let layers = timeline.layers.read().await;
7755 12 : let desc = layers
7756 12 : .layer_map()
7757 12 : .unwrap()
7758 12 : .iter_historic_layers()
7759 12 : .next()
7760 12 : .expect("must find one layer to evict");
7761 12 :
7762 12 : layers.get_from_desc(&desc)
7763 12 : }
7764 : }
|