Line data Source code
1 : pub(crate) mod analysis;
2 : pub(crate) mod compaction;
3 : pub mod delete;
4 : pub(crate) mod detach_ancestor;
5 : mod eviction_task;
6 : pub(crate) mod handle;
7 : mod heatmap_layers_downloader;
8 : pub(crate) mod import_pgdata;
9 : mod init;
10 : pub mod layer_manager;
11 : pub(crate) mod logical_size;
12 : pub mod offload;
13 : pub mod span;
14 : pub mod uninit;
15 : mod walreceiver;
16 :
17 : use hashlink::LruCache;
18 : use std::array;
19 : use std::cmp::{max, min};
20 : use std::collections::btree_map::Entry;
21 : use std::collections::{BTreeMap, HashMap, HashSet};
22 : use std::ops::{ControlFlow, Deref, Range};
23 : use std::sync::atomic::{AtomicBool, AtomicU64, Ordering as AtomicOrdering};
24 : use std::sync::{Arc, Mutex, OnceLock, RwLock, Weak};
25 : use std::time::{Duration, Instant, SystemTime};
26 :
27 : use anyhow::{Context, Result, anyhow, bail, ensure};
28 : use arc_swap::{ArcSwap, ArcSwapOption};
29 : use bytes::Bytes;
30 : use camino::Utf8Path;
31 : use chrono::{DateTime, Utc};
32 : use compaction::{CompactionOutcome, GcCompactionCombinedSettings};
33 : use enumset::EnumSet;
34 : use fail::fail_point;
35 : use futures::stream::FuturesUnordered;
36 : use futures::{FutureExt, StreamExt};
37 : use handle::ShardTimelineId;
38 : use layer_manager::Shutdown;
39 : use offload::OffloadError;
40 : use once_cell::sync::Lazy;
41 : use pageserver_api::config::tenant_conf_defaults::DEFAULT_PITR_INTERVAL;
42 : use pageserver_api::key::{
43 : KEY_SIZE, Key, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX, NON_INHERITED_RANGE,
44 : SPARSE_RANGE,
45 : };
46 : use pageserver_api::keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning};
47 : use pageserver_api::models::{
48 : CompactKeyRange, CompactLsnRange, CompactionAlgorithm, CompactionAlgorithmSettings,
49 : DetachBehavior, DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest,
50 : EvictionPolicy, InMemoryLayerInfo, LayerMapInfo, LsnLease, PageTraceEvent, RelSizeMigration,
51 : TimelineState,
52 : };
53 : use pageserver_api::reltag::{BlockNumber, RelTag};
54 : use pageserver_api::shard::{ShardIdentity, ShardIndex, ShardNumber, TenantShardId};
55 : #[cfg(test)]
56 : use pageserver_api::value::Value;
57 : use postgres_connection::PgConnectionConfig;
58 : use postgres_ffi::v14::xlog_utils;
59 : use postgres_ffi::{WAL_SEGMENT_SIZE, to_pg_timestamp};
60 : use rand::Rng;
61 : use remote_storage::DownloadError;
62 : use serde_with::serde_as;
63 : use storage_broker::BrokerClientChannel;
64 : use tokio::runtime::Handle;
65 : use tokio::sync::mpsc::Sender;
66 : use tokio::sync::{Notify, oneshot, watch};
67 : use tokio_util::sync::CancellationToken;
68 : use tracing::*;
69 : use utils::generation::Generation;
70 : use utils::guard_arc_swap::GuardArcSwap;
71 : use utils::id::TimelineId;
72 : use utils::logging::{MonitorSlowFutureCallback, monitor_slow_future};
73 : use utils::lsn::{AtomicLsn, Lsn, RecordLsn};
74 : use utils::postgres_client::PostgresClientProtocol;
75 : use utils::rate_limit::RateLimit;
76 : use utils::seqwait::SeqWait;
77 : use utils::simple_rcu::{Rcu, RcuReadGuard};
78 : use utils::sync::gate::{Gate, GateGuard};
79 : use utils::{completion, critical, fs_ext, pausable_failpoint};
80 : use wal_decoder::serialized_batch::{SerializedValueBatch, ValueMeta};
81 :
82 : use self::delete::DeleteTimelineFlow;
83 : pub(super) use self::eviction_task::EvictionTaskTenantState;
84 : use self::eviction_task::EvictionTaskTimelineState;
85 : use self::layer_manager::LayerManager;
86 : use self::logical_size::LogicalSize;
87 : use self::walreceiver::{WalReceiver, WalReceiverConf};
88 : use super::remote_timeline_client::RemoteTimelineClient;
89 : use super::remote_timeline_client::index::{GcCompactionState, IndexPart};
90 : use super::secondary::heatmap::HeatMapLayer;
91 : use super::storage_layer::{LayerFringe, LayerVisibilityHint, ReadableLayer};
92 : use super::tasks::log_compaction_error;
93 : use super::upload_queue::NotInitialized;
94 : use super::{
95 : AttachedTenantConf, BasebackupPrepareSender, GcError, HeatMapTimeline, MaybeOffloaded,
96 : debug_assert_current_span_has_tenant_and_timeline_id,
97 : };
98 : use crate::PERF_TRACE_TARGET;
99 : use crate::aux_file::AuxFileSizeEstimator;
100 : use crate::basebackup_cache::BasebackupPrepareRequest;
101 : use crate::config::PageServerConf;
102 : use crate::context::{
103 : DownloadBehavior, PerfInstrumentFutureExt, RequestContext, RequestContextBuilder,
104 : };
105 : use crate::disk_usage_eviction_task::{DiskUsageEvictionInfo, EvictionCandidate, finite_f32};
106 : use crate::keyspace::{KeyPartitioning, KeySpace};
107 : use crate::l0_flush::{self, L0FlushGlobalState};
108 : use crate::metrics::{
109 : DELTAS_PER_READ_GLOBAL, LAYERS_PER_READ_AMORTIZED_GLOBAL, LAYERS_PER_READ_BATCH_GLOBAL,
110 : LAYERS_PER_READ_GLOBAL, ScanLatencyOngoingRecording, TimelineMetrics,
111 : };
112 : use crate::page_service::TenantManagerTypes;
113 : use crate::pgdatadir_mapping::{
114 : CalculateLogicalSizeError, CollectKeySpaceError, DirectoryKind, LsnForTimestamp,
115 : MAX_AUX_FILE_V2_DELTAS, MetricsUpdate,
116 : };
117 : use crate::task_mgr::TaskKind;
118 : use crate::tenant::config::AttachmentMode;
119 : use crate::tenant::gc_result::GcResult;
120 : use crate::tenant::layer_map::LayerMap;
121 : use crate::tenant::metadata::TimelineMetadata;
122 : use crate::tenant::storage_layer::delta_layer::DeltaEntry;
123 : use crate::tenant::storage_layer::inmemory_layer::IndexEntry;
124 : use crate::tenant::storage_layer::{
125 : AsLayerDesc, BatchLayerWriter, DeltaLayerWriter, EvictionError, ImageLayerName,
126 : ImageLayerWriter, InMemoryLayer, IoConcurrency, Layer, LayerAccessStatsReset, LayerName,
127 : PersistentLayerDesc, PersistentLayerKey, ResidentLayer, ValueReconstructSituation,
128 : ValueReconstructState, ValuesReconstructState,
129 : };
130 : use crate::tenant::tasks::BackgroundLoopKind;
131 : use crate::tenant::timeline::logical_size::CurrentLogicalSize;
132 : use crate::virtual_file::{MaybeFatalIo, VirtualFile};
133 : use crate::walingest::WalLagCooldown;
134 : use crate::walredo::RedoAttemptType;
135 : use crate::{ZERO_PAGE, task_mgr, walredo};
136 :
137 : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
138 : pub(crate) enum FlushLoopState {
139 : NotStarted,
140 : Running {
141 : #[cfg(test)]
142 : expect_initdb_optimization: bool,
143 : #[cfg(test)]
144 : initdb_optimization_count: usize,
145 : },
146 : Exited,
147 : }
148 :
149 : #[derive(Debug, Copy, Clone, PartialEq, Eq)]
150 : pub enum ImageLayerCreationMode {
151 : /// Try to create image layers based on `time_for_new_image_layer`. Used in compaction code path.
152 : Try,
153 : /// Force creating the image layers if possible. For now, no image layers will be created
154 : /// for metadata keys. Used in compaction code path with force flag enabled.
155 : Force,
156 : /// Initial ingestion of the data, and no data should be dropped in this function. This
157 : /// means that no metadata keys should be included in the partitions. Used in flush frozen layer
158 : /// code path.
159 : Initial,
160 : }
161 :
162 : #[derive(Clone, Debug, Default)]
163 : pub enum LastImageLayerCreationStatus {
164 : Incomplete {
165 : /// The last key of the partition (exclusive) that was processed in the last
166 : /// image layer creation attempt. We will continue from this key in the next
167 : /// attempt.
168 : last_key: Key,
169 : },
170 : Complete,
171 : #[default]
172 : Initial,
173 : }
174 :
175 : impl std::fmt::Display for ImageLayerCreationMode {
176 291 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
177 291 : write!(f, "{:?}", self)
178 291 : }
179 : }
180 :
181 : /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things.
182 : /// Can be removed after all refactors are done.
183 14 : fn drop_rlock<T>(rlock: tokio::sync::RwLockReadGuard<T>) {
184 14 : drop(rlock)
185 14 : }
186 :
187 : /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things.
188 : /// Can be removed after all refactors are done.
189 305 : fn drop_wlock<T>(rlock: tokio::sync::RwLockWriteGuard<'_, T>) {
190 305 : drop(rlock)
191 305 : }
192 :
193 : /// The outward-facing resources required to build a Timeline
194 : pub struct TimelineResources {
195 : pub remote_client: RemoteTimelineClient,
196 : pub pagestream_throttle: Arc<crate::tenant::throttle::Throttle>,
197 : pub pagestream_throttle_metrics: Arc<crate::metrics::tenant_throttling::Pagestream>,
198 : pub l0_compaction_trigger: Arc<Notify>,
199 : pub l0_flush_global_state: l0_flush::L0FlushGlobalState,
200 : pub basebackup_prepare_sender: BasebackupPrepareSender,
201 : }
202 :
203 : pub struct Timeline {
204 : pub(crate) conf: &'static PageServerConf,
205 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
206 :
207 : myself: Weak<Self>,
208 :
209 : pub(crate) tenant_shard_id: TenantShardId,
210 : pub timeline_id: TimelineId,
211 :
212 : /// The generation of the tenant that instantiated us: this is used for safety when writing remote objects.
213 : /// Never changes for the lifetime of this [`Timeline`] object.
214 : ///
215 : /// This duplicates the generation stored in LocationConf, but that structure is mutable:
216 : /// this copy enforces the invariant that generatio doesn't change during a Tenant's lifetime.
217 : pub(crate) generation: Generation,
218 :
219 : /// The detailed sharding information from our parent Tenant. This enables us to map keys
220 : /// to shards, and is constant through the lifetime of this Timeline.
221 : shard_identity: ShardIdentity,
222 :
223 : pub pg_version: u32,
224 :
225 : /// The tuple has two elements.
226 : /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote).
227 : /// 2. `LayerMap`, the acceleration data structure for `get_reconstruct_data`.
228 : ///
229 : /// `LayerMap` maps out the `(PAGE,LSN) / (KEY,LSN)` space, which is composed of `(KeyRange, LsnRange)` rectangles.
230 : /// We describe these rectangles through the `PersistentLayerDesc` struct.
231 : ///
232 : /// When we want to reconstruct a page, we first find the `PersistentLayerDesc`'s that we need for page reconstruction,
233 : /// using `LayerMap`. Then, we use `LayerFileManager` to get the `PersistentLayer`'s that correspond to the
234 : /// `PersistentLayerDesc`'s.
235 : ///
236 : /// Hence, it's important to keep things coherent. The `LayerFileManager` must always have an entry for all
237 : /// `PersistentLayerDesc`'s in the `LayerMap`. If it doesn't, `LayerFileManager::get_from_desc` will panic at
238 : /// runtime, e.g., during page reconstruction.
239 : ///
240 : /// In the future, we'll be able to split up the tuple of LayerMap and `LayerFileManager`,
241 : /// so that e.g. on-demand-download/eviction, and layer spreading, can operate just on `LayerFileManager`.
242 : pub(crate) layers: tokio::sync::RwLock<LayerManager>,
243 :
244 : last_freeze_at: AtomicLsn,
245 : // Atomic would be more appropriate here.
246 : last_freeze_ts: RwLock<Instant>,
247 :
248 : pub(crate) standby_horizon: AtomicLsn,
249 :
250 : // WAL redo manager. `None` only for broken tenants.
251 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
252 :
253 : /// Remote storage client.
254 : /// See [`remote_timeline_client`](super::remote_timeline_client) module comment for details.
255 : pub(crate) remote_client: Arc<RemoteTimelineClient>,
256 :
257 : // What page versions do we hold in the repository? If we get a
258 : // request > last_record_lsn, we need to wait until we receive all
259 : // the WAL up to the request. The SeqWait provides functions for
260 : // that. TODO: If we get a request for an old LSN, such that the
261 : // versions have already been garbage collected away, we should
262 : // throw an error, but we don't track that currently.
263 : //
264 : // last_record_lsn.load().last points to the end of last processed WAL record.
265 : //
266 : // We also remember the starting point of the previous record in
267 : // 'last_record_lsn.load().prev'. It's used to set the xl_prev pointer of the
268 : // first WAL record when the node is started up. But here, we just
269 : // keep track of it.
270 : last_record_lsn: SeqWait<RecordLsn, Lsn>,
271 :
272 : // All WAL records have been processed and stored durably on files on
273 : // local disk, up to this LSN. On crash and restart, we need to re-process
274 : // the WAL starting from this point.
275 : //
276 : // Some later WAL records might have been processed and also flushed to disk
277 : // already, so don't be surprised to see some, but there's no guarantee on
278 : // them yet.
279 : disk_consistent_lsn: AtomicLsn,
280 :
281 : // Parent timeline that this timeline was branched from, and the LSN
282 : // of the branch point.
283 : ancestor_timeline: Option<Arc<Timeline>>,
284 : ancestor_lsn: Lsn,
285 :
286 : // The LSN of gc-compaction that was last applied to this timeline.
287 : gc_compaction_state: ArcSwap<Option<GcCompactionState>>,
288 :
289 : pub(crate) metrics: Arc<TimelineMetrics>,
290 :
291 : // `Timeline` doesn't write these metrics itself, but it manages the lifetime. Code
292 : // in `crate::page_service` writes these metrics.
293 : pub(crate) query_metrics: crate::metrics::SmgrQueryTimePerTimeline,
294 :
295 : directory_metrics_inited: [AtomicBool; DirectoryKind::KINDS_NUM],
296 : directory_metrics: [AtomicU64; DirectoryKind::KINDS_NUM],
297 :
298 : /// Ensures layers aren't frozen by checkpointer between
299 : /// [`Timeline::get_layer_for_write`] and layer reads.
300 : /// Locked automatically by [`TimelineWriter`] and checkpointer.
301 : /// Must always be acquired before the layer map/individual layer lock
302 : /// to avoid deadlock.
303 : ///
304 : /// The state is cleared upon freezing.
305 : write_lock: tokio::sync::Mutex<Option<TimelineWriterState>>,
306 :
307 : /// Used to avoid multiple `flush_loop` tasks running
308 : pub(super) flush_loop_state: Mutex<FlushLoopState>,
309 :
310 : /// layer_flush_start_tx can be used to wake up the layer-flushing task.
311 : /// - The u64 value is a counter, incremented every time a new flush cycle is requested.
312 : /// The flush cycle counter is sent back on the layer_flush_done channel when
313 : /// the flush finishes. You can use that to wait for the flush to finish.
314 : /// - The LSN is updated to max() of its current value and the latest disk_consistent_lsn
315 : /// read by whoever sends an update
316 : layer_flush_start_tx: tokio::sync::watch::Sender<(u64, Lsn)>,
317 : /// to be notified when layer flushing has finished, subscribe to the layer_flush_done channel
318 : layer_flush_done_tx: tokio::sync::watch::Sender<(u64, Result<(), FlushLayerError>)>,
319 :
320 : // The LSN at which we have executed GC: whereas [`Self::gc_info`] records the LSN at which
321 : // we _intend_ to GC (i.e. the PITR cutoff), this LSN records where we actually last did it.
322 : // Because PITR interval is mutable, it's possible for this LSN to be earlier or later than
323 : // the planned GC cutoff.
324 : pub applied_gc_cutoff_lsn: Rcu<Lsn>,
325 :
326 : pub(crate) gc_compaction_layer_update_lock: tokio::sync::RwLock<()>,
327 :
328 : // List of child timelines and their branch points. This is needed to avoid
329 : // garbage collecting data that is still needed by the child timelines.
330 : pub(crate) gc_info: std::sync::RwLock<GcInfo>,
331 :
332 : pub(crate) last_image_layer_creation_status: ArcSwap<LastImageLayerCreationStatus>,
333 :
334 : // It may change across major versions so for simplicity
335 : // keep it after running initdb for a timeline.
336 : // It is needed in checks when we want to error on some operations
337 : // when they are requested for pre-initdb lsn.
338 : // It can be unified with latest_gc_cutoff_lsn under some "first_valid_lsn",
339 : // though let's keep them both for better error visibility.
340 : pub initdb_lsn: Lsn,
341 :
342 : /// The repartitioning result. Allows a single writer and multiple readers.
343 : pub(crate) partitioning: GuardArcSwap<((KeyPartitioning, SparseKeyPartitioning), Lsn)>,
344 :
345 : /// Configuration: how often should the partitioning be recalculated.
346 : repartition_threshold: u64,
347 :
348 : last_image_layer_creation_check_at: AtomicLsn,
349 : last_image_layer_creation_check_instant: std::sync::Mutex<Option<Instant>>,
350 :
351 : /// Current logical size of the "datadir", at the last LSN.
352 : current_logical_size: LogicalSize,
353 :
354 : /// Information about the last processed message by the WAL receiver,
355 : /// or None if WAL receiver has not received anything for this timeline
356 : /// yet.
357 : pub last_received_wal: Mutex<Option<WalReceiverInfo>>,
358 : pub walreceiver: Mutex<Option<WalReceiver>>,
359 :
360 : /// Relation size cache
361 : pub(crate) rel_size_latest_cache: RwLock<HashMap<RelTag, (Lsn, BlockNumber)>>,
362 : pub(crate) rel_size_snapshot_cache: Mutex<LruCache<(Lsn, RelTag), BlockNumber>>,
363 :
364 : download_all_remote_layers_task_info: RwLock<Option<DownloadRemoteLayersTaskInfo>>,
365 :
366 : state: watch::Sender<TimelineState>,
367 :
368 : /// Prevent two tasks from deleting the timeline at the same time. If held, the
369 : /// timeline is being deleted. If 'true', the timeline has already been deleted.
370 : pub delete_progress: TimelineDeleteProgress,
371 :
372 : eviction_task_timeline_state: tokio::sync::Mutex<EvictionTaskTimelineState>,
373 :
374 : /// Load or creation time information about the disk_consistent_lsn and when the loading
375 : /// happened. Used for consumption metrics.
376 : pub(crate) loaded_at: (Lsn, SystemTime),
377 :
378 : /// Gate to prevent shutdown completing while I/O is still happening to this timeline's data
379 : pub(crate) gate: Gate,
380 :
381 : /// Cancellation token scoped to this timeline: anything doing long-running work relating
382 : /// to the timeline should drop out when this token fires.
383 : pub(crate) cancel: CancellationToken,
384 :
385 : /// Make sure we only have one running compaction at a time in tests.
386 : ///
387 : /// Must only be taken in two places:
388 : /// - [`Timeline::compact`] (this file)
389 : /// - [`delete::delete_local_timeline_directory`]
390 : ///
391 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
392 : compaction_lock: tokio::sync::Mutex<()>,
393 :
394 : /// If true, the last compaction failed.
395 : compaction_failed: AtomicBool,
396 :
397 : /// Notifies the tenant compaction loop that there is pending L0 compaction work.
398 : l0_compaction_trigger: Arc<Notify>,
399 :
400 : /// Make sure we only have one running gc at a time.
401 : ///
402 : /// Must only be taken in two places:
403 : /// - [`Timeline::gc`] (this file)
404 : /// - [`delete::delete_local_timeline_directory`]
405 : ///
406 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
407 : gc_lock: tokio::sync::Mutex<()>,
408 :
409 : /// Cloned from [`super::TenantShard::pagestream_throttle`] on construction.
410 : pub(crate) pagestream_throttle: Arc<crate::tenant::throttle::Throttle>,
411 :
412 : /// Size estimator for aux file v2
413 : pub(crate) aux_file_size_estimator: AuxFileSizeEstimator,
414 :
415 : /// Some test cases directly place keys into the timeline without actually modifying the directory
416 : /// keys (i.e., DB_DIR). The test cases creating such keys will put the keyspaces here, so that
417 : /// these keys won't get garbage-collected during compaction/GC. This field only modifies the dense
418 : /// keyspace return value of `collect_keyspace`. For sparse keyspaces, use AUX keys for testing, and
419 : /// in the future, add `extra_test_sparse_keyspace` if necessary.
420 : #[cfg(test)]
421 : pub(crate) extra_test_dense_keyspace: ArcSwap<KeySpace>,
422 :
423 : pub(crate) l0_flush_global_state: L0FlushGlobalState,
424 :
425 : pub(crate) handles: handle::PerTimelineState<TenantManagerTypes>,
426 :
427 : pub(crate) attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
428 :
429 : /// Cf. [`crate::tenant::CreateTimelineIdempotency`].
430 : pub(crate) create_idempotency: crate::tenant::CreateTimelineIdempotency,
431 :
432 : /// If Some, collects GetPage metadata for an ongoing PageTrace.
433 : pub(crate) page_trace: ArcSwapOption<Sender<PageTraceEvent>>,
434 :
435 : pub(super) previous_heatmap: ArcSwapOption<PreviousHeatmap>,
436 :
437 : /// May host a background Tokio task which downloads all the layers from the current
438 : /// heatmap on demand.
439 : heatmap_layers_downloader: Mutex<Option<heatmap_layers_downloader::HeatmapLayersDownloader>>,
440 :
441 : pub(crate) rel_size_v2_status: ArcSwapOption<RelSizeMigration>,
442 :
443 : wait_lsn_log_slow: tokio::sync::Semaphore,
444 :
445 : /// A channel to send async requests to prepare a basebackup for the basebackup cache.
446 : basebackup_prepare_sender: BasebackupPrepareSender,
447 : }
448 :
449 : pub(crate) enum PreviousHeatmap {
450 : Active {
451 : heatmap: HeatMapTimeline,
452 : read_at: std::time::Instant,
453 : // End LSN covered by the heatmap if known
454 : end_lsn: Option<Lsn>,
455 : },
456 : Obsolete,
457 : }
458 :
459 : pub type TimelineDeleteProgress = Arc<tokio::sync::Mutex<DeleteTimelineFlow>>;
460 :
461 : pub struct WalReceiverInfo {
462 : pub wal_source_connconf: PgConnectionConfig,
463 : pub last_received_msg_lsn: Lsn,
464 : pub last_received_msg_ts: u128,
465 : }
466 :
467 : /// Information about how much history needs to be retained, needed by
468 : /// Garbage Collection.
469 : #[derive(Default)]
470 : pub(crate) struct GcInfo {
471 : /// Specific LSNs that are needed.
472 : ///
473 : /// Currently, this includes all points where child branches have
474 : /// been forked off from. In the future, could also include
475 : /// explicit user-defined snapshot points.
476 : pub(crate) retain_lsns: Vec<(Lsn, TimelineId, MaybeOffloaded)>,
477 :
478 : /// The cutoff coordinates, which are combined by selecting the minimum.
479 : pub(crate) cutoffs: GcCutoffs,
480 :
481 : /// Leases granted to particular LSNs.
482 : pub(crate) leases: BTreeMap<Lsn, LsnLease>,
483 :
484 : /// Whether our branch point is within our ancestor's PITR interval (for cost estimation)
485 : pub(crate) within_ancestor_pitr: bool,
486 : }
487 :
488 : impl GcInfo {
489 154 : pub(crate) fn min_cutoff(&self) -> Lsn {
490 154 : self.cutoffs.select_min()
491 154 : }
492 :
493 119 : pub(super) fn insert_child(
494 119 : &mut self,
495 119 : child_id: TimelineId,
496 119 : child_lsn: Lsn,
497 119 : is_offloaded: MaybeOffloaded,
498 119 : ) {
499 119 : self.retain_lsns.push((child_lsn, child_id, is_offloaded));
500 119 : self.retain_lsns.sort_by_key(|i| i.0);
501 119 : }
502 :
503 2 : pub(super) fn remove_child_maybe_offloaded(
504 2 : &mut self,
505 2 : child_id: TimelineId,
506 2 : maybe_offloaded: MaybeOffloaded,
507 2 : ) -> bool {
508 2 : // Remove at most one element. Needed for correctness if there is two live `Timeline` objects referencing
509 2 : // the same timeline. Shouldn't but maybe can occur when Arc's live longer than intended.
510 2 : let mut removed = false;
511 3 : self.retain_lsns.retain(|i| {
512 3 : if removed {
513 1 : return true;
514 2 : }
515 2 : let remove = i.1 == child_id && i.2 == maybe_offloaded;
516 2 : removed |= remove;
517 2 : !remove
518 3 : });
519 2 : removed
520 2 : }
521 :
522 2 : pub(super) fn remove_child_not_offloaded(&mut self, child_id: TimelineId) -> bool {
523 2 : self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::No)
524 2 : }
525 :
526 0 : pub(super) fn remove_child_offloaded(&mut self, child_id: TimelineId) -> bool {
527 0 : self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::Yes)
528 0 : }
529 119 : pub(crate) fn lsn_covered_by_lease(&self, lsn: Lsn) -> bool {
530 119 : self.leases.contains_key(&lsn)
531 119 : }
532 : }
533 :
534 : /// The `GcInfo` component describing which Lsns need to be retained. Functionally, this
535 : /// is a single number (the oldest LSN which we must retain), but it internally distinguishes
536 : /// between time-based and space-based retention for observability and consumption metrics purposes.
537 : #[derive(Clone, Debug, Default)]
538 : pub(crate) struct GcCutoffs {
539 : /// Calculated from the [`pageserver_api::models::TenantConfig::gc_horizon`], this LSN indicates how much
540 : /// history we must keep to retain a specified number of bytes of WAL.
541 : pub(crate) space: Lsn,
542 :
543 : /// Calculated from [`pageserver_api::models::TenantConfig::pitr_interval`], this LSN indicates
544 : /// how much history we must keep to enable reading back at least the PITR interval duration.
545 : ///
546 : /// None indicates that the PITR cutoff has not been computed. A PITR interval of 0 will yield
547 : /// Some(last_record_lsn).
548 : pub(crate) time: Option<Lsn>,
549 : }
550 :
551 : impl GcCutoffs {
552 154 : fn select_min(&self) -> Lsn {
553 154 : // NB: if we haven't computed the PITR cutoff yet, we can't GC anything.
554 154 : self.space.min(self.time.unwrap_or_default())
555 154 : }
556 : }
557 :
558 : pub(crate) struct TimelineVisitOutcome {
559 : completed_keyspace: KeySpace,
560 : image_covered_keyspace: KeySpace,
561 : }
562 :
563 : /// An error happened in a get() operation.
564 : #[derive(thiserror::Error, Debug)]
565 : pub(crate) enum PageReconstructError {
566 : #[error(transparent)]
567 : Other(anyhow::Error),
568 :
569 : #[error("Ancestor LSN wait error: {0}")]
570 : AncestorLsnTimeout(WaitLsnError),
571 :
572 : #[error("timeline shutting down")]
573 : Cancelled,
574 :
575 : /// An error happened replaying WAL records
576 : #[error(transparent)]
577 : WalRedo(anyhow::Error),
578 :
579 : #[error("{0}")]
580 : MissingKey(Box<MissingKeyError>),
581 : }
582 :
583 : impl From<anyhow::Error> for PageReconstructError {
584 1 : fn from(value: anyhow::Error) -> Self {
585 1 : // with walingest.rs many PageReconstructError are wrapped in as anyhow::Error
586 1 : match value.downcast::<PageReconstructError>() {
587 0 : Ok(pre) => pre,
588 1 : Err(other) => PageReconstructError::Other(other),
589 : }
590 1 : }
591 : }
592 :
593 : impl From<utils::bin_ser::DeserializeError> for PageReconstructError {
594 0 : fn from(value: utils::bin_ser::DeserializeError) -> Self {
595 0 : PageReconstructError::Other(anyhow::Error::new(value).context("deserialization failure"))
596 0 : }
597 : }
598 :
599 : impl From<layer_manager::Shutdown> for PageReconstructError {
600 0 : fn from(_: layer_manager::Shutdown) -> Self {
601 0 : PageReconstructError::Cancelled
602 0 : }
603 : }
604 :
605 : impl GetVectoredError {
606 : #[cfg(test)]
607 3 : pub(crate) fn is_missing_key_error(&self) -> bool {
608 3 : matches!(self, Self::MissingKey(_))
609 3 : }
610 : }
611 :
612 : impl From<layer_manager::Shutdown> for GetVectoredError {
613 0 : fn from(_: layer_manager::Shutdown) -> Self {
614 0 : GetVectoredError::Cancelled
615 0 : }
616 : }
617 :
618 : /// A layer identifier when used in the [`ReadPath`] structure. This enum is for observability purposes
619 : /// only and not used by the "real read path".
620 : pub enum ReadPathLayerId {
621 : PersistentLayer(PersistentLayerKey),
622 : InMemoryLayer(Range<Lsn>),
623 : }
624 :
625 : impl std::fmt::Display for ReadPathLayerId {
626 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
627 0 : match self {
628 0 : ReadPathLayerId::PersistentLayer(key) => write!(f, "{}", key),
629 0 : ReadPathLayerId::InMemoryLayer(range) => {
630 0 : write!(f, "in-mem {}..{}", range.start, range.end)
631 : }
632 : }
633 0 : }
634 : }
635 : pub struct ReadPath {
636 : keyspace: KeySpace,
637 : lsn: Lsn,
638 : path: Vec<(ReadPathLayerId, KeySpace, Range<Lsn>)>,
639 : }
640 :
641 : impl ReadPath {
642 312348 : pub fn new(keyspace: KeySpace, lsn: Lsn) -> Self {
643 312348 : Self {
644 312348 : keyspace,
645 312348 : lsn,
646 312348 : path: Vec::new(),
647 312348 : }
648 312348 : }
649 :
650 440597 : pub fn record_layer_visit(
651 440597 : &mut self,
652 440597 : layer_to_read: &ReadableLayer,
653 440597 : keyspace_to_read: &KeySpace,
654 440597 : lsn_range: &Range<Lsn>,
655 440597 : ) {
656 440597 : let id = match layer_to_read {
657 133393 : ReadableLayer::PersistentLayer(layer) => {
658 133393 : ReadPathLayerId::PersistentLayer(layer.layer_desc().key())
659 : }
660 307204 : ReadableLayer::InMemoryLayer(layer) => {
661 307204 : ReadPathLayerId::InMemoryLayer(layer.get_lsn_range())
662 : }
663 : };
664 440597 : self.path
665 440597 : .push((id, keyspace_to_read.clone(), lsn_range.clone()));
666 440597 : }
667 : }
668 :
669 : impl std::fmt::Display for ReadPath {
670 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
671 0 : writeln!(f, "Read path for {} at lsn {}:", self.keyspace, self.lsn)?;
672 0 : for (idx, (layer_id, keyspace, lsn_range)) in self.path.iter().enumerate() {
673 0 : writeln!(
674 0 : f,
675 0 : "{}: {} {}..{} {}",
676 0 : idx, layer_id, lsn_range.start, lsn_range.end, keyspace
677 0 : )?;
678 : }
679 0 : Ok(())
680 0 : }
681 : }
682 :
683 : #[derive(thiserror::Error)]
684 : pub struct MissingKeyError {
685 : keyspace: KeySpace,
686 : shard: ShardNumber,
687 : query: Option<VersionedKeySpaceQuery>,
688 : // This is largest request LSN from the get page request batch
689 : original_hwm_lsn: Lsn,
690 : ancestor_lsn: Option<Lsn>,
691 : /// Debug information about the read path if there's an error
692 : read_path: Option<ReadPath>,
693 : backtrace: Option<std::backtrace::Backtrace>,
694 : }
695 :
696 : impl MissingKeyError {
697 7 : fn enrich(&mut self, query: VersionedKeySpaceQuery) {
698 7 : self.query = Some(query);
699 7 : }
700 : }
701 :
702 : impl std::fmt::Debug for MissingKeyError {
703 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
704 0 : write!(f, "{}", self)
705 0 : }
706 : }
707 :
708 : impl std::fmt::Display for MissingKeyError {
709 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
710 0 : write!(
711 0 : f,
712 0 : "could not find data for key {} (shard {:?}), original HWM LSN {}",
713 0 : self.keyspace, self.shard, self.original_hwm_lsn
714 0 : )?;
715 :
716 0 : if let Some(ref ancestor_lsn) = self.ancestor_lsn {
717 0 : write!(f, ", ancestor {}", ancestor_lsn)?;
718 0 : }
719 :
720 0 : if let Some(ref query) = self.query {
721 0 : write!(f, ", query {}", query)?;
722 0 : }
723 :
724 0 : if let Some(ref read_path) = self.read_path {
725 0 : write!(f, "\n{}", read_path)?;
726 0 : }
727 :
728 0 : if let Some(ref backtrace) = self.backtrace {
729 0 : write!(f, "\n{}", backtrace)?;
730 0 : }
731 :
732 0 : Ok(())
733 0 : }
734 : }
735 :
736 : impl PageReconstructError {
737 : /// Returns true if this error indicates a tenant/timeline shutdown alike situation
738 0 : pub(crate) fn is_stopping(&self) -> bool {
739 : use PageReconstructError::*;
740 0 : match self {
741 0 : Cancelled => true,
742 0 : Other(_) | AncestorLsnTimeout(_) | WalRedo(_) | MissingKey(_) => false,
743 : }
744 0 : }
745 : }
746 :
747 : #[derive(thiserror::Error, Debug)]
748 : pub(crate) enum CreateImageLayersError {
749 : #[error("timeline shutting down")]
750 : Cancelled,
751 :
752 : #[error("read failed")]
753 : GetVectoredError(#[source] GetVectoredError),
754 :
755 : #[error("reconstruction failed")]
756 : PageReconstructError(#[source] PageReconstructError),
757 :
758 : #[error(transparent)]
759 : Other(#[from] anyhow::Error),
760 : }
761 :
762 : impl From<layer_manager::Shutdown> for CreateImageLayersError {
763 0 : fn from(_: layer_manager::Shutdown) -> Self {
764 0 : CreateImageLayersError::Cancelled
765 0 : }
766 : }
767 :
768 : #[derive(thiserror::Error, Debug, Clone)]
769 : pub(crate) enum FlushLayerError {
770 : /// Timeline cancellation token was cancelled
771 : #[error("timeline shutting down")]
772 : Cancelled,
773 :
774 : /// We tried to flush a layer while the Timeline is in an unexpected state
775 : #[error("cannot flush frozen layers when flush_loop is not running, state is {0:?}")]
776 : NotRunning(FlushLoopState),
777 :
778 : // Arc<> the following non-clonable error types: we must be Clone-able because the flush error is propagated from the flush
779 : // loop via a watch channel, where we can only borrow it.
780 : #[error("create image layers (shared)")]
781 : CreateImageLayersError(Arc<CreateImageLayersError>),
782 :
783 : #[error("other (shared)")]
784 : Other(#[from] Arc<anyhow::Error>),
785 : }
786 :
787 : impl FlushLayerError {
788 : // When crossing from generic anyhow errors to this error type, we explicitly check
789 : // for timeline cancellation to avoid logging inoffensive shutdown errors as warn/err.
790 0 : fn from_anyhow(timeline: &Timeline, err: anyhow::Error) -> Self {
791 0 : let cancelled = timeline.cancel.is_cancelled()
792 : // The upload queue might have been shut down before the official cancellation of the timeline.
793 0 : || err
794 0 : .downcast_ref::<NotInitialized>()
795 0 : .map(NotInitialized::is_stopping)
796 0 : .unwrap_or_default();
797 0 : if cancelled {
798 0 : Self::Cancelled
799 : } else {
800 0 : Self::Other(Arc::new(err))
801 : }
802 0 : }
803 : }
804 :
805 : impl From<layer_manager::Shutdown> for FlushLayerError {
806 0 : fn from(_: layer_manager::Shutdown) -> Self {
807 0 : FlushLayerError::Cancelled
808 0 : }
809 : }
810 :
811 : #[derive(thiserror::Error, Debug)]
812 : pub(crate) enum GetVectoredError {
813 : #[error("timeline shutting down")]
814 : Cancelled,
815 :
816 : #[error("requested too many keys: {0} > {}", Timeline::MAX_GET_VECTORED_KEYS)]
817 : Oversized(u64),
818 :
819 : #[error("requested at invalid LSN: {0}")]
820 : InvalidLsn(Lsn),
821 :
822 : #[error("requested key not found: {0}")]
823 : MissingKey(Box<MissingKeyError>),
824 :
825 : #[error("ancestry walk")]
826 : GetReadyAncestorError(#[source] GetReadyAncestorError),
827 :
828 : #[error(transparent)]
829 : Other(#[from] anyhow::Error),
830 : }
831 :
832 : impl From<GetReadyAncestorError> for GetVectoredError {
833 1 : fn from(value: GetReadyAncestorError) -> Self {
834 : use GetReadyAncestorError::*;
835 1 : match value {
836 0 : Cancelled => GetVectoredError::Cancelled,
837 : AncestorLsnTimeout(_) | BadState { .. } => {
838 1 : GetVectoredError::GetReadyAncestorError(value)
839 : }
840 : }
841 1 : }
842 : }
843 :
844 : #[derive(thiserror::Error, Debug)]
845 : pub(crate) enum GetReadyAncestorError {
846 : #[error("ancestor LSN wait error")]
847 : AncestorLsnTimeout(#[from] WaitLsnError),
848 :
849 : #[error("bad state on timeline {timeline_id}: {state:?}")]
850 : BadState {
851 : timeline_id: TimelineId,
852 : state: TimelineState,
853 : },
854 :
855 : #[error("cancelled")]
856 : Cancelled,
857 : }
858 :
859 : #[derive(Clone, Copy)]
860 : pub enum LogicalSizeCalculationCause {
861 : Initial,
862 : ConsumptionMetricsSyntheticSize,
863 : EvictionTaskImitation,
864 : TenantSizeHandler,
865 : }
866 :
867 : pub enum GetLogicalSizePriority {
868 : User,
869 : Background,
870 : }
871 :
872 0 : #[derive(Debug, enumset::EnumSetType)]
873 : pub(crate) enum CompactFlags {
874 : ForceRepartition,
875 : ForceImageLayerCreation,
876 : ForceL0Compaction,
877 : OnlyL0Compaction,
878 : EnhancedGcBottomMostCompaction,
879 : DryRun,
880 : /// Makes image compaction yield if there's pending L0 compaction. This should always be used in
881 : /// the background compaction task, since we want to aggressively compact down L0 to bound
882 : /// read amplification.
883 : ///
884 : /// It only makes sense to use this when `compaction_l0_first` is enabled (such that we yield to
885 : /// an L0 compaction pass), and without `OnlyL0Compaction` (L0 compaction shouldn't yield for L0
886 : /// compaction).
887 : YieldForL0,
888 : }
889 :
890 : #[serde_with::serde_as]
891 0 : #[derive(Debug, Clone, serde::Deserialize)]
892 : pub(crate) struct CompactRequest {
893 : pub compact_key_range: Option<CompactKeyRange>,
894 : pub compact_lsn_range: Option<CompactLsnRange>,
895 : /// Whether the compaction job should be scheduled.
896 : #[serde(default)]
897 : pub scheduled: bool,
898 : /// Whether the compaction job should be split across key ranges.
899 : #[serde(default)]
900 : pub sub_compaction: bool,
901 : /// Max job size for each subcompaction job.
902 : pub sub_compaction_max_job_size_mb: Option<u64>,
903 : }
904 :
905 0 : #[derive(Debug, Clone, serde::Deserialize)]
906 : pub(crate) struct MarkInvisibleRequest {
907 : #[serde(default)]
908 : pub is_visible: Option<bool>,
909 : }
910 :
911 : #[derive(Debug, Clone, Default)]
912 : pub(crate) struct CompactOptions {
913 : pub flags: EnumSet<CompactFlags>,
914 : /// If set, the compaction will only compact the key range specified by this option.
915 : /// This option is only used by GC compaction. For the full explanation, see [`compaction::GcCompactJob`].
916 : pub compact_key_range: Option<CompactKeyRange>,
917 : /// If set, the compaction will only compact the LSN within this value.
918 : /// This option is only used by GC compaction. For the full explanation, see [`compaction::GcCompactJob`].
919 : pub compact_lsn_range: Option<CompactLsnRange>,
920 : /// Enable sub-compaction (split compaction job across key ranges).
921 : /// This option is only used by GC compaction.
922 : pub sub_compaction: bool,
923 : /// Set job size for the GC compaction.
924 : /// This option is only used by GC compaction.
925 : pub sub_compaction_max_job_size_mb: Option<u64>,
926 : }
927 :
928 : impl std::fmt::Debug for Timeline {
929 0 : fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
930 0 : write!(f, "Timeline<{}>", self.timeline_id)
931 0 : }
932 : }
933 :
934 : #[derive(thiserror::Error, Debug, Clone)]
935 : pub(crate) enum WaitLsnError {
936 : // Called on a timeline which is shutting down
937 : #[error("Shutdown")]
938 : Shutdown,
939 :
940 : // Called on an timeline not in active state or shutting down
941 : #[error("Bad timeline state: {0:?}")]
942 : BadState(TimelineState),
943 :
944 : // Timeout expired while waiting for LSN to catch up with goal.
945 : #[error("{0}")]
946 : Timeout(String),
947 : }
948 :
949 : // The impls below achieve cancellation mapping for errors.
950 : // Perhaps there's a way of achieving this with less cruft.
951 :
952 : impl From<CreateImageLayersError> for CompactionError {
953 0 : fn from(e: CreateImageLayersError) -> Self {
954 0 : match e {
955 0 : CreateImageLayersError::Cancelled => CompactionError::ShuttingDown,
956 0 : CreateImageLayersError::Other(e) => {
957 0 : CompactionError::Other(e.context("create image layers"))
958 : }
959 0 : _ => CompactionError::Other(e.into()),
960 : }
961 0 : }
962 : }
963 :
964 : impl From<CreateImageLayersError> for FlushLayerError {
965 0 : fn from(e: CreateImageLayersError) -> Self {
966 0 : match e {
967 0 : CreateImageLayersError::Cancelled => FlushLayerError::Cancelled,
968 0 : any => FlushLayerError::CreateImageLayersError(Arc::new(any)),
969 : }
970 0 : }
971 : }
972 :
973 : impl From<PageReconstructError> for CreateImageLayersError {
974 0 : fn from(e: PageReconstructError) -> Self {
975 0 : match e {
976 0 : PageReconstructError::Cancelled => CreateImageLayersError::Cancelled,
977 0 : _ => CreateImageLayersError::PageReconstructError(e),
978 : }
979 0 : }
980 : }
981 :
982 : impl From<super::storage_layer::errors::PutError> for CreateImageLayersError {
983 0 : fn from(e: super::storage_layer::errors::PutError) -> Self {
984 0 : if e.is_cancel() {
985 0 : CreateImageLayersError::Cancelled
986 : } else {
987 0 : CreateImageLayersError::Other(e.into_anyhow())
988 : }
989 0 : }
990 : }
991 :
992 : impl From<GetVectoredError> for CreateImageLayersError {
993 0 : fn from(e: GetVectoredError) -> Self {
994 0 : match e {
995 0 : GetVectoredError::Cancelled => CreateImageLayersError::Cancelled,
996 0 : _ => CreateImageLayersError::GetVectoredError(e),
997 : }
998 0 : }
999 : }
1000 :
1001 : impl From<GetVectoredError> for PageReconstructError {
1002 3 : fn from(e: GetVectoredError) -> Self {
1003 3 : match e {
1004 0 : GetVectoredError::Cancelled => PageReconstructError::Cancelled,
1005 0 : GetVectoredError::InvalidLsn(_) => PageReconstructError::Other(anyhow!("Invalid LSN")),
1006 0 : err @ GetVectoredError::Oversized(_) => PageReconstructError::Other(err.into()),
1007 2 : GetVectoredError::MissingKey(err) => PageReconstructError::MissingKey(err),
1008 1 : GetVectoredError::GetReadyAncestorError(err) => PageReconstructError::from(err),
1009 0 : GetVectoredError::Other(err) => PageReconstructError::Other(err),
1010 : }
1011 3 : }
1012 : }
1013 :
1014 : impl From<GetReadyAncestorError> for PageReconstructError {
1015 1 : fn from(e: GetReadyAncestorError) -> Self {
1016 : use GetReadyAncestorError::*;
1017 1 : match e {
1018 0 : AncestorLsnTimeout(wait_err) => PageReconstructError::AncestorLsnTimeout(wait_err),
1019 1 : bad_state @ BadState { .. } => PageReconstructError::Other(anyhow::anyhow!(bad_state)),
1020 0 : Cancelled => PageReconstructError::Cancelled,
1021 : }
1022 1 : }
1023 : }
1024 :
1025 : pub(crate) enum WaitLsnTimeout {
1026 : Custom(Duration),
1027 : // Use the [`PageServerConf::wait_lsn_timeout`] default
1028 : Default,
1029 : }
1030 :
1031 : pub(crate) enum WaitLsnWaiter<'a> {
1032 : Timeline(&'a Timeline),
1033 : Tenant,
1034 : PageService,
1035 : HttpEndpoint,
1036 : BaseBackupCache,
1037 : }
1038 :
1039 : /// Argument to [`Timeline::shutdown`].
1040 : #[derive(Debug, Clone, Copy)]
1041 : pub(crate) enum ShutdownMode {
1042 : /// Graceful shutdown, may do a lot of I/O as we flush any open layers to disk and then
1043 : /// also to remote storage. This method can easily take multiple seconds for a busy timeline.
1044 : ///
1045 : /// While we are flushing, we continue to accept read I/O for LSNs ingested before
1046 : /// the call to [`Timeline::shutdown`].
1047 : FreezeAndFlush,
1048 : /// Only flush the layers to the remote storage without freezing any open layers. Flush the deletion
1049 : /// queue. This is the mode used by ancestor detach and any other operations that reloads a tenant
1050 : /// but not increasing the generation number. Note that this mode cannot be used at tenant shutdown,
1051 : /// as flushing the deletion queue at that time will cause shutdown-in-progress errors.
1052 : Reload,
1053 : /// Shut down immediately, without waiting for any open layers to flush.
1054 : Hard,
1055 : }
1056 :
1057 : #[allow(clippy::large_enum_variant, reason = "TODO")]
1058 : enum ImageLayerCreationOutcome {
1059 : /// We generated an image layer
1060 : Generated {
1061 : unfinished_image_layer: ImageLayerWriter,
1062 : },
1063 : /// The key range is empty
1064 : Empty,
1065 : /// (Only used in metadata image layer creation), after reading the metadata keys, we decide to skip
1066 : /// the image layer creation.
1067 : Skip,
1068 : }
1069 :
1070 : /// Public interface functions
1071 : impl Timeline {
1072 : /// Get the LSN where this branch was created
1073 22 : pub(crate) fn get_ancestor_lsn(&self) -> Lsn {
1074 22 : self.ancestor_lsn
1075 22 : }
1076 :
1077 : /// Get the ancestor's timeline id
1078 38 : pub(crate) fn get_ancestor_timeline_id(&self) -> Option<TimelineId> {
1079 38 : self.ancestor_timeline
1080 38 : .as_ref()
1081 38 : .map(|ancestor| ancestor.timeline_id)
1082 38 : }
1083 :
1084 : /// Get the ancestor timeline
1085 1 : pub(crate) fn ancestor_timeline(&self) -> Option<&Arc<Timeline>> {
1086 1 : self.ancestor_timeline.as_ref()
1087 1 : }
1088 :
1089 : /// Get the bytes written since the PITR cutoff on this branch, and
1090 : /// whether this branch's ancestor_lsn is within its parent's PITR.
1091 0 : pub(crate) fn get_pitr_history_stats(&self) -> (u64, bool) {
1092 0 : // TODO: for backwards compatibility, we return the full history back to 0 when the PITR
1093 0 : // cutoff has not yet been initialized. This should return None instead, but this is exposed
1094 0 : // in external HTTP APIs and callers may not handle a null value.
1095 0 : let gc_info = self.gc_info.read().unwrap();
1096 0 : let history = self
1097 0 : .get_last_record_lsn()
1098 0 : .checked_sub(gc_info.cutoffs.time.unwrap_or_default())
1099 0 : .unwrap_or_default()
1100 0 : .0;
1101 0 : (history, gc_info.within_ancestor_pitr)
1102 0 : }
1103 :
1104 : /// Read timeline's GC cutoff: this is the LSN at which GC has started to happen
1105 425005 : pub(crate) fn get_applied_gc_cutoff_lsn(&self) -> RcuReadGuard<Lsn> {
1106 425005 : self.applied_gc_cutoff_lsn.read()
1107 425005 : }
1108 :
1109 : /// Read timeline's planned GC cutoff: this is the logical end of history that users are allowed
1110 : /// to read (based on configured PITR), even if physically we have more history. Returns None
1111 : /// if the PITR cutoff has not yet been initialized.
1112 0 : pub(crate) fn get_gc_cutoff_lsn(&self) -> Option<Lsn> {
1113 0 : self.gc_info.read().unwrap().cutoffs.time
1114 0 : }
1115 :
1116 : /// Look up given page version.
1117 : ///
1118 : /// If a remote layer file is needed, it is downloaded as part of this
1119 : /// call.
1120 : ///
1121 : /// This method enforces [`Self::pagestream_throttle`] internally.
1122 : ///
1123 : /// NOTE: It is considered an error to 'get' a key that doesn't exist. The
1124 : /// abstraction above this needs to store suitable metadata to track what
1125 : /// data exists with what keys, in separate metadata entries. If a
1126 : /// non-existent key is requested, we may incorrectly return a value from
1127 : /// an ancestor branch, for example, or waste a lot of cycles chasing the
1128 : /// non-existing key.
1129 : ///
1130 : /// # Cancel-Safety
1131 : ///
1132 : /// This method is cancellation-safe.
1133 : #[inline(always)]
1134 301274 : pub(crate) async fn get(
1135 301274 : &self,
1136 301274 : key: Key,
1137 301274 : lsn: Lsn,
1138 301274 : ctx: &RequestContext,
1139 301274 : ) -> Result<Bytes, PageReconstructError> {
1140 301274 : if !lsn.is_valid() {
1141 0 : return Err(PageReconstructError::Other(anyhow::anyhow!("Invalid LSN")));
1142 301274 : }
1143 301274 :
1144 301274 : // This check is debug-only because of the cost of hashing, and because it's a double-check: we
1145 301274 : // already checked the key against the shard_identity when looking up the Timeline from
1146 301274 : // page_service.
1147 301274 : debug_assert!(!self.shard_identity.is_key_disposable(&key));
1148 :
1149 301274 : let mut reconstruct_state = ValuesReconstructState::new(IoConcurrency::sequential());
1150 301274 :
1151 301274 : let query = VersionedKeySpaceQuery::uniform(KeySpace::single(key..key.next()), lsn);
1152 :
1153 301274 : let vectored_res = self
1154 301274 : .get_vectored_impl(query, &mut reconstruct_state, ctx)
1155 301274 : .await;
1156 :
1157 301274 : let key_value = vectored_res?.pop_first();
1158 301271 : match key_value {
1159 301265 : Some((got_key, value)) => {
1160 301265 : if got_key != key {
1161 0 : error!(
1162 0 : "Expected {}, but singular vectored get returned {}",
1163 : key, got_key
1164 : );
1165 0 : Err(PageReconstructError::Other(anyhow!(
1166 0 : "Singular vectored get returned wrong key"
1167 0 : )))
1168 : } else {
1169 301265 : value
1170 : }
1171 : }
1172 6 : None => Err(PageReconstructError::MissingKey(Box::new(
1173 6 : MissingKeyError {
1174 6 : keyspace: KeySpace::single(key..key.next()),
1175 6 : shard: self.shard_identity.get_shard_number(&key),
1176 6 : original_hwm_lsn: lsn,
1177 6 : ancestor_lsn: None,
1178 6 : backtrace: None,
1179 6 : read_path: None,
1180 6 : query: None,
1181 6 : },
1182 6 : ))),
1183 : }
1184 301274 : }
1185 :
1186 : pub(crate) const MAX_GET_VECTORED_KEYS: u64 = 32;
1187 : pub(crate) const LAYERS_VISITED_WARN_THRESHOLD: u32 = 100;
1188 :
1189 : /// Look up multiple page versions at a given LSN
1190 : ///
1191 : /// This naive implementation will be replaced with a more efficient one
1192 : /// which actually vectorizes the read path.
1193 10882 : pub(crate) async fn get_vectored(
1194 10882 : &self,
1195 10882 : query: VersionedKeySpaceQuery,
1196 10882 : io_concurrency: super::storage_layer::IoConcurrency,
1197 10882 : ctx: &RequestContext,
1198 10882 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1199 10882 : let total_keyspace = query.total_keyspace();
1200 10882 :
1201 10882 : let key_count = total_keyspace.total_raw_size().try_into().unwrap();
1202 10882 : if key_count > Timeline::MAX_GET_VECTORED_KEYS {
1203 0 : return Err(GetVectoredError::Oversized(key_count));
1204 10882 : }
1205 :
1206 34183 : for range in &total_keyspace.ranges {
1207 23301 : let mut key = range.start;
1208 65316 : while key != range.end {
1209 42015 : assert!(!self.shard_identity.is_key_disposable(&key));
1210 42015 : key = key.next();
1211 : }
1212 : }
1213 :
1214 10882 : trace!(
1215 0 : "get vectored query {} from task kind {:?}",
1216 0 : query,
1217 0 : ctx.task_kind(),
1218 : );
1219 :
1220 10882 : let start = crate::metrics::GET_VECTORED_LATENCY
1221 10882 : .for_task_kind(ctx.task_kind())
1222 10882 : .map(|metric| (metric, Instant::now()));
1223 :
1224 10882 : let res = self
1225 10882 : .get_vectored_impl(query, &mut ValuesReconstructState::new(io_concurrency), ctx)
1226 10882 : .await;
1227 :
1228 10882 : if let Some((metric, start)) = start {
1229 0 : let elapsed = start.elapsed();
1230 0 : metric.observe(elapsed.as_secs_f64());
1231 10882 : }
1232 :
1233 10882 : res
1234 10882 : }
1235 :
1236 : /// Scan the keyspace and return all existing key-values in the keyspace. This currently uses vectored
1237 : /// get underlying. Normal vectored get would throw an error when a key in the keyspace is not found
1238 : /// during the search, but for the scan interface, it returns all existing key-value pairs, and does
1239 : /// not expect each single key in the key space will be found. The semantics is closer to the RocksDB
1240 : /// scan iterator interface. We could optimize this interface later to avoid some checks in the vectored
1241 : /// get path to maintain and split the probing and to-be-probe keyspace. We also need to ensure that
1242 : /// the scan operation will not cause OOM in the future.
1243 8 : pub(crate) async fn scan(
1244 8 : &self,
1245 8 : keyspace: KeySpace,
1246 8 : lsn: Lsn,
1247 8 : ctx: &RequestContext,
1248 8 : io_concurrency: super::storage_layer::IoConcurrency,
1249 8 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1250 8 : if !lsn.is_valid() {
1251 0 : return Err(GetVectoredError::InvalidLsn(lsn));
1252 8 : }
1253 8 :
1254 8 : trace!(
1255 0 : "key-value scan request for {:?}@{} from task kind {:?}",
1256 0 : keyspace,
1257 0 : lsn,
1258 0 : ctx.task_kind()
1259 : );
1260 :
1261 : // We should generalize this into Keyspace::contains in the future.
1262 16 : for range in &keyspace.ranges {
1263 8 : if range.start.field1 < METADATA_KEY_BEGIN_PREFIX
1264 8 : || range.end.field1 > METADATA_KEY_END_PREFIX
1265 : {
1266 0 : return Err(GetVectoredError::Other(anyhow::anyhow!(
1267 0 : "only metadata keyspace can be scanned"
1268 0 : )));
1269 8 : }
1270 : }
1271 :
1272 8 : let start = crate::metrics::SCAN_LATENCY
1273 8 : .for_task_kind(ctx.task_kind())
1274 8 : .map(ScanLatencyOngoingRecording::start_recording);
1275 8 :
1276 8 : let query = VersionedKeySpaceQuery::uniform(keyspace, lsn);
1277 :
1278 8 : let vectored_res = self
1279 8 : .get_vectored_impl(query, &mut ValuesReconstructState::new(io_concurrency), ctx)
1280 8 : .await;
1281 :
1282 8 : if let Some(recording) = start {
1283 0 : recording.observe();
1284 8 : }
1285 :
1286 8 : vectored_res
1287 8 : }
1288 :
1289 312348 : pub(super) async fn get_vectored_impl(
1290 312348 : &self,
1291 312348 : query: VersionedKeySpaceQuery,
1292 312348 : reconstruct_state: &mut ValuesReconstructState,
1293 312348 : ctx: &RequestContext,
1294 312348 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1295 312348 : if query.is_empty() {
1296 0 : return Ok(BTreeMap::default());
1297 312348 : }
1298 :
1299 312348 : let read_path = if self.conf.enable_read_path_debugging || ctx.read_path_debug() {
1300 : Some(ReadPath::new(
1301 312348 : query.total_keyspace(),
1302 312348 : query.high_watermark_lsn()?,
1303 : ))
1304 : } else {
1305 0 : None
1306 : };
1307 :
1308 312348 : reconstruct_state.read_path = read_path;
1309 :
1310 312348 : let redo_attempt_type = if ctx.task_kind() == TaskKind::Compaction {
1311 0 : RedoAttemptType::LegacyCompaction
1312 : } else {
1313 312348 : RedoAttemptType::ReadPage
1314 : };
1315 :
1316 312348 : let traversal_res: Result<(), _> = {
1317 312348 : let ctx = RequestContextBuilder::from(ctx)
1318 312348 : .perf_span(|crnt_perf_span| {
1319 0 : info_span!(
1320 : target: PERF_TRACE_TARGET,
1321 0 : parent: crnt_perf_span,
1322 : "PLAN_IO",
1323 : )
1324 312348 : })
1325 312348 : .attached_child();
1326 312348 :
1327 312348 : self.get_vectored_reconstruct_data(query.clone(), reconstruct_state, &ctx)
1328 312348 : .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone())
1329 312348 : .await
1330 : };
1331 :
1332 312348 : if let Err(err) = traversal_res {
1333 : // Wait for all the spawned IOs to complete.
1334 : // See comments on `spawn_io` inside `storage_layer` for more details.
1335 8 : let mut collect_futs = std::mem::take(&mut reconstruct_state.keys)
1336 8 : .into_values()
1337 8 : .map(|state| state.collect_pending_ios())
1338 8 : .collect::<FuturesUnordered<_>>();
1339 8 : while collect_futs.next().await.is_some() {}
1340 :
1341 : // Enrich the missing key error with the original query.
1342 8 : if let GetVectoredError::MissingKey(mut missing_err) = err {
1343 7 : missing_err.enrich(query.clone());
1344 7 : return Err(GetVectoredError::MissingKey(missing_err));
1345 1 : }
1346 1 :
1347 1 : return Err(err);
1348 312340 : };
1349 312340 :
1350 312340 : let layers_visited = reconstruct_state.get_layers_visited();
1351 312340 :
1352 312340 : let ctx = RequestContextBuilder::from(ctx)
1353 312340 : .perf_span(|crnt_perf_span| {
1354 0 : info_span!(
1355 : target: PERF_TRACE_TARGET,
1356 0 : parent: crnt_perf_span,
1357 : "RECONSTRUCT",
1358 : )
1359 312340 : })
1360 312340 : .attached_child();
1361 312340 :
1362 312340 : let futs = FuturesUnordered::new();
1363 363519 : for (key, state) in std::mem::take(&mut reconstruct_state.keys) {
1364 363519 : let req_lsn_for_key = query.map_key_to_lsn(&key);
1365 363519 :
1366 363519 : futs.push({
1367 363519 : let walredo_self = self.myself.upgrade().expect("&self method holds the arc");
1368 363519 : let ctx = RequestContextBuilder::from(&ctx)
1369 363519 : .perf_span(|crnt_perf_span| {
1370 0 : info_span!(
1371 : target: PERF_TRACE_TARGET,
1372 0 : parent: crnt_perf_span,
1373 : "RECONSTRUCT_KEY",
1374 : key = %key,
1375 : )
1376 363519 : })
1377 363519 : .attached_child();
1378 363519 :
1379 363519 : async move {
1380 363519 : assert_eq!(state.situation, ValueReconstructSituation::Complete);
1381 :
1382 363519 : let res = state
1383 363519 : .collect_pending_ios()
1384 363519 : .maybe_perf_instrument(&ctx, |crnt_perf_span| {
1385 0 : info_span!(
1386 : target: PERF_TRACE_TARGET,
1387 0 : parent: crnt_perf_span,
1388 : "WAIT_FOR_IO_COMPLETIONS",
1389 : )
1390 363519 : })
1391 363519 : .await;
1392 :
1393 363519 : let converted = match res {
1394 363519 : Ok(ok) => ok,
1395 0 : Err(err) => {
1396 0 : return (key, Err(err));
1397 : }
1398 : };
1399 363519 : DELTAS_PER_READ_GLOBAL.observe(converted.num_deltas() as f64);
1400 363519 :
1401 363519 : // The walredo module expects the records to be descending in terms of Lsn.
1402 363519 : // And we submit the IOs in that order, so, there shuold be no need to sort here.
1403 363519 : debug_assert!(
1404 363519 : converted
1405 363519 : .records
1406 1403254 : .is_sorted_by_key(|(lsn, _)| std::cmp::Reverse(*lsn)),
1407 0 : "{converted:?}"
1408 : );
1409 :
1410 363519 : let walredo_deltas = converted.num_deltas();
1411 363519 : let walredo_res = walredo_self
1412 363519 : .reconstruct_value(key, req_lsn_for_key, converted, redo_attempt_type)
1413 363519 : .maybe_perf_instrument(&ctx, |crnt_perf_span| {
1414 0 : info_span!(
1415 : target: PERF_TRACE_TARGET,
1416 0 : parent: crnt_perf_span,
1417 : "WALREDO",
1418 : deltas = %walredo_deltas,
1419 : )
1420 363519 : })
1421 363519 : .await;
1422 :
1423 363519 : (key, walredo_res)
1424 363519 : }
1425 363519 : });
1426 363519 : }
1427 :
1428 312340 : let results = futs
1429 312340 : .collect::<BTreeMap<Key, Result<Bytes, PageReconstructError>>>()
1430 312340 : .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone())
1431 312340 : .await;
1432 :
1433 : // For aux file keys (v1 or v2) the vectored read path does not return an error
1434 : // when they're missing. Instead they are omitted from the resulting btree
1435 : // (this is a requirement, not a bug). Skip updating the metric in these cases
1436 : // to avoid infinite results.
1437 312340 : if !results.is_empty() {
1438 312213 : if layers_visited >= Self::LAYERS_VISITED_WARN_THRESHOLD {
1439 0 : let total_keyspace = query.total_keyspace();
1440 0 : let max_request_lsn = query.high_watermark_lsn().expect("Validated previously");
1441 0 :
1442 0 : static LOG_PACER: Lazy<Mutex<RateLimit>> =
1443 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(60))));
1444 0 : LOG_PACER.lock().unwrap().call(|| {
1445 0 : let num_keys = total_keyspace.total_raw_size();
1446 0 : let num_pages = results.len();
1447 0 : tracing::info!(
1448 0 : shard_id = %self.tenant_shard_id.shard_slug(),
1449 0 : lsn = %max_request_lsn,
1450 0 : "Vectored read for {total_keyspace} visited {layers_visited} layers. Returned {num_pages}/{num_keys} pages.",
1451 : );
1452 0 : });
1453 312213 : }
1454 :
1455 : // Records the number of layers visited in a few different ways:
1456 : //
1457 : // * LAYERS_PER_READ: all layers count towards every read in the batch, because each
1458 : // layer directly affects its observed latency.
1459 : //
1460 : // * LAYERS_PER_READ_BATCH: all layers count towards each batch, to get the per-batch
1461 : // layer visits and access cost.
1462 : //
1463 : // * LAYERS_PER_READ_AMORTIZED: the average layer count per read, to get the amortized
1464 : // read amplification after batching.
1465 312213 : let layers_visited = layers_visited as f64;
1466 312213 : let avg_layers_visited = layers_visited / results.len() as f64;
1467 312213 : LAYERS_PER_READ_BATCH_GLOBAL.observe(layers_visited);
1468 675732 : for _ in &results {
1469 363519 : self.metrics.layers_per_read.observe(layers_visited);
1470 363519 : LAYERS_PER_READ_GLOBAL.observe(layers_visited);
1471 363519 : LAYERS_PER_READ_AMORTIZED_GLOBAL.observe(avg_layers_visited);
1472 363519 : }
1473 127 : }
1474 :
1475 312340 : Ok(results)
1476 312348 : }
1477 :
1478 : /// Get last or prev record separately. Same as get_last_record_rlsn().last/prev.
1479 137284 : pub(crate) fn get_last_record_lsn(&self) -> Lsn {
1480 137284 : self.last_record_lsn.load().last
1481 137284 : }
1482 :
1483 0 : pub(crate) fn get_prev_record_lsn(&self) -> Lsn {
1484 0 : self.last_record_lsn.load().prev
1485 0 : }
1486 :
1487 : /// Atomically get both last and prev.
1488 117 : pub(crate) fn get_last_record_rlsn(&self) -> RecordLsn {
1489 117 : self.last_record_lsn.load()
1490 117 : }
1491 :
1492 : /// Subscribe to callers of wait_lsn(). The value of the channel is None if there are no
1493 : /// wait_lsn() calls in progress, and Some(Lsn) if there is an active waiter for wait_lsn().
1494 0 : pub(crate) fn subscribe_for_wait_lsn_updates(&self) -> watch::Receiver<Option<Lsn>> {
1495 0 : self.last_record_lsn.status_receiver()
1496 0 : }
1497 :
1498 231 : pub(crate) fn get_disk_consistent_lsn(&self) -> Lsn {
1499 231 : self.disk_consistent_lsn.load()
1500 231 : }
1501 :
1502 : /// remote_consistent_lsn from the perspective of the tenant's current generation,
1503 : /// not validated with control plane yet.
1504 : /// See [`Self::get_remote_consistent_lsn_visible`].
1505 0 : pub(crate) fn get_remote_consistent_lsn_projected(&self) -> Option<Lsn> {
1506 0 : self.remote_client.remote_consistent_lsn_projected()
1507 0 : }
1508 :
1509 : /// remote_consistent_lsn which the tenant is guaranteed not to go backward from,
1510 : /// i.e. a value of remote_consistent_lsn_projected which has undergone
1511 : /// generation validation in the deletion queue.
1512 0 : pub(crate) fn get_remote_consistent_lsn_visible(&self) -> Option<Lsn> {
1513 0 : self.remote_client.remote_consistent_lsn_visible()
1514 0 : }
1515 :
1516 : /// The sum of the file size of all historic layers in the layer map.
1517 : /// This method makes no distinction between local and remote layers.
1518 : /// Hence, the result **does not represent local filesystem usage**.
1519 0 : pub(crate) async fn layer_size_sum(&self) -> u64 {
1520 0 : let guard = self.layers.read().await;
1521 0 : guard.layer_size_sum()
1522 0 : }
1523 :
1524 0 : pub(crate) fn resident_physical_size(&self) -> u64 {
1525 0 : self.metrics.resident_physical_size_get()
1526 0 : }
1527 :
1528 0 : pub(crate) fn get_directory_metrics(&self) -> [u64; DirectoryKind::KINDS_NUM] {
1529 0 : array::from_fn(|idx| self.directory_metrics[idx].load(AtomicOrdering::Relaxed))
1530 0 : }
1531 :
1532 : ///
1533 : /// Wait until WAL has been received and processed up to this LSN.
1534 : ///
1535 : /// You should call this before any of the other get_* or list_* functions. Calling
1536 : /// those functions with an LSN that has been processed yet is an error.
1537 : ///
1538 112494 : pub(crate) async fn wait_lsn(
1539 112494 : &self,
1540 112494 : lsn: Lsn,
1541 112494 : who_is_waiting: WaitLsnWaiter<'_>,
1542 112494 : timeout: WaitLsnTimeout,
1543 112494 : ctx: &RequestContext, /* Prepare for use by cancellation */
1544 112494 : ) -> Result<(), WaitLsnError> {
1545 112494 : let state = self.current_state();
1546 112494 : if self.cancel.is_cancelled() || matches!(state, TimelineState::Stopping) {
1547 0 : return Err(WaitLsnError::Shutdown);
1548 112494 : } else if !matches!(state, TimelineState::Active) {
1549 0 : return Err(WaitLsnError::BadState(state));
1550 112494 : }
1551 112494 :
1552 112494 : if cfg!(debug_assertions) {
1553 112494 : match ctx.task_kind() {
1554 : TaskKind::WalReceiverManager
1555 : | TaskKind::WalReceiverConnectionHandler
1556 : | TaskKind::WalReceiverConnectionPoller => {
1557 0 : let is_myself = match who_is_waiting {
1558 0 : WaitLsnWaiter::Timeline(waiter) => {
1559 0 : Weak::ptr_eq(&waiter.myself, &self.myself)
1560 : }
1561 : WaitLsnWaiter::Tenant
1562 : | WaitLsnWaiter::PageService
1563 : | WaitLsnWaiter::HttpEndpoint
1564 0 : | WaitLsnWaiter::BaseBackupCache => unreachable!(
1565 0 : "tenant or page_service context are not expected to have task kind {:?}",
1566 0 : ctx.task_kind()
1567 0 : ),
1568 : };
1569 0 : if is_myself {
1570 0 : if let Err(current) = self.last_record_lsn.would_wait_for(lsn) {
1571 : // walingest is the only one that can advance last_record_lsn; it should make sure to never reach here
1572 0 : panic!(
1573 0 : "this timeline's walingest task is calling wait_lsn({lsn}) but we only have last_record_lsn={current}; would deadlock"
1574 0 : );
1575 0 : }
1576 0 : } else {
1577 0 : // if another timeline's is waiting for us, there's no deadlock risk because
1578 0 : // our walreceiver task can make progress independent of theirs
1579 0 : }
1580 : }
1581 112494 : _ => {}
1582 : }
1583 0 : }
1584 :
1585 112494 : let timeout = match timeout {
1586 0 : WaitLsnTimeout::Custom(t) => t,
1587 112494 : WaitLsnTimeout::Default => self.conf.wait_lsn_timeout,
1588 : };
1589 :
1590 112494 : let timer = crate::metrics::WAIT_LSN_TIME.start_timer();
1591 112494 : let start_finish_counterpair_guard = self.metrics.wait_lsn_start_finish_counterpair.guard();
1592 112494 :
1593 112494 : let wait_for_timeout = self.last_record_lsn.wait_for_timeout(lsn, timeout);
1594 112494 : let wait_for_timeout = std::pin::pin!(wait_for_timeout);
1595 112494 : // Use threshold of 1 because even 1 second of wait for ingest is very much abnormal.
1596 112494 : let log_slow_threshold = Duration::from_secs(1);
1597 112494 : // Use period of 10 to avoid flooding logs during an outage that affects all timelines.
1598 112494 : let log_slow_period = Duration::from_secs(10);
1599 112494 : let mut logging_permit = None;
1600 112494 : let wait_for_timeout = monitor_slow_future(
1601 112494 : log_slow_threshold,
1602 112494 : log_slow_period,
1603 112494 : wait_for_timeout,
1604 112494 : |MonitorSlowFutureCallback {
1605 : ready,
1606 : is_slow,
1607 : elapsed_total,
1608 : elapsed_since_last_callback,
1609 112494 : }| {
1610 112494 : self.metrics
1611 112494 : .wait_lsn_in_progress_micros
1612 112494 : .inc_by(u64::try_from(elapsed_since_last_callback.as_micros()).unwrap());
1613 112494 : if !is_slow {
1614 112494 : return;
1615 0 : }
1616 0 : // It's slow, see if we should log it.
1617 0 : // (We limit the logging to one per invocation per timeline to avoid excessive
1618 0 : // logging during an extended broker / networking outage that affects all timelines.)
1619 0 : if logging_permit.is_none() {
1620 0 : logging_permit = self.wait_lsn_log_slow.try_acquire().ok();
1621 0 : }
1622 0 : if logging_permit.is_none() {
1623 0 : return;
1624 0 : }
1625 0 : // We log it.
1626 0 : if ready {
1627 0 : info!(
1628 0 : "slow wait_lsn completed after {:.3}s",
1629 0 : elapsed_total.as_secs_f64()
1630 : );
1631 : } else {
1632 0 : info!(
1633 0 : "slow wait_lsn still running for {:.3}s",
1634 0 : elapsed_total.as_secs_f64()
1635 : );
1636 : }
1637 112494 : },
1638 112494 : );
1639 112494 : let res = wait_for_timeout.await;
1640 : // don't count the time spent waiting for lock below, and also in walreceiver.status(), towards the wait_lsn_time_histo
1641 112494 : drop(logging_permit);
1642 112494 : drop(start_finish_counterpair_guard);
1643 112494 : drop(timer);
1644 112494 : match res {
1645 112494 : Ok(()) => Ok(()),
1646 0 : Err(e) => {
1647 : use utils::seqwait::SeqWaitError::*;
1648 0 : match e {
1649 0 : Shutdown => Err(WaitLsnError::Shutdown),
1650 : Timeout => {
1651 0 : let walreceiver_status = self.walreceiver_status();
1652 0 : Err(WaitLsnError::Timeout(format!(
1653 0 : "Timed out while waiting for WAL record at LSN {} to arrive, last_record_lsn {} disk consistent LSN={}, WalReceiver status: {}",
1654 0 : lsn,
1655 0 : self.get_last_record_lsn(),
1656 0 : self.get_disk_consistent_lsn(),
1657 0 : walreceiver_status,
1658 0 : )))
1659 : }
1660 : }
1661 : }
1662 : }
1663 112494 : }
1664 :
1665 0 : pub(crate) fn walreceiver_status(&self) -> String {
1666 0 : match &*self.walreceiver.lock().unwrap() {
1667 0 : None => "stopping or stopped".to_string(),
1668 0 : Some(walreceiver) => match walreceiver.status() {
1669 0 : Some(status) => status.to_human_readable_string(),
1670 0 : None => "Not active".to_string(),
1671 : },
1672 : }
1673 0 : }
1674 :
1675 : /// Check that it is valid to request operations with that lsn.
1676 119 : pub(crate) fn check_lsn_is_in_scope(
1677 119 : &self,
1678 119 : lsn: Lsn,
1679 119 : latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
1680 119 : ) -> anyhow::Result<()> {
1681 119 : ensure!(
1682 119 : lsn >= **latest_gc_cutoff_lsn,
1683 2 : "LSN {} is earlier than latest GC cutoff {} (we might've already garbage collected needed data)",
1684 2 : lsn,
1685 2 : **latest_gc_cutoff_lsn,
1686 : );
1687 117 : Ok(())
1688 119 : }
1689 :
1690 : /// Initializes an LSN lease. The function will return an error if the requested LSN is less than the `latest_gc_cutoff_lsn`.
1691 5 : pub(crate) fn init_lsn_lease(
1692 5 : &self,
1693 5 : lsn: Lsn,
1694 5 : length: Duration,
1695 5 : ctx: &RequestContext,
1696 5 : ) -> anyhow::Result<LsnLease> {
1697 5 : self.make_lsn_lease(lsn, length, true, ctx)
1698 5 : }
1699 :
1700 : /// Renews a lease at a particular LSN. The requested LSN is not validated against the `latest_gc_cutoff_lsn` when we are in the grace period.
1701 2 : pub(crate) fn renew_lsn_lease(
1702 2 : &self,
1703 2 : lsn: Lsn,
1704 2 : length: Duration,
1705 2 : ctx: &RequestContext,
1706 2 : ) -> anyhow::Result<LsnLease> {
1707 2 : self.make_lsn_lease(lsn, length, false, ctx)
1708 2 : }
1709 :
1710 : /// Obtains a temporary lease blocking garbage collection for the given LSN.
1711 : ///
1712 : /// If we are in `AttachedSingle` mode and is not blocked by the lsn lease deadline, this function will error
1713 : /// if the requesting LSN is less than the `latest_gc_cutoff_lsn` and there is no existing request present.
1714 : ///
1715 : /// If there is an existing lease in the map, the lease will be renewed only if the request extends the lease.
1716 : /// The returned lease is therefore the maximum between the existing lease and the requesting lease.
1717 7 : fn make_lsn_lease(
1718 7 : &self,
1719 7 : lsn: Lsn,
1720 7 : length: Duration,
1721 7 : init: bool,
1722 7 : _ctx: &RequestContext,
1723 7 : ) -> anyhow::Result<LsnLease> {
1724 6 : let lease = {
1725 : // Normalize the requested LSN to be aligned, and move to the first record
1726 : // if it points to the beginning of the page (header).
1727 7 : let lsn = xlog_utils::normalize_lsn(lsn, WAL_SEGMENT_SIZE);
1728 7 :
1729 7 : let mut gc_info = self.gc_info.write().unwrap();
1730 7 : let planned_cutoff = gc_info.min_cutoff();
1731 7 :
1732 7 : let valid_until = SystemTime::now() + length;
1733 7 :
1734 7 : let entry = gc_info.leases.entry(lsn);
1735 7 :
1736 7 : match entry {
1737 3 : Entry::Occupied(mut occupied) => {
1738 3 : let existing_lease = occupied.get_mut();
1739 3 : if valid_until > existing_lease.valid_until {
1740 1 : existing_lease.valid_until = valid_until;
1741 1 : let dt: DateTime<Utc> = valid_until.into();
1742 1 : info!("lease extended to {}", dt);
1743 : } else {
1744 2 : let dt: DateTime<Utc> = existing_lease.valid_until.into();
1745 2 : info!("existing lease covers greater length, valid until {}", dt);
1746 : }
1747 :
1748 3 : existing_lease.clone()
1749 : }
1750 4 : Entry::Vacant(vacant) => {
1751 : // Reject already GC-ed LSN if we are in AttachedSingle and
1752 : // not blocked by the lsn lease deadline.
1753 4 : let validate = {
1754 4 : let conf = self.tenant_conf.load();
1755 4 : conf.location.attach_mode == AttachmentMode::Single
1756 4 : && !conf.is_gc_blocked_by_lsn_lease_deadline()
1757 : };
1758 :
1759 4 : if init || validate {
1760 4 : let latest_gc_cutoff_lsn = self.get_applied_gc_cutoff_lsn();
1761 4 : if lsn < *latest_gc_cutoff_lsn {
1762 1 : bail!(
1763 1 : "tried to request an lsn lease for an lsn below the latest gc cutoff. requested at {} gc cutoff {}",
1764 1 : lsn,
1765 1 : *latest_gc_cutoff_lsn
1766 1 : );
1767 3 : }
1768 3 : if lsn < planned_cutoff {
1769 0 : bail!(
1770 0 : "tried to request an lsn lease for an lsn below the planned gc cutoff. requested at {} planned gc cutoff {}",
1771 0 : lsn,
1772 0 : planned_cutoff
1773 0 : );
1774 3 : }
1775 0 : }
1776 :
1777 3 : let dt: DateTime<Utc> = valid_until.into();
1778 3 : info!("lease created, valid until {}", dt);
1779 3 : vacant.insert(LsnLease { valid_until }).clone()
1780 : }
1781 : }
1782 : };
1783 :
1784 6 : Ok(lease)
1785 7 : }
1786 :
1787 : /// Freeze the current open in-memory layer. It will be written to disk on next iteration.
1788 : /// Returns the flush request ID which can be awaited with wait_flush_completion().
1789 : #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
1790 : pub(crate) async fn freeze(&self) -> Result<u64, FlushLayerError> {
1791 : self.freeze0().await
1792 : }
1793 :
1794 : /// Freeze and flush the open in-memory layer, waiting for it to be written to disk.
1795 : #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
1796 : pub(crate) async fn freeze_and_flush(&self) -> Result<(), FlushLayerError> {
1797 : self.freeze_and_flush0().await
1798 : }
1799 :
1800 : /// Freeze the current open in-memory layer. It will be written to disk on next iteration.
1801 : /// Returns the flush request ID which can be awaited with wait_flush_completion().
1802 567 : pub(crate) async fn freeze0(&self) -> Result<u64, FlushLayerError> {
1803 567 : let mut g = self.write_lock.lock().await;
1804 567 : let to_lsn = self.get_last_record_lsn();
1805 567 : self.freeze_inmem_layer_at(to_lsn, &mut g).await
1806 567 : }
1807 :
1808 : // This exists to provide a non-span creating version of `freeze_and_flush` we can call without
1809 : // polluting the span hierarchy.
1810 567 : pub(crate) async fn freeze_and_flush0(&self) -> Result<(), FlushLayerError> {
1811 567 : let token = self.freeze0().await?;
1812 567 : self.wait_flush_completion(token).await
1813 567 : }
1814 :
1815 : // Check if an open ephemeral layer should be closed: this provides
1816 : // background enforcement of checkpoint interval if there is no active WAL receiver, to avoid keeping
1817 : // an ephemeral layer open forever when idle. It also freezes layers if the global limit on
1818 : // ephemeral layer bytes has been breached.
1819 0 : pub(super) async fn maybe_freeze_ephemeral_layer(&self) {
1820 0 : let Ok(mut write_guard) = self.write_lock.try_lock() else {
1821 : // If the write lock is held, there is an active wal receiver: rolling open layers
1822 : // is their responsibility while they hold this lock.
1823 0 : return;
1824 : };
1825 :
1826 : // FIXME: why not early exit? because before #7927 the state would had been cleared every
1827 : // time, and this was missed.
1828 : // if write_guard.is_none() { return; }
1829 :
1830 0 : let Ok(layers_guard) = self.layers.try_read() else {
1831 : // Don't block if the layer lock is busy
1832 0 : return;
1833 : };
1834 :
1835 0 : let Ok(lm) = layers_guard.layer_map() else {
1836 0 : return;
1837 : };
1838 :
1839 0 : let Some(open_layer) = &lm.open_layer else {
1840 : // If there is no open layer, we have no layer freezing to do. However, we might need to generate
1841 : // some updates to disk_consistent_lsn and remote_consistent_lsn, in case we ingested some WAL regions
1842 : // that didn't result in writes to this shard.
1843 :
1844 : // Must not hold the layers lock while waiting for a flush.
1845 0 : drop(layers_guard);
1846 0 :
1847 0 : let last_record_lsn = self.get_last_record_lsn();
1848 0 : let disk_consistent_lsn = self.get_disk_consistent_lsn();
1849 0 : if last_record_lsn > disk_consistent_lsn {
1850 : // We have no open layer, but disk_consistent_lsn is behind the last record: this indicates
1851 : // we are a sharded tenant and have skipped some WAL
1852 0 : let last_freeze_ts = *self.last_freeze_ts.read().unwrap();
1853 0 : if last_freeze_ts.elapsed() >= self.get_checkpoint_timeout() {
1854 : // Only do this if have been layer-less longer than get_checkpoint_timeout, so that a shard
1855 : // without any data ingested (yet) doesn't write a remote index as soon as it
1856 : // sees its LSN advance: we only do this if we've been layer-less
1857 : // for some time.
1858 0 : tracing::debug!(
1859 0 : "Advancing disk_consistent_lsn past WAL ingest gap {} -> {}",
1860 : disk_consistent_lsn,
1861 : last_record_lsn
1862 : );
1863 :
1864 : // The flush loop will update remote consistent LSN as well as disk consistent LSN.
1865 : // We know there is no open layer, so we can request freezing without actually
1866 : // freezing anything. This is true even if we have dropped the layers_guard, we
1867 : // still hold the write_guard.
1868 0 : let _ = async {
1869 0 : let token = self
1870 0 : .freeze_inmem_layer_at(last_record_lsn, &mut write_guard)
1871 0 : .await?;
1872 0 : self.wait_flush_completion(token).await
1873 0 : }
1874 0 : .await;
1875 0 : }
1876 0 : }
1877 :
1878 0 : return;
1879 : };
1880 :
1881 0 : let Some(current_size) = open_layer.try_len() else {
1882 : // Unexpected: since we hold the write guard, nobody else should be writing to this layer, so
1883 : // read lock to get size should always succeed.
1884 0 : tracing::warn!("Lock conflict while reading size of open layer");
1885 0 : return;
1886 : };
1887 :
1888 0 : let current_lsn = self.get_last_record_lsn();
1889 :
1890 0 : let checkpoint_distance_override = open_layer.tick().await;
1891 :
1892 0 : if let Some(size_override) = checkpoint_distance_override {
1893 0 : if current_size > size_override {
1894 : // This is not harmful, but it only happens in relatively rare cases where
1895 : // time-based checkpoints are not happening fast enough to keep the amount of
1896 : // ephemeral data within configured limits. It's a sign of stress on the system.
1897 0 : tracing::info!(
1898 0 : "Early-rolling open layer at size {current_size} (limit {size_override}) due to dirty data pressure"
1899 : );
1900 0 : }
1901 0 : }
1902 :
1903 0 : let checkpoint_distance =
1904 0 : checkpoint_distance_override.unwrap_or(self.get_checkpoint_distance());
1905 0 :
1906 0 : if self.should_roll(
1907 0 : current_size,
1908 0 : current_size,
1909 0 : checkpoint_distance,
1910 0 : self.get_last_record_lsn(),
1911 0 : self.last_freeze_at.load(),
1912 0 : open_layer.get_opened_at(),
1913 0 : ) {
1914 0 : match open_layer.info() {
1915 0 : InMemoryLayerInfo::Frozen { lsn_start, lsn_end } => {
1916 0 : // We may reach this point if the layer was already frozen by not yet flushed: flushing
1917 0 : // happens asynchronously in the background.
1918 0 : tracing::debug!(
1919 0 : "Not freezing open layer, it's already frozen ({lsn_start}..{lsn_end})"
1920 : );
1921 : }
1922 : InMemoryLayerInfo::Open { .. } => {
1923 : // Upgrade to a write lock and freeze the layer
1924 0 : drop(layers_guard);
1925 0 : let res = self
1926 0 : .freeze_inmem_layer_at(current_lsn, &mut write_guard)
1927 0 : .await;
1928 :
1929 0 : if let Err(e) = res {
1930 0 : tracing::info!(
1931 0 : "failed to flush frozen layer after background freeze: {e:#}"
1932 : );
1933 0 : }
1934 : }
1935 : }
1936 0 : }
1937 0 : }
1938 :
1939 : /// Checks if the internal state of the timeline is consistent with it being able to be offloaded.
1940 : ///
1941 : /// This is neccessary but not sufficient for offloading of the timeline as it might have
1942 : /// child timelines that are not offloaded yet.
1943 0 : pub(crate) fn can_offload(&self) -> (bool, &'static str) {
1944 0 : if self.remote_client.is_archived() != Some(true) {
1945 0 : return (false, "the timeline is not archived");
1946 0 : }
1947 0 : if !self.remote_client.no_pending_work() {
1948 : // if the remote client is still processing some work, we can't offload
1949 0 : return (false, "the upload queue is not drained yet");
1950 0 : }
1951 0 :
1952 0 : (true, "ok")
1953 0 : }
1954 :
1955 : /// Outermost timeline compaction operation; downloads needed layers. Returns whether we have pending
1956 : /// compaction tasks.
1957 182 : pub(crate) async fn compact(
1958 182 : self: &Arc<Self>,
1959 182 : cancel: &CancellationToken,
1960 182 : flags: EnumSet<CompactFlags>,
1961 182 : ctx: &RequestContext,
1962 182 : ) -> Result<CompactionOutcome, CompactionError> {
1963 182 : let res = self
1964 182 : .compact_with_options(
1965 182 : cancel,
1966 182 : CompactOptions {
1967 182 : flags,
1968 182 : compact_key_range: None,
1969 182 : compact_lsn_range: None,
1970 182 : sub_compaction: false,
1971 182 : sub_compaction_max_job_size_mb: None,
1972 182 : },
1973 182 : ctx,
1974 182 : )
1975 182 : .await;
1976 182 : if let Err(err) = &res {
1977 0 : log_compaction_error(err, None, cancel.is_cancelled(), false);
1978 182 : }
1979 182 : res
1980 182 : }
1981 :
1982 : /// Outermost timeline compaction operation; downloads needed layers.
1983 : ///
1984 : /// NB: the cancellation token is usually from a background task, but can also come from a
1985 : /// request task.
1986 182 : pub(crate) async fn compact_with_options(
1987 182 : self: &Arc<Self>,
1988 182 : cancel: &CancellationToken,
1989 182 : options: CompactOptions,
1990 182 : ctx: &RequestContext,
1991 182 : ) -> Result<CompactionOutcome, CompactionError> {
1992 182 : // Acquire the compaction lock and task semaphore.
1993 182 : //
1994 182 : // L0-only compaction uses a separate semaphore (if enabled) to make sure it isn't starved
1995 182 : // out by other background tasks (including image compaction). We request this via
1996 182 : // `BackgroundLoopKind::L0Compaction`.
1997 182 : //
1998 182 : // Yield for pending L0 compaction while waiting for the semaphore.
1999 182 : let is_l0_only = options.flags.contains(CompactFlags::OnlyL0Compaction);
2000 182 : let semaphore_kind = match is_l0_only && self.get_compaction_l0_semaphore() {
2001 0 : true => BackgroundLoopKind::L0Compaction,
2002 182 : false => BackgroundLoopKind::Compaction,
2003 : };
2004 182 : let yield_for_l0 = options.flags.contains(CompactFlags::YieldForL0);
2005 182 : if yield_for_l0 {
2006 : // If this is an L0 pass, it doesn't make sense to yield for L0.
2007 0 : debug_assert!(!is_l0_only, "YieldForL0 during L0 pass");
2008 : // If `compaction_l0_first` is disabled, there's no point yielding.
2009 0 : debug_assert!(self.get_compaction_l0_first(), "YieldForL0 without L0 pass");
2010 182 : }
2011 :
2012 182 : let acquire = async move {
2013 182 : let guard = self.compaction_lock.lock().await;
2014 182 : let permit = super::tasks::acquire_concurrency_permit(semaphore_kind, ctx).await;
2015 182 : (guard, permit)
2016 182 : };
2017 :
2018 182 : let (_guard, _permit) = tokio::select! {
2019 182 : (guard, permit) = acquire => (guard, permit),
2020 182 : _ = self.l0_compaction_trigger.notified(), if yield_for_l0 => {
2021 0 : return Ok(CompactionOutcome::YieldForL0);
2022 : }
2023 182 : _ = self.cancel.cancelled() => return Ok(CompactionOutcome::Skipped),
2024 182 : _ = cancel.cancelled() => return Ok(CompactionOutcome::Skipped),
2025 : };
2026 :
2027 182 : let last_record_lsn = self.get_last_record_lsn();
2028 182 :
2029 182 : // Last record Lsn could be zero in case the timeline was just created
2030 182 : if !last_record_lsn.is_valid() {
2031 0 : warn!(
2032 0 : "Skipping compaction for potentially just initialized timeline, it has invalid last record lsn: {last_record_lsn}"
2033 : );
2034 0 : return Ok(CompactionOutcome::Skipped);
2035 182 : }
2036 :
2037 182 : let result = match self.get_compaction_algorithm_settings().kind {
2038 : CompactionAlgorithm::Tiered => {
2039 0 : self.compact_tiered(cancel, ctx).await?;
2040 0 : Ok(CompactionOutcome::Done)
2041 : }
2042 182 : CompactionAlgorithm::Legacy => self.compact_legacy(cancel, options, ctx).await,
2043 : };
2044 :
2045 : // Signal compaction failure to avoid L0 flush stalls when it's broken.
2046 0 : match &result {
2047 182 : Ok(_) => self.compaction_failed.store(false, AtomicOrdering::Relaxed),
2048 0 : Err(e) if e.is_cancel() => {}
2049 0 : Err(CompactionError::ShuttingDown) => {
2050 0 : // Covered by the `Err(e) if e.is_cancel()` branch.
2051 0 : }
2052 0 : Err(CompactionError::AlreadyRunning(_)) => {
2053 0 : // Covered by the `Err(e) if e.is_cancel()` branch.
2054 0 : }
2055 : Err(CompactionError::Other(_)) => {
2056 0 : self.compaction_failed.store(true, AtomicOrdering::Relaxed)
2057 : }
2058 : Err(CompactionError::CollectKeySpaceError(_)) => {
2059 : // Cancelled errors are covered by the `Err(e) if e.is_cancel()` branch.
2060 0 : self.compaction_failed.store(true, AtomicOrdering::Relaxed)
2061 : }
2062 : // Don't change the current value on offload failure or shutdown. We don't want to
2063 : // abruptly stall nor resume L0 flushes in these cases.
2064 0 : Err(CompactionError::Offload(_)) => {}
2065 : };
2066 :
2067 182 : result
2068 182 : }
2069 :
2070 : /// Mutate the timeline with a [`TimelineWriter`].
2071 2566604 : pub(crate) async fn writer(&self) -> TimelineWriter<'_> {
2072 2566604 : TimelineWriter {
2073 2566604 : tl: self,
2074 2566604 : write_guard: self.write_lock.lock().await,
2075 : }
2076 2566604 : }
2077 :
2078 0 : pub(crate) fn activate(
2079 0 : self: &Arc<Self>,
2080 0 : parent: Arc<crate::tenant::TenantShard>,
2081 0 : broker_client: BrokerClientChannel,
2082 0 : background_jobs_can_start: Option<&completion::Barrier>,
2083 0 : ctx: &RequestContext,
2084 0 : ) {
2085 0 : if self.tenant_shard_id.is_shard_zero() {
2086 0 : // Logical size is only maintained accurately on shard zero.
2087 0 : self.spawn_initial_logical_size_computation_task(ctx);
2088 0 : }
2089 0 : self.launch_wal_receiver(ctx, broker_client);
2090 0 : self.set_state(TimelineState::Active);
2091 0 : self.launch_eviction_task(parent, background_jobs_can_start);
2092 0 : }
2093 :
2094 : /// After this function returns, there are no timeline-scoped tasks are left running.
2095 : ///
2096 : /// The preferred pattern for is:
2097 : /// - in any spawned tasks, keep Timeline::guard open + Timeline::cancel / child token
2098 : /// - if early shutdown (not just cancellation) of a sub-tree of tasks is required,
2099 : /// go the extra mile and keep track of JoinHandles
2100 : /// - Keep track of JoinHandles using a passed-down `Arc<Mutex<Option<JoinSet>>>` or similar,
2101 : /// instead of spawning directly on a runtime. It is a more composable / testable pattern.
2102 : ///
2103 : /// For legacy reasons, we still have multiple tasks spawned using
2104 : /// `task_mgr::spawn(X, Some(tenant_id), Some(timeline_id))`.
2105 : /// We refer to these as "timeline-scoped task_mgr tasks".
2106 : /// Some of these tasks are already sensitive to Timeline::cancel while others are
2107 : /// not sensitive to Timeline::cancel and instead respect [`task_mgr::shutdown_token`]
2108 : /// or [`task_mgr::shutdown_watcher`].
2109 : /// We want to gradually convert the code base away from these.
2110 : ///
2111 : /// Here is an inventory of timeline-scoped task_mgr tasks that are still sensitive to
2112 : /// `task_mgr::shutdown_{token,watcher}` (there are also tenant-scoped and global-scoped
2113 : /// ones that aren't mentioned here):
2114 : /// - [`TaskKind::TimelineDeletionWorker`]
2115 : /// - NB: also used for tenant deletion
2116 : /// - [`TaskKind::RemoteUploadTask`]`
2117 : /// - [`TaskKind::InitialLogicalSizeCalculation`]
2118 : /// - [`TaskKind::DownloadAllRemoteLayers`] (can we get rid of it?)
2119 : // Inventory of timeline-scoped task_mgr tasks that use spawn but aren't sensitive:
2120 : /// - [`TaskKind::Eviction`]
2121 : /// - [`TaskKind::LayerFlushTask`]
2122 : /// - [`TaskKind::OndemandLogicalSizeCalculation`]
2123 : /// - [`TaskKind::GarbageCollector`] (immediate_gc is timeline-scoped)
2124 5 : pub(crate) async fn shutdown(&self, mode: ShutdownMode) {
2125 5 : debug_assert_current_span_has_tenant_and_timeline_id();
2126 5 :
2127 5 : // Regardless of whether we're going to try_freeze_and_flush
2128 5 : // or not, stop ingesting any more data.
2129 5 : let walreceiver = self.walreceiver.lock().unwrap().take();
2130 5 : tracing::debug!(
2131 0 : is_some = walreceiver.is_some(),
2132 0 : "Waiting for WalReceiverManager..."
2133 : );
2134 5 : if let Some(walreceiver) = walreceiver {
2135 0 : walreceiver.shutdown().await;
2136 5 : }
2137 : // ... and inform any waiters for newer LSNs that there won't be any.
2138 5 : self.last_record_lsn.shutdown();
2139 5 :
2140 5 : if let ShutdownMode::FreezeAndFlush = mode {
2141 3 : let do_flush = if let Some((open, frozen)) = self
2142 3 : .layers
2143 3 : .read()
2144 3 : .await
2145 3 : .layer_map()
2146 3 : .map(|lm| (lm.open_layer.is_some(), lm.frozen_layers.len()))
2147 3 : .ok()
2148 3 : .filter(|(open, frozen)| *open || *frozen > 0)
2149 : {
2150 0 : if self.remote_client.is_archived() == Some(true) {
2151 : // No point flushing on shutdown for an archived timeline: it is not important
2152 : // to have it nice and fresh after our restart, and trying to flush here might
2153 : // race with trying to offload it (which also stops the flush loop)
2154 0 : false
2155 : } else {
2156 0 : tracing::info!(?open, frozen, "flushing and freezing on shutdown");
2157 0 : true
2158 : }
2159 : } else {
2160 : // this is double-shutdown, it'll be a no-op
2161 3 : true
2162 : };
2163 :
2164 : // we shut down walreceiver above, so, we won't add anything more
2165 : // to the InMemoryLayer; freeze it and wait for all frozen layers
2166 : // to reach the disk & upload queue, then shut the upload queue and
2167 : // wait for it to drain.
2168 3 : if do_flush {
2169 3 : match self.freeze_and_flush().await {
2170 : Ok(_) => {
2171 : // drain the upload queue
2172 : // if we did not wait for completion here, it might be our shutdown process
2173 : // didn't wait for remote uploads to complete at all, as new tasks can forever
2174 : // be spawned.
2175 : //
2176 : // what is problematic is the shutting down of RemoteTimelineClient, because
2177 : // obviously it does not make sense to stop while we wait for it, but what
2178 : // about corner cases like s3 suddenly hanging up?
2179 3 : self.remote_client.shutdown().await;
2180 : }
2181 : Err(FlushLayerError::Cancelled) => {
2182 : // this is likely the second shutdown, ignore silently.
2183 : // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080
2184 0 : debug_assert!(self.cancel.is_cancelled());
2185 : }
2186 0 : Err(e) => {
2187 0 : // Non-fatal. Shutdown is infallible. Failures to flush just mean that
2188 0 : // we have some extra WAL replay to do next time the timeline starts.
2189 0 : warn!("failed to freeze and flush: {e:#}");
2190 : }
2191 : }
2192 :
2193 : // `self.remote_client.shutdown().await` above should have already flushed everything from the queue, but
2194 : // we also do a final check here to ensure that the queue is empty.
2195 3 : if !self.remote_client.no_pending_work() {
2196 0 : warn!(
2197 0 : "still have pending work in remote upload queue, but continuing shutting down anyways"
2198 : );
2199 3 : }
2200 0 : }
2201 2 : }
2202 :
2203 5 : if let ShutdownMode::Reload = mode {
2204 : // drain the upload queue
2205 1 : self.remote_client.shutdown().await;
2206 1 : if !self.remote_client.no_pending_work() {
2207 0 : warn!(
2208 0 : "still have pending work in remote upload queue, but continuing shutting down anyways"
2209 : );
2210 1 : }
2211 4 : }
2212 :
2213 : // Signal any subscribers to our cancellation token to drop out
2214 5 : tracing::debug!("Cancelling CancellationToken");
2215 5 : self.cancel.cancel();
2216 5 :
2217 5 : // If we have a background task downloading heatmap layers stop it.
2218 5 : // The background downloads are sensitive to timeline cancellation (done above),
2219 5 : // so the drain will be immediate.
2220 5 : self.stop_and_drain_heatmap_layers_download().await;
2221 :
2222 : // Ensure Prevent new page service requests from starting.
2223 5 : self.handles.shutdown();
2224 5 :
2225 5 : // Transition the remote_client into a state where it's only useful for timeline deletion.
2226 5 : // (The deletion use case is why we can't just hook up remote_client to Self::cancel).)
2227 5 : self.remote_client.stop();
2228 5 :
2229 5 : // As documented in remote_client.stop()'s doc comment, it's our responsibility
2230 5 : // to shut down the upload queue tasks.
2231 5 : // TODO: fix that, task management should be encapsulated inside remote_client.
2232 5 : task_mgr::shutdown_tasks(
2233 5 : Some(TaskKind::RemoteUploadTask),
2234 5 : Some(self.tenant_shard_id),
2235 5 : Some(self.timeline_id),
2236 5 : )
2237 5 : .await;
2238 :
2239 : // TODO: work toward making this a no-op. See this function's doc comment for more context.
2240 5 : tracing::debug!("Waiting for tasks...");
2241 5 : task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), Some(self.timeline_id)).await;
2242 :
2243 : {
2244 : // Allow any remaining in-memory layers to do cleanup -- until that, they hold the gate
2245 : // open.
2246 5 : let mut write_guard = self.write_lock.lock().await;
2247 5 : self.layers.write().await.shutdown(&mut write_guard);
2248 5 : }
2249 5 :
2250 5 : // Finally wait until any gate-holders are complete.
2251 5 : //
2252 5 : // TODO: once above shutdown_tasks is a no-op, we can close the gate before calling shutdown_tasks
2253 5 : // and use a TBD variant of shutdown_tasks that asserts that there were no tasks left.
2254 5 : self.gate.close().await;
2255 :
2256 5 : self.metrics.shutdown();
2257 5 : }
2258 :
2259 234 : pub(crate) fn set_state(&self, new_state: TimelineState) {
2260 234 : match (self.current_state(), new_state) {
2261 234 : (equal_state_1, equal_state_2) if equal_state_1 == equal_state_2 => {
2262 1 : info!("Ignoring new state, equal to the existing one: {equal_state_2:?}");
2263 : }
2264 0 : (st, TimelineState::Loading) => {
2265 0 : error!("ignoring transition from {st:?} into Loading state");
2266 : }
2267 0 : (TimelineState::Broken { .. }, new_state) => {
2268 0 : error!("Ignoring state update {new_state:?} for broken timeline");
2269 : }
2270 : (TimelineState::Stopping, TimelineState::Active) => {
2271 0 : error!("Not activating a Stopping timeline");
2272 : }
2273 233 : (_, new_state) => {
2274 233 : self.state.send_replace(new_state);
2275 233 : }
2276 : }
2277 234 : }
2278 :
2279 1 : pub(crate) fn set_broken(&self, reason: String) {
2280 1 : let backtrace_str: String = format!("{}", std::backtrace::Backtrace::force_capture());
2281 1 : let broken_state = TimelineState::Broken {
2282 1 : reason,
2283 1 : backtrace: backtrace_str,
2284 1 : };
2285 1 : self.set_state(broken_state);
2286 1 :
2287 1 : // Although the Broken state is not equivalent to shutdown() (shutdown will be called
2288 1 : // later when this tenant is detach or the process shuts down), firing the cancellation token
2289 1 : // here avoids the need for other tasks to watch for the Broken state explicitly.
2290 1 : self.cancel.cancel();
2291 1 : }
2292 :
2293 113038 : pub(crate) fn current_state(&self) -> TimelineState {
2294 113038 : self.state.borrow().clone()
2295 113038 : }
2296 :
2297 3 : pub(crate) fn is_broken(&self) -> bool {
2298 3 : matches!(&*self.state.borrow(), TimelineState::Broken { .. })
2299 3 : }
2300 :
2301 126 : pub(crate) fn is_active(&self) -> bool {
2302 126 : self.current_state() == TimelineState::Active
2303 126 : }
2304 :
2305 8 : pub(crate) fn is_archived(&self) -> Option<bool> {
2306 8 : self.remote_client.is_archived()
2307 8 : }
2308 :
2309 8 : pub(crate) fn is_invisible(&self) -> Option<bool> {
2310 8 : self.remote_client.is_invisible()
2311 8 : }
2312 :
2313 184 : pub(crate) fn is_stopping(&self) -> bool {
2314 184 : self.current_state() == TimelineState::Stopping
2315 184 : }
2316 :
2317 0 : pub(crate) fn subscribe_for_state_updates(&self) -> watch::Receiver<TimelineState> {
2318 0 : self.state.subscribe()
2319 0 : }
2320 :
2321 112495 : pub(crate) async fn wait_to_become_active(
2322 112495 : &self,
2323 112495 : _ctx: &RequestContext, // Prepare for use by cancellation
2324 112495 : ) -> Result<(), TimelineState> {
2325 112495 : let mut receiver = self.state.subscribe();
2326 : loop {
2327 112495 : let current_state = receiver.borrow().clone();
2328 112495 : match current_state {
2329 : TimelineState::Loading => {
2330 0 : receiver
2331 0 : .changed()
2332 0 : .await
2333 0 : .expect("holding a reference to self");
2334 : }
2335 : TimelineState::Active => {
2336 112494 : return Ok(());
2337 : }
2338 : TimelineState::Broken { .. } | TimelineState::Stopping => {
2339 : // There's no chance the timeline can transition back into ::Active
2340 1 : return Err(current_state);
2341 : }
2342 : }
2343 : }
2344 112495 : }
2345 :
2346 0 : pub(crate) async fn layer_map_info(
2347 0 : &self,
2348 0 : reset: LayerAccessStatsReset,
2349 0 : ) -> Result<LayerMapInfo, layer_manager::Shutdown> {
2350 0 : let guard = self.layers.read().await;
2351 0 : let layer_map = guard.layer_map()?;
2352 0 : let mut in_memory_layers = Vec::with_capacity(layer_map.frozen_layers.len() + 1);
2353 0 : if let Some(open_layer) = &layer_map.open_layer {
2354 0 : in_memory_layers.push(open_layer.info());
2355 0 : }
2356 0 : for frozen_layer in &layer_map.frozen_layers {
2357 0 : in_memory_layers.push(frozen_layer.info());
2358 0 : }
2359 :
2360 0 : let historic_layers = layer_map
2361 0 : .iter_historic_layers()
2362 0 : .map(|desc| guard.get_from_desc(&desc).info(reset))
2363 0 : .collect();
2364 0 :
2365 0 : Ok(LayerMapInfo {
2366 0 : in_memory_layers,
2367 0 : historic_layers,
2368 0 : })
2369 0 : }
2370 :
2371 : #[instrument(skip_all, fields(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))]
2372 : pub(crate) async fn download_layer(
2373 : &self,
2374 : layer_file_name: &LayerName,
2375 : ctx: &RequestContext,
2376 : ) -> Result<Option<bool>, super::storage_layer::layer::DownloadError> {
2377 : let Some(layer) = self
2378 : .find_layer(layer_file_name)
2379 : .await
2380 0 : .map_err(|e| match e {
2381 0 : layer_manager::Shutdown => {
2382 0 : super::storage_layer::layer::DownloadError::TimelineShutdown
2383 0 : }
2384 0 : })?
2385 : else {
2386 : return Ok(None);
2387 : };
2388 :
2389 : layer.download(ctx).await?;
2390 :
2391 : Ok(Some(true))
2392 : }
2393 :
2394 : /// Evict just one layer.
2395 : ///
2396 : /// Returns `Ok(None)` in the case where the layer could not be found by its `layer_file_name`.
2397 0 : pub(crate) async fn evict_layer(
2398 0 : &self,
2399 0 : layer_file_name: &LayerName,
2400 0 : ) -> anyhow::Result<Option<bool>> {
2401 0 : let _gate = self
2402 0 : .gate
2403 0 : .enter()
2404 0 : .map_err(|_| anyhow::anyhow!("Shutting down"))?;
2405 :
2406 0 : let Some(local_layer) = self.find_layer(layer_file_name).await? else {
2407 0 : return Ok(None);
2408 : };
2409 :
2410 : // curl has this by default
2411 0 : let timeout = std::time::Duration::from_secs(120);
2412 0 :
2413 0 : match local_layer.evict_and_wait(timeout).await {
2414 0 : Ok(()) => Ok(Some(true)),
2415 0 : Err(EvictionError::NotFound) => Ok(Some(false)),
2416 0 : Err(EvictionError::Downloaded) => Ok(Some(false)),
2417 0 : Err(EvictionError::Timeout) => Ok(Some(false)),
2418 : }
2419 0 : }
2420 :
2421 2401506 : fn should_roll(
2422 2401506 : &self,
2423 2401506 : layer_size: u64,
2424 2401506 : projected_layer_size: u64,
2425 2401506 : checkpoint_distance: u64,
2426 2401506 : projected_lsn: Lsn,
2427 2401506 : last_freeze_at: Lsn,
2428 2401506 : opened_at: Instant,
2429 2401506 : ) -> bool {
2430 2401506 : let distance = projected_lsn.widening_sub(last_freeze_at);
2431 2401506 :
2432 2401506 : // Rolling the open layer can be triggered by:
2433 2401506 : // 1. The distance from the last LSN we rolled at. This bounds the amount of WAL that
2434 2401506 : // the safekeepers need to store. For sharded tenants, we multiply by shard count to
2435 2401506 : // account for how writes are distributed across shards: we expect each node to consume
2436 2401506 : // 1/count of the LSN on average.
2437 2401506 : // 2. The size of the currently open layer.
2438 2401506 : // 3. The time since the last roll. It helps safekeepers to regard pageserver as caught
2439 2401506 : // up and suspend activity.
2440 2401506 : if distance >= checkpoint_distance as i128 * self.shard_identity.count.count() as i128 {
2441 0 : info!(
2442 0 : "Will roll layer at {} with layer size {} due to LSN distance ({})",
2443 : projected_lsn, layer_size, distance
2444 : );
2445 :
2446 0 : true
2447 2401506 : } else if projected_layer_size >= checkpoint_distance {
2448 : // NB: this check is relied upon by:
2449 40 : let _ = IndexEntry::validate_checkpoint_distance;
2450 40 : info!(
2451 0 : "Will roll layer at {} with layer size {} due to layer size ({})",
2452 : projected_lsn, layer_size, projected_layer_size
2453 : );
2454 :
2455 40 : true
2456 2401466 : } else if distance > 0 && opened_at.elapsed() >= self.get_checkpoint_timeout() {
2457 0 : info!(
2458 0 : "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})",
2459 0 : projected_lsn,
2460 0 : layer_size,
2461 0 : opened_at.elapsed()
2462 : );
2463 :
2464 0 : true
2465 : } else {
2466 2401466 : false
2467 : }
2468 2401506 : }
2469 :
2470 1 : pub(crate) fn is_basebackup_cache_enabled(&self) -> bool {
2471 1 : let tenant_conf = self.tenant_conf.load();
2472 1 : tenant_conf
2473 1 : .tenant_conf
2474 1 : .basebackup_cache_enabled
2475 1 : .unwrap_or(self.conf.default_tenant_conf.basebackup_cache_enabled)
2476 1 : }
2477 :
2478 : /// Prepare basebackup for the given LSN and store it in the basebackup cache.
2479 : /// The method is asynchronous and returns immediately.
2480 : /// The actual basebackup preparation is performed in the background
2481 : /// by the basebackup cache on a best-effort basis.
2482 1 : pub(crate) fn prepare_basebackup(&self, lsn: Lsn) {
2483 1 : if !self.is_basebackup_cache_enabled() {
2484 1 : return;
2485 0 : }
2486 0 : if !self.tenant_shard_id.is_shard_zero() {
2487 : // In theory we should never get here, but just in case check it.
2488 : // Preparing basebackup doesn't make sense for shards other than shard zero.
2489 0 : return;
2490 0 : }
2491 0 :
2492 0 : let res = self
2493 0 : .basebackup_prepare_sender
2494 0 : .send(BasebackupPrepareRequest {
2495 0 : tenant_shard_id: self.tenant_shard_id,
2496 0 : timeline_id: self.timeline_id,
2497 0 : lsn,
2498 0 : });
2499 0 : if let Err(e) = res {
2500 : // May happen during shutdown, it's not critical.
2501 0 : info!("Failed to send shutdown checkpoint: {e:#}");
2502 0 : }
2503 1 : }
2504 : }
2505 :
2506 : /// Number of times we will compute partition within a checkpoint distance.
2507 : const REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE: u64 = 10;
2508 :
2509 : // Private functions
2510 : impl Timeline {
2511 6 : pub(crate) fn get_lsn_lease_length(&self) -> Duration {
2512 6 : let tenant_conf = self.tenant_conf.load();
2513 6 : tenant_conf
2514 6 : .tenant_conf
2515 6 : .lsn_lease_length
2516 6 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
2517 6 : }
2518 :
2519 0 : pub(crate) fn get_lsn_lease_length_for_ts(&self) -> Duration {
2520 0 : let tenant_conf = self.tenant_conf.load();
2521 0 : tenant_conf
2522 0 : .tenant_conf
2523 0 : .lsn_lease_length_for_ts
2524 0 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length_for_ts)
2525 0 : }
2526 :
2527 0 : pub(crate) fn is_gc_blocked_by_lsn_lease_deadline(&self) -> bool {
2528 0 : let tenant_conf = self.tenant_conf.load();
2529 0 : tenant_conf.is_gc_blocked_by_lsn_lease_deadline()
2530 0 : }
2531 :
2532 0 : pub(crate) fn get_lazy_slru_download(&self) -> bool {
2533 0 : let tenant_conf = self.tenant_conf.load();
2534 0 : tenant_conf
2535 0 : .tenant_conf
2536 0 : .lazy_slru_download
2537 0 : .unwrap_or(self.conf.default_tenant_conf.lazy_slru_download)
2538 0 : }
2539 :
2540 : /// Checks if a get page request should get perf tracing
2541 : ///
2542 : /// The configuration priority is: tenant config override, default tenant config,
2543 : /// pageserver config.
2544 0 : pub(crate) fn is_get_page_request_sampled(&self) -> bool {
2545 0 : let tenant_conf = self.tenant_conf.load();
2546 0 : let ratio = tenant_conf
2547 0 : .tenant_conf
2548 0 : .sampling_ratio
2549 0 : .flatten()
2550 0 : .or(self.conf.default_tenant_conf.sampling_ratio)
2551 0 : .or(self.conf.tracing.as_ref().map(|t| t.sampling_ratio));
2552 0 :
2553 0 : match ratio {
2554 0 : Some(r) => {
2555 0 : if r.numerator == 0 {
2556 0 : false
2557 : } else {
2558 0 : rand::thread_rng().gen_range(0..r.denominator) < r.numerator
2559 : }
2560 : }
2561 0 : None => false,
2562 : }
2563 0 : }
2564 :
2565 2402266 : fn get_checkpoint_distance(&self) -> u64 {
2566 2402266 : let tenant_conf = self.tenant_conf.load();
2567 2402266 : tenant_conf
2568 2402266 : .tenant_conf
2569 2402266 : .checkpoint_distance
2570 2402266 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
2571 2402266 : }
2572 :
2573 2401466 : fn get_checkpoint_timeout(&self) -> Duration {
2574 2401466 : let tenant_conf = self.tenant_conf.load();
2575 2401466 : tenant_conf
2576 2401466 : .tenant_conf
2577 2401466 : .checkpoint_timeout
2578 2401466 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
2579 2401466 : }
2580 :
2581 0 : pub(crate) fn get_pitr_interval(&self) -> Duration {
2582 0 : let tenant_conf = &self.tenant_conf.load().tenant_conf;
2583 0 : tenant_conf
2584 0 : .pitr_interval
2585 0 : .unwrap_or(self.conf.default_tenant_conf.pitr_interval)
2586 0 : }
2587 :
2588 1266 : fn get_compaction_period(&self) -> Duration {
2589 1266 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2590 1266 : tenant_conf
2591 1266 : .compaction_period
2592 1266 : .unwrap_or(self.conf.default_tenant_conf.compaction_period)
2593 1266 : }
2594 :
2595 340 : fn get_compaction_target_size(&self) -> u64 {
2596 340 : let tenant_conf = self.tenant_conf.load();
2597 340 : tenant_conf
2598 340 : .tenant_conf
2599 340 : .compaction_target_size
2600 340 : .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
2601 340 : }
2602 :
2603 791 : fn get_compaction_threshold(&self) -> usize {
2604 791 : let tenant_conf = self.tenant_conf.load();
2605 791 : tenant_conf
2606 791 : .tenant_conf
2607 791 : .compaction_threshold
2608 791 : .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
2609 791 : }
2610 :
2611 : /// Returns `true` if the rel_size_v2 config is enabled. NOTE: the write path and read path
2612 : /// should look at `get_rel_size_v2_status()` to get the actual status of the timeline. It is
2613 : /// possible that the index part persists the state while the config doesn't get persisted.
2614 973 : pub(crate) fn get_rel_size_v2_enabled(&self) -> bool {
2615 973 : let tenant_conf = self.tenant_conf.load();
2616 973 : tenant_conf
2617 973 : .tenant_conf
2618 973 : .rel_size_v2_enabled
2619 973 : .unwrap_or(self.conf.default_tenant_conf.rel_size_v2_enabled)
2620 973 : }
2621 :
2622 1099 : pub(crate) fn get_rel_size_v2_status(&self) -> RelSizeMigration {
2623 1099 : self.rel_size_v2_status
2624 1099 : .load()
2625 1099 : .as_ref()
2626 1099 : .map(|s| s.as_ref().clone())
2627 1099 : .unwrap_or(RelSizeMigration::Legacy)
2628 1099 : }
2629 :
2630 14 : fn get_compaction_upper_limit(&self) -> usize {
2631 14 : let tenant_conf = self.tenant_conf.load();
2632 14 : tenant_conf
2633 14 : .tenant_conf
2634 14 : .compaction_upper_limit
2635 14 : .unwrap_or(self.conf.default_tenant_conf.compaction_upper_limit)
2636 14 : }
2637 :
2638 0 : pub fn get_compaction_l0_first(&self) -> bool {
2639 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2640 0 : tenant_conf
2641 0 : .compaction_l0_first
2642 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_l0_first)
2643 0 : }
2644 :
2645 0 : pub fn get_compaction_l0_semaphore(&self) -> bool {
2646 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2647 0 : tenant_conf
2648 0 : .compaction_l0_semaphore
2649 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_l0_semaphore)
2650 0 : }
2651 :
2652 633 : fn get_l0_flush_delay_threshold(&self) -> Option<usize> {
2653 : // By default, delay L0 flushes at 3x the compaction threshold. The compaction threshold
2654 : // defaults to 10, and L0 compaction is generally able to keep L0 counts below 30.
2655 : const DEFAULT_L0_FLUSH_DELAY_FACTOR: usize = 3;
2656 :
2657 : // If compaction is disabled, don't delay.
2658 633 : if self.get_compaction_period() == Duration::ZERO {
2659 632 : return None;
2660 1 : }
2661 1 :
2662 1 : let compaction_threshold = self.get_compaction_threshold();
2663 1 : let tenant_conf = self.tenant_conf.load();
2664 1 : let l0_flush_delay_threshold = tenant_conf
2665 1 : .tenant_conf
2666 1 : .l0_flush_delay_threshold
2667 1 : .or(self.conf.default_tenant_conf.l0_flush_delay_threshold)
2668 1 : .unwrap_or(DEFAULT_L0_FLUSH_DELAY_FACTOR * compaction_threshold);
2669 1 :
2670 1 : // 0 disables backpressure.
2671 1 : if l0_flush_delay_threshold == 0 {
2672 0 : return None;
2673 1 : }
2674 1 :
2675 1 : // Clamp the flush delay threshold to the compaction threshold; it doesn't make sense to
2676 1 : // backpressure flushes below this.
2677 1 : // TODO: the tenant config should have validation to prevent this instead.
2678 1 : debug_assert!(l0_flush_delay_threshold >= compaction_threshold);
2679 1 : Some(max(l0_flush_delay_threshold, compaction_threshold))
2680 633 : }
2681 :
2682 633 : fn get_l0_flush_stall_threshold(&self) -> Option<usize> {
2683 : // Disable L0 stalls by default. Stalling can cause unavailability if L0 compaction isn't
2684 : // responsive, and it can e.g. block on other compaction via the compaction semaphore or
2685 : // sibling timelines. We need more confidence before enabling this.
2686 : const DEFAULT_L0_FLUSH_STALL_FACTOR: usize = 0; // TODO: default to e.g. 5
2687 :
2688 : // If compaction is disabled, don't stall.
2689 633 : if self.get_compaction_period() == Duration::ZERO {
2690 632 : return None;
2691 1 : }
2692 1 :
2693 1 : // If compaction is failing, don't stall and try to keep the tenant alive. This may not be a
2694 1 : // good idea: read amp can grow unbounded, leading to terrible performance, and we may take
2695 1 : // on unbounded compaction debt that can take a long time to fix once compaction comes back
2696 1 : // online. At least we'll delay flushes, slowing down the growth and buying some time.
2697 1 : if self.compaction_failed.load(AtomicOrdering::Relaxed) {
2698 0 : return None;
2699 1 : }
2700 1 :
2701 1 : let compaction_threshold = self.get_compaction_threshold();
2702 1 : let tenant_conf = self.tenant_conf.load();
2703 1 : let l0_flush_stall_threshold = tenant_conf
2704 1 : .tenant_conf
2705 1 : .l0_flush_stall_threshold
2706 1 : .or(self.conf.default_tenant_conf.l0_flush_stall_threshold);
2707 1 :
2708 1 : // Tests sometimes set compaction_threshold=1 to generate lots of layer files, and don't
2709 1 : // handle the 20-second compaction delay. Some (e.g. `test_backward_compatibility`) can't
2710 1 : // easily adjust the L0 backpressure settings, so just disable stalls in this case.
2711 1 : if cfg!(feature = "testing")
2712 1 : && compaction_threshold == 1
2713 0 : && l0_flush_stall_threshold.is_none()
2714 : {
2715 0 : return None;
2716 1 : }
2717 1 :
2718 1 : let l0_flush_stall_threshold = l0_flush_stall_threshold
2719 1 : .unwrap_or(DEFAULT_L0_FLUSH_STALL_FACTOR * compaction_threshold);
2720 1 :
2721 1 : // 0 disables backpressure.
2722 1 : if l0_flush_stall_threshold == 0 {
2723 1 : return None;
2724 0 : }
2725 0 :
2726 0 : // Clamp the flush stall threshold to the compaction threshold; it doesn't make sense to
2727 0 : // backpressure flushes below this.
2728 0 : // TODO: the tenant config should have validation to prevent this instead.
2729 0 : debug_assert!(l0_flush_stall_threshold >= compaction_threshold);
2730 0 : Some(max(l0_flush_stall_threshold, compaction_threshold))
2731 633 : }
2732 :
2733 7 : fn get_image_creation_threshold(&self) -> usize {
2734 7 : let tenant_conf = self.tenant_conf.load();
2735 7 : tenant_conf
2736 7 : .tenant_conf
2737 7 : .image_creation_threshold
2738 7 : .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
2739 7 : }
2740 :
2741 182 : fn get_compaction_algorithm_settings(&self) -> CompactionAlgorithmSettings {
2742 182 : let tenant_conf = &self.tenant_conf.load();
2743 182 : tenant_conf
2744 182 : .tenant_conf
2745 182 : .compaction_algorithm
2746 182 : .as_ref()
2747 182 : .unwrap_or(&self.conf.default_tenant_conf.compaction_algorithm)
2748 182 : .clone()
2749 182 : }
2750 :
2751 182 : pub fn get_compaction_shard_ancestor(&self) -> bool {
2752 182 : let tenant_conf = self.tenant_conf.load();
2753 182 : tenant_conf
2754 182 : .tenant_conf
2755 182 : .compaction_shard_ancestor
2756 182 : .unwrap_or(self.conf.default_tenant_conf.compaction_shard_ancestor)
2757 182 : }
2758 :
2759 0 : fn get_eviction_policy(&self) -> EvictionPolicy {
2760 0 : let tenant_conf = self.tenant_conf.load();
2761 0 : tenant_conf
2762 0 : .tenant_conf
2763 0 : .eviction_policy
2764 0 : .unwrap_or(self.conf.default_tenant_conf.eviction_policy)
2765 0 : }
2766 :
2767 233 : fn get_evictions_low_residence_duration_metric_threshold(
2768 233 : tenant_conf: &pageserver_api::models::TenantConfig,
2769 233 : default_tenant_conf: &pageserver_api::config::TenantConfigToml,
2770 233 : ) -> Duration {
2771 233 : tenant_conf
2772 233 : .evictions_low_residence_duration_metric_threshold
2773 233 : .unwrap_or(default_tenant_conf.evictions_low_residence_duration_metric_threshold)
2774 233 : }
2775 :
2776 291 : fn get_image_layer_creation_check_threshold(&self) -> u8 {
2777 291 : let tenant_conf = self.tenant_conf.load();
2778 291 : tenant_conf
2779 291 : .tenant_conf
2780 291 : .image_layer_creation_check_threshold
2781 291 : .unwrap_or(
2782 291 : self.conf
2783 291 : .default_tenant_conf
2784 291 : .image_layer_creation_check_threshold,
2785 291 : )
2786 291 : }
2787 :
2788 27 : fn get_gc_compaction_settings(&self) -> GcCompactionCombinedSettings {
2789 27 : let tenant_conf = &self.tenant_conf.load();
2790 27 : let gc_compaction_enabled = tenant_conf
2791 27 : .tenant_conf
2792 27 : .gc_compaction_enabled
2793 27 : .unwrap_or(self.conf.default_tenant_conf.gc_compaction_enabled);
2794 27 : let gc_compaction_verification = tenant_conf
2795 27 : .tenant_conf
2796 27 : .gc_compaction_verification
2797 27 : .unwrap_or(self.conf.default_tenant_conf.gc_compaction_verification);
2798 27 : let gc_compaction_initial_threshold_kb = tenant_conf
2799 27 : .tenant_conf
2800 27 : .gc_compaction_initial_threshold_kb
2801 27 : .unwrap_or(
2802 27 : self.conf
2803 27 : .default_tenant_conf
2804 27 : .gc_compaction_initial_threshold_kb,
2805 27 : );
2806 27 : let gc_compaction_ratio_percent = tenant_conf
2807 27 : .tenant_conf
2808 27 : .gc_compaction_ratio_percent
2809 27 : .unwrap_or(self.conf.default_tenant_conf.gc_compaction_ratio_percent);
2810 27 : GcCompactionCombinedSettings {
2811 27 : gc_compaction_enabled,
2812 27 : gc_compaction_verification,
2813 27 : gc_compaction_initial_threshold_kb,
2814 27 : gc_compaction_ratio_percent,
2815 27 : }
2816 27 : }
2817 :
2818 0 : fn get_image_creation_preempt_threshold(&self) -> usize {
2819 0 : let tenant_conf = self.tenant_conf.load();
2820 0 : tenant_conf
2821 0 : .tenant_conf
2822 0 : .image_creation_preempt_threshold
2823 0 : .unwrap_or(
2824 0 : self.conf
2825 0 : .default_tenant_conf
2826 0 : .image_creation_preempt_threshold,
2827 0 : )
2828 0 : }
2829 :
2830 : /// Resolve the effective WAL receiver protocol to use for this tenant.
2831 : ///
2832 : /// Priority order is:
2833 : /// 1. Tenant config override
2834 : /// 2. Default value for tenant config override
2835 : /// 3. Pageserver config override
2836 : /// 4. Pageserver config default
2837 0 : pub fn resolve_wal_receiver_protocol(&self) -> PostgresClientProtocol {
2838 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2839 0 : tenant_conf
2840 0 : .wal_receiver_protocol_override
2841 0 : .or(self.conf.default_tenant_conf.wal_receiver_protocol_override)
2842 0 : .unwrap_or(self.conf.wal_receiver_protocol)
2843 0 : }
2844 :
2845 0 : pub(super) fn tenant_conf_updated(&self, new_conf: &AttachedTenantConf) {
2846 0 : // NB: Most tenant conf options are read by background loops, so,
2847 0 : // changes will automatically be picked up.
2848 0 :
2849 0 : // The threshold is embedded in the metric. So, we need to update it.
2850 0 : {
2851 0 : let new_threshold = Self::get_evictions_low_residence_duration_metric_threshold(
2852 0 : &new_conf.tenant_conf,
2853 0 : &self.conf.default_tenant_conf,
2854 0 : );
2855 0 :
2856 0 : let tenant_id_str = self.tenant_shard_id.tenant_id.to_string();
2857 0 : let shard_id_str = format!("{}", self.tenant_shard_id.shard_slug());
2858 0 :
2859 0 : let timeline_id_str = self.timeline_id.to_string();
2860 0 :
2861 0 : self.remote_client.update_config(&new_conf.location);
2862 0 :
2863 0 : let mut rel_size_cache = self.rel_size_snapshot_cache.lock().unwrap();
2864 0 : if let Some(new_capacity) = new_conf.tenant_conf.relsize_snapshot_cache_capacity {
2865 0 : if new_capacity != rel_size_cache.capacity() {
2866 0 : rel_size_cache.set_capacity(new_capacity);
2867 0 : }
2868 0 : }
2869 :
2870 0 : self.metrics
2871 0 : .evictions_with_low_residence_duration
2872 0 : .write()
2873 0 : .unwrap()
2874 0 : .change_threshold(
2875 0 : &tenant_id_str,
2876 0 : &shard_id_str,
2877 0 : &timeline_id_str,
2878 0 : new_threshold,
2879 0 : );
2880 0 : }
2881 0 : }
2882 :
2883 : /// Open a Timeline handle.
2884 : ///
2885 : /// Loads the metadata for the timeline into memory, but not the layer map.
2886 : #[allow(clippy::too_many_arguments)]
2887 233 : pub(super) fn new(
2888 233 : conf: &'static PageServerConf,
2889 233 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
2890 233 : metadata: &TimelineMetadata,
2891 233 : previous_heatmap: Option<PreviousHeatmap>,
2892 233 : ancestor: Option<Arc<Timeline>>,
2893 233 : timeline_id: TimelineId,
2894 233 : tenant_shard_id: TenantShardId,
2895 233 : generation: Generation,
2896 233 : shard_identity: ShardIdentity,
2897 233 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
2898 233 : resources: TimelineResources,
2899 233 : pg_version: u32,
2900 233 : state: TimelineState,
2901 233 : attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
2902 233 : create_idempotency: crate::tenant::CreateTimelineIdempotency,
2903 233 : gc_compaction_state: Option<GcCompactionState>,
2904 233 : rel_size_v2_status: Option<RelSizeMigration>,
2905 233 : cancel: CancellationToken,
2906 233 : ) -> Arc<Self> {
2907 233 : let disk_consistent_lsn = metadata.disk_consistent_lsn();
2908 233 : let (state, _) = watch::channel(state);
2909 233 :
2910 233 : let (layer_flush_start_tx, _) = tokio::sync::watch::channel((0, disk_consistent_lsn));
2911 233 : let (layer_flush_done_tx, _) = tokio::sync::watch::channel((0, Ok(())));
2912 233 :
2913 233 : let evictions_low_residence_duration_metric_threshold = {
2914 233 : let loaded_tenant_conf = tenant_conf.load();
2915 233 : Self::get_evictions_low_residence_duration_metric_threshold(
2916 233 : &loaded_tenant_conf.tenant_conf,
2917 233 : &conf.default_tenant_conf,
2918 233 : )
2919 : };
2920 :
2921 233 : if let Some(ancestor) = &ancestor {
2922 118 : let mut ancestor_gc_info = ancestor.gc_info.write().unwrap();
2923 118 : // If we construct an explicit timeline object, it's obviously not offloaded
2924 118 : let is_offloaded = MaybeOffloaded::No;
2925 118 : ancestor_gc_info.insert_child(timeline_id, metadata.ancestor_lsn(), is_offloaded);
2926 118 : }
2927 :
2928 233 : let relsize_snapshot_cache_capacity = {
2929 233 : let loaded_tenant_conf = tenant_conf.load();
2930 233 : loaded_tenant_conf
2931 233 : .tenant_conf
2932 233 : .relsize_snapshot_cache_capacity
2933 233 : .unwrap_or(conf.default_tenant_conf.relsize_snapshot_cache_capacity)
2934 233 : };
2935 233 :
2936 233 : Arc::new_cyclic(|myself| {
2937 233 : let metrics = Arc::new(TimelineMetrics::new(
2938 233 : &tenant_shard_id,
2939 233 : &timeline_id,
2940 233 : crate::metrics::EvictionsWithLowResidenceDurationBuilder::new(
2941 233 : "mtime",
2942 233 : evictions_low_residence_duration_metric_threshold,
2943 233 : ),
2944 233 : ));
2945 233 : let aux_file_metrics = metrics.aux_file_size_gauge.clone();
2946 :
2947 233 : let mut result = Timeline {
2948 233 : conf,
2949 233 : tenant_conf,
2950 233 : myself: myself.clone(),
2951 233 : timeline_id,
2952 233 : tenant_shard_id,
2953 233 : generation,
2954 233 : shard_identity,
2955 233 : pg_version,
2956 233 : layers: Default::default(),
2957 233 : gc_compaction_layer_update_lock: tokio::sync::RwLock::new(()),
2958 233 :
2959 233 : walredo_mgr,
2960 233 : walreceiver: Mutex::new(None),
2961 233 :
2962 233 : remote_client: Arc::new(resources.remote_client),
2963 233 :
2964 233 : // initialize in-memory 'last_record_lsn' from 'disk_consistent_lsn'.
2965 233 : last_record_lsn: SeqWait::new(RecordLsn {
2966 233 : last: disk_consistent_lsn,
2967 233 : prev: metadata.prev_record_lsn().unwrap_or(Lsn(0)),
2968 233 : }),
2969 233 : disk_consistent_lsn: AtomicLsn::new(disk_consistent_lsn.0),
2970 233 :
2971 233 : gc_compaction_state: ArcSwap::new(Arc::new(gc_compaction_state)),
2972 233 :
2973 233 : last_freeze_at: AtomicLsn::new(disk_consistent_lsn.0),
2974 233 : last_freeze_ts: RwLock::new(Instant::now()),
2975 233 :
2976 233 : loaded_at: (disk_consistent_lsn, SystemTime::now()),
2977 233 :
2978 233 : ancestor_timeline: ancestor,
2979 233 : ancestor_lsn: metadata.ancestor_lsn(),
2980 233 :
2981 233 : metrics,
2982 233 :
2983 233 : query_metrics: crate::metrics::SmgrQueryTimePerTimeline::new(
2984 233 : &tenant_shard_id,
2985 233 : &timeline_id,
2986 233 : resources.pagestream_throttle_metrics,
2987 233 : ),
2988 233 :
2989 1864 : directory_metrics: array::from_fn(|_| AtomicU64::new(0)),
2990 1864 : directory_metrics_inited: array::from_fn(|_| AtomicBool::new(false)),
2991 233 :
2992 233 : flush_loop_state: Mutex::new(FlushLoopState::NotStarted),
2993 233 :
2994 233 : layer_flush_start_tx,
2995 233 : layer_flush_done_tx,
2996 233 :
2997 233 : write_lock: tokio::sync::Mutex::new(None),
2998 233 :
2999 233 : gc_info: std::sync::RwLock::new(GcInfo::default()),
3000 233 :
3001 233 : last_image_layer_creation_status: ArcSwap::new(Arc::new(
3002 233 : LastImageLayerCreationStatus::default(),
3003 233 : )),
3004 233 :
3005 233 : applied_gc_cutoff_lsn: Rcu::new(metadata.latest_gc_cutoff_lsn()),
3006 233 : initdb_lsn: metadata.initdb_lsn(),
3007 233 :
3008 233 : current_logical_size: if disk_consistent_lsn.is_valid() {
3009 : // we're creating timeline data with some layer files existing locally,
3010 : // need to recalculate timeline's logical size based on data in the layers.
3011 120 : LogicalSize::deferred_initial(disk_consistent_lsn)
3012 : } else {
3013 : // we're creating timeline data without any layers existing locally,
3014 : // initial logical size is 0.
3015 113 : LogicalSize::empty_initial()
3016 : },
3017 :
3018 233 : partitioning: GuardArcSwap::new((
3019 233 : (KeyPartitioning::new(), KeyPartitioning::new().into_sparse()),
3020 233 : Lsn(0),
3021 233 : )),
3022 233 : repartition_threshold: 0,
3023 233 : last_image_layer_creation_check_at: AtomicLsn::new(0),
3024 233 : last_image_layer_creation_check_instant: Mutex::new(None),
3025 233 :
3026 233 : last_received_wal: Mutex::new(None),
3027 233 : rel_size_latest_cache: RwLock::new(HashMap::new()),
3028 233 : rel_size_snapshot_cache: Mutex::new(LruCache::new(relsize_snapshot_cache_capacity)),
3029 233 :
3030 233 : download_all_remote_layers_task_info: RwLock::new(None),
3031 233 :
3032 233 : state,
3033 233 :
3034 233 : eviction_task_timeline_state: tokio::sync::Mutex::new(
3035 233 : EvictionTaskTimelineState::default(),
3036 233 : ),
3037 233 : delete_progress: TimelineDeleteProgress::default(),
3038 233 :
3039 233 : cancel,
3040 233 : gate: Gate::default(),
3041 233 :
3042 233 : compaction_lock: tokio::sync::Mutex::default(),
3043 233 : compaction_failed: AtomicBool::default(),
3044 233 : l0_compaction_trigger: resources.l0_compaction_trigger,
3045 233 : gc_lock: tokio::sync::Mutex::default(),
3046 233 :
3047 233 : standby_horizon: AtomicLsn::new(0),
3048 233 :
3049 233 : pagestream_throttle: resources.pagestream_throttle,
3050 233 :
3051 233 : aux_file_size_estimator: AuxFileSizeEstimator::new(aux_file_metrics),
3052 233 :
3053 233 : #[cfg(test)]
3054 233 : extra_test_dense_keyspace: ArcSwap::new(Arc::new(KeySpace::default())),
3055 233 :
3056 233 : l0_flush_global_state: resources.l0_flush_global_state,
3057 233 :
3058 233 : handles: Default::default(),
3059 233 :
3060 233 : attach_wal_lag_cooldown,
3061 233 :
3062 233 : create_idempotency,
3063 233 :
3064 233 : page_trace: Default::default(),
3065 233 :
3066 233 : previous_heatmap: ArcSwapOption::from_pointee(previous_heatmap),
3067 233 :
3068 233 : heatmap_layers_downloader: Mutex::new(None),
3069 233 :
3070 233 : rel_size_v2_status: ArcSwapOption::from_pointee(rel_size_v2_status),
3071 233 :
3072 233 : wait_lsn_log_slow: tokio::sync::Semaphore::new(1),
3073 233 :
3074 233 : basebackup_prepare_sender: resources.basebackup_prepare_sender,
3075 233 : };
3076 233 :
3077 233 : result.repartition_threshold =
3078 233 : result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE;
3079 233 :
3080 233 : result
3081 233 : .metrics
3082 233 : .last_record_lsn_gauge
3083 233 : .set(disk_consistent_lsn.0 as i64);
3084 233 : result
3085 233 : })
3086 233 : }
3087 :
3088 339 : pub(super) fn maybe_spawn_flush_loop(self: &Arc<Self>) {
3089 339 : let Ok(guard) = self.gate.enter() else {
3090 0 : info!("cannot start flush loop when the timeline gate has already been closed");
3091 0 : return;
3092 : };
3093 339 : let mut flush_loop_state = self.flush_loop_state.lock().unwrap();
3094 339 : match *flush_loop_state {
3095 230 : FlushLoopState::NotStarted => (),
3096 : FlushLoopState::Running { .. } => {
3097 109 : info!(
3098 0 : "skipping attempt to start flush_loop twice {}/{}",
3099 0 : self.tenant_shard_id, self.timeline_id
3100 : );
3101 109 : return;
3102 : }
3103 : FlushLoopState::Exited => {
3104 0 : info!(
3105 0 : "ignoring attempt to restart exited flush_loop {}/{}",
3106 0 : self.tenant_shard_id, self.timeline_id
3107 : );
3108 0 : return;
3109 : }
3110 : }
3111 :
3112 230 : let layer_flush_start_rx = self.layer_flush_start_tx.subscribe();
3113 230 : let self_clone = Arc::clone(self);
3114 230 :
3115 230 : debug!("spawning flush loop");
3116 230 : *flush_loop_state = FlushLoopState::Running {
3117 230 : #[cfg(test)]
3118 230 : expect_initdb_optimization: false,
3119 230 : #[cfg(test)]
3120 230 : initdb_optimization_count: 0,
3121 230 : };
3122 230 : task_mgr::spawn(
3123 230 : task_mgr::BACKGROUND_RUNTIME.handle(),
3124 230 : task_mgr::TaskKind::LayerFlushTask,
3125 230 : self.tenant_shard_id,
3126 230 : Some(self.timeline_id),
3127 230 : "layer flush task",
3128 230 : async move {
3129 230 : let _guard = guard;
3130 230 : let background_ctx = RequestContext::todo_child(TaskKind::LayerFlushTask, DownloadBehavior::Error).with_scope_timeline(&self_clone);
3131 230 : self_clone.flush_loop(layer_flush_start_rx, &background_ctx).await;
3132 5 : let mut flush_loop_state = self_clone.flush_loop_state.lock().unwrap();
3133 5 : assert!(matches!(*flush_loop_state, FlushLoopState::Running{..}));
3134 5 : *flush_loop_state = FlushLoopState::Exited;
3135 5 : Ok(())
3136 5 : }
3137 230 : .instrument(info_span!(parent: None, "layer flush task", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
3138 : );
3139 339 : }
3140 :
3141 0 : pub(crate) fn update_gc_compaction_state(
3142 0 : &self,
3143 0 : gc_compaction_state: GcCompactionState,
3144 0 : ) -> anyhow::Result<()> {
3145 0 : self.gc_compaction_state
3146 0 : .store(Arc::new(Some(gc_compaction_state.clone())));
3147 0 : self.remote_client
3148 0 : .schedule_index_upload_for_gc_compaction_state_update(gc_compaction_state)
3149 0 : }
3150 :
3151 0 : pub(crate) fn update_rel_size_v2_status(
3152 0 : &self,
3153 0 : rel_size_v2_status: RelSizeMigration,
3154 0 : ) -> anyhow::Result<()> {
3155 0 : self.rel_size_v2_status
3156 0 : .store(Some(Arc::new(rel_size_v2_status.clone())));
3157 0 : self.remote_client
3158 0 : .schedule_index_upload_for_rel_size_v2_status_update(rel_size_v2_status)
3159 0 : }
3160 :
3161 0 : pub(crate) fn get_gc_compaction_state(&self) -> Option<GcCompactionState> {
3162 0 : self.gc_compaction_state.load_full().as_ref().clone()
3163 0 : }
3164 :
3165 : /// Creates and starts the wal receiver.
3166 : ///
3167 : /// This function is expected to be called at most once per Timeline's lifecycle
3168 : /// when the timeline is activated.
3169 0 : fn launch_wal_receiver(
3170 0 : self: &Arc<Self>,
3171 0 : ctx: &RequestContext,
3172 0 : broker_client: BrokerClientChannel,
3173 0 : ) {
3174 0 : info!(
3175 0 : "launching WAL receiver for timeline {} of tenant {}",
3176 0 : self.timeline_id, self.tenant_shard_id
3177 : );
3178 :
3179 0 : let tenant_conf = self.tenant_conf.load();
3180 0 : let wal_connect_timeout = tenant_conf
3181 0 : .tenant_conf
3182 0 : .walreceiver_connect_timeout
3183 0 : .unwrap_or(self.conf.default_tenant_conf.walreceiver_connect_timeout);
3184 0 : let lagging_wal_timeout = tenant_conf
3185 0 : .tenant_conf
3186 0 : .lagging_wal_timeout
3187 0 : .unwrap_or(self.conf.default_tenant_conf.lagging_wal_timeout);
3188 0 : let max_lsn_wal_lag = tenant_conf
3189 0 : .tenant_conf
3190 0 : .max_lsn_wal_lag
3191 0 : .unwrap_or(self.conf.default_tenant_conf.max_lsn_wal_lag);
3192 0 :
3193 0 : let mut guard = self.walreceiver.lock().unwrap();
3194 0 : assert!(
3195 0 : guard.is_none(),
3196 0 : "multiple launches / re-launches of WAL receiver are not supported"
3197 : );
3198 0 : *guard = Some(WalReceiver::start(
3199 0 : Arc::clone(self),
3200 0 : WalReceiverConf {
3201 0 : protocol: self.resolve_wal_receiver_protocol(),
3202 0 : wal_connect_timeout,
3203 0 : lagging_wal_timeout,
3204 0 : max_lsn_wal_lag,
3205 0 : auth_token: crate::config::SAFEKEEPER_AUTH_TOKEN.get().cloned(),
3206 0 : availability_zone: self.conf.availability_zone.clone(),
3207 0 : ingest_batch_size: self.conf.ingest_batch_size,
3208 0 : validate_wal_contiguity: self.conf.validate_wal_contiguity,
3209 0 : },
3210 0 : broker_client,
3211 0 : ctx,
3212 0 : ));
3213 0 : }
3214 :
3215 : /// Initialize with an empty layer map. Used when creating a new timeline.
3216 230 : pub(super) fn init_empty_layer_map(&self, start_lsn: Lsn) {
3217 230 : let mut layers = self.layers.try_write().expect(
3218 230 : "in the context where we call this function, no other task has access to the object",
3219 230 : );
3220 230 : layers
3221 230 : .open_mut()
3222 230 : .expect("in this context the LayerManager must still be open")
3223 230 : .initialize_empty(Lsn(start_lsn.0));
3224 230 : }
3225 :
3226 : /// Scan the timeline directory, cleanup, populate the layer map, and schedule uploads for local-only
3227 : /// files.
3228 3 : pub(super) async fn load_layer_map(
3229 3 : &self,
3230 3 : disk_consistent_lsn: Lsn,
3231 3 : index_part: IndexPart,
3232 3 : ) -> anyhow::Result<()> {
3233 : use LayerName::*;
3234 : use init::Decision::*;
3235 : use init::{Discovered, DismissedLayer};
3236 :
3237 3 : let mut guard = self.layers.write().await;
3238 :
3239 3 : let timer = self.metrics.load_layer_map_histo.start_timer();
3240 3 :
3241 3 : // Scan timeline directory and create ImageLayerName and DeltaFilename
3242 3 : // structs representing all files on disk
3243 3 : let timeline_path = self
3244 3 : .conf
3245 3 : .timeline_path(&self.tenant_shard_id, &self.timeline_id);
3246 3 : let conf = self.conf;
3247 3 : let span = tracing::Span::current();
3248 3 :
3249 3 : // Copy to move into the task we're about to spawn
3250 3 : let this = self.myself.upgrade().expect("&self method holds the arc");
3251 :
3252 3 : let (loaded_layers, needs_cleanup, total_physical_size) = tokio::task::spawn_blocking({
3253 3 : move || {
3254 3 : let _g = span.entered();
3255 3 : let discovered = init::scan_timeline_dir(&timeline_path)?;
3256 3 : let mut discovered_layers = Vec::with_capacity(discovered.len());
3257 3 : let mut unrecognized_files = Vec::new();
3258 3 :
3259 3 : let mut path = timeline_path;
3260 :
3261 11 : for discovered in discovered {
3262 8 : let (name, kind) = match discovered {
3263 8 : Discovered::Layer(layer_file_name, local_metadata) => {
3264 8 : discovered_layers.push((layer_file_name, local_metadata));
3265 8 : continue;
3266 : }
3267 0 : Discovered::IgnoredBackup(path) => {
3268 0 : std::fs::remove_file(path)
3269 0 : .or_else(fs_ext::ignore_not_found)
3270 0 : .fatal_err("Removing .old file");
3271 0 : continue;
3272 : }
3273 0 : Discovered::Unknown(file_name) => {
3274 0 : // we will later error if there are any
3275 0 : unrecognized_files.push(file_name);
3276 0 : continue;
3277 : }
3278 0 : Discovered::Ephemeral(name) => (name, "old ephemeral file"),
3279 0 : Discovered::Temporary(name) => (name, "temporary timeline file"),
3280 0 : Discovered::TemporaryDownload(name) => (name, "temporary download"),
3281 : };
3282 0 : path.push(Utf8Path::new(&name));
3283 0 : init::cleanup(&path, kind)?;
3284 0 : path.pop();
3285 : }
3286 :
3287 3 : if !unrecognized_files.is_empty() {
3288 : // assume that if there are any there are many many.
3289 0 : let n = unrecognized_files.len();
3290 0 : let first = &unrecognized_files[..n.min(10)];
3291 0 : anyhow::bail!(
3292 0 : "unrecognized files in timeline dir (total {n}), first 10: {first:?}"
3293 0 : );
3294 3 : }
3295 3 :
3296 3 : let decided = init::reconcile(discovered_layers, &index_part, disk_consistent_lsn);
3297 3 :
3298 3 : let mut loaded_layers = Vec::new();
3299 3 : let mut needs_cleanup = Vec::new();
3300 3 : let mut total_physical_size = 0;
3301 :
3302 11 : for (name, decision) in decided {
3303 8 : let decision = match decision {
3304 8 : Ok(decision) => decision,
3305 0 : Err(DismissedLayer::Future { local }) => {
3306 0 : if let Some(local) = local {
3307 0 : init::cleanup_future_layer(
3308 0 : &local.local_path,
3309 0 : &name,
3310 0 : disk_consistent_lsn,
3311 0 : )?;
3312 0 : }
3313 0 : needs_cleanup.push(name);
3314 0 : continue;
3315 : }
3316 0 : Err(DismissedLayer::LocalOnly(local)) => {
3317 0 : init::cleanup_local_only_file(&name, &local)?;
3318 : // this file never existed remotely, we will have to do rework
3319 0 : continue;
3320 : }
3321 0 : Err(DismissedLayer::BadMetadata(local)) => {
3322 0 : init::cleanup_local_file_for_remote(&local)?;
3323 : // this file never existed remotely, we will have to do rework
3324 0 : continue;
3325 : }
3326 : };
3327 :
3328 8 : match &name {
3329 6 : Delta(d) => assert!(d.lsn_range.end <= disk_consistent_lsn + 1),
3330 2 : Image(i) => assert!(i.lsn <= disk_consistent_lsn),
3331 : }
3332 :
3333 8 : tracing::debug!(layer=%name, ?decision, "applied");
3334 :
3335 8 : let layer = match decision {
3336 8 : Resident { local, remote } => {
3337 8 : total_physical_size += local.file_size;
3338 8 : Layer::for_resident(conf, &this, local.local_path, name, remote)
3339 8 : .drop_eviction_guard()
3340 : }
3341 0 : Evicted(remote) => Layer::for_evicted(conf, &this, name, remote),
3342 : };
3343 :
3344 8 : loaded_layers.push(layer);
3345 : }
3346 3 : Ok((loaded_layers, needs_cleanup, total_physical_size))
3347 3 : }
3348 3 : })
3349 3 : .await
3350 3 : .map_err(anyhow::Error::new)
3351 3 : .and_then(|x| x)?;
3352 :
3353 3 : let num_layers = loaded_layers.len();
3354 3 :
3355 3 : guard
3356 3 : .open_mut()
3357 3 : .expect("layermanager must be open during init")
3358 3 : .initialize_local_layers(loaded_layers, disk_consistent_lsn + 1);
3359 3 :
3360 3 : self.remote_client
3361 3 : .schedule_layer_file_deletion(&needs_cleanup)?;
3362 3 : self.remote_client
3363 3 : .schedule_index_upload_for_file_changes()?;
3364 : // This barrier orders above DELETEs before any later operations.
3365 : // This is critical because code executing after the barrier might
3366 : // create again objects with the same key that we just scheduled for deletion.
3367 : // For example, if we just scheduled deletion of an image layer "from the future",
3368 : // later compaction might run again and re-create the same image layer.
3369 : // "from the future" here means an image layer whose LSN is > IndexPart::disk_consistent_lsn.
3370 : // "same" here means same key range and LSN.
3371 : //
3372 : // Without a barrier between above DELETEs and the re-creation's PUTs,
3373 : // the upload queue may execute the PUT first, then the DELETE.
3374 : // In our example, we will end up with an IndexPart referencing a non-existent object.
3375 : //
3376 : // 1. a future image layer is created and uploaded
3377 : // 2. ps restart
3378 : // 3. the future layer from (1) is deleted during load layer map
3379 : // 4. image layer is re-created and uploaded
3380 : // 5. deletion queue would like to delete (1) but actually deletes (4)
3381 : // 6. delete by name works as expected, but it now deletes the wrong (later) version
3382 : //
3383 : // See https://github.com/neondatabase/neon/issues/5878
3384 : //
3385 : // NB: generation numbers naturally protect against this because they disambiguate
3386 : // (1) and (4)
3387 : // TODO: this is basically a no-op now, should we remove it?
3388 3 : self.remote_client.schedule_barrier()?;
3389 : // TenantShard::create_timeline will wait for these uploads to happen before returning, or
3390 : // on retry.
3391 :
3392 : // Now that we have the full layer map, we may calculate the visibility of layers within it (a global scan)
3393 3 : drop(guard); // drop write lock, update_layer_visibility will take a read lock.
3394 3 : self.update_layer_visibility().await?;
3395 :
3396 3 : info!(
3397 0 : "loaded layer map with {} layers at {}, total physical size: {}",
3398 : num_layers, disk_consistent_lsn, total_physical_size
3399 : );
3400 :
3401 3 : timer.stop_and_record();
3402 3 : Ok(())
3403 3 : }
3404 :
3405 : /// Retrieve current logical size of the timeline.
3406 : ///
3407 : /// The size could be lagging behind the actual number, in case
3408 : /// the initial size calculation has not been run (gets triggered on the first size access).
3409 : ///
3410 : /// return size and boolean flag that shows if the size is exact
3411 0 : pub(crate) fn get_current_logical_size(
3412 0 : self: &Arc<Self>,
3413 0 : priority: GetLogicalSizePriority,
3414 0 : ctx: &RequestContext,
3415 0 : ) -> logical_size::CurrentLogicalSize {
3416 0 : if !self.tenant_shard_id.is_shard_zero() {
3417 : // Logical size is only accurately maintained on shard zero: when called elsewhere, for example
3418 : // when HTTP API is serving a GET for timeline zero, return zero
3419 0 : return logical_size::CurrentLogicalSize::Approximate(logical_size::Approximate::zero());
3420 0 : }
3421 0 :
3422 0 : let current_size = self.current_logical_size.current_size();
3423 0 : debug!("Current size: {current_size:?}");
3424 :
3425 0 : match (current_size.accuracy(), priority) {
3426 0 : (logical_size::Accuracy::Exact, _) => (), // nothing to do
3427 0 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::Background) => {
3428 0 : // background task will eventually deliver an exact value, we're in no rush
3429 0 : }
3430 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::User) => {
3431 : // background task is not ready, but user is asking for it now;
3432 : // => make the background task skip the line
3433 : // (The alternative would be to calculate the size here, but,
3434 : // it can actually take a long time if the user has a lot of rels.
3435 : // And we'll inevitable need it again; So, let the background task do the work.)
3436 0 : match self
3437 0 : .current_logical_size
3438 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
3439 0 : .get()
3440 : {
3441 0 : Some(cancel) => cancel.cancel(),
3442 : None => {
3443 0 : match self.current_state() {
3444 0 : TimelineState::Broken { .. } | TimelineState::Stopping => {
3445 0 : // Can happen when timeline detail endpoint is used when deletion is ongoing (or its broken).
3446 0 : // Don't make noise.
3447 0 : }
3448 : TimelineState::Loading => {
3449 : // Import does not return an activated timeline.
3450 0 : info!(
3451 0 : "discarding priority boost for logical size calculation because timeline is not yet active"
3452 : );
3453 : }
3454 : TimelineState::Active => {
3455 : // activation should be setting the once cell
3456 0 : warn!(
3457 0 : "unexpected: cancel_wait_for_background_loop_concurrency_limit_semaphore not set, priority-boosting of logical size calculation will not work"
3458 : );
3459 0 : debug_assert!(false);
3460 : }
3461 : }
3462 : }
3463 : }
3464 : }
3465 : }
3466 :
3467 0 : if let CurrentLogicalSize::Approximate(_) = ¤t_size {
3468 0 : if ctx.task_kind() == TaskKind::WalReceiverConnectionHandler {
3469 0 : let first = self
3470 0 : .current_logical_size
3471 0 : .did_return_approximate_to_walreceiver
3472 0 : .compare_exchange(
3473 0 : false,
3474 0 : true,
3475 0 : AtomicOrdering::Relaxed,
3476 0 : AtomicOrdering::Relaxed,
3477 0 : )
3478 0 : .is_ok();
3479 0 : if first {
3480 0 : crate::metrics::initial_logical_size::TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE.inc();
3481 0 : }
3482 0 : }
3483 0 : }
3484 :
3485 0 : current_size
3486 0 : }
3487 :
3488 0 : fn spawn_initial_logical_size_computation_task(self: &Arc<Self>, ctx: &RequestContext) {
3489 0 : let Some(initial_part_end) = self.current_logical_size.initial_part_end else {
3490 : // nothing to do for freshly created timelines;
3491 0 : assert_eq!(
3492 0 : self.current_logical_size.current_size().accuracy(),
3493 0 : logical_size::Accuracy::Exact,
3494 0 : );
3495 0 : self.current_logical_size.initialized.add_permits(1);
3496 0 : return;
3497 : };
3498 :
3499 0 : let cancel_wait_for_background_loop_concurrency_limit_semaphore = CancellationToken::new();
3500 0 : let token = cancel_wait_for_background_loop_concurrency_limit_semaphore.clone();
3501 0 : self.current_logical_size
3502 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore.set(token)
3503 0 : .expect("initial logical size calculation task must be spawned exactly once per Timeline object");
3504 0 :
3505 0 : let self_clone = Arc::clone(self);
3506 0 : let background_ctx = ctx.detached_child(
3507 0 : TaskKind::InitialLogicalSizeCalculation,
3508 0 : DownloadBehavior::Download,
3509 0 : );
3510 0 : task_mgr::spawn(
3511 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
3512 0 : task_mgr::TaskKind::InitialLogicalSizeCalculation,
3513 0 : self.tenant_shard_id,
3514 0 : Some(self.timeline_id),
3515 0 : "initial size calculation",
3516 : // NB: don't log errors here, task_mgr will do that.
3517 0 : async move {
3518 0 : self_clone
3519 0 : .initial_logical_size_calculation_task(
3520 0 : initial_part_end,
3521 0 : cancel_wait_for_background_loop_concurrency_limit_semaphore,
3522 0 : background_ctx,
3523 0 : )
3524 0 : .await;
3525 0 : Ok(())
3526 0 : }
3527 0 : .instrument(info_span!(parent: None, "initial_size_calculation", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id)),
3528 : );
3529 0 : }
3530 :
3531 : /// # Cancellation
3532 : ///
3533 : /// This method is sensitive to `Timeline::cancel`.
3534 : ///
3535 : /// It is _not_ sensitive to task_mgr::shutdown_token().
3536 : ///
3537 : /// # Cancel-Safety
3538 : ///
3539 : /// It does Timeline IO, hence this should be polled to completion because
3540 : /// we could be leaving in-flight IOs behind, which is safe, but annoying
3541 : /// to reason about.
3542 0 : async fn initial_logical_size_calculation_task(
3543 0 : self: Arc<Self>,
3544 0 : initial_part_end: Lsn,
3545 0 : skip_concurrency_limiter: CancellationToken,
3546 0 : background_ctx: RequestContext,
3547 0 : ) {
3548 0 : scopeguard::defer! {
3549 0 : // Irrespective of the outcome of this operation, we should unblock anyone waiting for it.
3550 0 : self.current_logical_size.initialized.add_permits(1);
3551 0 : }
3552 0 :
3553 0 : let try_once = |attempt: usize| {
3554 0 : let background_ctx = &background_ctx;
3555 0 : let self_ref = &self;
3556 0 : let skip_concurrency_limiter = &skip_concurrency_limiter;
3557 0 : async move {
3558 0 : let wait_for_permit = super::tasks::acquire_concurrency_permit(
3559 0 : BackgroundLoopKind::InitialLogicalSizeCalculation,
3560 0 : background_ctx,
3561 0 : );
3562 :
3563 : use crate::metrics::initial_logical_size::StartCircumstances;
3564 0 : let (_maybe_permit, circumstances) = tokio::select! {
3565 0 : permit = wait_for_permit => {
3566 0 : (Some(permit), StartCircumstances::AfterBackgroundTasksRateLimit)
3567 : }
3568 0 : _ = self_ref.cancel.cancelled() => {
3569 0 : return Err(CalculateLogicalSizeError::Cancelled);
3570 : }
3571 0 : () = skip_concurrency_limiter.cancelled() => {
3572 : // Some action that is part of a end user interaction requested logical size
3573 : // => break out of the rate limit
3574 : // TODO: ideally we'd not run on BackgroundRuntime but the requester's runtime;
3575 : // but then again what happens if they cancel; also, we should just be using
3576 : // one runtime across the entire process, so, let's leave this for now.
3577 0 : (None, StartCircumstances::SkippedConcurrencyLimiter)
3578 : }
3579 : };
3580 :
3581 0 : let metrics_guard = if attempt == 1 {
3582 0 : crate::metrics::initial_logical_size::START_CALCULATION.first(circumstances)
3583 : } else {
3584 0 : crate::metrics::initial_logical_size::START_CALCULATION.retry(circumstances)
3585 : };
3586 :
3587 0 : let io_concurrency = IoConcurrency::spawn_from_conf(
3588 0 : self_ref.conf.get_vectored_concurrent_io,
3589 0 : self_ref
3590 0 : .gate
3591 0 : .enter()
3592 0 : .map_err(|_| CalculateLogicalSizeError::Cancelled)?,
3593 : );
3594 :
3595 0 : let calculated_size = self_ref
3596 0 : .logical_size_calculation_task(
3597 0 : initial_part_end,
3598 0 : LogicalSizeCalculationCause::Initial,
3599 0 : background_ctx,
3600 0 : )
3601 0 : .await?;
3602 :
3603 0 : self_ref
3604 0 : .trigger_aux_file_size_computation(
3605 0 : initial_part_end,
3606 0 : background_ctx,
3607 0 : io_concurrency,
3608 0 : )
3609 0 : .await?;
3610 :
3611 : // TODO: add aux file size to logical size
3612 :
3613 0 : Ok((calculated_size, metrics_guard))
3614 0 : }
3615 0 : };
3616 :
3617 0 : let retrying = async {
3618 0 : let mut attempt = 0;
3619 : loop {
3620 0 : attempt += 1;
3621 0 :
3622 0 : match try_once(attempt).await {
3623 0 : Ok(res) => return ControlFlow::Continue(res),
3624 0 : Err(CalculateLogicalSizeError::Cancelled) => return ControlFlow::Break(()),
3625 : Err(
3626 0 : e @ (CalculateLogicalSizeError::Decode(_)
3627 0 : | CalculateLogicalSizeError::PageRead(_)),
3628 0 : ) => {
3629 0 : warn!(attempt, "initial size calculation failed: {e:?}");
3630 : // exponential back-off doesn't make sense at these long intervals;
3631 : // use fixed retry interval with generous jitter instead
3632 0 : let sleep_duration = Duration::from_secs(
3633 0 : u64::try_from(
3634 0 : // 1hour base
3635 0 : (60_i64 * 60_i64)
3636 0 : // 10min jitter
3637 0 : + rand::thread_rng().gen_range(-10 * 60..10 * 60),
3638 0 : )
3639 0 : .expect("10min < 1hour"),
3640 0 : );
3641 0 : tokio::select! {
3642 0 : _ = tokio::time::sleep(sleep_duration) => {}
3643 0 : _ = self.cancel.cancelled() => return ControlFlow::Break(()),
3644 : }
3645 : }
3646 : }
3647 : }
3648 0 : };
3649 :
3650 0 : let (calculated_size, metrics_guard) = match retrying.await {
3651 0 : ControlFlow::Continue(calculated_size) => calculated_size,
3652 0 : ControlFlow::Break(()) => return,
3653 : };
3654 :
3655 : // we cannot query current_logical_size.current_size() to know the current
3656 : // *negative* value, only truncated to u64.
3657 0 : let added = self
3658 0 : .current_logical_size
3659 0 : .size_added_after_initial
3660 0 : .load(AtomicOrdering::Relaxed);
3661 0 :
3662 0 : let sum = calculated_size.saturating_add_signed(added);
3663 0 :
3664 0 : // set the gauge value before it can be set in `update_current_logical_size`.
3665 0 : self.metrics.current_logical_size_gauge.set(sum);
3666 0 :
3667 0 : self.current_logical_size
3668 0 : .initial_logical_size
3669 0 : .set((calculated_size, metrics_guard.calculation_result_saved()))
3670 0 : .ok()
3671 0 : .expect("only this task sets it");
3672 0 : }
3673 :
3674 7 : pub(crate) fn spawn_ondemand_logical_size_calculation(
3675 7 : self: &Arc<Self>,
3676 7 : lsn: Lsn,
3677 7 : cause: LogicalSizeCalculationCause,
3678 7 : ctx: RequestContext,
3679 7 : ) -> oneshot::Receiver<Result<u64, CalculateLogicalSizeError>> {
3680 7 : let (sender, receiver) = oneshot::channel();
3681 7 : let self_clone = Arc::clone(self);
3682 7 : // XXX if our caller loses interest, i.e., ctx is cancelled,
3683 7 : // we should stop the size calculation work and return an error.
3684 7 : // That would require restructuring this function's API to
3685 7 : // return the result directly, instead of a Receiver for the result.
3686 7 : let ctx = ctx.detached_child(
3687 7 : TaskKind::OndemandLogicalSizeCalculation,
3688 7 : DownloadBehavior::Download,
3689 7 : );
3690 7 : task_mgr::spawn(
3691 7 : task_mgr::BACKGROUND_RUNTIME.handle(),
3692 7 : task_mgr::TaskKind::OndemandLogicalSizeCalculation,
3693 7 : self.tenant_shard_id,
3694 7 : Some(self.timeline_id),
3695 7 : "ondemand logical size calculation",
3696 7 : async move {
3697 7 : let res = self_clone
3698 7 : .logical_size_calculation_task(lsn, cause, &ctx)
3699 7 : .await;
3700 7 : let _ = sender.send(res).ok();
3701 7 : Ok(()) // Receiver is responsible for handling errors
3702 7 : }
3703 7 : .in_current_span(),
3704 7 : );
3705 7 : receiver
3706 7 : }
3707 :
3708 : #[instrument(skip_all)]
3709 : async fn logical_size_calculation_task(
3710 : self: &Arc<Self>,
3711 : lsn: Lsn,
3712 : cause: LogicalSizeCalculationCause,
3713 : ctx: &RequestContext,
3714 : ) -> Result<u64, CalculateLogicalSizeError> {
3715 : crate::span::debug_assert_current_span_has_tenant_and_timeline_id();
3716 : // We should never be calculating logical sizes on shard !=0, because these shards do not have
3717 : // accurate relation sizes, and they do not emit consumption metrics.
3718 : debug_assert!(self.tenant_shard_id.is_shard_zero());
3719 :
3720 : let guard = self
3721 : .gate
3722 : .enter()
3723 0 : .map_err(|_| CalculateLogicalSizeError::Cancelled)?;
3724 :
3725 : self.calculate_logical_size(lsn, cause, &guard, ctx).await
3726 : }
3727 :
3728 : /// Calculate the logical size of the database at the latest LSN.
3729 : ///
3730 : /// NOTE: counted incrementally, includes ancestors. This can be a slow operation,
3731 : /// especially if we need to download remote layers.
3732 7 : async fn calculate_logical_size(
3733 7 : &self,
3734 7 : up_to_lsn: Lsn,
3735 7 : cause: LogicalSizeCalculationCause,
3736 7 : _guard: &GateGuard,
3737 7 : ctx: &RequestContext,
3738 7 : ) -> Result<u64, CalculateLogicalSizeError> {
3739 7 : info!(
3740 0 : "Calculating logical size for timeline {} at {}",
3741 : self.timeline_id, up_to_lsn
3742 : );
3743 :
3744 7 : if let Err(()) = pausable_failpoint!("timeline-calculate-logical-size-pause", &self.cancel)
3745 : {
3746 0 : return Err(CalculateLogicalSizeError::Cancelled);
3747 7 : }
3748 :
3749 : // See if we've already done the work for initial size calculation.
3750 : // This is a short-cut for timelines that are mostly unused.
3751 7 : if let Some(size) = self.current_logical_size.initialized_size(up_to_lsn) {
3752 0 : return Ok(size);
3753 7 : }
3754 7 : let storage_time_metrics = match cause {
3755 : LogicalSizeCalculationCause::Initial
3756 : | LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize
3757 0 : | LogicalSizeCalculationCause::TenantSizeHandler => &self.metrics.logical_size_histo,
3758 : LogicalSizeCalculationCause::EvictionTaskImitation => {
3759 7 : &self.metrics.imitate_logical_size_histo
3760 : }
3761 : };
3762 7 : let timer = storage_time_metrics.start_timer();
3763 7 : let logical_size = self
3764 7 : .get_current_logical_size_non_incremental(up_to_lsn, ctx)
3765 7 : .await?;
3766 7 : debug!("calculated logical size: {logical_size}");
3767 7 : timer.stop_and_record();
3768 7 : Ok(logical_size)
3769 7 : }
3770 :
3771 : /// Update current logical size, adding `delta' to the old value.
3772 135285 : fn update_current_logical_size(&self, delta: i64) {
3773 135285 : let logical_size = &self.current_logical_size;
3774 135285 : logical_size.increment_size(delta);
3775 135285 :
3776 135285 : // Also set the value in the prometheus gauge. Note that
3777 135285 : // there is a race condition here: if this is is called by two
3778 135285 : // threads concurrently, the prometheus gauge might be set to
3779 135285 : // one value while current_logical_size is set to the
3780 135285 : // other.
3781 135285 : match logical_size.current_size() {
3782 135285 : CurrentLogicalSize::Exact(ref new_current_size) => self
3783 135285 : .metrics
3784 135285 : .current_logical_size_gauge
3785 135285 : .set(new_current_size.into()),
3786 0 : CurrentLogicalSize::Approximate(_) => {
3787 0 : // don't update the gauge yet, this allows us not to update the gauge back and
3788 0 : // forth between the initial size calculation task.
3789 0 : }
3790 : }
3791 135285 : }
3792 :
3793 1517 : pub(crate) fn update_directory_entries_count(&self, kind: DirectoryKind, count: MetricsUpdate) {
3794 1517 : // TODO: this directory metrics is not correct -- we could have multiple reldirs in the system
3795 1517 : // for each of the database, but we only store one value, and therefore each pgdirmodification
3796 1517 : // would overwrite the previous value if they modify different databases.
3797 1517 :
3798 1517 : match count {
3799 556 : MetricsUpdate::Set(count) => {
3800 556 : self.directory_metrics[kind.offset()].store(count, AtomicOrdering::Relaxed);
3801 556 : self.directory_metrics_inited[kind.offset()].store(true, AtomicOrdering::Relaxed);
3802 556 : }
3803 960 : MetricsUpdate::Add(count) => {
3804 960 : // TODO: these operations are not atomic; but we only have one writer to the metrics, so
3805 960 : // it's fine.
3806 960 : if self.directory_metrics_inited[kind.offset()].load(AtomicOrdering::Relaxed) {
3807 960 : // The metrics has been initialized with `MetricsUpdate::Set` before, so we can add/sub
3808 960 : // the value reliably.
3809 960 : self.directory_metrics[kind.offset()].fetch_add(count, AtomicOrdering::Relaxed);
3810 960 : }
3811 : // Otherwise, ignore this update
3812 : }
3813 1 : MetricsUpdate::Sub(count) => {
3814 1 : // TODO: these operations are not atomic; but we only have one writer to the metrics, so
3815 1 : // it's fine.
3816 1 : if self.directory_metrics_inited[kind.offset()].load(AtomicOrdering::Relaxed) {
3817 1 : // The metrics has been initialized with `MetricsUpdate::Set` before.
3818 1 : // The operation could overflow so we need to normalize the value.
3819 1 : let prev_val =
3820 1 : self.directory_metrics[kind.offset()].load(AtomicOrdering::Relaxed);
3821 1 : let res = prev_val.saturating_sub(count);
3822 1 : self.directory_metrics[kind.offset()].store(res, AtomicOrdering::Relaxed);
3823 1 : }
3824 : // Otherwise, ignore this update
3825 : }
3826 : };
3827 :
3828 : // TODO: remove this, there's no place in the code that updates this aux metrics.
3829 1517 : let aux_metric =
3830 1517 : self.directory_metrics[DirectoryKind::AuxFiles.offset()].load(AtomicOrdering::Relaxed);
3831 1517 :
3832 1517 : let sum_of_entries = self
3833 1517 : .directory_metrics
3834 1517 : .iter()
3835 12136 : .map(|v| v.load(AtomicOrdering::Relaxed))
3836 1517 : .sum();
3837 : // Set a high general threshold and a lower threshold for the auxiliary files,
3838 : // as we can have large numbers of relations in the db directory.
3839 : const SUM_THRESHOLD: u64 = 5000;
3840 : const AUX_THRESHOLD: u64 = 1000;
3841 1517 : if sum_of_entries >= SUM_THRESHOLD || aux_metric >= AUX_THRESHOLD {
3842 0 : self.metrics
3843 0 : .directory_entries_count_gauge
3844 0 : .set(sum_of_entries);
3845 1517 : } else if let Some(metric) = Lazy::get(&self.metrics.directory_entries_count_gauge) {
3846 0 : metric.set(sum_of_entries);
3847 1517 : }
3848 1517 : }
3849 :
3850 0 : async fn find_layer(
3851 0 : &self,
3852 0 : layer_name: &LayerName,
3853 0 : ) -> Result<Option<Layer>, layer_manager::Shutdown> {
3854 0 : let guard = self.layers.read().await;
3855 0 : let layer = guard
3856 0 : .layer_map()?
3857 0 : .iter_historic_layers()
3858 0 : .find(|l| &l.layer_name() == layer_name)
3859 0 : .map(|found| guard.get_from_desc(&found));
3860 0 : Ok(layer)
3861 0 : }
3862 :
3863 0 : pub(super) fn should_keep_previous_heatmap(&self, new_heatmap_end_lsn: Lsn) -> bool {
3864 0 : let crnt = self.previous_heatmap.load();
3865 0 : match crnt.as_deref() {
3866 0 : Some(PreviousHeatmap::Active { end_lsn, .. }) => match end_lsn {
3867 0 : Some(crnt_end_lsn) => *crnt_end_lsn > new_heatmap_end_lsn,
3868 0 : None => true,
3869 : },
3870 0 : Some(PreviousHeatmap::Obsolete) => false,
3871 0 : None => false,
3872 : }
3873 0 : }
3874 :
3875 : /// The timeline heatmap is a hint to secondary locations from the primary location,
3876 : /// indicating which layers are currently on-disk on the primary.
3877 : ///
3878 : /// None is returned if the Timeline is in a state where uploading a heatmap
3879 : /// doesn't make sense, such as shutting down or initializing. The caller
3880 : /// should treat this as a cue to simply skip doing any heatmap uploading
3881 : /// for this timeline.
3882 8 : pub(crate) async fn generate_heatmap(&self) -> Option<HeatMapTimeline> {
3883 8 : if !self.is_active() {
3884 0 : return None;
3885 8 : }
3886 :
3887 8 : let guard = self.layers.read().await;
3888 :
3889 : // Firstly, if there's any heatmap left over from when this location
3890 : // was a secondary, take that into account. Keep layers that are:
3891 : // * present in the layer map
3892 : // * visible
3893 : // * non-resident
3894 : // * not evicted since we read the heatmap
3895 : //
3896 : // Without this, a new cold, attached location would clobber the previous
3897 : // heatamp.
3898 8 : let previous_heatmap = self.previous_heatmap.load();
3899 8 : let visible_non_resident = match previous_heatmap.as_deref() {
3900 : Some(PreviousHeatmap::Active {
3901 6 : heatmap, read_at, ..
3902 23 : }) => Some(heatmap.all_layers().filter_map(|hl| {
3903 23 : let desc: PersistentLayerDesc = hl.name.clone().into();
3904 23 : let layer = guard.try_get_from_key(&desc.key())?;
3905 :
3906 23 : if layer.visibility() == LayerVisibilityHint::Covered {
3907 0 : return None;
3908 23 : }
3909 23 :
3910 23 : if layer.is_likely_resident() {
3911 10 : return None;
3912 13 : }
3913 13 :
3914 13 : if layer.last_evicted_at().happened_after(*read_at) {
3915 3 : return None;
3916 10 : }
3917 10 :
3918 10 : Some((desc, hl.metadata.clone(), hl.access_time, hl.cold))
3919 23 : })),
3920 0 : Some(PreviousHeatmap::Obsolete) => None,
3921 2 : None => None,
3922 : };
3923 :
3924 : // Secondly, all currently visible, resident layers are included.
3925 18 : let resident = guard.likely_resident_layers().filter_map(|layer| {
3926 18 : match layer.visibility() {
3927 : LayerVisibilityHint::Visible => {
3928 : // Layer is visible to one or more read LSNs: elegible for inclusion in layer map
3929 17 : let last_activity_ts = layer.latest_activity();
3930 17 : Some((
3931 17 : layer.layer_desc().clone(),
3932 17 : layer.metadata(),
3933 17 : last_activity_ts,
3934 17 : false, // these layers are not cold
3935 17 : ))
3936 : }
3937 : LayerVisibilityHint::Covered => {
3938 : // Layer is resident but unlikely to be read: not elegible for inclusion in heatmap.
3939 1 : None
3940 : }
3941 : }
3942 18 : });
3943 :
3944 8 : let mut layers = match visible_non_resident {
3945 6 : Some(non_resident) => {
3946 6 : let mut non_resident = non_resident.peekable();
3947 6 : if non_resident.peek().is_none() {
3948 2 : tracing::info!(timeline_id=%self.timeline_id, "Previous heatmap now obsolete");
3949 2 : self.previous_heatmap
3950 2 : .store(Some(PreviousHeatmap::Obsolete.into()));
3951 4 : }
3952 :
3953 6 : non_resident.chain(resident).collect::<Vec<_>>()
3954 : }
3955 2 : None => resident.collect::<Vec<_>>(),
3956 : };
3957 :
3958 : // Sort layers in order of which to download first. For a large set of layers to download, we
3959 : // want to prioritize those layers which are most likely to still be in the resident many minutes
3960 : // or hours later:
3961 : // - Cold layers go last for convenience when a human inspects the heatmap.
3962 : // - Download L0s last, because they churn the fastest: L0s on a fast-writing tenant might
3963 : // only exist for a few minutes before being compacted into L1s.
3964 : // - For L1 & image layers, download most recent LSNs first: the older the LSN, the sooner
3965 : // the layer is likely to be covered by an image layer during compaction.
3966 58 : layers.sort_by_key(|(desc, _meta, _atime, cold)| {
3967 58 : std::cmp::Reverse((
3968 58 : *cold,
3969 58 : !LayerMap::is_l0(&desc.key_range, desc.is_delta),
3970 58 : desc.lsn_range.end,
3971 58 : ))
3972 58 : });
3973 8 :
3974 8 : let layers = layers
3975 8 : .into_iter()
3976 27 : .map(|(desc, meta, atime, cold)| {
3977 27 : HeatMapLayer::new(desc.layer_name(), meta, atime, cold)
3978 27 : })
3979 8 : .collect();
3980 8 :
3981 8 : Some(HeatMapTimeline::new(self.timeline_id, layers))
3982 8 : }
3983 :
3984 0 : pub(super) async fn generate_unarchival_heatmap(&self, end_lsn: Lsn) -> PreviousHeatmap {
3985 0 : let guard = self.layers.read().await;
3986 :
3987 0 : let now = SystemTime::now();
3988 0 : let mut heatmap_layers = Vec::default();
3989 0 : for vl in guard.visible_layers() {
3990 0 : if vl.layer_desc().get_lsn_range().start >= end_lsn {
3991 0 : continue;
3992 0 : }
3993 0 :
3994 0 : let hl = HeatMapLayer {
3995 0 : name: vl.layer_desc().layer_name(),
3996 0 : metadata: vl.metadata(),
3997 0 : access_time: now,
3998 0 : cold: true,
3999 0 : };
4000 0 : heatmap_layers.push(hl);
4001 : }
4002 :
4003 0 : tracing::info!(
4004 0 : "Generating unarchival heatmap with {} layers",
4005 0 : heatmap_layers.len()
4006 : );
4007 :
4008 0 : let heatmap = HeatMapTimeline::new(self.timeline_id, heatmap_layers);
4009 0 : PreviousHeatmap::Active {
4010 0 : heatmap,
4011 0 : read_at: Instant::now(),
4012 0 : end_lsn: Some(end_lsn),
4013 0 : }
4014 0 : }
4015 :
4016 : /// Returns true if the given lsn is or was an ancestor branchpoint.
4017 0 : pub(crate) fn is_ancestor_lsn(&self, lsn: Lsn) -> bool {
4018 0 : // upon timeline detach, we set the ancestor_lsn to Lsn::INVALID and the store the original
4019 0 : // branchpoint in the value in IndexPart::lineage
4020 0 : self.ancestor_lsn == lsn
4021 0 : || (self.ancestor_lsn == Lsn::INVALID
4022 0 : && self.remote_client.is_previous_ancestor_lsn(lsn))
4023 0 : }
4024 : }
4025 :
4026 : #[derive(Clone)]
4027 : /// Type representing a query in the ([`Lsn`], [`Key`]) space.
4028 : /// In other words, a set of segments in a 2D space.
4029 : ///
4030 : /// This representation has the advatange of avoiding hash map
4031 : /// allocations for uniform queries.
4032 : pub(crate) enum VersionedKeySpaceQuery {
4033 : /// Variant for queries at a single [`Lsn`]
4034 : Uniform { keyspace: KeySpace, lsn: Lsn },
4035 : /// Variant for queries at multiple [`Lsn`]s
4036 : Scattered {
4037 : keyspaces_at_lsn: Vec<(Lsn, KeySpace)>,
4038 : },
4039 : }
4040 :
4041 : impl VersionedKeySpaceQuery {
4042 302156 : pub(crate) fn uniform(keyspace: KeySpace, lsn: Lsn) -> Self {
4043 302156 : Self::Uniform { keyspace, lsn }
4044 302156 : }
4045 :
4046 10192 : pub(crate) fn scattered(keyspaces_at_lsn: Vec<(Lsn, KeySpace)>) -> Self {
4047 10192 : Self::Scattered { keyspaces_at_lsn }
4048 10192 : }
4049 :
4050 : /// Returns the most recent (largest) LSN included in the query.
4051 : /// If any of the LSNs included in the query are invalid, returns
4052 : /// an error instead.
4053 624696 : fn high_watermark_lsn(&self) -> Result<Lsn, GetVectoredError> {
4054 624696 : match self {
4055 604312 : Self::Uniform { lsn, .. } => {
4056 604312 : if !lsn.is_valid() {
4057 0 : return Err(GetVectoredError::InvalidLsn(*lsn));
4058 604312 : }
4059 604312 :
4060 604312 : Ok(*lsn)
4061 : }
4062 20384 : Self::Scattered { keyspaces_at_lsn } => {
4063 20384 : let mut max_lsn = None;
4064 42218 : for (lsn, _keyspace) in keyspaces_at_lsn.iter() {
4065 42218 : if !lsn.is_valid() {
4066 0 : return Err(GetVectoredError::InvalidLsn(*lsn));
4067 42218 : }
4068 42218 : max_lsn = std::cmp::max(max_lsn, Some(lsn));
4069 : }
4070 :
4071 20384 : if let Some(computed) = max_lsn {
4072 20384 : Ok(*computed)
4073 : } else {
4074 0 : Err(GetVectoredError::Other(anyhow!("empty input")))
4075 : }
4076 : }
4077 : }
4078 624696 : }
4079 :
4080 : /// Returns the total keyspace being queried: the result of projecting
4081 : /// everything in the key dimensions onto the key axis.
4082 323361 : fn total_keyspace(&self) -> KeySpace {
4083 323361 : match self {
4084 302977 : Self::Uniform { keyspace, .. } => keyspace.clone(),
4085 20384 : Self::Scattered { keyspaces_at_lsn } => keyspaces_at_lsn
4086 20384 : .iter()
4087 42218 : .map(|(_lsn, keyspace)| keyspace)
4088 42218 : .fold(KeySpace::default(), |mut acc, v| {
4089 42218 : acc.merge(v);
4090 42218 : acc
4091 42218 : }),
4092 : }
4093 323361 : }
4094 :
4095 : /// Returns LSN for a specific key.
4096 : ///
4097 : /// Invariant: requested key must be part of [`Self::total_keyspace`]
4098 395519 : pub(super) fn map_key_to_lsn(&self, key: &Key) -> Lsn {
4099 395519 : match self {
4100 322327 : Self::Uniform { lsn, .. } => *lsn,
4101 73192 : Self::Scattered { keyspaces_at_lsn } => {
4102 73192 : keyspaces_at_lsn
4103 73192 : .iter()
4104 423930 : .find(|(_lsn, keyspace)| keyspace.contains(key))
4105 73192 : .expect("Returned key was requested")
4106 73192 : .0
4107 : }
4108 : }
4109 395519 : }
4110 :
4111 : /// Remove any parts of the query (segments) which overlap with the provided
4112 : /// key space (also segments).
4113 962179 : fn remove_overlapping_with(&mut self, to_remove: &KeySpace) -> KeySpace {
4114 962179 : match self {
4115 941795 : Self::Uniform { keyspace, .. } => keyspace.remove_overlapping_with(to_remove),
4116 20384 : Self::Scattered { keyspaces_at_lsn } => {
4117 20384 : let mut removed_accum = KeySpaceRandomAccum::new();
4118 42218 : keyspaces_at_lsn.iter_mut().for_each(|(_lsn, keyspace)| {
4119 42218 : let removed = keyspace.remove_overlapping_with(to_remove);
4120 42218 : removed_accum.add_keyspace(removed);
4121 42218 : });
4122 20384 :
4123 20384 : removed_accum.to_keyspace()
4124 : }
4125 : }
4126 962179 : }
4127 :
4128 737190 : fn is_empty(&self) -> bool {
4129 737190 : match self {
4130 716806 : Self::Uniform { keyspace, .. } => keyspace.is_empty(),
4131 20384 : Self::Scattered { keyspaces_at_lsn } => keyspaces_at_lsn
4132 20384 : .iter()
4133 31301 : .all(|(_lsn, keyspace)| keyspace.is_empty()),
4134 : }
4135 737190 : }
4136 :
4137 : /// "Lower" the query on the LSN dimension
4138 112495 : fn lower(&mut self, to: Lsn) {
4139 112495 : match self {
4140 112495 : Self::Uniform { lsn, .. } => {
4141 112495 : // If the originally requested LSN is smaller than the starting
4142 112495 : // LSN of the ancestor we are descending into, we need to respect that.
4143 112495 : // Hence the min.
4144 112495 : *lsn = std::cmp::min(*lsn, to);
4145 112495 : }
4146 0 : Self::Scattered { keyspaces_at_lsn } => {
4147 0 : keyspaces_at_lsn.iter_mut().for_each(|(lsn, _keyspace)| {
4148 0 : *lsn = std::cmp::min(*lsn, to);
4149 0 : });
4150 0 : }
4151 : }
4152 112495 : }
4153 : }
4154 :
4155 : impl std::fmt::Display for VersionedKeySpaceQuery {
4156 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
4157 0 : write!(f, "[")?;
4158 :
4159 0 : match self {
4160 0 : VersionedKeySpaceQuery::Uniform { keyspace, lsn } => {
4161 0 : write!(f, "{keyspace} @ {lsn}")?;
4162 : }
4163 0 : VersionedKeySpaceQuery::Scattered { keyspaces_at_lsn } => {
4164 0 : for (lsn, keyspace) in keyspaces_at_lsn.iter() {
4165 0 : write!(f, "{keyspace} @ {lsn},")?;
4166 : }
4167 : }
4168 : }
4169 :
4170 0 : write!(f, "]")
4171 0 : }
4172 : }
4173 :
4174 : impl Timeline {
4175 : #[allow(clippy::doc_lazy_continuation)]
4176 : /// Get the data needed to reconstruct all keys in the provided keyspace
4177 : ///
4178 : /// The algorithm is as follows:
4179 : /// 1. While some keys are still not done and there's a timeline to visit:
4180 : /// 2. Visit the timeline (see [`Timeline::get_vectored_reconstruct_data_timeline`]:
4181 : /// 2.1: Build the fringe for the current keyspace
4182 : /// 2.2 Visit the newest layer from the fringe to collect all values for the range it
4183 : /// intersects
4184 : /// 2.3. Pop the timeline from the fringe
4185 : /// 2.4. If the fringe is empty, go back to 1
4186 312348 : async fn get_vectored_reconstruct_data(
4187 312348 : &self,
4188 312348 : mut query: VersionedKeySpaceQuery,
4189 312348 : reconstruct_state: &mut ValuesReconstructState,
4190 312348 : ctx: &RequestContext,
4191 312348 : ) -> Result<(), GetVectoredError> {
4192 312348 : let original_hwm_lsn = query.high_watermark_lsn().unwrap();
4193 312348 :
4194 312348 : let mut timeline_owned: Arc<Timeline>;
4195 312348 : let mut timeline = self;
4196 :
4197 312347 : let missing_keyspace = loop {
4198 424842 : if self.cancel.is_cancelled() {
4199 0 : return Err(GetVectoredError::Cancelled);
4200 424842 : }
4201 :
4202 : let TimelineVisitOutcome {
4203 424842 : completed_keyspace: completed,
4204 424842 : image_covered_keyspace,
4205 : } = {
4206 424842 : let ctx = RequestContextBuilder::from(ctx)
4207 424842 : .perf_span(|crnt_perf_span| {
4208 0 : info_span!(
4209 : target: PERF_TRACE_TARGET,
4210 0 : parent: crnt_perf_span,
4211 : "PLAN_IO_TIMELINE",
4212 : timeline = %timeline.timeline_id,
4213 0 : high_watermark_lsn = %query.high_watermark_lsn().unwrap(),
4214 : )
4215 424842 : })
4216 424842 : .attached_child();
4217 424842 :
4218 424842 : Self::get_vectored_reconstruct_data_timeline(
4219 424842 : timeline,
4220 424842 : &query,
4221 424842 : reconstruct_state,
4222 424842 : &self.cancel,
4223 424842 : &ctx,
4224 424842 : )
4225 424842 : .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone())
4226 424842 : .await?
4227 : };
4228 :
4229 424842 : query.remove_overlapping_with(&completed);
4230 424842 :
4231 424842 : // Do not descend into the ancestor timeline for aux files.
4232 424842 : // We don't return a blanket [`GetVectoredError::MissingKey`] to avoid
4233 424842 : // stalling compaction.
4234 424842 : query.remove_overlapping_with(&KeySpace {
4235 424842 : ranges: vec![NON_INHERITED_RANGE, Key::sparse_non_inherited_keyspace()],
4236 424842 : });
4237 424842 :
4238 424842 : // Keyspace is fully retrieved
4239 424842 : if query.is_empty() {
4240 312216 : break None;
4241 112626 : }
4242 :
4243 112626 : let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() else {
4244 : // Not fully retrieved but no ancestor timeline.
4245 131 : break Some(query.total_keyspace());
4246 : };
4247 :
4248 : // Now we see if there are keys covered by the image layer but does not exist in the
4249 : // image layer, which means that the key does not exist.
4250 :
4251 : // The block below will stop the vectored search if any of the keys encountered an image layer
4252 : // which did not contain a snapshot for said key. Since we have already removed all completed
4253 : // keys from `keyspace`, we expect there to be no overlap between it and the image covered key
4254 : // space. If that's not the case, we had at least one key encounter a gap in the image layer
4255 : // and stop the search as a result of that.
4256 112495 : let mut removed = query.remove_overlapping_with(&image_covered_keyspace);
4257 112495 : // Do not fire missing key error and end early for sparse keys. Note that we hava already removed
4258 112495 : // non-inherited keyspaces before, so we can safely do a full `SPARSE_RANGE` remove instead of
4259 112495 : // figuring out what is the inherited key range and do a fine-grained pruning.
4260 112495 : removed.remove_overlapping_with(&KeySpace {
4261 112495 : ranges: vec![SPARSE_RANGE],
4262 112495 : });
4263 112495 : if !removed.is_empty() {
4264 0 : break Some(removed);
4265 112495 : }
4266 112495 :
4267 112495 : // Each key range in the original query is at some point in the LSN space.
4268 112495 : // When descending into the ancestor, lower all ranges in the LSN space
4269 112495 : // such that new changes on the parent timeline are not visible.
4270 112495 : query.lower(timeline.ancestor_lsn);
4271 112495 :
4272 112495 : let ctx = RequestContextBuilder::from(ctx)
4273 112495 : .perf_span(|crnt_perf_span| {
4274 0 : info_span!(
4275 : target: PERF_TRACE_TARGET,
4276 0 : parent: crnt_perf_span,
4277 : "GET_ANCESTOR",
4278 : timeline = %timeline.timeline_id,
4279 0 : ancestor = %ancestor_timeline.timeline_id,
4280 : ancestor_lsn = %timeline.ancestor_lsn
4281 : )
4282 112495 : })
4283 112495 : .attached_child();
4284 :
4285 112495 : timeline_owned = timeline
4286 112495 : .get_ready_ancestor_timeline(ancestor_timeline, &ctx)
4287 112495 : .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone())
4288 112495 : .await?;
4289 112494 : timeline = &*timeline_owned;
4290 : };
4291 :
4292 : // Remove sparse keys from the keyspace so that it doesn't fire errors.
4293 312347 : let missing_keyspace = if let Some(missing_keyspace) = missing_keyspace {
4294 131 : let mut missing_keyspace = missing_keyspace;
4295 131 : missing_keyspace.remove_overlapping_with(&KeySpace {
4296 131 : ranges: vec![SPARSE_RANGE],
4297 131 : });
4298 131 : if missing_keyspace.is_empty() {
4299 124 : None
4300 : } else {
4301 7 : Some(missing_keyspace)
4302 : }
4303 : } else {
4304 312216 : None
4305 : };
4306 :
4307 312347 : if let Some(missing_keyspace) = missing_keyspace {
4308 7 : return Err(GetVectoredError::MissingKey(Box::new(MissingKeyError {
4309 7 : keyspace: missing_keyspace, /* better if we can store the full keyspace */
4310 7 : shard: self.shard_identity.number,
4311 7 : original_hwm_lsn,
4312 7 : ancestor_lsn: Some(timeline.ancestor_lsn),
4313 7 : backtrace: None,
4314 7 : read_path: std::mem::take(&mut reconstruct_state.read_path),
4315 7 : query: None,
4316 7 : })));
4317 312340 : }
4318 312340 :
4319 312340 : Ok(())
4320 312348 : }
4321 :
4322 424842 : async fn get_vectored_init_fringe(
4323 424842 : &self,
4324 424842 : query: &VersionedKeySpaceQuery,
4325 424842 : ) -> Result<LayerFringe, GetVectoredError> {
4326 424842 : let mut fringe = LayerFringe::new();
4327 424842 : let guard = self.layers.read().await;
4328 :
4329 424842 : match query {
4330 414650 : VersionedKeySpaceQuery::Uniform { keyspace, lsn } => {
4331 414650 : // LSNs requested by the compute or determined by the pageserver
4332 414650 : // are inclusive. Queries to the layer map use exclusive LSNs.
4333 414650 : // Hence, bump the value before the query - same in the other
4334 414650 : // match arm.
4335 414650 : let cont_lsn = Lsn(lsn.0 + 1);
4336 414650 : guard.update_search_fringe(keyspace, cont_lsn, &mut fringe)?;
4337 : }
4338 10192 : VersionedKeySpaceQuery::Scattered { keyspaces_at_lsn } => {
4339 21109 : for (lsn, keyspace) in keyspaces_at_lsn.iter() {
4340 21109 : let cont_lsn_for_keyspace = Lsn(lsn.0 + 1);
4341 21109 : guard.update_search_fringe(keyspace, cont_lsn_for_keyspace, &mut fringe)?;
4342 : }
4343 : }
4344 : }
4345 :
4346 424842 : Ok(fringe)
4347 424842 : }
4348 :
4349 : /// Collect the reconstruct data for a keyspace from the specified timeline.
4350 : ///
4351 : /// Maintain a fringe [`LayerFringe`] which tracks all the layers that intersect
4352 : /// the current keyspace. The current keyspace of the search at any given timeline
4353 : /// is the original keyspace minus all the keys that have been completed minus
4354 : /// any keys for which we couldn't find an intersecting layer. It's not tracked explicitly,
4355 : /// but if you merge all the keyspaces in the fringe, you get the "current keyspace".
4356 : ///
4357 : /// This is basically a depth-first search visitor implementation where a vertex
4358 : /// is the (layer, lsn range, key space) tuple. The fringe acts as the stack.
4359 : ///
4360 : /// At each iteration pop the top of the fringe (the layer with the highest Lsn)
4361 : /// and get all the required reconstruct data from the layer in one go.
4362 : ///
4363 : /// Returns the completed keyspace and the keyspaces with image coverage. The caller
4364 : /// decides how to deal with these two keyspaces.
4365 424842 : async fn get_vectored_reconstruct_data_timeline(
4366 424842 : timeline: &Timeline,
4367 424842 : query: &VersionedKeySpaceQuery,
4368 424842 : reconstruct_state: &mut ValuesReconstructState,
4369 424842 : cancel: &CancellationToken,
4370 424842 : ctx: &RequestContext,
4371 424842 : ) -> Result<TimelineVisitOutcome, GetVectoredError> {
4372 424842 : // Prevent GC from progressing while visiting the current timeline.
4373 424842 : // If we are GC-ing because a new image layer was added while traversing
4374 424842 : // the timeline, then it will remove layers that are required for fulfilling
4375 424842 : // the current get request (read-path cannot "look back" and notice the new
4376 424842 : // image layer).
4377 424842 : let _gc_cutoff_holder = timeline.get_applied_gc_cutoff_lsn();
4378 :
4379 : // See `compaction::compact_with_gc` for why we need this.
4380 424842 : let _guard = timeline.gc_compaction_layer_update_lock.read().await;
4381 :
4382 : // Initialize the fringe
4383 424842 : let mut fringe = timeline.get_vectored_init_fringe(query).await?;
4384 :
4385 424842 : let mut completed_keyspace = KeySpace::default();
4386 424842 : let mut image_covered_keyspace = KeySpaceRandomAccum::new();
4387 :
4388 865439 : while let Some((layer_to_read, keyspace_to_read, lsn_range)) = fringe.next_layer() {
4389 440597 : if cancel.is_cancelled() {
4390 0 : return Err(GetVectoredError::Cancelled);
4391 440597 : }
4392 :
4393 440597 : if let Some(ref mut read_path) = reconstruct_state.read_path {
4394 440597 : read_path.record_layer_visit(&layer_to_read, &keyspace_to_read, &lsn_range);
4395 440597 : }
4396 :
4397 : // Visit the layer and plan IOs for it
4398 440597 : let next_cont_lsn = lsn_range.start;
4399 440597 : layer_to_read
4400 440597 : .get_values_reconstruct_data(
4401 440597 : keyspace_to_read.clone(),
4402 440597 : lsn_range,
4403 440597 : reconstruct_state,
4404 440597 : ctx,
4405 440597 : )
4406 440597 : .await?;
4407 :
4408 440597 : let mut unmapped_keyspace = keyspace_to_read;
4409 440597 : let cont_lsn = next_cont_lsn;
4410 440597 :
4411 440597 : reconstruct_state.on_layer_visited(&layer_to_read);
4412 440597 :
4413 440597 : let (keys_done_last_step, keys_with_image_coverage) =
4414 440597 : reconstruct_state.consume_done_keys();
4415 440597 : unmapped_keyspace.remove_overlapping_with(&keys_done_last_step);
4416 440597 : completed_keyspace.merge(&keys_done_last_step);
4417 440597 : if let Some(keys_with_image_coverage) = keys_with_image_coverage {
4418 15120 : unmapped_keyspace
4419 15120 : .remove_overlapping_with(&KeySpace::single(keys_with_image_coverage.clone()));
4420 15120 : image_covered_keyspace.add_range(keys_with_image_coverage);
4421 425477 : }
4422 :
4423 : // Query the layer map for the next layers to read.
4424 : //
4425 : // Do not descent any further if the last layer we visited
4426 : // completed all keys in the keyspace it inspected. This is not
4427 : // required for correctness, but avoids visiting extra layers
4428 : // which turns out to be a perf bottleneck in some cases.
4429 440597 : if !unmapped_keyspace.is_empty() {
4430 122929 : let guard = timeline.layers.read().await;
4431 122929 : guard.update_search_fringe(&unmapped_keyspace, cont_lsn, &mut fringe)?;
4432 :
4433 : // It's safe to drop the layer map lock after planning the next round of reads.
4434 : // The fringe keeps readable handles for the layers which are safe to read even
4435 : // if layers were compacted or flushed.
4436 : //
4437 : // The more interesting consideration is: "Why is the read algorithm still correct
4438 : // if the layer map changes while it is operating?". Doing a vectored read on a
4439 : // timeline boils down to pushing an imaginary lsn boundary downwards for each range
4440 : // covered by the read. The layer map tells us how to move the lsn downwards for a
4441 : // range at *a particular point in time*. It is fine for the answer to be different
4442 : // at two different time points.
4443 122929 : drop(guard);
4444 317668 : }
4445 : }
4446 :
4447 424842 : Ok(TimelineVisitOutcome {
4448 424842 : completed_keyspace,
4449 424842 : image_covered_keyspace: image_covered_keyspace.consume_keyspace(),
4450 424842 : })
4451 424842 : }
4452 :
4453 112495 : async fn get_ready_ancestor_timeline(
4454 112495 : &self,
4455 112495 : ancestor: &Arc<Timeline>,
4456 112495 : ctx: &RequestContext,
4457 112495 : ) -> Result<Arc<Timeline>, GetReadyAncestorError> {
4458 112495 : // It's possible that the ancestor timeline isn't active yet, or
4459 112495 : // is active but hasn't yet caught up to the branch point. Wait
4460 112495 : // for it.
4461 112495 : //
4462 112495 : // This cannot happen while the pageserver is running normally,
4463 112495 : // because you cannot create a branch from a point that isn't
4464 112495 : // present in the pageserver yet. However, we don't wait for the
4465 112495 : // branch point to be uploaded to cloud storage before creating
4466 112495 : // a branch. I.e., the branch LSN need not be remote consistent
4467 112495 : // for the branching operation to succeed.
4468 112495 : //
4469 112495 : // Hence, if we try to load a tenant in such a state where
4470 112495 : // 1. the existence of the branch was persisted (in IndexPart and/or locally)
4471 112495 : // 2. but the ancestor state is behind branch_lsn because it was not yet persisted
4472 112495 : // then we will need to wait for the ancestor timeline to
4473 112495 : // re-stream WAL up to branch_lsn before we access it.
4474 112495 : //
4475 112495 : // How can a tenant get in such a state?
4476 112495 : // - ungraceful pageserver process exit
4477 112495 : // - detach+attach => this is a bug, https://github.com/neondatabase/neon/issues/4219
4478 112495 : //
4479 112495 : // NB: this could be avoided by requiring
4480 112495 : // branch_lsn >= remote_consistent_lsn
4481 112495 : // during branch creation.
4482 112495 : match ancestor.wait_to_become_active(ctx).await {
4483 112494 : Ok(()) => {}
4484 : Err(TimelineState::Stopping) => {
4485 : // If an ancestor is stopping, it means the tenant is stopping: handle this the same as if this timeline was stopping.
4486 0 : return Err(GetReadyAncestorError::Cancelled);
4487 : }
4488 1 : Err(state) => {
4489 1 : return Err(GetReadyAncestorError::BadState {
4490 1 : timeline_id: ancestor.timeline_id,
4491 1 : state,
4492 1 : });
4493 : }
4494 : }
4495 112494 : ancestor
4496 112494 : .wait_lsn(
4497 112494 : self.ancestor_lsn,
4498 112494 : WaitLsnWaiter::Timeline(self),
4499 112494 : WaitLsnTimeout::Default,
4500 112494 : ctx,
4501 112494 : )
4502 112494 : .await
4503 112494 : .map_err(|e| match e {
4504 0 : e @ WaitLsnError::Timeout(_) => GetReadyAncestorError::AncestorLsnTimeout(e),
4505 0 : WaitLsnError::Shutdown => GetReadyAncestorError::Cancelled,
4506 0 : WaitLsnError::BadState(state) => GetReadyAncestorError::BadState {
4507 0 : timeline_id: ancestor.timeline_id,
4508 0 : state,
4509 0 : },
4510 112494 : })?;
4511 :
4512 112494 : Ok(ancestor.clone())
4513 112495 : }
4514 :
4515 148578 : pub(crate) fn get_shard_identity(&self) -> &ShardIdentity {
4516 148578 : &self.shard_identity
4517 148578 : }
4518 :
4519 : #[inline(always)]
4520 0 : pub(crate) fn shard_timeline_id(&self) -> ShardTimelineId {
4521 0 : ShardTimelineId {
4522 0 : shard_index: ShardIndex {
4523 0 : shard_number: self.shard_identity.number,
4524 0 : shard_count: self.shard_identity.count,
4525 0 : },
4526 0 : timeline_id: self.timeline_id,
4527 0 : }
4528 0 : }
4529 :
4530 : /// Returns a non-frozen open in-memory layer for ingestion.
4531 : ///
4532 : /// Takes a witness of timeline writer state lock being held, because it makes no sense to call
4533 : /// this function without holding the mutex.
4534 656 : async fn get_layer_for_write(
4535 656 : &self,
4536 656 : lsn: Lsn,
4537 656 : _guard: &tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
4538 656 : ctx: &RequestContext,
4539 656 : ) -> anyhow::Result<Arc<InMemoryLayer>> {
4540 656 : let mut guard = self.layers.write().await;
4541 :
4542 656 : let last_record_lsn = self.get_last_record_lsn();
4543 656 : ensure!(
4544 656 : lsn > last_record_lsn,
4545 0 : "cannot modify relation after advancing last_record_lsn (incoming_lsn={}, last_record_lsn={})",
4546 : lsn,
4547 : last_record_lsn,
4548 : );
4549 :
4550 656 : let layer = guard
4551 656 : .open_mut()?
4552 656 : .get_layer_for_write(
4553 656 : lsn,
4554 656 : self.conf,
4555 656 : self.timeline_id,
4556 656 : self.tenant_shard_id,
4557 656 : &self.gate,
4558 656 : &self.cancel,
4559 656 : ctx,
4560 656 : )
4561 656 : .await?;
4562 656 : Ok(layer)
4563 656 : }
4564 :
4565 2639555 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
4566 2639555 : assert!(new_lsn.is_aligned());
4567 :
4568 2639555 : self.metrics.last_record_lsn_gauge.set(new_lsn.0 as i64);
4569 2639555 : self.last_record_lsn.advance(new_lsn);
4570 2639555 : }
4571 :
4572 : /// Freeze any existing open in-memory layer and unconditionally notify the flush loop.
4573 : ///
4574 : /// Unconditional flush loop notification is given because in sharded cases we will want to
4575 : /// leave an Lsn gap. Unsharded tenants do not have Lsn gaps.
4576 607 : async fn freeze_inmem_layer_at(
4577 607 : &self,
4578 607 : at: Lsn,
4579 607 : write_lock: &mut tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
4580 607 : ) -> Result<u64, FlushLayerError> {
4581 607 : let frozen = {
4582 607 : let mut guard = self.layers.write().await;
4583 607 : guard
4584 607 : .open_mut()?
4585 607 : .try_freeze_in_memory_layer(at, &self.last_freeze_at, write_lock, &self.metrics)
4586 607 : .await
4587 : };
4588 :
4589 607 : if frozen {
4590 593 : let now = Instant::now();
4591 593 : *(self.last_freeze_ts.write().unwrap()) = now;
4592 593 : }
4593 :
4594 : // Increment the flush cycle counter and wake up the flush task.
4595 : // Remember the new value, so that when we listen for the flush
4596 : // to finish, we know when the flush that we initiated has
4597 : // finished, instead of some other flush that was started earlier.
4598 607 : let mut my_flush_request = 0;
4599 607 :
4600 607 : let flush_loop_state = { *self.flush_loop_state.lock().unwrap() };
4601 607 : if !matches!(flush_loop_state, FlushLoopState::Running { .. }) {
4602 0 : return Err(FlushLayerError::NotRunning(flush_loop_state));
4603 607 : }
4604 607 :
4605 607 : self.layer_flush_start_tx.send_modify(|(counter, lsn)| {
4606 607 : my_flush_request = *counter + 1;
4607 607 : *counter = my_flush_request;
4608 607 : *lsn = std::cmp::max(at, *lsn);
4609 607 : });
4610 607 :
4611 607 : assert_ne!(my_flush_request, 0);
4612 :
4613 607 : Ok(my_flush_request)
4614 607 : }
4615 :
4616 : /// Layer flusher task's main loop.
4617 230 : async fn flush_loop(
4618 230 : self: &Arc<Self>,
4619 230 : mut layer_flush_start_rx: tokio::sync::watch::Receiver<(u64, Lsn)>,
4620 230 : ctx: &RequestContext,
4621 230 : ) {
4622 : // Subscribe to L0 delta layer updates, for compaction backpressure.
4623 230 : let mut watch_l0 = match self.layers.read().await.layer_map() {
4624 230 : Ok(lm) => lm.watch_level0_deltas(),
4625 0 : Err(Shutdown) => return,
4626 : };
4627 :
4628 230 : info!("started flush loop");
4629 : loop {
4630 829 : tokio::select! {
4631 829 : _ = self.cancel.cancelled() => {
4632 5 : info!("shutting down layer flush task due to Timeline::cancel");
4633 5 : break;
4634 : },
4635 829 : _ = layer_flush_start_rx.changed() => {}
4636 599 : }
4637 599 : trace!("waking up");
4638 599 : let (flush_counter, frozen_to_lsn) = *layer_flush_start_rx.borrow();
4639 599 :
4640 599 : // The highest LSN to which we flushed in the loop over frozen layers
4641 599 : let mut flushed_to_lsn = Lsn(0);
4642 :
4643 599 : let result = loop {
4644 1192 : if self.cancel.is_cancelled() {
4645 0 : info!("dropping out of flush loop for timeline shutdown");
4646 : // Note: we do not bother transmitting into [`layer_flush_done_tx`], because
4647 : // anyone waiting on that will respect self.cancel as well: they will stop
4648 : // waiting at the same time we as drop out of this loop.
4649 0 : return;
4650 1192 : }
4651 1192 :
4652 1192 : // Break to notify potential waiters as soon as we've flushed the requested LSN. If
4653 1192 : // more requests have arrived in the meanwhile, we'll resume flushing afterwards.
4654 1192 : if flushed_to_lsn >= frozen_to_lsn {
4655 585 : break Ok(());
4656 607 : }
4657 :
4658 : // Fetch the next layer to flush, if any.
4659 607 : let (layer, l0_count, frozen_count, frozen_size) = {
4660 607 : let layers = self.layers.read().await;
4661 607 : let Ok(lm) = layers.layer_map() else {
4662 0 : info!("dropping out of flush loop for timeline shutdown");
4663 0 : return;
4664 : };
4665 607 : let l0_count = lm.level0_deltas().len();
4666 607 : let frozen_count = lm.frozen_layers.len();
4667 607 : let frozen_size: u64 = lm
4668 607 : .frozen_layers
4669 607 : .iter()
4670 607 : .map(|l| l.estimated_in_mem_size())
4671 607 : .sum();
4672 607 : let layer = lm.frozen_layers.front().cloned();
4673 607 : (layer, l0_count, frozen_count, frozen_size)
4674 607 : // drop 'layers' lock
4675 607 : };
4676 607 : let Some(layer) = layer else {
4677 14 : break Ok(());
4678 : };
4679 :
4680 : // Stall flushes to backpressure if compaction can't keep up. This is propagated up
4681 : // to WAL ingestion by having ephemeral layer rolls wait for flushes.
4682 593 : if let Some(stall_threshold) = self.get_l0_flush_stall_threshold() {
4683 0 : if l0_count >= stall_threshold {
4684 0 : warn!(
4685 0 : "stalling layer flushes for compaction backpressure at {l0_count} \
4686 0 : L0 layers ({frozen_count} frozen layers with {frozen_size} bytes)"
4687 : );
4688 0 : let stall_timer = self
4689 0 : .metrics
4690 0 : .flush_delay_histo
4691 0 : .start_timer()
4692 0 : .record_on_drop();
4693 0 : tokio::select! {
4694 0 : result = watch_l0.wait_for(|l0| *l0 < stall_threshold) => {
4695 0 : if let Ok(l0) = result.as_deref() {
4696 0 : let delay = stall_timer.elapsed().as_secs_f64();
4697 0 : info!("resuming layer flushes at {l0} L0 layers after {delay:.3}s");
4698 0 : }
4699 : },
4700 0 : _ = self.cancel.cancelled() => {},
4701 : }
4702 0 : continue; // check again
4703 0 : }
4704 593 : }
4705 :
4706 : // Flush the layer.
4707 593 : let flush_timer = self.metrics.flush_time_histo.start_timer();
4708 593 : match self.flush_frozen_layer(layer, ctx).await {
4709 593 : Ok(layer_lsn) => flushed_to_lsn = max(flushed_to_lsn, layer_lsn),
4710 : Err(FlushLayerError::Cancelled) => {
4711 0 : info!("dropping out of flush loop for timeline shutdown");
4712 0 : return;
4713 : }
4714 0 : err @ Err(
4715 0 : FlushLayerError::NotRunning(_)
4716 0 : | FlushLayerError::Other(_)
4717 0 : | FlushLayerError::CreateImageLayersError(_),
4718 0 : ) => {
4719 0 : error!("could not flush frozen layer: {err:?}");
4720 0 : break err.map(|_| ());
4721 : }
4722 : }
4723 593 : let flush_duration = flush_timer.stop_and_record();
4724 593 :
4725 593 : // Notify the tenant compaction loop if L0 compaction is needed.
4726 593 : let l0_count = *watch_l0.borrow();
4727 593 : if l0_count >= self.get_compaction_threshold() {
4728 239 : self.l0_compaction_trigger.notify_one();
4729 354 : }
4730 :
4731 : // Delay the next flush to backpressure if compaction can't keep up. We delay by the
4732 : // flush duration such that the flush takes 2x as long. This is propagated up to WAL
4733 : // ingestion by having ephemeral layer rolls wait for flushes.
4734 593 : if let Some(delay_threshold) = self.get_l0_flush_delay_threshold() {
4735 1 : if l0_count >= delay_threshold {
4736 0 : let delay = flush_duration.as_secs_f64();
4737 0 : info!(
4738 0 : "delaying layer flush by {delay:.3}s for compaction backpressure at \
4739 0 : {l0_count} L0 layers ({frozen_count} frozen layers with {frozen_size} bytes)"
4740 : );
4741 0 : let _delay_timer = self
4742 0 : .metrics
4743 0 : .flush_delay_histo
4744 0 : .start_timer()
4745 0 : .record_on_drop();
4746 0 : tokio::select! {
4747 0 : _ = tokio::time::sleep(flush_duration) => {},
4748 0 : _ = watch_l0.wait_for(|l0| *l0 < delay_threshold) => {},
4749 0 : _ = self.cancel.cancelled() => {},
4750 : }
4751 1 : }
4752 592 : }
4753 : };
4754 :
4755 : // Unsharded tenants should never advance their LSN beyond the end of the
4756 : // highest layer they write: such gaps between layer data and the frozen LSN
4757 : // are only legal on sharded tenants.
4758 599 : debug_assert!(
4759 599 : self.shard_identity.count.count() > 1
4760 599 : || flushed_to_lsn >= frozen_to_lsn
4761 14 : || !flushed_to_lsn.is_valid()
4762 : );
4763 :
4764 599 : if flushed_to_lsn < frozen_to_lsn && self.shard_identity.count.count() > 1 {
4765 : // If our layer flushes didn't carry disk_consistent_lsn up to the `to_lsn` advertised
4766 : // to us via layer_flush_start_rx, then advance it here.
4767 : //
4768 : // This path is only taken for tenants with multiple shards: single sharded tenants should
4769 : // never encounter a gap in the wal.
4770 0 : let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
4771 0 : tracing::debug!(
4772 0 : "Advancing disk_consistent_lsn across layer gap {old_disk_consistent_lsn}->{frozen_to_lsn}"
4773 : );
4774 0 : if self.set_disk_consistent_lsn(frozen_to_lsn) {
4775 0 : if let Err(e) = self.schedule_uploads(frozen_to_lsn, vec![]) {
4776 0 : tracing::warn!(
4777 0 : "Failed to schedule metadata upload after updating disk_consistent_lsn: {e}"
4778 : );
4779 0 : }
4780 0 : }
4781 599 : }
4782 :
4783 : // Notify any listeners that we're done
4784 599 : let _ = self
4785 599 : .layer_flush_done_tx
4786 599 : .send_replace((flush_counter, result));
4787 : }
4788 5 : }
4789 :
4790 : /// Waits any flush request created by [`Self::freeze_inmem_layer_at`] to complete.
4791 567 : async fn wait_flush_completion(&self, request: u64) -> Result<(), FlushLayerError> {
4792 567 : let mut rx = self.layer_flush_done_tx.subscribe();
4793 : loop {
4794 : {
4795 1153 : let (last_result_counter, last_result) = &*rx.borrow();
4796 1153 : if *last_result_counter >= request {
4797 567 : if let Err(err) = last_result {
4798 : // We already logged the original error in
4799 : // flush_loop. We cannot propagate it to the caller
4800 : // here, because it might not be Cloneable
4801 0 : return Err(err.clone());
4802 : } else {
4803 567 : return Ok(());
4804 : }
4805 586 : }
4806 586 : }
4807 586 : trace!("waiting for flush to complete");
4808 586 : tokio::select! {
4809 586 : rx_e = rx.changed() => {
4810 586 : rx_e.map_err(|_| FlushLayerError::NotRunning(*self.flush_loop_state.lock().unwrap()))?;
4811 : },
4812 : // Cancellation safety: we are not leaving an I/O in-flight for the flush, we're just ignoring
4813 : // the notification from [`flush_loop`] that it completed.
4814 586 : _ = self.cancel.cancelled() => {
4815 0 : tracing::info!("Cancelled layer flush due on timeline shutdown");
4816 0 : return Ok(())
4817 : }
4818 : };
4819 586 : trace!("done")
4820 : }
4821 567 : }
4822 :
4823 : /// Flush one frozen in-memory layer to disk, as a new delta layer.
4824 : ///
4825 : /// Return value is the last lsn (inclusive) of the layer that was frozen.
4826 : #[instrument(skip_all, fields(layer=%frozen_layer))]
4827 : async fn flush_frozen_layer(
4828 : self: &Arc<Self>,
4829 : frozen_layer: Arc<InMemoryLayer>,
4830 : ctx: &RequestContext,
4831 : ) -> Result<Lsn, FlushLayerError> {
4832 : debug_assert_current_span_has_tenant_and_timeline_id();
4833 :
4834 : // As a special case, when we have just imported an image into the repository,
4835 : // instead of writing out a L0 delta layer, we directly write out image layer
4836 : // files instead. This is possible as long as *all* the data imported into the
4837 : // repository have the same LSN.
4838 : let lsn_range = frozen_layer.get_lsn_range();
4839 :
4840 : // Whether to directly create image layers for this flush, or flush them as delta layers
4841 : let create_image_layer =
4842 : lsn_range.start == self.initdb_lsn && lsn_range.end == Lsn(self.initdb_lsn.0 + 1);
4843 :
4844 : #[cfg(test)]
4845 : {
4846 : match &mut *self.flush_loop_state.lock().unwrap() {
4847 : FlushLoopState::NotStarted | FlushLoopState::Exited => {
4848 : panic!("flush loop not running")
4849 : }
4850 : FlushLoopState::Running {
4851 : expect_initdb_optimization,
4852 : initdb_optimization_count,
4853 : ..
4854 : } => {
4855 : if create_image_layer {
4856 : *initdb_optimization_count += 1;
4857 : } else {
4858 : assert!(!*expect_initdb_optimization, "expected initdb optimization");
4859 : }
4860 : }
4861 : }
4862 : }
4863 :
4864 : let (layers_to_upload, delta_layer_to_add) = if create_image_layer {
4865 : // Note: The 'ctx' in use here has DownloadBehavior::Error. We should not
4866 : // require downloading anything during initial import.
4867 : let ((rel_partition, metadata_partition), _lsn) = self
4868 : .repartition(
4869 : self.initdb_lsn,
4870 : self.get_compaction_target_size(),
4871 : EnumSet::empty(),
4872 : ctx,
4873 : )
4874 : .await
4875 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e.into()))?;
4876 :
4877 : if self.cancel.is_cancelled() {
4878 : return Err(FlushLayerError::Cancelled);
4879 : }
4880 :
4881 : // Ensure that we have a single call to `create_image_layers` with a combined dense keyspace.
4882 : // So that the key ranges don't overlap.
4883 : let mut partitions = KeyPartitioning::default();
4884 : partitions.parts.extend(rel_partition.parts);
4885 : if !metadata_partition.parts.is_empty() {
4886 : assert_eq!(
4887 : metadata_partition.parts.len(),
4888 : 1,
4889 : "currently sparse keyspace should only contain a single metadata keyspace"
4890 : );
4891 : // Safety: create_image_layers treat sparse keyspaces differently that it does not scan
4892 : // every single key within the keyspace, and therefore, it's safe to force converting it
4893 : // into a dense keyspace before calling this function.
4894 : partitions
4895 : .parts
4896 : .extend(metadata_partition.into_dense().parts);
4897 : }
4898 :
4899 : let mut layers_to_upload = Vec::new();
4900 : let (generated_image_layers, is_complete) = self
4901 : .create_image_layers(
4902 : &partitions,
4903 : self.initdb_lsn,
4904 : ImageLayerCreationMode::Initial,
4905 : ctx,
4906 : LastImageLayerCreationStatus::Initial,
4907 : false, // don't yield for L0, we're flushing L0
4908 : )
4909 : .await?;
4910 : debug_assert!(
4911 : matches!(is_complete, LastImageLayerCreationStatus::Complete),
4912 : "init image generation mode must fully cover the keyspace"
4913 : );
4914 : layers_to_upload.extend(generated_image_layers);
4915 :
4916 : (layers_to_upload, None)
4917 : } else {
4918 : // Normal case, write out a L0 delta layer file.
4919 : // `create_delta_layer` will not modify the layer map.
4920 : // We will remove frozen layer and add delta layer in one atomic operation later.
4921 : let Some(layer) = self
4922 : .create_delta_layer(&frozen_layer, None, ctx)
4923 : .await
4924 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?
4925 : else {
4926 : panic!("delta layer cannot be empty if no filter is applied");
4927 : };
4928 : (
4929 : // FIXME: even though we have a single image and single delta layer assumption
4930 : // we push them to vec
4931 : vec![layer.clone()],
4932 : Some(layer),
4933 : )
4934 : };
4935 :
4936 : pausable_failpoint!("flush-layer-cancel-after-writing-layer-out-pausable");
4937 :
4938 : if self.cancel.is_cancelled() {
4939 : return Err(FlushLayerError::Cancelled);
4940 : }
4941 :
4942 : let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1);
4943 :
4944 : // The new on-disk layers are now in the layer map. We can remove the
4945 : // in-memory layer from the map now. The flushed layer is stored in
4946 : // the mapping in `create_delta_layer`.
4947 : {
4948 : let mut guard = self.layers.write().await;
4949 :
4950 : guard.open_mut()?.finish_flush_l0_layer(
4951 : delta_layer_to_add.as_ref(),
4952 : &frozen_layer,
4953 : &self.metrics,
4954 : );
4955 :
4956 : if self.set_disk_consistent_lsn(disk_consistent_lsn) {
4957 : // Schedule remote uploads that will reflect our new disk_consistent_lsn
4958 : self.schedule_uploads(disk_consistent_lsn, layers_to_upload)
4959 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?;
4960 : }
4961 : // release lock on 'layers'
4962 : };
4963 :
4964 : // FIXME: between create_delta_layer and the scheduling of the upload in `update_metadata_file`,
4965 : // a compaction can delete the file and then it won't be available for uploads any more.
4966 : // We still schedule the upload, resulting in an error, but ideally we'd somehow avoid this
4967 : // race situation.
4968 : // See https://github.com/neondatabase/neon/issues/4526
4969 : pausable_failpoint!("flush-frozen-pausable");
4970 :
4971 : // This failpoint is used by another test case `test_pageserver_recovery`.
4972 : fail_point!("flush-frozen-exit");
4973 :
4974 : Ok(Lsn(lsn_range.end.0 - 1))
4975 : }
4976 :
4977 : /// Return true if the value changed
4978 : ///
4979 : /// This function must only be used from the layer flush task.
4980 593 : fn set_disk_consistent_lsn(&self, new_value: Lsn) -> bool {
4981 593 : let old_value = self.disk_consistent_lsn.fetch_max(new_value);
4982 593 : assert!(
4983 593 : new_value >= old_value,
4984 0 : "disk_consistent_lsn must be growing monotonously at runtime; current {old_value}, offered {new_value}"
4985 : );
4986 :
4987 593 : self.metrics
4988 593 : .disk_consistent_lsn_gauge
4989 593 : .set(new_value.0 as i64);
4990 593 : new_value != old_value
4991 593 : }
4992 :
4993 : /// Update metadata file
4994 618 : fn schedule_uploads(
4995 618 : &self,
4996 618 : disk_consistent_lsn: Lsn,
4997 618 : layers_to_upload: impl IntoIterator<Item = ResidentLayer>,
4998 618 : ) -> anyhow::Result<()> {
4999 618 : // We can only save a valid 'prev_record_lsn' value on disk if we
5000 618 : // flushed *all* in-memory changes to disk. We only track
5001 618 : // 'prev_record_lsn' in memory for the latest processed record, so we
5002 618 : // don't remember what the correct value that corresponds to some old
5003 618 : // LSN is. But if we flush everything, then the value corresponding
5004 618 : // current 'last_record_lsn' is correct and we can store it on disk.
5005 618 : let RecordLsn {
5006 618 : last: last_record_lsn,
5007 618 : prev: prev_record_lsn,
5008 618 : } = self.last_record_lsn.load();
5009 618 : let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
5010 554 : Some(prev_record_lsn)
5011 : } else {
5012 64 : None
5013 : };
5014 :
5015 618 : let update = crate::tenant::metadata::MetadataUpdate::new(
5016 618 : disk_consistent_lsn,
5017 618 : ondisk_prev_record_lsn,
5018 618 : *self.applied_gc_cutoff_lsn.read(),
5019 618 : );
5020 618 :
5021 618 : fail_point!("checkpoint-before-saving-metadata", |x| bail!(
5022 0 : "{}",
5023 0 : x.unwrap()
5024 618 : ));
5025 :
5026 1217 : for layer in layers_to_upload {
5027 599 : self.remote_client.schedule_layer_file_upload(layer)?;
5028 : }
5029 618 : self.remote_client
5030 618 : .schedule_index_upload_for_metadata_update(&update)?;
5031 :
5032 618 : Ok(())
5033 618 : }
5034 :
5035 0 : pub(crate) async fn preserve_initdb_archive(&self) -> anyhow::Result<()> {
5036 0 : self.remote_client
5037 0 : .preserve_initdb_archive(
5038 0 : &self.tenant_shard_id.tenant_id,
5039 0 : &self.timeline_id,
5040 0 : &self.cancel,
5041 0 : )
5042 0 : .await
5043 0 : }
5044 :
5045 : // Write out the given frozen in-memory layer as a new L0 delta file. This L0 file will not be tracked
5046 : // in layer map immediately. The caller is responsible to put it into the layer map.
5047 484 : async fn create_delta_layer(
5048 484 : self: &Arc<Self>,
5049 484 : frozen_layer: &Arc<InMemoryLayer>,
5050 484 : key_range: Option<Range<Key>>,
5051 484 : ctx: &RequestContext,
5052 484 : ) -> anyhow::Result<Option<ResidentLayer>> {
5053 484 : let self_clone = Arc::clone(self);
5054 484 : let frozen_layer = Arc::clone(frozen_layer);
5055 484 : let ctx = ctx.attached_child();
5056 484 : let work = async move {
5057 484 : let Some((desc, path)) = frozen_layer
5058 484 : .write_to_disk(
5059 484 : &ctx,
5060 484 : key_range,
5061 484 : self_clone.l0_flush_global_state.inner(),
5062 484 : &self_clone.gate,
5063 484 : self_clone.cancel.clone(),
5064 484 : )
5065 484 : .await?
5066 : else {
5067 0 : return Ok(None);
5068 : };
5069 484 : let new_delta = Layer::finish_creating(self_clone.conf, &self_clone, desc, &path)?;
5070 :
5071 : // The write_to_disk() above calls writer.finish() which already did the fsync of the inodes.
5072 : // We just need to fsync the directory in which these inodes are linked,
5073 : // which we know to be the timeline directory.
5074 : //
5075 : // We use fatal_err() below because the after write_to_disk returns with success,
5076 : // the in-memory state of the filesystem already has the layer file in its final place,
5077 : // and subsequent pageserver code could think it's durable while it really isn't.
5078 484 : let timeline_dir = VirtualFile::open(
5079 484 : &self_clone
5080 484 : .conf
5081 484 : .timeline_path(&self_clone.tenant_shard_id, &self_clone.timeline_id),
5082 484 : &ctx,
5083 484 : )
5084 484 : .await
5085 484 : .fatal_err("VirtualFile::open for timeline dir fsync");
5086 484 : timeline_dir
5087 484 : .sync_all()
5088 484 : .await
5089 484 : .fatal_err("VirtualFile::sync_all timeline dir");
5090 484 : anyhow::Ok(Some(new_delta))
5091 484 : };
5092 : // Before tokio-epoll-uring, we ran write_to_disk & the sync_all inside spawn_blocking.
5093 : // Preserve that behavior to maintain the same behavior for `virtual_file_io_engine=std-fs`.
5094 : use crate::virtual_file::io_engine::IoEngine;
5095 484 : match crate::virtual_file::io_engine::get() {
5096 0 : IoEngine::NotSet => panic!("io engine not set"),
5097 : IoEngine::StdFs => {
5098 0 : let span = tracing::info_span!("blocking");
5099 0 : tokio::task::spawn_blocking({
5100 0 : move || Handle::current().block_on(work.instrument(span))
5101 0 : })
5102 0 : .await
5103 0 : .context("spawn_blocking")
5104 0 : .and_then(|x| x)
5105 : }
5106 : #[cfg(target_os = "linux")]
5107 484 : IoEngine::TokioEpollUring => work.await,
5108 : }
5109 484 : }
5110 :
5111 291 : async fn repartition(
5112 291 : &self,
5113 291 : lsn: Lsn,
5114 291 : partition_size: u64,
5115 291 : flags: EnumSet<CompactFlags>,
5116 291 : ctx: &RequestContext,
5117 291 : ) -> Result<((KeyPartitioning, SparseKeyPartitioning), Lsn), CompactionError> {
5118 291 : let Ok(mut guard) = self.partitioning.try_write_guard() else {
5119 : // NB: there are two callers, one is the compaction task, of which there is only one per struct Tenant and hence Timeline.
5120 : // The other is the initdb optimization in flush_frozen_layer, used by `boostrap_timeline`, which runs before `.activate()`
5121 : // and hence before the compaction task starts.
5122 0 : return Err(CompactionError::Other(anyhow!(
5123 0 : "repartition() called concurrently"
5124 0 : )));
5125 : };
5126 291 : let ((dense_partition, sparse_partition), partition_lsn) = &*guard.read();
5127 291 : if lsn < *partition_lsn {
5128 0 : return Err(CompactionError::Other(anyhow!(
5129 0 : "repartition() called with LSN going backwards, this should not happen"
5130 0 : )));
5131 291 : }
5132 291 :
5133 291 : let distance = lsn.0 - partition_lsn.0;
5134 291 : if *partition_lsn != Lsn(0)
5135 131 : && distance <= self.repartition_threshold
5136 131 : && !flags.contains(CompactFlags::ForceRepartition)
5137 : {
5138 124 : debug!(
5139 : distance,
5140 : threshold = self.repartition_threshold,
5141 0 : "no repartitioning needed"
5142 : );
5143 124 : return Ok((
5144 124 : (dense_partition.clone(), sparse_partition.clone()),
5145 124 : *partition_lsn,
5146 124 : ));
5147 167 : }
5148 :
5149 167 : let (dense_ks, sparse_ks) = self.collect_keyspace(lsn, ctx).await?;
5150 167 : let dense_partitioning = dense_ks.partition(&self.shard_identity, partition_size);
5151 167 : let sparse_partitioning = SparseKeyPartitioning {
5152 167 : parts: vec![sparse_ks],
5153 167 : }; // no partitioning for metadata keys for now
5154 167 : let result = ((dense_partitioning, sparse_partitioning), lsn);
5155 167 : guard.write(result.clone());
5156 167 : Ok(result)
5157 291 : }
5158 :
5159 : // Is it time to create a new image layer for the given partition? True if we want to generate.
5160 7 : async fn time_for_new_image_layer(&self, partition: &KeySpace, lsn: Lsn) -> bool {
5161 7 : let threshold = self.get_image_creation_threshold();
5162 :
5163 7 : let guard = self.layers.read().await;
5164 7 : let Ok(layers) = guard.layer_map() else {
5165 0 : return false;
5166 : };
5167 :
5168 7 : let mut max_deltas = 0;
5169 14 : for part_range in &partition.ranges {
5170 7 : let image_coverage = layers.image_coverage(part_range, lsn);
5171 14 : for (img_range, last_img) in image_coverage {
5172 7 : let img_lsn = if let Some(last_img) = last_img {
5173 0 : last_img.get_lsn_range().end
5174 : } else {
5175 7 : Lsn(0)
5176 : };
5177 : // Let's consider an example:
5178 : //
5179 : // delta layer with LSN range 71-81
5180 : // delta layer with LSN range 81-91
5181 : // delta layer with LSN range 91-101
5182 : // image layer at LSN 100
5183 : //
5184 : // If 'lsn' is still 100, i.e. no new WAL has been processed since the last image layer,
5185 : // there's no need to create a new one. We check this case explicitly, to avoid passing
5186 : // a bogus range to count_deltas below, with start > end. It's even possible that there
5187 : // are some delta layers *later* than current 'lsn', if more WAL was processed and flushed
5188 : // after we read last_record_lsn, which is passed here in the 'lsn' argument.
5189 7 : if img_lsn < lsn {
5190 7 : let num_deltas =
5191 7 : layers.count_deltas(&img_range, &(img_lsn..lsn), Some(threshold));
5192 7 :
5193 7 : max_deltas = max_deltas.max(num_deltas);
5194 7 : if num_deltas >= threshold {
5195 0 : debug!(
5196 0 : "key range {}-{}, has {} deltas on this timeline in LSN range {}..{}",
5197 : img_range.start, img_range.end, num_deltas, img_lsn, lsn
5198 : );
5199 0 : return true;
5200 7 : }
5201 0 : }
5202 : }
5203 : }
5204 :
5205 7 : debug!(
5206 : max_deltas,
5207 0 : "none of the partitioned ranges had >= {threshold} deltas"
5208 : );
5209 7 : false
5210 7 : }
5211 :
5212 : /// Create image layers for Postgres data. Assumes the caller passes a partition that is not too large,
5213 : /// so that at most one image layer will be produced from this function.
5214 : #[allow(clippy::too_many_arguments)]
5215 122 : async fn create_image_layer_for_rel_blocks(
5216 122 : self: &Arc<Self>,
5217 122 : partition: &KeySpace,
5218 122 : mut image_layer_writer: ImageLayerWriter,
5219 122 : lsn: Lsn,
5220 122 : ctx: &RequestContext,
5221 122 : img_range: Range<Key>,
5222 122 : io_concurrency: IoConcurrency,
5223 122 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
5224 122 : let mut wrote_keys = false;
5225 122 :
5226 122 : let mut key_request_accum = KeySpaceAccum::new();
5227 810 : for range in &partition.ranges {
5228 688 : let mut key = range.start;
5229 1491 : while key < range.end {
5230 : // Decide whether to retain this key: usually we do, but sharded tenants may
5231 : // need to drop keys that don't belong to them. If we retain the key, add it
5232 : // to `key_request_accum` for later issuing a vectored get
5233 803 : if self.shard_identity.is_key_disposable(&key) {
5234 0 : debug!(
5235 0 : "Dropping key {} during compaction (it belongs on shard {:?})",
5236 0 : key,
5237 0 : self.shard_identity.get_shard_number(&key)
5238 : );
5239 803 : } else {
5240 803 : key_request_accum.add_key(key);
5241 803 : }
5242 :
5243 803 : let last_key_in_range = key.next() == range.end;
5244 803 : key = key.next();
5245 803 :
5246 803 : // Maybe flush `key_rest_accum`
5247 803 : if key_request_accum.raw_size() >= Timeline::MAX_GET_VECTORED_KEYS
5248 803 : || (last_key_in_range && key_request_accum.raw_size() > 0)
5249 : {
5250 688 : let query =
5251 688 : VersionedKeySpaceQuery::uniform(key_request_accum.consume_keyspace(), lsn);
5252 :
5253 688 : let results = self
5254 688 : .get_vectored(query, io_concurrency.clone(), ctx)
5255 688 : .await?;
5256 :
5257 688 : if self.cancel.is_cancelled() {
5258 0 : return Err(CreateImageLayersError::Cancelled);
5259 688 : }
5260 :
5261 1491 : for (img_key, img) in results {
5262 803 : let img = match img {
5263 803 : Ok(img) => img,
5264 0 : Err(err) => {
5265 0 : // If we fail to reconstruct a VM or FSM page, we can zero the
5266 0 : // page without losing any actual user data. That seems better
5267 0 : // than failing repeatedly and getting stuck.
5268 0 : //
5269 0 : // We had a bug at one point, where we truncated the FSM and VM
5270 0 : // in the pageserver, but the Postgres didn't know about that
5271 0 : // and continued to generate incremental WAL records for pages
5272 0 : // that didn't exist in the pageserver. Trying to replay those
5273 0 : // WAL records failed to find the previous image of the page.
5274 0 : // This special case allows us to recover from that situation.
5275 0 : // See https://github.com/neondatabase/neon/issues/2601.
5276 0 : //
5277 0 : // Unfortunately we cannot do this for the main fork, or for
5278 0 : // any metadata keys, keys, as that would lead to actual data
5279 0 : // loss.
5280 0 : if img_key.is_rel_fsm_block_key() || img_key.is_rel_vm_block_key() {
5281 0 : warn!(
5282 0 : "could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}"
5283 : );
5284 0 : ZERO_PAGE.clone()
5285 : } else {
5286 0 : return Err(CreateImageLayersError::from(err));
5287 : }
5288 : }
5289 : };
5290 :
5291 : // Write all the keys we just read into our new image layer.
5292 803 : image_layer_writer.put_image(img_key, img, ctx).await?;
5293 803 : wrote_keys = true;
5294 : }
5295 115 : }
5296 : }
5297 : }
5298 :
5299 122 : if wrote_keys {
5300 : // Normal path: we have written some data into the new image layer for this
5301 : // partition, so flush it to disk.
5302 122 : info!(
5303 0 : "produced image layer for rel {}",
5304 0 : ImageLayerName {
5305 0 : key_range: img_range.clone(),
5306 0 : lsn
5307 0 : },
5308 : );
5309 122 : Ok(ImageLayerCreationOutcome::Generated {
5310 122 : unfinished_image_layer: image_layer_writer,
5311 122 : })
5312 : } else {
5313 0 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
5314 0 : Ok(ImageLayerCreationOutcome::Empty)
5315 : }
5316 122 : }
5317 :
5318 : /// Create an image layer for metadata keys. This function produces one image layer for all metadata
5319 : /// keys for now. Because metadata keys cannot exceed basebackup size limit, the image layer for it
5320 : /// would not be too large to fit in a single image layer.
5321 : ///
5322 : /// Creating image layers for metadata keys are different from relational keys. Firstly, instead of
5323 : /// iterating each key and get an image for each of them, we do a `vectored_get` scan over the sparse
5324 : /// keyspace to get all images in one run. Secondly, we use a different image layer generation metrics
5325 : /// for metadata keys than relational keys, which is the number of delta files visited during the scan.
5326 : #[allow(clippy::too_many_arguments)]
5327 117 : async fn create_image_layer_for_metadata_keys(
5328 117 : self: &Arc<Self>,
5329 117 : partition: &KeySpace,
5330 117 : mut image_layer_writer: ImageLayerWriter,
5331 117 : lsn: Lsn,
5332 117 : ctx: &RequestContext,
5333 117 : img_range: Range<Key>,
5334 117 : mode: ImageLayerCreationMode,
5335 117 : io_concurrency: IoConcurrency,
5336 117 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
5337 117 : // Metadata keys image layer creation.
5338 117 : let mut reconstruct_state = ValuesReconstructState::new(io_concurrency);
5339 117 : let begin = Instant::now();
5340 : // Directly use `get_vectored_impl` to skip the max_vectored_read_key limit check. Note that the keyspace should
5341 : // not contain too many keys, otherwise this takes a lot of memory.
5342 117 : let data = self
5343 117 : .get_vectored_impl(
5344 117 : VersionedKeySpaceQuery::uniform(partition.clone(), lsn),
5345 117 : &mut reconstruct_state,
5346 117 : ctx,
5347 117 : )
5348 117 : .await?;
5349 117 : let (data, total_kb_retrieved, total_keys_retrieved) = {
5350 117 : let mut new_data = BTreeMap::new();
5351 117 : let mut total_kb_retrieved = 0;
5352 117 : let mut total_keys_retrieved = 0;
5353 5123 : for (k, v) in data {
5354 5006 : let v = v?;
5355 5006 : total_kb_retrieved += KEY_SIZE + v.len();
5356 5006 : total_keys_retrieved += 1;
5357 5006 : new_data.insert(k, v);
5358 : }
5359 117 : (new_data, total_kb_retrieved / 1024, total_keys_retrieved)
5360 117 : };
5361 117 : let delta_files_accessed = reconstruct_state.get_delta_layers_visited();
5362 117 : let elapsed = begin.elapsed();
5363 117 :
5364 117 : let trigger_generation = delta_files_accessed as usize >= MAX_AUX_FILE_V2_DELTAS;
5365 117 : info!(
5366 0 : "metadata key compaction: trigger_generation={trigger_generation}, delta_files_accessed={delta_files_accessed}, total_kb_retrieved={total_kb_retrieved}, total_keys_retrieved={total_keys_retrieved}, read_time={}s",
5367 0 : elapsed.as_secs_f64()
5368 : );
5369 :
5370 117 : if !trigger_generation && mode == ImageLayerCreationMode::Try {
5371 1 : return Ok(ImageLayerCreationOutcome::Skip);
5372 116 : }
5373 116 : if self.cancel.is_cancelled() {
5374 0 : return Err(CreateImageLayersError::Cancelled);
5375 116 : }
5376 116 : let mut wrote_any_image = false;
5377 5122 : for (k, v) in data {
5378 5006 : if v.is_empty() {
5379 : // the key has been deleted, it does not need an image
5380 : // in metadata keyspace, an empty image == tombstone
5381 4 : continue;
5382 5002 : }
5383 5002 : wrote_any_image = true;
5384 5002 :
5385 5002 : // No need to handle sharding b/c metadata keys are always on the 0-th shard.
5386 5002 :
5387 5002 : // TODO: split image layers to avoid too large layer files. Too large image files are not handled
5388 5002 : // on the normal data path either.
5389 5002 : image_layer_writer.put_image(k, v, ctx).await?;
5390 : }
5391 :
5392 116 : if wrote_any_image {
5393 : // Normal path: we have written some data into the new image layer for this
5394 : // partition, so flush it to disk.
5395 6 : info!(
5396 0 : "created image layer for metadata {}",
5397 0 : ImageLayerName {
5398 0 : key_range: img_range.clone(),
5399 0 : lsn
5400 0 : }
5401 : );
5402 6 : Ok(ImageLayerCreationOutcome::Generated {
5403 6 : unfinished_image_layer: image_layer_writer,
5404 6 : })
5405 : } else {
5406 110 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
5407 110 : Ok(ImageLayerCreationOutcome::Empty)
5408 : }
5409 117 : }
5410 :
5411 : /// Predicate function which indicates whether we should check if new image layers
5412 : /// are required. Since checking if new image layers are required is expensive in
5413 : /// terms of CPU, we only do it in the following cases:
5414 : /// 1. If the timeline has ingested sufficient WAL to justify the cost
5415 : /// 2. If enough time has passed since the last check:
5416 : /// 1. For large tenants, we wish to perform the check more often since they
5417 : /// suffer from the lack of image layers
5418 : /// 2. For small tenants (that can mostly fit in RAM), we use a much longer interval
5419 291 : fn should_check_if_image_layers_required(self: &Arc<Timeline>, lsn: Lsn) -> bool {
5420 : const LARGE_TENANT_THRESHOLD: u64 = 2 * 1024 * 1024 * 1024;
5421 :
5422 291 : let last_checks_at = self.last_image_layer_creation_check_at.load();
5423 291 : let distance = lsn
5424 291 : .checked_sub(last_checks_at)
5425 291 : .expect("Attempt to compact with LSN going backwards");
5426 291 : let min_distance =
5427 291 : self.get_image_layer_creation_check_threshold() as u64 * self.get_checkpoint_distance();
5428 291 :
5429 291 : let distance_based_decision = distance.0 >= min_distance;
5430 291 :
5431 291 : let mut time_based_decision = false;
5432 291 : let mut last_check_instant = self.last_image_layer_creation_check_instant.lock().unwrap();
5433 291 : if let CurrentLogicalSize::Exact(logical_size) = self.current_logical_size.current_size() {
5434 240 : let check_required_after = if Into::<u64>::into(&logical_size) >= LARGE_TENANT_THRESHOLD
5435 : {
5436 0 : self.get_checkpoint_timeout()
5437 : } else {
5438 240 : Duration::from_secs(3600 * 48)
5439 : };
5440 :
5441 240 : time_based_decision = match *last_check_instant {
5442 131 : Some(last_check) => {
5443 131 : let elapsed = last_check.elapsed();
5444 131 : elapsed >= check_required_after
5445 : }
5446 109 : None => true,
5447 : };
5448 51 : }
5449 :
5450 : // Do the expensive delta layer counting only if this timeline has ingested sufficient
5451 : // WAL since the last check or a checkpoint timeout interval has elapsed since the last
5452 : // check.
5453 291 : let decision = distance_based_decision || time_based_decision;
5454 :
5455 291 : if decision {
5456 110 : self.last_image_layer_creation_check_at.store(lsn);
5457 110 : *last_check_instant = Some(Instant::now());
5458 181 : }
5459 :
5460 291 : decision
5461 291 : }
5462 :
5463 : /// Returns the image layers generated and an enum indicating whether the process is fully completed.
5464 : /// true = we have generate all image layers, false = we preempt the process for L0 compaction.
5465 : #[tracing::instrument(skip_all, fields(%lsn, %mode))]
5466 : async fn create_image_layers(
5467 : self: &Arc<Timeline>,
5468 : partitioning: &KeyPartitioning,
5469 : lsn: Lsn,
5470 : mode: ImageLayerCreationMode,
5471 : ctx: &RequestContext,
5472 : last_status: LastImageLayerCreationStatus,
5473 : yield_for_l0: bool,
5474 : ) -> Result<(Vec<ResidentLayer>, LastImageLayerCreationStatus), CreateImageLayersError> {
5475 : let timer = self.metrics.create_images_time_histo.start_timer();
5476 :
5477 : if partitioning.parts.is_empty() {
5478 : warn!("no partitions to create image layers for");
5479 : return Ok((vec![], LastImageLayerCreationStatus::Complete));
5480 : }
5481 :
5482 : // We need to avoid holes between generated image layers.
5483 : // Otherwise LayerMap::image_layer_exists will return false if key range of some layer is covered by more than one
5484 : // image layer with hole between them. In this case such layer can not be utilized by GC.
5485 : //
5486 : // How such hole between partitions can appear?
5487 : // if we have relation with relid=1 and size 100 and relation with relid=2 with size 200 then result of
5488 : // KeySpace::partition may contain partitions <100000000..100000099> and <200000000..200000199>.
5489 : // If there is delta layer <100000000..300000000> then it never be garbage collected because
5490 : // image layers <100000000..100000099> and <200000000..200000199> are not completely covering it.
5491 : let mut start = Key::MIN;
5492 :
5493 : let check_for_image_layers =
5494 : if let LastImageLayerCreationStatus::Incomplete { last_key } = last_status {
5495 : info!(
5496 : "resuming image layer creation: last_status=incomplete, continue from {}",
5497 : last_key
5498 : );
5499 : true
5500 : } else {
5501 : self.should_check_if_image_layers_required(lsn)
5502 : };
5503 :
5504 : let mut batch_image_writer = BatchLayerWriter::new(self.conf).await?;
5505 :
5506 : let mut all_generated = true;
5507 :
5508 : let mut partition_processed = 0;
5509 : let mut total_partitions = partitioning.parts.len();
5510 : let mut last_partition_processed = None;
5511 : let mut partition_parts = partitioning.parts.clone();
5512 :
5513 : if let LastImageLayerCreationStatus::Incomplete { last_key } = last_status {
5514 : // We need to skip the partitions that have already been processed.
5515 : let mut found = false;
5516 : for (i, partition) in partition_parts.iter().enumerate() {
5517 : if last_key <= partition.end().unwrap() {
5518 : // ```plain
5519 : // |------|--------|----------|------|
5520 : // ^last_key
5521 : // ^start from this partition
5522 : // ```
5523 : // Why `i+1` instead of `i`?
5524 : // It is possible that the user did some writes after the previous image layer creation attempt so that
5525 : // a relation grows in size, and the last_key is now in the middle of the partition. In this case, we
5526 : // still want to skip this partition, so that we can make progress and avoid generating image layers over
5527 : // the same partition. Doing a mod to ensure we don't end up with an empty vec.
5528 : if i + 1 >= total_partitions {
5529 : // In general, this case should not happen -- if last_key is on the last partition, the previous
5530 : // iteration of image layer creation should return a complete status.
5531 : break; // with found=false
5532 : }
5533 : partition_parts = partition_parts.split_off(i + 1); // Remove the first i + 1 elements
5534 : total_partitions = partition_parts.len();
5535 : // Update the start key to the partition start.
5536 : start = partition_parts[0].start().unwrap();
5537 : found = true;
5538 : break;
5539 : }
5540 : }
5541 : if !found {
5542 : // Last key is within the last partition, or larger than all partitions.
5543 : return Ok((vec![], LastImageLayerCreationStatus::Complete));
5544 : }
5545 : }
5546 :
5547 : for partition in partition_parts.iter() {
5548 : if self.cancel.is_cancelled() {
5549 : return Err(CreateImageLayersError::Cancelled);
5550 : }
5551 : partition_processed += 1;
5552 : let img_range = start..partition.ranges.last().unwrap().end;
5553 : let compact_metadata = partition.overlaps(&Key::metadata_key_range());
5554 : if compact_metadata {
5555 : for range in &partition.ranges {
5556 : assert!(
5557 : range.start.field1 >= METADATA_KEY_BEGIN_PREFIX
5558 : && range.end.field1 <= METADATA_KEY_END_PREFIX,
5559 : "metadata keys must be partitioned separately"
5560 : );
5561 : }
5562 : if mode == ImageLayerCreationMode::Try && !check_for_image_layers {
5563 : // Skip compaction if there are not enough updates. Metadata compaction will do a scan and
5564 : // might mess up with evictions.
5565 : start = img_range.end;
5566 : continue;
5567 : }
5568 : // For initial and force modes, we always generate image layers for metadata keys.
5569 : } else if let ImageLayerCreationMode::Try = mode {
5570 : // check_for_image_layers = false -> skip
5571 : // check_for_image_layers = true -> check time_for_new_image_layer -> skip/generate
5572 : if !check_for_image_layers || !self.time_for_new_image_layer(partition, lsn).await {
5573 : start = img_range.end;
5574 : continue;
5575 : }
5576 : }
5577 : if let ImageLayerCreationMode::Force = mode {
5578 : // When forced to create image layers, we might try and create them where they already
5579 : // exist. This mode is only used in tests/debug.
5580 : let layers = self.layers.read().await;
5581 : if layers.contains_key(&PersistentLayerKey {
5582 : key_range: img_range.clone(),
5583 : lsn_range: PersistentLayerDesc::image_layer_lsn_range(lsn),
5584 : is_delta: false,
5585 : }) {
5586 : // TODO: this can be processed with the BatchLayerWriter::finish_with_discard
5587 : // in the future.
5588 : tracing::info!(
5589 : "Skipping image layer at {lsn} {}..{}, already exists",
5590 : img_range.start,
5591 : img_range.end
5592 : );
5593 : start = img_range.end;
5594 : continue;
5595 : }
5596 : }
5597 :
5598 : let image_layer_writer = ImageLayerWriter::new(
5599 : self.conf,
5600 : self.timeline_id,
5601 : self.tenant_shard_id,
5602 : &img_range,
5603 : lsn,
5604 : &self.gate,
5605 : self.cancel.clone(),
5606 : ctx,
5607 : )
5608 : .await?;
5609 :
5610 0 : fail_point!("image-layer-writer-fail-before-finish", |_| {
5611 0 : Err(CreateImageLayersError::Other(anyhow::anyhow!(
5612 0 : "failpoint image-layer-writer-fail-before-finish"
5613 0 : )))
5614 0 : });
5615 :
5616 : let io_concurrency = IoConcurrency::spawn_from_conf(
5617 : self.conf.get_vectored_concurrent_io,
5618 : self.gate
5619 : .enter()
5620 0 : .map_err(|_| CreateImageLayersError::Cancelled)?,
5621 : );
5622 :
5623 : let outcome = if !compact_metadata {
5624 : self.create_image_layer_for_rel_blocks(
5625 : partition,
5626 : image_layer_writer,
5627 : lsn,
5628 : ctx,
5629 : img_range.clone(),
5630 : io_concurrency,
5631 : )
5632 : .await?
5633 : } else {
5634 : self.create_image_layer_for_metadata_keys(
5635 : partition,
5636 : image_layer_writer,
5637 : lsn,
5638 : ctx,
5639 : img_range.clone(),
5640 : mode,
5641 : io_concurrency,
5642 : )
5643 : .await?
5644 : };
5645 : match outcome {
5646 : ImageLayerCreationOutcome::Empty => {
5647 : // No data in this partition, so we don't need to create an image layer (for now).
5648 : // The next image layer should cover this key range, so we don't advance the `start`
5649 : // key.
5650 : }
5651 : ImageLayerCreationOutcome::Generated {
5652 : unfinished_image_layer,
5653 : } => {
5654 : batch_image_writer.add_unfinished_image_writer(
5655 : unfinished_image_layer,
5656 : img_range.clone(),
5657 : lsn,
5658 : );
5659 : // The next image layer should be generated right after this one.
5660 : start = img_range.end;
5661 : }
5662 : ImageLayerCreationOutcome::Skip => {
5663 : // We don't need to create an image layer for this partition.
5664 : // The next image layer should NOT cover this range, otherwise
5665 : // the keyspace becomes empty (reads don't go past image layers).
5666 : start = img_range.end;
5667 : }
5668 : }
5669 :
5670 : if let ImageLayerCreationMode::Try = mode {
5671 : // We have at least made some progress
5672 : if yield_for_l0 && batch_image_writer.pending_layer_num() >= 1 {
5673 : // The `Try` mode is currently only used on the compaction path. We want to avoid
5674 : // image layer generation taking too long time and blocking L0 compaction. So in this
5675 : // mode, we also inspect the current number of L0 layers and skip image layer generation
5676 : // if there are too many of them.
5677 : let image_preempt_threshold = self.get_image_creation_preempt_threshold()
5678 : * self.get_compaction_threshold();
5679 : // TODO: currently we do not respect `get_image_creation_preempt_threshold` and always yield
5680 : // when there is a single timeline with more than L0 threshold L0 layers. As long as the
5681 : // `get_image_creation_preempt_threshold` is set to a value greater than 0, we will yield for L0 compaction.
5682 : if image_preempt_threshold != 0 {
5683 : let should_yield = self
5684 : .l0_compaction_trigger
5685 : .notified()
5686 : .now_or_never()
5687 : .is_some();
5688 : if should_yield {
5689 : tracing::info!(
5690 : "preempt image layer generation at {lsn} when processing partition {}..{}: too many L0 layers",
5691 : partition.start().unwrap(),
5692 : partition.end().unwrap()
5693 : );
5694 : last_partition_processed = Some(partition.clone());
5695 : all_generated = false;
5696 : break;
5697 : }
5698 : }
5699 : }
5700 : }
5701 : }
5702 :
5703 : let image_layers = batch_image_writer.finish(self, ctx).await?;
5704 :
5705 : let mut guard = self.layers.write().await;
5706 :
5707 : // FIXME: we could add the images to be uploaded *before* returning from here, but right
5708 : // now they are being scheduled outside of write lock; current way is inconsistent with
5709 : // compaction lock order.
5710 : guard
5711 : .open_mut()?
5712 : .track_new_image_layers(&image_layers, &self.metrics);
5713 : drop_wlock(guard);
5714 : let duration = timer.stop_and_record();
5715 :
5716 : // Creating image layers may have caused some previously visible layers to be covered
5717 : if !image_layers.is_empty() {
5718 : self.update_layer_visibility().await?;
5719 : }
5720 :
5721 : let total_layer_size = image_layers
5722 : .iter()
5723 128 : .map(|l| l.metadata().file_size)
5724 : .sum::<u64>();
5725 :
5726 : if !image_layers.is_empty() {
5727 : info!(
5728 : "created {} image layers ({} bytes) in {}s, processed {} out of {} partitions",
5729 : image_layers.len(),
5730 : total_layer_size,
5731 : duration.as_secs_f64(),
5732 : partition_processed,
5733 : total_partitions
5734 : );
5735 : }
5736 :
5737 : Ok((
5738 : image_layers,
5739 : if all_generated {
5740 : LastImageLayerCreationStatus::Complete
5741 : } else {
5742 : LastImageLayerCreationStatus::Incomplete {
5743 : last_key: if let Some(last_partition_processed) = last_partition_processed {
5744 : last_partition_processed.end().unwrap_or(Key::MIN)
5745 : } else {
5746 : // This branch should be unreachable, but in case it happens, we can just return the start key.
5747 : Key::MIN
5748 : },
5749 : }
5750 : },
5751 : ))
5752 : }
5753 :
5754 : /// Wait until the background initial logical size calculation is complete, or
5755 : /// this Timeline is shut down. Calling this function will cause the initial
5756 : /// logical size calculation to skip waiting for the background jobs barrier.
5757 0 : pub(crate) async fn await_initial_logical_size(self: Arc<Self>) {
5758 0 : if !self.shard_identity.is_shard_zero() {
5759 : // We don't populate logical size on shard >0: skip waiting for it.
5760 0 : return;
5761 0 : }
5762 0 :
5763 0 : if self.remote_client.is_deleting() {
5764 : // The timeline was created in a deletion-resume state, we don't expect logical size to be populated
5765 0 : return;
5766 0 : }
5767 0 :
5768 0 : if self.current_logical_size.current_size().is_exact() {
5769 : // root timelines are initialized with exact count, but never start the background
5770 : // calculation
5771 0 : return;
5772 0 : }
5773 0 :
5774 0 : if self.cancel.is_cancelled() {
5775 : // We already requested stopping the tenant, so we cannot wait for the logical size
5776 : // calculation to complete given the task might have been already cancelled.
5777 0 : return;
5778 0 : }
5779 :
5780 0 : if let Some(await_bg_cancel) = self
5781 0 : .current_logical_size
5782 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
5783 0 : .get()
5784 0 : {
5785 0 : await_bg_cancel.cancel();
5786 0 : } else {
5787 : // We should not wait if we were not able to explicitly instruct
5788 : // the logical size cancellation to skip the concurrency limit semaphore.
5789 : // TODO: this is an unexpected case. We should restructure so that it
5790 : // can't happen.
5791 0 : tracing::warn!(
5792 0 : "await_initial_logical_size: can't get semaphore cancel token, skipping"
5793 : );
5794 0 : debug_assert!(false);
5795 : }
5796 :
5797 0 : tokio::select!(
5798 0 : _ = self.current_logical_size.initialized.acquire() => {},
5799 0 : _ = self.cancel.cancelled() => {}
5800 : )
5801 0 : }
5802 :
5803 : /// Detach this timeline from its ancestor by copying all of ancestors layers as this
5804 : /// Timelines layers up to the ancestor_lsn.
5805 : ///
5806 : /// Requires a timeline that:
5807 : /// - has an ancestor to detach from
5808 : /// - the ancestor does not have an ancestor -- follows from the original RFC limitations, not
5809 : /// a technical requirement
5810 : ///
5811 : /// After the operation has been started, it cannot be canceled. Upon restart it needs to be
5812 : /// polled again until completion.
5813 : ///
5814 : /// During the operation all timelines sharing the data with this timeline will be reparented
5815 : /// from our ancestor to be branches of this timeline.
5816 0 : pub(crate) async fn prepare_to_detach_from_ancestor(
5817 0 : self: &Arc<Timeline>,
5818 0 : tenant: &crate::tenant::TenantShard,
5819 0 : options: detach_ancestor::Options,
5820 0 : behavior: DetachBehavior,
5821 0 : ctx: &RequestContext,
5822 0 : ) -> Result<detach_ancestor::Progress, detach_ancestor::Error> {
5823 0 : detach_ancestor::prepare(self, tenant, behavior, options, ctx).await
5824 0 : }
5825 :
5826 : /// Second step of detach from ancestor; detaches the `self` from it's current ancestor and
5827 : /// reparents any reparentable children of previous ancestor.
5828 : ///
5829 : /// This method is to be called while holding the TenantManager's tenant slot, so during this
5830 : /// method we cannot be deleted nor can any timeline be deleted. After this method returns
5831 : /// successfully, tenant must be reloaded.
5832 : ///
5833 : /// Final step will be to [`Self::complete_detaching_timeline_ancestor`] after optionally
5834 : /// resetting the tenant.
5835 0 : pub(crate) async fn detach_from_ancestor_and_reparent(
5836 0 : self: &Arc<Timeline>,
5837 0 : tenant: &crate::tenant::TenantShard,
5838 0 : prepared: detach_ancestor::PreparedTimelineDetach,
5839 0 : ancestor_timeline_id: TimelineId,
5840 0 : ancestor_lsn: Lsn,
5841 0 : behavior: DetachBehavior,
5842 0 : ctx: &RequestContext,
5843 0 : ) -> Result<detach_ancestor::DetachingAndReparenting, detach_ancestor::Error> {
5844 0 : detach_ancestor::detach_and_reparent(
5845 0 : self,
5846 0 : tenant,
5847 0 : prepared,
5848 0 : ancestor_timeline_id,
5849 0 : ancestor_lsn,
5850 0 : behavior,
5851 0 : ctx,
5852 0 : )
5853 0 : .await
5854 0 : }
5855 :
5856 : /// Final step which unblocks the GC.
5857 : ///
5858 : /// The tenant must've been reset if ancestry was modified previously (in tenant manager).
5859 0 : pub(crate) async fn complete_detaching_timeline_ancestor(
5860 0 : self: &Arc<Timeline>,
5861 0 : tenant: &crate::tenant::TenantShard,
5862 0 : attempt: detach_ancestor::Attempt,
5863 0 : ctx: &RequestContext,
5864 0 : ) -> Result<(), detach_ancestor::Error> {
5865 0 : detach_ancestor::complete(self, tenant, attempt, ctx).await
5866 0 : }
5867 : }
5868 :
5869 : impl Drop for Timeline {
5870 5 : fn drop(&mut self) {
5871 5 : if let Some(ancestor) = &self.ancestor_timeline {
5872 : // This lock should never be poisoned, but in case it is we do a .map() instead of
5873 : // an unwrap(), to avoid panicking in a destructor and thereby aborting the process.
5874 2 : if let Ok(mut gc_info) = ancestor.gc_info.write() {
5875 2 : if !gc_info.remove_child_not_offloaded(self.timeline_id) {
5876 0 : tracing::error!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id,
5877 0 : "Couldn't remove retain_lsn entry from offloaded timeline's parent: already removed");
5878 2 : }
5879 0 : }
5880 3 : }
5881 5 : info!(
5882 0 : "Timeline {} for tenant {} is being dropped",
5883 : self.timeline_id, self.tenant_shard_id.tenant_id
5884 : );
5885 5 : }
5886 : }
5887 :
5888 : /// Top-level failure to compact.
5889 : #[derive(Debug, thiserror::Error)]
5890 : pub(crate) enum CompactionError {
5891 : #[error("The timeline or pageserver is shutting down")]
5892 : ShuttingDown,
5893 : /// Compaction tried to offload a timeline and failed
5894 : #[error("Failed to offload timeline: {0}")]
5895 : Offload(OffloadError),
5896 : /// Compaction cannot be done right now; page reconstruction and so on.
5897 : #[error("Failed to collect keyspace: {0}")]
5898 : CollectKeySpaceError(#[from] CollectKeySpaceError),
5899 : #[error(transparent)]
5900 : Other(anyhow::Error),
5901 : #[error("Compaction already running: {0}")]
5902 : AlreadyRunning(&'static str),
5903 : }
5904 :
5905 : impl CompactionError {
5906 : /// Errors that can be ignored, i.e., cancel and shutdown.
5907 0 : pub fn is_cancel(&self) -> bool {
5908 0 : matches!(
5909 0 : self,
5910 : Self::ShuttingDown
5911 : | Self::AlreadyRunning(_)
5912 : | Self::CollectKeySpaceError(CollectKeySpaceError::Cancelled)
5913 : | Self::CollectKeySpaceError(CollectKeySpaceError::PageRead(
5914 : PageReconstructError::Cancelled
5915 : ))
5916 : | Self::Offload(OffloadError::Cancelled)
5917 : )
5918 0 : }
5919 :
5920 : /// Critical errors that indicate data corruption.
5921 0 : pub fn is_critical(&self) -> bool {
5922 0 : matches!(
5923 0 : self,
5924 : Self::CollectKeySpaceError(
5925 : CollectKeySpaceError::Decode(_)
5926 : | CollectKeySpaceError::PageRead(
5927 : PageReconstructError::MissingKey(_) | PageReconstructError::WalRedo(_),
5928 : )
5929 : )
5930 : )
5931 0 : }
5932 : }
5933 :
5934 : impl From<OffloadError> for CompactionError {
5935 0 : fn from(e: OffloadError) -> Self {
5936 0 : match e {
5937 0 : OffloadError::Cancelled => Self::ShuttingDown,
5938 0 : _ => Self::Offload(e),
5939 : }
5940 0 : }
5941 : }
5942 :
5943 : impl From<super::upload_queue::NotInitialized> for CompactionError {
5944 0 : fn from(value: super::upload_queue::NotInitialized) -> Self {
5945 0 : match value {
5946 : super::upload_queue::NotInitialized::Uninitialized => {
5947 0 : CompactionError::Other(anyhow::anyhow!(value))
5948 : }
5949 : super::upload_queue::NotInitialized::ShuttingDown
5950 0 : | super::upload_queue::NotInitialized::Stopped => CompactionError::ShuttingDown,
5951 : }
5952 0 : }
5953 : }
5954 :
5955 : impl From<super::storage_layer::layer::DownloadError> for CompactionError {
5956 0 : fn from(e: super::storage_layer::layer::DownloadError) -> Self {
5957 0 : match e {
5958 : super::storage_layer::layer::DownloadError::TimelineShutdown
5959 : | super::storage_layer::layer::DownloadError::DownloadCancelled => {
5960 0 : CompactionError::ShuttingDown
5961 : }
5962 : super::storage_layer::layer::DownloadError::ContextAndConfigReallyDeniesDownloads
5963 : | super::storage_layer::layer::DownloadError::DownloadRequired
5964 : | super::storage_layer::layer::DownloadError::NotFile(_)
5965 : | super::storage_layer::layer::DownloadError::DownloadFailed
5966 : | super::storage_layer::layer::DownloadError::PreStatFailed(_) => {
5967 0 : CompactionError::Other(anyhow::anyhow!(e))
5968 : }
5969 : #[cfg(test)]
5970 : super::storage_layer::layer::DownloadError::Failpoint(_) => {
5971 0 : CompactionError::Other(anyhow::anyhow!(e))
5972 : }
5973 : }
5974 0 : }
5975 : }
5976 :
5977 : impl From<layer_manager::Shutdown> for CompactionError {
5978 0 : fn from(_: layer_manager::Shutdown) -> Self {
5979 0 : CompactionError::ShuttingDown
5980 0 : }
5981 : }
5982 :
5983 : impl From<super::storage_layer::errors::PutError> for CompactionError {
5984 0 : fn from(e: super::storage_layer::errors::PutError) -> Self {
5985 0 : if e.is_cancel() {
5986 0 : CompactionError::ShuttingDown
5987 : } else {
5988 0 : CompactionError::Other(e.into_anyhow())
5989 : }
5990 0 : }
5991 : }
5992 :
5993 : #[serde_as]
5994 98 : #[derive(serde::Serialize)]
5995 : struct RecordedDuration(#[serde_as(as = "serde_with::DurationMicroSeconds")] Duration);
5996 :
5997 : #[derive(Default)]
5998 : enum DurationRecorder {
5999 : #[default]
6000 : NotStarted,
6001 : Recorded(RecordedDuration, tokio::time::Instant),
6002 : }
6003 :
6004 : impl DurationRecorder {
6005 252 : fn till_now(&self) -> DurationRecorder {
6006 252 : match self {
6007 : DurationRecorder::NotStarted => {
6008 0 : panic!("must only call on recorded measurements")
6009 : }
6010 252 : DurationRecorder::Recorded(_, ended) => {
6011 252 : let now = tokio::time::Instant::now();
6012 252 : DurationRecorder::Recorded(RecordedDuration(now - *ended), now)
6013 252 : }
6014 252 : }
6015 252 : }
6016 98 : fn into_recorded(self) -> Option<RecordedDuration> {
6017 98 : match self {
6018 0 : DurationRecorder::NotStarted => None,
6019 98 : DurationRecorder::Recorded(recorded, _) => Some(recorded),
6020 : }
6021 98 : }
6022 : }
6023 :
6024 : /// Descriptor for a delta layer used in testing infra. The start/end key/lsn range of the
6025 : /// delta layer might be different from the min/max key/lsn in the delta layer. Therefore,
6026 : /// the layer descriptor requires the user to provide the ranges, which should cover all
6027 : /// keys specified in the `data` field.
6028 : #[cfg(test)]
6029 : #[derive(Clone)]
6030 : pub struct DeltaLayerTestDesc {
6031 : pub lsn_range: Range<Lsn>,
6032 : pub key_range: Range<Key>,
6033 : pub data: Vec<(Key, Lsn, Value)>,
6034 : }
6035 :
6036 : #[cfg(test)]
6037 : #[derive(Clone)]
6038 : pub struct InMemoryLayerTestDesc {
6039 : pub lsn_range: Range<Lsn>,
6040 : pub data: Vec<(Key, Lsn, Value)>,
6041 : pub is_open: bool,
6042 : }
6043 :
6044 : #[cfg(test)]
6045 : impl DeltaLayerTestDesc {
6046 2 : pub fn new(lsn_range: Range<Lsn>, key_range: Range<Key>, data: Vec<(Key, Lsn, Value)>) -> Self {
6047 2 : Self {
6048 2 : lsn_range,
6049 2 : key_range,
6050 2 : data,
6051 2 : }
6052 2 : }
6053 :
6054 45 : pub fn new_with_inferred_key_range(
6055 45 : lsn_range: Range<Lsn>,
6056 45 : data: Vec<(Key, Lsn, Value)>,
6057 45 : ) -> Self {
6058 116 : let key_min = data.iter().map(|(key, _, _)| key).min().unwrap();
6059 116 : let key_max = data.iter().map(|(key, _, _)| key).max().unwrap();
6060 45 : Self {
6061 45 : key_range: (*key_min)..(key_max.next()),
6062 45 : lsn_range,
6063 45 : data,
6064 45 : }
6065 45 : }
6066 :
6067 5 : pub(crate) fn layer_name(&self) -> LayerName {
6068 5 : LayerName::Delta(super::storage_layer::DeltaLayerName {
6069 5 : key_range: self.key_range.clone(),
6070 5 : lsn_range: self.lsn_range.clone(),
6071 5 : })
6072 5 : }
6073 : }
6074 :
6075 : impl Timeline {
6076 14 : async fn finish_compact_batch(
6077 14 : self: &Arc<Self>,
6078 14 : new_deltas: &[ResidentLayer],
6079 14 : new_images: &[ResidentLayer],
6080 14 : layers_to_remove: &[Layer],
6081 14 : ) -> Result<(), CompactionError> {
6082 14 : let mut guard = tokio::select! {
6083 14 : guard = self.layers.write() => guard,
6084 14 : _ = self.cancel.cancelled() => {
6085 0 : return Err(CompactionError::ShuttingDown);
6086 : }
6087 : };
6088 :
6089 14 : let mut duplicated_layers = HashSet::new();
6090 14 :
6091 14 : let mut insert_layers = Vec::with_capacity(new_deltas.len());
6092 :
6093 168 : for l in new_deltas {
6094 154 : if guard.contains(l.as_ref()) {
6095 : // expected in tests
6096 0 : tracing::error!(layer=%l, "duplicated L1 layer");
6097 :
6098 : // good ways to cause a duplicate: we repeatedly error after taking the writelock
6099 : // `guard` on self.layers. as of writing this, there are no error returns except
6100 : // for compact_level0_phase1 creating an L0, which does not happen in practice
6101 : // because we have not implemented L0 => L0 compaction.
6102 0 : duplicated_layers.insert(l.layer_desc().key());
6103 154 : } else if LayerMap::is_l0(&l.layer_desc().key_range, l.layer_desc().is_delta) {
6104 0 : return Err(CompactionError::Other(anyhow::anyhow!(
6105 0 : "compaction generates a L0 layer file as output, which will cause infinite compaction."
6106 0 : )));
6107 154 : } else {
6108 154 : insert_layers.push(l.clone());
6109 154 : }
6110 : }
6111 :
6112 : // only remove those inputs which were not outputs
6113 14 : let remove_layers: Vec<Layer> = layers_to_remove
6114 14 : .iter()
6115 201 : .filter(|l| !duplicated_layers.contains(&l.layer_desc().key()))
6116 14 : .cloned()
6117 14 : .collect();
6118 14 :
6119 14 : if !new_images.is_empty() {
6120 0 : guard
6121 0 : .open_mut()?
6122 0 : .track_new_image_layers(new_images, &self.metrics);
6123 14 : }
6124 :
6125 14 : guard
6126 14 : .open_mut()?
6127 14 : .finish_compact_l0(&remove_layers, &insert_layers, &self.metrics);
6128 14 :
6129 14 : self.remote_client
6130 14 : .schedule_compaction_update(&remove_layers, new_deltas)?;
6131 :
6132 14 : drop_wlock(guard);
6133 14 :
6134 14 : Ok(())
6135 14 : }
6136 :
6137 0 : async fn rewrite_layers(
6138 0 : self: &Arc<Self>,
6139 0 : mut replace_layers: Vec<(Layer, ResidentLayer)>,
6140 0 : mut drop_layers: Vec<Layer>,
6141 0 : ) -> Result<(), CompactionError> {
6142 0 : let mut guard = self.layers.write().await;
6143 :
6144 : // Trim our lists in case our caller (compaction) raced with someone else (GC) removing layers: we want
6145 : // to avoid double-removing, and avoid rewriting something that was removed.
6146 0 : replace_layers.retain(|(l, _)| guard.contains(l));
6147 0 : drop_layers.retain(|l| guard.contains(l));
6148 0 :
6149 0 : guard
6150 0 : .open_mut()?
6151 0 : .rewrite_layers(&replace_layers, &drop_layers, &self.metrics);
6152 0 :
6153 0 : let upload_layers: Vec<_> = replace_layers.into_iter().map(|r| r.1).collect();
6154 0 :
6155 0 : self.remote_client
6156 0 : .schedule_compaction_update(&drop_layers, &upload_layers)?;
6157 :
6158 0 : Ok(())
6159 0 : }
6160 :
6161 : /// Schedules the uploads of the given image layers
6162 182 : fn upload_new_image_layers(
6163 182 : self: &Arc<Self>,
6164 182 : new_images: impl IntoIterator<Item = ResidentLayer>,
6165 182 : ) -> Result<(), super::upload_queue::NotInitialized> {
6166 195 : for layer in new_images {
6167 13 : self.remote_client.schedule_layer_file_upload(layer)?;
6168 : }
6169 : // should any new image layer been created, not uploading index_part will
6170 : // result in a mismatch between remote_physical_size and layermap calculated
6171 : // size, which will fail some tests, but should not be an issue otherwise.
6172 182 : self.remote_client
6173 182 : .schedule_index_upload_for_file_changes()?;
6174 182 : Ok(())
6175 182 : }
6176 :
6177 0 : async fn find_gc_time_cutoff(
6178 0 : &self,
6179 0 : now: SystemTime,
6180 0 : pitr: Duration,
6181 0 : cancel: &CancellationToken,
6182 0 : ctx: &RequestContext,
6183 0 : ) -> Result<Option<Lsn>, PageReconstructError> {
6184 0 : debug_assert_current_span_has_tenant_and_timeline_id();
6185 0 : if self.shard_identity.is_shard_zero() {
6186 : // Shard Zero has SLRU data and can calculate the PITR time -> LSN mapping itself
6187 0 : let time_range = if pitr == Duration::ZERO {
6188 0 : humantime::parse_duration(DEFAULT_PITR_INTERVAL).expect("constant is invalid")
6189 : } else {
6190 0 : pitr
6191 : };
6192 :
6193 : // If PITR is so large or `now` is so small that this underflows, we will retain no history (highly unexpected case)
6194 0 : let time_cutoff = now.checked_sub(time_range).unwrap_or(now);
6195 0 : let timestamp = to_pg_timestamp(time_cutoff);
6196 :
6197 0 : let time_cutoff = match self.find_lsn_for_timestamp(timestamp, cancel, ctx).await? {
6198 0 : LsnForTimestamp::Present(lsn) => Some(lsn),
6199 0 : LsnForTimestamp::Future(lsn) => {
6200 0 : // The timestamp is in the future. That sounds impossible,
6201 0 : // but what it really means is that there hasn't been
6202 0 : // any commits since the cutoff timestamp.
6203 0 : //
6204 0 : // In this case we should use the LSN of the most recent commit,
6205 0 : // which is implicitly the last LSN in the log.
6206 0 : debug!("future({})", lsn);
6207 0 : Some(self.get_last_record_lsn())
6208 : }
6209 0 : LsnForTimestamp::Past(lsn) => {
6210 0 : debug!("past({})", lsn);
6211 0 : None
6212 : }
6213 0 : LsnForTimestamp::NoData(lsn) => {
6214 0 : debug!("nodata({})", lsn);
6215 0 : None
6216 : }
6217 : };
6218 0 : Ok(time_cutoff)
6219 : } else {
6220 : // Shards other than shard zero cannot do timestamp->lsn lookups, and must instead learn their GC cutoff
6221 : // from shard zero's index. The index doesn't explicitly tell us the time cutoff, but we may assume that
6222 : // the point up to which shard zero's last_gc_cutoff has advanced will either be the time cutoff, or a
6223 : // space cutoff that we would also have respected ourselves.
6224 0 : match self
6225 0 : .remote_client
6226 0 : .download_foreign_index(ShardNumber(0), cancel)
6227 0 : .await
6228 : {
6229 0 : Ok((index_part, index_generation, _index_mtime)) => {
6230 0 : tracing::info!(
6231 0 : "GC loaded shard zero metadata (gen {index_generation:?}): latest_gc_cutoff_lsn: {}",
6232 0 : index_part.metadata.latest_gc_cutoff_lsn()
6233 : );
6234 0 : Ok(Some(index_part.metadata.latest_gc_cutoff_lsn()))
6235 : }
6236 : Err(DownloadError::NotFound) => {
6237 : // This is unexpected, because during timeline creations shard zero persists to remote
6238 : // storage before other shards are called, and during timeline deletion non-zeroth shards are
6239 : // deleted before the zeroth one. However, it should be harmless: if we somehow end up in this
6240 : // state, then shard zero should _eventually_ write an index when it GCs.
6241 0 : tracing::warn!("GC couldn't find shard zero's index for timeline");
6242 0 : Ok(None)
6243 : }
6244 0 : Err(e) => {
6245 0 : // TODO: this function should return a different error type than page reconstruct error
6246 0 : Err(PageReconstructError::Other(anyhow::anyhow!(e)))
6247 : }
6248 : }
6249 :
6250 : // TODO: after reading shard zero's GC cutoff, we should validate its generation with the storage
6251 : // controller. Otherwise, it is possible that we see the GC cutoff go backwards while shard zero
6252 : // is going through a migration if we read the old location's index and it has GC'd ahead of the
6253 : // new location. This is legal in principle, but problematic in practice because it might result
6254 : // in a timeline creation succeeding on shard zero ('s new location) but then failing on other shards
6255 : // because they have GC'd past the branch point.
6256 : }
6257 0 : }
6258 :
6259 : /// Find the Lsns above which layer files need to be retained on
6260 : /// garbage collection.
6261 : ///
6262 : /// We calculate two cutoffs, one based on time and one based on WAL size. `pitr`
6263 : /// controls the time cutoff (or ZERO to disable time-based retention), and `space_cutoff` controls
6264 : /// the space-based retention.
6265 : ///
6266 : /// This function doesn't simply to calculate time & space based retention: it treats time-based
6267 : /// retention as authoritative if enabled, and falls back to space-based retention if calculating
6268 : /// the LSN for a time point isn't possible. Therefore the GcCutoffs::horizon in the response might
6269 : /// be different to the `space_cutoff` input. Callers should treat the min() of the two cutoffs
6270 : /// in the response as the GC cutoff point for the timeline.
6271 : #[instrument(skip_all, fields(timeline_id=%self.timeline_id))]
6272 : pub(super) async fn find_gc_cutoffs(
6273 : &self,
6274 : now: SystemTime,
6275 : space_cutoff: Lsn,
6276 : pitr: Duration,
6277 : cancel: &CancellationToken,
6278 : ctx: &RequestContext,
6279 : ) -> Result<GcCutoffs, PageReconstructError> {
6280 : let _timer = self
6281 : .metrics
6282 : .find_gc_cutoffs_histo
6283 : .start_timer()
6284 : .record_on_drop();
6285 :
6286 : pausable_failpoint!("Timeline::find_gc_cutoffs-pausable");
6287 :
6288 : if cfg!(test) && pitr == Duration::ZERO {
6289 : // Unit tests which specify zero PITR interval expect to avoid doing any I/O for timestamp lookup
6290 : return Ok(GcCutoffs {
6291 : time: Some(self.get_last_record_lsn()),
6292 : space: space_cutoff,
6293 : });
6294 : }
6295 :
6296 : // Calculate a time-based limit on how much to retain:
6297 : // - if PITR interval is set, then this is our cutoff.
6298 : // - if PITR interval is not set, then we do a lookup
6299 : // based on DEFAULT_PITR_INTERVAL, so that size-based retention does not result in keeping history around permanently on idle databases.
6300 : let time_cutoff = self.find_gc_time_cutoff(now, pitr, cancel, ctx).await?;
6301 :
6302 : Ok(match (pitr, time_cutoff) {
6303 : (Duration::ZERO, Some(time_cutoff)) => {
6304 : // PITR is not set. Retain the size-based limit, or the default time retention,
6305 : // whichever requires less data.
6306 : GcCutoffs {
6307 : time: Some(self.get_last_record_lsn()),
6308 : space: std::cmp::max(time_cutoff, space_cutoff),
6309 : }
6310 : }
6311 : (Duration::ZERO, None) => {
6312 : // PITR is not set, and time lookup failed
6313 : GcCutoffs {
6314 : time: Some(self.get_last_record_lsn()),
6315 : space: space_cutoff,
6316 : }
6317 : }
6318 : (_, None) => {
6319 : // PITR interval is set & we didn't look up a timestamp successfully. Conservatively assume PITR
6320 : // cannot advance beyond what was already GC'd, and respect space-based retention
6321 : GcCutoffs {
6322 : time: Some(*self.get_applied_gc_cutoff_lsn()),
6323 : space: space_cutoff,
6324 : }
6325 : }
6326 : (_, Some(time_cutoff)) => {
6327 : // PITR interval is set and we looked up timestamp successfully. Ignore
6328 : // size based retention and make time cutoff authoritative
6329 : GcCutoffs {
6330 : time: Some(time_cutoff),
6331 : space: time_cutoff,
6332 : }
6333 : }
6334 : })
6335 : }
6336 :
6337 : /// Garbage collect layer files on a timeline that are no longer needed.
6338 : ///
6339 : /// Currently, we don't make any attempt at removing unneeded page versions
6340 : /// within a layer file. We can only remove the whole file if it's fully
6341 : /// obsolete.
6342 2 : pub(super) async fn gc(&self) -> Result<GcResult, GcError> {
6343 : // this is most likely the background tasks, but it might be the spawned task from
6344 : // immediate_gc
6345 2 : let _g = tokio::select! {
6346 2 : guard = self.gc_lock.lock() => guard,
6347 2 : _ = self.cancel.cancelled() => return Ok(GcResult::default()),
6348 : };
6349 2 : let timer = self.metrics.garbage_collect_histo.start_timer();
6350 2 :
6351 2 : fail_point!("before-timeline-gc");
6352 2 :
6353 2 : // Is the timeline being deleted?
6354 2 : if self.is_stopping() {
6355 0 : return Err(GcError::TimelineCancelled);
6356 2 : }
6357 2 :
6358 2 : let (space_cutoff, time_cutoff, retain_lsns, max_lsn_with_valid_lease) = {
6359 2 : let gc_info = self.gc_info.read().unwrap();
6360 2 :
6361 2 : let space_cutoff = min(gc_info.cutoffs.space, self.get_disk_consistent_lsn());
6362 2 : let time_cutoff = gc_info.cutoffs.time;
6363 2 : let retain_lsns = gc_info
6364 2 : .retain_lsns
6365 2 : .iter()
6366 2 : .map(|(lsn, _child_id, _is_offloaded)| *lsn)
6367 2 : .collect();
6368 2 :
6369 2 : // Gets the maximum LSN that holds the valid lease.
6370 2 : //
6371 2 : // Caveat: `refresh_gc_info` is in charged of updating the lease map.
6372 2 : // Here, we do not check for stale leases again.
6373 2 : let max_lsn_with_valid_lease = gc_info.leases.last_key_value().map(|(lsn, _)| *lsn);
6374 2 :
6375 2 : (
6376 2 : space_cutoff,
6377 2 : time_cutoff,
6378 2 : retain_lsns,
6379 2 : max_lsn_with_valid_lease,
6380 2 : )
6381 2 : };
6382 2 :
6383 2 : let mut new_gc_cutoff = space_cutoff.min(time_cutoff.unwrap_or_default());
6384 2 : let standby_horizon = self.standby_horizon.load();
6385 2 : // Hold GC for the standby, but as a safety guard do it only within some
6386 2 : // reasonable lag.
6387 2 : if standby_horizon != Lsn::INVALID {
6388 0 : if let Some(standby_lag) = new_gc_cutoff.checked_sub(standby_horizon) {
6389 : const MAX_ALLOWED_STANDBY_LAG: u64 = 10u64 << 30; // 10 GB
6390 0 : if standby_lag.0 < MAX_ALLOWED_STANDBY_LAG {
6391 0 : new_gc_cutoff = Lsn::min(standby_horizon, new_gc_cutoff);
6392 0 : trace!("holding off GC for standby apply LSN {}", standby_horizon);
6393 : } else {
6394 0 : warn!(
6395 0 : "standby is lagging for more than {}MB, not holding gc for it",
6396 0 : MAX_ALLOWED_STANDBY_LAG / 1024 / 1024
6397 : )
6398 : }
6399 0 : }
6400 2 : }
6401 :
6402 : // Reset standby horizon to ignore it if it is not updated till next GC.
6403 : // It is an easy way to unset it when standby disappears without adding
6404 : // more conf options.
6405 2 : self.standby_horizon.store(Lsn::INVALID);
6406 2 : self.metrics
6407 2 : .standby_horizon_gauge
6408 2 : .set(Lsn::INVALID.0 as i64);
6409 :
6410 2 : let res = self
6411 2 : .gc_timeline(
6412 2 : space_cutoff,
6413 2 : time_cutoff,
6414 2 : retain_lsns,
6415 2 : max_lsn_with_valid_lease,
6416 2 : new_gc_cutoff,
6417 2 : )
6418 2 : .instrument(
6419 2 : info_span!("gc_timeline", timeline_id = %self.timeline_id, cutoff = %new_gc_cutoff),
6420 : )
6421 2 : .await?;
6422 :
6423 : // only record successes
6424 2 : timer.stop_and_record();
6425 2 :
6426 2 : Ok(res)
6427 2 : }
6428 :
6429 2 : async fn gc_timeline(
6430 2 : &self,
6431 2 : space_cutoff: Lsn,
6432 2 : time_cutoff: Option<Lsn>, // None if uninitialized
6433 2 : retain_lsns: Vec<Lsn>,
6434 2 : max_lsn_with_valid_lease: Option<Lsn>,
6435 2 : new_gc_cutoff: Lsn,
6436 2 : ) -> Result<GcResult, GcError> {
6437 2 : // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc
6438 2 :
6439 2 : let now = SystemTime::now();
6440 2 : let mut result: GcResult = GcResult::default();
6441 2 :
6442 2 : // Nothing to GC. Return early.
6443 2 : let latest_gc_cutoff = *self.get_applied_gc_cutoff_lsn();
6444 2 : if latest_gc_cutoff >= new_gc_cutoff {
6445 0 : info!(
6446 0 : "Nothing to GC: new_gc_cutoff_lsn {new_gc_cutoff}, latest_gc_cutoff_lsn {latest_gc_cutoff}",
6447 : );
6448 0 : return Ok(result);
6449 2 : }
6450 :
6451 2 : let Some(time_cutoff) = time_cutoff else {
6452 : // The GC cutoff should have been computed by now, but let's be defensive.
6453 0 : info!("Nothing to GC: time_cutoff not yet computed");
6454 0 : return Ok(result);
6455 : };
6456 :
6457 : // We need to ensure that no one tries to read page versions or create
6458 : // branches at a point before latest_gc_cutoff_lsn. See branch_timeline()
6459 : // for details. This will block until the old value is no longer in use.
6460 : //
6461 : // The GC cutoff should only ever move forwards.
6462 2 : let waitlist = {
6463 2 : let write_guard = self.applied_gc_cutoff_lsn.lock_for_write();
6464 2 : if *write_guard > new_gc_cutoff {
6465 0 : return Err(GcError::BadLsn {
6466 0 : why: format!(
6467 0 : "Cannot move GC cutoff LSN backwards (was {}, new {})",
6468 0 : *write_guard, new_gc_cutoff
6469 0 : ),
6470 0 : });
6471 2 : }
6472 2 :
6473 2 : write_guard.store_and_unlock(new_gc_cutoff)
6474 2 : };
6475 2 : waitlist.wait().await;
6476 :
6477 2 : info!("GC starting");
6478 :
6479 2 : debug!("retain_lsns: {:?}", retain_lsns);
6480 :
6481 2 : let mut layers_to_remove = Vec::new();
6482 :
6483 : // Scan all layers in the timeline (remote or on-disk).
6484 : //
6485 : // Garbage collect the layer if all conditions are satisfied:
6486 : // 1. it is older than cutoff LSN;
6487 : // 2. it is older than PITR interval;
6488 : // 3. it doesn't need to be retained for 'retain_lsns';
6489 : // 4. it does not need to be kept for LSNs holding valid leases.
6490 : // 5. newer on-disk image layers cover the layer's whole key range
6491 : //
6492 : // TODO holding a write lock is too agressive and avoidable
6493 2 : let mut guard = self.layers.write().await;
6494 2 : let layers = guard.layer_map()?;
6495 12 : 'outer: for l in layers.iter_historic_layers() {
6496 12 : result.layers_total += 1;
6497 12 :
6498 12 : // 1. Is it newer than GC horizon cutoff point?
6499 12 : if l.get_lsn_range().end > space_cutoff {
6500 1 : info!(
6501 0 : "keeping {} because it's newer than space_cutoff {}",
6502 0 : l.layer_name(),
6503 : space_cutoff,
6504 : );
6505 1 : result.layers_needed_by_cutoff += 1;
6506 1 : continue 'outer;
6507 11 : }
6508 11 :
6509 11 : // 2. It is newer than PiTR cutoff point?
6510 11 : if l.get_lsn_range().end > time_cutoff {
6511 0 : info!(
6512 0 : "keeping {} because it's newer than time_cutoff {}",
6513 0 : l.layer_name(),
6514 : time_cutoff,
6515 : );
6516 0 : result.layers_needed_by_pitr += 1;
6517 0 : continue 'outer;
6518 11 : }
6519 :
6520 : // 3. Is it needed by a child branch?
6521 : // NOTE With that we would keep data that
6522 : // might be referenced by child branches forever.
6523 : // We can track this in child timeline GC and delete parent layers when
6524 : // they are no longer needed. This might be complicated with long inheritance chains.
6525 : //
6526 : // TODO Vec is not a great choice for `retain_lsns`
6527 11 : for retain_lsn in &retain_lsns {
6528 : // start_lsn is inclusive
6529 0 : if &l.get_lsn_range().start <= retain_lsn {
6530 0 : info!(
6531 0 : "keeping {} because it's still might be referenced by child branch forked at {} is_dropped: xx is_incremental: {}",
6532 0 : l.layer_name(),
6533 0 : retain_lsn,
6534 0 : l.is_incremental(),
6535 : );
6536 0 : result.layers_needed_by_branches += 1;
6537 0 : continue 'outer;
6538 0 : }
6539 : }
6540 :
6541 : // 4. Is there a valid lease that requires us to keep this layer?
6542 11 : if let Some(lsn) = &max_lsn_with_valid_lease {
6543 : // keep if layer start <= any of the lease
6544 9 : if &l.get_lsn_range().start <= lsn {
6545 7 : info!(
6546 0 : "keeping {} because there is a valid lease preventing GC at {}",
6547 0 : l.layer_name(),
6548 : lsn,
6549 : );
6550 7 : result.layers_needed_by_leases += 1;
6551 7 : continue 'outer;
6552 2 : }
6553 2 : }
6554 :
6555 : // 5. Is there a later on-disk layer for this relation?
6556 : //
6557 : // The end-LSN is exclusive, while disk_consistent_lsn is
6558 : // inclusive. For example, if disk_consistent_lsn is 100, it is
6559 : // OK for a delta layer to have end LSN 101, but if the end LSN
6560 : // is 102, then it might not have been fully flushed to disk
6561 : // before crash.
6562 : //
6563 : // For example, imagine that the following layers exist:
6564 : //
6565 : // 1000 - image (A)
6566 : // 1000-2000 - delta (B)
6567 : // 2000 - image (C)
6568 : // 2000-3000 - delta (D)
6569 : // 3000 - image (E)
6570 : //
6571 : // If GC horizon is at 2500, we can remove layers A and B, but
6572 : // we cannot remove C, even though it's older than 2500, because
6573 : // the delta layer 2000-3000 depends on it.
6574 4 : if !layers
6575 4 : .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff))
6576 : {
6577 3 : info!("keeping {} because it is the latest layer", l.layer_name());
6578 3 : result.layers_not_updated += 1;
6579 3 : continue 'outer;
6580 1 : }
6581 1 :
6582 1 : // We didn't find any reason to keep this file, so remove it.
6583 1 : info!(
6584 0 : "garbage collecting {} is_dropped: xx is_incremental: {}",
6585 0 : l.layer_name(),
6586 0 : l.is_incremental(),
6587 : );
6588 1 : layers_to_remove.push(l);
6589 : }
6590 :
6591 2 : if !layers_to_remove.is_empty() {
6592 : // Persist the new GC cutoff value before we actually remove anything.
6593 : // This unconditionally schedules also an index_part.json update, even though, we will
6594 : // be doing one a bit later with the unlinked gc'd layers.
6595 1 : let disk_consistent_lsn = self.disk_consistent_lsn.load();
6596 1 : self.schedule_uploads(disk_consistent_lsn, None)
6597 1 : .map_err(|e| {
6598 0 : if self.cancel.is_cancelled() {
6599 0 : GcError::TimelineCancelled
6600 : } else {
6601 0 : GcError::Remote(e)
6602 : }
6603 1 : })?;
6604 :
6605 1 : let gc_layers = layers_to_remove
6606 1 : .iter()
6607 1 : .map(|x| guard.get_from_desc(x))
6608 1 : .collect::<Vec<Layer>>();
6609 1 :
6610 1 : result.layers_removed = gc_layers.len() as u64;
6611 1 :
6612 1 : self.remote_client.schedule_gc_update(&gc_layers)?;
6613 :
6614 1 : guard.open_mut()?.finish_gc_timeline(&gc_layers);
6615 1 :
6616 1 : #[cfg(feature = "testing")]
6617 1 : {
6618 1 : result.doomed_layers = gc_layers;
6619 1 : }
6620 1 : }
6621 :
6622 2 : info!(
6623 0 : "GC completed removing {} layers, cutoff {}",
6624 : result.layers_removed, new_gc_cutoff
6625 : );
6626 :
6627 2 : result.elapsed = now.elapsed().unwrap_or(Duration::ZERO);
6628 2 : Ok(result)
6629 2 : }
6630 :
6631 : /// Reconstruct a value, using the given base image and WAL records in 'data'.
6632 364852 : async fn reconstruct_value(
6633 364852 : &self,
6634 364852 : key: Key,
6635 364852 : request_lsn: Lsn,
6636 364852 : mut data: ValueReconstructState,
6637 364852 : redo_attempt_type: RedoAttemptType,
6638 364852 : ) -> Result<Bytes, PageReconstructError> {
6639 364852 : // Perform WAL redo if needed
6640 364852 : data.records.reverse();
6641 :
6642 364852 : let fire_critical_error = match redo_attempt_type {
6643 363519 : RedoAttemptType::ReadPage => true,
6644 0 : RedoAttemptType::LegacyCompaction => true,
6645 1333 : RedoAttemptType::GcCompaction => false,
6646 : };
6647 :
6648 : // If we have a page image, and no WAL, we're all set
6649 364852 : if data.records.is_empty() {
6650 338078 : if let Some((img_lsn, img)) = &data.img {
6651 338078 : trace!(
6652 0 : "found page image for key {} at {}, no WAL redo required, req LSN {}",
6653 : key, img_lsn, request_lsn,
6654 : );
6655 338078 : Ok(img.clone())
6656 : } else {
6657 0 : Err(PageReconstructError::from(anyhow!(
6658 0 : "base image for {key} at {request_lsn} not found"
6659 0 : )))
6660 : }
6661 : } else {
6662 : // We need to do WAL redo.
6663 : //
6664 : // If we don't have a base image, then the oldest WAL record better initialize
6665 : // the page
6666 26774 : if data.img.is_none() && !data.records.first().unwrap().1.will_init() {
6667 0 : Err(PageReconstructError::from(anyhow!(
6668 0 : "Base image for {} at {} not found, but got {} WAL records",
6669 0 : key,
6670 0 : request_lsn,
6671 0 : data.records.len()
6672 0 : )))
6673 : } else {
6674 26774 : if data.img.is_some() {
6675 13029 : trace!(
6676 0 : "found {} WAL records and a base image for {} at {}, performing WAL redo",
6677 0 : data.records.len(),
6678 : key,
6679 : request_lsn
6680 : );
6681 : } else {
6682 13745 : trace!(
6683 0 : "found {} WAL records that will init the page for {} at {}, performing WAL redo",
6684 0 : data.records.len(),
6685 : key,
6686 : request_lsn
6687 : );
6688 : };
6689 26774 : let res = self
6690 26774 : .walredo_mgr
6691 26774 : .as_ref()
6692 26774 : .context("timeline has no walredo manager")
6693 26774 : .map_err(PageReconstructError::WalRedo)?
6694 26774 : .request_redo(
6695 26774 : key,
6696 26774 : request_lsn,
6697 26774 : data.img,
6698 26774 : data.records,
6699 26774 : self.pg_version,
6700 26774 : redo_attempt_type,
6701 26774 : )
6702 26774 : .await;
6703 26773 : let img = match res {
6704 26773 : Ok(img) => img,
6705 0 : Err(walredo::Error::Cancelled) => return Err(PageReconstructError::Cancelled),
6706 1 : Err(walredo::Error::Other(err)) => {
6707 1 : if fire_critical_error {
6708 0 : critical!("walredo failure during page reconstruction: {err:?}");
6709 1 : }
6710 1 : return Err(PageReconstructError::WalRedo(
6711 1 : err.context("reconstruct a page image"),
6712 1 : ));
6713 : }
6714 : };
6715 26773 : Ok(img)
6716 : }
6717 : }
6718 364852 : }
6719 :
6720 0 : pub(crate) async fn spawn_download_all_remote_layers(
6721 0 : self: Arc<Self>,
6722 0 : request: DownloadRemoteLayersTaskSpawnRequest,
6723 0 : ctx: &RequestContext,
6724 0 : ) -> Result<DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskInfo> {
6725 : use pageserver_api::models::DownloadRemoteLayersTaskState;
6726 :
6727 : // this is not really needed anymore; it has tests which really check the return value from
6728 : // http api. it would be better not to maintain this anymore.
6729 :
6730 0 : let mut status_guard = self.download_all_remote_layers_task_info.write().unwrap();
6731 0 : if let Some(st) = &*status_guard {
6732 0 : match &st.state {
6733 : DownloadRemoteLayersTaskState::Running => {
6734 0 : return Err(st.clone());
6735 : }
6736 : DownloadRemoteLayersTaskState::ShutDown
6737 0 : | DownloadRemoteLayersTaskState::Completed => {
6738 0 : *status_guard = None;
6739 0 : }
6740 : }
6741 0 : }
6742 :
6743 0 : let self_clone = Arc::clone(&self);
6744 0 : let task_ctx = ctx.detached_child(
6745 0 : TaskKind::DownloadAllRemoteLayers,
6746 0 : DownloadBehavior::Download,
6747 0 : );
6748 0 : let task_id = task_mgr::spawn(
6749 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
6750 0 : task_mgr::TaskKind::DownloadAllRemoteLayers,
6751 0 : self.tenant_shard_id,
6752 0 : Some(self.timeline_id),
6753 0 : "download all remote layers task",
6754 0 : async move {
6755 0 : self_clone.download_all_remote_layers(request, &task_ctx).await;
6756 0 : let mut status_guard = self_clone.download_all_remote_layers_task_info.write().unwrap();
6757 0 : match &mut *status_guard {
6758 : None => {
6759 0 : warn!("tasks status is supposed to be Some(), since we are running");
6760 : }
6761 0 : Some(st) => {
6762 0 : let exp_task_id = format!("{}", task_mgr::current_task_id().unwrap());
6763 0 : if st.task_id != exp_task_id {
6764 0 : warn!("task id changed while we were still running, expecting {} but have {}", exp_task_id, st.task_id);
6765 0 : } else {
6766 0 : st.state = DownloadRemoteLayersTaskState::Completed;
6767 0 : }
6768 : }
6769 : };
6770 0 : Ok(())
6771 0 : }
6772 0 : .instrument(info_span!(parent: None, "download_all_remote_layers", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
6773 : );
6774 :
6775 0 : let initial_info = DownloadRemoteLayersTaskInfo {
6776 0 : task_id: format!("{task_id}"),
6777 0 : state: DownloadRemoteLayersTaskState::Running,
6778 0 : total_layer_count: 0,
6779 0 : successful_download_count: 0,
6780 0 : failed_download_count: 0,
6781 0 : };
6782 0 : *status_guard = Some(initial_info.clone());
6783 0 :
6784 0 : Ok(initial_info)
6785 0 : }
6786 :
6787 0 : async fn download_all_remote_layers(
6788 0 : self: &Arc<Self>,
6789 0 : request: DownloadRemoteLayersTaskSpawnRequest,
6790 0 : ctx: &RequestContext,
6791 0 : ) {
6792 : use pageserver_api::models::DownloadRemoteLayersTaskState;
6793 :
6794 0 : let remaining = {
6795 0 : let guard = self.layers.read().await;
6796 0 : let Ok(lm) = guard.layer_map() else {
6797 : // technically here we could look into iterating accessible layers, but downloading
6798 : // all layers of a shutdown timeline makes no sense regardless.
6799 0 : tracing::info!("attempted to download all layers of shutdown timeline");
6800 0 : return;
6801 : };
6802 0 : lm.iter_historic_layers()
6803 0 : .map(|desc| guard.get_from_desc(&desc))
6804 0 : .collect::<Vec<_>>()
6805 0 : };
6806 0 : let total_layer_count = remaining.len();
6807 :
6808 : macro_rules! lock_status {
6809 : ($st:ident) => {
6810 : let mut st = self.download_all_remote_layers_task_info.write().unwrap();
6811 : let st = st
6812 : .as_mut()
6813 : .expect("this function is only called after the task has been spawned");
6814 : assert_eq!(
6815 : st.task_id,
6816 : format!(
6817 : "{}",
6818 : task_mgr::current_task_id().expect("we run inside a task_mgr task")
6819 : )
6820 : );
6821 : let $st = st;
6822 : };
6823 : }
6824 :
6825 : {
6826 0 : lock_status!(st);
6827 0 : st.total_layer_count = total_layer_count as u64;
6828 0 : }
6829 0 :
6830 0 : let mut remaining = remaining.into_iter();
6831 0 : let mut have_remaining = true;
6832 0 : let mut js = tokio::task::JoinSet::new();
6833 0 :
6834 0 : let cancel = task_mgr::shutdown_token();
6835 0 :
6836 0 : let limit = request.max_concurrent_downloads;
6837 :
6838 : loop {
6839 0 : while js.len() < limit.get() && have_remaining && !cancel.is_cancelled() {
6840 0 : let Some(next) = remaining.next() else {
6841 0 : have_remaining = false;
6842 0 : break;
6843 : };
6844 :
6845 0 : let span = tracing::info_span!("download", layer = %next);
6846 :
6847 0 : let ctx = ctx.attached_child();
6848 0 : js.spawn(
6849 0 : async move {
6850 0 : let res = next.download(&ctx).await;
6851 0 : (next, res)
6852 0 : }
6853 0 : .instrument(span),
6854 0 : );
6855 0 : }
6856 :
6857 0 : while let Some(res) = js.join_next().await {
6858 0 : match res {
6859 : Ok((_, Ok(_))) => {
6860 0 : lock_status!(st);
6861 0 : st.successful_download_count += 1;
6862 : }
6863 0 : Ok((layer, Err(e))) => {
6864 0 : tracing::error!(%layer, "download failed: {e:#}");
6865 0 : lock_status!(st);
6866 0 : st.failed_download_count += 1;
6867 : }
6868 0 : Err(je) if je.is_cancelled() => unreachable!("not used here"),
6869 0 : Err(je) if je.is_panic() => {
6870 0 : lock_status!(st);
6871 0 : st.failed_download_count += 1;
6872 : }
6873 0 : Err(je) => tracing::warn!("unknown joinerror: {je:?}"),
6874 : }
6875 : }
6876 :
6877 0 : if js.is_empty() && (!have_remaining || cancel.is_cancelled()) {
6878 0 : break;
6879 0 : }
6880 : }
6881 :
6882 : {
6883 0 : lock_status!(st);
6884 0 : st.state = DownloadRemoteLayersTaskState::Completed;
6885 : }
6886 0 : }
6887 :
6888 0 : pub(crate) fn get_download_all_remote_layers_task_info(
6889 0 : &self,
6890 0 : ) -> Option<DownloadRemoteLayersTaskInfo> {
6891 0 : self.download_all_remote_layers_task_info
6892 0 : .read()
6893 0 : .unwrap()
6894 0 : .clone()
6895 0 : }
6896 : }
6897 :
6898 : impl Timeline {
6899 : /// Returns non-remote layers for eviction.
6900 0 : pub(crate) async fn get_local_layers_for_disk_usage_eviction(&self) -> DiskUsageEvictionInfo {
6901 0 : let guard = self.layers.read().await;
6902 0 : let mut max_layer_size: Option<u64> = None;
6903 0 :
6904 0 : let resident_layers = guard
6905 0 : .likely_resident_layers()
6906 0 : .map(|layer| {
6907 0 : let file_size = layer.layer_desc().file_size;
6908 0 : max_layer_size = max_layer_size.map_or(Some(file_size), |m| Some(m.max(file_size)));
6909 0 :
6910 0 : let last_activity_ts = layer.latest_activity();
6911 0 :
6912 0 : EvictionCandidate {
6913 0 : layer: layer.to_owned().into(),
6914 0 : last_activity_ts,
6915 0 : relative_last_activity: finite_f32::FiniteF32::ZERO,
6916 0 : visibility: layer.visibility(),
6917 0 : }
6918 0 : })
6919 0 : .collect();
6920 0 :
6921 0 : DiskUsageEvictionInfo {
6922 0 : max_layer_size,
6923 0 : resident_layers,
6924 0 : }
6925 0 : }
6926 :
6927 949 : pub(crate) fn get_shard_index(&self) -> ShardIndex {
6928 949 : ShardIndex {
6929 949 : shard_number: self.tenant_shard_id.shard_number,
6930 949 : shard_count: self.tenant_shard_id.shard_count,
6931 949 : }
6932 949 : }
6933 :
6934 : /// Persistently blocks gc for `Manual` reason.
6935 : ///
6936 : /// Returns true if no such block existed before, false otherwise.
6937 0 : pub(crate) async fn block_gc(&self, tenant: &super::TenantShard) -> anyhow::Result<bool> {
6938 : use crate::tenant::remote_timeline_client::index::GcBlockingReason;
6939 0 : assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
6940 0 : tenant.gc_block.insert(self, GcBlockingReason::Manual).await
6941 0 : }
6942 :
6943 : /// Persistently unblocks gc for `Manual` reason.
6944 0 : pub(crate) async fn unblock_gc(&self, tenant: &super::TenantShard) -> anyhow::Result<()> {
6945 : use crate::tenant::remote_timeline_client::index::GcBlockingReason;
6946 0 : assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
6947 0 : tenant.gc_block.remove(self, GcBlockingReason::Manual).await
6948 0 : }
6949 :
6950 : #[cfg(test)]
6951 31 : pub(super) fn force_advance_lsn(self: &Arc<Timeline>, new_lsn: Lsn) {
6952 31 : self.last_record_lsn.advance(new_lsn);
6953 31 : }
6954 :
6955 : #[cfg(test)]
6956 1 : pub(super) fn force_set_disk_consistent_lsn(&self, new_value: Lsn) {
6957 1 : self.disk_consistent_lsn.store(new_value);
6958 1 : }
6959 :
6960 : /// Force create an image layer and place it into the layer map.
6961 : ///
6962 : /// DO NOT use this function directly. Use [`TenantShard::branch_timeline_test_with_layers`]
6963 : /// or [`TenantShard::create_test_timeline_with_layers`] to ensure all these layers are
6964 : /// placed into the layer map in one run AND be validated.
6965 : #[cfg(test)]
6966 36 : pub(super) async fn force_create_image_layer(
6967 36 : self: &Arc<Timeline>,
6968 36 : lsn: Lsn,
6969 36 : mut images: Vec<(Key, Bytes)>,
6970 36 : check_start_lsn: Option<Lsn>,
6971 36 : ctx: &RequestContext,
6972 36 : ) -> anyhow::Result<()> {
6973 36 : let last_record_lsn = self.get_last_record_lsn();
6974 36 : assert!(
6975 36 : lsn <= last_record_lsn,
6976 0 : "advance last record lsn before inserting a layer, lsn={lsn}, last_record_lsn={last_record_lsn}"
6977 : );
6978 36 : if let Some(check_start_lsn) = check_start_lsn {
6979 36 : assert!(lsn >= check_start_lsn);
6980 0 : }
6981 240 : images.sort_unstable_by(|(ka, _), (kb, _)| ka.cmp(kb));
6982 36 : let min_key = *images.first().map(|(k, _)| k).unwrap();
6983 36 : let end_key = images.last().map(|(k, _)| k).unwrap().next();
6984 36 : let mut image_layer_writer = ImageLayerWriter::new(
6985 36 : self.conf,
6986 36 : self.timeline_id,
6987 36 : self.tenant_shard_id,
6988 36 : &(min_key..end_key),
6989 36 : lsn,
6990 36 : &self.gate,
6991 36 : self.cancel.clone(),
6992 36 : ctx,
6993 36 : )
6994 36 : .await?;
6995 312 : for (key, img) in images {
6996 276 : image_layer_writer.put_image(key, img, ctx).await?;
6997 : }
6998 36 : let (desc, path) = image_layer_writer.finish(ctx).await?;
6999 36 : let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
7000 36 : info!("force created image layer {}", image_layer.local_path());
7001 : {
7002 36 : let mut guard = self.layers.write().await;
7003 36 : guard
7004 36 : .open_mut()
7005 36 : .unwrap()
7006 36 : .force_insert_layer(image_layer.clone());
7007 36 : }
7008 36 :
7009 36 : // Update remote_timeline_client state to reflect existence of this layer
7010 36 : self.remote_client
7011 36 : .schedule_layer_file_upload(image_layer)
7012 36 : .unwrap();
7013 36 :
7014 36 : Ok(())
7015 36 : }
7016 :
7017 : /// Force create a delta layer and place it into the layer map.
7018 : ///
7019 : /// DO NOT use this function directly. Use [`TenantShard::branch_timeline_test_with_layers`]
7020 : /// or [`TenantShard::create_test_timeline_with_layers`] to ensure all these layers are
7021 : /// placed into the layer map in one run AND be validated.
7022 : #[cfg(test)]
7023 50 : pub(super) async fn force_create_delta_layer(
7024 50 : self: &Arc<Timeline>,
7025 50 : mut deltas: DeltaLayerTestDesc,
7026 50 : check_start_lsn: Option<Lsn>,
7027 50 : ctx: &RequestContext,
7028 50 : ) -> anyhow::Result<()> {
7029 50 : let last_record_lsn = self.get_last_record_lsn();
7030 50 : deltas
7031 50 : .data
7032 124364 : .sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb)));
7033 50 : assert!(deltas.data.first().unwrap().0 >= deltas.key_range.start);
7034 50 : assert!(deltas.data.last().unwrap().0 < deltas.key_range.end);
7035 10464 : for (_, lsn, _) in &deltas.data {
7036 10414 : assert!(deltas.lsn_range.start <= *lsn && *lsn < deltas.lsn_range.end);
7037 : }
7038 50 : assert!(
7039 50 : deltas.lsn_range.end <= last_record_lsn,
7040 0 : "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}",
7041 : deltas.lsn_range.end,
7042 : last_record_lsn
7043 : );
7044 50 : if let Some(check_start_lsn) = check_start_lsn {
7045 50 : assert!(deltas.lsn_range.start >= check_start_lsn);
7046 0 : }
7047 50 : let mut delta_layer_writer = DeltaLayerWriter::new(
7048 50 : self.conf,
7049 50 : self.timeline_id,
7050 50 : self.tenant_shard_id,
7051 50 : deltas.key_range.start,
7052 50 : deltas.lsn_range,
7053 50 : &self.gate,
7054 50 : self.cancel.clone(),
7055 50 : ctx,
7056 50 : )
7057 50 : .await?;
7058 10464 : for (key, lsn, val) in deltas.data {
7059 10414 : delta_layer_writer.put_value(key, lsn, val, ctx).await?;
7060 : }
7061 50 : let (desc, path) = delta_layer_writer.finish(deltas.key_range.end, ctx).await?;
7062 50 : let delta_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
7063 50 : info!("force created delta layer {}", delta_layer.local_path());
7064 : {
7065 50 : let mut guard = self.layers.write().await;
7066 50 : guard
7067 50 : .open_mut()
7068 50 : .unwrap()
7069 50 : .force_insert_layer(delta_layer.clone());
7070 50 : }
7071 50 :
7072 50 : // Update remote_timeline_client state to reflect existence of this layer
7073 50 : self.remote_client
7074 50 : .schedule_layer_file_upload(delta_layer)
7075 50 : .unwrap();
7076 50 :
7077 50 : Ok(())
7078 50 : }
7079 :
7080 : /// Force create an in-memory layer and place them into the layer map.
7081 : #[cfg(test)]
7082 4 : pub(super) async fn force_create_in_memory_layer(
7083 4 : self: &Arc<Timeline>,
7084 4 : mut in_memory: InMemoryLayerTestDesc,
7085 4 : check_start_lsn: Option<Lsn>,
7086 4 : ctx: &RequestContext,
7087 4 : ) -> anyhow::Result<()> {
7088 : use utils::bin_ser::BeSer;
7089 :
7090 : // Validate LSNs
7091 4 : if let Some(check_start_lsn) = check_start_lsn {
7092 4 : assert!(in_memory.lsn_range.start >= check_start_lsn);
7093 0 : }
7094 :
7095 4 : let last_record_lsn = self.get_last_record_lsn();
7096 4 : let layer_end_lsn = if in_memory.is_open {
7097 1 : in_memory
7098 1 : .data
7099 1 : .iter()
7100 10 : .map(|(_key, lsn, _value)| lsn)
7101 1 : .max()
7102 1 : .cloned()
7103 : } else {
7104 3 : Some(in_memory.lsn_range.end)
7105 : };
7106 :
7107 4 : if let Some(end) = layer_end_lsn {
7108 4 : assert!(
7109 4 : end <= last_record_lsn,
7110 0 : "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}",
7111 : end,
7112 : last_record_lsn,
7113 : );
7114 0 : }
7115 :
7116 19820 : in_memory.data.iter().for_each(|(_key, lsn, _value)| {
7117 19820 : assert!(*lsn >= in_memory.lsn_range.start);
7118 19820 : assert!(*lsn < in_memory.lsn_range.end);
7119 19820 : });
7120 4 :
7121 4 : // Build the batch
7122 4 : in_memory
7123 4 : .data
7124 273384 : .sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb)));
7125 4 :
7126 4 : let data = in_memory
7127 4 : .data
7128 4 : .into_iter()
7129 19820 : .map(|(key, lsn, value)| {
7130 19820 : let value_size = value.serialized_size().unwrap() as usize;
7131 19820 : (key.to_compact(), lsn, value_size, value)
7132 19820 : })
7133 4 : .collect::<Vec<_>>();
7134 4 :
7135 4 : let batch = SerializedValueBatch::from_values(data);
7136 :
7137 : // Create the in-memory layer and write the batch into it
7138 4 : let layer = InMemoryLayer::create(
7139 4 : self.conf,
7140 4 : self.timeline_id,
7141 4 : self.tenant_shard_id,
7142 4 : in_memory.lsn_range.start,
7143 4 : &self.gate,
7144 4 : // TODO: if we ever use this function in production code, we need to pass the real cancellation token
7145 4 : &CancellationToken::new(),
7146 4 : ctx,
7147 4 : )
7148 4 : .await
7149 4 : .unwrap();
7150 4 :
7151 4 : layer.put_batch(batch, ctx).await.unwrap();
7152 4 : if !in_memory.is_open {
7153 3 : layer.freeze(in_memory.lsn_range.end).await;
7154 1 : }
7155 :
7156 4 : info!("force created in-memory layer {:?}", in_memory.lsn_range);
7157 :
7158 : // Link the layer to the layer map
7159 : {
7160 4 : let mut guard = self.layers.write().await;
7161 4 : let layer_map = guard.open_mut().unwrap();
7162 4 : layer_map.force_insert_in_memory_layer(Arc::new(layer));
7163 4 : }
7164 4 :
7165 4 : Ok(())
7166 4 : }
7167 :
7168 : /// Return all keys at the LSN in the image layers
7169 : #[cfg(test)]
7170 3 : pub(crate) async fn inspect_image_layers(
7171 3 : self: &Arc<Timeline>,
7172 3 : lsn: Lsn,
7173 3 : ctx: &RequestContext,
7174 3 : io_concurrency: IoConcurrency,
7175 3 : ) -> anyhow::Result<Vec<(Key, Bytes)>> {
7176 3 : let mut all_data = Vec::new();
7177 3 : let guard = self.layers.read().await;
7178 17 : for layer in guard.layer_map()?.iter_historic_layers() {
7179 17 : if !layer.is_delta() && layer.image_layer_lsn() == lsn {
7180 4 : let layer = guard.get_from_desc(&layer);
7181 4 : let mut reconstruct_data = ValuesReconstructState::new(io_concurrency.clone());
7182 4 : layer
7183 4 : .get_values_reconstruct_data(
7184 4 : KeySpace::single(Key::MIN..Key::MAX),
7185 4 : lsn..Lsn(lsn.0 + 1),
7186 4 : &mut reconstruct_data,
7187 4 : ctx,
7188 4 : )
7189 4 : .await?;
7190 33 : for (k, v) in std::mem::take(&mut reconstruct_data.keys) {
7191 33 : let v = v.collect_pending_ios().await?;
7192 33 : all_data.push((k, v.img.unwrap().1));
7193 : }
7194 13 : }
7195 : }
7196 3 : all_data.sort();
7197 3 : Ok(all_data)
7198 3 : }
7199 :
7200 : /// Get all historic layer descriptors in the layer map
7201 : #[cfg(test)]
7202 12 : pub(crate) async fn inspect_historic_layers(
7203 12 : self: &Arc<Timeline>,
7204 12 : ) -> anyhow::Result<Vec<super::storage_layer::PersistentLayerKey>> {
7205 12 : let mut layers = Vec::new();
7206 12 : let guard = self.layers.read().await;
7207 57 : for layer in guard.layer_map()?.iter_historic_layers() {
7208 57 : layers.push(layer.key());
7209 57 : }
7210 12 : Ok(layers)
7211 12 : }
7212 :
7213 : #[cfg(test)]
7214 5 : pub(crate) fn add_extra_test_dense_keyspace(&self, ks: KeySpace) {
7215 5 : let mut keyspace = self.extra_test_dense_keyspace.load().as_ref().clone();
7216 5 : keyspace.merge(&ks);
7217 5 : self.extra_test_dense_keyspace.store(Arc::new(keyspace));
7218 5 : }
7219 : }
7220 :
7221 : /// Tracking writes ingestion does to a particular in-memory layer.
7222 : ///
7223 : /// Cleared upon freezing a layer.
7224 : pub(crate) struct TimelineWriterState {
7225 : open_layer: Arc<InMemoryLayer>,
7226 : current_size: u64,
7227 : // Previous Lsn which passed through
7228 : prev_lsn: Option<Lsn>,
7229 : // Largest Lsn which passed through the current writer
7230 : max_lsn: Option<Lsn>,
7231 : // Cached details of the last freeze. Avoids going trough the atomic/lock on every put.
7232 : cached_last_freeze_at: Lsn,
7233 : }
7234 :
7235 : impl TimelineWriterState {
7236 656 : fn new(open_layer: Arc<InMemoryLayer>, current_size: u64, last_freeze_at: Lsn) -> Self {
7237 656 : Self {
7238 656 : open_layer,
7239 656 : current_size,
7240 656 : prev_lsn: None,
7241 656 : max_lsn: None,
7242 656 : cached_last_freeze_at: last_freeze_at,
7243 656 : }
7244 656 : }
7245 : }
7246 :
7247 : /// Various functions to mutate the timeline.
7248 : // TODO Currently, Deref is used to allow easy access to read methods from this trait.
7249 : // This is probably considered a bad practice in Rust and should be fixed eventually,
7250 : // but will cause large code changes.
7251 : pub(crate) struct TimelineWriter<'a> {
7252 : tl: &'a Timeline,
7253 : write_guard: tokio::sync::MutexGuard<'a, Option<TimelineWriterState>>,
7254 : }
7255 :
7256 : impl Deref for TimelineWriter<'_> {
7257 : type Target = Timeline;
7258 :
7259 4949184 : fn deref(&self) -> &Self::Target {
7260 4949184 : self.tl
7261 4949184 : }
7262 : }
7263 :
7264 : #[derive(PartialEq)]
7265 : enum OpenLayerAction {
7266 : Roll,
7267 : Open,
7268 : None,
7269 : }
7270 :
7271 : impl TimelineWriter<'_> {
7272 2402125 : async fn handle_open_layer_action(
7273 2402125 : &mut self,
7274 2402125 : at: Lsn,
7275 2402125 : action: OpenLayerAction,
7276 2402125 : ctx: &RequestContext,
7277 2402125 : ) -> anyhow::Result<&Arc<InMemoryLayer>> {
7278 2402125 : match action {
7279 : OpenLayerAction::Roll => {
7280 40 : let freeze_at = self.write_guard.as_ref().unwrap().max_lsn.unwrap();
7281 40 : self.roll_layer(freeze_at).await?;
7282 40 : self.open_layer(at, ctx).await?;
7283 : }
7284 616 : OpenLayerAction::Open => self.open_layer(at, ctx).await?,
7285 : OpenLayerAction::None => {
7286 2401469 : assert!(self.write_guard.is_some());
7287 : }
7288 : }
7289 :
7290 2402125 : Ok(&self.write_guard.as_ref().unwrap().open_layer)
7291 2402125 : }
7292 :
7293 656 : async fn open_layer(&mut self, at: Lsn, ctx: &RequestContext) -> anyhow::Result<()> {
7294 656 : let layer = self
7295 656 : .tl
7296 656 : .get_layer_for_write(at, &self.write_guard, ctx)
7297 656 : .await?;
7298 656 : let initial_size = layer.size().await?;
7299 :
7300 656 : let last_freeze_at = self.last_freeze_at.load();
7301 656 : self.write_guard.replace(TimelineWriterState::new(
7302 656 : layer,
7303 656 : initial_size,
7304 656 : last_freeze_at,
7305 656 : ));
7306 656 :
7307 656 : Ok(())
7308 656 : }
7309 :
7310 40 : async fn roll_layer(&mut self, freeze_at: Lsn) -> Result<(), FlushLayerError> {
7311 40 : let current_size = self.write_guard.as_ref().unwrap().current_size;
7312 :
7313 : // If layer flushes are backpressured due to compaction not keeping up, wait for the flush
7314 : // to propagate the backpressure up into WAL ingestion.
7315 40 : let l0_count = self
7316 40 : .tl
7317 40 : .layers
7318 40 : .read()
7319 40 : .await
7320 40 : .layer_map()?
7321 40 : .level0_deltas()
7322 40 : .len();
7323 40 : let wait_thresholds = [
7324 40 : self.get_l0_flush_delay_threshold(),
7325 40 : self.get_l0_flush_stall_threshold(),
7326 40 : ];
7327 40 : let wait_threshold = wait_thresholds.into_iter().flatten().min();
7328 :
7329 : // self.write_guard will be taken by the freezing
7330 40 : let flush_id = self
7331 40 : .tl
7332 40 : .freeze_inmem_layer_at(freeze_at, &mut self.write_guard)
7333 40 : .await?;
7334 :
7335 40 : assert!(self.write_guard.is_none());
7336 :
7337 40 : if let Some(wait_threshold) = wait_threshold {
7338 0 : if l0_count >= wait_threshold {
7339 0 : debug!(
7340 0 : "layer roll waiting for flush due to compaction backpressure at {l0_count} L0 layers"
7341 : );
7342 0 : self.tl.wait_flush_completion(flush_id).await?;
7343 0 : }
7344 40 : }
7345 :
7346 40 : if current_size >= self.get_checkpoint_distance() * 2 {
7347 0 : warn!("Flushed oversized open layer with size {}", current_size)
7348 40 : }
7349 :
7350 40 : Ok(())
7351 40 : }
7352 :
7353 2402125 : fn get_open_layer_action(&self, lsn: Lsn, new_value_size: u64) -> OpenLayerAction {
7354 2402125 : let state = &*self.write_guard;
7355 2402125 : let Some(state) = &state else {
7356 616 : return OpenLayerAction::Open;
7357 : };
7358 :
7359 : #[cfg(feature = "testing")]
7360 2401509 : if state.cached_last_freeze_at < self.tl.last_freeze_at.load() {
7361 : // this check and assertion are not really needed because
7362 : // LayerManager::try_freeze_in_memory_layer will always clear out the
7363 : // TimelineWriterState if something is frozen. however, we can advance last_freeze_at when there
7364 : // is no TimelineWriterState.
7365 0 : assert!(
7366 0 : state.open_layer.end_lsn.get().is_some(),
7367 0 : "our open_layer must be outdated"
7368 : );
7369 :
7370 : // this would be a memory leak waiting to happen because the in-memory layer always has
7371 : // an index
7372 0 : panic!("BUG: TimelineWriterState held on to frozen in-memory layer.");
7373 2401509 : }
7374 2401509 :
7375 2401509 : if state.prev_lsn == Some(lsn) {
7376 : // Rolling mid LSN is not supported by [downstream code].
7377 : // Hence, only roll at LSN boundaries.
7378 : //
7379 : // [downstream code]: https://github.com/neondatabase/neon/pull/7993#discussion_r1633345422
7380 3 : return OpenLayerAction::None;
7381 2401506 : }
7382 2401506 :
7383 2401506 : if state.current_size == 0 {
7384 : // Don't roll empty layers
7385 0 : return OpenLayerAction::None;
7386 2401506 : }
7387 2401506 :
7388 2401506 : if self.tl.should_roll(
7389 2401506 : state.current_size,
7390 2401506 : state.current_size + new_value_size,
7391 2401506 : self.get_checkpoint_distance(),
7392 2401506 : lsn,
7393 2401506 : state.cached_last_freeze_at,
7394 2401506 : state.open_layer.get_opened_at(),
7395 2401506 : ) {
7396 40 : OpenLayerAction::Roll
7397 : } else {
7398 2401466 : OpenLayerAction::None
7399 : }
7400 2402125 : }
7401 :
7402 : /// Put a batch of keys at the specified Lsns.
7403 2402124 : pub(crate) async fn put_batch(
7404 2402124 : &mut self,
7405 2402124 : batch: SerializedValueBatch,
7406 2402124 : ctx: &RequestContext,
7407 2402124 : ) -> anyhow::Result<()> {
7408 2402124 : if !batch.has_data() {
7409 0 : return Ok(());
7410 2402124 : }
7411 2402124 :
7412 2402124 : // In debug builds, assert that we don't write any keys that don't belong to this shard.
7413 2402124 : // We don't assert this in release builds, since key ownership policies may change over
7414 2402124 : // time. Stray keys will be removed during compaction.
7415 2402124 : if cfg!(debug_assertions) {
7416 4947509 : for metadata in &batch.metadata {
7417 2545385 : if let ValueMeta::Serialized(metadata) = metadata {
7418 2545385 : let key = Key::from_compact(metadata.key);
7419 2545385 : assert!(
7420 2545385 : self.shard_identity.is_key_local(&key)
7421 0 : || self.shard_identity.is_key_global(&key),
7422 0 : "key {key} does not belong on shard {}",
7423 0 : self.shard_identity.shard_index()
7424 : );
7425 0 : }
7426 : }
7427 0 : }
7428 :
7429 2402124 : let batch_max_lsn = batch.max_lsn;
7430 2402124 : let buf_size: u64 = batch.buffer_size() as u64;
7431 2402124 :
7432 2402124 : let action = self.get_open_layer_action(batch_max_lsn, buf_size);
7433 2402124 : let layer = self
7434 2402124 : .handle_open_layer_action(batch_max_lsn, action, ctx)
7435 2402124 : .await?;
7436 :
7437 2402124 : let res = layer.put_batch(batch, ctx).await;
7438 :
7439 2402124 : if res.is_ok() {
7440 2402124 : // Update the current size only when the entire write was ok.
7441 2402124 : // In case of failures, we may have had partial writes which
7442 2402124 : // render the size tracking out of sync. That's ok because
7443 2402124 : // the checkpoint distance should be significantly smaller
7444 2402124 : // than the S3 single shot upload limit of 5GiB.
7445 2402124 : let state = self.write_guard.as_mut().unwrap();
7446 2402124 :
7447 2402124 : state.current_size += buf_size;
7448 2402124 : state.prev_lsn = Some(batch_max_lsn);
7449 2402124 : state.max_lsn = std::cmp::max(state.max_lsn, Some(batch_max_lsn));
7450 2402124 : }
7451 :
7452 2402124 : res
7453 2402124 : }
7454 :
7455 : #[cfg(test)]
7456 : /// Test helper, for tests that would like to poke individual values without composing a batch
7457 2195077 : pub(crate) async fn put(
7458 2195077 : &mut self,
7459 2195077 : key: Key,
7460 2195077 : lsn: Lsn,
7461 2195077 : value: &Value,
7462 2195077 : ctx: &RequestContext,
7463 2195077 : ) -> anyhow::Result<()> {
7464 : use utils::bin_ser::BeSer;
7465 2195077 : if !key.is_valid_key_on_write_path() {
7466 0 : bail!(
7467 0 : "the request contains data not supported by pageserver at TimelineWriter::put: {}",
7468 0 : key
7469 0 : );
7470 2195077 : }
7471 2195077 : let val_ser_size = value.serialized_size().unwrap() as usize;
7472 2195077 : let batch = SerializedValueBatch::from_values(vec![(
7473 2195077 : key.to_compact(),
7474 2195077 : lsn,
7475 2195077 : val_ser_size,
7476 2195077 : value.clone(),
7477 2195077 : )]);
7478 2195077 :
7479 2195077 : self.put_batch(batch, ctx).await
7480 2195077 : }
7481 :
7482 1 : pub(crate) async fn delete_batch(
7483 1 : &mut self,
7484 1 : batch: &[(Range<Key>, Lsn)],
7485 1 : ctx: &RequestContext,
7486 1 : ) -> anyhow::Result<()> {
7487 1 : if let Some((_, lsn)) = batch.first() {
7488 1 : let action = self.get_open_layer_action(*lsn, 0);
7489 1 : let layer = self.handle_open_layer_action(*lsn, action, ctx).await?;
7490 1 : layer.put_tombstones(batch).await?;
7491 0 : }
7492 :
7493 1 : Ok(())
7494 1 : }
7495 :
7496 : /// Track the end of the latest digested WAL record.
7497 : /// Remember the (end of) last valid WAL record remembered in the timeline.
7498 : ///
7499 : /// Call this after you have finished writing all the WAL up to 'lsn'.
7500 : ///
7501 : /// 'lsn' must be aligned. This wakes up any wait_lsn() callers waiting for
7502 : /// the 'lsn' or anything older. The previous last record LSN is stored alongside
7503 : /// the latest and can be read.
7504 2639555 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
7505 2639555 : self.tl.finish_write(new_lsn);
7506 2639555 : }
7507 :
7508 135285 : pub(crate) fn update_current_logical_size(&self, delta: i64) {
7509 135285 : self.tl.update_current_logical_size(delta)
7510 135285 : }
7511 : }
7512 :
7513 : // We need TimelineWriter to be send in upcoming conversion of
7514 : // Timeline::layers to tokio::sync::RwLock.
7515 : #[test]
7516 1 : fn is_send() {
7517 1 : fn _assert_send<T: Send>() {}
7518 1 : _assert_send::<TimelineWriter<'_>>();
7519 1 : }
7520 :
7521 : #[cfg(test)]
7522 : mod tests {
7523 : use std::sync::Arc;
7524 :
7525 : use pageserver_api::key::Key;
7526 : use pageserver_api::value::Value;
7527 : use std::iter::Iterator;
7528 : use tracing::Instrument;
7529 : use utils::id::TimelineId;
7530 : use utils::lsn::Lsn;
7531 :
7532 : use super::HeatMapTimeline;
7533 : use crate::context::RequestContextBuilder;
7534 : use crate::tenant::harness::{TenantHarness, test_img};
7535 : use crate::tenant::layer_map::LayerMap;
7536 : use crate::tenant::storage_layer::{Layer, LayerName, LayerVisibilityHint};
7537 : use crate::tenant::timeline::{DeltaLayerTestDesc, EvictionError};
7538 : use crate::tenant::{PreviousHeatmap, Timeline};
7539 :
7540 5 : fn assert_heatmaps_have_same_layers(lhs: &HeatMapTimeline, rhs: &HeatMapTimeline) {
7541 5 : assert_eq!(lhs.all_layers().count(), rhs.all_layers().count());
7542 5 : let lhs_rhs = lhs.all_layers().zip(rhs.all_layers());
7543 25 : for (l, r) in lhs_rhs {
7544 20 : assert_eq!(l.name, r.name);
7545 20 : assert_eq!(l.metadata, r.metadata);
7546 : }
7547 5 : }
7548 :
7549 : #[tokio::test]
7550 1 : async fn test_heatmap_generation() {
7551 1 : let harness = TenantHarness::create("heatmap_generation").await.unwrap();
7552 1 :
7553 1 : let covered_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
7554 1 : Lsn(0x10)..Lsn(0x20),
7555 1 : vec![(
7556 1 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
7557 1 : Lsn(0x11),
7558 1 : Value::Image(test_img("foo")),
7559 1 : )],
7560 1 : );
7561 1 : let visible_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
7562 1 : Lsn(0x10)..Lsn(0x20),
7563 1 : vec![(
7564 1 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
7565 1 : Lsn(0x11),
7566 1 : Value::Image(test_img("foo")),
7567 1 : )],
7568 1 : );
7569 1 : let l0_delta = DeltaLayerTestDesc::new(
7570 1 : Lsn(0x20)..Lsn(0x30),
7571 1 : Key::from_hex("000000000000000000000000000000000000").unwrap()
7572 1 : ..Key::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(),
7573 1 : vec![(
7574 1 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
7575 1 : Lsn(0x25),
7576 1 : Value::Image(test_img("foo")),
7577 1 : )],
7578 1 : );
7579 1 : let delta_layers = vec![
7580 1 : covered_delta.clone(),
7581 1 : visible_delta.clone(),
7582 1 : l0_delta.clone(),
7583 1 : ];
7584 1 :
7585 1 : let image_layer = (
7586 1 : Lsn(0x40),
7587 1 : vec![(
7588 1 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
7589 1 : test_img("bar"),
7590 1 : )],
7591 1 : );
7592 1 : let image_layers = vec![image_layer];
7593 1 :
7594 1 : let (tenant, ctx) = harness.load().await;
7595 1 : let timeline = tenant
7596 1 : .create_test_timeline_with_layers(
7597 1 : TimelineId::generate(),
7598 1 : Lsn(0x10),
7599 1 : 14,
7600 1 : &ctx,
7601 1 : Vec::new(), // in-memory layers
7602 1 : delta_layers,
7603 1 : image_layers,
7604 1 : Lsn(0x100),
7605 1 : )
7606 1 : .await
7607 1 : .unwrap();
7608 1 : let ctx = &ctx.with_scope_timeline(&timeline);
7609 1 :
7610 1 : // Layer visibility is an input to heatmap generation, so refresh it first
7611 1 : timeline.update_layer_visibility().await.unwrap();
7612 1 :
7613 1 : let heatmap = timeline
7614 1 : .generate_heatmap()
7615 1 : .await
7616 1 : .expect("Infallible while timeline is not shut down");
7617 1 :
7618 1 : assert_eq!(heatmap.timeline_id, timeline.timeline_id);
7619 1 :
7620 1 : // L0 should come last
7621 1 : let heatmap_layers = heatmap.all_layers().collect::<Vec<_>>();
7622 1 : assert_eq!(heatmap_layers.last().unwrap().name, l0_delta.layer_name());
7623 1 :
7624 1 : let mut last_lsn = Lsn::MAX;
7625 5 : for layer in heatmap_layers {
7626 1 : // Covered layer should be omitted
7627 4 : assert!(layer.name != covered_delta.layer_name());
7628 1 :
7629 4 : let layer_lsn = match &layer.name {
7630 2 : LayerName::Delta(d) => d.lsn_range.end,
7631 2 : LayerName::Image(i) => i.lsn,
7632 1 : };
7633 1 :
7634 1 : // Apart from L0s, newest Layers should come first
7635 4 : if !LayerMap::is_l0(layer.name.key_range(), layer.name.is_delta()) {
7636 3 : assert!(layer_lsn <= last_lsn);
7637 3 : last_lsn = layer_lsn;
7638 1 : }
7639 1 : }
7640 1 :
7641 1 : // Evict all the layers and stash the old heatmap in the timeline.
7642 1 : // This simulates a migration to a cold secondary location.
7643 1 :
7644 1 : let guard = timeline.layers.read().await;
7645 1 : let mut all_layers = Vec::new();
7646 1 : let forever = std::time::Duration::from_secs(120);
7647 5 : for layer in guard.likely_resident_layers() {
7648 5 : all_layers.push(layer.clone());
7649 5 : layer.evict_and_wait(forever).await.unwrap();
7650 1 : }
7651 1 : drop(guard);
7652 1 :
7653 1 : timeline
7654 1 : .previous_heatmap
7655 1 : .store(Some(Arc::new(PreviousHeatmap::Active {
7656 1 : heatmap: heatmap.clone(),
7657 1 : read_at: std::time::Instant::now(),
7658 1 : end_lsn: None,
7659 1 : })));
7660 1 :
7661 1 : // Generate a new heatmap and assert that it contains the same layers as the old one.
7662 1 : let post_migration_heatmap = timeline.generate_heatmap().await.unwrap();
7663 1 : assert_heatmaps_have_same_layers(&heatmap, &post_migration_heatmap);
7664 1 :
7665 1 : // Download each layer one by one. Generate the heatmap at each step and check
7666 1 : // that it's stable.
7667 6 : for layer in all_layers {
7668 5 : if layer.visibility() == LayerVisibilityHint::Covered {
7669 1 : continue;
7670 4 : }
7671 4 :
7672 4 : eprintln!("Downloading {layer} and re-generating heatmap");
7673 4 :
7674 4 : let ctx = &RequestContextBuilder::from(ctx)
7675 4 : .download_behavior(crate::context::DownloadBehavior::Download)
7676 4 : .attached_child();
7677 1 :
7678 4 : let _resident = layer
7679 4 : .download_and_keep_resident(ctx)
7680 4 : .instrument(tracing::info_span!(
7681 4 : parent: None,
7682 1 : "download_layer",
7683 1 : tenant_id = %timeline.tenant_shard_id.tenant_id,
7684 0 : shard_id = %timeline.tenant_shard_id.shard_slug(),
7685 0 : timeline_id = %timeline.timeline_id
7686 1 : ))
7687 4 : .await
7688 4 : .unwrap();
7689 1 :
7690 4 : let post_download_heatmap = timeline.generate_heatmap().await.unwrap();
7691 4 : assert_heatmaps_have_same_layers(&heatmap, &post_download_heatmap);
7692 1 : }
7693 1 :
7694 1 : // Everything from the post-migration heatmap is now resident.
7695 1 : // Check that we drop it from memory.
7696 1 : assert!(matches!(
7697 1 : timeline.previous_heatmap.load().as_deref(),
7698 1 : Some(PreviousHeatmap::Obsolete)
7699 1 : ));
7700 1 : }
7701 :
7702 : #[tokio::test]
7703 1 : async fn test_previous_heatmap_obsoletion() {
7704 1 : let harness = TenantHarness::create("heatmap_previous_heatmap_obsoletion")
7705 1 : .await
7706 1 : .unwrap();
7707 1 :
7708 1 : let l0_delta = DeltaLayerTestDesc::new(
7709 1 : Lsn(0x20)..Lsn(0x30),
7710 1 : Key::from_hex("000000000000000000000000000000000000").unwrap()
7711 1 : ..Key::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(),
7712 1 : vec![(
7713 1 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
7714 1 : Lsn(0x25),
7715 1 : Value::Image(test_img("foo")),
7716 1 : )],
7717 1 : );
7718 1 :
7719 1 : let image_layer = (
7720 1 : Lsn(0x40),
7721 1 : vec![(
7722 1 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
7723 1 : test_img("bar"),
7724 1 : )],
7725 1 : );
7726 1 :
7727 1 : let delta_layers = vec![l0_delta];
7728 1 : let image_layers = vec![image_layer];
7729 1 :
7730 1 : let (tenant, ctx) = harness.load().await;
7731 1 : let timeline = tenant
7732 1 : .create_test_timeline_with_layers(
7733 1 : TimelineId::generate(),
7734 1 : Lsn(0x10),
7735 1 : 14,
7736 1 : &ctx,
7737 1 : Vec::new(), // in-memory layers
7738 1 : delta_layers,
7739 1 : image_layers,
7740 1 : Lsn(0x100),
7741 1 : )
7742 1 : .await
7743 1 : .unwrap();
7744 1 :
7745 1 : // Layer visibility is an input to heatmap generation, so refresh it first
7746 1 : timeline.update_layer_visibility().await.unwrap();
7747 1 :
7748 1 : let heatmap = timeline
7749 1 : .generate_heatmap()
7750 1 : .await
7751 1 : .expect("Infallible while timeline is not shut down");
7752 1 :
7753 1 : // Both layers should be in the heatmap
7754 1 : assert!(heatmap.all_layers().count() > 0);
7755 1 :
7756 1 : // Now simulate a migration.
7757 1 : timeline
7758 1 : .previous_heatmap
7759 1 : .store(Some(Arc::new(PreviousHeatmap::Active {
7760 1 : heatmap: heatmap.clone(),
7761 1 : read_at: std::time::Instant::now(),
7762 1 : end_lsn: None,
7763 1 : })));
7764 1 :
7765 1 : // Evict all the layers in the previous heatmap
7766 1 : let guard = timeline.layers.read().await;
7767 1 : let forever = std::time::Duration::from_secs(120);
7768 3 : for layer in guard.likely_resident_layers() {
7769 3 : layer.evict_and_wait(forever).await.unwrap();
7770 1 : }
7771 1 : drop(guard);
7772 1 :
7773 1 : // Generate a new heatmap and check that the previous heatmap
7774 1 : // has been marked obsolete.
7775 1 : let post_eviction_heatmap = timeline
7776 1 : .generate_heatmap()
7777 1 : .await
7778 1 : .expect("Infallible while timeline is not shut down");
7779 1 :
7780 1 : assert_eq!(post_eviction_heatmap.all_layers().count(), 0);
7781 1 : assert!(matches!(
7782 1 : timeline.previous_heatmap.load().as_deref(),
7783 1 : Some(PreviousHeatmap::Obsolete)
7784 1 : ));
7785 1 : }
7786 :
7787 : #[tokio::test]
7788 1 : async fn two_layer_eviction_attempts_at_the_same_time() {
7789 1 : let harness = TenantHarness::create("two_layer_eviction_attempts_at_the_same_time")
7790 1 : .await
7791 1 : .unwrap();
7792 1 :
7793 1 : let (tenant, ctx) = harness.load().await;
7794 1 : let timeline = tenant
7795 1 : .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
7796 1 : .await
7797 1 : .unwrap();
7798 1 :
7799 1 : let layer = find_some_layer(&timeline).await;
7800 1 : let layer = layer
7801 1 : .keep_resident()
7802 1 : .await
7803 1 : .expect("no download => no downloading errors")
7804 1 : .drop_eviction_guard();
7805 1 :
7806 1 : let forever = std::time::Duration::from_secs(120);
7807 1 :
7808 1 : let first = layer.evict_and_wait(forever);
7809 1 : let second = layer.evict_and_wait(forever);
7810 1 :
7811 1 : let (first, second) = tokio::join!(first, second);
7812 1 :
7813 1 : let res = layer.keep_resident().await;
7814 1 : assert!(res.is_none(), "{res:?}");
7815 1 :
7816 1 : match (first, second) {
7817 1 : (Ok(()), Ok(())) => {
7818 1 : // because there are no more timeline locks being taken on eviction path, we can
7819 1 : // witness all three outcomes here.
7820 1 : }
7821 1 : (Ok(()), Err(EvictionError::NotFound)) | (Err(EvictionError::NotFound), Ok(())) => {
7822 0 : // if one completes before the other, this is fine just as well.
7823 0 : }
7824 1 : other => unreachable!("unexpected {:?}", other),
7825 1 : }
7826 1 : }
7827 :
7828 1 : async fn find_some_layer(timeline: &Timeline) -> Layer {
7829 1 : let layers = timeline.layers.read().await;
7830 1 : let desc = layers
7831 1 : .layer_map()
7832 1 : .unwrap()
7833 1 : .iter_historic_layers()
7834 1 : .next()
7835 1 : .expect("must find one layer to evict");
7836 1 :
7837 1 : layers.get_from_desc(&desc)
7838 1 : }
7839 : }
|