Line data Source code
1 : pub(crate) mod analysis;
2 : pub(crate) mod compaction;
3 : pub mod delete;
4 : pub(crate) mod detach_ancestor;
5 : mod eviction_task;
6 : pub(crate) mod handle;
7 : mod heatmap_layers_downloader;
8 : pub(crate) mod import_pgdata;
9 : mod init;
10 : pub mod layer_manager;
11 : pub(crate) mod logical_size;
12 : pub mod offload;
13 : pub mod span;
14 : pub mod uninit;
15 : mod walreceiver;
16 :
17 : use anyhow::{anyhow, bail, ensure, Context, Result};
18 : use arc_swap::{ArcSwap, ArcSwapOption};
19 : use bytes::Bytes;
20 : use camino::Utf8Path;
21 : use chrono::{DateTime, Utc};
22 : use compaction::CompactionOutcome;
23 : use enumset::EnumSet;
24 : use fail::fail_point;
25 : use futures::FutureExt;
26 : use futures::{stream::FuturesUnordered, StreamExt};
27 : use handle::ShardTimelineId;
28 : use layer_manager::Shutdown;
29 : use offload::OffloadError;
30 : use once_cell::sync::Lazy;
31 : use pageserver_api::models::PageTraceEvent;
32 : use pageserver_api::{
33 : key::{
34 : KEY_SIZE, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX, NON_INHERITED_RANGE,
35 : SPARSE_RANGE,
36 : },
37 : keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning},
38 : models::{
39 : CompactKeyRange, CompactLsnRange, CompactionAlgorithm, CompactionAlgorithmSettings,
40 : DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy,
41 : InMemoryLayerInfo, LayerMapInfo, LsnLease, TimelineState,
42 : },
43 : reltag::BlockNumber,
44 : shard::{ShardIdentity, ShardNumber, TenantShardId},
45 : };
46 : use rand::Rng;
47 : use remote_storage::DownloadError;
48 : use serde_with::serde_as;
49 : use storage_broker::BrokerClientChannel;
50 : use tokio::runtime::Handle;
51 : use tokio::sync::mpsc::Sender;
52 : use tokio::sync::{oneshot, watch, Notify};
53 : use tokio_util::sync::CancellationToken;
54 : use tracing::*;
55 : use utils::critical;
56 : use utils::rate_limit::RateLimit;
57 : use utils::{
58 : fs_ext,
59 : guard_arc_swap::GuardArcSwap,
60 : pausable_failpoint,
61 : postgres_client::PostgresClientProtocol,
62 : sync::gate::{Gate, GateGuard},
63 : };
64 : use wal_decoder::serialized_batch::{SerializedValueBatch, ValueMeta};
65 :
66 : use std::array;
67 : use std::cmp::{max, min};
68 : use std::collections::btree_map::Entry;
69 : use std::collections::{BTreeMap, HashMap, HashSet};
70 : use std::ops::{ControlFlow, Deref, Range};
71 : use std::sync::atomic::{AtomicBool, AtomicU64, Ordering as AtomicOrdering};
72 : use std::sync::{Arc, Mutex, OnceLock, RwLock, Weak};
73 : use std::time::{Duration, Instant, SystemTime};
74 :
75 : use crate::l0_flush::{self, L0FlushGlobalState};
76 : use crate::tenant::storage_layer::ImageLayerName;
77 : use crate::{
78 : aux_file::AuxFileSizeEstimator,
79 : page_service::TenantManagerTypes,
80 : tenant::{
81 : config::AttachmentMode,
82 : layer_map::{LayerMap, SearchResult},
83 : metadata::TimelineMetadata,
84 : storage_layer::{
85 : inmemory_layer::IndexEntry, BatchLayerWriter, IoConcurrency, PersistentLayerDesc,
86 : ValueReconstructSituation,
87 : },
88 : },
89 : walingest::WalLagCooldown,
90 : walredo,
91 : };
92 : use crate::{
93 : context::{DownloadBehavior, RequestContext},
94 : disk_usage_eviction_task::DiskUsageEvictionInfo,
95 : pgdatadir_mapping::CollectKeySpaceError,
96 : };
97 : use crate::{
98 : disk_usage_eviction_task::finite_f32,
99 : tenant::storage_layer::{
100 : AsLayerDesc, DeltaLayerWriter, EvictionError, ImageLayerWriter, InMemoryLayer, Layer,
101 : LayerAccessStatsReset, LayerName, ResidentLayer, ValueReconstructState,
102 : ValuesReconstructState,
103 : },
104 : };
105 : use crate::{
106 : disk_usage_eviction_task::EvictionCandidate, tenant::storage_layer::delta_layer::DeltaEntry,
107 : };
108 : use crate::{
109 : metrics::ScanLatencyOngoingRecording, tenant::timeline::logical_size::CurrentLogicalSize,
110 : };
111 : use crate::{
112 : pgdatadir_mapping::DirectoryKind,
113 : virtual_file::{MaybeFatalIo, VirtualFile},
114 : };
115 : use crate::{pgdatadir_mapping::LsnForTimestamp, tenant::tasks::BackgroundLoopKind};
116 : use crate::{pgdatadir_mapping::MAX_AUX_FILE_V2_DELTAS, tenant::storage_layer::PersistentLayerKey};
117 : use pageserver_api::config::tenant_conf_defaults::DEFAULT_PITR_INTERVAL;
118 :
119 : use crate::config::PageServerConf;
120 : use crate::keyspace::{KeyPartitioning, KeySpace};
121 : use crate::metrics::{TimelineMetrics, DELTAS_PER_READ_GLOBAL, LAYERS_PER_READ_GLOBAL};
122 : use crate::pgdatadir_mapping::{CalculateLogicalSizeError, MetricsUpdate};
123 : use crate::tenant::config::TenantConfOpt;
124 : use pageserver_api::reltag::RelTag;
125 : use pageserver_api::shard::ShardIndex;
126 :
127 : use postgres_connection::PgConnectionConfig;
128 : use postgres_ffi::{to_pg_timestamp, v14::xlog_utils, WAL_SEGMENT_SIZE};
129 : use utils::{
130 : completion,
131 : generation::Generation,
132 : id::TimelineId,
133 : lsn::{AtomicLsn, Lsn, RecordLsn},
134 : seqwait::SeqWait,
135 : simple_rcu::{Rcu, RcuReadGuard},
136 : };
137 :
138 : use crate::task_mgr;
139 : use crate::task_mgr::TaskKind;
140 : use crate::tenant::gc_result::GcResult;
141 : use crate::ZERO_PAGE;
142 : use pageserver_api::key::Key;
143 :
144 : use self::delete::DeleteTimelineFlow;
145 : pub(super) use self::eviction_task::EvictionTaskTenantState;
146 : use self::eviction_task::EvictionTaskTimelineState;
147 : use self::layer_manager::LayerManager;
148 : use self::logical_size::LogicalSize;
149 : use self::walreceiver::{WalReceiver, WalReceiverConf};
150 :
151 : use super::{
152 : config::TenantConf, storage_layer::LayerVisibilityHint, upload_queue::NotInitialized,
153 : MaybeOffloaded,
154 : };
155 : use super::{
156 : debug_assert_current_span_has_tenant_and_timeline_id, AttachedTenantConf, HeatMapTimeline,
157 : };
158 : use super::{remote_timeline_client::index::IndexPart, storage_layer::LayerFringe};
159 : use super::{
160 : remote_timeline_client::RemoteTimelineClient, remote_timeline_client::WaitCompletionError,
161 : storage_layer::ReadableLayer,
162 : };
163 : use super::{secondary::heatmap::HeatMapLayer, GcError};
164 :
165 : #[cfg(test)]
166 : use pageserver_api::value::Value;
167 :
168 : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
169 : pub(crate) enum FlushLoopState {
170 : NotStarted,
171 : Running {
172 : #[cfg(test)]
173 : expect_initdb_optimization: bool,
174 : #[cfg(test)]
175 : initdb_optimization_count: usize,
176 : },
177 : Exited,
178 : }
179 :
180 : #[derive(Debug, Copy, Clone, PartialEq, Eq)]
181 : pub enum ImageLayerCreationMode {
182 : /// Try to create image layers based on `time_for_new_image_layer`. Used in compaction code path.
183 : Try,
184 : /// Force creating the image layers if possible. For now, no image layers will be created
185 : /// for metadata keys. Used in compaction code path with force flag enabled.
186 : Force,
187 : /// Initial ingestion of the data, and no data should be dropped in this function. This
188 : /// means that no metadata keys should be included in the partitions. Used in flush frozen layer
189 : /// code path.
190 : Initial,
191 : }
192 :
193 : #[derive(Clone, Debug, Default)]
194 : pub enum LastImageLayerCreationStatus {
195 : Incomplete {
196 : /// The last key of the partition (exclusive) that was processed in the last
197 : /// image layer creation attempt. We will continue from this key in the next
198 : /// attempt.
199 : last_key: Key,
200 : },
201 : Complete,
202 : #[default]
203 : Initial,
204 : }
205 :
206 : impl std::fmt::Display for ImageLayerCreationMode {
207 1140 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
208 1140 : write!(f, "{:?}", self)
209 1140 : }
210 : }
211 :
212 : /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things.
213 : /// Can be removed after all refactors are done.
214 56 : fn drop_rlock<T>(rlock: tokio::sync::RwLockReadGuard<T>) {
215 56 : drop(rlock)
216 56 : }
217 :
218 : /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things.
219 : /// Can be removed after all refactors are done.
220 1196 : fn drop_wlock<T>(rlock: tokio::sync::RwLockWriteGuard<'_, T>) {
221 1196 : drop(rlock)
222 1196 : }
223 :
224 : /// The outward-facing resources required to build a Timeline
225 : pub struct TimelineResources {
226 : pub remote_client: RemoteTimelineClient,
227 : pub pagestream_throttle: Arc<crate::tenant::throttle::Throttle>,
228 : pub pagestream_throttle_metrics: Arc<crate::metrics::tenant_throttling::Pagestream>,
229 : pub l0_compaction_trigger: Arc<Notify>,
230 : pub l0_flush_global_state: l0_flush::L0FlushGlobalState,
231 : }
232 :
233 : /// The relation size cache caches relation sizes at the end of the timeline. It speeds up WAL
234 : /// ingestion considerably, because WAL ingestion needs to check on most records if the record
235 : /// implicitly extends the relation. At startup, `complete_as_of` is initialized to the current end
236 : /// of the timeline (disk_consistent_lsn). It's used on reads of relation sizes to check if the
237 : /// value can be used to also update the cache, see [`Timeline::update_cached_rel_size`].
238 : pub(crate) struct RelSizeCache {
239 : pub(crate) complete_as_of: Lsn,
240 : pub(crate) map: HashMap<RelTag, (Lsn, BlockNumber)>,
241 : }
242 :
243 : pub struct Timeline {
244 : pub(crate) conf: &'static PageServerConf,
245 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
246 :
247 : myself: Weak<Self>,
248 :
249 : pub(crate) tenant_shard_id: TenantShardId,
250 : pub timeline_id: TimelineId,
251 :
252 : /// The generation of the tenant that instantiated us: this is used for safety when writing remote objects.
253 : /// Never changes for the lifetime of this [`Timeline`] object.
254 : ///
255 : /// This duplicates the generation stored in LocationConf, but that structure is mutable:
256 : /// this copy enforces the invariant that generatio doesn't change during a Tenant's lifetime.
257 : pub(crate) generation: Generation,
258 :
259 : /// The detailed sharding information from our parent Tenant. This enables us to map keys
260 : /// to shards, and is constant through the lifetime of this Timeline.
261 : shard_identity: ShardIdentity,
262 :
263 : pub pg_version: u32,
264 :
265 : /// The tuple has two elements.
266 : /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote).
267 : /// 2. `LayerMap`, the acceleration data structure for `get_reconstruct_data`.
268 : ///
269 : /// `LayerMap` maps out the `(PAGE,LSN) / (KEY,LSN)` space, which is composed of `(KeyRange, LsnRange)` rectangles.
270 : /// We describe these rectangles through the `PersistentLayerDesc` struct.
271 : ///
272 : /// When we want to reconstruct a page, we first find the `PersistentLayerDesc`'s that we need for page reconstruction,
273 : /// using `LayerMap`. Then, we use `LayerFileManager` to get the `PersistentLayer`'s that correspond to the
274 : /// `PersistentLayerDesc`'s.
275 : ///
276 : /// Hence, it's important to keep things coherent. The `LayerFileManager` must always have an entry for all
277 : /// `PersistentLayerDesc`'s in the `LayerMap`. If it doesn't, `LayerFileManager::get_from_desc` will panic at
278 : /// runtime, e.g., during page reconstruction.
279 : ///
280 : /// In the future, we'll be able to split up the tuple of LayerMap and `LayerFileManager`,
281 : /// so that e.g. on-demand-download/eviction, and layer spreading, can operate just on `LayerFileManager`.
282 : pub(crate) layers: tokio::sync::RwLock<LayerManager>,
283 :
284 : last_freeze_at: AtomicLsn,
285 : // Atomic would be more appropriate here.
286 : last_freeze_ts: RwLock<Instant>,
287 :
288 : pub(crate) standby_horizon: AtomicLsn,
289 :
290 : // WAL redo manager. `None` only for broken tenants.
291 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
292 :
293 : /// Remote storage client.
294 : /// See [`remote_timeline_client`](super::remote_timeline_client) module comment for details.
295 : pub(crate) remote_client: Arc<RemoteTimelineClient>,
296 :
297 : // What page versions do we hold in the repository? If we get a
298 : // request > last_record_lsn, we need to wait until we receive all
299 : // the WAL up to the request. The SeqWait provides functions for
300 : // that. TODO: If we get a request for an old LSN, such that the
301 : // versions have already been garbage collected away, we should
302 : // throw an error, but we don't track that currently.
303 : //
304 : // last_record_lsn.load().last points to the end of last processed WAL record.
305 : //
306 : // We also remember the starting point of the previous record in
307 : // 'last_record_lsn.load().prev'. It's used to set the xl_prev pointer of the
308 : // first WAL record when the node is started up. But here, we just
309 : // keep track of it.
310 : last_record_lsn: SeqWait<RecordLsn, Lsn>,
311 :
312 : // All WAL records have been processed and stored durably on files on
313 : // local disk, up to this LSN. On crash and restart, we need to re-process
314 : // the WAL starting from this point.
315 : //
316 : // Some later WAL records might have been processed and also flushed to disk
317 : // already, so don't be surprised to see some, but there's no guarantee on
318 : // them yet.
319 : disk_consistent_lsn: AtomicLsn,
320 :
321 : // Parent timeline that this timeline was branched from, and the LSN
322 : // of the branch point.
323 : ancestor_timeline: Option<Arc<Timeline>>,
324 : ancestor_lsn: Lsn,
325 :
326 : pub(super) metrics: TimelineMetrics,
327 :
328 : // `Timeline` doesn't write these metrics itself, but it manages the lifetime. Code
329 : // in `crate::page_service` writes these metrics.
330 : pub(crate) query_metrics: crate::metrics::SmgrQueryTimePerTimeline,
331 :
332 : directory_metrics_inited: [AtomicBool; DirectoryKind::KINDS_NUM],
333 : directory_metrics: [AtomicU64; DirectoryKind::KINDS_NUM],
334 :
335 : /// Ensures layers aren't frozen by checkpointer between
336 : /// [`Timeline::get_layer_for_write`] and layer reads.
337 : /// Locked automatically by [`TimelineWriter`] and checkpointer.
338 : /// Must always be acquired before the layer map/individual layer lock
339 : /// to avoid deadlock.
340 : ///
341 : /// The state is cleared upon freezing.
342 : write_lock: tokio::sync::Mutex<Option<TimelineWriterState>>,
343 :
344 : /// Used to avoid multiple `flush_loop` tasks running
345 : pub(super) flush_loop_state: Mutex<FlushLoopState>,
346 :
347 : /// layer_flush_start_tx can be used to wake up the layer-flushing task.
348 : /// - The u64 value is a counter, incremented every time a new flush cycle is requested.
349 : /// The flush cycle counter is sent back on the layer_flush_done channel when
350 : /// the flush finishes. You can use that to wait for the flush to finish.
351 : /// - The LSN is updated to max() of its current value and the latest disk_consistent_lsn
352 : /// read by whoever sends an update
353 : layer_flush_start_tx: tokio::sync::watch::Sender<(u64, Lsn)>,
354 : /// to be notified when layer flushing has finished, subscribe to the layer_flush_done channel
355 : layer_flush_done_tx: tokio::sync::watch::Sender<(u64, Result<(), FlushLayerError>)>,
356 :
357 : // The LSN at which we have executed GC: whereas [`Self::gc_info`] records the LSN at which
358 : // we _intend_ to GC (i.e. the PITR cutoff), this LSN records where we actually last did it.
359 : // Because PITR interval is mutable, it's possible for this LSN to be earlier or later than
360 : // the planned GC cutoff.
361 : pub applied_gc_cutoff_lsn: Rcu<Lsn>,
362 :
363 : pub(crate) gc_compaction_layer_update_lock: tokio::sync::RwLock<()>,
364 :
365 : // List of child timelines and their branch points. This is needed to avoid
366 : // garbage collecting data that is still needed by the child timelines.
367 : pub(crate) gc_info: std::sync::RwLock<GcInfo>,
368 :
369 : pub(crate) last_image_layer_creation_status: ArcSwap<LastImageLayerCreationStatus>,
370 :
371 : // It may change across major versions so for simplicity
372 : // keep it after running initdb for a timeline.
373 : // It is needed in checks when we want to error on some operations
374 : // when they are requested for pre-initdb lsn.
375 : // It can be unified with latest_gc_cutoff_lsn under some "first_valid_lsn",
376 : // though let's keep them both for better error visibility.
377 : pub initdb_lsn: Lsn,
378 :
379 : /// The repartitioning result. Allows a single writer and multiple readers.
380 : pub(crate) partitioning: GuardArcSwap<((KeyPartitioning, SparseKeyPartitioning), Lsn)>,
381 :
382 : /// Configuration: how often should the partitioning be recalculated.
383 : repartition_threshold: u64,
384 :
385 : last_image_layer_creation_check_at: AtomicLsn,
386 : last_image_layer_creation_check_instant: std::sync::Mutex<Option<Instant>>,
387 :
388 : /// Current logical size of the "datadir", at the last LSN.
389 : current_logical_size: LogicalSize,
390 :
391 : /// Information about the last processed message by the WAL receiver,
392 : /// or None if WAL receiver has not received anything for this timeline
393 : /// yet.
394 : pub last_received_wal: Mutex<Option<WalReceiverInfo>>,
395 : pub walreceiver: Mutex<Option<WalReceiver>>,
396 :
397 : /// Relation size cache
398 : pub(crate) rel_size_cache: RwLock<RelSizeCache>,
399 :
400 : download_all_remote_layers_task_info: RwLock<Option<DownloadRemoteLayersTaskInfo>>,
401 :
402 : state: watch::Sender<TimelineState>,
403 :
404 : /// Prevent two tasks from deleting the timeline at the same time. If held, the
405 : /// timeline is being deleted. If 'true', the timeline has already been deleted.
406 : pub delete_progress: TimelineDeleteProgress,
407 :
408 : eviction_task_timeline_state: tokio::sync::Mutex<EvictionTaskTimelineState>,
409 :
410 : /// Load or creation time information about the disk_consistent_lsn and when the loading
411 : /// happened. Used for consumption metrics.
412 : pub(crate) loaded_at: (Lsn, SystemTime),
413 :
414 : /// Gate to prevent shutdown completing while I/O is still happening to this timeline's data
415 : pub(crate) gate: Gate,
416 :
417 : /// Cancellation token scoped to this timeline: anything doing long-running work relating
418 : /// to the timeline should drop out when this token fires.
419 : pub(crate) cancel: CancellationToken,
420 :
421 : /// Make sure we only have one running compaction at a time in tests.
422 : ///
423 : /// Must only be taken in two places:
424 : /// - [`Timeline::compact`] (this file)
425 : /// - [`delete::delete_local_timeline_directory`]
426 : ///
427 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
428 : compaction_lock: tokio::sync::Mutex<()>,
429 :
430 : /// If true, the last compaction failed.
431 : compaction_failed: AtomicBool,
432 :
433 : /// Notifies the tenant compaction loop that there is pending L0 compaction work.
434 : l0_compaction_trigger: Arc<Notify>,
435 :
436 : /// Make sure we only have one running gc at a time.
437 : ///
438 : /// Must only be taken in two places:
439 : /// - [`Timeline::gc`] (this file)
440 : /// - [`delete::delete_local_timeline_directory`]
441 : ///
442 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
443 : gc_lock: tokio::sync::Mutex<()>,
444 :
445 : /// Cloned from [`super::Tenant::pagestream_throttle`] on construction.
446 : pub(crate) pagestream_throttle: Arc<crate::tenant::throttle::Throttle>,
447 :
448 : /// Size estimator for aux file v2
449 : pub(crate) aux_file_size_estimator: AuxFileSizeEstimator,
450 :
451 : /// Some test cases directly place keys into the timeline without actually modifying the directory
452 : /// keys (i.e., DB_DIR). The test cases creating such keys will put the keyspaces here, so that
453 : /// these keys won't get garbage-collected during compaction/GC. This field only modifies the dense
454 : /// keyspace return value of `collect_keyspace`. For sparse keyspaces, use AUX keys for testing, and
455 : /// in the future, add `extra_test_sparse_keyspace` if necessary.
456 : #[cfg(test)]
457 : pub(crate) extra_test_dense_keyspace: ArcSwap<KeySpace>,
458 :
459 : pub(crate) l0_flush_global_state: L0FlushGlobalState,
460 :
461 : pub(crate) handles: handle::PerTimelineState<TenantManagerTypes>,
462 :
463 : pub(crate) attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
464 :
465 : /// Cf. [`crate::tenant::CreateTimelineIdempotency`].
466 : pub(crate) create_idempotency: crate::tenant::CreateTimelineIdempotency,
467 :
468 : /// If Some, collects GetPage metadata for an ongoing PageTrace.
469 : pub(crate) page_trace: ArcSwapOption<Sender<PageTraceEvent>>,
470 :
471 : previous_heatmap: ArcSwapOption<PreviousHeatmap>,
472 :
473 : /// May host a background Tokio task which downloads all the layers from the current
474 : /// heatmap on demand.
475 : heatmap_layers_downloader: Mutex<Option<heatmap_layers_downloader::HeatmapLayersDownloader>>,
476 : }
477 :
478 : pub(crate) enum PreviousHeatmap {
479 : Active {
480 : heatmap: HeatMapTimeline,
481 : read_at: std::time::Instant,
482 : },
483 : Obsolete,
484 : }
485 :
486 : pub type TimelineDeleteProgress = Arc<tokio::sync::Mutex<DeleteTimelineFlow>>;
487 :
488 : pub struct WalReceiverInfo {
489 : pub wal_source_connconf: PgConnectionConfig,
490 : pub last_received_msg_lsn: Lsn,
491 : pub last_received_msg_ts: u128,
492 : }
493 :
494 : /// Information about how much history needs to be retained, needed by
495 : /// Garbage Collection.
496 : #[derive(Default)]
497 : pub(crate) struct GcInfo {
498 : /// Specific LSNs that are needed.
499 : ///
500 : /// Currently, this includes all points where child branches have
501 : /// been forked off from. In the future, could also include
502 : /// explicit user-defined snapshot points.
503 : pub(crate) retain_lsns: Vec<(Lsn, TimelineId, MaybeOffloaded)>,
504 :
505 : /// The cutoff coordinates, which are combined by selecting the minimum.
506 : pub(crate) cutoffs: GcCutoffs,
507 :
508 : /// Leases granted to particular LSNs.
509 : pub(crate) leases: BTreeMap<Lsn, LsnLease>,
510 :
511 : /// Whether our branch point is within our ancestor's PITR interval (for cost estimation)
512 : pub(crate) within_ancestor_pitr: bool,
513 : }
514 :
515 : impl GcInfo {
516 600 : pub(crate) fn min_cutoff(&self) -> Lsn {
517 600 : self.cutoffs.select_min()
518 600 : }
519 :
520 464 : pub(super) fn insert_child(
521 464 : &mut self,
522 464 : child_id: TimelineId,
523 464 : child_lsn: Lsn,
524 464 : is_offloaded: MaybeOffloaded,
525 464 : ) {
526 464 : self.retain_lsns.push((child_lsn, child_id, is_offloaded));
527 464 : self.retain_lsns.sort_by_key(|i| i.0);
528 464 : }
529 :
530 8 : pub(super) fn remove_child_maybe_offloaded(
531 8 : &mut self,
532 8 : child_id: TimelineId,
533 8 : maybe_offloaded: MaybeOffloaded,
534 8 : ) -> bool {
535 8 : // Remove at most one element. Needed for correctness if there is two live `Timeline` objects referencing
536 8 : // the same timeline. Shouldn't but maybe can occur when Arc's live longer than intended.
537 8 : let mut removed = false;
538 12 : self.retain_lsns.retain(|i| {
539 12 : if removed {
540 4 : return true;
541 8 : }
542 8 : let remove = i.1 == child_id && i.2 == maybe_offloaded;
543 8 : removed |= remove;
544 8 : !remove
545 12 : });
546 8 : removed
547 8 : }
548 :
549 8 : pub(super) fn remove_child_not_offloaded(&mut self, child_id: TimelineId) -> bool {
550 8 : self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::No)
551 8 : }
552 :
553 0 : pub(super) fn remove_child_offloaded(&mut self, child_id: TimelineId) -> bool {
554 0 : self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::Yes)
555 0 : }
556 464 : pub(crate) fn lsn_covered_by_lease(&self, lsn: Lsn) -> bool {
557 464 : self.leases.contains_key(&lsn)
558 464 : }
559 : }
560 :
561 : /// The `GcInfo` component describing which Lsns need to be retained. Functionally, this
562 : /// is a single number (the oldest LSN which we must retain), but it internally distinguishes
563 : /// between time-based and space-based retention for observability and consumption metrics purposes.
564 : #[derive(Debug, Clone)]
565 : pub(crate) struct GcCutoffs {
566 : /// Calculated from the [`TenantConf::gc_horizon`], this LSN indicates how much
567 : /// history we must keep to retain a specified number of bytes of WAL.
568 : pub(crate) space: Lsn,
569 :
570 : /// Calculated from [`TenantConf::pitr_interval`], this LSN indicates how much
571 : /// history we must keep to enable reading back at least the PITR interval duration.
572 : pub(crate) time: Lsn,
573 : }
574 :
575 : impl Default for GcCutoffs {
576 896 : fn default() -> Self {
577 896 : Self {
578 896 : space: Lsn::INVALID,
579 896 : time: Lsn::INVALID,
580 896 : }
581 896 : }
582 : }
583 :
584 : impl GcCutoffs {
585 600 : fn select_min(&self) -> Lsn {
586 600 : std::cmp::min(self.space, self.time)
587 600 : }
588 : }
589 :
590 : pub(crate) struct TimelineVisitOutcome {
591 : completed_keyspace: KeySpace,
592 : image_covered_keyspace: KeySpace,
593 : }
594 :
595 : /// An error happened in a get() operation.
596 : #[derive(thiserror::Error, Debug)]
597 : pub(crate) enum PageReconstructError {
598 : #[error(transparent)]
599 : Other(anyhow::Error),
600 :
601 : #[error("Ancestor LSN wait error: {0}")]
602 : AncestorLsnTimeout(WaitLsnError),
603 :
604 : #[error("timeline shutting down")]
605 : Cancelled,
606 :
607 : /// An error happened replaying WAL records
608 : #[error(transparent)]
609 : WalRedo(anyhow::Error),
610 :
611 : #[error("{0}")]
612 : MissingKey(MissingKeyError),
613 : }
614 :
615 : impl From<anyhow::Error> for PageReconstructError {
616 0 : fn from(value: anyhow::Error) -> Self {
617 0 : // with walingest.rs many PageReconstructError are wrapped in as anyhow::Error
618 0 : match value.downcast::<PageReconstructError>() {
619 0 : Ok(pre) => pre,
620 0 : Err(other) => PageReconstructError::Other(other),
621 : }
622 0 : }
623 : }
624 :
625 : impl From<utils::bin_ser::DeserializeError> for PageReconstructError {
626 0 : fn from(value: utils::bin_ser::DeserializeError) -> Self {
627 0 : PageReconstructError::Other(anyhow::Error::new(value).context("deserialization failure"))
628 0 : }
629 : }
630 :
631 : impl From<layer_manager::Shutdown> for PageReconstructError {
632 0 : fn from(_: layer_manager::Shutdown) -> Self {
633 0 : PageReconstructError::Cancelled
634 0 : }
635 : }
636 :
637 : impl GetVectoredError {
638 : #[cfg(test)]
639 12 : pub(crate) fn is_missing_key_error(&self) -> bool {
640 12 : matches!(self, Self::MissingKey(_))
641 12 : }
642 : }
643 :
644 : impl From<layer_manager::Shutdown> for GetVectoredError {
645 0 : fn from(_: layer_manager::Shutdown) -> Self {
646 0 : GetVectoredError::Cancelled
647 0 : }
648 : }
649 :
650 : /// A layer identifier when used in the [`ReadPath`] structure. This enum is for observability purposes
651 : /// only and not used by the "real read path".
652 : pub enum ReadPathLayerId {
653 : PersistentLayer(PersistentLayerKey),
654 : InMemoryLayer(Range<Lsn>),
655 : }
656 :
657 : impl std::fmt::Display for ReadPathLayerId {
658 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
659 0 : match self {
660 0 : ReadPathLayerId::PersistentLayer(key) => write!(f, "{}", key),
661 0 : ReadPathLayerId::InMemoryLayer(range) => {
662 0 : write!(f, "in-mem {}..{}", range.start, range.end)
663 : }
664 : }
665 0 : }
666 : }
667 : pub struct ReadPath {
668 : keyspace: KeySpace,
669 : lsn: Lsn,
670 : path: Vec<(ReadPathLayerId, KeySpace, Range<Lsn>)>,
671 : }
672 :
673 : impl ReadPath {
674 1255315 : pub fn new(keyspace: KeySpace, lsn: Lsn) -> Self {
675 1255315 : Self {
676 1255315 : keyspace,
677 1255315 : lsn,
678 1255315 : path: Vec::new(),
679 1255315 : }
680 1255315 : }
681 :
682 1692763 : pub fn record_layer_visit(
683 1692763 : &mut self,
684 1692763 : layer_to_read: &ReadableLayer,
685 1692763 : keyspace_to_read: &KeySpace,
686 1692763 : lsn_range: &Range<Lsn>,
687 1692763 : ) {
688 1692763 : let id = match layer_to_read {
689 479416 : ReadableLayer::PersistentLayer(layer) => {
690 479416 : ReadPathLayerId::PersistentLayer(layer.layer_desc().key())
691 : }
692 1213347 : ReadableLayer::InMemoryLayer(layer) => {
693 1213347 : ReadPathLayerId::InMemoryLayer(layer.get_lsn_range())
694 : }
695 : };
696 1692763 : self.path
697 1692763 : .push((id, keyspace_to_read.clone(), lsn_range.clone()));
698 1692763 : }
699 : }
700 :
701 : impl std::fmt::Display for ReadPath {
702 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
703 0 : writeln!(f, "Read path for {} at lsn {}:", self.keyspace, self.lsn)?;
704 0 : for (idx, (layer_id, keyspace, lsn_range)) in self.path.iter().enumerate() {
705 0 : writeln!(
706 0 : f,
707 0 : "{}: {} {}..{} {}",
708 0 : idx, layer_id, lsn_range.start, lsn_range.end, keyspace
709 0 : )?;
710 : }
711 0 : Ok(())
712 0 : }
713 : }
714 :
715 : #[derive(thiserror::Error)]
716 : pub struct MissingKeyError {
717 : key: Key,
718 : shard: ShardNumber,
719 : cont_lsn: Lsn,
720 : request_lsn: Lsn,
721 : ancestor_lsn: Option<Lsn>,
722 : /// Debug information about the read path if there's an error
723 : read_path: Option<ReadPath>,
724 : backtrace: Option<std::backtrace::Backtrace>,
725 : }
726 :
727 : impl std::fmt::Debug for MissingKeyError {
728 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
729 0 : write!(f, "{}", self)
730 0 : }
731 : }
732 :
733 : impl std::fmt::Display for MissingKeyError {
734 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
735 0 : write!(
736 0 : f,
737 0 : "could not find data for key {} (shard {:?}) at LSN {}, request LSN {}",
738 0 : self.key, self.shard, self.cont_lsn, self.request_lsn
739 0 : )?;
740 :
741 0 : if let Some(ref ancestor_lsn) = self.ancestor_lsn {
742 0 : write!(f, ", ancestor {}", ancestor_lsn)?;
743 0 : }
744 :
745 0 : if let Some(ref read_path) = self.read_path {
746 0 : write!(f, "\n{}", read_path)?;
747 0 : }
748 :
749 0 : if let Some(ref backtrace) = self.backtrace {
750 0 : write!(f, "\n{}", backtrace)?;
751 0 : }
752 :
753 0 : Ok(())
754 0 : }
755 : }
756 :
757 : impl PageReconstructError {
758 : /// Returns true if this error indicates a tenant/timeline shutdown alike situation
759 0 : pub(crate) fn is_stopping(&self) -> bool {
760 : use PageReconstructError::*;
761 0 : match self {
762 0 : Cancelled => true,
763 0 : Other(_) | AncestorLsnTimeout(_) | WalRedo(_) | MissingKey(_) => false,
764 : }
765 0 : }
766 : }
767 :
768 : #[derive(thiserror::Error, Debug)]
769 : pub(crate) enum CreateImageLayersError {
770 : #[error("timeline shutting down")]
771 : Cancelled,
772 :
773 : #[error("read failed")]
774 : GetVectoredError(#[source] GetVectoredError),
775 :
776 : #[error("reconstruction failed")]
777 : PageReconstructError(#[source] PageReconstructError),
778 :
779 : #[error(transparent)]
780 : Other(#[from] anyhow::Error),
781 : }
782 :
783 : impl From<layer_manager::Shutdown> for CreateImageLayersError {
784 0 : fn from(_: layer_manager::Shutdown) -> Self {
785 0 : CreateImageLayersError::Cancelled
786 0 : }
787 : }
788 :
789 : #[derive(thiserror::Error, Debug, Clone)]
790 : pub(crate) enum FlushLayerError {
791 : /// Timeline cancellation token was cancelled
792 : #[error("timeline shutting down")]
793 : Cancelled,
794 :
795 : /// We tried to flush a layer while the Timeline is in an unexpected state
796 : #[error("cannot flush frozen layers when flush_loop is not running, state is {0:?}")]
797 : NotRunning(FlushLoopState),
798 :
799 : // Arc<> the following non-clonable error types: we must be Clone-able because the flush error is propagated from the flush
800 : // loop via a watch channel, where we can only borrow it.
801 : #[error("create image layers (shared)")]
802 : CreateImageLayersError(Arc<CreateImageLayersError>),
803 :
804 : #[error("other (shared)")]
805 : Other(#[from] Arc<anyhow::Error>),
806 : }
807 :
808 : impl FlushLayerError {
809 : // When crossing from generic anyhow errors to this error type, we explicitly check
810 : // for timeline cancellation to avoid logging inoffensive shutdown errors as warn/err.
811 0 : fn from_anyhow(timeline: &Timeline, err: anyhow::Error) -> Self {
812 0 : let cancelled = timeline.cancel.is_cancelled()
813 : // The upload queue might have been shut down before the official cancellation of the timeline.
814 0 : || err
815 0 : .downcast_ref::<NotInitialized>()
816 0 : .map(NotInitialized::is_stopping)
817 0 : .unwrap_or_default();
818 0 : if cancelled {
819 0 : Self::Cancelled
820 : } else {
821 0 : Self::Other(Arc::new(err))
822 : }
823 0 : }
824 : }
825 :
826 : impl From<layer_manager::Shutdown> for FlushLayerError {
827 0 : fn from(_: layer_manager::Shutdown) -> Self {
828 0 : FlushLayerError::Cancelled
829 0 : }
830 : }
831 :
832 : #[derive(thiserror::Error, Debug)]
833 : pub(crate) enum GetVectoredError {
834 : #[error("timeline shutting down")]
835 : Cancelled,
836 :
837 : #[error("requested too many keys: {0} > {}", Timeline::MAX_GET_VECTORED_KEYS)]
838 : Oversized(u64),
839 :
840 : #[error("requested at invalid LSN: {0}")]
841 : InvalidLsn(Lsn),
842 :
843 : #[error("requested key not found: {0}")]
844 : MissingKey(MissingKeyError),
845 :
846 : #[error("ancestry walk")]
847 : GetReadyAncestorError(#[source] GetReadyAncestorError),
848 :
849 : #[error(transparent)]
850 : Other(#[from] anyhow::Error),
851 : }
852 :
853 : impl From<GetReadyAncestorError> for GetVectoredError {
854 4 : fn from(value: GetReadyAncestorError) -> Self {
855 : use GetReadyAncestorError::*;
856 4 : match value {
857 0 : Cancelled => GetVectoredError::Cancelled,
858 : AncestorLsnTimeout(_) | BadState { .. } => {
859 4 : GetVectoredError::GetReadyAncestorError(value)
860 : }
861 : }
862 4 : }
863 : }
864 :
865 : #[derive(thiserror::Error, Debug)]
866 : pub(crate) enum GetReadyAncestorError {
867 : #[error("ancestor LSN wait error")]
868 : AncestorLsnTimeout(#[from] WaitLsnError),
869 :
870 : #[error("bad state on timeline {timeline_id}: {state:?}")]
871 : BadState {
872 : timeline_id: TimelineId,
873 : state: TimelineState,
874 : },
875 :
876 : #[error("cancelled")]
877 : Cancelled,
878 : }
879 :
880 : #[derive(Clone, Copy)]
881 : pub enum LogicalSizeCalculationCause {
882 : Initial,
883 : ConsumptionMetricsSyntheticSize,
884 : EvictionTaskImitation,
885 : TenantSizeHandler,
886 : }
887 :
888 : pub enum GetLogicalSizePriority {
889 : User,
890 : Background,
891 : }
892 :
893 0 : #[derive(Debug, enumset::EnumSetType)]
894 : pub(crate) enum CompactFlags {
895 : ForceRepartition,
896 : ForceImageLayerCreation,
897 : ForceL0Compaction,
898 : OnlyL0Compaction,
899 : EnhancedGcBottomMostCompaction,
900 : DryRun,
901 : /// Disables compaction yielding e.g. due to high L0 count. This is set e.g. when requesting
902 : /// compaction via HTTP API.
903 : NoYield,
904 : }
905 :
906 : #[serde_with::serde_as]
907 0 : #[derive(Debug, Clone, serde::Deserialize)]
908 : pub(crate) struct CompactRequest {
909 : pub compact_key_range: Option<CompactKeyRange>,
910 : pub compact_lsn_range: Option<CompactLsnRange>,
911 : /// Whether the compaction job should be scheduled.
912 : #[serde(default)]
913 : pub scheduled: bool,
914 : /// Whether the compaction job should be split across key ranges.
915 : #[serde(default)]
916 : pub sub_compaction: bool,
917 : /// Max job size for each subcompaction job.
918 : pub sub_compaction_max_job_size_mb: Option<u64>,
919 : }
920 :
921 : #[derive(Debug, Clone, Default)]
922 : pub(crate) struct CompactOptions {
923 : pub flags: EnumSet<CompactFlags>,
924 : /// If set, the compaction will only compact the key range specified by this option.
925 : /// This option is only used by GC compaction. For the full explanation, see [`compaction::GcCompactJob`].
926 : pub compact_key_range: Option<CompactKeyRange>,
927 : /// If set, the compaction will only compact the LSN within this value.
928 : /// This option is only used by GC compaction. For the full explanation, see [`compaction::GcCompactJob`].
929 : pub compact_lsn_range: Option<CompactLsnRange>,
930 : /// Enable sub-compaction (split compaction job across key ranges).
931 : /// This option is only used by GC compaction.
932 : pub sub_compaction: bool,
933 : /// Set job size for the GC compaction.
934 : /// This option is only used by GC compaction.
935 : pub sub_compaction_max_job_size_mb: Option<u64>,
936 : }
937 :
938 : impl std::fmt::Debug for Timeline {
939 0 : fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
940 0 : write!(f, "Timeline<{}>", self.timeline_id)
941 0 : }
942 : }
943 :
944 : #[derive(thiserror::Error, Debug)]
945 : pub(crate) enum WaitLsnError {
946 : // Called on a timeline which is shutting down
947 : #[error("Shutdown")]
948 : Shutdown,
949 :
950 : // Called on an timeline not in active state or shutting down
951 : #[error("Bad timeline state: {0:?}")]
952 : BadState(TimelineState),
953 :
954 : // Timeout expired while waiting for LSN to catch up with goal.
955 : #[error("{0}")]
956 : Timeout(String),
957 : }
958 :
959 : // The impls below achieve cancellation mapping for errors.
960 : // Perhaps there's a way of achieving this with less cruft.
961 :
962 : impl From<CreateImageLayersError> for CompactionError {
963 0 : fn from(e: CreateImageLayersError) -> Self {
964 0 : match e {
965 0 : CreateImageLayersError::Cancelled => CompactionError::ShuttingDown,
966 0 : CreateImageLayersError::Other(e) => {
967 0 : CompactionError::Other(e.context("create image layers"))
968 : }
969 0 : _ => CompactionError::Other(e.into()),
970 : }
971 0 : }
972 : }
973 :
974 : impl From<CreateImageLayersError> for FlushLayerError {
975 0 : fn from(e: CreateImageLayersError) -> Self {
976 0 : match e {
977 0 : CreateImageLayersError::Cancelled => FlushLayerError::Cancelled,
978 0 : any => FlushLayerError::CreateImageLayersError(Arc::new(any)),
979 : }
980 0 : }
981 : }
982 :
983 : impl From<PageReconstructError> for CreateImageLayersError {
984 0 : fn from(e: PageReconstructError) -> Self {
985 0 : match e {
986 0 : PageReconstructError::Cancelled => CreateImageLayersError::Cancelled,
987 0 : _ => CreateImageLayersError::PageReconstructError(e),
988 : }
989 0 : }
990 : }
991 :
992 : impl From<GetVectoredError> for CreateImageLayersError {
993 0 : fn from(e: GetVectoredError) -> Self {
994 0 : match e {
995 0 : GetVectoredError::Cancelled => CreateImageLayersError::Cancelled,
996 0 : _ => CreateImageLayersError::GetVectoredError(e),
997 : }
998 0 : }
999 : }
1000 :
1001 : impl From<GetVectoredError> for PageReconstructError {
1002 12 : fn from(e: GetVectoredError) -> Self {
1003 12 : match e {
1004 0 : GetVectoredError::Cancelled => PageReconstructError::Cancelled,
1005 0 : GetVectoredError::InvalidLsn(_) => PageReconstructError::Other(anyhow!("Invalid LSN")),
1006 0 : err @ GetVectoredError::Oversized(_) => PageReconstructError::Other(err.into()),
1007 8 : GetVectoredError::MissingKey(err) => PageReconstructError::MissingKey(err),
1008 4 : GetVectoredError::GetReadyAncestorError(err) => PageReconstructError::from(err),
1009 0 : GetVectoredError::Other(err) => PageReconstructError::Other(err),
1010 : }
1011 12 : }
1012 : }
1013 :
1014 : impl From<GetReadyAncestorError> for PageReconstructError {
1015 4 : fn from(e: GetReadyAncestorError) -> Self {
1016 : use GetReadyAncestorError::*;
1017 4 : match e {
1018 0 : AncestorLsnTimeout(wait_err) => PageReconstructError::AncestorLsnTimeout(wait_err),
1019 4 : bad_state @ BadState { .. } => PageReconstructError::Other(anyhow::anyhow!(bad_state)),
1020 0 : Cancelled => PageReconstructError::Cancelled,
1021 : }
1022 4 : }
1023 : }
1024 :
1025 : pub(crate) enum WaitLsnTimeout {
1026 : Custom(Duration),
1027 : // Use the [`PageServerConf::wait_lsn_timeout`] default
1028 : Default,
1029 : }
1030 :
1031 : pub(crate) enum WaitLsnWaiter<'a> {
1032 : Timeline(&'a Timeline),
1033 : Tenant,
1034 : PageService,
1035 : HttpEndpoint,
1036 : }
1037 :
1038 : /// Argument to [`Timeline::shutdown`].
1039 : #[derive(Debug, Clone, Copy)]
1040 : pub(crate) enum ShutdownMode {
1041 : /// Graceful shutdown, may do a lot of I/O as we flush any open layers to disk and then
1042 : /// also to remote storage. This method can easily take multiple seconds for a busy timeline.
1043 : ///
1044 : /// While we are flushing, we continue to accept read I/O for LSNs ingested before
1045 : /// the call to [`Timeline::shutdown`].
1046 : FreezeAndFlush,
1047 : /// Only flush the layers to the remote storage without freezing any open layers. Flush the deletion
1048 : /// queue. This is the mode used by ancestor detach and any other operations that reloads a tenant
1049 : /// but not increasing the generation number. Note that this mode cannot be used at tenant shutdown,
1050 : /// as flushing the deletion queue at that time will cause shutdown-in-progress errors.
1051 : Reload,
1052 : /// Shut down immediately, without waiting for any open layers to flush.
1053 : Hard,
1054 : }
1055 :
1056 : enum ImageLayerCreationOutcome {
1057 : /// We generated an image layer
1058 : Generated {
1059 : unfinished_image_layer: ImageLayerWriter,
1060 : },
1061 : /// The key range is empty
1062 : Empty,
1063 : /// (Only used in metadata image layer creation), after reading the metadata keys, we decide to skip
1064 : /// the image layer creation.
1065 : Skip,
1066 : }
1067 :
1068 : /// Public interface functions
1069 : impl Timeline {
1070 : /// Get the LSN where this branch was created
1071 8 : pub(crate) fn get_ancestor_lsn(&self) -> Lsn {
1072 8 : self.ancestor_lsn
1073 8 : }
1074 :
1075 : /// Get the ancestor's timeline id
1076 24 : pub(crate) fn get_ancestor_timeline_id(&self) -> Option<TimelineId> {
1077 24 : self.ancestor_timeline
1078 24 : .as_ref()
1079 24 : .map(|ancestor| ancestor.timeline_id)
1080 24 : }
1081 :
1082 : /// Get the ancestor timeline
1083 4 : pub(crate) fn ancestor_timeline(&self) -> Option<&Arc<Timeline>> {
1084 4 : self.ancestor_timeline.as_ref()
1085 4 : }
1086 :
1087 : /// Get the bytes written since the PITR cutoff on this branch, and
1088 : /// whether this branch's ancestor_lsn is within its parent's PITR.
1089 0 : pub(crate) fn get_pitr_history_stats(&self) -> (u64, bool) {
1090 0 : let gc_info = self.gc_info.read().unwrap();
1091 0 : let history = self
1092 0 : .get_last_record_lsn()
1093 0 : .checked_sub(gc_info.cutoffs.time)
1094 0 : .unwrap_or(Lsn(0))
1095 0 : .0;
1096 0 : (history, gc_info.within_ancestor_pitr)
1097 0 : }
1098 :
1099 : /// Read timeline's GC cutoff: this is the LSN at which GC has started to happen
1100 1708419 : pub(crate) fn get_applied_gc_cutoff_lsn(&self) -> RcuReadGuard<Lsn> {
1101 1708419 : self.applied_gc_cutoff_lsn.read()
1102 1708419 : }
1103 :
1104 : /// Read timeline's planned GC cutoff: this is the logical end of history that users
1105 : /// are allowed to read (based on configured PITR), even if physically we have more history.
1106 0 : pub(crate) fn get_gc_cutoff_lsn(&self) -> Lsn {
1107 0 : self.gc_info.read().unwrap().cutoffs.time
1108 0 : }
1109 :
1110 : /// Look up given page version.
1111 : ///
1112 : /// If a remote layer file is needed, it is downloaded as part of this
1113 : /// call.
1114 : ///
1115 : /// This method enforces [`Self::pagestream_throttle`] internally.
1116 : ///
1117 : /// NOTE: It is considered an error to 'get' a key that doesn't exist. The
1118 : /// abstraction above this needs to store suitable metadata to track what
1119 : /// data exists with what keys, in separate metadata entries. If a
1120 : /// non-existent key is requested, we may incorrectly return a value from
1121 : /// an ancestor branch, for example, or waste a lot of cycles chasing the
1122 : /// non-existing key.
1123 : ///
1124 : /// # Cancel-Safety
1125 : ///
1126 : /// This method is cancellation-safe.
1127 : #[inline(always)]
1128 1215199 : pub(crate) async fn get(
1129 1215199 : &self,
1130 1215199 : key: Key,
1131 1215199 : lsn: Lsn,
1132 1215199 : ctx: &RequestContext,
1133 1215199 : ) -> Result<Bytes, PageReconstructError> {
1134 1215199 : if !lsn.is_valid() {
1135 0 : return Err(PageReconstructError::Other(anyhow::anyhow!("Invalid LSN")));
1136 1215199 : }
1137 1215199 :
1138 1215199 : // This check is debug-only because of the cost of hashing, and because it's a double-check: we
1139 1215199 : // already checked the key against the shard_identity when looking up the Timeline from
1140 1215199 : // page_service.
1141 1215199 : debug_assert!(!self.shard_identity.is_key_disposable(&key));
1142 :
1143 1215199 : let keyspace = KeySpace {
1144 1215199 : ranges: vec![key..key.next()],
1145 1215199 : };
1146 1215199 :
1147 1215199 : let mut reconstruct_state = ValuesReconstructState::new(IoConcurrency::sequential());
1148 :
1149 1215199 : let vectored_res = self
1150 1215199 : .get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx)
1151 1215199 : .await;
1152 :
1153 1215199 : let key_value = vectored_res?.pop_first();
1154 1215187 : match key_value {
1155 1215163 : Some((got_key, value)) => {
1156 1215163 : if got_key != key {
1157 0 : error!(
1158 0 : "Expected {}, but singular vectored get returned {}",
1159 : key, got_key
1160 : );
1161 0 : Err(PageReconstructError::Other(anyhow!(
1162 0 : "Singular vectored get returned wrong key"
1163 0 : )))
1164 : } else {
1165 1215163 : value
1166 : }
1167 : }
1168 24 : None => Err(PageReconstructError::MissingKey(MissingKeyError {
1169 24 : key,
1170 24 : shard: self.shard_identity.get_shard_number(&key),
1171 24 : cont_lsn: Lsn(0),
1172 24 : request_lsn: lsn,
1173 24 : ancestor_lsn: None,
1174 24 : backtrace: None,
1175 24 : read_path: None,
1176 24 : })),
1177 : }
1178 1215199 : }
1179 :
1180 : pub(crate) const MAX_GET_VECTORED_KEYS: u64 = 32;
1181 : pub(crate) const LAYERS_VISITED_WARN_THRESHOLD: u32 = 100;
1182 :
1183 : /// Look up multiple page versions at a given LSN
1184 : ///
1185 : /// This naive implementation will be replaced with a more efficient one
1186 : /// which actually vectorizes the read path.
1187 39380 : pub(crate) async fn get_vectored(
1188 39380 : &self,
1189 39380 : keyspace: KeySpace,
1190 39380 : lsn: Lsn,
1191 39380 : io_concurrency: super::storage_layer::IoConcurrency,
1192 39380 : ctx: &RequestContext,
1193 39380 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1194 39380 : if !lsn.is_valid() {
1195 0 : return Err(GetVectoredError::InvalidLsn(lsn));
1196 39380 : }
1197 39380 :
1198 39380 : let key_count = keyspace.total_raw_size().try_into().unwrap();
1199 39380 : if key_count > Timeline::MAX_GET_VECTORED_KEYS {
1200 0 : return Err(GetVectoredError::Oversized(key_count));
1201 39380 : }
1202 :
1203 78760 : for range in &keyspace.ranges {
1204 39380 : let mut key = range.start;
1205 79232 : while key != range.end {
1206 39852 : assert!(!self.shard_identity.is_key_disposable(&key));
1207 39852 : key = key.next();
1208 : }
1209 : }
1210 :
1211 39380 : trace!(
1212 0 : "get vectored request for {:?}@{} from task kind {:?}",
1213 0 : keyspace,
1214 0 : lsn,
1215 0 : ctx.task_kind(),
1216 : );
1217 :
1218 39380 : let start = crate::metrics::GET_VECTORED_LATENCY
1219 39380 : .for_task_kind(ctx.task_kind())
1220 39380 : .map(|metric| (metric, Instant::now()));
1221 :
1222 39380 : let res = self
1223 39380 : .get_vectored_impl(
1224 39380 : keyspace.clone(),
1225 39380 : lsn,
1226 39380 : &mut ValuesReconstructState::new(io_concurrency),
1227 39380 : ctx,
1228 39380 : )
1229 39380 : .await;
1230 :
1231 39380 : if let Some((metric, start)) = start {
1232 0 : let elapsed = start.elapsed();
1233 0 : metric.observe(elapsed.as_secs_f64());
1234 39380 : }
1235 :
1236 39380 : res
1237 39380 : }
1238 :
1239 : /// Scan the keyspace and return all existing key-values in the keyspace. This currently uses vectored
1240 : /// get underlying. Normal vectored get would throw an error when a key in the keyspace is not found
1241 : /// during the search, but for the scan interface, it returns all existing key-value pairs, and does
1242 : /// not expect each single key in the key space will be found. The semantics is closer to the RocksDB
1243 : /// scan iterator interface. We could optimize this interface later to avoid some checks in the vectored
1244 : /// get path to maintain and split the probing and to-be-probe keyspace. We also need to ensure that
1245 : /// the scan operation will not cause OOM in the future.
1246 24 : pub(crate) async fn scan(
1247 24 : &self,
1248 24 : keyspace: KeySpace,
1249 24 : lsn: Lsn,
1250 24 : ctx: &RequestContext,
1251 24 : io_concurrency: super::storage_layer::IoConcurrency,
1252 24 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1253 24 : if !lsn.is_valid() {
1254 0 : return Err(GetVectoredError::InvalidLsn(lsn));
1255 24 : }
1256 24 :
1257 24 : trace!(
1258 0 : "key-value scan request for {:?}@{} from task kind {:?}",
1259 0 : keyspace,
1260 0 : lsn,
1261 0 : ctx.task_kind()
1262 : );
1263 :
1264 : // We should generalize this into Keyspace::contains in the future.
1265 48 : for range in &keyspace.ranges {
1266 24 : if range.start.field1 < METADATA_KEY_BEGIN_PREFIX
1267 24 : || range.end.field1 > METADATA_KEY_END_PREFIX
1268 : {
1269 0 : return Err(GetVectoredError::Other(anyhow::anyhow!(
1270 0 : "only metadata keyspace can be scanned"
1271 0 : )));
1272 24 : }
1273 : }
1274 :
1275 24 : let start = crate::metrics::SCAN_LATENCY
1276 24 : .for_task_kind(ctx.task_kind())
1277 24 : .map(ScanLatencyOngoingRecording::start_recording);
1278 :
1279 24 : let vectored_res = self
1280 24 : .get_vectored_impl(
1281 24 : keyspace.clone(),
1282 24 : lsn,
1283 24 : &mut ValuesReconstructState::new(io_concurrency),
1284 24 : ctx,
1285 24 : )
1286 24 : .await;
1287 :
1288 24 : if let Some(recording) = start {
1289 0 : recording.observe();
1290 24 : }
1291 :
1292 24 : vectored_res
1293 24 : }
1294 :
1295 1255315 : pub(super) async fn get_vectored_impl(
1296 1255315 : &self,
1297 1255315 : keyspace: KeySpace,
1298 1255315 : lsn: Lsn,
1299 1255315 : reconstruct_state: &mut ValuesReconstructState,
1300 1255315 : ctx: &RequestContext,
1301 1255315 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1302 1255315 : let read_path = if self.conf.enable_read_path_debugging || ctx.read_path_debug() {
1303 1255315 : Some(ReadPath::new(keyspace.clone(), lsn))
1304 : } else {
1305 0 : None
1306 : };
1307 1255315 : reconstruct_state.read_path = read_path;
1308 :
1309 1255315 : let traversal_res: Result<(), _> = self
1310 1255315 : .get_vectored_reconstruct_data(keyspace.clone(), lsn, reconstruct_state, ctx)
1311 1255315 : .await;
1312 1255315 : if let Err(err) = traversal_res {
1313 : // Wait for all the spawned IOs to complete.
1314 : // See comments on `spawn_io` inside `storage_layer` for more details.
1315 32 : let mut collect_futs = std::mem::take(&mut reconstruct_state.keys)
1316 32 : .into_values()
1317 32 : .map(|state| state.collect_pending_ios())
1318 32 : .collect::<FuturesUnordered<_>>();
1319 32 : while collect_futs.next().await.is_some() {}
1320 32 : return Err(err);
1321 1255283 : };
1322 1255283 :
1323 1255283 : let layers_visited = reconstruct_state.get_layers_visited();
1324 1255283 :
1325 1255283 : let futs = FuturesUnordered::new();
1326 1335951 : for (key, state) in std::mem::take(&mut reconstruct_state.keys) {
1327 1335951 : futs.push({
1328 1335951 : let walredo_self = self.myself.upgrade().expect("&self method holds the arc");
1329 1335951 : async move {
1330 1335951 : assert_eq!(state.situation, ValueReconstructSituation::Complete);
1331 :
1332 1335951 : let converted = match state.collect_pending_ios().await {
1333 1335951 : Ok(ok) => ok,
1334 0 : Err(err) => {
1335 0 : return (key, Err(err));
1336 : }
1337 : };
1338 1335951 : DELTAS_PER_READ_GLOBAL.observe(converted.num_deltas() as f64);
1339 1335951 :
1340 1335951 : // The walredo module expects the records to be descending in terms of Lsn.
1341 1335951 : // And we submit the IOs in that order, so, there shuold be no need to sort here.
1342 1335951 : debug_assert!(
1343 1335951 : converted
1344 1335951 : .records
1345 1335951 : .is_sorted_by_key(|(lsn, _)| std::cmp::Reverse(*lsn)),
1346 0 : "{converted:?}"
1347 : );
1348 :
1349 : (
1350 1335951 : key,
1351 1335951 : walredo_self.reconstruct_value(key, lsn, converted).await,
1352 : )
1353 1335951 : }
1354 1335951 : });
1355 1335951 : }
1356 :
1357 1255283 : let results = futs
1358 1255283 : .collect::<BTreeMap<Key, Result<Bytes, PageReconstructError>>>()
1359 1255283 : .await;
1360 :
1361 : // For aux file keys (v1 or v2) the vectored read path does not return an error
1362 : // when they're missing. Instead they are omitted from the resulting btree
1363 : // (this is a requirement, not a bug). Skip updating the metric in these cases
1364 : // to avoid infinite results.
1365 1255283 : if !results.is_empty() {
1366 : // Record the total number of layers visited towards each key in the batch. While some
1367 : // layers may not intersect with a given read, and the cost of layer visits are
1368 : // amortized across the batch, each visited layer contributes directly to the observed
1369 : // latency for every read in the batch, which is what we care about.
1370 1254799 : if layers_visited >= Self::LAYERS_VISITED_WARN_THRESHOLD {
1371 0 : static LOG_PACER: Lazy<Mutex<RateLimit>> =
1372 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(60))));
1373 0 : LOG_PACER.lock().unwrap().call(|| {
1374 0 : let num_keys = keyspace.total_raw_size();
1375 0 : let num_pages = results.len();
1376 0 : tracing::info!(
1377 0 : shard_id = %self.tenant_shard_id.shard_slug(),
1378 0 : lsn = %lsn,
1379 0 : "Vectored read for {keyspace} visited {layers_visited} layers. Returned {num_pages}/{num_keys} pages.",
1380 : );
1381 0 : });
1382 1254799 : }
1383 :
1384 2590750 : for _ in &results {
1385 1335951 : self.metrics.layers_per_read.observe(layers_visited as f64);
1386 1335951 : LAYERS_PER_READ_GLOBAL.observe(layers_visited as f64);
1387 1335951 : }
1388 484 : }
1389 :
1390 1255283 : Ok(results)
1391 1255315 : }
1392 :
1393 : /// Get last or prev record separately. Same as get_last_record_rlsn().last/prev.
1394 548876 : pub(crate) fn get_last_record_lsn(&self) -> Lsn {
1395 548876 : self.last_record_lsn.load().last
1396 548876 : }
1397 :
1398 0 : pub(crate) fn get_prev_record_lsn(&self) -> Lsn {
1399 0 : self.last_record_lsn.load().prev
1400 0 : }
1401 :
1402 : /// Atomically get both last and prev.
1403 456 : pub(crate) fn get_last_record_rlsn(&self) -> RecordLsn {
1404 456 : self.last_record_lsn.load()
1405 456 : }
1406 :
1407 : /// Subscribe to callers of wait_lsn(). The value of the channel is None if there are no
1408 : /// wait_lsn() calls in progress, and Some(Lsn) if there is an active waiter for wait_lsn().
1409 0 : pub(crate) fn subscribe_for_wait_lsn_updates(&self) -> watch::Receiver<Option<Lsn>> {
1410 0 : self.last_record_lsn.status_receiver()
1411 0 : }
1412 :
1413 888 : pub(crate) fn get_disk_consistent_lsn(&self) -> Lsn {
1414 888 : self.disk_consistent_lsn.load()
1415 888 : }
1416 :
1417 : /// remote_consistent_lsn from the perspective of the tenant's current generation,
1418 : /// not validated with control plane yet.
1419 : /// See [`Self::get_remote_consistent_lsn_visible`].
1420 0 : pub(crate) fn get_remote_consistent_lsn_projected(&self) -> Option<Lsn> {
1421 0 : self.remote_client.remote_consistent_lsn_projected()
1422 0 : }
1423 :
1424 : /// remote_consistent_lsn which the tenant is guaranteed not to go backward from,
1425 : /// i.e. a value of remote_consistent_lsn_projected which has undergone
1426 : /// generation validation in the deletion queue.
1427 0 : pub(crate) fn get_remote_consistent_lsn_visible(&self) -> Option<Lsn> {
1428 0 : self.remote_client.remote_consistent_lsn_visible()
1429 0 : }
1430 :
1431 : /// The sum of the file size of all historic layers in the layer map.
1432 : /// This method makes no distinction between local and remote layers.
1433 : /// Hence, the result **does not represent local filesystem usage**.
1434 0 : pub(crate) async fn layer_size_sum(&self) -> u64 {
1435 0 : let guard = self.layers.read().await;
1436 0 : guard.layer_size_sum()
1437 0 : }
1438 :
1439 0 : pub(crate) fn resident_physical_size(&self) -> u64 {
1440 0 : self.metrics.resident_physical_size_get()
1441 0 : }
1442 :
1443 0 : pub(crate) fn get_directory_metrics(&self) -> [u64; DirectoryKind::KINDS_NUM] {
1444 0 : array::from_fn(|idx| self.directory_metrics[idx].load(AtomicOrdering::Relaxed))
1445 0 : }
1446 :
1447 : ///
1448 : /// Wait until WAL has been received and processed up to this LSN.
1449 : ///
1450 : /// You should call this before any of the other get_* or list_* functions. Calling
1451 : /// those functions with an LSN that has been processed yet is an error.
1452 : ///
1453 452500 : pub(crate) async fn wait_lsn(
1454 452500 : &self,
1455 452500 : lsn: Lsn,
1456 452500 : who_is_waiting: WaitLsnWaiter<'_>,
1457 452500 : timeout: WaitLsnTimeout,
1458 452500 : ctx: &RequestContext, /* Prepare for use by cancellation */
1459 452500 : ) -> Result<(), WaitLsnError> {
1460 452500 : let state = self.current_state();
1461 452500 : if self.cancel.is_cancelled() || matches!(state, TimelineState::Stopping) {
1462 0 : return Err(WaitLsnError::Shutdown);
1463 452500 : } else if !matches!(state, TimelineState::Active) {
1464 0 : return Err(WaitLsnError::BadState(state));
1465 452500 : }
1466 452500 :
1467 452500 : if cfg!(debug_assertions) {
1468 452500 : match ctx.task_kind() {
1469 : TaskKind::WalReceiverManager
1470 : | TaskKind::WalReceiverConnectionHandler
1471 : | TaskKind::WalReceiverConnectionPoller => {
1472 0 : let is_myself = match who_is_waiting {
1473 0 : WaitLsnWaiter::Timeline(waiter) => Weak::ptr_eq(&waiter.myself, &self.myself),
1474 0 : WaitLsnWaiter::Tenant | WaitLsnWaiter::PageService | WaitLsnWaiter::HttpEndpoint => unreachable!("tenant or page_service context are not expected to have task kind {:?}", ctx.task_kind()),
1475 : };
1476 0 : if is_myself {
1477 0 : if let Err(current) = self.last_record_lsn.would_wait_for(lsn) {
1478 : // walingest is the only one that can advance last_record_lsn; it should make sure to never reach here
1479 0 : panic!("this timeline's walingest task is calling wait_lsn({lsn}) but we only have last_record_lsn={current}; would deadlock");
1480 0 : }
1481 0 : } else {
1482 0 : // if another timeline's is waiting for us, there's no deadlock risk because
1483 0 : // our walreceiver task can make progress independent of theirs
1484 0 : }
1485 : }
1486 452500 : _ => {}
1487 : }
1488 0 : }
1489 :
1490 452500 : let timeout = match timeout {
1491 0 : WaitLsnTimeout::Custom(t) => t,
1492 452500 : WaitLsnTimeout::Default => self.conf.wait_lsn_timeout,
1493 : };
1494 :
1495 452500 : let _timer = crate::metrics::WAIT_LSN_TIME.start_timer();
1496 452500 :
1497 452500 : match self.last_record_lsn.wait_for_timeout(lsn, timeout).await {
1498 452500 : Ok(()) => Ok(()),
1499 0 : Err(e) => {
1500 : use utils::seqwait::SeqWaitError::*;
1501 0 : match e {
1502 0 : Shutdown => Err(WaitLsnError::Shutdown),
1503 : Timeout => {
1504 : // don't count the time spent waiting for lock below, and also in walreceiver.status(), towards the wait_lsn_time_histo
1505 0 : drop(_timer);
1506 0 : let walreceiver_status = self.walreceiver_status();
1507 0 : Err(WaitLsnError::Timeout(format!(
1508 0 : "Timed out while waiting for WAL record at LSN {} to arrive, last_record_lsn {} disk consistent LSN={}, WalReceiver status: {}",
1509 0 : lsn,
1510 0 : self.get_last_record_lsn(),
1511 0 : self.get_disk_consistent_lsn(),
1512 0 : walreceiver_status,
1513 0 : )))
1514 : }
1515 : }
1516 : }
1517 : }
1518 452500 : }
1519 :
1520 0 : pub(crate) fn walreceiver_status(&self) -> String {
1521 0 : match &*self.walreceiver.lock().unwrap() {
1522 0 : None => "stopping or stopped".to_string(),
1523 0 : Some(walreceiver) => match walreceiver.status() {
1524 0 : Some(status) => status.to_human_readable_string(),
1525 0 : None => "Not active".to_string(),
1526 : },
1527 : }
1528 0 : }
1529 :
1530 : /// Check that it is valid to request operations with that lsn.
1531 464 : pub(crate) fn check_lsn_is_in_scope(
1532 464 : &self,
1533 464 : lsn: Lsn,
1534 464 : latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
1535 464 : ) -> anyhow::Result<()> {
1536 464 : ensure!(
1537 464 : lsn >= **latest_gc_cutoff_lsn,
1538 8 : "LSN {} is earlier than latest GC cutoff {} (we might've already garbage collected needed data)",
1539 8 : lsn,
1540 8 : **latest_gc_cutoff_lsn,
1541 : );
1542 456 : Ok(())
1543 464 : }
1544 :
1545 : /// Initializes an LSN lease. The function will return an error if the requested LSN is less than the `latest_gc_cutoff_lsn`.
1546 20 : pub(crate) fn init_lsn_lease(
1547 20 : &self,
1548 20 : lsn: Lsn,
1549 20 : length: Duration,
1550 20 : ctx: &RequestContext,
1551 20 : ) -> anyhow::Result<LsnLease> {
1552 20 : self.make_lsn_lease(lsn, length, true, ctx)
1553 20 : }
1554 :
1555 : /// Renews a lease at a particular LSN. The requested LSN is not validated against the `latest_gc_cutoff_lsn` when we are in the grace period.
1556 8 : pub(crate) fn renew_lsn_lease(
1557 8 : &self,
1558 8 : lsn: Lsn,
1559 8 : length: Duration,
1560 8 : ctx: &RequestContext,
1561 8 : ) -> anyhow::Result<LsnLease> {
1562 8 : self.make_lsn_lease(lsn, length, false, ctx)
1563 8 : }
1564 :
1565 : /// Obtains a temporary lease blocking garbage collection for the given LSN.
1566 : ///
1567 : /// If we are in `AttachedSingle` mode and is not blocked by the lsn lease deadline, this function will error
1568 : /// if the requesting LSN is less than the `latest_gc_cutoff_lsn` and there is no existing request present.
1569 : ///
1570 : /// If there is an existing lease in the map, the lease will be renewed only if the request extends the lease.
1571 : /// The returned lease is therefore the maximum between the existing lease and the requesting lease.
1572 28 : fn make_lsn_lease(
1573 28 : &self,
1574 28 : lsn: Lsn,
1575 28 : length: Duration,
1576 28 : init: bool,
1577 28 : _ctx: &RequestContext,
1578 28 : ) -> anyhow::Result<LsnLease> {
1579 24 : let lease = {
1580 : // Normalize the requested LSN to be aligned, and move to the first record
1581 : // if it points to the beginning of the page (header).
1582 28 : let lsn = xlog_utils::normalize_lsn(lsn, WAL_SEGMENT_SIZE);
1583 28 :
1584 28 : let mut gc_info = self.gc_info.write().unwrap();
1585 28 : let planned_cutoff = gc_info.min_cutoff();
1586 28 :
1587 28 : let valid_until = SystemTime::now() + length;
1588 28 :
1589 28 : let entry = gc_info.leases.entry(lsn);
1590 28 :
1591 28 : match entry {
1592 12 : Entry::Occupied(mut occupied) => {
1593 12 : let existing_lease = occupied.get_mut();
1594 12 : if valid_until > existing_lease.valid_until {
1595 4 : existing_lease.valid_until = valid_until;
1596 4 : let dt: DateTime<Utc> = valid_until.into();
1597 4 : info!("lease extended to {}", dt);
1598 : } else {
1599 8 : let dt: DateTime<Utc> = existing_lease.valid_until.into();
1600 8 : info!("existing lease covers greater length, valid until {}", dt);
1601 : }
1602 :
1603 12 : existing_lease.clone()
1604 : }
1605 16 : Entry::Vacant(vacant) => {
1606 : // Reject already GC-ed LSN if we are in AttachedSingle and
1607 : // not blocked by the lsn lease deadline.
1608 16 : let validate = {
1609 16 : let conf = self.tenant_conf.load();
1610 16 : conf.location.attach_mode == AttachmentMode::Single
1611 16 : && !conf.is_gc_blocked_by_lsn_lease_deadline()
1612 : };
1613 :
1614 16 : if init || validate {
1615 16 : let latest_gc_cutoff_lsn = self.get_applied_gc_cutoff_lsn();
1616 16 : if lsn < *latest_gc_cutoff_lsn {
1617 4 : bail!("tried to request an lsn lease for an lsn below the latest gc cutoff. requested at {} gc cutoff {}", lsn, *latest_gc_cutoff_lsn);
1618 12 : }
1619 12 : if lsn < planned_cutoff {
1620 0 : bail!("tried to request an lsn lease for an lsn below the planned gc cutoff. requested at {} planned gc cutoff {}", lsn, planned_cutoff);
1621 12 : }
1622 0 : }
1623 :
1624 12 : let dt: DateTime<Utc> = valid_until.into();
1625 12 : info!("lease created, valid until {}", dt);
1626 12 : vacant.insert(LsnLease { valid_until }).clone()
1627 : }
1628 : }
1629 : };
1630 :
1631 24 : Ok(lease)
1632 28 : }
1633 :
1634 : /// Freeze the current open in-memory layer. It will be written to disk on next iteration.
1635 : /// Returns the flush request ID which can be awaited with wait_flush_completion().
1636 : #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
1637 : pub(crate) async fn freeze(&self) -> Result<u64, FlushLayerError> {
1638 : self.freeze0().await
1639 : }
1640 :
1641 : /// Freeze and flush the open in-memory layer, waiting for it to be written to disk.
1642 : #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
1643 : pub(crate) async fn freeze_and_flush(&self) -> Result<(), FlushLayerError> {
1644 : self.freeze_and_flush0().await
1645 : }
1646 :
1647 : /// Freeze the current open in-memory layer. It will be written to disk on next iteration.
1648 : /// Returns the flush request ID which can be awaited with wait_flush_completion().
1649 2244 : pub(crate) async fn freeze0(&self) -> Result<u64, FlushLayerError> {
1650 2244 : let mut g = self.write_lock.lock().await;
1651 2244 : let to_lsn = self.get_last_record_lsn();
1652 2244 : self.freeze_inmem_layer_at(to_lsn, &mut g).await
1653 2244 : }
1654 :
1655 : // This exists to provide a non-span creating version of `freeze_and_flush` we can call without
1656 : // polluting the span hierarchy.
1657 2244 : pub(crate) async fn freeze_and_flush0(&self) -> Result<(), FlushLayerError> {
1658 2244 : let token = self.freeze0().await?;
1659 2244 : self.wait_flush_completion(token).await
1660 2244 : }
1661 :
1662 : // Check if an open ephemeral layer should be closed: this provides
1663 : // background enforcement of checkpoint interval if there is no active WAL receiver, to avoid keeping
1664 : // an ephemeral layer open forever when idle. It also freezes layers if the global limit on
1665 : // ephemeral layer bytes has been breached.
1666 0 : pub(super) async fn maybe_freeze_ephemeral_layer(&self) {
1667 0 : let Ok(mut write_guard) = self.write_lock.try_lock() else {
1668 : // If the write lock is held, there is an active wal receiver: rolling open layers
1669 : // is their responsibility while they hold this lock.
1670 0 : return;
1671 : };
1672 :
1673 : // FIXME: why not early exit? because before #7927 the state would had been cleared every
1674 : // time, and this was missed.
1675 : // if write_guard.is_none() { return; }
1676 :
1677 0 : let Ok(layers_guard) = self.layers.try_read() else {
1678 : // Don't block if the layer lock is busy
1679 0 : return;
1680 : };
1681 :
1682 0 : let Ok(lm) = layers_guard.layer_map() else {
1683 0 : return;
1684 : };
1685 :
1686 0 : let Some(open_layer) = &lm.open_layer else {
1687 : // If there is no open layer, we have no layer freezing to do. However, we might need to generate
1688 : // some updates to disk_consistent_lsn and remote_consistent_lsn, in case we ingested some WAL regions
1689 : // that didn't result in writes to this shard.
1690 :
1691 : // Must not hold the layers lock while waiting for a flush.
1692 0 : drop(layers_guard);
1693 0 :
1694 0 : let last_record_lsn = self.get_last_record_lsn();
1695 0 : let disk_consistent_lsn = self.get_disk_consistent_lsn();
1696 0 : if last_record_lsn > disk_consistent_lsn {
1697 : // We have no open layer, but disk_consistent_lsn is behind the last record: this indicates
1698 : // we are a sharded tenant and have skipped some WAL
1699 0 : let last_freeze_ts = *self.last_freeze_ts.read().unwrap();
1700 0 : if last_freeze_ts.elapsed() >= self.get_checkpoint_timeout() {
1701 : // Only do this if have been layer-less longer than get_checkpoint_timeout, so that a shard
1702 : // without any data ingested (yet) doesn't write a remote index as soon as it
1703 : // sees its LSN advance: we only do this if we've been layer-less
1704 : // for some time.
1705 0 : tracing::debug!(
1706 0 : "Advancing disk_consistent_lsn past WAL ingest gap {} -> {}",
1707 : disk_consistent_lsn,
1708 : last_record_lsn
1709 : );
1710 :
1711 : // The flush loop will update remote consistent LSN as well as disk consistent LSN.
1712 : // We know there is no open layer, so we can request freezing without actually
1713 : // freezing anything. This is true even if we have dropped the layers_guard, we
1714 : // still hold the write_guard.
1715 0 : let _ = async {
1716 0 : let token = self
1717 0 : .freeze_inmem_layer_at(last_record_lsn, &mut write_guard)
1718 0 : .await?;
1719 0 : self.wait_flush_completion(token).await
1720 0 : }
1721 0 : .await;
1722 0 : }
1723 0 : }
1724 :
1725 0 : return;
1726 : };
1727 :
1728 0 : let Some(current_size) = open_layer.try_len() else {
1729 : // Unexpected: since we hold the write guard, nobody else should be writing to this layer, so
1730 : // read lock to get size should always succeed.
1731 0 : tracing::warn!("Lock conflict while reading size of open layer");
1732 0 : return;
1733 : };
1734 :
1735 0 : let current_lsn = self.get_last_record_lsn();
1736 :
1737 0 : let checkpoint_distance_override = open_layer.tick().await;
1738 :
1739 0 : if let Some(size_override) = checkpoint_distance_override {
1740 0 : if current_size > size_override {
1741 : // This is not harmful, but it only happens in relatively rare cases where
1742 : // time-based checkpoints are not happening fast enough to keep the amount of
1743 : // ephemeral data within configured limits. It's a sign of stress on the system.
1744 0 : tracing::info!("Early-rolling open layer at size {current_size} (limit {size_override}) due to dirty data pressure");
1745 0 : }
1746 0 : }
1747 :
1748 0 : let checkpoint_distance =
1749 0 : checkpoint_distance_override.unwrap_or(self.get_checkpoint_distance());
1750 0 :
1751 0 : if self.should_roll(
1752 0 : current_size,
1753 0 : current_size,
1754 0 : checkpoint_distance,
1755 0 : self.get_last_record_lsn(),
1756 0 : self.last_freeze_at.load(),
1757 0 : open_layer.get_opened_at(),
1758 0 : ) {
1759 0 : match open_layer.info() {
1760 0 : InMemoryLayerInfo::Frozen { lsn_start, lsn_end } => {
1761 0 : // We may reach this point if the layer was already frozen by not yet flushed: flushing
1762 0 : // happens asynchronously in the background.
1763 0 : tracing::debug!(
1764 0 : "Not freezing open layer, it's already frozen ({lsn_start}..{lsn_end})"
1765 : );
1766 : }
1767 : InMemoryLayerInfo::Open { .. } => {
1768 : // Upgrade to a write lock and freeze the layer
1769 0 : drop(layers_guard);
1770 0 : let res = self
1771 0 : .freeze_inmem_layer_at(current_lsn, &mut write_guard)
1772 0 : .await;
1773 :
1774 0 : if let Err(e) = res {
1775 0 : tracing::info!(
1776 0 : "failed to flush frozen layer after background freeze: {e:#}"
1777 : );
1778 0 : }
1779 : }
1780 : }
1781 0 : }
1782 0 : }
1783 :
1784 : /// Checks if the internal state of the timeline is consistent with it being able to be offloaded.
1785 : ///
1786 : /// This is neccessary but not sufficient for offloading of the timeline as it might have
1787 : /// child timelines that are not offloaded yet.
1788 0 : pub(crate) fn can_offload(&self) -> (bool, &'static str) {
1789 0 : if self.remote_client.is_archived() != Some(true) {
1790 0 : return (false, "the timeline is not archived");
1791 0 : }
1792 0 : if !self.remote_client.no_pending_work() {
1793 : // if the remote client is still processing some work, we can't offload
1794 0 : return (false, "the upload queue is not drained yet");
1795 0 : }
1796 0 :
1797 0 : (true, "ok")
1798 0 : }
1799 :
1800 : /// Outermost timeline compaction operation; downloads needed layers. Returns whether we have pending
1801 : /// compaction tasks.
1802 728 : pub(crate) async fn compact(
1803 728 : self: &Arc<Self>,
1804 728 : cancel: &CancellationToken,
1805 728 : flags: EnumSet<CompactFlags>,
1806 728 : ctx: &RequestContext,
1807 728 : ) -> Result<CompactionOutcome, CompactionError> {
1808 728 : self.compact_with_options(
1809 728 : cancel,
1810 728 : CompactOptions {
1811 728 : flags,
1812 728 : compact_key_range: None,
1813 728 : compact_lsn_range: None,
1814 728 : sub_compaction: false,
1815 728 : sub_compaction_max_job_size_mb: None,
1816 728 : },
1817 728 : ctx,
1818 728 : )
1819 728 : .await
1820 728 : }
1821 :
1822 : /// Outermost timeline compaction operation; downloads needed layers.
1823 : ///
1824 : /// NB: the cancellation token is usually from a background task, but can also come from a
1825 : /// request task.
1826 728 : pub(crate) async fn compact_with_options(
1827 728 : self: &Arc<Self>,
1828 728 : cancel: &CancellationToken,
1829 728 : options: CompactOptions,
1830 728 : ctx: &RequestContext,
1831 728 : ) -> Result<CompactionOutcome, CompactionError> {
1832 728 : // Acquire the compaction lock and task semaphore.
1833 728 : //
1834 728 : // L0-only compaction uses a separate semaphore (if enabled) to make sure it isn't starved
1835 728 : // out by other background tasks (including image compaction). We request this via
1836 728 : // `BackgroundLoopKind::L0Compaction`.
1837 728 : //
1838 728 : // If this is a regular compaction pass, and L0-only compaction is enabled in the config,
1839 728 : // then we should yield for immediate L0 compaction if necessary while we're waiting for the
1840 728 : // background task semaphore. There's no point yielding otherwise, since we'd just end up
1841 728 : // right back here.
1842 728 : let is_l0_only = options.flags.contains(CompactFlags::OnlyL0Compaction);
1843 728 : let semaphore_kind = match is_l0_only && self.get_compaction_l0_semaphore() {
1844 0 : true => BackgroundLoopKind::L0Compaction,
1845 728 : false => BackgroundLoopKind::Compaction,
1846 : };
1847 728 : let yield_for_l0 = !is_l0_only
1848 728 : && self.get_compaction_l0_first()
1849 0 : && !options.flags.contains(CompactFlags::NoYield);
1850 :
1851 728 : let acquire = async move {
1852 728 : let guard = self.compaction_lock.lock().await;
1853 728 : let permit = super::tasks::acquire_concurrency_permit(semaphore_kind, ctx).await;
1854 728 : (guard, permit)
1855 728 : };
1856 :
1857 728 : let (_guard, _permit) = tokio::select! {
1858 728 : (guard, permit) = acquire => (guard, permit),
1859 728 : _ = self.l0_compaction_trigger.notified(), if yield_for_l0 => {
1860 0 : return Ok(CompactionOutcome::YieldForL0);
1861 : }
1862 728 : _ = self.cancel.cancelled() => return Ok(CompactionOutcome::Skipped),
1863 728 : _ = cancel.cancelled() => return Ok(CompactionOutcome::Skipped),
1864 : };
1865 :
1866 728 : let last_record_lsn = self.get_last_record_lsn();
1867 728 :
1868 728 : // Last record Lsn could be zero in case the timeline was just created
1869 728 : if !last_record_lsn.is_valid() {
1870 0 : warn!("Skipping compaction for potentially just initialized timeline, it has invalid last record lsn: {last_record_lsn}");
1871 0 : return Ok(CompactionOutcome::Skipped);
1872 728 : }
1873 :
1874 728 : let result = match self.get_compaction_algorithm_settings().kind {
1875 : CompactionAlgorithm::Tiered => {
1876 0 : self.compact_tiered(cancel, ctx).await?;
1877 0 : Ok(CompactionOutcome::Done)
1878 : }
1879 728 : CompactionAlgorithm::Legacy => self.compact_legacy(cancel, options, ctx).await,
1880 : };
1881 :
1882 : // Signal compaction failure to avoid L0 flush stalls when it's broken.
1883 0 : match result {
1884 728 : Ok(_) => self.compaction_failed.store(false, AtomicOrdering::Relaxed),
1885 : Err(CompactionError::Other(_)) | Err(CompactionError::CollectKeySpaceError(_)) => {
1886 0 : self.compaction_failed.store(true, AtomicOrdering::Relaxed)
1887 : }
1888 : // Don't change the current value on offload failure or shutdown. We don't want to
1889 : // abruptly stall nor resume L0 flushes in these cases.
1890 0 : Err(CompactionError::Offload(_)) => {}
1891 0 : Err(CompactionError::ShuttingDown) => {}
1892 : };
1893 :
1894 728 : result
1895 728 : }
1896 :
1897 : /// Mutate the timeline with a [`TimelineWriter`].
1898 10266384 : pub(crate) async fn writer(&self) -> TimelineWriter<'_> {
1899 10266384 : TimelineWriter {
1900 10266384 : tl: self,
1901 10266384 : write_guard: self.write_lock.lock().await,
1902 : }
1903 10266384 : }
1904 :
1905 0 : pub(crate) fn activate(
1906 0 : self: &Arc<Self>,
1907 0 : parent: Arc<crate::tenant::Tenant>,
1908 0 : broker_client: BrokerClientChannel,
1909 0 : background_jobs_can_start: Option<&completion::Barrier>,
1910 0 : ctx: &RequestContext,
1911 0 : ) {
1912 0 : if self.tenant_shard_id.is_shard_zero() {
1913 0 : // Logical size is only maintained accurately on shard zero.
1914 0 : self.spawn_initial_logical_size_computation_task(ctx);
1915 0 : }
1916 0 : self.launch_wal_receiver(ctx, broker_client);
1917 0 : self.set_state(TimelineState::Active);
1918 0 : self.launch_eviction_task(parent, background_jobs_can_start);
1919 0 : }
1920 :
1921 : /// After this function returns, there are no timeline-scoped tasks are left running.
1922 : ///
1923 : /// The preferred pattern for is:
1924 : /// - in any spawned tasks, keep Timeline::guard open + Timeline::cancel / child token
1925 : /// - if early shutdown (not just cancellation) of a sub-tree of tasks is required,
1926 : /// go the extra mile and keep track of JoinHandles
1927 : /// - Keep track of JoinHandles using a passed-down `Arc<Mutex<Option<JoinSet>>>` or similar,
1928 : /// instead of spawning directly on a runtime. It is a more composable / testable pattern.
1929 : ///
1930 : /// For legacy reasons, we still have multiple tasks spawned using
1931 : /// `task_mgr::spawn(X, Some(tenant_id), Some(timeline_id))`.
1932 : /// We refer to these as "timeline-scoped task_mgr tasks".
1933 : /// Some of these tasks are already sensitive to Timeline::cancel while others are
1934 : /// not sensitive to Timeline::cancel and instead respect [`task_mgr::shutdown_token`]
1935 : /// or [`task_mgr::shutdown_watcher`].
1936 : /// We want to gradually convert the code base away from these.
1937 : ///
1938 : /// Here is an inventory of timeline-scoped task_mgr tasks that are still sensitive to
1939 : /// `task_mgr::shutdown_{token,watcher}` (there are also tenant-scoped and global-scoped
1940 : /// ones that aren't mentioned here):
1941 : /// - [`TaskKind::TimelineDeletionWorker`]
1942 : /// - NB: also used for tenant deletion
1943 : /// - [`TaskKind::RemoteUploadTask`]`
1944 : /// - [`TaskKind::InitialLogicalSizeCalculation`]
1945 : /// - [`TaskKind::DownloadAllRemoteLayers`] (can we get rid of it?)
1946 : // Inventory of timeline-scoped task_mgr tasks that use spawn but aren't sensitive:
1947 : /// - [`TaskKind::Eviction`]
1948 : /// - [`TaskKind::LayerFlushTask`]
1949 : /// - [`TaskKind::OndemandLogicalSizeCalculation`]
1950 : /// - [`TaskKind::GarbageCollector`] (immediate_gc is timeline-scoped)
1951 20 : pub(crate) async fn shutdown(&self, mode: ShutdownMode) {
1952 20 : debug_assert_current_span_has_tenant_and_timeline_id();
1953 20 :
1954 20 : // Regardless of whether we're going to try_freeze_and_flush
1955 20 : // or not, stop ingesting any more data. Walreceiver only provides
1956 20 : // cancellation but no "wait until gone", because it uses the Timeline::gate.
1957 20 : // So, only after the self.gate.close() below will we know for sure that
1958 20 : // no walreceiver tasks are left.
1959 20 : // For `try_freeze_and_flush=true`, this means that we might still be ingesting
1960 20 : // data during the call to `self.freeze_and_flush()` below.
1961 20 : // That's not ideal, but, we don't have the concept of a ChildGuard,
1962 20 : // which is what we'd need to properly model early shutdown of the walreceiver
1963 20 : // task sub-tree before the other Timeline task sub-trees.
1964 20 : let walreceiver = self.walreceiver.lock().unwrap().take();
1965 20 : tracing::debug!(
1966 0 : is_some = walreceiver.is_some(),
1967 0 : "Waiting for WalReceiverManager..."
1968 : );
1969 20 : if let Some(walreceiver) = walreceiver {
1970 0 : walreceiver.cancel();
1971 20 : }
1972 : // ... and inform any waiters for newer LSNs that there won't be any.
1973 20 : self.last_record_lsn.shutdown();
1974 20 :
1975 20 : if let ShutdownMode::FreezeAndFlush = mode {
1976 12 : let do_flush = if let Some((open, frozen)) = self
1977 12 : .layers
1978 12 : .read()
1979 12 : .await
1980 12 : .layer_map()
1981 12 : .map(|lm| (lm.open_layer.is_some(), lm.frozen_layers.len()))
1982 12 : .ok()
1983 12 : .filter(|(open, frozen)| *open || *frozen > 0)
1984 : {
1985 0 : if self.remote_client.is_archived() == Some(true) {
1986 : // No point flushing on shutdown for an archived timeline: it is not important
1987 : // to have it nice and fresh after our restart, and trying to flush here might
1988 : // race with trying to offload it (which also stops the flush loop)
1989 0 : false
1990 : } else {
1991 0 : tracing::info!(?open, frozen, "flushing and freezing on shutdown");
1992 0 : true
1993 : }
1994 : } else {
1995 : // this is double-shutdown, it'll be a no-op
1996 12 : true
1997 : };
1998 :
1999 : // we shut down walreceiver above, so, we won't add anything more
2000 : // to the InMemoryLayer; freeze it and wait for all frozen layers
2001 : // to reach the disk & upload queue, then shut the upload queue and
2002 : // wait for it to drain.
2003 12 : if do_flush {
2004 12 : match self.freeze_and_flush().await {
2005 : Ok(_) => {
2006 : // drain the upload queue
2007 : // if we did not wait for completion here, it might be our shutdown process
2008 : // didn't wait for remote uploads to complete at all, as new tasks can forever
2009 : // be spawned.
2010 : //
2011 : // what is problematic is the shutting down of RemoteTimelineClient, because
2012 : // obviously it does not make sense to stop while we wait for it, but what
2013 : // about corner cases like s3 suddenly hanging up?
2014 12 : self.remote_client.shutdown().await;
2015 : }
2016 : Err(FlushLayerError::Cancelled) => {
2017 : // this is likely the second shutdown, ignore silently.
2018 : // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080
2019 0 : debug_assert!(self.cancel.is_cancelled());
2020 : }
2021 0 : Err(e) => {
2022 0 : // Non-fatal. Shutdown is infallible. Failures to flush just mean that
2023 0 : // we have some extra WAL replay to do next time the timeline starts.
2024 0 : warn!("failed to freeze and flush: {e:#}");
2025 : }
2026 : }
2027 :
2028 : // `self.remote_client.shutdown().await` above should have already flushed everything from the queue, but
2029 : // we also do a final check here to ensure that the queue is empty.
2030 12 : if !self.remote_client.no_pending_work() {
2031 0 : warn!("still have pending work in remote upload queue, but continuing shutting down anyways");
2032 12 : }
2033 0 : }
2034 8 : }
2035 :
2036 20 : if let ShutdownMode::Reload = mode {
2037 : // drain the upload queue
2038 4 : self.remote_client.shutdown().await;
2039 4 : if !self.remote_client.no_pending_work() {
2040 0 : warn!("still have pending work in remote upload queue, but continuing shutting down anyways");
2041 4 : }
2042 16 : }
2043 :
2044 : // Signal any subscribers to our cancellation token to drop out
2045 20 : tracing::debug!("Cancelling CancellationToken");
2046 20 : self.cancel.cancel();
2047 20 :
2048 20 : // If we have a background task downloading heatmap layers stop it.
2049 20 : // The background downloads are sensitive to timeline cancellation (done above),
2050 20 : // so the drain will be immediate.
2051 20 : self.stop_and_drain_heatmap_layers_download().await;
2052 :
2053 : // Ensure Prevent new page service requests from starting.
2054 20 : self.handles.shutdown();
2055 20 :
2056 20 : // Transition the remote_client into a state where it's only useful for timeline deletion.
2057 20 : // (The deletion use case is why we can't just hook up remote_client to Self::cancel).)
2058 20 : self.remote_client.stop();
2059 20 :
2060 20 : // As documented in remote_client.stop()'s doc comment, it's our responsibility
2061 20 : // to shut down the upload queue tasks.
2062 20 : // TODO: fix that, task management should be encapsulated inside remote_client.
2063 20 : task_mgr::shutdown_tasks(
2064 20 : Some(TaskKind::RemoteUploadTask),
2065 20 : Some(self.tenant_shard_id),
2066 20 : Some(self.timeline_id),
2067 20 : )
2068 20 : .await;
2069 :
2070 : // TODO: work toward making this a no-op. See this function's doc comment for more context.
2071 20 : tracing::debug!("Waiting for tasks...");
2072 20 : task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), Some(self.timeline_id)).await;
2073 :
2074 : {
2075 : // Allow any remaining in-memory layers to do cleanup -- until that, they hold the gate
2076 : // open.
2077 20 : let mut write_guard = self.write_lock.lock().await;
2078 20 : self.layers.write().await.shutdown(&mut write_guard);
2079 20 : }
2080 20 :
2081 20 : // Finally wait until any gate-holders are complete.
2082 20 : //
2083 20 : // TODO: once above shutdown_tasks is a no-op, we can close the gate before calling shutdown_tasks
2084 20 : // and use a TBD variant of shutdown_tasks that asserts that there were no tasks left.
2085 20 : self.gate.close().await;
2086 :
2087 20 : self.metrics.shutdown();
2088 20 : }
2089 :
2090 900 : pub(crate) fn set_state(&self, new_state: TimelineState) {
2091 900 : match (self.current_state(), new_state) {
2092 900 : (equal_state_1, equal_state_2) if equal_state_1 == equal_state_2 => {
2093 4 : info!("Ignoring new state, equal to the existing one: {equal_state_2:?}");
2094 : }
2095 0 : (st, TimelineState::Loading) => {
2096 0 : error!("ignoring transition from {st:?} into Loading state");
2097 : }
2098 0 : (TimelineState::Broken { .. }, new_state) => {
2099 0 : error!("Ignoring state update {new_state:?} for broken timeline");
2100 : }
2101 : (TimelineState::Stopping, TimelineState::Active) => {
2102 0 : error!("Not activating a Stopping timeline");
2103 : }
2104 896 : (_, new_state) => {
2105 896 : self.state.send_replace(new_state);
2106 896 : }
2107 : }
2108 900 : }
2109 :
2110 4 : pub(crate) fn set_broken(&self, reason: String) {
2111 4 : let backtrace_str: String = format!("{}", std::backtrace::Backtrace::force_capture());
2112 4 : let broken_state = TimelineState::Broken {
2113 4 : reason,
2114 4 : backtrace: backtrace_str,
2115 4 : };
2116 4 : self.set_state(broken_state);
2117 4 :
2118 4 : // Although the Broken state is not equivalent to shutdown() (shutdown will be called
2119 4 : // later when this tenant is detach or the process shuts down), firing the cancellation token
2120 4 : // here avoids the need for other tasks to watch for the Broken state explicitly.
2121 4 : self.cancel.cancel();
2122 4 : }
2123 :
2124 454608 : pub(crate) fn current_state(&self) -> TimelineState {
2125 454608 : self.state.borrow().clone()
2126 454608 : }
2127 :
2128 12 : pub(crate) fn is_broken(&self) -> bool {
2129 12 : matches!(&*self.state.borrow(), TimelineState::Broken { .. })
2130 12 : }
2131 :
2132 472 : pub(crate) fn is_active(&self) -> bool {
2133 472 : self.current_state() == TimelineState::Active
2134 472 : }
2135 :
2136 0 : pub(crate) fn is_archived(&self) -> Option<bool> {
2137 0 : self.remote_client.is_archived()
2138 0 : }
2139 :
2140 736 : pub(crate) fn is_stopping(&self) -> bool {
2141 736 : self.current_state() == TimelineState::Stopping
2142 736 : }
2143 :
2144 0 : pub(crate) fn subscribe_for_state_updates(&self) -> watch::Receiver<TimelineState> {
2145 0 : self.state.subscribe()
2146 0 : }
2147 :
2148 452504 : pub(crate) async fn wait_to_become_active(
2149 452504 : &self,
2150 452504 : _ctx: &RequestContext, // Prepare for use by cancellation
2151 452504 : ) -> Result<(), TimelineState> {
2152 452504 : let mut receiver = self.state.subscribe();
2153 : loop {
2154 452504 : let current_state = receiver.borrow().clone();
2155 452504 : match current_state {
2156 : TimelineState::Loading => {
2157 0 : receiver
2158 0 : .changed()
2159 0 : .await
2160 0 : .expect("holding a reference to self");
2161 : }
2162 : TimelineState::Active { .. } => {
2163 452500 : return Ok(());
2164 : }
2165 : TimelineState::Broken { .. } | TimelineState::Stopping => {
2166 : // There's no chance the timeline can transition back into ::Active
2167 4 : return Err(current_state);
2168 : }
2169 : }
2170 : }
2171 452504 : }
2172 :
2173 0 : pub(crate) async fn layer_map_info(
2174 0 : &self,
2175 0 : reset: LayerAccessStatsReset,
2176 0 : ) -> Result<LayerMapInfo, layer_manager::Shutdown> {
2177 0 : let guard = self.layers.read().await;
2178 0 : let layer_map = guard.layer_map()?;
2179 0 : let mut in_memory_layers = Vec::with_capacity(layer_map.frozen_layers.len() + 1);
2180 0 : if let Some(open_layer) = &layer_map.open_layer {
2181 0 : in_memory_layers.push(open_layer.info());
2182 0 : }
2183 0 : for frozen_layer in &layer_map.frozen_layers {
2184 0 : in_memory_layers.push(frozen_layer.info());
2185 0 : }
2186 :
2187 0 : let historic_layers = layer_map
2188 0 : .iter_historic_layers()
2189 0 : .map(|desc| guard.get_from_desc(&desc).info(reset))
2190 0 : .collect();
2191 0 :
2192 0 : Ok(LayerMapInfo {
2193 0 : in_memory_layers,
2194 0 : historic_layers,
2195 0 : })
2196 0 : }
2197 :
2198 : #[instrument(skip_all, fields(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))]
2199 : pub(crate) async fn download_layer(
2200 : &self,
2201 : layer_file_name: &LayerName,
2202 : ) -> Result<Option<bool>, super::storage_layer::layer::DownloadError> {
2203 : let Some(layer) = self
2204 : .find_layer(layer_file_name)
2205 : .await
2206 0 : .map_err(|e| match e {
2207 0 : layer_manager::Shutdown => {
2208 0 : super::storage_layer::layer::DownloadError::TimelineShutdown
2209 0 : }
2210 0 : })?
2211 : else {
2212 : return Ok(None);
2213 : };
2214 :
2215 : layer.download().await?;
2216 :
2217 : Ok(Some(true))
2218 : }
2219 :
2220 : /// Evict just one layer.
2221 : ///
2222 : /// Returns `Ok(None)` in the case where the layer could not be found by its `layer_file_name`.
2223 0 : pub(crate) async fn evict_layer(
2224 0 : &self,
2225 0 : layer_file_name: &LayerName,
2226 0 : ) -> anyhow::Result<Option<bool>> {
2227 0 : let _gate = self
2228 0 : .gate
2229 0 : .enter()
2230 0 : .map_err(|_| anyhow::anyhow!("Shutting down"))?;
2231 :
2232 0 : let Some(local_layer) = self.find_layer(layer_file_name).await? else {
2233 0 : return Ok(None);
2234 : };
2235 :
2236 : // curl has this by default
2237 0 : let timeout = std::time::Duration::from_secs(120);
2238 0 :
2239 0 : match local_layer.evict_and_wait(timeout).await {
2240 0 : Ok(()) => Ok(Some(true)),
2241 0 : Err(EvictionError::NotFound) => Ok(Some(false)),
2242 0 : Err(EvictionError::Downloaded) => Ok(Some(false)),
2243 0 : Err(EvictionError::Timeout) => Ok(Some(false)),
2244 : }
2245 0 : }
2246 :
2247 9606020 : fn should_roll(
2248 9606020 : &self,
2249 9606020 : layer_size: u64,
2250 9606020 : projected_layer_size: u64,
2251 9606020 : checkpoint_distance: u64,
2252 9606020 : projected_lsn: Lsn,
2253 9606020 : last_freeze_at: Lsn,
2254 9606020 : opened_at: Instant,
2255 9606020 : ) -> bool {
2256 9606020 : let distance = projected_lsn.widening_sub(last_freeze_at);
2257 9606020 :
2258 9606020 : // Rolling the open layer can be triggered by:
2259 9606020 : // 1. The distance from the last LSN we rolled at. This bounds the amount of WAL that
2260 9606020 : // the safekeepers need to store. For sharded tenants, we multiply by shard count to
2261 9606020 : // account for how writes are distributed across shards: we expect each node to consume
2262 9606020 : // 1/count of the LSN on average.
2263 9606020 : // 2. The size of the currently open layer.
2264 9606020 : // 3. The time since the last roll. It helps safekeepers to regard pageserver as caught
2265 9606020 : // up and suspend activity.
2266 9606020 : if distance >= checkpoint_distance as i128 * self.shard_identity.count.count() as i128 {
2267 0 : info!(
2268 0 : "Will roll layer at {} with layer size {} due to LSN distance ({})",
2269 : projected_lsn, layer_size, distance
2270 : );
2271 :
2272 0 : true
2273 9606020 : } else if projected_layer_size >= checkpoint_distance {
2274 : // NB: this check is relied upon by:
2275 160 : let _ = IndexEntry::validate_checkpoint_distance;
2276 160 : info!(
2277 0 : "Will roll layer at {} with layer size {} due to layer size ({})",
2278 : projected_lsn, layer_size, projected_layer_size
2279 : );
2280 :
2281 160 : true
2282 9605860 : } else if distance > 0 && opened_at.elapsed() >= self.get_checkpoint_timeout() {
2283 0 : info!(
2284 0 : "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})",
2285 0 : projected_lsn,
2286 0 : layer_size,
2287 0 : opened_at.elapsed()
2288 : );
2289 :
2290 0 : true
2291 : } else {
2292 9605860 : false
2293 : }
2294 9606020 : }
2295 : }
2296 :
2297 : /// Number of times we will compute partition within a checkpoint distance.
2298 : const REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE: u64 = 10;
2299 :
2300 : // Private functions
2301 : impl Timeline {
2302 24 : pub(crate) fn get_lsn_lease_length(&self) -> Duration {
2303 24 : let tenant_conf = self.tenant_conf.load();
2304 24 : tenant_conf
2305 24 : .tenant_conf
2306 24 : .lsn_lease_length
2307 24 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
2308 24 : }
2309 :
2310 0 : pub(crate) fn get_lsn_lease_length_for_ts(&self) -> Duration {
2311 0 : let tenant_conf = self.tenant_conf.load();
2312 0 : tenant_conf
2313 0 : .tenant_conf
2314 0 : .lsn_lease_length_for_ts
2315 0 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length_for_ts)
2316 0 : }
2317 :
2318 0 : pub(crate) fn is_gc_blocked_by_lsn_lease_deadline(&self) -> bool {
2319 0 : let tenant_conf = self.tenant_conf.load();
2320 0 : tenant_conf.is_gc_blocked_by_lsn_lease_deadline()
2321 0 : }
2322 :
2323 0 : pub(crate) fn get_lazy_slru_download(&self) -> bool {
2324 0 : let tenant_conf = self.tenant_conf.load();
2325 0 : tenant_conf
2326 0 : .tenant_conf
2327 0 : .lazy_slru_download
2328 0 : .unwrap_or(self.conf.default_tenant_conf.lazy_slru_download)
2329 0 : }
2330 :
2331 9609000 : fn get_checkpoint_distance(&self) -> u64 {
2332 9609000 : let tenant_conf = self.tenant_conf.load();
2333 9609000 : tenant_conf
2334 9609000 : .tenant_conf
2335 9609000 : .checkpoint_distance
2336 9609000 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
2337 9609000 : }
2338 :
2339 9605860 : fn get_checkpoint_timeout(&self) -> Duration {
2340 9605860 : let tenant_conf = self.tenant_conf.load();
2341 9605860 : tenant_conf
2342 9605860 : .tenant_conf
2343 9605860 : .checkpoint_timeout
2344 9605860 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
2345 9605860 : }
2346 :
2347 5016 : fn get_compaction_period(&self) -> Duration {
2348 5016 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2349 5016 : tenant_conf
2350 5016 : .compaction_period
2351 5016 : .unwrap_or(self.conf.default_tenant_conf.compaction_period)
2352 5016 : }
2353 :
2354 1328 : fn get_compaction_target_size(&self) -> u64 {
2355 1328 : let tenant_conf = self.tenant_conf.load();
2356 1328 : tenant_conf
2357 1328 : .tenant_conf
2358 1328 : .compaction_target_size
2359 1328 : .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
2360 1328 : }
2361 :
2362 3132 : fn get_compaction_threshold(&self) -> usize {
2363 3132 : let tenant_conf = self.tenant_conf.load();
2364 3132 : tenant_conf
2365 3132 : .tenant_conf
2366 3132 : .compaction_threshold
2367 3132 : .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
2368 3132 : }
2369 :
2370 3892 : pub(crate) fn get_rel_size_v2_enabled(&self) -> bool {
2371 3892 : let tenant_conf = self.tenant_conf.load();
2372 3892 : tenant_conf
2373 3892 : .tenant_conf
2374 3892 : .rel_size_v2_enabled
2375 3892 : .unwrap_or(self.conf.default_tenant_conf.rel_size_v2_enabled)
2376 3892 : }
2377 :
2378 56 : fn get_compaction_upper_limit(&self) -> usize {
2379 56 : let tenant_conf = self.tenant_conf.load();
2380 56 : tenant_conf
2381 56 : .tenant_conf
2382 56 : .compaction_upper_limit
2383 56 : .unwrap_or(self.conf.default_tenant_conf.compaction_upper_limit)
2384 56 : }
2385 :
2386 728 : pub fn get_compaction_l0_first(&self) -> bool {
2387 728 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2388 728 : tenant_conf
2389 728 : .compaction_l0_first
2390 728 : .unwrap_or(self.conf.default_tenant_conf.compaction_l0_first)
2391 728 : }
2392 :
2393 0 : pub fn get_compaction_l0_semaphore(&self) -> bool {
2394 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2395 0 : tenant_conf
2396 0 : .compaction_l0_semaphore
2397 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_l0_semaphore)
2398 0 : }
2399 :
2400 2508 : fn get_l0_flush_delay_threshold(&self) -> Option<usize> {
2401 : // Disable L0 flushes by default. This and compaction needs further tuning.
2402 : const DEFAULT_L0_FLUSH_DELAY_FACTOR: usize = 0; // TODO: default to e.g. 3
2403 :
2404 : // If compaction is disabled, don't delay.
2405 2508 : if self.get_compaction_period() == Duration::ZERO {
2406 2508 : return None;
2407 0 : }
2408 0 :
2409 0 : let compaction_threshold = self.get_compaction_threshold();
2410 0 : let tenant_conf = self.tenant_conf.load();
2411 0 : let l0_flush_delay_threshold = tenant_conf
2412 0 : .tenant_conf
2413 0 : .l0_flush_delay_threshold
2414 0 : .or(self.conf.default_tenant_conf.l0_flush_delay_threshold)
2415 0 : .unwrap_or(DEFAULT_L0_FLUSH_DELAY_FACTOR * compaction_threshold);
2416 0 :
2417 0 : // 0 disables backpressure.
2418 0 : if l0_flush_delay_threshold == 0 {
2419 0 : return None;
2420 0 : }
2421 0 :
2422 0 : // Clamp the flush delay threshold to the compaction threshold; it doesn't make sense to
2423 0 : // backpressure flushes below this.
2424 0 : // TODO: the tenant config should have validation to prevent this instead.
2425 0 : debug_assert!(l0_flush_delay_threshold >= compaction_threshold);
2426 0 : Some(max(l0_flush_delay_threshold, compaction_threshold))
2427 2508 : }
2428 :
2429 2508 : fn get_l0_flush_stall_threshold(&self) -> Option<usize> {
2430 : // Disable L0 stalls by default. In ingest benchmarks, we see image compaction take >10
2431 : // minutes, blocking L0 compaction, and we can't stall L0 flushes for that long.
2432 : const DEFAULT_L0_FLUSH_STALL_FACTOR: usize = 0; // TODO: default to e.g. 5
2433 :
2434 : // If compaction is disabled, don't stall.
2435 2508 : if self.get_compaction_period() == Duration::ZERO {
2436 2508 : return None;
2437 0 : }
2438 0 :
2439 0 : // If compaction is failing, don't stall and try to keep the tenant alive. This may not be a
2440 0 : // good idea: read amp can grow unbounded, leading to terrible performance, and we may take
2441 0 : // on unbounded compaction debt that can take a long time to fix once compaction comes back
2442 0 : // online. At least we'll delay flushes, slowing down the growth and buying some time.
2443 0 : if self.compaction_failed.load(AtomicOrdering::Relaxed) {
2444 0 : return None;
2445 0 : }
2446 0 :
2447 0 : let compaction_threshold = self.get_compaction_threshold();
2448 0 : let tenant_conf = self.tenant_conf.load();
2449 0 : let l0_flush_stall_threshold = tenant_conf
2450 0 : .tenant_conf
2451 0 : .l0_flush_stall_threshold
2452 0 : .or(self.conf.default_tenant_conf.l0_flush_stall_threshold);
2453 0 :
2454 0 : // Tests sometimes set compaction_threshold=1 to generate lots of layer files, and don't
2455 0 : // handle the 20-second compaction delay. Some (e.g. `test_backward_compatibility`) can't
2456 0 : // easily adjust the L0 backpressure settings, so just disable stalls in this case.
2457 0 : if cfg!(feature = "testing")
2458 0 : && compaction_threshold == 1
2459 0 : && l0_flush_stall_threshold.is_none()
2460 : {
2461 0 : return None;
2462 0 : }
2463 0 :
2464 0 : let l0_flush_stall_threshold = l0_flush_stall_threshold
2465 0 : .unwrap_or(DEFAULT_L0_FLUSH_STALL_FACTOR * compaction_threshold);
2466 0 :
2467 0 : // 0 disables backpressure.
2468 0 : if l0_flush_stall_threshold == 0 {
2469 0 : return None;
2470 0 : }
2471 0 :
2472 0 : // Clamp the flush stall threshold to the compaction threshold; it doesn't make sense to
2473 0 : // backpressure flushes below this.
2474 0 : // TODO: the tenant config should have validation to prevent this instead.
2475 0 : debug_assert!(l0_flush_stall_threshold >= compaction_threshold);
2476 0 : Some(max(l0_flush_stall_threshold, compaction_threshold))
2477 2508 : }
2478 :
2479 2348 : fn get_l0_flush_wait_upload(&self) -> bool {
2480 2348 : let tenant_conf = self.tenant_conf.load();
2481 2348 : tenant_conf
2482 2348 : .tenant_conf
2483 2348 : .l0_flush_wait_upload
2484 2348 : .unwrap_or(self.conf.default_tenant_conf.l0_flush_wait_upload)
2485 2348 : }
2486 :
2487 28 : fn get_image_creation_threshold(&self) -> usize {
2488 28 : let tenant_conf = self.tenant_conf.load();
2489 28 : tenant_conf
2490 28 : .tenant_conf
2491 28 : .image_creation_threshold
2492 28 : .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
2493 28 : }
2494 :
2495 728 : fn get_compaction_algorithm_settings(&self) -> CompactionAlgorithmSettings {
2496 728 : let tenant_conf = &self.tenant_conf.load();
2497 728 : tenant_conf
2498 728 : .tenant_conf
2499 728 : .compaction_algorithm
2500 728 : .as_ref()
2501 728 : .unwrap_or(&self.conf.default_tenant_conf.compaction_algorithm)
2502 728 : .clone()
2503 728 : }
2504 :
2505 0 : fn get_eviction_policy(&self) -> EvictionPolicy {
2506 0 : let tenant_conf = self.tenant_conf.load();
2507 0 : tenant_conf
2508 0 : .tenant_conf
2509 0 : .eviction_policy
2510 0 : .unwrap_or(self.conf.default_tenant_conf.eviction_policy)
2511 0 : }
2512 :
2513 896 : fn get_evictions_low_residence_duration_metric_threshold(
2514 896 : tenant_conf: &TenantConfOpt,
2515 896 : default_tenant_conf: &TenantConf,
2516 896 : ) -> Duration {
2517 896 : tenant_conf
2518 896 : .evictions_low_residence_duration_metric_threshold
2519 896 : .unwrap_or(default_tenant_conf.evictions_low_residence_duration_metric_threshold)
2520 896 : }
2521 :
2522 1140 : fn get_image_layer_creation_check_threshold(&self) -> u8 {
2523 1140 : let tenant_conf = self.tenant_conf.load();
2524 1140 : tenant_conf
2525 1140 : .tenant_conf
2526 1140 : .image_layer_creation_check_threshold
2527 1140 : .unwrap_or(
2528 1140 : self.conf
2529 1140 : .default_tenant_conf
2530 1140 : .image_layer_creation_check_threshold,
2531 1140 : )
2532 1140 : }
2533 :
2534 0 : fn get_image_creation_preempt_threshold(&self) -> usize {
2535 0 : let tenant_conf = self.tenant_conf.load();
2536 0 : tenant_conf
2537 0 : .tenant_conf
2538 0 : .image_creation_preempt_threshold
2539 0 : .unwrap_or(
2540 0 : self.conf
2541 0 : .default_tenant_conf
2542 0 : .image_creation_preempt_threshold,
2543 0 : )
2544 0 : }
2545 :
2546 : /// Resolve the effective WAL receiver protocol to use for this tenant.
2547 : ///
2548 : /// Priority order is:
2549 : /// 1. Tenant config override
2550 : /// 2. Default value for tenant config override
2551 : /// 3. Pageserver config override
2552 : /// 4. Pageserver config default
2553 0 : pub fn resolve_wal_receiver_protocol(&self) -> PostgresClientProtocol {
2554 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2555 0 : tenant_conf
2556 0 : .wal_receiver_protocol_override
2557 0 : .or(self.conf.default_tenant_conf.wal_receiver_protocol_override)
2558 0 : .unwrap_or(self.conf.wal_receiver_protocol)
2559 0 : }
2560 :
2561 0 : pub(super) fn tenant_conf_updated(&self, new_conf: &AttachedTenantConf) {
2562 0 : // NB: Most tenant conf options are read by background loops, so,
2563 0 : // changes will automatically be picked up.
2564 0 :
2565 0 : // The threshold is embedded in the metric. So, we need to update it.
2566 0 : {
2567 0 : let new_threshold = Self::get_evictions_low_residence_duration_metric_threshold(
2568 0 : &new_conf.tenant_conf,
2569 0 : &self.conf.default_tenant_conf,
2570 0 : );
2571 0 :
2572 0 : let tenant_id_str = self.tenant_shard_id.tenant_id.to_string();
2573 0 : let shard_id_str = format!("{}", self.tenant_shard_id.shard_slug());
2574 0 :
2575 0 : let timeline_id_str = self.timeline_id.to_string();
2576 0 :
2577 0 : self.remote_client.update_config(&new_conf.location);
2578 0 :
2579 0 : self.metrics
2580 0 : .evictions_with_low_residence_duration
2581 0 : .write()
2582 0 : .unwrap()
2583 0 : .change_threshold(
2584 0 : &tenant_id_str,
2585 0 : &shard_id_str,
2586 0 : &timeline_id_str,
2587 0 : new_threshold,
2588 0 : );
2589 0 : }
2590 0 : }
2591 :
2592 : /// Open a Timeline handle.
2593 : ///
2594 : /// Loads the metadata for the timeline into memory, but not the layer map.
2595 : #[allow(clippy::too_many_arguments)]
2596 896 : pub(super) fn new(
2597 896 : conf: &'static PageServerConf,
2598 896 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
2599 896 : metadata: &TimelineMetadata,
2600 896 : previous_heatmap: Option<PreviousHeatmap>,
2601 896 : ancestor: Option<Arc<Timeline>>,
2602 896 : timeline_id: TimelineId,
2603 896 : tenant_shard_id: TenantShardId,
2604 896 : generation: Generation,
2605 896 : shard_identity: ShardIdentity,
2606 896 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
2607 896 : resources: TimelineResources,
2608 896 : pg_version: u32,
2609 896 : state: TimelineState,
2610 896 : attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
2611 896 : create_idempotency: crate::tenant::CreateTimelineIdempotency,
2612 896 : cancel: CancellationToken,
2613 896 : ) -> Arc<Self> {
2614 896 : let disk_consistent_lsn = metadata.disk_consistent_lsn();
2615 896 : let (state, _) = watch::channel(state);
2616 896 :
2617 896 : let (layer_flush_start_tx, _) = tokio::sync::watch::channel((0, disk_consistent_lsn));
2618 896 : let (layer_flush_done_tx, _) = tokio::sync::watch::channel((0, Ok(())));
2619 896 :
2620 896 : let evictions_low_residence_duration_metric_threshold = {
2621 896 : let loaded_tenant_conf = tenant_conf.load();
2622 896 : Self::get_evictions_low_residence_duration_metric_threshold(
2623 896 : &loaded_tenant_conf.tenant_conf,
2624 896 : &conf.default_tenant_conf,
2625 896 : )
2626 : };
2627 :
2628 896 : if let Some(ancestor) = &ancestor {
2629 460 : let mut ancestor_gc_info = ancestor.gc_info.write().unwrap();
2630 460 : // If we construct an explicit timeline object, it's obviously not offloaded
2631 460 : let is_offloaded = MaybeOffloaded::No;
2632 460 : ancestor_gc_info.insert_child(timeline_id, metadata.ancestor_lsn(), is_offloaded);
2633 460 : }
2634 :
2635 896 : Arc::new_cyclic(|myself| {
2636 896 : let metrics = TimelineMetrics::new(
2637 896 : &tenant_shard_id,
2638 896 : &timeline_id,
2639 896 : crate::metrics::EvictionsWithLowResidenceDurationBuilder::new(
2640 896 : "mtime",
2641 896 : evictions_low_residence_duration_metric_threshold,
2642 896 : ),
2643 896 : );
2644 896 : let aux_file_metrics = metrics.aux_file_size_gauge.clone();
2645 :
2646 896 : let mut result = Timeline {
2647 896 : conf,
2648 896 : tenant_conf,
2649 896 : myself: myself.clone(),
2650 896 : timeline_id,
2651 896 : tenant_shard_id,
2652 896 : generation,
2653 896 : shard_identity,
2654 896 : pg_version,
2655 896 : layers: Default::default(),
2656 896 : gc_compaction_layer_update_lock: tokio::sync::RwLock::new(()),
2657 896 :
2658 896 : walredo_mgr,
2659 896 : walreceiver: Mutex::new(None),
2660 896 :
2661 896 : remote_client: Arc::new(resources.remote_client),
2662 896 :
2663 896 : // initialize in-memory 'last_record_lsn' from 'disk_consistent_lsn'.
2664 896 : last_record_lsn: SeqWait::new(RecordLsn {
2665 896 : last: disk_consistent_lsn,
2666 896 : prev: metadata.prev_record_lsn().unwrap_or(Lsn(0)),
2667 896 : }),
2668 896 : disk_consistent_lsn: AtomicLsn::new(disk_consistent_lsn.0),
2669 896 :
2670 896 : last_freeze_at: AtomicLsn::new(disk_consistent_lsn.0),
2671 896 : last_freeze_ts: RwLock::new(Instant::now()),
2672 896 :
2673 896 : loaded_at: (disk_consistent_lsn, SystemTime::now()),
2674 896 :
2675 896 : ancestor_timeline: ancestor,
2676 896 : ancestor_lsn: metadata.ancestor_lsn(),
2677 896 :
2678 896 : metrics,
2679 896 :
2680 896 : query_metrics: crate::metrics::SmgrQueryTimePerTimeline::new(
2681 896 : &tenant_shard_id,
2682 896 : &timeline_id,
2683 896 : resources.pagestream_throttle_metrics,
2684 896 : ),
2685 896 :
2686 7168 : directory_metrics: array::from_fn(|_| AtomicU64::new(0)),
2687 7168 : directory_metrics_inited: array::from_fn(|_| AtomicBool::new(false)),
2688 896 :
2689 896 : flush_loop_state: Mutex::new(FlushLoopState::NotStarted),
2690 896 :
2691 896 : layer_flush_start_tx,
2692 896 : layer_flush_done_tx,
2693 896 :
2694 896 : write_lock: tokio::sync::Mutex::new(None),
2695 896 :
2696 896 : gc_info: std::sync::RwLock::new(GcInfo::default()),
2697 896 :
2698 896 : last_image_layer_creation_status: ArcSwap::new(Arc::new(
2699 896 : LastImageLayerCreationStatus::default(),
2700 896 : )),
2701 896 :
2702 896 : applied_gc_cutoff_lsn: Rcu::new(metadata.latest_gc_cutoff_lsn()),
2703 896 : initdb_lsn: metadata.initdb_lsn(),
2704 896 :
2705 896 : current_logical_size: if disk_consistent_lsn.is_valid() {
2706 : // we're creating timeline data with some layer files existing locally,
2707 : // need to recalculate timeline's logical size based on data in the layers.
2708 468 : LogicalSize::deferred_initial(disk_consistent_lsn)
2709 : } else {
2710 : // we're creating timeline data without any layers existing locally,
2711 : // initial logical size is 0.
2712 428 : LogicalSize::empty_initial()
2713 : },
2714 :
2715 896 : partitioning: GuardArcSwap::new((
2716 896 : (KeyPartitioning::new(), KeyPartitioning::new().into_sparse()),
2717 896 : Lsn(0),
2718 896 : )),
2719 896 : repartition_threshold: 0,
2720 896 : last_image_layer_creation_check_at: AtomicLsn::new(0),
2721 896 : last_image_layer_creation_check_instant: Mutex::new(None),
2722 896 :
2723 896 : last_received_wal: Mutex::new(None),
2724 896 : rel_size_cache: RwLock::new(RelSizeCache {
2725 896 : complete_as_of: disk_consistent_lsn,
2726 896 : map: HashMap::new(),
2727 896 : }),
2728 896 :
2729 896 : download_all_remote_layers_task_info: RwLock::new(None),
2730 896 :
2731 896 : state,
2732 896 :
2733 896 : eviction_task_timeline_state: tokio::sync::Mutex::new(
2734 896 : EvictionTaskTimelineState::default(),
2735 896 : ),
2736 896 : delete_progress: TimelineDeleteProgress::default(),
2737 896 :
2738 896 : cancel,
2739 896 : gate: Gate::default(),
2740 896 :
2741 896 : compaction_lock: tokio::sync::Mutex::default(),
2742 896 : compaction_failed: AtomicBool::default(),
2743 896 : l0_compaction_trigger: resources.l0_compaction_trigger,
2744 896 : gc_lock: tokio::sync::Mutex::default(),
2745 896 :
2746 896 : standby_horizon: AtomicLsn::new(0),
2747 896 :
2748 896 : pagestream_throttle: resources.pagestream_throttle,
2749 896 :
2750 896 : aux_file_size_estimator: AuxFileSizeEstimator::new(aux_file_metrics),
2751 896 :
2752 896 : #[cfg(test)]
2753 896 : extra_test_dense_keyspace: ArcSwap::new(Arc::new(KeySpace::default())),
2754 896 :
2755 896 : l0_flush_global_state: resources.l0_flush_global_state,
2756 896 :
2757 896 : handles: Default::default(),
2758 896 :
2759 896 : attach_wal_lag_cooldown,
2760 896 :
2761 896 : create_idempotency,
2762 896 :
2763 896 : page_trace: Default::default(),
2764 896 :
2765 896 : previous_heatmap: ArcSwapOption::from_pointee(previous_heatmap),
2766 896 :
2767 896 : heatmap_layers_downloader: Mutex::new(None),
2768 896 : };
2769 896 :
2770 896 : result.repartition_threshold =
2771 896 : result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE;
2772 896 :
2773 896 : result
2774 896 : .metrics
2775 896 : .last_record_lsn_gauge
2776 896 : .set(disk_consistent_lsn.0 as i64);
2777 896 : result
2778 896 : })
2779 896 : }
2780 :
2781 1296 : pub(super) fn maybe_spawn_flush_loop(self: &Arc<Self>) {
2782 1296 : let Ok(guard) = self.gate.enter() else {
2783 0 : info!("cannot start flush loop when the timeline gate has already been closed");
2784 0 : return;
2785 : };
2786 1296 : let mut flush_loop_state = self.flush_loop_state.lock().unwrap();
2787 1296 : match *flush_loop_state {
2788 884 : FlushLoopState::NotStarted => (),
2789 : FlushLoopState::Running { .. } => {
2790 412 : info!(
2791 0 : "skipping attempt to start flush_loop twice {}/{}",
2792 0 : self.tenant_shard_id, self.timeline_id
2793 : );
2794 412 : return;
2795 : }
2796 : FlushLoopState::Exited => {
2797 0 : info!(
2798 0 : "ignoring attempt to restart exited flush_loop {}/{}",
2799 0 : self.tenant_shard_id, self.timeline_id
2800 : );
2801 0 : return;
2802 : }
2803 : }
2804 :
2805 884 : let layer_flush_start_rx = self.layer_flush_start_tx.subscribe();
2806 884 : let self_clone = Arc::clone(self);
2807 884 :
2808 884 : debug!("spawning flush loop");
2809 884 : *flush_loop_state = FlushLoopState::Running {
2810 884 : #[cfg(test)]
2811 884 : expect_initdb_optimization: false,
2812 884 : #[cfg(test)]
2813 884 : initdb_optimization_count: 0,
2814 884 : };
2815 884 : task_mgr::spawn(
2816 884 : task_mgr::BACKGROUND_RUNTIME.handle(),
2817 884 : task_mgr::TaskKind::LayerFlushTask,
2818 884 : self.tenant_shard_id,
2819 884 : Some(self.timeline_id),
2820 884 : "layer flush task",
2821 884 : async move {
2822 884 : let _guard = guard;
2823 884 : let background_ctx = RequestContext::todo_child(TaskKind::LayerFlushTask, DownloadBehavior::Error);
2824 884 : self_clone.flush_loop(layer_flush_start_rx, &background_ctx).await;
2825 20 : let mut flush_loop_state = self_clone.flush_loop_state.lock().unwrap();
2826 20 : assert!(matches!(*flush_loop_state, FlushLoopState::Running{..}));
2827 20 : *flush_loop_state = FlushLoopState::Exited;
2828 20 : Ok(())
2829 20 : }
2830 884 : .instrument(info_span!(parent: None, "layer flush task", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
2831 : );
2832 1296 : }
2833 :
2834 : /// Creates and starts the wal receiver.
2835 : ///
2836 : /// This function is expected to be called at most once per Timeline's lifecycle
2837 : /// when the timeline is activated.
2838 0 : fn launch_wal_receiver(
2839 0 : self: &Arc<Self>,
2840 0 : ctx: &RequestContext,
2841 0 : broker_client: BrokerClientChannel,
2842 0 : ) {
2843 0 : info!(
2844 0 : "launching WAL receiver for timeline {} of tenant {}",
2845 0 : self.timeline_id, self.tenant_shard_id
2846 : );
2847 :
2848 0 : let tenant_conf = self.tenant_conf.load();
2849 0 : let wal_connect_timeout = tenant_conf
2850 0 : .tenant_conf
2851 0 : .walreceiver_connect_timeout
2852 0 : .unwrap_or(self.conf.default_tenant_conf.walreceiver_connect_timeout);
2853 0 : let lagging_wal_timeout = tenant_conf
2854 0 : .tenant_conf
2855 0 : .lagging_wal_timeout
2856 0 : .unwrap_or(self.conf.default_tenant_conf.lagging_wal_timeout);
2857 0 : let max_lsn_wal_lag = tenant_conf
2858 0 : .tenant_conf
2859 0 : .max_lsn_wal_lag
2860 0 : .unwrap_or(self.conf.default_tenant_conf.max_lsn_wal_lag);
2861 0 :
2862 0 : let mut guard = self.walreceiver.lock().unwrap();
2863 0 : assert!(
2864 0 : guard.is_none(),
2865 0 : "multiple launches / re-launches of WAL receiver are not supported"
2866 : );
2867 0 : *guard = Some(WalReceiver::start(
2868 0 : Arc::clone(self),
2869 0 : WalReceiverConf {
2870 0 : protocol: self.resolve_wal_receiver_protocol(),
2871 0 : wal_connect_timeout,
2872 0 : lagging_wal_timeout,
2873 0 : max_lsn_wal_lag,
2874 0 : auth_token: crate::config::SAFEKEEPER_AUTH_TOKEN.get().cloned(),
2875 0 : availability_zone: self.conf.availability_zone.clone(),
2876 0 : ingest_batch_size: self.conf.ingest_batch_size,
2877 0 : },
2878 0 : broker_client,
2879 0 : ctx,
2880 0 : ));
2881 0 : }
2882 :
2883 : /// Initialize with an empty layer map. Used when creating a new timeline.
2884 884 : pub(super) fn init_empty_layer_map(&self, start_lsn: Lsn) {
2885 884 : let mut layers = self.layers.try_write().expect(
2886 884 : "in the context where we call this function, no other task has access to the object",
2887 884 : );
2888 884 : layers
2889 884 : .open_mut()
2890 884 : .expect("in this context the LayerManager must still be open")
2891 884 : .initialize_empty(Lsn(start_lsn.0));
2892 884 : }
2893 :
2894 : /// Scan the timeline directory, cleanup, populate the layer map, and schedule uploads for local-only
2895 : /// files.
2896 12 : pub(super) async fn load_layer_map(
2897 12 : &self,
2898 12 : disk_consistent_lsn: Lsn,
2899 12 : index_part: IndexPart,
2900 12 : ) -> anyhow::Result<()> {
2901 : use init::{Decision::*, Discovered, DismissedLayer};
2902 : use LayerName::*;
2903 :
2904 12 : let mut guard = self.layers.write().await;
2905 :
2906 12 : let timer = self.metrics.load_layer_map_histo.start_timer();
2907 12 :
2908 12 : // Scan timeline directory and create ImageLayerName and DeltaFilename
2909 12 : // structs representing all files on disk
2910 12 : let timeline_path = self
2911 12 : .conf
2912 12 : .timeline_path(&self.tenant_shard_id, &self.timeline_id);
2913 12 : let conf = self.conf;
2914 12 : let span = tracing::Span::current();
2915 12 :
2916 12 : // Copy to move into the task we're about to spawn
2917 12 : let this = self.myself.upgrade().expect("&self method holds the arc");
2918 :
2919 12 : let (loaded_layers, needs_cleanup, total_physical_size) = tokio::task::spawn_blocking({
2920 12 : move || {
2921 12 : let _g = span.entered();
2922 12 : let discovered = init::scan_timeline_dir(&timeline_path)?;
2923 12 : let mut discovered_layers = Vec::with_capacity(discovered.len());
2924 12 : let mut unrecognized_files = Vec::new();
2925 12 :
2926 12 : let mut path = timeline_path;
2927 :
2928 44 : for discovered in discovered {
2929 32 : let (name, kind) = match discovered {
2930 32 : Discovered::Layer(layer_file_name, local_metadata) => {
2931 32 : discovered_layers.push((layer_file_name, local_metadata));
2932 32 : continue;
2933 : }
2934 0 : Discovered::IgnoredBackup(path) => {
2935 0 : std::fs::remove_file(path)
2936 0 : .or_else(fs_ext::ignore_not_found)
2937 0 : .fatal_err("Removing .old file");
2938 0 : continue;
2939 : }
2940 0 : Discovered::Unknown(file_name) => {
2941 0 : // we will later error if there are any
2942 0 : unrecognized_files.push(file_name);
2943 0 : continue;
2944 : }
2945 0 : Discovered::Ephemeral(name) => (name, "old ephemeral file"),
2946 0 : Discovered::Temporary(name) => (name, "temporary timeline file"),
2947 0 : Discovered::TemporaryDownload(name) => (name, "temporary download"),
2948 : };
2949 0 : path.push(Utf8Path::new(&name));
2950 0 : init::cleanup(&path, kind)?;
2951 0 : path.pop();
2952 : }
2953 :
2954 12 : if !unrecognized_files.is_empty() {
2955 : // assume that if there are any there are many many.
2956 0 : let n = unrecognized_files.len();
2957 0 : let first = &unrecognized_files[..n.min(10)];
2958 0 : anyhow::bail!(
2959 0 : "unrecognized files in timeline dir (total {n}), first 10: {first:?}"
2960 0 : );
2961 12 : }
2962 12 :
2963 12 : let decided = init::reconcile(discovered_layers, &index_part, disk_consistent_lsn);
2964 12 :
2965 12 : let mut loaded_layers = Vec::new();
2966 12 : let mut needs_cleanup = Vec::new();
2967 12 : let mut total_physical_size = 0;
2968 :
2969 44 : for (name, decision) in decided {
2970 32 : let decision = match decision {
2971 32 : Ok(decision) => decision,
2972 0 : Err(DismissedLayer::Future { local }) => {
2973 0 : if let Some(local) = local {
2974 0 : init::cleanup_future_layer(
2975 0 : &local.local_path,
2976 0 : &name,
2977 0 : disk_consistent_lsn,
2978 0 : )?;
2979 0 : }
2980 0 : needs_cleanup.push(name);
2981 0 : continue;
2982 : }
2983 0 : Err(DismissedLayer::LocalOnly(local)) => {
2984 0 : init::cleanup_local_only_file(&name, &local)?;
2985 : // this file never existed remotely, we will have to do rework
2986 0 : continue;
2987 : }
2988 0 : Err(DismissedLayer::BadMetadata(local)) => {
2989 0 : init::cleanup_local_file_for_remote(&local)?;
2990 : // this file never existed remotely, we will have to do rework
2991 0 : continue;
2992 : }
2993 : };
2994 :
2995 32 : match &name {
2996 24 : Delta(d) => assert!(d.lsn_range.end <= disk_consistent_lsn + 1),
2997 8 : Image(i) => assert!(i.lsn <= disk_consistent_lsn),
2998 : }
2999 :
3000 32 : tracing::debug!(layer=%name, ?decision, "applied");
3001 :
3002 32 : let layer = match decision {
3003 32 : Resident { local, remote } => {
3004 32 : total_physical_size += local.file_size;
3005 32 : Layer::for_resident(conf, &this, local.local_path, name, remote)
3006 32 : .drop_eviction_guard()
3007 : }
3008 0 : Evicted(remote) => Layer::for_evicted(conf, &this, name, remote),
3009 : };
3010 :
3011 32 : loaded_layers.push(layer);
3012 : }
3013 12 : Ok((loaded_layers, needs_cleanup, total_physical_size))
3014 12 : }
3015 12 : })
3016 12 : .await
3017 12 : .map_err(anyhow::Error::new)
3018 12 : .and_then(|x| x)?;
3019 :
3020 12 : let num_layers = loaded_layers.len();
3021 12 :
3022 12 : guard
3023 12 : .open_mut()
3024 12 : .expect("layermanager must be open during init")
3025 12 : .initialize_local_layers(loaded_layers, disk_consistent_lsn + 1);
3026 12 :
3027 12 : self.remote_client
3028 12 : .schedule_layer_file_deletion(&needs_cleanup)?;
3029 12 : self.remote_client
3030 12 : .schedule_index_upload_for_file_changes()?;
3031 : // This barrier orders above DELETEs before any later operations.
3032 : // This is critical because code executing after the barrier might
3033 : // create again objects with the same key that we just scheduled for deletion.
3034 : // For example, if we just scheduled deletion of an image layer "from the future",
3035 : // later compaction might run again and re-create the same image layer.
3036 : // "from the future" here means an image layer whose LSN is > IndexPart::disk_consistent_lsn.
3037 : // "same" here means same key range and LSN.
3038 : //
3039 : // Without a barrier between above DELETEs and the re-creation's PUTs,
3040 : // the upload queue may execute the PUT first, then the DELETE.
3041 : // In our example, we will end up with an IndexPart referencing a non-existent object.
3042 : //
3043 : // 1. a future image layer is created and uploaded
3044 : // 2. ps restart
3045 : // 3. the future layer from (1) is deleted during load layer map
3046 : // 4. image layer is re-created and uploaded
3047 : // 5. deletion queue would like to delete (1) but actually deletes (4)
3048 : // 6. delete by name works as expected, but it now deletes the wrong (later) version
3049 : //
3050 : // See https://github.com/neondatabase/neon/issues/5878
3051 : //
3052 : // NB: generation numbers naturally protect against this because they disambiguate
3053 : // (1) and (4)
3054 : // TODO: this is basically a no-op now, should we remove it?
3055 12 : self.remote_client.schedule_barrier()?;
3056 : // Tenant::create_timeline will wait for these uploads to happen before returning, or
3057 : // on retry.
3058 :
3059 : // Now that we have the full layer map, we may calculate the visibility of layers within it (a global scan)
3060 12 : drop(guard); // drop write lock, update_layer_visibility will take a read lock.
3061 12 : self.update_layer_visibility().await?;
3062 :
3063 12 : info!(
3064 0 : "loaded layer map with {} layers at {}, total physical size: {}",
3065 : num_layers, disk_consistent_lsn, total_physical_size
3066 : );
3067 :
3068 12 : timer.stop_and_record();
3069 12 : Ok(())
3070 12 : }
3071 :
3072 : /// Retrieve current logical size of the timeline.
3073 : ///
3074 : /// The size could be lagging behind the actual number, in case
3075 : /// the initial size calculation has not been run (gets triggered on the first size access).
3076 : ///
3077 : /// return size and boolean flag that shows if the size is exact
3078 0 : pub(crate) fn get_current_logical_size(
3079 0 : self: &Arc<Self>,
3080 0 : priority: GetLogicalSizePriority,
3081 0 : ctx: &RequestContext,
3082 0 : ) -> logical_size::CurrentLogicalSize {
3083 0 : if !self.tenant_shard_id.is_shard_zero() {
3084 : // Logical size is only accurately maintained on shard zero: when called elsewhere, for example
3085 : // when HTTP API is serving a GET for timeline zero, return zero
3086 0 : return logical_size::CurrentLogicalSize::Approximate(logical_size::Approximate::zero());
3087 0 : }
3088 0 :
3089 0 : let current_size = self.current_logical_size.current_size();
3090 0 : debug!("Current size: {current_size:?}");
3091 :
3092 0 : match (current_size.accuracy(), priority) {
3093 0 : (logical_size::Accuracy::Exact, _) => (), // nothing to do
3094 0 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::Background) => {
3095 0 : // background task will eventually deliver an exact value, we're in no rush
3096 0 : }
3097 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::User) => {
3098 : // background task is not ready, but user is asking for it now;
3099 : // => make the background task skip the line
3100 : // (The alternative would be to calculate the size here, but,
3101 : // it can actually take a long time if the user has a lot of rels.
3102 : // And we'll inevitable need it again; So, let the background task do the work.)
3103 0 : match self
3104 0 : .current_logical_size
3105 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
3106 0 : .get()
3107 : {
3108 0 : Some(cancel) => cancel.cancel(),
3109 : None => {
3110 0 : match self.current_state() {
3111 0 : TimelineState::Broken { .. } | TimelineState::Stopping => {
3112 0 : // Can happen when timeline detail endpoint is used when deletion is ongoing (or its broken).
3113 0 : // Don't make noise.
3114 0 : }
3115 : TimelineState::Loading => {
3116 : // Import does not return an activated timeline.
3117 0 : info!("discarding priority boost for logical size calculation because timeline is not yet active");
3118 : }
3119 : TimelineState::Active => {
3120 : // activation should be setting the once cell
3121 0 : warn!("unexpected: cancel_wait_for_background_loop_concurrency_limit_semaphore not set, priority-boosting of logical size calculation will not work");
3122 0 : debug_assert!(false);
3123 : }
3124 : }
3125 : }
3126 : }
3127 : }
3128 : }
3129 :
3130 0 : if let CurrentLogicalSize::Approximate(_) = ¤t_size {
3131 0 : if ctx.task_kind() == TaskKind::WalReceiverConnectionHandler {
3132 0 : let first = self
3133 0 : .current_logical_size
3134 0 : .did_return_approximate_to_walreceiver
3135 0 : .compare_exchange(
3136 0 : false,
3137 0 : true,
3138 0 : AtomicOrdering::Relaxed,
3139 0 : AtomicOrdering::Relaxed,
3140 0 : )
3141 0 : .is_ok();
3142 0 : if first {
3143 0 : crate::metrics::initial_logical_size::TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE.inc();
3144 0 : }
3145 0 : }
3146 0 : }
3147 :
3148 0 : current_size
3149 0 : }
3150 :
3151 0 : fn spawn_initial_logical_size_computation_task(self: &Arc<Self>, ctx: &RequestContext) {
3152 0 : let Some(initial_part_end) = self.current_logical_size.initial_part_end else {
3153 : // nothing to do for freshly created timelines;
3154 0 : assert_eq!(
3155 0 : self.current_logical_size.current_size().accuracy(),
3156 0 : logical_size::Accuracy::Exact,
3157 0 : );
3158 0 : self.current_logical_size.initialized.add_permits(1);
3159 0 : return;
3160 : };
3161 :
3162 0 : let cancel_wait_for_background_loop_concurrency_limit_semaphore = CancellationToken::new();
3163 0 : let token = cancel_wait_for_background_loop_concurrency_limit_semaphore.clone();
3164 0 : self.current_logical_size
3165 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore.set(token)
3166 0 : .expect("initial logical size calculation task must be spawned exactly once per Timeline object");
3167 0 :
3168 0 : let self_clone = Arc::clone(self);
3169 0 : let background_ctx = ctx.detached_child(
3170 0 : TaskKind::InitialLogicalSizeCalculation,
3171 0 : DownloadBehavior::Download,
3172 0 : );
3173 0 : task_mgr::spawn(
3174 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
3175 0 : task_mgr::TaskKind::InitialLogicalSizeCalculation,
3176 0 : self.tenant_shard_id,
3177 0 : Some(self.timeline_id),
3178 0 : "initial size calculation",
3179 : // NB: don't log errors here, task_mgr will do that.
3180 0 : async move {
3181 0 : self_clone
3182 0 : .initial_logical_size_calculation_task(
3183 0 : initial_part_end,
3184 0 : cancel_wait_for_background_loop_concurrency_limit_semaphore,
3185 0 : background_ctx,
3186 0 : )
3187 0 : .await;
3188 0 : Ok(())
3189 0 : }
3190 0 : .instrument(info_span!(parent: None, "initial_size_calculation", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id)),
3191 : );
3192 0 : }
3193 :
3194 : /// # Cancellation
3195 : ///
3196 : /// This method is sensitive to `Timeline::cancel`.
3197 : ///
3198 : /// It is _not_ sensitive to task_mgr::shutdown_token().
3199 : ///
3200 : /// # Cancel-Safety
3201 : ///
3202 : /// It does Timeline IO, hence this should be polled to completion because
3203 : /// we could be leaving in-flight IOs behind, which is safe, but annoying
3204 : /// to reason about.
3205 0 : async fn initial_logical_size_calculation_task(
3206 0 : self: Arc<Self>,
3207 0 : initial_part_end: Lsn,
3208 0 : skip_concurrency_limiter: CancellationToken,
3209 0 : background_ctx: RequestContext,
3210 0 : ) {
3211 0 : scopeguard::defer! {
3212 0 : // Irrespective of the outcome of this operation, we should unblock anyone waiting for it.
3213 0 : self.current_logical_size.initialized.add_permits(1);
3214 0 : }
3215 0 :
3216 0 : let try_once = |attempt: usize| {
3217 0 : let background_ctx = &background_ctx;
3218 0 : let self_ref = &self;
3219 0 : let skip_concurrency_limiter = &skip_concurrency_limiter;
3220 0 : async move {
3221 0 : let wait_for_permit = super::tasks::acquire_concurrency_permit(
3222 0 : BackgroundLoopKind::InitialLogicalSizeCalculation,
3223 0 : background_ctx,
3224 0 : );
3225 :
3226 : use crate::metrics::initial_logical_size::StartCircumstances;
3227 0 : let (_maybe_permit, circumstances) = tokio::select! {
3228 0 : permit = wait_for_permit => {
3229 0 : (Some(permit), StartCircumstances::AfterBackgroundTasksRateLimit)
3230 : }
3231 0 : _ = self_ref.cancel.cancelled() => {
3232 0 : return Err(CalculateLogicalSizeError::Cancelled);
3233 : }
3234 0 : () = skip_concurrency_limiter.cancelled() => {
3235 : // Some action that is part of a end user interaction requested logical size
3236 : // => break out of the rate limit
3237 : // TODO: ideally we'd not run on BackgroundRuntime but the requester's runtime;
3238 : // but then again what happens if they cancel; also, we should just be using
3239 : // one runtime across the entire process, so, let's leave this for now.
3240 0 : (None, StartCircumstances::SkippedConcurrencyLimiter)
3241 : }
3242 : };
3243 :
3244 0 : let metrics_guard = if attempt == 1 {
3245 0 : crate::metrics::initial_logical_size::START_CALCULATION.first(circumstances)
3246 : } else {
3247 0 : crate::metrics::initial_logical_size::START_CALCULATION.retry(circumstances)
3248 : };
3249 :
3250 0 : let io_concurrency = IoConcurrency::spawn_from_conf(
3251 0 : self_ref.conf,
3252 0 : self_ref
3253 0 : .gate
3254 0 : .enter()
3255 0 : .map_err(|_| CalculateLogicalSizeError::Cancelled)?,
3256 : );
3257 :
3258 0 : let calculated_size = self_ref
3259 0 : .logical_size_calculation_task(
3260 0 : initial_part_end,
3261 0 : LogicalSizeCalculationCause::Initial,
3262 0 : background_ctx,
3263 0 : )
3264 0 : .await?;
3265 :
3266 0 : self_ref
3267 0 : .trigger_aux_file_size_computation(
3268 0 : initial_part_end,
3269 0 : background_ctx,
3270 0 : io_concurrency,
3271 0 : )
3272 0 : .await?;
3273 :
3274 : // TODO: add aux file size to logical size
3275 :
3276 0 : Ok((calculated_size, metrics_guard))
3277 0 : }
3278 0 : };
3279 :
3280 0 : let retrying = async {
3281 0 : let mut attempt = 0;
3282 : loop {
3283 0 : attempt += 1;
3284 0 :
3285 0 : match try_once(attempt).await {
3286 0 : Ok(res) => return ControlFlow::Continue(res),
3287 0 : Err(CalculateLogicalSizeError::Cancelled) => return ControlFlow::Break(()),
3288 : Err(
3289 0 : e @ (CalculateLogicalSizeError::Decode(_)
3290 0 : | CalculateLogicalSizeError::PageRead(_)),
3291 0 : ) => {
3292 0 : warn!(attempt, "initial size calculation failed: {e:?}");
3293 : // exponential back-off doesn't make sense at these long intervals;
3294 : // use fixed retry interval with generous jitter instead
3295 0 : let sleep_duration = Duration::from_secs(
3296 0 : u64::try_from(
3297 0 : // 1hour base
3298 0 : (60_i64 * 60_i64)
3299 0 : // 10min jitter
3300 0 : + rand::thread_rng().gen_range(-10 * 60..10 * 60),
3301 0 : )
3302 0 : .expect("10min < 1hour"),
3303 0 : );
3304 0 : tokio::select! {
3305 0 : _ = tokio::time::sleep(sleep_duration) => {}
3306 0 : _ = self.cancel.cancelled() => return ControlFlow::Break(()),
3307 : }
3308 : }
3309 : }
3310 : }
3311 0 : };
3312 :
3313 0 : let (calculated_size, metrics_guard) = match retrying.await {
3314 0 : ControlFlow::Continue(calculated_size) => calculated_size,
3315 0 : ControlFlow::Break(()) => return,
3316 : };
3317 :
3318 : // we cannot query current_logical_size.current_size() to know the current
3319 : // *negative* value, only truncated to u64.
3320 0 : let added = self
3321 0 : .current_logical_size
3322 0 : .size_added_after_initial
3323 0 : .load(AtomicOrdering::Relaxed);
3324 0 :
3325 0 : let sum = calculated_size.saturating_add_signed(added);
3326 0 :
3327 0 : // set the gauge value before it can be set in `update_current_logical_size`.
3328 0 : self.metrics.current_logical_size_gauge.set(sum);
3329 0 :
3330 0 : self.current_logical_size
3331 0 : .initial_logical_size
3332 0 : .set((calculated_size, metrics_guard.calculation_result_saved()))
3333 0 : .ok()
3334 0 : .expect("only this task sets it");
3335 0 : }
3336 :
3337 0 : pub(crate) fn spawn_ondemand_logical_size_calculation(
3338 0 : self: &Arc<Self>,
3339 0 : lsn: Lsn,
3340 0 : cause: LogicalSizeCalculationCause,
3341 0 : ctx: RequestContext,
3342 0 : ) -> oneshot::Receiver<Result<u64, CalculateLogicalSizeError>> {
3343 0 : let (sender, receiver) = oneshot::channel();
3344 0 : let self_clone = Arc::clone(self);
3345 0 : // XXX if our caller loses interest, i.e., ctx is cancelled,
3346 0 : // we should stop the size calculation work and return an error.
3347 0 : // That would require restructuring this function's API to
3348 0 : // return the result directly, instead of a Receiver for the result.
3349 0 : let ctx = ctx.detached_child(
3350 0 : TaskKind::OndemandLogicalSizeCalculation,
3351 0 : DownloadBehavior::Download,
3352 0 : );
3353 0 : task_mgr::spawn(
3354 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
3355 0 : task_mgr::TaskKind::OndemandLogicalSizeCalculation,
3356 0 : self.tenant_shard_id,
3357 0 : Some(self.timeline_id),
3358 0 : "ondemand logical size calculation",
3359 0 : async move {
3360 0 : let res = self_clone
3361 0 : .logical_size_calculation_task(lsn, cause, &ctx)
3362 0 : .await;
3363 0 : let _ = sender.send(res).ok();
3364 0 : Ok(()) // Receiver is responsible for handling errors
3365 0 : }
3366 0 : .in_current_span(),
3367 0 : );
3368 0 : receiver
3369 0 : }
3370 :
3371 : #[instrument(skip_all)]
3372 : async fn logical_size_calculation_task(
3373 : self: &Arc<Self>,
3374 : lsn: Lsn,
3375 : cause: LogicalSizeCalculationCause,
3376 : ctx: &RequestContext,
3377 : ) -> Result<u64, CalculateLogicalSizeError> {
3378 : crate::span::debug_assert_current_span_has_tenant_and_timeline_id();
3379 : // We should never be calculating logical sizes on shard !=0, because these shards do not have
3380 : // accurate relation sizes, and they do not emit consumption metrics.
3381 : debug_assert!(self.tenant_shard_id.is_shard_zero());
3382 :
3383 : let guard = self
3384 : .gate
3385 : .enter()
3386 0 : .map_err(|_| CalculateLogicalSizeError::Cancelled)?;
3387 :
3388 : self.calculate_logical_size(lsn, cause, &guard, ctx).await
3389 : }
3390 :
3391 : /// Calculate the logical size of the database at the latest LSN.
3392 : ///
3393 : /// NOTE: counted incrementally, includes ancestors. This can be a slow operation,
3394 : /// especially if we need to download remote layers.
3395 0 : async fn calculate_logical_size(
3396 0 : &self,
3397 0 : up_to_lsn: Lsn,
3398 0 : cause: LogicalSizeCalculationCause,
3399 0 : _guard: &GateGuard,
3400 0 : ctx: &RequestContext,
3401 0 : ) -> Result<u64, CalculateLogicalSizeError> {
3402 0 : info!(
3403 0 : "Calculating logical size for timeline {} at {}",
3404 : self.timeline_id, up_to_lsn
3405 : );
3406 :
3407 0 : if let Err(()) = pausable_failpoint!("timeline-calculate-logical-size-pause", &self.cancel)
3408 : {
3409 0 : return Err(CalculateLogicalSizeError::Cancelled);
3410 0 : }
3411 :
3412 : // See if we've already done the work for initial size calculation.
3413 : // This is a short-cut for timelines that are mostly unused.
3414 0 : if let Some(size) = self.current_logical_size.initialized_size(up_to_lsn) {
3415 0 : return Ok(size);
3416 0 : }
3417 0 : let storage_time_metrics = match cause {
3418 : LogicalSizeCalculationCause::Initial
3419 : | LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize
3420 0 : | LogicalSizeCalculationCause::TenantSizeHandler => &self.metrics.logical_size_histo,
3421 : LogicalSizeCalculationCause::EvictionTaskImitation => {
3422 0 : &self.metrics.imitate_logical_size_histo
3423 : }
3424 : };
3425 0 : let timer = storage_time_metrics.start_timer();
3426 0 : let logical_size = self
3427 0 : .get_current_logical_size_non_incremental(up_to_lsn, ctx)
3428 0 : .await?;
3429 0 : debug!("calculated logical size: {logical_size}");
3430 0 : timer.stop_and_record();
3431 0 : Ok(logical_size)
3432 0 : }
3433 :
3434 : /// Update current logical size, adding `delta' to the old value.
3435 541140 : fn update_current_logical_size(&self, delta: i64) {
3436 541140 : let logical_size = &self.current_logical_size;
3437 541140 : logical_size.increment_size(delta);
3438 541140 :
3439 541140 : // Also set the value in the prometheus gauge. Note that
3440 541140 : // there is a race condition here: if this is is called by two
3441 541140 : // threads concurrently, the prometheus gauge might be set to
3442 541140 : // one value while current_logical_size is set to the
3443 541140 : // other.
3444 541140 : match logical_size.current_size() {
3445 541140 : CurrentLogicalSize::Exact(ref new_current_size) => self
3446 541140 : .metrics
3447 541140 : .current_logical_size_gauge
3448 541140 : .set(new_current_size.into()),
3449 0 : CurrentLogicalSize::Approximate(_) => {
3450 0 : // don't update the gauge yet, this allows us not to update the gauge back and
3451 0 : // forth between the initial size calculation task.
3452 0 : }
3453 : }
3454 541140 : }
3455 :
3456 5948 : pub(crate) fn update_directory_entries_count(&self, kind: DirectoryKind, count: MetricsUpdate) {
3457 5948 : // TODO: this directory metrics is not correct -- we could have multiple reldirs in the system
3458 5948 : // for each of the database, but we only store one value, and therefore each pgdirmodification
3459 5948 : // would overwrite the previous value if they modify different databases.
3460 5948 :
3461 5948 : match count {
3462 2104 : MetricsUpdate::Set(count) => {
3463 2104 : self.directory_metrics[kind.offset()].store(count, AtomicOrdering::Relaxed);
3464 2104 : self.directory_metrics_inited[kind.offset()].store(true, AtomicOrdering::Relaxed);
3465 2104 : }
3466 3840 : MetricsUpdate::Add(count) => {
3467 3840 : // TODO: these operations are not atomic; but we only have one writer to the metrics, so
3468 3840 : // it's fine.
3469 3840 : if self.directory_metrics_inited[kind.offset()].load(AtomicOrdering::Relaxed) {
3470 3840 : // The metrics has been initialized with `MetricsUpdate::Set` before, so we can add/sub
3471 3840 : // the value reliably.
3472 3840 : self.directory_metrics[kind.offset()].fetch_add(count, AtomicOrdering::Relaxed);
3473 3840 : }
3474 : // Otherwise, ignore this update
3475 : }
3476 4 : MetricsUpdate::Sub(count) => {
3477 4 : // TODO: these operations are not atomic; but we only have one writer to the metrics, so
3478 4 : // it's fine.
3479 4 : if self.directory_metrics_inited[kind.offset()].load(AtomicOrdering::Relaxed) {
3480 4 : // The metrics has been initialized with `MetricsUpdate::Set` before.
3481 4 : // The operation could overflow so we need to normalize the value.
3482 4 : let prev_val =
3483 4 : self.directory_metrics[kind.offset()].load(AtomicOrdering::Relaxed);
3484 4 : let res = prev_val.saturating_sub(count);
3485 4 : self.directory_metrics[kind.offset()].store(res, AtomicOrdering::Relaxed);
3486 4 : }
3487 : // Otherwise, ignore this update
3488 : }
3489 : };
3490 :
3491 : // TODO: remove this, there's no place in the code that updates this aux metrics.
3492 5948 : let aux_metric =
3493 5948 : self.directory_metrics[DirectoryKind::AuxFiles.offset()].load(AtomicOrdering::Relaxed);
3494 5948 :
3495 5948 : let sum_of_entries = self
3496 5948 : .directory_metrics
3497 5948 : .iter()
3498 47584 : .map(|v| v.load(AtomicOrdering::Relaxed))
3499 5948 : .sum();
3500 : // Set a high general threshold and a lower threshold for the auxiliary files,
3501 : // as we can have large numbers of relations in the db directory.
3502 : const SUM_THRESHOLD: u64 = 5000;
3503 : const AUX_THRESHOLD: u64 = 1000;
3504 5948 : if sum_of_entries >= SUM_THRESHOLD || aux_metric >= AUX_THRESHOLD {
3505 0 : self.metrics
3506 0 : .directory_entries_count_gauge
3507 0 : .set(sum_of_entries);
3508 5948 : } else if let Some(metric) = Lazy::get(&self.metrics.directory_entries_count_gauge) {
3509 0 : metric.set(sum_of_entries);
3510 5948 : }
3511 5948 : }
3512 :
3513 0 : async fn find_layer(
3514 0 : &self,
3515 0 : layer_name: &LayerName,
3516 0 : ) -> Result<Option<Layer>, layer_manager::Shutdown> {
3517 0 : let guard = self.layers.read().await;
3518 0 : let layer = guard
3519 0 : .layer_map()?
3520 0 : .iter_historic_layers()
3521 0 : .find(|l| &l.layer_name() == layer_name)
3522 0 : .map(|found| guard.get_from_desc(&found));
3523 0 : Ok(layer)
3524 0 : }
3525 :
3526 : /// The timeline heatmap is a hint to secondary locations from the primary location,
3527 : /// indicating which layers are currently on-disk on the primary.
3528 : ///
3529 : /// None is returned if the Timeline is in a state where uploading a heatmap
3530 : /// doesn't make sense, such as shutting down or initializing. The caller
3531 : /// should treat this as a cue to simply skip doing any heatmap uploading
3532 : /// for this timeline.
3533 32 : pub(crate) async fn generate_heatmap(&self) -> Option<HeatMapTimeline> {
3534 32 : if !self.is_active() {
3535 0 : return None;
3536 32 : }
3537 :
3538 32 : let guard = self.layers.read().await;
3539 :
3540 : // Firstly, if there's any heatmap left over from when this location
3541 : // was a secondary, take that into account. Keep layers that are:
3542 : // * present in the layer map
3543 : // * visible
3544 : // * non-resident
3545 : // * not evicted since we read the heatmap
3546 : //
3547 : // Without this, a new cold, attached location would clobber the previous
3548 : // heatamp.
3549 32 : let previous_heatmap = self.previous_heatmap.load();
3550 32 : let visible_non_resident = match previous_heatmap.as_deref() {
3551 24 : Some(PreviousHeatmap::Active { heatmap, read_at }) => {
3552 92 : Some(heatmap.layers.iter().filter_map(|hl| {
3553 92 : let desc: PersistentLayerDesc = hl.name.clone().into();
3554 92 : let layer = guard.try_get_from_key(&desc.key())?;
3555 :
3556 92 : if layer.visibility() == LayerVisibilityHint::Covered {
3557 0 : return None;
3558 92 : }
3559 92 :
3560 92 : if layer.is_likely_resident() {
3561 40 : return None;
3562 52 : }
3563 52 :
3564 52 : if layer.last_evicted_at().happened_after(*read_at) {
3565 12 : return None;
3566 40 : }
3567 40 :
3568 40 : Some((desc, hl.metadata.clone(), hl.access_time))
3569 92 : }))
3570 : }
3571 0 : Some(PreviousHeatmap::Obsolete) => None,
3572 8 : None => None,
3573 : };
3574 :
3575 : // Secondly, all currently visible, resident layers are included.
3576 72 : let resident = guard.likely_resident_layers().filter_map(|layer| {
3577 72 : match layer.visibility() {
3578 : LayerVisibilityHint::Visible => {
3579 : // Layer is visible to one or more read LSNs: elegible for inclusion in layer map
3580 68 : let last_activity_ts = layer.latest_activity();
3581 68 : Some((
3582 68 : layer.layer_desc().clone(),
3583 68 : layer.metadata(),
3584 68 : last_activity_ts,
3585 68 : ))
3586 : }
3587 : LayerVisibilityHint::Covered => {
3588 : // Layer is resident but unlikely to be read: not elegible for inclusion in heatmap.
3589 4 : None
3590 : }
3591 : }
3592 72 : });
3593 :
3594 32 : let mut layers = match visible_non_resident {
3595 24 : Some(non_resident) => {
3596 24 : let mut non_resident = non_resident.peekable();
3597 24 : if non_resident.peek().is_none() {
3598 8 : self.previous_heatmap
3599 8 : .store(Some(PreviousHeatmap::Obsolete.into()));
3600 16 : }
3601 :
3602 24 : non_resident.chain(resident).collect::<Vec<_>>()
3603 : }
3604 8 : None => resident.collect::<Vec<_>>(),
3605 : };
3606 :
3607 : // Sort layers in order of which to download first. For a large set of layers to download, we
3608 : // want to prioritize those layers which are most likely to still be in the resident many minutes
3609 : // or hours later:
3610 : // - Download L0s last, because they churn the fastest: L0s on a fast-writing tenant might
3611 : // only exist for a few minutes before being compacted into L1s.
3612 : // - For L1 & image layers, download most recent LSNs first: the older the LSN, the sooner
3613 : // the layer is likely to be covered by an image layer during compaction.
3614 244 : layers.sort_by_key(|(desc, _meta, _atime)| {
3615 244 : std::cmp::Reverse((
3616 244 : !LayerMap::is_l0(&desc.key_range, desc.is_delta),
3617 244 : desc.lsn_range.end,
3618 244 : ))
3619 244 : });
3620 32 :
3621 32 : let layers = layers
3622 32 : .into_iter()
3623 108 : .map(|(desc, meta, atime)| HeatMapLayer::new(desc.layer_name(), meta, atime))
3624 32 : .collect();
3625 32 :
3626 32 : Some(HeatMapTimeline::new(self.timeline_id, layers))
3627 32 : }
3628 :
3629 : /// Returns true if the given lsn is or was an ancestor branchpoint.
3630 0 : pub(crate) fn is_ancestor_lsn(&self, lsn: Lsn) -> bool {
3631 0 : // upon timeline detach, we set the ancestor_lsn to Lsn::INVALID and the store the original
3632 0 : // branchpoint in the value in IndexPart::lineage
3633 0 : self.ancestor_lsn == lsn
3634 0 : || (self.ancestor_lsn == Lsn::INVALID
3635 0 : && self.remote_client.is_previous_ancestor_lsn(lsn))
3636 0 : }
3637 : }
3638 :
3639 : impl Timeline {
3640 : #[allow(clippy::doc_lazy_continuation)]
3641 : /// Get the data needed to reconstruct all keys in the provided keyspace
3642 : ///
3643 : /// The algorithm is as follows:
3644 : /// 1. While some keys are still not done and there's a timeline to visit:
3645 : /// 2. Visit the timeline (see [`Timeline::get_vectored_reconstruct_data_timeline`]:
3646 : /// 2.1: Build the fringe for the current keyspace
3647 : /// 2.2 Visit the newest layer from the fringe to collect all values for the range it
3648 : /// intersects
3649 : /// 2.3. Pop the timeline from the fringe
3650 : /// 2.4. If the fringe is empty, go back to 1
3651 1255315 : async fn get_vectored_reconstruct_data(
3652 1255315 : &self,
3653 1255315 : mut keyspace: KeySpace,
3654 1255315 : request_lsn: Lsn,
3655 1255315 : reconstruct_state: &mut ValuesReconstructState,
3656 1255315 : ctx: &RequestContext,
3657 1255315 : ) -> Result<(), GetVectoredError> {
3658 1255315 : let mut timeline_owned: Arc<Timeline>;
3659 1255315 : let mut timeline = self;
3660 1255315 :
3661 1255315 : let mut cont_lsn = Lsn(request_lsn.0 + 1);
3662 :
3663 1255311 : let missing_keyspace = loop {
3664 1707815 : if self.cancel.is_cancelled() {
3665 0 : return Err(GetVectoredError::Cancelled);
3666 1707815 : }
3667 :
3668 : let TimelineVisitOutcome {
3669 1707815 : completed_keyspace: completed,
3670 1707815 : image_covered_keyspace,
3671 1707815 : } = Self::get_vectored_reconstruct_data_timeline(
3672 1707815 : timeline,
3673 1707815 : keyspace.clone(),
3674 1707815 : cont_lsn,
3675 1707815 : reconstruct_state,
3676 1707815 : &self.cancel,
3677 1707815 : ctx,
3678 1707815 : )
3679 1707815 : .await?;
3680 :
3681 1707815 : keyspace.remove_overlapping_with(&completed);
3682 1707815 :
3683 1707815 : // Do not descend into the ancestor timeline for aux files.
3684 1707815 : // We don't return a blanket [`GetVectoredError::MissingKey`] to avoid
3685 1707815 : // stalling compaction.
3686 1707815 : keyspace.remove_overlapping_with(&KeySpace {
3687 1707815 : ranges: vec![NON_INHERITED_RANGE, Key::sparse_non_inherited_keyspace()],
3688 1707815 : });
3689 1707815 :
3690 1707815 : // Keyspace is fully retrieved
3691 1707815 : if keyspace.is_empty() {
3692 1254811 : break None;
3693 453004 : }
3694 :
3695 453004 : let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() else {
3696 : // Not fully retrieved but no ancestor timeline.
3697 500 : break Some(keyspace);
3698 : };
3699 :
3700 : // Now we see if there are keys covered by the image layer but does not exist in the
3701 : // image layer, which means that the key does not exist.
3702 :
3703 : // The block below will stop the vectored search if any of the keys encountered an image layer
3704 : // which did not contain a snapshot for said key. Since we have already removed all completed
3705 : // keys from `keyspace`, we expect there to be no overlap between it and the image covered key
3706 : // space. If that's not the case, we had at least one key encounter a gap in the image layer
3707 : // and stop the search as a result of that.
3708 452504 : let mut removed = keyspace.remove_overlapping_with(&image_covered_keyspace);
3709 452504 : // Do not fire missing key error and end early for sparse keys. Note that we hava already removed
3710 452504 : // non-inherited keyspaces before, so we can safely do a full `SPARSE_RANGE` remove instead of
3711 452504 : // figuring out what is the inherited key range and do a fine-grained pruning.
3712 452504 : removed.remove_overlapping_with(&KeySpace {
3713 452504 : ranges: vec![SPARSE_RANGE],
3714 452504 : });
3715 452504 : if !removed.is_empty() {
3716 0 : break Some(removed);
3717 452504 : }
3718 452504 : // If we reached this point, `remove_overlapping_with` should not have made any change to the
3719 452504 : // keyspace.
3720 452504 :
3721 452504 : // Take the min to avoid reconstructing a page with data newer than request Lsn.
3722 452504 : cont_lsn = std::cmp::min(Lsn(request_lsn.0 + 1), Lsn(timeline.ancestor_lsn.0 + 1));
3723 452504 : timeline_owned = timeline
3724 452504 : .get_ready_ancestor_timeline(ancestor_timeline, ctx)
3725 452504 : .await?;
3726 452500 : timeline = &*timeline_owned;
3727 : };
3728 :
3729 : // Remove sparse keys from the keyspace so that it doesn't fire errors.
3730 1255311 : let missing_keyspace = if let Some(missing_keyspace) = missing_keyspace {
3731 500 : let mut missing_keyspace = missing_keyspace;
3732 500 : missing_keyspace.remove_overlapping_with(&KeySpace {
3733 500 : ranges: vec![SPARSE_RANGE],
3734 500 : });
3735 500 : if missing_keyspace.is_empty() {
3736 472 : None
3737 : } else {
3738 28 : Some(missing_keyspace)
3739 : }
3740 : } else {
3741 1254811 : None
3742 : };
3743 :
3744 1255311 : if let Some(missing_keyspace) = missing_keyspace {
3745 28 : return Err(GetVectoredError::MissingKey(MissingKeyError {
3746 28 : key: missing_keyspace.start().unwrap(), /* better if we can store the full keyspace */
3747 28 : shard: self
3748 28 : .shard_identity
3749 28 : .get_shard_number(&missing_keyspace.start().unwrap()),
3750 28 : cont_lsn,
3751 28 : request_lsn,
3752 28 : ancestor_lsn: Some(timeline.ancestor_lsn),
3753 28 : backtrace: None,
3754 28 : read_path: std::mem::take(&mut reconstruct_state.read_path),
3755 28 : }));
3756 1255283 : }
3757 1255283 :
3758 1255283 : Ok(())
3759 1255315 : }
3760 :
3761 : /// Collect the reconstruct data for a keyspace from the specified timeline.
3762 : ///
3763 : /// Maintain a fringe [`LayerFringe`] which tracks all the layers that intersect
3764 : /// the current keyspace. The current keyspace of the search at any given timeline
3765 : /// is the original keyspace minus all the keys that have been completed minus
3766 : /// any keys for which we couldn't find an intersecting layer. It's not tracked explicitly,
3767 : /// but if you merge all the keyspaces in the fringe, you get the "current keyspace".
3768 : ///
3769 : /// This is basically a depth-first search visitor implementation where a vertex
3770 : /// is the (layer, lsn range, key space) tuple. The fringe acts as the stack.
3771 : ///
3772 : /// At each iteration pop the top of the fringe (the layer with the highest Lsn)
3773 : /// and get all the required reconstruct data from the layer in one go.
3774 : ///
3775 : /// Returns the completed keyspace and the keyspaces with image coverage. The caller
3776 : /// decides how to deal with these two keyspaces.
3777 1707815 : async fn get_vectored_reconstruct_data_timeline(
3778 1707815 : timeline: &Timeline,
3779 1707815 : keyspace: KeySpace,
3780 1707815 : mut cont_lsn: Lsn,
3781 1707815 : reconstruct_state: &mut ValuesReconstructState,
3782 1707815 : cancel: &CancellationToken,
3783 1707815 : ctx: &RequestContext,
3784 1707815 : ) -> Result<TimelineVisitOutcome, GetVectoredError> {
3785 1707815 : let mut unmapped_keyspace = keyspace.clone();
3786 1707815 : let mut fringe = LayerFringe::new();
3787 1707815 :
3788 1707815 : let mut completed_keyspace = KeySpace::default();
3789 1707815 : let mut image_covered_keyspace = KeySpaceRandomAccum::new();
3790 1707815 :
3791 1707815 : // Prevent GC from progressing while visiting the current timeline.
3792 1707815 : // If we are GC-ing because a new image layer was added while traversing
3793 1707815 : // the timeline, then it will remove layers that are required for fulfilling
3794 1707815 : // the current get request (read-path cannot "look back" and notice the new
3795 1707815 : // image layer).
3796 1707815 : let _gc_cutoff_holder = timeline.get_applied_gc_cutoff_lsn();
3797 :
3798 : // See `compaction::compact_with_gc` for why we need this.
3799 1707815 : let _guard = timeline.gc_compaction_layer_update_lock.read().await;
3800 :
3801 : loop {
3802 3400578 : if cancel.is_cancelled() {
3803 0 : return Err(GetVectoredError::Cancelled);
3804 3400578 : }
3805 3400578 :
3806 3400578 : let (keys_done_last_step, keys_with_image_coverage) =
3807 3400578 : reconstruct_state.consume_done_keys();
3808 3400578 : unmapped_keyspace.remove_overlapping_with(&keys_done_last_step);
3809 3400578 : completed_keyspace.merge(&keys_done_last_step);
3810 3400578 : if let Some(keys_with_image_coverage) = keys_with_image_coverage {
3811 45092 : unmapped_keyspace
3812 45092 : .remove_overlapping_with(&KeySpace::single(keys_with_image_coverage.clone()));
3813 45092 : image_covered_keyspace.add_range(keys_with_image_coverage);
3814 3355486 : }
3815 :
3816 : // Do not descent any further if the last layer we visited
3817 : // completed all keys in the keyspace it inspected. This is not
3818 : // required for correctness, but avoids visiting extra layers
3819 : // which turns out to be a perf bottleneck in some cases.
3820 3400578 : if !unmapped_keyspace.is_empty() {
3821 2149811 : let guard = timeline.layers.read().await;
3822 2149811 : let layers = guard.layer_map()?;
3823 :
3824 2149811 : let in_memory_layer = layers.find_in_memory_layer(|l| {
3825 1828410 : let start_lsn = l.get_lsn_range().start;
3826 1828410 : cont_lsn > start_lsn
3827 2149811 : });
3828 2149811 :
3829 2149811 : match in_memory_layer {
3830 1213347 : Some(l) => {
3831 1213347 : let lsn_range = l.get_lsn_range().start..cont_lsn;
3832 1213347 : fringe.update(
3833 1213347 : ReadableLayer::InMemoryLayer(l),
3834 1213347 : unmapped_keyspace.clone(),
3835 1213347 : lsn_range,
3836 1213347 : );
3837 1213347 : }
3838 : None => {
3839 937004 : for range in unmapped_keyspace.ranges.iter() {
3840 937004 : let results = layers.range_search(range.clone(), cont_lsn);
3841 937004 :
3842 937004 : results
3843 937004 : .found
3844 937004 : .into_iter()
3845 937004 : .map(|(SearchResult { layer, lsn_floor }, keyspace_accum)| {
3846 479504 : (
3847 479504 : ReadableLayer::PersistentLayer(guard.get_from_desc(&layer)),
3848 479504 : keyspace_accum.to_keyspace(),
3849 479504 : lsn_floor..cont_lsn,
3850 479504 : )
3851 937004 : })
3852 937004 : .for_each(|(layer, keyspace, lsn_range)| {
3853 479504 : fringe.update(layer, keyspace, lsn_range)
3854 937004 : });
3855 937004 : }
3856 : }
3857 : }
3858 :
3859 : // It's safe to drop the layer map lock after planning the next round of reads.
3860 : // The fringe keeps readable handles for the layers which are safe to read even
3861 : // if layers were compacted or flushed.
3862 : //
3863 : // The more interesting consideration is: "Why is the read algorithm still correct
3864 : // if the layer map changes while it is operating?". Doing a vectored read on a
3865 : // timeline boils down to pushing an imaginary lsn boundary downwards for each range
3866 : // covered by the read. The layer map tells us how to move the lsn downwards for a
3867 : // range at *a particular point in time*. It is fine for the answer to be different
3868 : // at two different time points.
3869 2149811 : drop(guard);
3870 1250767 : }
3871 :
3872 3400578 : if let Some((layer_to_read, keyspace_to_read, lsn_range)) = fringe.next_layer() {
3873 1692763 : if let Some(ref mut read_path) = reconstruct_state.read_path {
3874 1692763 : read_path.record_layer_visit(&layer_to_read, &keyspace_to_read, &lsn_range);
3875 1692763 : }
3876 1692763 : let next_cont_lsn = lsn_range.start;
3877 1692763 : layer_to_read
3878 1692763 : .get_values_reconstruct_data(
3879 1692763 : keyspace_to_read.clone(),
3880 1692763 : lsn_range,
3881 1692763 : reconstruct_state,
3882 1692763 : ctx,
3883 1692763 : )
3884 1692763 : .await?;
3885 :
3886 1692763 : unmapped_keyspace = keyspace_to_read;
3887 1692763 : cont_lsn = next_cont_lsn;
3888 1692763 :
3889 1692763 : reconstruct_state.on_layer_visited(&layer_to_read);
3890 : } else {
3891 1707815 : break;
3892 1707815 : }
3893 1707815 : }
3894 1707815 :
3895 1707815 : Ok(TimelineVisitOutcome {
3896 1707815 : completed_keyspace,
3897 1707815 : image_covered_keyspace: image_covered_keyspace.consume_keyspace(),
3898 1707815 : })
3899 1707815 : }
3900 :
3901 452504 : async fn get_ready_ancestor_timeline(
3902 452504 : &self,
3903 452504 : ancestor: &Arc<Timeline>,
3904 452504 : ctx: &RequestContext,
3905 452504 : ) -> Result<Arc<Timeline>, GetReadyAncestorError> {
3906 452504 : // It's possible that the ancestor timeline isn't active yet, or
3907 452504 : // is active but hasn't yet caught up to the branch point. Wait
3908 452504 : // for it.
3909 452504 : //
3910 452504 : // This cannot happen while the pageserver is running normally,
3911 452504 : // because you cannot create a branch from a point that isn't
3912 452504 : // present in the pageserver yet. However, we don't wait for the
3913 452504 : // branch point to be uploaded to cloud storage before creating
3914 452504 : // a branch. I.e., the branch LSN need not be remote consistent
3915 452504 : // for the branching operation to succeed.
3916 452504 : //
3917 452504 : // Hence, if we try to load a tenant in such a state where
3918 452504 : // 1. the existence of the branch was persisted (in IndexPart and/or locally)
3919 452504 : // 2. but the ancestor state is behind branch_lsn because it was not yet persisted
3920 452504 : // then we will need to wait for the ancestor timeline to
3921 452504 : // re-stream WAL up to branch_lsn before we access it.
3922 452504 : //
3923 452504 : // How can a tenant get in such a state?
3924 452504 : // - ungraceful pageserver process exit
3925 452504 : // - detach+attach => this is a bug, https://github.com/neondatabase/neon/issues/4219
3926 452504 : //
3927 452504 : // NB: this could be avoided by requiring
3928 452504 : // branch_lsn >= remote_consistent_lsn
3929 452504 : // during branch creation.
3930 452504 : match ancestor.wait_to_become_active(ctx).await {
3931 452500 : Ok(()) => {}
3932 : Err(TimelineState::Stopping) => {
3933 : // If an ancestor is stopping, it means the tenant is stopping: handle this the same as if this timeline was stopping.
3934 0 : return Err(GetReadyAncestorError::Cancelled);
3935 : }
3936 4 : Err(state) => {
3937 4 : return Err(GetReadyAncestorError::BadState {
3938 4 : timeline_id: ancestor.timeline_id,
3939 4 : state,
3940 4 : });
3941 : }
3942 : }
3943 452500 : ancestor
3944 452500 : .wait_lsn(
3945 452500 : self.ancestor_lsn,
3946 452500 : WaitLsnWaiter::Timeline(self),
3947 452500 : WaitLsnTimeout::Default,
3948 452500 : ctx,
3949 452500 : )
3950 452500 : .await
3951 452500 : .map_err(|e| match e {
3952 0 : e @ WaitLsnError::Timeout(_) => GetReadyAncestorError::AncestorLsnTimeout(e),
3953 0 : WaitLsnError::Shutdown => GetReadyAncestorError::Cancelled,
3954 0 : WaitLsnError::BadState(state) => GetReadyAncestorError::BadState {
3955 0 : timeline_id: ancestor.timeline_id,
3956 0 : state,
3957 0 : },
3958 452500 : })?;
3959 :
3960 452500 : Ok(ancestor.clone())
3961 452504 : }
3962 :
3963 594312 : pub(crate) fn get_shard_identity(&self) -> &ShardIdentity {
3964 594312 : &self.shard_identity
3965 594312 : }
3966 :
3967 : #[inline(always)]
3968 0 : pub(crate) fn shard_timeline_id(&self) -> ShardTimelineId {
3969 0 : ShardTimelineId {
3970 0 : shard_index: ShardIndex {
3971 0 : shard_number: self.shard_identity.number,
3972 0 : shard_count: self.shard_identity.count,
3973 0 : },
3974 0 : timeline_id: self.timeline_id,
3975 0 : }
3976 0 : }
3977 :
3978 : /// Returns a non-frozen open in-memory layer for ingestion.
3979 : ///
3980 : /// Takes a witness of timeline writer state lock being held, because it makes no sense to call
3981 : /// this function without holding the mutex.
3982 2596 : async fn get_layer_for_write(
3983 2596 : &self,
3984 2596 : lsn: Lsn,
3985 2596 : _guard: &tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
3986 2596 : ctx: &RequestContext,
3987 2596 : ) -> anyhow::Result<Arc<InMemoryLayer>> {
3988 2596 : let mut guard = self.layers.write().await;
3989 :
3990 2596 : let last_record_lsn = self.get_last_record_lsn();
3991 2596 : ensure!(
3992 2596 : lsn > last_record_lsn,
3993 0 : "cannot modify relation after advancing last_record_lsn (incoming_lsn={}, last_record_lsn={})",
3994 : lsn,
3995 : last_record_lsn,
3996 : );
3997 :
3998 2596 : let layer = guard
3999 2596 : .open_mut()?
4000 2596 : .get_layer_for_write(
4001 2596 : lsn,
4002 2596 : self.conf,
4003 2596 : self.timeline_id,
4004 2596 : self.tenant_shard_id,
4005 2596 : &self.gate,
4006 2596 : ctx,
4007 2596 : )
4008 2596 : .await?;
4009 2596 : Ok(layer)
4010 2596 : }
4011 :
4012 10558188 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
4013 10558188 : assert!(new_lsn.is_aligned());
4014 :
4015 10558188 : self.metrics.last_record_lsn_gauge.set(new_lsn.0 as i64);
4016 10558188 : self.last_record_lsn.advance(new_lsn);
4017 10558188 : }
4018 :
4019 : /// Freeze any existing open in-memory layer and unconditionally notify the flush loop.
4020 : ///
4021 : /// Unconditional flush loop notification is given because in sharded cases we will want to
4022 : /// leave an Lsn gap. Unsharded tenants do not have Lsn gaps.
4023 2404 : async fn freeze_inmem_layer_at(
4024 2404 : &self,
4025 2404 : at: Lsn,
4026 2404 : write_lock: &mut tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
4027 2404 : ) -> Result<u64, FlushLayerError> {
4028 2404 : let frozen = {
4029 2404 : let mut guard = self.layers.write().await;
4030 2404 : guard
4031 2404 : .open_mut()?
4032 2404 : .try_freeze_in_memory_layer(at, &self.last_freeze_at, write_lock, &self.metrics)
4033 2404 : .await
4034 : };
4035 :
4036 2404 : if frozen {
4037 2348 : let now = Instant::now();
4038 2348 : *(self.last_freeze_ts.write().unwrap()) = now;
4039 2348 : }
4040 :
4041 : // Increment the flush cycle counter and wake up the flush task.
4042 : // Remember the new value, so that when we listen for the flush
4043 : // to finish, we know when the flush that we initiated has
4044 : // finished, instead of some other flush that was started earlier.
4045 2404 : let mut my_flush_request = 0;
4046 2404 :
4047 2404 : let flush_loop_state = { *self.flush_loop_state.lock().unwrap() };
4048 2404 : if !matches!(flush_loop_state, FlushLoopState::Running { .. }) {
4049 0 : return Err(FlushLayerError::NotRunning(flush_loop_state));
4050 2404 : }
4051 2404 :
4052 2404 : self.layer_flush_start_tx.send_modify(|(counter, lsn)| {
4053 2404 : my_flush_request = *counter + 1;
4054 2404 : *counter = my_flush_request;
4055 2404 : *lsn = std::cmp::max(at, *lsn);
4056 2404 : });
4057 2404 :
4058 2404 : assert_ne!(my_flush_request, 0);
4059 :
4060 2404 : Ok(my_flush_request)
4061 2404 : }
4062 :
4063 : /// Layer flusher task's main loop.
4064 884 : async fn flush_loop(
4065 884 : self: &Arc<Self>,
4066 884 : mut layer_flush_start_rx: tokio::sync::watch::Receiver<(u64, Lsn)>,
4067 884 : ctx: &RequestContext,
4068 884 : ) {
4069 : // Subscribe to L0 delta layer updates, for compaction backpressure.
4070 884 : let mut watch_l0 = match self.layers.read().await.layer_map() {
4071 884 : Ok(lm) => lm.watch_level0_deltas(),
4072 0 : Err(Shutdown) => return,
4073 : };
4074 :
4075 884 : info!("started flush loop");
4076 : loop {
4077 3212 : tokio::select! {
4078 3212 : _ = self.cancel.cancelled() => {
4079 20 : info!("shutting down layer flush task due to Timeline::cancel");
4080 20 : break;
4081 : },
4082 3212 : _ = layer_flush_start_rx.changed() => {}
4083 2328 : }
4084 2328 : trace!("waking up");
4085 2328 : let (flush_counter, frozen_to_lsn) = *layer_flush_start_rx.borrow();
4086 2328 :
4087 2328 : // The highest LSN to which we flushed in the loop over frozen layers
4088 2328 : let mut flushed_to_lsn = Lsn(0);
4089 :
4090 2328 : let result = loop {
4091 4676 : if self.cancel.is_cancelled() {
4092 0 : info!("dropping out of flush loop for timeline shutdown");
4093 : // Note: we do not bother transmitting into [`layer_flush_done_tx`], because
4094 : // anyone waiting on that will respect self.cancel as well: they will stop
4095 : // waiting at the same time we as drop out of this loop.
4096 0 : return;
4097 4676 : }
4098 4676 :
4099 4676 : // Break to notify potential waiters as soon as we've flushed the requested LSN. If
4100 4676 : // more requests have arrived in the meanwhile, we'll resume flushing afterwards.
4101 4676 : if flushed_to_lsn >= frozen_to_lsn {
4102 2272 : break Ok(());
4103 2404 : }
4104 :
4105 : // Fetch the next layer to flush, if any.
4106 2404 : let (layer, l0_count, frozen_count, frozen_size) = {
4107 2404 : let layers = self.layers.read().await;
4108 2404 : let Ok(lm) = layers.layer_map() else {
4109 0 : info!("dropping out of flush loop for timeline shutdown");
4110 0 : return;
4111 : };
4112 2404 : let l0_count = lm.level0_deltas().len();
4113 2404 : let frozen_count = lm.frozen_layers.len();
4114 2404 : let frozen_size: u64 = lm
4115 2404 : .frozen_layers
4116 2404 : .iter()
4117 2426 : .map(|l| l.estimated_in_mem_size())
4118 2404 : .sum();
4119 2404 : let layer = lm.frozen_layers.front().cloned();
4120 2404 : (layer, l0_count, frozen_count, frozen_size)
4121 2404 : // drop 'layers' lock
4122 2404 : };
4123 2404 : let Some(layer) = layer else {
4124 56 : break Ok(());
4125 : };
4126 :
4127 : // Stall flushes to backpressure if compaction can't keep up. This is propagated up
4128 : // to WAL ingestion by having ephemeral layer rolls wait for flushes.
4129 : //
4130 : // NB: the compaction loop only checks `compaction_threshold` every 20 seconds, so
4131 : // we can end up stalling before compaction even starts. Consider making it more
4132 : // responsive (e.g. via `watch_level0_deltas`).
4133 2348 : if let Some(stall_threshold) = self.get_l0_flush_stall_threshold() {
4134 0 : if l0_count >= stall_threshold {
4135 0 : warn!(
4136 0 : "stalling layer flushes for compaction backpressure at {l0_count} \
4137 0 : L0 layers ({frozen_count} frozen layers with {frozen_size} bytes)"
4138 : );
4139 0 : let stall_timer = self
4140 0 : .metrics
4141 0 : .flush_delay_histo
4142 0 : .start_timer()
4143 0 : .record_on_drop();
4144 0 : tokio::select! {
4145 0 : result = watch_l0.wait_for(|l0| *l0 < stall_threshold) => {
4146 0 : if let Ok(l0) = result.as_deref() {
4147 0 : let delay = stall_timer.elapsed().as_secs_f64();
4148 0 : info!("resuming layer flushes at {l0} L0 layers after {delay:.3}s");
4149 0 : }
4150 : },
4151 0 : _ = self.cancel.cancelled() => {},
4152 : }
4153 0 : continue; // check again
4154 0 : }
4155 2348 : }
4156 :
4157 : // Flush the layer.
4158 2348 : let flush_timer = self.metrics.flush_time_histo.start_timer();
4159 2348 : match self.flush_frozen_layer(layer, ctx).await {
4160 2348 : Ok(layer_lsn) => flushed_to_lsn = max(flushed_to_lsn, layer_lsn),
4161 : Err(FlushLayerError::Cancelled) => {
4162 0 : info!("dropping out of flush loop for timeline shutdown");
4163 0 : return;
4164 : }
4165 0 : err @ Err(
4166 0 : FlushLayerError::NotRunning(_)
4167 0 : | FlushLayerError::Other(_)
4168 0 : | FlushLayerError::CreateImageLayersError(_),
4169 0 : ) => {
4170 0 : error!("could not flush frozen layer: {err:?}");
4171 0 : break err.map(|_| ());
4172 : }
4173 : }
4174 2348 : let flush_duration = flush_timer.stop_and_record();
4175 2348 :
4176 2348 : // Notify the tenant compaction loop if L0 compaction is needed.
4177 2348 : let l0_count = *watch_l0.borrow();
4178 2348 : if l0_count >= self.get_compaction_threshold() {
4179 956 : self.l0_compaction_trigger.notify_one();
4180 1392 : }
4181 :
4182 : // Delay the next flush to backpressure if compaction can't keep up. We delay by the
4183 : // flush duration such that the flush takes 2x as long. This is propagated up to WAL
4184 : // ingestion by having ephemeral layer rolls wait for flushes.
4185 2348 : if let Some(delay_threshold) = self.get_l0_flush_delay_threshold() {
4186 0 : if l0_count >= delay_threshold {
4187 0 : let delay = flush_duration.as_secs_f64();
4188 0 : info!(
4189 0 : "delaying layer flush by {delay:.3}s for compaction backpressure at \
4190 0 : {l0_count} L0 layers ({frozen_count} frozen layers with {frozen_size} bytes)"
4191 : );
4192 0 : let _delay_timer = self
4193 0 : .metrics
4194 0 : .flush_delay_histo
4195 0 : .start_timer()
4196 0 : .record_on_drop();
4197 0 : tokio::select! {
4198 0 : _ = tokio::time::sleep(flush_duration) => {},
4199 0 : _ = watch_l0.wait_for(|l0| *l0 < delay_threshold) => {},
4200 0 : _ = self.cancel.cancelled() => {},
4201 : }
4202 0 : }
4203 2348 : }
4204 : };
4205 :
4206 : // Unsharded tenants should never advance their LSN beyond the end of the
4207 : // highest layer they write: such gaps between layer data and the frozen LSN
4208 : // are only legal on sharded tenants.
4209 2328 : debug_assert!(
4210 2328 : self.shard_identity.count.count() > 1
4211 2328 : || flushed_to_lsn >= frozen_to_lsn
4212 56 : || !flushed_to_lsn.is_valid()
4213 : );
4214 :
4215 2328 : if flushed_to_lsn < frozen_to_lsn && self.shard_identity.count.count() > 1 {
4216 : // If our layer flushes didn't carry disk_consistent_lsn up to the `to_lsn` advertised
4217 : // to us via layer_flush_start_rx, then advance it here.
4218 : //
4219 : // This path is only taken for tenants with multiple shards: single sharded tenants should
4220 : // never encounter a gap in the wal.
4221 0 : let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
4222 0 : tracing::debug!("Advancing disk_consistent_lsn across layer gap {old_disk_consistent_lsn}->{frozen_to_lsn}");
4223 0 : if self.set_disk_consistent_lsn(frozen_to_lsn) {
4224 0 : if let Err(e) = self.schedule_uploads(frozen_to_lsn, vec![]) {
4225 0 : tracing::warn!("Failed to schedule metadata upload after updating disk_consistent_lsn: {e}");
4226 0 : }
4227 0 : }
4228 2328 : }
4229 :
4230 : // Notify any listeners that we're done
4231 2328 : let _ = self
4232 2328 : .layer_flush_done_tx
4233 2328 : .send_replace((flush_counter, result));
4234 : }
4235 20 : }
4236 :
4237 : /// Waits any flush request created by [`Self::freeze_inmem_layer_at`] to complete.
4238 2244 : async fn wait_flush_completion(&self, request: u64) -> Result<(), FlushLayerError> {
4239 2244 : let mut rx = self.layer_flush_done_tx.subscribe();
4240 : loop {
4241 : {
4242 4564 : let (last_result_counter, last_result) = &*rx.borrow();
4243 4564 : if *last_result_counter >= request {
4244 2244 : if let Err(err) = last_result {
4245 : // We already logged the original error in
4246 : // flush_loop. We cannot propagate it to the caller
4247 : // here, because it might not be Cloneable
4248 0 : return Err(err.clone());
4249 : } else {
4250 2244 : return Ok(());
4251 : }
4252 2320 : }
4253 2320 : }
4254 2320 : trace!("waiting for flush to complete");
4255 2320 : tokio::select! {
4256 2320 : rx_e = rx.changed() => {
4257 2320 : rx_e.map_err(|_| FlushLayerError::NotRunning(*self.flush_loop_state.lock().unwrap()))?;
4258 : },
4259 : // Cancellation safety: we are not leaving an I/O in-flight for the flush, we're just ignoring
4260 : // the notification from [`flush_loop`] that it completed.
4261 2320 : _ = self.cancel.cancelled() => {
4262 0 : tracing::info!("Cancelled layer flush due on timeline shutdown");
4263 0 : return Ok(())
4264 : }
4265 : };
4266 2320 : trace!("done")
4267 : }
4268 2244 : }
4269 :
4270 : /// Flush one frozen in-memory layer to disk, as a new delta layer.
4271 : ///
4272 : /// Return value is the last lsn (inclusive) of the layer that was frozen.
4273 : #[instrument(skip_all, fields(layer=%frozen_layer))]
4274 : async fn flush_frozen_layer(
4275 : self: &Arc<Self>,
4276 : frozen_layer: Arc<InMemoryLayer>,
4277 : ctx: &RequestContext,
4278 : ) -> Result<Lsn, FlushLayerError> {
4279 : debug_assert_current_span_has_tenant_and_timeline_id();
4280 :
4281 : // As a special case, when we have just imported an image into the repository,
4282 : // instead of writing out a L0 delta layer, we directly write out image layer
4283 : // files instead. This is possible as long as *all* the data imported into the
4284 : // repository have the same LSN.
4285 : let lsn_range = frozen_layer.get_lsn_range();
4286 :
4287 : // Whether to directly create image layers for this flush, or flush them as delta layers
4288 : let create_image_layer =
4289 : lsn_range.start == self.initdb_lsn && lsn_range.end == Lsn(self.initdb_lsn.0 + 1);
4290 :
4291 : #[cfg(test)]
4292 : {
4293 : match &mut *self.flush_loop_state.lock().unwrap() {
4294 : FlushLoopState::NotStarted | FlushLoopState::Exited => {
4295 : panic!("flush loop not running")
4296 : }
4297 : FlushLoopState::Running {
4298 : expect_initdb_optimization,
4299 : initdb_optimization_count,
4300 : ..
4301 : } => {
4302 : if create_image_layer {
4303 : *initdb_optimization_count += 1;
4304 : } else {
4305 : assert!(!*expect_initdb_optimization, "expected initdb optimization");
4306 : }
4307 : }
4308 : }
4309 : }
4310 :
4311 : let (layers_to_upload, delta_layer_to_add) = if create_image_layer {
4312 : // Note: The 'ctx' in use here has DownloadBehavior::Error. We should not
4313 : // require downloading anything during initial import.
4314 : let ((rel_partition, metadata_partition), _lsn) = self
4315 : .repartition(
4316 : self.initdb_lsn,
4317 : self.get_compaction_target_size(),
4318 : EnumSet::empty(),
4319 : ctx,
4320 : )
4321 : .await
4322 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e.into()))?;
4323 :
4324 : if self.cancel.is_cancelled() {
4325 : return Err(FlushLayerError::Cancelled);
4326 : }
4327 :
4328 : // Ensure that we have a single call to `create_image_layers` with a combined dense keyspace.
4329 : // So that the key ranges don't overlap.
4330 : let mut partitions = KeyPartitioning::default();
4331 : partitions.parts.extend(rel_partition.parts);
4332 : if !metadata_partition.parts.is_empty() {
4333 : assert_eq!(
4334 : metadata_partition.parts.len(),
4335 : 1,
4336 : "currently sparse keyspace should only contain a single metadata keyspace"
4337 : );
4338 : // Safety: create_image_layers treat sparse keyspaces differently that it does not scan
4339 : // every single key within the keyspace, and therefore, it's safe to force converting it
4340 : // into a dense keyspace before calling this function.
4341 : partitions
4342 : .parts
4343 : .extend(metadata_partition.into_dense().parts);
4344 : }
4345 :
4346 : let mut layers_to_upload = Vec::new();
4347 : let (generated_image_layers, is_complete) = self
4348 : .create_image_layers(
4349 : &partitions,
4350 : self.initdb_lsn,
4351 : ImageLayerCreationMode::Initial,
4352 : ctx,
4353 : LastImageLayerCreationStatus::Initial,
4354 : false, // don't yield for L0, we're flushing L0
4355 : )
4356 : .await?;
4357 : debug_assert!(
4358 : matches!(is_complete, LastImageLayerCreationStatus::Complete),
4359 : "init image generation mode must fully cover the keyspace"
4360 : );
4361 : layers_to_upload.extend(generated_image_layers);
4362 :
4363 : (layers_to_upload, None)
4364 : } else {
4365 : // Normal case, write out a L0 delta layer file.
4366 : // `create_delta_layer` will not modify the layer map.
4367 : // We will remove frozen layer and add delta layer in one atomic operation later.
4368 : let Some(layer) = self
4369 : .create_delta_layer(&frozen_layer, None, ctx)
4370 : .await
4371 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?
4372 : else {
4373 : panic!("delta layer cannot be empty if no filter is applied");
4374 : };
4375 : (
4376 : // FIXME: even though we have a single image and single delta layer assumption
4377 : // we push them to vec
4378 : vec![layer.clone()],
4379 : Some(layer),
4380 : )
4381 : };
4382 :
4383 : pausable_failpoint!("flush-layer-cancel-after-writing-layer-out-pausable");
4384 :
4385 : if self.cancel.is_cancelled() {
4386 : return Err(FlushLayerError::Cancelled);
4387 : }
4388 :
4389 : let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1);
4390 :
4391 : // The new on-disk layers are now in the layer map. We can remove the
4392 : // in-memory layer from the map now. The flushed layer is stored in
4393 : // the mapping in `create_delta_layer`.
4394 : {
4395 : let mut guard = self.layers.write().await;
4396 :
4397 : guard.open_mut()?.finish_flush_l0_layer(
4398 : delta_layer_to_add.as_ref(),
4399 : &frozen_layer,
4400 : &self.metrics,
4401 : );
4402 :
4403 : if self.set_disk_consistent_lsn(disk_consistent_lsn) {
4404 : // Schedule remote uploads that will reflect our new disk_consistent_lsn
4405 : self.schedule_uploads(disk_consistent_lsn, layers_to_upload)
4406 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?;
4407 : }
4408 : // release lock on 'layers'
4409 : };
4410 :
4411 : // Backpressure mechanism: wait with continuation of the flush loop until we have uploaded all layer files.
4412 : // This makes us refuse ingest until the new layers have been persisted to the remote
4413 : // TODO: remove this, and rely on l0_flush_{delay,stall}_threshold instead.
4414 : if self.get_l0_flush_wait_upload() {
4415 : let start = Instant::now();
4416 : self.remote_client
4417 : .wait_completion()
4418 : .await
4419 0 : .map_err(|e| match e {
4420 : WaitCompletionError::UploadQueueShutDownOrStopped
4421 : | WaitCompletionError::NotInitialized(
4422 : NotInitialized::ShuttingDown | NotInitialized::Stopped,
4423 0 : ) => FlushLayerError::Cancelled,
4424 : WaitCompletionError::NotInitialized(NotInitialized::Uninitialized) => {
4425 0 : FlushLayerError::Other(anyhow!(e).into())
4426 : }
4427 0 : })?;
4428 : let duration = start.elapsed().as_secs_f64();
4429 : self.metrics.flush_wait_upload_time_gauge_add(duration);
4430 : }
4431 :
4432 : // FIXME: between create_delta_layer and the scheduling of the upload in `update_metadata_file`,
4433 : // a compaction can delete the file and then it won't be available for uploads any more.
4434 : // We still schedule the upload, resulting in an error, but ideally we'd somehow avoid this
4435 : // race situation.
4436 : // See https://github.com/neondatabase/neon/issues/4526
4437 : pausable_failpoint!("flush-frozen-pausable");
4438 :
4439 : // This failpoint is used by another test case `test_pageserver_recovery`.
4440 : fail_point!("flush-frozen-exit");
4441 :
4442 : Ok(Lsn(lsn_range.end.0 - 1))
4443 : }
4444 :
4445 : /// Return true if the value changed
4446 : ///
4447 : /// This function must only be used from the layer flush task.
4448 2348 : fn set_disk_consistent_lsn(&self, new_value: Lsn) -> bool {
4449 2348 : let old_value = self.disk_consistent_lsn.fetch_max(new_value);
4450 2348 : assert!(new_value >= old_value, "disk_consistent_lsn must be growing monotonously at runtime; current {old_value}, offered {new_value}");
4451 :
4452 2348 : self.metrics
4453 2348 : .disk_consistent_lsn_gauge
4454 2348 : .set(new_value.0 as i64);
4455 2348 : new_value != old_value
4456 2348 : }
4457 :
4458 : /// Update metadata file
4459 2448 : fn schedule_uploads(
4460 2448 : &self,
4461 2448 : disk_consistent_lsn: Lsn,
4462 2448 : layers_to_upload: impl IntoIterator<Item = ResidentLayer>,
4463 2448 : ) -> anyhow::Result<()> {
4464 2448 : // We can only save a valid 'prev_record_lsn' value on disk if we
4465 2448 : // flushed *all* in-memory changes to disk. We only track
4466 2448 : // 'prev_record_lsn' in memory for the latest processed record, so we
4467 2448 : // don't remember what the correct value that corresponds to some old
4468 2448 : // LSN is. But if we flush everything, then the value corresponding
4469 2448 : // current 'last_record_lsn' is correct and we can store it on disk.
4470 2448 : let RecordLsn {
4471 2448 : last: last_record_lsn,
4472 2448 : prev: prev_record_lsn,
4473 2448 : } = self.last_record_lsn.load();
4474 2448 : let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
4475 2194 : Some(prev_record_lsn)
4476 : } else {
4477 254 : None
4478 : };
4479 :
4480 2448 : let update = crate::tenant::metadata::MetadataUpdate::new(
4481 2448 : disk_consistent_lsn,
4482 2448 : ondisk_prev_record_lsn,
4483 2448 : *self.applied_gc_cutoff_lsn.read(),
4484 2448 : );
4485 2448 :
4486 2448 : fail_point!("checkpoint-before-saving-metadata", |x| bail!(
4487 0 : "{}",
4488 0 : x.unwrap()
4489 2448 : ));
4490 :
4491 4820 : for layer in layers_to_upload {
4492 2372 : self.remote_client.schedule_layer_file_upload(layer)?;
4493 : }
4494 2448 : self.remote_client
4495 2448 : .schedule_index_upload_for_metadata_update(&update)?;
4496 :
4497 2448 : Ok(())
4498 2448 : }
4499 :
4500 0 : pub(crate) async fn preserve_initdb_archive(&self) -> anyhow::Result<()> {
4501 0 : self.remote_client
4502 0 : .preserve_initdb_archive(
4503 0 : &self.tenant_shard_id.tenant_id,
4504 0 : &self.timeline_id,
4505 0 : &self.cancel,
4506 0 : )
4507 0 : .await
4508 0 : }
4509 :
4510 : // Write out the given frozen in-memory layer as a new L0 delta file. This L0 file will not be tracked
4511 : // in layer map immediately. The caller is responsible to put it into the layer map.
4512 1936 : async fn create_delta_layer(
4513 1936 : self: &Arc<Self>,
4514 1936 : frozen_layer: &Arc<InMemoryLayer>,
4515 1936 : key_range: Option<Range<Key>>,
4516 1936 : ctx: &RequestContext,
4517 1936 : ) -> anyhow::Result<Option<ResidentLayer>> {
4518 1936 : let self_clone = Arc::clone(self);
4519 1936 : let frozen_layer = Arc::clone(frozen_layer);
4520 1936 : let ctx = ctx.attached_child();
4521 1936 : let work = async move {
4522 1936 : let Some((desc, path)) = frozen_layer
4523 1936 : .write_to_disk(&ctx, key_range, self_clone.l0_flush_global_state.inner())
4524 1936 : .await?
4525 : else {
4526 0 : return Ok(None);
4527 : };
4528 1936 : let new_delta = Layer::finish_creating(self_clone.conf, &self_clone, desc, &path)?;
4529 :
4530 : // The write_to_disk() above calls writer.finish() which already did the fsync of the inodes.
4531 : // We just need to fsync the directory in which these inodes are linked,
4532 : // which we know to be the timeline directory.
4533 : //
4534 : // We use fatal_err() below because the after write_to_disk returns with success,
4535 : // the in-memory state of the filesystem already has the layer file in its final place,
4536 : // and subsequent pageserver code could think it's durable while it really isn't.
4537 1936 : let timeline_dir = VirtualFile::open(
4538 1936 : &self_clone
4539 1936 : .conf
4540 1936 : .timeline_path(&self_clone.tenant_shard_id, &self_clone.timeline_id),
4541 1936 : &ctx,
4542 1936 : )
4543 1936 : .await
4544 1936 : .fatal_err("VirtualFile::open for timeline dir fsync");
4545 1936 : timeline_dir
4546 1936 : .sync_all()
4547 1936 : .await
4548 1936 : .fatal_err("VirtualFile::sync_all timeline dir");
4549 1936 : anyhow::Ok(Some(new_delta))
4550 1936 : };
4551 : // Before tokio-epoll-uring, we ran write_to_disk & the sync_all inside spawn_blocking.
4552 : // Preserve that behavior to maintain the same behavior for `virtual_file_io_engine=std-fs`.
4553 : use crate::virtual_file::io_engine::IoEngine;
4554 1936 : match crate::virtual_file::io_engine::get() {
4555 0 : IoEngine::NotSet => panic!("io engine not set"),
4556 : IoEngine::StdFs => {
4557 968 : let span = tracing::info_span!("blocking");
4558 968 : tokio::task::spawn_blocking({
4559 968 : move || Handle::current().block_on(work.instrument(span))
4560 968 : })
4561 968 : .await
4562 968 : .context("spawn_blocking")
4563 968 : .and_then(|x| x)
4564 : }
4565 : #[cfg(target_os = "linux")]
4566 968 : IoEngine::TokioEpollUring => work.await,
4567 : }
4568 1936 : }
4569 :
4570 1140 : async fn repartition(
4571 1140 : &self,
4572 1140 : lsn: Lsn,
4573 1140 : partition_size: u64,
4574 1140 : flags: EnumSet<CompactFlags>,
4575 1140 : ctx: &RequestContext,
4576 1140 : ) -> Result<((KeyPartitioning, SparseKeyPartitioning), Lsn), CompactionError> {
4577 1140 : let Ok(mut guard) = self.partitioning.try_write_guard() else {
4578 : // NB: there are two callers, one is the compaction task, of which there is only one per struct Tenant and hence Timeline.
4579 : // The other is the initdb optimization in flush_frozen_layer, used by `boostrap_timeline`, which runs before `.activate()`
4580 : // and hence before the compaction task starts.
4581 0 : return Err(CompactionError::Other(anyhow!(
4582 0 : "repartition() called concurrently"
4583 0 : )));
4584 : };
4585 1140 : let ((dense_partition, sparse_partition), partition_lsn) = &*guard.read();
4586 1140 : if lsn < *partition_lsn {
4587 0 : return Err(CompactionError::Other(anyhow!(
4588 0 : "repartition() called with LSN going backwards, this should not happen"
4589 0 : )));
4590 1140 : }
4591 1140 :
4592 1140 : let distance = lsn.0 - partition_lsn.0;
4593 1140 : if *partition_lsn != Lsn(0)
4594 524 : && distance <= self.repartition_threshold
4595 524 : && !flags.contains(CompactFlags::ForceRepartition)
4596 : {
4597 496 : debug!(
4598 : distance,
4599 : threshold = self.repartition_threshold,
4600 0 : "no repartitioning needed"
4601 : );
4602 496 : return Ok((
4603 496 : (dense_partition.clone(), sparse_partition.clone()),
4604 496 : *partition_lsn,
4605 496 : ));
4606 644 : }
4607 :
4608 644 : let (dense_ks, sparse_ks) = self
4609 644 : .collect_keyspace(lsn, ctx)
4610 644 : .await
4611 644 : .map_err(CompactionError::CollectKeySpaceError)?;
4612 644 : let dense_partitioning = dense_ks.partition(&self.shard_identity, partition_size);
4613 644 : let sparse_partitioning = SparseKeyPartitioning {
4614 644 : parts: vec![sparse_ks],
4615 644 : }; // no partitioning for metadata keys for now
4616 644 : let result = ((dense_partitioning, sparse_partitioning), lsn);
4617 644 : guard.write(result.clone());
4618 644 : Ok(result)
4619 1140 : }
4620 :
4621 : // Is it time to create a new image layer for the given partition? True if we want to generate.
4622 28 : async fn time_for_new_image_layer(&self, partition: &KeySpace, lsn: Lsn) -> bool {
4623 28 : let threshold = self.get_image_creation_threshold();
4624 :
4625 28 : let guard = self.layers.read().await;
4626 28 : let Ok(layers) = guard.layer_map() else {
4627 0 : return false;
4628 : };
4629 :
4630 28 : let mut max_deltas = 0;
4631 56 : for part_range in &partition.ranges {
4632 28 : let image_coverage = layers.image_coverage(part_range, lsn);
4633 56 : for (img_range, last_img) in image_coverage {
4634 28 : let img_lsn = if let Some(last_img) = last_img {
4635 0 : last_img.get_lsn_range().end
4636 : } else {
4637 28 : Lsn(0)
4638 : };
4639 : // Let's consider an example:
4640 : //
4641 : // delta layer with LSN range 71-81
4642 : // delta layer with LSN range 81-91
4643 : // delta layer with LSN range 91-101
4644 : // image layer at LSN 100
4645 : //
4646 : // If 'lsn' is still 100, i.e. no new WAL has been processed since the last image layer,
4647 : // there's no need to create a new one. We check this case explicitly, to avoid passing
4648 : // a bogus range to count_deltas below, with start > end. It's even possible that there
4649 : // are some delta layers *later* than current 'lsn', if more WAL was processed and flushed
4650 : // after we read last_record_lsn, which is passed here in the 'lsn' argument.
4651 28 : if img_lsn < lsn {
4652 28 : let num_deltas =
4653 28 : layers.count_deltas(&img_range, &(img_lsn..lsn), Some(threshold));
4654 28 :
4655 28 : max_deltas = max_deltas.max(num_deltas);
4656 28 : if num_deltas >= threshold {
4657 0 : debug!(
4658 0 : "key range {}-{}, has {} deltas on this timeline in LSN range {}..{}",
4659 : img_range.start, img_range.end, num_deltas, img_lsn, lsn
4660 : );
4661 0 : return true;
4662 28 : }
4663 0 : }
4664 : }
4665 : }
4666 :
4667 28 : debug!(
4668 : max_deltas,
4669 0 : "none of the partitioned ranges had >= {threshold} deltas"
4670 : );
4671 28 : false
4672 28 : }
4673 :
4674 : /// Create image layers for Postgres data. Assumes the caller passes a partition that is not too large,
4675 : /// so that at most one image layer will be produced from this function.
4676 : #[allow(clippy::too_many_arguments)]
4677 464 : async fn create_image_layer_for_rel_blocks(
4678 464 : self: &Arc<Self>,
4679 464 : partition: &KeySpace,
4680 464 : mut image_layer_writer: ImageLayerWriter,
4681 464 : lsn: Lsn,
4682 464 : ctx: &RequestContext,
4683 464 : img_range: Range<Key>,
4684 464 : io_concurrency: IoConcurrency,
4685 464 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
4686 464 : let mut wrote_keys = false;
4687 464 :
4688 464 : let mut key_request_accum = KeySpaceAccum::new();
4689 3072 : for range in &partition.ranges {
4690 2608 : let mut key = range.start;
4691 5652 : while key < range.end {
4692 : // Decide whether to retain this key: usually we do, but sharded tenants may
4693 : // need to drop keys that don't belong to them. If we retain the key, add it
4694 : // to `key_request_accum` for later issuing a vectored get
4695 3044 : if self.shard_identity.is_key_disposable(&key) {
4696 0 : debug!(
4697 0 : "Dropping key {} during compaction (it belongs on shard {:?})",
4698 0 : key,
4699 0 : self.shard_identity.get_shard_number(&key)
4700 : );
4701 3044 : } else {
4702 3044 : key_request_accum.add_key(key);
4703 3044 : }
4704 :
4705 3044 : let last_key_in_range = key.next() == range.end;
4706 3044 : key = key.next();
4707 3044 :
4708 3044 : // Maybe flush `key_rest_accum`
4709 3044 : if key_request_accum.raw_size() >= Timeline::MAX_GET_VECTORED_KEYS
4710 3044 : || (last_key_in_range && key_request_accum.raw_size() > 0)
4711 : {
4712 2608 : let results = self
4713 2608 : .get_vectored(
4714 2608 : key_request_accum.consume_keyspace(),
4715 2608 : lsn,
4716 2608 : io_concurrency.clone(),
4717 2608 : ctx,
4718 2608 : )
4719 2608 : .await?;
4720 :
4721 2608 : if self.cancel.is_cancelled() {
4722 0 : return Err(CreateImageLayersError::Cancelled);
4723 2608 : }
4724 :
4725 5652 : for (img_key, img) in results {
4726 3044 : let img = match img {
4727 3044 : Ok(img) => img,
4728 0 : Err(err) => {
4729 0 : // If we fail to reconstruct a VM or FSM page, we can zero the
4730 0 : // page without losing any actual user data. That seems better
4731 0 : // than failing repeatedly and getting stuck.
4732 0 : //
4733 0 : // We had a bug at one point, where we truncated the FSM and VM
4734 0 : // in the pageserver, but the Postgres didn't know about that
4735 0 : // and continued to generate incremental WAL records for pages
4736 0 : // that didn't exist in the pageserver. Trying to replay those
4737 0 : // WAL records failed to find the previous image of the page.
4738 0 : // This special case allows us to recover from that situation.
4739 0 : // See https://github.com/neondatabase/neon/issues/2601.
4740 0 : //
4741 0 : // Unfortunately we cannot do this for the main fork, or for
4742 0 : // any metadata keys, keys, as that would lead to actual data
4743 0 : // loss.
4744 0 : if img_key.is_rel_fsm_block_key() || img_key.is_rel_vm_block_key() {
4745 0 : warn!("could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}");
4746 0 : ZERO_PAGE.clone()
4747 : } else {
4748 0 : return Err(CreateImageLayersError::from(err));
4749 : }
4750 : }
4751 : };
4752 :
4753 : // Write all the keys we just read into our new image layer.
4754 3044 : image_layer_writer.put_image(img_key, img, ctx).await?;
4755 3044 : wrote_keys = true;
4756 : }
4757 436 : }
4758 : }
4759 : }
4760 :
4761 464 : if wrote_keys {
4762 : // Normal path: we have written some data into the new image layer for this
4763 : // partition, so flush it to disk.
4764 464 : info!(
4765 0 : "produced image layer for rel {}",
4766 0 : ImageLayerName {
4767 0 : key_range: img_range.clone(),
4768 0 : lsn
4769 0 : },
4770 : );
4771 464 : Ok(ImageLayerCreationOutcome::Generated {
4772 464 : unfinished_image_layer: image_layer_writer,
4773 464 : })
4774 : } else {
4775 0 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
4776 0 : Ok(ImageLayerCreationOutcome::Empty)
4777 : }
4778 464 : }
4779 :
4780 : /// Create an image layer for metadata keys. This function produces one image layer for all metadata
4781 : /// keys for now. Because metadata keys cannot exceed basebackup size limit, the image layer for it
4782 : /// would not be too large to fit in a single image layer.
4783 : ///
4784 : /// Creating image layers for metadata keys are different from relational keys. Firstly, instead of
4785 : /// iterating each key and get an image for each of them, we do a `vectored_get` scan over the sparse
4786 : /// keyspace to get all images in one run. Secondly, we use a different image layer generation metrics
4787 : /// for metadata keys than relational keys, which is the number of delta files visited during the scan.
4788 : #[allow(clippy::too_many_arguments)]
4789 444 : async fn create_image_layer_for_metadata_keys(
4790 444 : self: &Arc<Self>,
4791 444 : partition: &KeySpace,
4792 444 : mut image_layer_writer: ImageLayerWriter,
4793 444 : lsn: Lsn,
4794 444 : ctx: &RequestContext,
4795 444 : img_range: Range<Key>,
4796 444 : mode: ImageLayerCreationMode,
4797 444 : io_concurrency: IoConcurrency,
4798 444 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
4799 444 : // Metadata keys image layer creation.
4800 444 : let mut reconstruct_state = ValuesReconstructState::new(io_concurrency);
4801 444 : let begin = Instant::now();
4802 : // Directly use `get_vectored_impl` to skip the max_vectored_read_key limit check. Note that the keyspace should
4803 : // not contain too many keys, otherwise this takes a lot of memory.
4804 444 : let data = self
4805 444 : .get_vectored_impl(partition.clone(), lsn, &mut reconstruct_state, ctx)
4806 444 : .await?;
4807 444 : let (data, total_kb_retrieved, total_keys_retrieved) = {
4808 444 : let mut new_data = BTreeMap::new();
4809 444 : let mut total_kb_retrieved = 0;
4810 444 : let mut total_keys_retrieved = 0;
4811 20468 : for (k, v) in data {
4812 20024 : let v = v?;
4813 20024 : total_kb_retrieved += KEY_SIZE + v.len();
4814 20024 : total_keys_retrieved += 1;
4815 20024 : new_data.insert(k, v);
4816 : }
4817 444 : (new_data, total_kb_retrieved / 1024, total_keys_retrieved)
4818 444 : };
4819 444 : let delta_files_accessed = reconstruct_state.get_delta_layers_visited();
4820 444 : let elapsed = begin.elapsed();
4821 444 :
4822 444 : let trigger_generation = delta_files_accessed as usize >= MAX_AUX_FILE_V2_DELTAS;
4823 444 : info!(
4824 0 : "metadata key compaction: trigger_generation={trigger_generation}, delta_files_accessed={delta_files_accessed}, total_kb_retrieved={total_kb_retrieved}, total_keys_retrieved={total_keys_retrieved}, read_time={}s", elapsed.as_secs_f64()
4825 : );
4826 :
4827 444 : if !trigger_generation && mode == ImageLayerCreationMode::Try {
4828 4 : return Ok(ImageLayerCreationOutcome::Skip);
4829 440 : }
4830 440 : if self.cancel.is_cancelled() {
4831 0 : return Err(CreateImageLayersError::Cancelled);
4832 440 : }
4833 440 : let mut wrote_any_image = false;
4834 20464 : for (k, v) in data {
4835 20024 : if v.is_empty() {
4836 : // the key has been deleted, it does not need an image
4837 : // in metadata keyspace, an empty image == tombstone
4838 16 : continue;
4839 20008 : }
4840 20008 : wrote_any_image = true;
4841 20008 :
4842 20008 : // No need to handle sharding b/c metadata keys are always on the 0-th shard.
4843 20008 :
4844 20008 : // TODO: split image layers to avoid too large layer files. Too large image files are not handled
4845 20008 : // on the normal data path either.
4846 20008 : image_layer_writer.put_image(k, v, ctx).await?;
4847 : }
4848 :
4849 440 : if wrote_any_image {
4850 : // Normal path: we have written some data into the new image layer for this
4851 : // partition, so flush it to disk.
4852 24 : info!(
4853 0 : "created image layer for metadata {}",
4854 0 : ImageLayerName {
4855 0 : key_range: img_range.clone(),
4856 0 : lsn
4857 0 : }
4858 : );
4859 24 : Ok(ImageLayerCreationOutcome::Generated {
4860 24 : unfinished_image_layer: image_layer_writer,
4861 24 : })
4862 : } else {
4863 416 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
4864 416 : Ok(ImageLayerCreationOutcome::Empty)
4865 : }
4866 444 : }
4867 :
4868 : /// Predicate function which indicates whether we should check if new image layers
4869 : /// are required. Since checking if new image layers are required is expensive in
4870 : /// terms of CPU, we only do it in the following cases:
4871 : /// 1. If the timeline has ingested sufficient WAL to justify the cost
4872 : /// 2. If enough time has passed since the last check:
4873 : /// 1. For large tenants, we wish to perform the check more often since they
4874 : /// suffer from the lack of image layers
4875 : /// 2. For small tenants (that can mostly fit in RAM), we use a much longer interval
4876 1140 : fn should_check_if_image_layers_required(self: &Arc<Timeline>, lsn: Lsn) -> bool {
4877 : const LARGE_TENANT_THRESHOLD: u64 = 2 * 1024 * 1024 * 1024;
4878 :
4879 1140 : let last_checks_at = self.last_image_layer_creation_check_at.load();
4880 1140 : let distance = lsn
4881 1140 : .checked_sub(last_checks_at)
4882 1140 : .expect("Attempt to compact with LSN going backwards");
4883 1140 : let min_distance =
4884 1140 : self.get_image_layer_creation_check_threshold() as u64 * self.get_checkpoint_distance();
4885 1140 :
4886 1140 : let distance_based_decision = distance.0 >= min_distance;
4887 1140 :
4888 1140 : let mut time_based_decision = false;
4889 1140 : let mut last_check_instant = self.last_image_layer_creation_check_instant.lock().unwrap();
4890 1140 : if let CurrentLogicalSize::Exact(logical_size) = self.current_logical_size.current_size() {
4891 936 : let check_required_after = if Into::<u64>::into(&logical_size) >= LARGE_TENANT_THRESHOLD
4892 : {
4893 0 : self.get_checkpoint_timeout()
4894 : } else {
4895 936 : Duration::from_secs(3600 * 48)
4896 : };
4897 :
4898 936 : time_based_decision = match *last_check_instant {
4899 524 : Some(last_check) => {
4900 524 : let elapsed = last_check.elapsed();
4901 524 : elapsed >= check_required_after
4902 : }
4903 412 : None => true,
4904 : };
4905 204 : }
4906 :
4907 : // Do the expensive delta layer counting only if this timeline has ingested sufficient
4908 : // WAL since the last check or a checkpoint timeout interval has elapsed since the last
4909 : // check.
4910 1140 : let decision = distance_based_decision || time_based_decision;
4911 :
4912 1140 : if decision {
4913 416 : self.last_image_layer_creation_check_at.store(lsn);
4914 416 : *last_check_instant = Some(Instant::now());
4915 724 : }
4916 :
4917 1140 : decision
4918 1140 : }
4919 :
4920 : /// Returns the image layers generated and an enum indicating whether the process is fully completed.
4921 : /// true = we have generate all image layers, false = we preempt the process for L0 compaction.
4922 : #[tracing::instrument(skip_all, fields(%lsn, %mode))]
4923 : async fn create_image_layers(
4924 : self: &Arc<Timeline>,
4925 : partitioning: &KeyPartitioning,
4926 : lsn: Lsn,
4927 : mode: ImageLayerCreationMode,
4928 : ctx: &RequestContext,
4929 : last_status: LastImageLayerCreationStatus,
4930 : yield_for_l0: bool,
4931 : ) -> Result<(Vec<ResidentLayer>, LastImageLayerCreationStatus), CreateImageLayersError> {
4932 : let timer = self.metrics.create_images_time_histo.start_timer();
4933 :
4934 : if partitioning.parts.is_empty() {
4935 : warn!("no partitions to create image layers for");
4936 : return Ok((vec![], LastImageLayerCreationStatus::Complete));
4937 : }
4938 :
4939 : // We need to avoid holes between generated image layers.
4940 : // Otherwise LayerMap::image_layer_exists will return false if key range of some layer is covered by more than one
4941 : // image layer with hole between them. In this case such layer can not be utilized by GC.
4942 : //
4943 : // How such hole between partitions can appear?
4944 : // if we have relation with relid=1 and size 100 and relation with relid=2 with size 200 then result of
4945 : // KeySpace::partition may contain partitions <100000000..100000099> and <200000000..200000199>.
4946 : // If there is delta layer <100000000..300000000> then it never be garbage collected because
4947 : // image layers <100000000..100000099> and <200000000..200000199> are not completely covering it.
4948 : let mut start = Key::MIN;
4949 :
4950 : let check_for_image_layers =
4951 : if let LastImageLayerCreationStatus::Incomplete { last_key } = last_status {
4952 : info!(
4953 : "resuming image layer creation: last_status=incomplete, continue from {}",
4954 : last_key
4955 : );
4956 : true
4957 : } else {
4958 : self.should_check_if_image_layers_required(lsn)
4959 : };
4960 :
4961 : let mut batch_image_writer = BatchLayerWriter::new(self.conf).await?;
4962 :
4963 : let mut all_generated = true;
4964 :
4965 : let mut partition_processed = 0;
4966 : let mut total_partitions = partitioning.parts.len();
4967 : let mut last_partition_processed = None;
4968 : let mut partition_parts = partitioning.parts.clone();
4969 :
4970 : if let LastImageLayerCreationStatus::Incomplete { last_key } = last_status {
4971 : // We need to skip the partitions that have already been processed.
4972 : let mut found = false;
4973 : for (i, partition) in partition_parts.iter().enumerate() {
4974 : if last_key <= partition.end().unwrap() {
4975 : // ```plain
4976 : // |------|--------|----------|------|
4977 : // ^last_key
4978 : // ^start from this partition
4979 : // ```
4980 : // Why `i+1` instead of `i`?
4981 : // It is possible that the user did some writes after the previous image layer creation attempt so that
4982 : // a relation grows in size, and the last_key is now in the middle of the partition. In this case, we
4983 : // still want to skip this partition, so that we can make progress and avoid generating image layers over
4984 : // the same partition. Doing a mod to ensure we don't end up with an empty vec.
4985 : if i + 1 >= total_partitions {
4986 : // In general, this case should not happen -- if last_key is on the last partition, the previous
4987 : // iteration of image layer creation should return a complete status.
4988 : break; // with found=false
4989 : }
4990 : partition_parts = partition_parts.split_off(i + 1); // Remove the first i + 1 elements
4991 : total_partitions = partition_parts.len();
4992 : // Update the start key to the partition start.
4993 : start = partition_parts[0].start().unwrap();
4994 : found = true;
4995 : break;
4996 : }
4997 : }
4998 : if !found {
4999 : // Last key is within the last partition, or larger than all partitions.
5000 : return Ok((vec![], LastImageLayerCreationStatus::Complete));
5001 : }
5002 : }
5003 :
5004 : for partition in partition_parts.iter() {
5005 : if self.cancel.is_cancelled() {
5006 : return Err(CreateImageLayersError::Cancelled);
5007 : }
5008 : partition_processed += 1;
5009 : let img_range = start..partition.ranges.last().unwrap().end;
5010 : let compact_metadata = partition.overlaps(&Key::metadata_key_range());
5011 : if compact_metadata {
5012 : for range in &partition.ranges {
5013 : assert!(
5014 : range.start.field1 >= METADATA_KEY_BEGIN_PREFIX
5015 : && range.end.field1 <= METADATA_KEY_END_PREFIX,
5016 : "metadata keys must be partitioned separately"
5017 : );
5018 : }
5019 : if mode == ImageLayerCreationMode::Try && !check_for_image_layers {
5020 : // Skip compaction if there are not enough updates. Metadata compaction will do a scan and
5021 : // might mess up with evictions.
5022 : start = img_range.end;
5023 : continue;
5024 : }
5025 : // For initial and force modes, we always generate image layers for metadata keys.
5026 : } else if let ImageLayerCreationMode::Try = mode {
5027 : // check_for_image_layers = false -> skip
5028 : // check_for_image_layers = true -> check time_for_new_image_layer -> skip/generate
5029 : if !check_for_image_layers || !self.time_for_new_image_layer(partition, lsn).await {
5030 : start = img_range.end;
5031 : continue;
5032 : }
5033 : }
5034 : if let ImageLayerCreationMode::Force = mode {
5035 : // When forced to create image layers, we might try and create them where they already
5036 : // exist. This mode is only used in tests/debug.
5037 : let layers = self.layers.read().await;
5038 : if layers.contains_key(&PersistentLayerKey {
5039 : key_range: img_range.clone(),
5040 : lsn_range: PersistentLayerDesc::image_layer_lsn_range(lsn),
5041 : is_delta: false,
5042 : }) {
5043 : // TODO: this can be processed with the BatchLayerWriter::finish_with_discard
5044 : // in the future.
5045 : tracing::info!(
5046 : "Skipping image layer at {lsn} {}..{}, already exists",
5047 : img_range.start,
5048 : img_range.end
5049 : );
5050 : start = img_range.end;
5051 : continue;
5052 : }
5053 : }
5054 :
5055 : let image_layer_writer = ImageLayerWriter::new(
5056 : self.conf,
5057 : self.timeline_id,
5058 : self.tenant_shard_id,
5059 : &img_range,
5060 : lsn,
5061 : ctx,
5062 : )
5063 : .await?;
5064 :
5065 0 : fail_point!("image-layer-writer-fail-before-finish", |_| {
5066 0 : Err(CreateImageLayersError::Other(anyhow::anyhow!(
5067 0 : "failpoint image-layer-writer-fail-before-finish"
5068 0 : )))
5069 0 : });
5070 :
5071 : let io_concurrency = IoConcurrency::spawn_from_conf(
5072 : self.conf,
5073 : self.gate
5074 : .enter()
5075 0 : .map_err(|_| CreateImageLayersError::Cancelled)?,
5076 : );
5077 :
5078 : let outcome = if !compact_metadata {
5079 : self.create_image_layer_for_rel_blocks(
5080 : partition,
5081 : image_layer_writer,
5082 : lsn,
5083 : ctx,
5084 : img_range.clone(),
5085 : io_concurrency,
5086 : )
5087 : .await?
5088 : } else {
5089 : self.create_image_layer_for_metadata_keys(
5090 : partition,
5091 : image_layer_writer,
5092 : lsn,
5093 : ctx,
5094 : img_range.clone(),
5095 : mode,
5096 : io_concurrency,
5097 : )
5098 : .await?
5099 : };
5100 : match outcome {
5101 : ImageLayerCreationOutcome::Empty => {
5102 : // No data in this partition, so we don't need to create an image layer (for now).
5103 : // The next image layer should cover this key range, so we don't advance the `start`
5104 : // key.
5105 : }
5106 : ImageLayerCreationOutcome::Generated {
5107 : unfinished_image_layer,
5108 : } => {
5109 : batch_image_writer.add_unfinished_image_writer(
5110 : unfinished_image_layer,
5111 : img_range.clone(),
5112 : lsn,
5113 : );
5114 : // The next image layer should be generated right after this one.
5115 : start = img_range.end;
5116 : }
5117 : ImageLayerCreationOutcome::Skip => {
5118 : // We don't need to create an image layer for this partition.
5119 : // The next image layer should NOT cover this range, otherwise
5120 : // the keyspace becomes empty (reads don't go past image layers).
5121 : start = img_range.end;
5122 : }
5123 : }
5124 :
5125 : if let ImageLayerCreationMode::Try = mode {
5126 : // We have at least made some progress
5127 : if yield_for_l0 && batch_image_writer.pending_layer_num() >= 1 {
5128 : // The `Try` mode is currently only used on the compaction path. We want to avoid
5129 : // image layer generation taking too long time and blocking L0 compaction. So in this
5130 : // mode, we also inspect the current number of L0 layers and skip image layer generation
5131 : // if there are too many of them.
5132 : let image_preempt_threshold = self.get_image_creation_preempt_threshold()
5133 : * self.get_compaction_threshold();
5134 : // TODO: currently we do not respect `get_image_creation_preempt_threshold` and always yield
5135 : // when there is a single timeline with more than L0 threshold L0 layers. As long as the
5136 : // `get_image_creation_preempt_threshold` is set to a value greater than 0, we will yield for L0 compaction.
5137 : if image_preempt_threshold != 0 {
5138 : let should_yield = self
5139 : .l0_compaction_trigger
5140 : .notified()
5141 : .now_or_never()
5142 : .is_some();
5143 : if should_yield {
5144 : tracing::info!(
5145 : "preempt image layer generation at {lsn} when processing partition {}..{}: too many L0 layers",
5146 : partition.start().unwrap(), partition.end().unwrap()
5147 : );
5148 : last_partition_processed = Some(partition.clone());
5149 : all_generated = false;
5150 : break;
5151 : }
5152 : }
5153 : }
5154 : }
5155 : }
5156 :
5157 : let image_layers = batch_image_writer.finish(self, ctx).await?;
5158 :
5159 : let mut guard = self.layers.write().await;
5160 :
5161 : // FIXME: we could add the images to be uploaded *before* returning from here, but right
5162 : // now they are being scheduled outside of write lock; current way is inconsistent with
5163 : // compaction lock order.
5164 : guard
5165 : .open_mut()?
5166 : .track_new_image_layers(&image_layers, &self.metrics);
5167 : drop_wlock(guard);
5168 : let duration = timer.stop_and_record();
5169 :
5170 : // Creating image layers may have caused some previously visible layers to be covered
5171 : if !image_layers.is_empty() {
5172 : self.update_layer_visibility().await?;
5173 : }
5174 :
5175 : let total_layer_size = image_layers
5176 : .iter()
5177 488 : .map(|l| l.metadata().file_size)
5178 : .sum::<u64>();
5179 :
5180 : if !image_layers.is_empty() {
5181 : info!(
5182 : "created {} image layers ({} bytes) in {}s, processed {} out of {} partitions",
5183 : image_layers.len(),
5184 : total_layer_size,
5185 : duration.as_secs_f64(),
5186 : partition_processed,
5187 : total_partitions
5188 : );
5189 : }
5190 :
5191 : Ok((
5192 : image_layers,
5193 : if all_generated {
5194 : LastImageLayerCreationStatus::Complete
5195 : } else {
5196 : LastImageLayerCreationStatus::Incomplete {
5197 : last_key: if let Some(last_partition_processed) = last_partition_processed {
5198 : last_partition_processed.end().unwrap_or(Key::MIN)
5199 : } else {
5200 : // This branch should be unreachable, but in case it happens, we can just return the start key.
5201 : Key::MIN
5202 : },
5203 : }
5204 : },
5205 : ))
5206 : }
5207 :
5208 : /// Wait until the background initial logical size calculation is complete, or
5209 : /// this Timeline is shut down. Calling this function will cause the initial
5210 : /// logical size calculation to skip waiting for the background jobs barrier.
5211 0 : pub(crate) async fn await_initial_logical_size(self: Arc<Self>) {
5212 0 : if !self.shard_identity.is_shard_zero() {
5213 : // We don't populate logical size on shard >0: skip waiting for it.
5214 0 : return;
5215 0 : }
5216 0 :
5217 0 : if self.remote_client.is_deleting() {
5218 : // The timeline was created in a deletion-resume state, we don't expect logical size to be populated
5219 0 : return;
5220 0 : }
5221 0 :
5222 0 : if self.current_logical_size.current_size().is_exact() {
5223 : // root timelines are initialized with exact count, but never start the background
5224 : // calculation
5225 0 : return;
5226 0 : }
5227 :
5228 0 : if let Some(await_bg_cancel) = self
5229 0 : .current_logical_size
5230 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
5231 0 : .get()
5232 0 : {
5233 0 : await_bg_cancel.cancel();
5234 0 : } else {
5235 : // We should not wait if we were not able to explicitly instruct
5236 : // the logical size cancellation to skip the concurrency limit semaphore.
5237 : // TODO: this is an unexpected case. We should restructure so that it
5238 : // can't happen.
5239 0 : tracing::warn!(
5240 0 : "await_initial_logical_size: can't get semaphore cancel token, skipping"
5241 : );
5242 0 : debug_assert!(false);
5243 : }
5244 :
5245 0 : tokio::select!(
5246 0 : _ = self.current_logical_size.initialized.acquire() => {},
5247 0 : _ = self.cancel.cancelled() => {}
5248 : )
5249 0 : }
5250 :
5251 : /// Detach this timeline from its ancestor by copying all of ancestors layers as this
5252 : /// Timelines layers up to the ancestor_lsn.
5253 : ///
5254 : /// Requires a timeline that:
5255 : /// - has an ancestor to detach from
5256 : /// - the ancestor does not have an ancestor -- follows from the original RFC limitations, not
5257 : /// a technical requirement
5258 : ///
5259 : /// After the operation has been started, it cannot be canceled. Upon restart it needs to be
5260 : /// polled again until completion.
5261 : ///
5262 : /// During the operation all timelines sharing the data with this timeline will be reparented
5263 : /// from our ancestor to be branches of this timeline.
5264 0 : pub(crate) async fn prepare_to_detach_from_ancestor(
5265 0 : self: &Arc<Timeline>,
5266 0 : tenant: &crate::tenant::Tenant,
5267 0 : options: detach_ancestor::Options,
5268 0 : ctx: &RequestContext,
5269 0 : ) -> Result<detach_ancestor::Progress, detach_ancestor::Error> {
5270 0 : detach_ancestor::prepare(self, tenant, options, ctx).await
5271 0 : }
5272 :
5273 : /// Second step of detach from ancestor; detaches the `self` from it's current ancestor and
5274 : /// reparents any reparentable children of previous ancestor.
5275 : ///
5276 : /// This method is to be called while holding the TenantManager's tenant slot, so during this
5277 : /// method we cannot be deleted nor can any timeline be deleted. After this method returns
5278 : /// successfully, tenant must be reloaded.
5279 : ///
5280 : /// Final step will be to [`Self::complete_detaching_timeline_ancestor`] after optionally
5281 : /// resetting the tenant.
5282 0 : pub(crate) async fn detach_from_ancestor_and_reparent(
5283 0 : self: &Arc<Timeline>,
5284 0 : tenant: &crate::tenant::Tenant,
5285 0 : prepared: detach_ancestor::PreparedTimelineDetach,
5286 0 : ctx: &RequestContext,
5287 0 : ) -> Result<detach_ancestor::DetachingAndReparenting, detach_ancestor::Error> {
5288 0 : detach_ancestor::detach_and_reparent(self, tenant, prepared, ctx).await
5289 0 : }
5290 :
5291 : /// Final step which unblocks the GC.
5292 : ///
5293 : /// The tenant must've been reset if ancestry was modified previously (in tenant manager).
5294 0 : pub(crate) async fn complete_detaching_timeline_ancestor(
5295 0 : self: &Arc<Timeline>,
5296 0 : tenant: &crate::tenant::Tenant,
5297 0 : attempt: detach_ancestor::Attempt,
5298 0 : ctx: &RequestContext,
5299 0 : ) -> Result<(), detach_ancestor::Error> {
5300 0 : detach_ancestor::complete(self, tenant, attempt, ctx).await
5301 0 : }
5302 : }
5303 :
5304 : impl Drop for Timeline {
5305 20 : fn drop(&mut self) {
5306 20 : if let Some(ancestor) = &self.ancestor_timeline {
5307 : // This lock should never be poisoned, but in case it is we do a .map() instead of
5308 : // an unwrap(), to avoid panicking in a destructor and thereby aborting the process.
5309 8 : if let Ok(mut gc_info) = ancestor.gc_info.write() {
5310 8 : if !gc_info.remove_child_not_offloaded(self.timeline_id) {
5311 0 : tracing::error!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id,
5312 0 : "Couldn't remove retain_lsn entry from offloaded timeline's parent: already removed");
5313 8 : }
5314 0 : }
5315 12 : }
5316 20 : info!(
5317 0 : "Timeline {} for tenant {} is being dropped",
5318 : self.timeline_id, self.tenant_shard_id.tenant_id
5319 : );
5320 20 : }
5321 : }
5322 :
5323 : /// Top-level failure to compact.
5324 : #[derive(Debug, thiserror::Error)]
5325 : pub(crate) enum CompactionError {
5326 : #[error("The timeline or pageserver is shutting down")]
5327 : ShuttingDown,
5328 : /// Compaction tried to offload a timeline and failed
5329 : #[error("Failed to offload timeline: {0}")]
5330 : Offload(OffloadError),
5331 : /// Compaction cannot be done right now; page reconstruction and so on.
5332 : #[error("Failed to collect keyspace: {0}")]
5333 : CollectKeySpaceError(CollectKeySpaceError),
5334 : #[error(transparent)]
5335 : Other(anyhow::Error),
5336 : }
5337 :
5338 : impl From<OffloadError> for CompactionError {
5339 0 : fn from(e: OffloadError) -> Self {
5340 0 : match e {
5341 0 : OffloadError::Cancelled => Self::ShuttingDown,
5342 0 : _ => Self::Offload(e),
5343 : }
5344 0 : }
5345 : }
5346 :
5347 : impl From<CollectKeySpaceError> for CompactionError {
5348 0 : fn from(err: CollectKeySpaceError) -> Self {
5349 0 : match err {
5350 : CollectKeySpaceError::Cancelled
5351 : | CollectKeySpaceError::PageRead(PageReconstructError::Cancelled) => {
5352 0 : CompactionError::ShuttingDown
5353 : }
5354 0 : e => CompactionError::Other(e.into()),
5355 : }
5356 0 : }
5357 : }
5358 :
5359 : impl From<super::upload_queue::NotInitialized> for CompactionError {
5360 0 : fn from(value: super::upload_queue::NotInitialized) -> Self {
5361 0 : match value {
5362 : super::upload_queue::NotInitialized::Uninitialized => {
5363 0 : CompactionError::Other(anyhow::anyhow!(value))
5364 : }
5365 : super::upload_queue::NotInitialized::ShuttingDown
5366 0 : | super::upload_queue::NotInitialized::Stopped => CompactionError::ShuttingDown,
5367 : }
5368 0 : }
5369 : }
5370 :
5371 : impl From<super::storage_layer::layer::DownloadError> for CompactionError {
5372 0 : fn from(e: super::storage_layer::layer::DownloadError) -> Self {
5373 0 : match e {
5374 : super::storage_layer::layer::DownloadError::TimelineShutdown
5375 : | super::storage_layer::layer::DownloadError::DownloadCancelled => {
5376 0 : CompactionError::ShuttingDown
5377 : }
5378 : super::storage_layer::layer::DownloadError::ContextAndConfigReallyDeniesDownloads
5379 : | super::storage_layer::layer::DownloadError::DownloadRequired
5380 : | super::storage_layer::layer::DownloadError::NotFile(_)
5381 : | super::storage_layer::layer::DownloadError::DownloadFailed
5382 : | super::storage_layer::layer::DownloadError::PreStatFailed(_) => {
5383 0 : CompactionError::Other(anyhow::anyhow!(e))
5384 : }
5385 : #[cfg(test)]
5386 : super::storage_layer::layer::DownloadError::Failpoint(_) => {
5387 0 : CompactionError::Other(anyhow::anyhow!(e))
5388 : }
5389 : }
5390 0 : }
5391 : }
5392 :
5393 : impl From<layer_manager::Shutdown> for CompactionError {
5394 0 : fn from(_: layer_manager::Shutdown) -> Self {
5395 0 : CompactionError::ShuttingDown
5396 0 : }
5397 : }
5398 :
5399 : #[serde_as]
5400 392 : #[derive(serde::Serialize)]
5401 : struct RecordedDuration(#[serde_as(as = "serde_with::DurationMicroSeconds")] Duration);
5402 :
5403 : #[derive(Default)]
5404 : enum DurationRecorder {
5405 : #[default]
5406 : NotStarted,
5407 : Recorded(RecordedDuration, tokio::time::Instant),
5408 : }
5409 :
5410 : impl DurationRecorder {
5411 1008 : fn till_now(&self) -> DurationRecorder {
5412 1008 : match self {
5413 : DurationRecorder::NotStarted => {
5414 0 : panic!("must only call on recorded measurements")
5415 : }
5416 1008 : DurationRecorder::Recorded(_, ended) => {
5417 1008 : let now = tokio::time::Instant::now();
5418 1008 : DurationRecorder::Recorded(RecordedDuration(now - *ended), now)
5419 1008 : }
5420 1008 : }
5421 1008 : }
5422 392 : fn into_recorded(self) -> Option<RecordedDuration> {
5423 392 : match self {
5424 0 : DurationRecorder::NotStarted => None,
5425 392 : DurationRecorder::Recorded(recorded, _) => Some(recorded),
5426 : }
5427 392 : }
5428 : }
5429 :
5430 : /// Descriptor for a delta layer used in testing infra. The start/end key/lsn range of the
5431 : /// delta layer might be different from the min/max key/lsn in the delta layer. Therefore,
5432 : /// the layer descriptor requires the user to provide the ranges, which should cover all
5433 : /// keys specified in the `data` field.
5434 : #[cfg(test)]
5435 : #[derive(Clone)]
5436 : pub struct DeltaLayerTestDesc {
5437 : pub lsn_range: Range<Lsn>,
5438 : pub key_range: Range<Key>,
5439 : pub data: Vec<(Key, Lsn, Value)>,
5440 : }
5441 :
5442 : #[cfg(test)]
5443 : impl DeltaLayerTestDesc {
5444 8 : pub fn new(lsn_range: Range<Lsn>, key_range: Range<Key>, data: Vec<(Key, Lsn, Value)>) -> Self {
5445 8 : Self {
5446 8 : lsn_range,
5447 8 : key_range,
5448 8 : data,
5449 8 : }
5450 8 : }
5451 :
5452 176 : pub fn new_with_inferred_key_range(
5453 176 : lsn_range: Range<Lsn>,
5454 176 : data: Vec<(Key, Lsn, Value)>,
5455 176 : ) -> Self {
5456 440 : let key_min = data.iter().map(|(key, _, _)| key).min().unwrap();
5457 440 : let key_max = data.iter().map(|(key, _, _)| key).max().unwrap();
5458 176 : Self {
5459 176 : key_range: (*key_min)..(key_max.next()),
5460 176 : lsn_range,
5461 176 : data,
5462 176 : }
5463 176 : }
5464 :
5465 20 : pub(crate) fn layer_name(&self) -> LayerName {
5466 20 : LayerName::Delta(super::storage_layer::DeltaLayerName {
5467 20 : key_range: self.key_range.clone(),
5468 20 : lsn_range: self.lsn_range.clone(),
5469 20 : })
5470 20 : }
5471 : }
5472 :
5473 : impl Timeline {
5474 56 : async fn finish_compact_batch(
5475 56 : self: &Arc<Self>,
5476 56 : new_deltas: &[ResidentLayer],
5477 56 : new_images: &[ResidentLayer],
5478 56 : layers_to_remove: &[Layer],
5479 56 : ) -> Result<(), CompactionError> {
5480 56 : let mut guard = tokio::select! {
5481 56 : guard = self.layers.write() => guard,
5482 56 : _ = self.cancel.cancelled() => {
5483 0 : return Err(CompactionError::ShuttingDown);
5484 : }
5485 : };
5486 :
5487 56 : let mut duplicated_layers = HashSet::new();
5488 56 :
5489 56 : let mut insert_layers = Vec::with_capacity(new_deltas.len());
5490 :
5491 672 : for l in new_deltas {
5492 616 : if guard.contains(l.as_ref()) {
5493 : // expected in tests
5494 0 : tracing::error!(layer=%l, "duplicated L1 layer");
5495 :
5496 : // good ways to cause a duplicate: we repeatedly error after taking the writelock
5497 : // `guard` on self.layers. as of writing this, there are no error returns except
5498 : // for compact_level0_phase1 creating an L0, which does not happen in practice
5499 : // because we have not implemented L0 => L0 compaction.
5500 0 : duplicated_layers.insert(l.layer_desc().key());
5501 616 : } else if LayerMap::is_l0(&l.layer_desc().key_range, l.layer_desc().is_delta) {
5502 0 : return Err(CompactionError::Other(anyhow::anyhow!("compaction generates a L0 layer file as output, which will cause infinite compaction.")));
5503 616 : } else {
5504 616 : insert_layers.push(l.clone());
5505 616 : }
5506 : }
5507 :
5508 : // only remove those inputs which were not outputs
5509 56 : let remove_layers: Vec<Layer> = layers_to_remove
5510 56 : .iter()
5511 804 : .filter(|l| !duplicated_layers.contains(&l.layer_desc().key()))
5512 56 : .cloned()
5513 56 : .collect();
5514 56 :
5515 56 : if !new_images.is_empty() {
5516 0 : guard
5517 0 : .open_mut()?
5518 0 : .track_new_image_layers(new_images, &self.metrics);
5519 56 : }
5520 :
5521 56 : guard
5522 56 : .open_mut()?
5523 56 : .finish_compact_l0(&remove_layers, &insert_layers, &self.metrics);
5524 56 :
5525 56 : self.remote_client
5526 56 : .schedule_compaction_update(&remove_layers, new_deltas)?;
5527 :
5528 56 : drop_wlock(guard);
5529 56 :
5530 56 : Ok(())
5531 56 : }
5532 :
5533 0 : async fn rewrite_layers(
5534 0 : self: &Arc<Self>,
5535 0 : mut replace_layers: Vec<(Layer, ResidentLayer)>,
5536 0 : mut drop_layers: Vec<Layer>,
5537 0 : ) -> Result<(), CompactionError> {
5538 0 : let mut guard = self.layers.write().await;
5539 :
5540 : // Trim our lists in case our caller (compaction) raced with someone else (GC) removing layers: we want
5541 : // to avoid double-removing, and avoid rewriting something that was removed.
5542 0 : replace_layers.retain(|(l, _)| guard.contains(l));
5543 0 : drop_layers.retain(|l| guard.contains(l));
5544 0 :
5545 0 : guard
5546 0 : .open_mut()?
5547 0 : .rewrite_layers(&replace_layers, &drop_layers, &self.metrics);
5548 0 :
5549 0 : let upload_layers: Vec<_> = replace_layers.into_iter().map(|r| r.1).collect();
5550 0 :
5551 0 : self.remote_client
5552 0 : .schedule_compaction_update(&drop_layers, &upload_layers)?;
5553 :
5554 0 : Ok(())
5555 0 : }
5556 :
5557 : /// Schedules the uploads of the given image layers
5558 728 : fn upload_new_image_layers(
5559 728 : self: &Arc<Self>,
5560 728 : new_images: impl IntoIterator<Item = ResidentLayer>,
5561 728 : ) -> Result<(), super::upload_queue::NotInitialized> {
5562 780 : for layer in new_images {
5563 52 : self.remote_client.schedule_layer_file_upload(layer)?;
5564 : }
5565 : // should any new image layer been created, not uploading index_part will
5566 : // result in a mismatch between remote_physical_size and layermap calculated
5567 : // size, which will fail some tests, but should not be an issue otherwise.
5568 728 : self.remote_client
5569 728 : .schedule_index_upload_for_file_changes()?;
5570 728 : Ok(())
5571 728 : }
5572 :
5573 0 : async fn find_gc_time_cutoff(
5574 0 : &self,
5575 0 : now: SystemTime,
5576 0 : pitr: Duration,
5577 0 : cancel: &CancellationToken,
5578 0 : ctx: &RequestContext,
5579 0 : ) -> Result<Option<Lsn>, PageReconstructError> {
5580 0 : debug_assert_current_span_has_tenant_and_timeline_id();
5581 0 : if self.shard_identity.is_shard_zero() {
5582 : // Shard Zero has SLRU data and can calculate the PITR time -> LSN mapping itself
5583 0 : let time_range = if pitr == Duration::ZERO {
5584 0 : humantime::parse_duration(DEFAULT_PITR_INTERVAL).expect("constant is invalid")
5585 : } else {
5586 0 : pitr
5587 : };
5588 :
5589 : // If PITR is so large or `now` is so small that this underflows, we will retain no history (highly unexpected case)
5590 0 : let time_cutoff = now.checked_sub(time_range).unwrap_or(now);
5591 0 : let timestamp = to_pg_timestamp(time_cutoff);
5592 :
5593 0 : let time_cutoff = match self.find_lsn_for_timestamp(timestamp, cancel, ctx).await? {
5594 0 : LsnForTimestamp::Present(lsn) => Some(lsn),
5595 0 : LsnForTimestamp::Future(lsn) => {
5596 0 : // The timestamp is in the future. That sounds impossible,
5597 0 : // but what it really means is that there hasn't been
5598 0 : // any commits since the cutoff timestamp.
5599 0 : //
5600 0 : // In this case we should use the LSN of the most recent commit,
5601 0 : // which is implicitly the last LSN in the log.
5602 0 : debug!("future({})", lsn);
5603 0 : Some(self.get_last_record_lsn())
5604 : }
5605 0 : LsnForTimestamp::Past(lsn) => {
5606 0 : debug!("past({})", lsn);
5607 0 : None
5608 : }
5609 0 : LsnForTimestamp::NoData(lsn) => {
5610 0 : debug!("nodata({})", lsn);
5611 0 : None
5612 : }
5613 : };
5614 0 : Ok(time_cutoff)
5615 : } else {
5616 : // Shards other than shard zero cannot do timestamp->lsn lookups, and must instead learn their GC cutoff
5617 : // from shard zero's index. The index doesn't explicitly tell us the time cutoff, but we may assume that
5618 : // the point up to which shard zero's last_gc_cutoff has advanced will either be the time cutoff, or a
5619 : // space cutoff that we would also have respected ourselves.
5620 0 : match self
5621 0 : .remote_client
5622 0 : .download_foreign_index(ShardNumber(0), cancel)
5623 0 : .await
5624 : {
5625 0 : Ok((index_part, index_generation, _index_mtime)) => {
5626 0 : tracing::info!("GC loaded shard zero metadata (gen {index_generation:?}): latest_gc_cutoff_lsn: {}",
5627 0 : index_part.metadata.latest_gc_cutoff_lsn());
5628 0 : Ok(Some(index_part.metadata.latest_gc_cutoff_lsn()))
5629 : }
5630 : Err(DownloadError::NotFound) => {
5631 : // This is unexpected, because during timeline creations shard zero persists to remote
5632 : // storage before other shards are called, and during timeline deletion non-zeroth shards are
5633 : // deleted before the zeroth one. However, it should be harmless: if we somehow end up in this
5634 : // state, then shard zero should _eventually_ write an index when it GCs.
5635 0 : tracing::warn!("GC couldn't find shard zero's index for timeline");
5636 0 : Ok(None)
5637 : }
5638 0 : Err(e) => {
5639 0 : // TODO: this function should return a different error type than page reconstruct error
5640 0 : Err(PageReconstructError::Other(anyhow::anyhow!(e)))
5641 : }
5642 : }
5643 :
5644 : // TODO: after reading shard zero's GC cutoff, we should validate its generation with the storage
5645 : // controller. Otherwise, it is possible that we see the GC cutoff go backwards while shard zero
5646 : // is going through a migration if we read the old location's index and it has GC'd ahead of the
5647 : // new location. This is legal in principle, but problematic in practice because it might result
5648 : // in a timeline creation succeeding on shard zero ('s new location) but then failing on other shards
5649 : // because they have GC'd past the branch point.
5650 : }
5651 0 : }
5652 :
5653 : /// Find the Lsns above which layer files need to be retained on
5654 : /// garbage collection.
5655 : ///
5656 : /// We calculate two cutoffs, one based on time and one based on WAL size. `pitr`
5657 : /// controls the time cutoff (or ZERO to disable time-based retention), and `space_cutoff` controls
5658 : /// the space-based retention.
5659 : ///
5660 : /// This function doesn't simply to calculate time & space based retention: it treats time-based
5661 : /// retention as authoritative if enabled, and falls back to space-based retention if calculating
5662 : /// the LSN for a time point isn't possible. Therefore the GcCutoffs::horizon in the response might
5663 : /// be different to the `space_cutoff` input. Callers should treat the min() of the two cutoffs
5664 : /// in the response as the GC cutoff point for the timeline.
5665 : #[instrument(skip_all, fields(timeline_id=%self.timeline_id))]
5666 : pub(super) async fn find_gc_cutoffs(
5667 : &self,
5668 : now: SystemTime,
5669 : space_cutoff: Lsn,
5670 : pitr: Duration,
5671 : cancel: &CancellationToken,
5672 : ctx: &RequestContext,
5673 : ) -> Result<GcCutoffs, PageReconstructError> {
5674 : let _timer = self
5675 : .metrics
5676 : .find_gc_cutoffs_histo
5677 : .start_timer()
5678 : .record_on_drop();
5679 :
5680 : pausable_failpoint!("Timeline::find_gc_cutoffs-pausable");
5681 :
5682 : if cfg!(test) {
5683 : // Unit tests which specify zero PITR interval expect to avoid doing any I/O for timestamp lookup
5684 : if pitr == Duration::ZERO {
5685 : return Ok(GcCutoffs {
5686 : time: self.get_last_record_lsn(),
5687 : space: space_cutoff,
5688 : });
5689 : }
5690 : }
5691 :
5692 : // Calculate a time-based limit on how much to retain:
5693 : // - if PITR interval is set, then this is our cutoff.
5694 : // - if PITR interval is not set, then we do a lookup
5695 : // based on DEFAULT_PITR_INTERVAL, so that size-based retention does not result in keeping history around permanently on idle databases.
5696 : let time_cutoff = self.find_gc_time_cutoff(now, pitr, cancel, ctx).await?;
5697 :
5698 : Ok(match (pitr, time_cutoff) {
5699 : (Duration::ZERO, Some(time_cutoff)) => {
5700 : // PITR is not set. Retain the size-based limit, or the default time retention,
5701 : // whichever requires less data.
5702 : GcCutoffs {
5703 : time: self.get_last_record_lsn(),
5704 : space: std::cmp::max(time_cutoff, space_cutoff),
5705 : }
5706 : }
5707 : (Duration::ZERO, None) => {
5708 : // PITR is not set, and time lookup failed
5709 : GcCutoffs {
5710 : time: self.get_last_record_lsn(),
5711 : space: space_cutoff,
5712 : }
5713 : }
5714 : (_, None) => {
5715 : // PITR interval is set & we didn't look up a timestamp successfully. Conservatively assume PITR
5716 : // cannot advance beyond what was already GC'd, and respect space-based retention
5717 : GcCutoffs {
5718 : time: *self.get_applied_gc_cutoff_lsn(),
5719 : space: space_cutoff,
5720 : }
5721 : }
5722 : (_, Some(time_cutoff)) => {
5723 : // PITR interval is set and we looked up timestamp successfully. Ignore
5724 : // size based retention and make time cutoff authoritative
5725 : GcCutoffs {
5726 : time: time_cutoff,
5727 : space: time_cutoff,
5728 : }
5729 : }
5730 : })
5731 : }
5732 :
5733 : /// Garbage collect layer files on a timeline that are no longer needed.
5734 : ///
5735 : /// Currently, we don't make any attempt at removing unneeded page versions
5736 : /// within a layer file. We can only remove the whole file if it's fully
5737 : /// obsolete.
5738 8 : pub(super) async fn gc(&self) -> Result<GcResult, GcError> {
5739 : // this is most likely the background tasks, but it might be the spawned task from
5740 : // immediate_gc
5741 8 : let _g = tokio::select! {
5742 8 : guard = self.gc_lock.lock() => guard,
5743 8 : _ = self.cancel.cancelled() => return Ok(GcResult::default()),
5744 : };
5745 8 : let timer = self.metrics.garbage_collect_histo.start_timer();
5746 8 :
5747 8 : fail_point!("before-timeline-gc");
5748 8 :
5749 8 : // Is the timeline being deleted?
5750 8 : if self.is_stopping() {
5751 0 : return Err(GcError::TimelineCancelled);
5752 8 : }
5753 8 :
5754 8 : let (space_cutoff, time_cutoff, retain_lsns, max_lsn_with_valid_lease) = {
5755 8 : let gc_info = self.gc_info.read().unwrap();
5756 8 :
5757 8 : let space_cutoff = min(gc_info.cutoffs.space, self.get_disk_consistent_lsn());
5758 8 : let time_cutoff = gc_info.cutoffs.time;
5759 8 : let retain_lsns = gc_info
5760 8 : .retain_lsns
5761 8 : .iter()
5762 8 : .map(|(lsn, _child_id, _is_offloaded)| *lsn)
5763 8 : .collect();
5764 8 :
5765 8 : // Gets the maximum LSN that holds the valid lease.
5766 8 : //
5767 8 : // Caveat: `refresh_gc_info` is in charged of updating the lease map.
5768 8 : // Here, we do not check for stale leases again.
5769 8 : let max_lsn_with_valid_lease = gc_info.leases.last_key_value().map(|(lsn, _)| *lsn);
5770 8 :
5771 8 : (
5772 8 : space_cutoff,
5773 8 : time_cutoff,
5774 8 : retain_lsns,
5775 8 : max_lsn_with_valid_lease,
5776 8 : )
5777 8 : };
5778 8 :
5779 8 : let mut new_gc_cutoff = Lsn::min(space_cutoff, time_cutoff);
5780 8 : let standby_horizon = self.standby_horizon.load();
5781 8 : // Hold GC for the standby, but as a safety guard do it only within some
5782 8 : // reasonable lag.
5783 8 : if standby_horizon != Lsn::INVALID {
5784 0 : if let Some(standby_lag) = new_gc_cutoff.checked_sub(standby_horizon) {
5785 : const MAX_ALLOWED_STANDBY_LAG: u64 = 10u64 << 30; // 10 GB
5786 0 : if standby_lag.0 < MAX_ALLOWED_STANDBY_LAG {
5787 0 : new_gc_cutoff = Lsn::min(standby_horizon, new_gc_cutoff);
5788 0 : trace!("holding off GC for standby apply LSN {}", standby_horizon);
5789 : } else {
5790 0 : warn!(
5791 0 : "standby is lagging for more than {}MB, not holding gc for it",
5792 0 : MAX_ALLOWED_STANDBY_LAG / 1024 / 1024
5793 : )
5794 : }
5795 0 : }
5796 8 : }
5797 :
5798 : // Reset standby horizon to ignore it if it is not updated till next GC.
5799 : // It is an easy way to unset it when standby disappears without adding
5800 : // more conf options.
5801 8 : self.standby_horizon.store(Lsn::INVALID);
5802 8 : self.metrics
5803 8 : .standby_horizon_gauge
5804 8 : .set(Lsn::INVALID.0 as i64);
5805 :
5806 8 : let res = self
5807 8 : .gc_timeline(
5808 8 : space_cutoff,
5809 8 : time_cutoff,
5810 8 : retain_lsns,
5811 8 : max_lsn_with_valid_lease,
5812 8 : new_gc_cutoff,
5813 8 : )
5814 8 : .instrument(
5815 8 : info_span!("gc_timeline", timeline_id = %self.timeline_id, cutoff = %new_gc_cutoff),
5816 : )
5817 8 : .await?;
5818 :
5819 : // only record successes
5820 8 : timer.stop_and_record();
5821 8 :
5822 8 : Ok(res)
5823 8 : }
5824 :
5825 8 : async fn gc_timeline(
5826 8 : &self,
5827 8 : space_cutoff: Lsn,
5828 8 : time_cutoff: Lsn,
5829 8 : retain_lsns: Vec<Lsn>,
5830 8 : max_lsn_with_valid_lease: Option<Lsn>,
5831 8 : new_gc_cutoff: Lsn,
5832 8 : ) -> Result<GcResult, GcError> {
5833 8 : // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc
5834 8 :
5835 8 : let now = SystemTime::now();
5836 8 : let mut result: GcResult = GcResult::default();
5837 8 :
5838 8 : // Nothing to GC. Return early.
5839 8 : let latest_gc_cutoff = *self.get_applied_gc_cutoff_lsn();
5840 8 : if latest_gc_cutoff >= new_gc_cutoff {
5841 0 : info!(
5842 0 : "Nothing to GC: new_gc_cutoff_lsn {new_gc_cutoff}, latest_gc_cutoff_lsn {latest_gc_cutoff}",
5843 : );
5844 0 : return Ok(result);
5845 8 : }
5846 :
5847 : // We need to ensure that no one tries to read page versions or create
5848 : // branches at a point before latest_gc_cutoff_lsn. See branch_timeline()
5849 : // for details. This will block until the old value is no longer in use.
5850 : //
5851 : // The GC cutoff should only ever move forwards.
5852 8 : let waitlist = {
5853 8 : let write_guard = self.applied_gc_cutoff_lsn.lock_for_write();
5854 8 : if *write_guard > new_gc_cutoff {
5855 0 : return Err(GcError::BadLsn {
5856 0 : why: format!(
5857 0 : "Cannot move GC cutoff LSN backwards (was {}, new {})",
5858 0 : *write_guard, new_gc_cutoff
5859 0 : ),
5860 0 : });
5861 8 : }
5862 8 :
5863 8 : write_guard.store_and_unlock(new_gc_cutoff)
5864 8 : };
5865 8 : waitlist.wait().await;
5866 :
5867 8 : info!("GC starting");
5868 :
5869 8 : debug!("retain_lsns: {:?}", retain_lsns);
5870 :
5871 8 : let mut layers_to_remove = Vec::new();
5872 :
5873 : // Scan all layers in the timeline (remote or on-disk).
5874 : //
5875 : // Garbage collect the layer if all conditions are satisfied:
5876 : // 1. it is older than cutoff LSN;
5877 : // 2. it is older than PITR interval;
5878 : // 3. it doesn't need to be retained for 'retain_lsns';
5879 : // 4. it does not need to be kept for LSNs holding valid leases.
5880 : // 5. newer on-disk image layers cover the layer's whole key range
5881 : //
5882 : // TODO holding a write lock is too agressive and avoidable
5883 8 : let mut guard = self.layers.write().await;
5884 8 : let layers = guard.layer_map()?;
5885 48 : 'outer: for l in layers.iter_historic_layers() {
5886 48 : result.layers_total += 1;
5887 48 :
5888 48 : // 1. Is it newer than GC horizon cutoff point?
5889 48 : if l.get_lsn_range().end > space_cutoff {
5890 4 : info!(
5891 0 : "keeping {} because it's newer than space_cutoff {}",
5892 0 : l.layer_name(),
5893 : space_cutoff,
5894 : );
5895 4 : result.layers_needed_by_cutoff += 1;
5896 4 : continue 'outer;
5897 44 : }
5898 44 :
5899 44 : // 2. It is newer than PiTR cutoff point?
5900 44 : if l.get_lsn_range().end > time_cutoff {
5901 0 : info!(
5902 0 : "keeping {} because it's newer than time_cutoff {}",
5903 0 : l.layer_name(),
5904 : time_cutoff,
5905 : );
5906 0 : result.layers_needed_by_pitr += 1;
5907 0 : continue 'outer;
5908 44 : }
5909 :
5910 : // 3. Is it needed by a child branch?
5911 : // NOTE With that we would keep data that
5912 : // might be referenced by child branches forever.
5913 : // We can track this in child timeline GC and delete parent layers when
5914 : // they are no longer needed. This might be complicated with long inheritance chains.
5915 : //
5916 : // TODO Vec is not a great choice for `retain_lsns`
5917 44 : for retain_lsn in &retain_lsns {
5918 : // start_lsn is inclusive
5919 0 : if &l.get_lsn_range().start <= retain_lsn {
5920 0 : info!(
5921 0 : "keeping {} because it's still might be referenced by child branch forked at {} is_dropped: xx is_incremental: {}",
5922 0 : l.layer_name(),
5923 0 : retain_lsn,
5924 0 : l.is_incremental(),
5925 : );
5926 0 : result.layers_needed_by_branches += 1;
5927 0 : continue 'outer;
5928 0 : }
5929 : }
5930 :
5931 : // 4. Is there a valid lease that requires us to keep this layer?
5932 44 : if let Some(lsn) = &max_lsn_with_valid_lease {
5933 : // keep if layer start <= any of the lease
5934 36 : if &l.get_lsn_range().start <= lsn {
5935 28 : info!(
5936 0 : "keeping {} because there is a valid lease preventing GC at {}",
5937 0 : l.layer_name(),
5938 : lsn,
5939 : );
5940 28 : result.layers_needed_by_leases += 1;
5941 28 : continue 'outer;
5942 8 : }
5943 8 : }
5944 :
5945 : // 5. Is there a later on-disk layer for this relation?
5946 : //
5947 : // The end-LSN is exclusive, while disk_consistent_lsn is
5948 : // inclusive. For example, if disk_consistent_lsn is 100, it is
5949 : // OK for a delta layer to have end LSN 101, but if the end LSN
5950 : // is 102, then it might not have been fully flushed to disk
5951 : // before crash.
5952 : //
5953 : // For example, imagine that the following layers exist:
5954 : //
5955 : // 1000 - image (A)
5956 : // 1000-2000 - delta (B)
5957 : // 2000 - image (C)
5958 : // 2000-3000 - delta (D)
5959 : // 3000 - image (E)
5960 : //
5961 : // If GC horizon is at 2500, we can remove layers A and B, but
5962 : // we cannot remove C, even though it's older than 2500, because
5963 : // the delta layer 2000-3000 depends on it.
5964 16 : if !layers
5965 16 : .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff))
5966 : {
5967 12 : info!("keeping {} because it is the latest layer", l.layer_name());
5968 12 : result.layers_not_updated += 1;
5969 12 : continue 'outer;
5970 4 : }
5971 4 :
5972 4 : // We didn't find any reason to keep this file, so remove it.
5973 4 : info!(
5974 0 : "garbage collecting {} is_dropped: xx is_incremental: {}",
5975 0 : l.layer_name(),
5976 0 : l.is_incremental(),
5977 : );
5978 4 : layers_to_remove.push(l);
5979 : }
5980 :
5981 8 : if !layers_to_remove.is_empty() {
5982 : // Persist the new GC cutoff value before we actually remove anything.
5983 : // This unconditionally schedules also an index_part.json update, even though, we will
5984 : // be doing one a bit later with the unlinked gc'd layers.
5985 4 : let disk_consistent_lsn = self.disk_consistent_lsn.load();
5986 4 : self.schedule_uploads(disk_consistent_lsn, None)
5987 4 : .map_err(|e| {
5988 0 : if self.cancel.is_cancelled() {
5989 0 : GcError::TimelineCancelled
5990 : } else {
5991 0 : GcError::Remote(e)
5992 : }
5993 4 : })?;
5994 :
5995 4 : let gc_layers = layers_to_remove
5996 4 : .iter()
5997 4 : .map(|x| guard.get_from_desc(x))
5998 4 : .collect::<Vec<Layer>>();
5999 4 :
6000 4 : result.layers_removed = gc_layers.len() as u64;
6001 4 :
6002 4 : self.remote_client.schedule_gc_update(&gc_layers)?;
6003 :
6004 4 : guard.open_mut()?.finish_gc_timeline(&gc_layers);
6005 4 :
6006 4 : #[cfg(feature = "testing")]
6007 4 : {
6008 4 : result.doomed_layers = gc_layers;
6009 4 : }
6010 4 : }
6011 :
6012 8 : info!(
6013 0 : "GC completed removing {} layers, cutoff {}",
6014 : result.layers_removed, new_gc_cutoff
6015 : );
6016 :
6017 8 : result.elapsed = now.elapsed().unwrap_or(Duration::ZERO);
6018 8 : Ok(result)
6019 8 : }
6020 :
6021 : /// Reconstruct a value, using the given base image and WAL records in 'data'.
6022 1340031 : async fn reconstruct_value(
6023 1340031 : &self,
6024 1340031 : key: Key,
6025 1340031 : request_lsn: Lsn,
6026 1340031 : mut data: ValueReconstructState,
6027 1340031 : ) -> Result<Bytes, PageReconstructError> {
6028 1340031 : // Perform WAL redo if needed
6029 1340031 : data.records.reverse();
6030 1340031 :
6031 1340031 : // If we have a page image, and no WAL, we're all set
6032 1340031 : if data.records.is_empty() {
6033 1338395 : if let Some((img_lsn, img)) = &data.img {
6034 1338395 : trace!(
6035 0 : "found page image for key {} at {}, no WAL redo required, req LSN {}",
6036 : key,
6037 : img_lsn,
6038 : request_lsn,
6039 : );
6040 1338395 : Ok(img.clone())
6041 : } else {
6042 0 : Err(PageReconstructError::from(anyhow!(
6043 0 : "base image for {key} at {request_lsn} not found"
6044 0 : )))
6045 : }
6046 : } else {
6047 : // We need to do WAL redo.
6048 : //
6049 : // If we don't have a base image, then the oldest WAL record better initialize
6050 : // the page
6051 1636 : if data.img.is_none() && !data.records.first().unwrap().1.will_init() {
6052 0 : Err(PageReconstructError::from(anyhow!(
6053 0 : "Base image for {} at {} not found, but got {} WAL records",
6054 0 : key,
6055 0 : request_lsn,
6056 0 : data.records.len()
6057 0 : )))
6058 : } else {
6059 1636 : if data.img.is_some() {
6060 1504 : trace!(
6061 0 : "found {} WAL records and a base image for {} at {}, performing WAL redo",
6062 0 : data.records.len(),
6063 : key,
6064 : request_lsn
6065 : );
6066 : } else {
6067 132 : trace!("found {} WAL records that will init the page for {} at {}, performing WAL redo", data.records.len(), key, request_lsn);
6068 : };
6069 1636 : let res = self
6070 1636 : .walredo_mgr
6071 1636 : .as_ref()
6072 1636 : .context("timeline has no walredo manager")
6073 1636 : .map_err(PageReconstructError::WalRedo)?
6074 1636 : .request_redo(key, request_lsn, data.img, data.records, self.pg_version)
6075 1636 : .await;
6076 1636 : let img = match res {
6077 1636 : Ok(img) => img,
6078 0 : Err(walredo::Error::Cancelled) => return Err(PageReconstructError::Cancelled),
6079 0 : Err(walredo::Error::Other(err)) => {
6080 0 : critical!("walredo failure during page reconstruction: {err:?}");
6081 0 : return Err(PageReconstructError::WalRedo(
6082 0 : err.context("reconstruct a page image"),
6083 0 : ));
6084 : }
6085 : };
6086 1636 : Ok(img)
6087 : }
6088 : }
6089 1340031 : }
6090 :
6091 0 : pub(crate) async fn spawn_download_all_remote_layers(
6092 0 : self: Arc<Self>,
6093 0 : request: DownloadRemoteLayersTaskSpawnRequest,
6094 0 : ) -> Result<DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskInfo> {
6095 : use pageserver_api::models::DownloadRemoteLayersTaskState;
6096 :
6097 : // this is not really needed anymore; it has tests which really check the return value from
6098 : // http api. it would be better not to maintain this anymore.
6099 :
6100 0 : let mut status_guard = self.download_all_remote_layers_task_info.write().unwrap();
6101 0 : if let Some(st) = &*status_guard {
6102 0 : match &st.state {
6103 : DownloadRemoteLayersTaskState::Running => {
6104 0 : return Err(st.clone());
6105 : }
6106 : DownloadRemoteLayersTaskState::ShutDown
6107 0 : | DownloadRemoteLayersTaskState::Completed => {
6108 0 : *status_guard = None;
6109 0 : }
6110 : }
6111 0 : }
6112 :
6113 0 : let self_clone = Arc::clone(&self);
6114 0 : let task_id = task_mgr::spawn(
6115 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
6116 0 : task_mgr::TaskKind::DownloadAllRemoteLayers,
6117 0 : self.tenant_shard_id,
6118 0 : Some(self.timeline_id),
6119 0 : "download all remote layers task",
6120 0 : async move {
6121 0 : self_clone.download_all_remote_layers(request).await;
6122 0 : let mut status_guard = self_clone.download_all_remote_layers_task_info.write().unwrap();
6123 0 : match &mut *status_guard {
6124 : None => {
6125 0 : warn!("tasks status is supposed to be Some(), since we are running");
6126 : }
6127 0 : Some(st) => {
6128 0 : let exp_task_id = format!("{}", task_mgr::current_task_id().unwrap());
6129 0 : if st.task_id != exp_task_id {
6130 0 : warn!("task id changed while we were still running, expecting {} but have {}", exp_task_id, st.task_id);
6131 0 : } else {
6132 0 : st.state = DownloadRemoteLayersTaskState::Completed;
6133 0 : }
6134 : }
6135 : };
6136 0 : Ok(())
6137 0 : }
6138 0 : .instrument(info_span!(parent: None, "download_all_remote_layers", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
6139 : );
6140 :
6141 0 : let initial_info = DownloadRemoteLayersTaskInfo {
6142 0 : task_id: format!("{task_id}"),
6143 0 : state: DownloadRemoteLayersTaskState::Running,
6144 0 : total_layer_count: 0,
6145 0 : successful_download_count: 0,
6146 0 : failed_download_count: 0,
6147 0 : };
6148 0 : *status_guard = Some(initial_info.clone());
6149 0 :
6150 0 : Ok(initial_info)
6151 0 : }
6152 :
6153 0 : async fn download_all_remote_layers(
6154 0 : self: &Arc<Self>,
6155 0 : request: DownloadRemoteLayersTaskSpawnRequest,
6156 0 : ) {
6157 : use pageserver_api::models::DownloadRemoteLayersTaskState;
6158 :
6159 0 : let remaining = {
6160 0 : let guard = self.layers.read().await;
6161 0 : let Ok(lm) = guard.layer_map() else {
6162 : // technically here we could look into iterating accessible layers, but downloading
6163 : // all layers of a shutdown timeline makes no sense regardless.
6164 0 : tracing::info!("attempted to download all layers of shutdown timeline");
6165 0 : return;
6166 : };
6167 0 : lm.iter_historic_layers()
6168 0 : .map(|desc| guard.get_from_desc(&desc))
6169 0 : .collect::<Vec<_>>()
6170 0 : };
6171 0 : let total_layer_count = remaining.len();
6172 :
6173 : macro_rules! lock_status {
6174 : ($st:ident) => {
6175 : let mut st = self.download_all_remote_layers_task_info.write().unwrap();
6176 : let st = st
6177 : .as_mut()
6178 : .expect("this function is only called after the task has been spawned");
6179 : assert_eq!(
6180 : st.task_id,
6181 : format!(
6182 : "{}",
6183 : task_mgr::current_task_id().expect("we run inside a task_mgr task")
6184 : )
6185 : );
6186 : let $st = st;
6187 : };
6188 : }
6189 :
6190 : {
6191 0 : lock_status!(st);
6192 0 : st.total_layer_count = total_layer_count as u64;
6193 0 : }
6194 0 :
6195 0 : let mut remaining = remaining.into_iter();
6196 0 : let mut have_remaining = true;
6197 0 : let mut js = tokio::task::JoinSet::new();
6198 0 :
6199 0 : let cancel = task_mgr::shutdown_token();
6200 0 :
6201 0 : let limit = request.max_concurrent_downloads;
6202 :
6203 : loop {
6204 0 : while js.len() < limit.get() && have_remaining && !cancel.is_cancelled() {
6205 0 : let Some(next) = remaining.next() else {
6206 0 : have_remaining = false;
6207 0 : break;
6208 : };
6209 :
6210 0 : let span = tracing::info_span!("download", layer = %next);
6211 :
6212 0 : js.spawn(
6213 0 : async move {
6214 0 : let res = next.download().await;
6215 0 : (next, res)
6216 0 : }
6217 0 : .instrument(span),
6218 0 : );
6219 0 : }
6220 :
6221 0 : while let Some(res) = js.join_next().await {
6222 0 : match res {
6223 : Ok((_, Ok(_))) => {
6224 0 : lock_status!(st);
6225 0 : st.successful_download_count += 1;
6226 : }
6227 0 : Ok((layer, Err(e))) => {
6228 0 : tracing::error!(%layer, "download failed: {e:#}");
6229 0 : lock_status!(st);
6230 0 : st.failed_download_count += 1;
6231 : }
6232 0 : Err(je) if je.is_cancelled() => unreachable!("not used here"),
6233 0 : Err(je) if je.is_panic() => {
6234 0 : lock_status!(st);
6235 0 : st.failed_download_count += 1;
6236 : }
6237 0 : Err(je) => tracing::warn!("unknown joinerror: {je:?}"),
6238 : }
6239 : }
6240 :
6241 0 : if js.is_empty() && (!have_remaining || cancel.is_cancelled()) {
6242 0 : break;
6243 0 : }
6244 : }
6245 :
6246 : {
6247 0 : lock_status!(st);
6248 0 : st.state = DownloadRemoteLayersTaskState::Completed;
6249 : }
6250 0 : }
6251 :
6252 0 : pub(crate) fn get_download_all_remote_layers_task_info(
6253 0 : &self,
6254 0 : ) -> Option<DownloadRemoteLayersTaskInfo> {
6255 0 : self.download_all_remote_layers_task_info
6256 0 : .read()
6257 0 : .unwrap()
6258 0 : .clone()
6259 0 : }
6260 : }
6261 :
6262 : impl Timeline {
6263 : /// Returns non-remote layers for eviction.
6264 0 : pub(crate) async fn get_local_layers_for_disk_usage_eviction(&self) -> DiskUsageEvictionInfo {
6265 0 : let guard = self.layers.read().await;
6266 0 : let mut max_layer_size: Option<u64> = None;
6267 0 :
6268 0 : let resident_layers = guard
6269 0 : .likely_resident_layers()
6270 0 : .map(|layer| {
6271 0 : let file_size = layer.layer_desc().file_size;
6272 0 : max_layer_size = max_layer_size.map_or(Some(file_size), |m| Some(m.max(file_size)));
6273 0 :
6274 0 : let last_activity_ts = layer.latest_activity();
6275 0 :
6276 0 : EvictionCandidate {
6277 0 : layer: layer.to_owned().into(),
6278 0 : last_activity_ts,
6279 0 : relative_last_activity: finite_f32::FiniteF32::ZERO,
6280 0 : visibility: layer.visibility(),
6281 0 : }
6282 0 : })
6283 0 : .collect();
6284 0 :
6285 0 : DiskUsageEvictionInfo {
6286 0 : max_layer_size,
6287 0 : resident_layers,
6288 0 : }
6289 0 : }
6290 :
6291 3732 : pub(crate) fn get_shard_index(&self) -> ShardIndex {
6292 3732 : ShardIndex {
6293 3732 : shard_number: self.tenant_shard_id.shard_number,
6294 3732 : shard_count: self.tenant_shard_id.shard_count,
6295 3732 : }
6296 3732 : }
6297 :
6298 : /// Persistently blocks gc for `Manual` reason.
6299 : ///
6300 : /// Returns true if no such block existed before, false otherwise.
6301 0 : pub(crate) async fn block_gc(&self, tenant: &super::Tenant) -> anyhow::Result<bool> {
6302 : use crate::tenant::remote_timeline_client::index::GcBlockingReason;
6303 0 : assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
6304 0 : tenant.gc_block.insert(self, GcBlockingReason::Manual).await
6305 0 : }
6306 :
6307 : /// Persistently unblocks gc for `Manual` reason.
6308 0 : pub(crate) async fn unblock_gc(&self, tenant: &super::Tenant) -> anyhow::Result<()> {
6309 : use crate::tenant::remote_timeline_client::index::GcBlockingReason;
6310 0 : assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
6311 0 : tenant.gc_block.remove(self, GcBlockingReason::Manual).await
6312 0 : }
6313 :
6314 : #[cfg(test)]
6315 96 : pub(super) fn force_advance_lsn(self: &Arc<Timeline>, new_lsn: Lsn) {
6316 96 : self.last_record_lsn.advance(new_lsn);
6317 96 : }
6318 :
6319 : #[cfg(test)]
6320 4 : pub(super) fn force_set_disk_consistent_lsn(&self, new_value: Lsn) {
6321 4 : self.disk_consistent_lsn.store(new_value);
6322 4 : }
6323 :
6324 : /// Force create an image layer and place it into the layer map.
6325 : ///
6326 : /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
6327 : /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are
6328 : /// placed into the layer map in one run AND be validated.
6329 : #[cfg(test)]
6330 120 : pub(super) async fn force_create_image_layer(
6331 120 : self: &Arc<Timeline>,
6332 120 : lsn: Lsn,
6333 120 : mut images: Vec<(Key, Bytes)>,
6334 120 : check_start_lsn: Option<Lsn>,
6335 120 : ctx: &RequestContext,
6336 120 : ) -> anyhow::Result<()> {
6337 120 : let last_record_lsn = self.get_last_record_lsn();
6338 120 : assert!(
6339 120 : lsn <= last_record_lsn,
6340 0 : "advance last record lsn before inserting a layer, lsn={lsn}, last_record_lsn={last_record_lsn}"
6341 : );
6342 120 : if let Some(check_start_lsn) = check_start_lsn {
6343 120 : assert!(lsn >= check_start_lsn);
6344 0 : }
6345 348 : images.sort_unstable_by(|(ka, _), (kb, _)| ka.cmp(kb));
6346 120 : let min_key = *images.first().map(|(k, _)| k).unwrap();
6347 120 : let end_key = images.last().map(|(k, _)| k).unwrap().next();
6348 120 : let mut image_layer_writer = ImageLayerWriter::new(
6349 120 : self.conf,
6350 120 : self.timeline_id,
6351 120 : self.tenant_shard_id,
6352 120 : &(min_key..end_key),
6353 120 : lsn,
6354 120 : ctx,
6355 120 : )
6356 120 : .await?;
6357 588 : for (key, img) in images {
6358 468 : image_layer_writer.put_image(key, img, ctx).await?;
6359 : }
6360 120 : let (desc, path) = image_layer_writer.finish(ctx).await?;
6361 120 : let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
6362 120 : info!("force created image layer {}", image_layer.local_path());
6363 : {
6364 120 : let mut guard = self.layers.write().await;
6365 120 : guard
6366 120 : .open_mut()
6367 120 : .unwrap()
6368 120 : .force_insert_layer(image_layer.clone());
6369 120 : }
6370 120 :
6371 120 : // Update remote_timeline_client state to reflect existence of this layer
6372 120 : self.remote_client
6373 120 : .schedule_layer_file_upload(image_layer)
6374 120 : .unwrap();
6375 120 :
6376 120 : Ok(())
6377 120 : }
6378 :
6379 : /// Force create a delta layer and place it into the layer map.
6380 : ///
6381 : /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
6382 : /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are
6383 : /// placed into the layer map in one run AND be validated.
6384 : #[cfg(test)]
6385 184 : pub(super) async fn force_create_delta_layer(
6386 184 : self: &Arc<Timeline>,
6387 184 : mut deltas: DeltaLayerTestDesc,
6388 184 : check_start_lsn: Option<Lsn>,
6389 184 : ctx: &RequestContext,
6390 184 : ) -> anyhow::Result<()> {
6391 184 : let last_record_lsn = self.get_last_record_lsn();
6392 184 : deltas
6393 184 : .data
6394 264 : .sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb)));
6395 184 : assert!(deltas.data.first().unwrap().0 >= deltas.key_range.start);
6396 184 : assert!(deltas.data.last().unwrap().0 < deltas.key_range.end);
6397 632 : for (_, lsn, _) in &deltas.data {
6398 448 : assert!(deltas.lsn_range.start <= *lsn && *lsn < deltas.lsn_range.end);
6399 : }
6400 184 : assert!(
6401 184 : deltas.lsn_range.end <= last_record_lsn,
6402 0 : "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}",
6403 : deltas.lsn_range.end,
6404 : last_record_lsn
6405 : );
6406 184 : if let Some(check_start_lsn) = check_start_lsn {
6407 184 : assert!(deltas.lsn_range.start >= check_start_lsn);
6408 0 : }
6409 184 : let mut delta_layer_writer = DeltaLayerWriter::new(
6410 184 : self.conf,
6411 184 : self.timeline_id,
6412 184 : self.tenant_shard_id,
6413 184 : deltas.key_range.start,
6414 184 : deltas.lsn_range,
6415 184 : ctx,
6416 184 : )
6417 184 : .await?;
6418 632 : for (key, lsn, val) in deltas.data {
6419 448 : delta_layer_writer.put_value(key, lsn, val, ctx).await?;
6420 : }
6421 184 : let (desc, path) = delta_layer_writer.finish(deltas.key_range.end, ctx).await?;
6422 184 : let delta_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
6423 184 : info!("force created delta layer {}", delta_layer.local_path());
6424 : {
6425 184 : let mut guard = self.layers.write().await;
6426 184 : guard
6427 184 : .open_mut()
6428 184 : .unwrap()
6429 184 : .force_insert_layer(delta_layer.clone());
6430 184 : }
6431 184 :
6432 184 : // Update remote_timeline_client state to reflect existence of this layer
6433 184 : self.remote_client
6434 184 : .schedule_layer_file_upload(delta_layer)
6435 184 : .unwrap();
6436 184 :
6437 184 : Ok(())
6438 184 : }
6439 :
6440 : /// Return all keys at the LSN in the image layers
6441 : #[cfg(test)]
6442 12 : pub(crate) async fn inspect_image_layers(
6443 12 : self: &Arc<Timeline>,
6444 12 : lsn: Lsn,
6445 12 : ctx: &RequestContext,
6446 12 : io_concurrency: IoConcurrency,
6447 12 : ) -> anyhow::Result<Vec<(Key, Bytes)>> {
6448 12 : let mut all_data = Vec::new();
6449 12 : let guard = self.layers.read().await;
6450 68 : for layer in guard.layer_map()?.iter_historic_layers() {
6451 68 : if !layer.is_delta() && layer.image_layer_lsn() == lsn {
6452 16 : let layer = guard.get_from_desc(&layer);
6453 16 : let mut reconstruct_data = ValuesReconstructState::new(io_concurrency.clone());
6454 16 : layer
6455 16 : .get_values_reconstruct_data(
6456 16 : KeySpace::single(Key::MIN..Key::MAX),
6457 16 : lsn..Lsn(lsn.0 + 1),
6458 16 : &mut reconstruct_data,
6459 16 : ctx,
6460 16 : )
6461 16 : .await?;
6462 132 : for (k, v) in std::mem::take(&mut reconstruct_data.keys) {
6463 132 : let v = v.collect_pending_ios().await?;
6464 132 : all_data.push((k, v.img.unwrap().1));
6465 : }
6466 52 : }
6467 : }
6468 12 : all_data.sort();
6469 12 : Ok(all_data)
6470 12 : }
6471 :
6472 : /// Get all historic layer descriptors in the layer map
6473 : #[cfg(test)]
6474 48 : pub(crate) async fn inspect_historic_layers(
6475 48 : self: &Arc<Timeline>,
6476 48 : ) -> anyhow::Result<Vec<super::storage_layer::PersistentLayerKey>> {
6477 48 : let mut layers = Vec::new();
6478 48 : let guard = self.layers.read().await;
6479 228 : for layer in guard.layer_map()?.iter_historic_layers() {
6480 228 : layers.push(layer.key());
6481 228 : }
6482 48 : Ok(layers)
6483 48 : }
6484 :
6485 : #[cfg(test)]
6486 20 : pub(crate) fn add_extra_test_dense_keyspace(&self, ks: KeySpace) {
6487 20 : let mut keyspace = self.extra_test_dense_keyspace.load().as_ref().clone();
6488 20 : keyspace.merge(&ks);
6489 20 : self.extra_test_dense_keyspace.store(Arc::new(keyspace));
6490 20 : }
6491 : }
6492 :
6493 : /// Tracking writes ingestion does to a particular in-memory layer.
6494 : ///
6495 : /// Cleared upon freezing a layer.
6496 : pub(crate) struct TimelineWriterState {
6497 : open_layer: Arc<InMemoryLayer>,
6498 : current_size: u64,
6499 : // Previous Lsn which passed through
6500 : prev_lsn: Option<Lsn>,
6501 : // Largest Lsn which passed through the current writer
6502 : max_lsn: Option<Lsn>,
6503 : // Cached details of the last freeze. Avoids going trough the atomic/lock on every put.
6504 : cached_last_freeze_at: Lsn,
6505 : }
6506 :
6507 : impl TimelineWriterState {
6508 2596 : fn new(open_layer: Arc<InMemoryLayer>, current_size: u64, last_freeze_at: Lsn) -> Self {
6509 2596 : Self {
6510 2596 : open_layer,
6511 2596 : current_size,
6512 2596 : prev_lsn: None,
6513 2596 : max_lsn: None,
6514 2596 : cached_last_freeze_at: last_freeze_at,
6515 2596 : }
6516 2596 : }
6517 : }
6518 :
6519 : /// Various functions to mutate the timeline.
6520 : // TODO Currently, Deref is used to allow easy access to read methods from this trait.
6521 : // This is probably considered a bad practice in Rust and should be fixed eventually,
6522 : // but will cause large code changes.
6523 : pub(crate) struct TimelineWriter<'a> {
6524 : tl: &'a Timeline,
6525 : write_guard: tokio::sync::MutexGuard<'a, Option<TimelineWriterState>>,
6526 : }
6527 :
6528 : impl Deref for TimelineWriter<'_> {
6529 : type Target = Timeline;
6530 :
6531 19796404 : fn deref(&self) -> &Self::Target {
6532 19796404 : self.tl
6533 19796404 : }
6534 : }
6535 :
6536 : #[derive(PartialEq)]
6537 : enum OpenLayerAction {
6538 : Roll,
6539 : Open,
6540 : None,
6541 : }
6542 :
6543 : impl TimelineWriter<'_> {
6544 9608468 : async fn handle_open_layer_action(
6545 9608468 : &mut self,
6546 9608468 : at: Lsn,
6547 9608468 : action: OpenLayerAction,
6548 9608468 : ctx: &RequestContext,
6549 9608468 : ) -> anyhow::Result<&Arc<InMemoryLayer>> {
6550 9608468 : match action {
6551 : OpenLayerAction::Roll => {
6552 160 : let freeze_at = self.write_guard.as_ref().unwrap().max_lsn.unwrap();
6553 160 : self.roll_layer(freeze_at).await?;
6554 160 : self.open_layer(at, ctx).await?;
6555 : }
6556 2436 : OpenLayerAction::Open => self.open_layer(at, ctx).await?,
6557 : OpenLayerAction::None => {
6558 9605872 : assert!(self.write_guard.is_some());
6559 : }
6560 : }
6561 :
6562 9608468 : Ok(&self.write_guard.as_ref().unwrap().open_layer)
6563 9608468 : }
6564 :
6565 2596 : async fn open_layer(&mut self, at: Lsn, ctx: &RequestContext) -> anyhow::Result<()> {
6566 2596 : let layer = self
6567 2596 : .tl
6568 2596 : .get_layer_for_write(at, &self.write_guard, ctx)
6569 2596 : .await?;
6570 2596 : let initial_size = layer.size().await?;
6571 :
6572 2596 : let last_freeze_at = self.last_freeze_at.load();
6573 2596 : self.write_guard.replace(TimelineWriterState::new(
6574 2596 : layer,
6575 2596 : initial_size,
6576 2596 : last_freeze_at,
6577 2596 : ));
6578 2596 :
6579 2596 : Ok(())
6580 2596 : }
6581 :
6582 160 : async fn roll_layer(&mut self, freeze_at: Lsn) -> Result<(), FlushLayerError> {
6583 160 : let current_size = self.write_guard.as_ref().unwrap().current_size;
6584 :
6585 : // If layer flushes are backpressured due to compaction not keeping up, wait for the flush
6586 : // to propagate the backpressure up into WAL ingestion.
6587 160 : let l0_count = self
6588 160 : .tl
6589 160 : .layers
6590 160 : .read()
6591 160 : .await
6592 160 : .layer_map()?
6593 160 : .level0_deltas()
6594 160 : .len();
6595 160 : let wait_thresholds = [
6596 160 : self.get_l0_flush_delay_threshold(),
6597 160 : self.get_l0_flush_stall_threshold(),
6598 160 : ];
6599 160 : let wait_threshold = wait_thresholds.into_iter().flatten().min();
6600 :
6601 : // self.write_guard will be taken by the freezing
6602 160 : let flush_id = self
6603 160 : .tl
6604 160 : .freeze_inmem_layer_at(freeze_at, &mut self.write_guard)
6605 160 : .await?;
6606 :
6607 160 : assert!(self.write_guard.is_none());
6608 :
6609 160 : if let Some(wait_threshold) = wait_threshold {
6610 0 : if l0_count >= wait_threshold {
6611 0 : debug!("layer roll waiting for flush due to compaction backpressure at {l0_count} L0 layers");
6612 0 : self.tl.wait_flush_completion(flush_id).await?;
6613 0 : }
6614 160 : }
6615 :
6616 160 : if current_size >= self.get_checkpoint_distance() * 2 {
6617 0 : warn!("Flushed oversized open layer with size {}", current_size)
6618 160 : }
6619 :
6620 160 : Ok(())
6621 160 : }
6622 :
6623 9608468 : fn get_open_layer_action(&self, lsn: Lsn, new_value_size: u64) -> OpenLayerAction {
6624 9608468 : let state = &*self.write_guard;
6625 9608468 : let Some(state) = &state else {
6626 2436 : return OpenLayerAction::Open;
6627 : };
6628 :
6629 : #[cfg(feature = "testing")]
6630 9606032 : if state.cached_last_freeze_at < self.tl.last_freeze_at.load() {
6631 : // this check and assertion are not really needed because
6632 : // LayerManager::try_freeze_in_memory_layer will always clear out the
6633 : // TimelineWriterState if something is frozen. however, we can advance last_freeze_at when there
6634 : // is no TimelineWriterState.
6635 0 : assert!(
6636 0 : state.open_layer.end_lsn.get().is_some(),
6637 0 : "our open_layer must be outdated"
6638 : );
6639 :
6640 : // this would be a memory leak waiting to happen because the in-memory layer always has
6641 : // an index
6642 0 : panic!("BUG: TimelineWriterState held on to frozen in-memory layer.");
6643 9606032 : }
6644 9606032 :
6645 9606032 : if state.prev_lsn == Some(lsn) {
6646 : // Rolling mid LSN is not supported by [downstream code].
6647 : // Hence, only roll at LSN boundaries.
6648 : //
6649 : // [downstream code]: https://github.com/neondatabase/neon/pull/7993#discussion_r1633345422
6650 12 : return OpenLayerAction::None;
6651 9606020 : }
6652 9606020 :
6653 9606020 : if state.current_size == 0 {
6654 : // Don't roll empty layers
6655 0 : return OpenLayerAction::None;
6656 9606020 : }
6657 9606020 :
6658 9606020 : if self.tl.should_roll(
6659 9606020 : state.current_size,
6660 9606020 : state.current_size + new_value_size,
6661 9606020 : self.get_checkpoint_distance(),
6662 9606020 : lsn,
6663 9606020 : state.cached_last_freeze_at,
6664 9606020 : state.open_layer.get_opened_at(),
6665 9606020 : ) {
6666 160 : OpenLayerAction::Roll
6667 : } else {
6668 9605860 : OpenLayerAction::None
6669 : }
6670 9608468 : }
6671 :
6672 : /// Put a batch of keys at the specified Lsns.
6673 9608464 : pub(crate) async fn put_batch(
6674 9608464 : &mut self,
6675 9608464 : batch: SerializedValueBatch,
6676 9608464 : ctx: &RequestContext,
6677 9608464 : ) -> anyhow::Result<()> {
6678 9608464 : if !batch.has_data() {
6679 0 : return Ok(());
6680 9608464 : }
6681 9608464 :
6682 9608464 : // In debug builds, assert that we don't write any keys that don't belong to this shard.
6683 9608464 : // We don't assert this in release builds, since key ownership policies may change over
6684 9608464 : // time. Stray keys will be removed during compaction.
6685 9608464 : if cfg!(debug_assertions) {
6686 19789824 : for metadata in &batch.metadata {
6687 10181360 : if let ValueMeta::Serialized(metadata) = metadata {
6688 10181360 : let key = Key::from_compact(metadata.key);
6689 10181360 : assert!(
6690 10181360 : self.shard_identity.is_key_local(&key)
6691 0 : || self.shard_identity.is_key_global(&key),
6692 0 : "key {key} does not belong on shard {}",
6693 0 : self.shard_identity.shard_index()
6694 : );
6695 0 : }
6696 : }
6697 0 : }
6698 :
6699 9608464 : let batch_max_lsn = batch.max_lsn;
6700 9608464 : let buf_size: u64 = batch.buffer_size() as u64;
6701 9608464 :
6702 9608464 : let action = self.get_open_layer_action(batch_max_lsn, buf_size);
6703 9608464 : let layer = self
6704 9608464 : .handle_open_layer_action(batch_max_lsn, action, ctx)
6705 9608464 : .await?;
6706 :
6707 9608464 : let res = layer.put_batch(batch, ctx).await;
6708 :
6709 9608464 : if res.is_ok() {
6710 9608464 : // Update the current size only when the entire write was ok.
6711 9608464 : // In case of failures, we may have had partial writes which
6712 9608464 : // render the size tracking out of sync. That's ok because
6713 9608464 : // the checkpoint distance should be significantly smaller
6714 9608464 : // than the S3 single shot upload limit of 5GiB.
6715 9608464 : let state = self.write_guard.as_mut().unwrap();
6716 9608464 :
6717 9608464 : state.current_size += buf_size;
6718 9608464 : state.prev_lsn = Some(batch_max_lsn);
6719 9608464 : state.max_lsn = std::cmp::max(state.max_lsn, Some(batch_max_lsn));
6720 9608464 : }
6721 :
6722 9608464 : res
6723 9608464 : }
6724 :
6725 : #[cfg(test)]
6726 : /// Test helper, for tests that would like to poke individual values without composing a batch
6727 8780308 : pub(crate) async fn put(
6728 8780308 : &mut self,
6729 8780308 : key: Key,
6730 8780308 : lsn: Lsn,
6731 8780308 : value: &Value,
6732 8780308 : ctx: &RequestContext,
6733 8780308 : ) -> anyhow::Result<()> {
6734 : use utils::bin_ser::BeSer;
6735 8780308 : if !key.is_valid_key_on_write_path() {
6736 0 : bail!(
6737 0 : "the request contains data not supported by pageserver at TimelineWriter::put: {}",
6738 0 : key
6739 0 : );
6740 8780308 : }
6741 8780308 : let val_ser_size = value.serialized_size().unwrap() as usize;
6742 8780308 : let batch = SerializedValueBatch::from_values(vec![(
6743 8780308 : key.to_compact(),
6744 8780308 : lsn,
6745 8780308 : val_ser_size,
6746 8780308 : value.clone(),
6747 8780308 : )]);
6748 8780308 :
6749 8780308 : self.put_batch(batch, ctx).await
6750 8780308 : }
6751 :
6752 4 : pub(crate) async fn delete_batch(
6753 4 : &mut self,
6754 4 : batch: &[(Range<Key>, Lsn)],
6755 4 : ctx: &RequestContext,
6756 4 : ) -> anyhow::Result<()> {
6757 4 : if let Some((_, lsn)) = batch.first() {
6758 4 : let action = self.get_open_layer_action(*lsn, 0);
6759 4 : let layer = self.handle_open_layer_action(*lsn, action, ctx).await?;
6760 4 : layer.put_tombstones(batch).await?;
6761 0 : }
6762 :
6763 4 : Ok(())
6764 4 : }
6765 :
6766 : /// Track the end of the latest digested WAL record.
6767 : /// Remember the (end of) last valid WAL record remembered in the timeline.
6768 : ///
6769 : /// Call this after you have finished writing all the WAL up to 'lsn'.
6770 : ///
6771 : /// 'lsn' must be aligned. This wakes up any wait_lsn() callers waiting for
6772 : /// the 'lsn' or anything older. The previous last record LSN is stored alongside
6773 : /// the latest and can be read.
6774 10558188 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
6775 10558188 : self.tl.finish_write(new_lsn);
6776 10558188 : }
6777 :
6778 541140 : pub(crate) fn update_current_logical_size(&self, delta: i64) {
6779 541140 : self.tl.update_current_logical_size(delta)
6780 541140 : }
6781 : }
6782 :
6783 : // We need TimelineWriter to be send in upcoming conversion of
6784 : // Timeline::layers to tokio::sync::RwLock.
6785 : #[test]
6786 4 : fn is_send() {
6787 4 : fn _assert_send<T: Send>() {}
6788 4 : _assert_send::<TimelineWriter<'_>>();
6789 4 : }
6790 :
6791 : #[cfg(test)]
6792 : mod tests {
6793 : use std::sync::Arc;
6794 :
6795 : use pageserver_api::key::Key;
6796 : use pageserver_api::value::Value;
6797 : use tracing::Instrument;
6798 : use utils::{id::TimelineId, lsn::Lsn};
6799 :
6800 : use crate::tenant::{
6801 : harness::{test_img, TenantHarness},
6802 : layer_map::LayerMap,
6803 : storage_layer::{Layer, LayerName, LayerVisibilityHint},
6804 : timeline::{DeltaLayerTestDesc, EvictionError},
6805 : PreviousHeatmap, Timeline,
6806 : };
6807 :
6808 : use super::HeatMapTimeline;
6809 :
6810 20 : fn assert_heatmaps_have_same_layers(lhs: &HeatMapTimeline, rhs: &HeatMapTimeline) {
6811 20 : assert_eq!(lhs.layers.len(), rhs.layers.len());
6812 20 : let lhs_rhs = lhs.layers.iter().zip(rhs.layers.iter());
6813 100 : for (l, r) in lhs_rhs {
6814 80 : assert_eq!(l.name, r.name);
6815 80 : assert_eq!(l.metadata, r.metadata);
6816 : }
6817 20 : }
6818 :
6819 : #[tokio::test]
6820 4 : async fn test_heatmap_generation() {
6821 4 : let harness = TenantHarness::create("heatmap_generation").await.unwrap();
6822 4 :
6823 4 : let covered_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
6824 4 : Lsn(0x10)..Lsn(0x20),
6825 4 : vec![(
6826 4 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
6827 4 : Lsn(0x11),
6828 4 : Value::Image(test_img("foo")),
6829 4 : )],
6830 4 : );
6831 4 : let visible_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
6832 4 : Lsn(0x10)..Lsn(0x20),
6833 4 : vec![(
6834 4 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
6835 4 : Lsn(0x11),
6836 4 : Value::Image(test_img("foo")),
6837 4 : )],
6838 4 : );
6839 4 : let l0_delta = DeltaLayerTestDesc::new(
6840 4 : Lsn(0x20)..Lsn(0x30),
6841 4 : Key::from_hex("000000000000000000000000000000000000").unwrap()
6842 4 : ..Key::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(),
6843 4 : vec![(
6844 4 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
6845 4 : Lsn(0x25),
6846 4 : Value::Image(test_img("foo")),
6847 4 : )],
6848 4 : );
6849 4 : let delta_layers = vec![
6850 4 : covered_delta.clone(),
6851 4 : visible_delta.clone(),
6852 4 : l0_delta.clone(),
6853 4 : ];
6854 4 :
6855 4 : let image_layer = (
6856 4 : Lsn(0x40),
6857 4 : vec![(
6858 4 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
6859 4 : test_img("bar"),
6860 4 : )],
6861 4 : );
6862 4 : let image_layers = vec![image_layer];
6863 4 :
6864 4 : let (tenant, ctx) = harness.load().await;
6865 4 : let timeline = tenant
6866 4 : .create_test_timeline_with_layers(
6867 4 : TimelineId::generate(),
6868 4 : Lsn(0x10),
6869 4 : 14,
6870 4 : &ctx,
6871 4 : delta_layers,
6872 4 : image_layers,
6873 4 : Lsn(0x100),
6874 4 : )
6875 4 : .await
6876 4 : .unwrap();
6877 4 :
6878 4 : // Layer visibility is an input to heatmap generation, so refresh it first
6879 4 : timeline.update_layer_visibility().await.unwrap();
6880 4 :
6881 4 : let heatmap = timeline
6882 4 : .generate_heatmap()
6883 4 : .await
6884 4 : .expect("Infallible while timeline is not shut down");
6885 4 :
6886 4 : assert_eq!(heatmap.timeline_id, timeline.timeline_id);
6887 4 :
6888 4 : // L0 should come last
6889 4 : assert_eq!(heatmap.layers.last().unwrap().name, l0_delta.layer_name());
6890 4 :
6891 4 : let mut last_lsn = Lsn::MAX;
6892 20 : for layer in &heatmap.layers {
6893 4 : // Covered layer should be omitted
6894 16 : assert!(layer.name != covered_delta.layer_name());
6895 4 :
6896 16 : let layer_lsn = match &layer.name {
6897 8 : LayerName::Delta(d) => d.lsn_range.end,
6898 8 : LayerName::Image(i) => i.lsn,
6899 4 : };
6900 4 :
6901 4 : // Apart from L0s, newest Layers should come first
6902 16 : if !LayerMap::is_l0(layer.name.key_range(), layer.name.is_delta()) {
6903 12 : assert!(layer_lsn <= last_lsn);
6904 12 : last_lsn = layer_lsn;
6905 4 : }
6906 4 : }
6907 4 :
6908 4 : // Evict all the layers and stash the old heatmap in the timeline.
6909 4 : // This simulates a migration to a cold secondary location.
6910 4 :
6911 4 : let guard = timeline.layers.read().await;
6912 4 : let mut all_layers = Vec::new();
6913 4 : let forever = std::time::Duration::from_secs(120);
6914 20 : for layer in guard.likely_resident_layers() {
6915 20 : all_layers.push(layer.clone());
6916 20 : layer.evict_and_wait(forever).await.unwrap();
6917 4 : }
6918 4 : drop(guard);
6919 4 :
6920 4 : timeline
6921 4 : .previous_heatmap
6922 4 : .store(Some(Arc::new(PreviousHeatmap::Active {
6923 4 : heatmap: heatmap.clone(),
6924 4 : read_at: std::time::Instant::now(),
6925 4 : })));
6926 4 :
6927 4 : // Generate a new heatmap and assert that it contains the same layers as the old one.
6928 4 : let post_migration_heatmap = timeline.generate_heatmap().await.unwrap();
6929 4 : assert_heatmaps_have_same_layers(&heatmap, &post_migration_heatmap);
6930 4 :
6931 4 : // Download each layer one by one. Generate the heatmap at each step and check
6932 4 : // that it's stable.
6933 24 : for layer in all_layers {
6934 20 : if layer.visibility() == LayerVisibilityHint::Covered {
6935 4 : continue;
6936 16 : }
6937 16 :
6938 16 : eprintln!("Downloading {layer} and re-generating heatmap");
6939 4 :
6940 16 : let _resident = layer
6941 16 : .download_and_keep_resident()
6942 16 : .instrument(tracing::info_span!(
6943 16 : parent: None,
6944 4 : "download_layer",
6945 4 : tenant_id = %timeline.tenant_shard_id.tenant_id,
6946 0 : shard_id = %timeline.tenant_shard_id.shard_slug(),
6947 0 : timeline_id = %timeline.timeline_id
6948 4 : ))
6949 16 : .await
6950 16 : .unwrap();
6951 4 :
6952 16 : let post_download_heatmap = timeline.generate_heatmap().await.unwrap();
6953 16 : assert_heatmaps_have_same_layers(&heatmap, &post_download_heatmap);
6954 4 : }
6955 4 :
6956 4 : // Everything from the post-migration heatmap is now resident.
6957 4 : // Check that we drop it from memory.
6958 4 : assert!(matches!(
6959 4 : timeline.previous_heatmap.load().as_deref(),
6960 4 : Some(PreviousHeatmap::Obsolete)
6961 4 : ));
6962 4 : }
6963 :
6964 : #[tokio::test]
6965 4 : async fn test_previous_heatmap_obsoletion() {
6966 4 : let harness = TenantHarness::create("heatmap_previous_heatmap_obsoletion")
6967 4 : .await
6968 4 : .unwrap();
6969 4 :
6970 4 : let l0_delta = DeltaLayerTestDesc::new(
6971 4 : Lsn(0x20)..Lsn(0x30),
6972 4 : Key::from_hex("000000000000000000000000000000000000").unwrap()
6973 4 : ..Key::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(),
6974 4 : vec![(
6975 4 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
6976 4 : Lsn(0x25),
6977 4 : Value::Image(test_img("foo")),
6978 4 : )],
6979 4 : );
6980 4 :
6981 4 : let image_layer = (
6982 4 : Lsn(0x40),
6983 4 : vec![(
6984 4 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
6985 4 : test_img("bar"),
6986 4 : )],
6987 4 : );
6988 4 :
6989 4 : let delta_layers = vec![l0_delta];
6990 4 : let image_layers = vec![image_layer];
6991 4 :
6992 4 : let (tenant, ctx) = harness.load().await;
6993 4 : let timeline = tenant
6994 4 : .create_test_timeline_with_layers(
6995 4 : TimelineId::generate(),
6996 4 : Lsn(0x10),
6997 4 : 14,
6998 4 : &ctx,
6999 4 : delta_layers,
7000 4 : image_layers,
7001 4 : Lsn(0x100),
7002 4 : )
7003 4 : .await
7004 4 : .unwrap();
7005 4 :
7006 4 : // Layer visibility is an input to heatmap generation, so refresh it first
7007 4 : timeline.update_layer_visibility().await.unwrap();
7008 4 :
7009 4 : let heatmap = timeline
7010 4 : .generate_heatmap()
7011 4 : .await
7012 4 : .expect("Infallible while timeline is not shut down");
7013 4 :
7014 4 : // Both layers should be in the heatmap
7015 4 : assert!(!heatmap.layers.is_empty());
7016 4 :
7017 4 : // Now simulate a migration.
7018 4 : timeline
7019 4 : .previous_heatmap
7020 4 : .store(Some(Arc::new(PreviousHeatmap::Active {
7021 4 : heatmap: heatmap.clone(),
7022 4 : read_at: std::time::Instant::now(),
7023 4 : })));
7024 4 :
7025 4 : // Evict all the layers in the previous heatmap
7026 4 : let guard = timeline.layers.read().await;
7027 4 : let forever = std::time::Duration::from_secs(120);
7028 12 : for layer in guard.likely_resident_layers() {
7029 12 : layer.evict_and_wait(forever).await.unwrap();
7030 4 : }
7031 4 : drop(guard);
7032 4 :
7033 4 : // Generate a new heatmap and check that the previous heatmap
7034 4 : // has been marked obsolete.
7035 4 : let post_eviction_heatmap = timeline
7036 4 : .generate_heatmap()
7037 4 : .await
7038 4 : .expect("Infallible while timeline is not shut down");
7039 4 :
7040 4 : assert!(post_eviction_heatmap.layers.is_empty());
7041 4 : assert!(matches!(
7042 4 : timeline.previous_heatmap.load().as_deref(),
7043 4 : Some(PreviousHeatmap::Obsolete)
7044 4 : ));
7045 4 : }
7046 :
7047 : #[tokio::test]
7048 4 : async fn two_layer_eviction_attempts_at_the_same_time() {
7049 4 : let harness = TenantHarness::create("two_layer_eviction_attempts_at_the_same_time")
7050 4 : .await
7051 4 : .unwrap();
7052 4 :
7053 4 : let (tenant, ctx) = harness.load().await;
7054 4 : let timeline = tenant
7055 4 : .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
7056 4 : .await
7057 4 : .unwrap();
7058 4 :
7059 4 : let layer = find_some_layer(&timeline).await;
7060 4 : let layer = layer
7061 4 : .keep_resident()
7062 4 : .await
7063 4 : .expect("no download => no downloading errors")
7064 4 : .drop_eviction_guard();
7065 4 :
7066 4 : let forever = std::time::Duration::from_secs(120);
7067 4 :
7068 4 : let first = layer.evict_and_wait(forever);
7069 4 : let second = layer.evict_and_wait(forever);
7070 4 :
7071 4 : let (first, second) = tokio::join!(first, second);
7072 4 :
7073 4 : let res = layer.keep_resident().await;
7074 4 : assert!(res.is_none(), "{res:?}");
7075 4 :
7076 4 : match (first, second) {
7077 4 : (Ok(()), Ok(())) => {
7078 4 : // because there are no more timeline locks being taken on eviction path, we can
7079 4 : // witness all three outcomes here.
7080 4 : }
7081 4 : (Ok(()), Err(EvictionError::NotFound)) | (Err(EvictionError::NotFound), Ok(())) => {
7082 0 : // if one completes before the other, this is fine just as well.
7083 0 : }
7084 4 : other => unreachable!("unexpected {:?}", other),
7085 4 : }
7086 4 : }
7087 :
7088 4 : async fn find_some_layer(timeline: &Timeline) -> Layer {
7089 4 : let layers = timeline.layers.read().await;
7090 4 : let desc = layers
7091 4 : .layer_map()
7092 4 : .unwrap()
7093 4 : .iter_historic_layers()
7094 4 : .next()
7095 4 : .expect("must find one layer to evict");
7096 4 :
7097 4 : layers.get_from_desc(&desc)
7098 4 : }
7099 : }
|