Line data Source code
1 : pub(crate) mod analysis;
2 : pub(crate) mod compaction;
3 : pub mod delete;
4 : pub(crate) mod detach_ancestor;
5 : mod eviction_task;
6 : pub(crate) mod handle;
7 : pub(crate) mod import_pgdata;
8 : mod init;
9 : pub mod layer_manager;
10 : pub(crate) mod logical_size;
11 : pub mod offload;
12 : pub mod span;
13 : pub mod uninit;
14 : mod walreceiver;
15 :
16 : use anyhow::{anyhow, bail, ensure, Context, Result};
17 : use arc_swap::{ArcSwap, ArcSwapOption};
18 : use bytes::Bytes;
19 : use camino::Utf8Path;
20 : use chrono::{DateTime, Utc};
21 : use enumset::EnumSet;
22 : use fail::fail_point;
23 : use futures::{stream::FuturesUnordered, StreamExt};
24 : use handle::ShardTimelineId;
25 : use layer_manager::Shutdown;
26 : use offload::OffloadError;
27 : use once_cell::sync::Lazy;
28 : use pageserver_api::models::PageTraceEvent;
29 : use pageserver_api::{
30 : key::{
31 : KEY_SIZE, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX, NON_INHERITED_RANGE,
32 : SPARSE_RANGE,
33 : },
34 : keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning},
35 : models::{
36 : CompactKeyRange, CompactLsnRange, CompactionAlgorithm, CompactionAlgorithmSettings,
37 : DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy,
38 : InMemoryLayerInfo, LayerMapInfo, LsnLease, TimelineState,
39 : },
40 : reltag::BlockNumber,
41 : shard::{ShardIdentity, ShardNumber, TenantShardId},
42 : };
43 : use rand::Rng;
44 : use remote_storage::DownloadError;
45 : use serde_with::serde_as;
46 : use storage_broker::BrokerClientChannel;
47 : use tokio::sync::mpsc::Sender;
48 : use tokio::{
49 : runtime::Handle,
50 : sync::{oneshot, watch},
51 : };
52 : use tokio_util::sync::CancellationToken;
53 : use tracing::*;
54 : use utils::rate_limit::RateLimit;
55 : use utils::{
56 : fs_ext,
57 : guard_arc_swap::GuardArcSwap,
58 : pausable_failpoint,
59 : postgres_client::PostgresClientProtocol,
60 : sync::gate::{Gate, GateGuard},
61 : };
62 : use wal_decoder::serialized_batch::{SerializedValueBatch, ValueMeta};
63 :
64 : use std::array;
65 : use std::cmp::{max, min};
66 : use std::collections::btree_map::Entry;
67 : use std::collections::{BTreeMap, HashMap, HashSet};
68 : use std::ops::{ControlFlow, Deref, Range};
69 : use std::sync::atomic::{AtomicBool, AtomicU64, Ordering as AtomicOrdering};
70 : use std::sync::{Arc, Mutex, OnceLock, RwLock, Weak};
71 : use std::time::{Duration, Instant, SystemTime};
72 :
73 : use crate::l0_flush::{self, L0FlushGlobalState};
74 : use crate::tenant::storage_layer::ImageLayerName;
75 : use crate::{
76 : aux_file::AuxFileSizeEstimator,
77 : page_service::TenantManagerTypes,
78 : tenant::{
79 : config::AttachmentMode,
80 : layer_map::{LayerMap, SearchResult},
81 : metadata::TimelineMetadata,
82 : storage_layer::{
83 : inmemory_layer::IndexEntry, BatchLayerWriter, IoConcurrency, PersistentLayerDesc,
84 : ValueReconstructSituation,
85 : },
86 : },
87 : walingest::WalLagCooldown,
88 : walredo,
89 : };
90 : use crate::{
91 : context::{DownloadBehavior, RequestContext},
92 : disk_usage_eviction_task::DiskUsageEvictionInfo,
93 : pgdatadir_mapping::CollectKeySpaceError,
94 : };
95 : use crate::{
96 : disk_usage_eviction_task::finite_f32,
97 : tenant::storage_layer::{
98 : AsLayerDesc, DeltaLayerWriter, EvictionError, ImageLayerWriter, InMemoryLayer, Layer,
99 : LayerAccessStatsReset, LayerName, ResidentLayer, ValueReconstructState,
100 : ValuesReconstructState,
101 : },
102 : };
103 : use crate::{
104 : disk_usage_eviction_task::EvictionCandidate, tenant::storage_layer::delta_layer::DeltaEntry,
105 : };
106 : use crate::{
107 : metrics::ScanLatencyOngoingRecording, tenant::timeline::logical_size::CurrentLogicalSize,
108 : };
109 : use crate::{
110 : pgdatadir_mapping::DirectoryKind,
111 : virtual_file::{MaybeFatalIo, VirtualFile},
112 : };
113 : use crate::{pgdatadir_mapping::LsnForTimestamp, tenant::tasks::BackgroundLoopKind};
114 : use crate::{pgdatadir_mapping::MAX_AUX_FILE_V2_DELTAS, tenant::storage_layer::PersistentLayerKey};
115 : use pageserver_api::config::tenant_conf_defaults::DEFAULT_PITR_INTERVAL;
116 :
117 : use crate::config::PageServerConf;
118 : use crate::keyspace::{KeyPartitioning, KeySpace};
119 : use crate::metrics::{TimelineMetrics, DELTAS_PER_READ_GLOBAL, LAYERS_PER_READ_GLOBAL};
120 : use crate::pgdatadir_mapping::CalculateLogicalSizeError;
121 : use crate::tenant::config::TenantConfOpt;
122 : use pageserver_api::reltag::RelTag;
123 : use pageserver_api::shard::ShardIndex;
124 :
125 : use postgres_connection::PgConnectionConfig;
126 : use postgres_ffi::{to_pg_timestamp, v14::xlog_utils, WAL_SEGMENT_SIZE};
127 : use utils::{
128 : completion,
129 : generation::Generation,
130 : id::TimelineId,
131 : lsn::{AtomicLsn, Lsn, RecordLsn},
132 : seqwait::SeqWait,
133 : simple_rcu::{Rcu, RcuReadGuard},
134 : };
135 :
136 : use crate::task_mgr;
137 : use crate::task_mgr::TaskKind;
138 : use crate::tenant::gc_result::GcResult;
139 : use crate::ZERO_PAGE;
140 : use pageserver_api::key::Key;
141 :
142 : use self::delete::DeleteTimelineFlow;
143 : pub(super) use self::eviction_task::EvictionTaskTenantState;
144 : use self::eviction_task::EvictionTaskTimelineState;
145 : use self::layer_manager::LayerManager;
146 : use self::logical_size::LogicalSize;
147 : use self::walreceiver::{WalReceiver, WalReceiverConf};
148 :
149 : use super::{
150 : config::TenantConf, storage_layer::LayerVisibilityHint, upload_queue::NotInitialized,
151 : MaybeOffloaded,
152 : };
153 : use super::{debug_assert_current_span_has_tenant_and_timeline_id, AttachedTenantConf};
154 : use super::{remote_timeline_client::index::IndexPart, storage_layer::LayerFringe};
155 : use super::{
156 : remote_timeline_client::RemoteTimelineClient, remote_timeline_client::WaitCompletionError,
157 : storage_layer::ReadableLayer,
158 : };
159 : use super::{
160 : secondary::heatmap::{HeatMapLayer, HeatMapTimeline},
161 : GcError,
162 : };
163 :
164 : #[cfg(test)]
165 : use pageserver_api::value::Value;
166 :
167 : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
168 : pub(crate) enum FlushLoopState {
169 : NotStarted,
170 : Running {
171 : #[cfg(test)]
172 : expect_initdb_optimization: bool,
173 : #[cfg(test)]
174 : initdb_optimization_count: usize,
175 : },
176 : Exited,
177 : }
178 :
179 : #[derive(Debug, Copy, Clone, PartialEq, Eq)]
180 : pub enum ImageLayerCreationMode {
181 : /// Try to create image layers based on `time_for_new_image_layer`. Used in compaction code path.
182 : Try,
183 : /// Force creating the image layers if possible. For now, no image layers will be created
184 : /// for metadata keys. Used in compaction code path with force flag enabled.
185 : Force,
186 : /// Initial ingestion of the data, and no data should be dropped in this function. This
187 : /// means that no metadata keys should be included in the partitions. Used in flush frozen layer
188 : /// code path.
189 : Initial,
190 : }
191 :
192 : impl std::fmt::Display for ImageLayerCreationMode {
193 1136 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
194 1136 : write!(f, "{:?}", self)
195 1136 : }
196 : }
197 :
198 : /// Temporary function for the immutable storage state refactor; ensures we drop the lock guard rather than something else.
199 : /// Can be removed after all refactors are done.
200 56 : fn drop_rlock<T>(rlock: tokio::sync::RwLockReadGuard<T>) {
201 56 : drop(rlock)
202 56 : }
203 :
204 : /// Temporary function for the immutable storage state refactor; ensures we drop the lock guard rather than something else.
205 : /// Can be removed after all refactors are done.
206 1192 : fn drop_wlock<T>(wlock: tokio::sync::RwLockWriteGuard<'_, T>) {
207 1192 : drop(wlock)
208 1192 : }
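// Usage sketch (hypothetical caller): because the parameter type is the guard
// itself, passing anything other than the guard fails to compile, which is the
// point of these helpers during the refactor:
//
//     let guard = timeline.layers.read().await;
//     drop_rlock(guard); // consumes the RwLockReadGuard explicitly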
209 :
210 : /// The outward-facing resources required to build a Timeline
211 : pub struct TimelineResources {
212 : pub remote_client: RemoteTimelineClient,
213 : pub pagestream_throttle: Arc<crate::tenant::throttle::Throttle>,
214 : pub pagestream_throttle_metrics: Arc<crate::metrics::tenant_throttling::Pagestream>,
215 : pub l0_flush_global_state: l0_flush::L0FlushGlobalState,
216 : }
217 :
218 : /// The relation size cache caches relation sizes at the end of the timeline. It speeds up WAL
219 : /// ingestion considerably, because WAL ingestion needs to check on most records if the record
220 : /// implicitly extends the relation. At startup, `complete_as_of` is initialized to the current end
221 : /// of the timeline (disk_consistent_lsn). It's used on reads of relation sizes to check if the
222 : /// value can be used to also update the cache, see [`Timeline::update_cached_rel_size`].
223 : pub(crate) struct RelSizeCache {
224 : pub(crate) complete_as_of: Lsn,
225 : pub(crate) map: HashMap<RelTag, (Lsn, BlockNumber)>,
226 : }
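// Illustrative check (a sketch, not the actual cache-update code): a relation
// size observed at `read_lsn` may also refresh the cache only if no later WAL
// record could have extended the relation, i.e. the read is at or past
// `complete_as_of`:
//
//     let cache = timeline.rel_size_cache.read().unwrap();
//     let may_update_cache = read_lsn >= cache.complete_as_of;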
227 :
228 : pub struct Timeline {
229 : pub(crate) conf: &'static PageServerConf,
230 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
231 :
232 : myself: Weak<Self>,
233 :
234 : pub(crate) tenant_shard_id: TenantShardId,
235 : pub timeline_id: TimelineId,
236 :
237 : /// The generation of the tenant that instantiated us: this is used for safety when writing remote objects.
238 : /// Never changes for the lifetime of this [`Timeline`] object.
239 : ///
240 : /// This duplicates the generation stored in LocationConf, but that structure is mutable:
241 : /// this copy enforces the invariant that the generation doesn't change during a Tenant's lifetime.
242 : pub(crate) generation: Generation,
243 :
244 : /// The detailed sharding information from our parent Tenant. This enables us to map keys
245 : /// to shards, and is constant through the lifetime of this Timeline.
246 : shard_identity: ShardIdentity,
247 :
248 : pub pg_version: u32,
249 :
250 : /// The tuple has two elements.
251 : /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote).
252 : /// 2. `LayerMap`, the acceleration data structure for `get_reconstruct_data`.
253 : ///
254 : /// `LayerMap` maps out the `(PAGE,LSN) / (KEY,LSN)` space, which is composed of `(KeyRange, LsnRange)` rectangles.
255 : /// We describe these rectangles through the `PersistentLayerDesc` struct.
256 : ///
257 : /// When we want to reconstruct a page, we first find the `PersistentLayerDesc`'s that we need for page reconstruction,
258 : /// using `LayerMap`. Then, we use `LayerFileManager` to get the `PersistentLayer`'s that correspond to the
259 : /// `PersistentLayerDesc`'s.
260 : ///
261 : /// Hence, it's important to keep things coherent. The `LayerFileManager` must always have an entry for all
262 : /// `PersistentLayerDesc`'s in the `LayerMap`. If it doesn't, `LayerFileManager::get_from_desc` will panic at
263 : /// runtime, e.g., during page reconstruction.
264 : ///
265 : /// In the future, we'll be able to split up the tuple of LayerMap and `LayerFileManager`,
266 : /// so that e.g. on-demand-download/eviction, and layer spreading, can operate just on `LayerFileManager`.
267 : pub(crate) layers: tokio::sync::RwLock<LayerManager>,
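// Read-path sketch of the coherence invariant described above (simplified; the
// real lookups live in `LayerMap::search` and the layer file manager):
//
//     let guard = timeline.layers.read().await;
//     let desc: PersistentLayerDesc = /* LayerMap search for (key, lsn) */;
//     let layer = guard.get_from_desc(&desc); // panics if map and manager diverge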
268 :
269 : last_freeze_at: AtomicLsn,
270 : // Atomic would be more appropriate here.
271 : last_freeze_ts: RwLock<Instant>,
272 :
273 : pub(crate) standby_horizon: AtomicLsn,
274 :
275 : // WAL redo manager. `None` only for broken tenants.
276 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
277 :
278 : /// Remote storage client.
279 : /// See [`remote_timeline_client`](super::remote_timeline_client) module comment for details.
280 : pub(crate) remote_client: Arc<RemoteTimelineClient>,
281 :
282 : // What page versions do we hold in the repository? If we get a
283 : // request > last_record_lsn, we need to wait until we receive all
284 : // the WAL up to the request. The SeqWait provides functions for
285 : // that. TODO: If we get a request for an old LSN, such that the
286 : // versions have already been garbage collected away, we should
287 : // throw an error, but we don't track that currently.
288 : //
289 : // last_record_lsn.load().last points to the end of last processed WAL record.
290 : //
291 : // We also remember the starting point of the previous record in
292 : // 'last_record_lsn.load().prev'. It's used to set the xl_prev pointer of the
293 : // first WAL record when the node is started up. But here, we just
294 : // keep track of it.
295 : last_record_lsn: SeqWait<RecordLsn, Lsn>,
296 :
297 : // All WAL records have been processed and stored durably on files on
298 : // local disk, up to this LSN. On crash and restart, we need to re-process
299 : // the WAL starting from this point.
300 : //
301 : // Some later WAL records might have been processed and also flushed to disk
302 : // already, so don't be surprised to see some, but there's no guarantee on
303 : // them yet.
304 : disk_consistent_lsn: AtomicLsn,
305 :
306 : // Parent timeline that this timeline was branched from, and the LSN
307 : // of the branch point.
308 : ancestor_timeline: Option<Arc<Timeline>>,
309 : ancestor_lsn: Lsn,
310 :
311 : pub(super) metrics: TimelineMetrics,
312 :
313 : // `Timeline` doesn't write these metrics itself, but it manages the lifetime. Code
314 : // in `crate::page_service` writes these metrics.
315 : pub(crate) query_metrics: crate::metrics::SmgrQueryTimePerTimeline,
316 :
317 : directory_metrics: [AtomicU64; DirectoryKind::KINDS_NUM],
318 :
319 : /// Ensures layers aren't frozen by checkpointer between
320 : /// [`Timeline::get_layer_for_write`] and layer reads.
321 : /// Locked automatically by [`TimelineWriter`] and checkpointer.
322 : /// Must always be acquired before the layer map/individual layer lock
323 : /// to avoid deadlock.
324 : ///
325 : /// The state is cleared upon freezing.
326 : write_lock: tokio::sync::Mutex<Option<TimelineWriterState>>,
327 :
328 : /// Used to avoid multiple `flush_loop` tasks running
329 : pub(super) flush_loop_state: Mutex<FlushLoopState>,
330 :
331 : /// layer_flush_start_tx can be used to wake up the layer-flushing task.
332 : /// - The u64 value is a counter, incremented every time a new flush cycle is requested.
333 : /// The flush cycle counter is sent back on the layer_flush_done channel when
334 : /// the flush finishes. You can use that to wait for the flush to finish.
335 : /// - The LSN is updated to max() of its current value and the latest disk_consistent_lsn
336 : /// read by whoever sends an update
337 : layer_flush_start_tx: tokio::sync::watch::Sender<(u64, Lsn)>,
338 : /// to be notified when layer flushing has finished, subscribe to the layer_flush_done channel
339 : layer_flush_done_tx: tokio::sync::watch::Sender<(u64, Result<(), FlushLayerError>)>,
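// Sketch of the request/ack protocol implied by these two channels (the real
// logic lives in the flush loop; names are illustrative, error handling elided):
//
//     let (last_seq, _) = *self.layer_flush_start_tx.borrow();
//     let my_seq = last_seq + 1;
//     self.layer_flush_start_tx.send((my_seq, disk_consistent_lsn))?;
//     // ...the flush loop eventually publishes (my_seq, result) on
//     // layer_flush_done_tx; a waiter watches the done channel until the
//     // acknowledged counter reaches my_seq.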
340 :
341 : // Needed to ensure that we can't create a branch at a point that was already garbage collected
342 : pub latest_gc_cutoff_lsn: Rcu<Lsn>,
343 :
344 : pub(crate) gc_compaction_layer_update_lock: tokio::sync::RwLock<()>,
345 :
346 : // List of child timelines and their branch points. This is needed to avoid
347 : // garbage collecting data that is still needed by the child timelines.
348 : pub(crate) gc_info: std::sync::RwLock<GcInfo>,
349 :
350 : // It may change across major Postgres versions, so for simplicity
351 : // we keep it as recorded when initdb was run for this timeline.
352 : // It is needed in checks where we want to error on operations
353 : // requested at a pre-initdb LSN.
354 : // It could be unified with latest_gc_cutoff_lsn under some "first_valid_lsn",
355 : // though let's keep them both for better error visibility.
356 : pub initdb_lsn: Lsn,
357 :
358 : /// The repartitioning result. Allows a single writer and multiple readers.
359 : pub(crate) partitioning: GuardArcSwap<((KeyPartitioning, SparseKeyPartitioning), Lsn)>,
360 :
361 : /// Configuration: how often should the partitioning be recalculated.
362 : repartition_threshold: u64,
363 :
364 : last_image_layer_creation_check_at: AtomicLsn,
365 : last_image_layer_creation_check_instant: std::sync::Mutex<Option<Instant>>,
366 :
367 : /// Current logical size of the "datadir", at the last LSN.
368 : current_logical_size: LogicalSize,
369 :
370 : /// Information about the last processed message by the WAL receiver,
371 : /// or None if WAL receiver has not received anything for this timeline
372 : /// yet.
373 : pub last_received_wal: Mutex<Option<WalReceiverInfo>>,
374 : pub walreceiver: Mutex<Option<WalReceiver>>,
375 :
376 : /// Relation size cache
377 : pub(crate) rel_size_cache: RwLock<RelSizeCache>,
378 :
379 : download_all_remote_layers_task_info: RwLock<Option<DownloadRemoteLayersTaskInfo>>,
380 :
381 : state: watch::Sender<TimelineState>,
382 :
383 : /// Prevent two tasks from deleting the timeline at the same time. If held, the
384 : /// timeline is being deleted. If 'true', the timeline has already been deleted.
385 : pub delete_progress: TimelineDeleteProgress,
386 :
387 : eviction_task_timeline_state: tokio::sync::Mutex<EvictionTaskTimelineState>,
388 :
389 : /// Load or creation time information about the disk_consistent_lsn and when the loading
390 : /// happened. Used for consumption metrics.
391 : pub(crate) loaded_at: (Lsn, SystemTime),
392 :
393 : /// Gate to prevent shutdown completing while I/O is still happening to this timeline's data
394 : pub(crate) gate: Gate,
395 :
396 : /// Cancellation token scoped to this timeline: anything doing long-running work relating
397 : /// to the timeline should drop out when this token fires.
398 : pub(crate) cancel: CancellationToken,
399 :
400 : /// Make sure we only have one running compaction at a time in tests.
401 : ///
402 : /// Must only be taken in two places:
403 : /// - [`Timeline::compact`] (this file)
404 : /// - [`delete::delete_local_timeline_directory`]
405 : ///
406 : /// Timeline deletion will acquire both the compaction and gc locks, in an arbitrary order.
407 : compaction_lock: tokio::sync::Mutex<()>,
408 :
409 : /// If true, the last compaction failed.
410 : compaction_failed: AtomicBool,
411 :
412 : /// Make sure we only have one running gc at a time.
413 : ///
414 : /// Must only be taken in two places:
415 : /// - [`Timeline::gc`] (this file)
416 : /// - [`delete::delete_local_timeline_directory`]
417 : ///
418 : /// Timeline deletion will acquire both the compaction and gc locks, in an arbitrary order.
419 : gc_lock: tokio::sync::Mutex<()>,
420 :
421 : /// Cloned from [`super::Tenant::pagestream_throttle`] on construction.
422 : pub(crate) pagestream_throttle: Arc<crate::tenant::throttle::Throttle>,
423 :
424 : /// Size estimator for aux file v2
425 : pub(crate) aux_file_size_estimator: AuxFileSizeEstimator,
426 :
427 : /// Some test cases directly place keys into the timeline without actually modifying the directory
428 : /// keys (i.e., DB_DIR). The test cases creating such keys will put the keyspaces here, so that
429 : /// these keys won't get garbage-collected during compaction/GC. This field only modifies the dense
430 : /// keyspace return value of `collect_keyspace`. For sparse keyspaces, use AUX keys for testing, and
431 : /// in the future, add `extra_test_sparse_keyspace` if necessary.
432 : #[cfg(test)]
433 : pub(crate) extra_test_dense_keyspace: ArcSwap<KeySpace>,
434 :
435 : pub(crate) l0_flush_global_state: L0FlushGlobalState,
436 :
437 : pub(crate) handles: handle::PerTimelineState<TenantManagerTypes>,
438 :
439 : pub(crate) attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
440 :
441 : /// Cf. [`crate::tenant::CreateTimelineIdempotency`].
442 : pub(crate) create_idempotency: crate::tenant::CreateTimelineIdempotency,
443 :
444 : /// If Some, collects GetPage metadata for an ongoing PageTrace.
445 : pub(crate) page_trace: ArcSwapOption<Sender<PageTraceEvent>>,
446 : }
447 :
448 : pub type TimelineDeleteProgress = Arc<tokio::sync::Mutex<DeleteTimelineFlow>>;
449 :
450 : pub struct WalReceiverInfo {
451 : pub wal_source_connconf: PgConnectionConfig,
452 : pub last_received_msg_lsn: Lsn,
453 : pub last_received_msg_ts: u128,
454 : }
455 :
456 : /// Information about how much history needs to be retained, needed by
457 : /// Garbage Collection.
458 : #[derive(Default)]
459 : pub(crate) struct GcInfo {
460 : /// Specific LSNs that are needed.
461 : ///
462 : /// Currently, this includes all points where child branches have
463 : /// been forked off. In the future, this could also include
464 : /// explicit user-defined snapshot points.
465 : pub(crate) retain_lsns: Vec<(Lsn, TimelineId, MaybeOffloaded)>,
466 :
467 : /// The cutoff coordinates, which are combined by selecting the minimum.
468 : pub(crate) cutoffs: GcCutoffs,
469 :
470 : /// Leases granted to particular LSNs.
471 : pub(crate) leases: BTreeMap<Lsn, LsnLease>,
472 :
473 : /// Whether our branch point is within our ancestor's PITR interval (for cost estimation)
474 : pub(crate) within_ancestor_pitr: bool,
475 : }
476 :
477 : impl GcInfo {
478 564 : pub(crate) fn min_cutoff(&self) -> Lsn {
479 564 : self.cutoffs.select_min()
480 564 : }
481 :
482 464 : pub(super) fn insert_child(
483 464 : &mut self,
484 464 : child_id: TimelineId,
485 464 : child_lsn: Lsn,
486 464 : is_offloaded: MaybeOffloaded,
487 464 : ) {
488 464 : self.retain_lsns.push((child_lsn, child_id, is_offloaded));
489 464 : self.retain_lsns.sort_by_key(|i| i.0);
490 464 : }
491 :
492 8 : pub(super) fn remove_child_maybe_offloaded(
493 8 : &mut self,
494 8 : child_id: TimelineId,
495 8 : maybe_offloaded: MaybeOffloaded,
496 8 : ) -> bool {
497 8 : // Remove at most one element. Needed for correctness if there are two live `Timeline` objects referencing
498 8 : // the same timeline. This shouldn't happen, but can occur when `Arc`s live longer than intended.
499 8 : let mut removed = false;
500 12 : self.retain_lsns.retain(|i| {
501 12 : if removed {
502 4 : return true;
503 8 : }
504 8 : let remove = i.1 == child_id && i.2 == maybe_offloaded;
505 8 : removed |= remove;
506 8 : !remove
507 12 : });
508 8 : removed
509 8 : }
510 :
511 8 : pub(super) fn remove_child_not_offloaded(&mut self, child_id: TimelineId) -> bool {
512 8 : self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::No)
513 8 : }
514 :
515 0 : pub(super) fn remove_child_offloaded(&mut self, child_id: TimelineId) -> bool {
516 0 : self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::Yes)
517 0 : }
518 : }
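// Usage sketch: branch creation registers the child's branch point so GC keeps
// it, and timeline deletion removes exactly one matching entry:
//
//     let mut gc_info = timeline.gc_info.write().unwrap();
//     gc_info.insert_child(child_id, branch_lsn, MaybeOffloaded::No);
//     // ...later, when the child timeline is deleted:
//     let removed = gc_info.remove_child_not_offloaded(child_id);
//     debug_assert!(removed);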
519 :
520 : /// The `GcInfo` component describing which Lsns need to be retained. Functionally, this
521 : /// is a single number (the oldest LSN which we must retain), but it internally distinguishes
522 : /// between time-based and space-based retention for observability and consumption metrics purposes.
523 : #[derive(Debug, Clone)]
524 : pub(crate) struct GcCutoffs {
525 : /// Calculated from the [`TenantConf::gc_horizon`], this LSN indicates how much
526 : /// history we must keep to retain a specified number of bytes of WAL.
527 : pub(crate) space: Lsn,
528 :
529 : /// Calculated from [`TenantConf::pitr_interval`], this LSN indicates how much
530 : /// history we must keep to enable reading back at least the PITR interval duration.
531 : pub(crate) time: Lsn,
532 : }
533 :
534 : impl Default for GcCutoffs {
535 892 : fn default() -> Self {
536 892 : Self {
537 892 : space: Lsn::INVALID,
538 892 : time: Lsn::INVALID,
539 892 : }
540 892 : }
541 : }
542 :
543 : impl GcCutoffs {
544 564 : fn select_min(&self) -> Lsn {
545 564 : std::cmp::min(self.space, self.time)
546 564 : }
547 : }
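// Worked example: with `space: Lsn(0x40_0000)` and `time: Lsn(0x30_0000)`,
// `select_min()` returns `Lsn(0x30_0000)`: GC honors whichever cutoff demands
// more history, here the time-based (PITR) one.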
548 :
549 : pub(crate) struct TimelineVisitOutcome {
550 : completed_keyspace: KeySpace,
551 : image_covered_keyspace: KeySpace,
552 : }
553 :
554 : /// An error happened in a get() operation.
555 : #[derive(thiserror::Error, Debug)]
556 : pub(crate) enum PageReconstructError {
557 : #[error(transparent)]
558 : Other(anyhow::Error),
559 :
560 : #[error("Ancestor LSN wait error: {0}")]
561 : AncestorLsnTimeout(WaitLsnError),
562 :
563 : #[error("timeline shutting down")]
564 : Cancelled,
565 :
566 : /// An error happened replaying WAL records
567 : #[error(transparent)]
568 : WalRedo(anyhow::Error),
569 :
570 : #[error("{0}")]
571 : MissingKey(MissingKeyError),
572 : }
573 :
574 : impl From<anyhow::Error> for PageReconstructError {
575 0 : fn from(value: anyhow::Error) -> Self {
576 0 : // In walingest.rs, many PageReconstructErrors are wrapped in anyhow::Error
577 0 : match value.downcast::<PageReconstructError>() {
578 0 : Ok(pre) => pre,
579 0 : Err(other) => PageReconstructError::Other(other),
580 : }
581 0 : }
582 : }
583 :
584 : impl From<utils::bin_ser::DeserializeError> for PageReconstructError {
585 0 : fn from(value: utils::bin_ser::DeserializeError) -> Self {
586 0 : PageReconstructError::Other(anyhow::Error::new(value).context("deserialization failure"))
587 0 : }
588 : }
589 :
590 : impl From<layer_manager::Shutdown> for PageReconstructError {
591 0 : fn from(_: layer_manager::Shutdown) -> Self {
592 0 : PageReconstructError::Cancelled
593 0 : }
594 : }
595 :
596 : impl GetVectoredError {
597 : #[cfg(test)]
598 12 : pub(crate) fn is_missing_key_error(&self) -> bool {
599 12 : matches!(self, Self::MissingKey(_))
600 12 : }
601 : }
602 :
603 : impl From<layer_manager::Shutdown> for GetVectoredError {
604 0 : fn from(_: layer_manager::Shutdown) -> Self {
605 0 : GetVectoredError::Cancelled
606 0 : }
607 : }
608 :
609 : #[derive(thiserror::Error)]
610 : pub struct MissingKeyError {
611 : key: Key,
612 : shard: ShardNumber,
613 : cont_lsn: Lsn,
614 : request_lsn: Lsn,
615 : ancestor_lsn: Option<Lsn>,
616 : backtrace: Option<std::backtrace::Backtrace>,
617 : }
618 :
619 : impl std::fmt::Debug for MissingKeyError {
620 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
621 0 : write!(f, "{}", self)
622 0 : }
623 : }
624 :
625 : impl std::fmt::Display for MissingKeyError {
626 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
627 0 : write!(
628 0 : f,
629 0 : "could not find data for key {} (shard {:?}) at LSN {}, request LSN {}",
630 0 : self.key, self.shard, self.cont_lsn, self.request_lsn
631 0 : )?;
632 0 : if let Some(ref ancestor_lsn) = self.ancestor_lsn {
633 0 : write!(f, ", ancestor {}", ancestor_lsn)?;
634 0 : }
635 :
636 0 : if let Some(ref backtrace) = self.backtrace {
637 0 : write!(f, "\n{}", backtrace)?;
638 0 : }
639 :
640 0 : Ok(())
641 0 : }
642 : }
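// Illustrative rendering (values hypothetical): the Display impl above yields
// messages like
//
//     could not find data for key 000000067F0000400000B6FF0000000001 (shard
//     ShardNumber(0)) at LSN 0/1696070, request LSN 0/1696070, ancestor 0/1551B98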
643 :
644 : impl PageReconstructError {
645 : /// Returns true if this error indicates a tenant/timeline shutdown-like situation
646 0 : pub(crate) fn is_stopping(&self) -> bool {
647 : use PageReconstructError::*;
648 0 : match self {
649 0 : Cancelled => true,
650 0 : Other(_) | AncestorLsnTimeout(_) | WalRedo(_) | MissingKey(_) => false,
651 : }
652 0 : }
653 : }
654 :
655 : #[derive(thiserror::Error, Debug)]
656 : pub(crate) enum CreateImageLayersError {
657 : #[error("timeline shutting down")]
658 : Cancelled,
659 :
660 : #[error("read failed")]
661 : GetVectoredError(#[source] GetVectoredError),
662 :
663 : #[error("reconstruction failed")]
664 : PageReconstructError(#[source] PageReconstructError),
665 :
666 : #[error(transparent)]
667 : Other(#[from] anyhow::Error),
668 : }
669 :
670 : impl From<layer_manager::Shutdown> for CreateImageLayersError {
671 0 : fn from(_: layer_manager::Shutdown) -> Self {
672 0 : CreateImageLayersError::Cancelled
673 0 : }
674 : }
675 :
676 : #[derive(thiserror::Error, Debug, Clone)]
677 : pub(crate) enum FlushLayerError {
678 : /// Timeline cancellation token was cancelled
679 : #[error("timeline shutting down")]
680 : Cancelled,
681 :
682 : /// We tried to flush a layer while the Timeline is in an unexpected state
683 : #[error("cannot flush frozen layers when flush_loop is not running, state is {0:?}")]
684 : NotRunning(FlushLoopState),
685 :
686 : // We wrap the following non-cloneable error types in Arc<>: this enum must be Clone because the flush error is propagated from the flush
687 : // loop via a watch channel, where we can only borrow it.
688 : #[error("create image layers (shared)")]
689 : CreateImageLayersError(Arc<CreateImageLayersError>),
690 :
691 : #[error("other (shared)")]
692 : Other(#[from] Arc<anyhow::Error>),
693 : }
694 :
695 : impl FlushLayerError {
696 : // When crossing from generic anyhow errors to this error type, we explicitly check
697 : // for timeline cancellation to avoid logging inoffensive shutdown errors as warn/err.
698 0 : fn from_anyhow(timeline: &Timeline, err: anyhow::Error) -> Self {
699 0 : let cancelled = timeline.cancel.is_cancelled()
700 : // The upload queue might have been shut down before the official cancellation of the timeline.
701 0 : || err
702 0 : .downcast_ref::<NotInitialized>()
703 0 : .map(NotInitialized::is_stopping)
704 0 : .unwrap_or_default();
705 0 : if cancelled {
706 0 : Self::Cancelled
707 : } else {
708 0 : Self::Other(Arc::new(err))
709 : }
710 0 : }
711 : }
712 :
713 : impl From<layer_manager::Shutdown> for FlushLayerError {
714 0 : fn from(_: layer_manager::Shutdown) -> Self {
715 0 : FlushLayerError::Cancelled
716 0 : }
717 : }
718 :
719 : #[derive(thiserror::Error, Debug)]
720 : pub(crate) enum GetVectoredError {
721 : #[error("timeline shutting down")]
722 : Cancelled,
723 :
724 : #[error("requested too many keys: {0} > {}", Timeline::MAX_GET_VECTORED_KEYS)]
725 : Oversized(u64),
726 :
727 : #[error("requested at invalid LSN: {0}")]
728 : InvalidLsn(Lsn),
729 :
730 : #[error("requested key not found: {0}")]
731 : MissingKey(MissingKeyError),
732 :
733 : #[error("ancestry walk")]
734 : GetReadyAncestorError(#[source] GetReadyAncestorError),
735 :
736 : #[error(transparent)]
737 : Other(#[from] anyhow::Error),
738 : }
739 :
740 : impl From<GetReadyAncestorError> for GetVectoredError {
741 4 : fn from(value: GetReadyAncestorError) -> Self {
742 : use GetReadyAncestorError::*;
743 4 : match value {
744 0 : Cancelled => GetVectoredError::Cancelled,
745 : AncestorLsnTimeout(_) | BadState { .. } => {
746 4 : GetVectoredError::GetReadyAncestorError(value)
747 : }
748 : }
749 4 : }
750 : }
751 :
752 : #[derive(thiserror::Error, Debug)]
753 : pub(crate) enum GetReadyAncestorError {
754 : #[error("ancestor LSN wait error")]
755 : AncestorLsnTimeout(#[from] WaitLsnError),
756 :
757 : #[error("bad state on timeline {timeline_id}: {state:?}")]
758 : BadState {
759 : timeline_id: TimelineId,
760 : state: TimelineState,
761 : },
762 :
763 : #[error("cancelled")]
764 : Cancelled,
765 : }
766 :
767 : #[derive(Clone, Copy)]
768 : pub enum LogicalSizeCalculationCause {
769 : Initial,
770 : ConsumptionMetricsSyntheticSize,
771 : EvictionTaskImitation,
772 : TenantSizeHandler,
773 : }
774 :
775 : pub enum GetLogicalSizePriority {
776 : User,
777 : Background,
778 : }
779 :
780 0 : #[derive(Debug, enumset::EnumSetType)]
781 : pub(crate) enum CompactFlags {
782 : ForceRepartition,
783 : ForceImageLayerCreation,
784 : ForceL0Compaction,
785 : EnhancedGcBottomMostCompaction,
786 : DryRun,
787 : }
788 :
789 : #[serde_with::serde_as]
790 0 : #[derive(Debug, Clone, serde::Deserialize)]
791 : pub(crate) struct CompactRequest {
792 : pub compact_key_range: Option<CompactKeyRange>,
793 : pub compact_lsn_range: Option<CompactLsnRange>,
794 : /// Whether the compaction job should be scheduled.
795 : #[serde(default)]
796 : pub scheduled: bool,
797 : /// Whether the compaction job should be split across key ranges.
798 : #[serde(default)]
799 : pub sub_compaction: bool,
800 : /// Max job size for each subcompaction job.
801 : pub sub_compaction_max_job_size_mb: Option<u64>,
802 : }
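// Illustrative request body for this struct (field names from the definitions
// above; values hypothetical, the optional ranges omitted):
//
//     { "scheduled": true, "sub_compaction": true, "sub_compaction_max_job_size_mb": 100 }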
803 :
804 : #[derive(Debug, Clone, Default)]
805 : pub(crate) struct CompactOptions {
806 : pub flags: EnumSet<CompactFlags>,
807 : /// If set, the compaction will only compact the key range specified by this option.
808 : /// This option is only used by GC compaction. For the full explanation, see [`compaction::GcCompactJob`].
809 : pub compact_key_range: Option<CompactKeyRange>,
810 : /// If set, the compaction will only compact the LSN within this value.
811 : /// This option is only used by GC compaction. For the full explanation, see [`compaction::GcCompactJob`].
812 : pub compact_lsn_range: Option<CompactLsnRange>,
813 : /// Enable sub-compaction (split compaction job across key ranges).
814 : /// This option is only used by GC compaction.
815 : pub sub_compaction: bool,
816 : /// Set job size for the GC compaction.
817 : /// This option is only used by GC compaction.
818 : pub sub_compaction_max_job_size_mb: Option<u64>,
819 : }
820 :
821 : impl std::fmt::Debug for Timeline {
822 0 : fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
823 0 : write!(f, "Timeline<{}>", self.timeline_id)
824 0 : }
825 : }
826 :
827 : #[derive(thiserror::Error, Debug)]
828 : pub(crate) enum WaitLsnError {
829 : // Called on a timeline which is shutting down
830 : #[error("Shutdown")]
831 : Shutdown,
832 :
833 : // Called on a timeline that is not in an active state or is shutting down
834 : #[error("Bad timeline state: {0:?}")]
835 : BadState(TimelineState),
836 :
837 : // Timeout expired while waiting for LSN to catch up with goal.
838 : #[error("{0}")]
839 : Timeout(String),
840 : }
841 :
842 : // The impls below achieve cancellation mapping for errors.
843 : // Perhaps there's a way of achieving this with less cruft.
844 :
845 : impl From<CreateImageLayersError> for CompactionError {
846 0 : fn from(e: CreateImageLayersError) -> Self {
847 0 : match e {
848 0 : CreateImageLayersError::Cancelled => CompactionError::ShuttingDown,
849 0 : CreateImageLayersError::Other(e) => {
850 0 : CompactionError::Other(e.context("create image layers"))
851 : }
852 0 : _ => CompactionError::Other(e.into()),
853 : }
854 0 : }
855 : }
856 :
857 : impl From<CreateImageLayersError> for FlushLayerError {
858 0 : fn from(e: CreateImageLayersError) -> Self {
859 0 : match e {
860 0 : CreateImageLayersError::Cancelled => FlushLayerError::Cancelled,
861 0 : any => FlushLayerError::CreateImageLayersError(Arc::new(any)),
862 : }
863 0 : }
864 : }
865 :
866 : impl From<PageReconstructError> for CreateImageLayersError {
867 0 : fn from(e: PageReconstructError) -> Self {
868 0 : match e {
869 0 : PageReconstructError::Cancelled => CreateImageLayersError::Cancelled,
870 0 : _ => CreateImageLayersError::PageReconstructError(e),
871 : }
872 0 : }
873 : }
874 :
875 : impl From<GetVectoredError> for CreateImageLayersError {
876 0 : fn from(e: GetVectoredError) -> Self {
877 0 : match e {
878 0 : GetVectoredError::Cancelled => CreateImageLayersError::Cancelled,
879 0 : _ => CreateImageLayersError::GetVectoredError(e),
880 : }
881 0 : }
882 : }
883 :
884 : impl From<GetVectoredError> for PageReconstructError {
885 12 : fn from(e: GetVectoredError) -> Self {
886 12 : match e {
887 0 : GetVectoredError::Cancelled => PageReconstructError::Cancelled,
888 0 : GetVectoredError::InvalidLsn(_) => PageReconstructError::Other(anyhow!("Invalid LSN")),
889 0 : err @ GetVectoredError::Oversized(_) => PageReconstructError::Other(err.into()),
890 8 : GetVectoredError::MissingKey(err) => PageReconstructError::MissingKey(err),
891 4 : GetVectoredError::GetReadyAncestorError(err) => PageReconstructError::from(err),
892 0 : GetVectoredError::Other(err) => PageReconstructError::Other(err),
893 : }
894 12 : }
895 : }
896 :
897 : impl From<GetReadyAncestorError> for PageReconstructError {
898 4 : fn from(e: GetReadyAncestorError) -> Self {
899 : use GetReadyAncestorError::*;
900 4 : match e {
901 0 : AncestorLsnTimeout(wait_err) => PageReconstructError::AncestorLsnTimeout(wait_err),
902 4 : bad_state @ BadState { .. } => PageReconstructError::Other(anyhow::anyhow!(bad_state)),
903 0 : Cancelled => PageReconstructError::Cancelled,
904 : }
905 4 : }
906 : }
907 :
908 : pub(crate) enum WaitLsnTimeout {
909 : Custom(Duration),
910 : // Use the [`PageServerConf::wait_lsn_timeout`] default
911 : Default,
912 : }
913 :
914 : pub(crate) enum WaitLsnWaiter<'a> {
915 : Timeline(&'a Timeline),
916 : Tenant,
917 : PageService,
918 : HttpEndpoint,
919 : }
920 :
921 : /// Argument to [`Timeline::shutdown`].
922 : #[derive(Debug, Clone, Copy)]
923 : pub(crate) enum ShutdownMode {
924 : /// Graceful shutdown, may do a lot of I/O as we flush any open layers to disk and then
925 : /// also to remote storage. This method can easily take multiple seconds for a busy timeline.
926 : ///
927 : /// While we are flushing, we continue to accept read I/O for LSNs ingested before
928 : /// the call to [`Timeline::shutdown`].
929 : FreezeAndFlush,
930 : /// Only flush the layers to the remote storage without freezing any open layers. Flush the deletion
931 : /// queue. This is the mode used by ancestor detach and any other operation that reloads a tenant
932 : /// without increasing the generation number. Note that this mode cannot be used at tenant shutdown,
933 : /// as flushing the deletion queue at that time will cause shutdown-in-progress errors.
934 : Reload,
935 : /// Shut down immediately, without waiting for any open layers to flush.
936 : Hard,
937 : }
938 :
939 : struct ImageLayerCreationOutcome {
940 : unfinished_image_layer: Option<ImageLayerWriter>,
941 : next_start_key: Key,
942 : }
943 :
944 : /// Public interface functions
945 : impl Timeline {
946 : /// Get the LSN where this branch was created
947 8 : pub(crate) fn get_ancestor_lsn(&self) -> Lsn {
948 8 : self.ancestor_lsn
949 8 : }
950 :
951 : /// Get the ancestor's timeline id
952 24 : pub(crate) fn get_ancestor_timeline_id(&self) -> Option<TimelineId> {
953 24 : self.ancestor_timeline
954 24 : .as_ref()
955 24 : .map(|ancestor| ancestor.timeline_id)
956 24 : }
957 :
958 : /// Get the ancestor timeline
959 4 : pub(crate) fn ancestor_timeline(&self) -> Option<&Arc<Timeline>> {
960 4 : self.ancestor_timeline.as_ref()
961 4 : }
962 :
963 : /// Get the bytes written since the PITR cutoff on this branch, and
964 : /// whether this branch's ancestor_lsn is within its parent's PITR.
965 0 : pub(crate) fn get_pitr_history_stats(&self) -> (u64, bool) {
966 0 : let gc_info = self.gc_info.read().unwrap();
967 0 : let history = self
968 0 : .get_last_record_lsn()
969 0 : .checked_sub(gc_info.cutoffs.time)
970 0 : .unwrap_or(Lsn(0))
971 0 : .0;
972 0 : (history, gc_info.within_ancestor_pitr)
973 0 : }
974 :
975 : /// Lock and get timeline's GC cutoff
976 1707902 : pub(crate) fn get_latest_gc_cutoff_lsn(&self) -> RcuReadGuard<Lsn> {
977 1707902 : self.latest_gc_cutoff_lsn.read()
978 1707902 : }
979 :
980 : /// Look up given page version.
981 : ///
982 : /// If a remote layer file is needed, it is downloaded as part of this
983 : /// call.
984 : ///
985 : /// This method enforces [`Self::pagestream_throttle`] internally.
986 : ///
987 : /// NOTE: It is considered an error to 'get' a key that doesn't exist. The
988 : /// abstraction above this needs to store suitable metadata to track what
989 : /// data exists with what keys, in separate metadata entries. If a
990 : /// non-existent key is requested, we may incorrectly return a value from
991 : /// an ancestor branch, for example, or waste a lot of cycles chasing the
992 : /// non-existing key.
993 : ///
994 : /// # Cancel-Safety
995 : ///
996 : /// This method is cancellation-safe.
997 : #[inline(always)]
998 1215186 : pub(crate) async fn get(
999 1215186 : &self,
1000 1215186 : key: Key,
1001 1215186 : lsn: Lsn,
1002 1215186 : ctx: &RequestContext,
1003 1215186 : ) -> Result<Bytes, PageReconstructError> {
1004 1215186 : if !lsn.is_valid() {
1005 0 : return Err(PageReconstructError::Other(anyhow::anyhow!("Invalid LSN")));
1006 1215186 : }
1007 1215186 :
1008 1215186 : // This check is debug-only because of the cost of hashing, and because it's a double-check: we
1009 1215186 : // already checked the key against the shard_identity when looking up the Timeline from
1010 1215186 : // page_service.
1011 1215186 : debug_assert!(!self.shard_identity.is_key_disposable(&key));
1012 :
1013 1215186 : let keyspace = KeySpace {
1014 1215186 : ranges: vec![key..key.next()],
1015 1215186 : };
1016 1215186 :
1017 1215186 : let mut reconstruct_state = ValuesReconstructState::new(IoConcurrency::sequential());
1018 :
1019 1215186 : let vectored_res = self
1020 1215186 : .get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx)
1021 1215186 : .await;
1022 :
1023 1215186 : let key_value = vectored_res?.pop_first();
1024 1215174 : match key_value {
1025 1215150 : Some((got_key, value)) => {
1026 1215150 : if got_key != key {
1027 0 : error!(
1028 0 : "Expected {}, but singular vectored get returned {}",
1029 : key, got_key
1030 : );
1031 0 : Err(PageReconstructError::Other(anyhow!(
1032 0 : "Singular vectored get returned wrong key"
1033 0 : )))
1034 : } else {
1035 1215150 : value
1036 : }
1037 : }
1038 24 : None => Err(PageReconstructError::MissingKey(MissingKeyError {
1039 24 : key,
1040 24 : shard: self.shard_identity.get_shard_number(&key),
1041 24 : cont_lsn: Lsn(0),
1042 24 : request_lsn: lsn,
1043 24 : ancestor_lsn: None,
1044 24 : backtrace: None,
1045 24 : })),
1046 : }
1047 1215186 : }
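// Usage sketch: a single-page read; the key must be known to exist (see the
// NOTE above about non-existent keys):
//
//     let page: Bytes = timeline.get(key, lsn, &ctx).await?;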
1048 :
1049 : pub(crate) const MAX_GET_VECTORED_KEYS: u64 = 32;
1050 : pub(crate) const LAYERS_VISITED_WARN_THRESHOLD: u32 = 100;
1051 :
1052 : /// Look up multiple page versions at a given LSN
1053 : ///
1054 : /// This naive implementation will be replaced with a more efficient one
1055 : /// which actually vectorizes the read path.
1056 39356 : pub(crate) async fn get_vectored(
1057 39356 : &self,
1058 39356 : keyspace: KeySpace,
1059 39356 : lsn: Lsn,
1060 39356 : io_concurrency: super::storage_layer::IoConcurrency,
1061 39356 : ctx: &RequestContext,
1062 39356 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1063 39356 : if !lsn.is_valid() {
1064 0 : return Err(GetVectoredError::InvalidLsn(lsn));
1065 39356 : }
1066 39356 :
1067 39356 : let key_count = keyspace.total_raw_size().try_into().unwrap();
1068 39356 : if key_count > Timeline::MAX_GET_VECTORED_KEYS {
1069 0 : return Err(GetVectoredError::Oversized(key_count));
1070 39356 : }
1071 :
1072 78712 : for range in &keyspace.ranges {
1073 39356 : let mut key = range.start;
1074 79180 : while key != range.end {
1075 39824 : assert!(!self.shard_identity.is_key_disposable(&key));
1076 39824 : key = key.next();
1077 : }
1078 : }
1079 :
1080 39356 : trace!(
1081 0 : "get vectored request for {:?}@{} from task kind {:?}",
1082 0 : keyspace,
1083 0 : lsn,
1084 0 : ctx.task_kind(),
1085 : );
1086 :
1087 39356 : let start = crate::metrics::GET_VECTORED_LATENCY
1088 39356 : .for_task_kind(ctx.task_kind())
1089 39356 : .map(|metric| (metric, Instant::now()));
1090 :
1091 39356 : let res = self
1092 39356 : .get_vectored_impl(
1093 39356 : keyspace.clone(),
1094 39356 : lsn,
1095 39356 : &mut ValuesReconstructState::new(io_concurrency),
1096 39356 : ctx,
1097 39356 : )
1098 39356 : .await;
1099 :
1100 39356 : if let Some((metric, start)) = start {
1101 0 : let elapsed = start.elapsed();
1102 0 : metric.observe(elapsed.as_secs_f64());
1103 39356 : }
1104 :
1105 39356 : res
1106 39356 : }
1107 :
1108 : /// Scan the keyspace and return all existing key-values in the keyspace. This currently uses vectored
1109 : /// get underlying. Normal vectored get would throw an error when a key in the keyspace is not found
1110 : /// during the search, but for the scan interface, it returns all existing key-value pairs, and does
1111 : /// not expect each single key in the key space will be found. The semantics is closer to the RocksDB
1112 : /// scan iterator interface. We could optimize this interface later to avoid some checks in the vectored
1113 : /// get path to maintain and split the probing and to-be-probe keyspace. We also need to ensure that
1114 : /// the scan operation will not cause OOM in the future.
1115 24 : pub(crate) async fn scan(
1116 24 : &self,
1117 24 : keyspace: KeySpace,
1118 24 : lsn: Lsn,
1119 24 : ctx: &RequestContext,
1120 24 : io_concurrency: super::storage_layer::IoConcurrency,
1121 24 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1122 24 : if !lsn.is_valid() {
1123 0 : return Err(GetVectoredError::InvalidLsn(lsn));
1124 24 : }
1125 24 :
1126 24 : trace!(
1127 0 : "key-value scan request for {:?}@{} from task kind {:?}",
1128 0 : keyspace,
1129 0 : lsn,
1130 0 : ctx.task_kind()
1131 : );
1132 :
1133 : // We should generalize this into Keyspace::contains in the future.
1134 48 : for range in &keyspace.ranges {
1135 24 : if range.start.field1 < METADATA_KEY_BEGIN_PREFIX
1136 24 : || range.end.field1 > METADATA_KEY_END_PREFIX
1137 : {
1138 0 : return Err(GetVectoredError::Other(anyhow::anyhow!(
1139 0 : "only metadata keyspace can be scanned"
1140 0 : )));
1141 24 : }
1142 : }
1143 :
1144 24 : let start = crate::metrics::SCAN_LATENCY
1145 24 : .for_task_kind(ctx.task_kind())
1146 24 : .map(ScanLatencyOngoingRecording::start_recording);
1147 :
1148 24 : let vectored_res = self
1149 24 : .get_vectored_impl(
1150 24 : keyspace.clone(),
1151 24 : lsn,
1152 24 : &mut ValuesReconstructState::new(io_concurrency),
1153 24 : ctx,
1154 24 : )
1155 24 : .await;
1156 :
1157 24 : if let Some(recording) = start {
1158 0 : recording.observe();
1159 24 : }
1160 :
1161 24 : vectored_res
1162 24 : }
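// Usage sketch: keys absent from the (metadata) keyspace are simply omitted
// from the result rather than surfacing as MissingKey errors:
//
//     let kvs = timeline.scan(keyspace, lsn, &ctx, io_concurrency).await?;
//     for (key, value) in kvs {
//         let bytes = value?; // per-key reconstruct errors are still possible
//     }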
1163 :
1164 1255274 : pub(super) async fn get_vectored_impl(
1165 1255274 : &self,
1166 1255274 : keyspace: KeySpace,
1167 1255274 : lsn: Lsn,
1168 1255274 : reconstruct_state: &mut ValuesReconstructState,
1169 1255274 : ctx: &RequestContext,
1170 1255274 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1171 1255274 : let traversal_res: Result<(), _> = self
1172 1255274 : .get_vectored_reconstruct_data(keyspace.clone(), lsn, reconstruct_state, ctx)
1173 1255274 : .await;
1174 1255274 : if let Err(err) = traversal_res {
1175 : // Wait for all the spawned IOs to complete.
1176 : // See comments on `spawn_io` inside `storage_layer` for more details.
1177 32 : let mut collect_futs = std::mem::take(&mut reconstruct_state.keys)
1178 32 : .into_values()
1179 32 : .map(|state| state.collect_pending_ios())
1180 32 : .collect::<FuturesUnordered<_>>();
1181 32 : while collect_futs.next().await.is_some() {}
1182 32 : return Err(err);
1183 1255242 : };
1184 1255242 :
1185 1255242 : let layers_visited = reconstruct_state.get_layers_visited();
1186 1255242 :
1187 1255242 : let futs = FuturesUnordered::new();
1188 1335910 : for (key, state) in std::mem::take(&mut reconstruct_state.keys) {
1189 1335910 : futs.push({
1190 1335910 : let walredo_self = self.myself.upgrade().expect("&self method holds the arc");
1191 1335910 : async move {
1192 1335910 : assert_eq!(state.situation, ValueReconstructSituation::Complete);
1193 :
1194 1335910 : let converted = match state.collect_pending_ios().await {
1195 1335910 : Ok(ok) => ok,
1196 0 : Err(err) => {
1197 0 : return (key, Err(err));
1198 : }
1199 : };
1200 1335910 : DELTAS_PER_READ_GLOBAL.observe(converted.num_deltas() as f64);
1201 1335910 :
1202 1335910 : // The walredo module expects the records to be descending in terms of Lsn.
1203 1335910 : // And we submit the IOs in that order, so there should be no need to sort here.
1204 1335910 : debug_assert!(
1205 1335910 : converted
1206 1335910 : .records
1207 1335910 : .is_sorted_by_key(|(lsn, _)| std::cmp::Reverse(*lsn)),
1208 0 : "{converted:?}"
1209 : );
1210 :
1211 : (
1212 1335910 : key,
1213 1335910 : walredo_self.reconstruct_value(key, lsn, converted).await,
1214 : )
1215 1335910 : }
1216 1335910 : });
1217 1335910 : }
1218 :
1219 1255242 : let results = futs
1220 1255242 : .collect::<BTreeMap<Key, Result<Bytes, PageReconstructError>>>()
1221 1255242 : .await;
1222 :
1223 : // For aux file keys (v1 or v2) the vectored read path does not return an error
1224 : // when they're missing. Instead they are omitted from the resulting btree
1225 : // (this is a requirement, not a bug). Skip updating the metric in these cases
1226 : // to avoid infinite results.
1227 1255242 : if !results.is_empty() {
1228 : // Record the total number of layers visited towards each key in the batch. While some
1229 : // layers may not intersect with a given read, and the cost of layer visits are
1230 : // amortized across the batch, each visited layer contributes directly to the observed
1231 : // latency for every read in the batch, which is what we care about.
1232 1254762 : if layers_visited >= Self::LAYERS_VISITED_WARN_THRESHOLD {
1233 0 : static LOG_PACER: Lazy<Mutex<RateLimit>> =
1234 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(60))));
1235 0 : LOG_PACER.lock().unwrap().call(|| {
1236 0 : let num_keys = keyspace.total_raw_size();
1237 0 : let num_pages = results.len();
1238 0 : tracing::info!(
1239 0 : shard_id = %self.tenant_shard_id.shard_slug(),
1240 0 : lsn = %lsn,
1241 0 : "Vectored read for {keyspace} visited {layers_visited} layers. Returned {num_pages}/{num_keys} pages.",
1242 : );
1243 0 : });
1244 1254762 : }
1245 :
1246 2590672 : for _ in &results {
1247 1335910 : self.metrics.layers_per_read.observe(layers_visited as f64);
1248 1335910 : LAYERS_PER_READ_GLOBAL.observe(layers_visited as f64);
1249 1335910 : }
1250 480 : }
1251 :
1252 1255242 : Ok(results)
1253 1255274 : }
1254 :
1255 : /// Get last or prev record separately. Same as get_last_record_rlsn().last/prev.
1256 548848 : pub(crate) fn get_last_record_lsn(&self) -> Lsn {
1257 548848 : self.last_record_lsn.load().last
1258 548848 : }
1259 :
1260 0 : pub(crate) fn get_prev_record_lsn(&self) -> Lsn {
1261 0 : self.last_record_lsn.load().prev
1262 0 : }
1263 :
1264 : /// Atomically get both last and prev.
1265 456 : pub(crate) fn get_last_record_rlsn(&self) -> RecordLsn {
1266 456 : self.last_record_lsn.load()
1267 456 : }
1268 :
1269 : /// Subscribe to callers of wait_lsn(). The value of the channel is None if there are no
1270 : /// wait_lsn() calls in progress, and Some(Lsn) if there is an active waiter for wait_lsn().
1271 0 : pub(crate) fn subscribe_for_wait_lsn_updates(&self) -> watch::Receiver<Option<Lsn>> {
1272 0 : self.last_record_lsn.status_receiver()
1273 0 : }
1274 :
1275 884 : pub(crate) fn get_disk_consistent_lsn(&self) -> Lsn {
1276 884 : self.disk_consistent_lsn.load()
1277 884 : }
1278 :
1279 : /// remote_consistent_lsn from the perspective of the tenant's current generation,
1280 : /// not validated with control plane yet.
1281 : /// See [`Self::get_remote_consistent_lsn_visible`].
1282 0 : pub(crate) fn get_remote_consistent_lsn_projected(&self) -> Option<Lsn> {
1283 0 : self.remote_client.remote_consistent_lsn_projected()
1284 0 : }
1285 :
1286 : /// remote_consistent_lsn which the tenant is guaranteed not to go backward from,
1287 : /// i.e. a value of remote_consistent_lsn_projected which has undergone
1288 : /// generation validation in the deletion queue.
1289 0 : pub(crate) fn get_remote_consistent_lsn_visible(&self) -> Option<Lsn> {
1290 0 : self.remote_client.remote_consistent_lsn_visible()
1291 0 : }
1292 :
1293 : /// The sum of the file size of all historic layers in the layer map.
1294 : /// This method makes no distinction between local and remote layers.
1295 : /// Hence, the result **does not represent local filesystem usage**.
1296 0 : pub(crate) async fn layer_size_sum(&self) -> u64 {
1297 0 : let guard = self.layers.read().await;
1298 0 : guard.layer_size_sum()
1299 0 : }
1300 :
1301 0 : pub(crate) fn resident_physical_size(&self) -> u64 {
1302 0 : self.metrics.resident_physical_size_get()
1303 0 : }
1304 :
1305 0 : pub(crate) fn get_directory_metrics(&self) -> [u64; DirectoryKind::KINDS_NUM] {
1306 0 : array::from_fn(|idx| self.directory_metrics[idx].load(AtomicOrdering::Relaxed))
1307 0 : }
1308 :
1309 : ///
1310 : /// Wait until WAL has been received and processed up to this LSN.
1311 : ///
1312 : /// You should call this before any of the other get_* or list_* functions. Calling
1313 : /// those functions with an LSN that has not been processed yet is an error.
1314 : ///
1315 452024 : pub(crate) async fn wait_lsn(
1316 452024 : &self,
1317 452024 : lsn: Lsn,
1318 452024 : who_is_waiting: WaitLsnWaiter<'_>,
1319 452024 : timeout: WaitLsnTimeout,
1320 452024 : ctx: &RequestContext, /* Prepare for use by cancellation */
1321 452024 : ) -> Result<(), WaitLsnError> {
1322 452024 : let state = self.current_state();
1323 452024 : if self.cancel.is_cancelled() || matches!(state, TimelineState::Stopping) {
1324 0 : return Err(WaitLsnError::Shutdown);
1325 452024 : } else if !matches!(state, TimelineState::Active) {
1326 0 : return Err(WaitLsnError::BadState(state));
1327 452024 : }
1328 452024 :
1329 452024 : if cfg!(debug_assertions) {
1330 452024 : match ctx.task_kind() {
1331 : TaskKind::WalReceiverManager
1332 : | TaskKind::WalReceiverConnectionHandler
1333 : | TaskKind::WalReceiverConnectionPoller => {
1334 0 : let is_myself = match who_is_waiting {
1335 0 : WaitLsnWaiter::Timeline(waiter) => Weak::ptr_eq(&waiter.myself, &self.myself),
1336 0 : WaitLsnWaiter::Tenant | WaitLsnWaiter::PageService | WaitLsnWaiter::HttpEndpoint => unreachable!("tenant or page_service context are not expected to have task kind {:?}", ctx.task_kind()),
1337 : };
1338 0 : if is_myself {
1339 0 : if let Err(current) = self.last_record_lsn.would_wait_for(lsn) {
1340 : // walingest is the only one that can advance last_record_lsn; it should make sure to never reach here
1341 0 : panic!("this timeline's walingest task is calling wait_lsn({lsn}) but we only have last_record_lsn={current}; would deadlock");
1342 0 : }
1343 0 : } else {
1344 0 : // if another timeline's is waiting for us, there's no deadlock risk because
1345 0 : // our walreceiver task can make progress independent of theirs
1346 0 : }
1347 : }
1348 452024 : _ => {}
1349 : }
1350 0 : }
1351 :
1352 452024 : let timeout = match timeout {
1353 0 : WaitLsnTimeout::Custom(t) => t,
1354 452024 : WaitLsnTimeout::Default => self.conf.wait_lsn_timeout,
1355 : };
1356 :
1357 452024 : let _timer = crate::metrics::WAIT_LSN_TIME.start_timer();
1358 452024 :
1359 452024 : match self.last_record_lsn.wait_for_timeout(lsn, timeout).await {
1360 452024 : Ok(()) => Ok(()),
1361 0 : Err(e) => {
1362 : use utils::seqwait::SeqWaitError::*;
1363 0 : match e {
1364 0 : Shutdown => Err(WaitLsnError::Shutdown),
1365 : Timeout => {
1366 : // don't count the time spent waiting for lock below, and also in walreceiver.status(), towards the wait_lsn_time_histo
1367 0 : drop(_timer);
1368 0 : let walreceiver_status = self.walreceiver_status();
1369 0 : Err(WaitLsnError::Timeout(format!(
1370 0 : "Timed out while waiting for WAL record at LSN {} to arrive, last_record_lsn {} disk consistent LSN={}, WalReceiver status: {}",
1371 0 : lsn,
1372 0 : self.get_last_record_lsn(),
1373 0 : self.get_disk_consistent_lsn(),
1374 0 : walreceiver_status,
1375 0 : )))
1376 : }
1377 : }
1378 : }
1379 : }
1380 452024 : }
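// Usage sketch: page_service-style callers wait for the LSN to arrive before
// reading at it:
//
//     timeline
//         .wait_lsn(lsn, WaitLsnWaiter::PageService, WaitLsnTimeout::Default, &ctx)
//         .await?;
//     let page = timeline.get(key, lsn, &ctx).await?;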
1381 :
1382 0 : pub(crate) fn walreceiver_status(&self) -> String {
1383 0 : match &*self.walreceiver.lock().unwrap() {
1384 0 : None => "stopping or stopped".to_string(),
1385 0 : Some(walreceiver) => match walreceiver.status() {
1386 0 : Some(status) => status.to_human_readable_string(),
1387 0 : None => "Not active".to_string(),
1388 : },
1389 : }
1390 0 : }
1391 :
1392 : /// Check that it is valid to request operations with that lsn.
1393 464 : pub(crate) fn check_lsn_is_in_scope(
1394 464 : &self,
1395 464 : lsn: Lsn,
1396 464 : latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
1397 464 : ) -> anyhow::Result<()> {
1398 464 : ensure!(
1399 464 : lsn >= **latest_gc_cutoff_lsn,
1400 8 : "LSN {} is earlier than latest GC cutoff {} (we might've already garbage collected needed data)",
1401 8 : lsn,
1402 8 : **latest_gc_cutoff_lsn,
1403 : );
1404 456 : Ok(())
1405 464 : }
1406 :
1407 : /// Initializes an LSN lease. The function will return an error if the requested LSN is less than the `latest_gc_cutoff_lsn`.
1408 20 : pub(crate) fn init_lsn_lease(
1409 20 : &self,
1410 20 : lsn: Lsn,
1411 20 : length: Duration,
1412 20 : ctx: &RequestContext,
1413 20 : ) -> anyhow::Result<LsnLease> {
1414 20 : self.make_lsn_lease(lsn, length, true, ctx)
1415 20 : }
1416 :
1417 : /// Renews a lease at a particular LSN. The requested LSN is not validated against the `latest_gc_cutoff_lsn` when we are in the grace period.
1418 8 : pub(crate) fn renew_lsn_lease(
1419 8 : &self,
1420 8 : lsn: Lsn,
1421 8 : length: Duration,
1422 8 : ctx: &RequestContext,
1423 8 : ) -> anyhow::Result<LsnLease> {
1424 8 : self.make_lsn_lease(lsn, length, false, ctx)
1425 8 : }
1426 :
1427 : /// Obtains a temporary lease blocking garbage collection for the given LSN.
1428 : ///
1429 : /// If we are in `AttachedSingle` mode and are not blocked by the lsn lease deadline, this function will error
1430 : /// if the requested LSN is less than the `latest_gc_cutoff_lsn` and there is no existing lease present.
1431 : ///
1432 : /// If there is an existing lease in the map, the lease will be renewed only if the request extends the lease.
1433 : /// The returned lease is therefore the maximum between the existing lease and the requesting lease.
1434 28 : fn make_lsn_lease(
1435 28 : &self,
1436 28 : lsn: Lsn,
1437 28 : length: Duration,
1438 28 : init: bool,
1439 28 : _ctx: &RequestContext,
1440 28 : ) -> anyhow::Result<LsnLease> {
1441 24 : let lease = {
1442 : // Normalize the requested LSN to be aligned, and move to the first record
1443 : // if it points to the beginning of the page (header).
1444 28 : let lsn = xlog_utils::normalize_lsn(lsn, WAL_SEGMENT_SIZE);
1445 28 :
1446 28 : let mut gc_info = self.gc_info.write().unwrap();
1447 28 :
1448 28 : let valid_until = SystemTime::now() + length;
1449 28 :
1450 28 : let entry = gc_info.leases.entry(lsn);
1451 28 :
1452 28 : match entry {
1453 12 : Entry::Occupied(mut occupied) => {
1454 12 : let existing_lease = occupied.get_mut();
1455 12 : if valid_until > existing_lease.valid_until {
1456 4 : existing_lease.valid_until = valid_until;
1457 4 : let dt: DateTime<Utc> = valid_until.into();
1458 4 : info!("lease extended to {}", dt);
1459 : } else {
1460 8 : let dt: DateTime<Utc> = existing_lease.valid_until.into();
1461 8 : info!("existing lease covers greater length, valid until {}", dt);
1462 : }
1463 :
1464 12 : existing_lease.clone()
1465 : }
1466 16 : Entry::Vacant(vacant) => {
1467 : // Reject already GC-ed LSN (lsn < latest_gc_cutoff) if we are in AttachedSingle and
1468 : // not blocked by the lsn lease deadline.
1469 16 : let validate = {
1470 16 : let conf = self.tenant_conf.load();
1471 16 : conf.location.attach_mode == AttachmentMode::Single
1472 16 : && !conf.is_gc_blocked_by_lsn_lease_deadline()
1473 : };
1474 :
1475 16 : if init || validate {
1476 16 : let latest_gc_cutoff_lsn = self.get_latest_gc_cutoff_lsn();
1477 16 : if lsn < *latest_gc_cutoff_lsn {
1478 4 : bail!("tried to request a page version that was garbage collected. requested at {} gc cutoff {}", lsn, *latest_gc_cutoff_lsn);
1479 12 : }
1480 0 : }
1481 :
1482 12 : let dt: DateTime<Utc> = valid_until.into();
1483 12 : info!("lease created, valid until {}", dt);
1484 12 : vacant.insert(LsnLease { valid_until }).clone()
1485 : }
1486 : }
1487 : };
1488 :
1489 24 : Ok(lease)
1490 28 : }
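// A self-contained sketch (hypothetical `Lease` stand-in, not part of this
// file) of the "renewal only extends" rule documented above: a shorter request
// never shortens an existing lease, so the returned lease is always the
// maximum of the existing and the requested one.
//
//     use std::collections::BTreeMap;
//     use std::time::{Duration, SystemTime};
//
//     #[derive(Clone)]
//     struct Lease { valid_until: SystemTime }
//
//     fn upsert_lease(
//         leases: &mut BTreeMap<u64, Lease>,
//         lsn: u64,
//         length: Duration,
//     ) -> Lease {
//         let valid_until = SystemTime::now() + length;
//         let entry = leases.entry(lsn).or_insert(Lease { valid_until });
//         // Only move the expiry forward, never backward.
//         if valid_until > entry.valid_until {
//             entry.valid_until = valid_until;
//         }
//         entry.clone()
//     }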
1491 :
1492 : /// Freeze the current open in-memory layer. It will be written to disk on next iteration.
1493 : /// Returns the flush request ID which can be awaited with wait_flush_completion().
1494 : #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
1495 : pub(crate) async fn freeze(&self) -> Result<u64, FlushLayerError> {
1496 : self.freeze0().await
1497 : }
1498 :
1499 : /// Freeze and flush the open in-memory layer, waiting for it to be written to disk.
1500 : #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
1501 : pub(crate) async fn freeze_and_flush(&self) -> Result<(), FlushLayerError> {
1502 : self.freeze_and_flush0().await
1503 : }
1504 :
1505 : /// Freeze the current open in-memory layer. It will be written to disk on next iteration.
1506 : /// Returns the flush request ID which can be awaited with wait_flush_completion().
1507 2240 : pub(crate) async fn freeze0(&self) -> Result<u64, FlushLayerError> {
1508 2240 : let mut g = self.write_lock.lock().await;
1509 2240 : let to_lsn = self.get_last_record_lsn();
1510 2240 : self.freeze_inmem_layer_at(to_lsn, &mut g).await
1511 2240 : }
1512 :
1513 : // This exists to provide a non-span creating version of `freeze_and_flush` we can call without
1514 : // polluting the span hierarchy.
1515 2240 : pub(crate) async fn freeze_and_flush0(&self) -> Result<(), FlushLayerError> {
1516 2240 : let token = self.freeze0().await?;
1517 2240 : self.wait_flush_completion(token).await
1518 2240 : }
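// Usage sketch (hypothetical caller): the flush request ID returned by
// freeze0() can be awaited separately, which is exactly what the two calls in
// freeze_and_flush0 above do in sequence:
//
//     let token = timeline.freeze0().await?;        // freeze the open layer
//     timeline.wait_flush_completion(token).await?; // wait for it to reach disk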
1519 :
1520 : // Check if an open ephemeral layer should be closed: this provides
1521 : // background enforcement of the checkpoint interval when there is no active WAL receiver, to avoid keeping
1522 : // an ephemeral layer open forever when idle. It also freezes layers if the global limit on
1523 : // ephemeral layer bytes has been breached.
1524 0 : pub(super) async fn maybe_freeze_ephemeral_layer(&self) {
1525 0 : let Ok(mut write_guard) = self.write_lock.try_lock() else {
1526 : // If the write lock is held, there is an active wal receiver: rolling open layers
1527 : // is their responsibility while they hold this lock.
1528 0 : return;
1529 : };
1530 :
1531 : // FIXME: why not early exit? because before #7927 the state would had been cleared every
1532 : // time, and this was missed.
1533 : // if write_guard.is_none() { return; }
1534 :
1535 0 : let Ok(layers_guard) = self.layers.try_read() else {
1536 : // Don't block if the layer lock is busy
1537 0 : return;
1538 : };
1539 :
1540 0 : let Ok(lm) = layers_guard.layer_map() else {
1541 0 : return;
1542 : };
1543 :
1544 0 : let Some(open_layer) = &lm.open_layer else {
1545 : // If there is no open layer, we have no layer freezing to do. However, we might need to generate
1546 : // some updates to disk_consistent_lsn and remote_consistent_lsn, in case we ingested some WAL regions
1547 : // that didn't result in writes to this shard.
1548 :
1549 : // Must not hold the layers lock while waiting for a flush.
1550 0 : drop(layers_guard);
1551 0 :
1552 0 : let last_record_lsn = self.get_last_record_lsn();
1553 0 : let disk_consistent_lsn = self.get_disk_consistent_lsn();
1554 0 : if last_record_lsn > disk_consistent_lsn {
1555 : // We have no open layer, but disk_consistent_lsn is behind the last record: this indicates
1556 : // we are a sharded tenant and have skipped some WAL
1557 0 : let last_freeze_ts = *self.last_freeze_ts.read().unwrap();
1558 0 : if last_freeze_ts.elapsed() >= self.get_checkpoint_timeout() {
1559 : // Only do this if we have been layer-less for longer than
1560 : // get_checkpoint_timeout, so that a shard without any data ingested (yet)
1561 : // doesn't write a remote index as soon as it sees its LSN advance.
1563 0 : tracing::debug!(
1564 0 : "Advancing disk_consistent_lsn past WAL ingest gap {} -> {}",
1565 : disk_consistent_lsn,
1566 : last_record_lsn
1567 : );
1568 :
1569 : // The flush loop will update remote consistent LSN as well as disk consistent LSN.
1570 : // We know there is no open layer, so we can request freezing without actually
1571 : // freezing anything. This is true even if we have dropped the layers_guard, we
1572 : // still hold the write_guard.
1573 0 : let _ = async {
1574 0 : let token = self
1575 0 : .freeze_inmem_layer_at(last_record_lsn, &mut write_guard)
1576 0 : .await?;
1577 0 : self.wait_flush_completion(token).await
1578 0 : }
1579 0 : .await;
1580 0 : }
1581 0 : }
1582 :
1583 0 : return;
1584 : };
1585 :
1586 0 : let Some(current_size) = open_layer.try_len() else {
1587 : // Unexpected: since we hold the write guard, nobody else should be writing to this layer, so
1588 : // taking a read lock to get its size should always succeed.
1589 0 : tracing::warn!("Lock conflict while reading size of open layer");
1590 0 : return;
1591 : };
1592 :
1593 0 : let current_lsn = self.get_last_record_lsn();
1594 :
1595 0 : let checkpoint_distance_override = open_layer.tick().await;
1596 :
1597 0 : if let Some(size_override) = checkpoint_distance_override {
1598 0 : if current_size > size_override {
1599 : // This is not harmful, but it only happens in relatively rare cases where
1600 : // time-based checkpoints are not happening fast enough to keep the amount of
1601 : // ephemeral data within configured limits. It's a sign of stress on the system.
1602 0 : tracing::info!("Early-rolling open layer at size {current_size} (limit {size_override}) due to dirty data pressure");
1603 0 : }
1604 0 : }
1605 :
1606 0 : let checkpoint_distance =
1607 0 : checkpoint_distance_override.unwrap_or(self.get_checkpoint_distance());
1608 0 :
1609 0 : if self.should_roll(
1610 0 : current_size,
1611 0 : current_size,
1612 0 : checkpoint_distance,
1613 0 : self.get_last_record_lsn(),
1614 0 : self.last_freeze_at.load(),
1615 0 : open_layer.get_opened_at(),
1616 0 : ) {
1617 0 : match open_layer.info() {
1618 0 : InMemoryLayerInfo::Frozen { lsn_start, lsn_end } => {
1619 0 : // We may reach this point if the layer was already frozen but not yet flushed: flushing
1620 0 : // happens asynchronously in the background.
1621 0 : tracing::debug!(
1622 0 : "Not freezing open layer, it's already frozen ({lsn_start}..{lsn_end})"
1623 : );
1624 : }
1625 : InMemoryLayerInfo::Open { .. } => {
1626 : // Upgrade to a write lock and freeze the layer
1627 0 : drop(layers_guard);
1628 0 : let res = self
1629 0 : .freeze_inmem_layer_at(current_lsn, &mut write_guard)
1630 0 : .await;
1631 :
1632 0 : if let Err(e) = res {
1633 0 : tracing::info!(
1634 0 : "failed to flush frozen layer after background freeze: {e:#}"
1635 : );
1636 0 : }
1637 : }
1638 : }
1639 0 : }
1640 0 : }
1641 :
1642 : /// Checks if the internal state of the timeline is consistent with it being able to be offloaded.
1643 : ///
1644 : /// This is necessary but not sufficient for offloading the timeline, as it might have
1645 : /// child timelines that are not offloaded yet.
1646 0 : pub(crate) fn can_offload(&self) -> (bool, &'static str) {
1647 0 : if self.remote_client.is_archived() != Some(true) {
1648 0 : return (false, "the timeline is not archived");
1649 0 : }
1650 0 : if !self.remote_client.no_pending_work() {
1651 : // if the remote client is still processing some work, we can't offload
1652 0 : return (false, "the upload queue is not drained yet");
1653 0 : }
1654 0 :
1655 0 : (true, "ok")
1656 0 : }
1657 :
1658 : /// Outermost timeline compaction operation; downloads needed layers. Returns whether we have pending
1659 : /// compaction tasks.
1660 728 : pub(crate) async fn compact(
1661 728 : self: &Arc<Self>,
1662 728 : cancel: &CancellationToken,
1663 728 : flags: EnumSet<CompactFlags>,
1664 728 : ctx: &RequestContext,
1665 728 : ) -> Result<bool, CompactionError> {
1666 728 : self.compact_with_options(
1667 728 : cancel,
1668 728 : CompactOptions {
1669 728 : flags,
1670 728 : compact_key_range: None,
1671 728 : compact_lsn_range: None,
1672 728 : sub_compaction: false,
1673 728 : sub_compaction_max_job_size_mb: None,
1674 728 : },
1675 728 : ctx,
1676 728 : )
1677 728 : .await
1678 728 : }
1679 :
1680 : /// Outermost timeline compaction operation; downloads needed layers. Returns whether we have pending
1681 : /// compaction tasks.
1682 728 : pub(crate) async fn compact_with_options(
1683 728 : self: &Arc<Self>,
1684 728 : cancel: &CancellationToken,
1685 728 : options: CompactOptions,
1686 728 : ctx: &RequestContext,
1687 728 : ) -> Result<bool, CompactionError> {
1688 728 : // most likely the cancellation token is from background task, but in tests it could be the
1689 728 : // request task as well.
1690 728 :
1691 728 : let prepare = async move {
1692 728 : let guard = self.compaction_lock.lock().await;
1693 :
1694 728 : let permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
1695 728 : BackgroundLoopKind::Compaction,
1696 728 : ctx,
1697 728 : )
1698 728 : .await;
1699 :
1700 728 : (guard, permit)
1701 728 : };
1702 :
1703 : // this wait probably never needs any "long time spent" logging, because we already nag if
1704 : // the compaction task goes over its period (20s), which happens quite often in production.
1705 728 : let (_guard, _permit) = tokio::select! {
1706 728 : tuple = prepare => { tuple },
1707 728 : _ = self.cancel.cancelled() => return Ok(false),
1708 728 : _ = cancel.cancelled() => return Ok(false),
1709 : };
1710 :
1711 728 : let last_record_lsn = self.get_last_record_lsn();
1712 728 :
1713 728 : // The last record LSN could be zero if the timeline was just created
1714 728 : if !last_record_lsn.is_valid() {
1715 0 : warn!("Skipping compaction for potentially just initialized timeline, it has invalid last record lsn: {last_record_lsn}");
1716 0 : return Ok(false);
1717 728 : }
1718 :
1719 728 : let result = match self.get_compaction_algorithm_settings().kind {
1720 : CompactionAlgorithm::Tiered => {
1721 0 : self.compact_tiered(cancel, ctx).await?;
1722 0 : Ok(false)
1723 : }
1724 728 : CompactionAlgorithm::Legacy => self.compact_legacy(cancel, options, ctx).await,
1725 : };
1726 :
1727 : // Signal compaction failure to avoid L0 flush stalls when it's broken.
1728 0 : match result {
1729 728 : Ok(_) => self.compaction_failed.store(false, AtomicOrdering::Relaxed),
1730 : Err(CompactionError::Other(_)) => {
1731 0 : self.compaction_failed.store(true, AtomicOrdering::Relaxed)
1732 : }
1733 : // Don't change the current value on offload failure or shutdown. We don't want to
1734 : // abruptly stall nor resume L0 flushes in these cases.
1735 0 : Err(CompactionError::Offload(_)) => {}
1736 0 : Err(CompactionError::ShuttingDown) => {}
1737 : };
1738 :
1739 728 : result
1740 728 : }
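// A generic, self-contained sketch (tokio and tokio-util assumed) of the
// pattern used above: race an async "prepare" step against two cancellation
// tokens and bail out early if either fires.
//
//     use tokio_util::sync::CancellationToken;
//
//     async fn run_guarded<T>(
//         prepare: impl std::future::Future<Output = T>,
//         task_cancel: &CancellationToken,
//         request_cancel: &CancellationToken,
//     ) -> Option<T> {
//         tokio::select! {
//             value = prepare => Some(value),
//             _ = task_cancel.cancelled() => None,
//             _ = request_cancel.cancelled() => None,
//         }
//     }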
1741 :
1742 : /// Mutate the timeline with a [`TimelineWriter`].
1743 10266380 : pub(crate) async fn writer(&self) -> TimelineWriter<'_> {
1744 10266380 : TimelineWriter {
1745 10266380 : tl: self,
1746 10266380 : write_guard: self.write_lock.lock().await,
1747 : }
1748 10266380 : }
1749 :
1750 0 : pub(crate) fn activate(
1751 0 : self: &Arc<Self>,
1752 0 : parent: Arc<crate::tenant::Tenant>,
1753 0 : broker_client: BrokerClientChannel,
1754 0 : background_jobs_can_start: Option<&completion::Barrier>,
1755 0 : ctx: &RequestContext,
1756 0 : ) {
1757 0 : if self.tenant_shard_id.is_shard_zero() {
1758 0 : // Logical size is only maintained accurately on shard zero.
1759 0 : self.spawn_initial_logical_size_computation_task(ctx);
1760 0 : }
1761 0 : self.launch_wal_receiver(ctx, broker_client);
1762 0 : self.set_state(TimelineState::Active);
1763 0 : self.launch_eviction_task(parent, background_jobs_can_start);
1764 0 : }
1765 :
1766 : /// After this function returns, no timeline-scoped tasks are left running.
1767 : ///
1768 : /// The preferred pattern is:
1769 : /// - in any spawned tasks, keep Timeline::guard open + Timeline::cancel / child token
1770 : /// - if early shutdown (not just cancellation) of a sub-tree of tasks is required,
1771 : /// go the extra mile and keep track of JoinHandles
1772 : /// - Keep track of JoinHandles using a passed-down `Arc<Mutex<Option<JoinSet>>>` or similar,
1773 : /// instead of spawning directly on a runtime. It is a more composable / testable pattern (a minimal sketch follows this function).
1774 : ///
1775 : /// For legacy reasons, we still have multiple tasks spawned using
1776 : /// `task_mgr::spawn(X, Some(tenant_id), Some(timeline_id))`.
1777 : /// We refer to these as "timeline-scoped task_mgr tasks".
1778 : /// Some of these tasks are already sensitive to Timeline::cancel while others are
1779 : /// not sensitive to Timeline::cancel and instead respect [`task_mgr::shutdown_token`]
1780 : /// or [`task_mgr::shutdown_watcher`].
1781 : /// We want to gradually convert the code base away from these.
1782 : ///
1783 : /// Here is an inventory of timeline-scoped task_mgr tasks that are still sensitive to
1784 : /// `task_mgr::shutdown_{token,watcher}` (there are also tenant-scoped and global-scoped
1785 : /// ones that aren't mentioned here):
1786 : /// - [`TaskKind::TimelineDeletionWorker`]
1787 : /// - NB: also used for tenant deletion
1788 : /// - [`TaskKind::RemoteUploadTask`]
1789 : /// - [`TaskKind::InitialLogicalSizeCalculation`]
1790 : /// - [`TaskKind::DownloadAllRemoteLayers`] (can we get rid of it?)
1791 : /// Inventory of timeline-scoped task_mgr tasks that use spawn but aren't sensitive:
1792 : /// - [`TaskKind::Eviction`]
1793 : /// - [`TaskKind::LayerFlushTask`]
1794 : /// - [`TaskKind::OndemandLogicalSizeCalculation`]
1795 : /// - [`TaskKind::GarbageCollector`] (immediate_gc is timeline-scoped)
1796 20 : pub(crate) async fn shutdown(&self, mode: ShutdownMode) {
1797 20 : debug_assert_current_span_has_tenant_and_timeline_id();
1798 20 :
1799 20 : // Regardless of whether we're going to try_freeze_and_flush
1800 20 : // or not, stop ingesting any more data. Walreceiver only provides
1801 20 : // cancellation but no "wait until gone", because it uses the Timeline::gate.
1802 20 : // So, only after the self.gate.close() below will we know for sure that
1803 20 : // no walreceiver tasks are left.
1804 20 : // For `try_freeze_and_flush=true`, this means that we might still be ingesting
1805 20 : // data during the call to `self.freeze_and_flush()` below.
1806 20 : // That's not ideal, but, we don't have the concept of a ChildGuard,
1807 20 : // which is what we'd need to properly model early shutdown of the walreceiver
1808 20 : // task sub-tree before the other Timeline task sub-trees.
1809 20 : let walreceiver = self.walreceiver.lock().unwrap().take();
1810 20 : tracing::debug!(
1811 0 : is_some = walreceiver.is_some(),
1812 0 : "Waiting for WalReceiverManager..."
1813 : );
1814 20 : if let Some(walreceiver) = walreceiver {
1815 0 : walreceiver.cancel();
1816 20 : }
1817 : // ... and inform any waiters for newer LSNs that there won't be any.
1818 20 : self.last_record_lsn.shutdown();
1819 20 :
1820 20 : if let ShutdownMode::FreezeAndFlush = mode {
1821 12 : let do_flush = if let Some((open, frozen)) = self
1822 12 : .layers
1823 12 : .read()
1824 12 : .await
1825 12 : .layer_map()
1826 12 : .map(|lm| (lm.open_layer.is_some(), lm.frozen_layers.len()))
1827 12 : .ok()
1828 12 : .filter(|(open, frozen)| *open || *frozen > 0)
1829 : {
1830 0 : if self.remote_client.is_archived() == Some(true) {
1831 : // No point flushing on shutdown for an archived timeline: it is not important
1832 : // to have it nice and fresh after our restart, and trying to flush here might
1833 : // race with trying to offload it (which also stops the flush loop)
1834 0 : false
1835 : } else {
1836 0 : tracing::info!(?open, frozen, "flushing and freezing on shutdown");
1837 0 : true
1838 : }
1839 : } else {
1840 : // this is a double shutdown; it'll be a no-op
1841 12 : true
1842 : };
1843 :
1844 : // we shut down walreceiver above, so, we won't add anything more
1845 : // to the InMemoryLayer; freeze it and wait for all frozen layers
1846 : // to reach the disk & upload queue, then shut the upload queue and
1847 : // wait for it to drain.
1848 12 : if do_flush {
1849 12 : match self.freeze_and_flush().await {
1850 : Ok(_) => {
1851 : // drain the upload queue
1852 : // if we did not wait for completion here, it might be our shutdown process
1853 : // didn't wait for remote uploads to complete at all, as new tasks can forever
1854 : // be spawned.
1855 : //
1856 : // what is problematic is shutting down the RemoteTimelineClient: it does not
1857 : // make sense to stop while we wait for it, but what about corner cases like
1858 : // s3 suddenly hanging up?
1859 12 : self.remote_client.shutdown().await;
1860 : }
1861 : Err(FlushLayerError::Cancelled) => {
1862 : // this is likely the second shutdown, ignore silently.
1863 : // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080 is fixed
1864 0 : debug_assert!(self.cancel.is_cancelled());
1865 : }
1866 0 : Err(e) => {
1867 0 : // Non-fatal. Shutdown is infallible. Failures to flush just mean that
1868 0 : // we have some extra WAL replay to do next time the timeline starts.
1869 0 : warn!("failed to freeze and flush: {e:#}");
1870 : }
1871 : }
1872 :
1873 : // `self.remote_client.shutdown().await` above should have already flushed everything from the queue, but
1874 : // we also do a final check here to ensure that the queue is empty.
1875 12 : if !self.remote_client.no_pending_work() {
1876 0 : warn!("still have pending work in remote upload queue, but continuing shutting down anyways");
1877 12 : }
1878 0 : }
1879 8 : }
1880 :
1881 20 : if let ShutdownMode::Reload = mode {
1882 : // drain the upload queue
1883 4 : self.remote_client.shutdown().await;
1884 4 : if !self.remote_client.no_pending_work() {
1885 0 : warn!("still have pending work in remote upload queue, but continuing shutting down anyways");
1886 4 : }
1887 16 : }
1888 :
1889 : // Signal any subscribers to our cancellation token to drop out
1890 20 : tracing::debug!("Cancelling CancellationToken");
1891 20 : self.cancel.cancel();
1892 20 :
1893 20 : // Prevent new page service requests from starting.
1894 20 : self.handles.shutdown();
1895 20 :
1896 20 : // Transition the remote_client into a state where it's only useful for timeline deletion.
1897 20 : // (The deletion use case is why we can't just hook up remote_client to Self::cancel.)
1898 20 : self.remote_client.stop();
1899 20 :
1900 20 : // As documented in remote_client.stop()'s doc comment, it's our responsibility
1901 20 : // to shut down the upload queue tasks.
1902 20 : // TODO: fix that, task management should be encapsulated inside remote_client.
1903 20 : task_mgr::shutdown_tasks(
1904 20 : Some(TaskKind::RemoteUploadTask),
1905 20 : Some(self.tenant_shard_id),
1906 20 : Some(self.timeline_id),
1907 20 : )
1908 20 : .await;
1909 :
1910 : // TODO: work toward making this a no-op. See this function's doc comment for more context.
1911 20 : tracing::debug!("Waiting for tasks...");
1912 20 : task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), Some(self.timeline_id)).await;
1913 :
1914 : {
1915 : // Allow any remaining in-memory layers to do cleanup -- until that, they hold the gate
1916 : // open.
1917 20 : let mut write_guard = self.write_lock.lock().await;
1918 20 : self.layers.write().await.shutdown(&mut write_guard);
1919 20 : }
1920 20 :
1921 20 : // Finally wait until any gate-holders are complete.
1922 20 : //
1923 20 : // TODO: once above shutdown_tasks is a no-op, we can close the gate before calling shutdown_tasks
1924 20 : // and use a TBD variant of shutdown_tasks that asserts that there were no tasks left.
1925 20 : self.gate.close().await;
1926 :
1927 20 : self.metrics.shutdown();
1928 20 : }
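// A minimal, self-contained sketch (hypothetical types, not how the legacy
// task_mgr tasks work) of the JoinSet-tracking pattern recommended in the doc
// comment above: shutdown cancels the shared token, then joins every task.
//
//     use std::sync::{Arc, Mutex};
//     use tokio::task::JoinSet;
//     use tokio_util::sync::CancellationToken;
//
//     struct Tasks {
//         cancel: CancellationToken,
//         join_set: Arc<Mutex<Option<JoinSet<()>>>>,
//     }
//
//     impl Tasks {
//         async fn shutdown(&self) {
//             self.cancel.cancel();
//             // Take the set out (no new tasks can be added), then drain it
//             // without holding the lock across an await point.
//             let join_set = self.join_set.lock().unwrap().take();
//             if let Some(mut join_set) = join_set {
//                 while join_set.join_next().await.is_some() {}
//             }
//         }
//     }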
1929 :
1930 896 : pub(crate) fn set_state(&self, new_state: TimelineState) {
1931 896 : match (self.current_state(), new_state) {
1932 896 : (equal_state_1, equal_state_2) if equal_state_1 == equal_state_2 => {
1933 4 : info!("Ignoring new state, equal to the existing one: {equal_state_2:?}");
1934 : }
1935 0 : (st, TimelineState::Loading) => {
1936 0 : error!("ignoring transition from {st:?} into Loading state");
1937 : }
1938 0 : (TimelineState::Broken { .. }, new_state) => {
1939 0 : error!("Ignoring state update {new_state:?} for broken timeline");
1940 : }
1941 : (TimelineState::Stopping, TimelineState::Active) => {
1942 0 : error!("Not activating a Stopping timeline");
1943 : }
1944 892 : (_, new_state) => {
1945 892 : self.state.send_replace(new_state);
1946 892 : }
1947 : }
1948 896 : }
1949 :
1950 4 : pub(crate) fn set_broken(&self, reason: String) {
1951 4 : let backtrace_str: String = format!("{}", std::backtrace::Backtrace::force_capture());
1952 4 : let broken_state = TimelineState::Broken {
1953 4 : reason,
1954 4 : backtrace: backtrace_str,
1955 4 : };
1956 4 : self.set_state(broken_state);
1957 4 :
1958 4 : // Although the Broken state is not equivalent to shutdown() (shutdown will be called
1959 4 : // later when this tenant is detached or the process shuts down), firing the cancellation token
1960 4 : // here avoids the need for other tasks to watch for the Broken state explicitly.
1961 4 : self.cancel.cancel();
1962 4 : }
1963 :
1964 454100 : pub(crate) fn current_state(&self) -> TimelineState {
1965 454100 : self.state.borrow().clone()
1966 454100 : }
1967 :
1968 12 : pub(crate) fn is_broken(&self) -> bool {
1969 12 : matches!(&*self.state.borrow(), TimelineState::Broken { .. })
1970 12 : }
1971 :
1972 444 : pub(crate) fn is_active(&self) -> bool {
1973 444 : self.current_state() == TimelineState::Active
1974 444 : }
1975 :
1976 0 : pub(crate) fn is_archived(&self) -> Option<bool> {
1977 0 : self.remote_client.is_archived()
1978 0 : }
1979 :
1980 736 : pub(crate) fn is_stopping(&self) -> bool {
1981 736 : self.current_state() == TimelineState::Stopping
1982 736 : }
1983 :
1984 0 : pub(crate) fn subscribe_for_state_updates(&self) -> watch::Receiver<TimelineState> {
1985 0 : self.state.subscribe()
1986 0 : }
1987 :
1988 452028 : pub(crate) async fn wait_to_become_active(
1989 452028 : &self,
1990 452028 : _ctx: &RequestContext, // Prepare for use by cancellation
1991 452028 : ) -> Result<(), TimelineState> {
1992 452028 : let mut receiver = self.state.subscribe();
1993 : loop {
1994 452028 : let current_state = receiver.borrow().clone();
1995 452028 : match current_state {
1996 : TimelineState::Loading => {
1997 0 : receiver
1998 0 : .changed()
1999 0 : .await
2000 0 : .expect("holding a reference to self");
2001 : }
2002 : TimelineState::Active { .. } => {
2003 452024 : return Ok(());
2004 : }
2005 : TimelineState::Broken { .. } | TimelineState::Stopping => {
2006 : // There's no chance the timeline can transition back into ::Active
2007 4 : return Err(current_state);
2008 : }
2009 : }
2010 : }
2011 452028 : }
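// Generic sketch of the watch-channel wait loop used above: re-read the
// current value after every change notification until a caller-supplied
// predicate is satisfied.
//
//     use tokio::sync::watch;
//
//     async fn wait_for<T: Clone>(
//         mut rx: watch::Receiver<T>,
//         mut done: impl FnMut(&T) -> bool,
//     ) -> Result<T, watch::error::RecvError> {
//         loop {
//             let current = rx.borrow().clone();
//             if done(&current) {
//                 return Ok(current);
//             }
//             // Errors only if the sender has been dropped.
//             rx.changed().await?;
//         }
//     }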
2012 :
2013 0 : pub(crate) async fn layer_map_info(
2014 0 : &self,
2015 0 : reset: LayerAccessStatsReset,
2016 0 : ) -> Result<LayerMapInfo, layer_manager::Shutdown> {
2017 0 : let guard = self.layers.read().await;
2018 0 : let layer_map = guard.layer_map()?;
2019 0 : let mut in_memory_layers = Vec::with_capacity(layer_map.frozen_layers.len() + 1);
2020 0 : if let Some(open_layer) = &layer_map.open_layer {
2021 0 : in_memory_layers.push(open_layer.info());
2022 0 : }
2023 0 : for frozen_layer in &layer_map.frozen_layers {
2024 0 : in_memory_layers.push(frozen_layer.info());
2025 0 : }
2026 :
2027 0 : let historic_layers = layer_map
2028 0 : .iter_historic_layers()
2029 0 : .map(|desc| guard.get_from_desc(&desc).info(reset))
2030 0 : .collect();
2031 0 :
2032 0 : Ok(LayerMapInfo {
2033 0 : in_memory_layers,
2034 0 : historic_layers,
2035 0 : })
2036 0 : }
2037 :
2038 : #[instrument(skip_all, fields(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))]
2039 : pub(crate) async fn download_layer(
2040 : &self,
2041 : layer_file_name: &LayerName,
2042 : ) -> Result<Option<bool>, super::storage_layer::layer::DownloadError> {
2043 : let Some(layer) = self
2044 : .find_layer(layer_file_name)
2045 : .await
2046 0 : .map_err(|e| match e {
2047 0 : layer_manager::Shutdown => {
2048 0 : super::storage_layer::layer::DownloadError::TimelineShutdown
2049 0 : }
2050 0 : })?
2051 : else {
2052 : return Ok(None);
2053 : };
2054 :
2055 : layer.download().await?;
2056 :
2057 : Ok(Some(true))
2058 : }
2059 :
2060 : /// Evict just one layer.
2061 : ///
2062 : /// Returns `Ok(None)` in the case where the layer could not be found by its `layer_file_name`.
2063 0 : pub(crate) async fn evict_layer(
2064 0 : &self,
2065 0 : layer_file_name: &LayerName,
2066 0 : ) -> anyhow::Result<Option<bool>> {
2067 0 : let _gate = self
2068 0 : .gate
2069 0 : .enter()
2070 0 : .map_err(|_| anyhow::anyhow!("Shutting down"))?;
2071 :
2072 0 : let Some(local_layer) = self.find_layer(layer_file_name).await? else {
2073 0 : return Ok(None);
2074 : };
2075 :
2076 : // curl has this by default
2077 0 : let timeout = std::time::Duration::from_secs(120);
2078 0 :
2079 0 : match local_layer.evict_and_wait(timeout).await {
2080 0 : Ok(()) => Ok(Some(true)),
2081 0 : Err(EvictionError::NotFound) => Ok(Some(false)),
2082 0 : Err(EvictionError::Downloaded) => Ok(Some(false)),
2083 0 : Err(EvictionError::Timeout) => Ok(Some(false)),
2084 : }
2085 0 : }
2086 :
2087 9606020 : fn should_roll(
2088 9606020 : &self,
2089 9606020 : layer_size: u64,
2090 9606020 : projected_layer_size: u64,
2091 9606020 : checkpoint_distance: u64,
2092 9606020 : projected_lsn: Lsn,
2093 9606020 : last_freeze_at: Lsn,
2094 9606020 : opened_at: Instant,
2095 9606020 : ) -> bool {
2096 9606020 : let distance = projected_lsn.widening_sub(last_freeze_at);
2097 9606020 :
2098 9606020 : // Rolling the open layer can be triggered by:
2099 9606020 : // 1. The distance from the last LSN we rolled at. This bounds the amount of WAL that
2100 9606020 : // the safekeepers need to store. For sharded tenants, we multiply by shard count to
2101 9606020 : // account for how writes are distributed across shards: we expect each shard to consume
2102 9606020 : // 1/count of the LSN on average.
2103 9606020 : // 2. The size of the currently open layer.
2104 9606020 : // 3. The time since the last roll. It helps safekeepers to regard pageserver as caught
2105 9606020 : // up and suspend activity. (A sketch of these conditions follows this function.)
2106 9606020 : if distance >= checkpoint_distance as i128 * self.shard_identity.count.count() as i128 {
2107 0 : info!(
2108 0 : "Will roll layer at {} with layer size {} due to LSN distance ({})",
2109 : projected_lsn, layer_size, distance
2110 : );
2111 :
2112 0 : true
2113 9606020 : } else if projected_layer_size >= checkpoint_distance {
2114 : // NB: this check is relied upon by:
2115 160 : let _ = IndexEntry::validate_checkpoint_distance;
2116 160 : info!(
2117 0 : "Will roll layer at {} with layer size {} due to layer size ({})",
2118 : projected_lsn, layer_size, projected_layer_size
2119 : );
2120 :
2121 160 : true
2122 9605860 : } else if distance > 0 && opened_at.elapsed() >= self.get_checkpoint_timeout() {
2123 0 : info!(
2124 0 : "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})",
2125 0 : projected_lsn,
2126 0 : layer_size,
2127 0 : opened_at.elapsed()
2128 : );
2129 :
2130 0 : true
2131 : } else {
2132 9605860 : false
2133 : }
2134 9606020 : }
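// Self-contained sketch of the three roll triggers above, with plain
// parameters standing in for the Timeline state:
//
//     use std::time::{Duration, Instant};
//
//     fn should_roll_sketch(
//         projected_layer_size: u64,
//         checkpoint_distance: u64,
//         lsn_distance: i128,
//         shard_count: u8,
//         opened_at: Instant,
//         checkpoint_timeout: Duration,
//     ) -> bool {
//         // 1. LSN distance since the last roll, scaled by shard count.
//         lsn_distance >= checkpoint_distance as i128 * shard_count as i128
//             // 2. The open layer has grown past the checkpoint distance.
//             || projected_layer_size >= checkpoint_distance
//             // 3. Time since the first write to the layer.
//             || (lsn_distance > 0 && opened_at.elapsed() >= checkpoint_timeout)
//     }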
2135 : }
2136 :
2137 : /// Number of times we will compute the partitioning within a checkpoint distance.
2138 : const REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE: u64 = 10;
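// Worked example (hypothetical numbers): with a checkpoint distance of 256 MiB,
// the repartition threshold computed in Timeline::new below comes out to
// 25.6 MiB, i.e. repartitioning is attempted roughly every 25.6 MiB of WAL.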
2139 :
2140 : // Private functions
2141 : impl Timeline {
2142 24 : pub(crate) fn get_lsn_lease_length(&self) -> Duration {
2143 24 : let tenant_conf = self.tenant_conf.load();
2144 24 : tenant_conf
2145 24 : .tenant_conf
2146 24 : .lsn_lease_length
2147 24 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
2148 24 : }
2149 :
2150 0 : pub(crate) fn get_lsn_lease_length_for_ts(&self) -> Duration {
2151 0 : let tenant_conf = self.tenant_conf.load();
2152 0 : tenant_conf
2153 0 : .tenant_conf
2154 0 : .lsn_lease_length_for_ts
2155 0 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length_for_ts)
2156 0 : }
2157 :
2158 0 : pub(crate) fn is_gc_blocked_by_lsn_lease_deadline(&self) -> bool {
2159 0 : let tenant_conf = self.tenant_conf.load();
2160 0 : tenant_conf.is_gc_blocked_by_lsn_lease_deadline()
2161 0 : }
2162 :
2163 0 : pub(crate) fn get_lazy_slru_download(&self) -> bool {
2164 0 : let tenant_conf = self.tenant_conf.load();
2165 0 : tenant_conf
2166 0 : .tenant_conf
2167 0 : .lazy_slru_download
2168 0 : .unwrap_or(self.conf.default_tenant_conf.lazy_slru_download)
2169 0 : }
2170 :
2171 9608992 : fn get_checkpoint_distance(&self) -> u64 {
2172 9608992 : let tenant_conf = self.tenant_conf.load();
2173 9608992 : tenant_conf
2174 9608992 : .tenant_conf
2175 9608992 : .checkpoint_distance
2176 9608992 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
2177 9608992 : }
2178 :
2179 9605860 : fn get_checkpoint_timeout(&self) -> Duration {
2180 9605860 : let tenant_conf = self.tenant_conf.load();
2181 9605860 : tenant_conf
2182 9605860 : .tenant_conf
2183 9605860 : .checkpoint_timeout
2184 9605860 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
2185 9605860 : }
2186 :
2187 5008 : fn get_compaction_period(&self) -> Duration {
2188 5008 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2189 5008 : tenant_conf
2190 5008 : .compaction_period
2191 5008 : .unwrap_or(self.conf.default_tenant_conf.compaction_period)
2192 5008 : }
2193 :
2194 1324 : fn get_compaction_target_size(&self) -> u64 {
2195 1324 : let tenant_conf = self.tenant_conf.load();
2196 1324 : tenant_conf
2197 1324 : .tenant_conf
2198 1324 : .compaction_target_size
2199 1324 : .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
2200 1324 : }
2201 :
2202 784 : fn get_compaction_threshold(&self) -> usize {
2203 784 : let tenant_conf = self.tenant_conf.load();
2204 784 : tenant_conf
2205 784 : .tenant_conf
2206 784 : .compaction_threshold
2207 784 : .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
2208 784 : }
2209 :
2210 56 : fn get_compaction_upper_limit(&self) -> usize {
2211 56 : let tenant_conf = self.tenant_conf.load();
2212 56 : tenant_conf
2213 56 : .tenant_conf
2214 56 : .compaction_upper_limit
2215 56 : .unwrap_or(self.conf.default_tenant_conf.compaction_upper_limit)
2216 56 : }
2217 :
2218 2504 : fn get_l0_flush_delay_threshold(&self) -> Option<usize> {
2219 : // Disable L0 flush delays by default. This and compaction need further tuning.
2220 : const DEFAULT_L0_FLUSH_DELAY_FACTOR: usize = 0; // TODO: default to e.g. 3
2221 :
2222 : // If compaction is disabled, don't delay.
2223 2504 : if self.get_compaction_period() == Duration::ZERO {
2224 2504 : return None;
2225 0 : }
2226 0 :
2227 0 : let compaction_threshold = self.get_compaction_threshold();
2228 0 : let tenant_conf = self.tenant_conf.load();
2229 0 : let l0_flush_delay_threshold = tenant_conf
2230 0 : .tenant_conf
2231 0 : .l0_flush_delay_threshold
2232 0 : .or(self.conf.default_tenant_conf.l0_flush_delay_threshold)
2233 0 : .unwrap_or(DEFAULT_L0_FLUSH_DELAY_FACTOR * compaction_threshold);
2234 0 :
2235 0 : // 0 disables backpressure.
2236 0 : if l0_flush_delay_threshold == 0 {
2237 0 : return None;
2238 0 : }
2239 0 :
2240 0 : // Clamp the flush delay threshold to the compaction threshold; it doesn't make sense to
2241 0 : // backpressure flushes below this.
2242 0 : // TODO: the tenant config should have validation to prevent this instead.
2243 0 : debug_assert!(l0_flush_delay_threshold >= compaction_threshold);
2244 0 : Some(max(l0_flush_delay_threshold, compaction_threshold))
2245 2504 : }
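// Sketch of the resolution above (the stall threshold below follows the same
// shape): an explicit override wins, otherwise FACTOR * compaction_threshold;
// zero disables backpressure, and any other value is clamped up to the
// compaction threshold.
//
//     fn resolve_l0_threshold(
//         override_value: Option<usize>,
//         factor: usize,
//         compaction_threshold: usize,
//     ) -> Option<usize> {
//         let threshold = override_value.unwrap_or(factor * compaction_threshold);
//         if threshold == 0 {
//             return None; // backpressure disabled
//         }
//         Some(threshold.max(compaction_threshold))
//     }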
2246 :
2247 2504 : fn get_l0_flush_stall_threshold(&self) -> Option<usize> {
2248 : // Disable L0 stalls by default. In ingest benchmarks, we see image compaction take >10
2249 : // minutes, blocking L0 compaction, and we can't stall L0 flushes for that long.
2250 : const DEFAULT_L0_FLUSH_STALL_FACTOR: usize = 0; // TODO: default to e.g. 5
2251 :
2252 : // If compaction is disabled, don't stall.
2253 2504 : if self.get_compaction_period() == Duration::ZERO {
2254 2504 : return None;
2255 0 : }
2256 0 :
2257 0 : // If compaction is failing, don't stall and try to keep the tenant alive. This may not be a
2258 0 : // good idea: read amp can grow unbounded, leading to terrible performance, and we may take
2259 0 : // on unbounded compaction debt that can take a long time to fix once compaction comes back
2260 0 : // online. At least we'll delay flushes, slowing down the growth and buying some time.
2261 0 : if self.compaction_failed.load(AtomicOrdering::Relaxed) {
2262 0 : return None;
2263 0 : }
2264 0 :
2265 0 : let compaction_threshold = self.get_compaction_threshold();
2266 0 : let tenant_conf = self.tenant_conf.load();
2267 0 : let l0_flush_stall_threshold = tenant_conf
2268 0 : .tenant_conf
2269 0 : .l0_flush_stall_threshold
2270 0 : .or(self.conf.default_tenant_conf.l0_flush_stall_threshold);
2271 0 :
2272 0 : // Tests sometimes set compaction_threshold=1 to generate lots of layer files, and don't
2273 0 : // handle the 20-second compaction delay. Some (e.g. `test_backward_compatibility`) can't
2274 0 : // easily adjust the L0 backpressure settings, so just disable stalls in this case.
2275 0 : if cfg!(feature = "testing")
2276 0 : && compaction_threshold == 1
2277 0 : && l0_flush_stall_threshold.is_none()
2278 : {
2279 0 : return None;
2280 0 : }
2281 0 :
2282 0 : let l0_flush_stall_threshold = l0_flush_stall_threshold
2283 0 : .unwrap_or(DEFAULT_L0_FLUSH_STALL_FACTOR * compaction_threshold);
2284 0 :
2285 0 : // 0 disables backpressure.
2286 0 : if l0_flush_stall_threshold == 0 {
2287 0 : return None;
2288 0 : }
2289 0 :
2290 0 : // Clamp the flush stall threshold to the compaction threshold; it doesn't make sense to
2291 0 : // backpressure flushes below this.
2292 0 : // TODO: the tenant config should have validation to prevent this instead.
2293 0 : debug_assert!(l0_flush_stall_threshold >= compaction_threshold);
2294 0 : Some(max(l0_flush_stall_threshold, compaction_threshold))
2295 2504 : }
2296 :
2297 2344 : fn get_l0_flush_wait_upload(&self) -> bool {
2298 2344 : let tenant_conf = self.tenant_conf.load();
2299 2344 : tenant_conf
2300 2344 : .tenant_conf
2301 2344 : .l0_flush_wait_upload
2302 2344 : .unwrap_or(self.conf.default_tenant_conf.l0_flush_wait_upload)
2303 2344 : }
2304 :
2305 28 : fn get_image_creation_threshold(&self) -> usize {
2306 28 : let tenant_conf = self.tenant_conf.load();
2307 28 : tenant_conf
2308 28 : .tenant_conf
2309 28 : .image_creation_threshold
2310 28 : .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
2311 28 : }
2312 :
2313 728 : fn get_compaction_algorithm_settings(&self) -> CompactionAlgorithmSettings {
2314 728 : let tenant_conf = &self.tenant_conf.load();
2315 728 : tenant_conf
2316 728 : .tenant_conf
2317 728 : .compaction_algorithm
2318 728 : .as_ref()
2319 728 : .unwrap_or(&self.conf.default_tenant_conf.compaction_algorithm)
2320 728 : .clone()
2321 728 : }
2322 :
2323 0 : fn get_eviction_policy(&self) -> EvictionPolicy {
2324 0 : let tenant_conf = self.tenant_conf.load();
2325 0 : tenant_conf
2326 0 : .tenant_conf
2327 0 : .eviction_policy
2328 0 : .unwrap_or(self.conf.default_tenant_conf.eviction_policy)
2329 0 : }
2330 :
2331 892 : fn get_evictions_low_residence_duration_metric_threshold(
2332 892 : tenant_conf: &TenantConfOpt,
2333 892 : default_tenant_conf: &TenantConf,
2334 892 : ) -> Duration {
2335 892 : tenant_conf
2336 892 : .evictions_low_residence_duration_metric_threshold
2337 892 : .unwrap_or(default_tenant_conf.evictions_low_residence_duration_metric_threshold)
2338 892 : }
2339 :
2340 1136 : fn get_image_layer_creation_check_threshold(&self) -> u8 {
2341 1136 : let tenant_conf = self.tenant_conf.load();
2342 1136 : tenant_conf
2343 1136 : .tenant_conf
2344 1136 : .image_layer_creation_check_threshold
2345 1136 : .unwrap_or(
2346 1136 : self.conf
2347 1136 : .default_tenant_conf
2348 1136 : .image_layer_creation_check_threshold,
2349 1136 : )
2350 1136 : }
2351 :
2352 : /// Resolve the effective WAL receiver protocol to use for this tenant.
2353 : ///
2354 : /// Priority order is:
2355 : /// 1. Tenant config override
2356 : /// 2. Default value for tenant config override
2357 : /// 3. Pageserver config override
2358 : /// 4. Pageserver config default
2359 0 : pub fn resolve_wal_receiver_protocol(&self) -> PostgresClientProtocol {
2360 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2361 0 : tenant_conf
2362 0 : .wal_receiver_protocol_override
2363 0 : .or(self.conf.default_tenant_conf.wal_receiver_protocol_override)
2364 0 : .unwrap_or(self.conf.wal_receiver_protocol)
2365 0 : }
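// Worked example: if the tenant config carries no override (steps 1-2), a
// pageserver config override (step 3) wins; only when that is also unset does
// the pageserver default (step 4) apply. In code this is plain Option
// chaining: tenant_override.or(pageserver_override).unwrap_or(default).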
2366 :
2367 0 : pub(super) fn tenant_conf_updated(&self, new_conf: &AttachedTenantConf) {
2368 0 : // NB: Most tenant conf options are read by background loops, so,
2369 0 : // changes will automatically be picked up.
2370 0 :
2371 0 : // The threshold is embedded in the metric. So, we need to update it.
2372 0 : {
2373 0 : let new_threshold = Self::get_evictions_low_residence_duration_metric_threshold(
2374 0 : &new_conf.tenant_conf,
2375 0 : &self.conf.default_tenant_conf,
2376 0 : );
2377 0 :
2378 0 : let tenant_id_str = self.tenant_shard_id.tenant_id.to_string();
2379 0 : let shard_id_str = format!("{}", self.tenant_shard_id.shard_slug());
2380 0 :
2381 0 : let timeline_id_str = self.timeline_id.to_string();
2382 0 :
2383 0 : self.remote_client.update_config(&new_conf.location);
2384 0 :
2385 0 : self.metrics
2386 0 : .evictions_with_low_residence_duration
2387 0 : .write()
2388 0 : .unwrap()
2389 0 : .change_threshold(
2390 0 : &tenant_id_str,
2391 0 : &shard_id_str,
2392 0 : &timeline_id_str,
2393 0 : new_threshold,
2394 0 : );
2395 0 : }
2396 0 : }
2397 :
2398 : /// Open a Timeline handle.
2399 : ///
2400 : /// Loads the metadata for the timeline into memory, but not the layer map.
2401 : #[allow(clippy::too_many_arguments)]
2402 892 : pub(super) fn new(
2403 892 : conf: &'static PageServerConf,
2404 892 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
2405 892 : metadata: &TimelineMetadata,
2406 892 : ancestor: Option<Arc<Timeline>>,
2407 892 : timeline_id: TimelineId,
2408 892 : tenant_shard_id: TenantShardId,
2409 892 : generation: Generation,
2410 892 : shard_identity: ShardIdentity,
2411 892 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
2412 892 : resources: TimelineResources,
2413 892 : pg_version: u32,
2414 892 : state: TimelineState,
2415 892 : attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
2416 892 : create_idempotency: crate::tenant::CreateTimelineIdempotency,
2417 892 : cancel: CancellationToken,
2418 892 : ) -> Arc<Self> {
2419 892 : let disk_consistent_lsn = metadata.disk_consistent_lsn();
2420 892 : let (state, _) = watch::channel(state);
2421 892 :
2422 892 : let (layer_flush_start_tx, _) = tokio::sync::watch::channel((0, disk_consistent_lsn));
2423 892 : let (layer_flush_done_tx, _) = tokio::sync::watch::channel((0, Ok(())));
2424 892 :
2425 892 : let evictions_low_residence_duration_metric_threshold = {
2426 892 : let loaded_tenant_conf = tenant_conf.load();
2427 892 : Self::get_evictions_low_residence_duration_metric_threshold(
2428 892 : &loaded_tenant_conf.tenant_conf,
2429 892 : &conf.default_tenant_conf,
2430 892 : )
2431 : };
2432 :
2433 892 : if let Some(ancestor) = &ancestor {
2434 460 : let mut ancestor_gc_info = ancestor.gc_info.write().unwrap();
2435 460 : // If we construct an explicit timeline object, it's obviously not offloaded
2436 460 : let is_offloaded = MaybeOffloaded::No;
2437 460 : ancestor_gc_info.insert_child(timeline_id, metadata.ancestor_lsn(), is_offloaded);
2438 460 : }
2439 :
2440 892 : Arc::new_cyclic(|myself| {
2441 892 : let metrics = TimelineMetrics::new(
2442 892 : &tenant_shard_id,
2443 892 : &timeline_id,
2444 892 : crate::metrics::EvictionsWithLowResidenceDurationBuilder::new(
2445 892 : "mtime",
2446 892 : evictions_low_residence_duration_metric_threshold,
2447 892 : ),
2448 892 : );
2449 892 : let aux_file_metrics = metrics.aux_file_size_gauge.clone();
2450 :
2451 892 : let mut result = Timeline {
2452 892 : conf,
2453 892 : tenant_conf,
2454 892 : myself: myself.clone(),
2455 892 : timeline_id,
2456 892 : tenant_shard_id,
2457 892 : generation,
2458 892 : shard_identity,
2459 892 : pg_version,
2460 892 : layers: Default::default(),
2461 892 : gc_compaction_layer_update_lock: tokio::sync::RwLock::new(()),
2462 892 :
2463 892 : walredo_mgr,
2464 892 : walreceiver: Mutex::new(None),
2465 892 :
2466 892 : remote_client: Arc::new(resources.remote_client),
2467 892 :
2468 892 : // initialize in-memory 'last_record_lsn' from 'disk_consistent_lsn'.
2469 892 : last_record_lsn: SeqWait::new(RecordLsn {
2470 892 : last: disk_consistent_lsn,
2471 892 : prev: metadata.prev_record_lsn().unwrap_or(Lsn(0)),
2472 892 : }),
2473 892 : disk_consistent_lsn: AtomicLsn::new(disk_consistent_lsn.0),
2474 892 :
2475 892 : last_freeze_at: AtomicLsn::new(disk_consistent_lsn.0),
2476 892 : last_freeze_ts: RwLock::new(Instant::now()),
2477 892 :
2478 892 : loaded_at: (disk_consistent_lsn, SystemTime::now()),
2479 892 :
2480 892 : ancestor_timeline: ancestor,
2481 892 : ancestor_lsn: metadata.ancestor_lsn(),
2482 892 :
2483 892 : metrics,
2484 892 :
2485 892 : query_metrics: crate::metrics::SmgrQueryTimePerTimeline::new(
2486 892 : &tenant_shard_id,
2487 892 : &timeline_id,
2488 892 : resources.pagestream_throttle_metrics,
2489 892 : ),
2490 892 :
2491 6244 : directory_metrics: array::from_fn(|_| AtomicU64::new(0)),
2492 892 :
2493 892 : flush_loop_state: Mutex::new(FlushLoopState::NotStarted),
2494 892 :
2495 892 : layer_flush_start_tx,
2496 892 : layer_flush_done_tx,
2497 892 :
2498 892 : write_lock: tokio::sync::Mutex::new(None),
2499 892 :
2500 892 : gc_info: std::sync::RwLock::new(GcInfo::default()),
2501 892 :
2502 892 : latest_gc_cutoff_lsn: Rcu::new(metadata.latest_gc_cutoff_lsn()),
2503 892 : initdb_lsn: metadata.initdb_lsn(),
2504 892 :
2505 892 : current_logical_size: if disk_consistent_lsn.is_valid() {
2506 : // we're creating timeline data with some layer files existing locally,
2507 : // need to recalculate timeline's logical size based on data in the layers.
2508 468 : LogicalSize::deferred_initial(disk_consistent_lsn)
2509 : } else {
2510 : // we're creating timeline data without any layers existing locally,
2511 : // initial logical size is 0.
2512 424 : LogicalSize::empty_initial()
2513 : },
2514 :
2515 892 : partitioning: GuardArcSwap::new((
2516 892 : (KeyPartitioning::new(), KeyPartitioning::new().into_sparse()),
2517 892 : Lsn(0),
2518 892 : )),
2519 892 : repartition_threshold: 0,
2520 892 : last_image_layer_creation_check_at: AtomicLsn::new(0),
2521 892 : last_image_layer_creation_check_instant: Mutex::new(None),
2522 892 :
2523 892 : last_received_wal: Mutex::new(None),
2524 892 : rel_size_cache: RwLock::new(RelSizeCache {
2525 892 : complete_as_of: disk_consistent_lsn,
2526 892 : map: HashMap::new(),
2527 892 : }),
2528 892 :
2529 892 : download_all_remote_layers_task_info: RwLock::new(None),
2530 892 :
2531 892 : state,
2532 892 :
2533 892 : eviction_task_timeline_state: tokio::sync::Mutex::new(
2534 892 : EvictionTaskTimelineState::default(),
2535 892 : ),
2536 892 : delete_progress: TimelineDeleteProgress::default(),
2537 892 :
2538 892 : cancel,
2539 892 : gate: Gate::default(),
2540 892 :
2541 892 : compaction_lock: tokio::sync::Mutex::default(),
2542 892 : compaction_failed: AtomicBool::default(),
2543 892 : gc_lock: tokio::sync::Mutex::default(),
2544 892 :
2545 892 : standby_horizon: AtomicLsn::new(0),
2546 892 :
2547 892 : pagestream_throttle: resources.pagestream_throttle,
2548 892 :
2549 892 : aux_file_size_estimator: AuxFileSizeEstimator::new(aux_file_metrics),
2550 892 :
2551 892 : #[cfg(test)]
2552 892 : extra_test_dense_keyspace: ArcSwap::new(Arc::new(KeySpace::default())),
2553 892 :
2554 892 : l0_flush_global_state: resources.l0_flush_global_state,
2555 892 :
2556 892 : handles: Default::default(),
2557 892 :
2558 892 : attach_wal_lag_cooldown,
2559 892 :
2560 892 : create_idempotency,
2561 892 :
2562 892 : page_trace: Default::default(),
2563 892 : };
2564 892 :
2565 892 : result.repartition_threshold =
2566 892 : result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE;
2567 892 :
2568 892 : result
2569 892 : .metrics
2570 892 : .last_record_lsn_gauge
2571 892 : .set(disk_consistent_lsn.0 as i64);
2572 892 : result
2573 892 : })
2574 892 : }
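// Minimal sketch of the Arc::new_cyclic pattern used above: the closure
// receives a Weak pointer to the value under construction, so the struct can
// hold a self-reference without creating a strong reference cycle.
//
//     use std::sync::{Arc, Weak};
//
//     struct Node {
//         myself: Weak<Node>,
//     }
//
//     fn new_node() -> Arc<Node> {
//         Arc::new_cyclic(|myself| Node { myself: myself.clone() })
//     }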
2575 :
2576 1288 : pub(super) fn maybe_spawn_flush_loop(self: &Arc<Self>) {
2577 1288 : let Ok(guard) = self.gate.enter() else {
2578 0 : info!("cannot start flush loop when the timeline gate has already been closed");
2579 0 : return;
2580 : };
2581 1288 : let mut flush_loop_state = self.flush_loop_state.lock().unwrap();
2582 1288 : match *flush_loop_state {
2583 880 : FlushLoopState::NotStarted => (),
2584 : FlushLoopState::Running { .. } => {
2585 408 : info!(
2586 0 : "skipping attempt to start flush_loop twice {}/{}",
2587 0 : self.tenant_shard_id, self.timeline_id
2588 : );
2589 408 : return;
2590 : }
2591 : FlushLoopState::Exited => {
2592 0 : warn!(
2593 0 : "ignoring attempt to restart exited flush_loop {}/{}",
2594 0 : self.tenant_shard_id, self.timeline_id
2595 : );
2596 0 : return;
2597 : }
2598 : }
2599 :
2600 880 : let layer_flush_start_rx = self.layer_flush_start_tx.subscribe();
2601 880 : let self_clone = Arc::clone(self);
2602 880 :
2603 880 : debug!("spawning flush loop");
2604 880 : *flush_loop_state = FlushLoopState::Running {
2605 880 : #[cfg(test)]
2606 880 : expect_initdb_optimization: false,
2607 880 : #[cfg(test)]
2608 880 : initdb_optimization_count: 0,
2609 880 : };
2610 880 : task_mgr::spawn(
2611 880 : task_mgr::BACKGROUND_RUNTIME.handle(),
2612 880 : task_mgr::TaskKind::LayerFlushTask,
2613 880 : self.tenant_shard_id,
2614 880 : Some(self.timeline_id),
2615 880 : "layer flush task",
2616 880 : async move {
2617 880 : let _guard = guard;
2618 880 : let background_ctx = RequestContext::todo_child(TaskKind::LayerFlushTask, DownloadBehavior::Error);
2619 880 : self_clone.flush_loop(layer_flush_start_rx, &background_ctx).await;
2620 20 : let mut flush_loop_state = self_clone.flush_loop_state.lock().unwrap();
2621 20 : assert!(matches!(*flush_loop_state, FlushLoopState::Running{..}));
2622 20 : *flush_loop_state = FlushLoopState::Exited;
2623 20 : Ok(())
2624 20 : }
2625 880 : .instrument(info_span!(parent: None, "layer flush task", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
2626 : );
2627 1288 : }
2628 :
2629 : /// Creates and starts the wal receiver.
2630 : ///
2631 : /// This function is expected to be called at most once per Timeline's lifecycle
2632 : /// when the timeline is activated.
2633 0 : fn launch_wal_receiver(
2634 0 : self: &Arc<Self>,
2635 0 : ctx: &RequestContext,
2636 0 : broker_client: BrokerClientChannel,
2637 0 : ) {
2638 0 : info!(
2639 0 : "launching WAL receiver for timeline {} of tenant {}",
2640 0 : self.timeline_id, self.tenant_shard_id
2641 : );
2642 :
2643 0 : let tenant_conf = self.tenant_conf.load();
2644 0 : let wal_connect_timeout = tenant_conf
2645 0 : .tenant_conf
2646 0 : .walreceiver_connect_timeout
2647 0 : .unwrap_or(self.conf.default_tenant_conf.walreceiver_connect_timeout);
2648 0 : let lagging_wal_timeout = tenant_conf
2649 0 : .tenant_conf
2650 0 : .lagging_wal_timeout
2651 0 : .unwrap_or(self.conf.default_tenant_conf.lagging_wal_timeout);
2652 0 : let max_lsn_wal_lag = tenant_conf
2653 0 : .tenant_conf
2654 0 : .max_lsn_wal_lag
2655 0 : .unwrap_or(self.conf.default_tenant_conf.max_lsn_wal_lag);
2656 0 :
2657 0 : let mut guard = self.walreceiver.lock().unwrap();
2658 0 : assert!(
2659 0 : guard.is_none(),
2660 0 : "multiple launches / re-launches of WAL receiver are not supported"
2661 : );
2662 0 : *guard = Some(WalReceiver::start(
2663 0 : Arc::clone(self),
2664 0 : WalReceiverConf {
2665 0 : protocol: self.resolve_wal_receiver_protocol(),
2666 0 : wal_connect_timeout,
2667 0 : lagging_wal_timeout,
2668 0 : max_lsn_wal_lag,
2669 0 : auth_token: crate::config::SAFEKEEPER_AUTH_TOKEN.get().cloned(),
2670 0 : availability_zone: self.conf.availability_zone.clone(),
2671 0 : ingest_batch_size: self.conf.ingest_batch_size,
2672 0 : },
2673 0 : broker_client,
2674 0 : ctx,
2675 0 : ));
2676 0 : }
2677 :
2678 : /// Initialize with an empty layer map. Used when creating a new timeline.
2679 880 : pub(super) fn init_empty_layer_map(&self, start_lsn: Lsn) {
2680 880 : let mut layers = self.layers.try_write().expect(
2681 880 : "in the context where we call this function, no other task has access to the object",
2682 880 : );
2683 880 : layers
2684 880 : .open_mut()
2685 880 : .expect("in this context the LayerManager must still be open")
2686 880 : .initialize_empty(Lsn(start_lsn.0));
2687 880 : }
2688 :
2689 : /// Scan the timeline directory, cleanup, populate the layer map, and schedule uploads for local-only
2690 : /// files.
2691 12 : pub(super) async fn load_layer_map(
2692 12 : &self,
2693 12 : disk_consistent_lsn: Lsn,
2694 12 : index_part: IndexPart,
2695 12 : ) -> anyhow::Result<()> {
2696 : use init::{Decision::*, Discovered, DismissedLayer};
2697 : use LayerName::*;
2698 :
2699 12 : let mut guard = self.layers.write().await;
2700 :
2701 12 : let timer = self.metrics.load_layer_map_histo.start_timer();
2702 12 :
2702 12 : // Scan the timeline directory and create ImageLayerName and DeltaLayerName
2704 12 : // structs representing all files on disk
2705 12 : let timeline_path = self
2706 12 : .conf
2707 12 : .timeline_path(&self.tenant_shard_id, &self.timeline_id);
2708 12 : let conf = self.conf;
2709 12 : let span = tracing::Span::current();
2710 12 :
2711 12 : // Copy to move into the task we're about to spawn
2712 12 : let this = self.myself.upgrade().expect("&self method holds the arc");
2713 :
2714 12 : let (loaded_layers, needs_cleanup, total_physical_size) = tokio::task::spawn_blocking({
2715 12 : move || {
2716 12 : let _g = span.entered();
2717 12 : let discovered = init::scan_timeline_dir(&timeline_path)?;
2718 12 : let mut discovered_layers = Vec::with_capacity(discovered.len());
2719 12 : let mut unrecognized_files = Vec::new();
2720 12 :
2721 12 : let mut path = timeline_path;
2722 :
2723 44 : for discovered in discovered {
2724 32 : let (name, kind) = match discovered {
2725 32 : Discovered::Layer(layer_file_name, local_metadata) => {
2726 32 : discovered_layers.push((layer_file_name, local_metadata));
2727 32 : continue;
2728 : }
2729 0 : Discovered::IgnoredBackup(path) => {
2730 0 : std::fs::remove_file(path)
2731 0 : .or_else(fs_ext::ignore_not_found)
2732 0 : .fatal_err("Removing .old file");
2733 0 : continue;
2734 : }
2735 0 : Discovered::Unknown(file_name) => {
2736 0 : // we will later error if there are any
2737 0 : unrecognized_files.push(file_name);
2738 0 : continue;
2739 : }
2740 0 : Discovered::Ephemeral(name) => (name, "old ephemeral file"),
2741 0 : Discovered::Temporary(name) => (name, "temporary timeline file"),
2742 0 : Discovered::TemporaryDownload(name) => (name, "temporary download"),
2743 : };
2744 0 : path.push(Utf8Path::new(&name));
2745 0 : init::cleanup(&path, kind)?;
2746 0 : path.pop();
2747 : }
2748 :
2749 12 : if !unrecognized_files.is_empty() {
2750 : // assume that if there are any, there are many.
2751 0 : let n = unrecognized_files.len();
2752 0 : let first = &unrecognized_files[..n.min(10)];
2753 0 : anyhow::bail!(
2754 0 : "unrecognized files in timeline dir (total {n}), first 10: {first:?}"
2755 0 : );
2756 12 : }
2757 12 :
2758 12 : let decided = init::reconcile(discovered_layers, &index_part, disk_consistent_lsn);
2759 12 :
2760 12 : let mut loaded_layers = Vec::new();
2761 12 : let mut needs_cleanup = Vec::new();
2762 12 : let mut total_physical_size = 0;
2763 :
2764 44 : for (name, decision) in decided {
2765 32 : let decision = match decision {
2766 32 : Ok(decision) => decision,
2767 0 : Err(DismissedLayer::Future { local }) => {
2768 0 : if let Some(local) = local {
2769 0 : init::cleanup_future_layer(
2770 0 : &local.local_path,
2771 0 : &name,
2772 0 : disk_consistent_lsn,
2773 0 : )?;
2774 0 : }
2775 0 : needs_cleanup.push(name);
2776 0 : continue;
2777 : }
2778 0 : Err(DismissedLayer::LocalOnly(local)) => {
2779 0 : init::cleanup_local_only_file(&name, &local)?;
2780 : // this file never existed remotely, we will have to do rework
2781 0 : continue;
2782 : }
2783 0 : Err(DismissedLayer::BadMetadata(local)) => {
2784 0 : init::cleanup_local_file_for_remote(&local)?;
2785 : // this file never existed remotely, we will have to do rework
2786 0 : continue;
2787 : }
2788 : };
2789 :
2790 32 : match &name {
2791 24 : Delta(d) => assert!(d.lsn_range.end <= disk_consistent_lsn + 1),
2792 8 : Image(i) => assert!(i.lsn <= disk_consistent_lsn),
2793 : }
2794 :
2795 32 : tracing::debug!(layer=%name, ?decision, "applied");
2796 :
2797 32 : let layer = match decision {
2798 32 : Resident { local, remote } => {
2799 32 : total_physical_size += local.file_size;
2800 32 : Layer::for_resident(conf, &this, local.local_path, name, remote)
2801 32 : .drop_eviction_guard()
2802 : }
2803 0 : Evicted(remote) => Layer::for_evicted(conf, &this, name, remote),
2804 : };
2805 :
2806 32 : loaded_layers.push(layer);
2807 : }
2808 12 : Ok((loaded_layers, needs_cleanup, total_physical_size))
2809 12 : }
2810 12 : })
2811 12 : .await
2812 12 : .map_err(anyhow::Error::new)
2813 12 : .and_then(|x| x)?;
2814 :
2815 12 : let num_layers = loaded_layers.len();
2816 12 :
2817 12 : guard
2818 12 : .open_mut()
2819 12 : .expect("layermanager must be open during init")
2820 12 : .initialize_local_layers(loaded_layers, disk_consistent_lsn + 1);
2821 12 :
2822 12 : self.remote_client
2823 12 : .schedule_layer_file_deletion(&needs_cleanup)?;
2824 12 : self.remote_client
2825 12 : .schedule_index_upload_for_file_changes()?;
2826 : // This barrier orders above DELETEs before any later operations.
2827 : // This is critical because code executing after the barrier might
2828 : // create again objects with the same key that we just scheduled for deletion.
2829 : // For example, if we just scheduled deletion of an image layer "from the future",
2830 : // later compaction might run again and re-create the same image layer.
2831 : // "from the future" here means an image layer whose LSN is > IndexPart::disk_consistent_lsn.
2832 : // "same" here means same key range and LSN.
2833 : //
2834 : // Without a barrier between above DELETEs and the re-creation's PUTs,
2835 : // the upload queue may execute the PUT first, then the DELETE.
2836 : // In our example, we will end up with an IndexPart referencing a non-existent object.
2837 : //
2838 : // 1. a future image layer is created and uploaded
2839 : // 2. ps restart
2840 : // 3. the future layer from (1) is deleted during load layer map
2841 : // 4. image layer is re-created and uploaded
2842 : // 5. deletion queue would like to delete (1) but actually deletes (4)
2843 : // 6. delete by name works as expected, but it now deletes the wrong (later) version
2844 : //
2845 : // See https://github.com/neondatabase/neon/issues/5878
2846 : //
2847 : // NB: generation numbers naturally protect against this because they disambiguate
2848 : // (1) and (4)
2849 : // TODO: this is basically a no-op now, should we remove it?
2850 12 : self.remote_client.schedule_barrier()?;
2851 : // Tenant::create_timeline will wait for these uploads to happen before returning, or
2852 : // on retry.
2853 :
2854 : // Now that we have the full layer map, we may calculate the visibility of layers within it (a global scan)
2855 12 : drop(guard); // drop write lock, update_layer_visibility will take a read lock.
2856 12 : self.update_layer_visibility().await?;
2857 :
2858 12 : info!(
2859 0 : "loaded layer map with {} layers at {}, total physical size: {}",
2860 : num_layers, disk_consistent_lsn, total_physical_size
2861 : );
2862 :
2863 12 : timer.stop_and_record();
2864 12 : Ok(())
2865 12 : }
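// A minimal, self-contained sketch of the reordering hazard described in the
// barrier comment above: within one unordered segment of an upload queue, a
// DELETE and a PUT of the same key can race, while a barrier splits the queue
// into segments that cannot be reordered across it. All names here are
// hypothetical illustrations, not the real upload queue types.
fn example_barrier_prevents_put_delete_race() {
    enum Op {
        Put(&'static str),
        Delete(&'static str),
        Barrier,
    }

    // Within a segment (no barrier in between), ops may execute in any
    // order, so a Put and a Delete of the same key can race.
    fn segment_has_race(segment: &[Op]) -> bool {
        segment.iter().any(|op| match op {
            Op::Put(k) => segment
                .iter()
                .any(|o| matches!(o, Op::Delete(k2) if k2 == k)),
            _ => false,
        })
    }

    fn queue_has_race(queue: &[Op]) -> bool {
        queue
            .split(|op| matches!(op, Op::Barrier))
            .any(segment_has_race)
    }

    // Without a barrier, the delete of the old "future" layer can execute
    // after the re-created layer's upload, deleting the new object.
    let unordered = [Op::Delete("layer-A"), Op::Put("layer-A")];
    assert!(queue_has_race(&unordered));

    // With a barrier between them, the delete is ordered before the put.
    let ordered = [Op::Delete("layer-A"), Op::Barrier, Op::Put("layer-A")];
    assert!(!queue_has_race(&ordered));
}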
2866 :
2867 : /// Retrieve current logical size of the timeline.
2868 : ///
2869 : /// The size could be lagging behind the actual number, in case
2870 : /// the initial size calculation has not been run (gets triggered on the first size access).
2871 : ///
2872 : /// return size and boolean flag that shows if the size is exact
2873 0 : pub(crate) fn get_current_logical_size(
2874 0 : self: &Arc<Self>,
2875 0 : priority: GetLogicalSizePriority,
2876 0 : ctx: &RequestContext,
2877 0 : ) -> logical_size::CurrentLogicalSize {
2878 0 : if !self.tenant_shard_id.is_shard_zero() {
2879 : // Logical size is only accurately maintained on shard zero: when called elsewhere, for example
2880 : // when HTTP API is serving a GET for timeline zero, return zero
2881 0 : return logical_size::CurrentLogicalSize::Approximate(logical_size::Approximate::zero());
2882 0 : }
2883 0 :
2884 0 : let current_size = self.current_logical_size.current_size();
2885 0 : debug!("Current size: {current_size:?}");
2886 :
2887 0 : match (current_size.accuracy(), priority) {
2888 0 : (logical_size::Accuracy::Exact, _) => (), // nothing to do
2889 0 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::Background) => {
2890 0 : // background task will eventually deliver an exact value, we're in no rush
2891 0 : }
2892 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::User) => {
2893 : // background task is not ready, but user is asking for it now;
2894 : // => make the background task skip the line
2895 : // (The alternative would be to calculate the size here, but,
2896 : // it can actually take a long time if the user has a lot of rels.
2897 : // And we'll inevitably need it again; so, let the background task do the work.)
2898 0 : match self
2899 0 : .current_logical_size
2900 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
2901 0 : .get()
2902 : {
2903 0 : Some(cancel) => cancel.cancel(),
2904 : None => {
2905 0 : match self.current_state() {
2906 0 : TimelineState::Broken { .. } | TimelineState::Stopping => {
2907 0 : // Can happen when the timeline detail endpoint is used while deletion is ongoing (or the timeline is broken).
2908 0 : // Don't make noise.
2909 0 : }
2910 : TimelineState::Loading => {
2911 : // Import does not return an activated timeline.
2912 0 : info!("discarding priority boost for logical size calculation because timeline is not yet active");
2913 : }
2914 : TimelineState::Active => {
2915 : // activation should be setting the once cell
2916 0 : warn!("unexpected: cancel_wait_for_background_loop_concurrency_limit_semaphore not set, priority-boosting of logical size calculation will not work");
2917 0 : debug_assert!(false);
2918 : }
2919 : }
2920 : }
2921 : }
2922 : }
2923 : }
2924 :
2925 0 : if let CurrentLogicalSize::Approximate(_) = &current_size {
2926 0 : if ctx.task_kind() == TaskKind::WalReceiverConnectionHandler {
2927 0 : let first = self
2928 0 : .current_logical_size
2929 0 : .did_return_approximate_to_walreceiver
2930 0 : .compare_exchange(
2931 0 : false,
2932 0 : true,
2933 0 : AtomicOrdering::Relaxed,
2934 0 : AtomicOrdering::Relaxed,
2935 0 : )
2936 0 : .is_ok();
2937 0 : if first {
2938 0 : crate::metrics::initial_logical_size::TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE.inc();
2939 0 : }
2940 0 : }
2941 0 : }
2942 :
2943 0 : current_size
2944 0 : }
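// Hedged sketch of the once-only latch used above: a compare_exchange from
// false to true succeeds for exactly one caller, so the "walreceiver got an
// approximate size" metric is bumped at most once per timeline. The function
// name is a hypothetical stand-in.
fn example_first_observation() {
    use std::sync::atomic::{AtomicBool, Ordering};

    let did_return_approximate = AtomicBool::new(false);
    let first = |flag: &AtomicBool| {
        flag.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
    };
    assert!(first(&did_return_approximate)); // first caller wins
    assert!(!first(&did_return_approximate)); // every later caller sees `false`
}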
2945 :
2946 0 : fn spawn_initial_logical_size_computation_task(self: &Arc<Self>, ctx: &RequestContext) {
2947 0 : let Some(initial_part_end) = self.current_logical_size.initial_part_end else {
2948 : // nothing to do for freshly created timelines;
2949 0 : assert_eq!(
2950 0 : self.current_logical_size.current_size().accuracy(),
2951 0 : logical_size::Accuracy::Exact,
2952 0 : );
2953 0 : self.current_logical_size.initialized.add_permits(1);
2954 0 : return;
2955 : };
2956 :
2957 0 : let cancel_wait_for_background_loop_concurrency_limit_semaphore = CancellationToken::new();
2958 0 : let token = cancel_wait_for_background_loop_concurrency_limit_semaphore.clone();
2959 0 : self.current_logical_size
2960 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore.set(token)
2961 0 : .expect("initial logical size calculation task must be spawned exactly once per Timeline object");
2962 0 :
2963 0 : let self_clone = Arc::clone(self);
2964 0 : let background_ctx = ctx.detached_child(
2965 0 : TaskKind::InitialLogicalSizeCalculation,
2966 0 : DownloadBehavior::Download,
2967 0 : );
2968 0 : task_mgr::spawn(
2969 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
2970 0 : task_mgr::TaskKind::InitialLogicalSizeCalculation,
2971 0 : self.tenant_shard_id,
2972 0 : Some(self.timeline_id),
2973 0 : "initial size calculation",
2974 : // NB: don't log errors here, task_mgr will do that.
2975 0 : async move {
2976 0 : self_clone
2977 0 : .initial_logical_size_calculation_task(
2978 0 : initial_part_end,
2979 0 : cancel_wait_for_background_loop_concurrency_limit_semaphore,
2980 0 : background_ctx,
2981 0 : )
2982 0 : .await;
2983 0 : Ok(())
2984 0 : }
2985 0 : .instrument(info_span!(parent: None, "initial_size_calculation", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id)),
2986 : );
2987 0 : }
2988 :
2989 : /// # Cancellation
2990 : ///
2991 : /// This method is sensitive to `Timeline::cancel`.
2992 : ///
2993 : /// It is _not_ sensitive to task_mgr::shutdown_token().
2994 : ///
2995 : /// # Cancel-Safety
2996 : ///
2997 : /// It does Timeline IO, hence it should be polled to completion; otherwise
2998 : /// we could leave in-flight IOs behind, which is safe but annoying
2999 : /// to reason about.
3000 0 : async fn initial_logical_size_calculation_task(
3001 0 : self: Arc<Self>,
3002 0 : initial_part_end: Lsn,
3003 0 : skip_concurrency_limiter: CancellationToken,
3004 0 : background_ctx: RequestContext,
3005 0 : ) {
3006 0 : scopeguard::defer! {
3007 0 : // Irrespective of the outcome of this operation, we should unblock anyone waiting for it.
3008 0 : self.current_logical_size.initialized.add_permits(1);
3009 0 : }
3010 0 :
3011 0 : let try_once = |attempt: usize| {
3012 0 : let background_ctx = &background_ctx;
3013 0 : let self_ref = &self;
3014 0 : let skip_concurrency_limiter = &skip_concurrency_limiter;
3015 0 : async move {
3016 0 : let wait_for_permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
3017 0 : BackgroundLoopKind::InitialLogicalSizeCalculation,
3018 0 : background_ctx,
3019 0 : );
3020 :
3021 : use crate::metrics::initial_logical_size::StartCircumstances;
3022 0 : let (_maybe_permit, circumstances) = tokio::select! {
3023 0 : permit = wait_for_permit => {
3024 0 : (Some(permit), StartCircumstances::AfterBackgroundTasksRateLimit)
3025 : }
3026 0 : _ = self_ref.cancel.cancelled() => {
3027 0 : return Err(CalculateLogicalSizeError::Cancelled);
3028 : }
3029 0 : () = skip_concurrency_limiter.cancelled() => {
3030 : // Some action that is part of an end-user interaction requested the logical size
3031 : // => break out of the rate limit
3032 : // TODO: ideally we'd not run on BackgroundRuntime but the requester's runtime;
3033 : // but then again what happens if they cancel; also, we should just be using
3034 : // one runtime across the entire process, so, let's leave this for now.
3035 0 : (None, StartCircumstances::SkippedConcurrencyLimiter)
3036 : }
3037 : };
3038 :
3039 0 : let metrics_guard = if attempt == 1 {
3040 0 : crate::metrics::initial_logical_size::START_CALCULATION.first(circumstances)
3041 : } else {
3042 0 : crate::metrics::initial_logical_size::START_CALCULATION.retry(circumstances)
3043 : };
3044 :
3045 0 : let io_concurrency = IoConcurrency::spawn_from_conf(
3046 0 : self_ref.conf,
3047 0 : self_ref
3048 0 : .gate
3049 0 : .enter()
3050 0 : .map_err(|_| CalculateLogicalSizeError::Cancelled)?,
3051 : );
3052 :
3053 0 : let calculated_size = self_ref
3054 0 : .logical_size_calculation_task(
3055 0 : initial_part_end,
3056 0 : LogicalSizeCalculationCause::Initial,
3057 0 : background_ctx,
3058 0 : )
3059 0 : .await?;
3060 :
3061 0 : self_ref
3062 0 : .trigger_aux_file_size_computation(
3063 0 : initial_part_end,
3064 0 : background_ctx,
3065 0 : io_concurrency,
3066 0 : )
3067 0 : .await?;
3068 :
3069 : // TODO: add aux file size to logical size
3070 :
3071 0 : Ok((calculated_size, metrics_guard))
3072 0 : }
3073 0 : };
3074 :
3075 0 : let retrying = async {
3076 0 : let mut attempt = 0;
3077 : loop {
3078 0 : attempt += 1;
3079 0 :
3080 0 : match try_once(attempt).await {
3081 0 : Ok(res) => return ControlFlow::Continue(res),
3082 0 : Err(CalculateLogicalSizeError::Cancelled) => return ControlFlow::Break(()),
3083 : Err(
3084 0 : e @ (CalculateLogicalSizeError::Decode(_)
3085 0 : | CalculateLogicalSizeError::PageRead(_)),
3086 0 : ) => {
3087 0 : warn!(attempt, "initial size calculation failed: {e:?}");
3088 : // exponential back-off doesn't make sense at these long intervals;
3089 : // use a fixed retry interval with generous jitter instead
3090 0 : let sleep_duration = Duration::from_secs(
3091 0 : u64::try_from(
3092 0 : // 1hour base
3093 0 : (60_i64 * 60_i64)
3094 0 : // 10min jitter
3095 0 : + rand::thread_rng().gen_range(-10 * 60..10 * 60),
3096 0 : )
3097 0 : .expect("10min < 1hour"),
3098 0 : );
3099 0 : tokio::select! {
3100 0 : _ = tokio::time::sleep(sleep_duration) => {}
3101 0 : _ = self.cancel.cancelled() => return ControlFlow::Break(()),
3102 : }
3103 : }
3104 : }
3105 : }
3106 0 : };
3107 :
3108 0 : let (calculated_size, metrics_guard) = match retrying.await {
3109 0 : ControlFlow::Continue(calculated_size) => calculated_size,
3110 0 : ControlFlow::Break(()) => return,
3111 : };
3112 :
3113 : // we cannot query current_logical_size.current_size() to learn the current
3114 : // *negative* value; it is only available truncated to u64.
3115 0 : let added = self
3116 0 : .current_logical_size
3117 0 : .size_added_after_initial
3118 0 : .load(AtomicOrdering::Relaxed);
3119 0 :
3120 0 : let sum = calculated_size.saturating_add_signed(added);
3121 0 :
3122 0 : // set the gauge value before it can be set in `update_current_logical_size`.
3123 0 : self.metrics.current_logical_size_gauge.set(sum);
3124 0 :
3125 0 : self.current_logical_size
3126 0 : .initial_logical_size
3127 0 : .set((calculated_size, metrics_guard.calculation_result_saved()))
3128 0 : .ok()
3129 0 : .expect("only this task sets it");
3130 0 : }
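// A self-contained sketch of the retry cadence above: a fixed one-hour base
// with +/- ten minutes of uniform jitter, instead of exponential back-off,
// which adds little at intervals this long. Mirrors the arithmetic in the
// error path of `try_once`; the helper name is hypothetical.
fn example_retry_interval() -> std::time::Duration {
    use rand::Rng;

    let base_secs = 60_i64 * 60_i64; // 1 hour base
    let jitter_secs = rand::thread_rng().gen_range(-10 * 60..10 * 60); // +/- 10 min
    // base + jitter is always positive because |jitter| < base.
    std::time::Duration::from_secs(
        u64::try_from(base_secs + jitter_secs).expect("10min < 1hour"),
    )
}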
3131 :
3132 0 : pub(crate) fn spawn_ondemand_logical_size_calculation(
3133 0 : self: &Arc<Self>,
3134 0 : lsn: Lsn,
3135 0 : cause: LogicalSizeCalculationCause,
3136 0 : ctx: RequestContext,
3137 0 : ) -> oneshot::Receiver<Result<u64, CalculateLogicalSizeError>> {
3138 0 : let (sender, receiver) = oneshot::channel();
3139 0 : let self_clone = Arc::clone(self);
3140 0 : // XXX if our caller loses interest, i.e., ctx is cancelled,
3141 0 : // we should stop the size calculation work and return an error.
3142 0 : // That would require restructuring this function's API to
3143 0 : // return the result directly, instead of a Receiver for the result.
3144 0 : let ctx = ctx.detached_child(
3145 0 : TaskKind::OndemandLogicalSizeCalculation,
3146 0 : DownloadBehavior::Download,
3147 0 : );
3148 0 : task_mgr::spawn(
3149 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
3150 0 : task_mgr::TaskKind::OndemandLogicalSizeCalculation,
3151 0 : self.tenant_shard_id,
3152 0 : Some(self.timeline_id),
3153 0 : "ondemand logical size calculation",
3154 0 : async move {
3155 0 : let res = self_clone
3156 0 : .logical_size_calculation_task(lsn, cause, &ctx)
3157 0 : .await;
3158 0 : let _ = sender.send(res).ok();
3159 0 : Ok(()) // Receiver is responsible for handling errors
3160 0 : }
3161 0 : .in_current_span(),
3162 0 : );
3163 0 : receiver
3164 0 : }
3165 :
3166 : #[instrument(skip_all)]
3167 : async fn logical_size_calculation_task(
3168 : self: &Arc<Self>,
3169 : lsn: Lsn,
3170 : cause: LogicalSizeCalculationCause,
3171 : ctx: &RequestContext,
3172 : ) -> Result<u64, CalculateLogicalSizeError> {
3173 : crate::span::debug_assert_current_span_has_tenant_and_timeline_id();
3174 : // We should never be calculating logical sizes on shard !=0, because these shards do not have
3175 : // accurate relation sizes, and they do not emit consumption metrics.
3176 : debug_assert!(self.tenant_shard_id.is_shard_zero());
3177 :
3178 : let guard = self
3179 : .gate
3180 : .enter()
3181 0 : .map_err(|_| CalculateLogicalSizeError::Cancelled)?;
3182 :
3183 : self.calculate_logical_size(lsn, cause, &guard, ctx).await
3184 : }
3185 :
3186 : /// Calculate the logical size of the database at the latest LSN.
3187 : ///
3188 : /// NOTE: counted incrementally, includes ancestors. This can be a slow operation,
3189 : /// especially if we need to download remote layers.
3190 0 : async fn calculate_logical_size(
3191 0 : &self,
3192 0 : up_to_lsn: Lsn,
3193 0 : cause: LogicalSizeCalculationCause,
3194 0 : _guard: &GateGuard,
3195 0 : ctx: &RequestContext,
3196 0 : ) -> Result<u64, CalculateLogicalSizeError> {
3197 0 : info!(
3198 0 : "Calculating logical size for timeline {} at {}",
3199 : self.timeline_id, up_to_lsn
3200 : );
3201 :
3202 0 : if let Err(()) = pausable_failpoint!("timeline-calculate-logical-size-pause", &self.cancel)
3203 : {
3204 0 : return Err(CalculateLogicalSizeError::Cancelled);
3205 0 : }
3206 :
3207 : // See if we've already done the work for initial size calculation.
3208 : // This is a short-cut for timelines that are mostly unused.
3209 0 : if let Some(size) = self.current_logical_size.initialized_size(up_to_lsn) {
3210 0 : return Ok(size);
3211 0 : }
3212 0 : let storage_time_metrics = match cause {
3213 : LogicalSizeCalculationCause::Initial
3214 : | LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize
3215 0 : | LogicalSizeCalculationCause::TenantSizeHandler => &self.metrics.logical_size_histo,
3216 : LogicalSizeCalculationCause::EvictionTaskImitation => {
3217 0 : &self.metrics.imitate_logical_size_histo
3218 : }
3219 : };
3220 0 : let timer = storage_time_metrics.start_timer();
3221 0 : let logical_size = self
3222 0 : .get_current_logical_size_non_incremental(up_to_lsn, ctx)
3223 0 : .await?;
3224 0 : debug!("calculated logical size: {logical_size}");
3225 0 : timer.stop_and_record();
3226 0 : Ok(logical_size)
3227 0 : }
3228 :
3229 : /// Update current logical size, adding `delta` to the old value.
3230 541140 : fn update_current_logical_size(&self, delta: i64) {
3231 541140 : let logical_size = &self.current_logical_size;
3232 541140 : logical_size.increment_size(delta);
3233 541140 :
3234 541140 : // Also set the value in the prometheus gauge. Note that
3235 541140 : // there is a race condition here: if this is called by two
3236 541140 : // threads concurrently, the prometheus gauge might be set to
3237 541140 : // one value while current_logical_size is set to the
3238 541140 : // other.
3239 541140 : match logical_size.current_size() {
3240 541140 : CurrentLogicalSize::Exact(ref new_current_size) => self
3241 541140 : .metrics
3242 541140 : .current_logical_size_gauge
3243 541140 : .set(new_current_size.into()),
3244 0 : CurrentLogicalSize::Approximate(_) => {
3245 0 : // don't update the gauge yet, this allows us not to update the gauge back and
3246 0 : // forth between the initial size calculation task.
3247 0 : }
3248 : }
3249 541140 : }
3250 :
3251 5912 : pub(crate) fn update_directory_entries_count(&self, kind: DirectoryKind, count: u64) {
3252 5912 : self.directory_metrics[kind.offset()].store(count, AtomicOrdering::Relaxed);
3253 5912 : let aux_metric =
3254 5912 : self.directory_metrics[DirectoryKind::AuxFiles.offset()].load(AtomicOrdering::Relaxed);
3255 5912 :
3256 5912 : let sum_of_entries = self
3257 5912 : .directory_metrics
3258 5912 : .iter()
3259 41384 : .map(|v| v.load(AtomicOrdering::Relaxed))
3260 5912 : .sum();
3261 : // Set a high general threshold and a lower threshold for the auxiliary files,
3262 : // as we can have large numbers of relations in the db directory.
3263 : const SUM_THRESHOLD: u64 = 5000;
3264 : const AUX_THRESHOLD: u64 = 1000;
3265 5912 : if sum_of_entries >= SUM_THRESHOLD || aux_metric >= AUX_THRESHOLD {
3266 0 : self.metrics
3267 0 : .directory_entries_count_gauge
3268 0 : .set(sum_of_entries);
3269 5912 : } else if let Some(metric) = Lazy::get(&self.metrics.directory_entries_count_gauge) {
3270 0 : metric.set(sum_of_entries);
3271 5912 : }
3272 5912 : }
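// Hedged sketch of the `Lazy::get` idiom above: write to the gauge only if
// it has already been initialized, so timelines that never cross a threshold
// never register the metric. Self-contained; the static is hypothetical.
fn example_update_if_initialized(value: u64) {
    use once_cell::sync::Lazy;
    use std::sync::atomic::{AtomicU64, Ordering};

    static GAUGE: Lazy<AtomicU64> = Lazy::new(|| AtomicU64::new(0));

    // `Lazy::get` returns None if the value was never forced.
    if let Some(gauge) = Lazy::get(&GAUGE) {
        gauge.store(value, Ordering::Relaxed);
    }
}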
3273 :
3274 0 : async fn find_layer(
3275 0 : &self,
3276 0 : layer_name: &LayerName,
3277 0 : ) -> Result<Option<Layer>, layer_manager::Shutdown> {
3278 0 : let guard = self.layers.read().await;
3279 0 : let layer = guard
3280 0 : .layer_map()?
3281 0 : .iter_historic_layers()
3282 0 : .find(|l| &l.layer_name() == layer_name)
3283 0 : .map(|found| guard.get_from_desc(&found));
3284 0 : Ok(layer)
3285 0 : }
3286 :
3287 : /// The timeline heatmap is a hint to secondary locations from the primary location,
3288 : /// indicating which layers are currently on-disk on the primary.
3289 : ///
3290 : /// None is returned if the Timeline is in a state where uploading a heatmap
3291 : /// doesn't make sense, such as shutting down or initializing. The caller
3292 : /// should treat this as a cue to simply skip doing any heatmap uploading
3293 : /// for this timeline.
3294 4 : pub(crate) async fn generate_heatmap(&self) -> Option<HeatMapTimeline> {
3295 4 : if !self.is_active() {
3296 0 : return None;
3297 4 : }
3298 :
3299 4 : let guard = self.layers.read().await;
3300 :
3301 20 : let resident = guard.likely_resident_layers().filter_map(|layer| {
3302 20 : match layer.visibility() {
3303 : LayerVisibilityHint::Visible => {
3304 : // Layer is visible to one or more read LSNs: eligible for inclusion in the heatmap
3305 16 : let last_activity_ts = layer.latest_activity();
3306 16 : Some((layer.layer_desc(), layer.metadata(), last_activity_ts))
3307 : }
3308 : LayerVisibilityHint::Covered => {
3309 : // Layer is resident but unlikely to be read: not eligible for inclusion in the heatmap.
3310 4 : None
3311 : }
3312 : }
3313 20 : });
3314 4 :
3315 4 : let mut layers = resident.collect::<Vec<_>>();
3316 4 :
3317 4 : // Sort layers in order of which to download first. For a large set of layers to download, we
3318 4 : // want to prioritize those layers which are most likely to still be resident many minutes
3319 4 : // or hours later:
3320 4 : // - Download L0s last, because they churn the fastest: L0s on a fast-writing tenant might
3321 4 : // only exist for a few minutes before being compacted into L1s.
3322 4 : // - For L1 & image layers, download most recent LSNs first: the older the LSN, the sooner
3323 4 : // the layer is likely to be covered by an image layer during compaction.
3324 36 : layers.sort_by_key(|(desc, _meta, _atime)| {
3325 36 : std::cmp::Reverse((
3326 36 : !LayerMap::is_l0(&desc.key_range, desc.is_delta),
3327 36 : desc.lsn_range.end,
3328 36 : ))
3329 36 : });
3330 4 :
3331 4 : let layers = layers
3332 4 : .into_iter()
3333 16 : .map(|(desc, meta, atime)| HeatMapLayer::new(desc.layer_name(), meta, atime))
3334 4 : .collect();
3335 4 :
3336 4 : Some(HeatMapTimeline::new(self.timeline_id, layers))
3337 4 : }
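// Hedged sketch of the download ordering above: sorting by
// `Reverse((!is_l0, end_lsn))` puts non-L0 layers first (`!is_l0 == true`
// sorts first under Reverse, since true > false) and, within each group,
// the newest end-LSN first. `LayerInfo` is a hypothetical stand-in.
fn example_heatmap_download_order() {
    struct LayerInfo {
        is_l0: bool,
        end_lsn: u64,
    }

    let mut layers = vec![
        LayerInfo { is_l0: true, end_lsn: 30 },
        LayerInfo { is_l0: false, end_lsn: 10 },
        LayerInfo { is_l0: false, end_lsn: 20 },
    ];
    layers.sort_by_key(|l| std::cmp::Reverse((!l.is_l0, l.end_lsn)));

    // Non-L0 layers come first, newest LSN first; L0s trail the list.
    let order: Vec<(bool, u64)> = layers.iter().map(|l| (l.is_l0, l.end_lsn)).collect();
    assert_eq!(order, vec![(false, 20), (false, 10), (true, 30)]);
}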
3338 :
3339 : /// Returns true if the given lsn is or was an ancestor branchpoint.
3340 0 : pub(crate) fn is_ancestor_lsn(&self, lsn: Lsn) -> bool {
3341 0 : // upon timeline detach, we set the ancestor_lsn to Lsn::INVALID and store the original
3342 0 : // branchpoint in IndexPart::lineage
3343 0 : self.ancestor_lsn == lsn
3344 0 : || (self.ancestor_lsn == Lsn::INVALID
3345 0 : && self.remote_client.is_previous_ancestor_lsn(lsn))
3346 0 : }
3347 : }
3348 :
3349 : impl Timeline {
3350 : #[allow(clippy::doc_lazy_continuation)]
3351 : /// Get the data needed to reconstruct all keys in the provided keyspace
3352 : ///
3353 : /// The algorithm is as follows:
3354 : /// 1. While some keys are still not done and there's a timeline to visit:
3355 : /// 2. Visit the timeline (see [`Timeline::get_vectored_reconstruct_data_timeline`]):
3356 : /// 2.1. Build the fringe for the current keyspace
3357 : /// 2.2. Visit the newest layer from the fringe to collect all values for the range it
3358 : /// intersects
3359 : /// 2.3. Pop the layer from the fringe
3360 : /// 2.4. If the fringe is empty, go back to 1
3361 1255274 : async fn get_vectored_reconstruct_data(
3362 1255274 : &self,
3363 1255274 : mut keyspace: KeySpace,
3364 1255274 : request_lsn: Lsn,
3365 1255274 : reconstruct_state: &mut ValuesReconstructState,
3366 1255274 : ctx: &RequestContext,
3367 1255274 : ) -> Result<(), GetVectoredError> {
3368 1255274 : let mut timeline_owned: Arc<Timeline>;
3369 1255274 : let mut timeline = self;
3370 1255274 :
3371 1255274 : let mut cont_lsn = Lsn(request_lsn.0 + 1);
3372 :
3373 1255270 : let missing_keyspace = loop {
3374 1707298 : if self.cancel.is_cancelled() {
3375 0 : return Err(GetVectoredError::Cancelled);
3376 1707298 : }
3377 :
3378 : let TimelineVisitOutcome {
3379 1707298 : completed_keyspace: completed,
3380 1707298 : image_covered_keyspace,
3381 1707298 : } = Self::get_vectored_reconstruct_data_timeline(
3382 1707298 : timeline,
3383 1707298 : keyspace.clone(),
3384 1707298 : cont_lsn,
3385 1707298 : reconstruct_state,
3386 1707298 : &self.cancel,
3387 1707298 : ctx,
3388 1707298 : )
3389 1707298 : .await?;
3390 :
3391 1707298 : keyspace.remove_overlapping_with(&completed);
3392 1707298 :
3393 1707298 : // Do not descend into the ancestor timeline for aux files.
3394 1707298 : // We don't return a blanket [`GetVectoredError::MissingKey`] to avoid
3395 1707298 : // stalling compaction.
3396 1707298 : keyspace.remove_overlapping_with(&KeySpace {
3397 1707298 : ranges: vec![NON_INHERITED_RANGE, Key::sparse_non_inherited_keyspace()],
3398 1707298 : });
3399 1707298 :
3400 1707298 : // Keyspace is fully retrieved
3401 1707298 : if keyspace.is_empty() {
3402 1255214 : break None;
3403 452084 : }
3404 :
3405 452084 : let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() else {
3406 : // Not fully retrieved but no ancestor timeline.
3407 56 : break Some(keyspace);
3408 : };
3409 :
3410 : // Now we check whether any keys are covered by an image layer but do not exist in it,
3411 : // which means those keys do not exist.
3412 :
3413 : // The block below will stop the vectored search if any of the keys encountered an image layer
3414 : // which did not contain a snapshot for said key. Since we have already removed all completed
3415 : // keys from `keyspace`, we expect there to be no overlap between it and the image covered key
3416 : // space. If that's not the case, we had at least one key encounter a gap in the image layer
3417 : // and stop the search as a result of that.
3418 452028 : let mut removed = keyspace.remove_overlapping_with(&image_covered_keyspace);
3419 452028 : // Do not fire missing key error for sparse keys.
3420 452028 : removed.remove_overlapping_with(&KeySpace {
3421 452028 : ranges: vec![SPARSE_RANGE],
3422 452028 : });
3423 452028 : if !removed.is_empty() {
3424 0 : break Some(removed);
3425 452028 : }
3426 452028 : // If we reached this point, `remove_overlapping_with` should not have made any change to the
3427 452028 : // keyspace.
3428 452028 :
3429 452028 : // Take the min to avoid reconstructing a page with data newer than request Lsn.
3430 452028 : cont_lsn = std::cmp::min(Lsn(request_lsn.0 + 1), Lsn(timeline.ancestor_lsn.0 + 1));
3431 452028 : timeline_owned = timeline
3432 452028 : .get_ready_ancestor_timeline(ancestor_timeline, ctx)
3433 452028 : .await?;
3434 452024 : timeline = &*timeline_owned;
3435 : };
3436 :
3437 : // Remove sparse keys from the keyspace so that it doesn't fire errors.
3438 1255270 : let missing_keyspace = if let Some(missing_keyspace) = missing_keyspace {
3439 56 : let mut missing_keyspace = missing_keyspace;
3440 56 : missing_keyspace.remove_overlapping_with(&KeySpace {
3441 56 : ranges: vec![SPARSE_RANGE],
3442 56 : });
3443 56 : if missing_keyspace.is_empty() {
3444 28 : None
3445 : } else {
3446 28 : Some(missing_keyspace)
3447 : }
3448 : } else {
3449 1255214 : None
3450 : };
3451 :
3452 1255270 : if let Some(missing_keyspace) = missing_keyspace {
3453 28 : return Err(GetVectoredError::MissingKey(MissingKeyError {
3454 28 : key: missing_keyspace.start().unwrap(), /* better if we can store the full keyspace */
3455 28 : shard: self
3456 28 : .shard_identity
3457 28 : .get_shard_number(&missing_keyspace.start().unwrap()),
3458 28 : cont_lsn,
3459 28 : request_lsn,
3460 28 : ancestor_lsn: Some(timeline.ancestor_lsn),
3461 28 : backtrace: None,
3462 28 : }));
3463 1255242 : }
3464 1255242 :
3465 1255242 : Ok(())
3466 1255274 : }
3467 :
3468 : /// Collect the reconstruct data for a keyspace from the specified timeline.
3469 : ///
3470 : /// Maintain a fringe [`LayerFringe`] which tracks all the layers that intersect
3471 : /// the current keyspace. The current keyspace of the search at any given timeline
3472 : /// is the original keyspace minus all the keys that have been completed minus
3473 : /// any keys for which we couldn't find an intersecting layer. It's not tracked explicitly,
3474 : /// but if you merge all the keyspaces in the fringe, you get the "current keyspace".
3475 : ///
3476 : /// This is basically a depth-first search visitor implementation where a vertex
3477 : /// is the (layer, lsn range, key space) tuple. The fringe acts as the stack.
3478 : ///
3479 : /// At each iteration pop the top of the fringe (the layer with the highest Lsn)
3480 : /// and get all the required reconstruct data from the layer in one go.
3481 : ///
3482 : /// Returns the completed keyspace and the keyspaces with image coverage. The caller
3483 : /// decides how to deal with these two keyspaces.
3484 1707298 : async fn get_vectored_reconstruct_data_timeline(
3485 1707298 : timeline: &Timeline,
3486 1707298 : keyspace: KeySpace,
3487 1707298 : mut cont_lsn: Lsn,
3488 1707298 : reconstruct_state: &mut ValuesReconstructState,
3489 1707298 : cancel: &CancellationToken,
3490 1707298 : ctx: &RequestContext,
3491 1707298 : ) -> Result<TimelineVisitOutcome, GetVectoredError> {
3492 1707298 : let mut unmapped_keyspace = keyspace.clone();
3493 1707298 : let mut fringe = LayerFringe::new();
3494 1707298 :
3495 1707298 : let mut completed_keyspace = KeySpace::default();
3496 1707298 : let mut image_covered_keyspace = KeySpaceRandomAccum::new();
3497 1707298 :
3498 1707298 : // Prevent GC from progressing while visiting the current timeline.
3499 1707298 : // If we are GC-ing because a new image layer was added while traversing
3500 1707298 : // the timeline, then it will remove layers that are required for fulfilling
3501 1707298 : // the current get request (read-path cannot "look back" and notice the new
3502 1707298 : // image layer).
3503 1707298 : let _gc_cutoff_holder = timeline.get_latest_gc_cutoff_lsn();
3504 :
3505 : // See `compaction::compact_with_gc` for why we need this.
3506 1707298 : let _guard = timeline.gc_compaction_layer_update_lock.read().await;
3507 :
3508 : loop {
3509 3401537 : if cancel.is_cancelled() {
3510 0 : return Err(GetVectoredError::Cancelled);
3511 3401537 : }
3512 3401537 :
3513 3401537 : let (keys_done_last_step, keys_with_image_coverage) =
3514 3401537 : reconstruct_state.consume_done_keys();
3515 3401537 : unmapped_keyspace.remove_overlapping_with(&keys_done_last_step);
3516 3401537 : completed_keyspace.merge(&keys_done_last_step);
3517 3401537 : if let Some(keys_with_image_coverage) = keys_with_image_coverage {
3518 45092 : unmapped_keyspace
3519 45092 : .remove_overlapping_with(&KeySpace::single(keys_with_image_coverage.clone()));
3520 45092 : image_covered_keyspace.add_range(keys_with_image_coverage);
3521 3356445 : }
3522 :
3523 : // Do not descent any further if the last layer we visited
3524 : // completed all keys in the keyspace it inspected. This is not
3525 : // required for correctness, but avoids visiting extra layers
3526 : // which turns out to be a perf bottleneck in some cases.
3527 3401537 : if !unmapped_keyspace.is_empty() {
3528 2150807 : let guard = timeline.layers.read().await;
3529 2150807 : let layers = guard.layer_map()?;
3530 :
3531 2150807 : let in_memory_layer = layers.find_in_memory_layer(|l| {
3532 1828673 : let start_lsn = l.get_lsn_range().start;
3533 1828673 : cont_lsn > start_lsn
3534 2150807 : });
3535 2150807 :
3536 2150807 : match in_memory_layer {
3537 1213307 : Some(l) => {
3538 1213307 : let lsn_range = l.get_lsn_range().start..cont_lsn;
3539 1213307 : fringe.update(
3540 1213307 : ReadableLayer::InMemoryLayer(l),
3541 1213307 : unmapped_keyspace.clone(),
3542 1213307 : lsn_range,
3543 1213307 : );
3544 1213307 : }
3545 : None => {
3546 937548 : for range in unmapped_keyspace.ranges.iter() {
3547 937548 : let results = layers.range_search(range.clone(), cont_lsn);
3548 937548 :
3549 937548 : results
3550 937548 : .found
3551 937548 : .into_iter()
3552 937548 : .map(|(SearchResult { layer, lsn_floor }, keyspace_accum)| {
3553 480960 : (
3554 480960 : ReadableLayer::PersistentLayer(guard.get_from_desc(&layer)),
3555 480960 : keyspace_accum.to_keyspace(),
3556 480960 : lsn_floor..cont_lsn,
3557 480960 : )
3558 937548 : })
3559 937548 : .for_each(|(layer, keyspace, lsn_range)| {
3560 480960 : fringe.update(layer, keyspace, lsn_range)
3561 937548 : });
3562 937548 : }
3563 : }
3564 : }
3565 :
3566 : // It's safe to drop the layer map lock after planning the next round of reads.
3567 : // The fringe keeps readable handles for the layers which are safe to read even
3568 : // if layers were compacted or flushed.
3569 : //
3570 : // The more interesting consideration is: "Why is the read algorithm still correct
3571 : // if the layer map changes while it is operating?". Doing a vectored read on a
3572 : // timeline boils down to pushing an imaginary lsn boundary downwards for each range
3573 : // covered by the read. The layer map tells us how to move the lsn downwards for a
3574 : // range at *a particular point in time*. It is fine for the answer to be different
3575 : // at two different time points.
3576 2150807 : drop(guard);
3577 1250730 : }
3578 :
3579 3401537 : if let Some((layer_to_read, keyspace_to_read, lsn_range)) = fringe.next_layer() {
3580 1694239 : let next_cont_lsn = lsn_range.start;
3581 1694239 : layer_to_read
3582 1694239 : .get_values_reconstruct_data(
3583 1694239 : keyspace_to_read.clone(),
3584 1694239 : lsn_range,
3585 1694239 : reconstruct_state,
3586 1694239 : ctx,
3587 1694239 : )
3588 1694239 : .await?;
3589 :
3590 1694239 : unmapped_keyspace = keyspace_to_read;
3591 1694239 : cont_lsn = next_cont_lsn;
3592 1694239 :
3593 1694239 : reconstruct_state.on_layer_visited(&layer_to_read);
3594 : } else {
3595 1707298 : break;
3596 1707298 : }
3597 1707298 : }
3598 1707298 :
3599 1707298 : Ok(TimelineVisitOutcome {
3600 1707298 : completed_keyspace,
3601 1707298 : image_covered_keyspace: image_covered_keyspace.consume_keyspace(),
3602 1707298 : })
3603 1707298 : }
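// A minimal sketch of the fringe discipline described in the doc comment
// above: a max-heap ordered on end LSN, so each step visits the layer with
// the highest LSN first and may push older (lower-LSN) layers discovered
// along the way. The types here are hypothetical stand-ins for the real
// `LayerFringe`.
fn example_fringe_visits_newest_first() {
    use std::collections::BinaryHeap;

    // Tuple ordering compares the LSN first; BinaryHeap is a max-heap.
    let mut fringe: BinaryHeap<(u64, &'static str)> = BinaryHeap::new();
    fringe.push((40, "delta 30..40"));
    fringe.push((10, "image @10"));
    fringe.push((30, "delta 10..30"));

    let mut visited = Vec::new();
    while let Some((end_lsn, layer)) = fringe.pop() {
        visited.push((end_lsn, layer));
        // A real read would collect reconstruct data here and could push
        // further, older layers onto the fringe.
    }
    assert_eq!(visited[0].0, 40); // the newest layer is visited first
}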
3604 :
3605 452028 : async fn get_ready_ancestor_timeline(
3606 452028 : &self,
3607 452028 : ancestor: &Arc<Timeline>,
3608 452028 : ctx: &RequestContext,
3609 452028 : ) -> Result<Arc<Timeline>, GetReadyAncestorError> {
3610 452028 : // It's possible that the ancestor timeline isn't active yet, or
3611 452028 : // is active but hasn't yet caught up to the branch point. Wait
3612 452028 : // for it.
3613 452028 : //
3614 452028 : // This cannot happen while the pageserver is running normally,
3615 452028 : // because you cannot create a branch from a point that isn't
3616 452028 : // present in the pageserver yet. However, we don't wait for the
3617 452028 : // branch point to be uploaded to cloud storage before creating
3618 452028 : // a branch. I.e., the branch LSN need not be remote consistent
3619 452028 : // for the branching operation to succeed.
3620 452028 : //
3621 452028 : // Hence, if we try to load a tenant in such a state where
3622 452028 : // 1. the existence of the branch was persisted (in IndexPart and/or locally)
3623 452028 : // 2. but the ancestor state is behind branch_lsn because it was not yet persisted
3624 452028 : // then we will need to wait for the ancestor timeline to
3625 452028 : // re-stream WAL up to branch_lsn before we access it.
3626 452028 : //
3627 452028 : // How can a tenant get in such a state?
3628 452028 : // - ungraceful pageserver process exit
3629 452028 : // - detach+attach => this is a bug, https://github.com/neondatabase/neon/issues/4219
3630 452028 : //
3631 452028 : // NB: this could be avoided by requiring
3632 452028 : // branch_lsn >= remote_consistent_lsn
3633 452028 : // during branch creation.
3634 452028 : match ancestor.wait_to_become_active(ctx).await {
3635 452024 : Ok(()) => {}
3636 : Err(TimelineState::Stopping) => {
3637 : // If an ancestor is stopping, it means the tenant is stopping: handle this the same as if this timeline was stopping.
3638 0 : return Err(GetReadyAncestorError::Cancelled);
3639 : }
3640 4 : Err(state) => {
3641 4 : return Err(GetReadyAncestorError::BadState {
3642 4 : timeline_id: ancestor.timeline_id,
3643 4 : state,
3644 4 : });
3645 : }
3646 : }
3647 452024 : ancestor
3648 452024 : .wait_lsn(
3649 452024 : self.ancestor_lsn,
3650 452024 : WaitLsnWaiter::Timeline(self),
3651 452024 : WaitLsnTimeout::Default,
3652 452024 : ctx,
3653 452024 : )
3654 452024 : .await
3655 452024 : .map_err(|e| match e {
3656 0 : e @ WaitLsnError::Timeout(_) => GetReadyAncestorError::AncestorLsnTimeout(e),
3657 0 : WaitLsnError::Shutdown => GetReadyAncestorError::Cancelled,
3658 0 : WaitLsnError::BadState(state) => GetReadyAncestorError::BadState {
3659 0 : timeline_id: ancestor.timeline_id,
3660 0 : state,
3661 0 : },
3662 452024 : })?;
3663 :
3664 452024 : Ok(ancestor.clone())
3665 452028 : }
3666 :
3667 594312 : pub(crate) fn get_shard_identity(&self) -> &ShardIdentity {
3668 594312 : &self.shard_identity
3669 594312 : }
3670 :
3671 : #[inline(always)]
3672 0 : pub(crate) fn shard_timeline_id(&self) -> ShardTimelineId {
3673 0 : ShardTimelineId {
3674 0 : shard_index: ShardIndex {
3675 0 : shard_number: self.shard_identity.number,
3676 0 : shard_count: self.shard_identity.count,
3677 0 : },
3678 0 : timeline_id: self.timeline_id,
3679 0 : }
3680 0 : }
3681 :
3682 : /// Returns a non-frozen open in-memory layer for ingestion.
3683 : ///
3684 : /// Takes a witness of the timeline writer state lock being held, because it makes no sense to call
3685 : /// this function without holding the mutex.
3686 2592 : async fn get_layer_for_write(
3687 2592 : &self,
3688 2592 : lsn: Lsn,
3689 2592 : _guard: &tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
3690 2592 : ctx: &RequestContext,
3691 2592 : ) -> anyhow::Result<Arc<InMemoryLayer>> {
3692 2592 : let mut guard = self.layers.write().await;
3693 :
3694 2592 : let last_record_lsn = self.get_last_record_lsn();
3695 2592 : ensure!(
3696 2592 : lsn > last_record_lsn,
3697 0 : "cannot modify relation after advancing last_record_lsn (incoming_lsn={}, last_record_lsn={})",
3698 : lsn,
3699 : last_record_lsn,
3700 : );
3701 :
3702 2592 : let layer = guard
3703 2592 : .open_mut()?
3704 2592 : .get_layer_for_write(
3705 2592 : lsn,
3706 2592 : self.conf,
3707 2592 : self.timeline_id,
3708 2592 : self.tenant_shard_id,
3709 2592 : &self.gate,
3710 2592 : ctx,
3711 2592 : )
3712 2592 : .await?;
3713 2592 : Ok(layer)
3714 2592 : }
3715 :
3716 10558184 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
3717 10558184 : assert!(new_lsn.is_aligned());
3718 :
3719 10558184 : self.metrics.last_record_lsn_gauge.set(new_lsn.0 as i64);
3720 10558184 : self.last_record_lsn.advance(new_lsn);
3721 10558184 : }
3722 :
3723 : /// Freeze any existing open in-memory layer and unconditionally notify the flush loop.
3724 : ///
3725 : /// Unconditional flush loop notification is given because in sharded cases we will want to
3726 : /// leave an Lsn gap. Unsharded tenants do not have Lsn gaps.
3727 2400 : async fn freeze_inmem_layer_at(
3728 2400 : &self,
3729 2400 : at: Lsn,
3730 2400 : write_lock: &mut tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
3731 2400 : ) -> Result<u64, FlushLayerError> {
3732 2400 : let frozen = {
3733 2400 : let mut guard = self.layers.write().await;
3734 2400 : guard
3735 2400 : .open_mut()?
3736 2400 : .try_freeze_in_memory_layer(at, &self.last_freeze_at, write_lock, &self.metrics)
3737 2400 : .await
3738 : };
3739 :
3740 2400 : if frozen {
3741 2344 : let now = Instant::now();
3742 2344 : *(self.last_freeze_ts.write().unwrap()) = now;
3743 2344 : }
3744 :
3745 : // Increment the flush cycle counter and wake up the flush task.
3746 : // Remember the new value, so that when we listen for the flush
3747 : // to finish, we know when the flush that we initiated has
3748 : // finished, instead of some other flush that was started earlier.
3749 2400 : let mut my_flush_request = 0;
3750 2400 :
3751 2400 : let flush_loop_state = { *self.flush_loop_state.lock().unwrap() };
3752 2400 : if !matches!(flush_loop_state, FlushLoopState::Running { .. }) {
3753 0 : return Err(FlushLayerError::NotRunning(flush_loop_state));
3754 2400 : }
3755 2400 :
3756 2400 : self.layer_flush_start_tx.send_modify(|(counter, lsn)| {
3757 2400 : my_flush_request = *counter + 1;
3758 2400 : *counter = my_flush_request;
3759 2400 : *lsn = std::cmp::max(at, *lsn);
3760 2400 : });
3761 2400 :
3762 2400 : assert_ne!(my_flush_request, 0);
3763 :
3764 2400 : Ok(my_flush_request)
3765 2400 : }
3766 :
3767 : /// Layer flusher task's main loop.
3768 880 : async fn flush_loop(
3769 880 : self: &Arc<Self>,
3770 880 : mut layer_flush_start_rx: tokio::sync::watch::Receiver<(u64, Lsn)>,
3771 880 : ctx: &RequestContext,
3772 880 : ) {
3773 : // Subscribe to L0 delta layer updates, for compaction backpressure.
3774 880 : let mut watch_l0 = match self.layers.read().await.layer_map() {
3775 880 : Ok(lm) => lm.watch_level0_deltas(),
3776 0 : Err(Shutdown) => return,
3777 : };
3778 :
3779 880 : info!("started flush loop");
3780 : loop {
3781 3206 : tokio::select! {
3782 3206 : _ = self.cancel.cancelled() => {
3783 20 : info!("shutting down layer flush task due to Timeline::cancel");
3784 20 : break;
3785 : },
3786 3206 : _ = layer_flush_start_rx.changed() => {}
3787 2326 : }
3788 2326 : trace!("waking up");
3789 2326 : let (flush_counter, frozen_to_lsn) = *layer_flush_start_rx.borrow();
3790 2326 :
3791 2326 : // The highest LSN to which we flushed in the loop over frozen layers
3792 2326 : let mut flushed_to_lsn = Lsn(0);
3793 :
3794 2326 : let result = loop {
3795 4670 : if self.cancel.is_cancelled() {
3796 0 : info!("dropping out of flush loop for timeline shutdown");
3797 : // Note: we do not bother transmitting into [`layer_flush_done_tx`], because
3798 : // anyone waiting on that will respect self.cancel as well: they will stop
3799 : // waiting at the same time we as drop out of this loop.
3800 0 : return;
3801 4670 : }
3802 4670 :
3803 4670 : // Break to notify potential waiters as soon as we've flushed the requested LSN. If
3804 4670 : // more requests have arrived in the meanwhile, we'll resume flushing afterwards.
3805 4670 : if flushed_to_lsn >= frozen_to_lsn {
3806 2270 : break Ok(());
3807 2400 : }
3808 :
3809 : // Fetch the next layer to flush, if any.
3810 2400 : let (layer, l0_count, frozen_count, frozen_size) = {
3811 2400 : let layers = self.layers.read().await;
3812 2400 : let Ok(lm) = layers.layer_map() else {
3813 0 : info!("dropping out of flush loop for timeline shutdown");
3814 0 : return;
3815 : };
3816 2400 : let l0_count = lm.level0_deltas().len();
3817 2400 : let frozen_count = lm.frozen_layers.len();
3818 2400 : let frozen_size: u64 = lm
3819 2400 : .frozen_layers
3820 2400 : .iter()
3821 2419 : .map(|l| l.estimated_in_mem_size())
3822 2400 : .sum();
3823 2400 : let layer = lm.frozen_layers.front().cloned();
3824 2400 : (layer, l0_count, frozen_count, frozen_size)
3825 2400 : // drop 'layers' lock
3826 2400 : };
3827 2400 : let Some(layer) = layer else {
3828 56 : break Ok(());
3829 : };
3830 :
3831 : // Stall flushes to apply backpressure if compaction can't keep up. This is propagated up
3832 : // to WAL ingestion by having ephemeral layer rolls wait for flushes.
3833 : //
3834 : // NB: the compaction loop only checks `compaction_threshold` every 20 seconds, so
3835 : // we can end up stalling before compaction even starts. Consider making it more
3836 : // responsive (e.g. via `watch_level0_deltas`).
3837 2344 : if let Some(stall_threshold) = self.get_l0_flush_stall_threshold() {
3838 0 : if l0_count >= stall_threshold {
3839 0 : warn!(
3840 0 : "stalling layer flushes for compaction backpressure at {l0_count} \
3841 0 : L0 layers ({frozen_count} frozen layers with {frozen_size} bytes)"
3842 : );
3843 0 : let stall_timer = self
3844 0 : .metrics
3845 0 : .flush_delay_histo
3846 0 : .start_timer()
3847 0 : .record_on_drop();
3848 0 : tokio::select! {
3849 0 : result = watch_l0.wait_for(|l0| *l0 < stall_threshold) => {
3850 0 : if let Ok(l0) = result.as_deref() {
3851 0 : let delay = stall_timer.elapsed().as_secs_f64();
3852 0 : info!("resuming layer flushes at {l0} L0 layers after {delay:.3}s");
3853 0 : }
3854 : },
3855 0 : _ = self.cancel.cancelled() => {},
3856 : }
3857 0 : continue; // check again
3858 0 : }
3859 2344 : }
3860 :
3861 : // Flush the layer.
3862 2344 : let flush_timer = self.metrics.flush_time_histo.start_timer();
3863 2344 : match self.flush_frozen_layer(layer, ctx).await {
3864 2344 : Ok(layer_lsn) => flushed_to_lsn = max(flushed_to_lsn, layer_lsn),
3865 : Err(FlushLayerError::Cancelled) => {
3866 0 : info!("dropping out of flush loop for timeline shutdown");
3867 0 : return;
3868 : }
3869 0 : err @ Err(
3870 0 : FlushLayerError::NotRunning(_)
3871 0 : | FlushLayerError::Other(_)
3872 0 : | FlushLayerError::CreateImageLayersError(_),
3873 0 : ) => {
3874 0 : error!("could not flush frozen layer: {err:?}");
3875 0 : break err.map(|_| ());
3876 : }
3877 : }
3878 2344 : let flush_duration = flush_timer.stop_and_record();
3879 :
3880 : // Delay the next flush to apply backpressure if compaction can't keep up. We delay by the
3881 : // flush duration such that the flush takes 2x as long. This is propagated up to WAL
3882 : // ingestion by having ephemeral layer rolls wait for flushes.
3883 2344 : if let Some(delay_threshold) = self.get_l0_flush_delay_threshold() {
3884 0 : if l0_count >= delay_threshold {
3885 0 : let delay = flush_duration.as_secs_f64();
3886 0 : info!(
3887 0 : "delaying layer flush by {delay:.3}s for compaction backpressure at \
3888 0 : {l0_count} L0 layers ({frozen_count} frozen layers with {frozen_size} bytes)"
3889 : );
3890 0 : let _delay_timer = self
3891 0 : .metrics
3892 0 : .flush_delay_histo
3893 0 : .start_timer()
3894 0 : .record_on_drop();
3895 0 : tokio::select! {
3896 0 : _ = tokio::time::sleep(flush_duration) => {},
3897 0 : _ = watch_l0.wait_for(|l0| *l0 < delay_threshold) => {},
3898 0 : _ = self.cancel.cancelled() => {},
3899 : }
3900 0 : }
3901 2344 : }
3902 : };
3903 :
3904 : // Unsharded tenants should never advance their LSN beyond the end of the
3905 : // highest layer they write: such gaps between layer data and the frozen LSN
3906 : // are only legal on sharded tenants.
3907 2326 : debug_assert!(
3908 2326 : self.shard_identity.count.count() > 1
3909 2326 : || flushed_to_lsn >= frozen_to_lsn
3910 56 : || !flushed_to_lsn.is_valid()
3911 : );
3912 :
3913 2326 : if flushed_to_lsn < frozen_to_lsn && self.shard_identity.count.count() > 1 {
3914 : // If our layer flushes didn't carry disk_consistent_lsn up to the `to_lsn` advertised
3915 : // to us via layer_flush_start_rx, then advance it here.
3916 : //
3917 : // This path is only taken for tenants with multiple shards: single sharded tenants should
3918 : // never encounter a gap in the wal.
3919 0 : let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
3920 0 : tracing::debug!("Advancing disk_consistent_lsn across layer gap {old_disk_consistent_lsn}->{frozen_to_lsn}");
3921 0 : if self.set_disk_consistent_lsn(frozen_to_lsn) {
3922 0 : if let Err(e) = self.schedule_uploads(frozen_to_lsn, vec![]) {
3923 0 : tracing::warn!("Failed to schedule metadata upload after updating disk_consistent_lsn: {e}");
3924 0 : }
3925 0 : }
3926 2326 : }
3927 :
3928 : // Notify any listeners that we're done
3929 2326 : let _ = self
3930 2326 : .layer_flush_done_tx
3931 2326 : .send_replace((flush_counter, result));
3932 : }
3933 20 : }
3934 :
3935 : /// Waits any flush request created by [`Self::freeze_inmem_layer_at`] to complete.
3936 2240 : async fn wait_flush_completion(&self, request: u64) -> Result<(), FlushLayerError> {
3937 2240 : let mut rx = self.layer_flush_done_tx.subscribe();
3938 : loop {
3939 : {
3940 4558 : let (last_result_counter, last_result) = &*rx.borrow();
3941 4558 : if *last_result_counter >= request {
3942 2240 : if let Err(err) = last_result {
3943 : // We already logged the original error in
3944 : // flush_loop. We cannot propagate it to the caller
3945 : // here, because it might not be cloneable.
3946 0 : return Err(err.clone());
3947 : } else {
3948 2240 : return Ok(());
3949 : }
3950 2318 : }
3951 2318 : }
3952 2318 : trace!("waiting for flush to complete");
3953 2318 : tokio::select! {
3954 2318 : rx_e = rx.changed() => {
3955 2318 : rx_e.map_err(|_| FlushLayerError::NotRunning(*self.flush_loop_state.lock().unwrap()))?;
3956 : },
3957 : // Cancellation safety: we are not leaving an I/O in-flight for the flush, we're just ignoring
3958 : // the notification from [`flush_loop`] that it completed.
3959 2318 : _ = self.cancel.cancelled() => {
3960 0 : tracing::info!("Cancelled layer flush due to timeline shutdown");
3961 0 : return Ok(())
3962 : }
3963 : };
3964 2318 : trace!("done")
3965 : }
3966 2240 : }
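// Hedged sketch of the flush handshake between `freeze_inmem_layer_at` and
// `wait_flush_completion`: the requester bumps a counter in one watch
// channel, and the flush loop publishes the highest completed counter in
// another; a waiter is done once the completed counter catches up. Error
// plumbing and cancellation are omitted; the names are hypothetical.
async fn example_flush_handshake(
    start_tx: &tokio::sync::watch::Sender<u64>,
    done_rx: &mut tokio::sync::watch::Receiver<u64>,
) {
    // Request a flush and remember which request number is ours.
    let mut my_flush_request = 0;
    start_tx.send_modify(|counter| {
        *counter += 1;
        my_flush_request = *counter;
    });

    // Wait until the flush loop reports our request (or a later one) done.
    while *done_rx.borrow() < my_flush_request {
        if done_rx.changed().await.is_err() {
            return; // flush loop exited
        }
    }
}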
3967 :
3968 : /// Flush one frozen in-memory layer to disk, as a new delta layer.
3969 : ///
3970 : /// Return value is the last lsn (inclusive) of the layer that was frozen.
3971 : #[instrument(skip_all, fields(layer=%frozen_layer))]
3972 : async fn flush_frozen_layer(
3973 : self: &Arc<Self>,
3974 : frozen_layer: Arc<InMemoryLayer>,
3975 : ctx: &RequestContext,
3976 : ) -> Result<Lsn, FlushLayerError> {
3977 : debug_assert_current_span_has_tenant_and_timeline_id();
3978 :
3979 : // As a special case, when we have just imported an image into the repository,
3980 : // instead of writing out a L0 delta layer, we directly write out image layer
3981 : // files instead. This is possible as long as *all* the data imported into the
3982 : // repository have the same LSN.
3983 : let lsn_range = frozen_layer.get_lsn_range();
3984 :
3985 : // Whether to directly create image layers for this flush, or flush them as delta layers
3986 : let create_image_layer =
3987 : lsn_range.start == self.initdb_lsn && lsn_range.end == Lsn(self.initdb_lsn.0 + 1);
3988 :
3989 : #[cfg(test)]
3990 : {
3991 : match &mut *self.flush_loop_state.lock().unwrap() {
3992 : FlushLoopState::NotStarted | FlushLoopState::Exited => {
3993 : panic!("flush loop not running")
3994 : }
3995 : FlushLoopState::Running {
3996 : expect_initdb_optimization,
3997 : initdb_optimization_count,
3998 : ..
3999 : } => {
4000 : if create_image_layer {
4001 : *initdb_optimization_count += 1;
4002 : } else {
4003 : assert!(!*expect_initdb_optimization, "expected initdb optimization");
4004 : }
4005 : }
4006 : }
4007 : }
4008 :
4009 : let (layers_to_upload, delta_layer_to_add) = if create_image_layer {
4010 : // Note: The 'ctx' in use here has DownloadBehavior::Error. We should not
4011 : // require downloading anything during initial import.
4012 : let ((rel_partition, metadata_partition), _lsn) = self
4013 : .repartition(
4014 : self.initdb_lsn,
4015 : self.get_compaction_target_size(),
4016 : EnumSet::empty(),
4017 : ctx,
4018 : )
4019 : .await
4020 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e.into()))?;
4021 :
4022 : if self.cancel.is_cancelled() {
4023 : return Err(FlushLayerError::Cancelled);
4024 : }
4025 :
4026 : // Ensure that we have a single call to `create_image_layers` with a combined dense keyspace.
4027 : // So that the key ranges don't overlap.
4028 : let mut partitions = KeyPartitioning::default();
4029 : partitions.parts.extend(rel_partition.parts);
4030 : if !metadata_partition.parts.is_empty() {
4031 : assert_eq!(
4032 : metadata_partition.parts.len(),
4033 : 1,
4034 : "currently sparse keyspace should only contain a single metadata keyspace"
4035 : );
4036 : // Safety: create_image_layers treat sparse keyspaces differently that it does not scan
4037 : // every single key within the keyspace, and therefore, it's safe to force converting it
4038 : // into a dense keyspace before calling this function.
4039 : partitions
4040 : .parts
4041 : .extend(metadata_partition.into_dense().parts);
4042 : }
4043 :
4044 : let mut layers_to_upload = Vec::new();
4045 : layers_to_upload.extend(
4046 : self.create_image_layers(
4047 : &partitions,
4048 : self.initdb_lsn,
4049 : ImageLayerCreationMode::Initial,
4050 : ctx,
4051 : )
4052 : .await?,
4053 : );
4054 :
4055 : (layers_to_upload, None)
4056 : } else {
4057 : // Normal case, write out a L0 delta layer file.
4058 : // `create_delta_layer` will not modify the layer map.
4059 : // We will remove frozen layer and add delta layer in one atomic operation later.
4060 : let Some(layer) = self
4061 : .create_delta_layer(&frozen_layer, None, ctx)
4062 : .await
4063 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?
4064 : else {
4065 : panic!("delta layer cannot be empty if no filter is applied");
4066 : };
4067 : (
4068 : // FIXME: even though we assume a single image and a single delta layer,
4069 : // we push them to a vec
4070 : vec![layer.clone()],
4071 : Some(layer),
4072 : )
4073 : };
4074 :
4075 : pausable_failpoint!("flush-layer-cancel-after-writing-layer-out-pausable");
4076 :
4077 : if self.cancel.is_cancelled() {
4078 : return Err(FlushLayerError::Cancelled);
4079 : }
4080 :
4081 : let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1);
4082 :
4083 : // The new on-disk layers are now in the layer map. We can remove the
4084 : // in-memory layer from the map now. The flushed layer is stored in
4085 : // the mapping in `create_delta_layer`.
4086 : {
4087 : let mut guard = self.layers.write().await;
4088 :
4089 : guard.open_mut()?.finish_flush_l0_layer(
4090 : delta_layer_to_add.as_ref(),
4091 : &frozen_layer,
4092 : &self.metrics,
4093 : );
4094 :
4095 : if self.set_disk_consistent_lsn(disk_consistent_lsn) {
4096 : // Schedule remote uploads that will reflect our new disk_consistent_lsn
4097 : self.schedule_uploads(disk_consistent_lsn, layers_to_upload)
4098 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?;
4099 : }
4100 : // release lock on 'layers'
4101 : };
4102 :
4103 : // Backpressure mechanism: wait with continuation of the flush loop until we have uploaded all layer files.
4104 : // This makes us refuse ingest until the new layers have been persisted to the remote
4105 : // TODO: remove this, and rely on l0_flush_{delay,stall}_threshold instead.
4106 : if self.get_l0_flush_wait_upload() {
4107 : let start = Instant::now();
4108 : self.remote_client
4109 : .wait_completion()
4110 : .await
4111 0 : .map_err(|e| match e {
4112 : WaitCompletionError::UploadQueueShutDownOrStopped
4113 : | WaitCompletionError::NotInitialized(
4114 : NotInitialized::ShuttingDown | NotInitialized::Stopped,
4115 0 : ) => FlushLayerError::Cancelled,
4116 : WaitCompletionError::NotInitialized(NotInitialized::Uninitialized) => {
4117 0 : FlushLayerError::Other(anyhow!(e).into())
4118 : }
4119 0 : })?;
4120 : let duration = start.elapsed().as_secs_f64();
4121 : self.metrics.flush_wait_upload_time_gauge_add(duration);
4122 : }
4123 :
4124 : // FIXME: between create_delta_layer and the scheduling of the upload in `update_metadata_file`,
4125 : // a compaction can delete the file and then it won't be available for uploads any more.
4126 : // We still schedule the upload, resulting in an error, but ideally we'd somehow avoid this
4127 : // race situation.
4128 : // See https://github.com/neondatabase/neon/issues/4526
4129 : pausable_failpoint!("flush-frozen-pausable");
4130 :
4131 : // This failpoint is used by another test case `test_pageserver_recovery`.
4132 : fail_point!("flush-frozen-exit");
4133 :
4134 : Ok(Lsn(lsn_range.end.0 - 1))
4135 : }
4136 :
4137 : /// Return true if the value changed
4138 : ///
4139 : /// This function must only be used from the layer flush task.
4140 2344 : fn set_disk_consistent_lsn(&self, new_value: Lsn) -> bool {
4141 2344 : let old_value = self.disk_consistent_lsn.fetch_max(new_value);
4142 2344 : assert!(new_value >= old_value, "disk_consistent_lsn must be growing monotonically at runtime; current {old_value}, offered {new_value}");
4143 :
4144 2344 : self.metrics
4145 2344 : .disk_consistent_lsn_gauge
4146 2344 : .set(new_value.0 as i64);
4147 2344 : new_value != old_value
4148 2344 : }
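// Hedged sketch of the monotonic-advance idiom above: `fetch_max` returns
// the previous value, so "did the value change" is just `old != new`, with
// no separate load/compare/store race window. Self-contained.
fn example_monotonic_advance() {
    use std::sync::atomic::{AtomicU64, Ordering};

    fn advance(lsn: &AtomicU64, new_value: u64) -> bool {
        let old_value = lsn.fetch_max(new_value, Ordering::AcqRel);
        assert!(new_value >= old_value, "LSN must grow monotonically");
        new_value != old_value
    }

    let lsn = AtomicU64::new(100);
    assert!(advance(&lsn, 200)); // value changed: schedule follow-up work
    assert!(!advance(&lsn, 200)); // same value offered again: no change
    assert_eq!(lsn.load(Ordering::Acquire), 200);
}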
4149 :
4150 : /// Update metadata file
4151 2444 : fn schedule_uploads(
4152 2444 : &self,
4153 2444 : disk_consistent_lsn: Lsn,
4154 2444 : layers_to_upload: impl IntoIterator<Item = ResidentLayer>,
4155 2444 : ) -> anyhow::Result<()> {
4156 2444 : // We can only save a valid 'prev_record_lsn' value on disk if we
4157 2444 : // flushed *all* in-memory changes to disk. We only track
4158 2444 : // 'prev_record_lsn' in memory for the latest processed record, so we
4159 2444 : // don't remember what the correct value that corresponds to some old
4160 2444 : // LSN is. But if we flush everything, then the value corresponding to the
4161 2444 : // current 'last_record_lsn' is correct and we can store it on disk.
4162 2444 : let RecordLsn {
4163 2444 : last: last_record_lsn,
4164 2444 : prev: prev_record_lsn,
4165 2444 : } = self.last_record_lsn.load();
4166 2444 : let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
4167 2188 : Some(prev_record_lsn)
4168 : } else {
4169 256 : None
4170 : };
4171 :
4172 2444 : let update = crate::tenant::metadata::MetadataUpdate::new(
4173 2444 : disk_consistent_lsn,
4174 2444 : ondisk_prev_record_lsn,
4175 2444 : *self.latest_gc_cutoff_lsn.read(),
4176 2444 : );
4177 2444 :
4178 2444 : fail_point!("checkpoint-before-saving-metadata", |x| bail!(
4179 0 : "{}",
4180 0 : x.unwrap()
4181 2444 : ));
4182 :
4183 4812 : for layer in layers_to_upload {
4184 2368 : self.remote_client.schedule_layer_file_upload(layer)?;
4185 : }
4186 2444 : self.remote_client
4187 2444 : .schedule_index_upload_for_metadata_update(&update)?;
4188 :
4189 2444 : Ok(())
4190 2444 : }
4191 :
4192 0 : pub(crate) async fn preserve_initdb_archive(&self) -> anyhow::Result<()> {
4193 0 : self.remote_client
4194 0 : .preserve_initdb_archive(
4195 0 : &self.tenant_shard_id.tenant_id,
4196 0 : &self.timeline_id,
4197 0 : &self.cancel,
4198 0 : )
4199 0 : .await
4200 0 : }
4201 :
4202 : // Write out the given frozen in-memory layer as a new L0 delta file. This L0 file will not be tracked
4203 : // in the layer map immediately. The caller is responsible for putting it into the layer map.
4204 1936 : async fn create_delta_layer(
4205 1936 : self: &Arc<Self>,
4206 1936 : frozen_layer: &Arc<InMemoryLayer>,
4207 1936 : key_range: Option<Range<Key>>,
4208 1936 : ctx: &RequestContext,
4209 1936 : ) -> anyhow::Result<Option<ResidentLayer>> {
4210 1936 : let self_clone = Arc::clone(self);
4211 1936 : let frozen_layer = Arc::clone(frozen_layer);
4212 1936 : let ctx = ctx.attached_child();
4213 1936 : let work = async move {
4214 1936 : let Some((desc, path)) = frozen_layer
4215 1936 : .write_to_disk(&ctx, key_range, self_clone.l0_flush_global_state.inner())
4216 1936 : .await?
4217 : else {
4218 0 : return Ok(None);
4219 : };
4220 1936 : let new_delta = Layer::finish_creating(self_clone.conf, &self_clone, desc, &path)?;
4221 :
4222 : // The write_to_disk() above calls writer.finish() which already did the fsync of the inodes.
4223 : // We just need to fsync the directory in which these inodes are linked,
4224 : // which we know to be the timeline directory.
4225 : //
4226 : // We use fatal_err() below because, after write_to_disk returns with success,
4227 : // the in-memory state of the filesystem already has the layer file in its final place,
4228 : // and subsequent pageserver code could think it's durable while it really isn't.
4229 1936 : let timeline_dir = VirtualFile::open(
4230 1936 : &self_clone
4231 1936 : .conf
4232 1936 : .timeline_path(&self_clone.tenant_shard_id, &self_clone.timeline_id),
4233 1936 : &ctx,
4234 1936 : )
4235 1936 : .await
4236 1936 : .fatal_err("VirtualFile::open for timeline dir fsync");
4237 1936 : timeline_dir
4238 1936 : .sync_all()
4239 1936 : .await
4240 1936 : .fatal_err("VirtualFile::sync_all timeline dir");
4241 1936 : anyhow::Ok(Some(new_delta))
4242 1936 : };
4243 : // Before tokio-epoll-uring, we ran write_to_disk & the sync_all inside spawn_blocking.
4244 : // Preserve that behavior to maintain the same behavior for `virtual_file_io_engine=std-fs`.
4245 : use crate::virtual_file::io_engine::IoEngine;
4246 1936 : match crate::virtual_file::io_engine::get() {
4247 0 : IoEngine::NotSet => panic!("io engine not set"),
4248 : IoEngine::StdFs => {
4249 968 : let span = tracing::info_span!("blocking");
4250 968 : tokio::task::spawn_blocking({
4251 968 : move || Handle::current().block_on(work.instrument(span))
4252 968 : })
4253 968 : .await
4254 968 : .context("spawn_blocking")
4255 968 : .and_then(|x| x)
4256 : }
4257 : #[cfg(target_os = "linux")]
4258 968 : IoEngine::TokioEpollUring => work.await,
4259 : }
4260 1936 : }
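     :
     : // Sketch of the dispatch pattern above (exposition only; the helper name is
     : // hypothetical): an async future runs either inline or on the blocking pool,
     : // with Handle::current().block_on() bridging back into async inside
     : // spawn_blocking, exactly as the StdFs arm does.
     : //
     : //     use tokio::runtime::Handle;
     : //
     : //     async fn run_maybe_blocking<F, T>(work: F, use_blocking_pool: bool) -> anyhow::Result<T>
     : //     where
     : //         F: std::future::Future<Output = anyhow::Result<T>> + Send + 'static,
     : //         T: Send + 'static,
     : //     {
     : //         if use_blocking_pool {
     : //             tokio::task::spawn_blocking(move || Handle::current().block_on(work))
     : //                 .await
     : //                 .map_err(anyhow::Error::from)? // flatten the JoinError
     : //         } else {
     : //             work.await
     : //         }
     : //     }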
4261 :
4262 1136 : async fn repartition(
4263 1136 : &self,
4264 1136 : lsn: Lsn,
4265 1136 : partition_size: u64,
4266 1136 : flags: EnumSet<CompactFlags>,
4267 1136 : ctx: &RequestContext,
4268 1136 : ) -> Result<((KeyPartitioning, SparseKeyPartitioning), Lsn), CompactionError> {
4269 1136 : let Ok(mut guard) = self.partitioning.try_write_guard() else {
4270 : // NB: there are two callers, one is the compaction task, of which there is only one per struct Tenant and hence Timeline.
4271 : // The other is the initdb optimization in flush_frozen_layer, used by `bootstrap_timeline`, which runs before `.activate()`
4272 : // and hence before the compaction task starts.
4273 0 : return Err(CompactionError::Other(anyhow!(
4274 0 : "repartition() called concurrently"
4275 0 : )));
4276 : };
4277 1136 : let ((dense_partition, sparse_partition), partition_lsn) = &*guard.read();
4278 1136 : if lsn < *partition_lsn {
4279 0 : return Err(CompactionError::Other(anyhow!(
4280 0 : "repartition() called with LSN going backwards, this should not happen"
4281 0 : )));
4282 1136 : }
4283 1136 :
4284 1136 : let distance = lsn.0 - partition_lsn.0;
4285 1136 : if *partition_lsn != Lsn(0)
4286 524 : && distance <= self.repartition_threshold
4287 524 : && !flags.contains(CompactFlags::ForceRepartition)
4288 : {
4289 496 : debug!(
4290 : distance,
4291 : threshold = self.repartition_threshold,
4292 0 : "no repartitioning needed"
4293 : );
4294 496 : return Ok((
4295 496 : (dense_partition.clone(), sparse_partition.clone()),
4296 496 : *partition_lsn,
4297 496 : ));
4298 640 : }
4299 :
4300 640 : let (dense_ks, sparse_ks) = self.collect_keyspace(lsn, ctx).await?;
4301 640 : let dense_partitioning = dense_ks.partition(&self.shard_identity, partition_size);
4302 640 : let sparse_partitioning = SparseKeyPartitioning {
4303 640 : parts: vec![sparse_ks],
4304 640 : }; // no partitioning for metadata keys for now
4305 640 : let result = ((dense_partitioning, sparse_partitioning), lsn);
4306 640 : guard.write(result.clone());
4307 640 : Ok(result)
4308 1136 : }
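     :
     : // For exposition, the reuse rule above reduces to a small predicate
     : // (hypothetical helper; u64s stand in for Lsn, and lsn >= partition_lsn is
     : // already guaranteed by the backwards-LSN check above):
     : //
     : //     fn reuse_cached_partitioning(partition_lsn: u64, lsn: u64, threshold: u64, force: bool) -> bool {
     : //         // Keep the cached partitioning while the LSN has not advanced
     : //         // past the repartition threshold, unless explicitly forced.
     : //         partition_lsn != 0 && lsn - partition_lsn <= threshold && !force
     : //     }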
4309 :
4310 : // Is it time to create a new image layer for the given partition?
4311 28 : async fn time_for_new_image_layer(&self, partition: &KeySpace, lsn: Lsn) -> bool {
4312 28 : let threshold = self.get_image_creation_threshold();
4313 :
4314 28 : let guard = self.layers.read().await;
4315 28 : let Ok(layers) = guard.layer_map() else {
4316 0 : return false;
4317 : };
4318 :
4319 28 : let mut max_deltas = 0;
4320 56 : for part_range in &partition.ranges {
4321 28 : let image_coverage = layers.image_coverage(part_range, lsn);
4322 56 : for (img_range, last_img) in image_coverage {
4323 28 : let img_lsn = if let Some(last_img) = last_img {
4324 0 : last_img.get_lsn_range().end
4325 : } else {
4326 28 : Lsn(0)
4327 : };
4328 : // Let's consider an example:
4329 : //
4330 : // delta layer with LSN range 71-81
4331 : // delta layer with LSN range 81-91
4332 : // delta layer with LSN range 91-101
4333 : // image layer at LSN 100
4334 : //
4335 : // If 'lsn' is still 100, i.e. no new WAL has been processed since the last image layer,
4336 : // there's no need to create a new one. We check this case explicitly, to avoid passing
4337 : // a bogus range to count_deltas below, with start > end. It's even possible that there
4338 : // are some delta layers *later* than current 'lsn', if more WAL was processed and flushed
4339 : // after we read last_record_lsn, which is passed here in the 'lsn' argument.
4340 28 : if img_lsn < lsn {
4341 28 : let num_deltas =
4342 28 : layers.count_deltas(&img_range, &(img_lsn..lsn), Some(threshold));
4343 28 :
4344 28 : max_deltas = max_deltas.max(num_deltas);
4345 28 : if num_deltas >= threshold {
4346 0 : debug!(
4347 0 : "key range {}-{}, has {} deltas on this timeline in LSN range {}..{}",
4348 : img_range.start, img_range.end, num_deltas, img_lsn, lsn
4349 : );
4350 0 : return true;
4351 28 : }
4352 0 : }
4353 : }
4354 : }
4355 :
4356 28 : debug!(
4357 : max_deltas,
4358 0 : "none of the partitioned ranges had >= {threshold} deltas"
4359 : );
4360 28 : false
4361 28 : }
4362 :
4363 : /// Create image layers for Postgres data. Assumes the caller passes a partition that is not too large,
4364 : /// so that at most one image layer will be produced from this function.
4365 : #[allow(clippy::too_many_arguments)]
4366 460 : async fn create_image_layer_for_rel_blocks(
4367 460 : self: &Arc<Self>,
4368 460 : partition: &KeySpace,
4369 460 : mut image_layer_writer: ImageLayerWriter,
4370 460 : lsn: Lsn,
4371 460 : ctx: &RequestContext,
4372 460 : img_range: Range<Key>,
4373 460 : start: Key,
4374 460 : io_concurrency: IoConcurrency,
4375 460 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
4376 460 : let mut wrote_keys = false;
4377 460 :
4378 460 : let mut key_request_accum = KeySpaceAccum::new();
4379 3044 : for range in &partition.ranges {
4380 2584 : let mut key = range.start;
4381 5600 : while key < range.end {
4382 : // Decide whether to retain this key: usually we do, but sharded tenants may
4383 : // need to drop keys that don't belong to them. If we retain the key, add it
4384 : // to `key_request_accum`, so that a vectored get can be issued for it later.
4385 3016 : if self.shard_identity.is_key_disposable(&key) {
4386 0 : debug!(
4387 0 : "Dropping key {} during compaction (it belongs on shard {:?})",
4388 0 : key,
4389 0 : self.shard_identity.get_shard_number(&key)
4390 : );
4391 3016 : } else {
4392 3016 : key_request_accum.add_key(key);
4393 3016 : }
4394 :
4395 3016 : let last_key_in_range = key.next() == range.end;
4396 3016 : key = key.next();
4397 3016 :
4398 3016 : // Maybe flush `key_request_accum`
4399 3016 : if key_request_accum.raw_size() >= Timeline::MAX_GET_VECTORED_KEYS
4400 3016 : || (last_key_in_range && key_request_accum.raw_size() > 0)
4401 : {
4402 2584 : let results = self
4403 2584 : .get_vectored(
4404 2584 : key_request_accum.consume_keyspace(),
4405 2584 : lsn,
4406 2584 : io_concurrency.clone(),
4407 2584 : ctx,
4408 2584 : )
4409 2584 : .await?;
4410 :
4411 2584 : if self.cancel.is_cancelled() {
4412 0 : return Err(CreateImageLayersError::Cancelled);
4413 2584 : }
4414 :
4415 5600 : for (img_key, img) in results {
4416 3016 : let img = match img {
4417 3016 : Ok(img) => img,
4418 0 : Err(err) => {
4419 0 : // If we fail to reconstruct a VM or FSM page, we can zero the
4420 0 : // page without losing any actual user data. That seems better
4421 0 : // than failing repeatedly and getting stuck.
4422 0 : //
4423 0 : // We had a bug at one point, where we truncated the FSM and VM
4424 0 : // in the pageserver, but the Postgres didn't know about that
4425 0 : // and continued to generate incremental WAL records for pages
4426 0 : // that didn't exist in the pageserver. Trying to replay those
4427 0 : // WAL records failed to find the previous image of the page.
4428 0 : // This special case allows us to recover from that situation.
4429 0 : // See https://github.com/neondatabase/neon/issues/2601.
4430 0 : //
4431 0 : // Unfortunately we cannot do this for the main fork, or for
4432 0 : // any metadata keys, as that would lead to actual data
4433 0 : // loss.
4434 0 : if img_key.is_rel_fsm_block_key() || img_key.is_rel_vm_block_key() {
4435 0 : warn!("could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}");
4436 0 : ZERO_PAGE.clone()
4437 : } else {
4438 0 : return Err(CreateImageLayersError::from(err));
4439 : }
4440 : }
4441 : };
4442 :
4443 : // Write all the keys we just read into our new image layer.
4444 3016 : image_layer_writer.put_image(img_key, img, ctx).await?;
4445 3016 : wrote_keys = true;
4446 : }
4447 432 : }
4448 : }
4449 : }
4450 :
4451 460 : if wrote_keys {
4452 : // Normal path: we have written some data into the new image layer for this
4453 : // partition, so flush it to disk.
4454 460 : info!(
4455 0 : "produced image layer for rel {}",
4456 0 : ImageLayerName {
4457 0 : key_range: img_range.clone(),
4458 0 : lsn
4459 0 : },
4460 : );
4461 460 : Ok(ImageLayerCreationOutcome {
4462 460 : unfinished_image_layer: Some(image_layer_writer),
4463 460 : next_start_key: img_range.end,
4464 460 : })
4465 : } else {
4466 : // Special case: the image layer may be empty if this is a sharded tenant and the
4467 : // partition does not cover any keys owned by this shard. In this case, to ensure
4468 : // we don't leave gaps between image layers, leave `start` where it is, so that the next
4469 : // layer we write will cover the key range that we just scanned.
4470 0 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
4471 0 : Ok(ImageLayerCreationOutcome {
4472 0 : unfinished_image_layer: None,
4473 0 : next_start_key: start,
4474 0 : })
4475 : }
4476 460 : }
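     :
     : // Sketch of the batching loop above (exposition only; MAX_BATCH is a
     : // stand-in for Timeline::MAX_GET_VECTORED_KEYS): keys accumulate until the
     : // batch is full or the range ends, and each batch becomes one vectored get.
     : //
     : //     const MAX_BATCH: usize = 32; // hypothetical value
     : //
     : //     fn batch_keys(keys: &[u64]) -> Vec<Vec<u64>> {
     : //         let mut batches = Vec::new();
     : //         let mut cur = Vec::new();
     : //         for &k in keys {
     : //             cur.push(k);
     : //             if cur.len() >= MAX_BATCH {
     : //                 batches.push(std::mem::take(&mut cur));
     : //             }
     : //         }
     : //         if !cur.is_empty() {
     : //             batches.push(cur); // final partial batch, like last_key_in_range
     : //         }
     : //         batches
     : //     }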
4477 :
4478 : /// Create an image layer for metadata keys. This function produces one image layer for all metadata
4479 : /// keys for now. Because the metadata keyspace cannot exceed the basebackup size limit, all
4480 : /// metadata keys are expected to fit into a single image layer of reasonable size.
4481 : #[allow(clippy::too_many_arguments)]
4482 440 : async fn create_image_layer_for_metadata_keys(
4483 440 : self: &Arc<Self>,
4484 440 : partition: &KeySpace,
4485 440 : mut image_layer_writer: ImageLayerWriter,
4486 440 : lsn: Lsn,
4487 440 : ctx: &RequestContext,
4488 440 : img_range: Range<Key>,
4489 440 : mode: ImageLayerCreationMode,
4490 440 : start: Key,
4491 440 : io_concurrency: IoConcurrency,
4492 440 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
4493 440 : // Metadata keys image layer creation.
4494 440 : let mut reconstruct_state = ValuesReconstructState::new(io_concurrency);
4495 440 : let begin = Instant::now();
4496 440 : let data = self
4497 440 : .get_vectored_impl(partition.clone(), lsn, &mut reconstruct_state, ctx)
4498 440 : .await?;
4499 440 : let (data, total_kb_retrieved, total_keys_retrieved) = {
4500 440 : let mut new_data = BTreeMap::new();
4501 440 : let mut total_kb_retrieved = 0;
4502 440 : let mut total_keys_retrieved = 0;
4503 20464 : for (k, v) in data {
4504 20024 : let v = v?;
4505 20024 : total_kb_retrieved += KEY_SIZE + v.len();
4506 20024 : total_keys_retrieved += 1;
4507 20024 : new_data.insert(k, v);
4508 : }
4509 440 : (new_data, total_kb_retrieved / 1024, total_keys_retrieved)
4510 440 : };
4511 440 : let delta_files_accessed = reconstruct_state.get_delta_layers_visited();
4512 440 : let elapsed = begin.elapsed();
4513 440 :
4514 440 : let trigger_generation = delta_files_accessed as usize >= MAX_AUX_FILE_V2_DELTAS;
4515 440 : info!(
4516 0 : "metadata key compaction: trigger_generation={trigger_generation}, delta_files_accessed={delta_files_accessed}, total_kb_retrieved={total_kb_retrieved}, total_keys_retrieved={total_keys_retrieved}, read_time={}s", elapsed.as_secs_f64()
4517 : );
4518 :
4519 440 : if !trigger_generation && mode == ImageLayerCreationMode::Try {
4520 4 : return Ok(ImageLayerCreationOutcome {
4521 4 : unfinished_image_layer: None,
4522 4 : next_start_key: img_range.end,
4523 4 : });
4524 436 : }
4525 436 : if self.cancel.is_cancelled() {
4526 0 : return Err(CreateImageLayersError::Cancelled);
4527 436 : }
4528 436 : let mut wrote_any_image = false;
4529 20460 : for (k, v) in data {
4530 20024 : if v.is_empty() {
4531 : // the key has been deleted, it does not need an image
4532 : // in metadata keyspace, an empty image == tombstone
4533 16 : continue;
4534 20008 : }
4535 20008 : wrote_any_image = true;
4536 20008 :
4537 20008 : // No need to handle sharding b/c metadata keys are always on the 0-th shard.
4538 20008 :
4539 20008 : // TODO: split image layers to avoid too large layer files. Too large image files are not handled
4540 20008 : // on the normal data path either.
4541 20008 : image_layer_writer.put_image(k, v, ctx).await?;
4542 : }
4543 :
4544 436 : if wrote_any_image {
4545 : // Normal path: we have written some data into the new image layer for this
4546 : // partition, so flush it to disk.
4547 24 : info!(
4548 0 : "created image layer for metadata {}",
4549 0 : ImageLayerName {
4550 0 : key_range: img_range.clone(),
4551 0 : lsn
4552 0 : }
4553 : );
4554 24 : Ok(ImageLayerCreationOutcome {
4555 24 : unfinished_image_layer: Some(image_layer_writer),
4556 24 : next_start_key: img_range.end,
4557 24 : })
4558 : } else {
4559 : // Special case: the image layer may be empty if this is a sharded tenant and the
4560 : // partition does not cover any keys owned by this shard. In this case, to ensure
4561 : // we don't leave gaps between image layers, leave `start` where it is, so that the next
4562 : // layer we write will cover the key range that we just scanned.
4563 412 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
4564 412 : Ok(ImageLayerCreationOutcome {
4565 412 : unfinished_image_layer: None,
4566 412 : next_start_key: start,
4567 412 : })
4568 : }
4569 440 : }
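     :
     : // Minimal sketch of the tombstone rule above (exposition only; values are
     : // simplified to byte vectors): in the metadata keyspace an empty image is a
     : // tombstone and must not be written into the new image layer.
     : //
     : //     fn live_metadata_entries(data: Vec<(u64, Vec<u8>)>) -> Vec<(u64, Vec<u8>)> {
     : //         data.into_iter().filter(|(_, v)| !v.is_empty()).collect()
     : //     }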
4570 :
4571 : /// Predicate function which indicates whether we should check if new image layers
4572 : /// are required. Since checking if new image layers are required is expensive in
4573 : /// terms of CPU, we only do it in the following cases:
4574 : /// 1. If the timeline has ingested sufficient WAL to justify the cost
4575 : /// 2. If enough time has passed since the last check:
4576 : /// 1. For large tenants, we wish to perform the check more often since they
4577 : /// suffer from the lack of image layers
4578 : /// 2. For small tenants (that can mostly fit in RAM), we use a much longer interval
4579 1136 : fn should_check_if_image_layers_required(self: &Arc<Timeline>, lsn: Lsn) -> bool {
4580 : const LARGE_TENANT_THRESHOLD: u64 = 2 * 1024 * 1024 * 1024;
4581 :
4582 1136 : let last_checks_at = self.last_image_layer_creation_check_at.load();
4583 1136 : let distance = lsn
4584 1136 : .checked_sub(last_checks_at)
4585 1136 : .expect("Attempt to compact with LSN going backwards");
4586 1136 : let min_distance =
4587 1136 : self.get_image_layer_creation_check_threshold() as u64 * self.get_checkpoint_distance();
4588 1136 :
4589 1136 : let distance_based_decision = distance.0 >= min_distance;
4590 1136 :
4591 1136 : let mut time_based_decision = false;
4592 1136 : let mut last_check_instant = self.last_image_layer_creation_check_instant.lock().unwrap();
4593 1136 : if let CurrentLogicalSize::Exact(logical_size) = self.current_logical_size.current_size() {
4594 932 : let check_required_after = if Into::<u64>::into(&logical_size) >= LARGE_TENANT_THRESHOLD
4595 : {
4596 0 : self.get_checkpoint_timeout()
4597 : } else {
4598 932 : Duration::from_secs(3600 * 48)
4599 : };
4600 :
4601 932 : time_based_decision = match *last_check_instant {
4602 524 : Some(last_check) => {
4603 524 : let elapsed = last_check.elapsed();
4604 524 : elapsed >= check_required_after
4605 : }
4606 408 : None => true,
4607 : };
4608 204 : }
4609 :
4610 : // Do the expensive delta layer counting only if this timeline has ingested sufficient
4611 : // WAL since the last check or a checkpoint timeout interval has elapsed since the last
4612 : // check.
4613 1136 : let decision = distance_based_decision || time_based_decision;
4614 :
4615 1136 : if decision {
4616 412 : self.last_image_layer_creation_check_at.store(lsn);
4617 412 : *last_check_instant = Some(Instant::now());
4618 724 : }
4619 :
4620 1136 : decision
4621 1136 : }
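     :
     : // Condensed model of the predicate above (exposition only; the budgets here
     : // are hypothetical, the real ones come from tenant config): the expensive
     : // check runs if enough WAL was ingested *or* enough wall-clock time has
     : // passed, with a much shorter time budget for large tenants.
     : //
     : //     use std::time::Duration;
     : //
     : //     fn should_check(wal_distance: u64, min_distance: u64, since_last: Duration, large: bool) -> bool {
     : //         let budget = if large {
     : //             Duration::from_secs(600) // stand-in for checkpoint_timeout
     : //         } else {
     : //             Duration::from_secs(3600 * 48)
     : //         };
     : //         wal_distance >= min_distance || since_last >= budget
     : //     }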
4622 :
4623 : #[tracing::instrument(skip_all, fields(%lsn, %mode))]
4624 : async fn create_image_layers(
4625 : self: &Arc<Timeline>,
4626 : partitioning: &KeyPartitioning,
4627 : lsn: Lsn,
4628 : mode: ImageLayerCreationMode,
4629 : ctx: &RequestContext,
4630 : ) -> Result<Vec<ResidentLayer>, CreateImageLayersError> {
4631 : let timer = self.metrics.create_images_time_histo.start_timer();
4632 :
4633 : // We need to avoid holes between generated image layers.
4634 : // Otherwise LayerMap::image_layer_exists will return false if the key range of some layer is covered by more
4635 : // than one image layer with a hole between them. In that case such a layer cannot be utilized by GC.
4636 : //
4637 : // How can such a hole between partitions appear?
4638 : // If we have a relation with relid=1 and size 100 and a relation with relid=2 and size 200, then the result of
4639 : // KeySpace::partition may contain partitions <100000000..100000099> and <200000000..200000199>.
4640 : // If there is a delta layer <100000000..300000000>, then it will never be garbage collected, because the
4641 : // image layers <100000000..100000099> and <200000000..200000199> do not completely cover it.
4642 : let mut start = Key::MIN;
4643 :
4644 : let check_for_image_layers = self.should_check_if_image_layers_required(lsn);
4645 :
4646 : let mut batch_image_writer = BatchLayerWriter::new(self.conf).await?;
4647 :
4648 : for partition in partitioning.parts.iter() {
4649 : if self.cancel.is_cancelled() {
4650 : return Err(CreateImageLayersError::Cancelled);
4651 : }
4652 :
4653 : let img_range = start..partition.ranges.last().unwrap().end;
4654 : let compact_metadata = partition.overlaps(&Key::metadata_key_range());
4655 : if compact_metadata {
4656 : for range in &partition.ranges {
4657 : assert!(
4658 : range.start.field1 >= METADATA_KEY_BEGIN_PREFIX
4659 : && range.end.field1 <= METADATA_KEY_END_PREFIX,
4660 : "metadata keys must be partitioned separately"
4661 : );
4662 : }
4663 : if mode == ImageLayerCreationMode::Try && !check_for_image_layers {
4664 : // Skip compaction if there are not enough updates. Metadata compaction will do a scan and
4665 : // might interfere with evictions.
4666 : start = img_range.end;
4667 : continue;
4668 : }
4669 : // For initial and force modes, we always generate image layers for metadata keys.
4670 : } else if let ImageLayerCreationMode::Try = mode {
4671 : // check_for_image_layers = false -> skip
4672 : // check_for_image_layers = true -> check time_for_new_image_layer -> skip/generate
4673 : if !check_for_image_layers || !self.time_for_new_image_layer(partition, lsn).await {
4674 : start = img_range.end;
4675 : continue;
4676 : }
4677 : }
4678 : if let ImageLayerCreationMode::Force = mode {
4679 : // When forced to create image layers, we might try and create them where they already
4680 : // exist. This mode is only used in tests/debug.
4681 : let layers = self.layers.read().await;
4682 : if layers.contains_key(&PersistentLayerKey {
4683 : key_range: img_range.clone(),
4684 : lsn_range: PersistentLayerDesc::image_layer_lsn_range(lsn),
4685 : is_delta: false,
4686 : }) {
4687 : tracing::info!(
4688 : "Skipping image layer at {lsn} {}..{}, already exists",
4689 : img_range.start,
4690 : img_range.end
4691 : );
4692 : start = img_range.end;
4693 : continue;
4694 : }
4695 : }
4696 :
4697 : let image_layer_writer = ImageLayerWriter::new(
4698 : self.conf,
4699 : self.timeline_id,
4700 : self.tenant_shard_id,
4701 : &img_range,
4702 : lsn,
4703 : ctx,
4704 : )
4705 : .await?;
4706 :
4707 0 : fail_point!("image-layer-writer-fail-before-finish", |_| {
4708 0 : Err(CreateImageLayersError::Other(anyhow::anyhow!(
4709 0 : "failpoint image-layer-writer-fail-before-finish"
4710 0 : )))
4711 0 : });
4712 :
4713 : let io_concurrency = IoConcurrency::spawn_from_conf(
4714 : self.conf,
4715 : self.gate
4716 : .enter()
4717 0 : .map_err(|_| CreateImageLayersError::Cancelled)?,
4718 : );
4719 :
4720 : let ImageLayerCreationOutcome {
4721 : unfinished_image_layer,
4722 : next_start_key,
4723 : } = if !compact_metadata {
4724 : self.create_image_layer_for_rel_blocks(
4725 : partition,
4726 : image_layer_writer,
4727 : lsn,
4728 : ctx,
4729 : img_range.clone(),
4730 : start,
4731 : io_concurrency,
4732 : )
4733 : .await?
4734 : } else {
4735 : self.create_image_layer_for_metadata_keys(
4736 : partition,
4737 : image_layer_writer,
4738 : lsn,
4739 : ctx,
4740 : img_range.clone(),
4741 : mode,
4742 : start,
4743 : io_concurrency,
4744 : )
4745 : .await?
4746 : };
4747 : start = next_start_key;
4748 : if let Some(unfinished_image_layer) = unfinished_image_layer {
4749 : batch_image_writer.add_unfinished_image_writer(
4750 : unfinished_image_layer,
4751 : img_range,
4752 : lsn,
4753 : );
4754 : }
4755 : }
4756 :
4757 : let image_layers = batch_image_writer.finish(self, ctx).await?;
4758 :
4759 : let mut guard = self.layers.write().await;
4760 :
4761 : // FIXME: we could add the images to be uploaded *before* returning from here, but right
4762 : // now they are being scheduled outside of write lock; current way is inconsistent with
4763 : // compaction lock order.
4764 : guard
4765 : .open_mut()?
4766 : .track_new_image_layers(&image_layers, &self.metrics);
4767 : drop_wlock(guard);
4768 : timer.stop_and_record();
4769 :
4770 : // Creating image layers may have caused some previously visible layers to be covered
4771 : if !image_layers.is_empty() {
4772 : self.update_layer_visibility().await?;
4773 : }
4774 :
4775 : Ok(image_layers)
4776 : }
4777 :
4778 : /// Wait until the background initial logical size calculation is complete, or
4779 : /// this Timeline is shut down. Calling this function will cause the initial
4780 : /// logical size calculation to skip waiting for the background jobs barrier.
4781 0 : pub(crate) async fn await_initial_logical_size(self: Arc<Self>) {
4782 0 : if !self.shard_identity.is_shard_zero() {
4783 : // We don't populate logical size on shard >0: skip waiting for it.
4784 0 : return;
4785 0 : }
4786 0 :
4787 0 : if self.remote_client.is_deleting() {
4788 : // The timeline was created in a deletion-resume state, we don't expect logical size to be populated
4789 0 : return;
4790 0 : }
4791 0 :
4792 0 : if self.current_logical_size.current_size().is_exact() {
4793 : // root timelines are initialized with exact count, but never start the background
4794 : // calculation
4795 0 : return;
4796 0 : }
4797 :
4798 0 : if let Some(await_bg_cancel) = self
4799 0 : .current_logical_size
4800 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
4801 0 : .get()
4802 0 : {
4803 0 : await_bg_cancel.cancel();
4804 0 : } else {
4805 : // We should not wait if we were not able to explicitly instruct
4806 : // the logical size cancellation to skip the concurrency limit semaphore.
4807 : // TODO: this is an unexpected case. We should restructure so that it
4808 : // can't happen.
4809 0 : tracing::warn!(
4810 0 : "await_initial_logical_size: can't get semaphore cancel token, skipping"
4811 : );
4812 0 : debug_assert!(false);
4813 : }
4814 :
4815 0 : tokio::select!(
4816 0 : _ = self.current_logical_size.initialized.acquire() => {},
4817 0 : _ = self.cancel.cancelled() => {}
4818 : )
4819 0 : }
4820 :
4821 : /// Detach this timeline from its ancestor by copying all of the ancestor's layers as this
4822 : /// Timeline's layers, up to the ancestor_lsn.
4823 : ///
4824 : /// Requires a timeline that:
4825 : /// - has an ancestor to detach from
4826 : /// - the ancestor does not have an ancestor -- follows from the original RFC limitations, not
4827 : /// a technical requirement
4828 : ///
4829 : /// After the operation has been started, it cannot be canceled. Upon restart it needs to be
4830 : /// polled again until completion.
4831 : ///
4832 : /// During the operation all timelines sharing the data with this timeline will be reparented
4833 : /// from our ancestor to be branches of this timeline.
4834 0 : pub(crate) async fn prepare_to_detach_from_ancestor(
4835 0 : self: &Arc<Timeline>,
4836 0 : tenant: &crate::tenant::Tenant,
4837 0 : options: detach_ancestor::Options,
4838 0 : ctx: &RequestContext,
4839 0 : ) -> Result<detach_ancestor::Progress, detach_ancestor::Error> {
4840 0 : detach_ancestor::prepare(self, tenant, options, ctx).await
4841 0 : }
4842 :
4843 : /// Second step of detach from ancestor; detaches `self` from its current ancestor and
4844 : /// reparents any reparentable children of previous ancestor.
4845 : ///
4846 : /// This method is to be called while holding the TenantManager's tenant slot, so during this
4847 : /// method we cannot be deleted nor can any timeline be deleted. After this method returns
4848 : /// successfully, tenant must be reloaded.
4849 : ///
4850 : /// Final step will be to [`Self::complete_detaching_timeline_ancestor`] after optionally
4851 : /// resetting the tenant.
4852 0 : pub(crate) async fn detach_from_ancestor_and_reparent(
4853 0 : self: &Arc<Timeline>,
4854 0 : tenant: &crate::tenant::Tenant,
4855 0 : prepared: detach_ancestor::PreparedTimelineDetach,
4856 0 : ctx: &RequestContext,
4857 0 : ) -> Result<detach_ancestor::DetachingAndReparenting, detach_ancestor::Error> {
4858 0 : detach_ancestor::detach_and_reparent(self, tenant, prepared, ctx).await
4859 0 : }
4860 :
4861 : /// Final step which unblocks the GC.
4862 : ///
4863 : /// The tenant must've been reset if ancestry was modified previously (in tenant manager).
4864 0 : pub(crate) async fn complete_detaching_timeline_ancestor(
4865 0 : self: &Arc<Timeline>,
4866 0 : tenant: &crate::tenant::Tenant,
4867 0 : attempt: detach_ancestor::Attempt,
4868 0 : ctx: &RequestContext,
4869 0 : ) -> Result<(), detach_ancestor::Error> {
4870 0 : detach_ancestor::complete(self, tenant, attempt, ctx).await
4871 0 : }
4872 : }
4873 :
4874 : impl Drop for Timeline {
4875 20 : fn drop(&mut self) {
4876 20 : if let Some(ancestor) = &self.ancestor_timeline {
4877 : // This lock should never be poisoned, but in case it is we do a .map() instead of
4878 : // an unwrap(), to avoid panicking in a destructor and thereby aborting the process.
4879 8 : if let Ok(mut gc_info) = ancestor.gc_info.write() {
4880 8 : if !gc_info.remove_child_not_offloaded(self.timeline_id) {
4881 0 : tracing::error!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id,
4882 0 : "Couldn't remove retain_lsn entry from offloaded timeline's parent: already removed");
4883 8 : }
4884 0 : }
4885 12 : }
4886 20 : info!(
4887 0 : "Timeline {} for tenant {} is being dropped",
4888 : self.timeline_id, self.tenant_shard_id.tenant_id
4889 : );
4890 20 : }
4891 : }
4892 :
4893 : /// Top-level failure to compact.
4894 : #[derive(Debug, thiserror::Error)]
4895 : pub(crate) enum CompactionError {
4896 : #[error("The timeline or pageserver is shutting down")]
4897 : ShuttingDown,
4898 : /// Compaction tried to offload a timeline and failed
4899 : #[error("Failed to offload timeline: {0}")]
4900 : Offload(OffloadError),
4901 : /// Compaction cannot be done right now; page reconstruction and so on.
4902 : #[error(transparent)]
4903 : Other(anyhow::Error),
4904 : }
4905 :
4906 : impl From<OffloadError> for CompactionError {
4907 0 : fn from(e: OffloadError) -> Self {
4908 0 : match e {
4909 0 : OffloadError::Cancelled => Self::ShuttingDown,
4910 0 : _ => Self::Offload(e),
4911 : }
4912 0 : }
4913 : }
4914 :
4915 : impl CompactionError {
4916 0 : pub fn is_cancelled(&self) -> bool {
4917 0 : matches!(self, CompactionError::ShuttingDown)
4918 0 : }
4919 : }
4920 :
4921 : impl From<CollectKeySpaceError> for CompactionError {
4922 0 : fn from(err: CollectKeySpaceError) -> Self {
4923 0 : match err {
4924 : CollectKeySpaceError::Cancelled
4925 : | CollectKeySpaceError::PageRead(PageReconstructError::Cancelled) => {
4926 0 : CompactionError::ShuttingDown
4927 : }
4928 0 : e => CompactionError::Other(e.into()),
4929 : }
4930 0 : }
4931 : }
4932 :
4933 : impl From<super::upload_queue::NotInitialized> for CompactionError {
4934 0 : fn from(value: super::upload_queue::NotInitialized) -> Self {
4935 0 : match value {
4936 : super::upload_queue::NotInitialized::Uninitialized => {
4937 0 : CompactionError::Other(anyhow::anyhow!(value))
4938 : }
4939 : super::upload_queue::NotInitialized::ShuttingDown
4940 0 : | super::upload_queue::NotInitialized::Stopped => CompactionError::ShuttingDown,
4941 : }
4942 0 : }
4943 : }
4944 :
4945 : impl From<super::storage_layer::layer::DownloadError> for CompactionError {
4946 0 : fn from(e: super::storage_layer::layer::DownloadError) -> Self {
4947 0 : match e {
4948 : super::storage_layer::layer::DownloadError::TimelineShutdown
4949 : | super::storage_layer::layer::DownloadError::DownloadCancelled => {
4950 0 : CompactionError::ShuttingDown
4951 : }
4952 : super::storage_layer::layer::DownloadError::ContextAndConfigReallyDeniesDownloads
4953 : | super::storage_layer::layer::DownloadError::DownloadRequired
4954 : | super::storage_layer::layer::DownloadError::NotFile(_)
4955 : | super::storage_layer::layer::DownloadError::DownloadFailed
4956 : | super::storage_layer::layer::DownloadError::PreStatFailed(_) => {
4957 0 : CompactionError::Other(anyhow::anyhow!(e))
4958 : }
4959 : #[cfg(test)]
4960 : super::storage_layer::layer::DownloadError::Failpoint(_) => {
4961 0 : CompactionError::Other(anyhow::anyhow!(e))
4962 : }
4963 : }
4964 0 : }
4965 : }
4966 :
4967 : impl From<layer_manager::Shutdown> for CompactionError {
4968 0 : fn from(_: layer_manager::Shutdown) -> Self {
4969 0 : CompactionError::ShuttingDown
4970 0 : }
4971 : }
4972 :
4973 : #[serde_as]
4974 392 : #[derive(serde::Serialize)]
4975 : struct RecordedDuration(#[serde_as(as = "serde_with::DurationMicroSeconds")] Duration);
4976 :
4977 : #[derive(Default)]
4978 : enum DurationRecorder {
4979 : #[default]
4980 : NotStarted,
4981 : Recorded(RecordedDuration, tokio::time::Instant),
4982 : }
4983 :
4984 : impl DurationRecorder {
4985 1008 : fn till_now(&self) -> DurationRecorder {
4986 1008 : match self {
4987 : DurationRecorder::NotStarted => {
4988 0 : panic!("must only call on recorded measurements")
4989 : }
4990 1008 : DurationRecorder::Recorded(_, ended) => {
4991 1008 : let now = tokio::time::Instant::now();
4992 1008 : DurationRecorder::Recorded(RecordedDuration(now - *ended), now)
4993 1008 : }
4994 1008 : }
4995 1008 : }
4996 392 : fn into_recorded(self) -> Option<RecordedDuration> {
4997 392 : match self {
4998 0 : DurationRecorder::NotStarted => None,
4999 392 : DurationRecorder::Recorded(recorded, _) => Some(recorded),
5000 : }
5001 392 : }
5002 : }
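     :
     : // Hypothetical usage of DurationRecorder (exposition only): each till_now()
     : // call records the span since the previous measurement, yielding per-phase
     : // durations of a multi-step job.
     : //
     : //     let t0 = DurationRecorder::Recorded(
     : //         RecordedDuration(Duration::ZERO),
     : //         tokio::time::Instant::now(),
     : //     );
     : //     // ... phase 1 work ...
     : //     let t1 = t0.till_now(); // duration of phase 1
     : //     // ... phase 2 work ...
     : //     let t2 = t1.till_now(); // duration of phase 2
     : //     let recorded: Option<RecordedDuration> = t2.into_recorded();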
5003 :
5004 : /// Descriptor for a delta layer used in testing infra. The start/end key/lsn range of the
5005 : /// delta layer might be different from the min/max key/lsn in the delta layer. Therefore,
5006 : /// the layer descriptor requires the user to provide the ranges, which should cover all
5007 : /// keys specified in the `data` field.
5008 : #[cfg(test)]
5009 : #[derive(Clone)]
5010 : pub struct DeltaLayerTestDesc {
5011 : pub lsn_range: Range<Lsn>,
5012 : pub key_range: Range<Key>,
5013 : pub data: Vec<(Key, Lsn, Value)>,
5014 : }
5015 :
5016 : #[cfg(test)]
5017 : impl DeltaLayerTestDesc {
5018 4 : pub fn new(lsn_range: Range<Lsn>, key_range: Range<Key>, data: Vec<(Key, Lsn, Value)>) -> Self {
5019 4 : Self {
5020 4 : lsn_range,
5021 4 : key_range,
5022 4 : data,
5023 4 : }
5024 4 : }
5025 :
5026 176 : pub fn new_with_inferred_key_range(
5027 176 : lsn_range: Range<Lsn>,
5028 176 : data: Vec<(Key, Lsn, Value)>,
5029 176 : ) -> Self {
5030 440 : let key_min = data.iter().map(|(key, _, _)| key).min().unwrap();
5031 440 : let key_max = data.iter().map(|(key, _, _)| key).max().unwrap();
5032 176 : Self {
5033 176 : key_range: (*key_min)..(key_max.next()),
5034 176 : lsn_range,
5035 176 : data,
5036 176 : }
5037 176 : }
5038 :
5039 20 : pub(crate) fn layer_name(&self) -> LayerName {
5040 20 : LayerName::Delta(super::storage_layer::DeltaLayerName {
5041 20 : key_range: self.key_range.clone(),
5042 20 : lsn_range: self.lsn_range.clone(),
5043 20 : })
5044 20 : }
5045 : }
5046 :
5047 : impl Timeline {
5048 56 : async fn finish_compact_batch(
5049 56 : self: &Arc<Self>,
5050 56 : new_deltas: &[ResidentLayer],
5051 56 : new_images: &[ResidentLayer],
5052 56 : layers_to_remove: &[Layer],
5053 56 : ) -> Result<(), CompactionError> {
5054 56 : let mut guard = tokio::select! {
5055 56 : guard = self.layers.write() => guard,
5056 56 : _ = self.cancel.cancelled() => {
5057 0 : return Err(CompactionError::ShuttingDown);
5058 : }
5059 : };
5060 :
5061 56 : let mut duplicated_layers = HashSet::new();
5062 56 :
5063 56 : let mut insert_layers = Vec::with_capacity(new_deltas.len());
5064 :
5065 672 : for l in new_deltas {
5066 616 : if guard.contains(l.as_ref()) {
5067 : // expected in tests
5068 0 : tracing::error!(layer=%l, "duplicated L1 layer");
5069 :
5070 : // Good ways to cause a duplicate: we repeatedly error after taking the writelock
5071 : // `guard` on self.layers. As of this writing, there are no error returns except
5072 : // for compact_level0_phase1 creating an L0, which does not happen in practice
5073 : // because we have not implemented L0 => L0 compaction.
5074 0 : duplicated_layers.insert(l.layer_desc().key());
5075 616 : } else if LayerMap::is_l0(&l.layer_desc().key_range, l.layer_desc().is_delta) {
5076 0 : return Err(CompactionError::Other(anyhow::anyhow!("compaction generates a L0 layer file as output, which will cause infinite compaction.")));
5077 616 : } else {
5078 616 : insert_layers.push(l.clone());
5079 616 : }
5080 : }
5081 :
5082 : // only remove those inputs which were not outputs
5083 56 : let remove_layers: Vec<Layer> = layers_to_remove
5084 56 : .iter()
5085 804 : .filter(|l| !duplicated_layers.contains(&l.layer_desc().key()))
5086 56 : .cloned()
5087 56 : .collect();
5088 56 :
5089 56 : if !new_images.is_empty() {
5090 0 : guard
5091 0 : .open_mut()?
5092 0 : .track_new_image_layers(new_images, &self.metrics);
5093 56 : }
5094 :
5095 56 : guard
5096 56 : .open_mut()?
5097 56 : .finish_compact_l0(&remove_layers, &insert_layers, &self.metrics);
5098 56 :
5099 56 : self.remote_client
5100 56 : .schedule_compaction_update(&remove_layers, new_deltas)?;
5101 :
5102 56 : drop_wlock(guard);
5103 56 :
5104 56 : Ok(())
5105 56 : }
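     :
     : // Sketch of the duplicate handling above (exposition only; string keys
     : // stand in for layer keys): inputs that already exist are recorded as
     : // duplicates, and the removal list is later filtered so that an input which
     : // is also an output is never removed.
     : //
     : //     use std::collections::HashSet;
     : //
     : //     fn split_new_layers<'a>(
     : //         new: &[&'a str],
     : //         existing: &HashSet<String>,
     : //     ) -> (Vec<&'a str>, HashSet<&'a str>) {
     : //         let mut duplicated = HashSet::new();
     : //         let mut insert = Vec::new();
     : //         for &l in new {
     : //             if existing.contains(l) {
     : //                 duplicated.insert(l);
     : //             } else {
     : //                 insert.push(l);
     : //             }
     : //         }
     : //         (insert, duplicated)
     : //     }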
5106 :
5107 0 : async fn rewrite_layers(
5108 0 : self: &Arc<Self>,
5109 0 : mut replace_layers: Vec<(Layer, ResidentLayer)>,
5110 0 : mut drop_layers: Vec<Layer>,
5111 0 : ) -> Result<(), CompactionError> {
5112 0 : let mut guard = self.layers.write().await;
5113 :
5114 : // Trim our lists in case our caller (compaction) raced with someone else (GC) removing layers: we want
5115 : // to avoid double-removing, and avoid rewriting something that was removed.
5116 0 : replace_layers.retain(|(l, _)| guard.contains(l));
5117 0 : drop_layers.retain(|l| guard.contains(l));
5118 0 :
5119 0 : guard
5120 0 : .open_mut()?
5121 0 : .rewrite_layers(&replace_layers, &drop_layers, &self.metrics);
5122 0 :
5123 0 : let upload_layers: Vec<_> = replace_layers.into_iter().map(|r| r.1).collect();
5124 0 :
5125 0 : self.remote_client
5126 0 : .schedule_compaction_update(&drop_layers, &upload_layers)?;
5127 :
5128 0 : Ok(())
5129 0 : }
5130 :
5131 : /// Schedules the uploads of the given image layers
5132 728 : fn upload_new_image_layers(
5133 728 : self: &Arc<Self>,
5134 728 : new_images: impl IntoIterator<Item = ResidentLayer>,
5135 728 : ) -> Result<(), super::upload_queue::NotInitialized> {
5136 780 : for layer in new_images {
5137 52 : self.remote_client.schedule_layer_file_upload(layer)?;
5138 : }
5139 : // Should any new image layers have been created, not uploading index_part would
5140 : // result in a mismatch between remote_physical_size and the layermap-calculated
5141 : // size, which would fail some tests, but should not be an issue otherwise.
5142 728 : self.remote_client
5143 728 : .schedule_index_upload_for_file_changes()?;
5144 728 : Ok(())
5145 728 : }
5146 :
5147 0 : async fn find_gc_time_cutoff(
5148 0 : &self,
5149 0 : now: SystemTime,
5150 0 : pitr: Duration,
5151 0 : cancel: &CancellationToken,
5152 0 : ctx: &RequestContext,
5153 0 : ) -> Result<Option<Lsn>, PageReconstructError> {
5154 0 : debug_assert_current_span_has_tenant_and_timeline_id();
5155 0 : if self.shard_identity.is_shard_zero() {
5156 : // Shard Zero has SLRU data and can calculate the PITR time -> LSN mapping itself
5157 0 : let time_range = if pitr == Duration::ZERO {
5158 0 : humantime::parse_duration(DEFAULT_PITR_INTERVAL).expect("constant is invalid")
5159 : } else {
5160 0 : pitr
5161 : };
5162 :
5163 : // If PITR is so large or `now` is so small that this underflows, we will retain no history (highly unexpected case)
5164 0 : let time_cutoff = now.checked_sub(time_range).unwrap_or(now);
5165 0 : let timestamp = to_pg_timestamp(time_cutoff);
5166 :
5167 0 : let time_cutoff = match self.find_lsn_for_timestamp(timestamp, cancel, ctx).await? {
5168 0 : LsnForTimestamp::Present(lsn) => Some(lsn),
5169 0 : LsnForTimestamp::Future(lsn) => {
5170 0 : // The timestamp is in the future. That sounds impossible,
5171 0 : // but what it really means is that there haven't been
5172 0 : // any commits since the cutoff timestamp.
5173 0 : //
5174 0 : // In this case we should use the LSN of the most recent commit,
5175 0 : // which is implicitly the last LSN in the log.
5176 0 : debug!("future({})", lsn);
5177 0 : Some(self.get_last_record_lsn())
5178 : }
5179 0 : LsnForTimestamp::Past(lsn) => {
5180 0 : debug!("past({})", lsn);
5181 0 : None
5182 : }
5183 0 : LsnForTimestamp::NoData(lsn) => {
5184 0 : debug!("nodata({})", lsn);
5185 0 : None
5186 : }
5187 : };
5188 0 : Ok(time_cutoff)
5189 : } else {
5190 : // Shards other than shard zero cannot do timestamp->lsn lookups, and must instead learn their GC cutoff
5191 : // from shard zero's index. The index doesn't explicitly tell us the time cutoff, but we may assume that
5192 : // the point up to which shard zero's last_gc_cutoff has advanced will either be the time cutoff, or a
5193 : // space cutoff that we would also have respected ourselves.
5194 0 : match self
5195 0 : .remote_client
5196 0 : .download_foreign_index(ShardNumber(0), cancel)
5197 0 : .await
5198 : {
5199 0 : Ok((index_part, index_generation, _index_mtime)) => {
5200 0 : tracing::info!("GC loaded shard zero metadata (gen {index_generation:?}): latest_gc_cutoff_lsn: {}",
5201 0 : index_part.metadata.latest_gc_cutoff_lsn());
5202 0 : Ok(Some(index_part.metadata.latest_gc_cutoff_lsn()))
5203 : }
5204 : Err(DownloadError::NotFound) => {
5205 : // This is unexpected, because during timeline creations shard zero persists to remote
5206 : // storage before other shards are called, and during timeline deletion non-zeroth shards are
5207 : // deleted before the zeroth one. However, it should be harmless: if we somehow end up in this
5208 : // state, then shard zero should _eventually_ write an index when it GCs.
5209 0 : tracing::warn!("GC couldn't find shard zero's index for timeline");
5210 0 : Ok(None)
5211 : }
5212 0 : Err(e) => {
5213 0 : // TODO: this function should return a different error type than page reconstruct error
5214 0 : Err(PageReconstructError::Other(anyhow::anyhow!(e)))
5215 : }
5216 : }
5217 :
5218 : // TODO: after reading shard zero's GC cutoff, we should validate its generation with the storage
5219 : // controller. Otherwise, it is possible that we see the GC cutoff go backwards while shard zero
5220 : // is going through a migration if we read the old location's index and it has GC'd ahead of the
5221 : // new location. This is legal in principle, but problematic in practice because it might result
5222 : // in a timeline creation succeeding on shard zero ('s new location) but then failing on other shards
5223 : // because they have GC'd past the branch point.
5224 : }
5225 0 : }
5226 :
5227 : /// Find the Lsns above which layer files need to be retained on
5228 : /// garbage collection.
5229 : ///
5230 : /// We calculate two cutoffs, one based on time and one based on WAL size. `pitr`
5231 : /// controls the time cutoff (or ZERO to disable time-based retention), and `space_cutoff` controls
5232 : /// the space-based retention.
5233 : ///
5234 : /// This function doesn't simply calculate time- & space-based retention: it treats time-based
5235 : /// retention as authoritative if enabled, and falls back to space-based retention if calculating
5236 : /// the LSN for a time point isn't possible. Therefore the GcCutoffs::horizon in the response might
5237 : /// differ from the `space_cutoff` input. Callers should treat the min() of the two cutoffs
5238 : /// in the response as the GC cutoff point for the timeline.
5239 : #[instrument(skip_all, fields(timeline_id=%self.timeline_id))]
5240 : pub(super) async fn find_gc_cutoffs(
5241 : &self,
5242 : now: SystemTime,
5243 : space_cutoff: Lsn,
5244 : pitr: Duration,
5245 : cancel: &CancellationToken,
5246 : ctx: &RequestContext,
5247 : ) -> Result<GcCutoffs, PageReconstructError> {
5248 : let _timer = self
5249 : .metrics
5250 : .find_gc_cutoffs_histo
5251 : .start_timer()
5252 : .record_on_drop();
5253 :
5254 : pausable_failpoint!("Timeline::find_gc_cutoffs-pausable");
5255 :
5256 : if cfg!(test) {
5257 : // Unit tests which specify zero PITR interval expect to avoid doing any I/O for timestamp lookup
5258 : if pitr == Duration::ZERO {
5259 : return Ok(GcCutoffs {
5260 : time: self.get_last_record_lsn(),
5261 : space: space_cutoff,
5262 : });
5263 : }
5264 : }
5265 :
5266 : // Calculate a time-based limit on how much to retain:
5267 : // - if PITR interval is set, then this is our cutoff.
5268 : // - if PITR interval is not set, then we do a lookup
5269 : // based on DEFAULT_PITR_INTERVAL, so that size-based retention does not result in keeping history around permanently on idle databases.
5270 : let time_cutoff = self.find_gc_time_cutoff(now, pitr, cancel, ctx).await?;
5271 :
5272 : Ok(match (pitr, time_cutoff) {
5273 : (Duration::ZERO, Some(time_cutoff)) => {
5274 : // PITR is not set. Retain the size-based limit, or the default time retention,
5275 : // whichever requires less data.
5276 : GcCutoffs {
5277 : time: self.get_last_record_lsn(),
5278 : space: std::cmp::max(time_cutoff, space_cutoff),
5279 : }
5280 : }
5281 : (Duration::ZERO, None) => {
5282 : // PITR is not set, and time lookup failed
5283 : GcCutoffs {
5284 : time: self.get_last_record_lsn(),
5285 : space: space_cutoff,
5286 : }
5287 : }
5288 : (_, None) => {
5289 : // PITR interval is set & we didn't look up a timestamp successfully. Conservatively assume PITR
5290 : // cannot advance beyond what was already GC'd, and respect space-based retention
5291 : GcCutoffs {
5292 : time: *self.get_latest_gc_cutoff_lsn(),
5293 : space: space_cutoff,
5294 : }
5295 : }
5296 : (_, Some(time_cutoff)) => {
5297 : // PITR interval is set and we looked up timestamp successfully. Ignore
5298 : // size based retention and make time cutoff authoritative
5299 : GcCutoffs {
5300 : time: time_cutoff,
5301 : space: time_cutoff,
5302 : }
5303 : }
5304 : })
5305 : }
5306 :
5307 : /// Garbage collect layer files on a timeline that are no longer needed.
5308 : ///
5309 : /// Currently, we don't make any attempt at removing unneeded page versions
5310 : /// within a layer file. We can only remove the whole file if it's fully
5311 : /// obsolete.
5312 8 : pub(super) async fn gc(&self) -> Result<GcResult, GcError> {
5313 : // This is most likely the background task, but it might be a task spawned from
5314 : // immediate_gc.
5315 8 : let _g = tokio::select! {
5316 8 : guard = self.gc_lock.lock() => guard,
5317 8 : _ = self.cancel.cancelled() => return Ok(GcResult::default()),
5318 : };
5319 8 : let timer = self.metrics.garbage_collect_histo.start_timer();
5320 8 :
5321 8 : fail_point!("before-timeline-gc");
5322 8 :
5323 8 : // Is the timeline being deleted?
5324 8 : if self.is_stopping() {
5325 0 : return Err(GcError::TimelineCancelled);
5326 8 : }
5327 8 :
5328 8 : let (space_cutoff, time_cutoff, retain_lsns, max_lsn_with_valid_lease) = {
5329 8 : let gc_info = self.gc_info.read().unwrap();
5330 8 :
5331 8 : let space_cutoff = min(gc_info.cutoffs.space, self.get_disk_consistent_lsn());
5332 8 : let time_cutoff = gc_info.cutoffs.time;
5333 8 : let retain_lsns = gc_info
5334 8 : .retain_lsns
5335 8 : .iter()
5336 8 : .map(|(lsn, _child_id, _is_offloaded)| *lsn)
5337 8 : .collect();
5338 8 :
5339 8 : // Gets the maximum LSN that holds the valid lease.
5340 8 : //
5341 8 : // Caveat: `refresh_gc_info` is in charge of updating the lease map.
5342 8 : // Here, we do not check for stale leases again.
5343 8 : let max_lsn_with_valid_lease = gc_info.leases.last_key_value().map(|(lsn, _)| *lsn);
5344 8 :
5345 8 : (
5346 8 : space_cutoff,
5347 8 : time_cutoff,
5348 8 : retain_lsns,
5349 8 : max_lsn_with_valid_lease,
5350 8 : )
5351 8 : };
5352 8 :
5353 8 : let mut new_gc_cutoff = Lsn::min(space_cutoff, time_cutoff);
5354 8 : let standby_horizon = self.standby_horizon.load();
5355 8 : // Hold GC for the standby, but as a safety guard do it only within some
5356 8 : // reasonable lag.
5357 8 : if standby_horizon != Lsn::INVALID {
5358 0 : if let Some(standby_lag) = new_gc_cutoff.checked_sub(standby_horizon) {
5359 : const MAX_ALLOWED_STANDBY_LAG: u64 = 10u64 << 30; // 10 GB
5360 0 : if standby_lag.0 < MAX_ALLOWED_STANDBY_LAG {
5361 0 : new_gc_cutoff = Lsn::min(standby_horizon, new_gc_cutoff);
5362 0 : trace!("holding off GC for standby apply LSN {}", standby_horizon);
5363 : } else {
5364 0 : warn!(
5365 0 : "standby is lagging for more than {}MB, not holding gc for it",
5366 0 : MAX_ALLOWED_STANDBY_LAG / 1024 / 1024
5367 : )
5368 : }
5369 0 : }
5370 8 : }
5371 :
5372 : // Reset the standby horizon so that it is ignored if it is not updated before the next GC.
5373 : // This is an easy way to unset it when the standby disappears, without adding
5374 : // more conf options.
5375 8 : self.standby_horizon.store(Lsn::INVALID);
5376 8 : self.metrics
5377 8 : .standby_horizon_gauge
5378 8 : .set(Lsn::INVALID.0 as i64);
5379 :
5380 8 : let res = self
5381 8 : .gc_timeline(
5382 8 : space_cutoff,
5383 8 : time_cutoff,
5384 8 : retain_lsns,
5385 8 : max_lsn_with_valid_lease,
5386 8 : new_gc_cutoff,
5387 8 : )
5388 8 : .instrument(
5389 8 : info_span!("gc_timeline", timeline_id = %self.timeline_id, cutoff = %new_gc_cutoff),
5390 : )
5391 8 : .await?;
5392 :
5393 : // only record successes
5394 8 : timer.stop_and_record();
5395 8 :
5396 8 : Ok(res)
5397 8 : }
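     :
     : // Sketch of the standby clamp above (exposition only): the GC cutoff is
     : // held back to the standby horizon only while the standby lags by less
     : // than MAX_ALLOWED_STANDBY_LAG.
     : //
     : //     const MAX_LAG: u64 = 10 << 30; // mirrors MAX_ALLOWED_STANDBY_LAG (10 GB)
     : //
     : //     fn clamp_cutoff(cutoff: u64, standby_horizon: Option<u64>) -> u64 {
     : //         match standby_horizon {
     : //             Some(s) if cutoff.saturating_sub(s) < MAX_LAG => cutoff.min(s),
     : //             _ => cutoff, // no standby, or it lags too far behind
     : //         }
     : //     }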
5398 :
5399 8 : async fn gc_timeline(
5400 8 : &self,
5401 8 : space_cutoff: Lsn,
5402 8 : time_cutoff: Lsn,
5403 8 : retain_lsns: Vec<Lsn>,
5404 8 : max_lsn_with_valid_lease: Option<Lsn>,
5405 8 : new_gc_cutoff: Lsn,
5406 8 : ) -> Result<GcResult, GcError> {
5407 8 : // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc
5408 8 :
5409 8 : let now = SystemTime::now();
5410 8 : let mut result: GcResult = GcResult::default();
5411 8 :
5412 8 : // Nothing to GC. Return early.
5413 8 : let latest_gc_cutoff = *self.get_latest_gc_cutoff_lsn();
5414 8 : if latest_gc_cutoff >= new_gc_cutoff {
5415 0 : info!(
5416 0 : "Nothing to GC: new_gc_cutoff_lsn {new_gc_cutoff}, latest_gc_cutoff_lsn {latest_gc_cutoff}",
5417 : );
5418 0 : return Ok(result);
5419 8 : }
5420 :
5421 : // We need to ensure that no one tries to read page versions or create
5422 : // branches at a point before latest_gc_cutoff_lsn. See branch_timeline()
5423 : // for details. This will block until the old value is no longer in use.
5424 : //
5425 : // The GC cutoff should only ever move forwards.
5426 8 : let waitlist = {
5427 8 : let write_guard = self.latest_gc_cutoff_lsn.lock_for_write();
5428 8 : if *write_guard > new_gc_cutoff {
5429 0 : return Err(GcError::BadLsn {
5430 0 : why: format!(
5431 0 : "Cannot move GC cutoff LSN backwards (was {}, new {})",
5432 0 : *write_guard, new_gc_cutoff
5433 0 : ),
5434 0 : });
5435 8 : }
5436 8 :
5437 8 : write_guard.store_and_unlock(new_gc_cutoff)
5438 8 : };
5439 8 : waitlist.wait().await;
5440 :
5441 8 : info!("GC starting");
5442 :
5443 8 : debug!("retain_lsns: {:?}", retain_lsns);
5444 :
5445 8 : let mut layers_to_remove = Vec::new();
5446 :
5447 : // Scan all layers in the timeline (remote or on-disk).
5448 : //
5449 : // Garbage collect the layer if all conditions are satisfied:
5450 : // 1. it is older than cutoff LSN;
5451 : // 2. it is older than PITR interval;
5452 : // 3. it doesn't need to be retained for 'retain_lsns';
5453 : // 4. it does not need to be kept for LSNs holding valid leases.
5454 : // 5. newer on-disk image layers cover the layer's whole key range
5455 : //
5456 : // TODO holding a write lock is too aggressive and avoidable
5457 8 : let mut guard = self.layers.write().await;
5458 8 : let layers = guard.layer_map()?;
5459 48 : 'outer: for l in layers.iter_historic_layers() {
5460 48 : result.layers_total += 1;
5461 48 :
5462 48 : // 1. Is it newer than GC horizon cutoff point?
5463 48 : if l.get_lsn_range().end > space_cutoff {
5464 4 : info!(
5465 0 : "keeping {} because it's newer than space_cutoff {}",
5466 0 : l.layer_name(),
5467 : space_cutoff,
5468 : );
5469 4 : result.layers_needed_by_cutoff += 1;
5470 4 : continue 'outer;
5471 44 : }
5472 44 :
5473 44 : // 2. Is it newer than the PITR cutoff point?
5474 44 : if l.get_lsn_range().end > time_cutoff {
5475 0 : info!(
5476 0 : "keeping {} because it's newer than time_cutoff {}",
5477 0 : l.layer_name(),
5478 : time_cutoff,
5479 : );
5480 0 : result.layers_needed_by_pitr += 1;
5481 0 : continue 'outer;
5482 44 : }
5483 :
5484 : // 3. Is it needed by a child branch?
5485 : // NOTE: with this check we would keep data that
5486 : // might be referenced by child branches forever.
5487 : // We can track this in child timeline GC and delete parent layers when
5488 : // they are no longer needed. This might be complicated with long inheritance chains.
5489 : //
5490 : // TODO Vec is not a great choice for `retain_lsns`
5491 44 : for retain_lsn in &retain_lsns {
5492 : // start_lsn is inclusive
5493 0 : if &l.get_lsn_range().start <= retain_lsn {
5494 0 : info!(
5495 0 : "keeping {} because it's still might be referenced by child branch forked at {} is_dropped: xx is_incremental: {}",
5496 0 : l.layer_name(),
5497 0 : retain_lsn,
5498 0 : l.is_incremental(),
5499 : );
5500 0 : result.layers_needed_by_branches += 1;
5501 0 : continue 'outer;
5502 0 : }
5503 : }
5504 :
5505 : // 4. Is there a valid lease that requires us to keep this layer?
5506 44 : if let Some(lsn) = &max_lsn_with_valid_lease {
5507 : // keep if the layer's start LSN <= any of the leases
5508 36 : if &l.get_lsn_range().start <= lsn {
5509 28 : info!(
5510 0 : "keeping {} because there is a valid lease preventing GC at {}",
5511 0 : l.layer_name(),
5512 : lsn,
5513 : );
5514 28 : result.layers_needed_by_leases += 1;
5515 28 : continue 'outer;
5516 8 : }
5517 8 : }
5518 :
5519 : // 5. Is there a later on-disk layer for this relation?
5520 : //
5521 : // The end-LSN is exclusive, while disk_consistent_lsn is
5522 : // inclusive. For example, if disk_consistent_lsn is 100, it is
5523 : // OK for a delta layer to have end LSN 101, but if the end LSN
5524 : // is 102, then it might not have been fully flushed to disk
5525 : // before crash.
5526 : //
5527 : // For example, imagine that the following layers exist:
5528 : //
5529 : // 1000 - image (A)
5530 : // 1000-2000 - delta (B)
5531 : // 2000 - image (C)
5532 : // 2000-3000 - delta (D)
5533 : // 3000 - image (E)
5534 : //
5535 : // If GC horizon is at 2500, we can remove layers A and B, but
5536 : // we cannot remove C, even though it's older than 2500, because
5537 : // the delta layer 2000-3000 depends on it.
5538 16 : if !layers
5539 16 : .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff))
5540 : {
5541 12 : info!("keeping {} because it is the latest layer", l.layer_name());
5542 12 : result.layers_not_updated += 1;
5543 12 : continue 'outer;
5544 4 : }
5545 4 :
5546 4 : // We didn't find any reason to keep this file, so remove it.
5547 4 : info!(
5548 0 : "garbage collecting {} is_dropped: xx is_incremental: {}",
5549 0 : l.layer_name(),
5550 0 : l.is_incremental(),
5551 : );
5552 4 : layers_to_remove.push(l);
5553 : }
5554 :
5555 8 : if !layers_to_remove.is_empty() {
5556 : // Persist the new GC cutoff value before we actually remove anything.
5557 : // This unconditionally schedules also an index_part.json update, even though, we will
5558 : // be doing one a bit later with the unlinked gc'd layers.
5559 4 : let disk_consistent_lsn = self.disk_consistent_lsn.load();
5560 4 : self.schedule_uploads(disk_consistent_lsn, None)
5561 4 : .map_err(|e| {
5562 0 : if self.cancel.is_cancelled() {
5563 0 : GcError::TimelineCancelled
5564 : } else {
5565 0 : GcError::Remote(e)
5566 : }
5567 4 : })?;
5568 :
5569 4 : let gc_layers = layers_to_remove
5570 4 : .iter()
5571 4 : .map(|x| guard.get_from_desc(x))
5572 4 : .collect::<Vec<Layer>>();
5573 4 :
5574 4 : result.layers_removed = gc_layers.len() as u64;
5575 4 :
5576 4 : self.remote_client.schedule_gc_update(&gc_layers)?;
5577 :
5578 4 : guard.open_mut()?.finish_gc_timeline(&gc_layers);
5579 4 :
5580 4 : #[cfg(feature = "testing")]
5581 4 : {
5582 4 : result.doomed_layers = gc_layers;
5583 4 : }
5584 4 : }
5585 :
5586 8 : info!(
5587 0 : "GC completed removing {} layers, cutoff {}",
5588 : result.layers_removed, new_gc_cutoff
5589 : );
5590 :
5591 8 : result.elapsed = now.elapsed().unwrap_or(Duration::ZERO);
5592 8 : Ok(result)
5593 8 : }
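     :
     : // Condensed model of the retention checks above (exposition only; layers
     : // are reduced to their LSN ranges): a layer may be garbage collected only
     : // if every one of the five "keep" conditions is false.
     : //
     : //     fn must_keep(
     : //         layer: &std::ops::Range<u64>, // the layer's LSN range
     : //         space_cutoff: u64,
     : //         time_cutoff: u64,
     : //         retain_lsns: &[u64],
     : //         max_leased_lsn: Option<u64>,
     : //         covered_by_newer_image: bool,
     : //     ) -> bool {
     : //         layer.end > space_cutoff                                  // 1. cutoff
     : //             || layer.end > time_cutoff                            // 2. PITR
     : //             || retain_lsns.iter().any(|l| layer.start <= *l)      // 3. branches
     : //             || max_leased_lsn.is_some_and(|l| layer.start <= l)   // 4. leases
     : //             || !covered_by_newer_image                            // 5. coverage
     : //     }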
5594 :
5595 : /// Reconstruct a value, using the given base image and WAL records in 'data'.
5596 1339990 : async fn reconstruct_value(
5597 1339990 : &self,
5598 1339990 : key: Key,
5599 1339990 : request_lsn: Lsn,
5600 1339990 : mut data: ValueReconstructState,
5601 1339990 : ) -> Result<Bytes, PageReconstructError> {
5602 1339990 : // Perform WAL redo if needed
5603 1339990 : data.records.reverse();
5604 1339990 :
5605 1339990 : // If we have a page image, and no WAL, we're all set
5606 1339990 : if data.records.is_empty() {
5607 1338354 : if let Some((img_lsn, img)) = &data.img {
5608 1338354 : trace!(
5609 0 : "found page image for key {} at {}, no WAL redo required, req LSN {}",
5610 : key,
5611 : img_lsn,
5612 : request_lsn,
5613 : );
5614 1338354 : Ok(img.clone())
5615 : } else {
5616 0 : Err(PageReconstructError::from(anyhow!(
5617 0 : "base image for {key} at {request_lsn} not found"
5618 0 : )))
5619 : }
5620 : } else {
5621 : // We need to do WAL redo.
5622 : //
5623 : // If we don't have a base image, then the oldest WAL record better initialize
5624 : // the page
5625 1636 : if data.img.is_none() && !data.records.first().unwrap().1.will_init() {
5626 0 : Err(PageReconstructError::from(anyhow!(
5627 0 : "Base image for {} at {} not found, but got {} WAL records",
5628 0 : key,
5629 0 : request_lsn,
5630 0 : data.records.len()
5631 0 : )))
5632 : } else {
5633 1636 : if data.img.is_some() {
5634 1504 : trace!(
5635 0 : "found {} WAL records and a base image for {} at {}, performing WAL redo",
5636 0 : data.records.len(),
5637 : key,
5638 : request_lsn
5639 : );
5640 : } else {
5641 132 : trace!("found {} WAL records that will init the page for {} at {}, performing WAL redo", data.records.len(), key, request_lsn);
5642 : };
5643 1636 : let res = self
5644 1636 : .walredo_mgr
5645 1636 : .as_ref()
5646 1636 : .context("timeline has no walredo manager")
5647 1636 : .map_err(PageReconstructError::WalRedo)?
5648 1636 : .request_redo(key, request_lsn, data.img, data.records, self.pg_version)
5649 1636 : .await;
5650 1636 : let img = match res {
5651 1636 : Ok(img) => img,
5652 0 : Err(walredo::Error::Cancelled) => return Err(PageReconstructError::Cancelled),
5653 0 : Err(walredo::Error::Other(e)) => {
5654 0 : return Err(PageReconstructError::WalRedo(
5655 0 : e.context("reconstruct a page image"),
5656 0 : ))
5657 : }
5658 : };
5659 1636 : Ok(img)
5660 : }
5661 : }
5662 1339990 : }
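 :
 : // A minimal, self-contained sketch of the decision logic in
 : // `reconstruct_value` above, using hypothetical stand-in types (a byte
 : // vector for the page image, `(will_init, payload)` pairs for WAL records);
 : // the real WAL redo is replaced by a trivial append.
 : fn reconstruct_sketch(
 :     img: Option<Vec<u8>>,         // optional base page image
 :     mut records: Vec<(bool, u8)>, // (will_init, payload), newest first
 : ) -> Result<Vec<u8>, String> {
 :     // Records are collected newest-first; redo needs them oldest-first.
 :     records.reverse();
 :     if records.is_empty() {
 :         // No WAL to apply: the base image alone must answer the request.
 :         return img.ok_or_else(|| "base image not found".to_string());
 :     }
 :     if img.is_none() && !records.first().unwrap().0 {
 :         // With no base image, the oldest record must initialize the page.
 :         return Err("no base image and oldest record does not will_init".into());
 :     }
 :     // Stand-in for walredo: start from the image (or an empty page) and
 :     // apply each record in LSN order.
 :     let mut page = img.unwrap_or_default();
 :     for (_, payload) in records {
 :         page.push(payload);
 :     }
 :     Ok(page)
 : }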
5663 :
5664 0 : pub(crate) async fn spawn_download_all_remote_layers(
5665 0 : self: Arc<Self>,
5666 0 : request: DownloadRemoteLayersTaskSpawnRequest,
5667 0 : ) -> Result<DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskInfo> {
5668 : use pageserver_api::models::DownloadRemoteLayersTaskState;
5669 :
5670 : // This is not really needed anymore; it only survives because some tests check the
5671 : // return value through the HTTP API. It would be better not to maintain it.
5672 :
5673 0 : let mut status_guard = self.download_all_remote_layers_task_info.write().unwrap();
5674 0 : if let Some(st) = &*status_guard {
5675 0 : match &st.state {
5676 : DownloadRemoteLayersTaskState::Running => {
5677 0 : return Err(st.clone());
5678 : }
5679 : DownloadRemoteLayersTaskState::ShutDown
5680 0 : | DownloadRemoteLayersTaskState::Completed => {
5681 0 : *status_guard = None;
5682 0 : }
5683 : }
5684 0 : }
5685 :
5686 0 : let self_clone = Arc::clone(&self);
5687 0 : let task_id = task_mgr::spawn(
5688 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
5689 0 : task_mgr::TaskKind::DownloadAllRemoteLayers,
5690 0 : self.tenant_shard_id,
5691 0 : Some(self.timeline_id),
5692 0 : "download all remote layers task",
5693 0 : async move {
5694 0 : self_clone.download_all_remote_layers(request).await;
5695 0 : let mut status_guard = self_clone.download_all_remote_layers_task_info.write().unwrap();
5696 0 : match &mut *status_guard {
5697 : None => {
5698 0 : warn!("tasks status is supposed to be Some(), since we are running");
5699 : }
5700 0 : Some(st) => {
5701 0 : let exp_task_id = format!("{}", task_mgr::current_task_id().unwrap());
5702 0 : if st.task_id != exp_task_id {
5703 0 : warn!("task id changed while we were still running, expecting {} but have {}", exp_task_id, st.task_id);
5704 0 : } else {
5705 0 : st.state = DownloadRemoteLayersTaskState::Completed;
5706 0 : }
5707 : }
5708 : };
5709 0 : Ok(())
5710 0 : }
5711 0 : .instrument(info_span!(parent: None, "download_all_remote_layers", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
5712 : );
5713 :
5714 0 : let initial_info = DownloadRemoteLayersTaskInfo {
5715 0 : task_id: format!("{task_id}"),
5716 0 : state: DownloadRemoteLayersTaskState::Running,
5717 0 : total_layer_count: 0,
5718 0 : successful_download_count: 0,
5719 0 : failed_download_count: 0,
5720 0 : };
5721 0 : *status_guard = Some(initial_info.clone());
5722 0 :
5723 0 : Ok(initial_info)
5724 0 : }
5725 :
5726 0 : async fn download_all_remote_layers(
5727 0 : self: &Arc<Self>,
5728 0 : request: DownloadRemoteLayersTaskSpawnRequest,
5729 0 : ) {
5730 : use pageserver_api::models::DownloadRemoteLayersTaskState;
5731 :
5732 0 : let remaining = {
5733 0 : let guard = self.layers.read().await;
5734 0 : let Ok(lm) = guard.layer_map() else {
5735 : // Technically we could iterate accessible layers here, but downloading
5736 : // all layers of a shut-down timeline makes no sense regardless.
5737 0 : tracing::info!("attempted to download all layers of shutdown timeline");
5738 0 : return;
5739 : };
5740 0 : lm.iter_historic_layers()
5741 0 : .map(|desc| guard.get_from_desc(&desc))
5742 0 : .collect::<Vec<_>>()
5743 0 : };
5744 0 : let total_layer_count = remaining.len();
5745 :
5746 : macro_rules! lock_status {
5747 : ($st:ident) => {
5748 : let mut st = self.download_all_remote_layers_task_info.write().unwrap();
5749 : let st = st
5750 : .as_mut()
5751 : .expect("this function is only called after the task has been spawned");
5752 : assert_eq!(
5753 : st.task_id,
5754 : format!(
5755 : "{}",
5756 : task_mgr::current_task_id().expect("we run inside a task_mgr task")
5757 : )
5758 : );
5759 : let $st = st;
5760 : };
5761 : }
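 :
 : // For reference, `lock_status!(st);` above expands (roughly) to:
 : //
 : //     let mut st = self.download_all_remote_layers_task_info.write().unwrap();
 : //     let st = st.as_mut().expect("..");
 : //     assert_eq!(st.task_id, /* current task id */ ..);
 : //     let st = st;
 : //
 : // i.e. it takes the write lock, checks that the status belongs to the
 : // current task, and leaves the guard bound as `st` for the enclosing scope.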
5762 :
5763 : {
5764 0 : lock_status!(st);
5765 0 : st.total_layer_count = total_layer_count as u64;
5766 0 : }
5767 0 :
5768 0 : let mut remaining = remaining.into_iter();
5769 0 : let mut have_remaining = true;
5770 0 : let mut js = tokio::task::JoinSet::new();
5771 0 :
5772 0 : let cancel = task_mgr::shutdown_token();
5773 0 :
5774 0 : let limit = request.max_concurrent_downloads;
5775 :
5776 : loop {
5777 0 : while js.len() < limit.get() && have_remaining && !cancel.is_cancelled() {
5778 0 : let Some(next) = remaining.next() else {
5779 0 : have_remaining = false;
5780 0 : break;
5781 : };
5782 :
5783 0 : let span = tracing::info_span!("download", layer = %next);
5784 :
5785 0 : js.spawn(
5786 0 : async move {
5787 0 : let res = next.download().await;
5788 0 : (next, res)
5789 0 : }
5790 0 : .instrument(span),
5791 0 : );
5792 0 : }
5793 :
5794 0 : while let Some(res) = js.join_next().await {
5795 0 : match res {
5796 : Ok((_, Ok(_))) => {
5797 0 : lock_status!(st);
5798 0 : st.successful_download_count += 1;
5799 : }
5800 0 : Ok((layer, Err(e))) => {
5801 0 : tracing::error!(%layer, "download failed: {e:#}");
5802 0 : lock_status!(st);
5803 0 : st.failed_download_count += 1;
5804 : }
5805 0 : Err(je) if je.is_cancelled() => unreachable!("not used here"),
5806 0 : Err(je) if je.is_panic() => {
5807 0 : lock_status!(st);
5808 0 : st.failed_download_count += 1;
5809 : }
5810 0 : Err(je) => tracing::warn!("unknown joinerror: {je:?}"),
5811 : }
5812 : }
5813 :
5814 0 : if js.is_empty() && (!have_remaining || cancel.is_cancelled()) {
5815 0 : break;
5816 0 : }
5817 : }
5818 :
5819 : {
5820 0 : lock_status!(st);
5821 0 : st.state = DownloadRemoteLayersTaskState::Completed;
5822 : }
5823 0 : }
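 :
 : // The loop above is an instance of a bounded-concurrency pattern: keep at
 : // most `limit` tasks in a JoinSet and refill as completions drain. A
 : // generic, self-contained sketch (the `run_bounded` name and the closure
 : // parameter are illustrative, not pageserver API; must run inside a tokio
 : // runtime):
 : async fn run_bounded<I, F, Fut>(items: I, limit: usize, work: F)
 : where
 :     I: IntoIterator,
 :     F: Fn(I::Item) -> Fut,
 :     Fut: std::future::Future<Output = ()> + Send + 'static,
 : {
 :     let mut remaining = items.into_iter();
 :     let mut js = tokio::task::JoinSet::new();
 :     loop {
 :         // Refill the set up to the concurrency limit.
 :         while js.len() < limit {
 :             match remaining.next() {
 :                 Some(item) => {
 :                     js.spawn(work(item));
 :                 }
 :                 None => break,
 :             }
 :         }
 :         // Drain one completion; an empty set with an exhausted iterator
 :         // means all work is done.
 :         if js.join_next().await.is_none() {
 :             break;
 :         }
 :     }
 : }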
5824 :
5825 0 : pub(crate) fn get_download_all_remote_layers_task_info(
5826 0 : &self,
5827 0 : ) -> Option<DownloadRemoteLayersTaskInfo> {
5828 0 : self.download_all_remote_layers_task_info
5829 0 : .read()
5830 0 : .unwrap()
5831 0 : .clone()
5832 0 : }
5833 : }
5834 :
5835 : impl Timeline {
5836 : /// Returns non-remote layers for eviction.
5837 0 : pub(crate) async fn get_local_layers_for_disk_usage_eviction(&self) -> DiskUsageEvictionInfo {
5838 0 : let guard = self.layers.read().await;
5839 0 : let mut max_layer_size: Option<u64> = None;
5840 0 :
5841 0 : let resident_layers = guard
5842 0 : .likely_resident_layers()
5843 0 : .map(|layer| {
5844 0 : let file_size = layer.layer_desc().file_size;
5845 0 : max_layer_size = max_layer_size.map_or(Some(file_size), |m| Some(m.max(file_size)));
5846 0 :
5847 0 : let last_activity_ts = layer.latest_activity();
5848 0 :
5849 0 : EvictionCandidate {
5850 0 : layer: layer.to_owned().into(),
5851 0 : last_activity_ts,
5852 0 : relative_last_activity: finite_f32::FiniteF32::ZERO,
5853 0 : visibility: layer.visibility(),
5854 0 : }
5855 0 : })
5856 0 : .collect();
5857 0 :
5858 0 : DiskUsageEvictionInfo {
5859 0 : max_layer_size,
5860 0 : resident_layers,
5861 0 : }
5862 0 : }
5863 :
5864 3720 : pub(crate) fn get_shard_index(&self) -> ShardIndex {
5865 3720 : ShardIndex {
5866 3720 : shard_number: self.tenant_shard_id.shard_number,
5867 3720 : shard_count: self.tenant_shard_id.shard_count,
5868 3720 : }
5869 3720 : }
5870 :
5871 : /// Persistently blocks gc for the `Manual` reason.
5872 : ///
5873 : /// Returns true if no such block existed before, false otherwise.
5874 0 : pub(crate) async fn block_gc(&self, tenant: &super::Tenant) -> anyhow::Result<bool> {
5875 : use crate::tenant::remote_timeline_client::index::GcBlockingReason;
5876 0 : assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
5877 0 : tenant.gc_block.insert(self, GcBlockingReason::Manual).await
5878 0 : }
5879 :
5880 : /// Persistently unblocks gc for the `Manual` reason.
5881 0 : pub(crate) async fn unblock_gc(&self, tenant: &super::Tenant) -> anyhow::Result<()> {
5882 : use crate::tenant::remote_timeline_client::index::GcBlockingReason;
5883 0 : assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
5884 0 : tenant.gc_block.remove(self, GcBlockingReason::Manual).await
5885 0 : }
5886 :
5887 : #[cfg(test)]
5888 92 : pub(super) fn force_advance_lsn(self: &Arc<Timeline>, new_lsn: Lsn) {
5889 92 : self.last_record_lsn.advance(new_lsn);
5890 92 : }
5891 :
5892 : #[cfg(test)]
5893 4 : pub(super) fn force_set_disk_consistent_lsn(&self, new_value: Lsn) {
5894 4 : self.disk_consistent_lsn.store(new_value);
5895 4 : }
5896 :
5897 : /// Force create an image layer and place it into the layer map.
5898 : ///
5899 : /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
5900 : /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are
5901 : /// placed into the layer map in one run and validated.
5902 : #[cfg(test)]
5903 116 : pub(super) async fn force_create_image_layer(
5904 116 : self: &Arc<Timeline>,
5905 116 : lsn: Lsn,
5906 116 : mut images: Vec<(Key, Bytes)>,
5907 116 : check_start_lsn: Option<Lsn>,
5908 116 : ctx: &RequestContext,
5909 116 : ) -> anyhow::Result<()> {
5910 116 : let last_record_lsn = self.get_last_record_lsn();
5911 116 : assert!(
5912 116 : lsn <= last_record_lsn,
5913 0 : "advance last record lsn before inserting a layer, lsn={lsn}, last_record_lsn={last_record_lsn}"
5914 : );
5915 116 : if let Some(check_start_lsn) = check_start_lsn {
5916 116 : assert!(lsn >= check_start_lsn);
5917 0 : }
5918 348 : images.sort_unstable_by(|(ka, _), (kb, _)| ka.cmp(kb));
5919 116 : let min_key = *images.first().map(|(k, _)| k).unwrap();
5920 116 : let end_key = images.last().map(|(k, _)| k).unwrap().next();
5921 116 : let mut image_layer_writer = ImageLayerWriter::new(
5922 116 : self.conf,
5923 116 : self.timeline_id,
5924 116 : self.tenant_shard_id,
5925 116 : &(min_key..end_key),
5926 116 : lsn,
5927 116 : ctx,
5928 116 : )
5929 116 : .await?;
5930 580 : for (key, img) in images {
5931 464 : image_layer_writer.put_image(key, img, ctx).await?;
5932 : }
5933 116 : let (desc, path) = image_layer_writer.finish(ctx).await?;
5934 116 : let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
5935 116 : info!("force created image layer {}", image_layer.local_path());
5936 : {
5937 116 : let mut guard = self.layers.write().await;
5938 116 : guard
5939 116 : .open_mut()
5940 116 : .unwrap()
5941 116 : .force_insert_layer(image_layer.clone());
5942 116 : }
5943 116 :
5944 116 : // Update remote_timeline_client state to reflect existence of this layer
5945 116 : self.remote_client
5946 116 : .schedule_layer_file_upload(image_layer)
5947 116 : .unwrap();
5948 116 :
5949 116 : Ok(())
5950 116 : }
5951 :
5952 : /// Force create a delta layer and place it into the layer map.
5953 : ///
5954 : /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
5955 : /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are
5956 : /// placed into the layer map in one run and validated.
5957 : #[cfg(test)]
5958 180 : pub(super) async fn force_create_delta_layer(
5959 180 : self: &Arc<Timeline>,
5960 180 : mut deltas: DeltaLayerTestDesc,
5961 180 : check_start_lsn: Option<Lsn>,
5962 180 : ctx: &RequestContext,
5963 180 : ) -> anyhow::Result<()> {
5964 180 : let last_record_lsn = self.get_last_record_lsn();
5965 180 : deltas
5966 180 : .data
5967 264 : .sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb)));
5968 180 : assert!(deltas.data.first().unwrap().0 >= deltas.key_range.start);
5969 180 : assert!(deltas.data.last().unwrap().0 < deltas.key_range.end);
5970 624 : for (_, lsn, _) in &deltas.data {
5971 444 : assert!(deltas.lsn_range.start <= *lsn && *lsn < deltas.lsn_range.end);
5972 : }
5973 180 : assert!(
5974 180 : deltas.lsn_range.end <= last_record_lsn,
5975 0 : "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}",
5976 : deltas.lsn_range.end,
5977 : last_record_lsn
5978 : );
5979 180 : if let Some(check_start_lsn) = check_start_lsn {
5980 180 : assert!(deltas.lsn_range.start >= check_start_lsn);
5981 0 : }
5982 180 : let mut delta_layer_writer = DeltaLayerWriter::new(
5983 180 : self.conf,
5984 180 : self.timeline_id,
5985 180 : self.tenant_shard_id,
5986 180 : deltas.key_range.start,
5987 180 : deltas.lsn_range,
5988 180 : ctx,
5989 180 : )
5990 180 : .await?;
5991 624 : for (key, lsn, val) in deltas.data {
5992 444 : delta_layer_writer.put_value(key, lsn, val, ctx).await?;
5993 : }
5994 180 : let (desc, path) = delta_layer_writer.finish(deltas.key_range.end, ctx).await?;
5995 180 : let delta_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
5996 180 : info!("force created delta layer {}", delta_layer.local_path());
5997 : {
5998 180 : let mut guard = self.layers.write().await;
5999 180 : guard
6000 180 : .open_mut()
6001 180 : .unwrap()
6002 180 : .force_insert_layer(delta_layer.clone());
6003 180 : }
6004 180 :
6005 180 : // Update remote_timeline_client state to reflect existence of this layer
6006 180 : self.remote_client
6007 180 : .schedule_layer_file_upload(delta_layer)
6008 180 : .unwrap();
6009 180 :
6010 180 : Ok(())
6011 180 : }
6012 :
6013 : /// Return all keys at the given LSN from the image layers.
6014 : #[cfg(test)]
6015 12 : pub(crate) async fn inspect_image_layers(
6016 12 : self: &Arc<Timeline>,
6017 12 : lsn: Lsn,
6018 12 : ctx: &RequestContext,
6019 12 : io_concurrency: IoConcurrency,
6020 12 : ) -> anyhow::Result<Vec<(Key, Bytes)>> {
6021 12 : let mut all_data = Vec::new();
6022 12 : let guard = self.layers.read().await;
6023 68 : for layer in guard.layer_map()?.iter_historic_layers() {
6024 68 : if !layer.is_delta() && layer.image_layer_lsn() == lsn {
6025 16 : let layer = guard.get_from_desc(&layer);
6026 16 : let mut reconstruct_data = ValuesReconstructState::new(io_concurrency.clone());
6027 16 : layer
6028 16 : .get_values_reconstruct_data(
6029 16 : KeySpace::single(Key::MIN..Key::MAX),
6030 16 : lsn..Lsn(lsn.0 + 1),
6031 16 : &mut reconstruct_data,
6032 16 : ctx,
6033 16 : )
6034 16 : .await?;
6035 132 : for (k, v) in std::mem::take(&mut reconstruct_data.keys) {
6036 132 : let v = v.collect_pending_ios().await?;
6037 132 : all_data.push((k, v.img.unwrap().1));
6038 : }
6039 52 : }
6040 : }
6041 12 : all_data.sort();
6042 12 : Ok(all_data)
6043 12 : }
6044 :
6045 : /// Get all historic layer descriptors in the layer map
6046 : #[cfg(test)]
6047 48 : pub(crate) async fn inspect_historic_layers(
6048 48 : self: &Arc<Timeline>,
6049 48 : ) -> anyhow::Result<Vec<super::storage_layer::PersistentLayerKey>> {
6050 48 : let mut layers = Vec::new();
6051 48 : let guard = self.layers.read().await;
6052 228 : for layer in guard.layer_map()?.iter_historic_layers() {
6053 228 : layers.push(layer.key());
6054 228 : }
6055 48 : Ok(layers)
6056 48 : }
6057 :
6058 : #[cfg(test)]
6059 20 : pub(crate) fn add_extra_test_dense_keyspace(&self, ks: KeySpace) {
6060 20 : let mut keyspace = self.extra_test_dense_keyspace.load().as_ref().clone();
6061 20 : keyspace.merge(&ks);
6062 20 : self.extra_test_dense_keyspace.store(Arc::new(keyspace));
6063 20 : }
6064 : }
6065 :
6066 : /// Tracks the writes that ingestion makes to a particular in-memory layer.
6067 : ///
6068 : /// Cleared upon freezing a layer.
6069 : pub(crate) struct TimelineWriterState {
6070 : open_layer: Arc<InMemoryLayer>,
6071 : current_size: u64,
6072 : // Previous Lsn which passed through
6073 : prev_lsn: Option<Lsn>,
6074 : // Largest Lsn which passed through the current writer
6075 : max_lsn: Option<Lsn>,
6076 : // Cached details of the last freeze. Avoids going trough the atomic/lock on every put.
6077 : cached_last_freeze_at: Lsn,
6078 : }
6079 :
6080 : impl TimelineWriterState {
6081 2592 : fn new(open_layer: Arc<InMemoryLayer>, current_size: u64, last_freeze_at: Lsn) -> Self {
6082 2592 : Self {
6083 2592 : open_layer,
6084 2592 : current_size,
6085 2592 : prev_lsn: None,
6086 2592 : max_lsn: None,
6087 2592 : cached_last_freeze_at: last_freeze_at,
6088 2592 : }
6089 2592 : }
6090 : }
6091 :
6092 : /// Various functions to mutate the timeline.
6093 : // TODO Currently, Deref is used to allow easy access to read methods from this trait.
6094 : // This is probably considered a bad practice in Rust and should be fixed eventually,
6095 : // but will cause large code changes.
6096 : pub(crate) struct TimelineWriter<'a> {
6097 : tl: &'a Timeline,
6098 : write_guard: tokio::sync::MutexGuard<'a, Option<TimelineWriterState>>,
6099 : }
6100 :
6101 : impl Deref for TimelineWriter<'_> {
6102 : type Target = Timeline;
6103 :
6104 19796336 : fn deref(&self) -> &Self::Target {
6105 19796336 : self.tl
6106 19796336 : }
6107 : }
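 :
 : // A minimal sketch of the Deref-delegation pattern used above, with
 : // hypothetical `Inner`/`Wrapper` types. Deref makes every `&Inner` method
 : // callable on `Wrapper` via auto-deref, which is convenient but silently
 : // exposes all of the target's methods (the "bad practice" the TODO above
 : // refers to).
 : use std::ops::Deref;
 :
 : struct Inner;
 : impl Inner {
 :     fn read_something(&self) -> u32 {
 :         42
 :     }
 : }
 :
 : struct Wrapper<'a> {
 :     inner: &'a Inner,
 : }
 :
 : impl Deref for Wrapper<'_> {
 :     type Target = Inner;
 :     fn deref(&self) -> &Inner {
 :         self.inner
 :     }
 : }
 :
 : fn demo() {
 :     let inner = Inner;
 :     let w = Wrapper { inner: &inner };
 :     // Resolves through Deref to Inner::read_something.
 :     assert_eq!(w.read_something(), 42);
 : }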
6108 :
6109 : #[derive(PartialEq)]
6110 : enum OpenLayerAction {
6111 : Roll,
6112 : Open,
6113 : None,
6114 : }
6115 :
6116 : impl TimelineWriter<'_> {
6117 9608464 : async fn handle_open_layer_action(
6118 9608464 : &mut self,
6119 9608464 : at: Lsn,
6120 9608464 : action: OpenLayerAction,
6121 9608464 : ctx: &RequestContext,
6122 9608464 : ) -> anyhow::Result<&Arc<InMemoryLayer>> {
6123 9608464 : match action {
6124 : OpenLayerAction::Roll => {
6125 160 : let freeze_at = self.write_guard.as_ref().unwrap().max_lsn.unwrap();
6126 160 : self.roll_layer(freeze_at).await?;
6127 160 : self.open_layer(at, ctx).await?;
6128 : }
6129 2432 : OpenLayerAction::Open => self.open_layer(at, ctx).await?,
6130 : OpenLayerAction::None => {
6131 9605872 : assert!(self.write_guard.is_some());
6132 : }
6133 : }
6134 :
6135 9608464 : Ok(&self.write_guard.as_ref().unwrap().open_layer)
6136 9608464 : }
6137 :
6138 2592 : async fn open_layer(&mut self, at: Lsn, ctx: &RequestContext) -> anyhow::Result<()> {
6139 2592 : let layer = self
6140 2592 : .tl
6141 2592 : .get_layer_for_write(at, &self.write_guard, ctx)
6142 2592 : .await?;
6143 2592 : let initial_size = layer.size().await?;
6144 :
6145 2592 : let last_freeze_at = self.last_freeze_at.load();
6146 2592 : self.write_guard.replace(TimelineWriterState::new(
6147 2592 : layer,
6148 2592 : initial_size,
6149 2592 : last_freeze_at,
6150 2592 : ));
6151 2592 :
6152 2592 : Ok(())
6153 2592 : }
6154 :
6155 160 : async fn roll_layer(&mut self, freeze_at: Lsn) -> Result<(), FlushLayerError> {
6156 160 : let current_size = self.write_guard.as_ref().unwrap().current_size;
6157 :
6158 : // If layer flushes are backpressured due to compaction not keeping up, wait for the flush
6159 : // to propagate the backpressure up into WAL ingestion.
6160 160 : let l0_count = self
6161 160 : .tl
6162 160 : .layers
6163 160 : .read()
6164 160 : .await
6165 160 : .layer_map()?
6166 160 : .level0_deltas()
6167 160 : .len();
6168 160 : let wait_thresholds = [
6169 160 : self.get_l0_flush_delay_threshold(),
6170 160 : self.get_l0_flush_stall_threshold(),
6171 160 : ];
6172 160 : let wait_threshold = wait_thresholds.into_iter().flatten().min();
6173 :
6174 : // self.write_guard will be taken by the freezing
6175 160 : let flush_id = self
6176 160 : .tl
6177 160 : .freeze_inmem_layer_at(freeze_at, &mut self.write_guard)
6178 160 : .await?;
6179 :
6180 160 : assert!(self.write_guard.is_none());
6181 :
6182 160 : if let Some(wait_threshold) = wait_threshold {
6183 0 : if l0_count >= wait_threshold {
6184 0 : info!("layer roll waiting for flush due to compaction backpressure at {l0_count} L0 layers");
6185 0 : self.tl.wait_flush_completion(flush_id).await?;
6186 0 : }
6187 160 : }
6188 :
6189 160 : if current_size >= self.get_checkpoint_distance() * 2 {
6190 0 : warn!("Flushed oversized open layer with size {}", current_size)
6191 160 : }
6192 :
6193 160 : Ok(())
6194 160 : }
6195 :
6196 9608464 : fn get_open_layer_action(&self, lsn: Lsn, new_value_size: u64) -> OpenLayerAction {
6197 9608464 : let state = &*self.write_guard;
6198 9608464 : let Some(state) = &state else {
6199 2432 : return OpenLayerAction::Open;
6200 : };
6201 :
6202 : #[cfg(feature = "testing")]
6203 9606032 : if state.cached_last_freeze_at < self.tl.last_freeze_at.load() {
6204 : // This check and assertion are not really needed, because
6205 : // LayerManager::try_freeze_in_memory_layer will always clear out the
6206 : // TimelineWriterState if something is frozen. However, last_freeze_at can
6207 : // advance while there is no TimelineWriterState.
6208 0 : assert!(
6209 0 : state.open_layer.end_lsn.get().is_some(),
6210 0 : "our open_layer must be outdated"
6211 : );
6212 :
6213 : // this would be a memory leak waiting to happen because the in-memory layer always has
6214 : // an index
6215 0 : panic!("BUG: TimelineWriterState held on to frozen in-memory layer.");
6216 9606032 : }
6217 9606032 :
6218 9606032 : if state.prev_lsn == Some(lsn) {
6219 : // Rolling mid-LSN is not supported by [downstream code];
6220 : // hence, we only roll at LSN boundaries.
6221 : //
6222 : // [downstream code]: https://github.com/neondatabase/neon/pull/7993#discussion_r1633345422
6223 12 : return OpenLayerAction::None;
6224 9606020 : }
6225 9606020 :
6226 9606020 : if state.current_size == 0 {
6227 : // Don't roll empty layers
6228 0 : return OpenLayerAction::None;
6229 9606020 : }
6230 9606020 :
6231 9606020 : if self.tl.should_roll(
6232 9606020 : state.current_size,
6233 9606020 : state.current_size + new_value_size,
6234 9606020 : self.get_checkpoint_distance(),
6235 9606020 : lsn,
6236 9606020 : state.cached_last_freeze_at,
6237 9606020 : state.open_layer.get_opened_at(),
6238 9606020 : ) {
6239 160 : OpenLayerAction::Roll
6240 : } else {
6241 9605860 : OpenLayerAction::None
6242 : }
6243 9608464 : }
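 :
 : // A simplified model of the roll decision above (the size/time thresholds
 : // live in `should_roll`, which is not shown here; `checkpoint_distance`
 : // and the projected-size math are assumptions for illustration):
 : fn roll_decision_sketch(
 :     current_size: u64,
 :     new_value_size: u64,
 :     checkpoint_distance: u64,
 :     lsn: u64,
 :     prev_lsn: Option<u64>,
 : ) -> bool {
 :     if prev_lsn == Some(lsn) {
 :         return false; // never roll in the middle of an LSN
 :     }
 :     if current_size == 0 {
 :         return false; // never roll an empty layer
 :     }
 :     // Roll when the projected size would cross the checkpoint distance.
 :     current_size + new_value_size >= checkpoint_distance
 : }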
6244 :
6245 : /// Put a batch of keys at the specified Lsns.
6246 9608460 : pub(crate) async fn put_batch(
6247 9608460 : &mut self,
6248 9608460 : batch: SerializedValueBatch,
6249 9608460 : ctx: &RequestContext,
6250 9608460 : ) -> anyhow::Result<()> {
6251 9608460 : if !batch.has_data() {
6252 0 : return Ok(());
6253 9608460 : }
6254 9608460 :
6255 9608460 : // In debug builds, assert that we don't write any keys that don't belong to this shard.
6256 9608460 : // We don't assert this in release builds, since key ownership policies may change over
6257 9608460 : // time. Stray keys will be removed during compaction.
6258 9608460 : if cfg!(debug_assertions) {
6259 19789792 : for metadata in &batch.metadata {
6260 10181332 : if let ValueMeta::Serialized(metadata) = metadata {
6261 10181332 : let key = Key::from_compact(metadata.key);
6262 10181332 : assert!(
6263 10181332 : self.shard_identity.is_key_local(&key)
6264 0 : || self.shard_identity.is_key_global(&key),
6265 0 : "key {key} does not belong on shard {}",
6266 0 : self.shard_identity.shard_index()
6267 : );
6268 0 : }
6269 : }
6270 0 : }
6271 :
6272 9608460 : let batch_max_lsn = batch.max_lsn;
6273 9608460 : let buf_size: u64 = batch.buffer_size() as u64;
6274 9608460 :
6275 9608460 : let action = self.get_open_layer_action(batch_max_lsn, buf_size);
6276 9608460 : let layer = self
6277 9608460 : .handle_open_layer_action(batch_max_lsn, action, ctx)
6278 9608460 : .await?;
6279 :
6280 9608460 : let res = layer.put_batch(batch, ctx).await;
6281 :
6282 9608460 : if res.is_ok() {
6283 9608460 : // Update the current size only when the entire write was ok.
6284 9608460 : // In case of failures, we may have had partial writes which
6285 9608460 : // render the size tracking out of sync. That's ok because
6286 9608460 : // the checkpoint distance should be significantly smaller
6287 9608460 : // than the S3 single-shot upload limit of 5GiB.
6288 9608460 : let state = self.write_guard.as_mut().unwrap();
6289 9608460 :
6290 9608460 : state.current_size += buf_size;
6291 9608460 : state.prev_lsn = Some(batch_max_lsn);
6292 9608460 : state.max_lsn = std::cmp::max(state.max_lsn, Some(batch_max_lsn));
6293 9608460 : }
6294 :
6295 9608460 : res
6296 9608460 : }
6297 :
6298 : #[cfg(test)]
6299 : /// Test helper, for tests that would like to poke individual values without composing a batch
6300 8780308 : pub(crate) async fn put(
6301 8780308 : &mut self,
6302 8780308 : key: Key,
6303 8780308 : lsn: Lsn,
6304 8780308 : value: &Value,
6305 8780308 : ctx: &RequestContext,
6306 8780308 : ) -> anyhow::Result<()> {
6307 : use utils::bin_ser::BeSer;
6308 8780308 : if !key.is_valid_key_on_write_path() {
6309 0 : bail!(
6310 0 : "the request contains data not supported by pageserver at TimelineWriter::put: {}",
6311 0 : key
6312 0 : );
6313 8780308 : }
6314 8780308 : let val_ser_size = value.serialized_size().unwrap() as usize;
6315 8780308 : let batch = SerializedValueBatch::from_values(vec![(
6316 8780308 : key.to_compact(),
6317 8780308 : lsn,
6318 8780308 : val_ser_size,
6319 8780308 : value.clone(),
6320 8780308 : )]);
6321 8780308 :
6322 8780308 : self.put_batch(batch, ctx).await
6323 8780308 : }
6324 :
6325 4 : pub(crate) async fn delete_batch(
6326 4 : &mut self,
6327 4 : batch: &[(Range<Key>, Lsn)],
6328 4 : ctx: &RequestContext,
6329 4 : ) -> anyhow::Result<()> {
6330 4 : if let Some((_, lsn)) = batch.first() {
6331 4 : let action = self.get_open_layer_action(*lsn, 0);
6332 4 : let layer = self.handle_open_layer_action(*lsn, action, ctx).await?;
6333 4 : layer.put_tombstones(batch).await?;
6334 0 : }
6335 :
6336 4 : Ok(())
6337 4 : }
6338 :
6339 : /// Track the end of the latest digested WAL record.
6340 : /// Remember the (end of) last valid WAL record remembered in the timeline.
6341 : ///
6342 : /// Call this after you have finished writing all the WAL up to 'lsn'.
6343 : ///
6344 : /// 'lsn' must be aligned. This wakes up any wait_lsn() callers waiting for
6345 : /// the 'lsn' or anything older. The previous last record LSN is stored alongside
6346 : /// the latest and can be read.
6347 10558184 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
6348 10558184 : self.tl.finish_write(new_lsn);
6349 10558184 : }
6350 :
6351 541140 : pub(crate) fn update_current_logical_size(&self, delta: i64) {
6352 541140 : self.tl.update_current_logical_size(delta)
6353 541140 : }
6354 : }
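 :
 : // The intended calling sequence for the writer, per the doc comments above,
 : // as a sketch (`timeline`, `batch`, `ctx`, and `end_lsn` are assumed to be
 : // in scope; this is not a compiling excerpt):
 : //
 : //     let mut writer = timeline.writer().await;
 : //     writer.put_batch(batch, &ctx).await?; // stage the values
 : //     writer.finish_write(end_lsn);         // publish last_record_lsn
 : //
 : // `finish_write` is what wakes `wait_lsn()` callers, so it must only be
 : // called once everything up to `end_lsn` has been put.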
6355 :
6356 : // We need TimelineWriter to be Send for the upcoming conversion of
6357 : // Timeline::layers to tokio::sync::RwLock.
6358 : #[test]
6359 4 : fn is_send() {
6360 4 : fn _assert_send<T: Send>() {}
6361 4 : _assert_send::<TimelineWriter<'_>>();
6362 4 : }
6363 :
6364 : #[cfg(test)]
6365 : mod tests {
6366 : use pageserver_api::key::Key;
6367 : use pageserver_api::value::Value;
6368 : use utils::{id::TimelineId, lsn::Lsn};
6369 :
6370 : use crate::tenant::{
6371 : harness::{test_img, TenantHarness},
6372 : layer_map::LayerMap,
6373 : storage_layer::{Layer, LayerName},
6374 : timeline::{DeltaLayerTestDesc, EvictionError},
6375 : Timeline,
6376 : };
6377 :
6378 : #[tokio::test]
6379 4 : async fn test_heatmap_generation() {
6380 4 : let harness = TenantHarness::create("heatmap_generation").await.unwrap();
6381 4 :
6382 4 : let covered_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
6383 4 : Lsn(0x10)..Lsn(0x20),
6384 4 : vec![(
6385 4 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
6386 4 : Lsn(0x11),
6387 4 : Value::Image(test_img("foo")),
6388 4 : )],
6389 4 : );
6390 4 : let visible_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
6391 4 : Lsn(0x10)..Lsn(0x20),
6392 4 : vec![(
6393 4 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
6394 4 : Lsn(0x11),
6395 4 : Value::Image(test_img("foo")),
6396 4 : )],
6397 4 : );
6398 4 : let l0_delta = DeltaLayerTestDesc::new(
6399 4 : Lsn(0x20)..Lsn(0x30),
6400 4 : Key::from_hex("000000000000000000000000000000000000").unwrap()
6401 4 : ..Key::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(),
6402 4 : vec![(
6403 4 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
6404 4 : Lsn(0x25),
6405 4 : Value::Image(test_img("foo")),
6406 4 : )],
6407 4 : );
6408 4 : let delta_layers = vec![
6409 4 : covered_delta.clone(),
6410 4 : visible_delta.clone(),
6411 4 : l0_delta.clone(),
6412 4 : ];
6413 4 :
6414 4 : let image_layer = (
6415 4 : Lsn(0x40),
6416 4 : vec![(
6417 4 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
6418 4 : test_img("bar"),
6419 4 : )],
6420 4 : );
6421 4 : let image_layers = vec![image_layer];
6422 4 :
6423 4 : let (tenant, ctx) = harness.load().await;
6424 4 : let timeline = tenant
6425 4 : .create_test_timeline_with_layers(
6426 4 : TimelineId::generate(),
6427 4 : Lsn(0x10),
6428 4 : 14,
6429 4 : &ctx,
6430 4 : delta_layers,
6431 4 : image_layers,
6432 4 : Lsn(0x100),
6433 4 : )
6434 4 : .await
6435 4 : .unwrap();
6436 4 :
6437 4 : // Layer visibility is an input to heatmap generation, so refresh it first
6438 4 : timeline.update_layer_visibility().await.unwrap();
6439 4 :
6440 4 : let heatmap = timeline
6441 4 : .generate_heatmap()
6442 4 : .await
6443 4 : .expect("Infallible while timeline is not shut down");
6444 4 :
6445 4 : assert_eq!(heatmap.timeline_id, timeline.timeline_id);
6446 4 :
6447 4 : // L0 should come last
6448 4 : assert_eq!(heatmap.layers.last().unwrap().name, l0_delta.layer_name());
6449 4 :
6450 4 : let mut last_lsn = Lsn::MAX;
6451 20 : for layer in heatmap.layers {
6452 4 : // Covered layer should be omitted
6453 16 : assert!(layer.name != covered_delta.layer_name());
6454 4 :
6455 16 : let layer_lsn = match &layer.name {
6456 8 : LayerName::Delta(d) => d.lsn_range.end,
6457 8 : LayerName::Image(i) => i.lsn,
6458 4 : };
6459 4 :
6460 4 : // Apart from L0s, the newest layers should come first.
6461 16 : if !LayerMap::is_l0(layer.name.key_range(), layer.name.is_delta()) {
6462 12 : assert!(layer_lsn <= last_lsn);
6463 12 : last_lsn = layer_lsn;
6464 4 : }
6465 4 : }
6466 4 : }
6467 :
6468 : #[tokio::test]
6469 4 : async fn two_layer_eviction_attempts_at_the_same_time() {
6470 4 : let harness = TenantHarness::create("two_layer_eviction_attempts_at_the_same_time")
6471 4 : .await
6472 4 : .unwrap();
6473 4 :
6474 4 : let (tenant, ctx) = harness.load().await;
6475 4 : let timeline = tenant
6476 4 : .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
6477 4 : .await
6478 4 : .unwrap();
6479 4 :
6480 4 : let layer = find_some_layer(&timeline).await;
6481 4 : let layer = layer
6482 4 : .keep_resident()
6483 4 : .await
6484 4 : .expect("no download => no downloading errors")
6485 4 : .drop_eviction_guard();
6486 4 :
6487 4 : let forever = std::time::Duration::from_secs(120);
6488 4 :
6489 4 : let first = layer.evict_and_wait(forever);
6490 4 : let second = layer.evict_and_wait(forever);
6491 4 :
6492 4 : let (first, second) = tokio::join!(first, second);
6493 4 :
6494 4 : let res = layer.keep_resident().await;
6495 4 : assert!(res.is_none(), "{res:?}");
6496 4 :
6497 4 : match (first, second) {
6498 4 : (Ok(()), Ok(())) => {
6499 4 : // Because there are no more timeline locks being taken on the eviction path, we
6500 4 : // can witness all three outcomes here.
6501 4 : }
6502 4 : (Ok(()), Err(EvictionError::NotFound)) | (Err(EvictionError::NotFound), Ok(())) => {
6503 0 : // if one completes before the other, this is fine just as well.
6504 0 : }
6505 4 : other => unreachable!("unexpected {:?}", other),
6506 4 : }
6507 4 : }
6508 :
6509 4 : async fn find_some_layer(timeline: &Timeline) -> Layer {
6510 4 : let layers = timeline.layers.read().await;
6511 4 : let desc = layers
6512 4 : .layer_map()
6513 4 : .unwrap()
6514 4 : .iter_historic_layers()
6515 4 : .next()
6516 4 : .expect("must find one layer to evict");
6517 4 :
6518 4 : layers.get_from_desc(&desc)
6519 4 : }
6520 : }
|