Line data Source code
1 : pub(crate) mod analysis;
2 : pub(crate) mod compaction;
3 : pub mod delete;
4 : pub(crate) mod detach_ancestor;
5 : mod eviction_task;
6 : pub(crate) mod handle;
7 : mod init;
8 : pub mod layer_manager;
9 : pub(crate) mod logical_size;
10 : pub mod offload;
11 : pub mod span;
12 : pub mod uninit;
13 : mod walreceiver;
14 :
15 : use anyhow::{anyhow, bail, ensure, Context, Result};
16 : use arc_swap::ArcSwap;
17 : use bytes::Bytes;
18 : use camino::Utf8Path;
19 : use chrono::{DateTime, Utc};
20 : use enumset::EnumSet;
21 : use fail::fail_point;
22 : use handle::ShardTimelineId;
23 : use offload::OffloadError;
24 : use once_cell::sync::Lazy;
25 : use pageserver_api::{
26 : config::tenant_conf_defaults::DEFAULT_COMPACTION_THRESHOLD,
27 : key::{
28 : KEY_SIZE, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX, NON_INHERITED_RANGE,
29 : NON_INHERITED_SPARSE_RANGE,
30 : },
31 : keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning},
32 : models::{
33 : CompactionAlgorithm, CompactionAlgorithmSettings, DownloadRemoteLayersTaskInfo,
34 : DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy, InMemoryLayerInfo, LayerMapInfo,
35 : LsnLease, TimelineState,
36 : },
37 : reltag::BlockNumber,
38 : shard::{ShardIdentity, ShardNumber, TenantShardId},
39 : };
40 : use rand::Rng;
41 : use serde_with::serde_as;
42 : use storage_broker::BrokerClientChannel;
43 : use tokio::{
44 : runtime::Handle,
45 : sync::{oneshot, watch},
46 : };
47 : use tokio_util::sync::CancellationToken;
48 : use tracing::*;
49 : use utils::{
50 : fs_ext, pausable_failpoint,
51 : sync::gate::{Gate, GateGuard},
52 : };
53 : use wal_decoder::serialized_batch::SerializedValueBatch;
54 :
55 : use std::sync::atomic::Ordering as AtomicOrdering;
56 : use std::sync::{Arc, Mutex, RwLock, Weak};
57 : use std::time::{Duration, Instant, SystemTime};
58 : use std::{
59 : array,
60 : collections::{BTreeMap, HashMap, HashSet},
61 : sync::atomic::AtomicU64,
62 : };
63 : use std::{cmp::min, ops::ControlFlow};
64 : use std::{
65 : collections::btree_map::Entry,
66 : ops::{Deref, Range},
67 : };
68 : use std::{pin::pin, sync::OnceLock};
69 :
70 : use crate::{
71 : aux_file::AuxFileSizeEstimator,
72 : tenant::{
73 : config::AttachmentMode,
74 : layer_map::{LayerMap, SearchResult},
75 : metadata::TimelineMetadata,
76 : storage_layer::{inmemory_layer::IndexEntry, PersistentLayerDesc},
77 : },
78 : walingest::WalLagCooldown,
79 : walredo,
80 : };
81 : use crate::{
82 : context::{DownloadBehavior, RequestContext},
83 : disk_usage_eviction_task::DiskUsageEvictionInfo,
84 : pgdatadir_mapping::CollectKeySpaceError,
85 : };
86 : use crate::{
87 : disk_usage_eviction_task::finite_f32,
88 : tenant::storage_layer::{
89 : AsLayerDesc, DeltaLayerWriter, EvictionError, ImageLayerWriter, InMemoryLayer, Layer,
90 : LayerAccessStatsReset, LayerName, ResidentLayer, ValueReconstructState,
91 : ValuesReconstructState,
92 : },
93 : };
94 : use crate::{
95 : disk_usage_eviction_task::EvictionCandidate, tenant::storage_layer::delta_layer::DeltaEntry,
96 : };
97 : use crate::{
98 : l0_flush::{self, L0FlushGlobalState},
99 : metrics::GetKind,
100 : };
101 : use crate::{
102 : metrics::ScanLatencyOngoingRecording, tenant::timeline::logical_size::CurrentLogicalSize,
103 : };
104 : use crate::{
105 : pgdatadir_mapping::DirectoryKind,
106 : virtual_file::{MaybeFatalIo, VirtualFile},
107 : };
108 : use crate::{pgdatadir_mapping::LsnForTimestamp, tenant::tasks::BackgroundLoopKind};
109 : use crate::{pgdatadir_mapping::MAX_AUX_FILE_V2_DELTAS, tenant::storage_layer::PersistentLayerKey};
110 : use pageserver_api::config::tenant_conf_defaults::DEFAULT_PITR_INTERVAL;
111 :
112 : use crate::config::PageServerConf;
113 : use crate::keyspace::{KeyPartitioning, KeySpace};
114 : use crate::metrics::TimelineMetrics;
115 : use crate::pgdatadir_mapping::CalculateLogicalSizeError;
116 : use crate::tenant::config::TenantConfOpt;
117 : use pageserver_api::reltag::RelTag;
118 : use pageserver_api::shard::ShardIndex;
119 :
120 : use postgres_connection::PgConnectionConfig;
121 : use postgres_ffi::{to_pg_timestamp, v14::xlog_utils, WAL_SEGMENT_SIZE};
122 : use utils::{
123 : completion,
124 : generation::Generation,
125 : id::TimelineId,
126 : lsn::{AtomicLsn, Lsn, RecordLsn},
127 : seqwait::SeqWait,
128 : simple_rcu::{Rcu, RcuReadGuard},
129 : };
130 :
131 : use crate::task_mgr;
132 : use crate::task_mgr::TaskKind;
133 : use crate::tenant::gc_result::GcResult;
134 : use crate::ZERO_PAGE;
135 : use pageserver_api::key::Key;
136 :
137 : use self::delete::DeleteTimelineFlow;
138 : pub(super) use self::eviction_task::EvictionTaskTenantState;
139 : use self::eviction_task::EvictionTaskTimelineState;
140 : use self::layer_manager::LayerManager;
141 : use self::logical_size::LogicalSize;
142 : use self::walreceiver::{WalReceiver, WalReceiverConf};
143 :
144 : use super::{
145 : config::TenantConf, storage_layer::LayerVisibilityHint, upload_queue::NotInitialized,
146 : MaybeOffloaded,
147 : };
148 : use super::{debug_assert_current_span_has_tenant_and_timeline_id, AttachedTenantConf};
149 : use super::{remote_timeline_client::index::IndexPart, storage_layer::LayerFringe};
150 : use super::{
151 : remote_timeline_client::RemoteTimelineClient, remote_timeline_client::WaitCompletionError,
152 : storage_layer::ReadableLayer,
153 : };
154 : use super::{
155 : secondary::heatmap::{HeatMapLayer, HeatMapTimeline},
156 : GcError,
157 : };
158 :
159 : #[cfg(test)]
160 : use pageserver_api::value::Value;
161 :
162 : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
163 : pub(crate) enum FlushLoopState {
164 : NotStarted,
165 : Running {
166 : #[cfg(test)]
167 : expect_initdb_optimization: bool,
168 : #[cfg(test)]
169 : initdb_optimization_count: usize,
170 : },
171 : Exited,
172 : }
173 :
174 : #[derive(Debug, Copy, Clone, PartialEq, Eq)]
175 : pub enum ImageLayerCreationMode {
176 : /// Try to create image layers based on `time_for_new_image_layer`. Used in compaction code path.
177 : Try,
178 : /// Force creating the image layers if possible. For now, no image layers will be created
179 : /// for metadata keys. Used in compaction code path with force flag enabled.
180 : Force,
181 : /// Initial ingestion of the data, and no data should be dropped in this function. This
182 : /// means that no metadata keys should be included in the partitions. Used in flush frozen layer
183 : /// code path.
184 : Initial,
185 : }
186 :
187 : impl std::fmt::Display for ImageLayerCreationMode {
188 712 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
189 712 : write!(f, "{:?}", self)
190 712 : }
191 : }
192 :
193 : /// Temporary function for the immutable storage state refactor; ensures we are dropping the lock guard and not something else.
194 : /// Can be removed after all refactors are done.
195 28 : fn drop_rlock<T>(rlock: tokio::sync::RwLockReadGuard<T>) {
196 28 : drop(rlock)
197 28 : }
198 :
199 : /// Temporary function for the immutable storage state refactor; ensures we are dropping the lock guard and not something else.
200 : /// Can be removed after all refactors are done.
201 740 : fn drop_wlock<T>(wlock: tokio::sync::RwLockWriteGuard<'_, T>) {
202 740 : drop(wlock)
203 740 : }
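// Illustrative sketch (not part of this file): why the typed drop helpers above
// are safer than a bare `drop()`. `std::mem::drop` accepts any value, so a
// refactor that changes what a binding holds would still compile; the typed
// helper fails to compile instead. Hypothetical usage:
//
//     let guard = timeline.layers.read().await; // tokio::sync::RwLockReadGuard<LayerManager>
//     drop_rlock(guard);                        // OK: the guard type matches
//     // drop_rlock(42);                        // compile error, as intended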
204 :
205 : /// The outward-facing resources required to build a Timeline
206 : pub struct TimelineResources {
207 : pub remote_client: RemoteTimelineClient,
208 : pub timeline_get_throttle:
209 : Arc<crate::tenant::throttle::Throttle<crate::metrics::tenant_throttling::TimelineGet>>,
210 : pub l0_flush_global_state: l0_flush::L0FlushGlobalState,
211 : }
212 :
213 : /// The relation size cache caches relation sizes at the end of the timeline. It speeds up WAL
214 : /// ingestion considerably, because WAL ingestion needs to check on most records if the record
215 : /// implicitly extends the relation. At startup, `complete_as_of` is initialized to the current end
216 : /// of the timeline (disk_consistent_lsn). It's used on reads of relation sizes to check if the
217 : /// value can be used to also update the cache, see [`Timeline::update_cached_rel_size`].
218 : pub(crate) struct RelSizeCache {
219 : pub(crate) complete_as_of: Lsn,
220 : pub(crate) map: HashMap<RelTag, (Lsn, BlockNumber)>,
221 : }
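// A minimal sketch (an assumption, not the actual lookup code) of how a cache
// like `RelSizeCache` can be consulted; `rel` and `lsn` are hypothetical inputs.
//
//     fn cached_rel_size(cache: &RelSizeCache, rel: RelTag, lsn: Lsn) -> Option<BlockNumber> {
//         match cache.map.get(&rel) {
//             // An entry computed at or before the requested LSN is usable,
//             // since the cache tracks sizes at the end of the timeline.
//             Some((cached_lsn, nblocks)) if *cached_lsn <= lsn => Some(*nblocks),
//             _ => None,
//         }
//     }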
222 :
223 : pub struct Timeline {
224 : pub(crate) conf: &'static PageServerConf,
225 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
226 :
227 : myself: Weak<Self>,
228 :
229 : pub(crate) tenant_shard_id: TenantShardId,
230 : pub timeline_id: TimelineId,
231 :
232 : /// The generation of the tenant that instantiated us: this is used for safety when writing remote objects.
233 : /// Never changes for the lifetime of this [`Timeline`] object.
234 : ///
235 : /// This duplicates the generation stored in LocationConf, but that structure is mutable:
236 : /// this copy enforces the invariant that the generation doesn't change during a Tenant's lifetime.
237 : pub(crate) generation: Generation,
238 :
239 : /// The detailed sharding information from our parent Tenant. This enables us to map keys
240 : /// to shards, and is constant through the lifetime of this Timeline.
241 : shard_identity: ShardIdentity,
242 :
243 : pub pg_version: u32,
244 :
245 : /// The tuple has two elements.
246 : /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote).
247 : /// 2. `LayerMap`, the acceleration data structure for `get_reconstruct_data`.
248 : ///
249 : /// `LayerMap` maps out the `(PAGE,LSN) / (KEY,LSN)` space, which is composed of `(KeyRange, LsnRange)` rectangles.
250 : /// We describe these rectangles through the `PersistentLayerDesc` struct.
251 : ///
252 : /// When we want to reconstruct a page, we first find the `PersistentLayerDesc`'s that we need for page reconstruction,
253 : /// using `LayerMap`. Then, we use `LayerFileManager` to get the `PersistentLayer`'s that correspond to the
254 : /// `PersistentLayerDesc`'s.
255 : ///
256 : /// Hence, it's important to keep things coherent. The `LayerFileManager` must always have an entry for all
257 : /// `PersistentLayerDesc`'s in the `LayerMap`. If it doesn't, `LayerFileManager::get_from_desc` will panic at
258 : /// runtime, e.g., during page reconstruction.
259 : ///
260 : /// In the future, we'll be able to split up the tuple of LayerMap and `LayerFileManager`,
261 : /// so that e.g. on-demand-download/eviction, and layer spreading, can operate just on `LayerFileManager`.
262 : pub(crate) layers: tokio::sync::RwLock<LayerManager>,
263 :
264 : last_freeze_at: AtomicLsn,
265 : // Atomic would be more appropriate here.
266 : last_freeze_ts: RwLock<Instant>,
267 :
268 : pub(crate) standby_horizon: AtomicLsn,
269 :
270 : // WAL redo manager. `None` only for broken tenants.
271 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
272 :
273 : /// Remote storage client.
274 : /// See [`remote_timeline_client`](super::remote_timeline_client) module comment for details.
275 : pub remote_client: Arc<RemoteTimelineClient>,
276 :
277 : // What page versions do we hold in the repository? If we get a
278 : // request > last_record_lsn, we need to wait until we receive all
279 : // the WAL up to the request. The SeqWait provides functions for
280 : // that. TODO: If we get a request for an old LSN, such that the
281 : // versions have already been garbage collected away, we should
282 : // throw an error, but we don't track that currently.
283 : //
284 : // last_record_lsn.load().last points to the end of last processed WAL record.
285 : //
286 : // We also remember the starting point of the previous record in
287 : // 'last_record_lsn.load().prev'. It's used to set the xl_prev pointer of the
288 : // first WAL record when the node is started up. But here, we just
289 : // keep track of it.
290 : last_record_lsn: SeqWait<RecordLsn, Lsn>,
291 :
292 : // All WAL records have been processed and stored durably on files on
293 : // local disk, up to this LSN. On crash and restart, we need to re-process
294 : // the WAL starting from this point.
295 : //
296 : // Some later WAL records might have been processed and also flushed to disk
297 : // already, so don't be surprised to see some beyond this point, but there's
298 : // no durability guarantee for them yet.
299 : disk_consistent_lsn: AtomicLsn,
300 :
301 : // Parent timeline that this timeline was branched from, and the LSN
302 : // of the branch point.
303 : ancestor_timeline: Option<Arc<Timeline>>,
304 : ancestor_lsn: Lsn,
305 :
306 : pub(super) metrics: TimelineMetrics,
307 :
308 : // `Timeline` doesn't write these metrics itself, but it manages the lifetime. Code
309 : // in `crate::page_service` writes these metrics.
310 : pub(crate) query_metrics: crate::metrics::SmgrQueryTimePerTimeline,
311 :
312 : directory_metrics: [AtomicU64; DirectoryKind::KINDS_NUM],
313 :
314 : /// Ensures layers aren't frozen by checkpointer between
315 : /// [`Timeline::get_layer_for_write`] and layer reads.
316 : /// Locked automatically by [`TimelineWriter`] and checkpointer.
317 : /// Must always be acquired before the layer map/individual layer lock
318 : /// to avoid deadlock.
319 : ///
320 : /// The state is cleared upon freezing.
321 : write_lock: tokio::sync::Mutex<Option<TimelineWriterState>>,
322 :
323 : /// Used to avoid multiple `flush_loop` tasks running
324 : pub(super) flush_loop_state: Mutex<FlushLoopState>,
325 :
326 : /// layer_flush_start_tx can be used to wake up the layer-flushing task.
327 : /// - The u64 value is a counter, incremented every time a new flush cycle is requested.
328 : /// The flush cycle counter is sent back on the layer_flush_done channel when
329 : /// the flush finishes. You can use that to wait for the flush to finish.
330 : /// - The LSN is updated to max() of its current value and the latest disk_consistent_lsn
331 : /// read by whoever sends an update
332 : layer_flush_start_tx: tokio::sync::watch::Sender<(u64, Lsn)>,
333 : /// To be notified when layer flushing has finished, subscribe to the layer_flush_done channel.
334 : layer_flush_done_tx: tokio::sync::watch::Sender<(u64, Result<(), FlushLayerError>)>,
335 :
336 : // Needed to ensure that we can't create a branch at a point that was already garbage collected
337 : pub latest_gc_cutoff_lsn: Rcu<Lsn>,
338 :
339 : // List of child timelines and their branch points. This is needed to avoid
340 : // garbage collecting data that is still needed by the child timelines.
341 : pub(crate) gc_info: std::sync::RwLock<GcInfo>,
342 :
343 : // This may change across major Postgres versions, so for simplicity we
344 : // record it once, after running initdb for a timeline, and keep it fixed.
345 : // It is needed in checks when we want to error on some operations
346 : // when they are requested for a pre-initdb LSN.
347 : // It can be unified with latest_gc_cutoff_lsn under some "first_valid_lsn",
348 : // though let's keep them both for better error visibility.
349 : pub initdb_lsn: Lsn,
350 :
351 : /// When did we last calculate the partitioning? Made pub so test cases can access it.
352 : pub(super) partitioning: tokio::sync::Mutex<((KeyPartitioning, SparseKeyPartitioning), Lsn)>,
353 :
354 : /// Configuration: how often should the partitioning be recalculated.
355 : repartition_threshold: u64,
356 :
357 : last_image_layer_creation_check_at: AtomicLsn,
358 : last_image_layer_creation_check_instant: std::sync::Mutex<Option<Instant>>,
359 :
360 : /// Current logical size of the "datadir", at the last LSN.
361 : current_logical_size: LogicalSize,
362 :
363 : /// Information about the last processed message by the WAL receiver,
364 : /// or None if WAL receiver has not received anything for this timeline
365 : /// yet.
366 : pub last_received_wal: Mutex<Option<WalReceiverInfo>>,
367 : pub walreceiver: Mutex<Option<WalReceiver>>,
368 :
369 : /// Relation size cache
370 : pub(crate) rel_size_cache: RwLock<RelSizeCache>,
371 :
372 : download_all_remote_layers_task_info: RwLock<Option<DownloadRemoteLayersTaskInfo>>,
373 :
374 : state: watch::Sender<TimelineState>,
375 :
376 : /// Prevent two tasks from deleting the timeline at the same time. If held, the
377 : /// timeline is being deleted. If 'true', the timeline has already been deleted.
378 : pub delete_progress: TimelineDeleteProgress,
379 :
380 : eviction_task_timeline_state: tokio::sync::Mutex<EvictionTaskTimelineState>,
381 :
382 : /// Load or creation time information about the disk_consistent_lsn and when the loading
383 : /// happened. Used for consumption metrics.
384 : pub(crate) loaded_at: (Lsn, SystemTime),
385 :
386 : /// Gate to prevent shutdown completing while I/O is still happening to this timeline's data
387 : pub(crate) gate: Gate,
388 :
389 : /// Cancellation token scoped to this timeline: anything doing long-running work relating
390 : /// to the timeline should drop out when this token fires.
391 : pub(crate) cancel: CancellationToken,
392 :
393 : /// Make sure we only have one running compaction at a time in tests.
394 : ///
395 : /// Must only be taken in two places:
396 : /// - [`Timeline::compact`] (this file)
397 : /// - [`delete::delete_local_timeline_directory`]
398 : ///
399 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
400 : compaction_lock: tokio::sync::Mutex<()>,
401 :
402 : /// Make sure we only have one running gc at a time.
403 : ///
404 : /// Must only be taken in two places:
405 : /// - [`Timeline::gc`] (this file)
406 : /// - [`delete::delete_local_timeline_directory`]
407 : ///
408 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
409 : gc_lock: tokio::sync::Mutex<()>,
410 :
411 : /// Cloned from [`super::Tenant::timeline_get_throttle`] on construction.
412 : timeline_get_throttle:
413 : Arc<crate::tenant::throttle::Throttle<crate::metrics::tenant_throttling::TimelineGet>>,
414 :
415 : /// Size estimator for aux file v2
416 : pub(crate) aux_file_size_estimator: AuxFileSizeEstimator,
417 :
418 : /// Some test cases directly place keys into the timeline without actually modifying the directory
419 : /// keys (i.e., DB_DIR). The test cases creating such keys will put the keyspaces here, so that
420 : /// these keys won't get garbage-collected during compaction/GC. This field only modifies the dense
421 : /// keyspace return value of `collect_keyspace`. For sparse keyspaces, use AUX keys for testing, and
422 : /// in the future, add `extra_test_sparse_keyspace` if necessary.
423 : #[cfg(test)]
424 : pub(crate) extra_test_dense_keyspace: ArcSwap<KeySpace>,
425 :
426 : pub(crate) l0_flush_global_state: L0FlushGlobalState,
427 :
428 : pub(crate) handles: handle::PerTimelineState<crate::page_service::TenantManagerTypes>,
429 :
430 : pub(crate) attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
431 :
432 : /// Cf. [`crate::tenant::CreateTimelineIdempotency`].
433 : pub(crate) create_idempotency: crate::tenant::CreateTimelineIdempotency,
434 : }
435 :
436 : pub type TimelineDeleteProgress = Arc<tokio::sync::Mutex<DeleteTimelineFlow>>;
437 :
438 : pub struct WalReceiverInfo {
439 : pub wal_source_connconf: PgConnectionConfig,
440 : pub last_received_msg_lsn: Lsn,
441 : pub last_received_msg_ts: u128,
442 : }
443 :
444 : /// Information about how much history needs to be retained, needed by
445 : /// Garbage Collection.
446 : #[derive(Default)]
447 : pub(crate) struct GcInfo {
448 : /// Specific LSNs that are needed.
449 : ///
450 : /// Currently, this includes all points where child branches have
451 : /// been forked off from. In the future, could also include
452 : /// explicit user-defined snapshot points.
453 : pub(crate) retain_lsns: Vec<(Lsn, TimelineId, MaybeOffloaded)>,
454 :
455 : /// The cutoff coordinates, which are combined by selecting the minimum.
456 : pub(crate) cutoffs: GcCutoffs,
457 :
458 : /// Leases granted to particular LSNs.
459 : pub(crate) leases: BTreeMap<Lsn, LsnLease>,
460 :
461 : /// Whether our branch point is within our ancestor's PITR interval (for cost estimation)
462 : pub(crate) within_ancestor_pitr: bool,
463 : }
464 :
465 : impl GcInfo {
466 226 : pub(crate) fn min_cutoff(&self) -> Lsn {
467 226 : self.cutoffs.select_min()
468 226 : }
469 :
470 228 : pub(super) fn insert_child(
471 228 : &mut self,
472 228 : child_id: TimelineId,
473 228 : child_lsn: Lsn,
474 228 : is_offloaded: MaybeOffloaded,
475 228 : ) {
476 228 : self.retain_lsns.push((child_lsn, child_id, is_offloaded));
477 228 : self.retain_lsns.sort_by_key(|i| i.0);
478 228 : }
479 :
480 2 : pub(super) fn remove_child(&mut self, child_id: TimelineId) {
481 2 : self.retain_lsns.retain(|i| i.1 != child_id);
482 2 : }
483 : }
484 :
485 : /// The `GcInfo` component describing which Lsns need to be retained. Functionally, this
486 : /// is a single number (the oldest LSN which we must retain), but it internally distinguishes
487 : /// between time-based and space-based retention for observability and consumption metrics purposes.
488 : #[derive(Debug, Clone)]
489 : pub(crate) struct GcCutoffs {
490 : /// Calculated from the [`TenantConf::gc_horizon`], this LSN indicates how much
491 : /// history we must keep to retain a specified number of bytes of WAL.
492 : pub(crate) space: Lsn,
493 :
494 : /// Calculated from [`TenantConf::pitr_interval`], this LSN indicates how much
495 : /// history we must keep to enable reading back at least the PITR interval duration.
496 : pub(crate) time: Lsn,
497 : }
498 :
499 : impl Default for GcCutoffs {
500 414 : fn default() -> Self {
501 414 : Self {
502 414 : space: Lsn::INVALID,
503 414 : time: Lsn::INVALID,
504 414 : }
505 414 : }
506 : }
507 :
508 : impl GcCutoffs {
509 266 : fn select_min(&self) -> Lsn {
510 266 : std::cmp::min(self.space, self.time)
511 266 : }
512 : }
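// Illustrative (assumed LSN values): the effective cutoff is the minimum, i.e.
// the *older* of the space- and time-based LSNs, so GC never removes history
// that either retention policy still requires.
//
//     let cutoffs = GcCutoffs { space: Lsn(0x30), time: Lsn(0x20) };
//     assert_eq!(cutoffs.select_min(), Lsn(0x20)); // the time-based cutoff wins here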
513 :
514 : pub(crate) struct TimelineVisitOutcome {
515 : completed_keyspace: KeySpace,
516 : image_covered_keyspace: KeySpace,
517 : }
518 :
519 : /// An error happened in a get() operation.
520 2 : #[derive(thiserror::Error, Debug)]
521 : pub(crate) enum PageReconstructError {
522 : #[error(transparent)]
523 : Other(anyhow::Error),
524 :
525 : #[error("Ancestor LSN wait error: {0}")]
526 : AncestorLsnTimeout(WaitLsnError),
527 :
528 : #[error("timeline shutting down")]
529 : Cancelled,
530 :
531 : /// An error happened replaying WAL records
532 : #[error(transparent)]
533 : WalRedo(anyhow::Error),
534 :
535 : #[error("{0}")]
536 : MissingKey(MissingKeyError),
537 : }
538 :
539 : impl From<anyhow::Error> for PageReconstructError {
540 0 : fn from(value: anyhow::Error) -> Self {
541 0 : // with walingest.rs many PageReconstructError are wrapped in as anyhow::Error
542 0 : match value.downcast::<PageReconstructError>() {
543 0 : Ok(pre) => pre,
544 0 : Err(other) => PageReconstructError::Other(other),
545 : }
546 0 : }
547 : }
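// Illustrative: thanks to the downcast above, a PageReconstructError that was
// wrapped into an anyhow::Error (as happens in walingest.rs) round-trips intact
// instead of being double-wrapped in `Other`.
//
//     let wrapped: anyhow::Error = PageReconstructError::Cancelled.into();
//     let back = PageReconstructError::from(wrapped);
//     assert!(matches!(back, PageReconstructError::Cancelled));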
548 :
549 : impl From<utils::bin_ser::DeserializeError> for PageReconstructError {
550 0 : fn from(value: utils::bin_ser::DeserializeError) -> Self {
551 0 : PageReconstructError::Other(anyhow::Error::new(value).context("deserialization failure"))
552 0 : }
553 : }
554 :
555 : impl From<layer_manager::Shutdown> for PageReconstructError {
556 0 : fn from(_: layer_manager::Shutdown) -> Self {
557 0 : PageReconstructError::Cancelled
558 0 : }
559 : }
560 :
561 : impl GetVectoredError {
562 : #[cfg(test)]
563 6 : pub(crate) fn is_missing_key_error(&self) -> bool {
564 6 : matches!(self, Self::MissingKey(_))
565 6 : }
566 : }
567 :
568 : impl From<layer_manager::Shutdown> for GetVectoredError {
569 0 : fn from(_: layer_manager::Shutdown) -> Self {
570 0 : GetVectoredError::Cancelled
571 0 : }
572 : }
573 :
574 : #[derive(thiserror::Error)]
575 : pub struct MissingKeyError {
576 : key: Key,
577 : shard: ShardNumber,
578 : cont_lsn: Lsn,
579 : request_lsn: Lsn,
580 : ancestor_lsn: Option<Lsn>,
581 : backtrace: Option<std::backtrace::Backtrace>,
582 : }
583 :
584 : impl std::fmt::Debug for MissingKeyError {
585 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
586 0 : write!(f, "{}", self)
587 0 : }
588 : }
589 :
590 : impl std::fmt::Display for MissingKeyError {
591 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
592 0 : write!(
593 0 : f,
594 0 : "could not find data for key {} (shard {:?}) at LSN {}, request LSN {}",
595 0 : self.key, self.shard, self.cont_lsn, self.request_lsn
596 0 : )?;
597 0 : if let Some(ref ancestor_lsn) = self.ancestor_lsn {
598 0 : write!(f, ", ancestor {}", ancestor_lsn)?;
599 0 : }
600 :
601 0 : if let Some(ref backtrace) = self.backtrace {
602 0 : write!(f, "\n{}", backtrace)?;
603 0 : }
604 :
605 0 : Ok(())
606 0 : }
607 : }
608 :
609 : impl PageReconstructError {
610 : /// Returns true if this error indicates a tenant/timeline shutdown alike situation
611 0 : pub(crate) fn is_stopping(&self) -> bool {
612 : use PageReconstructError::*;
613 0 : match self {
614 0 : Cancelled => true,
615 0 : Other(_) | AncestorLsnTimeout(_) | WalRedo(_) | MissingKey(_) => false,
616 : }
617 0 : }
618 : }
619 :
620 0 : #[derive(thiserror::Error, Debug)]
621 : pub(crate) enum CreateImageLayersError {
622 : #[error("timeline shutting down")]
623 : Cancelled,
624 :
625 : #[error("read failed")]
626 : GetVectoredError(#[source] GetVectoredError),
627 :
628 : #[error("reconstruction failed")]
629 : PageReconstructError(#[source] PageReconstructError),
630 :
631 : #[error(transparent)]
632 : Other(#[from] anyhow::Error),
633 : }
634 :
635 : impl From<layer_manager::Shutdown> for CreateImageLayersError {
636 0 : fn from(_: layer_manager::Shutdown) -> Self {
637 0 : CreateImageLayersError::Cancelled
638 0 : }
639 : }
640 :
641 0 : #[derive(thiserror::Error, Debug, Clone)]
642 : pub(crate) enum FlushLayerError {
643 : /// Timeline cancellation token was cancelled
644 : #[error("timeline shutting down")]
645 : Cancelled,
646 :
647 : /// We tried to flush a layer while the Timeline is in an unexpected state
648 : #[error("cannot flush frozen layers when flush_loop is not running, state is {0:?}")]
649 : NotRunning(FlushLoopState),
650 :
651 : // Arc<> the following non-clonable error types: we must be Clone-able because the flush error is propagated from the flush
652 : // loop via a watch channel, where we can only borrow it.
653 : #[error("create image layers (shared)")]
654 : CreateImageLayersError(Arc<CreateImageLayersError>),
655 :
656 : #[error("other (shared)")]
657 : Other(#[from] Arc<anyhow::Error>),
658 : }
659 :
660 : impl FlushLayerError {
661 : // When crossing from generic anyhow errors to this error type, we explicitly check
662 : // for timeline cancellation to avoid logging inoffensive shutdown errors as warn/err.
663 0 : fn from_anyhow(timeline: &Timeline, err: anyhow::Error) -> Self {
664 0 : let cancelled = timeline.cancel.is_cancelled()
665 : // The upload queue might have been shut down before the official cancellation of the timeline.
666 0 : || err
667 0 : .downcast_ref::<NotInitialized>()
668 0 : .map(NotInitialized::is_stopping)
669 0 : .unwrap_or_default();
670 0 : if cancelled {
671 0 : Self::Cancelled
672 : } else {
673 0 : Self::Other(Arc::new(err))
674 : }
675 0 : }
676 : }
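// Illustrative (hypothetical call site): the mapping above keeps benign
// shutdown errors out of the warn/error logs.
//
//     let err = anyhow::anyhow!("upload queue shut down");
//     match FlushLayerError::from_anyhow(&timeline, err) {
//         FlushLayerError::Cancelled => { /* expected during shutdown; stay quiet */ }
//         other => tracing::error!("flush failed: {other}"),
//     }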
677 :
678 : impl From<layer_manager::Shutdown> for FlushLayerError {
679 0 : fn from(_: layer_manager::Shutdown) -> Self {
680 0 : FlushLayerError::Cancelled
681 0 : }
682 : }
683 :
684 0 : #[derive(thiserror::Error, Debug)]
685 : pub(crate) enum GetVectoredError {
686 : #[error("timeline shutting down")]
687 : Cancelled,
688 :
689 : #[error("requested too many keys: {0} > {}", Timeline::MAX_GET_VECTORED_KEYS)]
690 : Oversized(u64),
691 :
692 : #[error("requested at invalid LSN: {0}")]
693 : InvalidLsn(Lsn),
694 :
695 : #[error("requested key not found: {0}")]
696 : MissingKey(MissingKeyError),
697 :
698 : #[error("ancestry walk")]
699 : GetReadyAncestorError(#[source] GetReadyAncestorError),
700 :
701 : #[error(transparent)]
702 : Other(#[from] anyhow::Error),
703 : }
704 :
705 : impl From<GetReadyAncestorError> for GetVectoredError {
706 2 : fn from(value: GetReadyAncestorError) -> Self {
707 : use GetReadyAncestorError::*;
708 2 : match value {
709 0 : Cancelled => GetVectoredError::Cancelled,
710 : AncestorLsnTimeout(_) | BadState { .. } => {
711 2 : GetVectoredError::GetReadyAncestorError(value)
712 : }
713 : }
714 2 : }
715 : }
716 :
717 2 : #[derive(thiserror::Error, Debug)]
718 : pub(crate) enum GetReadyAncestorError {
719 : #[error("ancestor LSN wait error")]
720 : AncestorLsnTimeout(#[from] WaitLsnError),
721 :
722 : #[error("bad state on timeline {timeline_id}: {state:?}")]
723 : BadState {
724 : timeline_id: TimelineId,
725 : state: TimelineState,
726 : },
727 :
728 : #[error("cancelled")]
729 : Cancelled,
730 : }
731 :
732 : #[derive(Clone, Copy)]
733 : pub enum LogicalSizeCalculationCause {
734 : Initial,
735 : ConsumptionMetricsSyntheticSize,
736 : EvictionTaskImitation,
737 : TenantSizeHandler,
738 : }
739 :
740 : pub enum GetLogicalSizePriority {
741 : User,
742 : Background,
743 : }
744 :
745 0 : #[derive(enumset::EnumSetType)]
746 : pub(crate) enum CompactFlags {
747 : ForceRepartition,
748 : ForceImageLayerCreation,
749 : ForceL0Compaction,
750 : EnhancedGcBottomMostCompaction,
751 : DryRun,
752 : }
753 :
754 : impl std::fmt::Debug for Timeline {
755 0 : fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
756 0 : write!(f, "Timeline<{}>", self.timeline_id)
757 0 : }
758 : }
759 :
760 0 : #[derive(thiserror::Error, Debug)]
761 : pub(crate) enum WaitLsnError {
762 : // Called on a timeline which is shutting down
763 : #[error("Shutdown")]
764 : Shutdown,
765 :
766 : // Called on an timeline not in active state or shutting down
767 : #[error("Bad timeline state: {0:?}")]
768 : BadState(TimelineState),
769 :
770 : // Timeout expired while waiting for LSN to catch up with goal.
771 : #[error("{0}")]
772 : Timeout(String),
773 : }
774 :
775 : // The impls below achieve cancellation mapping for errors.
776 : // Perhaps there's a way of achieving this with less cruft.
777 :
778 : impl From<CreateImageLayersError> for CompactionError {
779 0 : fn from(e: CreateImageLayersError) -> Self {
780 0 : match e {
781 0 : CreateImageLayersError::Cancelled => CompactionError::ShuttingDown,
782 0 : CreateImageLayersError::Other(e) => {
783 0 : CompactionError::Other(e.context("create image layers"))
784 : }
785 0 : _ => CompactionError::Other(e.into()),
786 : }
787 0 : }
788 : }
789 :
790 : impl From<CreateImageLayersError> for FlushLayerError {
791 0 : fn from(e: CreateImageLayersError) -> Self {
792 0 : match e {
793 0 : CreateImageLayersError::Cancelled => FlushLayerError::Cancelled,
794 0 : any => FlushLayerError::CreateImageLayersError(Arc::new(any)),
795 : }
796 0 : }
797 : }
798 :
799 : impl From<PageReconstructError> for CreateImageLayersError {
800 0 : fn from(e: PageReconstructError) -> Self {
801 0 : match e {
802 0 : PageReconstructError::Cancelled => CreateImageLayersError::Cancelled,
803 0 : _ => CreateImageLayersError::PageReconstructError(e),
804 : }
805 0 : }
806 : }
807 :
808 : impl From<GetVectoredError> for CreateImageLayersError {
809 0 : fn from(e: GetVectoredError) -> Self {
810 0 : match e {
811 0 : GetVectoredError::Cancelled => CreateImageLayersError::Cancelled,
812 0 : _ => CreateImageLayersError::GetVectoredError(e),
813 : }
814 0 : }
815 : }
816 :
817 : impl From<GetVectoredError> for PageReconstructError {
818 6 : fn from(e: GetVectoredError) -> Self {
819 6 : match e {
820 0 : GetVectoredError::Cancelled => PageReconstructError::Cancelled,
821 0 : GetVectoredError::InvalidLsn(_) => PageReconstructError::Other(anyhow!("Invalid LSN")),
822 0 : err @ GetVectoredError::Oversized(_) => PageReconstructError::Other(err.into()),
823 4 : GetVectoredError::MissingKey(err) => PageReconstructError::MissingKey(err),
824 2 : GetVectoredError::GetReadyAncestorError(err) => PageReconstructError::from(err),
825 0 : GetVectoredError::Other(err) => PageReconstructError::Other(err),
826 : }
827 6 : }
828 : }
829 :
830 : impl From<GetReadyAncestorError> for PageReconstructError {
831 2 : fn from(e: GetReadyAncestorError) -> Self {
832 : use GetReadyAncestorError::*;
833 2 : match e {
834 0 : AncestorLsnTimeout(wait_err) => PageReconstructError::AncestorLsnTimeout(wait_err),
835 2 : bad_state @ BadState { .. } => PageReconstructError::Other(anyhow::anyhow!(bad_state)),
836 0 : Cancelled => PageReconstructError::Cancelled,
837 : }
838 2 : }
839 : }
840 :
841 : pub(crate) enum WaitLsnWaiter<'a> {
842 : Timeline(&'a Timeline),
843 : Tenant,
844 : PageService,
845 : }
846 :
847 : /// Argument to [`Timeline::shutdown`].
848 : #[derive(Debug, Clone, Copy)]
849 : pub(crate) enum ShutdownMode {
850 : /// Graceful shutdown, may do a lot of I/O as we flush any open layers to disk and then
851 : /// also to remote storage. This method can easily take multiple seconds for a busy timeline.
852 : ///
853 : /// While we are flushing, we continue to accept read I/O for LSNs ingested before
854 : /// the call to [`Timeline::shutdown`].
855 : FreezeAndFlush,
856 : /// Only flush the layers to the remote storage without freezing any open layers. This is the
857 : /// mode used by ancestor detach and any other operation that reloads a tenant without increasing
858 : /// the generation number.
859 : Flush,
860 : /// Shut down immediately, without waiting for any open layers to flush.
861 : Hard,
862 : }
863 :
864 : struct ImageLayerCreationOutcome {
865 : image: Option<ResidentLayer>,
866 : next_start_key: Key,
867 : }
868 :
869 : /// Public interface functions
870 : impl Timeline {
871 : /// Get the LSN where this branch was created
872 2 : pub(crate) fn get_ancestor_lsn(&self) -> Lsn {
873 2 : self.ancestor_lsn
874 2 : }
875 :
876 : /// Get the ancestor's timeline id
877 4 : pub(crate) fn get_ancestor_timeline_id(&self) -> Option<TimelineId> {
878 4 : self.ancestor_timeline
879 4 : .as_ref()
880 4 : .map(|ancestor| ancestor.timeline_id)
881 4 : }
882 :
883 : /// Get the ancestor timeline
884 0 : pub(crate) fn ancestor_timeline(&self) -> Option<&Arc<Timeline>> {
885 0 : self.ancestor_timeline.as_ref()
886 0 : }
887 :
888 : /// Get the bytes written since the PITR cutoff on this branch, and
889 : /// whether this branch's ancestor_lsn is within its parent's PITR.
890 0 : pub(crate) fn get_pitr_history_stats(&self) -> (u64, bool) {
891 0 : let gc_info = self.gc_info.read().unwrap();
892 0 : let history = self
893 0 : .get_last_record_lsn()
894 0 : .checked_sub(gc_info.cutoffs.time)
895 0 : .unwrap_or(Lsn(0))
896 0 : .0;
897 0 : (history, gc_info.within_ancestor_pitr)
898 0 : }
899 :
900 : /// Lock and get timeline's GC cutoff
901 246 : pub(crate) fn get_latest_gc_cutoff_lsn(&self) -> RcuReadGuard<Lsn> {
902 246 : self.latest_gc_cutoff_lsn.read()
903 246 : }
904 :
905 : /// Look up given page version.
906 : ///
907 : /// If a remote layer file is needed, it is downloaded as part of this
908 : /// call.
909 : ///
910 : /// This method enforces [`Self::timeline_get_throttle`] internally.
911 : ///
912 : /// NOTE: It is considered an error to 'get' a key that doesn't exist. The
913 : /// abstraction above this needs to store suitable metadata to track what
914 : /// data exists with what keys, in separate metadata entries. If a
915 : /// non-existent key is requested, we may incorrectly return a value from
916 : /// an ancestor branch, for example, or waste a lot of cycles chasing the
917 : /// non-existing key.
918 : ///
919 : /// # Cancel-Safety
920 : ///
921 : /// This method is cancellation-safe.
922 : #[inline(always)]
923 625130 : pub(crate) async fn get(
924 625130 : &self,
925 625130 : key: Key,
926 625130 : lsn: Lsn,
927 625130 : ctx: &RequestContext,
928 625130 : ) -> Result<Bytes, PageReconstructError> {
929 625130 : if !lsn.is_valid() {
930 0 : return Err(PageReconstructError::Other(anyhow::anyhow!("Invalid LSN")));
931 625130 : }
932 625130 :
933 625130 : // This check is debug-only because of the cost of hashing, and because it's a double-check: we
934 625130 : // already checked the key against the shard_identity when looking up the Timeline from
935 625130 : // page_service.
936 625130 : debug_assert!(!self.shard_identity.is_key_disposable(&key));
937 :
938 625130 : self.timeline_get_throttle.throttle(ctx, 1).await;
939 :
940 625130 : let keyspace = KeySpace {
941 625130 : ranges: vec![key..key.next()],
942 625130 : };
943 625130 :
944 625130 : // Initialise the reconstruct state for the key with the cache
945 625130 : // entry returned above.
946 625130 : let mut reconstruct_state = ValuesReconstructState::new();
947 :
948 625130 : let vectored_res = self
949 625130 : .get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx)
950 189914 : .await;
951 :
952 625130 : let key_value = vectored_res?.pop_first();
953 625124 : match key_value {
954 625112 : Some((got_key, value)) => {
955 625112 : if got_key != key {
956 0 : error!(
957 0 : "Expected {}, but singular vectored get returned {}",
958 : key, got_key
959 : );
960 0 : Err(PageReconstructError::Other(anyhow!(
961 0 : "Singular vectored get returned wrong key"
962 0 : )))
963 : } else {
964 625112 : value
965 : }
966 : }
967 12 : None => Err(PageReconstructError::MissingKey(MissingKeyError {
968 12 : key,
969 12 : shard: self.shard_identity.get_shard_number(&key),
970 12 : cont_lsn: Lsn(0),
971 12 : request_lsn: lsn,
972 12 : ancestor_lsn: None,
973 12 : backtrace: None,
974 12 : })),
975 : }
976 625130 : }
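// Illustrative caller sketch (assuming `timeline`, `key`, `lsn`, and `ctx` are
// in scope, and that the caller unifies the two error types): wait for the LSN
// to arrive before 'get'ting a key known to exist at it, per the doc comment above.
//
//     timeline.wait_lsn(lsn, WaitLsnWaiter::PageService, &ctx).await?;
//     let page: Bytes = timeline.get(key, lsn, &ctx).await?;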
977 :
978 : pub(crate) const MAX_GET_VECTORED_KEYS: u64 = 32;
979 : pub(crate) const VEC_GET_LAYERS_VISITED_WARN_THRESH: f64 = 512.0;
980 :
981 : /// Look up multiple page versions at a given LSN
982 : ///
983 : /// This naive implementation will be replaced with a more efficient one
984 : /// which actually vectorizes the read path.
985 1132 : pub(crate) async fn get_vectored(
986 1132 : &self,
987 1132 : keyspace: KeySpace,
988 1132 : lsn: Lsn,
989 1132 : ctx: &RequestContext,
990 1132 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
991 1132 : if !lsn.is_valid() {
992 0 : return Err(GetVectoredError::InvalidLsn(lsn));
993 1132 : }
994 1132 :
995 1132 : let key_count = keyspace.total_raw_size().try_into().unwrap();
996 1132 : if key_count > Timeline::MAX_GET_VECTORED_KEYS {
997 0 : return Err(GetVectoredError::Oversized(key_count));
998 1132 : }
999 :
1000 2264 : for range in &keyspace.ranges {
1001 1132 : let mut key = range.start;
1002 2468 : while key != range.end {
1003 1336 : assert!(!self.shard_identity.is_key_disposable(&key));
1004 1336 : key = key.next();
1005 : }
1006 : }
1007 :
1008 1132 : trace!(
1009 0 : "get vectored request for {:?}@{} from task kind {:?}",
1010 0 : keyspace,
1011 0 : lsn,
1012 0 : ctx.task_kind(),
1013 : );
1014 :
1015 1132 : let start = crate::metrics::GET_VECTORED_LATENCY
1016 1132 : .for_task_kind(ctx.task_kind())
1017 1132 : .map(|metric| (metric, Instant::now()));
1018 :
1019 : // start counting after throttle so that throttle time
1020 : // is always less than observation time
1021 1132 : let throttled = self
1022 1132 : .timeline_get_throttle
1023 1132 : .throttle(ctx, key_count as usize)
1024 0 : .await;
1025 :
1026 1132 : let res = self
1027 1132 : .get_vectored_impl(
1028 1132 : keyspace.clone(),
1029 1132 : lsn,
1030 1132 : &mut ValuesReconstructState::new(),
1031 1132 : ctx,
1032 1132 : )
1033 64 : .await;
1034 :
1035 1132 : if let Some((metric, start)) = start {
1036 0 : let elapsed = start.elapsed();
1037 0 : let ex_throttled = if let Some(throttled) = throttled {
1038 0 : elapsed.checked_sub(throttled)
1039 : } else {
1040 0 : Some(elapsed)
1041 : };
1042 :
1043 0 : if let Some(ex_throttled) = ex_throttled {
1044 0 : metric.observe(ex_throttled.as_secs_f64());
1045 0 : } else {
1046 0 : use utils::rate_limit::RateLimit;
1047 0 : static LOGGED: Lazy<Mutex<RateLimit>> =
1048 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(10))));
1049 0 : let mut rate_limit = LOGGED.lock().unwrap();
1050 0 : rate_limit.call(|| {
1051 0 : warn!("error deducting time spent throttled; this message is logged at a global rate limit");
1052 0 : });
1053 0 : }
1054 1132 : }
1055 :
1056 1132 : res
1057 1132 : }
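// Illustrative arithmetic for the latency metric above: time spent in the
// throttle is excluded, e.g. elapsed = 12ms with 2ms throttled records 10ms;
// if the subtraction underflows, we log at a rate limit instead of recording
// a bogus value.
//
//     let elapsed = Duration::from_millis(12);
//     let throttled = Duration::from_millis(2);
//     assert_eq!(elapsed.checked_sub(throttled), Some(Duration::from_millis(10)));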
1058 :
1059 : /// Scan the keyspace and return all existing key-value pairs in it. This is currently implemented
1060 : /// on top of vectored get. A normal vectored get throws an error when a key in the keyspace is not
1061 : /// found during the search; the scan interface instead returns all existing key-value pairs and does
1062 : /// not expect every single key in the keyspace to be found. The semantics are closer to the RocksDB
1063 : /// scan iterator interface. We could optimize this interface later to avoid some checks in the vectored
1064 : /// get path that maintain and split the probing and to-be-probed keyspaces. We also need to ensure that
1065 : /// the scan operation will not cause OOM in the future.
1066 12 : pub(crate) async fn scan(
1067 12 : &self,
1068 12 : keyspace: KeySpace,
1069 12 : lsn: Lsn,
1070 12 : ctx: &RequestContext,
1071 12 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1072 12 : if !lsn.is_valid() {
1073 0 : return Err(GetVectoredError::InvalidLsn(lsn));
1074 12 : }
1075 12 :
1076 12 : trace!(
1077 0 : "key-value scan request for {:?}@{} from task kind {:?}",
1078 0 : keyspace,
1079 0 : lsn,
1080 0 : ctx.task_kind()
1081 : );
1082 :
1083 : // We should generalize this into Keyspace::contains in the future.
1084 24 : for range in &keyspace.ranges {
1085 12 : if range.start.field1 < METADATA_KEY_BEGIN_PREFIX
1086 12 : || range.end.field1 > METADATA_KEY_END_PREFIX
1087 : {
1088 0 : return Err(GetVectoredError::Other(anyhow::anyhow!(
1089 0 : "only metadata keyspace can be scanned"
1090 0 : )));
1091 12 : }
1092 : }
1093 :
1094 12 : let start = crate::metrics::SCAN_LATENCY
1095 12 : .for_task_kind(ctx.task_kind())
1096 12 : .map(ScanLatencyOngoingRecording::start_recording);
1097 :
1098 : // start counting after throttle so that throttle time
1099 : // is always less than observation time
1100 12 : let throttled = self
1101 12 : .timeline_get_throttle
1102 12 : // assume scan = 1 quota for now until we find a better way to process this
1103 12 : .throttle(ctx, 1)
1104 0 : .await;
1105 :
1106 12 : let vectored_res = self
1107 12 : .get_vectored_impl(
1108 12 : keyspace.clone(),
1109 12 : lsn,
1110 12 : &mut ValuesReconstructState::default(),
1111 12 : ctx,
1112 12 : )
1113 0 : .await;
1114 :
1115 12 : if let Some(recording) = start {
1116 0 : recording.observe(throttled);
1117 12 : }
1118 :
1119 12 : vectored_res
1120 12 : }
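// Illustrative: `scan` accepts only metadata keyspaces, i.e. ranges whose
// field1 lies within [METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX];
// a hypothetical relation-data keyspace would be rejected with
// GetVectoredError::Other("only metadata keyspace can be scanned").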
1121 :
1122 626574 : pub(super) async fn get_vectored_impl(
1123 626574 : &self,
1124 626574 : keyspace: KeySpace,
1125 626574 : lsn: Lsn,
1126 626574 : reconstruct_state: &mut ValuesReconstructState,
1127 626574 : ctx: &RequestContext,
1128 626574 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1129 626574 : let get_kind = if keyspace.total_raw_size() == 1 {
1130 626142 : GetKind::Singular
1131 : } else {
1132 432 : GetKind::Vectored
1133 : };
1134 :
1135 626574 : let get_data_timer = crate::metrics::GET_RECONSTRUCT_DATA_TIME
1136 626574 : .for_get_kind(get_kind)
1137 626574 : .start_timer();
1138 626574 : self.get_vectored_reconstruct_data(keyspace.clone(), lsn, reconstruct_state, ctx)
1139 192145 : .await?;
1140 626558 : get_data_timer.stop_and_record();
1141 626558 :
1142 626558 : let reconstruct_timer = crate::metrics::RECONSTRUCT_TIME
1143 626558 : .for_get_kind(get_kind)
1144 626558 : .start_timer();
1145 626558 : let mut results: BTreeMap<Key, Result<Bytes, PageReconstructError>> = BTreeMap::new();
1146 626558 : let layers_visited = reconstruct_state.get_layers_visited();
1147 :
1148 666884 : for (key, res) in std::mem::take(&mut reconstruct_state.keys) {
1149 666884 : match res {
1150 0 : Err(err) => {
1151 0 : results.insert(key, Err(err));
1152 0 : }
1153 666884 : Ok(state) => {
1154 666884 : let state = ValueReconstructState::from(state);
1155 :
1156 666884 : let reconstruct_res = self.reconstruct_value(key, lsn, state).await;
1157 666884 : results.insert(key, reconstruct_res);
1158 : }
1159 : }
1160 : }
1161 626558 : reconstruct_timer.stop_and_record();
1162 626558 :
1163 626558 : // For aux file keys (v1 or v2) the vectored read path does not return an error
1164 626558 : // when they're missing. Instead they are omitted from the resulting btree
1165 626558 : // (this is a requirement, not a bug). Skip updating the metric in these cases
1166 626558 : // to avoid a division by zero producing an infinite average.
1167 626558 : if !results.is_empty() {
1168 626354 : let avg = layers_visited as f64 / results.len() as f64;
1169 626354 : if avg >= Self::VEC_GET_LAYERS_VISITED_WARN_THRESH {
1170 0 : use utils::rate_limit::RateLimit;
1171 0 : static LOGGED: Lazy<Mutex<RateLimit>> =
1172 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(60))));
1173 0 : let mut rate_limit = LOGGED.lock().unwrap();
1174 0 : rate_limit.call(|| {
1175 0 : tracing::info!(
1176 0 : shard_id = %self.tenant_shard_id.shard_slug(),
1177 0 : lsn = %lsn,
1178 0 : "Vectored read for {} visited {} layers on average per key and {} in total. {}/{} pages were returned",
1179 0 : keyspace, avg, layers_visited, results.len(), keyspace.total_raw_size());
1180 0 : });
1181 626354 : }
1182 :
1183 : // Note that this is an approximation. Tracking the exact number of layers visited
1184 : // per key requires virtually unbounded memory usage and is inefficient
1185 : // (e.g. a segment tree tracking each range queried from a layer).
1186 626354 : crate::metrics::VEC_READ_NUM_LAYERS_VISITED.observe(avg);
1187 204 : }
1188 :
1189 626558 : Ok(results)
1190 626574 : }
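// Illustrative arithmetic for the warning above: with layers_visited = 2048
// across 4 returned keys, avg = 2048.0 / 4.0 = 512.0, which reaches
// VEC_GET_LAYERS_VISITED_WARN_THRESH and triggers the rate-limited log.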
1191 :
1192 : /// Get last or prev record separately. Same as get_last_record_rlsn().last/prev.
1193 274282 : pub(crate) fn get_last_record_lsn(&self) -> Lsn {
1194 274282 : self.last_record_lsn.load().last
1195 274282 : }
1196 :
1197 0 : pub(crate) fn get_prev_record_lsn(&self) -> Lsn {
1198 0 : self.last_record_lsn.load().prev
1199 0 : }
1200 :
1201 : /// Atomically get both last and prev.
1202 226 : pub(crate) fn get_last_record_rlsn(&self) -> RecordLsn {
1203 226 : self.last_record_lsn.load()
1204 226 : }
1205 :
1206 : /// Subscribe to callers of wait_lsn(). The value of the channel is None if there are no
1207 : /// wait_lsn() calls in progress, and Some(Lsn) if there is an active waiter for wait_lsn().
1208 0 : pub(crate) fn subscribe_for_wait_lsn_updates(&self) -> watch::Receiver<Option<Lsn>> {
1209 0 : self.last_record_lsn.status_receiver()
1210 0 : }
1211 :
1212 410 : pub(crate) fn get_disk_consistent_lsn(&self) -> Lsn {
1213 410 : self.disk_consistent_lsn.load()
1214 410 : }
1215 :
1216 : /// remote_consistent_lsn from the perspective of the tenant's current generation,
1217 : /// not validated with control plane yet.
1218 : /// See [`Self::get_remote_consistent_lsn_visible`].
1219 0 : pub(crate) fn get_remote_consistent_lsn_projected(&self) -> Option<Lsn> {
1220 0 : self.remote_client.remote_consistent_lsn_projected()
1221 0 : }
1222 :
1223 : /// remote_consistent_lsn which the tenant is guaranteed not to go backward from,
1224 : /// i.e. a value of remote_consistent_lsn_projected which has undergone
1225 : /// generation validation in the deletion queue.
1226 0 : pub(crate) fn get_remote_consistent_lsn_visible(&self) -> Option<Lsn> {
1227 0 : self.remote_client.remote_consistent_lsn_visible()
1228 0 : }
1229 :
1230 : /// The sum of the file size of all historic layers in the layer map.
1231 : /// This method makes no distinction between local and remote layers.
1232 : /// Hence, the result **does not represent local filesystem usage**.
1233 0 : pub(crate) async fn layer_size_sum(&self) -> u64 {
1234 0 : let guard = self.layers.read().await;
1235 0 : guard.layer_size_sum()
1236 0 : }
1237 :
1238 0 : pub(crate) fn resident_physical_size(&self) -> u64 {
1239 0 : self.metrics.resident_physical_size_get()
1240 0 : }
1241 :
1242 0 : pub(crate) fn get_directory_metrics(&self) -> [u64; DirectoryKind::KINDS_NUM] {
1243 0 : array::from_fn(|idx| self.directory_metrics[idx].load(AtomicOrdering::Relaxed))
1244 0 : }
1245 :
1246 : ///
1247 : /// Wait until WAL has been received and processed up to this LSN.
1248 : ///
1249 : /// You should call this before any of the other get_* or list_* functions. Calling
1250 : /// those functions with an LSN that has not been processed yet is an error.
1251 : ///
1252 225225 : pub(crate) async fn wait_lsn(
1253 225225 : &self,
1254 225225 : lsn: Lsn,
1255 225225 : who_is_waiting: WaitLsnWaiter<'_>,
1256 225225 : ctx: &RequestContext, /* Prepare for use by cancellation */
1257 225225 : ) -> Result<(), WaitLsnError> {
1258 225225 : let state = self.current_state();
1259 225225 : if self.cancel.is_cancelled() || matches!(state, TimelineState::Stopping) {
1260 0 : return Err(WaitLsnError::Shutdown);
1261 225225 : } else if !matches!(state, TimelineState::Active) {
1262 0 : return Err(WaitLsnError::BadState(state));
1263 225225 : }
1264 225225 :
1265 225225 : if cfg!(debug_assertions) {
1266 225225 : match ctx.task_kind() {
1267 : TaskKind::WalReceiverManager
1268 : | TaskKind::WalReceiverConnectionHandler
1269 : | TaskKind::WalReceiverConnectionPoller => {
1270 0 : let is_myself = match who_is_waiting {
1271 0 : WaitLsnWaiter::Timeline(waiter) => Weak::ptr_eq(&waiter.myself, &self.myself),
1272 0 : WaitLsnWaiter::Tenant | WaitLsnWaiter::PageService => unreachable!("tenant or page_service context are not expected to have task kind {:?}", ctx.task_kind()),
1273 : };
1274 0 : if is_myself {
1275 0 : if let Err(current) = self.last_record_lsn.would_wait_for(lsn) {
1276 : // walingest is the only one that can advance last_record_lsn; it should make sure to never reach here
1277 0 : panic!("this timeline's walingest task is calling wait_lsn({lsn}) but we only have last_record_lsn={current}; would deadlock");
1278 0 : }
1279 0 : } else {
1280 0 : // if another timeline's walreceiver is waiting for us, there's no deadlock risk because
1281 0 : // our walreceiver task can make progress independently of theirs
1282 0 : }
1283 : }
1284 225225 : _ => {}
1285 : }
1286 0 : }
1287 :
1288 225225 : let _timer = crate::metrics::WAIT_LSN_TIME.start_timer();
1289 225225 :
1290 225225 : match self
1291 225225 : .last_record_lsn
1292 225225 : .wait_for_timeout(lsn, self.conf.wait_lsn_timeout)
1293 0 : .await
1294 : {
1295 225225 : Ok(()) => Ok(()),
1296 0 : Err(e) => {
1297 : use utils::seqwait::SeqWaitError::*;
1298 0 : match e {
1299 0 : Shutdown => Err(WaitLsnError::Shutdown),
1300 : Timeout => {
1301 : // don't count the time spent waiting for lock below, and also in walreceiver.status(), towards the wait_lsn_time_histo
1302 0 : drop(_timer);
1303 0 : let walreceiver_status = self.walreceiver_status();
1304 0 : Err(WaitLsnError::Timeout(format!(
1305 0 : "Timed out while waiting for WAL record at LSN {} to arrive, last_record_lsn {} disk consistent LSN={}, WalReceiver status: {}",
1306 0 : lsn,
1307 0 : self.get_last_record_lsn(),
1308 0 : self.get_disk_consistent_lsn(),
1309 0 : walreceiver_status,
1310 0 : )))
1311 : }
1312 : }
1313 : }
1314 : }
1315 225225 : }
1316 :
1317 0 : pub(crate) fn walreceiver_status(&self) -> String {
1318 0 : match &*self.walreceiver.lock().unwrap() {
1319 0 : None => "stopping or stopped".to_string(),
1320 0 : Some(walreceiver) => match walreceiver.status() {
1321 0 : Some(status) => status.to_human_readable_string(),
1322 0 : None => "Not active".to_string(),
1323 : },
1324 : }
1325 0 : }
1326 :
1327 : /// Check that it is valid to request operations with that lsn.
1328 230 : pub(crate) fn check_lsn_is_in_scope(
1329 230 : &self,
1330 230 : lsn: Lsn,
1331 230 : latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
1332 230 : ) -> anyhow::Result<()> {
1333 230 : ensure!(
1334 230 : lsn >= **latest_gc_cutoff_lsn,
1335 4 : "LSN {} is earlier than latest GC cutoff {} (we might've already garbage collected needed data)",
1336 4 : lsn,
1337 4 : **latest_gc_cutoff_lsn,
1338 : );
1339 226 : Ok(())
1340 230 : }
1341 :
1342 : /// Initializes an LSN lease. The function will return an error if the requested LSN is less than the `latest_gc_cutoff_lsn`.
1343 10 : pub(crate) fn init_lsn_lease(
1344 10 : &self,
1345 10 : lsn: Lsn,
1346 10 : length: Duration,
1347 10 : ctx: &RequestContext,
1348 10 : ) -> anyhow::Result<LsnLease> {
1349 10 : self.make_lsn_lease(lsn, length, true, ctx)
1350 10 : }
1351 :
1352 : /// Renews a lease at a particular LSN. The requested LSN is not validated against the `latest_gc_cutoff_lsn` when we are in the grace period.
1353 4 : pub(crate) fn renew_lsn_lease(
1354 4 : &self,
1355 4 : lsn: Lsn,
1356 4 : length: Duration,
1357 4 : ctx: &RequestContext,
1358 4 : ) -> anyhow::Result<LsnLease> {
1359 4 : self.make_lsn_lease(lsn, length, false, ctx)
1360 4 : }
1361 :
1362 : /// Obtains a temporary lease blocking garbage collection for the given LSN.
1363 : ///
1364 : /// If we are in `AttachedSingle` mode and are not blocked by the lsn lease deadline, this function will error
1365 : /// if the requested LSN is less than the `latest_gc_cutoff_lsn` and there is no existing lease present.
1366 : ///
1367 : /// If there is an existing lease in the map, the lease will be renewed only if the request extends the lease.
1368 : /// The returned lease is therefore the maximum of the existing lease and the requested lease.
1369 14 : fn make_lsn_lease(
1370 14 : &self,
1371 14 : lsn: Lsn,
1372 14 : length: Duration,
1373 14 : init: bool,
1374 14 : _ctx: &RequestContext,
1375 14 : ) -> anyhow::Result<LsnLease> {
1376 12 : let lease = {
1377 : // Normalize the requested LSN to be aligned, and move to the first record
1378 : // if it points to the beginning of the page (header).
1379 14 : let lsn = xlog_utils::normalize_lsn(lsn, WAL_SEGMENT_SIZE);
1380 14 :
1381 14 : let mut gc_info = self.gc_info.write().unwrap();
1382 14 :
1383 14 : let valid_until = SystemTime::now() + length;
1384 14 :
1385 14 : let entry = gc_info.leases.entry(lsn);
1386 14 :
1387 14 : match entry {
1388 6 : Entry::Occupied(mut occupied) => {
1389 6 : let existing_lease = occupied.get_mut();
1390 6 : if valid_until > existing_lease.valid_until {
1391 2 : existing_lease.valid_until = valid_until;
1392 2 : let dt: DateTime<Utc> = valid_until.into();
1393 2 : info!("lease extended to {}", dt);
1394 : } else {
1395 4 : let dt: DateTime<Utc> = existing_lease.valid_until.into();
1396 4 : info!("existing lease covers greater length, valid until {}", dt);
1397 : }
1398 :
1399 6 : existing_lease.clone()
1400 : }
1401 8 : Entry::Vacant(vacant) => {
1402 : // Reject already GC-ed LSN (lsn < latest_gc_cutoff) if we are in AttachedSingle and
1403 : // not blocked by the lsn lease deadline.
1404 8 : let validate = {
1405 8 : let conf = self.tenant_conf.load();
1406 8 : conf.location.attach_mode == AttachmentMode::Single
1407 8 : && !conf.is_gc_blocked_by_lsn_lease_deadline()
1408 : };
1409 :
1410 8 : if init || validate {
1411 8 : let latest_gc_cutoff_lsn = self.get_latest_gc_cutoff_lsn();
1412 8 : if lsn < *latest_gc_cutoff_lsn {
1413 2 : bail!("tried to request a page version that was garbage collected. requested at {} gc cutoff {}", lsn, *latest_gc_cutoff_lsn);
1414 6 : }
1415 0 : }
1416 :
1417 6 : let dt: DateTime<Utc> = valid_until.into();
1418 6 : info!("lease created, valid until {}", dt);
1419 6 : vacant.insert(LsnLease { valid_until }).clone()
1420 : }
1421 : }
1422 : };
1423 :
1424 12 : Ok(lease)
1425 14 : }
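// Illustrative lease-renewal semantics per the doc comment above (assumed
// durations): a renewal never shortens an existing lease, because the
// occupied-entry branch keeps the larger `valid_until`.
//
//     let first = timeline.init_lsn_lease(lsn, Duration::from_secs(600), &ctx)?;
//     let renew = timeline.renew_lsn_lease(lsn, Duration::from_secs(300), &ctx)?;
//     assert!(renew.valid_until >= first.valid_until);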
1426 :
1427 : /// Flush to disk all data that was written with the put_* functions
1428 1090 : #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
1429 : pub(crate) async fn freeze_and_flush(&self) -> Result<(), FlushLayerError> {
1430 : self.freeze_and_flush0().await
1431 : }
1432 :
1433 : // This exists to provide a non-span creating version of `freeze_and_flush` we can call without
1434 : // polluting the span hierarchy.
1435 1090 : pub(crate) async fn freeze_and_flush0(&self) -> Result<(), FlushLayerError> {
1436 1090 : let token = {
1437 : // Freeze the current open in-memory layer. It will be written to disk on next
1438 : // iteration.
1439 1090 : let mut g = self.write_lock.lock().await;
1440 :
1441 1090 : let to_lsn = self.get_last_record_lsn();
1442 1090 : self.freeze_inmem_layer_at(to_lsn, &mut g).await?
1443 : };
1444 1090 : self.wait_flush_completion(token).await
1445 1090 : }
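// Illustrative design note: `freeze_and_flush0` holds `write_lock` only while
// freezing the open layer to obtain a flush token, then waits for completion
// with the lock released, so WAL ingestion is not blocked for the duration of
// the flush.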
1446 :
1447 : // Check if an open ephemeral layer should be closed: this provides
1448 : // background enforcement of the checkpoint interval if there is no active WAL receiver, to avoid keeping
1449 : // an ephemeral layer open forever when idle. It also freezes layers if the global limit on
1450 : // ephemeral layer bytes has been breached.
1451 0 : pub(super) async fn maybe_freeze_ephemeral_layer(&self) {
1452 0 : let Ok(mut write_guard) = self.write_lock.try_lock() else {
1453 : // If the write lock is held, there is an active wal receiver: rolling open layers
1454 : // is their responsibility while they hold this lock.
1455 0 : return;
1456 : };
1457 :
1458 : // FIXME: why not early exit? Because before #7927 the state would have been cleared every
1459 : // time, and this was missed.
1460 : // if write_guard.is_none() { return; }
1461 :
1462 0 : let Ok(layers_guard) = self.layers.try_read() else {
1463 : // Don't block if the layer lock is busy
1464 0 : return;
1465 : };
1466 :
1467 0 : let Ok(lm) = layers_guard.layer_map() else {
1468 0 : return;
1469 : };
1470 :
1471 0 : let Some(open_layer) = &lm.open_layer else {
1472 : // If there is no open layer, we have no layer freezing to do. However, we might need to generate
1473 : // some updates to disk_consistent_lsn and remote_consistent_lsn, in case we ingested some WAL regions
1474 : // that didn't result in writes to this shard.
1475 :
1476 : // Must not hold the layers lock while waiting for a flush.
1477 0 : drop(layers_guard);
1478 0 :
1479 0 : let last_record_lsn = self.get_last_record_lsn();
1480 0 : let disk_consistent_lsn = self.get_disk_consistent_lsn();
1481 0 : if last_record_lsn > disk_consistent_lsn {
1482 : // We have no open layer, but disk_consistent_lsn is behind the last record: this indicates
1483 : // we are a sharded tenant and have skipped some WAL
1484 0 : let last_freeze_ts = *self.last_freeze_ts.read().unwrap();
1485 0 : if last_freeze_ts.elapsed() >= self.get_checkpoint_timeout() {
1486 : // Only do this if we have been layer-less for longer than get_checkpoint_timeout, so that a shard
1487 : // without any data ingested (yet) doesn't write a remote index as soon as it
1488 : // sees its LSN advance: we only do this once we have been layer-less
1489 : // for some time.
1490 0 : tracing::debug!(
1491 0 : "Advancing disk_consistent_lsn past WAL ingest gap {} -> {}",
1492 : disk_consistent_lsn,
1493 : last_record_lsn
1494 : );
1495 :
1496 : // The flush loop will update remote consistent LSN as well as disk consistent LSN.
1497 : // We know there is no open layer, so we can request freezing without actually
1498 : // freezing anything. This is true even though we have dropped the layers_guard:
1499 : // we still hold the write_guard.
1500 0 : let _ = async {
1501 0 : let token = self
1502 0 : .freeze_inmem_layer_at(last_record_lsn, &mut write_guard)
1503 0 : .await?;
1504 0 : self.wait_flush_completion(token).await
1505 0 : }
1506 0 : .await;
1507 0 : }
1508 0 : }
1509 :
1510 0 : return;
1511 : };
1512 :
1513 0 : let Some(current_size) = open_layer.try_len() else {
1514 : // Unexpected: since we hold the write guard, nobody else should be writing to this layer, so
1515 : // taking the read lock to get its size should always succeed.
1516 0 : tracing::warn!("Lock conflict while reading size of open layer");
1517 0 : return;
1518 : };
1519 :
1520 0 : let current_lsn = self.get_last_record_lsn();
1521 :
1522 0 : let checkpoint_distance_override = open_layer.tick().await;
1523 :
1524 0 : if let Some(size_override) = checkpoint_distance_override {
1525 0 : if current_size > size_override {
1526 : // This is not harmful, but it only happens in relatively rare cases where
1527 : // time-based checkpoints are not happening fast enough to keep the amount of
1528 : // ephemeral data within configured limits. It's a sign of stress on the system.
1529 0 : tracing::info!("Early-rolling open layer at size {current_size} (limit {size_override}) due to dirty data pressure");
1530 0 : }
1531 0 : }
1532 :
1533 0 : let checkpoint_distance =
1534 0 : checkpoint_distance_override.unwrap_or(self.get_checkpoint_distance());
1535 0 :
1536 0 : if self.should_roll(
1537 0 : current_size,
1538 0 : current_size,
1539 0 : checkpoint_distance,
1540 0 : self.get_last_record_lsn(),
1541 0 : self.last_freeze_at.load(),
1542 0 : open_layer.get_opened_at(),
1543 0 : ) {
1544 0 : match open_layer.info() {
1545 0 : InMemoryLayerInfo::Frozen { lsn_start, lsn_end } => {
1546 0 : // We may reach this point if the layer was already frozen but not yet flushed: flushing
1547 0 : // happens asynchronously in the background.
1548 0 : tracing::debug!(
1549 0 : "Not freezing open layer, it's already frozen ({lsn_start}..{lsn_end})"
1550 : );
1551 : }
1552 : InMemoryLayerInfo::Open { .. } => {
1553 : // Upgrade to a write lock and freeze the layer
1554 0 : drop(layers_guard);
1555 0 : let res = self
1556 0 : .freeze_inmem_layer_at(current_lsn, &mut write_guard)
1557 0 : .await;
1558 :
1559 0 : if let Err(e) = res {
1560 0 : tracing::info!(
1561 0 : "failed to flush frozen layer after background freeze: {e:#}"
1562 : );
1563 0 : }
1564 : }
1565 : }
1566 0 : }
1567 0 : }
1568 :
1569 : /// Checks if the internal state of the timeline is consistent with it being able to be offloaded.
1570 : ///
1571 : /// This is necessary but not sufficient for offloading the timeline, as it might have
1572 : /// child timelines that are not offloaded yet.
1573 0 : pub(crate) fn can_offload(&self) -> (bool, &'static str) {
1574 0 : if self.remote_client.is_archived() != Some(true) {
1575 0 : return (false, "the timeline is not archived");
1576 0 : }
1577 0 : if !self.remote_client.no_pending_work() {
1578 : // if the remote client is still processing some work, we can't offload
1579 0 : return (false, "the upload queue is not drained yet");
1580 0 : }
1581 0 :
1582 0 : (true, "ok")
1583 0 : }
1584 :
1585 : /// Outermost timeline compaction operation; downloads needed layers. Returns whether we have pending
1586 : /// compaction tasks.
1587 364 : pub(crate) async fn compact(
1588 364 : self: &Arc<Self>,
1589 364 : cancel: &CancellationToken,
1590 364 : flags: EnumSet<CompactFlags>,
1591 364 : ctx: &RequestContext,
1592 364 : ) -> Result<bool, CompactionError> {
1593 364 : // most likely the cancellation token is from a background task, but in tests it could be the
1594 364 : // request task as well.
1595 364 :
1596 364 : let prepare = async move {
1597 364 : let guard = self.compaction_lock.lock().await;
1598 :
1599 364 : let permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
1600 364 : BackgroundLoopKind::Compaction,
1601 364 : ctx,
1602 364 : )
1603 0 : .await;
1604 :
1605 364 : (guard, permit)
1606 364 : };
1607 :
1608 : // this wait probably never needs any "long time spent" logging, because we already nag if
1609 : // the compaction task goes over its period (20s), which is quite often in production.
1610 364 : let (_guard, _permit) = tokio::select! {
1611 364 : tuple = prepare => { tuple },
1612 364 : _ = self.cancel.cancelled() => return Ok(false),
1613 364 : _ = cancel.cancelled() => return Ok(false),
1614 : };
1615 :
1616 364 : let last_record_lsn = self.get_last_record_lsn();
1617 364 :
1618 364 : // The last record Lsn could be zero if the timeline was just created
1619 364 : if !last_record_lsn.is_valid() {
1620 0 : warn!("Skipping compaction for potentially just initialized timeline, it has invalid last record lsn: {last_record_lsn}");
1621 0 : return Ok(false);
1622 364 : }
1623 364 :
1624 364 : match self.get_compaction_algorithm_settings().kind {
1625 : CompactionAlgorithm::Tiered => {
1626 0 : self.compact_tiered(cancel, ctx).await?;
1627 0 : Ok(false)
1628 : }
1629 37059 : CompactionAlgorithm::Legacy => self.compact_legacy(cancel, flags, ctx).await,
1630 : }
1631 364 : }
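
The `tokio::select!` above is the standard way to make the lock-and-permit acquisition abortable: the future that takes the compaction lock races against two cancellation tokens, and losing the race returns early without ever holding the lock. A reduced sketch of the same shape, with a plain `tokio::sync::Mutex` standing in for the compaction lock:

```rust
use tokio::sync::Mutex;
use tokio_util::sync::CancellationToken;

/// Returns false if cancellation won the race before the lock was acquired.
async fn run_guarded(lock: &Mutex<()>, cancel: &CancellationToken) -> bool {
    let _guard = tokio::select! {
        guard = lock.lock() => guard,
        _ = cancel.cancelled() => return false, // give up instead of queueing forever
    };
    // ... do the compaction-like work while holding the lock ...
    true
}

#[tokio::main]
async fn main() {
    let lock = Mutex::new(());
    let cancel = CancellationToken::new();
    assert!(run_guarded(&lock, &cancel).await);

    // Hold the lock and cancel: the call now bails out promptly.
    let _held = lock.lock().await;
    cancel.cancel();
    assert!(!run_guarded(&lock, &cancel).await);
}
```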
1632 :
1633 : /// Mutate the timeline with a [`TimelineWriter`].
1634 5133160 : pub(crate) async fn writer(&self) -> TimelineWriter<'_> {
1635 5133160 : TimelineWriter {
1636 5133160 : tl: self,
1637 5133160 : write_guard: self.write_lock.lock().await,
1638 : }
1639 5133160 : }
1640 :
1641 0 : pub(crate) fn activate(
1642 0 : self: &Arc<Self>,
1643 0 : parent: Arc<crate::tenant::Tenant>,
1644 0 : broker_client: BrokerClientChannel,
1645 0 : background_jobs_can_start: Option<&completion::Barrier>,
1646 0 : ctx: &RequestContext,
1647 0 : ) {
1648 0 : if self.tenant_shard_id.is_shard_zero() {
1649 0 : // Logical size is only maintained accurately on shard zero.
1650 0 : self.spawn_initial_logical_size_computation_task(ctx);
1651 0 : }
1652 0 : self.launch_wal_receiver(ctx, broker_client);
1653 0 : self.set_state(TimelineState::Active);
1654 0 : self.launch_eviction_task(parent, background_jobs_can_start);
1655 0 : }
1656 :
1657 : /// After this function returns, no timeline-scoped tasks are left running.
1658 : ///
1659 : /// The preferred pattern is:
1660 : /// - in any spawned tasks, keep Timeline::guard open + Timeline::cancel / child token
1661 : /// - if early shutdown (not just cancellation) of a sub-tree of tasks is required,
1662 : /// go the extra mile and keep track of JoinHandles
1663 : /// - Keep track of JoinHandles using a passed-down `Arc<Mutex<Option<JoinSet>>>` or similar,
1664 : /// instead of spawning directly on a runtime. It is a more composable / testable pattern.
1665 : ///
1666 : /// For legacy reasons, we still have multiple tasks spawned using
1667 : /// `task_mgr::spawn(X, Some(tenant_id), Some(timeline_id))`.
1668 : /// We refer to these as "timeline-scoped task_mgr tasks".
1669 : /// Some of these tasks are already sensitive to Timeline::cancel while others are
1670 : /// not sensitive to Timeline::cancel and instead respect [`task_mgr::shutdown_token`]
1671 : /// or [`task_mgr::shutdown_watcher`].
1672 : /// We want to gradually convert the code base away from these.
1673 : ///
1674 : /// Here is an inventory of timeline-scoped task_mgr tasks that are still sensitive to
1675 : /// `task_mgr::shutdown_{token,watcher}` (there are also tenant-scoped and global-scoped
1676 : /// ones that aren't mentioned here):
1677 : /// - [`TaskKind::TimelineDeletionWorker`]
1678 : /// - NB: also used for tenant deletion
1679 : /// - [`TaskKind::RemoteUploadTask`]
1680 : /// - [`TaskKind::InitialLogicalSizeCalculation`]
1681 : /// - [`TaskKind::DownloadAllRemoteLayers`] (can we get rid of it?)
1682 : /// Inventory of timeline-scoped task_mgr tasks that use spawn but aren't sensitive to these:
1683 : /// - [`TaskKind::Eviction`]
1684 : /// - [`TaskKind::LayerFlushTask`]
1685 : /// - [`TaskKind::OndemandLogicalSizeCalculation`]
1686 : /// - [`TaskKind::GarbageCollector`] (immediate_gc is timeline-scoped)
1687 8 : pub(crate) async fn shutdown(&self, mode: ShutdownMode) {
1688 8 : debug_assert_current_span_has_tenant_and_timeline_id();
1689 8 :
1690 8 : // Regardless of whether we're going to try_freeze_and_flush
1691 8 : // or not, stop ingesting any more data. Walreceiver only provides
1692 8 : // cancellation but no "wait until gone", because it uses the Timeline::gate.
1693 8 : // So, only after the self.gate.close() below will we know for sure that
1694 8 : // no walreceiver tasks are left.
1695 8 : // For `try_freeze_and_flush=true`, this means that we might still be ingesting
1696 8 : // data during the call to `self.freeze_and_flush()` below.
1697 8 : // That's not ideal, but, we don't have the concept of a ChildGuard,
1698 8 : // which is what we'd need to properly model early shutdown of the walreceiver
1699 8 : // task sub-tree before the other Timeline task sub-trees.
1700 8 : let walreceiver = self.walreceiver.lock().unwrap().take();
1701 8 : tracing::debug!(
1702 0 : is_some = walreceiver.is_some(),
1703 0 : "Waiting for WalReceiverManager..."
1704 : );
1705 8 : if let Some(walreceiver) = walreceiver {
1706 0 : walreceiver.cancel();
1707 8 : }
1708 : // ... and inform any waiters for newer LSNs that there won't be any.
1709 8 : self.last_record_lsn.shutdown();
1710 8 :
1711 8 : if let ShutdownMode::FreezeAndFlush = mode {
1712 6 : if let Some((open, frozen)) = self
1713 6 : .layers
1714 6 : .read()
1715 0 : .await
1716 6 : .layer_map()
1717 6 : .map(|lm| (lm.open_layer.is_some(), lm.frozen_layers.len()))
1718 6 : .ok()
1719 6 : .filter(|(open, frozen)| *open || *frozen > 0)
1720 : {
1721 0 : tracing::info!(?open, frozen, "flushing and freezing on shutdown");
1722 6 : } else {
1723 6 : // this is double-shutdown, ignore it
1724 6 : }
1725 :
1726 : // we shut down walreceiver above, so, we won't add anything more
1727 : // to the InMemoryLayer; freeze it and wait for all frozen layers
1728 : // to reach the disk & upload queue, then shut the upload queue and
1729 : // wait for it to drain.
1730 6 : match self.freeze_and_flush().await {
1731 : Ok(_) => {
1732 : // drain the upload queue
1733 : // if we did not wait for completion here, it might be that our shutdown process
1734 : // didn't wait for remote uploads to complete at all, as new tasks can forever
1735 : // be spawned.
1736 : //
1737 : // what is problematic is the shutting down of RemoteTimelineClient, because
1738 : // obviously it does not make sense to stop while we wait for it, but what
1739 : // about corner cases like s3 suddenly hanging up?
1740 6 : self.remote_client.shutdown().await;
1741 : }
1742 : Err(FlushLayerError::Cancelled) => {
1743 : // this is likely the second shutdown, ignore silently.
1744 : // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080 is fixed
1745 0 : debug_assert!(self.cancel.is_cancelled());
1746 : }
1747 0 : Err(e) => {
1748 0 : // Non-fatal. Shutdown is infallible. Failures to flush just mean that
1749 0 : // we have some extra WAL replay to do next time the timeline starts.
1750 0 : warn!("failed to freeze and flush: {e:#}");
1751 : }
1752 : }
1753 :
1754 : // `self.remote_client.shutdown().await` above should have already flushed everything from the queue, but
1755 : // we also do a final check here to ensure that the queue is empty.
1756 6 : if !self.remote_client.no_pending_work() {
1757 0 : warn!("still have pending work in remote upload queue, but continuing shutting down anyways");
1758 6 : }
1759 2 : }
1760 :
1761 8 : if let ShutdownMode::Flush = mode {
1762 : // drain the upload queue
1763 0 : self.remote_client.shutdown().await;
1764 0 : if !self.remote_client.no_pending_work() {
1765 0 : warn!("still have pending work in remote upload queue, but continuing shutting down anyways");
1766 0 : }
1767 8 : }
1768 :
1769 : // Signal any subscribers to our cancellation token to drop out
1770 8 : tracing::debug!("Cancelling CancellationToken");
1771 8 : self.cancel.cancel();
1772 8 :
1773 8 : // Prevent new page service requests from starting.
1774 8 : self.handles.shutdown();
1775 8 :
1776 8 : // Transition the remote_client into a state where it's only useful for timeline deletion.
1777 8 : // (The deletion use case is why we can't just hook up remote_client to Self::cancel.)
1778 8 : self.remote_client.stop();
1779 8 :
1780 8 : // As documented in remote_client.stop()'s doc comment, it's our responsibility
1781 8 : // to shut down the upload queue tasks.
1782 8 : // TODO: fix that, task management should be encapsulated inside remote_client.
1783 8 : task_mgr::shutdown_tasks(
1784 8 : Some(TaskKind::RemoteUploadTask),
1785 8 : Some(self.tenant_shard_id),
1786 8 : Some(self.timeline_id),
1787 8 : )
1788 0 : .await;
1789 :
1790 : // TODO: work toward making this a no-op. See this function's doc comment for more context.
1791 8 : tracing::debug!("Waiting for tasks...");
1792 8 : task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), Some(self.timeline_id)).await;
1793 :
1794 : {
1795 : // Allow any remaining in-memory layers to do cleanup -- until that, they hold the gate
1796 : // open.
1797 8 : let mut write_guard = self.write_lock.lock().await;
1798 8 : self.layers.write().await.shutdown(&mut write_guard);
1799 8 : }
1800 8 :
1801 8 : // Finally wait until any gate-holders are complete.
1802 8 : //
1803 8 : // TODO: once above shutdown_tasks is a no-op, we can close the gate before calling shutdown_tasks
1804 8 : // and use a TBD variant of shutdown_tasks that asserts that there were no tasks left.
1805 8 : self.gate.close().await;
1806 :
1807 8 : self.metrics.shutdown();
1808 8 : }
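
The shutdown ordering above follows the pattern described in the doc comment: signal cancellation first, then wait until every task holding the gate has drained. A standalone sketch of that two-phase pattern, using a `JoinSet` in place of the pageserver-internal `Gate`:

```rust
use tokio::task::JoinSet;
use tokio_util::sync::CancellationToken;

#[tokio::main]
async fn main() {
    let cancel = CancellationToken::new();
    let mut tasks = JoinSet::new();

    for i in 0..3 {
        let cancel = cancel.child_token();
        tasks.spawn(async move {
            // Simulated worker: run until shutdown is requested.
            cancel.cancelled().await;
            println!("task {i} exiting cleanly");
        });
    }

    // Phase 1: tell everyone to stop. Phase 2: wait until they are all gone.
    cancel.cancel();
    while tasks.join_next().await.is_some() {}
    // Only now is it safe to tear down shared state the tasks relied on.
}
```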
1809 :
1810 414 : pub(crate) fn set_state(&self, new_state: TimelineState) {
1811 414 : match (self.current_state(), new_state) {
1812 414 : (equal_state_1, equal_state_2) if equal_state_1 == equal_state_2 => {
1813 2 : info!("Ignoring new state, equal to the existing one: {equal_state_2:?}");
1814 : }
1815 0 : (st, TimelineState::Loading) => {
1816 0 : error!("ignoring transition from {st:?} into Loading state");
1817 : }
1818 0 : (TimelineState::Broken { .. }, new_state) => {
1819 0 : error!("Ignoring state update {new_state:?} for broken timeline");
1820 : }
1821 : (TimelineState::Stopping, TimelineState::Active) => {
1822 0 : error!("Not activating a Stopping timeline");
1823 : }
1824 412 : (_, new_state) => {
1825 412 : self.state.send_replace(new_state);
1826 412 : }
1827 : }
1828 414 : }
1829 :
1830 2 : pub(crate) fn set_broken(&self, reason: String) {
1831 2 : let backtrace_str: String = format!("{}", std::backtrace::Backtrace::force_capture());
1832 2 : let broken_state = TimelineState::Broken {
1833 2 : reason,
1834 2 : backtrace: backtrace_str,
1835 2 : };
1836 2 : self.set_state(broken_state);
1837 2 :
1838 2 : // Although the Broken state is not equivalent to shutdown() (shutdown will be called
1839 2 : // later when this tenant is detached or the process shuts down), firing the cancellation token
1840 2 : // here avoids the need for other tasks to watch for the Broken state explicitly.
1841 2 : self.cancel.cancel();
1842 2 : }
1843 :
1844 226229 : pub(crate) fn current_state(&self) -> TimelineState {
1845 226229 : self.state.borrow().clone()
1846 226229 : }
1847 :
1848 6 : pub(crate) fn is_broken(&self) -> bool {
1849 6 : matches!(&*self.state.borrow(), TimelineState::Broken { .. })
1850 6 : }
1851 :
1852 222 : pub(crate) fn is_active(&self) -> bool {
1853 222 : self.current_state() == TimelineState::Active
1854 222 : }
1855 :
1856 0 : pub(crate) fn is_archived(&self) -> Option<bool> {
1857 0 : self.remote_client.is_archived()
1858 0 : }
1859 :
1860 368 : pub(crate) fn is_stopping(&self) -> bool {
1861 368 : self.current_state() == TimelineState::Stopping
1862 368 : }
1863 :
1864 0 : pub(crate) fn subscribe_for_state_updates(&self) -> watch::Receiver<TimelineState> {
1865 0 : self.state.subscribe()
1866 0 : }
1867 :
1868 225227 : pub(crate) async fn wait_to_become_active(
1869 225227 : &self,
1870 225227 : _ctx: &RequestContext, // Prepare for use by cancellation
1871 225227 : ) -> Result<(), TimelineState> {
1872 225227 : let mut receiver = self.state.subscribe();
1873 : loop {
1874 225227 : let current_state = receiver.borrow().clone();
1875 225227 : match current_state {
1876 : TimelineState::Loading => {
1877 0 : receiver
1878 0 : .changed()
1879 0 : .await
1880 0 : .expect("holding a reference to self");
1881 : }
1882 : TimelineState::Active { .. } => {
1883 225225 : return Ok(());
1884 : }
1885 : TimelineState::Broken { .. } | TimelineState::Stopping => {
1886 : // There's no chance the timeline can transition back into ::Active
1887 2 : return Err(current_state);
1888 : }
1889 : }
1890 : }
1891 225227 : }
1892 :
1893 0 : pub(crate) async fn layer_map_info(
1894 0 : &self,
1895 0 : reset: LayerAccessStatsReset,
1896 0 : ) -> Result<LayerMapInfo, layer_manager::Shutdown> {
1897 0 : let guard = self.layers.read().await;
1898 0 : let layer_map = guard.layer_map()?;
1899 0 : let mut in_memory_layers = Vec::with_capacity(layer_map.frozen_layers.len() + 1);
1900 0 : if let Some(open_layer) = &layer_map.open_layer {
1901 0 : in_memory_layers.push(open_layer.info());
1902 0 : }
1903 0 : for frozen_layer in &layer_map.frozen_layers {
1904 0 : in_memory_layers.push(frozen_layer.info());
1905 0 : }
1906 :
1907 0 : let historic_layers = layer_map
1908 0 : .iter_historic_layers()
1909 0 : .map(|desc| guard.get_from_desc(&desc).info(reset))
1910 0 : .collect();
1911 0 :
1912 0 : Ok(LayerMapInfo {
1913 0 : in_memory_layers,
1914 0 : historic_layers,
1915 0 : })
1916 0 : }
1917 :
1918 0 : #[instrument(skip_all, fields(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))]
1919 : pub(crate) async fn download_layer(
1920 : &self,
1921 : layer_file_name: &LayerName,
1922 : ) -> anyhow::Result<Option<bool>> {
1923 : let Some(layer) = self.find_layer(layer_file_name).await? else {
1924 : return Ok(None);
1925 : };
1926 :
1927 : layer.download().await?;
1928 :
1929 : Ok(Some(true))
1930 : }
1931 :
1932 : /// Evict just one layer.
1933 : ///
1934 : /// Returns `Ok(None)` in the case where the layer could not be found by its `layer_file_name`.
1935 0 : pub(crate) async fn evict_layer(
1936 0 : &self,
1937 0 : layer_file_name: &LayerName,
1938 0 : ) -> anyhow::Result<Option<bool>> {
1939 0 : let _gate = self
1940 0 : .gate
1941 0 : .enter()
1942 0 : .map_err(|_| anyhow::anyhow!("Shutting down"))?;
1943 :
1944 0 : let Some(local_layer) = self.find_layer(layer_file_name).await? else {
1945 0 : return Ok(None);
1946 : };
1947 :
1948 : // curl has this by default
1949 0 : let timeout = std::time::Duration::from_secs(120);
1950 0 :
1951 0 : match local_layer.evict_and_wait(timeout).await {
1952 0 : Ok(()) => Ok(Some(true)),
1953 0 : Err(EvictionError::NotFound) => Ok(Some(false)),
1954 0 : Err(EvictionError::Downloaded) => Ok(Some(false)),
1955 0 : Err(EvictionError::Timeout) => Ok(Some(false)),
1956 : }
1957 0 : }
1958 :
1959 4803010 : fn should_roll(
1960 4803010 : &self,
1961 4803010 : layer_size: u64,
1962 4803010 : projected_layer_size: u64,
1963 4803010 : checkpoint_distance: u64,
1964 4803010 : projected_lsn: Lsn,
1965 4803010 : last_freeze_at: Lsn,
1966 4803010 : opened_at: Instant,
1967 4803010 : ) -> bool {
1968 4803010 : let distance = projected_lsn.widening_sub(last_freeze_at);
1969 4803010 :
1970 4803010 : // Rolling the open layer can be triggered by:
1971 4803010 : // 1. The distance from the last LSN we rolled at. This bounds the amount of WAL that
1972 4803010 : // the safekeepers need to store. For sharded tenants, we multiply by shard count to
1973 4803010 : // account for how writes are distributed across shards: we expect each node to consume
1974 4803010 : // 1/count of the LSN on average.
1975 4803010 : // 2. The size of the currently open layer.
1976 4803010 : // 3. The time since the last roll. It helps safekeepers to regard pageserver as caught
1977 4803010 : // up and suspend activity.
1978 4803010 : if distance >= checkpoint_distance as i128 * self.shard_identity.count.count() as i128 {
1979 0 : info!(
1980 0 : "Will roll layer at {} with layer size {} due to LSN distance ({})",
1981 : projected_lsn, layer_size, distance
1982 : );
1983 :
1984 0 : true
1985 4803010 : } else if projected_layer_size >= checkpoint_distance {
1986 : // NB: this check is relied upon by:
1987 80 : let _ = IndexEntry::validate_checkpoint_distance;
1988 80 : info!(
1989 0 : "Will roll layer at {} with layer size {} due to layer size ({})",
1990 : projected_lsn, layer_size, projected_layer_size
1991 : );
1992 :
1993 80 : true
1994 4802930 : } else if distance > 0 && opened_at.elapsed() >= self.get_checkpoint_timeout() {
1995 0 : info!(
1996 0 : "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})",
1997 0 : projected_lsn,
1998 0 : layer_size,
1999 0 : opened_at.elapsed()
2000 : );
2001 :
2002 0 : true
2003 : } else {
2004 4802930 : false
2005 : }
2006 4803010 : }
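
The three triggers enumerated in the comment above compose with a simple `||`: any one firing is enough to roll the layer. A standalone model with plain integers (the 256 MiB `checkpoint_distance` below is only an illustrative value):

```rust
use std::time::{Duration, Instant};

/// Simplified model of the roll decision: any one trigger suffices.
fn should_roll(
    projected_layer_size: u64,
    checkpoint_distance: u64,
    lsn_distance: u64, // projected_lsn - last_freeze_at
    shard_count: u64,
    opened_at: Instant,
    checkpoint_timeout: Duration,
) -> bool {
    lsn_distance >= checkpoint_distance * shard_count      // 1. LSN distance
        || projected_layer_size >= checkpoint_distance     // 2. layer size
        || (lsn_distance > 0 && opened_at.elapsed() >= checkpoint_timeout) // 3. age
}

fn main() {
    let mib = 1024 * 1024;
    // A 300 MiB open layer rolls on the size trigger alone, even though the
    // LSN distance is small and the layer is young.
    assert!(should_roll(
        300 * mib, 256 * mib, 10 * mib, 1,
        Instant::now(), Duration::from_secs(600),
    ));
}
```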
2007 : }
2008 :
2009 : /// Number of times we will recompute the partitioning within one checkpoint distance.
2010 : const REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE: u64 = 10;
2011 :
2012 : // Private functions
2013 : impl Timeline {
2014 12 : pub(crate) fn get_lsn_lease_length(&self) -> Duration {
2015 12 : let tenant_conf = self.tenant_conf.load();
2016 12 : tenant_conf
2017 12 : .tenant_conf
2018 12 : .lsn_lease_length
2019 12 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
2020 12 : }
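
This getter, and the dozen below it, all apply the same two-level overlay: a per-tenant `Option` wins if set, otherwise the server-wide default applies. A minimal sketch of the pattern, with single-field stand-ins for the config structs:

```rust
use std::time::Duration;

struct TenantConfOpt { lsn_lease_length: Option<Duration> } // per-tenant overrides
struct TenantConf { lsn_lease_length: Duration }            // server-wide defaults

fn effective_lease_length(overrides: &TenantConfOpt, defaults: &TenantConf) -> Duration {
    overrides.lsn_lease_length.unwrap_or(defaults.lsn_lease_length)
}

fn main() {
    let defaults = TenantConf { lsn_lease_length: Duration::from_secs(600) };

    let unset = TenantConfOpt { lsn_lease_length: None };
    assert_eq!(effective_lease_length(&unset, &defaults), Duration::from_secs(600));

    let set = TenantConfOpt { lsn_lease_length: Some(Duration::from_secs(60)) };
    assert_eq!(effective_lease_length(&set, &defaults), Duration::from_secs(60));
}
```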
2021 :
2022 0 : pub(crate) fn get_lsn_lease_length_for_ts(&self) -> Duration {
2023 0 : let tenant_conf = self.tenant_conf.load();
2024 0 : tenant_conf
2025 0 : .tenant_conf
2026 0 : .lsn_lease_length_for_ts
2027 0 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length_for_ts)
2028 0 : }
2029 :
2030 0 : pub(crate) fn get_lazy_slru_download(&self) -> bool {
2031 0 : let tenant_conf = self.tenant_conf.load();
2032 0 : tenant_conf
2033 0 : .tenant_conf
2034 0 : .lazy_slru_download
2035 0 : .unwrap_or(self.conf.default_tenant_conf.lazy_slru_download)
2036 0 : }
2037 :
2038 4804608 : fn get_checkpoint_distance(&self) -> u64 {
2039 4804608 : let tenant_conf = self.tenant_conf.load();
2040 4804608 : tenant_conf
2041 4804608 : .tenant_conf
2042 4804608 : .checkpoint_distance
2043 4804608 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
2044 4804608 : }
2045 :
2046 4802930 : fn get_checkpoint_timeout(&self) -> Duration {
2047 4802930 : let tenant_conf = self.tenant_conf.load();
2048 4802930 : tenant_conf
2049 4802930 : .tenant_conf
2050 4802930 : .checkpoint_timeout
2051 4802930 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
2052 4802930 : }
2053 :
2054 616 : fn get_compaction_target_size(&self) -> u64 {
2055 616 : let tenant_conf = self.tenant_conf.load();
2056 616 : tenant_conf
2057 616 : .tenant_conf
2058 616 : .compaction_target_size
2059 616 : .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
2060 616 : }
2061 :
2062 1534 : fn get_compaction_threshold(&self) -> usize {
2063 1534 : let tenant_conf = self.tenant_conf.load();
2064 1534 : tenant_conf
2065 1534 : .tenant_conf
2066 1534 : .compaction_threshold
2067 1534 : .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
2068 1534 : }
2069 :
2070 14 : fn get_image_creation_threshold(&self) -> usize {
2071 14 : let tenant_conf = self.tenant_conf.load();
2072 14 : tenant_conf
2073 14 : .tenant_conf
2074 14 : .image_creation_threshold
2075 14 : .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
2076 14 : }
2077 :
2078 364 : fn get_compaction_algorithm_settings(&self) -> CompactionAlgorithmSettings {
2079 364 : let tenant_conf = &self.tenant_conf.load();
2080 364 : tenant_conf
2081 364 : .tenant_conf
2082 364 : .compaction_algorithm
2083 364 : .as_ref()
2084 364 : .unwrap_or(&self.conf.default_tenant_conf.compaction_algorithm)
2085 364 : .clone()
2086 364 : }
2087 :
2088 0 : fn get_eviction_policy(&self) -> EvictionPolicy {
2089 0 : let tenant_conf = self.tenant_conf.load();
2090 0 : tenant_conf
2091 0 : .tenant_conf
2092 0 : .eviction_policy
2093 0 : .unwrap_or(self.conf.default_tenant_conf.eviction_policy)
2094 0 : }
2095 :
2096 414 : fn get_evictions_low_residence_duration_metric_threshold(
2097 414 : tenant_conf: &TenantConfOpt,
2098 414 : default_tenant_conf: &TenantConf,
2099 414 : ) -> Duration {
2100 414 : tenant_conf
2101 414 : .evictions_low_residence_duration_metric_threshold
2102 414 : .unwrap_or(default_tenant_conf.evictions_low_residence_duration_metric_threshold)
2103 414 : }
2104 :
2105 712 : fn get_image_layer_creation_check_threshold(&self) -> u8 {
2106 712 : let tenant_conf = self.tenant_conf.load();
2107 712 : tenant_conf
2108 712 : .tenant_conf
2109 712 : .image_layer_creation_check_threshold
2110 712 : .unwrap_or(
2111 712 : self.conf
2112 712 : .default_tenant_conf
2113 712 : .image_layer_creation_check_threshold,
2114 712 : )
2115 712 : }
2116 :
2117 0 : pub(super) fn tenant_conf_updated(&self, new_conf: &TenantConfOpt) {
2118 0 : // NB: Most tenant conf options are read by background loops, so,
2119 0 : // changes will automatically be picked up.
2120 0 :
2121 0 : // The threshold is embedded in the metric. So, we need to update it.
2122 0 : {
2123 0 : let new_threshold = Self::get_evictions_low_residence_duration_metric_threshold(
2124 0 : new_conf,
2125 0 : &self.conf.default_tenant_conf,
2126 0 : );
2127 0 :
2128 0 : let tenant_id_str = self.tenant_shard_id.tenant_id.to_string();
2129 0 : let shard_id_str = format!("{}", self.tenant_shard_id.shard_slug());
2130 0 :
2131 0 : let timeline_id_str = self.timeline_id.to_string();
2132 0 : self.metrics
2133 0 : .evictions_with_low_residence_duration
2134 0 : .write()
2135 0 : .unwrap()
2136 0 : .change_threshold(
2137 0 : &tenant_id_str,
2138 0 : &shard_id_str,
2139 0 : &timeline_id_str,
2140 0 : new_threshold,
2141 0 : );
2142 0 : }
2143 0 : }
2144 :
2145 : /// Open a Timeline handle.
2146 : ///
2147 : /// Loads the metadata for the timeline into memory, but not the layer map.
2148 : #[allow(clippy::too_many_arguments)]
2149 414 : pub(super) fn new(
2150 414 : conf: &'static PageServerConf,
2151 414 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
2152 414 : metadata: &TimelineMetadata,
2153 414 : ancestor: Option<Arc<Timeline>>,
2154 414 : timeline_id: TimelineId,
2155 414 : tenant_shard_id: TenantShardId,
2156 414 : generation: Generation,
2157 414 : shard_identity: ShardIdentity,
2158 414 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
2159 414 : resources: TimelineResources,
2160 414 : pg_version: u32,
2161 414 : state: TimelineState,
2162 414 : attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
2163 414 : create_idempotency: crate::tenant::CreateTimelineIdempotency,
2164 414 : cancel: CancellationToken,
2165 414 : ) -> Arc<Self> {
2166 414 : let disk_consistent_lsn = metadata.disk_consistent_lsn();
2167 414 : let (state, _) = watch::channel(state);
2168 414 :
2169 414 : let (layer_flush_start_tx, _) = tokio::sync::watch::channel((0, disk_consistent_lsn));
2170 414 : let (layer_flush_done_tx, _) = tokio::sync::watch::channel((0, Ok(())));
2171 414 :
2172 414 : let evictions_low_residence_duration_metric_threshold = {
2173 414 : let loaded_tenant_conf = tenant_conf.load();
2174 414 : Self::get_evictions_low_residence_duration_metric_threshold(
2175 414 : &loaded_tenant_conf.tenant_conf,
2176 414 : &conf.default_tenant_conf,
2177 414 : )
2178 : };
2179 :
2180 414 : if let Some(ancestor) = &ancestor {
2181 228 : let mut ancestor_gc_info = ancestor.gc_info.write().unwrap();
2182 228 : // If we construct an explicit timeline object, it's obviously not offloaded
2183 228 : let is_offloaded = MaybeOffloaded::No;
2184 228 : ancestor_gc_info.insert_child(timeline_id, metadata.ancestor_lsn(), is_offloaded);
2185 228 : }
2186 :
2187 414 : Arc::new_cyclic(|myself| {
2188 414 : let metrics = TimelineMetrics::new(
2189 414 : &tenant_shard_id,
2190 414 : &timeline_id,
2191 414 : crate::metrics::EvictionsWithLowResidenceDurationBuilder::new(
2192 414 : "mtime",
2193 414 : evictions_low_residence_duration_metric_threshold,
2194 414 : ),
2195 414 : );
2196 414 : let aux_file_metrics = metrics.aux_file_size_gauge.clone();
2197 :
2198 414 : let mut result = Timeline {
2199 414 : conf,
2200 414 : tenant_conf,
2201 414 : myself: myself.clone(),
2202 414 : timeline_id,
2203 414 : tenant_shard_id,
2204 414 : generation,
2205 414 : shard_identity,
2206 414 : pg_version,
2207 414 : layers: Default::default(),
2208 414 :
2209 414 : walredo_mgr,
2210 414 : walreceiver: Mutex::new(None),
2211 414 :
2212 414 : remote_client: Arc::new(resources.remote_client),
2213 414 :
2214 414 : // initialize in-memory 'last_record_lsn' from 'disk_consistent_lsn'.
2215 414 : last_record_lsn: SeqWait::new(RecordLsn {
2216 414 : last: disk_consistent_lsn,
2217 414 : prev: metadata.prev_record_lsn().unwrap_or(Lsn(0)),
2218 414 : }),
2219 414 : disk_consistent_lsn: AtomicLsn::new(disk_consistent_lsn.0),
2220 414 :
2221 414 : last_freeze_at: AtomicLsn::new(disk_consistent_lsn.0),
2222 414 : last_freeze_ts: RwLock::new(Instant::now()),
2223 414 :
2224 414 : loaded_at: (disk_consistent_lsn, SystemTime::now()),
2225 414 :
2226 414 : ancestor_timeline: ancestor,
2227 414 : ancestor_lsn: metadata.ancestor_lsn(),
2228 414 :
2229 414 : metrics,
2230 414 :
2231 414 : query_metrics: crate::metrics::SmgrQueryTimePerTimeline::new(
2232 414 : &tenant_shard_id,
2233 414 : &timeline_id,
2234 414 : ),
2235 414 :
2236 2898 : directory_metrics: array::from_fn(|_| AtomicU64::new(0)),
2237 414 :
2238 414 : flush_loop_state: Mutex::new(FlushLoopState::NotStarted),
2239 414 :
2240 414 : layer_flush_start_tx,
2241 414 : layer_flush_done_tx,
2242 414 :
2243 414 : write_lock: tokio::sync::Mutex::new(None),
2244 414 :
2245 414 : gc_info: std::sync::RwLock::new(GcInfo::default()),
2246 414 :
2247 414 : latest_gc_cutoff_lsn: Rcu::new(metadata.latest_gc_cutoff_lsn()),
2248 414 : initdb_lsn: metadata.initdb_lsn(),
2249 414 :
2250 414 : current_logical_size: if disk_consistent_lsn.is_valid() {
2251 : // we're creating timeline data with some layer files existing locally;
2252 : // we need to recalculate the timeline's logical size based on data in the layers.
2253 232 : LogicalSize::deferred_initial(disk_consistent_lsn)
2254 : } else {
2255 : // we're creating timeline data without any layers existing locally,
2256 : // initial logical size is 0.
2257 182 : LogicalSize::empty_initial()
2258 : },
2259 414 : partitioning: tokio::sync::Mutex::new((
2260 414 : (KeyPartitioning::new(), KeyPartitioning::new().into_sparse()),
2261 414 : Lsn(0),
2262 414 : )),
2263 414 : repartition_threshold: 0,
2264 414 : last_image_layer_creation_check_at: AtomicLsn::new(0),
2265 414 : last_image_layer_creation_check_instant: Mutex::new(None),
2266 414 :
2267 414 : last_received_wal: Mutex::new(None),
2268 414 : rel_size_cache: RwLock::new(RelSizeCache {
2269 414 : complete_as_of: disk_consistent_lsn,
2270 414 : map: HashMap::new(),
2271 414 : }),
2272 414 :
2273 414 : download_all_remote_layers_task_info: RwLock::new(None),
2274 414 :
2275 414 : state,
2276 414 :
2277 414 : eviction_task_timeline_state: tokio::sync::Mutex::new(
2278 414 : EvictionTaskTimelineState::default(),
2279 414 : ),
2280 414 : delete_progress: TimelineDeleteProgress::default(),
2281 414 :
2282 414 : cancel,
2283 414 : gate: Gate::default(),
2284 414 :
2285 414 : compaction_lock: tokio::sync::Mutex::default(),
2286 414 : gc_lock: tokio::sync::Mutex::default(),
2287 414 :
2288 414 : standby_horizon: AtomicLsn::new(0),
2289 414 :
2290 414 : timeline_get_throttle: resources.timeline_get_throttle,
2291 414 :
2292 414 : aux_file_size_estimator: AuxFileSizeEstimator::new(aux_file_metrics),
2293 414 :
2294 414 : #[cfg(test)]
2295 414 : extra_test_dense_keyspace: ArcSwap::new(Arc::new(KeySpace::default())),
2296 414 :
2297 414 : l0_flush_global_state: resources.l0_flush_global_state,
2298 414 :
2299 414 : handles: Default::default(),
2300 414 :
2301 414 : attach_wal_lag_cooldown,
2302 414 :
2303 414 : create_idempotency,
2304 414 : };
2305 414 :
2306 414 : result.repartition_threshold =
2307 414 : result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE;
2308 414 :
2309 414 : result
2310 414 : .metrics
2311 414 : .last_record_gauge
2312 414 : .set(disk_consistent_lsn.0 as i64);
2313 414 : result
2314 414 : })
2315 414 : }
2316 :
2317 582 : pub(super) fn maybe_spawn_flush_loop(self: &Arc<Self>) {
2318 582 : let Ok(guard) = self.gate.enter() else {
2319 0 : info!("cannot start flush loop when the timeline gate has already been closed");
2320 0 : return;
2321 : };
2322 582 : let mut flush_loop_state = self.flush_loop_state.lock().unwrap();
2323 582 : match *flush_loop_state {
2324 408 : FlushLoopState::NotStarted => (),
2325 : FlushLoopState::Running { .. } => {
2326 174 : info!(
2327 0 : "skipping attempt to start flush_loop twice {}/{}",
2328 0 : self.tenant_shard_id, self.timeline_id
2329 : );
2330 174 : return;
2331 : }
2332 : FlushLoopState::Exited => {
2333 0 : warn!(
2334 0 : "ignoring attempt to restart exited flush_loop {}/{}",
2335 0 : self.tenant_shard_id, self.timeline_id
2336 : );
2337 0 : return;
2338 : }
2339 : }
2340 :
2341 408 : let layer_flush_start_rx = self.layer_flush_start_tx.subscribe();
2342 408 : let self_clone = Arc::clone(self);
2343 408 :
2344 408 : debug!("spawning flush loop");
2345 408 : *flush_loop_state = FlushLoopState::Running {
2346 408 : #[cfg(test)]
2347 408 : expect_initdb_optimization: false,
2348 408 : #[cfg(test)]
2349 408 : initdb_optimization_count: 0,
2350 408 : };
2351 408 : task_mgr::spawn(
2352 408 : task_mgr::BACKGROUND_RUNTIME.handle(),
2353 408 : task_mgr::TaskKind::LayerFlushTask,
2354 408 : self.tenant_shard_id,
2355 408 : Some(self.timeline_id),
2356 408 : "layer flush task",
2357 408 : async move {
2358 408 : let _guard = guard;
2359 408 : let background_ctx = RequestContext::todo_child(TaskKind::LayerFlushTask, DownloadBehavior::Error);
2360 18041 : self_clone.flush_loop(layer_flush_start_rx, &background_ctx).await;
2361 8 : let mut flush_loop_state = self_clone.flush_loop_state.lock().unwrap();
2362 8 : assert!(matches!(*flush_loop_state, FlushLoopState::Running{..}));
2363 8 : *flush_loop_state = FlushLoopState::Exited;
2364 8 : Ok(())
2365 8 : }
2366 408 : .instrument(info_span!(parent: None, "layer flush task", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
2367 : );
2368 582 : }
2369 :
2370 : /// Creates and starts the wal receiver.
2371 : ///
2372 : /// This function is expected to be called at most once per Timeline's lifecycle
2373 : /// when the timeline is activated.
2374 0 : fn launch_wal_receiver(
2375 0 : self: &Arc<Self>,
2376 0 : ctx: &RequestContext,
2377 0 : broker_client: BrokerClientChannel,
2378 0 : ) {
2379 0 : info!(
2380 0 : "launching WAL receiver for timeline {} of tenant {}",
2381 0 : self.timeline_id, self.tenant_shard_id
2382 : );
2383 :
2384 0 : let tenant_conf = self.tenant_conf.load();
2385 0 : let wal_connect_timeout = tenant_conf
2386 0 : .tenant_conf
2387 0 : .walreceiver_connect_timeout
2388 0 : .unwrap_or(self.conf.default_tenant_conf.walreceiver_connect_timeout);
2389 0 : let lagging_wal_timeout = tenant_conf
2390 0 : .tenant_conf
2391 0 : .lagging_wal_timeout
2392 0 : .unwrap_or(self.conf.default_tenant_conf.lagging_wal_timeout);
2393 0 : let max_lsn_wal_lag = tenant_conf
2394 0 : .tenant_conf
2395 0 : .max_lsn_wal_lag
2396 0 : .unwrap_or(self.conf.default_tenant_conf.max_lsn_wal_lag);
2397 0 :
2398 0 : let mut guard = self.walreceiver.lock().unwrap();
2399 0 : assert!(
2400 0 : guard.is_none(),
2401 0 : "multiple launches / re-launches of WAL receiver are not supported"
2402 : );
2403 0 : *guard = Some(WalReceiver::start(
2404 0 : Arc::clone(self),
2405 0 : WalReceiverConf {
2406 0 : wal_connect_timeout,
2407 0 : lagging_wal_timeout,
2408 0 : max_lsn_wal_lag,
2409 0 : auth_token: crate::config::SAFEKEEPER_AUTH_TOKEN.get().cloned(),
2410 0 : availability_zone: self.conf.availability_zone.clone(),
2411 0 : ingest_batch_size: self.conf.ingest_batch_size,
2412 0 : },
2413 0 : broker_client,
2414 0 : ctx,
2415 0 : ));
2416 0 : }
2417 :
2418 : /// Initialize with an empty layer map. Used when creating a new timeline.
2419 408 : pub(super) fn init_empty_layer_map(&self, start_lsn: Lsn) {
2420 408 : let mut layers = self.layers.try_write().expect(
2421 408 : "in the context where we call this function, no other task has access to the object",
2422 408 : );
2423 408 : layers
2424 408 : .open_mut()
2425 408 : .expect("in this context the LayerManager must still be open")
2426 408 : .initialize_empty(Lsn(start_lsn.0));
2427 408 : }
2428 :
2429 : /// Scan the timeline directory, clean up, populate the layer map, and schedule uploads for local-only
2430 : /// files.
2431 6 : pub(super) async fn load_layer_map(
2432 6 : &self,
2433 6 : disk_consistent_lsn: Lsn,
2434 6 : index_part: IndexPart,
2435 6 : ) -> anyhow::Result<()> {
2436 : use init::{Decision::*, Discovered, DismissedLayer};
2437 : use LayerName::*;
2438 :
2439 6 : let mut guard = self.layers.write().await;
2440 :
2441 6 : let timer = self.metrics.load_layer_map_histo.start_timer();
2442 6 :
2443 6 : // Scan timeline directory and create ImageLayerName and DeltaFilename
2444 6 : // structs representing all files on disk
2445 6 : let timeline_path = self
2446 6 : .conf
2447 6 : .timeline_path(&self.tenant_shard_id, &self.timeline_id);
2448 6 : let conf = self.conf;
2449 6 : let span = tracing::Span::current();
2450 6 :
2451 6 : // Copy to move into the task we're about to spawn
2452 6 : let this = self.myself.upgrade().expect("&self method holds the arc");
2453 :
2454 6 : let (loaded_layers, needs_cleanup, total_physical_size) = tokio::task::spawn_blocking({
2455 6 : move || {
2456 6 : let _g = span.entered();
2457 6 : let discovered = init::scan_timeline_dir(&timeline_path)?;
2458 6 : let mut discovered_layers = Vec::with_capacity(discovered.len());
2459 6 : let mut unrecognized_files = Vec::new();
2460 6 :
2461 6 : let mut path = timeline_path;
2462 :
2463 22 : for discovered in discovered {
2464 16 : let (name, kind) = match discovered {
2465 16 : Discovered::Layer(layer_file_name, local_metadata) => {
2466 16 : discovered_layers.push((layer_file_name, local_metadata));
2467 16 : continue;
2468 : }
2469 0 : Discovered::IgnoredBackup(path) => {
2470 0 : std::fs::remove_file(path)
2471 0 : .or_else(fs_ext::ignore_not_found)
2472 0 : .fatal_err("Removing .old file");
2473 0 : continue;
2474 : }
2475 0 : Discovered::Unknown(file_name) => {
2476 0 : // we will later error if there are any
2477 0 : unrecognized_files.push(file_name);
2478 0 : continue;
2479 : }
2480 0 : Discovered::Ephemeral(name) => (name, "old ephemeral file"),
2481 0 : Discovered::Temporary(name) => (name, "temporary timeline file"),
2482 0 : Discovered::TemporaryDownload(name) => (name, "temporary download"),
2483 : };
2484 0 : path.push(Utf8Path::new(&name));
2485 0 : init::cleanup(&path, kind)?;
2486 0 : path.pop();
2487 : }
2488 :
2489 6 : if !unrecognized_files.is_empty() {
2490 : // Assume that if there are any, there are many more.
2491 0 : let n = unrecognized_files.len();
2492 0 : let first = &unrecognized_files[..n.min(10)];
2493 0 : anyhow::bail!(
2494 0 : "unrecognized files in timeline dir (total {n}), first 10: {first:?}"
2495 0 : );
2496 6 : }
2497 6 :
2498 6 : let decided = init::reconcile(discovered_layers, &index_part, disk_consistent_lsn);
2499 6 :
2500 6 : let mut loaded_layers = Vec::new();
2501 6 : let mut needs_cleanup = Vec::new();
2502 6 : let mut total_physical_size = 0;
2503 :
2504 22 : for (name, decision) in decided {
2505 16 : let decision = match decision {
2506 16 : Ok(decision) => decision,
2507 0 : Err(DismissedLayer::Future { local }) => {
2508 0 : if let Some(local) = local {
2509 0 : init::cleanup_future_layer(
2510 0 : &local.local_path,
2511 0 : &name,
2512 0 : disk_consistent_lsn,
2513 0 : )?;
2514 0 : }
2515 0 : needs_cleanup.push(name);
2516 0 : continue;
2517 : }
2518 0 : Err(DismissedLayer::LocalOnly(local)) => {
2519 0 : init::cleanup_local_only_file(&name, &local)?;
2520 : // this file never existed remotely, we will have to do rework
2521 0 : continue;
2522 : }
2523 0 : Err(DismissedLayer::BadMetadata(local)) => {
2524 0 : init::cleanup_local_file_for_remote(&local)?;
2525 : // this file never existed remotely, we will have to do rework
2526 0 : continue;
2527 : }
2528 : };
2529 :
2530 16 : match &name {
2531 12 : Delta(d) => assert!(d.lsn_range.end <= disk_consistent_lsn + 1),
2532 4 : Image(i) => assert!(i.lsn <= disk_consistent_lsn),
2533 : }
2534 :
2535 16 : tracing::debug!(layer=%name, ?decision, "applied");
2536 :
2537 16 : let layer = match decision {
2538 16 : Resident { local, remote } => {
2539 16 : total_physical_size += local.file_size;
2540 16 : Layer::for_resident(conf, &this, local.local_path, name, remote)
2541 16 : .drop_eviction_guard()
2542 : }
2543 0 : Evicted(remote) => Layer::for_evicted(conf, &this, name, remote),
2544 : };
2545 :
2546 16 : loaded_layers.push(layer);
2547 : }
2548 6 : Ok((loaded_layers, needs_cleanup, total_physical_size))
2549 6 : }
2550 6 : })
2551 5 : .await
2552 6 : .map_err(anyhow::Error::new)
2553 6 : .and_then(|x| x)?;
2554 :
2555 6 : let num_layers = loaded_layers.len();
2556 6 :
2557 6 : guard
2558 6 : .open_mut()
2559 6 : .expect("layermanager must be open during init")
2560 6 : .initialize_local_layers(loaded_layers, disk_consistent_lsn + 1);
2561 6 :
2562 6 : self.remote_client
2563 6 : .schedule_layer_file_deletion(&needs_cleanup)?;
2564 6 : self.remote_client
2565 6 : .schedule_index_upload_for_file_changes()?;
2566 : // This barrier orders above DELETEs before any later operations.
2567 : // This is critical because code executing after the barrier might
2568 : // create again objects with the same key that we just scheduled for deletion.
2569 : // For example, if we just scheduled deletion of an image layer "from the future",
2570 : // later compaction might run again and re-create the same image layer.
2571 : // "from the future" here means an image layer whose LSN is > IndexPart::disk_consistent_lsn.
2572 : // "same" here means same key range and LSN.
2573 : //
2574 : // Without a barrier between above DELETEs and the re-creation's PUTs,
2575 : // the upload queue may execute the PUT first, then the DELETE.
2576 : // In our example, we will end up with an IndexPart referencing a non-existent object.
2577 : //
2578 : // 1. a future image layer is created and uploaded
2579 : // 2. ps restart
2580 : // 3. the future layer from (1) is deleted during load layer map
2581 : // 4. image layer is re-created and uploaded
2582 : // 5. deletion queue would like to delete (1) but actually deletes (4)
2583 : // 6. delete by name works as expected, but it now deletes the wrong (later) version
2584 : //
2585 : // See https://github.com/neondatabase/neon/issues/5878
2586 : //
2587 : // NB: generation numbers naturally protect against this because they disambiguate
2588 : // (1) and (4)
2589 6 : self.remote_client.schedule_barrier()?;
2590 : // Tenant::create_timeline will wait for these uploads to happen before returning, or
2591 : // on retry.
2592 :
2593 : // Now that we have the full layer map, we may calculate the visibility of layers within it (a global scan)
2594 6 : drop(guard); // drop write lock, update_layer_visibility will take a read lock.
2595 6 : self.update_layer_visibility().await?;
2596 :
2597 6 : info!(
2598 0 : "loaded layer map with {} layers at {}, total physical size: {}",
2599 : num_layers, disk_consistent_lsn, total_physical_size
2600 : );
2601 :
2602 6 : timer.stop_and_record();
2603 6 : Ok(())
2604 6 : }
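
The barrier reasoning in the long comment above can be made concrete with a toy queue model: operations within a segment may be reordered, but nothing crosses a barrier. The model below is illustrative only (reordering is simulated by reversing each segment):

```rust
#[derive(Debug, PartialEq)]
enum Op { Put(&'static str), Delete(&'static str), Barrier }

// Toy scheduler: within a barrier-delimited segment, operations may run in any
// order; a barrier forces everything before it to finish first.
fn execute(queue: &[Op]) -> Vec<String> {
    let mut log = Vec::new();
    let mut segment: Vec<&Op> = Vec::new();
    for op in queue.iter().chain(std::iter::once(&Op::Barrier)) {
        if matches!(op, Op::Barrier) {
            // Simulate reordering inside the segment by reversing it.
            for op in segment.drain(..).rev() {
                log.push(format!("{op:?}"));
            }
        } else {
            segment.push(op);
        }
    }
    log
}

fn main() {
    // Without a barrier, the PUT of the re-created layer can be reordered
    // before the DELETE, leaving the index pointing at a deleted object.
    let unsafe_order = execute(&[Op::Delete("layer-A"), Op::Put("layer-A")]);
    assert_eq!(unsafe_order[0], "Put(\"layer-A\")");

    // With a barrier in between, the DELETE is guaranteed to finish first.
    let safe = execute(&[Op::Delete("layer-A"), Op::Barrier, Op::Put("layer-A")]);
    assert_eq!(safe[0], "Delete(\"layer-A\")");
}
```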
2605 :
2606 : /// Retrieve current logical size of the timeline.
2607 : ///
2608 : /// The size could be lagging behind the actual number, in case
2609 : /// the initial size calculation has not been run (gets triggered on the first size access).
2610 : ///
2611 : /// return size and boolean flag that shows if the size is exact
2612 0 : pub(crate) fn get_current_logical_size(
2613 0 : self: &Arc<Self>,
2614 0 : priority: GetLogicalSizePriority,
2615 0 : ctx: &RequestContext,
2616 0 : ) -> logical_size::CurrentLogicalSize {
2617 0 : if !self.tenant_shard_id.is_shard_zero() {
2618 : // Logical size is only accurately maintained on shard zero: when called elsewhere, for example
2619 : // when HTTP API is serving a GET for timeline zero, return zero
2620 0 : return logical_size::CurrentLogicalSize::Approximate(logical_size::Approximate::zero());
2621 0 : }
2622 0 :
2623 0 : let current_size = self.current_logical_size.current_size();
2624 0 : debug!("Current size: {current_size:?}");
2625 :
2626 0 : match (current_size.accuracy(), priority) {
2627 0 : (logical_size::Accuracy::Exact, _) => (), // nothing to do
2628 0 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::Background) => {
2629 0 : // the background task will eventually deliver an exact value; we're in no rush
2630 0 : }
2631 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::User) => {
2632 : // the background task is not ready, but the user is asking for it now;
2633 : // => make the background task skip the line
2634 : // (The alternative would be to calculate the size here, but,
2635 : // it can actually take a long time if the user has a lot of rels.
2636 : // And we'll inevitably need it again; so, let the background task do the work.)
2637 0 : match self
2638 0 : .current_logical_size
2639 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
2640 0 : .get()
2641 : {
2642 0 : Some(cancel) => cancel.cancel(),
2643 : None => {
2644 0 : let state = self.current_state();
2645 0 : if matches!(
2646 0 : state,
2647 : TimelineState::Broken { .. } | TimelineState::Stopping
2648 0 : ) {
2649 0 :
2650 0 : // Can happen when the timeline detail endpoint is used while deletion is ongoing (or it's broken).
2651 0 : // Don't make noise.
2652 0 : } else {
2653 0 : warn!("unexpected: cancel_wait_for_background_loop_concurrency_limit_semaphore not set, priority-boosting of logical size calculation will not work");
2654 0 : debug_assert!(false);
2655 : }
2656 : }
2657 : };
2658 : }
2659 : }
2660 :
2661 0 : if let CurrentLogicalSize::Approximate(_) = ¤t_size {
2662 0 : if ctx.task_kind() == TaskKind::WalReceiverConnectionHandler {
2663 0 : let first = self
2664 0 : .current_logical_size
2665 0 : .did_return_approximate_to_walreceiver
2666 0 : .compare_exchange(
2667 0 : false,
2668 0 : true,
2669 0 : AtomicOrdering::Relaxed,
2670 0 : AtomicOrdering::Relaxed,
2671 0 : )
2672 0 : .is_ok();
2673 0 : if first {
2674 0 : crate::metrics::initial_logical_size::TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE.inc();
2675 0 : }
2676 0 : }
2677 0 : }
2678 :
2679 0 : current_size
2680 0 : }
2681 :
2682 0 : fn spawn_initial_logical_size_computation_task(self: &Arc<Self>, ctx: &RequestContext) {
2683 0 : let Some(initial_part_end) = self.current_logical_size.initial_part_end else {
2684 : // nothing to do for freshly created timelines;
2685 0 : assert_eq!(
2686 0 : self.current_logical_size.current_size().accuracy(),
2687 0 : logical_size::Accuracy::Exact,
2688 0 : );
2689 0 : self.current_logical_size.initialized.add_permits(1);
2690 0 : return;
2691 : };
2692 :
2693 0 : let cancel_wait_for_background_loop_concurrency_limit_semaphore = CancellationToken::new();
2694 0 : let token = cancel_wait_for_background_loop_concurrency_limit_semaphore.clone();
2695 0 : self.current_logical_size
2696 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore.set(token)
2697 0 : .expect("initial logical size calculation task must be spawned exactly once per Timeline object");
2698 0 :
2699 0 : let self_clone = Arc::clone(self);
2700 0 : let background_ctx = ctx.detached_child(
2701 0 : TaskKind::InitialLogicalSizeCalculation,
2702 0 : DownloadBehavior::Download,
2703 0 : );
2704 0 : task_mgr::spawn(
2705 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
2706 0 : task_mgr::TaskKind::InitialLogicalSizeCalculation,
2707 0 : self.tenant_shard_id,
2708 0 : Some(self.timeline_id),
2709 0 : "initial size calculation",
2710 : // NB: don't log errors here, task_mgr will do that.
2711 0 : async move {
2712 0 : let cancel = task_mgr::shutdown_token();
2713 0 : self_clone
2714 0 : .initial_logical_size_calculation_task(
2715 0 : initial_part_end,
2716 0 : cancel_wait_for_background_loop_concurrency_limit_semaphore,
2717 0 : cancel,
2718 0 : background_ctx,
2719 0 : )
2720 0 : .await;
2721 0 : Ok(())
2722 0 : }
2723 0 : .instrument(info_span!(parent: None, "initial_size_calculation", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id)),
2724 : );
2725 0 : }
2726 :
2727 0 : async fn initial_logical_size_calculation_task(
2728 0 : self: Arc<Self>,
2729 0 : initial_part_end: Lsn,
2730 0 : skip_concurrency_limiter: CancellationToken,
2731 0 : cancel: CancellationToken,
2732 0 : background_ctx: RequestContext,
2733 0 : ) {
2734 0 : scopeguard::defer! {
2735 0 : // Irrespective of the outcome of this operation, we should unblock anyone waiting for it.
2736 0 : self.current_logical_size.initialized.add_permits(1);
2737 0 : }
2738 0 :
2739 0 : let try_once = |attempt: usize| {
2740 0 : let background_ctx = &background_ctx;
2741 0 : let self_ref = &self;
2742 0 : let skip_concurrency_limiter = &skip_concurrency_limiter;
2743 0 : async move {
2744 0 : let cancel = task_mgr::shutdown_token();
2745 0 : let wait_for_permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
2746 0 : BackgroundLoopKind::InitialLogicalSizeCalculation,
2747 0 : background_ctx,
2748 0 : );
2749 :
2750 : use crate::metrics::initial_logical_size::StartCircumstances;
2751 0 : let (_maybe_permit, circumstances) = tokio::select! {
2752 0 : permit = wait_for_permit => {
2753 0 : (Some(permit), StartCircumstances::AfterBackgroundTasksRateLimit)
2754 : }
2755 0 : _ = self_ref.cancel.cancelled() => {
2756 0 : return Err(CalculateLogicalSizeError::Cancelled);
2757 : }
2758 0 : _ = cancel.cancelled() => {
2759 0 : return Err(CalculateLogicalSizeError::Cancelled);
2760 : },
2761 0 : () = skip_concurrency_limiter.cancelled() => {
2762 : // Some action that is part of an end-user interaction requested the logical size
2763 : // => break out of the rate limit
2764 : // TODO: ideally we'd not run on BackgroundRuntime but the requester's runtime;
2765 : // but then again what happens if they cancel; also, we should just be using
2766 : // one runtime across the entire process, so, let's leave this for now.
2767 0 : (None, StartCircumstances::SkippedConcurrencyLimiter)
2768 : }
2769 : };
2770 :
2771 0 : let metrics_guard = if attempt == 1 {
2772 0 : crate::metrics::initial_logical_size::START_CALCULATION.first(circumstances)
2773 : } else {
2774 0 : crate::metrics::initial_logical_size::START_CALCULATION.retry(circumstances)
2775 : };
2776 :
2777 0 : let calculated_size = self_ref
2778 0 : .logical_size_calculation_task(
2779 0 : initial_part_end,
2780 0 : LogicalSizeCalculationCause::Initial,
2781 0 : background_ctx,
2782 0 : )
2783 0 : .await?;
2784 :
2785 0 : self_ref
2786 0 : .trigger_aux_file_size_computation(initial_part_end, background_ctx)
2787 0 : .await?;
2788 :
2789 : // TODO: add aux file size to logical size
2790 :
2791 0 : Ok((calculated_size, metrics_guard))
2792 0 : }
2793 0 : };
2794 :
2795 0 : let retrying = async {
2796 0 : let mut attempt = 0;
2797 : loop {
2798 0 : attempt += 1;
2799 0 :
2800 0 : match try_once(attempt).await {
2801 0 : Ok(res) => return ControlFlow::Continue(res),
2802 0 : Err(CalculateLogicalSizeError::Cancelled) => return ControlFlow::Break(()),
2803 : Err(
2804 0 : e @ (CalculateLogicalSizeError::Decode(_)
2805 0 : | CalculateLogicalSizeError::PageRead(_)),
2806 0 : ) => {
2807 0 : warn!(attempt, "initial size calculation failed: {e:?}");
2808 : // exponential back-off doesn't make sense at these long intervals;
2809 : // use fixed retry interval with generous jitter instead
2810 0 : let sleep_duration = Duration::from_secs(
2811 0 : u64::try_from(
2812 0 : // 1hour base
2813 0 : (60_i64 * 60_i64)
2814 0 : // 10min jitter
2815 0 : + rand::thread_rng().gen_range(-10 * 60..10 * 60),
2816 0 : )
2817 0 : .expect("10min < 1hour"),
2818 0 : );
2819 0 : tokio::time::sleep(sleep_duration).await;
2820 : }
2821 : }
2822 : }
2823 0 : };
2824 :
2825 0 : let (calculated_size, metrics_guard) = tokio::select! {
2826 0 : res = retrying => {
2827 0 : match res {
2828 0 : ControlFlow::Continue(calculated_size) => calculated_size,
2829 0 : ControlFlow::Break(()) => return,
2830 : }
2831 : }
2832 0 : _ = cancel.cancelled() => {
2833 0 : return;
2834 : }
2835 : };
2836 :
2837 : // We cannot query current_logical_size.current_size() to learn the current
2838 : // *negative* value; it is only available truncated to u64.
2839 0 : let added = self
2840 0 : .current_logical_size
2841 0 : .size_added_after_initial
2842 0 : .load(AtomicOrdering::Relaxed);
2843 0 :
2844 0 : let sum = calculated_size.saturating_add_signed(added);
2845 0 :
2846 0 : // set the gauge value before it can be set in `update_current_logical_size`.
2847 0 : self.metrics.current_logical_size_gauge.set(sum);
2848 0 :
2849 0 : self.current_logical_size
2850 0 : .initial_logical_size
2851 0 : .set((calculated_size, metrics_guard.calculation_result_saved()))
2852 0 : .ok()
2853 0 : .expect("only this task sets it");
2854 0 : }
2855 :
2856 0 : pub(crate) fn spawn_ondemand_logical_size_calculation(
2857 0 : self: &Arc<Self>,
2858 0 : lsn: Lsn,
2859 0 : cause: LogicalSizeCalculationCause,
2860 0 : ctx: RequestContext,
2861 0 : ) -> oneshot::Receiver<Result<u64, CalculateLogicalSizeError>> {
2862 0 : let (sender, receiver) = oneshot::channel();
2863 0 : let self_clone = Arc::clone(self);
2864 0 : // XXX if our caller loses interest, i.e., ctx is cancelled,
2865 0 : // we should stop the size calculation work and return an error.
2866 0 : // That would require restructuring this function's API to
2867 0 : // return the result directly, instead of a Receiver for the result.
2868 0 : let ctx = ctx.detached_child(
2869 0 : TaskKind::OndemandLogicalSizeCalculation,
2870 0 : DownloadBehavior::Download,
2871 0 : );
2872 0 : task_mgr::spawn(
2873 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
2874 0 : task_mgr::TaskKind::OndemandLogicalSizeCalculation,
2875 0 : self.tenant_shard_id,
2876 0 : Some(self.timeline_id),
2877 0 : "ondemand logical size calculation",
2878 0 : async move {
2879 0 : let res = self_clone
2880 0 : .logical_size_calculation_task(lsn, cause, &ctx)
2881 0 : .await;
2882 0 : let _ = sender.send(res).ok();
2883 0 : Ok(()) // Receiver is responsible for handling errors
2884 0 : }
2885 0 : .in_current_span(),
2886 0 : );
2887 0 : receiver
2888 0 : }
2889 :
2890 : /// # Cancel-Safety
2891 : ///
2892 : /// This method is cancellation-safe.
2893 0 : #[instrument(skip_all)]
2894 : async fn logical_size_calculation_task(
2895 : self: &Arc<Self>,
2896 : lsn: Lsn,
2897 : cause: LogicalSizeCalculationCause,
2898 : ctx: &RequestContext,
2899 : ) -> Result<u64, CalculateLogicalSizeError> {
2900 : crate::span::debug_assert_current_span_has_tenant_and_timeline_id();
2901 : // We should never be calculating logical sizes on shard !=0, because these shards do not have
2902 : // accurate relation sizes, and they do not emit consumption metrics.
2903 : debug_assert!(self.tenant_shard_id.is_shard_zero());
2904 :
2905 : let guard = self
2906 : .gate
2907 : .enter()
2908 0 : .map_err(|_| CalculateLogicalSizeError::Cancelled)?;
2909 :
2910 : let self_calculation = Arc::clone(self);
2911 :
2912 0 : let mut calculation = pin!(async {
2913 0 : let ctx = ctx.attached_child();
2914 0 : self_calculation
2915 0 : .calculate_logical_size(lsn, cause, &guard, &ctx)
2916 0 : .await
2917 0 : });
2918 :
2919 : tokio::select! {
2920 : res = &mut calculation => { res }
2921 : _ = self.cancel.cancelled() => {
2922 : debug!("cancelling logical size calculation for timeline shutdown");
2923 : calculation.await
2924 : }
2925 : }
2926 : }
2927 :
2928 :     /// Calculate the logical size of the database at the given LSN.
2929 : ///
2930 : /// NOTE: counted incrementally, includes ancestors. This can be a slow operation,
2931 : /// especially if we need to download remote layers.
2932 : ///
2933 : /// # Cancel-Safety
2934 : ///
2935 : /// This method is cancellation-safe.
2936 0 : async fn calculate_logical_size(
2937 0 : &self,
2938 0 : up_to_lsn: Lsn,
2939 0 : cause: LogicalSizeCalculationCause,
2940 0 : _guard: &GateGuard,
2941 0 : ctx: &RequestContext,
2942 0 : ) -> Result<u64, CalculateLogicalSizeError> {
2943 0 : info!(
2944 0 : "Calculating logical size for timeline {} at {}",
2945 : self.timeline_id, up_to_lsn
2946 : );
2947 :
2948 0 : pausable_failpoint!("timeline-calculate-logical-size-pause");
2949 :
2950 : // See if we've already done the work for initial size calculation.
2951 : // This is a short-cut for timelines that are mostly unused.
2952 0 : if let Some(size) = self.current_logical_size.initialized_size(up_to_lsn) {
2953 0 : return Ok(size);
2954 0 : }
2955 0 : let storage_time_metrics = match cause {
2956 : LogicalSizeCalculationCause::Initial
2957 : | LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize
2958 0 : | LogicalSizeCalculationCause::TenantSizeHandler => &self.metrics.logical_size_histo,
2959 : LogicalSizeCalculationCause::EvictionTaskImitation => {
2960 0 : &self.metrics.imitate_logical_size_histo
2961 : }
2962 : };
2963 0 : let timer = storage_time_metrics.start_timer();
2964 0 : let logical_size = self
2965 0 : .get_current_logical_size_non_incremental(up_to_lsn, ctx)
2966 0 : .await?;
2967 0 : debug!("calculated logical size: {logical_size}");
2968 0 : timer.stop_and_record();
2969 0 : Ok(logical_size)
2970 0 : }
2971 :
2972 :     /// Update current logical size, adding `delta` to the old value.
2973 270570 : fn update_current_logical_size(&self, delta: i64) {
2974 270570 : let logical_size = &self.current_logical_size;
2975 270570 : logical_size.increment_size(delta);
2976 270570 :
2977 270570 : // Also set the value in the prometheus gauge. Note that
2978 270570 :         // there is a race condition here: if this is called by two
2979 270570 : // threads concurrently, the prometheus gauge might be set to
2980 270570 : // one value while current_logical_size is set to the
2981 270570 : // other.
2982 270570 : match logical_size.current_size() {
2983 270570 : CurrentLogicalSize::Exact(ref new_current_size) => self
2984 270570 : .metrics
2985 270570 : .current_logical_size_gauge
2986 270570 : .set(new_current_size.into()),
2987 0 : CurrentLogicalSize::Approximate(_) => {
2988 0 : // don't update the gauge yet, this allows us not to update the gauge back and
2989 0 : // forth between the initial size calculation task.
2990 0 : }
2991 : }
2992 270570 : }
2993 :
2994 2824 : pub(crate) fn update_directory_entries_count(&self, kind: DirectoryKind, count: u64) {
2995 2824 : self.directory_metrics[kind.offset()].store(count, AtomicOrdering::Relaxed);
2996 2824 : let aux_metric =
2997 2824 : self.directory_metrics[DirectoryKind::AuxFiles.offset()].load(AtomicOrdering::Relaxed);
2998 2824 :
2999 2824 : let sum_of_entries = self
3000 2824 : .directory_metrics
3001 2824 : .iter()
3002 19768 : .map(|v| v.load(AtomicOrdering::Relaxed))
3003 2824 : .sum();
3004 : // Set a high general threshold and a lower threshold for the auxiliary files,
3005 : // as we can have large numbers of relations in the db directory.
3006 : const SUM_THRESHOLD: u64 = 5000;
3007 : const AUX_THRESHOLD: u64 = 1000;
3008 2824 : if sum_of_entries >= SUM_THRESHOLD || aux_metric >= AUX_THRESHOLD {
3009 0 : self.metrics
3010 0 : .directory_entries_count_gauge
3011 0 : .set(sum_of_entries);
3012 2824 : } else if let Some(metric) = Lazy::get(&self.metrics.directory_entries_count_gauge) {
3013 0 : metric.set(sum_of_entries);
3014 2824 : }
3015 2824 : }
3016 :
3017 0 : async fn find_layer(
3018 0 : &self,
3019 0 : layer_name: &LayerName,
3020 0 : ) -> Result<Option<Layer>, layer_manager::Shutdown> {
3021 0 : let guard = self.layers.read().await;
3022 0 : let layer = guard
3023 0 : .layer_map()?
3024 0 : .iter_historic_layers()
3025 0 : .find(|l| &l.layer_name() == layer_name)
3026 0 : .map(|found| guard.get_from_desc(&found));
3027 0 : Ok(layer)
3028 0 : }
3029 :
3030 : /// The timeline heatmap is a hint to secondary locations from the primary location,
3031 : /// indicating which layers are currently on-disk on the primary.
3032 : ///
3033 : /// None is returned if the Timeline is in a state where uploading a heatmap
3034 : /// doesn't make sense, such as shutting down or initializing. The caller
3035 : /// should treat this as a cue to simply skip doing any heatmap uploading
3036 : /// for this timeline.
3037 2 : pub(crate) async fn generate_heatmap(&self) -> Option<HeatMapTimeline> {
3038 2 : if !self.is_active() {
3039 0 : return None;
3040 2 : }
3041 :
3042 2 : let guard = self.layers.read().await;
3043 :
3044 10 : let resident = guard.likely_resident_layers().filter_map(|layer| {
3045 10 : match layer.visibility() {
3046 : LayerVisibilityHint::Visible => {
3047 :                     // Layer is visible to one or more read LSNs: eligible for inclusion in the heatmap
3048 8 : let last_activity_ts = layer.latest_activity();
3049 8 : Some((layer.layer_desc(), layer.metadata(), last_activity_ts))
3050 : }
3051 : LayerVisibilityHint::Covered => {
3052 :                     // Layer is resident but unlikely to be read: not eligible for inclusion in heatmap.
3053 2 : None
3054 : }
3055 : }
3056 10 : });
3057 2 :
3058 2 : let mut layers = resident.collect::<Vec<_>>();
3059 2 :
3060 2 : // Sort layers in order of which to download first. For a large set of layers to download, we
3061 2 :         // want to prioritize those layers which are most likely to still be resident many minutes
3062 2 : // or hours later:
3063 2 : // - Download L0s last, because they churn the fastest: L0s on a fast-writing tenant might
3064 2 : // only exist for a few minutes before being compacted into L1s.
3065 2 : // - For L1 & image layers, download most recent LSNs first: the older the LSN, the sooner
3066 2 : // the layer is likely to be covered by an image layer during compaction.
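     :         // (the sort key below encodes this as descending order on (is-not-L0, end LSN):
     :         // non-L0 layers with the highest LSNs sort first, and L0s sort last)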
3067 20 : layers.sort_by_key(|(desc, _meta, _atime)| {
3068 20 : std::cmp::Reverse((
3069 20 : !LayerMap::is_l0(&desc.key_range, desc.is_delta),
3070 20 : desc.lsn_range.end,
3071 20 : ))
3072 20 : });
3073 2 :
3074 2 : let layers = layers
3075 2 : .into_iter()
3076 8 : .map(|(desc, meta, atime)| HeatMapLayer::new(desc.layer_name(), meta, atime))
3077 2 : .collect();
3078 2 :
3079 2 : Some(HeatMapTimeline::new(self.timeline_id, layers))
3080 2 : }
3081 :
3082 : /// Returns true if the given lsn is or was an ancestor branchpoint.
3083 0 : pub(crate) fn is_ancestor_lsn(&self, lsn: Lsn) -> bool {
3084 0 :         // upon timeline detach, we set the ancestor_lsn to Lsn::INVALID and store the original
3085 0 :         // branchpoint in IndexPart::lineage
3086 0 : self.ancestor_lsn == lsn
3087 0 : || (self.ancestor_lsn == Lsn::INVALID
3088 0 : && self.remote_client.is_previous_ancestor_lsn(lsn))
3089 0 : }
3090 : }
3091 :
3092 : impl Timeline {
3093 : #[allow(clippy::doc_lazy_continuation)]
3094 : /// Get the data needed to reconstruct all keys in the provided keyspace
3095 : ///
3096 : /// The algorithm is as follows:
3097 : /// 1. While some keys are still not done and there's a timeline to visit:
3098 :     /// 2. Visit the timeline (see [`Timeline::get_vectored_reconstruct_data_timeline`]):
3099 :     ///    2.1. Build the fringe for the current keyspace
3100 :     ///    2.2. Visit the newest layer from the fringe to collect all values for the range it
3101 :     ///         intersects
3102 :     ///    2.3. Pop the layer from the fringe
3103 :     ///    2.4. If the fringe is empty, go back to 1
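     :     ///
     :     /// A hypothetical example: a read of keys {A, B} at LSN 100 may complete B from
     :     /// an image layer on the current timeline while A remains unresolved; the loop
     :     /// then descends into the ancestor timeline with the remaining keyspace {A},
     :     /// clamping `cont_lsn` down to `ancestor_lsn + 1`.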
3104 626574 : async fn get_vectored_reconstruct_data(
3105 626574 : &self,
3106 626574 : mut keyspace: KeySpace,
3107 626574 : request_lsn: Lsn,
3108 626574 : reconstruct_state: &mut ValuesReconstructState,
3109 626574 : ctx: &RequestContext,
3110 626574 : ) -> Result<(), GetVectoredError> {
3111 626574 : let mut timeline_owned: Arc<Timeline>;
3112 626574 : let mut timeline = self;
3113 626574 :
3114 626574 : let mut cont_lsn = Lsn(request_lsn.0 + 1);
3115 :
3116 626572 : let missing_keyspace = loop {
3117 851799 : if self.cancel.is_cancelled() {
3118 0 : return Err(GetVectoredError::Cancelled);
3119 851799 : }
3120 :
3121 : let TimelineVisitOutcome {
3122 851799 : completed_keyspace: completed,
3123 851799 : image_covered_keyspace,
3124 851799 : } = Self::get_vectored_reconstruct_data_timeline(
3125 851799 : timeline,
3126 851799 : keyspace.clone(),
3127 851799 : cont_lsn,
3128 851799 : reconstruct_state,
3129 851799 : &self.cancel,
3130 851799 : ctx,
3131 851799 : )
3132 192145 : .await?;
3133 :
3134 851799 : keyspace.remove_overlapping_with(&completed);
3135 851799 :
3136 851799 : // Do not descend into the ancestor timeline for aux files.
3137 851799 : // We don't return a blanket [`GetVectoredError::MissingKey`] to avoid
3138 851799 : // stalling compaction.
3139 851799 : keyspace.remove_overlapping_with(&KeySpace {
3140 851799 : ranges: vec![NON_INHERITED_RANGE, NON_INHERITED_SPARSE_RANGE],
3141 851799 : });
3142 851799 :
3143 851799 : // Keyspace is fully retrieved
3144 851799 : if keyspace.is_empty() {
3145 626558 : break None;
3146 225241 : }
3147 :
3148 225241 : let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() else {
3149 : // Not fully retrieved but no ancestor timeline.
3150 14 : break Some(keyspace);
3151 : };
3152 :
3153 :             // Now we check whether there are keys covered by an image layer that do not exist
3154 :             // in that image layer, which means those keys do not exist.
3155 :
3156 : // The block below will stop the vectored search if any of the keys encountered an image layer
3157 : // which did not contain a snapshot for said key. Since we have already removed all completed
3158 : // keys from `keyspace`, we expect there to be no overlap between it and the image covered key
3159 : // space. If that's not the case, we had at least one key encounter a gap in the image layer
3160 : // and stop the search as a result of that.
3161 225227 : let removed = keyspace.remove_overlapping_with(&image_covered_keyspace);
3162 225227 : if !removed.is_empty() {
3163 0 : break Some(removed);
3164 225227 : }
3165 225227 : // If we reached this point, `remove_overlapping_with` should not have made any change to the
3166 225227 : // keyspace.
3167 225227 :
3168 225227 : // Take the min to avoid reconstructing a page with data newer than request Lsn.
3169 225227 : cont_lsn = std::cmp::min(Lsn(request_lsn.0 + 1), Lsn(timeline.ancestor_lsn.0 + 1));
3170 225227 : timeline_owned = timeline
3171 225227 : .get_ready_ancestor_timeline(ancestor_timeline, ctx)
3172 2 : .await?;
3173 225225 : timeline = &*timeline_owned;
3174 : };
3175 :
3176 626572 : if let Some(missing_keyspace) = missing_keyspace {
3177 14 : return Err(GetVectoredError::MissingKey(MissingKeyError {
3178 14 : key: missing_keyspace.start().unwrap(), /* better if we can store the full keyspace */
3179 14 : shard: self
3180 14 : .shard_identity
3181 14 : .get_shard_number(&missing_keyspace.start().unwrap()),
3182 14 : cont_lsn,
3183 14 : request_lsn,
3184 14 : ancestor_lsn: Some(timeline.ancestor_lsn),
3185 14 : backtrace: None,
3186 14 : }));
3187 626558 : }
3188 626558 :
3189 626558 : Ok(())
3190 626574 : }
3191 :
3192 : /// Collect the reconstruct data for a keyspace from the specified timeline.
3193 : ///
3194 : /// Maintain a fringe [`LayerFringe`] which tracks all the layers that intersect
3195 : /// the current keyspace. The current keyspace of the search at any given timeline
3196 : /// is the original keyspace minus all the keys that have been completed minus
3197 : /// any keys for which we couldn't find an intersecting layer. It's not tracked explicitly,
3198 : /// but if you merge all the keyspaces in the fringe, you get the "current keyspace".
3199 : ///
3200 : /// This is basically a depth-first search visitor implementation where a vertex
3201 : /// is the (layer, lsn range, key space) tuple. The fringe acts as the stack.
3202 : ///
3203 : /// At each iteration pop the top of the fringe (the layer with the highest Lsn)
3204 : /// and get all the required reconstruct data from the layer in one go.
3205 : ///
3206 : /// Returns the completed keyspace and the keyspaces with image coverage. The caller
3207 : /// decides how to deal with these two keyspaces.
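     :     ///
     :     /// Note on visit order: within one iteration, an in-memory layer overlapping
     :     /// `cont_lsn` is planned in preference to a layer-map range search; persistent
     :     /// layers are only consulted when no such in-memory layer exists (see the body
     :     /// below).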
3208 851799 : async fn get_vectored_reconstruct_data_timeline(
3209 851799 : timeline: &Timeline,
3210 851799 : keyspace: KeySpace,
3211 851799 : mut cont_lsn: Lsn,
3212 851799 : reconstruct_state: &mut ValuesReconstructState,
3213 851799 : cancel: &CancellationToken,
3214 851799 : ctx: &RequestContext,
3215 851799 : ) -> Result<TimelineVisitOutcome, GetVectoredError> {
3216 851799 : let mut unmapped_keyspace = keyspace.clone();
3217 851799 : let mut fringe = LayerFringe::new();
3218 851799 :
3219 851799 : let mut completed_keyspace = KeySpace::default();
3220 851799 : let mut image_covered_keyspace = KeySpaceRandomAccum::new();
3221 :
3222 : loop {
3223 1697413 : if cancel.is_cancelled() {
3224 0 : return Err(GetVectoredError::Cancelled);
3225 1697413 : }
3226 1697413 :
3227 1697413 : let (keys_done_last_step, keys_with_image_coverage) =
3228 1697413 : reconstruct_state.consume_done_keys();
3229 1697413 : unmapped_keyspace.remove_overlapping_with(&keys_done_last_step);
3230 1697413 : completed_keyspace.merge(&keys_done_last_step);
3231 1697413 : if let Some(keys_with_image_coverage) = keys_with_image_coverage {
3232 21808 : unmapped_keyspace
3233 21808 : .remove_overlapping_with(&KeySpace::single(keys_with_image_coverage.clone()));
3234 21808 : image_covered_keyspace.add_range(keys_with_image_coverage);
3235 1675605 : }
3236 :
3237 :             // Do not descend any further if the last layer we visited
3238 : // completed all keys in the keyspace it inspected. This is not
3239 : // required for correctness, but avoids visiting extra layers
3240 : // which turns out to be a perf bottleneck in some cases.
3241 1697413 : if !unmapped_keyspace.is_empty() {
3242 1073097 : let guard = timeline.layers.read().await;
3243 1073097 : let layers = guard.layer_map()?;
3244 :
3245 1073097 : let in_memory_layer = layers.find_in_memory_layer(|l| {
3246 912964 : let start_lsn = l.get_lsn_range().start;
3247 912964 : cont_lsn > start_lsn
3248 1073097 : });
3249 1073097 :
3250 1073097 : match in_memory_layer {
3251 606295 : Some(l) => {
3252 606295 : let lsn_range = l.get_lsn_range().start..cont_lsn;
3253 606295 : fringe.update(
3254 606295 : ReadableLayer::InMemoryLayer(l),
3255 606295 : unmapped_keyspace.clone(),
3256 606295 : lsn_range,
3257 606295 : );
3258 606295 : }
3259 : None => {
3260 466824 : for range in unmapped_keyspace.ranges.iter() {
3261 466824 : let results = layers.range_search(range.clone(), cont_lsn);
3262 466824 :
3263 466824 : results
3264 466824 : .found
3265 466824 : .into_iter()
3266 466824 : .map(|(SearchResult { layer, lsn_floor }, keyspace_accum)| {
3267 239333 : (
3268 239333 : ReadableLayer::PersistentLayer(guard.get_from_desc(&layer)),
3269 239333 : keyspace_accum.to_keyspace(),
3270 239333 : lsn_floor..cont_lsn,
3271 239333 : )
3272 466824 : })
3273 466824 : .for_each(|(layer, keyspace, lsn_range)| {
3274 239333 : fringe.update(layer, keyspace, lsn_range)
3275 466824 : });
3276 466824 : }
3277 : }
3278 : }
3279 :
3280 : // It's safe to drop the layer map lock after planning the next round of reads.
3281 : // The fringe keeps readable handles for the layers which are safe to read even
3282 : // if layers were compacted or flushed.
3283 : //
3284 : // The more interesting consideration is: "Why is the read algorithm still correct
3285 : // if the layer map changes while it is operating?". Doing a vectored read on a
3286 : // timeline boils down to pushing an imaginary lsn boundary downwards for each range
3287 : // covered by the read. The layer map tells us how to move the lsn downwards for a
3288 : // range at *a particular point in time*. It is fine for the answer to be different
3289 : // at two different time points.
3290 1073097 : drop(guard);
3291 624316 : }
3292 :
3293 1697413 : if let Some((layer_to_read, keyspace_to_read, lsn_range)) = fringe.next_layer() {
3294 845614 : let next_cont_lsn = lsn_range.start;
3295 845614 : layer_to_read
3296 845614 : .get_values_reconstruct_data(
3297 845614 : keyspace_to_read.clone(),
3298 845614 : lsn_range,
3299 845614 : reconstruct_state,
3300 845614 : ctx,
3301 845614 : )
3302 182999 : .await?;
3303 :
3304 845614 : unmapped_keyspace = keyspace_to_read;
3305 845614 : cont_lsn = next_cont_lsn;
3306 845614 :
3307 845614 : reconstruct_state.on_layer_visited(&layer_to_read);
3308 : } else {
3309 851799 : break;
3310 851799 : }
3311 851799 : }
3312 851799 :
3313 851799 : Ok(TimelineVisitOutcome {
3314 851799 : completed_keyspace,
3315 851799 : image_covered_keyspace: image_covered_keyspace.consume_keyspace(),
3316 851799 : })
3317 851799 : }
3318 :
3319 225227 : async fn get_ready_ancestor_timeline(
3320 225227 : &self,
3321 225227 : ancestor: &Arc<Timeline>,
3322 225227 : ctx: &RequestContext,
3323 225227 : ) -> Result<Arc<Timeline>, GetReadyAncestorError> {
3324 225227 : // It's possible that the ancestor timeline isn't active yet, or
3325 225227 : // is active but hasn't yet caught up to the branch point. Wait
3326 225227 : // for it.
3327 225227 : //
3328 225227 : // This cannot happen while the pageserver is running normally,
3329 225227 : // because you cannot create a branch from a point that isn't
3330 225227 : // present in the pageserver yet. However, we don't wait for the
3331 225227 : // branch point to be uploaded to cloud storage before creating
3332 225227 : // a branch. I.e., the branch LSN need not be remote consistent
3333 225227 : // for the branching operation to succeed.
3334 225227 : //
3335 225227 : // Hence, if we try to load a tenant in such a state where
3336 225227 : // 1. the existence of the branch was persisted (in IndexPart and/or locally)
3337 225227 : // 2. but the ancestor state is behind branch_lsn because it was not yet persisted
3338 225227 : // then we will need to wait for the ancestor timeline to
3339 225227 : // re-stream WAL up to branch_lsn before we access it.
3340 225227 : //
3341 225227 : // How can a tenant get in such a state?
3342 225227 : // - ungraceful pageserver process exit
3343 225227 : // - detach+attach => this is a bug, https://github.com/neondatabase/neon/issues/4219
3344 225227 : //
3345 225227 : // NB: this could be avoided by requiring
3346 225227 : // branch_lsn >= remote_consistent_lsn
3347 225227 : // during branch creation.
3348 225227 : match ancestor.wait_to_become_active(ctx).await {
3349 225225 : Ok(()) => {}
3350 : Err(TimelineState::Stopping) => {
3351 : // If an ancestor is stopping, it means the tenant is stopping: handle this the same as if this timeline was stopping.
3352 0 : return Err(GetReadyAncestorError::Cancelled);
3353 : }
3354 2 : Err(state) => {
3355 2 : return Err(GetReadyAncestorError::BadState {
3356 2 : timeline_id: ancestor.timeline_id,
3357 2 : state,
3358 2 : });
3359 : }
3360 : }
3361 225225 : ancestor
3362 225225 : .wait_lsn(self.ancestor_lsn, WaitLsnWaiter::Timeline(self), ctx)
3363 0 : .await
3364 225225 : .map_err(|e| match e {
3365 0 : e @ WaitLsnError::Timeout(_) => GetReadyAncestorError::AncestorLsnTimeout(e),
3366 0 : WaitLsnError::Shutdown => GetReadyAncestorError::Cancelled,
3367 0 : WaitLsnError::BadState(state) => GetReadyAncestorError::BadState {
3368 0 : timeline_id: ancestor.timeline_id,
3369 0 : state,
3370 0 : },
3371 225225 : })?;
3372 :
3373 225225 : Ok(ancestor.clone())
3374 225227 : }
3375 :
3376 151304 : pub(crate) fn get_shard_identity(&self) -> &ShardIdentity {
3377 151304 : &self.shard_identity
3378 151304 : }
3379 :
3380 : #[inline(always)]
3381 0 : pub(crate) fn shard_timeline_id(&self) -> ShardTimelineId {
3382 0 : ShardTimelineId {
3383 0 : shard_index: ShardIndex {
3384 0 : shard_number: self.shard_identity.number,
3385 0 : shard_count: self.shard_identity.count,
3386 0 : },
3387 0 : timeline_id: self.timeline_id,
3388 0 : }
3389 0 : }
3390 :
3391 : /// Returns a non-frozen open in-memory layer for ingestion.
3392 : ///
3393 :     /// Takes a witness that the timeline writer state lock is held, because it makes no sense to call
3394 : /// this function without holding the mutex.
3395 1266 : async fn get_layer_for_write(
3396 1266 : &self,
3397 1266 : lsn: Lsn,
3398 1266 : _guard: &tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
3399 1266 : ctx: &RequestContext,
3400 1266 : ) -> anyhow::Result<Arc<InMemoryLayer>> {
3401 1266 : let mut guard = self.layers.write().await;
3402 1266 : let gate_guard = self.gate.enter().context("enter gate for inmem layer")?;
3403 :
3404 1266 : let last_record_lsn = self.get_last_record_lsn();
3405 1266 : ensure!(
3406 1266 : lsn > last_record_lsn,
3407 0 : "cannot modify relation after advancing last_record_lsn (incoming_lsn={}, last_record_lsn={})",
3408 : lsn,
3409 : last_record_lsn,
3410 : );
3411 :
3412 1266 : let layer = guard
3413 1266 : .open_mut()?
3414 1266 : .get_layer_for_write(
3415 1266 : lsn,
3416 1266 : self.conf,
3417 1266 : self.timeline_id,
3418 1266 : self.tenant_shard_id,
3419 1266 : gate_guard,
3420 1266 : ctx,
3421 1266 : )
3422 717 : .await?;
3423 1266 : Ok(layer)
3424 1266 : }
3425 :
3426 5279062 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
3427 5279062 : assert!(new_lsn.is_aligned());
3428 :
3429 5279062 : self.metrics.last_record_gauge.set(new_lsn.0 as i64);
3430 5279062 : self.last_record_lsn.advance(new_lsn);
3431 5279062 : }
3432 :
3433 : /// Freeze any existing open in-memory layer and unconditionally notify the flush loop.
3434 : ///
3435 : /// Unconditional flush loop notification is given because in sharded cases we will want to
3436 : /// leave an Lsn gap. Unsharded tenants do not have Lsn gaps.
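     :     ///
     :     /// (The gaps presumably arise because a shard only ingests the WAL records for
     :     /// its own keys, so long stretches of WAL may produce no layer data on a given
     :     /// shard.)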
3437 1170 : async fn freeze_inmem_layer_at(
3438 1170 : &self,
3439 1170 : at: Lsn,
3440 1170 : write_lock: &mut tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
3441 1170 : ) -> Result<u64, FlushLayerError> {
3442 1170 : let frozen = {
3443 1170 : let mut guard = self.layers.write().await;
3444 1170 : guard
3445 1170 : .open_mut()?
3446 1170 : .try_freeze_in_memory_layer(at, &self.last_freeze_at, write_lock)
3447 1 : .await
3448 : };
3449 :
3450 1170 : if frozen {
3451 1142 : let now = Instant::now();
3452 1142 : *(self.last_freeze_ts.write().unwrap()) = now;
3453 1142 : }
3454 :
3455 : // Increment the flush cycle counter and wake up the flush task.
3456 : // Remember the new value, so that when we listen for the flush
3457 : // to finish, we know when the flush that we initiated has
3458 : // finished, instead of some other flush that was started earlier.
3459 1170 : let mut my_flush_request = 0;
3460 1170 :
3461 1170 : let flush_loop_state = { *self.flush_loop_state.lock().unwrap() };
3462 1170 : if !matches!(flush_loop_state, FlushLoopState::Running { .. }) {
3463 0 : return Err(FlushLayerError::NotRunning(flush_loop_state));
3464 1170 : }
3465 1170 :
3466 1170 : self.layer_flush_start_tx.send_modify(|(counter, lsn)| {
3467 1170 : my_flush_request = *counter + 1;
3468 1170 : *counter = my_flush_request;
3469 1170 : *lsn = std::cmp::max(at, *lsn);
3470 1170 : });
3471 1170 :
3472 1170 : assert_ne!(my_flush_request, 0);
3473 :
3474 1170 : Ok(my_flush_request)
3475 1170 : }
3476 :
3477 : /// Layer flusher task's main loop.
3478 408 : async fn flush_loop(
3479 408 : self: &Arc<Self>,
3480 408 : mut layer_flush_start_rx: tokio::sync::watch::Receiver<(u64, Lsn)>,
3481 408 : ctx: &RequestContext,
3482 408 : ) {
3483 408 : info!("started flush loop");
3484 : loop {
3485 1539 : tokio::select! {
3486 1539 : _ = self.cancel.cancelled() => {
3487 8 : info!("shutting down layer flush task due to Timeline::cancel");
3488 8 : break;
3489 : },
3490 1539 : _ = layer_flush_start_rx.changed() => {}
3491 1131 : }
3492 1131 : trace!("waking up");
3493 1131 : let (flush_counter, frozen_to_lsn) = *layer_flush_start_rx.borrow();
3494 1131 :
3495 1131 : // The highest LSN to which we flushed in the loop over frozen layers
3496 1131 : let mut flushed_to_lsn = Lsn(0);
3497 :
3498 1131 : let result = loop {
3499 2273 : if self.cancel.is_cancelled() {
3500 0 : info!("dropping out of flush loop for timeline shutdown");
3501 : // Note: we do not bother transmitting into [`layer_flush_done_tx`], because
3502 : // anyone waiting on that will respect self.cancel as well: they will stop
3503 :                 // waiting at the same time as we drop out of this loop.
3504 0 : return;
3505 2273 : }
3506 2273 :
3507 2273 : let timer = self.metrics.flush_time_histo.start_timer();
3508 :
3509 : let num_frozen_layers;
3510 : let frozen_layer_total_size;
3511 2273 : let layer_to_flush = {
3512 2273 : let guard = self.layers.read().await;
3513 2273 : let Ok(lm) = guard.layer_map() else {
3514 0 : info!("dropping out of flush loop for timeline shutdown");
3515 0 : return;
3516 : };
3517 2273 : num_frozen_layers = lm.frozen_layers.len();
3518 2273 : frozen_layer_total_size = lm
3519 2273 : .frozen_layers
3520 2273 : .iter()
3521 2273 : .map(|l| l.estimated_in_mem_size())
3522 2273 : .sum::<u64>();
3523 2273 : lm.frozen_layers.front().cloned()
3524 : // drop 'layers' lock to allow concurrent reads and writes
3525 : };
3526 2273 : let Some(layer_to_flush) = layer_to_flush else {
3527 1131 : break Ok(());
3528 : };
3529 1142 : if num_frozen_layers
3530 1142 : > std::cmp::max(
3531 1142 : self.get_compaction_threshold(),
3532 1142 : DEFAULT_COMPACTION_THRESHOLD,
3533 1142 : )
3534 0 : && frozen_layer_total_size >= /* 128 MB */ 128000000
3535 : {
3536 0 : tracing::warn!(
3537 0 : "too many frozen layers: {num_frozen_layers} layers with estimated in-mem size of {frozen_layer_total_size} bytes",
3538 : );
3539 1142 : }
3540 17080 : match self.flush_frozen_layer(layer_to_flush, ctx).await {
3541 1142 : Ok(this_layer_to_lsn) => {
3542 1142 : flushed_to_lsn = std::cmp::max(flushed_to_lsn, this_layer_to_lsn);
3543 1142 : }
3544 : Err(FlushLayerError::Cancelled) => {
3545 0 : info!("dropping out of flush loop for timeline shutdown");
3546 0 : return;
3547 : }
3548 0 : err @ Err(
3549 0 : FlushLayerError::NotRunning(_)
3550 0 : | FlushLayerError::Other(_)
3551 0 : | FlushLayerError::CreateImageLayersError(_),
3552 0 : ) => {
3553 0 : error!("could not flush frozen layer: {err:?}");
3554 0 : break err.map(|_| ());
3555 : }
3556 : }
3557 1142 : timer.stop_and_record();
3558 : };
3559 :
3560 : // Unsharded tenants should never advance their LSN beyond the end of the
3561 : // highest layer they write: such gaps between layer data and the frozen LSN
3562 : // are only legal on sharded tenants.
3563 1131 : debug_assert!(
3564 1131 : self.shard_identity.count.count() > 1
3565 1131 : || flushed_to_lsn >= frozen_to_lsn
3566 67 : || !flushed_to_lsn.is_valid()
3567 : );
3568 :
3569 1131 : if flushed_to_lsn < frozen_to_lsn && self.shard_identity.count.count() > 1 {
3570 : // If our layer flushes didn't carry disk_consistent_lsn up to the `to_lsn` advertised
3571 : // to us via layer_flush_start_rx, then advance it here.
3572 : //
3573 : // This path is only taken for tenants with multiple shards: single sharded tenants should
3574 : // never encounter a gap in the wal.
3575 0 : let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
3576 0 : tracing::debug!("Advancing disk_consistent_lsn across layer gap {old_disk_consistent_lsn}->{frozen_to_lsn}");
3577 0 : if self.set_disk_consistent_lsn(frozen_to_lsn) {
3578 0 : if let Err(e) = self.schedule_uploads(frozen_to_lsn, vec![]) {
3579 0 : tracing::warn!("Failed to schedule metadata upload after updating disk_consistent_lsn: {e}");
3580 0 : }
3581 0 : }
3582 1131 : }
3583 :
3584 : // Notify any listeners that we're done
3585 1131 : let _ = self
3586 1131 : .layer_flush_done_tx
3587 1131 : .send_replace((flush_counter, result));
3588 : }
3589 8 : }
3590 :
3591 :     /// Waits for any flush request created by [`Self::freeze_inmem_layer_at`] to complete.
3592 1090 : async fn wait_flush_completion(&self, request: u64) -> Result<(), FlushLayerError> {
3593 1090 : let mut rx = self.layer_flush_done_tx.subscribe();
3594 : loop {
3595 : {
3596 2179 : let (last_result_counter, last_result) = &*rx.borrow();
3597 2179 : if *last_result_counter >= request {
3598 1090 : if let Err(err) = last_result {
3599 : // We already logged the original error in
3600 : // flush_loop. We cannot propagate it to the caller
3601 : // here, because it might not be Cloneable
3602 0 : return Err(err.clone());
3603 : } else {
3604 1090 : return Ok(());
3605 : }
3606 1089 : }
3607 1089 : }
3608 1089 : trace!("waiting for flush to complete");
3609 1089 : tokio::select! {
3610 1089 : rx_e = rx.changed() => {
3611 1089 : rx_e.map_err(|_| FlushLayerError::NotRunning(*self.flush_loop_state.lock().unwrap()))?;
3612 : },
3613 : // Cancellation safety: we are not leaving an I/O in-flight for the flush, we're just ignoring
3614 : // the notification from [`flush_loop`] that it completed.
3615 1089 : _ = self.cancel.cancelled() => {
3616 0 :                     tracing::info!("Cancelled layer flush due to timeline shutdown");
3617 0 : return Ok(())
3618 : }
3619 : };
3620 1089 : trace!("done")
3621 : }
3622 1090 : }
3623 :
3624 : /// Flush one frozen in-memory layer to disk, as a new delta layer.
3625 : ///
3626 : /// Return value is the last lsn (inclusive) of the layer that was frozen.
3627 1142 : #[instrument(skip_all, fields(layer=%frozen_layer))]
3628 : async fn flush_frozen_layer(
3629 : self: &Arc<Self>,
3630 : frozen_layer: Arc<InMemoryLayer>,
3631 : ctx: &RequestContext,
3632 : ) -> Result<Lsn, FlushLayerError> {
3633 : debug_assert_current_span_has_tenant_and_timeline_id();
3634 :
3635 : // As a special case, when we have just imported an image into the repository,
3636 :         // instead of writing out an L0 delta layer, we directly write out image layer
3637 :         // files. This is possible as long as *all* the data imported into the
3638 : // repository have the same LSN.
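     :         // (The check below encodes this: the frozen layer must span exactly the
     :         // single-LSN range `initdb_lsn..initdb_lsn + 1`.)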
3639 : let lsn_range = frozen_layer.get_lsn_range();
3640 :
3641 : // Whether to directly create image layers for this flush, or flush them as delta layers
3642 : let create_image_layer =
3643 : lsn_range.start == self.initdb_lsn && lsn_range.end == Lsn(self.initdb_lsn.0 + 1);
3644 :
3645 : #[cfg(test)]
3646 : {
3647 : match &mut *self.flush_loop_state.lock().unwrap() {
3648 : FlushLoopState::NotStarted | FlushLoopState::Exited => {
3649 : panic!("flush loop not running")
3650 : }
3651 : FlushLoopState::Running {
3652 : expect_initdb_optimization,
3653 : initdb_optimization_count,
3654 : ..
3655 : } => {
3656 : if create_image_layer {
3657 : *initdb_optimization_count += 1;
3658 : } else {
3659 : assert!(!*expect_initdb_optimization, "expected initdb optimization");
3660 : }
3661 : }
3662 : }
3663 : }
3664 :
3665 : let (layers_to_upload, delta_layer_to_add) = if create_image_layer {
3666 : // Note: The 'ctx' in use here has DownloadBehavior::Error. We should not
3667 : // require downloading anything during initial import.
3668 : let ((rel_partition, metadata_partition), _lsn) = self
3669 : .repartition(
3670 : self.initdb_lsn,
3671 : self.get_compaction_target_size(),
3672 : EnumSet::empty(),
3673 : ctx,
3674 : )
3675 : .await
3676 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e.into()))?;
3677 :
3678 : if self.cancel.is_cancelled() {
3679 : return Err(FlushLayerError::Cancelled);
3680 : }
3681 :
3682 : let mut layers_to_upload = Vec::new();
3683 : layers_to_upload.extend(
3684 : self.create_image_layers(
3685 : &rel_partition,
3686 : self.initdb_lsn,
3687 : ImageLayerCreationMode::Initial,
3688 : ctx,
3689 : )
3690 : .await?,
3691 : );
3692 : if !metadata_partition.parts.is_empty() {
3693 : assert_eq!(
3694 : metadata_partition.parts.len(),
3695 : 1,
3696 : "currently sparse keyspace should only contain a single metadata keyspace"
3697 : );
3698 : layers_to_upload.extend(
3699 : self.create_image_layers(
3700 : // Safety: create_image_layers treat sparse keyspaces differently that it does not scan
3701 :                         // Safety: create_image_layers treats sparse keyspaces differently, in that it does not scan
3702 : // into a dense keyspace before calling this function.
3703 : &metadata_partition.into_dense(),
3704 : self.initdb_lsn,
3705 : ImageLayerCreationMode::Initial,
3706 : ctx,
3707 : )
3708 : .await?,
3709 : );
3710 : }
3711 :
3712 : (layers_to_upload, None)
3713 : } else {
3714 : // Normal case, write out a L0 delta layer file.
3715 : // `create_delta_layer` will not modify the layer map.
3716 : // We will remove frozen layer and add delta layer in one atomic operation later.
3717 : let Some(layer) = self
3718 : .create_delta_layer(&frozen_layer, None, ctx)
3719 : .await
3720 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?
3721 : else {
3722 : panic!("delta layer cannot be empty if no filter is applied");
3723 : };
3724 : (
3725 :                 // FIXME: even though we have a single-image and single-delta-layer assumption,
3726 :                 // we push them to a vec
3727 : vec![layer.clone()],
3728 : Some(layer),
3729 : )
3730 : };
3731 :
3732 : pausable_failpoint!("flush-layer-cancel-after-writing-layer-out-pausable");
3733 :
3734 : if self.cancel.is_cancelled() {
3735 : return Err(FlushLayerError::Cancelled);
3736 : }
3737 :
3738 : let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1);
3739 :
3740 : // The new on-disk layers are now in the layer map. We can remove the
3741 : // in-memory layer from the map now. The flushed layer is stored in
3742 : // the mapping in `create_delta_layer`.
3743 : {
3744 : let mut guard = self.layers.write().await;
3745 :
3746 : guard.open_mut()?.finish_flush_l0_layer(
3747 : delta_layer_to_add.as_ref(),
3748 : &frozen_layer,
3749 : &self.metrics,
3750 : );
3751 :
3752 : if self.set_disk_consistent_lsn(disk_consistent_lsn) {
3753 : // Schedule remote uploads that will reflect our new disk_consistent_lsn
3754 : self.schedule_uploads(disk_consistent_lsn, layers_to_upload)
3755 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?;
3756 : }
3757 : // release lock on 'layers'
3758 : };
3759 :
3760 : // Backpressure mechanism: wait with continuation of the flush loop until we have uploaded all layer files.
3761 : // This makes us refuse ingest until the new layers have been persisted to the remote.
3762 : self.remote_client
3763 : .wait_completion()
3764 : .await
3765 0 : .map_err(|e| match e {
3766 : WaitCompletionError::UploadQueueShutDownOrStopped
3767 : | WaitCompletionError::NotInitialized(
3768 : NotInitialized::ShuttingDown | NotInitialized::Stopped,
3769 0 : ) => FlushLayerError::Cancelled,
3770 : WaitCompletionError::NotInitialized(NotInitialized::Uninitialized) => {
3771 0 : FlushLayerError::Other(anyhow!(e).into())
3772 : }
3773 0 : })?;
3774 :
3775 : // FIXME: between create_delta_layer and the scheduling of the upload in `update_metadata_file`,
3776 : // a compaction can delete the file and then it won't be available for uploads any more.
3777 : // We still schedule the upload, resulting in an error, but ideally we'd somehow avoid this
3778 : // race situation.
3779 : // See https://github.com/neondatabase/neon/issues/4526
3780 : pausable_failpoint!("flush-frozen-pausable");
3781 :
3782 : // This failpoint is used by another test case `test_pageserver_recovery`.
3783 : fail_point!("flush-frozen-exit");
3784 :
3785 : Ok(Lsn(lsn_range.end.0 - 1))
3786 : }
3787 :
3788 : /// Return true if the value changed
3789 : ///
3790 : /// This function must only be used from the layer flush task.
3791 1142 : fn set_disk_consistent_lsn(&self, new_value: Lsn) -> bool {
3792 1142 :         assert!(new_value >= old_value, "disk_consistent_lsn must grow monotonically at runtime; current {old_value}, offered {new_value}");
3793 1142 : assert!(new_value >= old_value, "disk_consistent_lsn must be growing monotonously at runtime; current {old_value}, offered {new_value}");
3794 1142 : new_value != old_value
3795 1142 : }
3796 :
3797 : /// Update metadata file
3798 1144 : fn schedule_uploads(
3799 1144 : &self,
3800 1144 : disk_consistent_lsn: Lsn,
3801 1144 : layers_to_upload: impl IntoIterator<Item = ResidentLayer>,
3802 1144 : ) -> anyhow::Result<()> {
3803 1144 : // We can only save a valid 'prev_record_lsn' value on disk if we
3804 1144 : // flushed *all* in-memory changes to disk. We only track
3805 1144 : // 'prev_record_lsn' in memory for the latest processed record, so we
3806 1144 :         // don't remember what the correct value for some older LSN is. But
3807 1144 :         // if we flush everything, then the value corresponding to the current
3808 1144 :         // 'last_record_lsn' is correct and we can store it on disk.
3809 1144 : let RecordLsn {
3810 1144 : last: last_record_lsn,
3811 1144 : prev: prev_record_lsn,
3812 1144 : } = self.last_record_lsn.load();
3813 1144 : let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
3814 1064 : Some(prev_record_lsn)
3815 : } else {
3816 80 : None
3817 : };
3818 :
3819 1144 : let update = crate::tenant::metadata::MetadataUpdate::new(
3820 1144 : disk_consistent_lsn,
3821 1144 : ondisk_prev_record_lsn,
3822 1144 : *self.latest_gc_cutoff_lsn.read(),
3823 1144 : );
3824 1144 :
3825 1144 : fail_point!("checkpoint-before-saving-metadata", |x| bail!(
3826 0 : "{}",
3827 0 : x.unwrap()
3828 1144 : ));
3829 :
3830 2298 : for layer in layers_to_upload {
3831 1154 : self.remote_client.schedule_layer_file_upload(layer)?;
3832 : }
3833 1144 : self.remote_client
3834 1144 : .schedule_index_upload_for_metadata_update(&update)?;
3835 :
3836 1144 : Ok(())
3837 1144 : }
3838 :
3839 0 : pub(crate) async fn preserve_initdb_archive(&self) -> anyhow::Result<()> {
3840 0 : self.remote_client
3841 0 : .preserve_initdb_archive(
3842 0 : &self.tenant_shard_id.tenant_id,
3843 0 : &self.timeline_id,
3844 0 : &self.cancel,
3845 0 : )
3846 0 : .await
3847 0 : }
3848 :
3849 : // Write out the given frozen in-memory layer as a new L0 delta file. This L0 file will not be tracked
3850 :     // in the layer map immediately. The caller is responsible for putting it into the layer map.
3851 968 : async fn create_delta_layer(
3852 968 : self: &Arc<Self>,
3853 968 : frozen_layer: &Arc<InMemoryLayer>,
3854 968 : key_range: Option<Range<Key>>,
3855 968 : ctx: &RequestContext,
3856 968 : ) -> anyhow::Result<Option<ResidentLayer>> {
3857 968 : let self_clone = Arc::clone(self);
3858 968 : let frozen_layer = Arc::clone(frozen_layer);
3859 968 : let ctx = ctx.attached_child();
3860 968 : let work = async move {
3861 968 : let Some((desc, path)) = frozen_layer
3862 968 : .write_to_disk(&ctx, key_range, self_clone.l0_flush_global_state.inner())
3863 10251 : .await?
3864 : else {
3865 0 : return Ok(None);
3866 : };
3867 968 : let new_delta = Layer::finish_creating(self_clone.conf, &self_clone, desc, &path)?;
3868 :
3869 : // The write_to_disk() above calls writer.finish() which already did the fsync of the inodes.
3870 : // We just need to fsync the directory in which these inodes are linked,
3871 : // which we know to be the timeline directory.
3872 : //
3873 :             // We use fatal_err() below because after write_to_disk returns with success,
3874 : // the in-memory state of the filesystem already has the layer file in its final place,
3875 : // and subsequent pageserver code could think it's durable while it really isn't.
3876 968 : let timeline_dir = VirtualFile::open(
3877 968 : &self_clone
3878 968 : .conf
3879 968 : .timeline_path(&self_clone.tenant_shard_id, &self_clone.timeline_id),
3880 968 : &ctx,
3881 968 : )
3882 489 : .await
3883 968 : .fatal_err("VirtualFile::open for timeline dir fsync");
3884 968 : timeline_dir
3885 968 : .sync_all()
3886 484 : .await
3887 968 : .fatal_err("VirtualFile::sync_all timeline dir");
3888 968 : anyhow::Ok(Some(new_delta))
3889 968 : };
3890 : // Before tokio-epoll-uring, we ran write_to_disk & the sync_all inside spawn_blocking.
3891 :         // Preserve that behavior for `virtual_file_io_engine=std-fs`.
3892 : use crate::virtual_file::io_engine::IoEngine;
3893 968 : match crate::virtual_file::io_engine::get() {
3894 0 : IoEngine::NotSet => panic!("io engine not set"),
3895 : IoEngine::StdFs => {
3896 484 : let span = tracing::info_span!("blocking");
3897 484 : tokio::task::spawn_blocking({
3898 484 : move || Handle::current().block_on(work.instrument(span))
3899 484 : })
3900 484 : .await
3901 484 : .context("spawn_blocking")
3902 484 : .and_then(|x| x)
3903 : }
3904 : #[cfg(target_os = "linux")]
3905 11219 : IoEngine::TokioEpollUring => work.await,
3906 : }
3907 968 : }
3908 :
3909 538 : async fn repartition(
3910 538 : &self,
3911 538 : lsn: Lsn,
3912 538 : partition_size: u64,
3913 538 : flags: EnumSet<CompactFlags>,
3914 538 : ctx: &RequestContext,
3915 538 : ) -> Result<((KeyPartitioning, SparseKeyPartitioning), Lsn), CompactionError> {
3916 538 : let Ok(mut partitioning_guard) = self.partitioning.try_lock() else {
3917 : // NB: there are two callers, one is the compaction task, of which there is only one per struct Tenant and hence Timeline.
3918 :             // The other is the initdb optimization in flush_frozen_layer, used by `bootstrap_timeline`, which runs before `.activate()`
3919 : // and hence before the compaction task starts.
3920 0 : return Err(CompactionError::Other(anyhow!(
3921 0 : "repartition() called concurrently, this should not happen"
3922 0 : )));
3923 : };
3924 538 : let ((dense_partition, sparse_partition), partition_lsn) = &*partitioning_guard;
3925 538 : if lsn < *partition_lsn {
3926 0 : return Err(CompactionError::Other(anyhow!(
3927 0 : "repartition() called with LSN going backwards, this should not happen"
3928 0 : )));
3929 538 : }
3930 538 :
3931 538 : let distance = lsn.0 - partition_lsn.0;
3932 538 : if *partition_lsn != Lsn(0)
3933 262 : && distance <= self.repartition_threshold
3934 262 : && !flags.contains(CompactFlags::ForceRepartition)
3935 : {
3936 248 : debug!(
3937 : distance,
3938 : threshold = self.repartition_threshold,
3939 0 : "no repartitioning needed"
3940 : );
3941 248 : return Ok((
3942 248 : (dense_partition.clone(), sparse_partition.clone()),
3943 248 : *partition_lsn,
3944 248 : ));
3945 290 : }
3946 :
3947 15721 : let (dense_ks, sparse_ks) = self.collect_keyspace(lsn, ctx).await?;
3948 290 : let dense_partitioning = dense_ks.partition(&self.shard_identity, partition_size);
3949 290 : let sparse_partitioning = SparseKeyPartitioning {
3950 290 : parts: vec![sparse_ks],
3951 290 : }; // no partitioning for metadata keys for now
3952 290 : *partitioning_guard = ((dense_partitioning, sparse_partitioning), lsn);
3953 290 :
3954 290 : Ok((partitioning_guard.0.clone(), partitioning_guard.1))
3955 538 : }
3956 :
3957 : // Is it time to create a new image layer for the given partition?
3958 14 : async fn time_for_new_image_layer(&self, partition: &KeySpace, lsn: Lsn) -> bool {
3959 14 : let threshold = self.get_image_creation_threshold();
3960 :
3961 14 : let guard = self.layers.read().await;
3962 14 : let Ok(layers) = guard.layer_map() else {
3963 0 : return false;
3964 : };
3965 :
3966 14 : let mut max_deltas = 0;
3967 28 : for part_range in &partition.ranges {
3968 14 : let image_coverage = layers.image_coverage(part_range, lsn);
3969 28 : for (img_range, last_img) in image_coverage {
3970 14 : let img_lsn = if let Some(last_img) = last_img {
3971 0 : last_img.get_lsn_range().end
3972 : } else {
3973 14 : Lsn(0)
3974 : };
3975 : // Let's consider an example:
3976 : //
3977 : // delta layer with LSN range 71-81
3978 : // delta layer with LSN range 81-91
3979 : // delta layer with LSN range 91-101
3980 : // image layer at LSN 100
3981 : //
3982 : // If 'lsn' is still 100, i.e. no new WAL has been processed since the last image layer,
3983 : // there's no need to create a new one. We check this case explicitly, to avoid passing
3984 : // a bogus range to count_deltas below, with start > end. It's even possible that there
3985 : // are some delta layers *later* than current 'lsn', if more WAL was processed and flushed
3986 : // after we read last_record_lsn, which is passed here in the 'lsn' argument.
3987 14 : if img_lsn < lsn {
3988 14 : let num_deltas =
3989 14 : layers.count_deltas(&img_range, &(img_lsn..lsn), Some(threshold));
3990 14 :
3991 14 : max_deltas = max_deltas.max(num_deltas);
3992 14 : if num_deltas >= threshold {
3993 0 : debug!(
3994 0 : "key range {}-{}, has {} deltas on this timeline in LSN range {}..{}",
3995 : img_range.start, img_range.end, num_deltas, img_lsn, lsn
3996 : );
3997 0 : return true;
3998 14 : }
3999 0 : }
4000 : }
4001 : }
4002 :
4003 14 : debug!(
4004 : max_deltas,
4005 0 : "none of the partitioned ranges had >= {threshold} deltas"
4006 : );
4007 14 : false
4008 14 : }
4009 :
4010 : /// Create image layers for Postgres data. Assumes the caller passes a partition that is not too large,
4011 : /// so that at most one image layer will be produced from this function.
4012 200 : async fn create_image_layer_for_rel_blocks(
4013 200 : self: &Arc<Self>,
4014 200 : partition: &KeySpace,
4015 200 : mut image_layer_writer: ImageLayerWriter,
4016 200 : lsn: Lsn,
4017 200 : ctx: &RequestContext,
4018 200 : img_range: Range<Key>,
4019 200 : start: Key,
4020 200 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
4021 200 : let mut wrote_keys = false;
4022 200 :
4023 200 : let mut key_request_accum = KeySpaceAccum::new();
4024 1330 : for range in &partition.ranges {
4025 1130 : let mut key = range.start;
4026 2446 : while key < range.end {
4027 : // Decide whether to retain this key: usually we do, but sharded tenants may
4028 : // need to drop keys that don't belong to them. If we retain the key, add it
4029 : // to `key_request_accum` for later issuing a vectored get
4030 1316 : if self.shard_identity.is_key_disposable(&key) {
4031 0 : debug!(
4032 0 : "Dropping key {} during compaction (it belongs on shard {:?})",
4033 0 : key,
4034 0 : self.shard_identity.get_shard_number(&key)
4035 : );
4036 1316 : } else {
4037 1316 : key_request_accum.add_key(key);
4038 1316 : }
4039 :
4040 1316 : let last_key_in_range = key.next() == range.end;
4041 1316 : key = key.next();
4042 1316 :
4043 1316 :                 // Maybe flush `key_request_accum`
4044 1316 : if key_request_accum.raw_size() >= Timeline::MAX_GET_VECTORED_KEYS
4045 1316 : || (last_key_in_range && key_request_accum.raw_size() > 0)
4046 : {
4047 1130 : let results = self
4048 1130 : .get_vectored(key_request_accum.consume_keyspace(), lsn, ctx)
4049 51 : .await?;
4050 :
4051 1130 : if self.cancel.is_cancelled() {
4052 0 : return Err(CreateImageLayersError::Cancelled);
4053 1130 : }
4054 :
4055 2446 : for (img_key, img) in results {
4056 1316 : let img = match img {
4057 1316 : Ok(img) => img,
4058 0 : Err(err) => {
4059 0 : // If we fail to reconstruct a VM or FSM page, we can zero the
4060 0 : // page without losing any actual user data. That seems better
4061 0 : // than failing repeatedly and getting stuck.
4062 0 : //
4063 0 : // We had a bug at one point, where we truncated the FSM and VM
4064 0 :                             // in the pageserver, but Postgres didn't know about that
4065 0 : // and continued to generate incremental WAL records for pages
4066 0 : // that didn't exist in the pageserver. Trying to replay those
4067 0 : // WAL records failed to find the previous image of the page.
4068 0 : // This special case allows us to recover from that situation.
4069 0 : // See https://github.com/neondatabase/neon/issues/2601.
4070 0 : //
4071 0 : // Unfortunately we cannot do this for the main fork, or for
4072 0 :                             // any metadata keys, as that would lead to actual data
4073 0 : // loss.
4074 0 : if img_key.is_rel_fsm_block_key() || img_key.is_rel_vm_block_key() {
4075 0 : warn!("could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}");
4076 0 : ZERO_PAGE.clone()
4077 : } else {
4078 0 : return Err(CreateImageLayersError::from(err));
4079 : }
4080 : }
4081 : };
4082 :
4083 : // Write all the keys we just read into our new image layer.
4084 1465 : image_layer_writer.put_image(img_key, img, ctx).await?;
4085 1316 : wrote_keys = true;
4086 : }
4087 186 : }
4088 : }
4089 : }
4090 :
4091 200 : if wrote_keys {
4092 : // Normal path: we have written some data into the new image layer for this
4093 : // partition, so flush it to disk.
4094 406 : let (desc, path) = image_layer_writer.finish(ctx).await?;
4095 200 : let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
4096 200 : info!("created image layer for rel {}", image_layer.local_path());
4097 200 : Ok(ImageLayerCreationOutcome {
4098 200 : image: Some(image_layer),
4099 200 : next_start_key: img_range.end,
4100 200 : })
4101 : } else {
4102 : // Special case: the image layer may be empty if this is a sharded tenant and the
4103 : // partition does not cover any keys owned by this shard. In this case, to ensure
4104 : // we don't leave gaps between image layers, leave `start` where it is, so that the next
4105 : // layer we write will cover the key range that we just scanned.
4106 0 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
4107 0 : Ok(ImageLayerCreationOutcome {
4108 0 : image: None,
4109 0 : next_start_key: start,
4110 0 : })
4111 : }
4112 200 : }
4113 :
4114 : /// Create an image layer for metadata keys. This function produces one image layer for all metadata
4115 :     /// keys for now. Because metadata keys cannot exceed the basebackup size limit, all of them
4116 :     /// will fit in a single image layer.
4117 : #[allow(clippy::too_many_arguments)]
4118 190 : async fn create_image_layer_for_metadata_keys(
4119 190 : self: &Arc<Self>,
4120 190 : partition: &KeySpace,
4121 190 : mut image_layer_writer: ImageLayerWriter,
4122 190 : lsn: Lsn,
4123 190 : ctx: &RequestContext,
4124 190 : img_range: Range<Key>,
4125 190 : mode: ImageLayerCreationMode,
4126 190 : start: Key,
4127 190 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
4128 190 : // Metadata keys image layer creation.
4129 190 : let mut reconstruct_state = ValuesReconstructState::default();
4130 190 : let begin = Instant::now();
4131 190 : let data = self
4132 190 : .get_vectored_impl(partition.clone(), lsn, &mut reconstruct_state, ctx)
4133 1061 : .await?;
4134 190 : let (data, total_kb_retrieved, total_keys_retrieved) = {
4135 190 : let mut new_data = BTreeMap::new();
4136 190 : let mut total_kb_retrieved = 0;
4137 190 : let mut total_keys_retrieved = 0;
4138 10202 : for (k, v) in data {
4139 10012 : let v = v?;
4140 10012 : total_kb_retrieved += KEY_SIZE + v.len();
4141 10012 : total_keys_retrieved += 1;
4142 10012 : new_data.insert(k, v);
4143 : }
4144 190 : (new_data, total_kb_retrieved / 1024, total_keys_retrieved)
4145 190 : };
4146 190 : let delta_files_accessed = reconstruct_state.get_delta_layers_visited();
4147 190 : let elapsed = begin.elapsed();
4148 190 :
4149 190 : let trigger_generation = delta_files_accessed as usize >= MAX_AUX_FILE_V2_DELTAS;
4150 190 : info!(
4151 0 : "metadata key compaction: trigger_generation={trigger_generation}, delta_files_accessed={delta_files_accessed}, total_kb_retrieved={total_kb_retrieved}, total_keys_retrieved={total_keys_retrieved}, read_time={}s", elapsed.as_secs_f64()
4152 : );
4153 :
4154 190 : if !trigger_generation && mode == ImageLayerCreationMode::Try {
4155 2 : return Ok(ImageLayerCreationOutcome {
4156 2 : image: None,
4157 2 : next_start_key: img_range.end,
4158 2 : });
4159 188 : }
4160 188 : if self.cancel.is_cancelled() {
4161 0 : return Err(CreateImageLayersError::Cancelled);
4162 188 : }
4163 188 : let mut wrote_any_image = false;
4164 10200 : for (k, v) in data {
4165 10012 : if v.is_empty() {
4166 : // the key has been deleted, it does not need an image
4167 : // in metadata keyspace, an empty image == tombstone
4168 8 : continue;
4169 10004 : }
4170 10004 : wrote_any_image = true;
4171 10004 :
4172 10004 : // No need to handle sharding b/c metadata keys are always on the 0-th shard.
4173 10004 :
4174 10004 : // TODO: split image layers to avoid too large layer files. Too large image files are not handled
4175 10004 : // on the normal data path either.
4176 10162 : image_layer_writer.put_image(k, v, ctx).await?;
4177 : }
4178 :
4179 188 : if wrote_any_image {
4180 : // Normal path: we have written some data into the new image layer for this
4181 : // partition, so flush it to disk.
4182 24 : let (desc, path) = image_layer_writer.finish(ctx).await?;
4183 12 : let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
4184 12 : info!(
4185 0 : "created image layer for metadata {}",
4186 0 : image_layer.local_path()
4187 : );
4188 12 : Ok(ImageLayerCreationOutcome {
4189 12 : image: Some(image_layer),
4190 12 : next_start_key: img_range.end,
4191 12 : })
4192 : } else {
4193 : // Special case: the image layer may be empty if this is a sharded tenant and the
4194 : // partition does not cover any keys owned by this shard. In this case, to ensure
4195 : // we don't leave gaps between image layers, leave `start` where it is, so that the next
4196 : // layer we write will cover the key range that we just scanned.
4197 176 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
4198 176 : Ok(ImageLayerCreationOutcome {
4199 176 : image: None,
4200 176 : next_start_key: start,
4201 176 : })
4202 : }
4203 190 : }
4204 :
4205 : /// Predicate function which indicates whether we should check if new image layers
4206 : /// are required. Since checking if new image layers are required is expensive in
4207 : /// terms of CPU, we only do it in the following cases:
4208 : /// 1. If the timeline has ingested sufficient WAL to justify the cost
4209 : /// 2. If enough time has passed since the last check:
4210 : /// 1. For large tenants, we wish to perform the check more often since they
4211 : /// suffer from the lack of image layers
4212 : /// 2. For small tenants (that can mostly fit in RAM), we use a much longer interval
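     :     ///
     :     /// Concretely, per the body below: "large" means an exact logical size of at
     :     /// least 2 GiB, in which case the time-based check fires after
     :     /// `checkpoint_timeout`; smaller tenants use a fixed 48-hour interval.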
4213 712 : fn should_check_if_image_layers_required(self: &Arc<Timeline>, lsn: Lsn) -> bool {
4214 : const LARGE_TENANT_THRESHOLD: u64 = 2 * 1024 * 1024 * 1024;
4215 :
4216 712 : let last_checks_at = self.last_image_layer_creation_check_at.load();
4217 712 : let distance = lsn
4218 712 : .checked_sub(last_checks_at)
4219 712 : .expect("Attempt to compact with LSN going backwards");
4220 712 : let min_distance =
4221 712 : self.get_image_layer_creation_check_threshold() as u64 * self.get_checkpoint_distance();
4222 712 :
4223 712 : let distance_based_decision = distance.0 >= min_distance;
4224 712 :
4225 712 : let mut time_based_decision = false;
4226 712 : let mut last_check_instant = self.last_image_layer_creation_check_instant.lock().unwrap();
4227 712 : if let CurrentLogicalSize::Exact(logical_size) = self.current_logical_size.current_size() {
4228 610 : let check_required_after = if Into::<u64>::into(&logical_size) >= LARGE_TENANT_THRESHOLD
4229 : {
4230 0 : self.get_checkpoint_timeout()
4231 : } else {
4232 610 : Duration::from_secs(3600 * 48)
4233 : };
4234 :
4235 610 : time_based_decision = match *last_check_instant {
4236 436 : Some(last_check) => {
4237 436 : let elapsed = last_check.elapsed();
4238 436 : elapsed >= check_required_after
4239 : }
4240 174 : None => true,
4241 : };
4242 102 : }
4243 :
4244 : // Do the expensive delta layer counting only if this timeline has ingested sufficient
4245 : // WAL since the last check or a checkpoint timeout interval has elapsed since the last
4246 : // check.
4247 712 : let decision = distance_based_decision || time_based_decision;
4248 :
4249 712 : if decision {
4250 176 : self.last_image_layer_creation_check_at.store(lsn);
4251 176 : *last_check_instant = Some(Instant::now());
4252 536 : }
4253 :
4254 712 : decision
4255 712 : }
4256 :
4257 712 : #[tracing::instrument(skip_all, fields(%lsn, %mode))]
4258 : async fn create_image_layers(
4259 : self: &Arc<Timeline>,
4260 : partitioning: &KeyPartitioning,
4261 : lsn: Lsn,
4262 : mode: ImageLayerCreationMode,
4263 : ctx: &RequestContext,
4264 : ) -> Result<Vec<ResidentLayer>, CreateImageLayersError> {
4265 : let timer = self.metrics.create_images_time_histo.start_timer();
4266 : let mut image_layers = Vec::new();
4267 :
4268 : // We need to avoid holes between generated image layers.
4269 : // Otherwise LayerMap::image_layer_exists will return false if the key range of some layer is covered by
4270 : // more than one image layer with a hole between them. In that case such a layer cannot be utilized by GC.
4271 : //
4272 : // How can such a hole between partitions appear?
4273 : // If we have a relation with relid=1 and size 100 and a relation with relid=2 and size 200, the result of
4274 : // KeySpace::partition may contain partitions <100000000..100000099> and <200000000..200000199>.
4275 : // If there is a delta layer <100000000..300000000>, it will never be garbage collected, because the
4276 : // image layers <100000000..100000099> and <200000000..200000199> do not completely cover it.
4277 : let mut start = Key::MIN;
4278 :
4279 : let check_for_image_layers = self.should_check_if_image_layers_required(lsn);
4280 :
4281 : for partition in partitioning.parts.iter() {
4282 : if self.cancel.is_cancelled() {
4283 : return Err(CreateImageLayersError::Cancelled);
4284 : }
4285 :
4286 : let img_range = start..partition.ranges.last().unwrap().end;
4287 : let compact_metadata = partition.overlaps(&Key::metadata_key_range());
4288 : if compact_metadata {
4289 : for range in &partition.ranges {
4290 : assert!(
4291 : range.start.field1 >= METADATA_KEY_BEGIN_PREFIX
4292 : && range.end.field1 <= METADATA_KEY_END_PREFIX,
4293 : "metadata keys must be partitioned separately"
4294 : );
4295 : }
4296 : if mode == ImageLayerCreationMode::Try && !check_for_image_layers {
4297 : // Skip compaction if there are not enough updates. Metadata compaction will do a scan and
4298 : // might interfere with evictions.
4299 : start = img_range.end;
4300 : continue;
4301 : }
4302 : // For initial and force modes, we always generate image layers for metadata keys.
4303 : } else if let ImageLayerCreationMode::Try = mode {
4304 : // check_for_image_layers = false -> skip
4305 : // check_for_image_layers = true -> check time_for_new_image_layer -> skip/generate
4306 : if !check_for_image_layers || !self.time_for_new_image_layer(partition, lsn).await {
4307 : start = img_range.end;
4308 : continue;
4309 : }
4310 : }
4311 : if let ImageLayerCreationMode::Force = mode {
4312 : // When forced to create image layers, we might try to create them where they already
4313 : // exist. This mode is only used in tests/debug.
4314 : let layers = self.layers.read().await;
4315 : if layers.contains_key(&PersistentLayerKey {
4316 : key_range: img_range.clone(),
4317 : lsn_range: PersistentLayerDesc::image_layer_lsn_range(lsn),
4318 : is_delta: false,
4319 : }) {
4320 : tracing::info!(
4321 : "Skipping image layer at {lsn} {}..{}, already exists",
4322 : img_range.start,
4323 : img_range.end
4324 : );
4325 : start = img_range.end;
4326 : continue;
4327 : }
4328 : }
4329 :
4330 : let image_layer_writer = ImageLayerWriter::new(
4331 : self.conf,
4332 : self.timeline_id,
4333 : self.tenant_shard_id,
4334 : &img_range,
4335 : lsn,
4336 : ctx,
4337 : )
4338 : .await?;
4339 :
4340 0 : fail_point!("image-layer-writer-fail-before-finish", |_| {
4341 0 : Err(CreateImageLayersError::Other(anyhow::anyhow!(
4342 0 : "failpoint image-layer-writer-fail-before-finish"
4343 0 : )))
4344 0 : });
4345 :
4346 : if !compact_metadata {
4347 : let ImageLayerCreationOutcome {
4348 : image,
4349 : next_start_key,
4350 : } = self
4351 : .create_image_layer_for_rel_blocks(
4352 : partition,
4353 : image_layer_writer,
4354 : lsn,
4355 : ctx,
4356 : img_range,
4357 : start,
4358 : )
4359 : .await?;
4360 :
4361 : start = next_start_key;
4362 : image_layers.extend(image);
4363 : } else {
4364 : let ImageLayerCreationOutcome {
4365 : image,
4366 : next_start_key,
4367 : } = self
4368 : .create_image_layer_for_metadata_keys(
4369 : partition,
4370 : image_layer_writer,
4371 : lsn,
4372 : ctx,
4373 : img_range,
4374 : mode,
4375 : start,
4376 : )
4377 : .await?;
4378 : start = next_start_key;
4379 : image_layers.extend(image);
4380 : }
4381 : }
4382 :
4383 : let mut guard = self.layers.write().await;
4384 :
4385 : // FIXME: we could add the images to be uploaded *before* returning from here, but right
4386 : // now they are being scheduled outside of the write lock; the current way is inconsistent with
4387 : // the compaction lock order.
4388 : guard
4389 : .open_mut()?
4390 : .track_new_image_layers(&image_layers, &self.metrics);
4391 : drop_wlock(guard);
4392 : timer.stop_and_record();
4393 :
4394 : // Creating image layers may have caused some previously visible layers to be covered
4395 : if !image_layers.is_empty() {
4396 : self.update_layer_visibility().await?;
4397 : }
4398 :
4399 : Ok(image_layers)
4400 : }
4401 :
4402 : /// Wait until the background initial logical size calculation is complete, or
4403 : /// this Timeline is shut down. Calling this function will cause the initial
4404 : /// logical size calculation to skip waiting for the background jobs barrier.
4405 0 : pub(crate) async fn await_initial_logical_size(self: Arc<Self>) {
4406 0 : if !self.shard_identity.is_shard_zero() {
4407 : // We don't populate logical size on shard >0: skip waiting for it.
4408 0 : return;
4409 0 : }
4410 0 :
4411 0 : if self.remote_client.is_deleting() {
4412 : // The timeline was created in a deletion-resume state, we don't expect logical size to be populated
4413 0 : return;
4414 0 : }
4415 0 :
4416 0 : if self.current_logical_size.current_size().is_exact() {
4417 : // root timelines are initialized with an exact count, but never start the background
4418 : // calculation
4419 0 : return;
4420 0 : }
4421 :
4422 0 : if let Some(await_bg_cancel) = self
4423 0 : .current_logical_size
4424 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
4425 0 : .get()
4426 0 : {
4427 0 : await_bg_cancel.cancel();
4428 0 : } else {
4429 : // We should not wait if we were not able to explicitly instruct
4430 : // the logical size cancellation to skip the concurrency limit semaphore.
4431 : // TODO: this is an unexpected case. We should restructure so that it
4432 : // can't happen.
4433 0 : tracing::warn!(
4434 0 : "await_initial_logical_size: can't get semaphore cancel token, skipping"
4435 : );
4436 0 : debug_assert!(false);
4437 : }
4438 :
4439 0 : tokio::select!(
4440 0 : _ = self.current_logical_size.initialized.acquire() => {},
4441 0 : _ = self.cancel.cancelled() => {}
4442 : )
4443 0 : }
4444 :
4445 : /// Detach this timeline from its ancestor by copying all of the ancestor's layers as this
4446 : /// Timeline's layers, up to the ancestor_lsn.
4447 : ///
4448 : /// Requires a timeline that:
4449 : /// - has an ancestor to detach from
4450 : /// - the ancestor does not have an ancestor -- follows from the original RFC limitations, not
4451 : /// a technical requirement
4452 : ///
4453 : /// After the operation has been started, it cannot be canceled. Upon restart it needs to be
4454 : /// polled again until completion.
4455 : ///
4456 : /// During the operation all timelines sharing the data with this timeline will be reparented
4457 : /// from our ancestor to be branches of this timeline.
4458 0 : pub(crate) async fn prepare_to_detach_from_ancestor(
4459 0 : self: &Arc<Timeline>,
4460 0 : tenant: &crate::tenant::Tenant,
4461 0 : options: detach_ancestor::Options,
4462 0 : ctx: &RequestContext,
4463 0 : ) -> Result<detach_ancestor::Progress, detach_ancestor::Error> {
4464 0 : detach_ancestor::prepare(self, tenant, options, ctx).await
4465 0 : }
4466 :
4467 : /// Second step of detach from ancestor; detaches `self` from its current ancestor and
4468 : /// reparents any reparentable children of the previous ancestor.
4469 : ///
4470 : /// This method is to be called while holding the TenantManager's tenant slot, so during this
4471 : /// method we cannot be deleted nor can any timeline be deleted. After this method returns
4472 : /// successfully, tenant must be reloaded.
4473 : ///
4474 : /// The final step is to call [`Self::complete_detaching_timeline_ancestor`] after optionally
4475 : /// resetting the tenant.
4476 0 : pub(crate) async fn detach_from_ancestor_and_reparent(
4477 0 : self: &Arc<Timeline>,
4478 0 : tenant: &crate::tenant::Tenant,
4479 0 : prepared: detach_ancestor::PreparedTimelineDetach,
4480 0 : ctx: &RequestContext,
4481 0 : ) -> Result<detach_ancestor::DetachingAndReparenting, detach_ancestor::Error> {
4482 0 : detach_ancestor::detach_and_reparent(self, tenant, prepared, ctx).await
4483 0 : }
4484 :
4485 : /// Final step which unblocks the GC.
4486 : ///
4487 : /// The tenant must have been reset if ancestry was modified previously (in the tenant manager).
4488 0 : pub(crate) async fn complete_detaching_timeline_ancestor(
4489 0 : self: &Arc<Timeline>,
4490 0 : tenant: &crate::tenant::Tenant,
4491 0 : attempt: detach_ancestor::Attempt,
4492 0 : ctx: &RequestContext,
4493 0 : ) -> Result<(), detach_ancestor::Error> {
4494 0 : detach_ancestor::complete(self, tenant, attempt, ctx).await
4495 0 : }
4496 : }
4497 :
4498 : impl Drop for Timeline {
4499 8 : fn drop(&mut self) {
4500 8 : if let Some(ancestor) = &self.ancestor_timeline {
4501 : // This lock should never be poisoned, but in case it is we do a .map() instead of
4502 : // an unwrap(), to avoid panicking in a destructor and thereby aborting the process.
4503 2 : if let Ok(mut gc_info) = ancestor.gc_info.write() {
4504 2 : gc_info.remove_child(self.timeline_id)
4505 0 : }
4506 6 : }
4507 8 : }
4508 : }
4509 :
4510 : /// Top-level failure to compact.
4511 0 : #[derive(Debug, thiserror::Error)]
4512 : pub(crate) enum CompactionError {
4513 : #[error("The timeline or pageserver is shutting down")]
4514 : ShuttingDown,
4515 : /// Compaction tried to offload a timeline and failed
4516 : #[error("Failed to offload timeline: {0}")]
4517 : Offload(OffloadError),
4518 : /// Compaction cannot be done right now; page reconstruction and so on.
4519 : #[error(transparent)]
4520 : Other(anyhow::Error),
4521 : }
4522 :
4523 : impl From<OffloadError> for CompactionError {
4524 0 : fn from(e: OffloadError) -> Self {
4525 0 : match e {
4526 0 : OffloadError::Cancelled => Self::ShuttingDown,
4527 0 : _ => Self::Offload(e),
4528 : }
4529 0 : }
4530 : }
4531 :
4532 : impl CompactionError {
4533 0 : pub fn is_cancelled(&self) -> bool {
4534 0 : matches!(self, CompactionError::ShuttingDown)
4535 0 : }
4536 : }
4537 :
4538 : impl From<CollectKeySpaceError> for CompactionError {
4539 0 : fn from(err: CollectKeySpaceError) -> Self {
4540 0 : match err {
4541 : CollectKeySpaceError::Cancelled
4542 : | CollectKeySpaceError::PageRead(PageReconstructError::Cancelled) => {
4543 0 : CompactionError::ShuttingDown
4544 : }
4545 0 : e => CompactionError::Other(e.into()),
4546 : }
4547 0 : }
4548 : }
4549 :
4550 : impl From<super::upload_queue::NotInitialized> for CompactionError {
4551 0 : fn from(value: super::upload_queue::NotInitialized) -> Self {
4552 0 : match value {
4553 : super::upload_queue::NotInitialized::Uninitialized => {
4554 0 : CompactionError::Other(anyhow::anyhow!(value))
4555 : }
4556 : super::upload_queue::NotInitialized::ShuttingDown
4557 0 : | super::upload_queue::NotInitialized::Stopped => CompactionError::ShuttingDown,
4558 : }
4559 0 : }
4560 : }
4561 :
4562 : impl From<super::storage_layer::layer::DownloadError> for CompactionError {
4563 0 : fn from(e: super::storage_layer::layer::DownloadError) -> Self {
4564 0 : match e {
4565 : super::storage_layer::layer::DownloadError::TimelineShutdown
4566 : | super::storage_layer::layer::DownloadError::DownloadCancelled => {
4567 0 : CompactionError::ShuttingDown
4568 : }
4569 : super::storage_layer::layer::DownloadError::ContextAndConfigReallyDeniesDownloads
4570 : | super::storage_layer::layer::DownloadError::DownloadRequired
4571 : | super::storage_layer::layer::DownloadError::NotFile(_)
4572 : | super::storage_layer::layer::DownloadError::DownloadFailed
4573 : | super::storage_layer::layer::DownloadError::PreStatFailed(_) => {
4574 0 : CompactionError::Other(anyhow::anyhow!(e))
4575 : }
4576 : #[cfg(test)]
4577 : super::storage_layer::layer::DownloadError::Failpoint(_) => {
4578 0 : CompactionError::Other(anyhow::anyhow!(e))
4579 : }
4580 : }
4581 0 : }
4582 : }
4583 :
4584 : impl From<layer_manager::Shutdown> for CompactionError {
4585 0 : fn from(_: layer_manager::Shutdown) -> Self {
4586 0 : CompactionError::ShuttingDown
4587 0 : }
4588 : }
4589 :
4590 : #[serde_as]
4591 196 : #[derive(serde::Serialize)]
4592 : struct RecordedDuration(#[serde_as(as = "serde_with::DurationMicroSeconds")] Duration);
4593 :
4594 : #[derive(Default)]
4595 : enum DurationRecorder {
4596 : #[default]
4597 : NotStarted,
4598 : Recorded(RecordedDuration, tokio::time::Instant),
4599 : }
4600 :
4601 : impl DurationRecorder {
4602 504 : fn till_now(&self) -> DurationRecorder {
4603 504 : match self {
4604 : DurationRecorder::NotStarted => {
4605 0 : panic!("must only call on recorded measurements")
4606 : }
4607 504 : DurationRecorder::Recorded(_, ended) => {
4608 504 : let now = tokio::time::Instant::now();
4609 504 : DurationRecorder::Recorded(RecordedDuration(now - *ended), now)
4610 504 : }
4611 504 : }
4612 504 : }
4613 196 : fn into_recorded(self) -> Option<RecordedDuration> {
4614 196 : match self {
4615 0 : DurationRecorder::NotStarted => None,
4616 196 : DurationRecorder::Recorded(recorded, _) => Some(recorded),
4617 : }
4618 196 : }
4619 : }
4620 :
4621 : /// Descriptor for a delta layer used in testing infra. The start/end key/lsn range of the
4622 : /// delta layer might be different from the min/max key/lsn in the delta layer. Therefore,
4623 : /// the layer descriptor requires the user to provide the ranges, which should cover all
4624 : /// keys specified in the `data` field.
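     : ///
     : /// A construction sketch (hypothetical `key`/`value` bindings and LSNs):
     : ///
     : ///     DeltaLayerTestDesc::new_with_inferred_key_range(
     : ///         Lsn(0x10)..Lsn(0x20),
     : ///         vec![(key, Lsn(0x10), value)],
     : ///     )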
4625 : #[cfg(test)]
4626 : #[derive(Clone)]
4627 : pub struct DeltaLayerTestDesc {
4628 : pub lsn_range: Range<Lsn>,
4629 : pub key_range: Range<Key>,
4630 : pub data: Vec<(Key, Lsn, Value)>,
4631 : }
4632 :
4633 : #[cfg(test)]
4634 : impl DeltaLayerTestDesc {
4635 2 : pub fn new(lsn_range: Range<Lsn>, key_range: Range<Key>, data: Vec<(Key, Lsn, Value)>) -> Self {
4636 2 : Self {
4637 2 : lsn_range,
4638 2 : key_range,
4639 2 : data,
4640 2 : }
4641 2 : }
4642 :
4643 72 : pub fn new_with_inferred_key_range(
4644 72 : lsn_range: Range<Lsn>,
4645 72 : data: Vec<(Key, Lsn, Value)>,
4646 72 : ) -> Self {
4647 196 : let key_min = data.iter().map(|(key, _, _)| key).min().unwrap();
4648 196 : let key_max = data.iter().map(|(key, _, _)| key).max().unwrap();
4649 72 : Self {
4650 72 : key_range: (*key_min)..(key_max.next()),
4651 72 : lsn_range,
4652 72 : data,
4653 72 : }
4654 72 : }
4655 :
4656 10 : pub(crate) fn layer_name(&self) -> LayerName {
4657 10 : LayerName::Delta(super::storage_layer::DeltaLayerName {
4658 10 : key_range: self.key_range.clone(),
4659 10 : lsn_range: self.lsn_range.clone(),
4660 10 : })
4661 10 : }
4662 : }
4663 :
4664 : impl Timeline {
4665 28 : async fn finish_compact_batch(
4666 28 : self: &Arc<Self>,
4667 28 : new_deltas: &[ResidentLayer],
4668 28 : new_images: &[ResidentLayer],
4669 28 : layers_to_remove: &[Layer],
4670 28 : ) -> Result<(), CompactionError> {
4671 28 : let mut guard = tokio::select! {
4672 28 : guard = self.layers.write() => guard,
4673 28 : _ = self.cancel.cancelled() => {
4674 0 : return Err(CompactionError::ShuttingDown);
4675 : }
4676 : };
4677 :
4678 28 : let mut duplicated_layers = HashSet::new();
4679 28 :
4680 28 : let mut insert_layers = Vec::with_capacity(new_deltas.len());
4681 :
4682 336 : for l in new_deltas {
4683 308 : if guard.contains(l.as_ref()) {
4684 : // expected in tests
4685 0 : tracing::error!(layer=%l, "duplicated L1 layer");
4686 :
4687 : // good ways to cause a duplicate: we repeatedly error after taking the write lock
4688 : // `guard` on self.layers. as of writing this, there are no error returns except
4689 : // for compact_level0_phase1 creating an L0, which does not happen in practice
4690 : // because we have not implemented L0 => L0 compaction.
4691 0 : duplicated_layers.insert(l.layer_desc().key());
4692 308 : } else if LayerMap::is_l0(&l.layer_desc().key_range, l.layer_desc().is_delta) {
4693 0 : return Err(CompactionError::Other(anyhow::anyhow!("compaction generates a L0 layer file as output, which will cause infinite compaction.")));
4694 308 : } else {
4695 308 : insert_layers.push(l.clone());
4696 308 : }
4697 : }
4698 :
4699 : // only remove those inputs which were not outputs
4700 28 : let remove_layers: Vec<Layer> = layers_to_remove
4701 28 : .iter()
4702 402 : .filter(|l| !duplicated_layers.contains(&l.layer_desc().key()))
4703 28 : .cloned()
4704 28 : .collect();
4705 28 :
4706 28 : if !new_images.is_empty() {
4707 0 : guard
4708 0 : .open_mut()?
4709 0 : .track_new_image_layers(new_images, &self.metrics);
4710 28 : }
4711 :
4712 28 : guard
4713 28 : .open_mut()?
4714 28 : .finish_compact_l0(&remove_layers, &insert_layers, &self.metrics);
4715 28 :
4716 28 : self.remote_client
4717 28 : .schedule_compaction_update(&remove_layers, new_deltas)?;
4718 :
4719 28 : drop_wlock(guard);
4720 28 :
4721 28 : Ok(())
4722 28 : }
4723 :
4724 0 : async fn rewrite_layers(
4725 0 : self: &Arc<Self>,
4726 0 : mut replace_layers: Vec<(Layer, ResidentLayer)>,
4727 0 : mut drop_layers: Vec<Layer>,
4728 0 : ) -> Result<(), CompactionError> {
4729 0 : let mut guard = self.layers.write().await;
4730 :
4731 : // Trim our lists in case our caller (compaction) raced with someone else (GC) removing layers: we want
4732 : // to avoid double-removing, and avoid rewriting something that was removed.
4733 0 : replace_layers.retain(|(l, _)| guard.contains(l));
4734 0 : drop_layers.retain(|l| guard.contains(l));
4735 0 :
4736 0 : guard
4737 0 : .open_mut()?
4738 0 : .rewrite_layers(&replace_layers, &drop_layers, &self.metrics);
4739 0 :
4740 0 : let upload_layers: Vec<_> = replace_layers.into_iter().map(|r| r.1).collect();
4741 0 :
4742 0 : self.remote_client
4743 0 : .schedule_compaction_update(&drop_layers, &upload_layers)?;
4744 :
4745 0 : Ok(())
4746 0 : }
4747 :
4748 : /// Schedules the uploads of the given image layers
4749 364 : fn upload_new_image_layers(
4750 364 : self: &Arc<Self>,
4751 364 : new_images: impl IntoIterator<Item = ResidentLayer>,
4752 364 : ) -> Result<(), super::upload_queue::NotInitialized> {
4753 390 : for layer in new_images {
4754 26 : self.remote_client.schedule_layer_file_upload(layer)?;
4755 : }
4756 : // should any new image layer have been created, not uploading index_part will
4757 : // result in a mismatch between remote_physical_size and the layermap-calculated
4758 : // size, which will fail some tests, but should not be an issue otherwise.
4759 364 : self.remote_client
4760 364 : .schedule_index_upload_for_file_changes()?;
4761 364 : Ok(())
4762 364 : }
4763 :
4764 : /// Find the Lsns above which layer files need to be retained on
4765 : /// garbage collection.
4766 : ///
4767 : /// We calculate two cutoffs, one based on time and one based on WAL size. `pitr`
4768 : /// controls the time cutoff (or ZERO to disable time-based retention), and `space_cutoff` controls
4769 : /// the space-based retention.
4770 : ///
4771 : /// This function doesn't simply calculate time- & space-based retention: it treats time-based
4772 : /// retention as authoritative if enabled, and falls back to space-based retention if calculating
4773 : /// the LSN for a time point isn't possible. Therefore the GcCutoffs::horizon in the response might
4774 : /// be different from the `space_cutoff` input. Callers should treat the min() of the two cutoffs
4775 : /// in the response as the GC cutoff point for the timeline.
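     : ///
     : /// In summary, mirroring the match at the end of this function:
     : /// - PITR unset, time lookup succeeded: time = last_record_lsn, space = max(time_cutoff, space_cutoff)
     : /// - PITR unset, time lookup failed: time = last_record_lsn, space = space_cutoff
     : /// - PITR set, time lookup failed: time = latest_gc_cutoff, space = space_cutoff
     : /// - PITR set, time lookup succeeded: time = space = time_cutoff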
4776 4 : #[instrument(skip_all, fields(timeline_id=%self.timeline_id))]
4777 : pub(super) async fn find_gc_cutoffs(
4778 : &self,
4779 : space_cutoff: Lsn,
4780 : pitr: Duration,
4781 : cancel: &CancellationToken,
4782 : ctx: &RequestContext,
4783 : ) -> Result<GcCutoffs, PageReconstructError> {
4784 : let _timer = self
4785 : .metrics
4786 : .find_gc_cutoffs_histo
4787 : .start_timer()
4788 : .record_on_drop();
4789 :
4790 : pausable_failpoint!("Timeline::find_gc_cutoffs-pausable");
4791 :
4792 : if cfg!(test) {
4793 : // Unit tests which specify zero PITR interval expect to avoid doing any I/O for timestamp lookup
4794 : if pitr == Duration::ZERO {
4795 : return Ok(GcCutoffs {
4796 : time: self.get_last_record_lsn(),
4797 : space: space_cutoff,
4798 : });
4799 : }
4800 : }
4801 :
4802 : // Calculate a time-based limit on how much to retain:
4803 : // - if PITR interval is set, then this is our cutoff.
4804 : // - if PITR interval is not set, then we do a lookup
4805 : // based on DEFAULT_PITR_INTERVAL, so that size-based retention does not result in keeping history around permanently on idle databases.
4806 : let time_cutoff = {
4807 : let now = SystemTime::now();
4808 : let time_range = if pitr == Duration::ZERO {
4809 : humantime::parse_duration(DEFAULT_PITR_INTERVAL).expect("constant is invalid")
4810 : } else {
4811 : pitr
4812 : };
4813 :
4814 : // If PITR is so large or `now` is so small that this underflows, we will retain no history (highly unexpected case)
4815 : let time_cutoff = now.checked_sub(time_range).unwrap_or(now);
4816 : let timestamp = to_pg_timestamp(time_cutoff);
4817 :
4818 : match self.find_lsn_for_timestamp(timestamp, cancel, ctx).await? {
4819 : LsnForTimestamp::Present(lsn) => Some(lsn),
4820 : LsnForTimestamp::Future(lsn) => {
4821 : // The timestamp is in the future. That sounds impossible,
4822 : // but what it really means is that there haven't been
4823 : // any commits since the cutoff timestamp.
4824 : //
4825 : // In this case we should use the LSN of the most recent commit,
4826 : // which is implicitly the last LSN in the log.
4827 : debug!("future({})", lsn);
4828 : Some(self.get_last_record_lsn())
4829 : }
4830 : LsnForTimestamp::Past(lsn) => {
4831 : debug!("past({})", lsn);
4832 : None
4833 : }
4834 : LsnForTimestamp::NoData(lsn) => {
4835 : debug!("nodata({})", lsn);
4836 : None
4837 : }
4838 : }
4839 : };
4840 :
4841 : Ok(match (pitr, time_cutoff) {
4842 : (Duration::ZERO, Some(time_cutoff)) => {
4843 : // PITR is not set. Retain the size-based limit, or the default time retention,
4844 : // whichever requires less data.
4845 : GcCutoffs {
4846 : time: self.get_last_record_lsn(),
4847 : space: std::cmp::max(time_cutoff, space_cutoff),
4848 : }
4849 : }
4850 : (Duration::ZERO, None) => {
4851 : // PITR is not set, and time lookup failed
4852 : GcCutoffs {
4853 : time: self.get_last_record_lsn(),
4854 : space: space_cutoff,
4855 : }
4856 : }
4857 : (_, None) => {
4858 : // PITR interval is set & we didn't look up a timestamp successfully. Conservatively assume PITR
4859 : // cannot advance beyond what was already GC'd, and respect space-based retention
4860 : GcCutoffs {
4861 : time: *self.get_latest_gc_cutoff_lsn(),
4862 : space: space_cutoff,
4863 : }
4864 : }
4865 : (_, Some(time_cutoff)) => {
4866 : // PITR interval is set and we looked up the timestamp successfully. Ignore
4867 : // size-based retention and make the time cutoff authoritative
4868 : GcCutoffs {
4869 : time: time_cutoff,
4870 : space: time_cutoff,
4871 : }
4872 : }
4873 : })
4874 : }
4875 :
4876 : /// Garbage collect layer files on a timeline that are no longer needed.
4877 : ///
4878 : /// Currently, we don't make any attempt at removing unneeded page versions
4879 : /// within a layer file. We can only remove the whole file if it's fully
4880 : /// obsolete.
4881 4 : pub(super) async fn gc(&self) -> Result<GcResult, GcError> {
4882 : // this is most likely the background task, but it might be the task spawned from
4883 : // immediate_gc
4884 4 : let _g = tokio::select! {
4885 4 : guard = self.gc_lock.lock() => guard,
4886 4 : _ = self.cancel.cancelled() => return Ok(GcResult::default()),
4887 : };
4888 4 : let timer = self.metrics.garbage_collect_histo.start_timer();
4889 4 :
4890 4 : fail_point!("before-timeline-gc");
4891 4 :
4892 4 : // Is the timeline being deleted?
4893 4 : if self.is_stopping() {
4894 0 : return Err(GcError::TimelineCancelled);
4895 4 : }
4896 4 :
4897 4 : let (space_cutoff, time_cutoff, retain_lsns, max_lsn_with_valid_lease) = {
4898 4 : let gc_info = self.gc_info.read().unwrap();
4899 4 :
4900 4 : let space_cutoff = min(gc_info.cutoffs.space, self.get_disk_consistent_lsn());
4901 4 : let time_cutoff = gc_info.cutoffs.time;
4902 4 : let retain_lsns = gc_info
4903 4 : .retain_lsns
4904 4 : .iter()
4905 4 : .map(|(lsn, _child_id, _is_offloaded)| *lsn)
4906 4 : .collect();
4907 4 :
4908 4 : // Gets the maximum LSN that holds a valid lease.
4909 4 : //
4910 4 : // Caveat: `refresh_gc_info` is in charge of updating the lease map.
4911 4 : // Here, we do not check for stale leases again.
4912 4 : let max_lsn_with_valid_lease = gc_info.leases.last_key_value().map(|(lsn, _)| *lsn);
4913 4 :
4914 4 : (
4915 4 : space_cutoff,
4916 4 : time_cutoff,
4917 4 : retain_lsns,
4918 4 : max_lsn_with_valid_lease,
4919 4 : )
4920 4 : };
4921 4 :
4922 4 : let mut new_gc_cutoff = Lsn::min(space_cutoff, time_cutoff);
4923 4 : let standby_horizon = self.standby_horizon.load();
4924 4 : // Hold GC for the standby, but as a safety guard do it only within some
4925 4 : // reasonable lag.
4926 4 : if standby_horizon != Lsn::INVALID {
4927 0 : if let Some(standby_lag) = new_gc_cutoff.checked_sub(standby_horizon) {
4928 : const MAX_ALLOWED_STANDBY_LAG: u64 = 10u64 << 30; // 10 GB
4929 0 : if standby_lag.0 < MAX_ALLOWED_STANDBY_LAG {
4930 0 : new_gc_cutoff = Lsn::min(standby_horizon, new_gc_cutoff);
4931 0 : trace!("holding off GC for standby apply LSN {}", standby_horizon);
4932 : } else {
4933 0 : warn!(
4934 0 : "standby is lagging for more than {}MB, not holding gc for it",
4935 0 : MAX_ALLOWED_STANDBY_LAG / 1024 / 1024
4936 : )
4937 : }
4938 0 : }
4939 4 : }
4940 :
4941 : // Reset the standby horizon so that it is ignored if it is not updated before the next GC.
4942 : // It is an easy way to unset it when the standby disappears, without adding
4943 : // more conf options.
4944 4 : self.standby_horizon.store(Lsn::INVALID);
4945 4 : self.metrics
4946 4 : .standby_horizon_gauge
4947 4 : .set(Lsn::INVALID.0 as i64);
4948 :
4949 4 : let res = self
4950 4 : .gc_timeline(
4951 4 : space_cutoff,
4952 4 : time_cutoff,
4953 4 : retain_lsns,
4954 4 : max_lsn_with_valid_lease,
4955 4 : new_gc_cutoff,
4956 4 : )
4957 4 : .instrument(
4958 4 : info_span!("gc_timeline", timeline_id = %self.timeline_id, cutoff = %new_gc_cutoff),
4959 : )
4960 0 : .await?;
4961 :
4962 : // only record successes
4963 4 : timer.stop_and_record();
4964 4 :
4965 4 : Ok(res)
4966 4 : }
4967 :
4968 4 : async fn gc_timeline(
4969 4 : &self,
4970 4 : space_cutoff: Lsn,
4971 4 : time_cutoff: Lsn,
4972 4 : retain_lsns: Vec<Lsn>,
4973 4 : max_lsn_with_valid_lease: Option<Lsn>,
4974 4 : new_gc_cutoff: Lsn,
4975 4 : ) -> Result<GcResult, GcError> {
4976 4 : // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc
4977 4 :
4978 4 : let now = SystemTime::now();
4979 4 : let mut result: GcResult = GcResult::default();
4980 4 :
4981 4 : // Nothing to GC. Return early.
4982 4 : let latest_gc_cutoff = *self.get_latest_gc_cutoff_lsn();
4983 4 : if latest_gc_cutoff >= new_gc_cutoff {
4984 0 : info!(
4985 0 : "Nothing to GC: new_gc_cutoff_lsn {new_gc_cutoff}, latest_gc_cutoff_lsn {latest_gc_cutoff}",
4986 : );
4987 0 : return Ok(result);
4988 4 : }
4989 :
4990 : // We need to ensure that no one tries to read page versions or create
4991 : // branches at a point before latest_gc_cutoff_lsn. See branch_timeline()
4992 : // for details. This will block until the old value is no longer in use.
4993 : //
4994 : // The GC cutoff should only ever move forwards.
4995 4 : let waitlist = {
4996 4 : let write_guard = self.latest_gc_cutoff_lsn.lock_for_write();
4997 4 : if *write_guard > new_gc_cutoff {
4998 0 : return Err(GcError::BadLsn {
4999 0 : why: format!(
5000 0 : "Cannot move GC cutoff LSN backwards (was {}, new {})",
5001 0 : *write_guard, new_gc_cutoff
5002 0 : ),
5003 0 : });
5004 4 : }
5005 4 :
5006 4 : write_guard.store_and_unlock(new_gc_cutoff)
5007 4 : };
5008 4 : waitlist.wait().await;
5009 :
5010 4 : info!("GC starting");
5011 :
5012 4 : debug!("retain_lsns: {:?}", retain_lsns);
5013 :
5014 4 : let mut layers_to_remove = Vec::new();
5015 :
5016 : // Scan all layers in the timeline (remote or on-disk).
5017 : //
5018 : // Garbage collect the layer if all conditions are satisfied:
5019 : // 1. it is older than cutoff LSN;
5020 : // 2. it is older than PITR interval;
5021 : // 3. it doesn't need to be retained for 'retain_lsns';
5022 : // 4. it does not need to be kept for LSNs holding valid leases.
5023 : // 5. newer on-disk image layers cover the layer's whole key range
5024 : //
5025 : // TODO holding a write lock is too aggressive and avoidable
5026 4 : let mut guard = self.layers.write().await;
5027 4 : let layers = guard.layer_map()?;
5028 24 : 'outer: for l in layers.iter_historic_layers() {
5029 24 : result.layers_total += 1;
5030 24 :
5031 24 : // 1. Is it newer than GC horizon cutoff point?
5032 24 : if l.get_lsn_range().end > space_cutoff {
5033 2 : debug!(
5034 0 : "keeping {} because it's newer than space_cutoff {}",
5035 0 : l.layer_name(),
5036 : space_cutoff,
5037 : );
5038 2 : result.layers_needed_by_cutoff += 1;
5039 2 : continue 'outer;
5040 22 : }
5041 22 :
5042 22 : // 2. Is it newer than the PiTR cutoff point?
5043 22 : if l.get_lsn_range().end > time_cutoff {
5044 0 : debug!(
5045 0 : "keeping {} because it's newer than time_cutoff {}",
5046 0 : l.layer_name(),
5047 : time_cutoff,
5048 : );
5049 0 : result.layers_needed_by_pitr += 1;
5050 0 : continue 'outer;
5051 22 : }
5052 :
5053 : // 3. Is it needed by a child branch?
5054 : // NOTE: with this check we would keep data that
5055 : // might be referenced by child branches forever.
5056 : // We could track this in child timeline GC and delete parent layers when
5057 : // they are no longer needed. This might be complicated with long inheritance chains.
5058 : //
5059 : // TODO Vec is not a great choice for `retain_lsns`
5060 22 : for retain_lsn in &retain_lsns {
5061 : // start_lsn is inclusive
5062 0 : if &l.get_lsn_range().start <= retain_lsn {
5063 0 : debug!(
5064 0 : "keeping {} because it's still might be referenced by child branch forked at {} is_dropped: xx is_incremental: {}",
5065 0 : l.layer_name(),
5066 0 : retain_lsn,
5067 0 : l.is_incremental(),
5068 : );
5069 0 : result.layers_needed_by_branches += 1;
5070 0 : continue 'outer;
5071 0 : }
5072 : }
5073 :
5074 : // 4. Is there a valid lease that requires us to keep this layer?
5075 22 : if let Some(lsn) = &max_lsn_with_valid_lease {
5076 : // keep if layer start <= any of the lease
5077 18 : if &l.get_lsn_range().start <= lsn {
5078 14 : debug!(
5079 0 : "keeping {} because there is a valid lease preventing GC at {}",
5080 0 : l.layer_name(),
5081 : lsn,
5082 : );
5083 14 : result.layers_needed_by_leases += 1;
5084 14 : continue 'outer;
5085 4 : }
5086 4 : }
5087 :
5088 : // 5. Is there a later on-disk layer for this relation?
5089 : //
5090 : // The end-LSN is exclusive, while disk_consistent_lsn is
5091 : // inclusive. For example, if disk_consistent_lsn is 100, it is
5092 : // OK for a delta layer to have end LSN 101, but if the end LSN
5093 : // is 102, then it might not have been fully flushed to disk
5094 : // before crash.
5095 : //
5096 : // For example, imagine that the following layers exist:
5097 : //
5098 : // 1000 - image (A)
5099 : // 1000-2000 - delta (B)
5100 : // 2000 - image (C)
5101 : // 2000-3000 - delta (D)
5102 : // 3000 - image (E)
5103 : //
5104 : // If GC horizon is at 2500, we can remove layers A and B, but
5105 : // we cannot remove C, even though it's older than 2500, because
5106 : // the delta layer 2000-3000 depends on it.
5107 8 : if !layers
5108 8 : .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff))
5109 : {
5110 6 : debug!("keeping {} because it is the latest layer", l.layer_name());
5111 6 : result.layers_not_updated += 1;
5112 6 : continue 'outer;
5113 2 : }
5114 2 :
5115 2 : // We didn't find any reason to keep this file, so remove it.
5116 2 : debug!(
5117 0 : "garbage collecting {} is_dropped: xx is_incremental: {}",
5118 0 : l.layer_name(),
5119 0 : l.is_incremental(),
5120 : );
5121 2 : layers_to_remove.push(l);
5122 : }
5123 :
5124 4 : if !layers_to_remove.is_empty() {
5125 : // Persist the new GC cutoff value before we actually remove anything.
5126 : // This unconditionally also schedules an index_part.json update, even though we will
5127 : // be doing one a bit later with the unlinked gc'd layers.
5128 2 : let disk_consistent_lsn = self.disk_consistent_lsn.load();
5129 2 : self.schedule_uploads(disk_consistent_lsn, None)
5130 2 : .map_err(|e| {
5131 0 : if self.cancel.is_cancelled() {
5132 0 : GcError::TimelineCancelled
5133 : } else {
5134 0 : GcError::Remote(e)
5135 : }
5136 2 : })?;
5137 :
5138 2 : let gc_layers = layers_to_remove
5139 2 : .iter()
5140 2 : .map(|x| guard.get_from_desc(x))
5141 2 : .collect::<Vec<Layer>>();
5142 2 :
5143 2 : result.layers_removed = gc_layers.len() as u64;
5144 2 :
5145 2 : self.remote_client.schedule_gc_update(&gc_layers)?;
5146 :
5147 2 : guard.open_mut()?.finish_gc_timeline(&gc_layers);
5148 2 :
5149 2 : #[cfg(feature = "testing")]
5150 2 : {
5151 2 : result.doomed_layers = gc_layers;
5152 2 : }
5153 2 : }
5154 :
5155 4 : info!(
5156 0 : "GC completed removing {} layers, cutoff {}",
5157 : result.layers_removed, new_gc_cutoff
5158 : );
5159 :
5160 4 : result.elapsed = now.elapsed().unwrap_or(Duration::ZERO);
5161 4 : Ok(result)
5162 4 : }
5163 :
5164 : /// Reconstruct a value, using the given base image and WAL records in 'data'.
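     : ///
     : /// If `data` carries no WAL records, the base image is returned as-is;
     : /// otherwise the records are applied on top of the (optional) base image by
     : /// the walredo manager, oldest record first.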
5165 667410 : async fn reconstruct_value(
5166 667410 : &self,
5167 667410 : key: Key,
5168 667410 : request_lsn: Lsn,
5169 667410 : mut data: ValueReconstructState,
5170 667410 : ) -> Result<Bytes, PageReconstructError> {
5171 667410 : // Perform WAL redo if needed
5172 667410 : data.records.reverse();
5173 667410 :
5174 667410 : // If we have a page image, and no WAL, we're all set
5175 667410 : if data.records.is_empty() {
5176 667000 : if let Some((img_lsn, img)) = &data.img {
5177 667000 : trace!(
5178 0 : "found page image for key {} at {}, no WAL redo required, req LSN {}",
5179 : key,
5180 : img_lsn,
5181 : request_lsn,
5182 : );
5183 667000 : Ok(img.clone())
5184 : } else {
5185 0 : Err(PageReconstructError::from(anyhow!(
5186 0 : "base image for {key} at {request_lsn} not found"
5187 0 : )))
5188 : }
5189 : } else {
5190 : // We need to do WAL redo.
5191 : //
5192 : // If we don't have a base image, then the oldest WAL record had better initialize
5193 : // the page.
5194 410 : if data.img.is_none() && !data.records.first().unwrap().1.will_init() {
5195 0 : Err(PageReconstructError::from(anyhow!(
5196 0 : "Base image for {} at {} not found, but got {} WAL records",
5197 0 : key,
5198 0 : request_lsn,
5199 0 : data.records.len()
5200 0 : )))
5201 : } else {
5202 410 : if data.img.is_some() {
5203 344 : trace!(
5204 0 : "found {} WAL records and a base image for {} at {}, performing WAL redo",
5205 0 : data.records.len(),
5206 : key,
5207 : request_lsn
5208 : );
5209 : } else {
5210 66 : trace!("found {} WAL records that will init the page for {} at {}, performing WAL redo", data.records.len(), key, request_lsn);
5211 : };
5212 410 : let res = self
5213 410 : .walredo_mgr
5214 410 : .as_ref()
5215 410 : .context("timeline has no walredo manager")
5216 410 : .map_err(PageReconstructError::WalRedo)?
5217 410 : .request_redo(key, request_lsn, data.img, data.records, self.pg_version)
5218 0 : .await;
5219 410 : let img = match res {
5220 410 : Ok(img) => img,
5221 0 : Err(walredo::Error::Cancelled) => return Err(PageReconstructError::Cancelled),
5222 0 : Err(walredo::Error::Other(e)) => {
5223 0 : return Err(PageReconstructError::WalRedo(
5224 0 : e.context("reconstruct a page image"),
5225 0 : ))
5226 : }
5227 : };
5228 410 : Ok(img)
5229 : }
5230 : }
5231 667410 : }
5232 :
5233 0 : pub(crate) async fn spawn_download_all_remote_layers(
5234 0 : self: Arc<Self>,
5235 0 : request: DownloadRemoteLayersTaskSpawnRequest,
5236 0 : ) -> Result<DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskInfo> {
5237 : use pageserver_api::models::DownloadRemoteLayersTaskState;
5238 :
5239 : // this is not really needed anymore; it only exists because tests check the return value from
5240 : // the http api. it would be better not to maintain this anymore.
5241 :
5242 0 : let mut status_guard = self.download_all_remote_layers_task_info.write().unwrap();
5243 0 : if let Some(st) = &*status_guard {
5244 0 : match &st.state {
5245 : DownloadRemoteLayersTaskState::Running => {
5246 0 : return Err(st.clone());
5247 : }
5248 : DownloadRemoteLayersTaskState::ShutDown
5249 0 : | DownloadRemoteLayersTaskState::Completed => {
5250 0 : *status_guard = None;
5251 0 : }
5252 : }
5253 0 : }
5254 :
5255 0 : let self_clone = Arc::clone(&self);
5256 0 : let task_id = task_mgr::spawn(
5257 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
5258 0 : task_mgr::TaskKind::DownloadAllRemoteLayers,
5259 0 : self.tenant_shard_id,
5260 0 : Some(self.timeline_id),
5261 0 : "download all remote layers task",
5262 0 : async move {
5263 0 : self_clone.download_all_remote_layers(request).await;
5264 0 : let mut status_guard = self_clone.download_all_remote_layers_task_info.write().unwrap();
5265 0 : match &mut *status_guard {
5266 : None => {
5267 0 : warn!("tasks status is supposed to be Some(), since we are running");
5268 : }
5269 0 : Some(st) => {
5270 0 : let exp_task_id = format!("{}", task_mgr::current_task_id().unwrap());
5271 0 : if st.task_id != exp_task_id {
5272 0 : warn!("task id changed while we were still running, expecting {} but have {}", exp_task_id, st.task_id);
5273 0 : } else {
5274 0 : st.state = DownloadRemoteLayersTaskState::Completed;
5275 0 : }
5276 : }
5277 : };
5278 0 : Ok(())
5279 0 : }
5280 0 : .instrument(info_span!(parent: None, "download_all_remote_layers", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
5281 : );
5282 :
5283 0 : let initial_info = DownloadRemoteLayersTaskInfo {
5284 0 : task_id: format!("{task_id}"),
5285 0 : state: DownloadRemoteLayersTaskState::Running,
5286 0 : total_layer_count: 0,
5287 0 : successful_download_count: 0,
5288 0 : failed_download_count: 0,
5289 0 : };
5290 0 : *status_guard = Some(initial_info.clone());
5291 0 :
5292 0 : Ok(initial_info)
5293 0 : }
5294 :
5295 0 : async fn download_all_remote_layers(
5296 0 : self: &Arc<Self>,
5297 0 : request: DownloadRemoteLayersTaskSpawnRequest,
5298 0 : ) {
5299 : use pageserver_api::models::DownloadRemoteLayersTaskState;
5300 :
5301 0 : let remaining = {
5302 0 : let guard = self.layers.read().await;
5303 0 : let Ok(lm) = guard.layer_map() else {
5304 : // technically here we could look into iterating accessible layers, but downloading
5305 : // all layers of a shutdown timeline makes no sense regardless.
5306 0 : tracing::info!("attempted to download all layers of shutdown timeline");
5307 0 : return;
5308 : };
5309 0 : lm.iter_historic_layers()
5310 0 : .map(|desc| guard.get_from_desc(&desc))
5311 0 : .collect::<Vec<_>>()
5312 0 : };
5313 0 : let total_layer_count = remaining.len();
5314 :
5315 : macro_rules! lock_status {
5316 : ($st:ident) => {
5317 : let mut st = self.download_all_remote_layers_task_info.write().unwrap();
5318 : let st = st
5319 : .as_mut()
5320 : .expect("this function is only called after the task has been spawned");
5321 : assert_eq!(
5322 : st.task_id,
5323 : format!(
5324 : "{}",
5325 : task_mgr::current_task_id().expect("we run inside a task_mgr task")
5326 : )
5327 : );
5328 : let $st = st;
5329 : };
5330 : }
5331 :
5332 : {
5333 0 : lock_status!(st);
5334 0 : st.total_layer_count = total_layer_count as u64;
5335 0 : }
5336 0 :
5337 0 : let mut remaining = remaining.into_iter();
5338 0 : let mut have_remaining = true;
5339 0 : let mut js = tokio::task::JoinSet::new();
5340 0 :
5341 0 : let cancel = task_mgr::shutdown_token();
5342 0 :
5343 0 : let limit = request.max_concurrent_downloads;
5344 :
5345 : loop {
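     : // Drive at most `limit` downloads concurrently: top up the JoinSet with new
     : // download tasks, drain completions while recording per-layer outcomes, and
     : // exit once no work remains or cancellation has been requested.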
5346 0 : while js.len() < limit.get() && have_remaining && !cancel.is_cancelled() {
5347 0 : let Some(next) = remaining.next() else {
5348 0 : have_remaining = false;
5349 0 : break;
5350 : };
5351 :
5352 0 : let span = tracing::info_span!("download", layer = %next);
5353 :
5354 0 : js.spawn(
5355 0 : async move {
5356 0 : let res = next.download().await;
5357 0 : (next, res)
5358 0 : }
5359 0 : .instrument(span),
5360 0 : );
5361 0 : }
5362 :
5363 0 : while let Some(res) = js.join_next().await {
5364 0 : match res {
5365 : Ok((_, Ok(_))) => {
5366 0 : lock_status!(st);
5367 0 : st.successful_download_count += 1;
5368 : }
5369 0 : Ok((layer, Err(e))) => {
5370 0 : tracing::error!(%layer, "download failed: {e:#}");
5371 0 : lock_status!(st);
5372 0 : st.failed_download_count += 1;
5373 : }
5374 0 : Err(je) if je.is_cancelled() => unreachable!("not used here"),
5375 0 : Err(je) if je.is_panic() => {
5376 0 : lock_status!(st);
5377 0 : st.failed_download_count += 1;
5378 : }
5379 0 : Err(je) => tracing::warn!("unknown joinerror: {je:?}"),
5380 : }
5381 : }
5382 :
5383 0 : if js.is_empty() && (!have_remaining || cancel.is_cancelled()) {
5384 0 : break;
5385 0 : }
5386 : }
5387 :
5388 : {
5389 0 : lock_status!(st);
5390 0 : st.state = DownloadRemoteLayersTaskState::Completed;
5391 : }
5392 0 : }
5393 :
5394 0 : pub(crate) fn get_download_all_remote_layers_task_info(
5395 0 : &self,
5396 0 : ) -> Option<DownloadRemoteLayersTaskInfo> {
5397 0 : self.download_all_remote_layers_task_info
5398 0 : .read()
5399 0 : .unwrap()
5400 0 : .clone()
5401 0 : }
5402 : }
5403 :
5404 : impl Timeline {
5405 : /// Returns non-remote layers for eviction.
5406 0 : pub(crate) async fn get_local_layers_for_disk_usage_eviction(&self) -> DiskUsageEvictionInfo {
5407 0 : let guard = self.layers.read().await;
5408 0 : let mut max_layer_size: Option<u64> = None;
5409 0 :
5410 0 : let resident_layers = guard
5411 0 : .likely_resident_layers()
5412 0 : .map(|layer| {
5413 0 : let file_size = layer.layer_desc().file_size;
5414 0 : max_layer_size = max_layer_size.map_or(Some(file_size), |m| Some(m.max(file_size)));
5415 0 :
5416 0 : let last_activity_ts = layer.latest_activity();
5417 0 :
5418 0 : EvictionCandidate {
5419 0 : layer: layer.to_owned().into(),
5420 0 : last_activity_ts,
5421 0 : relative_last_activity: finite_f32::FiniteF32::ZERO,
5422 0 : visibility: layer.visibility(),
5423 0 : }
5424 0 : })
5425 0 : .collect();
5426 0 :
5427 0 : DiskUsageEvictionInfo {
5428 0 : max_layer_size,
5429 0 : resident_layers,
5430 0 : }
5431 0 : }
5432 :
5433 1728 : pub(crate) fn get_shard_index(&self) -> ShardIndex {
5434 1728 : ShardIndex {
5435 1728 : shard_number: self.tenant_shard_id.shard_number,
5436 1728 : shard_count: self.tenant_shard_id.shard_count,
5437 1728 : }
5438 1728 : }
5439 :
5440 : /// Persistently blocks gc for `Manual` reason.
5441 : ///
5442 : /// Returns true if no such block existed before, false otherwise.
5443 0 : pub(crate) async fn block_gc(&self, tenant: &super::Tenant) -> anyhow::Result<bool> {
5444 : use crate::tenant::remote_timeline_client::index::GcBlockingReason;
5445 0 : assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
5446 0 : tenant.gc_block.insert(self, GcBlockingReason::Manual).await
5447 0 : }
5448 :
5449 : /// Persistently unblocks gc for `Manual` reason.
5450 0 : pub(crate) async fn unblock_gc(&self, tenant: &super::Tenant) -> anyhow::Result<()> {
5451 : use crate::tenant::remote_timeline_client::index::GcBlockingReason;
5452 0 : assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
5453 0 : tenant.gc_block.remove(self, GcBlockingReason::Manual).await
5454 0 : }
5455 :
5456 : #[cfg(test)]
5457 40 : pub(super) fn force_advance_lsn(self: &Arc<Timeline>, new_lsn: Lsn) {
5458 40 : self.last_record_lsn.advance(new_lsn);
5459 40 : }
5460 :
5461 : #[cfg(test)]
5462 2 : pub(super) fn force_set_disk_consistent_lsn(&self, new_value: Lsn) {
5463 2 : self.disk_consistent_lsn.store(new_value);
5464 2 : }
5465 :
5466 : /// Force create an image layer and place it into the layer map.
5467 : ///
5468 : /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
5469 : /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are
5470 : /// placed into the layer map in one run AND be validated.
5471 : #[cfg(test)]
5472 52 : pub(super) async fn force_create_image_layer(
5473 52 : self: &Arc<Timeline>,
5474 52 : lsn: Lsn,
5475 52 : mut images: Vec<(Key, Bytes)>,
5476 52 : check_start_lsn: Option<Lsn>,
5477 52 : ctx: &RequestContext,
5478 52 : ) -> anyhow::Result<()> {
5479 52 : let last_record_lsn = self.get_last_record_lsn();
5480 52 : assert!(
5481 52 : lsn <= last_record_lsn,
5482 0 : "advance last record lsn before inserting a layer, lsn={lsn}, last_record_lsn={last_record_lsn}"
5483 : );
5484 52 : if let Some(check_start_lsn) = check_start_lsn {
5485 52 : assert!(lsn >= check_start_lsn);
5486 0 : }
5487 126 : images.sort_unstable_by(|(ka, _), (kb, _)| ka.cmp(kb));
5488 52 : let min_key = *images.first().map(|(k, _)| k).unwrap();
5489 52 : let end_key = images.last().map(|(k, _)| k).unwrap().next();
5490 52 : let mut image_layer_writer = ImageLayerWriter::new(
5491 52 : self.conf,
5492 52 : self.timeline_id,
5493 52 : self.tenant_shard_id,
5494 52 : &(min_key..end_key),
5495 52 : lsn,
5496 52 : ctx,
5497 52 : )
5498 26 : .await?;
5499 230 : for (key, img) in images {
5500 178 : image_layer_writer.put_image(key, img, ctx).await?;
5501 : }
5502 104 : let (desc, path) = image_layer_writer.finish(ctx).await?;
5503 52 : let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
5504 52 : info!("force created image layer {}", image_layer.local_path());
5505 : {
5506 52 : let mut guard = self.layers.write().await;
5507 52 : guard.open_mut().unwrap().force_insert_layer(image_layer);
5508 52 : }
5509 52 :
5510 52 : Ok(())
5511 52 : }
5512 :
5513 : /// Force create a delta layer and place it into the layer map.
5514 : ///
5515 : /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
5516 : /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are
5517 : /// placed into the layer map in one run AND be validated.
5518 : #[cfg(test)]
5519 74 : pub(super) async fn force_create_delta_layer(
5520 74 : self: &Arc<Timeline>,
5521 74 : mut deltas: DeltaLayerTestDesc,
5522 74 : check_start_lsn: Option<Lsn>,
5523 74 : ctx: &RequestContext,
5524 74 : ) -> anyhow::Result<()> {
5525 74 : let last_record_lsn = self.get_last_record_lsn();
5526 74 : deltas
5527 74 : .data
5528 124 : .sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb)));
5529 74 : assert!(deltas.data.first().unwrap().0 >= deltas.key_range.start);
5530 74 : assert!(deltas.data.last().unwrap().0 < deltas.key_range.end);
5531 272 : for (_, lsn, _) in &deltas.data {
5532 198 : assert!(deltas.lsn_range.start <= *lsn && *lsn < deltas.lsn_range.end);
5533 : }
5534 74 : assert!(
5535 74 : deltas.lsn_range.end <= last_record_lsn,
5536 0 : "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}",
5537 : deltas.lsn_range.end,
5538 : last_record_lsn
5539 : );
5540 74 : if let Some(check_start_lsn) = check_start_lsn {
5541 74 : assert!(deltas.lsn_range.start >= check_start_lsn);
5542 0 : }
5543 74 : let mut delta_layer_writer = DeltaLayerWriter::new(
5544 74 : self.conf,
5545 74 : self.timeline_id,
5546 74 : self.tenant_shard_id,
5547 74 : deltas.key_range.start,
5548 74 : deltas.lsn_range,
5549 74 : ctx,
5550 74 : )
5551 37 : .await?;
5552 272 : for (key, lsn, val) in deltas.data {
5553 198 : delta_layer_writer.put_value(key, lsn, val, ctx).await?;
5554 : }
5555 185 : let (desc, path) = delta_layer_writer.finish(deltas.key_range.end, ctx).await?;
5556 74 : let delta_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
5557 74 : info!("force created delta layer {}", delta_layer.local_path());
5558 : {
5559 74 : let mut guard = self.layers.write().await;
5560 74 : guard.open_mut().unwrap().force_insert_layer(delta_layer);
5561 74 : }
5562 74 :
5563 74 : Ok(())
5564 74 : }
5565 :
5566 : /// Return all keys at the LSN in the image layers
5567 : #[cfg(test)]
5568 6 : pub(crate) async fn inspect_image_layers(
5569 6 : self: &Arc<Timeline>,
5570 6 : lsn: Lsn,
5571 6 : ctx: &RequestContext,
5572 6 : ) -> anyhow::Result<Vec<(Key, Bytes)>> {
5573 6 : let mut all_data = Vec::new();
5574 6 : let guard = self.layers.read().await;
5575 34 : for layer in guard.layer_map()?.iter_historic_layers() {
5576 34 : if !layer.is_delta() && layer.image_layer_lsn() == lsn {
5577 8 : let layer = guard.get_from_desc(&layer);
5578 8 : let mut reconstruct_data = ValuesReconstructState::default();
5579 8 : layer
5580 8 : .get_values_reconstruct_data(
5581 8 : KeySpace::single(Key::MIN..Key::MAX),
5582 8 : lsn..Lsn(lsn.0 + 1),
5583 8 : &mut reconstruct_data,
5584 8 : ctx,
5585 8 : )
5586 13 : .await?;
5587 74 : for (k, v) in reconstruct_data.keys {
5588 66 : all_data.push((k, v?.img.unwrap().1));
5589 : }
5590 26 : }
5591 : }
5592 6 : all_data.sort();
5593 6 : Ok(all_data)
5594 6 : }
5595 :
5596 : /// Get all historic layer descriptors in the layer map
5597 : #[cfg(test)]
5598 12 : pub(crate) async fn inspect_historic_layers(
5599 12 : self: &Arc<Timeline>,
5600 12 : ) -> anyhow::Result<Vec<super::storage_layer::PersistentLayerKey>> {
5601 12 : let mut layers = Vec::new();
5602 12 : let guard = self.layers.read().await;
5603 70 : for layer in guard.layer_map()?.iter_historic_layers() {
5604 70 : layers.push(layer.key());
5605 70 : }
5606 12 : Ok(layers)
5607 12 : }
5608 :
5609 : #[cfg(test)]
5610 10 : pub(crate) fn add_extra_test_dense_keyspace(&self, ks: KeySpace) {
5611 10 : let mut keyspace = self.extra_test_dense_keyspace.load().as_ref().clone();
5612 10 : keyspace.merge(&ks);
5613 10 : self.extra_test_dense_keyspace.store(Arc::new(keyspace));
5614 10 : }
5615 : }
5616 :
5617 : /// Tracks the writes that ingestion performs on a particular in-memory layer.
5618 : ///
5619 : /// Cleared upon freezing a layer.
5620 : pub(crate) struct TimelineWriterState {
5621 : open_layer: Arc<InMemoryLayer>,
5622 : current_size: u64,
5623 : // Previous Lsn which passed through
5624 : prev_lsn: Option<Lsn>,
5625 : // Largest Lsn which passed through the current writer
5626 : max_lsn: Option<Lsn>,
5627 : // Cached details of the last freeze. Avoids going trough the atomic/lock on every put.
5628 : cached_last_freeze_at: Lsn,
5629 : }
5630 :
5631 : impl TimelineWriterState {
5632 1266 : fn new(open_layer: Arc<InMemoryLayer>, current_size: u64, last_freeze_at: Lsn) -> Self {
5633 1266 : Self {
5634 1266 : open_layer,
5635 1266 : current_size,
5636 1266 : prev_lsn: None,
5637 1266 : max_lsn: None,
5638 1266 : cached_last_freeze_at: last_freeze_at,
5639 1266 : }
5640 1266 : }
5641 : }
5642 :
5643 : /// Various functions to mutate the timeline.
5644 : // TODO Currently, Deref is used to allow easy access to read methods from this trait.
5645 : // This is probably considered a bad practice in Rust and should be fixed eventually,
5646 : // but fixing it will cause large code changes.
5647 : pub(crate) struct TimelineWriter<'a> {
5648 : tl: &'a Timeline,
5649 : write_guard: tokio::sync::MutexGuard<'a, Option<TimelineWriterState>>,
5650 : }
5651 :
5652 : impl Deref for TimelineWriter<'_> {
5653 : type Target = Timeline;
5654 :
5655 4807180 : fn deref(&self) -> &Self::Target {
5656 4807180 : self.tl
5657 4807180 : }
5658 : }
5659 :
5660 : #[derive(PartialEq)]
5661 : enum OpenLayerAction {
5662 : Roll,
5663 : Open,
5664 : None,
5665 : }
5666 :
5667 : impl<'a> TimelineWriter<'a> {
5668 4804202 : async fn handle_open_layer_action(
5669 4804202 : &mut self,
5670 4804202 : at: Lsn,
5671 4804202 : action: OpenLayerAction,
5672 4804202 : ctx: &RequestContext,
5673 4804202 : ) -> anyhow::Result<&Arc<InMemoryLayer>> {
5674 4804202 : match action {
5675 : OpenLayerAction::Roll => {
5676 80 : let freeze_at = self.write_guard.as_ref().unwrap().max_lsn.unwrap();
5677 80 : self.roll_layer(freeze_at).await?;
5678 80 : self.open_layer(at, ctx).await?;
5679 : }
5680 1186 : OpenLayerAction::Open => self.open_layer(at, ctx).await?,
5681 : OpenLayerAction::None => {
5682 4802936 : assert!(self.write_guard.is_some());
5683 : }
5684 : }
5685 :
5686 4804202 : Ok(&self.write_guard.as_ref().unwrap().open_layer)
5687 4804202 : }
5688 :
5689 1266 : async fn open_layer(&mut self, at: Lsn, ctx: &RequestContext) -> anyhow::Result<()> {
5690 1266 : let layer = self
5691 1266 : .tl
5692 1266 : .get_layer_for_write(at, &self.write_guard, ctx)
5693 717 : .await?;
5694 1266 : let initial_size = layer.size().await?;
5695 :
5696 1266 : let last_freeze_at = self.last_freeze_at.load();
5697 1266 : self.write_guard.replace(TimelineWriterState::new(
5698 1266 : layer,
5699 1266 : initial_size,
5700 1266 : last_freeze_at,
5701 1266 : ));
5702 1266 :
5703 1266 : Ok(())
5704 1266 : }
5705 :
5706 80 : async fn roll_layer(&mut self, freeze_at: Lsn) -> Result<(), FlushLayerError> {
5707 80 : let current_size = self.write_guard.as_ref().unwrap().current_size;
5708 80 :
5709 80 : // self.write_guard will be taken by the freezing
5710 80 : self.tl
5711 80 : .freeze_inmem_layer_at(freeze_at, &mut self.write_guard)
5712 5 : .await?;
5713 :
5714 80 : assert!(self.write_guard.is_none());
5715 :
5716 80 : if current_size >= self.get_checkpoint_distance() * 2 {
5717 0 : warn!("Flushed oversized open layer with size {}", current_size)
5718 80 : }
5719 :
5720 80 : Ok(())
5721 80 : }
5722 :
5723 4804202 : fn get_open_layer_action(&self, lsn: Lsn, new_value_size: u64) -> OpenLayerAction {
5724 4804202 : let state = &*self.write_guard;
5725 4804202 : let Some(state) = &state else {
5726 1186 : return OpenLayerAction::Open;
5727 : };
5728 :
5729 : #[cfg(feature = "testing")]
5730 4803016 : if state.cached_last_freeze_at < self.tl.last_freeze_at.load() {
5731 : // this check and assertion are not really needed because
5732 : // LayerManager::try_freeze_in_memory_layer will always clear out the
5733 : // TimelineWriterState if something is frozen. however, we can advance last_freeze_at when there
5734 : // is no TimelineWriterState.
5735 0 : assert!(
5736 0 : state.open_layer.end_lsn.get().is_some(),
5737 0 : "our open_layer must be outdated"
5738 : );
5739 :
5740 : // this would be a memory leak waiting to happen because the in-memory layer always has
5741 : // an index
5742 0 : panic!("BUG: TimelineWriterState held on to frozen in-memory layer.");
5743 4803016 : }
5744 4803016 :
5745 4803016 : if state.prev_lsn == Some(lsn) {
5746 : // Rolling mid LSN is not supported by [downstream code].
5747 : // Hence, only roll at LSN boundaries.
5748 : //
5749 : // [downstream code]: https://github.com/neondatabase/neon/pull/7993#discussion_r1633345422
5750 6 : return OpenLayerAction::None;
5751 4803010 : }
5752 4803010 :
5753 4803010 : if state.current_size == 0 {
5754 : // Don't roll empty layers
5755 0 : return OpenLayerAction::None;
5756 4803010 : }
5757 4803010 :
5758 4803010 : if self.tl.should_roll(
5759 4803010 : state.current_size,
5760 4803010 : state.current_size + new_value_size,
5761 4803010 : self.get_checkpoint_distance(),
5762 4803010 : lsn,
5763 4803010 : state.cached_last_freeze_at,
5764 4803010 : state.open_layer.get_opened_at(),
5765 4803010 : ) {
5766 80 : OpenLayerAction::Roll
5767 : } else {
5768 4802930 : OpenLayerAction::None
5769 : }
5770 4804202 : }
5771 :
5772 : /// Put a batch of keys at the specified Lsns.
5773 4804200 : pub(crate) async fn put_batch(
5774 4804200 : &mut self,
5775 4804200 : batch: SerializedValueBatch,
5776 4804200 : ctx: &RequestContext,
5777 4804200 : ) -> anyhow::Result<()> {
5778 4804200 : if batch.is_empty() {
5779 0 : return Ok(());
5780 4804200 : }
5781 4804200 :
5782 4804200 : let batch_max_lsn = batch.max_lsn;
5783 4804200 : let buf_size: u64 = batch.buffer_size() as u64;
5784 4804200 :
5785 4804200 : let action = self.get_open_layer_action(batch_max_lsn, buf_size);
5786 4804200 : let layer = self
5787 4804200 : .handle_open_layer_action(batch_max_lsn, action, ctx)
5788 722 : .await?;
5789 :
5790 4804200 : let res = layer.put_batch(batch, ctx).await;
5791 :
5792 4804200 : if res.is_ok() {
5793 4804200 : // Update the current size only when the entire write was ok.
5794 4804200 : // In case of failures, we may have had partial writes which
5795 4804200 : // render the size tracking out of sync. That's ok because
5796 4804200 : // the checkpoint distance should be significantly smaller
5797 4804200 : // than the S3 single-shot upload limit of 5 GiB.
5798 4804200 : let state = self.write_guard.as_mut().unwrap();
5799 4804200 :
5800 4804200 : state.current_size += buf_size;
5801 4804200 : state.prev_lsn = Some(batch_max_lsn);
5802 4804200 : state.max_lsn = std::cmp::max(state.max_lsn, Some(batch_max_lsn));
5803 4804200 : }
5804 :
5805 4804200 : res
5806 4804200 : }
5807 :
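: // A minimal write-path sketch (assuming a `timeline.writer().await` accessor,
: // which is not shown in this excerpt, and an LSN-aligned `batch_max_lsn`):
: //
: //     let mut writer = timeline.writer().await;
: //     writer.put_batch(batch, &ctx).await?; // may open or roll the layer
: //     writer.finish_write(batch_max_lsn);   // wakes wait_lsn() callers
: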
5808 : #[cfg(test)]
5809 : /// Test helper for tests that would like to poke individual values without composing a batch.
5810 4390154 : pub(crate) async fn put(
5811 4390154 : &mut self,
5812 4390154 : key: Key,
5813 4390154 : lsn: Lsn,
5814 4390154 : value: &Value,
5815 4390154 : ctx: &RequestContext,
5816 4390154 : ) -> anyhow::Result<()> {
5817 : use utils::bin_ser::BeSer;
5818 4390154 : if !key.is_valid_key_on_write_path() {
5819 0 : bail!(
5820 0 : "the request contains data not supported by pageserver at TimelineWriter::put: {}",
5821 0 : key
5822 0 : );
5823 4390154 : }
5824 4390154 : let val_ser_size = value.serialized_size().unwrap() as usize;
5825 4390154 : let batch = SerializedValueBatch::from_values(vec![(
5826 4390154 : key.to_compact(),
5827 4390154 : lsn,
5828 4390154 : val_ser_size,
5829 4390154 : value.clone(),
5830 4390154 : )]);
5831 4390154 :
5832 4390154 : self.put_batch(batch, ctx).await
5833 4390154 : }
5834 :
5835 2 : pub(crate) async fn delete_batch(
5836 2 : &mut self,
5837 2 : batch: &[(Range<Key>, Lsn)],
5838 2 : ctx: &RequestContext,
5839 2 : ) -> anyhow::Result<()> {
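: // The first tombstone's LSN drives the open/roll decision; passing 0 as the
: // new value size means the roll check only considers the layer's current size.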
5840 2 : if let Some((_, lsn)) = batch.first() {
5841 2 : let action = self.get_open_layer_action(*lsn, 0);
5842 2 : let layer = self.handle_open_layer_action(*lsn, action, ctx).await?;
5843 2 : layer.put_tombstones(batch).await?;
5844 0 : }
5845 :
5846 2 : Ok(())
5847 2 : }
5848 :
5849 : /// Track the end of the latest digested WAL record, i.e. remember the
5850 : /// (end of the) last valid WAL record stored in the timeline.
5851 : ///
5852 : /// Call this after you have finished writing all the WAL up to 'lsn'.
5853 : ///
5854 : /// 'lsn' must be aligned. This wakes up any wait_lsn() callers waiting for
5855 : /// the 'lsn' or anything older. The previous last record LSN is stored alongside
5856 : /// the latest and can be read.
5857 5279062 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
5858 5279062 : self.tl.finish_write(new_lsn);
5859 5279062 : }
5860 :
5861 270570 : pub(crate) fn update_current_logical_size(&self, delta: i64) {
5862 270570 : self.tl.update_current_logical_size(delta)
5863 270570 : }
5864 : }
5865 :
5866 : // We need TimelineWriter to be Send for the upcoming conversion of
5867 : // Timeline::layers to tokio::sync::RwLock.
5868 : #[test]
5869 2 : fn is_send() {
5870 2 : fn _assert_send<T: Send>() {}
5871 2 : _assert_send::<TimelineWriter<'_>>();
5872 2 : }
5873 :
5874 : #[cfg(test)]
5875 : mod tests {
5876 : use pageserver_api::key::Key;
5877 : use pageserver_api::value::Value;
5878 : use utils::{id::TimelineId, lsn::Lsn};
5879 :
5880 : use crate::tenant::{
5881 : harness::{test_img, TenantHarness},
5882 : layer_map::LayerMap,
5883 : storage_layer::{Layer, LayerName},
5884 : timeline::{DeltaLayerTestDesc, EvictionError},
5885 : Timeline,
5886 : };
5887 :
5888 : #[tokio::test]
5889 2 : async fn test_heatmap_generation() {
5890 2 : let harness = TenantHarness::create("heatmap_generation").await.unwrap();
5891 2 :
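: // Set up three deltas: one shadowed by the later image layer ("covered"),
: // one that stays visible, and one L0 spanning the whole key range.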
5892 2 : let covered_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
5893 2 : Lsn(0x10)..Lsn(0x20),
5894 2 : vec![(
5895 2 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
5896 2 : Lsn(0x11),
5897 2 : Value::Image(test_img("foo")),
5898 2 : )],
5899 2 : );
5900 2 : let visible_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
5901 2 : Lsn(0x10)..Lsn(0x20),
5902 2 : vec![(
5903 2 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
5904 2 : Lsn(0x11),
5905 2 : Value::Image(test_img("foo")),
5906 2 : )],
5907 2 : );
5908 2 : let l0_delta = DeltaLayerTestDesc::new(
5909 2 : Lsn(0x20)..Lsn(0x30),
5910 2 : Key::from_hex("000000000000000000000000000000000000").unwrap()
5911 2 : ..Key::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(),
5912 2 : vec![(
5913 2 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
5914 2 : Lsn(0x25),
5915 2 : Value::Image(test_img("foo")),
5916 2 : )],
5917 2 : );
5918 2 : let delta_layers = vec![
5919 2 : covered_delta.clone(),
5920 2 : visible_delta.clone(),
5921 2 : l0_delta.clone(),
5922 2 : ];
5923 2 :
5924 2 : let image_layer = (
5925 2 : Lsn(0x40),
5926 2 : vec![(
5927 2 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
5928 2 : test_img("bar"),
5929 2 : )],
5930 2 : );
5931 2 : let image_layers = vec![image_layer];
5932 2 :
5933 20 : let (tenant, ctx) = harness.load().await;
5934 2 : let timeline = tenant
5935 2 : .create_test_timeline_with_layers(
5936 2 : TimelineId::generate(),
5937 2 : Lsn(0x10),
5938 2 : 14,
5939 2 : &ctx,
5940 2 : delta_layers,
5941 2 : image_layers,
5942 2 : Lsn(0x100),
5943 2 : )
5944 31 : .await
5945 2 : .unwrap();
5946 2 :
5947 2 : // Layer visibility is an input to heatmap generation, so refresh it first
5948 2 : timeline.update_layer_visibility().await.unwrap();
5949 2 :
5950 2 : let heatmap = timeline
5951 2 : .generate_heatmap()
5952 2 : .await
5953 2 : .expect("Infallible while timeline is not shut down");
5954 2 :
5955 2 : assert_eq!(heatmap.timeline_id, timeline.timeline_id);
5956 2 :
5957 2 : // L0 should come last
5958 2 : assert_eq!(heatmap.layers.last().unwrap().name, l0_delta.layer_name());
5959 2 :
5960 2 : let mut last_lsn = Lsn::MAX;
5961 10 : for layer in heatmap.layers {
5962 2 : // Covered layer should be omitted
5963 8 : assert!(layer.name != covered_delta.layer_name());
5964 2 :
5965 8 : let layer_lsn = match &layer.name {
5966 4 : LayerName::Delta(d) => d.lsn_range.end,
5967 4 : LayerName::Image(i) => i.lsn,
5968 2 : };
5969 2 :
5970 2 : // Apart from L0s, the newest layers should come first.
5971 8 : if !LayerMap::is_l0(layer.name.key_range(), layer.name.is_delta()) {
5972 6 : assert!(layer_lsn <= last_lsn);
5973 6 : last_lsn = layer_lsn;
5974 2 : }
5975 2 : }
5976 2 : }
5977 :
5978 : #[tokio::test]
5979 2 : async fn two_layer_eviction_attempts_at_the_same_time() {
5980 2 : let harness = TenantHarness::create("two_layer_eviction_attempts_at_the_same_time")
5981 2 : .await
5982 2 : .unwrap();
5983 2 :
5984 20 : let (tenant, ctx) = harness.load().await;
5985 2 : let timeline = tenant
5986 2 : .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
5987 6 : .await
5988 2 : .unwrap();
5989 2 :
5990 2 : let layer = find_some_layer(&timeline).await;
5991 2 : let layer = layer
5992 2 : .keep_resident()
5993 2 : .await
5994 2 : .expect("no download => no downloading errors")
5995 2 : .drop_eviction_guard();
5996 2 :
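: // Start both eviction futures before awaiting, so the two attempts race
: // against each other on the same layer.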
5997 2 : let forever = std::time::Duration::from_secs(120);
5998 2 :
5999 2 : let first = layer.evict_and_wait(forever);
6000 2 : let second = layer.evict_and_wait(forever);
6001 2 :
6002 2 : let (first, second) = tokio::join!(first, second);
6003 2 :
6004 2 : let res = layer.keep_resident().await;
6005 2 : assert!(res.is_none(), "{res:?}");
6006 2 :
6007 2 : match (first, second) {
6008 2 : (Ok(()), Ok(())) => {
6009 2 : // Because timeline locks are no longer taken on the eviction path, we can
6010 2 : // witness all three outcomes here.
6011 2 : }
6012 2 : (Ok(()), Err(EvictionError::NotFound)) | (Err(EvictionError::NotFound), Ok(())) => {
6013 0 : // if one completes before the other, this is fine just as well.
6014 0 : }
6015 2 : other => unreachable!("unexpected {:?}", other),
6016 2 : }
6017 2 : }
6018 :
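: /// Returns an arbitrary historic layer from the timeline's layer map.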
6019 2 : async fn find_some_layer(timeline: &Timeline) -> Layer {
6020 2 : let layers = timeline.layers.read().await;
6021 2 : let desc = layers
6022 2 : .layer_map()
6023 2 : .unwrap()
6024 2 : .iter_historic_layers()
6025 2 : .next()
6026 2 : .expect("must find one layer to evict");
6027 2 :
6028 2 : layers.get_from_desc(&desc)
6029 2 : }
6030 : }