Line data Source code
1 : pub(crate) mod analysis;
2 : pub(crate) mod compaction;
3 : pub mod delete;
4 : pub(crate) mod detach_ancestor;
5 : mod eviction_task;
6 : pub(crate) mod handle;
7 : mod init;
8 : pub mod layer_manager;
9 : pub(crate) mod logical_size;
10 : pub mod span;
11 : pub mod uninit;
12 : mod walreceiver;
13 :
14 : use anyhow::{anyhow, bail, ensure, Context, Result};
15 : use arc_swap::ArcSwap;
16 : use bytes::Bytes;
17 : use camino::Utf8Path;
18 : use chrono::{DateTime, Utc};
19 : use enumset::EnumSet;
20 : use fail::fail_point;
21 : use handle::ShardTimelineId;
22 : use once_cell::sync::Lazy;
23 : use pageserver_api::{
24 : key::{
25 : CompactKey, KEY_SIZE, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX,
26 : NON_INHERITED_RANGE, NON_INHERITED_SPARSE_RANGE,
27 : },
28 : keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning},
29 : models::{
30 : AtomicAuxFilePolicy, AuxFilePolicy, CompactionAlgorithm, CompactionAlgorithmSettings,
31 : DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy,
32 : InMemoryLayerInfo, LayerMapInfo, LsnLease, TimelineState,
33 : },
34 : reltag::BlockNumber,
35 : shard::{ShardIdentity, ShardNumber, TenantShardId},
36 : };
37 : use rand::Rng;
38 : use serde_with::serde_as;
39 : use storage_broker::BrokerClientChannel;
40 : use tokio::{
41 : runtime::Handle,
42 : sync::{oneshot, watch},
43 : };
44 : use tokio_util::sync::CancellationToken;
45 : use tracing::*;
46 : use utils::{
47 : fs_ext, pausable_failpoint,
48 : sync::gate::{Gate, GateGuard},
49 : };
50 :
51 : use std::pin::pin;
52 : use std::sync::atomic::Ordering as AtomicOrdering;
53 : use std::sync::{Arc, Mutex, RwLock, Weak};
54 : use std::time::{Duration, Instant, SystemTime};
55 : use std::{
56 : array,
57 : collections::{BTreeMap, HashMap, HashSet},
58 : sync::atomic::AtomicU64,
59 : };
60 : use std::{cmp::min, ops::ControlFlow};
61 : use std::{
62 : collections::btree_map::Entry,
63 : ops::{Deref, Range},
64 : };
65 :
66 : use crate::{
67 : aux_file::AuxFileSizeEstimator,
68 : tenant::{
69 : layer_map::{LayerMap, SearchResult},
70 : metadata::TimelineMetadata,
71 : storage_layer::{inmemory_layer::IndexEntry, PersistentLayerDesc},
72 : },
73 : walredo,
74 : };
75 : use crate::{
76 : context::{DownloadBehavior, RequestContext},
77 : disk_usage_eviction_task::DiskUsageEvictionInfo,
78 : pgdatadir_mapping::CollectKeySpaceError,
79 : };
80 : use crate::{
81 : disk_usage_eviction_task::finite_f32,
82 : tenant::storage_layer::{
83 : AsLayerDesc, DeltaLayerWriter, EvictionError, ImageLayerWriter, InMemoryLayer, Layer,
84 : LayerAccessStatsReset, LayerName, ResidentLayer, ValueReconstructState,
85 : ValuesReconstructState,
86 : },
87 : };
88 : use crate::{
89 : disk_usage_eviction_task::EvictionCandidate, tenant::storage_layer::delta_layer::DeltaEntry,
90 : };
91 : use crate::{
92 : l0_flush::{self, L0FlushGlobalState},
93 : metrics::GetKind,
94 : };
95 : use crate::{
96 : metrics::ScanLatencyOngoingRecording, tenant::timeline::logical_size::CurrentLogicalSize,
97 : };
98 : use crate::{pgdatadir_mapping::LsnForTimestamp, tenant::tasks::BackgroundLoopKind};
99 : use crate::{pgdatadir_mapping::MAX_AUX_FILE_V2_DELTAS, tenant::storage_layer::PersistentLayerKey};
100 : use crate::{
101 : pgdatadir_mapping::{AuxFilesDirectory, DirectoryKind},
102 : virtual_file::{MaybeFatalIo, VirtualFile},
103 : };
104 : use pageserver_api::config::tenant_conf_defaults::DEFAULT_PITR_INTERVAL;
105 :
106 : use crate::config::PageServerConf;
107 : use crate::keyspace::{KeyPartitioning, KeySpace};
108 : use crate::metrics::TimelineMetrics;
109 : use crate::pgdatadir_mapping::CalculateLogicalSizeError;
110 : use crate::tenant::config::TenantConfOpt;
111 : use pageserver_api::reltag::RelTag;
112 : use pageserver_api::shard::ShardIndex;
113 :
114 : use postgres_connection::PgConnectionConfig;
115 : use postgres_ffi::to_pg_timestamp;
116 : use utils::{
117 : completion,
118 : generation::Generation,
119 : id::TimelineId,
120 : lsn::{AtomicLsn, Lsn, RecordLsn},
121 : seqwait::SeqWait,
122 : simple_rcu::{Rcu, RcuReadGuard},
123 : };
124 :
125 : use crate::repository::GcResult;
126 : use crate::repository::{Key, Value};
127 : use crate::task_mgr;
128 : use crate::task_mgr::TaskKind;
129 : use crate::ZERO_PAGE;
130 :
131 : use self::delete::DeleteTimelineFlow;
132 : pub(super) use self::eviction_task::EvictionTaskTenantState;
133 : use self::eviction_task::EvictionTaskTimelineState;
134 : use self::layer_manager::LayerManager;
135 : use self::logical_size::LogicalSize;
136 : use self::walreceiver::{WalReceiver, WalReceiverConf};
137 :
138 : use super::{
139 : config::TenantConf, storage_layer::inmemory_layer, storage_layer::LayerVisibilityHint,
140 : upload_queue::NotInitialized,
141 : };
142 : use super::{debug_assert_current_span_has_tenant_and_timeline_id, AttachedTenantConf};
143 : use super::{remote_timeline_client::index::IndexPart, storage_layer::LayerFringe};
144 : use super::{
145 : remote_timeline_client::RemoteTimelineClient, remote_timeline_client::WaitCompletionError,
146 : storage_layer::ReadableLayer,
147 : };
148 : use super::{
149 : secondary::heatmap::{HeatMapLayer, HeatMapTimeline},
150 : GcError,
151 : };
152 :
153 : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
154 : pub(crate) enum FlushLoopState {
155 : NotStarted,
156 : Running {
157 : #[cfg(test)]
158 : expect_initdb_optimization: bool,
159 : #[cfg(test)]
160 : initdb_optimization_count: usize,
161 : },
162 : Exited,
163 : }
164 :
165 : #[derive(Debug, Copy, Clone, PartialEq, Eq)]
166 : pub enum ImageLayerCreationMode {
167 : /// Try to create image layers based on `time_for_new_image_layer`. Used in compaction code path.
168 : Try,
169 : /// Force creating the image layers if possible. For now, no image layers will be created
170 : /// for metadata keys. Used in compaction code path with force flag enabled.
171 : Force,
172 : /// Initial ingestion of the data, and no data should be dropped in this function. This
173 : /// means that no metadata keys should be included in the partitions. Used in flush frozen layer
174 : /// code path.
175 : Initial,
176 : }
177 :
178 : impl std::fmt::Display for ImageLayerCreationMode {
179 2136 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
180 2136 : write!(f, "{:?}", self)
181 2136 : }
182 : }
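// Editorial sketch, not part of the original file: the `Display` impl above
// forwards to `Debug`, so a mode renders as its bare variant name. A minimal
// check, assuming the standard test harness:
#[cfg(test)]
#[test]
fn image_layer_creation_mode_display_sketch() {
    assert_eq!(ImageLayerCreationMode::Try.to_string(), "Try");
    assert_eq!(ImageLayerCreationMode::Force.to_string(), "Force");
}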
183 :
184 : /// Temporary function for the immutable storage state refactor; ensures we drop the read lock guard rather than something else.
185 : /// Can be removed after all refactors are done.
186 84 : fn drop_rlock<T>(rlock: tokio::sync::RwLockReadGuard<T>) {
187 84 : drop(rlock)
188 84 : }
189 :
190 : /// Temporary function for the immutable storage state refactor; ensures we drop the write lock guard rather than something else.
191 : /// Can be removed after all refactors are done.
192 2220 : fn drop_wlock<T>(wlock: tokio::sync::RwLockWriteGuard<'_, T>) {
193 2220 : drop(wlock)
194 2220 : }
195 :
196 : /// The outward-facing resources required to build a Timeline
197 : pub struct TimelineResources {
198 : pub remote_client: RemoteTimelineClient,
199 : pub timeline_get_throttle: Arc<
200 : crate::tenant::throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>,
201 : >,
202 : pub l0_flush_global_state: l0_flush::L0FlushGlobalState,
203 : }
204 :
205 : pub(crate) struct AuxFilesState {
206 : pub(crate) dir: Option<AuxFilesDirectory>,
207 : pub(crate) n_deltas: usize,
208 : }
209 :
210 : /// The relation size cache caches relation sizes at the end of the timeline. It speeds up WAL
211 : /// ingestion considerably, because WAL ingestion needs to check on most records if the record
212 : /// implicitly extends the relation. At startup, `complete_as_of` is initialized to the current end
213 : /// of the timeline (disk_consistent_lsn). It's used on reads of relation sizes to check if the
214 : /// value can be used to also update the cache, see [`Timeline::update_cached_rel_size`].
215 : pub(crate) struct RelSizeCache {
216 : pub(crate) complete_as_of: Lsn,
217 : pub(crate) map: HashMap<RelTag, (Lsn, BlockNumber)>,
218 : }
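// Editorial sketch, not part of the original file: one way a read-side lookup
// against this cache can be gated, assuming a cached entry is only trusted for
// reads at or above the LSN it was recorded at. The helper name is made up;
// the real update path is `Timeline::update_cached_rel_size`.
#[allow(dead_code)]
fn cached_rel_size_sketch(
    cache: &RelSizeCache,
    rel: &RelTag,
    read_lsn: Lsn,
) -> Option<BlockNumber> {
    match cache.map.get(rel) {
        // The cached size was recorded at `cached_at`; it is usable for a read
        // at that LSN or later, because any later size change would have
        // updated the cache during WAL ingestion.
        Some((cached_at, nblocks)) if read_lsn >= *cached_at => Some(*nblocks),
        _ => None,
    }
}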
219 :
220 : pub struct Timeline {
221 : pub(crate) conf: &'static PageServerConf,
222 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
223 :
224 : myself: Weak<Self>,
225 :
226 : pub(crate) tenant_shard_id: TenantShardId,
227 : pub timeline_id: TimelineId,
228 :
229 : /// The generation of the tenant that instantiated us: this is used for safety when writing remote objects.
230 : /// Never changes for the lifetime of this [`Timeline`] object.
231 : ///
232 : /// This duplicates the generation stored in LocationConf, but that structure is mutable:
233 : /// this copy enforces the invariant that the generation doesn't change during a Tenant's lifetime.
234 : pub(crate) generation: Generation,
235 :
236 : /// The detailed sharding information from our parent Tenant. This enables us to map keys
237 : /// to shards, and is constant through the lifetime of this Timeline.
238 : shard_identity: ShardIdentity,
239 :
240 : pub pg_version: u32,
241 :
242 : /// The tuple has two elements.
243 : /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote).
244 : /// 2. `LayerMap`, the acceleration data structure for `get_reconstruct_data`.
245 : ///
246 : /// `LayerMap` maps out the `(PAGE,LSN) / (KEY,LSN)` space, which is composed of `(KeyRange, LsnRange)` rectangles.
247 : /// We describe these rectangles through the `PersistentLayerDesc` struct.
248 : ///
249 : /// When we want to reconstruct a page, we first find the `PersistentLayerDesc`'s that we need for page reconstruction,
250 : /// using `LayerMap`. Then, we use `LayerFileManager` to get the `PersistentLayer`'s that correspond to the
251 : /// `PersistentLayerDesc`'s.
252 : ///
253 : /// Hence, it's important to keep things coherent. The `LayerFileManager` must always have an entry for all
254 : /// `PersistentLayerDesc`'s in the `LayerMap`. If it doesn't, `LayerFileManager::get_from_desc` will panic at
255 : /// runtime, e.g., during page reconstruction.
256 : ///
257 : /// In the future, we'll be able to split up the tuple of LayerMap and `LayerFileManager`,
258 : /// so that e.g. on-demand-download/eviction, and layer spreading, can operate just on `LayerFileManager`.
259 : pub(crate) layers: tokio::sync::RwLock<LayerManager>,
260 :
261 : last_freeze_at: AtomicLsn,
262 : // Atomic would be more appropriate here.
263 : last_freeze_ts: RwLock<Instant>,
264 :
265 : pub(crate) standby_horizon: AtomicLsn,
266 :
267 : // WAL redo manager. `None` only for broken tenants.
268 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
269 :
270 : /// Remote storage client.
271 : /// See [`remote_timeline_client`](super::remote_timeline_client) module comment for details.
272 : pub remote_client: Arc<RemoteTimelineClient>,
273 :
274 : // What page versions do we hold in the repository? If we get a
275 : // request > last_record_lsn, we need to wait until we receive all
276 : // the WAL up to the request. The SeqWait provides functions for
277 : // that. TODO: If we get a request for an old LSN, such that the
278 : // versions have already been garbage collected away, we should
279 : // throw an error, but we don't track that currently.
280 : //
281 : // last_record_lsn.load().last points to the end of last processed WAL record.
282 : //
283 : // We also remember the starting point of the previous record in
284 : // 'last_record_lsn.load().prev'. It's used to set the xl_prev pointer of the
285 : // first WAL record when the node is started up. But here, we just
286 : // keep track of it.
287 : last_record_lsn: SeqWait<RecordLsn, Lsn>,
288 :
289 : // All WAL records have been processed and stored durably on files on
290 : // local disk, up to this LSN. On crash and restart, we need to re-process
291 : // the WAL starting from this point.
292 : //
293 : // Some later WAL records might have been processed and also flushed to disk
294 : // already, so don't be surprised to see some, but there's no guarantee on
295 : // them yet.
296 : disk_consistent_lsn: AtomicLsn,
297 :
298 : // Parent timeline that this timeline was branched from, and the LSN
299 : // of the branch point.
300 : ancestor_timeline: Option<Arc<Timeline>>,
301 : ancestor_lsn: Lsn,
302 :
303 : pub(super) metrics: TimelineMetrics,
304 :
305 : // `Timeline` doesn't write these metrics itself, but it manages the lifetime. Code
306 : // in `crate::page_service` writes these metrics.
307 : pub(crate) query_metrics: crate::metrics::SmgrQueryTimePerTimeline,
308 :
309 : directory_metrics: [AtomicU64; DirectoryKind::KINDS_NUM],
310 :
311 : /// Ensures layers aren't frozen by checkpointer between
312 : /// [`Timeline::get_layer_for_write`] and layer reads.
313 : /// Locked automatically by [`TimelineWriter`] and checkpointer.
314 : /// Must always be acquired before the layer map/individual layer lock
315 : /// to avoid deadlock.
316 : ///
317 : /// The state is cleared upon freezing.
318 : write_lock: tokio::sync::Mutex<Option<TimelineWriterState>>,
319 :
320 : /// Used to avoid multiple `flush_loop` tasks running
321 : pub(super) flush_loop_state: Mutex<FlushLoopState>,
322 :
323 : /// layer_flush_start_tx can be used to wake up the layer-flushing task.
324 : /// - The u64 value is a counter, incremented every time a new flush cycle is requested.
325 : /// The flush cycle counter is sent back on the layer_flush_done channel when
326 : /// the flush finishes. You can use that to wait for the flush to finish.
327 : /// - The LSN is updated to max() of its current value and the latest disk_consistent_lsn
328 : /// read by whoever sends an update
329 : layer_flush_start_tx: tokio::sync::watch::Sender<(u64, Lsn)>,
330 : /// to be notified when layer flushing has finished, subscribe to the layer_flush_done channel
331 : layer_flush_done_tx: tokio::sync::watch::Sender<(u64, Result<(), FlushLayerError>)>,
332 :
333 : // Needed to ensure that we can't create a branch at a point that was already garbage collected
334 : pub latest_gc_cutoff_lsn: Rcu<Lsn>,
335 :
336 : // List of child timelines and their branch points. This is needed to avoid
337 : // garbage collecting data that is still needed by the child timelines.
338 : pub(crate) gc_info: std::sync::RwLock<GcInfo>,
339 :
340 : // It may change across major Postgres versions, so for simplicity
341 : // we record it right after running initdb for a timeline.
342 : // It is needed in checks when we want to error on some operations
343 : // when they are requested for a pre-initdb LSN.
344 : // It can be unified with latest_gc_cutoff_lsn under some "first_valid_lsn",
345 : // though let's keep them both for better error visibility.
346 : pub initdb_lsn: Lsn,
347 :
348 : /// When did we last calculate the partitioning? Made pub(super) so test cases can access it.
349 : pub(super) partitioning: tokio::sync::Mutex<((KeyPartitioning, SparseKeyPartitioning), Lsn)>,
350 :
351 : /// Configuration: how often should the partitioning be recalculated.
352 : repartition_threshold: u64,
353 :
354 : last_image_layer_creation_check_at: AtomicLsn,
355 : last_image_layer_creation_check_instant: std::sync::Mutex<Option<Instant>>,
356 :
357 : /// Current logical size of the "datadir", at the last LSN.
358 : current_logical_size: LogicalSize,
359 :
360 : /// Information about the last processed message by the WAL receiver,
361 : /// or None if WAL receiver has not received anything for this timeline
362 : /// yet.
363 : pub last_received_wal: Mutex<Option<WalReceiverInfo>>,
364 : pub walreceiver: Mutex<Option<WalReceiver>>,
365 :
366 : /// Relation size cache
367 : pub(crate) rel_size_cache: RwLock<RelSizeCache>,
368 :
369 : download_all_remote_layers_task_info: RwLock<Option<DownloadRemoteLayersTaskInfo>>,
370 :
371 : state: watch::Sender<TimelineState>,
372 :
373 : /// Prevent two tasks from deleting the timeline at the same time. If held, the
374 : /// timeline is being deleted. If 'true', the timeline has already been deleted.
375 : pub delete_progress: Arc<tokio::sync::Mutex<DeleteTimelineFlow>>,
376 :
377 : eviction_task_timeline_state: tokio::sync::Mutex<EvictionTaskTimelineState>,
378 :
379 : /// Load or creation time information about the disk_consistent_lsn and when the loading
380 : /// happened. Used for consumption metrics.
381 : pub(crate) loaded_at: (Lsn, SystemTime),
382 :
383 : /// Gate to prevent shutdown completing while I/O is still happening to this timeline's data
384 : pub(crate) gate: Gate,
385 :
386 : /// Cancellation token scoped to this timeline: anything doing long-running work relating
387 : /// to the timeline should drop out when this token fires.
388 : pub(crate) cancel: CancellationToken,
389 :
390 : /// Make sure we only have one running compaction at a time in tests.
391 : ///
392 : /// Must only be taken in two places:
393 : /// - [`Timeline::compact`] (this file)
394 : /// - [`delete::delete_local_timeline_directory`]
395 : ///
396 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
397 : compaction_lock: tokio::sync::Mutex<()>,
398 :
399 : /// Make sure we only have one running gc at a time.
400 : ///
401 : /// Must only be taken in two places:
402 : /// - [`Timeline::gc`] (this file)
403 : /// - [`delete::delete_local_timeline_directory`]
404 : ///
405 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
406 : gc_lock: tokio::sync::Mutex<()>,
407 :
408 : /// Cloned from [`super::Tenant::timeline_get_throttle`] on construction.
409 : timeline_get_throttle: Arc<
410 : crate::tenant::throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>,
411 : >,
412 :
413 : /// Keep the aux directory cache to avoid reconstructing it on each update
414 : pub(crate) aux_files: tokio::sync::Mutex<AuxFilesState>,
415 :
416 : /// Size estimator for aux file v2
417 : pub(crate) aux_file_size_estimator: AuxFileSizeEstimator,
418 :
419 : /// Indicate whether aux file v2 storage is enabled.
420 : pub(crate) last_aux_file_policy: AtomicAuxFilePolicy,
421 :
422 : /// Some test cases directly place keys into the timeline without actually modifying the directory
423 : /// keys (i.e., DB_DIR). The test cases creating such keys will put the keyspaces here, so that
424 : /// these keys won't get garbage-collected during compaction/GC. This field only modifies the dense
425 : /// keyspace return value of `collect_keyspace`. For sparse keyspaces, use AUX keys for testing, and
426 : /// in the future, add `extra_test_sparse_keyspace` if necessary.
427 : #[cfg(test)]
428 : pub(crate) extra_test_dense_keyspace: ArcSwap<KeySpace>,
429 :
430 : pub(crate) l0_flush_global_state: L0FlushGlobalState,
431 :
432 : pub(crate) handles: handle::PerTimelineState<crate::page_service::TenantManagerTypes>,
433 : }
434 :
435 : pub struct WalReceiverInfo {
436 : pub wal_source_connconf: PgConnectionConfig,
437 : pub last_received_msg_lsn: Lsn,
438 : pub last_received_msg_ts: u128,
439 : }
440 :
441 : /// Information about how much history needs to be retained, needed by
442 : /// Garbage Collection.
443 : #[derive(Default)]
444 : pub(crate) struct GcInfo {
445 : /// Specific LSNs that are needed.
446 : ///
447 : /// Currently, this includes all points where child branches have
448 : /// been forked off from. In the future, could also include
449 : /// explicit user-defined snapshot points.
450 : pub(crate) retain_lsns: Vec<(Lsn, TimelineId)>,
451 :
452 : /// The cutoff coordinates, which are combined by selecting the minimum.
453 : pub(crate) cutoffs: GcCutoffs,
454 :
455 : /// Leases granted to particular LSNs.
456 : pub(crate) leases: BTreeMap<Lsn, LsnLease>,
457 :
458 : /// Whether our branch point is within our ancestor's PITR interval (for cost estimation)
459 : pub(crate) within_ancestor_pitr: bool,
460 : }
461 :
462 : impl GcInfo {
463 678 : pub(crate) fn min_cutoff(&self) -> Lsn {
464 678 : self.cutoffs.select_min()
465 678 : }
466 :
467 684 : pub(super) fn insert_child(&mut self, child_id: TimelineId, child_lsn: Lsn) {
468 684 : self.retain_lsns.push((child_lsn, child_id));
469 684 : self.retain_lsns.sort_by_key(|i| i.0);
470 684 : }
471 :
472 6 : pub(super) fn remove_child(&mut self, child_id: TimelineId) {
473 6 : self.retain_lsns.retain(|i| i.1 != child_id);
474 6 : }
475 : }
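// Editorial sketch, not part of the original file: how `GcInfo` tracks branch
// points and how the cutoffs combine. The values are made up; `TimelineId::generate`
// and `Lsn` come from `utils`.
#[cfg(test)]
#[test]
fn gc_info_sketch() {
    let mut gc_info = GcInfo::default();
    let child = TimelineId::generate();

    gc_info.insert_child(child, Lsn(0x40));
    assert_eq!(gc_info.retain_lsns, vec![(Lsn(0x40), child)]);

    // The effective cutoff is the more conservative (older) of the two.
    gc_info.cutoffs = GcCutoffs {
        space: Lsn(0x80),
        time: Lsn(0x60),
    };
    assert_eq!(gc_info.min_cutoff(), Lsn(0x60));

    gc_info.remove_child(child);
    assert!(gc_info.retain_lsns.is_empty());
}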
476 :
477 : /// The `GcInfo` component describing which Lsns need to be retained. Functionally, this
478 : /// is a single number (the oldest LSN which we must retain), but it internally distinguishes
479 : /// between time-based and space-based retention for observability and consumption metrics purposes.
480 : #[derive(Debug, Clone)]
481 : pub(crate) struct GcCutoffs {
482 : /// Calculated from the [`TenantConf::gc_horizon`], this LSN indicates how much
483 : /// history we must keep to retain a specified number of bytes of WAL.
484 : pub(crate) space: Lsn,
485 :
486 : /// Calculated from [`TenantConf::pitr_interval`], this LSN indicates how much
487 : /// history we must keep to enable reading back at least the PITR interval duration.
488 : pub(crate) time: Lsn,
489 : }
490 :
491 : impl Default for GcCutoffs {
492 1242 : fn default() -> Self {
493 1242 : Self {
494 1242 : space: Lsn::INVALID,
495 1242 : time: Lsn::INVALID,
496 1242 : }
497 1242 : }
498 : }
499 :
500 : impl GcCutoffs {
501 756 : fn select_min(&self) -> Lsn {
502 756 : std::cmp::min(self.space, self.time)
503 756 : }
504 : }
505 :
506 : pub(crate) struct TimelineVisitOutcome {
507 : completed_keyspace: KeySpace,
508 : image_covered_keyspace: KeySpace,
509 : }
510 :
511 : /// An error happened in a get() operation.
512 6 : #[derive(thiserror::Error, Debug)]
513 : pub(crate) enum PageReconstructError {
514 : #[error(transparent)]
515 : Other(anyhow::Error),
516 :
517 : #[error("Ancestor LSN wait error: {0}")]
518 : AncestorLsnTimeout(WaitLsnError),
519 :
520 : #[error("timeline shutting down")]
521 : Cancelled,
522 :
523 : /// An error happened replaying WAL records
524 : #[error(transparent)]
525 : WalRedo(anyhow::Error),
526 :
527 : #[error("{0}")]
528 : MissingKey(MissingKeyError),
529 : }
530 :
531 : impl From<anyhow::Error> for PageReconstructError {
532 0 : fn from(value: anyhow::Error) -> Self {
533 0 : // with walingest.rs many PageReconstructError are wrapped in as anyhow::Error
534 0 : match value.downcast::<PageReconstructError>() {
535 0 : Ok(pre) => pre,
536 0 : Err(other) => PageReconstructError::Other(other),
537 : }
538 0 : }
539 : }
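// Editorial sketch, not part of the original file: the conversion above
// recovers a `PageReconstructError` that was previously wrapped into an
// `anyhow::Error` (as happens in walingest.rs), instead of nesting it inside
// `Other`. A minimal check, assuming the standard test harness:
#[cfg(test)]
#[test]
fn page_reconstruct_error_unwrap_sketch() {
    let wrapped = anyhow::Error::new(PageReconstructError::Cancelled);
    let recovered = PageReconstructError::from(wrapped);
    assert!(matches!(recovered, PageReconstructError::Cancelled));
}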
540 :
541 : impl From<utils::bin_ser::DeserializeError> for PageReconstructError {
542 0 : fn from(value: utils::bin_ser::DeserializeError) -> Self {
543 0 : PageReconstructError::Other(anyhow::Error::new(value).context("deserialization failure"))
544 0 : }
545 : }
546 :
547 : impl From<layer_manager::Shutdown> for PageReconstructError {
548 0 : fn from(_: layer_manager::Shutdown) -> Self {
549 0 : PageReconstructError::Cancelled
550 0 : }
551 : }
552 :
553 : impl GetVectoredError {
554 : #[cfg(test)]
555 18 : pub(crate) fn is_missing_key_error(&self) -> bool {
556 18 : matches!(self, Self::MissingKey(_))
557 18 : }
558 : }
559 :
560 : impl From<layer_manager::Shutdown> for GetVectoredError {
561 0 : fn from(_: layer_manager::Shutdown) -> Self {
562 0 : GetVectoredError::Cancelled
563 0 : }
564 : }
565 :
566 : #[derive(thiserror::Error)]
567 : pub struct MissingKeyError {
568 : key: Key,
569 : shard: ShardNumber,
570 : cont_lsn: Lsn,
571 : request_lsn: Lsn,
572 : ancestor_lsn: Option<Lsn>,
573 : backtrace: Option<std::backtrace::Backtrace>,
574 : }
575 :
576 : impl std::fmt::Debug for MissingKeyError {
577 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
578 0 : write!(f, "{}", self)
579 0 : }
580 : }
581 :
582 : impl std::fmt::Display for MissingKeyError {
583 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
584 0 : write!(
585 0 : f,
586 0 : "could not find data for key {} (shard {:?}) at LSN {}, request LSN {}",
587 0 : self.key, self.shard, self.cont_lsn, self.request_lsn
588 0 : )?;
589 0 : if let Some(ref ancestor_lsn) = self.ancestor_lsn {
590 0 : write!(f, ", ancestor {}", ancestor_lsn)?;
591 0 : }
592 :
593 0 : if let Some(ref backtrace) = self.backtrace {
594 0 : write!(f, "\n{}", backtrace)?;
595 0 : }
596 :
597 0 : Ok(())
598 0 : }
599 : }
600 :
601 : impl PageReconstructError {
602 : /// Returns true if this error indicates a tenant/timeline shutdown alike situation
603 0 : pub(crate) fn is_stopping(&self) -> bool {
604 : use PageReconstructError::*;
605 0 : match self {
606 0 : Cancelled => true,
607 0 : Other(_) | AncestorLsnTimeout(_) | WalRedo(_) | MissingKey(_) => false,
608 : }
609 0 : }
610 : }
611 :
612 0 : #[derive(thiserror::Error, Debug)]
613 : pub(crate) enum CreateImageLayersError {
614 : #[error("timeline shutting down")]
615 : Cancelled,
616 :
617 : #[error("read failed")]
618 : GetVectoredError(#[source] GetVectoredError),
619 :
620 : #[error("reconstruction failed")]
621 : PageReconstructError(#[source] PageReconstructError),
622 :
623 : #[error(transparent)]
624 : Other(#[from] anyhow::Error),
625 : }
626 :
627 : impl From<layer_manager::Shutdown> for CreateImageLayersError {
628 0 : fn from(_: layer_manager::Shutdown) -> Self {
629 0 : CreateImageLayersError::Cancelled
630 0 : }
631 : }
632 :
633 0 : #[derive(thiserror::Error, Debug, Clone)]
634 : pub(crate) enum FlushLayerError {
635 : /// Timeline cancellation token was cancelled
636 : #[error("timeline shutting down")]
637 : Cancelled,
638 :
639 : /// We tried to flush a layer while the Timeline is in an unexpected state
640 : #[error("cannot flush frozen layers when flush_loop is not running, state is {0:?}")]
641 : NotRunning(FlushLoopState),
642 :
643 : // Arc<> the following non-clonable error types: we must be Clone-able because the flush error is propagated from the flush
644 : // loop via a watch channel, where we can only borrow it.
645 : #[error("create image layers (shared)")]
646 : CreateImageLayersError(Arc<CreateImageLayersError>),
647 :
648 : #[error("other (shared)")]
649 : Other(#[from] Arc<anyhow::Error>),
650 : }
651 :
652 : impl FlushLayerError {
653 : // When crossing from generic anyhow errors to this error type, we explicitly check
654 : // for timeline cancellation to avoid logging inoffensive shutdown errors as warn/err.
655 0 : fn from_anyhow(timeline: &Timeline, err: anyhow::Error) -> Self {
656 0 : let cancelled = timeline.cancel.is_cancelled()
657 : // The upload queue might have been shut down before the official cancellation of the timeline.
658 0 : || err
659 0 : .downcast_ref::<NotInitialized>()
660 0 : .map(NotInitialized::is_stopping)
661 0 : .unwrap_or_default();
662 0 : if cancelled {
663 0 : Self::Cancelled
664 : } else {
665 0 : Self::Other(Arc::new(err))
666 : }
667 0 : }
668 : }
669 :
670 : impl From<layer_manager::Shutdown> for FlushLayerError {
671 0 : fn from(_: layer_manager::Shutdown) -> Self {
672 0 : FlushLayerError::Cancelled
673 0 : }
674 : }
675 :
676 0 : #[derive(thiserror::Error, Debug)]
677 : pub(crate) enum GetVectoredError {
678 : #[error("timeline shutting down")]
679 : Cancelled,
680 :
681 : #[error("requested too many keys: {0} > {}", Timeline::MAX_GET_VECTORED_KEYS)]
682 : Oversized(u64),
683 :
684 : #[error("requested at invalid LSN: {0}")]
685 : InvalidLsn(Lsn),
686 :
687 : #[error("requested key not found: {0}")]
688 : MissingKey(MissingKeyError),
689 :
690 : #[error("ancestry walk")]
691 : GetReadyAncestorError(#[source] GetReadyAncestorError),
692 :
693 : #[error(transparent)]
694 : Other(#[from] anyhow::Error),
695 : }
696 :
697 : impl From<GetReadyAncestorError> for GetVectoredError {
698 6 : fn from(value: GetReadyAncestorError) -> Self {
699 : use GetReadyAncestorError::*;
700 6 : match value {
701 0 : Cancelled => GetVectoredError::Cancelled,
702 : AncestorLsnTimeout(_) | BadState { .. } => {
703 6 : GetVectoredError::GetReadyAncestorError(value)
704 : }
705 : }
706 6 : }
707 : }
708 :
709 6 : #[derive(thiserror::Error, Debug)]
710 : pub(crate) enum GetReadyAncestorError {
711 : #[error("ancestor LSN wait error")]
712 : AncestorLsnTimeout(#[from] WaitLsnError),
713 :
714 : #[error("bad state on timeline {timeline_id}: {state:?}")]
715 : BadState {
716 : timeline_id: TimelineId,
717 : state: TimelineState,
718 : },
719 :
720 : #[error("cancelled")]
721 : Cancelled,
722 : }
723 :
724 : #[derive(Clone, Copy)]
725 : pub enum LogicalSizeCalculationCause {
726 : Initial,
727 : ConsumptionMetricsSyntheticSize,
728 : EvictionTaskImitation,
729 : TenantSizeHandler,
730 : }
731 :
732 : pub enum GetLogicalSizePriority {
733 : User,
734 : Background,
735 : }
736 :
737 0 : #[derive(enumset::EnumSetType)]
738 : pub(crate) enum CompactFlags {
739 : ForceRepartition,
740 : ForceImageLayerCreation,
741 : EnhancedGcBottomMostCompaction,
742 : DryRun,
743 : }
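// Editorial sketch, not part of the original file: `CompactFlags` derives
// `enumset::EnumSetType`, so callers of `Timeline::compact` combine flags with
// `|` into an `EnumSet`, and the compaction code queries them with `contains`.
#[cfg(test)]
#[test]
fn compact_flags_sketch() {
    let flags: EnumSet<CompactFlags> =
        CompactFlags::ForceRepartition | CompactFlags::ForceImageLayerCreation;
    assert!(flags.contains(CompactFlags::ForceRepartition));
    assert!(!flags.contains(CompactFlags::DryRun));
}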
744 :
745 : impl std::fmt::Debug for Timeline {
746 0 : fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
747 0 : write!(f, "Timeline<{}>", self.timeline_id)
748 0 : }
749 : }
750 :
751 0 : #[derive(thiserror::Error, Debug)]
752 : pub(crate) enum WaitLsnError {
753 : // Called on a timeline which is shutting down
754 : #[error("Shutdown")]
755 : Shutdown,
756 :
757 : // Called on a timeline that is not in an active state, or is shutting down
758 : #[error("Bad timeline state: {0:?}")]
759 : BadState(TimelineState),
760 :
761 : // Timeout expired while waiting for LSN to catch up with goal.
762 : #[error("{0}")]
763 : Timeout(String),
764 : }
765 :
766 : // The impls below achieve cancellation mapping for errors.
767 : // Perhaps there's a way of achieving this with less cruft.
768 :
769 : impl From<CreateImageLayersError> for CompactionError {
770 0 : fn from(e: CreateImageLayersError) -> Self {
771 0 : match e {
772 0 : CreateImageLayersError::Cancelled => CompactionError::ShuttingDown,
773 0 : CreateImageLayersError::Other(e) => {
774 0 : CompactionError::Other(e.context("create image layers"))
775 : }
776 0 : _ => CompactionError::Other(e.into()),
777 : }
778 0 : }
779 : }
780 :
781 : impl From<CreateImageLayersError> for FlushLayerError {
782 0 : fn from(e: CreateImageLayersError) -> Self {
783 0 : match e {
784 0 : CreateImageLayersError::Cancelled => FlushLayerError::Cancelled,
785 0 : any => FlushLayerError::CreateImageLayersError(Arc::new(any)),
786 : }
787 0 : }
788 : }
789 :
790 : impl From<PageReconstructError> for CreateImageLayersError {
791 0 : fn from(e: PageReconstructError) -> Self {
792 0 : match e {
793 0 : PageReconstructError::Cancelled => CreateImageLayersError::Cancelled,
794 0 : _ => CreateImageLayersError::PageReconstructError(e),
795 : }
796 0 : }
797 : }
798 :
799 : impl From<GetVectoredError> for CreateImageLayersError {
800 0 : fn from(e: GetVectoredError) -> Self {
801 0 : match e {
802 0 : GetVectoredError::Cancelled => CreateImageLayersError::Cancelled,
803 0 : _ => CreateImageLayersError::GetVectoredError(e),
804 : }
805 0 : }
806 : }
807 :
808 : impl From<GetVectoredError> for PageReconstructError {
809 18 : fn from(e: GetVectoredError) -> Self {
810 18 : match e {
811 0 : GetVectoredError::Cancelled => PageReconstructError::Cancelled,
812 0 : GetVectoredError::InvalidLsn(_) => PageReconstructError::Other(anyhow!("Invalid LSN")),
813 0 : err @ GetVectoredError::Oversized(_) => PageReconstructError::Other(err.into()),
814 12 : GetVectoredError::MissingKey(err) => PageReconstructError::MissingKey(err),
815 6 : GetVectoredError::GetReadyAncestorError(err) => PageReconstructError::from(err),
816 0 : GetVectoredError::Other(err) => PageReconstructError::Other(err),
817 : }
818 18 : }
819 : }
820 :
821 : impl From<GetReadyAncestorError> for PageReconstructError {
822 6 : fn from(e: GetReadyAncestorError) -> Self {
823 : use GetReadyAncestorError::*;
824 6 : match e {
825 0 : AncestorLsnTimeout(wait_err) => PageReconstructError::AncestorLsnTimeout(wait_err),
826 6 : bad_state @ BadState { .. } => PageReconstructError::Other(anyhow::anyhow!(bad_state)),
827 0 : Cancelled => PageReconstructError::Cancelled,
828 : }
829 6 : }
830 : }
831 :
832 : pub(crate) enum WaitLsnWaiter<'a> {
833 : Timeline(&'a Timeline),
834 : Tenant,
835 : PageService,
836 : }
837 :
838 : /// Argument to [`Timeline::shutdown`].
839 : #[derive(Debug, Clone, Copy)]
840 : pub(crate) enum ShutdownMode {
841 : /// Graceful shutdown, may do a lot of I/O as we flush any open layers to disk and then
842 : /// also to remote storage. This method can easily take multiple seconds for a busy timeline.
843 : ///
844 : /// While we are flushing, we continue to accept read I/O for LSNs ingested before
845 : /// the call to [`Timeline::shutdown`].
846 : FreezeAndFlush,
847 : /// Shut down immediately, without waiting for any open layers to flush.
848 : Hard,
849 : }
850 :
851 : struct ImageLayerCreationOutcome {
852 : image: Option<ResidentLayer>,
853 : next_start_key: Key,
854 : }
855 :
856 : /// Public interface functions
857 : impl Timeline {
858 : /// Get the LSN where this branch was created
859 6 : pub(crate) fn get_ancestor_lsn(&self) -> Lsn {
860 6 : self.ancestor_lsn
861 6 : }
862 :
863 : /// Get the ancestor's timeline id
864 2262 : pub(crate) fn get_ancestor_timeline_id(&self) -> Option<TimelineId> {
865 2262 : self.ancestor_timeline
866 2262 : .as_ref()
867 2262 : .map(|ancestor| ancestor.timeline_id)
868 2262 : }
869 :
870 : /// Get the ancestor timeline
871 0 : pub(crate) fn ancestor_timeline(&self) -> Option<&Arc<Timeline>> {
872 0 : self.ancestor_timeline.as_ref()
873 0 : }
874 :
875 : /// Get the bytes written since the PITR cutoff on this branch, and
876 : /// whether this branch's ancestor_lsn is within its parent's PITR.
877 0 : pub(crate) fn get_pitr_history_stats(&self) -> (u64, bool) {
878 0 : let gc_info = self.gc_info.read().unwrap();
879 0 : let history = self
880 0 : .get_last_record_lsn()
881 0 : .checked_sub(gc_info.cutoffs.time)
882 0 : .unwrap_or(Lsn(0))
883 0 : .0;
884 0 : (history, gc_info.within_ancestor_pitr)
885 0 : }
886 :
887 : /// Lock and get timeline's GC cutoff
888 2985 : pub(crate) fn get_latest_gc_cutoff_lsn(&self) -> RcuReadGuard<Lsn> {
889 2985 : self.latest_gc_cutoff_lsn.read()
890 2985 : }
891 :
892 : /// Look up given page version.
893 : ///
894 : /// If a remote layer file is needed, it is downloaded as part of this
895 : /// call.
896 : ///
897 : /// This method enforces [`Self::timeline_get_throttle`] internally.
898 : ///
899 : /// NOTE: It is considered an error to 'get' a key that doesn't exist. The
900 : /// abstraction above this needs to store suitable metadata to track what
901 : /// data exists with what keys, in separate metadata entries. If a
902 : /// non-existent key is requested, we may incorrectly return a value from
903 : /// an ancestor branch, for example, or waste a lot of cycles chasing the
904 : /// non-existing key.
905 : ///
906 : /// # Cancel-Safety
907 : ///
908 : /// This method is cancellation-safe.
909 : #[inline(always)]
910 1876113 : pub(crate) async fn get(
911 1876113 : &self,
912 1876113 : key: Key,
913 1876113 : lsn: Lsn,
914 1876113 : ctx: &RequestContext,
915 1876113 : ) -> Result<Bytes, PageReconstructError> {
916 1876113 : if !lsn.is_valid() {
917 0 : return Err(PageReconstructError::Other(anyhow::anyhow!("Invalid LSN")));
918 1876113 : }
919 1876113 :
920 1876113 : // This check is debug-only because of the cost of hashing, and because it's a double-check: we
921 1876113 : // already checked the key against the shard_identity when looking up the Timeline from
922 1876113 : // page_service.
923 1876113 : debug_assert!(!self.shard_identity.is_key_disposable(&key));
924 :
925 1876113 : self.timeline_get_throttle.throttle(ctx, 1).await;
926 :
927 1876113 : let keyspace = KeySpace {
928 1876113 : ranges: vec![key..key.next()],
929 1876113 : };
930 1876113 :
931 1876113 : // Initialise the reconstruct state for the key with the cache
932 1876113 : // entry returned above.
933 1876113 : let mut reconstruct_state = ValuesReconstructState::new();
934 :
935 1876113 : let vectored_res = self
936 1876113 : .get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx)
937 547868 : .await;
938 :
939 1876113 : let key_value = vectored_res?.pop_first();
940 1876095 : match key_value {
941 1875159 : Some((got_key, value)) => {
942 1875159 : if got_key != key {
943 0 : error!(
944 0 : "Expected {}, but singular vectored get returned {}",
945 : key, got_key
946 : );
947 0 : Err(PageReconstructError::Other(anyhow!(
948 0 : "Singular vectored get returned wrong key"
949 0 : )))
950 : } else {
951 1875159 : value
952 : }
953 : }
954 936 : None => Err(PageReconstructError::MissingKey(MissingKeyError {
955 936 : key,
956 936 : shard: self.shard_identity.get_shard_number(&key),
957 936 : cont_lsn: Lsn(0),
958 936 : request_lsn: lsn,
959 936 : ancestor_lsn: None,
960 936 : backtrace: None,
961 936 : })),
962 : }
963 1876113 : }
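    // Editorial sketch, not part of the original file: a point read that first
    // waits for the LSN to be ingested, per the contract documented on
    // `wait_lsn` below. The method name is made up and the error mapping is
    // simplified.
    #[allow(dead_code)]
    async fn get_after_wait_sketch(
        &self,
        key: Key,
        lsn: Lsn,
        ctx: &RequestContext,
    ) -> Result<Bytes, PageReconstructError> {
        // Make sure WAL up to `lsn` has been received and processed first.
        self.wait_lsn(lsn, WaitLsnWaiter::Timeline(self), ctx)
            .await
            .map_err(|e| PageReconstructError::Other(anyhow!(e)))?;
        self.get(key, lsn, ctx).await
    }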
964 :
965 : pub(crate) const MAX_GET_VECTORED_KEYS: u64 = 32;
966 : pub(crate) const VEC_GET_LAYERS_VISITED_WARN_THRESH: f64 = 512.0;
967 :
968 : /// Look up multiple page versions at a given LSN
969 : ///
970 : /// This naive implementation will be replaced with a more efficient one
971 : /// which actually vectorizes the read path.
972 3396 : pub(crate) async fn get_vectored(
973 3396 : &self,
974 3396 : keyspace: KeySpace,
975 3396 : lsn: Lsn,
976 3396 : ctx: &RequestContext,
977 3396 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
978 3396 : if !lsn.is_valid() {
979 0 : return Err(GetVectoredError::InvalidLsn(lsn));
980 3396 : }
981 3396 :
982 3396 : let key_count = keyspace.total_raw_size().try_into().unwrap();
983 3396 : if key_count > Timeline::MAX_GET_VECTORED_KEYS {
984 0 : return Err(GetVectoredError::Oversized(key_count));
985 3396 : }
986 :
987 6792 : for range in &keyspace.ranges {
988 3396 : let mut key = range.start;
989 7416 : while key != range.end {
990 4020 : assert!(!self.shard_identity.is_key_disposable(&key));
991 4020 : key = key.next();
992 : }
993 : }
994 :
995 3396 : trace!(
996 0 : "get vectored request for {:?}@{} from task kind {:?}",
997 0 : keyspace,
998 0 : lsn,
999 0 : ctx.task_kind(),
1000 : );
1001 :
1002 3396 : let start = crate::metrics::GET_VECTORED_LATENCY
1003 3396 : .for_task_kind(ctx.task_kind())
1004 3396 : .map(|metric| (metric, Instant::now()));
1005 :
1006 : // start counting after throttle so that throttle time
1007 : // is always less than observation time
1008 3396 : let throttled = self
1009 3396 : .timeline_get_throttle
1010 3396 : .throttle(ctx, key_count as usize)
1011 0 : .await;
1012 :
1013 3396 : let res = self
1014 3396 : .get_vectored_impl(
1015 3396 : keyspace.clone(),
1016 3396 : lsn,
1017 3396 : &mut ValuesReconstructState::new(),
1018 3396 : ctx,
1019 3396 : )
1020 191 : .await;
1021 :
1022 3396 : if let Some((metric, start)) = start {
1023 0 : let elapsed = start.elapsed();
1024 0 : let ex_throttled = if let Some(throttled) = throttled {
1025 0 : elapsed.checked_sub(throttled)
1026 : } else {
1027 0 : Some(elapsed)
1028 : };
1029 :
1030 0 : if let Some(ex_throttled) = ex_throttled {
1031 0 : metric.observe(ex_throttled.as_secs_f64());
1032 0 : } else {
1033 0 : use utils::rate_limit::RateLimit;
1034 0 : static LOGGED: Lazy<Mutex<RateLimit>> =
1035 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(10))));
1036 0 : let mut rate_limit = LOGGED.lock().unwrap();
1037 0 : rate_limit.call(|| {
1038 0 : warn!("error deducting time spent throttled; this message is logged at a global rate limit");
1039 0 : });
1040 0 : }
1041 3396 : }
1042 :
1043 3396 : res
1044 3396 : }
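    // Editorial sketch, not part of the original file: building the keyspace
    // for a small contiguous vectored read. The assert mirrors the `Oversized`
    // limit above; the method name is made up.
    #[allow(dead_code)]
    async fn get_contiguous_sketch(
        &self,
        start: Key,
        count: u64,
        lsn: Lsn,
        ctx: &RequestContext,
    ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
        assert!(count <= Timeline::MAX_GET_VECTORED_KEYS);

        // Keyspace ranges are half-open, so advance `count` keys past `start`.
        let mut end = start;
        for _ in 0..count {
            end = end.next();
        }
        let keyspace = KeySpace {
            ranges: vec![start..end],
        };
        self.get_vectored(keyspace, lsn, ctx).await
    }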
1045 :
1046 : /// Scan the keyspace and return all existing key-value pairs within it. This is currently implemented
1047 : /// on top of vectored get. A normal vectored get returns an error when a key in the keyspace is not found
1048 : /// during the search, but the scan interface returns all existing key-value pairs and does
1049 : /// not expect every single key in the keyspace to be found. The semantics are closer to the RocksDB
1050 : /// scan iterator interface. We could optimize this interface later to avoid some checks in the vectored
1051 : /// get path by maintaining and splitting the probing and to-be-probed keyspaces. We also need to ensure that
1052 : /// the scan operation will not cause OOM in the future.
1053 72 : pub(crate) async fn scan(
1054 72 : &self,
1055 72 : keyspace: KeySpace,
1056 72 : lsn: Lsn,
1057 72 : ctx: &RequestContext,
1058 72 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1059 72 : if !lsn.is_valid() {
1060 0 : return Err(GetVectoredError::InvalidLsn(lsn));
1061 72 : }
1062 72 :
1063 72 : trace!(
1064 0 : "key-value scan request for {:?}@{} from task kind {:?}",
1065 0 : keyspace,
1066 0 : lsn,
1067 0 : ctx.task_kind()
1068 : );
1069 :
1070 : // We should generalize this into Keyspace::contains in the future.
1071 144 : for range in &keyspace.ranges {
1072 72 : if range.start.field1 < METADATA_KEY_BEGIN_PREFIX
1073 72 : || range.end.field1 > METADATA_KEY_END_PREFIX
1074 : {
1075 0 : return Err(GetVectoredError::Other(anyhow::anyhow!(
1076 0 : "only metadata keyspace can be scanned"
1077 0 : )));
1078 72 : }
1079 : }
1080 :
1081 72 : let start = crate::metrics::SCAN_LATENCY
1082 72 : .for_task_kind(ctx.task_kind())
1083 72 : .map(ScanLatencyOngoingRecording::start_recording);
1084 :
1085 : // start counting after throttle so that throttle time
1086 : // is always less than observation time
1087 72 : let throttled = self
1088 72 : .timeline_get_throttle
1089 72 : // assume scan = 1 quota for now until we find a better way to process this
1090 72 : .throttle(ctx, 1)
1091 0 : .await;
1092 :
1093 72 : let vectored_res = self
1094 72 : .get_vectored_impl(
1095 72 : keyspace.clone(),
1096 72 : lsn,
1097 72 : &mut ValuesReconstructState::default(),
1098 72 : ctx,
1099 72 : )
1100 0 : .await;
1101 :
1102 72 : if let Some(recording) = start {
1103 0 : recording.observe(throttled);
1104 72 : }
1105 :
1106 72 : vectored_res
1107 72 : }
1108 :
1109 1880481 : pub(super) async fn get_vectored_impl(
1110 1880481 : &self,
1111 1880481 : keyspace: KeySpace,
1112 1880481 : lsn: Lsn,
1113 1880481 : reconstruct_state: &mut ValuesReconstructState,
1114 1880481 : ctx: &RequestContext,
1115 1880481 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1116 1880481 : let get_kind = if keyspace.total_raw_size() == 1 {
1117 1879149 : GetKind::Singular
1118 : } else {
1119 1332 : GetKind::Vectored
1120 : };
1121 :
1122 1880481 : let get_data_timer = crate::metrics::GET_RECONSTRUCT_DATA_TIME
1123 1880481 : .for_get_kind(get_kind)
1124 1880481 : .start_timer();
1125 1880481 : self.get_vectored_reconstruct_data(keyspace.clone(), lsn, reconstruct_state, ctx)
1126 574902 : .await?;
1127 1880433 : get_data_timer.stop_and_record();
1128 1880433 :
1129 1880433 : let reconstruct_timer = crate::metrics::RECONSTRUCT_TIME
1130 1880433 : .for_get_kind(get_kind)
1131 1880433 : .start_timer();
1132 1880433 : let mut results: BTreeMap<Key, Result<Bytes, PageReconstructError>> = BTreeMap::new();
1133 1880433 : let layers_visited = reconstruct_state.get_layers_visited();
1134 :
1135 2000541 : for (key, res) in std::mem::take(&mut reconstruct_state.keys) {
1136 2000541 : match res {
1137 0 : Err(err) => {
1138 0 : results.insert(key, Err(err));
1139 0 : }
1140 2000541 : Ok(state) => {
1141 2000541 : let state = ValueReconstructState::from(state);
1142 :
1143 2000541 : let reconstruct_res = self.reconstruct_value(key, lsn, state).await;
1144 2000541 : results.insert(key, reconstruct_res);
1145 : }
1146 : }
1147 : }
1148 1880433 : reconstruct_timer.stop_and_record();
1149 1880433 :
1150 1880433 : // For aux file keys (v1 or v2) the vectored read path does not return an error
1151 1880433 : // when they're missing. Instead they are omitted from the resulting btree
1152 1880433 : // (this is a requirement, not a bug). Skip updating the metric in these cases
1153 1880433 : // to avoid infinite results.
1154 1880433 : if !results.is_empty() {
1155 1878915 : let avg = layers_visited as f64 / results.len() as f64;
1156 1878915 : if avg >= Self::VEC_GET_LAYERS_VISITED_WARN_THRESH {
1157 0 : use utils::rate_limit::RateLimit;
1158 0 : static LOGGED: Lazy<Mutex<RateLimit>> =
1159 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(60))));
1160 0 : let mut rate_limit = LOGGED.lock().unwrap();
1161 0 : rate_limit.call(|| {
1162 0 : tracing::info!(
1163 0 : shard_id = %self.tenant_shard_id.shard_slug(),
1164 0 : lsn = %lsn,
1165 0 : "Vectored read for {} visited {} layers on average per key and {} in total. {}/{} pages were returned",
1166 0 : keyspace, avg, layers_visited, results.len(), keyspace.total_raw_size());
1167 0 : });
1168 1878915 : }
1169 :
1170 : // Note that this is an approximation. Tracking the exact number of layers visited
1171 : // per key requires virtually unbounded memory usage and is inefficient
1172 : // (i.e. segment tree tracking each range queried from a layer)
1173 1878915 : crate::metrics::VEC_READ_NUM_LAYERS_VISITED.observe(avg);
1174 1518 : }
1175 :
1176 1880433 : Ok(results)
1177 1880481 : }
1178 :
1179 : /// Get last or prev record separately. Same as get_last_record_rlsn().last/prev.
1180 829650 : pub(crate) fn get_last_record_lsn(&self) -> Lsn {
1181 829650 : self.last_record_lsn.load().last
1182 829650 : }
1183 :
1184 0 : pub(crate) fn get_prev_record_lsn(&self) -> Lsn {
1185 0 : self.last_record_lsn.load().prev
1186 0 : }
1187 :
1188 : /// Atomically get both last and prev.
1189 678 : pub(crate) fn get_last_record_rlsn(&self) -> RecordLsn {
1190 678 : self.last_record_lsn.load()
1191 678 : }
1192 :
1193 : /// Subscribe to callers of wait_lsn(). The value of the channel is None if there are no
1194 : /// wait_lsn() calls in progress, and Some(Lsn) if there is an active waiter for wait_lsn().
1195 0 : pub(crate) fn subscribe_for_wait_lsn_updates(&self) -> watch::Receiver<Option<Lsn>> {
1196 0 : self.last_record_lsn.status_receiver()
1197 0 : }
1198 :
1199 3477 : pub(crate) fn get_disk_consistent_lsn(&self) -> Lsn {
1200 3477 : self.disk_consistent_lsn.load()
1201 3477 : }
1202 :
1203 : /// remote_consistent_lsn from the perspective of the tenant's current generation,
1204 : /// not validated with control plane yet.
1205 : /// See [`Self::get_remote_consistent_lsn_visible`].
1206 0 : pub(crate) fn get_remote_consistent_lsn_projected(&self) -> Option<Lsn> {
1207 0 : self.remote_client.remote_consistent_lsn_projected()
1208 0 : }
1209 :
1210 : /// remote_consistent_lsn which the tenant is guaranteed not to go backward from,
1211 : /// i.e. a value of remote_consistent_lsn_projected which has undergone
1212 : /// generation validation in the deletion queue.
1213 0 : pub(crate) fn get_remote_consistent_lsn_visible(&self) -> Option<Lsn> {
1214 0 : self.remote_client.remote_consistent_lsn_visible()
1215 0 : }
1216 :
1217 : /// The sum of the file size of all historic layers in the layer map.
1218 : /// This method makes no distinction between local and remote layers.
1219 : /// Hence, the result **does not represent local filesystem usage**.
1220 0 : pub(crate) async fn layer_size_sum(&self) -> u64 {
1221 0 : let guard = self.layers.read().await;
1222 0 : guard.layer_size_sum()
1223 0 : }
1224 :
1225 0 : pub(crate) fn resident_physical_size(&self) -> u64 {
1226 0 : self.metrics.resident_physical_size_get()
1227 0 : }
1228 :
1229 0 : pub(crate) fn get_directory_metrics(&self) -> [u64; DirectoryKind::KINDS_NUM] {
1230 0 : array::from_fn(|idx| self.directory_metrics[idx].load(AtomicOrdering::Relaxed))
1231 0 : }
1232 :
1233 : ///
1234 : /// Wait until WAL has been received and processed up to this LSN.
1235 : ///
1236 : /// You should call this before any of the other get_* or list_* functions. Calling
1237 : /// those functions with an LSN that has not been processed yet is an error.
1238 : ///
1239 675132 : pub(crate) async fn wait_lsn(
1240 675132 : &self,
1241 675132 : lsn: Lsn,
1242 675132 : who_is_waiting: WaitLsnWaiter<'_>,
1243 675132 : ctx: &RequestContext, /* Prepare for use by cancellation */
1244 675132 : ) -> Result<(), WaitLsnError> {
1245 675132 : let state = self.current_state();
1246 675132 : if self.cancel.is_cancelled() || matches!(state, TimelineState::Stopping) {
1247 0 : return Err(WaitLsnError::Shutdown);
1248 675132 : } else if !matches!(state, TimelineState::Active) {
1249 0 : return Err(WaitLsnError::BadState(state));
1250 675132 : }
1251 675132 :
1252 675132 : if cfg!(debug_assertions) {
1253 675132 : match ctx.task_kind() {
1254 : TaskKind::WalReceiverManager
1255 : | TaskKind::WalReceiverConnectionHandler
1256 : | TaskKind::WalReceiverConnectionPoller => {
1257 0 : let is_myself = match who_is_waiting {
1258 0 : WaitLsnWaiter::Timeline(waiter) => Weak::ptr_eq(&waiter.myself, &self.myself),
1259 0 : WaitLsnWaiter::Tenant | WaitLsnWaiter::PageService => unreachable!("tenant or page_service context are not expected to have task kind {:?}", ctx.task_kind()),
1260 : };
1261 0 : if is_myself {
1262 0 : if let Err(current) = self.last_record_lsn.would_wait_for(lsn) {
1263 : // walingest is the only one that can advance last_record_lsn; it should make sure to never reach here
1264 0 : panic!("this timeline's walingest task is calling wait_lsn({lsn}) but we only have last_record_lsn={current}; would deadlock");
1265 0 : }
1266 0 : } else {
1267 0 : // if another timeline's is waiting for us, there's no deadlock risk because
1268 0 : // if another timeline's task is waiting for us, there's no deadlock risk because
1269 0 : }
1270 : }
1271 675132 : _ => {}
1272 : }
1273 0 : }
1274 :
1275 675132 : let _timer = crate::metrics::WAIT_LSN_TIME.start_timer();
1276 675132 :
1277 675132 : match self
1278 675132 : .last_record_lsn
1279 675132 : .wait_for_timeout(lsn, self.conf.wait_lsn_timeout)
1280 0 : .await
1281 : {
1282 675132 : Ok(()) => Ok(()),
1283 0 : Err(e) => {
1284 : use utils::seqwait::SeqWaitError::*;
1285 0 : match e {
1286 0 : Shutdown => Err(WaitLsnError::Shutdown),
1287 : Timeout => {
1288 : // don't count the time spent waiting for lock below, and also in walreceiver.status(), towards the wait_lsn_time_histo
1289 0 : drop(_timer);
1290 0 : let walreceiver_status = self.walreceiver_status();
1291 0 : Err(WaitLsnError::Timeout(format!(
1292 0 : "Timed out while waiting for WAL record at LSN {} to arrive, last_record_lsn {} disk consistent LSN={}, WalReceiver status: {}",
1293 0 : lsn,
1294 0 : self.get_last_record_lsn(),
1295 0 : self.get_disk_consistent_lsn(),
1296 0 : walreceiver_status,
1297 0 : )))
1298 : }
1299 : }
1300 : }
1301 : }
1302 675132 : }
1303 :
1304 0 : pub(crate) fn walreceiver_status(&self) -> String {
1305 0 : match &*self.walreceiver.lock().unwrap() {
1306 0 : None => "stopping or stopped".to_string(),
1307 0 : Some(walreceiver) => match walreceiver.status() {
1308 0 : Some(status) => status.to_human_readable_string(),
1309 0 : None => "Not active".to_string(),
1310 : },
1311 : }
1312 0 : }
1313 :
1314 : /// Check that it is valid to request operations with that lsn.
1315 690 : pub(crate) fn check_lsn_is_in_scope(
1316 690 : &self,
1317 690 : lsn: Lsn,
1318 690 : latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
1319 690 : ) -> anyhow::Result<()> {
1320 690 : ensure!(
1321 690 : lsn >= **latest_gc_cutoff_lsn,
1322 12 : "LSN {} is earlier than latest GC cutoff {} (we might've already garbage collected needed data)",
1323 12 : lsn,
1324 12 : **latest_gc_cutoff_lsn,
1325 : );
1326 678 : Ok(())
1327 690 : }
1328 :
1329 : /// Obtains a temporary lease blocking garbage collection for the given LSN.
1330 : ///
1331 : /// This function will error if the requesting LSN is less than the `latest_gc_cutoff_lsn` and there is also
1332 : /// no existing lease to renew. If there is an existing lease in the map, the lease will be renewed only if
1333 : /// the request extends the lease. The returned lease is therefore the maximum between the existing lease and
1334 : /// the requested lease.
1335 42 : pub(crate) fn make_lsn_lease(
1336 42 : &self,
1337 42 : lsn: Lsn,
1338 42 : length: Duration,
1339 42 : _ctx: &RequestContext,
1340 42 : ) -> anyhow::Result<LsnLease> {
1341 36 : let lease = {
1342 42 : let mut gc_info = self.gc_info.write().unwrap();
1343 42 :
1344 42 : let valid_until = SystemTime::now() + length;
1345 42 :
1346 42 : let entry = gc_info.leases.entry(lsn);
1347 :
1348 36 : let lease = {
1349 42 : if let Entry::Occupied(mut occupied) = entry {
1350 18 : let existing_lease = occupied.get_mut();
1351 18 : if valid_until > existing_lease.valid_until {
1352 6 : existing_lease.valid_until = valid_until;
1353 6 : let dt: DateTime<Utc> = valid_until.into();
1354 6 : info!("lease extended to {}", dt);
1355 : } else {
1356 12 : let dt: DateTime<Utc> = existing_lease.valid_until.into();
1357 12 : info!("existing lease covers greater length, valid until {}", dt);
1358 : }
1359 :
1360 18 : existing_lease.clone()
1361 : } else {
1362 : // Reject already GC-ed LSN (lsn < latest_gc_cutoff)
1363 24 : let latest_gc_cutoff_lsn = self.get_latest_gc_cutoff_lsn();
1364 24 : if lsn < *latest_gc_cutoff_lsn {
1365 6 : bail!("tried to request a page version that was garbage collected. requested at {} gc cutoff {}", lsn, *latest_gc_cutoff_lsn);
1366 18 : }
1367 18 :
1368 18 : let dt: DateTime<Utc> = valid_until.into();
1369 18 : info!("lease created, valid until {}", dt);
1370 18 : entry.or_insert(LsnLease { valid_until }).clone()
1371 : }
1372 : };
1373 :
1374 36 : lease
1375 36 : };
1376 36 :
1377 36 : Ok(lease)
1378 42 : }
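    // Editorial sketch, not part of the original file: the renewal rule used
    // above, isolated on a bare lease map. A renewal only ever extends
    // `valid_until`; a request with a shorter validity leaves the existing
    // lease untouched. The helper name is made up.
    #[allow(dead_code)]
    fn renew_lease_sketch(
        leases: &mut BTreeMap<Lsn, LsnLease>,
        lsn: Lsn,
        valid_until: SystemTime,
    ) -> LsnLease {
        match leases.entry(lsn) {
            Entry::Occupied(mut occupied) => {
                let existing = occupied.get_mut();
                if valid_until > existing.valid_until {
                    existing.valid_until = valid_until;
                }
                existing.clone()
            }
            Entry::Vacant(vacant) => vacant.insert(LsnLease { valid_until }).clone(),
        }
    }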
1379 :
1380 : /// Flush to disk all data that was written with the put_* functions
1381 3270 : #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
1382 : pub(crate) async fn freeze_and_flush(&self) -> Result<(), FlushLayerError> {
1383 : self.freeze_and_flush0().await
1384 : }
1385 :
1386 : // This exists to provide a non-span creating version of `freeze_and_flush` we can call without
1387 : // polluting the span hierarchy.
1388 3270 : pub(crate) async fn freeze_and_flush0(&self) -> Result<(), FlushLayerError> {
1389 3270 : let token = {
1390 : // Freeze the current open in-memory layer. It will be written to disk on next
1391 : // iteration.
1392 3270 : let mut g = self.write_lock.lock().await;
1393 :
1394 3270 : let to_lsn = self.get_last_record_lsn();
1395 3270 : self.freeze_inmem_layer_at(to_lsn, &mut g).await?
1396 : };
1397 3273 : self.wait_flush_completion(token).await
1398 3270 : }
1399 :
1400 : // Check if an open ephemeral layer should be closed: this provides
1401 : // background enforcement of checkpoint interval if there is no active WAL receiver, to avoid keeping
1402 : // an ephemeral layer open forever when idle. It also freezes layers if the global limit on
1403 : // ephemeral layer bytes has been breached.
1404 0 : pub(super) async fn maybe_freeze_ephemeral_layer(&self) {
1405 0 : let Ok(mut write_guard) = self.write_lock.try_lock() else {
1406 : // If the write lock is held, there is an active wal receiver: rolling open layers
1407 : // is their responsibility while they hold this lock.
1408 0 : return;
1409 : };
1410 :
1411 : // FIXME: why not early exit? because before #7927 the state would have been cleared every
1412 : // time, and this was missed.
1413 : // if write_guard.is_none() { return; }
1414 :
1415 0 : let Ok(layers_guard) = self.layers.try_read() else {
1416 : // Don't block if the layer lock is busy
1417 0 : return;
1418 : };
1419 :
1420 0 : let Ok(lm) = layers_guard.layer_map() else {
1421 0 : return;
1422 : };
1423 :
1424 0 : let Some(open_layer) = &lm.open_layer else {
1425 : // If there is no open layer, we have no layer freezing to do. However, we might need to generate
1426 : // some updates to disk_consistent_lsn and remote_consistent_lsn, in case we ingested some WAL regions
1427 : // that didn't result in writes to this shard.
1428 :
1429 : // Must not hold the layers lock while waiting for a flush.
1430 0 : drop(layers_guard);
1431 0 :
1432 0 : let last_record_lsn = self.get_last_record_lsn();
1433 0 : let disk_consistent_lsn = self.get_disk_consistent_lsn();
1434 0 : if last_record_lsn > disk_consistent_lsn {
1435 : // We have no open layer, but disk_consistent_lsn is behind the last record: this indicates
1436 : // we are a sharded tenant and have skipped some WAL
1437 0 : let last_freeze_ts = *self.last_freeze_ts.read().unwrap();
1438 0 : if last_freeze_ts.elapsed() >= self.get_checkpoint_timeout() {
1439 : // Only do this if we have been layer-less for longer than get_checkpoint_timeout, so that a shard
1440 : // without any data ingested (yet) doesn't write a remote index as soon as it
1441 : // sees its LSN advance: the timeout ensures we have genuinely been layer-less
1442 : // for some time.
1443 0 : tracing::debug!(
1444 0 : "Advancing disk_consistent_lsn past WAL ingest gap {} -> {}",
1445 : disk_consistent_lsn,
1446 : last_record_lsn
1447 : );
1448 :
1449 : // The flush loop will update remote consistent LSN as well as disk consistent LSN.
1450 : // We know there is no open layer, so we can request freezing without actually
1451 : // freezing anything. This is true even if we have dropped the layers_guard, we
1452 : // still hold the write_guard.
1453 0 : let _ = async {
1454 0 : let token = self
1455 0 : .freeze_inmem_layer_at(last_record_lsn, &mut write_guard)
1456 0 : .await?;
1457 0 : self.wait_flush_completion(token).await
1458 0 : }
1459 0 : .await;
1460 0 : }
1461 0 : }
1462 :
1463 0 : return;
1464 : };
1465 :
1466 0 : let Some(current_size) = open_layer.try_len() else {
1467 : // Unexpected: since we hold the write guard, nobody else should be writing to this layer, so
1468 : // taking the read lock to get its size should always succeed.
1469 0 : tracing::warn!("Lock conflict while reading size of open layer");
1470 0 : return;
1471 : };
1472 :
1473 0 : let current_lsn = self.get_last_record_lsn();
1474 :
1475 0 : let checkpoint_distance_override = open_layer.tick().await;
1476 :
1477 0 : if let Some(size_override) = checkpoint_distance_override {
1478 0 : if current_size > size_override {
1479 : // This is not harmful, but it only happens in relatively rare cases where
1480 : // time-based checkpoints are not happening fast enough to keep the amount of
1481 : // ephemeral data within configured limits. It's a sign of stress on the system.
1482 0 : tracing::info!("Early-rolling open layer at size {current_size} (limit {size_override}) due to dirty data pressure");
1483 0 : }
1484 0 : }
1485 :
1486 0 : let checkpoint_distance =
1487 0 : checkpoint_distance_override.unwrap_or(self.get_checkpoint_distance());
1488 0 :
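     : // There is no pending write being added here, so the projected layer size is just the
     : // current size; hence `current_size` is passed for both size arguments below.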
1489 0 : if self.should_roll(
1490 0 : current_size,
1491 0 : current_size,
1492 0 : checkpoint_distance,
1493 0 : self.get_last_record_lsn(),
1494 0 : self.last_freeze_at.load(),
1495 0 : open_layer.get_opened_at(),
1496 0 : ) {
1497 0 : match open_layer.info() {
1498 0 : InMemoryLayerInfo::Frozen { lsn_start, lsn_end } => {
1499 0 : // We may reach this point if the layer was already frozen but not yet flushed: flushing
1500 0 : // happens asynchronously in the background.
1501 0 : tracing::debug!(
1502 0 : "Not freezing open layer, it's already frozen ({lsn_start}..{lsn_end})"
1503 : );
1504 : }
1505 : InMemoryLayerInfo::Open { .. } => {
1506 : // Upgrade to a write lock and freeze the layer
1507 0 : drop(layers_guard);
1508 0 : let res = self
1509 0 : .freeze_inmem_layer_at(current_lsn, &mut write_guard)
1510 0 : .await;
1511 :
1512 0 : if let Err(e) = res {
1513 0 : tracing::info!(
1514 0 : "failed to flush frozen layer after background freeze: {e:#}"
1515 : );
1516 0 : }
1517 : }
1518 : }
1519 0 : }
1520 0 : }
1521 :
1522 : /// Outermost timeline compaction operation; downloads needed layers. Returns whether we have pending
1523 : /// compaction tasks.
1524 1092 : pub(crate) async fn compact(
1525 1092 : self: &Arc<Self>,
1526 1092 : cancel: &CancellationToken,
1527 1092 : flags: EnumSet<CompactFlags>,
1528 1092 : ctx: &RequestContext,
1529 1092 : ) -> Result<bool, CompactionError> {
1530 1092 : // most likely the cancellation token is from a background task, but in tests it could be the
1531 1092 : // request task as well.
1532 1092 :
1533 1092 : let prepare = async move {
1534 1092 : let guard = self.compaction_lock.lock().await;
1535 :
1536 1092 : let permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
1537 1092 : BackgroundLoopKind::Compaction,
1538 1092 : ctx,
1539 1092 : )
1540 0 : .await;
1541 :
1542 1092 : (guard, permit)
1543 1092 : };
1544 :
1545 : // this wait probably never needs any "long time spent" logging, because we already nag if
1546 : // the compaction task goes over its period (20s), which happens quite often in production.
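     : // Racing the (lock, permit) future against both cancellation tokens lets a pending
     : // compaction be abandoned promptly if the timeline or the caller shuts down.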
1547 1092 : let (_guard, _permit) = tokio::select! {
1548 1092 : tuple = prepare => { tuple },
1549 1092 : _ = self.cancel.cancelled() => return Ok(false),
1550 1092 : _ = cancel.cancelled() => return Ok(false),
1551 : };
1552 :
1553 1092 : let last_record_lsn = self.get_last_record_lsn();
1554 1092 :
1555 1092 : // The last record LSN can be zero if the timeline was just created
1556 1092 : if !last_record_lsn.is_valid() {
1557 0 : warn!("Skipping compaction for potentially just initialized timeline, it has invalid last record lsn: {last_record_lsn}");
1558 0 : return Ok(false);
1559 1092 : }
1560 1092 :
1561 1092 : match self.get_compaction_algorithm_settings().kind {
1562 : CompactionAlgorithm::Tiered => {
1563 0 : self.compact_tiered(cancel, ctx).await?;
1564 0 : Ok(false)
1565 : }
1566 118515 : CompactionAlgorithm::Legacy => self.compact_legacy(cancel, flags, ctx).await,
1567 : }
1568 1092 : }
1569 :
1570 : /// Mutate the timeline with a [`TimelineWriter`].
1571 15399528 : pub(crate) async fn writer(&self) -> TimelineWriter<'_> {
1572 15399528 : TimelineWriter {
1573 15399528 : tl: self,
1574 15399528 : write_guard: self.write_lock.lock().await,
1575 : }
1576 15399528 : }
1577 :
1578 0 : pub(crate) fn activate(
1579 0 : self: &Arc<Self>,
1580 0 : parent: Arc<crate::tenant::Tenant>,
1581 0 : broker_client: BrokerClientChannel,
1582 0 : background_jobs_can_start: Option<&completion::Barrier>,
1583 0 : ctx: &RequestContext,
1584 0 : ) {
1585 0 : if self.tenant_shard_id.is_shard_zero() {
1586 0 : // Logical size is only maintained accurately on shard zero.
1587 0 : self.spawn_initial_logical_size_computation_task(ctx);
1588 0 : }
1589 0 : self.launch_wal_receiver(ctx, broker_client);
1590 0 : self.set_state(TimelineState::Active);
1591 0 : self.launch_eviction_task(parent, background_jobs_can_start);
1592 0 : }
1593 :
1594 : /// After this function returns, there are no timeline-scoped tasks are left running.
1595 : /// After this function returns, no timeline-scoped tasks are left running.
1596 : ///
1597 : /// The preferred pattern is:
1598 : /// - if early shutdown (not just cancellation) of a sub-tree of tasks is required,
1599 : /// go the extra mile and keep track of JoinHandles
1600 : /// - Keep track of JoinHandles using a passed-down `Arc<Mutex<Option<JoinSet>>>` or similar,
1601 : /// instead of spawning directly on a runtime. It is a more composable / testable pattern.
1602 : ///
1603 : /// For legacy reasons, we still have multiple tasks spawned using
1604 : /// `task_mgr::spawn(X, Some(tenant_id), Some(timeline_id))`.
1605 : /// We refer to these as "timeline-scoped task_mgr tasks".
1606 : /// Some of these tasks are already sensitive to Timeline::cancel while others are
1607 : /// not sensitive to Timeline::cancel and instead respect [`task_mgr::shutdown_token`]
1608 : /// or [`task_mgr::shutdown_watcher`].
1609 : /// We want to gradually convert the code base away from these.
1610 : ///
1611 : /// Here is an inventory of timeline-scoped task_mgr tasks that are still sensitive to
1612 : /// `task_mgr::shutdown_{token,watcher}` (there are also tenant-scoped and global-scoped
1613 : /// ones that aren't mentioned here):
1614 : /// - [`TaskKind::TimelineDeletionWorker`]
1615 : /// - NB: also used for tenant deletion
1616 : /// - [`TaskKind::RemoteUploadTask`]
1617 : /// - [`TaskKind::InitialLogicalSizeCalculation`]
1618 : /// - [`TaskKind::DownloadAllRemoteLayers`] (can we get rid of it?)
1619 : // Inventory of timeline-scoped task_mgr tasks that use spawn but aren't sensitive:
1620 : /// Inventory of timeline-scoped task_mgr tasks that use spawn but aren't sensitive:
1621 : /// - [`TaskKind::LayerFlushTask`]
1622 : /// - [`TaskKind::OndemandLogicalSizeCalculation`]
1623 : /// - [`TaskKind::GarbageCollector`] (immediate_gc is timeline-scoped)
1624 24 : pub(crate) async fn shutdown(&self, mode: ShutdownMode) {
1625 24 : debug_assert_current_span_has_tenant_and_timeline_id();
1626 :
1627 24 : let try_freeze_and_flush = match mode {
1628 18 : ShutdownMode::FreezeAndFlush => true,
1629 6 : ShutdownMode::Hard => false,
1630 : };
1631 :
1632 : // Regardless of whether we're going to try_freeze_and_flush
1633 : // or not, stop ingesting any more data. Walreceiver only provides
1634 : // cancellation but no "wait until gone", because it uses the Timeline::gate.
1635 : // So, only after the self.gate.close() below will we know for sure that
1636 : // no walreceiver tasks are left.
1637 : // For `try_freeze_and_flush=true`, this means that we might still be ingesting
1638 : // data during the call to `self.freeze_and_flush()` below.
1639 : // That's not ideal, but, we don't have the concept of a ChildGuard,
1640 : // which is what we'd need to properly model early shutdown of the walreceiver
1641 : // task sub-tree before the other Timeline task sub-trees.
1642 24 : let walreceiver = self.walreceiver.lock().unwrap().take();
1643 24 : tracing::debug!(
1644 0 : is_some = walreceiver.is_some(),
1645 0 : "Waiting for WalReceiverManager..."
1646 : );
1647 24 : if let Some(walreceiver) = walreceiver {
1648 0 : walreceiver.cancel();
1649 24 : }
1650 : // ... and inform any waiters for newer LSNs that there won't be any.
1651 24 : self.last_record_lsn.shutdown();
1652 24 :
1653 24 : if try_freeze_and_flush {
1654 18 : if let Some((open, frozen)) = self
1655 18 : .layers
1656 18 : .read()
1657 0 : .await
1658 18 : .layer_map()
1659 18 : .map(|lm| (lm.open_layer.is_some(), lm.frozen_layers.len()))
1660 18 : .ok()
1661 18 : .filter(|(open, frozen)| *open || *frozen > 0)
1662 : {
1663 0 : tracing::info!(?open, frozen, "flushing and freezing on shutdown");
1664 18 : } else {
1665 18 : // this is double-shutdown, ignore it
1666 18 : }
1667 :
1668 : // we shut down walreceiver above, so, we won't add anything more
1669 : // to the InMemoryLayer; freeze it and wait for all frozen layers
1670 : // to reach the disk & upload queue, then shut the upload queue and
1671 : // wait for it to drain.
1672 18 : match self.freeze_and_flush().await {
1673 : Ok(_) => {
1674 : // drain the upload queue
1675 : // if we did not wait for completion here, it might be our shutdown process
1676 : // didn't wait for remote uploads to complete at all, as new tasks can forever
1677 : // be spawned.
1678 : //
1679 : // what is problematic is the shutting down of RemoteTimelineClient, because
1680 : // obviously it does not make sense to stop while we wait for it, but what
1681 : // about corner cases like s3 suddenly hanging up?
1682 18 : self.remote_client.shutdown().await;
1683 : }
1684 : Err(FlushLayerError::Cancelled) => {
1685 : // this is likely the second shutdown, ignore silently.
1686 : // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080
1687 0 : debug_assert!(self.cancel.is_cancelled());
1688 : }
1689 0 : Err(e) => {
1690 0 : // Non-fatal. Shutdown is infallible. Failures to flush just mean that
1691 0 : // we have some extra WAL replay to do next time the timeline starts.
1692 0 : warn!("failed to freeze and flush: {e:#}");
1693 : }
1694 : }
1695 6 : }
1696 :
1697 : // Signal any subscribers to our cancellation token to drop out
1698 24 : tracing::debug!("Cancelling CancellationToken");
1699 24 : self.cancel.cancel();
1700 24 :
1701 24 : // Prevent new page service requests from starting.
1702 24 : self.handles.shutdown();
1703 24 :
1704 24 : // Transition the remote_client into a state where it's only useful for timeline deletion.
1705 24 : // (The deletion use case is why we can't just hook up remote_client to Self::cancel).)
1706 24 : // (The deletion use case is why we can't just hook up remote_client to Self::cancel.)
1707 24 :
1708 24 : // As documented in remote_client.stop()'s doc comment, it's our responsibility
1709 24 : // to shut down the upload queue tasks.
1710 24 : // TODO: fix that, task management should be encapsulated inside remote_client.
1711 24 : task_mgr::shutdown_tasks(
1712 24 : Some(TaskKind::RemoteUploadTask),
1713 24 : Some(self.tenant_shard_id),
1714 24 : Some(self.timeline_id),
1715 24 : )
1716 0 : .await;
1717 :
1718 : // TODO: work toward making this a no-op. See this function's doc comment for more context.
1719 24 : tracing::debug!("Waiting for tasks...");
1720 24 : task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), Some(self.timeline_id)).await;
1721 :
1722 : {
1723 : // Allow any remaining in-memory layers to do cleanup -- until that, they hold the gate
1724 : // open.
1725 24 : let mut write_guard = self.write_lock.lock().await;
1726 24 : self.layers.write().await.shutdown(&mut write_guard);
1727 24 : }
1728 24 :
1729 24 : // Finally wait until any gate-holders are complete.
1730 24 : //
1731 24 : // TODO: once above shutdown_tasks is a no-op, we can close the gate before calling shutdown_tasks
1732 24 : // and use a TBD variant of shutdown_tasks that asserts that there were no tasks left.
1733 24 : self.gate.close().await;
1734 :
1735 24 : self.metrics.shutdown();
1736 24 : }
1737 :
1738 1242 : pub(crate) fn set_state(&self, new_state: TimelineState) {
1739 1242 : match (self.current_state(), new_state) {
1740 1242 : (equal_state_1, equal_state_2) if equal_state_1 == equal_state_2 => {
1741 6 : info!("Ignoring new state, equal to the existing one: {equal_state_2:?}");
1742 : }
1743 0 : (st, TimelineState::Loading) => {
1744 0 : error!("ignoring transition from {st:?} into Loading state");
1745 : }
1746 0 : (TimelineState::Broken { .. }, new_state) => {
1747 0 : error!("Ignoring state update {new_state:?} for broken timeline");
1748 : }
1749 : (TimelineState::Stopping, TimelineState::Active) => {
1750 0 : error!("Not activating a Stopping timeline");
1751 : }
1752 1236 : (_, new_state) => {
1753 1236 : self.state.send_replace(new_state);
1754 1236 : }
1755 : }
1756 1242 : }
1757 :
1758 6 : pub(crate) fn set_broken(&self, reason: String) {
1759 6 : let backtrace_str: String = format!("{}", std::backtrace::Backtrace::force_capture());
1760 6 : let broken_state = TimelineState::Broken {
1761 6 : reason,
1762 6 : backtrace: backtrace_str,
1763 6 : };
1764 6 : self.set_state(broken_state);
1765 6 :
1766 6 : // Although the Broken state is not equivalent to shutdown() (shutdown will be called
1767 6 : // later when this tenant is detached or the process shuts down), firing the cancellation token
1768 6 : // here avoids the need for other tasks to watch for the Broken state explicitly.
1769 6 : self.cancel.cancel();
1770 6 : }
1771 :
1772 680391 : pub(crate) fn current_state(&self) -> TimelineState {
1773 680391 : self.state.borrow().clone()
1774 680391 : }
1775 :
1776 18 : pub(crate) fn is_broken(&self) -> bool {
1777 18 : matches!(&*self.state.borrow(), TimelineState::Broken { .. })
1778 18 : }
1779 :
1780 666 : pub(crate) fn is_active(&self) -> bool {
1781 666 : self.current_state() == TimelineState::Active
1782 666 : }
1783 :
1784 : #[allow(unused)]
1785 0 : pub(crate) fn is_archived(&self) -> Option<bool> {
1786 0 : self.remote_client.is_archived()
1787 0 : }
1788 :
1789 3351 : pub(crate) fn is_stopping(&self) -> bool {
1790 3351 : self.current_state() == TimelineState::Stopping
1791 3351 : }
1792 :
1793 0 : pub(crate) fn subscribe_for_state_updates(&self) -> watch::Receiver<TimelineState> {
1794 0 : self.state.subscribe()
1795 0 : }
1796 :
1797 675138 : pub(crate) async fn wait_to_become_active(
1798 675138 : &self,
1799 675138 : _ctx: &RequestContext, // Prepare for use by cancellation
1800 675138 : ) -> Result<(), TimelineState> {
1801 675138 : let mut receiver = self.state.subscribe();
1802 : loop {
1803 675138 : let current_state = receiver.borrow().clone();
1804 675138 : match current_state {
1805 : TimelineState::Loading => {
1806 0 : receiver
1807 0 : .changed()
1808 0 : .await
1809 0 : .expect("holding a reference to self");
1810 : }
1811 : TimelineState::Active { .. } => {
1812 675132 : return Ok(());
1813 : }
1814 : TimelineState::Broken { .. } | TimelineState::Stopping => {
1815 : // There's no chance the timeline can transition back into ::Active
1816 6 : return Err(current_state);
1817 : }
1818 : }
1819 : }
1820 675138 : }
1821 :
1822 0 : pub(crate) async fn layer_map_info(
1823 0 : &self,
1824 0 : reset: LayerAccessStatsReset,
1825 0 : ) -> Result<LayerMapInfo, layer_manager::Shutdown> {
1826 0 : let guard = self.layers.read().await;
1827 0 : let layer_map = guard.layer_map()?;
1828 0 : let mut in_memory_layers = Vec::with_capacity(layer_map.frozen_layers.len() + 1);
1829 0 : if let Some(open_layer) = &layer_map.open_layer {
1830 0 : in_memory_layers.push(open_layer.info());
1831 0 : }
1832 0 : for frozen_layer in &layer_map.frozen_layers {
1833 0 : in_memory_layers.push(frozen_layer.info());
1834 0 : }
1835 :
1836 0 : let historic_layers = layer_map
1837 0 : .iter_historic_layers()
1838 0 : .map(|desc| guard.get_from_desc(&desc).info(reset))
1839 0 : .collect();
1840 0 :
1841 0 : Ok(LayerMapInfo {
1842 0 : in_memory_layers,
1843 0 : historic_layers,
1844 0 : })
1845 0 : }
1846 :
1847 0 : #[instrument(skip_all, fields(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))]
1848 : pub(crate) async fn download_layer(
1849 : &self,
1850 : layer_file_name: &LayerName,
1851 : ) -> anyhow::Result<Option<bool>> {
1852 : let Some(layer) = self.find_layer(layer_file_name).await? else {
1853 : return Ok(None);
1854 : };
1855 :
1856 : layer.download().await?;
1857 :
1858 : Ok(Some(true))
1859 : }
1860 :
1861 : /// Evict just one layer.
1862 : ///
1863 : /// Returns `Ok(None)` in the case where the layer could not be found by its `layer_file_name`.
1864 0 : pub(crate) async fn evict_layer(
1865 0 : &self,
1866 0 : layer_file_name: &LayerName,
1867 0 : ) -> anyhow::Result<Option<bool>> {
1868 0 : let _gate = self
1869 0 : .gate
1870 0 : .enter()
1871 0 : .map_err(|_| anyhow::anyhow!("Shutting down"))?;
1872 :
1873 0 : let Some(local_layer) = self.find_layer(layer_file_name).await? else {
1874 0 : return Ok(None);
1875 : };
1876 :
1877 : // curl has this by default
1878 0 : let timeout = std::time::Duration::from_secs(120);
1879 0 :
1880 0 : match local_layer.evict_and_wait(timeout).await {
1881 0 : Ok(()) => Ok(Some(true)),
1882 0 : Err(EvictionError::NotFound) => Ok(Some(false)),
1883 0 : Err(EvictionError::Downloaded) => Ok(Some(false)),
1884 0 : Err(EvictionError::Timeout) => Ok(Some(false)),
1885 : }
1886 0 : }
1887 :
1888 14409060 : fn should_roll(
1889 14409060 : &self,
1890 14409060 : layer_size: u64,
1891 14409060 : projected_layer_size: u64,
1892 14409060 : checkpoint_distance: u64,
1893 14409060 : projected_lsn: Lsn,
1894 14409060 : last_freeze_at: Lsn,
1895 14409060 : opened_at: Instant,
1896 14409060 : ) -> bool {
1897 14409060 : let distance = projected_lsn.widening_sub(last_freeze_at);
1898 14409060 :
1899 14409060 : // Rolling the open layer can be triggered by:
1900 14409060 : // 1. The distance from the last LSN we rolled at. This bounds the amount of WAL that
1901 14409060 : // the safekeepers need to store. For sharded tenants, we multiply by shard count to
1902 14409060 : // account for how writes are distributed across shards: we expect each node to consume
1903 14409060 : // 1/count of the LSN on average.
1904 14409060 : // 2. The size of the currently open layer.
1905 14409060 : // 3. The time since the last roll. It helps safekeepers to regard pageserver as caught
1906 14409060 : // up and suspend activity.
1907 14409060 : if distance >= checkpoint_distance as i128 * self.shard_identity.count.count() as i128 {
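     : // As an illustration (assuming checkpoint_distance = 256 MiB and a 4-shard tenant):
     : // trigger 1 fires once ~1 GiB of LSN distance (256 MiB * 4 shards) has accumulated since
     : // last_freeze_at, trigger 2 fires once the projected layer size itself reaches 256 MiB, and
     : // trigger 3 fires once the layer has been open longer than the checkpoint timeout, provided
     : // any WAL has arrived since the last freeze.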
1908 0 : info!(
1909 0 : "Will roll layer at {} with layer size {} due to LSN distance ({})",
1910 : projected_lsn, layer_size, distance
1911 : );
1912 :
1913 0 : true
1914 14409060 : } else if projected_layer_size >= checkpoint_distance {
1915 : // NB: this check is relied upon by:
1916 240 : let _ = IndexEntry::validate_checkpoint_distance;
1917 240 : info!(
1918 0 : "Will roll layer at {} with layer size {} due to layer size ({})",
1919 : projected_lsn, layer_size, projected_layer_size
1920 : );
1921 :
1922 240 : true
1923 14408820 : } else if distance > 0 && opened_at.elapsed() >= self.get_checkpoint_timeout() {
1924 0 : info!(
1925 0 : "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})",
1926 0 : projected_lsn,
1927 0 : layer_size,
1928 0 : opened_at.elapsed()
1929 : );
1930 :
1931 0 : true
1932 : } else {
1933 14408820 : false
1934 : }
1935 14409060 : }
1936 : }
1937 :
1938 : /// Number of times we will compute partition within a checkpoint distance.
1939 : const REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE: u64 = 10;
1940 :
1941 : // Private functions
1942 : impl Timeline {
1943 36 : pub(crate) fn get_lsn_lease_length(&self) -> Duration {
1944 36 : let tenant_conf = self.tenant_conf.load();
1945 36 : tenant_conf
1946 36 : .tenant_conf
1947 36 : .lsn_lease_length
1948 36 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
1949 36 : }
1950 :
1951 : // TODO(yuchen): remove unused flag after implementing https://github.com/neondatabase/neon/issues/8072
1952 : #[allow(unused)]
1953 0 : pub(crate) fn get_lsn_lease_length_for_ts(&self) -> Duration {
1954 0 : let tenant_conf = self.tenant_conf.load();
1955 0 : tenant_conf
1956 0 : .tenant_conf
1957 0 : .lsn_lease_length_for_ts
1958 0 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length_for_ts)
1959 0 : }
1960 :
1961 696 : pub(crate) fn get_switch_aux_file_policy(&self) -> AuxFilePolicy {
1962 696 : let tenant_conf = self.tenant_conf.load();
1963 696 : tenant_conf
1964 696 : .tenant_conf
1965 696 : .switch_aux_file_policy
1966 696 : .unwrap_or(self.conf.default_tenant_conf.switch_aux_file_policy)
1967 696 : }
1968 :
1969 0 : pub(crate) fn get_lazy_slru_download(&self) -> bool {
1970 0 : let tenant_conf = self.tenant_conf.load();
1971 0 : tenant_conf
1972 0 : .tenant_conf
1973 0 : .lazy_slru_download
1974 0 : .unwrap_or(self.conf.default_tenant_conf.lazy_slru_download)
1975 0 : }
1976 :
1977 14413854 : fn get_checkpoint_distance(&self) -> u64 {
1978 14413854 : let tenant_conf = self.tenant_conf.load();
1979 14413854 : tenant_conf
1980 14413854 : .tenant_conf
1981 14413854 : .checkpoint_distance
1982 14413854 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
1983 14413854 : }
1984 :
1985 14408820 : fn get_checkpoint_timeout(&self) -> Duration {
1986 14408820 : let tenant_conf = self.tenant_conf.load();
1987 14408820 : tenant_conf
1988 14408820 : .tenant_conf
1989 14408820 : .checkpoint_timeout
1990 14408820 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
1991 14408820 : }
1992 :
1993 1764 : fn get_compaction_target_size(&self) -> u64 {
1994 1764 : let tenant_conf = self.tenant_conf.load();
1995 1764 : tenant_conf
1996 1764 : .tenant_conf
1997 1764 : .compaction_target_size
1998 1764 : .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
1999 1764 : }
2000 :
2001 1176 : fn get_compaction_threshold(&self) -> usize {
2002 1176 : let tenant_conf = self.tenant_conf.load();
2003 1176 : tenant_conf
2004 1176 : .tenant_conf
2005 1176 : .compaction_threshold
2006 1176 : .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
2007 1176 : }
2008 :
2009 42 : fn get_image_creation_threshold(&self) -> usize {
2010 42 : let tenant_conf = self.tenant_conf.load();
2011 42 : tenant_conf
2012 42 : .tenant_conf
2013 42 : .image_creation_threshold
2014 42 : .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
2015 42 : }
2016 :
2017 1092 : fn get_compaction_algorithm_settings(&self) -> CompactionAlgorithmSettings {
2018 1092 : let tenant_conf = &self.tenant_conf.load();
2019 1092 : tenant_conf
2020 1092 : .tenant_conf
2021 1092 : .compaction_algorithm
2022 1092 : .as_ref()
2023 1092 : .unwrap_or(&self.conf.default_tenant_conf.compaction_algorithm)
2024 1092 : .clone()
2025 1092 : }
2026 :
2027 0 : fn get_eviction_policy(&self) -> EvictionPolicy {
2028 0 : let tenant_conf = self.tenant_conf.load();
2029 0 : tenant_conf
2030 0 : .tenant_conf
2031 0 : .eviction_policy
2032 0 : .unwrap_or(self.conf.default_tenant_conf.eviction_policy)
2033 0 : }
2034 :
2035 1266 : fn get_evictions_low_residence_duration_metric_threshold(
2036 1266 : tenant_conf: &TenantConfOpt,
2037 1266 : default_tenant_conf: &TenantConf,
2038 1266 : ) -> Duration {
2039 1266 : tenant_conf
2040 1266 : .evictions_low_residence_duration_metric_threshold
2041 1266 : .unwrap_or(default_tenant_conf.evictions_low_residence_duration_metric_threshold)
2042 1266 : }
2043 :
2044 2136 : fn get_image_layer_creation_check_threshold(&self) -> u8 {
2045 2136 : let tenant_conf = self.tenant_conf.load();
2046 2136 : tenant_conf
2047 2136 : .tenant_conf
2048 2136 : .image_layer_creation_check_threshold
2049 2136 : .unwrap_or(
2050 2136 : self.conf
2051 2136 : .default_tenant_conf
2052 2136 : .image_layer_creation_check_threshold,
2053 2136 : )
2054 2136 : }
2055 :
2056 24 : pub(super) fn tenant_conf_updated(&self, new_conf: &TenantConfOpt) {
2057 24 : // NB: Most tenant conf options are read by background loops, so,
2058 24 : // changes will automatically be picked up.
2059 24 :
2060 24 : // The threshold is embedded in the metric. So, we need to update it.
2061 24 : {
2062 24 : let new_threshold = Self::get_evictions_low_residence_duration_metric_threshold(
2063 24 : new_conf,
2064 24 : &self.conf.default_tenant_conf,
2065 24 : );
2066 24 :
2067 24 : let tenant_id_str = self.tenant_shard_id.tenant_id.to_string();
2068 24 : let shard_id_str = format!("{}", self.tenant_shard_id.shard_slug());
2069 24 :
2070 24 : let timeline_id_str = self.timeline_id.to_string();
2071 24 : self.metrics
2072 24 : .evictions_with_low_residence_duration
2073 24 : .write()
2074 24 : .unwrap()
2075 24 : .change_threshold(
2076 24 : &tenant_id_str,
2077 24 : &shard_id_str,
2078 24 : &timeline_id_str,
2079 24 : new_threshold,
2080 24 : );
2081 24 : }
2082 24 : }
2083 :
2084 : /// Open a Timeline handle.
2085 : ///
2086 : /// Loads the metadata for the timeline into memory, but not the layer map.
2087 : #[allow(clippy::too_many_arguments)]
2088 1242 : pub(super) fn new(
2089 1242 : conf: &'static PageServerConf,
2090 1242 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
2091 1242 : metadata: &TimelineMetadata,
2092 1242 : ancestor: Option<Arc<Timeline>>,
2093 1242 : timeline_id: TimelineId,
2094 1242 : tenant_shard_id: TenantShardId,
2095 1242 : generation: Generation,
2096 1242 : shard_identity: ShardIdentity,
2097 1242 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
2098 1242 : resources: TimelineResources,
2099 1242 : pg_version: u32,
2100 1242 : state: TimelineState,
2101 1242 : aux_file_policy: Option<AuxFilePolicy>,
2102 1242 : cancel: CancellationToken,
2103 1242 : ) -> Arc<Self> {
2104 1242 : let disk_consistent_lsn = metadata.disk_consistent_lsn();
2105 1242 : let (state, _) = watch::channel(state);
2106 1242 :
2107 1242 : let (layer_flush_start_tx, _) = tokio::sync::watch::channel((0, disk_consistent_lsn));
2108 1242 : let (layer_flush_done_tx, _) = tokio::sync::watch::channel((0, Ok(())));
2109 1242 :
2110 1242 : let evictions_low_residence_duration_metric_threshold = {
2111 1242 : let loaded_tenant_conf = tenant_conf.load();
2112 1242 : Self::get_evictions_low_residence_duration_metric_threshold(
2113 1242 : &loaded_tenant_conf.tenant_conf,
2114 1242 : &conf.default_tenant_conf,
2115 1242 : )
2116 : };
2117 :
2118 1242 : if let Some(ancestor) = &ancestor {
2119 684 : let mut ancestor_gc_info = ancestor.gc_info.write().unwrap();
2120 684 : ancestor_gc_info.insert_child(timeline_id, metadata.ancestor_lsn());
2121 684 : }
2122 :
2123 1242 : Arc::new_cyclic(|myself| {
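     : // `Arc::new_cyclic` gives us a Weak reference to the Timeline before it is fully
     : // constructed; it is stored in the `myself` field so the timeline can later hand out
     : // owned handles to itself (e.g. when spawning blocking work in `load_layer_map`).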
2124 1242 : let metrics = TimelineMetrics::new(
2125 1242 : &tenant_shard_id,
2126 1242 : &timeline_id,
2127 1242 : crate::metrics::EvictionsWithLowResidenceDurationBuilder::new(
2128 1242 : "mtime",
2129 1242 : evictions_low_residence_duration_metric_threshold,
2130 1242 : ),
2131 1242 : );
2132 1242 : let aux_file_metrics = metrics.aux_file_size_gauge.clone();
2133 :
2134 1242 : let mut result = Timeline {
2135 1242 : conf,
2136 1242 : tenant_conf,
2137 1242 : myself: myself.clone(),
2138 1242 : timeline_id,
2139 1242 : tenant_shard_id,
2140 1242 : generation,
2141 1242 : shard_identity,
2142 1242 : pg_version,
2143 1242 : layers: Default::default(),
2144 1242 :
2145 1242 : walredo_mgr,
2146 1242 : walreceiver: Mutex::new(None),
2147 1242 :
2148 1242 : remote_client: Arc::new(resources.remote_client),
2149 1242 :
2150 1242 : // initialize in-memory 'last_record_lsn' from 'disk_consistent_lsn'.
2151 1242 : last_record_lsn: SeqWait::new(RecordLsn {
2152 1242 : last: disk_consistent_lsn,
2153 1242 : prev: metadata.prev_record_lsn().unwrap_or(Lsn(0)),
2154 1242 : }),
2155 1242 : disk_consistent_lsn: AtomicLsn::new(disk_consistent_lsn.0),
2156 1242 :
2157 1242 : last_freeze_at: AtomicLsn::new(disk_consistent_lsn.0),
2158 1242 : last_freeze_ts: RwLock::new(Instant::now()),
2159 1242 :
2160 1242 : loaded_at: (disk_consistent_lsn, SystemTime::now()),
2161 1242 :
2162 1242 : ancestor_timeline: ancestor,
2163 1242 : ancestor_lsn: metadata.ancestor_lsn(),
2164 1242 :
2165 1242 : metrics,
2166 1242 :
2167 1242 : query_metrics: crate::metrics::SmgrQueryTimePerTimeline::new(
2168 1242 : &tenant_shard_id,
2169 1242 : &timeline_id,
2170 1242 : ),
2171 1242 :
2172 8694 : directory_metrics: array::from_fn(|_| AtomicU64::new(0)),
2173 1242 :
2174 1242 : flush_loop_state: Mutex::new(FlushLoopState::NotStarted),
2175 1242 :
2176 1242 : layer_flush_start_tx,
2177 1242 : layer_flush_done_tx,
2178 1242 :
2179 1242 : write_lock: tokio::sync::Mutex::new(None),
2180 1242 :
2181 1242 : gc_info: std::sync::RwLock::new(GcInfo::default()),
2182 1242 :
2183 1242 : latest_gc_cutoff_lsn: Rcu::new(metadata.latest_gc_cutoff_lsn()),
2184 1242 : initdb_lsn: metadata.initdb_lsn(),
2185 1242 :
2186 1242 : current_logical_size: if disk_consistent_lsn.is_valid() {
2187 : // we're creating timeline data with some layer files existing locally,
2188 : // need to recalculate timeline's logical size based on data in the layers.
2189 696 : LogicalSize::deferred_initial(disk_consistent_lsn)
2190 : } else {
2191 : // we're creating timeline data without any layers existing locally,
2192 : // initial logical size is 0.
2193 546 : LogicalSize::empty_initial()
2194 : },
2195 1242 : partitioning: tokio::sync::Mutex::new((
2196 1242 : (KeyPartitioning::new(), KeyPartitioning::new().into_sparse()),
2197 1242 : Lsn(0),
2198 1242 : )),
2199 1242 : repartition_threshold: 0,
2200 1242 : last_image_layer_creation_check_at: AtomicLsn::new(0),
2201 1242 : last_image_layer_creation_check_instant: Mutex::new(None),
2202 1242 :
2203 1242 : last_received_wal: Mutex::new(None),
2204 1242 : rel_size_cache: RwLock::new(RelSizeCache {
2205 1242 : complete_as_of: disk_consistent_lsn,
2206 1242 : map: HashMap::new(),
2207 1242 : }),
2208 1242 :
2209 1242 : download_all_remote_layers_task_info: RwLock::new(None),
2210 1242 :
2211 1242 : state,
2212 1242 :
2213 1242 : eviction_task_timeline_state: tokio::sync::Mutex::new(
2214 1242 : EvictionTaskTimelineState::default(),
2215 1242 : ),
2216 1242 : delete_progress: Arc::new(tokio::sync::Mutex::new(DeleteTimelineFlow::default())),
2217 1242 :
2218 1242 : cancel,
2219 1242 : gate: Gate::default(),
2220 1242 :
2221 1242 : compaction_lock: tokio::sync::Mutex::default(),
2222 1242 : gc_lock: tokio::sync::Mutex::default(),
2223 1242 :
2224 1242 : standby_horizon: AtomicLsn::new(0),
2225 1242 :
2226 1242 : timeline_get_throttle: resources.timeline_get_throttle,
2227 1242 :
2228 1242 : aux_files: tokio::sync::Mutex::new(AuxFilesState {
2229 1242 : dir: None,
2230 1242 : n_deltas: 0,
2231 1242 : }),
2232 1242 :
2233 1242 : aux_file_size_estimator: AuxFileSizeEstimator::new(aux_file_metrics),
2234 1242 :
2235 1242 : last_aux_file_policy: AtomicAuxFilePolicy::new(aux_file_policy),
2236 1242 :
2237 1242 : #[cfg(test)]
2238 1242 : extra_test_dense_keyspace: ArcSwap::new(Arc::new(KeySpace::default())),
2239 1242 :
2240 1242 : l0_flush_global_state: resources.l0_flush_global_state,
2241 1242 :
2242 1242 : handles: Default::default(),
2243 1242 : };
2244 1242 :
2245 1242 : if aux_file_policy == Some(AuxFilePolicy::V1) {
2246 0 : warn!("this timeline is using deprecated aux file policy V1 (when loading the timeline)");
2247 1242 : }
2248 :
2249 1242 : result.repartition_threshold =
2250 1242 : result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE;
2251 1242 :
2252 1242 : result
2253 1242 : .metrics
2254 1242 : .last_record_gauge
2255 1242 : .set(disk_consistent_lsn.0 as i64);
2256 1242 : result
2257 1242 : })
2258 1242 : }
2259 :
2260 1746 : pub(super) fn maybe_spawn_flush_loop(self: &Arc<Self>) {
2261 1746 : let Ok(guard) = self.gate.enter() else {
2262 0 : info!("cannot start flush loop when the timeline gate has already been closed");
2263 0 : return;
2264 : };
2265 1746 : let mut flush_loop_state = self.flush_loop_state.lock().unwrap();
2266 1746 : match *flush_loop_state {
2267 1224 : FlushLoopState::NotStarted => (),
2268 : FlushLoopState::Running { .. } => {
2269 522 : info!(
2270 0 : "skipping attempt to start flush_loop twice {}/{}",
2271 0 : self.tenant_shard_id, self.timeline_id
2272 : );
2273 522 : return;
2274 : }
2275 : FlushLoopState::Exited => {
2276 0 : warn!(
2277 0 : "ignoring attempt to restart exited flush_loop {}/{}",
2278 0 : self.tenant_shard_id, self.timeline_id
2279 : );
2280 0 : return;
2281 : }
2282 : }
2283 :
2284 1224 : let layer_flush_start_rx = self.layer_flush_start_tx.subscribe();
2285 1224 : let self_clone = Arc::clone(self);
2286 1224 :
2287 1224 : debug!("spawning flush loop");
2288 1224 : *flush_loop_state = FlushLoopState::Running {
2289 1224 : #[cfg(test)]
2290 1224 : expect_initdb_optimization: false,
2291 1224 : #[cfg(test)]
2292 1224 : initdb_optimization_count: 0,
2293 1224 : };
2294 1224 : task_mgr::spawn(
2295 1224 : task_mgr::BACKGROUND_RUNTIME.handle(),
2296 1224 : task_mgr::TaskKind::LayerFlushTask,
2297 1224 : self.tenant_shard_id,
2298 1224 : Some(self.timeline_id),
2299 1224 : "layer flush task",
2300 1224 : async move {
2301 1224 : let _guard = guard;
2302 1224 : let background_ctx = RequestContext::todo_child(TaskKind::LayerFlushTask, DownloadBehavior::Error);
2303 54143 : self_clone.flush_loop(layer_flush_start_rx, &background_ctx).await;
2304 24 : let mut flush_loop_state = self_clone.flush_loop_state.lock().unwrap();
2305 24 : assert!(matches!(*flush_loop_state, FlushLoopState::Running{..}));
2306 24 : *flush_loop_state = FlushLoopState::Exited;
2307 24 : Ok(())
2308 24 : }
2309 1224 : .instrument(info_span!(parent: None, "layer flush task", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
2310 : );
2311 1746 : }
2312 :
2313 : /// Creates and starts the wal receiver.
2314 : ///
2315 : /// This function is expected to be called at most once per Timeline's lifecycle
2316 : /// when the timeline is activated.
2317 0 : fn launch_wal_receiver(
2318 0 : self: &Arc<Self>,
2319 0 : ctx: &RequestContext,
2320 0 : broker_client: BrokerClientChannel,
2321 0 : ) {
2322 0 : info!(
2323 0 : "launching WAL receiver for timeline {} of tenant {}",
2324 0 : self.timeline_id, self.tenant_shard_id
2325 : );
2326 :
2327 0 : let tenant_conf = self.tenant_conf.load();
2328 0 : let wal_connect_timeout = tenant_conf
2329 0 : .tenant_conf
2330 0 : .walreceiver_connect_timeout
2331 0 : .unwrap_or(self.conf.default_tenant_conf.walreceiver_connect_timeout);
2332 0 : let lagging_wal_timeout = tenant_conf
2333 0 : .tenant_conf
2334 0 : .lagging_wal_timeout
2335 0 : .unwrap_or(self.conf.default_tenant_conf.lagging_wal_timeout);
2336 0 : let max_lsn_wal_lag = tenant_conf
2337 0 : .tenant_conf
2338 0 : .max_lsn_wal_lag
2339 0 : .unwrap_or(self.conf.default_tenant_conf.max_lsn_wal_lag);
2340 0 :
2341 0 : let mut guard = self.walreceiver.lock().unwrap();
2342 0 : assert!(
2343 0 : guard.is_none(),
2344 0 : "multiple launches / re-launches of WAL receiver are not supported"
2345 : );
2346 0 : *guard = Some(WalReceiver::start(
2347 0 : Arc::clone(self),
2348 0 : WalReceiverConf {
2349 0 : wal_connect_timeout,
2350 0 : lagging_wal_timeout,
2351 0 : max_lsn_wal_lag,
2352 0 : auth_token: crate::config::SAFEKEEPER_AUTH_TOKEN.get().cloned(),
2353 0 : availability_zone: self.conf.availability_zone.clone(),
2354 0 : ingest_batch_size: self.conf.ingest_batch_size,
2355 0 : },
2356 0 : broker_client,
2357 0 : ctx,
2358 0 : ));
2359 0 : }
2360 :
2361 : /// Initialize with an empty layer map. Used when creating a new timeline.
2362 1224 : pub(super) fn init_empty_layer_map(&self, start_lsn: Lsn) {
2363 1224 : let mut layers = self.layers.try_write().expect(
2364 1224 : "in the context where we call this function, no other task has access to the object",
2365 1224 : );
2366 1224 : layers
2367 1224 : .open_mut()
2368 1224 : .expect("in this context the LayerManager must still be open")
2369 1224 : .initialize_empty(Lsn(start_lsn.0));
2370 1224 : }
2371 :
2372 : /// Scan the timeline directory, cleanup, populate the layer map, and schedule uploads for local-only
2373 : /// files.
2374 18 : pub(super) async fn load_layer_map(
2375 18 : &self,
2376 18 : disk_consistent_lsn: Lsn,
2377 18 : index_part: Option<IndexPart>,
2378 18 : ) -> anyhow::Result<()> {
2379 : use init::{Decision::*, Discovered, DismissedLayer};
2380 : use LayerName::*;
2381 :
2382 18 : let mut guard = self.layers.write().await;
2383 :
2384 18 : let timer = self.metrics.load_layer_map_histo.start_timer();
2385 18 :
2386 18 : // Scan the timeline directory and create ImageLayerName and DeltaLayerName
2387 18 : // structs representing all files on disk
2388 18 : let timeline_path = self
2389 18 : .conf
2390 18 : .timeline_path(&self.tenant_shard_id, &self.timeline_id);
2391 18 : let conf = self.conf;
2392 18 : let span = tracing::Span::current();
2393 18 :
2394 18 : // Copy to move into the task we're about to spawn
2395 18 : let this = self.myself.upgrade().expect("&self method holds the arc");
2396 :
2397 18 : let (loaded_layers, needs_cleanup, total_physical_size) = tokio::task::spawn_blocking({
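     : // Scanning the directory and reconciling it against the index does synchronous
     : // filesystem I/O, so it runs on the blocking thread pool rather than the async executor.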
2398 18 : move || {
2399 18 : let _g = span.entered();
2400 18 : let discovered = init::scan_timeline_dir(&timeline_path)?;
2401 18 : let mut discovered_layers = Vec::with_capacity(discovered.len());
2402 18 : let mut unrecognized_files = Vec::new();
2403 18 :
2404 18 : let mut path = timeline_path;
2405 :
2406 66 : for discovered in discovered {
2407 48 : let (name, kind) = match discovered {
2408 48 : Discovered::Layer(layer_file_name, local_metadata) => {
2409 48 : discovered_layers.push((layer_file_name, local_metadata));
2410 48 : continue;
2411 : }
2412 0 : Discovered::IgnoredBackup(path) => {
2413 0 : std::fs::remove_file(path)
2414 0 : .or_else(fs_ext::ignore_not_found)
2415 0 : .fatal_err("Removing .old file");
2416 0 : continue;
2417 : }
2418 0 : Discovered::Unknown(file_name) => {
2419 0 : // we will later error if there are any
2420 0 : unrecognized_files.push(file_name);
2421 0 : continue;
2422 : }
2423 0 : Discovered::Ephemeral(name) => (name, "old ephemeral file"),
2424 0 : Discovered::Temporary(name) => (name, "temporary timeline file"),
2425 0 : Discovered::TemporaryDownload(name) => (name, "temporary download"),
2426 : };
2427 0 : path.push(Utf8Path::new(&name));
2428 0 : init::cleanup(&path, kind)?;
2429 0 : path.pop();
2430 : }
2431 :
2432 18 : if !unrecognized_files.is_empty() {
2433 : // assume that if there are any, there are many.
2434 0 : let n = unrecognized_files.len();
2435 0 : let first = &unrecognized_files[..n.min(10)];
2436 0 : anyhow::bail!(
2437 0 : "unrecognized files in timeline dir (total {n}), first 10: {first:?}"
2438 0 : );
2439 18 : }
2440 18 :
2441 18 : let decided =
2442 18 : init::reconcile(discovered_layers, index_part.as_ref(), disk_consistent_lsn);
2443 18 :
2444 18 : let mut loaded_layers = Vec::new();
2445 18 : let mut needs_cleanup = Vec::new();
2446 18 : let mut total_physical_size = 0;
2447 :
2448 66 : for (name, decision) in decided {
2449 48 : let decision = match decision {
2450 48 : Ok(decision) => decision,
2451 0 : Err(DismissedLayer::Future { local }) => {
2452 0 : if let Some(local) = local {
2453 0 : init::cleanup_future_layer(
2454 0 : &local.local_path,
2455 0 : &name,
2456 0 : disk_consistent_lsn,
2457 0 : )?;
2458 0 : }
2459 0 : needs_cleanup.push(name);
2460 0 : continue;
2461 : }
2462 0 : Err(DismissedLayer::LocalOnly(local)) => {
2463 0 : init::cleanup_local_only_file(&name, &local)?;
2464 : // this file never existed remotely, we will have to do rework
2465 0 : continue;
2466 : }
2467 0 : Err(DismissedLayer::BadMetadata(local)) => {
2468 0 : init::cleanup_local_file_for_remote(&local)?;
2469 : // this file never existed remotely, we will have to do rework
2470 0 : continue;
2471 : }
2472 : };
2473 :
2474 48 : match &name {
2475 36 : Delta(d) => assert!(d.lsn_range.end <= disk_consistent_lsn + 1),
2476 12 : Image(i) => assert!(i.lsn <= disk_consistent_lsn),
2477 : }
2478 :
2479 48 : tracing::debug!(layer=%name, ?decision, "applied");
2480 :
2481 48 : let layer = match decision {
2482 48 : Resident { local, remote } => {
2483 48 : total_physical_size += local.file_size;
2484 48 : Layer::for_resident(conf, &this, local.local_path, name, remote)
2485 48 : .drop_eviction_guard()
2486 : }
2487 0 : Evicted(remote) => Layer::for_evicted(conf, &this, name, remote),
2488 : };
2489 :
2490 48 : loaded_layers.push(layer);
2491 : }
2492 18 : Ok((loaded_layers, needs_cleanup, total_physical_size))
2493 18 : }
2494 18 : })
2495 17 : .await
2496 18 : .map_err(anyhow::Error::new)
2497 18 : .and_then(|x| x)?;
2498 :
2499 18 : let num_layers = loaded_layers.len();
2500 18 :
2501 18 : guard
2502 18 : .open_mut()
2503 18 : .expect("layermanager must be open during init")
2504 18 : .initialize_local_layers(loaded_layers, disk_consistent_lsn + 1);
2505 18 :
2506 18 : self.remote_client
2507 18 : .schedule_layer_file_deletion(&needs_cleanup)?;
2508 18 : self.remote_client
2509 18 : .schedule_index_upload_for_file_changes()?;
2510 : // This barrier orders above DELETEs before any later operations.
2511 : // This is critical because code executing after the barrier might
2512 : // create again objects with the same key that we just scheduled for deletion.
2513 : // For example, if we just scheduled deletion of an image layer "from the future",
2514 : // later compaction might run again and re-create the same image layer.
2515 : // "from the future" here means an image layer whose LSN is > IndexPart::disk_consistent_lsn.
2516 : // "same" here means same key range and LSN.
2517 : //
2518 : // Without a barrier between above DELETEs and the re-creation's PUTs,
2519 : // the upload queue may execute the PUT first, then the DELETE.
2520 : // In our example, we will end up with an IndexPart referencing a non-existent object.
2521 : //
2522 : // 1. a future image layer is created and uploaded
2523 : // 2. ps restart
2524 : // 3. the future layer from (1) is deleted during load layer map
2525 : // 4. image layer is re-created and uploaded
2526 : // 5. deletion queue would like to delete (1) but actually deletes (4)
2527 : // 6. delete by name works as expected, but it now deletes the wrong (later) version
2528 : //
2529 : // See https://github.com/neondatabase/neon/issues/5878
2530 : //
2531 : // NB: generation numbers naturally protect against this because they disambiguate
2532 : // (1) and (4)
2533 18 : self.remote_client.schedule_barrier()?;
2534 : // Tenant::create_timeline will wait for these uploads to happen before returning, or
2535 : // on retry.
2536 :
2537 : // Now that we have the full layer map, we may calculate the visibility of layers within it (a global scan)
2538 18 : drop(guard); // drop write lock, update_layer_visibility will take a read lock.
2539 18 : self.update_layer_visibility().await?;
2540 :
2541 18 : info!(
2542 0 : "loaded layer map with {} layers at {}, total physical size: {}",
2543 : num_layers, disk_consistent_lsn, total_physical_size
2544 : );
2545 :
2546 18 : timer.stop_and_record();
2547 18 : Ok(())
2548 18 : }
2549 :
2550 : /// Retrieve current logical size of the timeline.
2551 : ///
2552 : /// The size could be lagging behind the actual number, in case
2553 : /// the initial size calculation has not been run (gets triggered on the first size access).
2554 : ///
2555 : /// return size and boolean flag that shows if the size is exact
2556 : /// Returns the size and a flag that shows whether the size is exact.
2557 0 : self: &Arc<Self>,
2558 0 : priority: GetLogicalSizePriority,
2559 0 : ctx: &RequestContext,
2560 0 : ) -> logical_size::CurrentLogicalSize {
2561 0 : if !self.tenant_shard_id.is_shard_zero() {
2562 : // Logical size is only accurately maintained on shard zero: when called elsewhere, for example
2563 : // when HTTP API is serving a GET for timeline zero, return zero
2564 0 : return logical_size::CurrentLogicalSize::Approximate(logical_size::Approximate::zero());
2565 0 : }
2566 0 :
2567 0 : let current_size = self.current_logical_size.current_size();
2568 0 : debug!("Current size: {current_size:?}");
2569 :
2570 0 : match (current_size.accuracy(), priority) {
2571 0 : (logical_size::Accuracy::Exact, _) => (), // nothing to do
2572 0 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::Background) => {
2573 0 : // background task will eventually deliver an exact value, we're in no rush
2574 0 : }
2575 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::User) => {
2576 : // background task is not ready, but user is asking for it now;
2577 : // => make the background task skip the line
2578 : // (The alternative would be to calculate the size here, but,
2579 : // it can actually take a long time if the user has a lot of rels.
2580 : // And we'll inevitably need it again; so, let the background task do the work.)
2581 0 : match self
2582 0 : .current_logical_size
2583 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
2584 0 : .get()
2585 : {
2586 0 : Some(cancel) => cancel.cancel(),
2587 : None => {
2588 0 : let state = self.current_state();
2589 0 : if matches!(
2590 0 : state,
2591 : TimelineState::Broken { .. } | TimelineState::Stopping
2592 0 : ) {
2593 0 :
2594 0 : // Can happen when the timeline detail endpoint is used while deletion is ongoing (or it's broken).
2595 0 : // Don't make noise.
2596 0 : } else {
2597 0 : warn!("unexpected: cancel_wait_for_background_loop_concurrency_limit_semaphore not set, priority-boosting of logical size calculation will not work");
2598 0 : debug_assert!(false);
2599 : }
2600 : }
2601 : };
2602 : }
2603 : }
2604 :
2605 0 : if let CurrentLogicalSize::Approximate(_) = &current_size {
2606 0 : if ctx.task_kind() == TaskKind::WalReceiverConnectionHandler {
2607 0 : let first = self
2608 0 : .current_logical_size
2609 0 : .did_return_approximate_to_walreceiver
2610 0 : .compare_exchange(
2611 0 : false,
2612 0 : true,
2613 0 : AtomicOrdering::Relaxed,
2614 0 : AtomicOrdering::Relaxed,
2615 0 : )
2616 0 : .is_ok();
2617 0 : if first {
2618 0 : crate::metrics::initial_logical_size::TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE.inc();
2619 0 : }
2620 0 : }
2621 0 : }
2622 :
2623 0 : current_size
2624 0 : }
2625 :
2626 0 : fn spawn_initial_logical_size_computation_task(self: &Arc<Self>, ctx: &RequestContext) {
2627 0 : let Some(initial_part_end) = self.current_logical_size.initial_part_end else {
2628 : // nothing to do for freshly created timelines;
2629 0 : assert_eq!(
2630 0 : self.current_logical_size.current_size().accuracy(),
2631 0 : logical_size::Accuracy::Exact,
2632 0 : );
2633 0 : self.current_logical_size.initialized.add_permits(1);
2634 0 : return;
2635 : };
2636 :
2637 0 : let cancel_wait_for_background_loop_concurrency_limit_semaphore = CancellationToken::new();
2638 0 : let token = cancel_wait_for_background_loop_concurrency_limit_semaphore.clone();
2639 0 : self.current_logical_size
2640 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore.set(token)
2641 0 : .expect("initial logical size calculation task must be spawned exactly once per Timeline object");
2642 0 :
2643 0 : let self_clone = Arc::clone(self);
2644 0 : let background_ctx = ctx.detached_child(
2645 0 : TaskKind::InitialLogicalSizeCalculation,
2646 0 : DownloadBehavior::Download,
2647 0 : );
2648 0 : task_mgr::spawn(
2649 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
2650 0 : task_mgr::TaskKind::InitialLogicalSizeCalculation,
2651 0 : self.tenant_shard_id,
2652 0 : Some(self.timeline_id),
2653 0 : "initial size calculation",
2654 : // NB: don't log errors here, task_mgr will do that.
2655 0 : async move {
2656 0 : let cancel = task_mgr::shutdown_token();
2657 0 : self_clone
2658 0 : .initial_logical_size_calculation_task(
2659 0 : initial_part_end,
2660 0 : cancel_wait_for_background_loop_concurrency_limit_semaphore,
2661 0 : cancel,
2662 0 : background_ctx,
2663 0 : )
2664 0 : .await;
2665 0 : Ok(())
2666 0 : }
2667 0 : .instrument(info_span!(parent: None, "initial_size_calculation", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id)),
2668 : );
2669 0 : }
2670 :
2671 0 : async fn initial_logical_size_calculation_task(
2672 0 : self: Arc<Self>,
2673 0 : initial_part_end: Lsn,
2674 0 : skip_concurrency_limiter: CancellationToken,
2675 0 : cancel: CancellationToken,
2676 0 : background_ctx: RequestContext,
2677 0 : ) {
2678 0 : scopeguard::defer! {
2679 0 : // Irrespective of the outcome of this operation, we should unblock anyone waiting for it.
2680 0 : self.current_logical_size.initialized.add_permits(1);
2681 0 : }
2682 0 :
2683 0 : let try_once = |attempt: usize| {
2684 0 : let background_ctx = &background_ctx;
2685 0 : let self_ref = &self;
2686 0 : let skip_concurrency_limiter = &skip_concurrency_limiter;
2687 0 : async move {
2688 0 : let cancel = task_mgr::shutdown_token();
2689 0 : let wait_for_permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
2690 0 : BackgroundLoopKind::InitialLogicalSizeCalculation,
2691 0 : background_ctx,
2692 0 : );
2693 :
2694 : use crate::metrics::initial_logical_size::StartCircumstances;
2695 0 : let (_maybe_permit, circumstances) = tokio::select! {
2696 0 : permit = wait_for_permit => {
2697 0 : (Some(permit), StartCircumstances::AfterBackgroundTasksRateLimit)
2698 : }
2699 0 : _ = self_ref.cancel.cancelled() => {
2700 0 : return Err(CalculateLogicalSizeError::Cancelled);
2701 : }
2702 0 : _ = cancel.cancelled() => {
2703 0 : return Err(CalculateLogicalSizeError::Cancelled);
2704 : },
2705 0 : () = skip_concurrency_limiter.cancelled() => {
2706 : // Some action that is part of a end user interaction requested logical size
2707 : // Some action that is part of an end-user interaction requested the logical size
2708 : // TODO: ideally we'd not run on BackgroundRuntime but the requester's runtime;
2709 : // but then again what happens if they cancel; also, we should just be using
2710 : // one runtime across the entire process, so, let's leave this for now.
2711 0 : (None, StartCircumstances::SkippedConcurrencyLimiter)
2712 : }
2713 : };
2714 :
2715 0 : let metrics_guard = if attempt == 1 {
2716 0 : crate::metrics::initial_logical_size::START_CALCULATION.first(circumstances)
2717 : } else {
2718 0 : crate::metrics::initial_logical_size::START_CALCULATION.retry(circumstances)
2719 : };
2720 :
2721 0 : let calculated_size = self_ref
2722 0 : .logical_size_calculation_task(
2723 0 : initial_part_end,
2724 0 : LogicalSizeCalculationCause::Initial,
2725 0 : background_ctx,
2726 0 : )
2727 0 : .await?;
2728 :
2729 0 : self_ref
2730 0 : .trigger_aux_file_size_computation(initial_part_end, background_ctx)
2731 0 : .await?;
2732 :
2733 : // TODO: add aux file size to logical size
2734 :
2735 0 : Ok((calculated_size, metrics_guard))
2736 0 : }
2737 0 : };
2738 :
2739 0 : let retrying = async {
2740 0 : let mut attempt = 0;
2741 : loop {
2742 0 : attempt += 1;
2743 0 :
2744 0 : match try_once(attempt).await {
2745 0 : Ok(res) => return ControlFlow::Continue(res),
2746 0 : Err(CalculateLogicalSizeError::Cancelled) => return ControlFlow::Break(()),
2747 : Err(
2748 0 : e @ (CalculateLogicalSizeError::Decode(_)
2749 0 : | CalculateLogicalSizeError::PageRead(_)),
2750 0 : ) => {
2751 0 : warn!(attempt, "initial size calculation failed: {e:?}");
2752 : // exponential back-off doesn't make sense at these long intervals;
2753 : // use fixed retry interval with generous jitter instead
2754 0 : let sleep_duration = Duration::from_secs(
2755 0 : u64::try_from(
2756 0 : // 1hour base
2757 0 : (60_i64 * 60_i64)
2758 0 : // 10min jitter
2759 0 : + rand::thread_rng().gen_range(-10 * 60..10 * 60),
2760 0 : )
2761 0 : .expect("10min < 1hour"),
2762 0 : );
2763 0 : tokio::time::sleep(sleep_duration).await;
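     : // With the constants above this sleeps roughly 50 to 70 minutes between attempts.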
2764 : }
2765 : }
2766 : }
2767 0 : };
2768 :
2769 0 : let (calculated_size, metrics_guard) = tokio::select! {
2770 0 : res = retrying => {
2771 0 : match res {
2772 0 : ControlFlow::Continue(calculated_size) => calculated_size,
2773 0 : ControlFlow::Break(()) => return,
2774 : }
2775 : }
2776 0 : _ = cancel.cancelled() => {
2777 0 : return;
2778 : }
2779 : };
2780 :
2781 : // we cannot query current_logical_size.current_size() to learn the current, possibly
2782 : // *negative*, value added after the initial size: it is only available truncated to u64.
2783 0 : let added = self
2784 0 : .current_logical_size
2785 0 : .size_added_after_initial
2786 0 : .load(AtomicOrdering::Relaxed);
2787 0 :
2788 0 : let sum = calculated_size.saturating_add_signed(added);
2789 0 :
2790 0 : // set the gauge value before it can be set in `update_current_logical_size`.
2791 0 : self.metrics.current_logical_size_gauge.set(sum);
2792 0 :
2793 0 : self.current_logical_size
2794 0 : .initial_logical_size
2795 0 : .set((calculated_size, metrics_guard.calculation_result_saved()))
2796 0 : .ok()
2797 0 : .expect("only this task sets it");
2798 0 : }
2799 :
2800 0 : pub(crate) fn spawn_ondemand_logical_size_calculation(
2801 0 : self: &Arc<Self>,
2802 0 : lsn: Lsn,
2803 0 : cause: LogicalSizeCalculationCause,
2804 0 : ctx: RequestContext,
2805 0 : ) -> oneshot::Receiver<Result<u64, CalculateLogicalSizeError>> {
2806 0 : let (sender, receiver) = oneshot::channel();
2807 0 : let self_clone = Arc::clone(self);
2808 0 : // XXX if our caller loses interest, i.e., ctx is cancelled,
2809 0 : // we should stop the size calculation work and return an error.
2810 0 : // That would require restructuring this function's API to
2811 0 : // return the result directly, instead of a Receiver for the result.
2812 0 : let ctx = ctx.detached_child(
2813 0 : TaskKind::OndemandLogicalSizeCalculation,
2814 0 : DownloadBehavior::Download,
2815 0 : );
2816 0 : task_mgr::spawn(
2817 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
2818 0 : task_mgr::TaskKind::OndemandLogicalSizeCalculation,
2819 0 : self.tenant_shard_id,
2820 0 : Some(self.timeline_id),
2821 0 : "ondemand logical size calculation",
2822 0 : async move {
2823 0 : let res = self_clone
2824 0 : .logical_size_calculation_task(lsn, cause, &ctx)
2825 0 : .await;
2826 0 : let _ = sender.send(res).ok();
2827 0 : Ok(()) // Receiver is responsible for handling errors
2828 0 : }
2829 0 : .in_current_span(),
2830 0 : );
2831 0 : receiver
2832 0 : }
2833 :
2834 : /// # Cancel-Safety
2835 : ///
2836 : /// This method is cancellation-safe.
2837 0 : #[instrument(skip_all)]
2838 : async fn logical_size_calculation_task(
2839 : self: &Arc<Self>,
2840 : lsn: Lsn,
2841 : cause: LogicalSizeCalculationCause,
2842 : ctx: &RequestContext,
2843 : ) -> Result<u64, CalculateLogicalSizeError> {
2844 : crate::span::debug_assert_current_span_has_tenant_and_timeline_id();
2845 : // We should never be calculating logical sizes on shard !=0, because these shards do not have
2846 : // accurate relation sizes, and they do not emit consumption metrics.
2847 : debug_assert!(self.tenant_shard_id.is_shard_zero());
2848 :
2849 : let guard = self
2850 : .gate
2851 : .enter()
2852 0 : .map_err(|_| CalculateLogicalSizeError::Cancelled)?;
2853 :
2854 : let self_calculation = Arc::clone(self);
2855 :
2856 0 : let mut calculation = pin!(async {
2857 0 : let ctx = ctx.attached_child();
2858 0 : self_calculation
2859 0 : .calculate_logical_size(lsn, cause, &guard, &ctx)
2860 0 : .await
2861 0 : });
2862 :
2863 : tokio::select! {
2864 : res = &mut calculation => { res }
2865 : _ = self.cancel.cancelled() => {
2866 : debug!("cancelling logical size calculation for timeline shutdown");
2867 : calculation.await
2868 : }
2869 : }
2870 : }
2871 :
2872 : /// Calculate the logical size of the database at the latest LSN.
2873 : ///
2874 : /// NOTE: counted incrementally, includes ancestors. This can be a slow operation,
2875 : /// especially if we need to download remote layers.
2876 : ///
2877 : /// # Cancel-Safety
2878 : ///
2879 : /// This method is cancellation-safe.
2880 0 : async fn calculate_logical_size(
2881 0 : &self,
2882 0 : up_to_lsn: Lsn,
2883 0 : cause: LogicalSizeCalculationCause,
2884 0 : _guard: &GateGuard,
2885 0 : ctx: &RequestContext,
2886 0 : ) -> Result<u64, CalculateLogicalSizeError> {
2887 0 : info!(
2888 0 : "Calculating logical size for timeline {} at {}",
2889 : self.timeline_id, up_to_lsn
2890 : );
2891 :
2892 0 : pausable_failpoint!("timeline-calculate-logical-size-pause");
2893 :
2894 : // See if we've already done the work for initial size calculation.
2895 : // This is a short-cut for timelines that are mostly unused.
2896 0 : if let Some(size) = self.current_logical_size.initialized_size(up_to_lsn) {
2897 0 : return Ok(size);
2898 0 : }
2899 0 : let storage_time_metrics = match cause {
2900 : LogicalSizeCalculationCause::Initial
2901 : | LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize
2902 0 : | LogicalSizeCalculationCause::TenantSizeHandler => &self.metrics.logical_size_histo,
2903 : LogicalSizeCalculationCause::EvictionTaskImitation => {
2904 0 : &self.metrics.imitate_logical_size_histo
2905 : }
2906 : };
2907 0 : let timer = storage_time_metrics.start_timer();
2908 0 : let logical_size = self
2909 0 : .get_current_logical_size_non_incremental(up_to_lsn, ctx)
2910 0 : .await?;
2911 0 : debug!("calculated logical size: {logical_size}");
2912 0 : timer.stop_and_record();
2913 0 : Ok(logical_size)
2914 0 : }
2915 :
2916 : /// Update the current logical size, adding `delta` to the old value.
2917 811710 : fn update_current_logical_size(&self, delta: i64) {
2918 811710 : let logical_size = &self.current_logical_size;
2919 811710 : logical_size.increment_size(delta);
2920 811710 :
2921 811710 : // Also set the value in the prometheus gauge. Note that
2922 811710 : // there is a race condition here: if this is called by two
2923 811710 : // threads concurrently, the prometheus gauge might be set to
2924 811710 : // one value while current_logical_size is set to the
2925 811710 : // other.
2926 811710 : match logical_size.current_size() {
2927 811710 : CurrentLogicalSize::Exact(ref new_current_size) => self
2928 811710 : .metrics
2929 811710 : .current_logical_size_gauge
2930 811710 : .set(new_current_size.into()),
2931 0 : CurrentLogicalSize::Approximate(_) => {
2932 0 : // don't update the gauge yet; this avoids bouncing the gauge back and
2933 0 : // forth with the initial size calculation task.
2934 0 : }
2935 : }
2936 811710 : }
2937 :
2938 8502 : pub(crate) fn update_directory_entries_count(&self, kind: DirectoryKind, count: u64) {
2939 8502 : self.directory_metrics[kind.offset()].store(count, AtomicOrdering::Relaxed);
2940 8502 : let aux_metric =
2941 8502 : self.directory_metrics[DirectoryKind::AuxFiles.offset()].load(AtomicOrdering::Relaxed);
2942 8502 :
2943 8502 : let sum_of_entries = self
2944 8502 : .directory_metrics
2945 8502 : .iter()
2946 59514 : .map(|v| v.load(AtomicOrdering::Relaxed))
2947 8502 : .sum();
2948 : // Set a high general threshold and a lower threshold for the auxiliary files,
2949 : // as we can have large numbers of relations in the db directory.
2950 : const SUM_THRESHOLD: u64 = 5000;
2951 : const AUX_THRESHOLD: u64 = 1000;
2952 8502 : if sum_of_entries >= SUM_THRESHOLD || aux_metric >= AUX_THRESHOLD {
2953 0 : self.metrics
2954 0 : .directory_entries_count_gauge
2955 0 : .set(sum_of_entries);
2956 8502 : } else if let Some(metric) = Lazy::get(&self.metrics.directory_entries_count_gauge) {
2957 0 : metric.set(sum_of_entries);
2958 8502 : }
2959 8502 : }
2960 :
2961 0 : async fn find_layer(
2962 0 : &self,
2963 0 : layer_name: &LayerName,
2964 0 : ) -> Result<Option<Layer>, layer_manager::Shutdown> {
2965 0 : let guard = self.layers.read().await;
2966 0 : let layer = guard
2967 0 : .layer_map()?
2968 0 : .iter_historic_layers()
2969 0 : .find(|l| &l.layer_name() == layer_name)
2970 0 : .map(|found| guard.get_from_desc(&found));
2971 0 : Ok(layer)
2972 0 : }
2973 :
2974 : /// The timeline heatmap is a hint to secondary locations from the primary location,
2975 : /// indicating which layers are currently on-disk on the primary.
2976 : ///
2977 : /// None is returned if the Timeline is in a state where uploading a heatmap
2978 : /// doesn't make sense, such as shutting down or initializing. The caller
2979 : /// should treat this as a cue to simply skip doing any heatmap uploading
2980 : /// for this timeline.
2981 6 : pub(crate) async fn generate_heatmap(&self) -> Option<HeatMapTimeline> {
2982 6 : if !self.is_active() {
2983 0 : return None;
2984 6 : }
2985 :
2986 6 : let guard = self.layers.read().await;
2987 :
2988 30 : let resident = guard.likely_resident_layers().filter_map(|layer| {
2989 30 : match layer.visibility() {
2990 : LayerVisibilityHint::Visible => {
2991 : // Layer is visible to one or more read LSNs: eligible for inclusion in the heatmap
2992 24 : let last_activity_ts = layer.latest_activity();
2993 24 : Some((layer.layer_desc(), layer.metadata(), last_activity_ts))
2994 : }
2995 : LayerVisibilityHint::Covered => {
2996 : // Layer is resident but unlikely to be read: not eligible for inclusion in the heatmap.
2997 6 : None
2998 : }
2999 : }
3000 30 : });
3001 6 :
3002 6 : let mut layers = resident.collect::<Vec<_>>();
3003 6 :
3004 6 : // Sort layers in order of which to download first. For a large set of layers to download, we
3005 6 : // want to prioritize those layers which are most likely to still be resident many minutes
3006 6 : // or hours later:
3007 6 : // - Download L0s last, because they churn the fastest: L0s on a fast-writing tenant might
3008 6 : // only exist for a few minutes before being compacted into L1s.
3009 6 : // - For L1 & image layers, download most recent LSNs first: the older the LSN, the sooner
3010 6 : // the layer is likely to be covered by an image layer during compaction.
3011 56 : layers.sort_by_key(|(desc, _meta, _atime)| {
3012 56 : std::cmp::Reverse((
3013 56 : !LayerMap::is_l0(&desc.key_range, desc.is_delta),
3014 56 : desc.lsn_range.end,
3015 56 : ))
3016 56 : });
3017 6 :
3018 6 : let layers = layers
3019 6 : .into_iter()
3020 24 : .map(|(desc, meta, atime)| HeatMapLayer::new(desc.layer_name(), meta, atime))
3021 6 : .collect();
3022 6 :
3023 6 : Some(HeatMapTimeline::new(self.timeline_id, layers))
3024 6 : }
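 :
 : // Worked example of the ordering above (editor's illustration, not part of the original
 : // source): with sort key `Reverse((!is_l0, lsn_range.end))`, a hypothetical image layer
 : // at LSN 0/90, an L1 delta ending at 0/80 and an L0 delta ending at 0/95 are listed in
 : // the heatmap as image first (non-L0 with the newest LSN), then the L1 delta, and the
 : // L0 delta last, matching the priorities described in the comment above.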
3025 :
3026 : /// Returns true if the given lsn is or was an ancestor branchpoint.
3027 0 : pub(crate) fn is_ancestor_lsn(&self, lsn: Lsn) -> bool {
3028 0 : // upon timeline detach, we set the ancestor_lsn to Lsn::INVALID and store the original
3029 0 : // branchpoint in the value in IndexPart::lineage
3030 0 : self.ancestor_lsn == lsn
3031 0 : || (self.ancestor_lsn == Lsn::INVALID
3032 0 : && self.remote_client.is_previous_ancestor_lsn(lsn))
3033 0 : }
3034 : }
3035 :
3036 : impl Timeline {
3037 : #[allow(unknown_lints)] // doc_lazy_continuation is still a new lint
3038 : #[allow(clippy::doc_lazy_continuation)]
3039 : /// Get the data needed to reconstruct all keys in the provided keyspace
3040 : ///
3041 : /// The algorithm is as follows:
3042 : /// 1. While some keys are still not done and there's a timeline to visit:
3043 : /// 2. Visit the timeline (see [`Timeline::get_vectored_reconstruct_data_timeline`]):
3044 : /// 2.1. Build the fringe for the current keyspace
3045 : /// 2.2. Visit the newest layer from the fringe to collect all values for the range it
3046 : /// intersects
3047 : /// 2.3. Pop the timeline from the fringe
3048 : /// 2.4. If the fringe is empty, go back to 1
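 : ///
 : /// Illustrative walk-through (editor's sketch, not part of the original doc): a read of
 : /// key K at `request_lsn` 0/100 on a branch created at ancestor LSN 0/80 first searches
 : /// this timeline's layers starting from `cont_lsn` = 0/101; if K is still incomplete and
 : /// not covered by an image layer, `cont_lsn` is clamped to
 : /// `min(request_lsn, ancestor_lsn) + 1` = 0/81 and the search continues on the ancestor.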
3049 1880481 : async fn get_vectored_reconstruct_data(
3050 1880481 : &self,
3051 1880481 : mut keyspace: KeySpace,
3052 1880481 : request_lsn: Lsn,
3053 1880481 : reconstruct_state: &mut ValuesReconstructState,
3054 1880481 : ctx: &RequestContext,
3055 1880481 : ) -> Result<(), GetVectoredError> {
3056 1880481 : let mut timeline_owned: Arc<Timeline>;
3057 1880481 : let mut timeline = self;
3058 1880481 :
3059 1880481 : let mut cont_lsn = Lsn(request_lsn.0 + 1);
3060 :
3061 1880475 : let missing_keyspace = loop {
3062 2555613 : if self.cancel.is_cancelled() {
3063 0 : return Err(GetVectoredError::Cancelled);
3064 2555613 : }
3065 :
3066 : let TimelineVisitOutcome {
3067 2555613 : completed_keyspace: completed,
3068 2555613 : image_covered_keyspace,
3069 2555613 : } = Self::get_vectored_reconstruct_data_timeline(
3070 2555613 : timeline,
3071 2555613 : keyspace.clone(),
3072 2555613 : cont_lsn,
3073 2555613 : reconstruct_state,
3074 2555613 : &self.cancel,
3075 2555613 : ctx,
3076 2555613 : )
3077 574902 : .await?;
3078 :
3079 2555613 : keyspace.remove_overlapping_with(&completed);
3080 2555613 :
3081 2555613 : // Do not descend into the ancestor timeline for aux files.
3082 2555613 : // We don't return a blanket [`GetVectoredError::MissingKey`] to avoid
3083 2555613 : // stalling compaction.
3084 2555613 : keyspace.remove_overlapping_with(&KeySpace {
3085 2555613 : ranges: vec![NON_INHERITED_RANGE, NON_INHERITED_SPARSE_RANGE],
3086 2555613 : });
3087 2555613 :
3088 2555613 : // Keyspace is fully retrieved
3089 2555613 : if keyspace.is_empty() {
3090 1880433 : break None;
3091 675180 : }
3092 :
3093 675180 : let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() else {
3094 : // Not fully retrieved but no ancestor timeline.
3095 42 : break Some(keyspace);
3096 : };
3097 :
3098 : // Now we check whether there are keys covered by the image layer that do not exist in the
3099 : // image layer, which means that the key does not exist.
3100 :
3101 : // The block below will stop the vectored search if any of the keys encountered an image layer
3102 : // which did not contain a snapshot for said key. Since we have already removed all completed
3103 : // keys from `keyspace`, we expect there to be no overlap between it and the image covered key
3104 : // space. If that's not the case, we had at least one key encounter a gap in the image layer
3105 : // and stop the search as a result of that.
3106 675138 : let removed = keyspace.remove_overlapping_with(&image_covered_keyspace);
3107 675138 : if !removed.is_empty() {
3108 0 : break Some(removed);
3109 675138 : }
3110 675138 : // If we reached this point, `remove_overlapping_with` should not have made any change to the
3111 675138 : // keyspace.
3112 675138 :
3113 675138 : // Take the min to avoid reconstructing a page with data newer than request Lsn.
3114 675138 : cont_lsn = std::cmp::min(Lsn(request_lsn.0 + 1), Lsn(timeline.ancestor_lsn.0 + 1));
3115 675138 : timeline_owned = timeline
3116 675138 : .get_ready_ancestor_timeline(ancestor_timeline, ctx)
3117 6 : .await?;
3118 675132 : timeline = &*timeline_owned;
3119 : };
3120 :
3121 1880475 : if let Some(missing_keyspace) = missing_keyspace {
3122 42 : return Err(GetVectoredError::MissingKey(MissingKeyError {
3123 42 : key: missing_keyspace.start().unwrap(), /* better if we can store the full keyspace */
3124 42 : shard: self
3125 42 : .shard_identity
3126 42 : .get_shard_number(&missing_keyspace.start().unwrap()),
3127 42 : cont_lsn,
3128 42 : request_lsn,
3129 42 : ancestor_lsn: Some(timeline.ancestor_lsn),
3130 42 : backtrace: None,
3131 42 : }));
3132 1880433 : }
3133 1880433 :
3134 1880433 : Ok(())
3135 1880481 : }
3136 :
3137 : /// Collect the reconstruct data for a keyspace from the specified timeline.
3138 : ///
3139 : /// Maintain a fringe [`LayerFringe`] which tracks all the layers that intersect
3140 : /// the current keyspace. The current keyspace of the search at any given timeline
3141 : /// is the original keyspace minus all the keys that have been completed minus
3142 : /// any keys for which we couldn't find an intersecting layer. It's not tracked explicitly,
3143 : /// but if you merge all the keyspaces in the fringe, you get the "current keyspace".
3144 : ///
3145 : /// This is basically a depth-first search visitor implementation where a vertex
3146 : /// is the (layer, lsn range, key space) tuple. The fringe acts as the stack.
3147 : ///
3148 : /// At each iteration pop the top of the fringe (the layer with the highest Lsn)
3149 : /// and get all the required reconstruct data from the layer in one go.
3150 : ///
3151 : /// Returns the completed keyspace and the keyspaces with image coverage. The caller
3152 : /// decides how to deal with these two keyspaces.
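 : ///
 : /// Small illustration (editor's sketch, not part of the original doc): if the keyspace is
 : /// covered by an open in-memory layer stacked on top of two delta layers, the search visits
 : /// the in-memory layer first (it has the highest LSN range), then re-plans against the
 : /// layer map and visits the remaining layers in decreasing LSN order until every key is
 : /// completed or found in an image layer.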
3153 2555613 : async fn get_vectored_reconstruct_data_timeline(
3154 2555613 : timeline: &Timeline,
3155 2555613 : keyspace: KeySpace,
3156 2555613 : mut cont_lsn: Lsn,
3157 2555613 : reconstruct_state: &mut ValuesReconstructState,
3158 2555613 : cancel: &CancellationToken,
3159 2555613 : ctx: &RequestContext,
3160 2555613 : ) -> Result<TimelineVisitOutcome, GetVectoredError> {
3161 2555613 : let mut unmapped_keyspace = keyspace.clone();
3162 2555613 : let mut fringe = LayerFringe::new();
3163 2555613 :
3164 2555613 : let mut completed_keyspace = KeySpace::default();
3165 2555613 : let mut image_covered_keyspace = KeySpaceRandomAccum::new();
3166 :
3167 : loop {
3168 5011669 : if cancel.is_cancelled() {
3169 0 : return Err(GetVectoredError::Cancelled);
3170 5011669 : }
3171 5011669 :
3172 5011669 : let (keys_done_last_step, keys_with_image_coverage) =
3173 5011669 : reconstruct_state.consume_done_keys();
3174 5011669 : unmapped_keyspace.remove_overlapping_with(&keys_done_last_step);
3175 5011669 : completed_keyspace.merge(&keys_done_last_step);
3176 5011669 : if let Some(keys_with_image_coverage) = keys_with_image_coverage {
3177 24150 : unmapped_keyspace
3178 24150 : .remove_overlapping_with(&KeySpace::single(keys_with_image_coverage.clone()));
3179 24150 : image_covered_keyspace.add_range(keys_with_image_coverage);
3180 4987519 : }
3181 :
3182 : // Do not descend any further if the last layer we visited
3183 : // completed all keys in the keyspace it inspected. This is not
3184 : // required for correctness, but avoids visiting extra layers
3185 : // which turns out to be a perf bottleneck in some cases.
3186 5011669 : if !unmapped_keyspace.is_empty() {
3187 3132718 : let guard = timeline.layers.read().await;
3188 3132718 : let layers = guard.layer_map()?;
3189 :
3190 3132718 : let in_memory_layer = layers.find_in_memory_layer(|l| {
3191 2735151 : let start_lsn = l.get_lsn_range().start;
3192 2735151 : cont_lsn > start_lsn
3193 3132718 : });
3194 3132718 :
3195 3132718 : match in_memory_layer {
3196 1819447 : Some(l) => {
3197 1819447 : let lsn_range = l.get_lsn_range().start..cont_lsn;
3198 1819447 : fringe.update(
3199 1819447 : ReadableLayer::InMemoryLayer(l),
3200 1819447 : unmapped_keyspace.clone(),
3201 1819447 : lsn_range,
3202 1819447 : );
3203 1819447 : }
3204 : None => {
3205 1528543 : for range in unmapped_keyspace.ranges.iter() {
3206 1528543 : let results = layers.range_search(range.clone(), cont_lsn);
3207 1528543 :
3208 1528543 : results
3209 1528543 : .found
3210 1528543 : .into_iter()
3211 1528543 : .map(|(SearchResult { layer, lsn_floor }, keyspace_accum)| {
3212 827762 : (
3213 827762 : ReadableLayer::PersistentLayer(guard.get_from_desc(&layer)),
3214 827762 : keyspace_accum.to_keyspace(),
3215 827762 : lsn_floor..cont_lsn,
3216 827762 : )
3217 1528543 : })
3218 1528543 : .for_each(|(layer, keyspace, lsn_range)| {
3219 827762 : fringe.update(layer, keyspace, lsn_range)
3220 1528543 : });
3221 1528543 : }
3222 : }
3223 : }
3224 :
3225 : // It's safe to drop the layer map lock after planning the next round of reads.
3226 : // The fringe keeps readable handles for the layers which are safe to read even
3227 : // if layers were compacted or flushed.
3228 : //
3229 : // The more interesting consideration is: "Why is the read algorithm still correct
3230 : // if the layer map changes while it is operating?". Doing a vectored read on a
3231 : // timeline boils down to pushing an imaginary lsn boundary downwards for each range
3232 : // covered by the read. The layer map tells us how to move the lsn downwards for a
3233 : // range at *a particular point in time*. It is fine for the answer to be different
3234 : // at two different time points.
3235 3132718 : drop(guard);
3236 1878951 : }
3237 :
3238 5011669 : if let Some((layer_to_read, keyspace_to_read, lsn_range)) = fringe.next_layer() {
3239 2456056 : let next_cont_lsn = lsn_range.start;
3240 2456056 : layer_to_read
3241 2456056 : .get_values_reconstruct_data(
3242 2456056 : keyspace_to_read.clone(),
3243 2456056 : lsn_range,
3244 2456056 : reconstruct_state,
3245 2456056 : ctx,
3246 2456056 : )
3247 547549 : .await?;
3248 :
3249 2456056 : unmapped_keyspace = keyspace_to_read;
3250 2456056 : cont_lsn = next_cont_lsn;
3251 2456056 :
3252 2456056 : reconstruct_state.on_layer_visited(&layer_to_read);
3253 : } else {
3254 2555613 : break;
3255 2555613 : }
3256 2555613 : }
3257 2555613 :
3258 2555613 : Ok(TimelineVisitOutcome {
3259 2555613 : completed_keyspace,
3260 2555613 : image_covered_keyspace: image_covered_keyspace.consume_keyspace(),
3261 2555613 : })
3262 2555613 : }
3263 :
3264 675138 : async fn get_ready_ancestor_timeline(
3265 675138 : &self,
3266 675138 : ancestor: &Arc<Timeline>,
3267 675138 : ctx: &RequestContext,
3268 675138 : ) -> Result<Arc<Timeline>, GetReadyAncestorError> {
3269 675138 : // It's possible that the ancestor timeline isn't active yet, or
3270 675138 : // is active but hasn't yet caught up to the branch point. Wait
3271 675138 : // for it.
3272 675138 : //
3273 675138 : // This cannot happen while the pageserver is running normally,
3274 675138 : // because you cannot create a branch from a point that isn't
3275 675138 : // present in the pageserver yet. However, we don't wait for the
3276 675138 : // branch point to be uploaded to cloud storage before creating
3277 675138 : // a branch. I.e., the branch LSN need not be remote consistent
3278 675138 : // for the branching operation to succeed.
3279 675138 : //
3280 675138 : // Hence, if we try to load a tenant in such a state where
3281 675138 : // 1. the existence of the branch was persisted (in IndexPart and/or locally)
3282 675138 : // 2. but the ancestor state is behind branch_lsn because it was not yet persisted
3283 675138 : // then we will need to wait for the ancestor timeline to
3284 675138 : // re-stream WAL up to branch_lsn before we access it.
3285 675138 : //
3286 675138 : // How can a tenant get in such a state?
3287 675138 : // - ungraceful pageserver process exit
3288 675138 : // - detach+attach => this is a bug, https://github.com/neondatabase/neon/issues/4219
3289 675138 : //
3290 675138 : // NB: this could be avoided by requiring
3291 675138 : // branch_lsn >= remote_consistent_lsn
3292 675138 : // during branch creation.
3293 675138 : match ancestor.wait_to_become_active(ctx).await {
3294 675132 : Ok(()) => {}
3295 : Err(TimelineState::Stopping) => {
3296 : // If an ancestor is stopping, it means the tenant is stopping: handle this the same as if this timeline was stopping.
3297 0 : return Err(GetReadyAncestorError::Cancelled);
3298 : }
3299 6 : Err(state) => {
3300 6 : return Err(GetReadyAncestorError::BadState {
3301 6 : timeline_id: ancestor.timeline_id,
3302 6 : state,
3303 6 : });
3304 : }
3305 : }
3306 675132 : ancestor
3307 675132 : .wait_lsn(self.ancestor_lsn, WaitLsnWaiter::Timeline(self), ctx)
3308 0 : .await
3309 675132 : .map_err(|e| match e {
3310 0 : e @ WaitLsnError::Timeout(_) => GetReadyAncestorError::AncestorLsnTimeout(e),
3311 0 : WaitLsnError::Shutdown => GetReadyAncestorError::Cancelled,
3312 0 : WaitLsnError::BadState(state) => GetReadyAncestorError::BadState {
3313 0 : timeline_id: ancestor.timeline_id,
3314 0 : state,
3315 0 : },
3316 675132 : })?;
3317 :
3318 675132 : Ok(ancestor.clone())
3319 675138 : }
3320 :
3321 16356 : pub(crate) fn get_shard_identity(&self) -> &ShardIdentity {
3322 16356 : &self.shard_identity
3323 16356 : }
3324 :
3325 : #[inline(always)]
3326 0 : pub(crate) fn shard_timeline_id(&self) -> ShardTimelineId {
3327 0 : ShardTimelineId {
3328 0 : shard_index: ShardIndex {
3329 0 : shard_number: self.shard_identity.number,
3330 0 : shard_count: self.shard_identity.count,
3331 0 : },
3332 0 : timeline_id: self.timeline_id,
3333 0 : }
3334 0 : }
3335 :
3336 : /// Returns a non-frozen open in-memory layer for ingestion.
3337 : ///
3338 : /// Takes a witness that the timeline writer state lock is held, because it makes no sense to call
3339 : /// this function without holding the mutex.
3340 3816 : async fn get_layer_for_write(
3341 3816 : &self,
3342 3816 : lsn: Lsn,
3343 3816 : _guard: &tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
3344 3816 : ctx: &RequestContext,
3345 3816 : ) -> anyhow::Result<Arc<InMemoryLayer>> {
3346 3816 : let mut guard = self.layers.write().await;
3347 3816 : let gate_guard = self.gate.enter().context("enter gate for inmem layer")?;
3348 :
3349 3816 : let last_record_lsn = self.get_last_record_lsn();
3350 3816 : ensure!(
3351 3816 : lsn > last_record_lsn,
3352 0 : "cannot modify relation after advancing last_record_lsn (incoming_lsn={}, last_record_lsn={})",
3353 : lsn,
3354 : last_record_lsn,
3355 : );
3356 :
3357 3816 : let layer = guard
3358 3816 : .open_mut()?
3359 3816 : .get_layer_for_write(
3360 3816 : lsn,
3361 3816 : self.conf,
3362 3816 : self.timeline_id,
3363 3816 : self.tenant_shard_id,
3364 3816 : gate_guard,
3365 3816 : ctx,
3366 3816 : )
3367 2160 : .await?;
3368 3816 : Ok(layer)
3369 3816 : }
3370 :
3371 15837234 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
3372 15837234 : assert!(new_lsn.is_aligned());
3373 :
3374 15837234 : self.metrics.last_record_gauge.set(new_lsn.0 as i64);
3375 15837234 : self.last_record_lsn.advance(new_lsn);
3376 15837234 : }
3377 :
3378 : /// Freeze any existing open in-memory layer and unconditionally notify the flush loop.
3379 : ///
3380 : /// Unconditional flush loop notification is given because in sharded cases we will want to
3381 : /// leave an Lsn gap. Unsharded tenants do not have Lsn gaps.
3382 3510 : async fn freeze_inmem_layer_at(
3383 3510 : &self,
3384 3510 : at: Lsn,
3385 3510 : write_lock: &mut tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
3386 3510 : ) -> Result<u64, FlushLayerError> {
3387 3510 : let frozen = {
3388 3510 : let mut guard = self.layers.write().await;
3389 3510 : guard
3390 3510 : .open_mut()?
3391 3510 : .try_freeze_in_memory_layer(at, &self.last_freeze_at, write_lock)
3392 5 : .await
3393 : };
3394 :
3395 3510 : if frozen {
3396 3426 : let now = Instant::now();
3397 3426 : *(self.last_freeze_ts.write().unwrap()) = now;
3398 3426 : }
3399 :
3400 : // Increment the flush cycle counter and wake up the flush task.
3401 : // Remember the new value, so that when we listen for the flush
3402 : // to finish, we know when the flush that we initiated has
3403 : // finished, instead of some other flush that was started earlier.
3404 3510 : let mut my_flush_request = 0;
3405 3510 :
3406 3510 : let flush_loop_state = { *self.flush_loop_state.lock().unwrap() };
3407 3510 : if !matches!(flush_loop_state, FlushLoopState::Running { .. }) {
3408 0 : return Err(FlushLayerError::NotRunning(flush_loop_state));
3409 3510 : }
3410 3510 :
3411 3510 : self.layer_flush_start_tx.send_modify(|(counter, lsn)| {
3412 3510 : my_flush_request = *counter + 1;
3413 3510 : *counter = my_flush_request;
3414 3510 : *lsn = std::cmp::max(at, *lsn);
3415 3510 : });
3416 3510 :
3417 3510 : assert_ne!(my_flush_request, 0);
3418 :
3419 3510 : Ok(my_flush_request)
3420 3510 : }
3421 :
3422 : /// Layer flusher task's main loop.
3423 1224 : async fn flush_loop(
3424 1224 : self: &Arc<Self>,
3425 1224 : mut layer_flush_start_rx: tokio::sync::watch::Receiver<(u64, Lsn)>,
3426 1224 : ctx: &RequestContext,
3427 1224 : ) {
3428 1224 : info!("started flush loop");
3429 : loop {
3430 4615 : tokio::select! {
3431 4615 : _ = self.cancel.cancelled() => {
3432 24 : info!("shutting down layer flush task due to Timeline::cancel");
3433 24 : break;
3434 : },
3435 4615 : _ = layer_flush_start_rx.changed() => {}
3436 3391 : }
3437 3391 : trace!("waking up");
3438 3391 : let (flush_counter, frozen_to_lsn) = *layer_flush_start_rx.borrow();
3439 3391 :
3440 3391 : // The highest LSN to which we flushed in the loop over frozen layers
3441 3391 : let mut flushed_to_lsn = Lsn(0);
3442 :
3443 3391 : let result = loop {
3444 6817 : if self.cancel.is_cancelled() {
3445 0 : info!("dropping out of flush loop for timeline shutdown");
3446 : // Note: we do not bother transmitting into [`layer_flush_done_tx`], because
3447 : // anyone waiting on that will respect self.cancel as well: they will stop
3448 : // waiting at the same time as we drop out of this loop.
3449 0 : return;
3450 6817 : }
3451 6817 :
3452 6817 : let timer = self.metrics.flush_time_histo.start_timer();
3453 :
3454 6817 : let layer_to_flush = {
3455 6817 : let guard = self.layers.read().await;
3456 6817 : let Ok(lm) = guard.layer_map() else {
3457 0 : info!("dropping out of flush loop for timeline shutdown");
3458 0 : return;
3459 : };
3460 6817 : lm.frozen_layers.front().cloned()
3461 : // drop 'layers' lock to allow concurrent reads and writes
3462 : };
3463 6817 : let Some(layer_to_flush) = layer_to_flush else {
3464 3391 : break Ok(());
3465 : };
3466 51254 : match self.flush_frozen_layer(layer_to_flush, ctx).await {
3467 3426 : Ok(this_layer_to_lsn) => {
3468 3426 : flushed_to_lsn = std::cmp::max(flushed_to_lsn, this_layer_to_lsn);
3469 3426 : }
3470 : Err(FlushLayerError::Cancelled) => {
3471 0 : info!("dropping out of flush loop for timeline shutdown");
3472 0 : return;
3473 : }
3474 0 : err @ Err(
3475 0 : FlushLayerError::NotRunning(_)
3476 0 : | FlushLayerError::Other(_)
3477 0 : | FlushLayerError::CreateImageLayersError(_),
3478 0 : ) => {
3479 0 : error!("could not flush frozen layer: {err:?}");
3480 0 : break err.map(|_| ());
3481 : }
3482 : }
3483 3426 : timer.stop_and_record();
3484 : };
3485 :
3486 : // Unsharded tenants should never advance their LSN beyond the end of the
3487 : // highest layer they write: such gaps between layer data and the frozen LSN
3488 : // are only legal on sharded tenants.
3489 3391 : debug_assert!(
3490 3391 : self.shard_identity.count.count() > 1
3491 3391 : || flushed_to_lsn >= frozen_to_lsn
3492 203 : || !flushed_to_lsn.is_valid()
3493 : );
3494 :
3495 3391 : if flushed_to_lsn < frozen_to_lsn && self.shard_identity.count.count() > 1 {
3496 : // If our layer flushes didn't carry disk_consistent_lsn up to the `to_lsn` advertised
3497 : // to us via layer_flush_start_rx, then advance it here.
3498 : //
3499 : // This path is only taken for tenants with multiple shards: single sharded tenants should
3500 : // never encounter a gap in the wal.
3501 0 : let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
3502 0 : tracing::debug!("Advancing disk_consistent_lsn across layer gap {old_disk_consistent_lsn}->{frozen_to_lsn}");
3503 0 : if self.set_disk_consistent_lsn(frozen_to_lsn) {
3504 0 : if let Err(e) = self.schedule_uploads(frozen_to_lsn, vec![]) {
3505 0 : tracing::warn!("Failed to schedule metadata upload after updating disk_consistent_lsn: {e}");
3506 0 : }
3507 0 : }
3508 3391 : }
3509 :
3510 : // Notify any listeners that we're done
3511 3391 : let _ = self
3512 3391 : .layer_flush_done_tx
3513 3391 : .send_replace((flush_counter, result));
3514 : }
3515 24 : }
3516 :
3517 : /// Waits for any flush request created by [`Self::freeze_inmem_layer_at`] to complete.
3518 3270 : async fn wait_flush_completion(&self, request: u64) -> Result<(), FlushLayerError> {
3519 3270 : let mut rx = self.layer_flush_done_tx.subscribe();
3520 : loop {
3521 : {
3522 6540 : let (last_result_counter, last_result) = &*rx.borrow();
3523 6540 : if *last_result_counter >= request {
3524 3270 : if let Err(err) = last_result {
3525 : // We already logged the original error in
3526 : // flush_loop. We cannot propagate it to the caller
3527 : // here, because it might not be Cloneable
3528 0 : return Err(err.clone());
3529 : } else {
3530 3270 : return Ok(());
3531 : }
3532 3270 : }
3533 3270 : }
3534 3270 : trace!("waiting for flush to complete");
3535 3270 : tokio::select! {
3536 3270 : rx_e = rx.changed() => {
3537 3270 : rx_e.map_err(|_| FlushLayerError::NotRunning(*self.flush_loop_state.lock().unwrap()))?;
3538 : },
3539 : // Cancellation safety: we are not leaving an I/O in-flight for the flush, we're just ignoring
3540 : // the notification from [`flush_loop`] that it completed.
3541 3270 : _ = self.cancel.cancelled() => {
3542 0 : tracing::info!("Cancelled layer flush due to timeline shutdown");
3543 0 : return Ok(())
3544 : }
3545 : };
3546 3270 : trace!("done")
3547 : }
3548 3270 : }
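 :
 : // Illustrative pairing of the two calls above (editor's sketch, not from the original
 : // source), assuming the timeline writer state mutex guard is held as `write_guard`
 : // and the freeze point is a hypothetical `at_lsn`:
 : //
 : //     let request = self.freeze_inmem_layer_at(at_lsn, &mut write_guard).await?;
 : //     self.wait_flush_completion(request).await?;
 : //
 : // Freezing returns a flush-request counter; waiting on that counter blocks until the
 : // flush loop has published a result for a request at least as new as ours.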
3549 :
3550 : /// Flush one frozen in-memory layer to disk, as a new delta layer.
3551 : ///
3552 : /// Return value is the last lsn (inclusive) of the layer that was frozen.
3553 3426 : #[instrument(skip_all, fields(layer=%frozen_layer))]
3554 : async fn flush_frozen_layer(
3555 : self: &Arc<Self>,
3556 : frozen_layer: Arc<InMemoryLayer>,
3557 : ctx: &RequestContext,
3558 : ) -> Result<Lsn, FlushLayerError> {
3559 : debug_assert_current_span_has_tenant_and_timeline_id();
3560 :
3561 : // As a special case, when we have just imported an image into the repository,
3562 : // we write out image layer files directly instead of an L0 delta layer. This is
3563 : // possible as long as *all* the data imported into the
3564 : // repository has the same LSN.
3565 : let lsn_range = frozen_layer.get_lsn_range();
3566 :
3567 : // Whether to directly create image layers for this flush, or flush them as delta layers
3568 : let create_image_layer =
3569 : lsn_range.start == self.initdb_lsn && lsn_range.end == Lsn(self.initdb_lsn.0 + 1);
3570 :
3571 : #[cfg(test)]
3572 : {
3573 : match &mut *self.flush_loop_state.lock().unwrap() {
3574 : FlushLoopState::NotStarted | FlushLoopState::Exited => {
3575 : panic!("flush loop not running")
3576 : }
3577 : FlushLoopState::Running {
3578 : expect_initdb_optimization,
3579 : initdb_optimization_count,
3580 : ..
3581 : } => {
3582 : if create_image_layer {
3583 : *initdb_optimization_count += 1;
3584 : } else {
3585 : assert!(!*expect_initdb_optimization, "expected initdb optimization");
3586 : }
3587 : }
3588 : }
3589 : }
3590 :
3591 : let (layers_to_upload, delta_layer_to_add) = if create_image_layer {
3592 : // Note: The 'ctx' in use here has DownloadBehavior::Error. We should not
3593 : // require downloading anything during initial import.
3594 : let ((rel_partition, metadata_partition), _lsn) = self
3595 : .repartition(
3596 : self.initdb_lsn,
3597 : self.get_compaction_target_size(),
3598 : EnumSet::empty(),
3599 : ctx,
3600 : )
3601 : .await
3602 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?;
3603 :
3604 : if self.cancel.is_cancelled() {
3605 : return Err(FlushLayerError::Cancelled);
3606 : }
3607 :
3608 : let mut layers_to_upload = Vec::new();
3609 : layers_to_upload.extend(
3610 : self.create_image_layers(
3611 : &rel_partition,
3612 : self.initdb_lsn,
3613 : ImageLayerCreationMode::Initial,
3614 : ctx,
3615 : )
3616 : .await?,
3617 : );
3618 : if !metadata_partition.parts.is_empty() {
3619 : assert_eq!(
3620 : metadata_partition.parts.len(),
3621 : 1,
3622 : "currently sparse keyspace should only contain a single metadata keyspace"
3623 : );
3624 : layers_to_upload.extend(
3625 : self.create_image_layers(
3626 : // Safety: create_image_layers treats sparse keyspaces differently, in that it does not scan
3627 : // every single key within the keyspace; therefore, it's safe to force-convert it
3628 : // into a dense keyspace before calling this function.
3629 : &metadata_partition.into_dense(),
3630 : self.initdb_lsn,
3631 : ImageLayerCreationMode::Initial,
3632 : ctx,
3633 : )
3634 : .await?,
3635 : );
3636 : }
3637 :
3638 : (layers_to_upload, None)
3639 : } else {
3640 : // Normal case, write out a L0 delta layer file.
3641 : // `create_delta_layer` will not modify the layer map.
3642 : // We will remove frozen layer and add delta layer in one atomic operation later.
3643 : let Some(layer) = self
3644 : .create_delta_layer(&frozen_layer, None, ctx)
3645 : .await
3646 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?
3647 : else {
3648 : panic!("delta layer cannot be empty if no filter is applied");
3649 : };
3650 : (
3651 : // FIXME: even though we have a single image and single delta layer assumption
3652 : // we push them to vec
3653 : vec![layer.clone()],
3654 : Some(layer),
3655 : )
3656 : };
3657 :
3658 : pausable_failpoint!("flush-layer-cancel-after-writing-layer-out-pausable");
3659 :
3660 : if self.cancel.is_cancelled() {
3661 : return Err(FlushLayerError::Cancelled);
3662 : }
3663 :
3664 : let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1);
3665 :
3666 : // The new on-disk layers are now in the layer map. We can remove the
3667 : // in-memory layer from the map now. The flushed layer is stored in
3668 : // the mapping in `create_delta_layer`.
3669 : {
3670 : let mut guard = self.layers.write().await;
3671 :
3672 : guard.open_mut()?.finish_flush_l0_layer(
3673 : delta_layer_to_add.as_ref(),
3674 : &frozen_layer,
3675 : &self.metrics,
3676 : );
3677 :
3678 : if self.set_disk_consistent_lsn(disk_consistent_lsn) {
3679 : // Schedule remote uploads that will reflect our new disk_consistent_lsn
3680 : self.schedule_uploads(disk_consistent_lsn, layers_to_upload)
3681 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?;
3682 : }
3683 : // release lock on 'layers'
3684 : };
3685 :
3686 : // Backpressure mechanism: do not continue the flush loop until we have uploaded all layer files.
3687 : // This makes us refuse ingest until the new layers have been persisted to the remote.
3688 : self.remote_client
3689 : .wait_completion()
3690 : .await
3691 0 : .map_err(|e| match e {
3692 : WaitCompletionError::UploadQueueShutDownOrStopped
3693 : | WaitCompletionError::NotInitialized(
3694 : NotInitialized::ShuttingDown | NotInitialized::Stopped,
3695 0 : ) => FlushLayerError::Cancelled,
3696 : WaitCompletionError::NotInitialized(NotInitialized::Uninitialized) => {
3697 0 : FlushLayerError::Other(anyhow!(e).into())
3698 : }
3699 0 : })?;
3700 :
3701 : // FIXME: between create_delta_layer and the scheduling of the upload in `update_metadata_file`,
3702 : // a compaction can delete the file and then it won't be available for uploads any more.
3703 : // We still schedule the upload, resulting in an error, but ideally we'd somehow avoid this
3704 : // race situation.
3705 : // See https://github.com/neondatabase/neon/issues/4526
3706 : pausable_failpoint!("flush-frozen-pausable");
3707 :
3708 : // This failpoint is used by another test case `test_pageserver_recovery`.
3709 : fail_point!("flush-frozen-exit");
3710 :
3711 : Ok(Lsn(lsn_range.end.0 - 1))
3712 : }
3713 :
3714 : /// Return true if the value changed
3715 : ///
3716 : /// This function must only be used from the layer flush task.
3717 3426 : fn set_disk_consistent_lsn(&self, new_value: Lsn) -> bool {
3718 3426 : let old_value = self.disk_consistent_lsn.fetch_max(new_value);
3719 3426 : assert!(new_value >= old_value, "disk_consistent_lsn must be growing monotonically at runtime; current {old_value}, offered {new_value}");
3720 3426 : new_value != old_value
3721 3426 : }
3722 :
3723 : /// Update metadata file
3724 3444 : fn schedule_uploads(
3725 3444 : &self,
3726 3444 : disk_consistent_lsn: Lsn,
3727 3444 : layers_to_upload: impl IntoIterator<Item = ResidentLayer>,
3728 3444 : ) -> anyhow::Result<()> {
3729 3444 : // We can only save a valid 'prev_record_lsn' value on disk if we
3730 3444 : // flushed *all* in-memory changes to disk. We only track
3731 3444 : // 'prev_record_lsn' in memory for the latest processed record, so we
3732 3444 : // don't remember what the correct value corresponding to some old
3733 3444 : // LSN is. But if we flush everything, then the value corresponding to the
3734 3444 : // current 'last_record_lsn' is correct and we can store it on disk.
3735 3444 : let RecordLsn {
3736 3444 : last: last_record_lsn,
3737 3444 : prev: prev_record_lsn,
3738 3444 : } = self.last_record_lsn.load();
3739 3444 : let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
3740 3204 : Some(prev_record_lsn)
3741 : } else {
3742 240 : None
3743 : };
3744 :
3745 3444 : let update = crate::tenant::metadata::MetadataUpdate::new(
3746 3444 : disk_consistent_lsn,
3747 3444 : ondisk_prev_record_lsn,
3748 3444 : *self.latest_gc_cutoff_lsn.read(),
3749 3444 : );
3750 3444 :
3751 3444 : fail_point!("checkpoint-before-saving-metadata", |x| bail!(
3752 0 : "{}",
3753 0 : x.unwrap()
3754 3444 : ));
3755 :
3756 6906 : for layer in layers_to_upload {
3757 3462 : self.remote_client.schedule_layer_file_upload(layer)?;
3758 : }
3759 3444 : self.remote_client
3760 3444 : .schedule_index_upload_for_metadata_update(&update)?;
3761 :
3762 3444 : Ok(())
3763 3444 : }
3764 :
3765 0 : pub(crate) async fn preserve_initdb_archive(&self) -> anyhow::Result<()> {
3766 0 : self.remote_client
3767 0 : .preserve_initdb_archive(
3768 0 : &self.tenant_shard_id.tenant_id,
3769 0 : &self.timeline_id,
3770 0 : &self.cancel,
3771 0 : )
3772 0 : .await
3773 0 : }
3774 :
3775 : // Write out the given frozen in-memory layer as a new L0 delta file. This L0 file will not be tracked
3776 : // in the layer map immediately. The caller is responsible for putting it into the layer map.
3777 2904 : async fn create_delta_layer(
3778 2904 : self: &Arc<Self>,
3779 2904 : frozen_layer: &Arc<InMemoryLayer>,
3780 2904 : key_range: Option<Range<Key>>,
3781 2904 : ctx: &RequestContext,
3782 2904 : ) -> anyhow::Result<Option<ResidentLayer>> {
3783 2904 : let self_clone = Arc::clone(self);
3784 2904 : let frozen_layer = Arc::clone(frozen_layer);
3785 2904 : let ctx = ctx.attached_child();
3786 2904 : let work = async move {
3787 2904 : let Some((desc, path)) = frozen_layer
3788 2904 : .write_to_disk(&ctx, key_range, self_clone.l0_flush_global_state.inner())
3789 30745 : .await?
3790 : else {
3791 0 : return Ok(None);
3792 : };
3793 2904 : let new_delta = Layer::finish_creating(self_clone.conf, &self_clone, desc, &path)?;
3794 :
3795 : // The write_to_disk() above calls writer.finish() which already did the fsync of the inodes.
3796 : // We just need to fsync the directory in which these inodes are linked,
3797 : // which we know to be the timeline directory.
3798 : //
3799 : // We use fatal_err() below because after write_to_disk returns with success,
3800 : // the in-memory state of the filesystem already has the layer file in its final place,
3801 : // and subsequent pageserver code could think it's durable while it really isn't.
3802 2904 : let timeline_dir = VirtualFile::open(
3803 2904 : &self_clone
3804 2904 : .conf
3805 2904 : .timeline_path(&self_clone.tenant_shard_id, &self_clone.timeline_id),
3806 2904 : &ctx,
3807 2904 : )
3808 1460 : .await
3809 2904 : .fatal_err("VirtualFile::open for timeline dir fsync");
3810 2904 : timeline_dir
3811 2904 : .sync_all()
3812 1452 : .await
3813 2904 : .fatal_err("VirtualFile::sync_all timeline dir");
3814 2904 : anyhow::Ok(Some(new_delta))
3815 2904 : };
3816 : // Before tokio-epoll-uring, we ran write_to_disk & the sync_all inside spawn_blocking.
3817 : // Preserve that behavior for `virtual_file_io_engine=std-fs`.
3818 : use crate::virtual_file::io_engine::IoEngine;
3819 2904 : match crate::virtual_file::io_engine::get() {
3820 0 : IoEngine::NotSet => panic!("io engine not set"),
3821 : IoEngine::StdFs => {
3822 1452 : let span = tracing::info_span!("blocking");
3823 1452 : tokio::task::spawn_blocking({
3824 1452 : move || Handle::current().block_on(work.instrument(span))
3825 1452 : })
3826 1452 : .await
3827 1452 : .context("spawn_blocking")
3828 1452 : .and_then(|x| x)
3829 : }
3830 : #[cfg(target_os = "linux")]
3831 33642 : IoEngine::TokioEpollUring => work.await,
3832 : }
3833 2904 : }
3834 :
3835 1614 : async fn repartition(
3836 1614 : &self,
3837 1614 : lsn: Lsn,
3838 1614 : partition_size: u64,
3839 1614 : flags: EnumSet<CompactFlags>,
3840 1614 : ctx: &RequestContext,
3841 1614 : ) -> anyhow::Result<((KeyPartitioning, SparseKeyPartitioning), Lsn)> {
3842 1614 : let Ok(mut partitioning_guard) = self.partitioning.try_lock() else {
3843 : // NB: there are two callers, one is the compaction task, of which there is only one per struct Tenant and hence Timeline.
3844 : // The other is the initdb optimization in flush_frozen_layer, used by `bootstrap_timeline`, which runs before `.activate()`
3845 : // and hence before the compaction task starts.
3846 0 : anyhow::bail!("repartition() called concurrently, this should not happen");
3847 : };
3848 1614 : let ((dense_partition, sparse_partition), partition_lsn) = &*partitioning_guard;
3849 1614 : if lsn < *partition_lsn {
3850 0 : anyhow::bail!("repartition() called with LSN going backwards, this should not happen");
3851 1614 : }
3852 1614 :
3853 1614 : let distance = lsn.0 - partition_lsn.0;
3854 1614 : if *partition_lsn != Lsn(0)
3855 786 : && distance <= self.repartition_threshold
3856 786 : && !flags.contains(CompactFlags::ForceRepartition)
3857 : {
3858 744 : debug!(
3859 : distance,
3860 : threshold = self.repartition_threshold,
3861 0 : "no repartitioning needed"
3862 : );
3863 744 : return Ok((
3864 744 : (dense_partition.clone(), sparse_partition.clone()),
3865 744 : *partition_lsn,
3866 744 : ));
3867 870 : }
3868 :
3869 47922 : let (dense_ks, sparse_ks) = self.collect_keyspace(lsn, ctx).await?;
3870 870 : let dense_partitioning = dense_ks.partition(&self.shard_identity, partition_size);
3871 870 : let sparse_partitioning = SparseKeyPartitioning {
3872 870 : parts: vec![sparse_ks],
3873 870 : }; // no partitioning for metadata keys for now
3874 870 : *partitioning_guard = ((dense_partitioning, sparse_partitioning), lsn);
3875 870 :
3876 870 : Ok((partitioning_guard.0.clone(), partitioning_guard.1))
3877 1614 : }
3878 :
3879 : // Is it time to create a new image layer for the given partition?
3880 42 : async fn time_for_new_image_layer(&self, partition: &KeySpace, lsn: Lsn) -> bool {
3881 42 : let threshold = self.get_image_creation_threshold();
3882 :
3883 42 : let guard = self.layers.read().await;
3884 42 : let Ok(layers) = guard.layer_map() else {
3885 0 : return false;
3886 : };
3887 :
3888 42 : let mut max_deltas = 0;
3889 84 : for part_range in &partition.ranges {
3890 42 : let image_coverage = layers.image_coverage(part_range, lsn);
3891 84 : for (img_range, last_img) in image_coverage {
3892 42 : let img_lsn = if let Some(last_img) = last_img {
3893 0 : last_img.get_lsn_range().end
3894 : } else {
3895 42 : Lsn(0)
3896 : };
3897 : // Let's consider an example:
3898 : //
3899 : // delta layer with LSN range 71-81
3900 : // delta layer with LSN range 81-91
3901 : // delta layer with LSN range 91-101
3902 : // image layer at LSN 100
3903 : //
3904 : // If 'lsn' is still 100, i.e. no new WAL has been processed since the last image layer,
3905 : // there's no need to create a new one. We check this case explicitly, to avoid passing
3906 : // a bogus range to count_deltas below, with start > end. It's even possible that there
3907 : // are some delta layers *later* than current 'lsn', if more WAL was processed and flushed
3908 : // after we read last_record_lsn, which is passed here in the 'lsn' argument.
3909 42 : if img_lsn < lsn {
3910 42 : let num_deltas =
3911 42 : layers.count_deltas(&img_range, &(img_lsn..lsn), Some(threshold));
3912 42 :
3913 42 : max_deltas = max_deltas.max(num_deltas);
3914 42 : if num_deltas >= threshold {
3915 0 : debug!(
3916 0 : "key range {}-{}, has {} deltas on this timeline in LSN range {}..{}",
3917 : img_range.start, img_range.end, num_deltas, img_lsn, lsn
3918 : );
3919 0 : return true;
3920 42 : }
3921 0 : }
3922 : }
3923 : }
3924 :
3925 42 : debug!(
3926 : max_deltas,
3927 0 : "none of the partitioned ranges had >= {threshold} deltas"
3928 : );
3929 42 : false
3930 42 : }
3931 :
3932 : /// Create image layers for Postgres data. Assumes the caller passes a partition that is not too large,
3933 : /// so that at most one image layer will be produced from this function.
3934 600 : async fn create_image_layer_for_rel_blocks(
3935 600 : self: &Arc<Self>,
3936 600 : partition: &KeySpace,
3937 600 : mut image_layer_writer: ImageLayerWriter,
3938 600 : lsn: Lsn,
3939 600 : ctx: &RequestContext,
3940 600 : img_range: Range<Key>,
3941 600 : start: Key,
3942 600 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
3943 600 : let mut wrote_keys = false;
3944 600 :
3945 600 : let mut key_request_accum = KeySpaceAccum::new();
3946 3990 : for range in &partition.ranges {
3947 3390 : let mut key = range.start;
3948 7350 : while key < range.end {
3949 : // Decide whether to retain this key: usually we do, but sharded tenants may
3950 : // need to drop keys that don't belong to them. If we retain the key, add it
3951 : // to `key_request_accum` for later issuing a vectored get
3952 3960 : if self.shard_identity.is_key_disposable(&key) {
3953 0 : debug!(
3954 0 : "Dropping key {} during compaction (it belongs on shard {:?})",
3955 0 : key,
3956 0 : self.shard_identity.get_shard_number(&key)
3957 : );
3958 3960 : } else {
3959 3960 : key_request_accum.add_key(key);
3960 3960 : }
3961 :
3962 3960 : let last_key_in_range = key.next() == range.end;
3963 3960 : key = key.next();
3964 3960 :
3965 3960 : // Maybe flush `key_request_accum`
3966 3960 : if key_request_accum.raw_size() >= Timeline::MAX_GET_VECTORED_KEYS
3967 3960 : || (last_key_in_range && key_request_accum.raw_size() > 0)
3968 : {
3969 3390 : let results = self
3970 3390 : .get_vectored(key_request_accum.consume_keyspace(), lsn, ctx)
3971 148 : .await?;
3972 :
3973 3390 : if self.cancel.is_cancelled() {
3974 0 : return Err(CreateImageLayersError::Cancelled);
3975 3390 : }
3976 :
3977 7350 : for (img_key, img) in results {
3978 3960 : let img = match img {
3979 3960 : Ok(img) => img,
3980 0 : Err(err) => {
3981 0 : // If we fail to reconstruct a VM or FSM page, we can zero the
3982 0 : // page without losing any actual user data. That seems better
3983 0 : // than failing repeatedly and getting stuck.
3984 0 : //
3985 0 : // We had a bug at one point, where we truncated the FSM and VM
3986 0 : // in the pageserver, but Postgres didn't know about that
3987 0 : // and continued to generate incremental WAL records for pages
3988 0 : // that didn't exist in the pageserver. Trying to replay those
3989 0 : // WAL records failed to find the previous image of the page.
3990 0 : // This special case allows us to recover from that situation.
3991 0 : // See https://github.com/neondatabase/neon/issues/2601.
3992 0 : //
3993 0 : // Unfortunately we cannot do this for the main fork, or for
3994 0 : // any metadata keys, as that would lead to actual data
3995 0 : // loss.
3996 0 : if img_key.is_rel_fsm_block_key() || img_key.is_rel_vm_block_key() {
3997 0 : warn!("could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}");
3998 0 : ZERO_PAGE.clone()
3999 : } else {
4000 0 : return Err(CreateImageLayersError::from(err));
4001 : }
4002 : }
4003 : };
4004 :
4005 : // Write all the keys we just read into our new image layer.
4006 4313 : image_layer_writer.put_image(img_key, img, ctx).await?;
4007 3960 : wrote_keys = true;
4008 : }
4009 570 : }
4010 : }
4011 : }
4012 :
4013 600 : if wrote_keys {
4014 : // Normal path: we have written some data into the new image layer for this
4015 : // partition, so flush it to disk.
4016 1204 : let (desc, path) = image_layer_writer.finish(ctx).await?;
4017 600 : let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
4018 600 : info!("created image layer for rel {}", image_layer.local_path());
4019 600 : Ok(ImageLayerCreationOutcome {
4020 600 : image: Some(image_layer),
4021 600 : next_start_key: img_range.end,
4022 600 : })
4023 : } else {
4024 : // Special case: the image layer may be empty if this is a sharded tenant and the
4025 : // partition does not cover any keys owned by this shard. In this case, to ensure
4026 : // we don't leave gaps between image layers, leave `start` where it is, so that the next
4027 : // layer we write will cover the key range that we just scanned.
4028 0 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
4029 0 : Ok(ImageLayerCreationOutcome {
4030 0 : image: None,
4031 0 : next_start_key: start,
4032 0 : })
4033 : }
4034 600 : }
4035 :
4036 : /// Create an image layer for metadata keys. For now, this function produces one image layer covering all
4037 : /// metadata keys. Because the metadata keyspace cannot exceed the basebackup size limit, the resulting
4038 : /// image layer should not grow too large.
4039 : #[allow(clippy::too_many_arguments)]
4040 570 : async fn create_image_layer_for_metadata_keys(
4041 570 : self: &Arc<Self>,
4042 570 : partition: &KeySpace,
4043 570 : mut image_layer_writer: ImageLayerWriter,
4044 570 : lsn: Lsn,
4045 570 : ctx: &RequestContext,
4046 570 : img_range: Range<Key>,
4047 570 : mode: ImageLayerCreationMode,
4048 570 : start: Key,
4049 570 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
4050 570 : // Metadata keys image layer creation.
4051 570 : let mut reconstruct_state = ValuesReconstructState::default();
4052 570 : let data = self
4053 570 : .get_vectored_impl(partition.clone(), lsn, &mut reconstruct_state, ctx)
4054 9808 : .await?;
4055 570 : let (data, total_kb_retrieved, total_keys_retrieved) = {
4056 570 : let mut new_data = BTreeMap::new();
4057 570 : let mut total_kb_retrieved = 0;
4058 570 : let mut total_keys_retrieved = 0;
4059 30606 : for (k, v) in data {
4060 30036 : let v = v?;
4061 30036 : total_kb_retrieved += KEY_SIZE + v.len();
4062 30036 : total_keys_retrieved += 1;
4063 30036 : new_data.insert(k, v);
4064 : }
4065 570 : (new_data, total_kb_retrieved / 1024, total_keys_retrieved)
4066 570 : };
4067 570 : let delta_files_accessed = reconstruct_state.get_delta_layers_visited();
4068 570 :
4069 570 : let trigger_generation = delta_files_accessed as usize >= MAX_AUX_FILE_V2_DELTAS;
4070 570 : debug!(
4071 : trigger_generation,
4072 : delta_files_accessed,
4073 : total_kb_retrieved,
4074 : total_keys_retrieved,
4075 0 : "generate metadata images"
4076 : );
4077 :
4078 570 : if !trigger_generation && mode == ImageLayerCreationMode::Try {
4079 6 : return Ok(ImageLayerCreationOutcome {
4080 6 : image: None,
4081 6 : next_start_key: img_range.end,
4082 6 : });
4083 564 : }
4084 564 : if self.cancel.is_cancelled() {
4085 0 : return Err(CreateImageLayersError::Cancelled);
4086 564 : }
4087 564 : let mut wrote_any_image = false;
4088 30600 : for (k, v) in data {
4089 30036 : if v.is_empty() {
4090 : // the key has been deleted, it does not need an image
4091 : // in metadata keyspace, an empty image == tombstone
4092 24 : continue;
4093 30012 : }
4094 30012 : wrote_any_image = true;
4095 30012 :
4096 30012 : // No need to handle sharding b/c metadata keys are always on the 0-th shard.
4097 30012 :
4098 30012 : // TODO: split image layers to avoid too large layer files. Too large image files are not handled
4099 30012 : // on the normal data path either.
4100 30482 : image_layer_writer.put_image(k, v, ctx).await?;
4101 : }
4102 :
4103 564 : if wrote_any_image {
4104 : // Normal path: we have written some data into the new image layer for this
4105 : // partition, so flush it to disk.
4106 72 : let (desc, path) = image_layer_writer.finish(ctx).await?;
4107 36 : let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
4108 36 : info!(
4109 0 : "created image layer for metadata {}",
4110 0 : image_layer.local_path()
4111 : );
4112 36 : Ok(ImageLayerCreationOutcome {
4113 36 : image: Some(image_layer),
4114 36 : next_start_key: img_range.end,
4115 36 : })
4116 : } else {
4117 : // Special case: the image layer may be empty if this is a sharded tenant and the
4118 : // partition does not cover any keys owned by this shard. In this case, to ensure
4119 : // we don't leave gaps between image layers, leave `start` where it is, so that the next
4120 : // layer we write will cover the key range that we just scanned.
4121 528 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
4122 528 : Ok(ImageLayerCreationOutcome {
4123 528 : image: None,
4124 528 : next_start_key: start,
4125 528 : })
4126 : }
4127 570 : }
4128 :
4129 : /// Predicate function which indicates whether we should check if new image layers
4130 : /// are required. Since checking if new image layers are required is expensive in
4131 : /// terms of CPU, we only do it in the following cases:
4132 : /// 1. If the timeline has ingested sufficient WAL to justify the cost
4133 : /// 2. If enough time has passed since the last check:
4134 : /// 1. For large tenants, we wish to perform the check more often since they
4135 : /// suffer from the lack of image layers
4136 : /// 2. For small tenants (that can mostly fit in RAM), we use a much longer interval
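 : ///
 : /// Worked example (editor's illustration with hypothetical settings, not from the original
 : /// doc): with an image layer creation check threshold of 2 and a checkpoint distance of
 : /// 256 MiB, the distance-based condition fires once at least 2 * 256 MiB = 512 MiB of WAL
 : /// has been ingested since the last check; otherwise the time-based condition applies
 : /// (checkpoint timeout for tenants above 2 GiB of logical size, 48 hours for smaller ones).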
4137 2136 : fn should_check_if_image_layers_required(self: &Arc<Timeline>, lsn: Lsn) -> bool {
4138 : const LARGE_TENANT_THRESHOLD: u64 = 2 * 1024 * 1024 * 1024;
4139 :
4140 2136 : let last_checks_at = self.last_image_layer_creation_check_at.load();
4141 2136 : let distance = lsn
4142 2136 : .checked_sub(last_checks_at)
4143 2136 : .expect("Attempt to compact with LSN going backwards");
4144 2136 : let min_distance =
4145 2136 : self.get_image_layer_creation_check_threshold() as u64 * self.get_checkpoint_distance();
4146 2136 :
4147 2136 : let distance_based_decision = distance.0 >= min_distance;
4148 2136 :
4149 2136 : let mut time_based_decision = false;
4150 2136 : let mut last_check_instant = self.last_image_layer_creation_check_instant.lock().unwrap();
4151 2136 : if let CurrentLogicalSize::Exact(logical_size) = self.current_logical_size.current_size() {
4152 1830 : let check_required_after = if Into::<u64>::into(&logical_size) >= LARGE_TENANT_THRESHOLD
4153 : {
4154 0 : self.get_checkpoint_timeout()
4155 : } else {
4156 1830 : Duration::from_secs(3600 * 48)
4157 : };
4158 :
4159 1830 : time_based_decision = match *last_check_instant {
4160 1308 : Some(last_check) => {
4161 1308 : let elapsed = last_check.elapsed();
4162 1308 : elapsed >= check_required_after
4163 : }
4164 522 : None => true,
4165 : };
4166 306 : }
4167 :
4168 : // Do the expensive delta layer counting only if this timeline has ingested sufficient
4169 : // WAL since the last check or a checkpoint timeout interval has elapsed since the last
4170 : // check.
4171 2136 : let decision = distance_based_decision || time_based_decision;
4172 :
4173 2136 : if decision {
4174 528 : self.last_image_layer_creation_check_at.store(lsn);
4175 528 : *last_check_instant = Some(Instant::now());
4176 1608 : }
4177 :
4178 2136 : decision
4179 2136 : }
4180 :
4181 2136 : #[tracing::instrument(skip_all, fields(%lsn, %mode))]
4182 : async fn create_image_layers(
4183 : self: &Arc<Timeline>,
4184 : partitioning: &KeyPartitioning,
4185 : lsn: Lsn,
4186 : mode: ImageLayerCreationMode,
4187 : ctx: &RequestContext,
4188 : ) -> Result<Vec<ResidentLayer>, CreateImageLayersError> {
4189 : let timer = self.metrics.create_images_time_histo.start_timer();
4190 : let mut image_layers = Vec::new();
4191 :
4192 : // We need to avoid holes between generated image layers.
4193 : // Otherwise LayerMap::image_layer_exists will return false if the key range of some layer is covered by more than one
4194 : // image layer with a hole between them. In that case such a layer cannot be utilized by GC.
4195 : //
4196 : // How can such a hole between partitions appear?
4197 : // If we have a relation with relid=1 and size 100 and a relation with relid=2 and size 200, then the result of
4198 : // KeySpace::partition may contain partitions <100000000..100000099> and <200000000..200000199>.
4199 : // If there is a delta layer <100000000..300000000>, then it will never be garbage collected because
4200 : // image layers <100000000..100000099> and <200000000..200000199> do not completely cover it.
4201 : let mut start = Key::MIN;
4202 :
4203 : let check_for_image_layers = self.should_check_if_image_layers_required(lsn);
4204 :
4205 : for partition in partitioning.parts.iter() {
4206 : if self.cancel.is_cancelled() {
4207 : return Err(CreateImageLayersError::Cancelled);
4208 : }
4209 :
4210 : let img_range = start..partition.ranges.last().unwrap().end;
4211 : let compact_metadata = partition.overlaps(&Key::metadata_key_range());
4212 : if compact_metadata {
4213 : for range in &partition.ranges {
4214 : assert!(
4215 : range.start.field1 >= METADATA_KEY_BEGIN_PREFIX
4216 : && range.end.field1 <= METADATA_KEY_END_PREFIX,
4217 : "metadata keys must be partitioned separately"
4218 : );
4219 : }
4220 : if mode == ImageLayerCreationMode::Try && !check_for_image_layers {
4221 : // Skip compaction if there are not enough updates. Metadata compaction will do a scan and
4222 : // might interfere with evictions.
4223 : start = img_range.end;
4224 : continue;
4225 : }
4226 : // For initial and force modes, we always generate image layers for metadata keys.
4227 : } else if let ImageLayerCreationMode::Try = mode {
4228 : // check_for_image_layers = false -> skip
4229 : // check_for_image_layers = true -> check time_for_new_image_layer -> skip/generate
4230 : if !check_for_image_layers || !self.time_for_new_image_layer(partition, lsn).await {
4231 : start = img_range.end;
4232 : continue;
4233 : }
4234 : }
4235 : if let ImageLayerCreationMode::Force = mode {
4236 : // When forced to create image layers, we might try and create them where they already
4237 : // exist. This mode is only used in tests/debug.
4238 : let layers = self.layers.read().await;
4239 : if layers.contains_key(&PersistentLayerKey {
4240 : key_range: img_range.clone(),
4241 : lsn_range: PersistentLayerDesc::image_layer_lsn_range(lsn),
4242 : is_delta: false,
4243 : }) {
4244 : tracing::info!(
4245 : "Skipping image layer at {lsn} {}..{}, already exists",
4246 : img_range.start,
4247 : img_range.end
4248 : );
4249 : start = img_range.end;
4250 : continue;
4251 : }
4252 : }
4253 :
4254 : let image_layer_writer = ImageLayerWriter::new(
4255 : self.conf,
4256 : self.timeline_id,
4257 : self.tenant_shard_id,
4258 : &img_range,
4259 : lsn,
4260 : ctx,
4261 : )
4262 : .await?;
4263 :
4264 0 : fail_point!("image-layer-writer-fail-before-finish", |_| {
4265 0 : Err(CreateImageLayersError::Other(anyhow::anyhow!(
4266 0 : "failpoint image-layer-writer-fail-before-finish"
4267 0 : )))
4268 0 : });
4269 :
4270 : if !compact_metadata {
4271 : let ImageLayerCreationOutcome {
4272 : image,
4273 : next_start_key,
4274 : } = self
4275 : .create_image_layer_for_rel_blocks(
4276 : partition,
4277 : image_layer_writer,
4278 : lsn,
4279 : ctx,
4280 : img_range,
4281 : start,
4282 : )
4283 : .await?;
4284 :
4285 : start = next_start_key;
4286 : image_layers.extend(image);
4287 : } else {
4288 : let ImageLayerCreationOutcome {
4289 : image,
4290 : next_start_key,
4291 : } = self
4292 : .create_image_layer_for_metadata_keys(
4293 : partition,
4294 : image_layer_writer,
4295 : lsn,
4296 : ctx,
4297 : img_range,
4298 : mode,
4299 : start,
4300 : )
4301 : .await?;
4302 : start = next_start_key;
4303 : image_layers.extend(image);
4304 : }
4305 : }
4306 :
4307 : let mut guard = self.layers.write().await;
4308 :
4309 : // FIXME: we could add the images to be uploaded *before* returning from here, but right
4310 : // now they are being scheduled outside of the write lock; the current way is inconsistent with
4311 : // the compaction lock order.
4312 : guard
4313 : .open_mut()?
4314 : .track_new_image_layers(&image_layers, &self.metrics);
4315 : drop_wlock(guard);
4316 : timer.stop_and_record();
4317 :
4318 : // Creating image layers may have caused some previously visible layers to be covered
4319 : if !image_layers.is_empty() {
4320 : self.update_layer_visibility().await?;
4321 : }
4322 :
4323 : Ok(image_layers)
4324 : }
4325 :
4326 : /// Wait until the background initial logical size calculation is complete, or
4327 : /// this Timeline is shut down. Calling this function will cause the initial
4328 : /// logical size calculation to skip waiting for the background jobs barrier.
4329 0 : pub(crate) async fn await_initial_logical_size(self: Arc<Self>) {
4330 0 : if !self.shard_identity.is_shard_zero() {
4331 : // We don't populate logical size on shard >0: skip waiting for it.
4332 0 : return;
4333 0 : }
4334 0 :
4335 0 : if self.remote_client.is_deleting() {
4336 : // The timeline was created in a deletion-resume state; we don't expect logical size to be populated
4337 0 : return;
4338 0 : }
4339 0 :
4340 0 : if self.current_logical_size.current_size().is_exact() {
4341 : // root timelines are initialized with exact count, but never start the background
4342 : // calculation
4343 0 : return;
4344 0 : }
4345 :
4346 0 : if let Some(await_bg_cancel) = self
4347 0 : .current_logical_size
4348 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
4349 0 : .get()
4350 0 : {
4351 0 : await_bg_cancel.cancel();
4352 0 : } else {
4353 : // We should not wait if we were not able to explicitly instruct
4354 : // the logical size cancellation to skip the concurrency limit semaphore.
4355 : // TODO: this is an unexpected case. We should restructure so that it
4356 : // can't happen.
4357 0 : tracing::warn!(
4358 0 : "await_initial_logical_size: can't get semaphore cancel token, skipping"
4359 : );
4360 0 : debug_assert!(false);
4361 : }
4362 :
4363 0 : tokio::select!(
4364 0 : _ = self.current_logical_size.initialized.acquire() => {},
4365 0 : _ = self.cancel.cancelled() => {}
4366 : )
4367 0 : }
4368 :
4369 : /// Detach this timeline from its ancestor by copying all of the ancestor's layers as this
4370 : /// Timeline's layers up to the ancestor_lsn.
4371 : ///
4372 : /// Requires a timeline that:
4373 : /// - has an ancestor to detach from
4374 : /// - the ancestor does not have an ancestor -- follows from the original RFC limitations, not
4375 : /// a technical requirement
4376 : ///
4377 : /// After the operation has been started, it cannot be canceled. Upon restart it needs to be
4378 : /// polled again until completion.
4379 : ///
4380 : /// During the operation all timelines sharing the data with this timeline will be reparented
4381 : /// from our ancestor to be branches of this timeline.
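 : ///
 : /// The full detach flow spans three calls on this type. An illustrative sketch only (not a
 : /// verbatim caller: how the prepared/attempt values are derived between steps, the tenant
 : /// reset, and error handling are all elided):
 : ///
 : /// ```ignore
 : /// let progress = timeline.prepare_to_detach_from_ancestor(&tenant, options, &ctx).await?;
 : /// // ... derive `prepared` from the returned progress ...
 : /// let detached = timeline.detach_from_ancestor_and_reparent(&tenant, prepared, &ctx).await?;
 : /// // ... reset the tenant if ancestry was modified, derive `attempt`, then ...
 : /// timeline.complete_detaching_timeline_ancestor(&tenant, attempt, &ctx).await?;
 : /// ```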
4382 0 : pub(crate) async fn prepare_to_detach_from_ancestor(
4383 0 : self: &Arc<Timeline>,
4384 0 : tenant: &crate::tenant::Tenant,
4385 0 : options: detach_ancestor::Options,
4386 0 : ctx: &RequestContext,
4387 0 : ) -> Result<detach_ancestor::Progress, detach_ancestor::Error> {
4388 0 : detach_ancestor::prepare(self, tenant, options, ctx).await
4389 0 : }
4390 :
4391 : /// Second step of detach from ancestor; detaches `self` from its current ancestor and
4392 : /// reparents any reparentable children of previous ancestor.
4393 : ///
4394 : /// This method is to be called while holding the TenantManager's tenant slot, so during this
4395 : /// method we cannot be deleted nor can any timeline be deleted. After this method returns
4396 : /// successfully, the tenant must be reloaded.
4397 : ///
4398 : /// Final step will be to [`Self::complete_detaching_timeline_ancestor`] after optionally
4399 : /// resetting the tenant.
4400 0 : pub(crate) async fn detach_from_ancestor_and_reparent(
4401 0 : self: &Arc<Timeline>,
4402 0 : tenant: &crate::tenant::Tenant,
4403 0 : prepared: detach_ancestor::PreparedTimelineDetach,
4404 0 : ctx: &RequestContext,
4405 0 : ) -> Result<detach_ancestor::DetachingAndReparenting, detach_ancestor::Error> {
4406 0 : detach_ancestor::detach_and_reparent(self, tenant, prepared, ctx).await
4407 0 : }
4408 :
4409 : /// Final step which unblocks the GC.
4410 : ///
4411 : /// The tenant must've been reset if ancestry was modified previously (in tenant manager).
4412 0 : pub(crate) async fn complete_detaching_timeline_ancestor(
4413 0 : self: &Arc<Timeline>,
4414 0 : tenant: &crate::tenant::Tenant,
4415 0 : attempt: detach_ancestor::Attempt,
4416 0 : ctx: &RequestContext,
4417 0 : ) -> Result<(), detach_ancestor::Error> {
4418 0 : detach_ancestor::complete(self, tenant, attempt, ctx).await
4419 0 : }
4420 :
4421 : /// Switch aux file policy and schedule upload to the index part.
4422 48 : pub(crate) fn do_switch_aux_policy(&self, policy: AuxFilePolicy) -> anyhow::Result<()> {
4423 48 : self.last_aux_file_policy.store(Some(policy));
4424 48 : self.remote_client
4425 48 : .schedule_index_upload_for_aux_file_policy_update(Some(policy))?;
4426 48 : Ok(())
4427 48 : }
4428 : }
4429 :
4430 : impl Drop for Timeline {
4431 24 : fn drop(&mut self) {
4432 24 : if let Some(ancestor) = &self.ancestor_timeline {
4433 : // This lock should never be poisoned, but in case it is we do a .map() instead of
4434 : // an unwrap(), to avoid panicking in a destructor and thereby aborting the process.
4435 6 : if let Ok(mut gc_info) = ancestor.gc_info.write() {
4436 6 : gc_info.remove_child(self.timeline_id)
4437 0 : }
4438 18 : }
4439 24 : }
4440 : }
4441 :
4442 : /// Top-level failure to compact.
4443 0 : #[derive(Debug, thiserror::Error)]
4444 : pub(crate) enum CompactionError {
4445 : #[error("The timeline or pageserver is shutting down")]
4446 : ShuttingDown,
4447 : /// Compaction could not be completed for some other reason, e.g. a page reconstruction failure.
4448 : #[error(transparent)]
4449 : Other(anyhow::Error),
4450 : }
4451 :
4452 : impl From<CollectKeySpaceError> for CompactionError {
4453 0 : fn from(err: CollectKeySpaceError) -> Self {
4454 0 : match err {
4455 : CollectKeySpaceError::Cancelled
4456 : | CollectKeySpaceError::PageRead(PageReconstructError::Cancelled) => {
4457 0 : CompactionError::ShuttingDown
4458 : }
4459 0 : e => CompactionError::Other(e.into()),
4460 : }
4461 0 : }
4462 : }
4463 :
4464 : impl From<super::upload_queue::NotInitialized> for CompactionError {
4465 0 : fn from(value: super::upload_queue::NotInitialized) -> Self {
4466 0 : match value {
4467 : super::upload_queue::NotInitialized::Uninitialized => {
4468 0 : CompactionError::Other(anyhow::anyhow!(value))
4469 : }
4470 : super::upload_queue::NotInitialized::ShuttingDown
4471 0 : | super::upload_queue::NotInitialized::Stopped => CompactionError::ShuttingDown,
4472 : }
4473 0 : }
4474 : }
4475 :
4476 : impl From<super::storage_layer::layer::DownloadError> for CompactionError {
4477 0 : fn from(e: super::storage_layer::layer::DownloadError) -> Self {
4478 0 : match e {
4479 : super::storage_layer::layer::DownloadError::TimelineShutdown
4480 : | super::storage_layer::layer::DownloadError::DownloadCancelled => {
4481 0 : CompactionError::ShuttingDown
4482 : }
4483 : super::storage_layer::layer::DownloadError::ContextAndConfigReallyDeniesDownloads
4484 : | super::storage_layer::layer::DownloadError::DownloadRequired
4485 : | super::storage_layer::layer::DownloadError::NotFile(_)
4486 : | super::storage_layer::layer::DownloadError::DownloadFailed
4487 : | super::storage_layer::layer::DownloadError::PreStatFailed(_) => {
4488 0 : CompactionError::Other(anyhow::anyhow!(e))
4489 : }
4490 : #[cfg(test)]
4491 : super::storage_layer::layer::DownloadError::Failpoint(_) => {
4492 0 : CompactionError::Other(anyhow::anyhow!(e))
4493 : }
4494 : }
4495 0 : }
4496 : }
4497 :
4498 : impl From<layer_manager::Shutdown> for CompactionError {
4499 0 : fn from(_: layer_manager::Shutdown) -> Self {
4500 0 : CompactionError::ShuttingDown
4501 0 : }
4502 : }
4503 :
4504 : #[serde_as]
4505 588 : #[derive(serde::Serialize)]
4506 : struct RecordedDuration(#[serde_as(as = "serde_with::DurationMicroSeconds")] Duration);
4507 :
4508 : #[derive(Default)]
4509 : enum DurationRecorder {
4510 : #[default]
4511 : NotStarted,
4512 : Recorded(RecordedDuration, tokio::time::Instant),
4513 : }
4514 :
4515 : impl DurationRecorder {
4516 1512 : fn till_now(&self) -> DurationRecorder {
4517 1512 : match self {
4518 : DurationRecorder::NotStarted => {
4519 0 : panic!("must only call on recorded measurements")
4520 : }
4521 1512 : DurationRecorder::Recorded(_, ended) => {
4522 1512 : let now = tokio::time::Instant::now();
4523 1512 : DurationRecorder::Recorded(RecordedDuration(now - *ended), now)
4524 1512 : }
4525 1512 : }
4526 1512 : }
4527 588 : fn into_recorded(self) -> Option<RecordedDuration> {
4528 588 : match self {
4529 0 : DurationRecorder::NotStarted => None,
4530 588 : DurationRecorder::Recorded(recorded, _) => Some(recorded),
4531 : }
4532 588 : }
4533 : }
4534 :
4535 : /// Descriptor for a delta layer used in testing infra. The start/end key/lsn range of the
4536 : /// delta layer might be different from the min/max key/lsn in the delta layer. Therefore,
4537 : /// the layer descriptor requires the user to provide the ranges, which should cover all
4538 : /// keys specified in the `data` field.
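 : ///
 : /// An illustrative construction (mirroring the test at the bottom of this file; `key` and
 : /// the image value are placeholders):
 : ///
 : /// ```ignore
 : /// let desc = DeltaLayerTestDesc::new_with_inferred_key_range(
 : ///     Lsn(0x10)..Lsn(0x20),
 : ///     vec![(key, Lsn(0x11), Value::Image(test_img("foo")))],
 : /// );
 : /// ```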
4539 : #[cfg(test)]
4540 : #[derive(Clone)]
4541 : pub struct DeltaLayerTestDesc {
4542 : pub lsn_range: Range<Lsn>,
4543 : pub key_range: Range<Key>,
4544 : pub data: Vec<(Key, Lsn, Value)>,
4545 : }
4546 :
4547 : #[cfg(test)]
4548 : impl DeltaLayerTestDesc {
4549 6 : pub fn new(lsn_range: Range<Lsn>, key_range: Range<Key>, data: Vec<(Key, Lsn, Value)>) -> Self {
4550 6 : Self {
4551 6 : lsn_range,
4552 6 : key_range,
4553 6 : data,
4554 6 : }
4555 6 : }
4556 :
4557 174 : pub fn new_with_inferred_key_range(
4558 174 : lsn_range: Range<Lsn>,
4559 174 : data: Vec<(Key, Lsn, Value)>,
4560 174 : ) -> Self {
4561 432 : let key_min = data.iter().map(|(key, _, _)| key).min().unwrap();
4562 432 : let key_max = data.iter().map(|(key, _, _)| key).max().unwrap();
4563 174 : Self {
4564 174 : key_range: (*key_min)..(key_max.next()),
4565 174 : lsn_range,
4566 174 : data,
4567 174 : }
4568 174 : }
4569 :
4570 30 : pub(crate) fn layer_name(&self) -> LayerName {
4571 30 : LayerName::Delta(super::storage_layer::DeltaLayerName {
4572 30 : key_range: self.key_range.clone(),
4573 30 : lsn_range: self.lsn_range.clone(),
4574 30 : })
4575 30 : }
4576 : }
4577 :
4578 : impl Timeline {
4579 84 : async fn finish_compact_batch(
4580 84 : self: &Arc<Self>,
4581 84 : new_deltas: &[ResidentLayer],
4582 84 : new_images: &[ResidentLayer],
4583 84 : layers_to_remove: &[Layer],
4584 84 : ) -> Result<(), CompactionError> {
4585 84 : let mut guard = tokio::select! {
4586 84 : guard = self.layers.write() => guard,
4587 84 : _ = self.cancel.cancelled() => {
4588 0 : return Err(CompactionError::ShuttingDown);
4589 : }
4590 : };
4591 :
4592 84 : let mut duplicated_layers = HashSet::new();
4593 84 :
4594 84 : let mut insert_layers = Vec::with_capacity(new_deltas.len());
4595 :
4596 1008 : for l in new_deltas {
4597 924 : if guard.contains(l.as_ref()) {
4598 : // expected in tests
4599 0 : tracing::error!(layer=%l, "duplicated L1 layer");
4600 :
4601 : // Good ways to cause a duplicate: we repeatedly error after taking the write lock
4602 : // `guard` on self.layers. As of writing this, there are no error returns except
4603 : // for compact_level0_phase1 creating an L0, which does not happen in practice
4604 : // because we have not implemented L0 => L0 compaction.
4605 0 : duplicated_layers.insert(l.layer_desc().key());
4606 924 : } else if LayerMap::is_l0(&l.layer_desc().key_range, l.layer_desc().is_delta) {
4607 0 : return Err(CompactionError::Other(anyhow::anyhow!("compaction generates a L0 layer file as output, which will cause infinite compaction.")));
4608 924 : } else {
4609 924 : insert_layers.push(l.clone());
4610 924 : }
4611 : }
4612 :
4613 : // only remove those inputs which were not outputs
4614 84 : let remove_layers: Vec<Layer> = layers_to_remove
4615 84 : .iter()
4616 1206 : .filter(|l| !duplicated_layers.contains(&l.layer_desc().key()))
4617 84 : .cloned()
4618 84 : .collect();
4619 84 :
4620 84 : if !new_images.is_empty() {
4621 0 : guard
4622 0 : .open_mut()?
4623 0 : .track_new_image_layers(new_images, &self.metrics);
4624 84 : }
4625 :
4626 84 : guard
4627 84 : .open_mut()?
4628 84 : .finish_compact_l0(&remove_layers, &insert_layers, &self.metrics);
4629 84 :
4630 84 : self.remote_client
4631 84 : .schedule_compaction_update(&remove_layers, new_deltas)?;
4632 :
4633 84 : drop_wlock(guard);
4634 84 :
4635 84 : Ok(())
4636 84 : }
4637 :
4638 0 : async fn rewrite_layers(
4639 0 : self: &Arc<Self>,
4640 0 : mut replace_layers: Vec<(Layer, ResidentLayer)>,
4641 0 : mut drop_layers: Vec<Layer>,
4642 0 : ) -> Result<(), CompactionError> {
4643 0 : let mut guard = self.layers.write().await;
4644 :
4645 : // Trim our lists in case our caller (compaction) raced with someone else (GC) removing layers: we want
4646 : // to avoid double-removing, and avoid rewriting something that was removed.
4647 0 : replace_layers.retain(|(l, _)| guard.contains(l));
4648 0 : drop_layers.retain(|l| guard.contains(l));
4649 0 :
4650 0 : guard
4651 0 : .open_mut()?
4652 0 : .rewrite_layers(&replace_layers, &drop_layers, &self.metrics);
4653 0 :
4654 0 : let upload_layers: Vec<_> = replace_layers.into_iter().map(|r| r.1).collect();
4655 0 :
4656 0 : self.remote_client
4657 0 : .schedule_compaction_update(&drop_layers, &upload_layers)?;
4658 :
4659 0 : Ok(())
4660 0 : }
4661 :
4662 : /// Schedules the uploads of the given image layers
4663 1092 : fn upload_new_image_layers(
4664 1092 : self: &Arc<Self>,
4665 1092 : new_images: impl IntoIterator<Item = ResidentLayer>,
4666 1092 : ) -> Result<(), super::upload_queue::NotInitialized> {
4667 1170 : for layer in new_images {
4668 78 : self.remote_client.schedule_layer_file_upload(layer)?;
4669 : }
4670 : // Should any new image layers have been created, not uploading index_part will
4671 : // result in a mismatch between remote_physical_size and the layermap-calculated
4672 : // size, which will fail some tests, but should not be an issue otherwise.
4673 1092 : self.remote_client
4674 1092 : .schedule_index_upload_for_file_changes()?;
4675 1092 : Ok(())
4676 1092 : }
4677 :
4678 : /// Find the Lsns above which layer files need to be retained on
4679 : /// garbage collection.
4680 : ///
4681 : /// We calculate two cutoffs, one based on time and one based on WAL size. `pitr`
4682 : /// controls the time cutoff (or ZERO to disable time-based retention), and `space_cutoff` controls
4683 : /// the space-based retention.
4684 : ///
4685 : /// This function doesn't simply calculate time & space based retention: it treats time-based
4686 : /// retention as authoritative if enabled, and falls back to space-based retention if calculating
4687 : /// the LSN for a time point isn't possible. Therefore the GcCutoffs::space in the response might
4688 : /// be different from the `space_cutoff` input. Callers should treat the min() of the two cutoffs
4689 : /// in the response as the GC cutoff point for the timeline.
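 : ///
 : /// An illustrative sketch of the caller side (mirroring how `gc` below combines the cutoffs;
 : /// not a verbatim caller):
 : ///
 : /// ```ignore
 : /// let cutoffs = timeline.find_gc_cutoffs(space_cutoff, pitr, &cancel, &ctx).await?;
 : /// let new_gc_cutoff = Lsn::min(cutoffs.space, cutoffs.time);
 : /// ```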
4690 2262 : #[instrument(skip_all, fields(timeline_id=%self.timeline_id))]
4691 : pub(super) async fn find_gc_cutoffs(
4692 : &self,
4693 : space_cutoff: Lsn,
4694 : pitr: Duration,
4695 : cancel: &CancellationToken,
4696 : ctx: &RequestContext,
4697 : ) -> Result<GcCutoffs, PageReconstructError> {
4698 : let _timer = self
4699 : .metrics
4700 : .find_gc_cutoffs_histo
4701 : .start_timer()
4702 : .record_on_drop();
4703 :
4704 : pausable_failpoint!("Timeline::find_gc_cutoffs-pausable");
4705 :
4706 : if cfg!(test) {
4707 : // Unit tests which specify zero PITR interval expect to avoid doing any I/O for timestamp lookup
4708 : if pitr == Duration::ZERO {
4709 : return Ok(GcCutoffs {
4710 : time: self.get_last_record_lsn(),
4711 : space: space_cutoff,
4712 : });
4713 : }
4714 : }
4715 :
4716 : // Calculate a time-based limit on how much to retain:
4717 : // - if PITR interval is set, then this is our cutoff.
4718 : // - if PITR interval is not set, then we do a lookup
4719 : // based on DEFAULT_PITR_INTERVAL, so that size-based retention does not result in keeping history around permanently on idle databases.
4720 : let time_cutoff = {
4721 : let now = SystemTime::now();
4722 : let time_range = if pitr == Duration::ZERO {
4723 : humantime::parse_duration(DEFAULT_PITR_INTERVAL).expect("constant is invalid")
4724 : } else {
4725 : pitr
4726 : };
4727 :
4728 : // If PITR is so large or `now` is so small that this underflows, we will retain no history (highly unexpected case)
4729 : let time_cutoff = now.checked_sub(time_range).unwrap_or(now);
4730 : let timestamp = to_pg_timestamp(time_cutoff);
4731 :
4732 : match self.find_lsn_for_timestamp(timestamp, cancel, ctx).await? {
4733 : LsnForTimestamp::Present(lsn) => Some(lsn),
4734 : LsnForTimestamp::Future(lsn) => {
4735 : // The timestamp is in the future. That sounds impossible,
4736 : // but what it really means is that there haven't been
4737 : // any commits since the cutoff timestamp.
4738 : //
4739 : // In this case we should use the LSN of the most recent commit,
4740 : // which is implicitly the last LSN in the log.
4741 : debug!("future({})", lsn);
4742 : Some(self.get_last_record_lsn())
4743 : }
4744 : LsnForTimestamp::Past(lsn) => {
4745 : debug!("past({})", lsn);
4746 : None
4747 : }
4748 : LsnForTimestamp::NoData(lsn) => {
4749 : debug!("nodata({})", lsn);
4750 : None
4751 : }
4752 : }
4753 : };
4754 :
4755 : Ok(match (pitr, time_cutoff) {
4756 : (Duration::ZERO, Some(time_cutoff)) => {
4757 : // PITR is not set. Retain the size-based limit, or the default time retention,
4758 : // whichever requires less data.
4759 : GcCutoffs {
4760 : time: self.get_last_record_lsn(),
4761 : space: std::cmp::max(time_cutoff, space_cutoff),
4762 : }
4763 : }
4764 : (Duration::ZERO, None) => {
4765 : // PITR is not set, and time lookup failed
4766 : GcCutoffs {
4767 : time: self.get_last_record_lsn(),
4768 : space: space_cutoff,
4769 : }
4770 : }
4771 : (_, None) => {
4772 : // PITR interval is set & we didn't look up a timestamp successfully. Conservatively assume PITR
4773 : // cannot advance beyond what was already GC'd, and respect space-based retention.
4774 : GcCutoffs {
4775 : time: *self.get_latest_gc_cutoff_lsn(),
4776 : space: space_cutoff,
4777 : }
4778 : }
4779 : (_, Some(time_cutoff)) => {
4780 : // PITR interval is set and we looked up the timestamp successfully. Ignore
4781 : // size-based retention and make the time cutoff authoritative.
4782 : GcCutoffs {
4783 : time: time_cutoff,
4784 : space: time_cutoff,
4785 : }
4786 : }
4787 : })
4788 : }
4789 :
4790 : /// Garbage collect layer files on a timeline that are no longer needed.
4791 : ///
4792 : /// Currently, we don't make any attempt at removing unneeded page versions
4793 : /// within a layer file. We can only remove the whole file if it's fully
4794 : /// obsolete.
4795 2262 : pub(super) async fn gc(&self) -> Result<GcResult, GcError> {
4796 : // this is most likely the background tasks, but it might be the spawned task from
4797 : // immediate_gc
4798 2262 : let _g = tokio::select! {
4799 2262 : guard = self.gc_lock.lock() => guard,
4800 2262 : _ = self.cancel.cancelled() => return Ok(GcResult::default()),
4801 : };
4802 2259 : let timer = self.metrics.garbage_collect_histo.start_timer();
4803 2259 :
4804 2259 : fail_point!("before-timeline-gc");
4805 2259 :
4806 2259 : // Is the timeline being deleted?
4807 2259 : if self.is_stopping() {
4808 0 : return Err(GcError::TimelineCancelled);
4809 2259 : }
4810 2259 :
4811 2259 : let (space_cutoff, time_cutoff, retain_lsns, max_lsn_with_valid_lease) = {
4812 2259 : let gc_info = self.gc_info.read().unwrap();
4813 2259 :
4814 2259 : let space_cutoff = min(gc_info.cutoffs.space, self.get_disk_consistent_lsn());
4815 2259 : let time_cutoff = gc_info.cutoffs.time;
4816 2259 : let retain_lsns = gc_info
4817 2259 : .retain_lsns
4818 2259 : .iter()
4819 2259 : .map(|(lsn, _child_id)| *lsn)
4820 2259 : .collect();
4821 2259 :
4822 2259 : // Gets the maximum LSN that holds the valid lease.
4823 2259 : //
4824 2259 : // Caveat: `refresh_gc_info` is in charge of updating the lease map.
4825 2259 : // Here, we do not check for stale leases again.
4826 2259 : let max_lsn_with_valid_lease = gc_info.leases.last_key_value().map(|(lsn, _)| *lsn);
4827 2259 :
4828 2259 : (
4829 2259 : space_cutoff,
4830 2259 : time_cutoff,
4831 2259 : retain_lsns,
4832 2259 : max_lsn_with_valid_lease,
4833 2259 : )
4834 2259 : };
4835 2259 :
4836 2259 : let mut new_gc_cutoff = Lsn::min(space_cutoff, time_cutoff);
4837 2259 : let standby_horizon = self.standby_horizon.load();
4838 2259 : // Hold GC for the standby, but as a safety guard do it only within some
4839 2259 : // reasonable lag.
4840 2259 : if standby_horizon != Lsn::INVALID {
4841 0 : if let Some(standby_lag) = new_gc_cutoff.checked_sub(standby_horizon) {
4842 : const MAX_ALLOWED_STANDBY_LAG: u64 = 10u64 << 30; // 10 GB
4843 0 : if standby_lag.0 < MAX_ALLOWED_STANDBY_LAG {
4844 0 : new_gc_cutoff = Lsn::min(standby_horizon, new_gc_cutoff);
4845 0 : trace!("holding off GC for standby apply LSN {}", standby_horizon);
4846 : } else {
4847 0 : warn!(
4848 0 : "standby is lagging for more than {}MB, not holding gc for it",
4849 0 : MAX_ALLOWED_STANDBY_LAG / 1024 / 1024
4850 : )
4851 : }
4852 0 : }
4853 2259 : }
4854 :
4855 : // Reset the standby horizon so it is ignored if it is not updated before the next GC.
4856 : // This is an easy way to unset it when the standby disappears, without adding
4857 : // more conf options.
4858 2259 : self.standby_horizon.store(Lsn::INVALID);
4859 2259 : self.metrics
4860 2259 : .standby_horizon_gauge
4861 2259 : .set(Lsn::INVALID.0 as i64);
4862 :
4863 2259 : let res = self
4864 2259 : .gc_timeline(
4865 2259 : space_cutoff,
4866 2259 : time_cutoff,
4867 2259 : retain_lsns,
4868 2259 : max_lsn_with_valid_lease,
4869 2259 : new_gc_cutoff,
4870 2259 : )
4871 2259 : .instrument(
4872 2259 : info_span!("gc_timeline", timeline_id = %self.timeline_id, cutoff = %new_gc_cutoff),
4873 : )
4874 0 : .await?;
4875 :
4876 : // only record successes
4877 2259 : timer.stop_and_record();
4878 2259 :
4879 2259 : Ok(res)
4880 2262 : }
4881 :
4882 2259 : async fn gc_timeline(
4883 2259 : &self,
4884 2259 : space_cutoff: Lsn,
4885 2259 : time_cutoff: Lsn,
4886 2259 : retain_lsns: Vec<Lsn>,
4887 2259 : max_lsn_with_valid_lease: Option<Lsn>,
4888 2259 : new_gc_cutoff: Lsn,
4889 2259 : ) -> Result<GcResult, GcError> {
4890 2259 : // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc
4891 2259 :
4892 2259 : let now = SystemTime::now();
4893 2259 : let mut result: GcResult = GcResult::default();
4894 2259 :
4895 2259 : // Nothing to GC. Return early.
4896 2259 : let latest_gc_cutoff = *self.get_latest_gc_cutoff_lsn();
4897 2259 : if latest_gc_cutoff >= new_gc_cutoff {
4898 66 : info!(
4899 0 : "Nothing to GC: new_gc_cutoff_lsn {new_gc_cutoff}, latest_gc_cutoff_lsn {latest_gc_cutoff}",
4900 : );
4901 66 : return Ok(result);
4902 2193 : }
4903 :
4904 : // We need to ensure that no one tries to read page versions or create
4905 : // branches at a point before latest_gc_cutoff_lsn. See branch_timeline()
4906 : // for details. This will block until the old value is no longer in use.
4907 : //
4908 : // The GC cutoff should only ever move forwards.
4909 2193 : let waitlist = {
4910 2193 : let write_guard = self.latest_gc_cutoff_lsn.lock_for_write();
4911 2193 : if *write_guard > new_gc_cutoff {
4912 0 : return Err(GcError::BadLsn {
4913 0 : why: format!(
4914 0 : "Cannot move GC cutoff LSN backwards (was {}, new {})",
4915 0 : *write_guard, new_gc_cutoff
4916 0 : ),
4917 0 : });
4918 2193 : }
4919 2193 :
4920 2193 : write_guard.store_and_unlock(new_gc_cutoff)
4921 2193 : };
4922 2193 : waitlist.wait().await;
4923 :
4924 2193 : info!("GC starting");
4925 :
4926 2193 : debug!("retain_lsns: {:?}", retain_lsns);
4927 :
4928 2193 : let mut layers_to_remove = Vec::new();
4929 :
4930 : // Scan all layers in the timeline (remote or on-disk).
4931 : //
4932 : // Garbage collect the layer if all conditions are satisfied:
4933 : // 1. it is older than the cutoff LSN;
4934 : // 2. it is older than the PITR interval;
4935 : // 3. it doesn't need to be retained for 'retain_lsns';
4936 : // 4. it does not need to be kept for LSNs holding valid leases;
4937 : // 5. newer on-disk image layers cover the layer's whole key range.
4938 : //
4939 : // TODO holding a write lock is too aggressive and avoidable
4940 2193 : let mut guard = self.layers.write().await;
4941 2193 : let layers = guard.layer_map()?;
4942 37245 : 'outer: for l in layers.iter_historic_layers() {
4943 37245 : result.layers_total += 1;
4944 37245 :
4945 37245 : // 1. Is it newer than the GC horizon cutoff point?
4946 37245 : if l.get_lsn_range().end > space_cutoff {
4947 2223 : debug!(
4948 0 : "keeping {} because it's newer than space_cutoff {}",
4949 0 : l.layer_name(),
4950 : space_cutoff,
4951 : );
4952 2223 : result.layers_needed_by_cutoff += 1;
4953 2223 : continue 'outer;
4954 35022 : }
4955 35022 :
4956 35022 : // 2. Is it newer than the PITR cutoff point?
4957 35022 : if l.get_lsn_range().end > time_cutoff {
4958 0 : debug!(
4959 0 : "keeping {} because it's newer than time_cutoff {}",
4960 0 : l.layer_name(),
4961 : time_cutoff,
4962 : );
4963 0 : result.layers_needed_by_pitr += 1;
4964 0 : continue 'outer;
4965 35022 : }
4966 :
4967 : // 3. Is it needed by a child branch?
4968 : // NOTE: with this check we would keep data that
4969 : // might be referenced by child branches forever.
4970 : // We can track this in child timeline GC and delete parent layers when
4971 : // they are no longer needed. This might be complicated with long inheritance chains.
4972 : //
4973 : // TODO Vec is not a great choice for `retain_lsns`
4974 35022 : for retain_lsn in &retain_lsns {
4975 : // start_lsn is inclusive
4976 30 : if &l.get_lsn_range().start <= retain_lsn {
4977 30 : debug!(
4978 0 : "keeping {} because it still might be referenced by a child branch forked at {} is_dropped: xx is_incremental: {}",
4979 0 : l.layer_name(),
4980 0 : retain_lsn,
4981 0 : l.is_incremental(),
4982 : );
4983 30 : result.layers_needed_by_branches += 1;
4984 30 : continue 'outer;
4985 0 : }
4986 : }
4987 :
4988 : // 4. Is there a valid lease that requires us to keep this layer?
4989 34992 : if let Some(lsn) = &max_lsn_with_valid_lease {
4990 : // keep if layer start <= any of the lease
4991 54 : if &l.get_lsn_range().start <= lsn {
4992 42 : debug!(
4993 0 : "keeping {} because there is a valid lease preventing GC at {}",
4994 0 : l.layer_name(),
4995 : lsn,
4996 : );
4997 42 : result.layers_needed_by_leases += 1;
4998 42 : continue 'outer;
4999 12 : }
5000 34938 : }
5001 :
5002 : // 5. Is there a later on-disk layer for this relation?
5003 : //
5004 : // The end-LSN is exclusive, while disk_consistent_lsn is
5005 : // inclusive. For example, if disk_consistent_lsn is 100, it is
5006 : // OK for a delta layer to have end LSN 101, but if the end LSN
5007 : // is 102, then it might not have been fully flushed to disk
5008 : // before crash.
5009 : //
5010 : // For example, imagine that the following layers exist:
5011 : //
5012 : // 1000 - image (A)
5013 : // 1000-2000 - delta (B)
5014 : // 2000 - image (C)
5015 : // 2000-3000 - delta (D)
5016 : // 3000 - image (E)
5017 : //
5018 : // If GC horizon is at 2500, we can remove layers A and B, but
5019 : // we cannot remove C, even though it's older than 2500, because
5020 : // the delta layer 2000-3000 depends on it.
5021 34950 : if !layers
5022 34950 : .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff))
5023 : {
5024 34926 : debug!("keeping {} because it is the latest layer", l.layer_name());
5025 34926 : result.layers_not_updated += 1;
5026 34926 : continue 'outer;
5027 24 : }
5028 24 :
5029 24 : // We didn't find any reason to keep this file, so remove it.
5030 24 : debug!(
5031 0 : "garbage collecting {} is_dropped: xx is_incremental: {}",
5032 0 : l.layer_name(),
5033 0 : l.is_incremental(),
5034 : );
5035 24 : layers_to_remove.push(l);
5036 : }
5037 :
5038 2193 : if !layers_to_remove.is_empty() {
5039 : // Persist the new GC cutoff value before we actually remove anything.
5040 : // This also unconditionally schedules an index_part.json update, even though we will
5041 : // be doing one a bit later with the unlinked gc'd layers.
5042 18 : let disk_consistent_lsn = self.disk_consistent_lsn.load();
5043 18 : self.schedule_uploads(disk_consistent_lsn, None)
5044 18 : .map_err(|e| {
5045 0 : if self.cancel.is_cancelled() {
5046 0 : GcError::TimelineCancelled
5047 : } else {
5048 0 : GcError::Remote(e)
5049 : }
5050 18 : })?;
5051 :
5052 18 : let gc_layers = layers_to_remove
5053 18 : .iter()
5054 24 : .map(|x| guard.get_from_desc(x))
5055 18 : .collect::<Vec<Layer>>();
5056 18 :
5057 18 : result.layers_removed = gc_layers.len() as u64;
5058 18 :
5059 18 : self.remote_client.schedule_gc_update(&gc_layers)?;
5060 :
5061 18 : guard.open_mut()?.finish_gc_timeline(&gc_layers);
5062 18 :
5063 18 : #[cfg(feature = "testing")]
5064 18 : {
5065 18 : result.doomed_layers = gc_layers;
5066 18 : }
5067 2175 : }
5068 :
5069 2193 : info!(
5070 0 : "GC completed removing {} layers, cutoff {}",
5071 : result.layers_removed, new_gc_cutoff
5072 : );
5073 :
5074 2193 : result.elapsed = now.elapsed().unwrap_or(Duration::ZERO);
5075 2193 : Ok(result)
5076 2259 : }
5077 :
5078 : /// Reconstruct a value, using the given base image and WAL records in 'data'.
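 : ///
 : /// If `data` contains no WAL records, the base image is returned as-is; otherwise the
 : /// records are replayed on top of the (optional) base image via the WAL redo manager.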
5079 2001795 : async fn reconstruct_value(
5080 2001795 : &self,
5081 2001795 : key: Key,
5082 2001795 : request_lsn: Lsn,
5083 2001795 : mut data: ValueReconstructState,
5084 2001795 : ) -> Result<Bytes, PageReconstructError> {
5085 2001795 : // Perform WAL redo if needed
5086 2001795 : data.records.reverse();
5087 2001795 :
5088 2001795 : // If we have a page image, and no WAL, we're all set
5089 2001795 : if data.records.is_empty() {
5090 2000757 : if let Some((img_lsn, img)) = &data.img {
5091 2000757 : trace!(
5092 0 : "found page image for key {} at {}, no WAL redo required, req LSN {}",
5093 : key,
5094 : img_lsn,
5095 : request_lsn,
5096 : );
5097 2000757 : Ok(img.clone())
5098 : } else {
5099 0 : Err(PageReconstructError::from(anyhow!(
5100 0 : "base image for {key} at {request_lsn} not found"
5101 0 : )))
5102 : }
5103 : } else {
5104 : // We need to do WAL redo.
5105 : //
5106 : // If we don't have a base image, then the oldest WAL record had better initialize
5107 : // the page.
5108 1038 : if data.img.is_none() && !data.records.first().unwrap().1.will_init() {
5109 0 : Err(PageReconstructError::from(anyhow!(
5110 0 : "Base image for {} at {} not found, but got {} WAL records",
5111 0 : key,
5112 0 : request_lsn,
5113 0 : data.records.len()
5114 0 : )))
5115 : } else {
5116 1038 : if data.img.is_some() {
5117 1026 : trace!(
5118 0 : "found {} WAL records and a base image for {} at {}, performing WAL redo",
5119 0 : data.records.len(),
5120 : key,
5121 : request_lsn
5122 : );
5123 : } else {
5124 12 : trace!("found {} WAL records that will init the page for {} at {}, performing WAL redo", data.records.len(), key, request_lsn);
5125 : };
5126 1038 : let res = self
5127 1038 : .walredo_mgr
5128 1038 : .as_ref()
5129 1038 : .context("timeline has no walredo manager")
5130 1038 : .map_err(PageReconstructError::WalRedo)?
5131 1038 : .request_redo(key, request_lsn, data.img, data.records, self.pg_version)
5132 0 : .await;
5133 1038 : let img = match res {
5134 1038 : Ok(img) => img,
5135 0 : Err(walredo::Error::Cancelled) => return Err(PageReconstructError::Cancelled),
5136 0 : Err(walredo::Error::Other(e)) => {
5137 0 : return Err(PageReconstructError::WalRedo(
5138 0 : e.context("reconstruct a page image"),
5139 0 : ))
5140 : }
5141 : };
5142 1038 : Ok(img)
5143 : }
5144 : }
5145 2001795 : }
5146 :
5147 0 : pub(crate) async fn spawn_download_all_remote_layers(
5148 0 : self: Arc<Self>,
5149 0 : request: DownloadRemoteLayersTaskSpawnRequest,
5150 0 : ) -> Result<DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskInfo> {
5151 : use pageserver_api::models::DownloadRemoteLayersTaskState;
5152 :
5153 : // This is not really needed anymore; there are tests which check the return value from
5154 : // the HTTP API. It would be better not to maintain this anymore.
5155 :
5156 0 : let mut status_guard = self.download_all_remote_layers_task_info.write().unwrap();
5157 0 : if let Some(st) = &*status_guard {
5158 0 : match &st.state {
5159 : DownloadRemoteLayersTaskState::Running => {
5160 0 : return Err(st.clone());
5161 : }
5162 : DownloadRemoteLayersTaskState::ShutDown
5163 0 : | DownloadRemoteLayersTaskState::Completed => {
5164 0 : *status_guard = None;
5165 0 : }
5166 : }
5167 0 : }
5168 :
5169 0 : let self_clone = Arc::clone(&self);
5170 0 : let task_id = task_mgr::spawn(
5171 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
5172 0 : task_mgr::TaskKind::DownloadAllRemoteLayers,
5173 0 : self.tenant_shard_id,
5174 0 : Some(self.timeline_id),
5175 0 : "download all remote layers task",
5176 0 : async move {
5177 0 : self_clone.download_all_remote_layers(request).await;
5178 0 : let mut status_guard = self_clone.download_all_remote_layers_task_info.write().unwrap();
5179 0 : match &mut *status_guard {
5180 : None => {
5181 0 : warn!("tasks status is supposed to be Some(), since we are running");
5182 : }
5183 0 : Some(st) => {
5184 0 : let exp_task_id = format!("{}", task_mgr::current_task_id().unwrap());
5185 0 : if st.task_id != exp_task_id {
5186 0 : warn!("task id changed while we were still running, expecting {} but have {}", exp_task_id, st.task_id);
5187 0 : } else {
5188 0 : st.state = DownloadRemoteLayersTaskState::Completed;
5189 0 : }
5190 : }
5191 : };
5192 0 : Ok(())
5193 0 : }
5194 0 : .instrument(info_span!(parent: None, "download_all_remote_layers", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
5195 : );
5196 :
5197 0 : let initial_info = DownloadRemoteLayersTaskInfo {
5198 0 : task_id: format!("{task_id}"),
5199 0 : state: DownloadRemoteLayersTaskState::Running,
5200 0 : total_layer_count: 0,
5201 0 : successful_download_count: 0,
5202 0 : failed_download_count: 0,
5203 0 : };
5204 0 : *status_guard = Some(initial_info.clone());
5205 0 :
5206 0 : Ok(initial_info)
5207 0 : }
5208 :
5209 0 : async fn download_all_remote_layers(
5210 0 : self: &Arc<Self>,
5211 0 : request: DownloadRemoteLayersTaskSpawnRequest,
5212 0 : ) {
5213 : use pageserver_api::models::DownloadRemoteLayersTaskState;
5214 :
5215 0 : let remaining = {
5216 0 : let guard = self.layers.read().await;
5217 0 : let Ok(lm) = guard.layer_map() else {
5218 : // technically here we could look into iterating accessible layers, but downloading
5219 : // all layers of a shutdown timeline makes no sense regardless.
5220 0 : tracing::info!("attempted to download all layers of shutdown timeline");
5221 0 : return;
5222 : };
5223 0 : lm.iter_historic_layers()
5224 0 : .map(|desc| guard.get_from_desc(&desc))
5225 0 : .collect::<Vec<_>>()
5226 0 : };
5227 0 : let total_layer_count = remaining.len();
5228 :
5229 : macro_rules! lock_status {
5230 : ($st:ident) => {
5231 : let mut st = self.download_all_remote_layers_task_info.write().unwrap();
5232 : let st = st
5233 : .as_mut()
5234 : .expect("this function is only called after the task has been spawned");
5235 : assert_eq!(
5236 : st.task_id,
5237 : format!(
5238 : "{}",
5239 : task_mgr::current_task_id().expect("we run inside a task_mgr task")
5240 : )
5241 : );
5242 : let $st = st;
5243 : };
5244 : }
5245 :
5246 : {
5247 0 : lock_status!(st);
5248 0 : st.total_layer_count = total_layer_count as u64;
5249 0 : }
5250 0 :
5251 0 : let mut remaining = remaining.into_iter();
5252 0 : let mut have_remaining = true;
5253 0 : let mut js = tokio::task::JoinSet::new();
5254 0 :
5255 0 : let cancel = task_mgr::shutdown_token();
5256 0 :
5257 0 : let limit = request.max_concurrent_downloads;
5258 :
5259 : loop {
5260 0 : while js.len() < limit.get() && have_remaining && !cancel.is_cancelled() {
5261 0 : let Some(next) = remaining.next() else {
5262 0 : have_remaining = false;
5263 0 : break;
5264 : };
5265 :
5266 0 : let span = tracing::info_span!("download", layer = %next);
5267 :
5268 0 : js.spawn(
5269 0 : async move {
5270 0 : let res = next.download().await;
5271 0 : (next, res)
5272 0 : }
5273 0 : .instrument(span),
5274 0 : );
5275 0 : }
5276 :
5277 0 : while let Some(res) = js.join_next().await {
5278 0 : match res {
5279 : Ok((_, Ok(_))) => {
5280 0 : lock_status!(st);
5281 0 : st.successful_download_count += 1;
5282 : }
5283 0 : Ok((layer, Err(e))) => {
5284 0 : tracing::error!(%layer, "download failed: {e:#}");
5285 0 : lock_status!(st);
5286 0 : st.failed_download_count += 1;
5287 : }
5288 0 : Err(je) if je.is_cancelled() => unreachable!("not used here"),
5289 0 : Err(je) if je.is_panic() => {
5290 0 : lock_status!(st);
5291 0 : st.failed_download_count += 1;
5292 : }
5293 0 : Err(je) => tracing::warn!("unknown joinerror: {je:?}"),
5294 : }
5295 : }
5296 :
5297 0 : if js.is_empty() && (!have_remaining || cancel.is_cancelled()) {
5298 0 : break;
5299 0 : }
5300 : }
5301 :
5302 : {
5303 0 : lock_status!(st);
5304 0 : st.state = DownloadRemoteLayersTaskState::Completed;
5305 : }
5306 0 : }
5307 :
5308 0 : pub(crate) fn get_download_all_remote_layers_task_info(
5309 0 : &self,
5310 0 : ) -> Option<DownloadRemoteLayersTaskInfo> {
5311 0 : self.download_all_remote_layers_task_info
5312 0 : .read()
5313 0 : .unwrap()
5314 0 : .clone()
5315 0 : }
5316 : }
5317 :
5318 : impl Timeline {
5319 : /// Returns non-remote layers for eviction.
5320 0 : pub(crate) async fn get_local_layers_for_disk_usage_eviction(&self) -> DiskUsageEvictionInfo {
5321 0 : let guard = self.layers.read().await;
5322 0 : let mut max_layer_size: Option<u64> = None;
5323 0 :
5324 0 : let resident_layers = guard
5325 0 : .likely_resident_layers()
5326 0 : .map(|layer| {
5327 0 : let file_size = layer.layer_desc().file_size;
5328 0 : max_layer_size = max_layer_size.map_or(Some(file_size), |m| Some(m.max(file_size)));
5329 0 :
5330 0 : let last_activity_ts = layer.latest_activity();
5331 0 :
5332 0 : EvictionCandidate {
5333 0 : layer: layer.to_owned().into(),
5334 0 : last_activity_ts,
5335 0 : relative_last_activity: finite_f32::FiniteF32::ZERO,
5336 0 : visibility: layer.visibility(),
5337 0 : }
5338 0 : })
5339 0 : .collect();
5340 0 :
5341 0 : DiskUsageEvictionInfo {
5342 0 : max_layer_size,
5343 0 : resident_layers,
5344 0 : }
5345 0 : }
5346 :
5347 5124 : pub(crate) fn get_shard_index(&self) -> ShardIndex {
5348 5124 : ShardIndex {
5349 5124 : shard_number: self.tenant_shard_id.shard_number,
5350 5124 : shard_count: self.tenant_shard_id.shard_count,
5351 5124 : }
5352 5124 : }
5353 :
5354 : /// Persistently blocks gc for `Manual` reason.
5355 : ///
5356 : /// Returns true if no such block existed before, false otherwise.
5357 0 : pub(crate) async fn block_gc(&self, tenant: &super::Tenant) -> anyhow::Result<bool> {
5358 : use crate::tenant::remote_timeline_client::index::GcBlockingReason;
5359 0 : assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
5360 0 : tenant.gc_block.insert(self, GcBlockingReason::Manual).await
5361 0 : }
5362 :
5363 : /// Persistently unblocks gc for `Manual` reason.
5364 0 : pub(crate) async fn unblock_gc(&self, tenant: &super::Tenant) -> anyhow::Result<()> {
5365 : use crate::tenant::remote_timeline_client::index::GcBlockingReason;
5366 0 : assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
5367 0 : tenant.gc_block.remove(self, GcBlockingReason::Manual).await
5368 0 : }
5369 :
5370 : #[cfg(test)]
5371 108 : pub(super) fn force_advance_lsn(self: &Arc<Timeline>, new_lsn: Lsn) {
5372 108 : self.last_record_lsn.advance(new_lsn);
5373 108 : }
5374 :
5375 : #[cfg(test)]
5376 6 : pub(super) fn force_set_disk_consistent_lsn(&self, new_value: Lsn) {
5377 6 : self.disk_consistent_lsn.store(new_value);
5378 6 : }
5379 :
5380 : /// Force create an image layer and place it into the layer map.
5381 : ///
5382 : /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
5383 : /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are placed into the layer map in one run.
5384 : #[cfg(test)]
5385 150 : pub(super) async fn force_create_image_layer(
5386 150 : self: &Arc<Timeline>,
5387 150 : lsn: Lsn,
5388 150 : mut images: Vec<(Key, Bytes)>,
5389 150 : check_start_lsn: Option<Lsn>,
5390 150 : ctx: &RequestContext,
5391 150 : ) -> anyhow::Result<()> {
5392 150 : let last_record_lsn = self.get_last_record_lsn();
5393 150 : assert!(
5394 150 : lsn <= last_record_lsn,
5395 0 : "advance last record lsn before inserting a layer, lsn={lsn}, last_record_lsn={last_record_lsn}"
5396 : );
5397 150 : if let Some(check_start_lsn) = check_start_lsn {
5398 150 : assert!(lsn >= check_start_lsn);
5399 0 : }
5400 324 : images.sort_unstable_by(|(ka, _), (kb, _)| ka.cmp(kb));
5401 150 : let min_key = *images.first().map(|(k, _)| k).unwrap();
5402 150 : let end_key = images.last().map(|(k, _)| k).unwrap().next();
5403 150 : let mut image_layer_writer = ImageLayerWriter::new(
5404 150 : self.conf,
5405 150 : self.timeline_id,
5406 150 : self.tenant_shard_id,
5407 150 : &(min_key..end_key),
5408 150 : lsn,
5409 150 : ctx,
5410 150 : )
5411 75 : .await?;
5412 624 : for (key, img) in images {
5413 474 : image_layer_writer.put_image(key, img, ctx).await?;
5414 : }
5415 300 : let (desc, path) = image_layer_writer.finish(ctx).await?;
5416 150 : let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
5417 150 : info!("force created image layer {}", image_layer.local_path());
5418 : {
5419 150 : let mut guard = self.layers.write().await;
5420 150 : guard.open_mut().unwrap().force_insert_layer(image_layer);
5421 150 : }
5422 150 :
5423 150 : Ok(())
5424 150 : }
5425 :
5426 : /// Force create a delta layer and place it into the layer map.
5427 : ///
5428 : /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
5429 : /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are placed into the layer map in one run.
5430 : #[cfg(test)]
5431 180 : pub(super) async fn force_create_delta_layer(
5432 180 : self: &Arc<Timeline>,
5433 180 : mut deltas: DeltaLayerTestDesc,
5434 180 : check_start_lsn: Option<Lsn>,
5435 180 : ctx: &RequestContext,
5436 180 : ) -> anyhow::Result<()> {
5437 180 : let last_record_lsn = self.get_last_record_lsn();
5438 180 : deltas
5439 180 : .data
5440 258 : .sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb)));
5441 180 : assert!(deltas.data.first().unwrap().0 >= deltas.key_range.start);
5442 180 : assert!(deltas.data.last().unwrap().0 < deltas.key_range.end);
5443 618 : for (_, lsn, _) in &deltas.data {
5444 438 : assert!(deltas.lsn_range.start <= *lsn && *lsn < deltas.lsn_range.end);
5445 : }
5446 180 : assert!(
5447 180 : deltas.lsn_range.end <= last_record_lsn,
5448 0 : "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}",
5449 : deltas.lsn_range.end,
5450 : last_record_lsn
5451 : );
5452 180 : if let Some(check_start_lsn) = check_start_lsn {
5453 180 : assert!(deltas.lsn_range.start >= check_start_lsn);
5454 0 : }
5455 : // Check that the delta layer does not violate the LSN invariant: legacy compaction should always produce a batch of
5456 : // layers with the same start/end LSN, and so should the force-inserted layer.
5457 : {
5458 : /// Checks if `a` overlaps with `b`, assuming a/b = [start, end).
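 : ///
 : /// For example, `overlaps_with(&(1..3), &(2..5))` is true, while
 : /// `overlaps_with(&(1..2), &(2..3))` is false (end bounds are exclusive).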
5459 114 : pub fn overlaps_with<T: Ord>(a: &Range<T>, b: &Range<T>) -> bool {
5460 114 : !(a.end <= b.start || b.end <= a.start)
5461 114 : }
5462 :
5463 180 : if deltas.key_range.start.next() != deltas.key_range.end {
5464 102 : let guard = self.layers.read().await;
5465 102 : let mut invalid_layers =
5466 198 : guard.layer_map()?.iter_historic_layers().filter(|layer| {
5467 198 : layer.is_delta()
5468 114 : && overlaps_with(&layer.lsn_range, &deltas.lsn_range)
5469 36 : && layer.lsn_range != deltas.lsn_range
5470 : // skip single-key layer files
5471 12 : && layer.key_range.start.next() != layer.key_range.end
5472 198 : });
5473 102 : if let Some(layer) = invalid_layers.next() {
5474 : // If a delta layer overlaps with another delta layer AND their LSN range is not the same, panic
5475 0 : panic!(
5476 0 : "inserted layer violates delta layer LSN invariant: current_lsn_range={}..{}, conflict_lsn_range={}..{}",
5477 0 : deltas.lsn_range.start, deltas.lsn_range.end, layer.lsn_range.start, layer.lsn_range.end
5478 0 : );
5479 102 : }
5480 78 : }
5481 : }
5482 180 : let mut delta_layer_writer = DeltaLayerWriter::new(
5483 180 : self.conf,
5484 180 : self.timeline_id,
5485 180 : self.tenant_shard_id,
5486 180 : deltas.key_range.start,
5487 180 : deltas.lsn_range,
5488 180 : ctx,
5489 180 : )
5490 90 : .await?;
5491 618 : for (key, lsn, val) in deltas.data {
5492 438 : delta_layer_writer.put_value(key, lsn, val, ctx).await?;
5493 : }
5494 450 : let (desc, path) = delta_layer_writer.finish(deltas.key_range.end, ctx).await?;
5495 180 : let delta_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
5496 180 : info!("force created delta layer {}", delta_layer.local_path());
5497 : {
5498 180 : let mut guard = self.layers.write().await;
5499 180 : guard.open_mut().unwrap().force_insert_layer(delta_layer);
5500 180 : }
5501 180 :
5502 180 : Ok(())
5503 180 : }
5504 :
5505 : /// Return all keys at the LSN in the image layers
5506 : #[cfg(test)]
5507 18 : pub(crate) async fn inspect_image_layers(
5508 18 : self: &Arc<Timeline>,
5509 18 : lsn: Lsn,
5510 18 : ctx: &RequestContext,
5511 18 : ) -> anyhow::Result<Vec<(Key, Bytes)>> {
5512 18 : let mut all_data = Vec::new();
5513 18 : let guard = self.layers.read().await;
5514 102 : for layer in guard.layer_map()?.iter_historic_layers() {
5515 102 : if !layer.is_delta() && layer.image_layer_lsn() == lsn {
5516 24 : let layer = guard.get_from_desc(&layer);
5517 24 : let mut reconstruct_data = ValuesReconstructState::default();
5518 24 : layer
5519 24 : .get_values_reconstruct_data(
5520 24 : KeySpace::single(Key::MIN..Key::MAX),
5521 24 : lsn..Lsn(lsn.0 + 1),
5522 24 : &mut reconstruct_data,
5523 24 : ctx,
5524 24 : )
5525 39 : .await?;
5526 222 : for (k, v) in reconstruct_data.keys {
5527 198 : all_data.push((k, v?.img.unwrap().1));
5528 : }
5529 78 : }
5530 : }
5531 18 : all_data.sort();
5532 18 : Ok(all_data)
5533 18 : }
5534 :
5535 : /// Get all historic layer descriptors in the layer map
5536 : #[cfg(test)]
5537 6 : pub(crate) async fn inspect_historic_layers(
5538 6 : self: &Arc<Timeline>,
5539 6 : ) -> anyhow::Result<Vec<super::storage_layer::PersistentLayerKey>> {
5540 6 : let mut layers = Vec::new();
5541 6 : let guard = self.layers.read().await;
5542 18 : for layer in guard.layer_map()?.iter_historic_layers() {
5543 18 : layers.push(layer.key());
5544 18 : }
5545 6 : Ok(layers)
5546 6 : }
5547 :
5548 : #[cfg(test)]
5549 30 : pub(crate) fn add_extra_test_dense_keyspace(&self, ks: KeySpace) {
5550 30 : let mut keyspace = self.extra_test_dense_keyspace.load().as_ref().clone();
5551 30 : keyspace.merge(&ks);
5552 30 : self.extra_test_dense_keyspace.store(Arc::new(keyspace));
5553 30 : }
5554 : }
5555 :
5556 : /// Tracks the writes that ingestion performs on a particular in-memory layer.
5557 : ///
5558 : /// Cleared upon freezing a layer.
5559 : pub(crate) struct TimelineWriterState {
5560 : open_layer: Arc<InMemoryLayer>,
5561 : current_size: u64,
5562 : // Previous Lsn which passed through
5563 : prev_lsn: Option<Lsn>,
5564 : // Largest Lsn which passed through the current writer
5565 : max_lsn: Option<Lsn>,
5566 : // Cached details of the last freeze. Avoids going through the atomic/lock on every put.
5567 : cached_last_freeze_at: Lsn,
5568 : }
5569 :
5570 : impl TimelineWriterState {
5571 3816 : fn new(open_layer: Arc<InMemoryLayer>, current_size: u64, last_freeze_at: Lsn) -> Self {
5572 3816 : Self {
5573 3816 : open_layer,
5574 3816 : current_size,
5575 3816 : prev_lsn: None,
5576 3816 : max_lsn: None,
5577 3816 : cached_last_freeze_at: last_freeze_at,
5578 3816 : }
5579 3816 : }
5580 : }
5581 :
5582 : /// Various functions to mutate the timeline.
5583 : // TODO Currently, Deref is used to allow easy access to read methods from this trait.
5584 : // This is probably considered a bad practice in Rust and should be fixed eventually,
5585 : // but will cause large code changes.
5586 : pub(crate) struct TimelineWriter<'a> {
5587 : tl: &'a Timeline,
5588 : write_guard: tokio::sync::MutexGuard<'a, Option<TimelineWriterState>>,
5589 : }
5590 :
5591 : impl Deref for TimelineWriter<'_> {
5592 : type Target = Timeline;
5593 :
5594 14421618 : fn deref(&self) -> &Self::Target {
5595 14421618 : self.tl
5596 14421618 : }
5597 : }
5598 :
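 : /// What to do with the in-memory layer before handling a write, as decided by
 : /// `get_open_layer_action` below: open a new layer if none is open, roll (freeze) the
 : /// current one when it should be flushed, or leave it as-is.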
5599 : #[derive(PartialEq)]
5600 : enum OpenLayerAction {
5601 : Roll,
5602 : Open,
5603 : None,
5604 : }
5605 :
5606 : impl<'a> TimelineWriter<'a> {
5607 14412654 : async fn handle_open_layer_action(
5608 14412654 : &mut self,
5609 14412654 : at: Lsn,
5610 14412654 : action: OpenLayerAction,
5611 14412654 : ctx: &RequestContext,
5612 14412654 : ) -> anyhow::Result<&Arc<InMemoryLayer>> {
5613 14412654 : match action {
5614 : OpenLayerAction::Roll => {
5615 240 : let freeze_at = self.write_guard.as_ref().unwrap().max_lsn.unwrap();
5616 240 : self.roll_layer(freeze_at).await?;
5617 240 : self.open_layer(at, ctx).await?;
5618 : }
5619 3576 : OpenLayerAction::Open => self.open_layer(at, ctx).await?,
5620 : OpenLayerAction::None => {
5621 14408838 : assert!(self.write_guard.is_some());
5622 : }
5623 : }
5624 :
5625 14412654 : Ok(&self.write_guard.as_ref().unwrap().open_layer)
5626 14412654 : }
5627 :
5628 3816 : async fn open_layer(&mut self, at: Lsn, ctx: &RequestContext) -> anyhow::Result<()> {
5629 3816 : let layer = self
5630 3816 : .tl
5631 3816 : .get_layer_for_write(at, &self.write_guard, ctx)
5632 2162 : .await?;
5633 3816 : let initial_size = layer.size().await?;
5634 :
5635 3816 : let last_freeze_at = self.last_freeze_at.load();
5636 3816 : self.write_guard.replace(TimelineWriterState::new(
5637 3816 : layer,
5638 3816 : initial_size,
5639 3816 : last_freeze_at,
5640 3816 : ));
5641 3816 :
5642 3816 : Ok(())
5643 3816 : }
5644 :
5645 240 : async fn roll_layer(&mut self, freeze_at: Lsn) -> Result<(), FlushLayerError> {
5646 240 : let current_size = self.write_guard.as_ref().unwrap().current_size;
5647 240 :
5648 240 : // self.write_guard will be taken by the freezing
5649 240 : self.tl
5650 240 : .freeze_inmem_layer_at(freeze_at, &mut self.write_guard)
5651 16 : .await?;
5652 :
5653 240 : assert!(self.write_guard.is_none());
5654 :
5655 240 : if current_size >= self.get_checkpoint_distance() * 2 {
5656 0 : warn!("Flushed oversized open layer with size {}", current_size)
5657 240 : }
5658 :
5659 240 : Ok(())
5660 240 : }
5661 :
5662 14412654 : fn get_open_layer_action(&self, lsn: Lsn, new_value_size: u64) -> OpenLayerAction {
5663 14412654 : let state = &*self.write_guard;
5664 14412654 : let Some(state) = &state else {
5665 3576 : return OpenLayerAction::Open;
5666 : };
5667 :
5668 : #[cfg(feature = "testing")]
5669 14409078 : if state.cached_last_freeze_at < self.tl.last_freeze_at.load() {
5670 : // this check and assertion are not really needed because
5671 : // LayerManager::try_freeze_in_memory_layer will always clear out the
5672 : // TimelineWriterState if something is frozen. However, we can advance last_freeze_at when there
5673 : // is no TimelineWriterState.
5674 0 : assert!(
5675 0 : state.open_layer.end_lsn.get().is_some(),
5676 0 : "our open_layer must be outdated"
5677 : );
5678 :
5679 : // this would be a memory leak waiting to happen because the in-memory layer always has
5680 : // an index
5681 0 : panic!("BUG: TimelineWriterState held on to frozen in-memory layer.");
5682 14409078 : }
5683 14409078 :
5684 14409078 : if state.prev_lsn == Some(lsn) {
5685 : // Rolling mid LSN is not supported by [downstream code].
5686 : // Hence, only roll at LSN boundaries.
5687 : //
5688 : // [downstream code]: https://github.com/neondatabase/neon/pull/7993#discussion_r1633345422
5689 18 : return OpenLayerAction::None;
5690 14409060 : }
5691 14409060 :
5692 14409060 : if state.current_size == 0 {
5693 : // Don't roll empty layers
5694 0 : return OpenLayerAction::None;
5695 14409060 : }
5696 14409060 :
5697 14409060 : if self.tl.should_roll(
5698 14409060 : state.current_size,
5699 14409060 : state.current_size + new_value_size,
5700 14409060 : self.get_checkpoint_distance(),
5701 14409060 : lsn,
5702 14409060 : state.cached_last_freeze_at,
5703 14409060 : state.open_layer.get_opened_at(),
5704 14409060 : ) {
5705 240 : OpenLayerAction::Roll
5706 : } else {
5707 14408820 : OpenLayerAction::None
5708 : }
5709 14412654 : }
5710 :
5711 : /// Put a batch of keys at the specified Lsns.
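 : ///
 : /// Each batch entry is a `(key, lsn, serialized_size, value)` tuple; the `put` test helper
 : /// below shows how a single-value batch is assembled.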
5712 14412648 : pub(crate) async fn put_batch(
5713 14412648 : &mut self,
5714 14412648 : batch: Vec<(CompactKey, Lsn, usize, Value)>,
5715 14412648 : ctx: &RequestContext,
5716 14412648 : ) -> anyhow::Result<()> {
5717 14412648 : if batch.is_empty() {
5718 0 : return Ok(());
5719 14412648 : }
5720 :
5721 14412648 : let serialized_batch = inmemory_layer::SerializedBatch::from_values(batch)?;
5722 14412648 : let batch_max_lsn = serialized_batch.max_lsn;
5723 14412648 : let buf_size: u64 = serialized_batch.raw.len() as u64;
5724 14412648 :
5725 14412648 : let action = self.get_open_layer_action(batch_max_lsn, buf_size);
5726 14412648 : let layer = self
5727 14412648 : .handle_open_layer_action(batch_max_lsn, action, ctx)
5728 2178 : .await?;
5729 :
5730 14412648 : let res = layer.put_batch(serialized_batch, ctx).await;
5731 :
5732 14412648 : if res.is_ok() {
5733 14412648 : // Update the current size only when the entire write was ok.
5734 14412648 : // In case of failures, we may have had partial writes which
5735 14412648 : // render the size tracking out of sync. That's ok because
5736 14412648 : // the checkpoint distance should be significantly smaller
5737 14412648 : // than the S3 single shot upload limit of 5GiB.
5738 14412648 : let state = self.write_guard.as_mut().unwrap();
5739 14412648 :
5740 14412648 : state.current_size += buf_size;
5741 14412648 : state.prev_lsn = Some(batch_max_lsn);
5742 14412648 : state.max_lsn = std::cmp::max(state.max_lsn, Some(batch_max_lsn));
5743 14412648 : }
5744 :
5745 14412648 : res
5746 14412648 : }
5747 :
5748 : #[cfg(test)]
5749 : /// Test helper for tests that want to write individual values without composing a batch.
5750 13170462 : pub(crate) async fn put(
5751 13170462 : &mut self,
5752 13170462 : key: Key,
5753 13170462 : lsn: Lsn,
5754 13170462 : value: &Value,
5755 13170462 : ctx: &RequestContext,
5756 13170462 : ) -> anyhow::Result<()> {
5757 : use utils::bin_ser::BeSer;
5758 13170462 : if !key.is_valid_key_on_write_path() {
5759 0 : bail!(
5760 0 : "the request contains data not supported by pageserver at TimelineWriter::put: {}",
5761 0 : key
5762 0 : );
5763 13170462 : }
5764 13170462 : let val_ser_size = value.serialized_size().unwrap() as usize;
5765 13170462 : self.put_batch(
5766 13170462 : vec![(key.to_compact(), lsn, val_ser_size, value.clone())],
5767 13170462 : ctx,
5768 13170462 : )
5769 12158 : .await
5770 13170462 : }
5771 :
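 : /// Write a batch of tombstones covering the given key ranges at the given LSNs.
 : ///
 : /// The open-layer decision (and hence the target layer) is derived from the first
 : /// entry's LSN only; all tombstones in the batch are written into that layer.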
5772 6 : pub(crate) async fn delete_batch(
5773 6 : &mut self,
5774 6 : batch: &[(Range<Key>, Lsn)],
5775 6 : ctx: &RequestContext,
5776 6 : ) -> anyhow::Result<()> {
5777 6 : if let Some((_, lsn)) = batch.first() {
5778 6 : let action = self.get_open_layer_action(*lsn, 0);
5779 6 : let layer = self.handle_open_layer_action(*lsn, action, ctx).await?;
5780 6 : layer.put_tombstones(batch).await?;
5781 0 : }
5782 :
5783 6 : Ok(())
5784 6 : }
5785 :
5786 : /// Track the end of the latest digested WAL record.
5787 : /// Remember the (end of the) last valid WAL record for the timeline.
5788 : ///
5789 : /// Call this after you have finished writing all the WAL up to 'lsn'.
5790 : ///
5791 : /// 'lsn' must be aligned. This wakes up any wait_lsn() callers waiting for
5792 : /// the 'lsn' or anything older. The previous last record LSN is stored alongside
5793 : /// the latest and can be read.
5794 15837234 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
5795 15837234 : self.tl.finish_write(new_lsn);
5796 15837234 : }
5797 :
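 : /// Record a change of `delta` bytes (positive or negative) in the timeline's
 : /// current logical size.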
5798 811710 : pub(crate) fn update_current_logical_size(&self, delta: i64) {
5799 811710 : self.tl.update_current_logical_size(delta)
5800 811710 : }
5801 : }
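 :
 : // Illustrative sketch only: the write path from the writer's point of view. How the
 : // writer is obtained (`timeline.writer()`) and the `end_lsn` value are assumptions
 : // for illustration here.
 : //
 : //     let mut writer = timeline.writer().await;
 : //     writer.put_batch(batch, ctx).await?;
 : //     // `end_lsn` must be aligned; this wakes up wait_lsn() callers.
 : //     writer.finish_write(end_lsn);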
5802 :
5803 : // We need TimelineWriter to be Send in the upcoming conversion of
5804 : // Timeline::layers to tokio::sync::RwLock.
5805 : #[test]
5806 6 : fn is_send() {
5807 6 : fn _assert_send<T: Send>() {}
5808 6 : _assert_send::<TimelineWriter<'_>>();
5809 6 : }
5810 :
5811 : #[cfg(test)]
5812 : mod tests {
5813 : use pageserver_api::key::Key;
5814 : use utils::{id::TimelineId, lsn::Lsn};
5815 :
5816 : use crate::{
5817 : repository::Value,
5818 : tenant::{
5819 : harness::{test_img, TenantHarness},
5820 : layer_map::LayerMap,
5821 : storage_layer::{Layer, LayerName},
5822 : timeline::{DeltaLayerTestDesc, EvictionError},
5823 : Timeline,
5824 : },
5825 : };
5826 :
5827 : #[tokio::test]
5828 6 : async fn test_heatmap_generation() {
5829 6 : let harness = TenantHarness::create("heatmap_generation").await.unwrap();
5830 6 :
5831 6 : let covered_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
5832 6 : Lsn(0x10)..Lsn(0x20),
5833 6 : vec![(
5834 6 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
5835 6 : Lsn(0x11),
5836 6 : Value::Image(test_img("foo")),
5837 6 : )],
5838 6 : );
5839 6 : let visible_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
5840 6 : Lsn(0x10)..Lsn(0x20),
5841 6 : vec![(
5842 6 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
5843 6 : Lsn(0x11),
5844 6 : Value::Image(test_img("foo")),
5845 6 : )],
5846 6 : );
5847 6 : let l0_delta = DeltaLayerTestDesc::new(
5848 6 : Lsn(0x20)..Lsn(0x30),
5849 6 : Key::from_hex("000000000000000000000000000000000000").unwrap()
5850 6 : ..Key::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(),
5851 6 : vec![(
5852 6 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
5853 6 : Lsn(0x25),
5854 6 : Value::Image(test_img("foo")),
5855 6 : )],
5856 6 : );
5857 6 : let delta_layers = vec![
5858 6 : covered_delta.clone(),
5859 6 : visible_delta.clone(),
5860 6 : l0_delta.clone(),
5861 6 : ];
5862 6 :
5863 6 : let image_layer = (
5864 6 : Lsn(0x40),
5865 6 : vec![(
5866 6 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
5867 6 : test_img("bar"),
5868 6 : )],
5869 6 : );
5870 6 : let image_layers = vec![image_layer];
5871 6 :
5872 24 : let (tenant, ctx) = harness.load().await;
5873 6 : let timeline = tenant
5874 6 : .create_test_timeline_with_layers(
5875 6 : TimelineId::generate(),
5876 6 : Lsn(0x10),
5877 6 : 14,
5878 6 : &ctx,
5879 6 : delta_layers,
5880 6 : image_layers,
5881 6 : Lsn(0x100),
5882 6 : )
5883 87 : .await
5884 6 : .unwrap();
5885 6 :
5886 6 : // Layer visibility is an input to heatmap generation, so refresh it first
5887 6 : timeline.update_layer_visibility().await.unwrap();
5888 6 :
5889 6 : let heatmap = timeline
5890 6 : .generate_heatmap()
5891 6 : .await
5892 6 : .expect("Infallible while timeline is not shut down");
5893 6 :
5894 6 : assert_eq!(heatmap.timeline_id, timeline.timeline_id);
5895 6 :
5896 6 : // L0 should come last
5897 6 : assert_eq!(heatmap.layers.last().unwrap().name, l0_delta.layer_name());
5898 6 :
5899 6 : let mut last_lsn = Lsn::MAX;
5900 30 : for layer in heatmap.layers {
5901 6 : // Covered layer should be omitted
5902 24 : assert!(layer.name != covered_delta.layer_name());
5903 6 :
5904 24 : let layer_lsn = match &layer.name {
5905 12 : LayerName::Delta(d) => d.lsn_range.end,
5906 12 : LayerName::Image(i) => i.lsn,
5907 6 : };
5908 6 :
5909 6 : // Apart from L0s, the newest layers should come first
5910 24 : if !LayerMap::is_l0(layer.name.key_range(), layer.name.is_delta()) {
5911 18 : assert!(layer_lsn <= last_lsn);
5912 18 : last_lsn = layer_lsn;
5913 6 : }
5914 6 : }
5915 6 : }
5916 :
5917 : #[tokio::test]
5918 6 : async fn two_layer_eviction_attempts_at_the_same_time() {
5919 6 : let harness = TenantHarness::create("two_layer_eviction_attempts_at_the_same_time")
5920 6 : .await
5921 6 : .unwrap();
5922 6 :
5923 24 : let (tenant, ctx) = harness.load().await;
5924 6 : let timeline = tenant
5925 6 : .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
5926 12 : .await
5927 6 : .unwrap();
5928 6 :
5929 6 : let layer = find_some_layer(&timeline).await;
5930 6 : let layer = layer
5931 6 : .keep_resident()
5932 6 : .await
5933 6 : .expect("no download => no downloading errors")
5934 6 : .drop_eviction_guard();
5935 6 :
5936 6 : let forever = std::time::Duration::from_secs(120);
5937 6 :
5938 6 : let first = layer.evict_and_wait(forever);
5939 6 : let second = layer.evict_and_wait(forever);
5940 6 :
5941 6 : let (first, second) = tokio::join!(first, second);
5942 6 :
5943 6 : let res = layer.keep_resident().await;
5944 6 : assert!(res.is_none(), "{res:?}");
5945 6 :
5946 6 : match (first, second) {
5947 6 : (Ok(()), Ok(())) => {
5948 6 : // Because timeline locks are no longer taken on the eviction path, we can
5949 6 : // witness all three outcomes here.
5950 6 : }
5951 6 : (Ok(()), Err(EvictionError::NotFound)) | (Err(EvictionError::NotFound), Ok(())) => {
5952 0 : // if one completes before the other, that is fine as well.
5953 0 : }
5954 6 : other => unreachable!("unexpected {:?}", other),
5955 6 : }
5956 6 : }
5957 :
5958 6 : async fn find_some_layer(timeline: &Timeline) -> Layer {
5959 6 : let layers = timeline.layers.read().await;
5960 6 : let desc = layers
5961 6 : .layer_map()
5962 6 : .unwrap()
5963 6 : .iter_historic_layers()
5964 6 : .next()
5965 6 : .expect("must find one layer to evict");
5966 6 :
5967 6 : layers.get_from_desc(&desc)
5968 6 : }
5969 : }
|