Line data Source code
1 : pub(crate) mod analysis;
2 : pub(crate) mod compaction;
3 : pub mod delete;
4 : pub(crate) mod detach_ancestor;
5 : mod eviction_task;
6 : pub(crate) mod handle;
7 : mod heatmap_layers_downloader;
8 : pub(crate) mod import_pgdata;
9 : mod init;
10 : pub mod layer_manager;
11 : pub(crate) mod logical_size;
12 : pub mod offload;
13 : pub mod span;
14 : pub mod uninit;
15 : mod walreceiver;
16 :
17 : use hashlink::LruCache;
18 : use std::array;
19 : use std::cmp::{max, min};
20 : use std::collections::btree_map::Entry;
21 : use std::collections::{BTreeMap, HashMap, HashSet};
22 : use std::ops::{ControlFlow, Deref, Range};
23 : use std::sync::atomic::{AtomicBool, AtomicU64, Ordering as AtomicOrdering};
24 : use std::sync::{Arc, Mutex, OnceLock, RwLock, Weak};
25 : use std::time::{Duration, Instant, SystemTime};
26 :
27 : use anyhow::{Context, Result, anyhow, bail, ensure};
28 : use arc_swap::{ArcSwap, ArcSwapOption};
29 : use bytes::Bytes;
30 : use camino::Utf8Path;
31 : use chrono::{DateTime, Utc};
32 : use compaction::{CompactionOutcome, GcCompactionCombinedSettings};
33 : use enumset::EnumSet;
34 : use fail::fail_point;
35 : use futures::stream::FuturesUnordered;
36 : use futures::{FutureExt, StreamExt};
37 : use handle::ShardTimelineId;
38 : use layer_manager::{
39 : LayerManagerLockHolder, LayerManagerReadGuard, LayerManagerWriteGuard, LockedLayerManager,
40 : Shutdown,
41 : };
42 :
43 : use offload::OffloadError;
44 : use once_cell::sync::Lazy;
45 : use pageserver_api::config::tenant_conf_defaults::DEFAULT_PITR_INTERVAL;
46 : use pageserver_api::key::{
47 : KEY_SIZE, Key, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX, NON_INHERITED_RANGE,
48 : SPARSE_RANGE,
49 : };
50 : use pageserver_api::keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning};
51 : use pageserver_api::models::{
52 : CompactKeyRange, CompactLsnRange, CompactionAlgorithm, CompactionAlgorithmSettings,
53 : DetachBehavior, DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest,
54 : EvictionPolicy, InMemoryLayerInfo, LayerMapInfo, LsnLease, PageTraceEvent, RelSizeMigration,
55 : TimelineState,
56 : };
57 : use pageserver_api::reltag::{BlockNumber, RelTag};
58 : use pageserver_api::shard::{ShardIdentity, ShardIndex, ShardNumber, TenantShardId};
59 : #[cfg(test)]
60 : use pageserver_api::value::Value;
61 : use postgres_connection::PgConnectionConfig;
62 : use postgres_ffi::v14::xlog_utils;
63 : use postgres_ffi::{WAL_SEGMENT_SIZE, to_pg_timestamp};
64 : use rand::Rng;
65 : use remote_storage::DownloadError;
66 : use serde_with::serde_as;
67 : use storage_broker::BrokerClientChannel;
68 : use tokio::runtime::Handle;
69 : use tokio::sync::mpsc::Sender;
70 : use tokio::sync::{Notify, oneshot, watch};
71 : use tokio_util::sync::CancellationToken;
72 : use tracing::*;
73 : use utils::generation::Generation;
74 : use utils::guard_arc_swap::GuardArcSwap;
75 : use utils::id::TimelineId;
76 : use utils::logging::{MonitorSlowFutureCallback, monitor_slow_future};
77 : use utils::lsn::{AtomicLsn, Lsn, RecordLsn};
78 : use utils::postgres_client::PostgresClientProtocol;
79 : use utils::rate_limit::RateLimit;
80 : use utils::seqwait::SeqWait;
81 : use utils::simple_rcu::{Rcu, RcuReadGuard};
82 : use utils::sync::gate::{Gate, GateGuard};
83 : use utils::{completion, critical, fs_ext, pausable_failpoint};
84 : use wal_decoder::serialized_batch::{SerializedValueBatch, ValueMeta};
85 :
86 : use self::delete::DeleteTimelineFlow;
87 : pub(super) use self::eviction_task::EvictionTaskTenantState;
88 : use self::eviction_task::EvictionTaskTimelineState;
89 : use self::logical_size::LogicalSize;
90 : use self::walreceiver::{WalReceiver, WalReceiverConf};
91 : use super::remote_timeline_client::RemoteTimelineClient;
92 : use super::remote_timeline_client::index::{GcCompactionState, IndexPart};
93 : use super::secondary::heatmap::HeatMapLayer;
94 : use super::storage_layer::{LayerFringe, LayerVisibilityHint, ReadableLayer};
95 : use super::tasks::log_compaction_error;
96 : use super::upload_queue::NotInitialized;
97 : use super::{
98 : AttachedTenantConf, BasebackupPrepareSender, GcError, HeatMapTimeline, MaybeOffloaded,
99 : debug_assert_current_span_has_tenant_and_timeline_id,
100 : };
101 : use crate::PERF_TRACE_TARGET;
102 : use crate::aux_file::AuxFileSizeEstimator;
103 : use crate::basebackup_cache::BasebackupPrepareRequest;
104 : use crate::config::PageServerConf;
105 : use crate::context::{
106 : DownloadBehavior, PerfInstrumentFutureExt, RequestContext, RequestContextBuilder,
107 : };
108 : use crate::disk_usage_eviction_task::{DiskUsageEvictionInfo, EvictionCandidate, finite_f32};
109 : use crate::feature_resolver::FeatureResolver;
110 : use crate::keyspace::{KeyPartitioning, KeySpace};
111 : use crate::l0_flush::{self, L0FlushGlobalState};
112 : use crate::metrics::{
113 : DELTAS_PER_READ_GLOBAL, LAYERS_PER_READ_AMORTIZED_GLOBAL, LAYERS_PER_READ_BATCH_GLOBAL,
114 : LAYERS_PER_READ_GLOBAL, ScanLatencyOngoingRecording, TimelineMetrics,
115 : };
116 : use crate::page_service::TenantManagerTypes;
117 : use crate::pgdatadir_mapping::{
118 : CalculateLogicalSizeError, CollectKeySpaceError, DirectoryKind, LsnForTimestamp,
119 : MAX_AUX_FILE_V2_DELTAS, MetricsUpdate,
120 : };
121 : use crate::task_mgr::TaskKind;
122 : use crate::tenant::config::AttachmentMode;
123 : use crate::tenant::gc_result::GcResult;
124 : use crate::tenant::layer_map::LayerMap;
125 : use crate::tenant::metadata::TimelineMetadata;
126 : use crate::tenant::storage_layer::delta_layer::DeltaEntry;
127 : use crate::tenant::storage_layer::inmemory_layer::IndexEntry;
128 : use crate::tenant::storage_layer::{
129 : AsLayerDesc, BatchLayerWriter, DeltaLayerWriter, EvictionError, ImageLayerName,
130 : ImageLayerWriter, InMemoryLayer, IoConcurrency, Layer, LayerAccessStatsReset, LayerName,
131 : PersistentLayerDesc, PersistentLayerKey, ResidentLayer, ValueReconstructSituation,
132 : ValueReconstructState, ValuesReconstructState,
133 : };
134 : use crate::tenant::tasks::BackgroundLoopKind;
135 : use crate::tenant::timeline::logical_size::CurrentLogicalSize;
136 : use crate::virtual_file::{MaybeFatalIo, VirtualFile};
137 : use crate::walingest::WalLagCooldown;
138 : use crate::walredo::RedoAttemptType;
139 : use crate::{ZERO_PAGE, task_mgr, walredo};
140 :
141 : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
142 : pub(crate) enum FlushLoopState {
143 : NotStarted,
144 : Running {
145 : #[cfg(test)]
146 : expect_initdb_optimization: bool,
147 : #[cfg(test)]
148 : initdb_optimization_count: usize,
149 : },
150 : Exited,
151 : }
152 :
153 : #[derive(Debug, Copy, Clone, PartialEq, Eq)]
154 : pub enum ImageLayerCreationMode {
155 : /// Try to create image layers based on `time_for_new_image_layer`. Used in compaction code path.
156 : Try,
157 : /// Force creating the image layers if possible. For now, no image layers will be created
158 : /// for metadata keys. Used in compaction code path with force flag enabled.
159 : Force,
160 : /// Initial ingestion of the data, and no data should be dropped in this function. This
161 : /// means that no metadata keys should be included in the partitions. Used in flush frozen layer
162 : /// code path.
163 : Initial,
164 : }
165 :
166 : #[derive(Clone, Debug, Default)]
167 : pub enum LastImageLayerCreationStatus {
168 : Incomplete {
169 : /// The last key of the partition (exclusive) that was processed in the last
170 : /// image layer creation attempt. We will continue from this key in the next
171 : /// attempt.
172 : last_key: Key,
173 : },
174 : Complete,
175 : #[default]
176 : Initial,
177 : }
178 :
179 : impl std::fmt::Display for ImageLayerCreationMode {
180 302 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
181 302 : write!(f, "{:?}", self)
182 302 : }
183 : }
184 :
185 : /// Temporary function for the immutable storage state refactor; ensures we are dropping the lock guard and not something else.
186 : /// Can be removed after all refactors are done.
187 23 : fn drop_layer_manager_rlock(rlock: LayerManagerReadGuard<'_>) {
188 23 : drop(rlock)
189 23 : }
190 :
191 : /// Temporary function for the immutable storage state refactor; ensures we are dropping the lock guard and not something else.
192 : /// Can be removed after all refactors are done.
193 325 : fn drop_layer_manager_wlock(rlock: LayerManagerWriteGuard<'_>) {
194 325 : drop(rlock)
195 325 : }
196 :
197 : /// The outward-facing resources required to build a Timeline
198 : pub struct TimelineResources {
199 : pub remote_client: RemoteTimelineClient,
200 : pub pagestream_throttle: Arc<crate::tenant::throttle::Throttle>,
201 : pub pagestream_throttle_metrics: Arc<crate::metrics::tenant_throttling::Pagestream>,
202 : pub l0_compaction_trigger: Arc<Notify>,
203 : pub l0_flush_global_state: l0_flush::L0FlushGlobalState,
204 : pub basebackup_prepare_sender: BasebackupPrepareSender,
205 : pub feature_resolver: FeatureResolver,
206 : }
207 :
208 : pub struct Timeline {
209 : pub(crate) conf: &'static PageServerConf,
210 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
211 :
212 : myself: Weak<Self>,
213 :
214 : pub(crate) tenant_shard_id: TenantShardId,
215 : pub timeline_id: TimelineId,
216 :
217 : /// The generation of the tenant that instantiated us: this is used for safety when writing remote objects.
218 : /// Never changes for the lifetime of this [`Timeline`] object.
219 : ///
220 : /// This duplicates the generation stored in LocationConf, but that structure is mutable:
221 :     /// this copy enforces the invariant that the generation doesn't change during a Tenant's lifetime.
222 : pub(crate) generation: Generation,
223 :
224 : /// The detailed sharding information from our parent Tenant. This enables us to map keys
225 : /// to shards, and is constant through the lifetime of this Timeline.
226 : shard_identity: ShardIdentity,
227 :
228 : pub pg_version: u32,
229 :
230 : /// The tuple has two elements.
231 : /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote).
232 : /// 2. `LayerMap`, the acceleration data structure for `get_reconstruct_data`.
233 : ///
234 : /// `LayerMap` maps out the `(PAGE,LSN) / (KEY,LSN)` space, which is composed of `(KeyRange, LsnRange)` rectangles.
235 : /// We describe these rectangles through the `PersistentLayerDesc` struct.
236 : ///
237 : /// When we want to reconstruct a page, we first find the `PersistentLayerDesc`'s that we need for page reconstruction,
238 : /// using `LayerMap`. Then, we use `LayerFileManager` to get the `PersistentLayer`'s that correspond to the
239 : /// `PersistentLayerDesc`'s.
240 : ///
241 : /// Hence, it's important to keep things coherent. The `LayerFileManager` must always have an entry for all
242 : /// `PersistentLayerDesc`'s in the `LayerMap`. If it doesn't, `LayerFileManager::get_from_desc` will panic at
243 : /// runtime, e.g., during page reconstruction.
244 : ///
245 : /// In the future, we'll be able to split up the tuple of LayerMap and `LayerFileManager`,
246 : /// so that e.g. on-demand-download/eviction, and layer spreading, can operate just on `LayerFileManager`.
247 : pub(crate) layers: LockedLayerManager,
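
    // Illustrative, hypothetical sketch of the two-step lookup described above. `get_from_desc`
    // is the method named in the comment; `search` is an assumed name for the LayerMap lookup:
    //
    //     let desc = layer_map.search(key, lsn);                 // find the covering (KeyRange, LsnRange) rectangle
    //     let layer = layer_file_manager.get_from_desc(&desc);   // resolve the descriptor to the physical layer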
248 :
249 : last_freeze_at: AtomicLsn,
250 :     // An atomic would be more appropriate here.
251 : last_freeze_ts: RwLock<Instant>,
252 :
253 : pub(crate) standby_horizon: AtomicLsn,
254 :
255 : // WAL redo manager. `None` only for broken tenants.
256 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
257 :
258 : /// Remote storage client.
259 : /// See [`remote_timeline_client`](super::remote_timeline_client) module comment for details.
260 : pub(crate) remote_client: Arc<RemoteTimelineClient>,
261 :
262 : // What page versions do we hold in the repository? If we get a
263 : // request > last_record_lsn, we need to wait until we receive all
264 : // the WAL up to the request. The SeqWait provides functions for
265 : // that. TODO: If we get a request for an old LSN, such that the
266 : // versions have already been garbage collected away, we should
267 : // throw an error, but we don't track that currently.
268 : //
269 :     // last_record_lsn.load().last points to the end of the last processed WAL record.
270 : //
271 : // We also remember the starting point of the previous record in
272 : // 'last_record_lsn.load().prev'. It's used to set the xl_prev pointer of the
273 : // first WAL record when the node is started up. But here, we just
274 : // keep track of it.
275 : last_record_lsn: SeqWait<RecordLsn, Lsn>,
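
    // Hypothetical sketch of the waiting pattern described above (the exact SeqWait method
    // name is an assumption): a reader at `request_lsn` blocks until ingest has advanced
    // at least that far.
    //
    //     last_record_lsn.wait_for(request_lsn).await?;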
276 :
277 : // All WAL records have been processed and stored durably on files on
278 : // local disk, up to this LSN. On crash and restart, we need to re-process
279 : // the WAL starting from this point.
280 : //
281 : // Some later WAL records might have been processed and also flushed to disk
282 : // already, so don't be surprised to see some, but there's no guarantee on
283 : // them yet.
284 : disk_consistent_lsn: AtomicLsn,
285 :
286 : // Parent timeline that this timeline was branched from, and the LSN
287 : // of the branch point.
288 : ancestor_timeline: Option<Arc<Timeline>>,
289 : ancestor_lsn: Lsn,
290 :
291 : // The LSN of gc-compaction that was last applied to this timeline.
292 : gc_compaction_state: ArcSwap<Option<GcCompactionState>>,
293 :
294 : pub(crate) metrics: Arc<TimelineMetrics>,
295 :
296 : // `Timeline` doesn't write these metrics itself, but it manages the lifetime. Code
297 : // in `crate::page_service` writes these metrics.
298 : pub(crate) query_metrics: crate::metrics::SmgrQueryTimePerTimeline,
299 :
300 : directory_metrics_inited: [AtomicBool; DirectoryKind::KINDS_NUM],
301 : directory_metrics: [AtomicU64; DirectoryKind::KINDS_NUM],
302 :
303 : /// Ensures layers aren't frozen by checkpointer between
304 : /// [`Timeline::get_layer_for_write`] and layer reads.
305 : /// Locked automatically by [`TimelineWriter`] and checkpointer.
306 : /// Must always be acquired before the layer map/individual layer lock
307 : /// to avoid deadlock.
308 : ///
309 : /// The state is cleared upon freezing.
310 : write_lock: tokio::sync::Mutex<Option<TimelineWriterState>>,
311 :
312 : /// Used to avoid multiple `flush_loop` tasks running
313 : pub(super) flush_loop_state: Mutex<FlushLoopState>,
314 :
315 : /// layer_flush_start_tx can be used to wake up the layer-flushing task.
316 : /// - The u64 value is a counter, incremented every time a new flush cycle is requested.
317 : /// The flush cycle counter is sent back on the layer_flush_done channel when
318 : /// the flush finishes. You can use that to wait for the flush to finish.
319 : /// - The LSN is updated to max() of its current value and the latest disk_consistent_lsn
320 : /// read by whoever sends an update
321 : layer_flush_start_tx: tokio::sync::watch::Sender<(u64, Lsn)>,
322 : /// to be notified when layer flushing has finished, subscribe to the layer_flush_done channel
323 : layer_flush_done_tx: tokio::sync::watch::Sender<(u64, Result<(), FlushLayerError>)>,
324 :
325 : // The LSN at which we have executed GC: whereas [`Self::gc_info`] records the LSN at which
326 : // we _intend_ to GC (i.e. the PITR cutoff), this LSN records where we actually last did it.
327 : // Because PITR interval is mutable, it's possible for this LSN to be earlier or later than
328 : // the planned GC cutoff.
329 : pub applied_gc_cutoff_lsn: Rcu<Lsn>,
330 :
331 : pub(crate) gc_compaction_layer_update_lock: tokio::sync::RwLock<()>,
332 :
333 : // List of child timelines and their branch points. This is needed to avoid
334 : // garbage collecting data that is still needed by the child timelines.
335 : pub(crate) gc_info: std::sync::RwLock<GcInfo>,
336 :
337 : pub(crate) last_image_layer_creation_status: ArcSwap<LastImageLayerCreationStatus>,
338 :
339 :     // The initdb LSN may change across major Postgres versions, so for simplicity
340 :     // we record it after running initdb for a timeline.
341 :     // It is needed in checks where we want to error out on some operations
342 :     // when they are requested for a pre-initdb LSN.
343 :     // It could be unified with latest_gc_cutoff_lsn under some "first_valid_lsn",
344 :     // though let's keep them both for better error visibility.
345 : pub initdb_lsn: Lsn,
346 :
347 : /// The repartitioning result. Allows a single writer and multiple readers.
348 : pub(crate) partitioning: GuardArcSwap<((KeyPartitioning, SparseKeyPartitioning), Lsn)>,
349 :
350 : /// Configuration: how often should the partitioning be recalculated.
351 : repartition_threshold: u64,
352 :
353 : last_image_layer_creation_check_at: AtomicLsn,
354 : last_image_layer_creation_check_instant: std::sync::Mutex<Option<Instant>>,
355 :
356 : /// Current logical size of the "datadir", at the last LSN.
357 : current_logical_size: LogicalSize,
358 :
359 : /// Information about the last processed message by the WAL receiver,
360 : /// or None if WAL receiver has not received anything for this timeline
361 : /// yet.
362 : pub last_received_wal: Mutex<Option<WalReceiverInfo>>,
363 : pub walreceiver: Mutex<Option<WalReceiver>>,
364 :
365 : /// Relation size cache
366 : pub(crate) rel_size_latest_cache: RwLock<HashMap<RelTag, (Lsn, BlockNumber)>>,
367 : pub(crate) rel_size_snapshot_cache: Mutex<LruCache<(Lsn, RelTag), BlockNumber>>,
368 :
369 : download_all_remote_layers_task_info: RwLock<Option<DownloadRemoteLayersTaskInfo>>,
370 :
371 : state: watch::Sender<TimelineState>,
372 :
373 : /// Prevent two tasks from deleting the timeline at the same time. If held, the
374 : /// timeline is being deleted. If 'true', the timeline has already been deleted.
375 : pub delete_progress: TimelineDeleteProgress,
376 :
377 : eviction_task_timeline_state: tokio::sync::Mutex<EvictionTaskTimelineState>,
378 :
379 : /// Load or creation time information about the disk_consistent_lsn and when the loading
380 : /// happened. Used for consumption metrics.
381 : pub(crate) loaded_at: (Lsn, SystemTime),
382 :
383 : /// Gate to prevent shutdown completing while I/O is still happening to this timeline's data
384 : pub(crate) gate: Gate,
385 :
386 : /// Cancellation token scoped to this timeline: anything doing long-running work relating
387 : /// to the timeline should drop out when this token fires.
388 : pub(crate) cancel: CancellationToken,
389 :
390 : /// Make sure we only have one running compaction at a time in tests.
391 : ///
392 : /// Must only be taken in two places:
393 : /// - [`Timeline::compact`] (this file)
394 : /// - [`delete::delete_local_timeline_directory`]
395 : ///
396 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
397 : compaction_lock: tokio::sync::Mutex<()>,
398 :
399 : /// If true, the last compaction failed.
400 : compaction_failed: AtomicBool,
401 :
402 : /// Notifies the tenant compaction loop that there is pending L0 compaction work.
403 : l0_compaction_trigger: Arc<Notify>,
404 :
405 : /// Make sure we only have one running gc at a time.
406 : ///
407 : /// Must only be taken in two places:
408 : /// - [`Timeline::gc`] (this file)
409 : /// - [`delete::delete_local_timeline_directory`]
410 : ///
411 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
412 : gc_lock: tokio::sync::Mutex<()>,
413 :
414 : /// Cloned from [`super::TenantShard::pagestream_throttle`] on construction.
415 : pub(crate) pagestream_throttle: Arc<crate::tenant::throttle::Throttle>,
416 :
417 : /// Size estimator for aux file v2
418 : pub(crate) aux_file_size_estimator: AuxFileSizeEstimator,
419 :
420 : /// Some test cases directly place keys into the timeline without actually modifying the directory
421 : /// keys (i.e., DB_DIR). The test cases creating such keys will put the keyspaces here, so that
422 : /// these keys won't get garbage-collected during compaction/GC. This field only modifies the dense
423 : /// keyspace return value of `collect_keyspace`. For sparse keyspaces, use AUX keys for testing, and
424 : /// in the future, add `extra_test_sparse_keyspace` if necessary.
425 : #[cfg(test)]
426 : pub(crate) extra_test_dense_keyspace: ArcSwap<KeySpace>,
427 :
428 : pub(crate) l0_flush_global_state: L0FlushGlobalState,
429 :
430 : pub(crate) handles: handle::PerTimelineState<TenantManagerTypes>,
431 :
432 : pub(crate) attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
433 :
434 : /// Cf. [`crate::tenant::CreateTimelineIdempotency`].
435 : pub(crate) create_idempotency: crate::tenant::CreateTimelineIdempotency,
436 :
437 : /// If Some, collects GetPage metadata for an ongoing PageTrace.
438 : pub(crate) page_trace: ArcSwapOption<Sender<PageTraceEvent>>,
439 :
440 : pub(super) previous_heatmap: ArcSwapOption<PreviousHeatmap>,
441 :
442 : /// May host a background Tokio task which downloads all the layers from the current
443 : /// heatmap on demand.
444 : heatmap_layers_downloader: Mutex<Option<heatmap_layers_downloader::HeatmapLayersDownloader>>,
445 :
446 : pub(crate) rel_size_v2_status: ArcSwapOption<RelSizeMigration>,
447 :
448 : wait_lsn_log_slow: tokio::sync::Semaphore,
449 :
450 : /// A channel to send async requests to prepare a basebackup for the basebackup cache.
451 : basebackup_prepare_sender: BasebackupPrepareSender,
452 :
453 : feature_resolver: FeatureResolver,
454 : }
455 :
456 : pub(crate) enum PreviousHeatmap {
457 : Active {
458 : heatmap: HeatMapTimeline,
459 : read_at: std::time::Instant,
460 : // End LSN covered by the heatmap if known
461 : end_lsn: Option<Lsn>,
462 : },
463 : Obsolete,
464 : }
465 :
466 : pub type TimelineDeleteProgress = Arc<tokio::sync::Mutex<DeleteTimelineFlow>>;
467 :
468 : pub struct WalReceiverInfo {
469 : pub wal_source_connconf: PgConnectionConfig,
470 : pub last_received_msg_lsn: Lsn,
471 : pub last_received_msg_ts: u128,
472 : }
473 :
474 : /// Information about how much history needs to be retained, needed by
475 : /// Garbage Collection.
476 : #[derive(Default)]
477 : pub(crate) struct GcInfo {
478 : /// Specific LSNs that are needed.
479 : ///
480 : /// Currently, this includes all points where child branches have
481 : /// been forked off from. In the future, could also include
482 : /// explicit user-defined snapshot points.
483 : pub(crate) retain_lsns: Vec<(Lsn, TimelineId, MaybeOffloaded)>,
484 :
485 : /// The cutoff coordinates, which are combined by selecting the minimum.
486 : pub(crate) cutoffs: GcCutoffs,
487 :
488 : /// Leases granted to particular LSNs.
489 : pub(crate) leases: BTreeMap<Lsn, LsnLease>,
490 :
491 : /// Whether our branch point is within our ancestor's PITR interval (for cost estimation)
492 : pub(crate) within_ancestor_pitr: bool,
493 : }
494 :
495 : impl GcInfo {
496 154 : pub(crate) fn min_cutoff(&self) -> Lsn {
497 154 : self.cutoffs.select_min()
498 154 : }
499 :
500 119 : pub(super) fn insert_child(
501 119 : &mut self,
502 119 : child_id: TimelineId,
503 119 : child_lsn: Lsn,
504 119 : is_offloaded: MaybeOffloaded,
505 119 : ) {
506 119 : self.retain_lsns.push((child_lsn, child_id, is_offloaded));
507 119 : self.retain_lsns.sort_by_key(|i| i.0);
508 119 : }
509 :
510 2 : pub(super) fn remove_child_maybe_offloaded(
511 2 : &mut self,
512 2 : child_id: TimelineId,
513 2 : maybe_offloaded: MaybeOffloaded,
514 2 : ) -> bool {
515 2 : // Remove at most one element. Needed for correctness if there is two live `Timeline` objects referencing
516 2 : // the same timeline. Shouldn't but maybe can occur when Arc's live longer than intended.
517 2 : let mut removed = false;
518 3 : self.retain_lsns.retain(|i| {
519 3 : if removed {
520 1 : return true;
521 2 : }
522 2 : let remove = i.1 == child_id && i.2 == maybe_offloaded;
523 2 : removed |= remove;
524 2 : !remove
525 3 : });
526 2 : removed
527 2 : }
528 :
529 2 : pub(super) fn remove_child_not_offloaded(&mut self, child_id: TimelineId) -> bool {
530 2 : self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::No)
531 2 : }
532 :
533 0 : pub(super) fn remove_child_offloaded(&mut self, child_id: TimelineId) -> bool {
534 0 : self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::Yes)
535 0 : }
536 119 : pub(crate) fn lsn_covered_by_lease(&self, lsn: Lsn) -> bool {
537 119 : self.leases.contains_key(&lsn)
538 119 : }
539 : }
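
// Hedged, self-contained sketch of the "remove at most one matching child" behaviour used by
// `remove_child_maybe_offloaded` above, with `(u64, u32)` tuples standing in for
// `(Lsn, TimelineId, MaybeOffloaded)`. If the same child is present twice (e.g. two live
// `Timeline` Arcs registered it), only the first occurrence is removed.
#[allow(dead_code)]
fn demo_remove_at_most_one(retain_lsns: &mut Vec<(u64, u32)>, child_id: u32) -> bool {
    let mut removed = false;
    retain_lsns.retain(|i| {
        if removed {
            return true;
        }
        let remove = i.1 == child_id;
        removed |= remove;
        !remove
    });
    removed
}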
540 :
541 : /// The `GcInfo` component describing which Lsns need to be retained. Functionally, this
542 : /// is a single number (the oldest LSN which we must retain), but it internally distinguishes
543 : /// between time-based and space-based retention for observability and consumption metrics purposes.
544 : #[derive(Clone, Debug, Default)]
545 : pub(crate) struct GcCutoffs {
546 : /// Calculated from the [`pageserver_api::models::TenantConfig::gc_horizon`], this LSN indicates how much
547 : /// history we must keep to retain a specified number of bytes of WAL.
548 : pub(crate) space: Lsn,
549 :
550 : /// Calculated from [`pageserver_api::models::TenantConfig::pitr_interval`], this LSN indicates
551 : /// how much history we must keep to enable reading back at least the PITR interval duration.
552 : ///
553 : /// None indicates that the PITR cutoff has not been computed. A PITR interval of 0 will yield
554 : /// Some(last_record_lsn).
555 : pub(crate) time: Option<Lsn>,
556 : }
557 :
558 : impl GcCutoffs {
559 154 : fn select_min(&self) -> Lsn {
560 154 : // NB: if we haven't computed the PITR cutoff yet, we can't GC anything.
561 154 : self.space.min(self.time.unwrap_or_default())
562 154 : }
563 : }
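
// Hedged sketch of the `select_min` semantics with `u64` standing in for `Lsn`: an uncomputed
// time (PITR) cutoff behaves like 0, so the combined cutoff is 0 and GC retains everything.
//
//     assert_eq!(demo_select_min(100, None), 0);        // PITR cutoff not yet computed
//     assert_eq!(demo_select_min(100, Some(250)), 100); // space-based horizon is the limit
#[allow(dead_code)]
fn demo_select_min(space: u64, time: Option<u64>) -> u64 {
    space.min(time.unwrap_or_default())
}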
564 :
565 : pub(crate) struct TimelineVisitOutcome {
566 : completed_keyspace: KeySpace,
567 : image_covered_keyspace: KeySpace,
568 : }
569 :
570 : /// An error happened in a get() operation.
571 : #[derive(thiserror::Error, Debug)]
572 : pub(crate) enum PageReconstructError {
573 : #[error(transparent)]
574 : Other(anyhow::Error),
575 :
576 : #[error("Ancestor LSN wait error: {0}")]
577 : AncestorLsnTimeout(WaitLsnError),
578 :
579 : #[error("timeline shutting down")]
580 : Cancelled,
581 :
582 : /// An error happened replaying WAL records
583 : #[error(transparent)]
584 : WalRedo(anyhow::Error),
585 :
586 : #[error("{0}")]
587 : MissingKey(Box<MissingKeyError>),
588 : }
589 :
590 : impl From<anyhow::Error> for PageReconstructError {
591 1 : fn from(value: anyhow::Error) -> Self {
592 1 : // with walingest.rs many PageReconstructError are wrapped in as anyhow::Error
593 1 : match value.downcast::<PageReconstructError>() {
594 0 : Ok(pre) => pre,
595 1 : Err(other) => PageReconstructError::Other(other),
596 : }
597 1 : }
598 : }
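
// Hedged illustration of the downcast round-trip implemented above: a `PageReconstructError`
// that was wrapped into an `anyhow::Error` (as walingest does) is recovered as the original
// variant instead of being re-wrapped in `Other`.
#[allow(dead_code)]
fn demo_page_reconstruct_error_roundtrip() {
    let wrapped = anyhow::Error::new(PageReconstructError::Cancelled);
    let unwrapped = PageReconstructError::from(wrapped);
    assert!(matches!(unwrapped, PageReconstructError::Cancelled));
}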
599 :
600 : impl From<utils::bin_ser::DeserializeError> for PageReconstructError {
601 0 : fn from(value: utils::bin_ser::DeserializeError) -> Self {
602 0 : PageReconstructError::Other(anyhow::Error::new(value).context("deserialization failure"))
603 0 : }
604 : }
605 :
606 : impl From<layer_manager::Shutdown> for PageReconstructError {
607 0 : fn from(_: layer_manager::Shutdown) -> Self {
608 0 : PageReconstructError::Cancelled
609 0 : }
610 : }
611 :
612 : impl GetVectoredError {
613 : #[cfg(test)]
614 3 : pub(crate) fn is_missing_key_error(&self) -> bool {
615 3 : matches!(self, Self::MissingKey(_))
616 3 : }
617 : }
618 :
619 : impl From<layer_manager::Shutdown> for GetVectoredError {
620 0 : fn from(_: layer_manager::Shutdown) -> Self {
621 0 : GetVectoredError::Cancelled
622 0 : }
623 : }
624 :
625 : /// A layer identifier when used in the [`ReadPath`] structure. This enum is for observability purposes
626 : /// only and not used by the "real read path".
627 : pub enum ReadPathLayerId {
628 : PersistentLayer(PersistentLayerKey),
629 : InMemoryLayer(Range<Lsn>),
630 : }
631 :
632 : impl std::fmt::Display for ReadPathLayerId {
633 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
634 0 : match self {
635 0 : ReadPathLayerId::PersistentLayer(key) => write!(f, "{}", key),
636 0 : ReadPathLayerId::InMemoryLayer(range) => {
637 0 : write!(f, "in-mem {}..{}", range.start, range.end)
638 : }
639 : }
640 0 : }
641 : }
642 : pub struct ReadPath {
643 : keyspace: KeySpace,
644 : lsn: Lsn,
645 : path: Vec<(ReadPathLayerId, KeySpace, Range<Lsn>)>,
646 : }
647 :
648 : impl ReadPath {
649 312343 : pub fn new(keyspace: KeySpace, lsn: Lsn) -> Self {
650 312343 : Self {
651 312343 : keyspace,
652 312343 : lsn,
653 312343 : path: Vec::new(),
654 312343 : }
655 312343 : }
656 :
657 445668 : pub fn record_layer_visit(
658 445668 : &mut self,
659 445668 : layer_to_read: &ReadableLayer,
660 445668 : keyspace_to_read: &KeySpace,
661 445668 : lsn_range: &Range<Lsn>,
662 445668 : ) {
663 445668 : let id = match layer_to_read {
664 138441 : ReadableLayer::PersistentLayer(layer) => {
665 138441 : ReadPathLayerId::PersistentLayer(layer.layer_desc().key())
666 : }
667 307227 : ReadableLayer::InMemoryLayer(layer) => {
668 307227 : ReadPathLayerId::InMemoryLayer(layer.get_lsn_range())
669 : }
670 : };
671 445668 : self.path
672 445668 : .push((id, keyspace_to_read.clone(), lsn_range.clone()));
673 445668 : }
674 : }
675 :
676 : impl std::fmt::Display for ReadPath {
677 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
678 0 : writeln!(f, "Read path for {} at lsn {}:", self.keyspace, self.lsn)?;
679 0 : for (idx, (layer_id, keyspace, lsn_range)) in self.path.iter().enumerate() {
680 0 : writeln!(
681 0 : f,
682 0 : "{}: {} {}..{} {}",
683 0 : idx, layer_id, lsn_range.start, lsn_range.end, keyspace
684 0 : )?;
685 : }
686 0 : Ok(())
687 0 : }
688 : }
689 :
690 : #[derive(thiserror::Error)]
691 : pub struct MissingKeyError {
692 : keyspace: KeySpace,
693 : shard: ShardNumber,
694 : query: Option<VersionedKeySpaceQuery>,
696 :     // This is the largest request LSN from the get page request batch
696 : original_hwm_lsn: Lsn,
697 : ancestor_lsn: Option<Lsn>,
698 : /// Debug information about the read path if there's an error
699 : read_path: Option<ReadPath>,
700 : backtrace: Option<std::backtrace::Backtrace>,
701 : }
702 :
703 : impl MissingKeyError {
704 7 : fn enrich(&mut self, query: VersionedKeySpaceQuery) {
705 7 : self.query = Some(query);
706 7 : }
707 : }
708 :
709 : impl std::fmt::Debug for MissingKeyError {
710 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
711 0 : write!(f, "{}", self)
712 0 : }
713 : }
714 :
715 : impl std::fmt::Display for MissingKeyError {
716 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
717 0 : write!(
718 0 : f,
719 0 : "could not find data for key {} (shard {:?}), original HWM LSN {}",
720 0 : self.keyspace, self.shard, self.original_hwm_lsn
721 0 : )?;
722 :
723 0 : if let Some(ref ancestor_lsn) = self.ancestor_lsn {
724 0 : write!(f, ", ancestor {}", ancestor_lsn)?;
725 0 : }
726 :
727 0 : if let Some(ref query) = self.query {
728 0 : write!(f, ", query {}", query)?;
729 0 : }
730 :
731 0 : if let Some(ref read_path) = self.read_path {
732 0 : write!(f, "\n{}", read_path)?;
733 0 : }
734 :
735 0 : if let Some(ref backtrace) = self.backtrace {
736 0 : write!(f, "\n{}", backtrace)?;
737 0 : }
738 :
739 0 : Ok(())
740 0 : }
741 : }
742 :
743 : impl PageReconstructError {
744 : /// Returns true if this error indicates a tenant/timeline shutdown alike situation
745 0 : pub(crate) fn is_stopping(&self) -> bool {
746 : use PageReconstructError::*;
747 0 : match self {
748 0 : Cancelled => true,
749 0 : Other(_) | AncestorLsnTimeout(_) | WalRedo(_) | MissingKey(_) => false,
750 : }
751 0 : }
752 : }
753 :
754 : #[derive(thiserror::Error, Debug)]
755 : pub(crate) enum CreateImageLayersError {
756 : #[error("timeline shutting down")]
757 : Cancelled,
758 :
759 : #[error("read failed")]
760 : GetVectoredError(#[source] GetVectoredError),
761 :
762 : #[error("reconstruction failed")]
763 : PageReconstructError(#[source] PageReconstructError),
764 :
765 : #[error(transparent)]
766 : Other(#[from] anyhow::Error),
767 : }
768 :
769 : impl From<layer_manager::Shutdown> for CreateImageLayersError {
770 0 : fn from(_: layer_manager::Shutdown) -> Self {
771 0 : CreateImageLayersError::Cancelled
772 0 : }
773 : }
774 :
775 : #[derive(thiserror::Error, Debug, Clone)]
776 : pub(crate) enum FlushLayerError {
777 : /// Timeline cancellation token was cancelled
778 : #[error("timeline shutting down")]
779 : Cancelled,
780 :
781 : /// We tried to flush a layer while the Timeline is in an unexpected state
782 : #[error("cannot flush frozen layers when flush_loop is not running, state is {0:?}")]
783 : NotRunning(FlushLoopState),
784 :
785 : // Arc<> the following non-clonable error types: we must be Clone-able because the flush error is propagated from the flush
786 : // loop via a watch channel, where we can only borrow it.
787 : #[error("create image layers (shared)")]
788 : CreateImageLayersError(Arc<CreateImageLayersError>),
789 :
790 : #[error("other (shared)")]
791 : Other(#[from] Arc<anyhow::Error>),
792 : }
793 :
794 : impl FlushLayerError {
795 : // When crossing from generic anyhow errors to this error type, we explicitly check
796 : // for timeline cancellation to avoid logging inoffensive shutdown errors as warn/err.
797 0 : fn from_anyhow(timeline: &Timeline, err: anyhow::Error) -> Self {
798 0 : let cancelled = timeline.cancel.is_cancelled()
799 : // The upload queue might have been shut down before the official cancellation of the timeline.
800 0 : || err
801 0 : .downcast_ref::<NotInitialized>()
802 0 : .map(NotInitialized::is_stopping)
803 0 : .unwrap_or_default();
804 0 : if cancelled {
805 0 : Self::Cancelled
806 : } else {
807 0 : Self::Other(Arc::new(err))
808 : }
809 0 : }
810 : }
811 :
812 : impl From<layer_manager::Shutdown> for FlushLayerError {
813 0 : fn from(_: layer_manager::Shutdown) -> Self {
814 0 : FlushLayerError::Cancelled
815 0 : }
816 : }
817 :
818 : #[derive(thiserror::Error, Debug)]
819 : pub(crate) enum GetVectoredError {
820 : #[error("timeline shutting down")]
821 : Cancelled,
822 :
823 : #[error("requested too many keys: {0} > {1}")]
824 : Oversized(u64, u64),
825 :
826 : #[error("requested at invalid LSN: {0}")]
827 : InvalidLsn(Lsn),
828 :
829 : #[error("requested key not found: {0}")]
830 : MissingKey(Box<MissingKeyError>),
831 :
832 : #[error("ancestry walk")]
833 : GetReadyAncestorError(#[source] GetReadyAncestorError),
834 :
835 : #[error(transparent)]
836 : Other(#[from] anyhow::Error),
837 : }
838 :
839 : impl From<GetReadyAncestorError> for GetVectoredError {
840 1 : fn from(value: GetReadyAncestorError) -> Self {
841 : use GetReadyAncestorError::*;
842 1 : match value {
843 0 : Cancelled => GetVectoredError::Cancelled,
844 : AncestorLsnTimeout(_) | BadState { .. } => {
845 1 : GetVectoredError::GetReadyAncestorError(value)
846 : }
847 : }
848 1 : }
849 : }
850 :
851 : #[derive(thiserror::Error, Debug)]
852 : pub(crate) enum GetReadyAncestorError {
853 : #[error("ancestor LSN wait error")]
854 : AncestorLsnTimeout(#[from] WaitLsnError),
855 :
856 : #[error("bad state on timeline {timeline_id}: {state:?}")]
857 : BadState {
858 : timeline_id: TimelineId,
859 : state: TimelineState,
860 : },
861 :
862 : #[error("cancelled")]
863 : Cancelled,
864 : }
865 :
866 : #[derive(Clone, Copy)]
867 : pub enum LogicalSizeCalculationCause {
868 : Initial,
869 : ConsumptionMetricsSyntheticSize,
870 : EvictionTaskImitation,
871 : TenantSizeHandler,
872 : }
873 :
874 : pub enum GetLogicalSizePriority {
875 : User,
876 : Background,
877 : }
878 :
879 0 : #[derive(Debug, enumset::EnumSetType)]
880 : pub(crate) enum CompactFlags {
881 : ForceRepartition,
882 : ForceImageLayerCreation,
883 : ForceL0Compaction,
884 : OnlyL0Compaction,
885 : EnhancedGcBottomMostCompaction,
886 : DryRun,
887 : /// Makes image compaction yield if there's pending L0 compaction. This should always be used in
888 : /// the background compaction task, since we want to aggressively compact down L0 to bound
889 : /// read amplification.
890 : ///
891 : /// It only makes sense to use this when `compaction_l0_first` is enabled (such that we yield to
892 : /// an L0 compaction pass), and without `OnlyL0Compaction` (L0 compaction shouldn't yield for L0
893 : /// compaction).
894 : YieldForL0,
895 : }
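
// Hedged sketch of how a caller might combine these flags (bit-or on the enum yields an
// `EnumSet<CompactFlags>`, courtesy of the `enumset` derive above). The particular combination
// shown is illustrative only: a background repartitioning pass that yields to pending L0 work.
#[allow(dead_code)]
fn demo_background_compaction_flags() -> EnumSet<CompactFlags> {
    CompactFlags::ForceRepartition | CompactFlags::YieldForL0
}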
896 :
897 : #[serde_with::serde_as]
898 0 : #[derive(Debug, Clone, serde::Deserialize)]
899 : pub(crate) struct CompactRequest {
900 : pub compact_key_range: Option<CompactKeyRange>,
901 : pub compact_lsn_range: Option<CompactLsnRange>,
902 : /// Whether the compaction job should be scheduled.
903 : #[serde(default)]
904 : pub scheduled: bool,
905 : /// Whether the compaction job should be split across key ranges.
906 : #[serde(default)]
907 : pub sub_compaction: bool,
908 : /// Max job size for each subcompaction job.
909 : pub sub_compaction_max_job_size_mb: Option<u64>,
910 : }
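
// Hedged example of a JSON body that deserializes into `CompactRequest` (field names follow
// the struct above; the key/LSN range payloads are omitted because their exact wire format is
// not shown here):
//
//     { "scheduled": true, "sub_compaction": true, "sub_compaction_max_job_size_mb": 1024 }
//
// Missing `Option` fields become `None`, and the booleans fall back to `false` via
// `#[serde(default)]`.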
911 :
912 0 : #[derive(Debug, Clone, serde::Deserialize)]
913 : pub(crate) struct MarkInvisibleRequest {
914 : #[serde(default)]
915 : pub is_visible: Option<bool>,
916 : }
917 :
918 : #[derive(Debug, Clone, Default)]
919 : pub(crate) struct CompactOptions {
920 : pub flags: EnumSet<CompactFlags>,
921 : /// If set, the compaction will only compact the key range specified by this option.
922 : /// This option is only used by GC compaction. For the full explanation, see [`compaction::GcCompactJob`].
923 : pub compact_key_range: Option<CompactKeyRange>,
924 : /// If set, the compaction will only compact the LSN within this value.
925 : /// This option is only used by GC compaction. For the full explanation, see [`compaction::GcCompactJob`].
926 : pub compact_lsn_range: Option<CompactLsnRange>,
927 : /// Enable sub-compaction (split compaction job across key ranges).
928 : /// This option is only used by GC compaction.
929 : pub sub_compaction: bool,
930 : /// Set job size for the GC compaction.
931 : /// This option is only used by GC compaction.
932 : pub sub_compaction_max_job_size_mb: Option<u64>,
933 : }
934 :
935 : impl std::fmt::Debug for Timeline {
936 0 : fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
937 0 : write!(f, "Timeline<{}>", self.timeline_id)
938 0 : }
939 : }
940 :
941 : #[derive(thiserror::Error, Debug, Clone)]
942 : pub(crate) enum WaitLsnError {
943 : // Called on a timeline which is shutting down
944 : #[error("Shutdown")]
945 : Shutdown,
946 :
948 :     // Called on a timeline that is not in an active state, or is shutting down
948 : #[error("Bad timeline state: {0:?}")]
949 : BadState(TimelineState),
950 :
951 : // Timeout expired while waiting for LSN to catch up with goal.
952 : #[error("{0}")]
953 : Timeout(String),
954 : }
955 :
956 : impl From<WaitLsnError> for tonic::Status {
957 0 : fn from(err: WaitLsnError) -> Self {
958 : use tonic::Code;
959 0 : let code = match &err {
960 0 : WaitLsnError::Timeout(_) => Code::Internal,
961 0 : WaitLsnError::BadState(_) => Code::Internal,
962 0 : WaitLsnError::Shutdown => Code::Unavailable,
963 : };
964 0 : tonic::Status::new(code, err.to_string())
965 0 : }
966 : }
967 :
968 : // The impls below achieve cancellation mapping for errors.
969 : // Perhaps there's a way of achieving this with less cruft.
970 :
971 : impl From<CreateImageLayersError> for CompactionError {
972 0 : fn from(e: CreateImageLayersError) -> Self {
973 0 : match e {
974 0 : CreateImageLayersError::Cancelled => CompactionError::ShuttingDown,
975 0 : CreateImageLayersError::Other(e) => {
976 0 : CompactionError::Other(e.context("create image layers"))
977 : }
978 0 : _ => CompactionError::Other(e.into()),
979 : }
980 0 : }
981 : }
982 :
983 : impl From<CreateImageLayersError> for FlushLayerError {
984 0 : fn from(e: CreateImageLayersError) -> Self {
985 0 : match e {
986 0 : CreateImageLayersError::Cancelled => FlushLayerError::Cancelled,
987 0 : any => FlushLayerError::CreateImageLayersError(Arc::new(any)),
988 : }
989 0 : }
990 : }
991 :
992 : impl From<PageReconstructError> for CreateImageLayersError {
993 0 : fn from(e: PageReconstructError) -> Self {
994 0 : match e {
995 0 : PageReconstructError::Cancelled => CreateImageLayersError::Cancelled,
996 0 : _ => CreateImageLayersError::PageReconstructError(e),
997 : }
998 0 : }
999 : }
1000 :
1001 : impl From<super::storage_layer::errors::PutError> for CreateImageLayersError {
1002 0 : fn from(e: super::storage_layer::errors::PutError) -> Self {
1003 0 : if e.is_cancel() {
1004 0 : CreateImageLayersError::Cancelled
1005 : } else {
1006 0 : CreateImageLayersError::Other(e.into_anyhow())
1007 : }
1008 0 : }
1009 : }
1010 :
1011 : impl From<GetVectoredError> for CreateImageLayersError {
1012 0 : fn from(e: GetVectoredError) -> Self {
1013 0 : match e {
1014 0 : GetVectoredError::Cancelled => CreateImageLayersError::Cancelled,
1015 0 : _ => CreateImageLayersError::GetVectoredError(e),
1016 : }
1017 0 : }
1018 : }
1019 :
1020 : impl From<GetVectoredError> for PageReconstructError {
1021 3 : fn from(e: GetVectoredError) -> Self {
1022 3 : match e {
1023 0 : GetVectoredError::Cancelled => PageReconstructError::Cancelled,
1024 0 : GetVectoredError::InvalidLsn(_) => PageReconstructError::Other(anyhow!("Invalid LSN")),
1025 0 : err @ GetVectoredError::Oversized(_, _) => PageReconstructError::Other(err.into()),
1026 2 : GetVectoredError::MissingKey(err) => PageReconstructError::MissingKey(err),
1027 1 : GetVectoredError::GetReadyAncestorError(err) => PageReconstructError::from(err),
1028 0 : GetVectoredError::Other(err) => PageReconstructError::Other(err),
1029 : }
1030 3 : }
1031 : }
1032 :
1033 : impl From<GetReadyAncestorError> for PageReconstructError {
1034 1 : fn from(e: GetReadyAncestorError) -> Self {
1035 : use GetReadyAncestorError::*;
1036 1 : match e {
1037 0 : AncestorLsnTimeout(wait_err) => PageReconstructError::AncestorLsnTimeout(wait_err),
1038 1 : bad_state @ BadState { .. } => PageReconstructError::Other(anyhow::anyhow!(bad_state)),
1039 0 : Cancelled => PageReconstructError::Cancelled,
1040 : }
1041 1 : }
1042 : }
1043 :
1044 : pub(crate) enum WaitLsnTimeout {
1045 : Custom(Duration),
1046 : // Use the [`PageServerConf::wait_lsn_timeout`] default
1047 : Default,
1048 : }
1049 :
1050 : pub(crate) enum WaitLsnWaiter<'a> {
1051 : Timeline(&'a Timeline),
1052 : Tenant,
1053 : PageService,
1054 : HttpEndpoint,
1055 : BaseBackupCache,
1056 : }
1057 :
1058 : /// Argument to [`Timeline::shutdown`].
1059 : #[derive(Debug, Clone, Copy)]
1060 : pub(crate) enum ShutdownMode {
1061 : /// Graceful shutdown, may do a lot of I/O as we flush any open layers to disk. This method can
1062 : /// take multiple seconds for a busy timeline.
1063 : ///
1064 : /// While we are flushing, we continue to accept read I/O for LSNs ingested before
1065 : /// the call to [`Timeline::shutdown`].
1066 : FreezeAndFlush,
1067 : /// Only flush the layers to the remote storage without freezing any open layers. Flush the deletion
1068 : /// queue. This is the mode used by ancestor detach and any other operations that reloads a tenant
1069 : /// but not increasing the generation number. Note that this mode cannot be used at tenant shutdown,
1070 : /// as flushing the deletion queue at that time will cause shutdown-in-progress errors.
1071 : Reload,
1072 : /// Shut down immediately, without waiting for any open layers to flush.
1073 : Hard,
1074 : }
1075 :
1076 : #[allow(clippy::large_enum_variant, reason = "TODO")]
1077 : enum ImageLayerCreationOutcome {
1078 : /// We generated an image layer
1079 : Generated {
1080 : unfinished_image_layer: ImageLayerWriter,
1081 : },
1082 : /// The key range is empty
1083 : Empty,
1084 : /// (Only used in metadata image layer creation), after reading the metadata keys, we decide to skip
1085 : /// the image layer creation.
1086 : Skip,
1087 : }
1088 :
1089 : /// Public interface functions
1090 : impl Timeline {
1091 : /// Get the LSN where this branch was created
1092 22 : pub(crate) fn get_ancestor_lsn(&self) -> Lsn {
1093 22 : self.ancestor_lsn
1094 22 : }
1095 :
1096 : /// Get the ancestor's timeline id
1097 38 : pub(crate) fn get_ancestor_timeline_id(&self) -> Option<TimelineId> {
1098 38 : self.ancestor_timeline
1099 38 : .as_ref()
1100 38 : .map(|ancestor| ancestor.timeline_id)
1101 38 : }
1102 :
1103 : /// Get the ancestor timeline
1104 1 : pub(crate) fn ancestor_timeline(&self) -> Option<&Arc<Timeline>> {
1105 1 : self.ancestor_timeline.as_ref()
1106 1 : }
1107 :
1108 : /// Get the bytes written since the PITR cutoff on this branch, and
1109 : /// whether this branch's ancestor_lsn is within its parent's PITR.
1110 0 : pub(crate) fn get_pitr_history_stats(&self) -> (u64, bool) {
1111 0 : // TODO: for backwards compatibility, we return the full history back to 0 when the PITR
1112 0 : // cutoff has not yet been initialized. This should return None instead, but this is exposed
1113 0 : // in external HTTP APIs and callers may not handle a null value.
1114 0 : let gc_info = self.gc_info.read().unwrap();
1115 0 : let history = self
1116 0 : .get_last_record_lsn()
1117 0 : .checked_sub(gc_info.cutoffs.time.unwrap_or_default())
1118 0 : .unwrap_or_default()
1119 0 : .0;
1120 0 : (history, gc_info.within_ancestor_pitr)
1121 0 : }
1122 :
1123 : /// Read timeline's GC cutoff: this is the LSN at which GC has started to happen
1124 425360 : pub(crate) fn get_applied_gc_cutoff_lsn(&self) -> RcuReadGuard<Lsn> {
1125 425360 : self.applied_gc_cutoff_lsn.read()
1126 425360 : }
1127 :
1128 : /// Read timeline's planned GC cutoff: this is the logical end of history that users are allowed
1129 : /// to read (based on configured PITR), even if physically we have more history. Returns None
1130 : /// if the PITR cutoff has not yet been initialized.
1131 0 : pub(crate) fn get_gc_cutoff_lsn(&self) -> Option<Lsn> {
1132 0 : self.gc_info.read().unwrap().cutoffs.time
1133 0 : }
1134 :
1135 : /// Look up given page version.
1136 : ///
1137 : /// If a remote layer file is needed, it is downloaded as part of this
1138 : /// call.
1139 : ///
1140 : /// This method enforces [`Self::pagestream_throttle`] internally.
1141 : ///
1142 : /// NOTE: It is considered an error to 'get' a key that doesn't exist. The
1143 : /// abstraction above this needs to store suitable metadata to track what
1144 : /// data exists with what keys, in separate metadata entries. If a
1145 : /// non-existent key is requested, we may incorrectly return a value from
1146 : /// an ancestor branch, for example, or waste a lot of cycles chasing the
1147 : /// non-existing key.
1148 : ///
1149 : /// # Cancel-Safety
1150 : ///
1151 : /// This method is cancellation-safe.
1152 : #[inline(always)]
1153 301262 : pub(crate) async fn get(
1154 301262 : &self,
1155 301262 : key: Key,
1156 301262 : lsn: Lsn,
1157 301262 : ctx: &RequestContext,
1158 301262 : ) -> Result<Bytes, PageReconstructError> {
1159 301262 : if !lsn.is_valid() {
1160 0 : return Err(PageReconstructError::Other(anyhow::anyhow!("Invalid LSN")));
1161 301262 : }
1162 301262 :
1163 301262 : // This check is debug-only because of the cost of hashing, and because it's a double-check: we
1164 301262 : // already checked the key against the shard_identity when looking up the Timeline from
1165 301262 : // page_service.
1166 301262 : debug_assert!(!self.shard_identity.is_key_disposable(&key));
1167 :
1168 301262 : let mut reconstruct_state = ValuesReconstructState::new(IoConcurrency::sequential());
1169 301262 :
1170 301262 : let query = VersionedKeySpaceQuery::uniform(KeySpace::single(key..key.next()), lsn);
1171 :
1172 301262 : let vectored_res = self
1173 301262 : .get_vectored_impl(query, &mut reconstruct_state, ctx)
1174 301262 : .await;
1175 :
1176 301262 : let key_value = vectored_res?.pop_first();
1177 301259 : match key_value {
1178 301253 : Some((got_key, value)) => {
1179 301253 : if got_key != key {
1180 0 : error!(
1181 0 : "Expected {}, but singular vectored get returned {}",
1182 : key, got_key
1183 : );
1184 0 : Err(PageReconstructError::Other(anyhow!(
1185 0 : "Singular vectored get returned wrong key"
1186 0 : )))
1187 : } else {
1188 301253 : value
1189 : }
1190 : }
1191 6 : None => Err(PageReconstructError::MissingKey(Box::new(
1192 6 : MissingKeyError {
1193 6 : keyspace: KeySpace::single(key..key.next()),
1194 6 : shard: self.shard_identity.get_shard_number(&key),
1195 6 : original_hwm_lsn: lsn,
1196 6 : ancestor_lsn: None,
1197 6 : backtrace: None,
1198 6 : read_path: None,
1199 6 : query: None,
1200 6 : },
1201 6 : ))),
1202 : }
1203 301262 : }
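
    // Hedged usage sketch for `get` (the context and LSN shown are illustrative only):
    //
    //     let lsn = timeline.get_last_record_lsn();
    //     match timeline.get(key, lsn, &ctx).await {
    //         Ok(img) => { /* reconstructed page image at `lsn` */ }
    //         Err(PageReconstructError::MissingKey(e)) => { /* key does not exist at `lsn` */ }
    //         Err(e) => { /* cancellation, WAL redo failure, ... */ }
    //     }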
1204 :
1205 : pub(crate) const LAYERS_VISITED_WARN_THRESHOLD: u32 = 100;
1206 :
1207 : /// Look up multiple page versions at a given LSN
1208 : ///
1209 : /// This naive implementation will be replaced with a more efficient one
1210 : /// which actually vectorizes the read path.
1211 10888 : pub(crate) async fn get_vectored(
1212 10888 : &self,
1213 10888 : query: VersionedKeySpaceQuery,
1214 10888 : io_concurrency: super::storage_layer::IoConcurrency,
1215 10888 : ctx: &RequestContext,
1216 10888 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1217 10888 : let total_keyspace = query.total_keyspace();
1218 10888 :
1219 10888 : let key_count = total_keyspace.total_raw_size();
1220 10888 : if key_count > self.conf.max_get_vectored_keys.get() {
1221 0 : return Err(GetVectoredError::Oversized(
1222 0 : key_count as u64,
1223 0 : self.conf.max_get_vectored_keys.get() as u64,
1224 0 : ));
1225 10888 : }
1226 :
1227 34195 : for range in &total_keyspace.ranges {
1228 23307 : let mut key = range.start;
1229 65329 : while key != range.end {
1230 42022 : assert!(!self.shard_identity.is_key_disposable(&key));
1231 42022 : key = key.next();
1232 : }
1233 : }
1234 :
1235 10888 : trace!(
1236 0 : "get vectored query {} from task kind {:?}",
1237 0 : query,
1238 0 : ctx.task_kind(),
1239 : );
1240 :
1241 10888 : let start = crate::metrics::GET_VECTORED_LATENCY
1242 10888 : .for_task_kind(ctx.task_kind())
1243 10888 : .map(|metric| (metric, Instant::now()));
1244 :
1245 10888 : let res = self
1246 10888 : .get_vectored_impl(query, &mut ValuesReconstructState::new(io_concurrency), ctx)
1247 10888 : .await;
1248 :
1249 10888 : if let Some((metric, start)) = start {
1250 0 : let elapsed = start.elapsed();
1251 0 : metric.observe(elapsed.as_secs_f64());
1252 10888 : }
1253 :
1254 10888 : res
1255 10888 : }
1256 :
1257 :     /// Scan the keyspace and return all existing key-values in the keyspace. This currently uses the vectored
1258 :     /// get path underneath. A normal vectored get would throw an error when a key in the keyspace is not found
1259 :     /// during the search, but the scan interface returns all existing key-value pairs and does
1260 :     /// not expect every single key in the keyspace to be found. The semantics are closer to the RocksDB
1261 :     /// scan iterator interface. We could optimize this interface later to avoid some of the checks the vectored
1262 :     /// get path performs to maintain and split the probing and to-be-probed keyspaces. We also need to ensure that
1263 :     /// the scan operation will not cause OOM in the future.
1264 8 : pub(crate) async fn scan(
1265 8 : &self,
1266 8 : keyspace: KeySpace,
1267 8 : lsn: Lsn,
1268 8 : ctx: &RequestContext,
1269 8 : io_concurrency: super::storage_layer::IoConcurrency,
1270 8 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1271 8 : if !lsn.is_valid() {
1272 0 : return Err(GetVectoredError::InvalidLsn(lsn));
1273 8 : }
1274 8 :
1275 8 : trace!(
1276 0 : "key-value scan request for {:?}@{} from task kind {:?}",
1277 0 : keyspace,
1278 0 : lsn,
1279 0 : ctx.task_kind()
1280 : );
1281 :
1282 : // We should generalize this into Keyspace::contains in the future.
1283 16 : for range in &keyspace.ranges {
1284 8 : if range.start.field1 < METADATA_KEY_BEGIN_PREFIX
1285 8 : || range.end.field1 > METADATA_KEY_END_PREFIX
1286 : {
1287 0 : return Err(GetVectoredError::Other(anyhow::anyhow!(
1288 0 : "only metadata keyspace can be scanned"
1289 0 : )));
1290 8 : }
1291 : }
1292 :
1293 8 : let start = crate::metrics::SCAN_LATENCY
1294 8 : .for_task_kind(ctx.task_kind())
1295 8 : .map(ScanLatencyOngoingRecording::start_recording);
1296 8 :
1297 8 : let query = VersionedKeySpaceQuery::uniform(keyspace, lsn);
1298 :
1299 8 : let vectored_res = self
1300 8 : .get_vectored_impl(query, &mut ValuesReconstructState::new(io_concurrency), ctx)
1301 8 : .await;
1302 :
1303 8 : if let Some(recording) = start {
1304 0 : recording.observe();
1305 8 : }
1306 :
1307 8 : vectored_res
1308 8 : }
1309 :
1310 312343 : pub(super) async fn get_vectored_impl(
1311 312343 : &self,
1312 312343 : query: VersionedKeySpaceQuery,
1313 312343 : reconstruct_state: &mut ValuesReconstructState,
1314 312343 : ctx: &RequestContext,
1315 312343 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1316 312343 : if query.is_empty() {
1317 0 : return Ok(BTreeMap::default());
1318 312343 : }
1319 :
1320 312343 : let read_path = if self.conf.enable_read_path_debugging || ctx.read_path_debug() {
1321 : Some(ReadPath::new(
1322 312343 : query.total_keyspace(),
1323 312343 : query.high_watermark_lsn()?,
1324 : ))
1325 : } else {
1326 0 : None
1327 : };
1328 :
1329 312343 : reconstruct_state.read_path = read_path;
1330 :
1331 312343 : let redo_attempt_type = if ctx.task_kind() == TaskKind::Compaction {
1332 0 : RedoAttemptType::LegacyCompaction
1333 : } else {
1334 312343 : RedoAttemptType::ReadPage
1335 : };
1336 :
1337 312343 : let traversal_res: Result<(), _> = {
1338 312343 : let ctx = RequestContextBuilder::from(ctx)
1339 312343 : .perf_span(|crnt_perf_span| {
1340 0 : info_span!(
1341 : target: PERF_TRACE_TARGET,
1342 0 : parent: crnt_perf_span,
1343 : "PLAN_IO",
1344 : )
1345 312343 : })
1346 312343 : .attached_child();
1347 312343 :
1348 312343 : self.get_vectored_reconstruct_data(query.clone(), reconstruct_state, &ctx)
1349 312343 : .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone())
1350 312343 : .await
1351 : };
1352 :
1353 312343 : if let Err(err) = traversal_res {
1354 : // Wait for all the spawned IOs to complete.
1355 : // See comments on `spawn_io` inside `storage_layer` for more details.
1356 8 : let mut collect_futs = std::mem::take(&mut reconstruct_state.keys)
1357 8 : .into_values()
1358 8 : .map(|state| state.collect_pending_ios())
1359 8 : .collect::<FuturesUnordered<_>>();
1360 8 : while collect_futs.next().await.is_some() {}
1361 :
1362 : // Enrich the missing key error with the original query.
1363 8 : if let GetVectoredError::MissingKey(mut missing_err) = err {
1364 7 : missing_err.enrich(query.clone());
1365 7 : return Err(GetVectoredError::MissingKey(missing_err));
1366 1 : }
1367 1 :
1368 1 : return Err(err);
1369 312335 : };
1370 312335 :
1371 312335 : let layers_visited = reconstruct_state.get_layers_visited();
1372 312335 :
1373 312335 : let ctx = RequestContextBuilder::from(ctx)
1374 312335 : .perf_span(|crnt_perf_span| {
1375 0 : info_span!(
1376 : target: PERF_TRACE_TARGET,
1377 0 : parent: crnt_perf_span,
1378 : "RECONSTRUCT",
1379 : )
1380 312335 : })
1381 312335 : .attached_child();
1382 312335 :
1383 312335 : let futs = FuturesUnordered::new();
1384 363514 : for (key, state) in std::mem::take(&mut reconstruct_state.keys) {
1385 363514 : let req_lsn_for_key = query.map_key_to_lsn(&key);
1386 363514 :
1387 363514 : futs.push({
1388 363514 : let walredo_self = self.myself.upgrade().expect("&self method holds the arc");
1389 363514 : let ctx = RequestContextBuilder::from(&ctx)
1390 363514 : .perf_span(|crnt_perf_span| {
1391 0 : info_span!(
1392 : target: PERF_TRACE_TARGET,
1393 0 : parent: crnt_perf_span,
1394 : "RECONSTRUCT_KEY",
1395 : key = %key,
1396 : )
1397 363514 : })
1398 363514 : .attached_child();
1399 363514 :
1400 363514 : async move {
1401 363514 : assert_eq!(state.situation, ValueReconstructSituation::Complete);
1402 :
1403 363514 : let res = state
1404 363514 : .collect_pending_ios()
1405 363514 : .maybe_perf_instrument(&ctx, |crnt_perf_span| {
1406 0 : info_span!(
1407 : target: PERF_TRACE_TARGET,
1408 0 : parent: crnt_perf_span,
1409 : "WAIT_FOR_IO_COMPLETIONS",
1410 : )
1411 363514 : })
1412 363514 : .await;
1413 :
1414 363514 : let converted = match res {
1415 363514 : Ok(ok) => ok,
1416 0 : Err(err) => {
1417 0 : return (key, Err(err));
1418 : }
1419 : };
1420 363514 : DELTAS_PER_READ_GLOBAL.observe(converted.num_deltas() as f64);
1421 363514 :
1422 363514 : // The walredo module expects the records to be descending in terms of Lsn.
1423 363514 :                     // And we submit the IOs in that order, so there should be no need to sort here.
1424 363514 : debug_assert!(
1425 363514 : converted
1426 363514 : .records
1427 1403254 : .is_sorted_by_key(|(lsn, _)| std::cmp::Reverse(*lsn)),
1428 0 : "{converted:?}"
1429 : );
1430 :
1431 363514 : let walredo_deltas = converted.num_deltas();
1432 363514 : let walredo_res = walredo_self
1433 363514 : .reconstruct_value(key, req_lsn_for_key, converted, redo_attempt_type)
1434 363514 : .maybe_perf_instrument(&ctx, |crnt_perf_span| {
1435 0 : info_span!(
1436 : target: PERF_TRACE_TARGET,
1437 0 : parent: crnt_perf_span,
1438 : "WALREDO",
1439 : deltas = %walredo_deltas,
1440 : )
1441 363514 : })
1442 363514 : .await;
1443 :
1444 363514 : (key, walredo_res)
1445 363514 : }
1446 363514 : });
1447 363514 : }
1448 :
1449 312335 : let results = futs
1450 312335 : .collect::<BTreeMap<Key, Result<Bytes, PageReconstructError>>>()
1451 312335 : .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone())
1452 312335 : .await;
1453 :
1454 : // For aux file keys (v1 or v2) the vectored read path does not return an error
1455 : // when they're missing. Instead they are omitted from the resulting btree
1456 : // (this is a requirement, not a bug). Skip updating the metric in these cases
1457 : // to avoid recording infinite per-read averages (a division by zero below).
1458 312335 : if !results.is_empty() {
1459 312207 : if layers_visited >= Self::LAYERS_VISITED_WARN_THRESHOLD {
1460 0 : let total_keyspace = query.total_keyspace();
1461 0 : let max_request_lsn = query.high_watermark_lsn().expect("Validated previously");
1462 0 :
1463 0 : static LOG_PACER: Lazy<Mutex<RateLimit>> =
1464 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(60))));
1465 0 : LOG_PACER.lock().unwrap().call(|| {
1466 0 : let num_keys = total_keyspace.total_raw_size();
1467 0 : let num_pages = results.len();
1468 0 : tracing::info!(
1469 0 : shard_id = %self.tenant_shard_id.shard_slug(),
1470 0 : lsn = %max_request_lsn,
1471 0 : "Vectored read for {total_keyspace} visited {layers_visited} layers. Returned {num_pages}/{num_keys} pages.",
1472 : );
1473 0 : });
1474 312207 : }
1475 :
1476 : // Records the number of layers visited in a few different ways:
1477 : //
1478 : // * LAYERS_PER_READ: all layers count towards every read in the batch, because each
1479 : // layer directly affects its observed latency.
1480 : //
1481 : // * LAYERS_PER_READ_BATCH: all layers count towards each batch, to get the per-batch
1482 : // layer visits and access cost.
1483 : //
1484 : // * LAYERS_PER_READ_AMORTIZED: the average layer count per read, to get the amortized
1485 : // read amplification after batching.
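// For example (illustrative numbers, not taken from the code): a batch of 100 reads that
// visited 5 layers records 5 once in LAYERS_PER_READ_BATCH, 5 a hundred times in
// LAYERS_PER_READ, and 0.05 (= 5/100) a hundred times in LAYERS_PER_READ_AMORTIZED.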
1486 312207 : let layers_visited = layers_visited as f64;
1487 312207 : let avg_layers_visited = layers_visited / results.len() as f64;
1488 312207 : LAYERS_PER_READ_BATCH_GLOBAL.observe(layers_visited);
1489 675721 : for _ in &results {
1490 363514 : self.metrics.layers_per_read.observe(layers_visited);
1491 363514 : LAYERS_PER_READ_GLOBAL.observe(layers_visited);
1492 363514 : LAYERS_PER_READ_AMORTIZED_GLOBAL.observe(avg_layers_visited);
1493 363514 : }
1494 128 : }
1495 :
1496 312335 : Ok(results)
1497 312343 : }
1498 :
1499 : /// Get last or prev record separately. Same as get_last_record_rlsn().last/prev.
1500 137129 : pub(crate) fn get_last_record_lsn(&self) -> Lsn {
1501 137129 : self.last_record_lsn.load().last
1502 137129 : }
1503 :
1504 0 : pub(crate) fn get_prev_record_lsn(&self) -> Lsn {
1505 0 : self.last_record_lsn.load().prev
1506 0 : }
1507 :
1508 : /// Atomically get both last and prev.
1509 117 : pub(crate) fn get_last_record_rlsn(&self) -> RecordLsn {
1510 117 : self.last_record_lsn.load()
1511 117 : }
1512 :
1513 : /// Subscribe to callers of wait_lsn(). The value of the channel is None if there are no
1514 : /// wait_lsn() calls in progress, and Some(Lsn) if there is an active waiter for wait_lsn().
1515 0 : pub(crate) fn subscribe_for_wait_lsn_updates(&self) -> watch::Receiver<Option<Lsn>> {
1516 0 : self.last_record_lsn.status_receiver()
1517 0 : }
1518 :
1519 234 : pub(crate) fn get_disk_consistent_lsn(&self) -> Lsn {
1520 234 : self.disk_consistent_lsn.load()
1521 234 : }
1522 :
1523 : /// remote_consistent_lsn from the perspective of the tenant's current generation,
1524 : /// not validated with control plane yet.
1525 : /// See [`Self::get_remote_consistent_lsn_visible`].
1526 2 : pub(crate) fn get_remote_consistent_lsn_projected(&self) -> Option<Lsn> {
1527 2 : self.remote_client.remote_consistent_lsn_projected()
1528 2 : }
1529 :
1530 : /// remote_consistent_lsn which the tenant is guaranteed not to go backward from,
1531 : /// i.e. a value of remote_consistent_lsn_projected which has undergone
1532 : /// generation validation in the deletion queue.
1533 0 : pub(crate) fn get_remote_consistent_lsn_visible(&self) -> Option<Lsn> {
1534 0 : self.remote_client.remote_consistent_lsn_visible()
1535 0 : }
1536 :
1537 : /// The sum of the file size of all historic layers in the layer map.
1538 : /// This method makes no distinction between local and remote layers.
1539 : /// Hence, the result **does not represent local filesystem usage**.
1540 0 : pub(crate) async fn layer_size_sum(&self) -> u64 {
1541 0 : let guard = self
1542 0 : .layers
1543 0 : .read(LayerManagerLockHolder::GetLayerMapInfo)
1544 0 : .await;
1545 0 : guard.layer_size_sum()
1546 0 : }
1547 :
1548 0 : pub(crate) fn resident_physical_size(&self) -> u64 {
1549 0 : self.metrics.resident_physical_size_get()
1550 0 : }
1551 :
1552 0 : pub(crate) fn get_directory_metrics(&self) -> [u64; DirectoryKind::KINDS_NUM] {
1553 0 : array::from_fn(|idx| self.directory_metrics[idx].load(AtomicOrdering::Relaxed))
1554 0 : }
1555 :
1556 : ///
1557 : /// Wait until WAL has been received and processed up to this LSN.
1558 : ///
1559 : /// You should call this before any of the other get_* or list_* functions. Calling
1560 : /// those functions with an LSN that has not been processed yet is an error.
1561 : ///
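/// # Example
///
/// A minimal, hedged sketch of a hypothetical page_service-style caller; `timeline`,
/// `lsn` and `ctx` are assumed to be in scope and are not taken from this file's call sites.
///
/// ```ignore
/// timeline
///     .wait_lsn(lsn, WaitLsnWaiter::PageService, WaitLsnTimeout::Default, &ctx)
///     .await?;
/// // Only after this returns Ok(()) is it valid to call the get_*/list_* functions at `lsn`.
/// ```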
1562 112854 : pub(crate) async fn wait_lsn(
1563 112854 : &self,
1564 112854 : lsn: Lsn,
1565 112854 : who_is_waiting: WaitLsnWaiter<'_>,
1566 112854 : timeout: WaitLsnTimeout,
1567 112854 : ctx: &RequestContext, /* Prepare for use by cancellation */
1568 112854 : ) -> Result<(), WaitLsnError> {
1569 112854 : let state = self.current_state();
1570 112854 : if self.cancel.is_cancelled() || matches!(state, TimelineState::Stopping) {
1571 0 : return Err(WaitLsnError::Shutdown);
1572 112854 : } else if !matches!(state, TimelineState::Active) {
1573 0 : return Err(WaitLsnError::BadState(state));
1574 112854 : }
1575 112854 :
1576 112854 : if cfg!(debug_assertions) {
1577 112854 : match ctx.task_kind() {
1578 : TaskKind::WalReceiverManager
1579 : | TaskKind::WalReceiverConnectionHandler
1580 : | TaskKind::WalReceiverConnectionPoller => {
1581 0 : let is_myself = match who_is_waiting {
1582 0 : WaitLsnWaiter::Timeline(waiter) => {
1583 0 : Weak::ptr_eq(&waiter.myself, &self.myself)
1584 : }
1585 : WaitLsnWaiter::Tenant
1586 : | WaitLsnWaiter::PageService
1587 : | WaitLsnWaiter::HttpEndpoint
1588 0 : | WaitLsnWaiter::BaseBackupCache => unreachable!(
1589 0 : "tenant or page_service context are not expected to have task kind {:?}",
1590 0 : ctx.task_kind()
1591 0 : ),
1592 : };
1593 0 : if is_myself {
1594 0 : if let Err(current) = self.last_record_lsn.would_wait_for(lsn) {
1595 : // walingest is the only one that can advance last_record_lsn; it should make sure to never reach here
1596 0 : panic!(
1597 0 : "this timeline's walingest task is calling wait_lsn({lsn}) but we only have last_record_lsn={current}; would deadlock"
1598 0 : );
1599 0 : }
1600 0 : } else {
1601 0 : // if another timeline's walreceiver is waiting for us, there's no deadlock risk because
1602 0 : // our walreceiver task can make progress independent of theirs
1603 0 : }
1604 : }
1605 112854 : _ => {}
1606 : }
1607 0 : }
1608 :
1609 112854 : let timeout = match timeout {
1610 0 : WaitLsnTimeout::Custom(t) => t,
1611 112854 : WaitLsnTimeout::Default => self.conf.wait_lsn_timeout,
1612 : };
1613 :
1614 112854 : let timer = crate::metrics::WAIT_LSN_TIME.start_timer();
1615 112854 : let start_finish_counterpair_guard = self.metrics.wait_lsn_start_finish_counterpair.guard();
1616 112854 :
1617 112854 : let wait_for_timeout = self.last_record_lsn.wait_for_timeout(lsn, timeout);
1618 112854 : let wait_for_timeout = std::pin::pin!(wait_for_timeout);
1619 112854 : // Use threshold of 1 because even 1 second of wait for ingest is very much abnormal.
1620 112854 : let log_slow_threshold = Duration::from_secs(1);
1621 112854 : // Use period of 10 to avoid flooding logs during an outage that affects all timelines.
1622 112854 : let log_slow_period = Duration::from_secs(10);
1623 112854 : let mut logging_permit = None;
1624 112854 : let wait_for_timeout = monitor_slow_future(
1625 112854 : log_slow_threshold,
1626 112854 : log_slow_period,
1627 112854 : wait_for_timeout,
1628 112854 : |MonitorSlowFutureCallback {
1629 : ready,
1630 : is_slow,
1631 : elapsed_total,
1632 : elapsed_since_last_callback,
1633 112854 : }| {
1634 112854 : self.metrics
1635 112854 : .wait_lsn_in_progress_micros
1636 112854 : .inc_by(u64::try_from(elapsed_since_last_callback.as_micros()).unwrap());
1637 112854 : if !is_slow {
1638 112854 : return;
1639 0 : }
1640 0 : // It's slow, see if we should log it.
1641 0 : // (We limit the logging to one per invocation per timeline to avoid excessive
1642 0 : // logging during an extended broker / networking outage that affects all timelines.)
1643 0 : if logging_permit.is_none() {
1644 0 : logging_permit = self.wait_lsn_log_slow.try_acquire().ok();
1645 0 : }
1646 0 : if logging_permit.is_none() {
1647 0 : return;
1648 0 : }
1649 0 : // We log it.
1650 0 : if ready {
1651 0 : info!(
1652 0 : "slow wait_lsn completed after {:.3}s",
1653 0 : elapsed_total.as_secs_f64()
1654 : );
1655 : } else {
1656 0 : info!(
1657 0 : "slow wait_lsn still running for {:.3}s",
1658 0 : elapsed_total.as_secs_f64()
1659 : );
1660 : }
1661 112854 : },
1662 112854 : );
1663 112854 : let res = wait_for_timeout.await;
1664 : // don't count the time spent waiting for lock below, and also in walreceiver.status(), towards the wait_lsn_time_histo
1665 112854 : drop(logging_permit);
1666 112854 : drop(start_finish_counterpair_guard);
1667 112854 : drop(timer);
1668 112854 : match res {
1669 112854 : Ok(()) => Ok(()),
1670 0 : Err(e) => {
1671 : use utils::seqwait::SeqWaitError::*;
1672 0 : match e {
1673 0 : Shutdown => Err(WaitLsnError::Shutdown),
1674 : Timeout => {
1675 0 : let walreceiver_status = self.walreceiver_status();
1676 0 : Err(WaitLsnError::Timeout(format!(
1677 0 : "Timed out while waiting for WAL record at LSN {} to arrive, last_record_lsn {} disk consistent LSN={}, WalReceiver status: {}",
1678 0 : lsn,
1679 0 : self.get_last_record_lsn(),
1680 0 : self.get_disk_consistent_lsn(),
1681 0 : walreceiver_status,
1682 0 : )))
1683 : }
1684 : }
1685 : }
1686 : }
1687 112854 : }
1688 :
1689 0 : pub(crate) fn walreceiver_status(&self) -> String {
1690 0 : match &*self.walreceiver.lock().unwrap() {
1691 0 : None => "stopping or stopped".to_string(),
1692 0 : Some(walreceiver) => match walreceiver.status() {
1693 0 : Some(status) => status.to_human_readable_string(),
1694 0 : None => "Not active".to_string(),
1695 : },
1696 : }
1697 0 : }
1698 :
1699 : /// Check that it is valid to request operations with that lsn.
1700 119 : pub(crate) fn check_lsn_is_in_scope(
1701 119 : &self,
1702 119 : lsn: Lsn,
1703 119 : latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
1704 119 : ) -> anyhow::Result<()> {
1705 119 : ensure!(
1706 119 : lsn >= **latest_gc_cutoff_lsn,
1707 2 : "LSN {} is earlier than latest GC cutoff {} (we might've already garbage collected needed data)",
1708 2 : lsn,
1709 2 : **latest_gc_cutoff_lsn,
1710 : );
1711 117 : Ok(())
1712 119 : }
1713 :
1714 : /// Initializes an LSN lease. The function will return an error if the requested LSN is less than the `latest_gc_cutoff_lsn`.
1715 5 : pub(crate) fn init_lsn_lease(
1716 5 : &self,
1717 5 : lsn: Lsn,
1718 5 : length: Duration,
1719 5 : ctx: &RequestContext,
1720 5 : ) -> anyhow::Result<LsnLease> {
1721 5 : self.make_lsn_lease(lsn, length, true, ctx)
1722 5 : }
1723 :
1724 : /// Renews a lease at a particular LSN. The requested LSN is not validated against the `latest_gc_cutoff_lsn` when we are in the grace period.
1725 2 : pub(crate) fn renew_lsn_lease(
1726 2 : &self,
1727 2 : lsn: Lsn,
1728 2 : length: Duration,
1729 2 : ctx: &RequestContext,
1730 2 : ) -> anyhow::Result<LsnLease> {
1731 2 : self.make_lsn_lease(lsn, length, false, ctx)
1732 2 : }
1733 :
1734 : /// Obtains a temporary lease blocking garbage collection for the given LSN.
1735 : ///
1736 : /// If we are in `AttachedSingle` mode and are not blocked by the LSN lease deadline, this function will error
1737 : /// if the requested LSN is less than the `latest_gc_cutoff_lsn` and there is no existing lease present.
1738 : ///
1739 : /// If there is an existing lease in the map, the lease will be renewed only if the request extends the lease.
1740 : /// The returned lease is therefore the maximum of the existing lease and the requested lease.
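///
/// A hedged illustration of the "maximum of both leases" behaviour, via the public
/// wrappers above (a sketch; `timeline`, `lsn` and `ctx` are assumed to be in scope):
///
/// ```ignore
/// let first = timeline.init_lsn_lease(lsn, Duration::from_secs(600), &ctx)?;
/// // Renewing with a shorter length does not shrink the existing lease:
/// let second = timeline.renew_lsn_lease(lsn, Duration::from_secs(60), &ctx)?;
/// assert!(second.valid_until >= first.valid_until);
/// ```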
1741 7 : fn make_lsn_lease(
1742 7 : &self,
1743 7 : lsn: Lsn,
1744 7 : length: Duration,
1745 7 : init: bool,
1746 7 : _ctx: &RequestContext,
1747 7 : ) -> anyhow::Result<LsnLease> {
1748 6 : let lease = {
1749 : // Normalize the requested LSN to be aligned, and move to the first record
1750 : // if it points to the beginning of the page (header).
1751 7 : let lsn = xlog_utils::normalize_lsn(lsn, WAL_SEGMENT_SIZE);
1752 7 :
1753 7 : let mut gc_info = self.gc_info.write().unwrap();
1754 7 : let planned_cutoff = gc_info.min_cutoff();
1755 7 :
1756 7 : let valid_until = SystemTime::now() + length;
1757 7 :
1758 7 : let entry = gc_info.leases.entry(lsn);
1759 7 :
1760 7 : match entry {
1761 3 : Entry::Occupied(mut occupied) => {
1762 3 : let existing_lease = occupied.get_mut();
1763 3 : if valid_until > existing_lease.valid_until {
1764 1 : existing_lease.valid_until = valid_until;
1765 1 : let dt: DateTime<Utc> = valid_until.into();
1766 1 : info!("lease extended to {}", dt);
1767 : } else {
1768 2 : let dt: DateTime<Utc> = existing_lease.valid_until.into();
1769 2 : info!("existing lease covers greater length, valid until {}", dt);
1770 : }
1771 :
1772 3 : existing_lease.clone()
1773 : }
1774 4 : Entry::Vacant(vacant) => {
1775 : // Reject already GC-ed LSN if we are in AttachedSingle and
1776 : // not blocked by the lsn lease deadline.
1777 4 : let validate = {
1778 4 : let conf = self.tenant_conf.load();
1779 4 : conf.location.attach_mode == AttachmentMode::Single
1780 4 : && !conf.is_gc_blocked_by_lsn_lease_deadline()
1781 : };
1782 :
1783 4 : if init || validate {
1784 4 : let latest_gc_cutoff_lsn = self.get_applied_gc_cutoff_lsn();
1785 4 : if lsn < *latest_gc_cutoff_lsn {
1786 1 : bail!(
1787 1 : "tried to request an lsn lease for an lsn below the latest gc cutoff. requested at {} gc cutoff {}",
1788 1 : lsn,
1789 1 : *latest_gc_cutoff_lsn
1790 1 : );
1791 3 : }
1792 3 : if lsn < planned_cutoff {
1793 0 : bail!(
1794 0 : "tried to request an lsn lease for an lsn below the planned gc cutoff. requested at {} planned gc cutoff {}",
1795 0 : lsn,
1796 0 : planned_cutoff
1797 0 : );
1798 3 : }
1799 0 : }
1800 :
1801 3 : let dt: DateTime<Utc> = valid_until.into();
1802 3 : info!("lease created, valid until {}", dt);
1803 3 : vacant.insert(LsnLease { valid_until }).clone()
1804 : }
1805 : }
1806 : };
1807 :
1808 6 : Ok(lease)
1809 7 : }
1810 :
1811 : /// Freeze the current open in-memory layer. It will be written to disk on next iteration.
1812 : /// Returns the flush request ID which can be awaited with wait_flush_completion().
1813 : #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
1814 : pub(crate) async fn freeze(&self) -> Result<u64, FlushLayerError> {
1815 : self.freeze0().await
1816 : }
1817 :
1818 : /// Freeze and flush the open in-memory layer, waiting for it to be written to disk.
1819 : #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
1820 : pub(crate) async fn freeze_and_flush(&self) -> Result<(), FlushLayerError> {
1821 : self.freeze_and_flush0().await
1822 : }
1823 :
1824 : /// Freeze the current open in-memory layer. It will be written to disk on next iteration.
1825 : /// Returns the flush request ID which can be awaited with wait_flush_completion().
1826 570 : pub(crate) async fn freeze0(&self) -> Result<u64, FlushLayerError> {
1827 570 : let mut g = self.write_lock.lock().await;
1828 570 : let to_lsn = self.get_last_record_lsn();
1829 570 : self.freeze_inmem_layer_at(to_lsn, &mut g).await
1830 570 : }
1831 :
1832 : // This exists to provide a non-span creating version of `freeze_and_flush` we can call without
1833 : // polluting the span hierarchy.
1834 570 : pub(crate) async fn freeze_and_flush0(&self) -> Result<(), FlushLayerError> {
1835 570 : let token = self.freeze0().await?;
1836 570 : self.wait_flush_completion(token).await
1837 570 : }
1838 :
1839 : // Check if an open ephemeral layer should be closed: this provides
1840 : // background enforcement of checkpoint interval if there is no active WAL receiver, to avoid keeping
1841 : // an ephemeral layer open forever when idle. It also freezes layers if the global limit on
1842 : // ephemeral layer bytes has been breached.
1843 0 : pub(super) async fn maybe_freeze_ephemeral_layer(&self) {
1844 0 : let Ok(mut write_guard) = self.write_lock.try_lock() else {
1845 : // If the write lock is held, there is an active wal receiver: rolling open layers
1846 : // is their responsibility while they hold this lock.
1847 0 : return;
1848 : };
1849 :
1850 : // FIXME: why not early exit? Because before #7927 the state would have been cleared every
1851 : // time, and this was missed.
1852 : // if write_guard.is_none() { return; }
1853 :
1854 0 : let Ok(layers_guard) = self.layers.try_read(LayerManagerLockHolder::TryFreezeLayer) else {
1855 : // Don't block if the layer lock is busy
1856 0 : return;
1857 : };
1858 :
1859 0 : let Ok(lm) = layers_guard.layer_map() else {
1860 0 : return;
1861 : };
1862 :
1863 0 : let Some(open_layer) = &lm.open_layer else {
1864 : // If there is no open layer, we have no layer freezing to do. However, we might need to generate
1865 : // some updates to disk_consistent_lsn and remote_consistent_lsn, in case we ingested some WAL regions
1866 : // that didn't result in writes to this shard.
1867 :
1868 : // Must not hold the layers lock while waiting for a flush.
1869 0 : drop(layers_guard);
1870 0 :
1871 0 : let last_record_lsn = self.get_last_record_lsn();
1872 0 : let disk_consistent_lsn = self.get_disk_consistent_lsn();
1873 0 : if last_record_lsn > disk_consistent_lsn {
1874 : // We have no open layer, but disk_consistent_lsn is behind the last record: this indicates
1875 : // we are a sharded tenant and have skipped some WAL
1876 0 : let last_freeze_ts = *self.last_freeze_ts.read().unwrap();
1877 0 : if last_freeze_ts.elapsed() >= self.get_checkpoint_timeout() {
1878 : // Only do this if we have been layer-less for longer than get_checkpoint_timeout,
1879 : // so that a shard without any data ingested (yet) doesn't write a remote index
1880 : // as soon as it sees its LSN advance: we only do this once we've been layer-less
1881 : // for some time.
1882 0 : tracing::debug!(
1883 0 : "Advancing disk_consistent_lsn past WAL ingest gap {} -> {}",
1884 : disk_consistent_lsn,
1885 : last_record_lsn
1886 : );
1887 :
1888 : // The flush loop will update remote consistent LSN as well as disk consistent LSN.
1889 : // We know there is no open layer, so we can request freezing without actually
1890 : // freezing anything. This is true even if we have dropped the layers_guard, we
1891 : // still hold the write_guard.
1892 0 : let _ = async {
1893 0 : let token = self
1894 0 : .freeze_inmem_layer_at(last_record_lsn, &mut write_guard)
1895 0 : .await?;
1896 0 : self.wait_flush_completion(token).await
1897 0 : }
1898 0 : .await;
1899 0 : }
1900 0 : }
1901 :
1902 0 : return;
1903 : };
1904 :
1905 0 : let Some(current_size) = open_layer.try_len() else {
1906 : // Unexpected: since we hold the write guard, nobody else should be writing to this layer, so
1907 : // taking a read lock to get its size should always succeed.
1908 0 : tracing::warn!("Lock conflict while reading size of open layer");
1909 0 : return;
1910 : };
1911 :
1912 0 : let current_lsn = self.get_last_record_lsn();
1913 :
1914 0 : let checkpoint_distance_override = open_layer.tick().await;
1915 :
1916 0 : if let Some(size_override) = checkpoint_distance_override {
1917 0 : if current_size > size_override {
1918 : // This is not harmful, but it only happens in relatively rare cases where
1919 : // time-based checkpoints are not happening fast enough to keep the amount of
1920 : // ephemeral data within configured limits. It's a sign of stress on the system.
1921 0 : tracing::info!(
1922 0 : "Early-rolling open layer at size {current_size} (limit {size_override}) due to dirty data pressure"
1923 : );
1924 0 : }
1925 0 : }
1926 :
1927 0 : let checkpoint_distance =
1928 0 : checkpoint_distance_override.unwrap_or(self.get_checkpoint_distance());
1929 0 :
1930 0 : if self.should_roll(
1931 0 : current_size,
1932 0 : current_size,
1933 0 : checkpoint_distance,
1934 0 : self.get_last_record_lsn(),
1935 0 : self.last_freeze_at.load(),
1936 0 : open_layer.get_opened_at(),
1937 0 : ) {
1938 0 : match open_layer.info() {
1939 0 : InMemoryLayerInfo::Frozen { lsn_start, lsn_end } => {
1940 0 : // We may reach this point if the layer was already frozen but not yet flushed: flushing
1941 0 : // happens asynchronously in the background.
1942 0 : tracing::debug!(
1943 0 : "Not freezing open layer, it's already frozen ({lsn_start}..{lsn_end})"
1944 : );
1945 : }
1946 : InMemoryLayerInfo::Open { .. } => {
1947 : // Upgrade to a write lock and freeze the layer
1948 0 : drop(layers_guard);
1949 0 : let res = self
1950 0 : .freeze_inmem_layer_at(current_lsn, &mut write_guard)
1951 0 : .await;
1952 :
1953 0 : if let Err(e) = res {
1954 0 : tracing::info!(
1955 0 : "failed to flush frozen layer after background freeze: {e:#}"
1956 : );
1957 0 : }
1958 : }
1959 : }
1960 0 : }
1961 0 : }
1962 :
1963 : /// Checks if the internal state of the timeline is consistent with it being able to be offloaded.
1964 : ///
1965 : /// This is necessary but not sufficient for offloading the timeline, as it might have
1966 : /// child timelines that are not offloaded yet.
1967 0 : pub(crate) fn can_offload(&self) -> (bool, &'static str) {
1968 0 : if self.remote_client.is_archived() != Some(true) {
1969 0 : return (false, "the timeline is not archived");
1970 0 : }
1971 0 : if !self.remote_client.no_pending_work() {
1972 : // if the remote client is still processing some work, we can't offload
1973 0 : return (false, "the upload queue is not drained yet");
1974 0 : }
1975 0 :
1976 0 : (true, "ok")
1977 0 : }
1978 :
1979 : /// Outermost timeline compaction operation; downloads needed layers. Returns whether we have pending
1980 : /// compaction tasks.
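///
/// A hedged sketch of a hypothetical caller (`timeline: Arc<Timeline>`, `cancel` and
/// `ctx` assumed in scope; not taken from this crate's real call sites):
///
/// ```ignore
/// let outcome = timeline.compact(&cancel, EnumSet::empty(), &ctx).await?;
/// if !matches!(outcome, CompactionOutcome::Done) {
///     // e.g. Skipped or YieldForL0: compaction work may still be pending.
/// }
/// ```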
1981 192 : pub(crate) async fn compact(
1982 192 : self: &Arc<Self>,
1983 192 : cancel: &CancellationToken,
1984 192 : flags: EnumSet<CompactFlags>,
1985 192 : ctx: &RequestContext,
1986 192 : ) -> Result<CompactionOutcome, CompactionError> {
1987 192 : let res = self
1988 192 : .compact_with_options(
1989 192 : cancel,
1990 192 : CompactOptions {
1991 192 : flags,
1992 192 : compact_key_range: None,
1993 192 : compact_lsn_range: None,
1994 192 : sub_compaction: false,
1995 192 : sub_compaction_max_job_size_mb: None,
1996 192 : },
1997 192 : ctx,
1998 192 : )
1999 192 : .await;
2000 192 : if let Err(err) = &res {
2001 0 : log_compaction_error(err, None, cancel.is_cancelled(), false);
2002 192 : }
2003 192 : res
2004 192 : }
2005 :
2006 : /// Outermost timeline compaction operation; downloads needed layers.
2007 : ///
2008 : /// NB: the cancellation token is usually from a background task, but can also come from a
2009 : /// request task.
2010 192 : pub(crate) async fn compact_with_options(
2011 192 : self: &Arc<Self>,
2012 192 : cancel: &CancellationToken,
2013 192 : options: CompactOptions,
2014 192 : ctx: &RequestContext,
2015 192 : ) -> Result<CompactionOutcome, CompactionError> {
2016 192 : // Acquire the compaction lock and task semaphore.
2017 192 : //
2018 192 : // L0-only compaction uses a separate semaphore (if enabled) to make sure it isn't starved
2019 192 : // out by other background tasks (including image compaction). We request this via
2020 192 : // `BackgroundLoopKind::L0Compaction`.
2021 192 : //
2022 192 : // Yield for pending L0 compaction while waiting for the semaphore.
2023 192 : let is_l0_only = options.flags.contains(CompactFlags::OnlyL0Compaction);
2024 192 : let semaphore_kind = match is_l0_only && self.get_compaction_l0_semaphore() {
2025 0 : true => BackgroundLoopKind::L0Compaction,
2026 192 : false => BackgroundLoopKind::Compaction,
2027 : };
2028 192 : let yield_for_l0 = options.flags.contains(CompactFlags::YieldForL0);
2029 192 : if yield_for_l0 {
2030 : // If this is an L0 pass, it doesn't make sense to yield for L0.
2031 0 : debug_assert!(!is_l0_only, "YieldForL0 during L0 pass");
2032 : // If `compaction_l0_first` is disabled, there's no point yielding.
2033 0 : debug_assert!(self.get_compaction_l0_first(), "YieldForL0 without L0 pass");
2034 192 : }
2035 :
2036 192 : let acquire = async move {
2037 192 : let guard = self.compaction_lock.lock().await;
2038 192 : let permit = super::tasks::acquire_concurrency_permit(semaphore_kind, ctx).await;
2039 192 : (guard, permit)
2040 192 : };
2041 :
2042 192 : let (_guard, _permit) = tokio::select! {
2043 192 : (guard, permit) = acquire => (guard, permit),
2044 192 : _ = self.l0_compaction_trigger.notified(), if yield_for_l0 => {
2045 0 : return Ok(CompactionOutcome::YieldForL0);
2046 : }
2047 192 : _ = self.cancel.cancelled() => return Ok(CompactionOutcome::Skipped),
2048 192 : _ = cancel.cancelled() => return Ok(CompactionOutcome::Skipped),
2049 : };
2050 :
2051 192 : let last_record_lsn = self.get_last_record_lsn();
2052 192 :
2053 192 : // The last record LSN could be zero if the timeline was just created
2054 192 : if !last_record_lsn.is_valid() {
2055 0 : warn!(
2056 0 : "Skipping compaction for potentially just initialized timeline, it has invalid last record lsn: {last_record_lsn}"
2057 : );
2058 0 : return Ok(CompactionOutcome::Skipped);
2059 192 : }
2060 :
2061 192 : let result = match self.get_compaction_algorithm_settings().kind {
2062 : CompactionAlgorithm::Tiered => {
2063 0 : self.compact_tiered(cancel, ctx).await?;
2064 0 : Ok(CompactionOutcome::Done)
2065 : }
2066 192 : CompactionAlgorithm::Legacy => self.compact_legacy(cancel, options, ctx).await,
2067 : };
2068 :
2069 : // Signal compaction failure to avoid L0 flush stalls when it's broken.
2070 0 : match &result {
2071 192 : Ok(_) => self.compaction_failed.store(false, AtomicOrdering::Relaxed),
2072 0 : Err(e) if e.is_cancel() => {}
2073 0 : Err(CompactionError::ShuttingDown) => {
2074 0 : // Covered by the `Err(e) if e.is_cancel()` branch.
2075 0 : }
2076 0 : Err(CompactionError::AlreadyRunning(_)) => {
2077 0 : // Covered by the `Err(e) if e.is_cancel()` branch.
2078 0 : }
2079 : Err(CompactionError::Other(_)) => {
2080 0 : self.compaction_failed.store(true, AtomicOrdering::Relaxed)
2081 : }
2082 : Err(CompactionError::CollectKeySpaceError(_)) => {
2083 : // Cancelled errors are covered by the `Err(e) if e.is_cancel()` branch.
2084 0 : self.compaction_failed.store(true, AtomicOrdering::Relaxed)
2085 : }
2086 : // Don't change the current value on offload failure or shutdown. We don't want to
2087 : // abruptly stall nor resume L0 flushes in these cases.
2088 0 : Err(CompactionError::Offload(_)) => {}
2089 : };
2090 :
2091 192 : result
2092 192 : }
2093 :
2094 : /// Mutate the timeline with a [`TimelineWriter`].
2095 2566607 : pub(crate) async fn writer(&self) -> TimelineWriter<'_> {
2096 2566607 : TimelineWriter {
2097 2566607 : tl: self,
2098 2566607 : write_guard: self.write_lock.lock().await,
2099 : }
2100 2566607 : }
2101 :
2102 0 : pub(crate) fn activate(
2103 0 : self: &Arc<Self>,
2104 0 : parent: Arc<crate::tenant::TenantShard>,
2105 0 : broker_client: BrokerClientChannel,
2106 0 : background_jobs_can_start: Option<&completion::Barrier>,
2107 0 : ctx: &RequestContext,
2108 0 : ) {
2109 0 : if self.tenant_shard_id.is_shard_zero() {
2110 0 : // Logical size is only maintained accurately on shard zero.
2111 0 : self.spawn_initial_logical_size_computation_task(ctx);
2112 0 : }
2113 0 : self.launch_wal_receiver(ctx, broker_client);
2114 0 : self.set_state(TimelineState::Active);
2115 0 : self.launch_eviction_task(parent, background_jobs_can_start);
2116 0 : }
2117 :
2118 : /// After this function returns, no timeline-scoped tasks are left running.
2119 : ///
2120 : /// The preferred pattern is (see the sketch at the end of this comment):
2121 : /// - in any spawned tasks, keep Timeline::guard open + Timeline::cancel / child token
2122 : /// - if early shutdown (not just cancellation) of a sub-tree of tasks is required,
2123 : /// go the extra mile and keep track of JoinHandles
2124 : /// - Keep track of JoinHandles using a passed-down `Arc<Mutex<Option<JoinSet>>>` or similar,
2125 : /// instead of spawning directly on a runtime. It is a more composable / testable pattern.
2126 : ///
2127 : /// For legacy reasons, we still have multiple tasks spawned using
2128 : /// `task_mgr::spawn(X, Some(tenant_id), Some(timeline_id))`.
2129 : /// We refer to these as "timeline-scoped task_mgr tasks".
2130 : /// Some of these tasks are already sensitive to Timeline::cancel while others are
2131 : /// not sensitive to Timeline::cancel and instead respect [`task_mgr::shutdown_token`]
2132 : /// or [`task_mgr::shutdown_watcher`].
2133 : /// We want to gradually convert the code base away from these.
2134 : ///
2135 : /// Here is an inventory of timeline-scoped task_mgr tasks that are still sensitive to
2136 : /// `task_mgr::shutdown_{token,watcher}` (there are also tenant-scoped and global-scoped
2137 : /// ones that aren't mentioned here):
2138 : /// - [`TaskKind::TimelineDeletionWorker`]
2139 : /// - NB: also used for tenant deletion
2140 : /// - [`TaskKind::RemoteUploadTask`]
2141 : /// - [`TaskKind::InitialLogicalSizeCalculation`]
2142 : /// - [`TaskKind::DownloadAllRemoteLayers`] (can we get rid of it?)
2143 : // Inventory of timeline-scoped task_mgr tasks that use spawn but aren't sensitive:
2144 : /// - [`TaskKind::Eviction`]
2145 : /// - [`TaskKind::LayerFlushTask`]
2146 : /// - [`TaskKind::OndemandLogicalSizeCalculation`]
2147 : /// - [`TaskKind::GarbageCollector`] (immediate_gc is timeline-scoped)
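///
/// A hedged sketch of the preferred spawned-task pattern described above (hypothetical
/// `do_work()` and caller; not an existing helper in this file):
///
/// ```ignore
/// let tasks: Arc<Mutex<Option<tokio::task::JoinSet<()>>>> =
///     Arc::new(Mutex::new(Some(tokio::task::JoinSet::new())));
/// if let Some(set) = tasks.lock().unwrap().as_mut() {
///     let gate_guard = timeline.gate.enter()?;      // keep Timeline::gate open while running
///     let cancel = timeline.cancel.child_token();   // child of Timeline::cancel
///     set.spawn(async move {
///         let _gate_guard = gate_guard;
///         tokio::select! {
///             _ = cancel.cancelled() => {}
///             _ = do_work() => {}
///         }
///     });
/// }
/// // On shutdown: cancel the token, drain the JoinSet, then close the gate.
/// ```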
2148 5 : pub(crate) async fn shutdown(&self, mode: ShutdownMode) {
2149 5 : debug_assert_current_span_has_tenant_and_timeline_id();
2150 5 :
2151 5 : // Regardless of whether we're going to try_freeze_and_flush
2152 5 : // or not, stop ingesting any more data.
2153 5 : let walreceiver = self.walreceiver.lock().unwrap().take();
2154 5 : tracing::debug!(
2155 0 : is_some = walreceiver.is_some(),
2156 0 : "Waiting for WalReceiverManager..."
2157 : );
2158 5 : if let Some(walreceiver) = walreceiver {
2159 0 : walreceiver.shutdown().await;
2160 5 : }
2161 : // ... and inform any waiters for newer LSNs that there won't be any.
2162 5 : self.last_record_lsn.shutdown();
2163 5 :
2164 5 : if let ShutdownMode::FreezeAndFlush = mode {
2165 3 : let do_flush = if let Some((open, frozen)) = self
2166 3 : .layers
2167 3 : .read(LayerManagerLockHolder::Shutdown)
2168 3 : .await
2169 3 : .layer_map()
2170 3 : .map(|lm| (lm.open_layer.is_some(), lm.frozen_layers.len()))
2171 3 : .ok()
2172 3 : .filter(|(open, frozen)| *open || *frozen > 0)
2173 : {
2174 0 : if self.remote_client.is_archived() == Some(true) {
2175 : // No point flushing on shutdown for an archived timeline: it is not important
2176 : // to have it nice and fresh after our restart, and trying to flush here might
2177 : // race with trying to offload it (which also stops the flush loop)
2178 0 : false
2179 : } else {
2180 0 : tracing::info!(?open, frozen, "flushing and freezing on shutdown");
2181 0 : true
2182 : }
2183 : } else {
2184 : // this is double-shutdown, it'll be a no-op
2185 3 : true
2186 : };
2187 :
2188 : // we shut down walreceiver above, so, we won't add anything more
2189 : // to the InMemoryLayer; freeze it and wait for all frozen layers
2190 : // to reach the disk & upload queue, then shut the upload queue and
2191 : // wait for it to drain.
2192 3 : if do_flush {
2193 3 : match self.freeze_and_flush().await {
2194 : Ok(_) => {
2195 : // drain the upload queue
2196 : // if we did not wait for completion here, it might be that our shutdown process
2197 : // didn't wait for remote uploads to complete at all, since new tasks can be
2198 : // spawned forever.
2199 : //
2200 : // what is problematic is the shutting down of RemoteTimelineClient, because
2201 : // obviously it does not make sense to stop while we wait for it, but what
2202 : // about corner cases like s3 suddenly hanging up?
2203 3 : self.remote_client.shutdown().await;
2204 : }
2205 : Err(FlushLayerError::Cancelled) => {
2206 : // this is likely the second shutdown, ignore silently.
2207 : // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080
2208 0 : debug_assert!(self.cancel.is_cancelled());
2209 : }
2210 0 : Err(e) => {
2211 0 : // Non-fatal. Shutdown is infallible. Failures to flush just mean that
2212 0 : // we have some extra WAL replay to do next time the timeline starts.
2213 0 : warn!("failed to freeze and flush: {e:#}");
2214 : }
2215 : }
2216 :
2217 : // `self.remote_client.shutdown().await` above should have already flushed everything from the queue, but
2218 : // we also do a final check here to ensure that the queue is empty.
2219 3 : if !self.remote_client.no_pending_work() {
2220 0 : warn!(
2221 0 : "still have pending work in remote upload queue, but continuing shutting down anyways"
2222 : );
2223 3 : }
2224 0 : }
2225 2 : }
2226 :
2227 5 : if let ShutdownMode::Reload = mode {
2228 : // drain the upload queue
2229 1 : self.remote_client.shutdown().await;
2230 1 : if !self.remote_client.no_pending_work() {
2231 0 : warn!(
2232 0 : "still have pending work in remote upload queue, but continuing shutting down anyways"
2233 : );
2234 1 : }
2235 4 : }
2236 :
2237 : // Signal any subscribers to our cancellation token to drop out
2238 5 : tracing::debug!("Cancelling CancellationToken");
2239 5 : self.cancel.cancel();
2240 5 :
2241 5 : // If we have a background task downloading heatmap layers, stop it.
2242 5 : // The background downloads are sensitive to timeline cancellation (done above),
2243 5 : // so the drain will be immediate.
2244 5 : self.stop_and_drain_heatmap_layers_download().await;
2245 :
2246 : // Prevent new page service requests from starting.
2247 5 : self.handles.shutdown();
2248 5 :
2249 5 : // Transition the remote_client into a state where it's only useful for timeline deletion.
2250 5 : // (The deletion use case is why we can't just hook up remote_client to Self::cancel.)
2251 5 : self.remote_client.stop();
2252 5 :
2253 5 : // As documented in remote_client.stop()'s doc comment, it's our responsibility
2254 5 : // to shut down the upload queue tasks.
2255 5 : // TODO: fix that, task management should be encapsulated inside remote_client.
2256 5 : task_mgr::shutdown_tasks(
2257 5 : Some(TaskKind::RemoteUploadTask),
2258 5 : Some(self.tenant_shard_id),
2259 5 : Some(self.timeline_id),
2260 5 : )
2261 5 : .await;
2262 :
2263 : // TODO: work toward making this a no-op. See this function's doc comment for more context.
2264 5 : tracing::debug!("Waiting for tasks...");
2265 5 : task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), Some(self.timeline_id)).await;
2266 :
2267 : {
2268 : // Allow any remaining in-memory layers to do cleanup -- until that, they hold the gate
2269 : // open.
2270 5 : let mut write_guard = self.write_lock.lock().await;
2271 5 : self.layers
2272 5 : .write(LayerManagerLockHolder::Shutdown)
2273 5 : .await
2274 5 : .shutdown(&mut write_guard);
2275 5 : }
2276 5 :
2277 5 : // Finally wait until any gate-holders are complete.
2278 5 : //
2279 5 : // TODO: once above shutdown_tasks is a no-op, we can close the gate before calling shutdown_tasks
2280 5 : // and use a TBD variant of shutdown_tasks that asserts that there were no tasks left.
2281 5 : self.gate.close().await;
2282 :
2283 5 : self.metrics.shutdown();
2284 5 : }
2285 :
2286 235 : pub(crate) fn set_state(&self, new_state: TimelineState) {
2287 235 : match (self.current_state(), new_state) {
2288 235 : (equal_state_1, equal_state_2) if equal_state_1 == equal_state_2 => {
2289 1 : info!("Ignoring new state, equal to the existing one: {equal_state_2:?}");
2290 : }
2291 0 : (st, TimelineState::Loading) => {
2292 0 : error!("ignoring transition from {st:?} into Loading state");
2293 : }
2294 0 : (TimelineState::Broken { .. }, new_state) => {
2295 0 : error!("Ignoring state update {new_state:?} for broken timeline");
2296 : }
2297 : (TimelineState::Stopping, TimelineState::Active) => {
2298 0 : error!("Not activating a Stopping timeline");
2299 : }
2300 234 : (_, new_state) => {
2301 234 : self.state.send_replace(new_state);
2302 234 : }
2303 : }
2304 235 : }
2305 :
2306 1 : pub(crate) fn set_broken(&self, reason: String) {
2307 1 : let backtrace_str: String = format!("{}", std::backtrace::Backtrace::force_capture());
2308 1 : let broken_state = TimelineState::Broken {
2309 1 : reason,
2310 1 : backtrace: backtrace_str,
2311 1 : };
2312 1 : self.set_state(broken_state);
2313 1 :
2314 1 : // Although the Broken state is not equivalent to shutdown() (shutdown will be called
2315 1 : // later when this tenant is detached or the process shuts down), firing the cancellation token
2316 1 : // here avoids the need for other tasks to watch for the Broken state explicitly.
2317 1 : self.cancel.cancel();
2318 1 : }
2319 :
2320 113409 : pub(crate) fn current_state(&self) -> TimelineState {
2321 113409 : self.state.borrow().clone()
2322 113409 : }
2323 :
2324 3 : pub(crate) fn is_broken(&self) -> bool {
2325 3 : matches!(&*self.state.borrow(), TimelineState::Broken { .. })
2326 3 : }
2327 :
2328 126 : pub(crate) fn is_active(&self) -> bool {
2329 126 : self.current_state() == TimelineState::Active
2330 126 : }
2331 :
2332 8 : pub(crate) fn is_archived(&self) -> Option<bool> {
2333 8 : self.remote_client.is_archived()
2334 8 : }
2335 :
2336 8 : pub(crate) fn is_invisible(&self) -> Option<bool> {
2337 8 : self.remote_client.is_invisible()
2338 8 : }
2339 :
2340 194 : pub(crate) fn is_stopping(&self) -> bool {
2341 194 : self.current_state() == TimelineState::Stopping
2342 194 : }
2343 :
2344 0 : pub(crate) fn subscribe_for_state_updates(&self) -> watch::Receiver<TimelineState> {
2345 0 : self.state.subscribe()
2346 0 : }
2347 :
2348 112855 : pub(crate) async fn wait_to_become_active(
2349 112855 : &self,
2350 112855 : _ctx: &RequestContext, // Prepare for use by cancellation
2351 112855 : ) -> Result<(), TimelineState> {
2352 112855 : let mut receiver = self.state.subscribe();
2353 : loop {
2354 112855 : let current_state = receiver.borrow().clone();
2355 112855 : match current_state {
2356 : TimelineState::Loading => {
2357 0 : receiver
2358 0 : .changed()
2359 0 : .await
2360 0 : .expect("holding a reference to self");
2361 : }
2362 : TimelineState::Active => {
2363 112854 : return Ok(());
2364 : }
2365 : TimelineState::Broken { .. } | TimelineState::Stopping => {
2366 : // There's no chance the timeline can transition back into ::Active
2367 1 : return Err(current_state);
2368 : }
2369 : }
2370 : }
2371 112855 : }
2372 :
2373 0 : pub(crate) async fn layer_map_info(
2374 0 : &self,
2375 0 : reset: LayerAccessStatsReset,
2376 0 : ) -> Result<LayerMapInfo, layer_manager::Shutdown> {
2377 0 : let guard = self
2378 0 : .layers
2379 0 : .read(LayerManagerLockHolder::GetLayerMapInfo)
2380 0 : .await;
2381 0 : let layer_map = guard.layer_map()?;
2382 0 : let mut in_memory_layers = Vec::with_capacity(layer_map.frozen_layers.len() + 1);
2383 0 : if let Some(open_layer) = &layer_map.open_layer {
2384 0 : in_memory_layers.push(open_layer.info());
2385 0 : }
2386 0 : for frozen_layer in &layer_map.frozen_layers {
2387 0 : in_memory_layers.push(frozen_layer.info());
2388 0 : }
2389 :
2390 0 : let historic_layers = layer_map
2391 0 : .iter_historic_layers()
2392 0 : .map(|desc| guard.get_from_desc(&desc).info(reset))
2393 0 : .collect();
2394 0 :
2395 0 : Ok(LayerMapInfo {
2396 0 : in_memory_layers,
2397 0 : historic_layers,
2398 0 : })
2399 0 : }
2400 :
2401 : #[instrument(skip_all, fields(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))]
2402 : pub(crate) async fn download_layer(
2403 : &self,
2404 : layer_file_name: &LayerName,
2405 : ctx: &RequestContext,
2406 : ) -> Result<Option<bool>, super::storage_layer::layer::DownloadError> {
2407 : let Some(layer) = self
2408 : .find_layer(layer_file_name)
2409 : .await
2410 0 : .map_err(|e| match e {
2411 0 : layer_manager::Shutdown => {
2412 0 : super::storage_layer::layer::DownloadError::TimelineShutdown
2413 0 : }
2414 0 : })?
2415 : else {
2416 : return Ok(None);
2417 : };
2418 :
2419 : layer.download(ctx).await?;
2420 :
2421 : Ok(Some(true))
2422 : }
2423 :
2424 : /// Evict just one layer.
2425 : ///
2426 : /// Returns `Ok(None)` in the case where the layer could not be found by its `layer_file_name`.
2427 0 : pub(crate) async fn evict_layer(
2428 0 : &self,
2429 0 : layer_file_name: &LayerName,
2430 0 : ) -> anyhow::Result<Option<bool>> {
2431 0 : let _gate = self
2432 0 : .gate
2433 0 : .enter()
2434 0 : .map_err(|_| anyhow::anyhow!("Shutting down"))?;
2435 :
2436 0 : let Some(local_layer) = self.find_layer(layer_file_name).await? else {
2437 0 : return Ok(None);
2438 : };
2439 :
2440 : // curl has this by default
2441 0 : let timeout = std::time::Duration::from_secs(120);
2442 0 :
2443 0 : match local_layer.evict_and_wait(timeout).await {
2444 0 : Ok(()) => Ok(Some(true)),
2445 0 : Err(EvictionError::NotFound) => Ok(Some(false)),
2446 0 : Err(EvictionError::Downloaded) => Ok(Some(false)),
2447 0 : Err(EvictionError::Timeout) => Ok(Some(false)),
2448 : }
2449 0 : }
2450 :
2451 2401506 : fn should_roll(
2452 2401506 : &self,
2453 2401506 : layer_size: u64,
2454 2401506 : projected_layer_size: u64,
2455 2401506 : checkpoint_distance: u64,
2456 2401506 : projected_lsn: Lsn,
2457 2401506 : last_freeze_at: Lsn,
2458 2401506 : opened_at: Instant,
2459 2401506 : ) -> bool {
2460 2401506 : let distance = projected_lsn.widening_sub(last_freeze_at);
2461 2401506 :
2462 2401506 : // Rolling the open layer can be triggered by:
2463 2401506 : // 1. The distance from the last LSN we rolled at. This bounds the amount of WAL that
2464 2401506 : // the safekeepers need to store. For sharded tenants, we multiply by shard count to
2465 2401506 : // account for how writes are distributed across shards: we expect each node to consume
2466 2401506 : // 1/count of the LSN on average.
2467 2401506 : // 2. The size of the currently open layer.
2468 2401506 : // 3. The time since the last roll. It helps safekeepers to regard pageserver as caught
2469 2401506 : // up and suspend activity.
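// Worked example with illustrative numbers: with checkpoint_distance = 256 MiB and a
// shard count of 4, trigger (1) fires once `projected_lsn` is at least 1 GiB past
// `last_freeze_at`, since each shard is expected to ingest roughly 1/4 of the WAL.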
2470 2401506 : if distance >= checkpoint_distance as i128 * self.shard_identity.count.count() as i128 {
2471 0 : info!(
2472 0 : "Will roll layer at {} with layer size {} due to LSN distance ({})",
2473 : projected_lsn, layer_size, distance
2474 : );
2475 :
2476 0 : true
2477 2401506 : } else if projected_layer_size >= checkpoint_distance {
2478 : // NB: this check is relied upon by:
2479 40 : let _ = IndexEntry::validate_checkpoint_distance;
2480 40 : info!(
2481 0 : "Will roll layer at {} with layer size {} due to layer size ({})",
2482 : projected_lsn, layer_size, projected_layer_size
2483 : );
2484 :
2485 40 : true
2486 2401466 : } else if distance > 0 && opened_at.elapsed() >= self.get_checkpoint_timeout() {
2487 0 : info!(
2488 0 : "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})",
2489 0 : projected_lsn,
2490 0 : layer_size,
2491 0 : opened_at.elapsed()
2492 : );
2493 :
2494 0 : true
2495 : } else {
2496 2401466 : false
2497 : }
2498 2401506 : }
2499 :
2500 1 : pub(crate) fn is_basebackup_cache_enabled(&self) -> bool {
2501 1 : let tenant_conf = self.tenant_conf.load();
2502 1 : tenant_conf
2503 1 : .tenant_conf
2504 1 : .basebackup_cache_enabled
2505 1 : .unwrap_or(self.conf.default_tenant_conf.basebackup_cache_enabled)
2506 1 : }
2507 :
2508 : /// Prepare basebackup for the given LSN and store it in the basebackup cache.
2509 : /// The method is asynchronous and returns immediately.
2510 : /// The actual basebackup preparation is performed in the background
2511 : /// by the basebackup cache on a best-effort basis.
2512 1 : pub(crate) fn prepare_basebackup(&self, lsn: Lsn) {
2513 1 : if !self.is_basebackup_cache_enabled() {
2514 1 : return;
2515 0 : }
2516 0 : if !self.tenant_shard_id.is_shard_zero() {
2517 : // In theory we should never get here, but just in case check it.
2518 : // Preparing basebackup doesn't make sense for shards other than shard zero.
2519 0 : return;
2520 0 : }
2521 0 : if !self.is_active() {
2522 : // May happen during initial timeline creation.
2523 : // Such timeline is not in the global timeline map yet,
2524 : // so basebackup cache will not be able to find it.
2525 : // TODO(diko): We can prepare such timelines in finish_creation().
2526 0 : return;
2527 0 : }
2528 0 :
2529 0 : let res = self
2530 0 : .basebackup_prepare_sender
2531 0 : .send(BasebackupPrepareRequest {
2532 0 : tenant_shard_id: self.tenant_shard_id,
2533 0 : timeline_id: self.timeline_id,
2534 0 : lsn,
2535 0 : });
2536 0 : if let Err(e) = res {
2537 : // May happen during shutdown, it's not critical.
2538 0 : info!("Failed to send shutdown checkpoint: {e:#}");
2539 0 : }
2540 1 : }
2541 : }
2542 :
2543 : /// Number of times we will compute partition within a checkpoint distance.
2544 : const REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE: u64 = 10;
2545 :
2546 : // Private functions
2547 : impl Timeline {
2548 6 : pub(crate) fn get_lsn_lease_length(&self) -> Duration {
2549 6 : let tenant_conf = self.tenant_conf.load();
2550 6 : tenant_conf
2551 6 : .tenant_conf
2552 6 : .lsn_lease_length
2553 6 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
2554 6 : }
2555 :
2556 0 : pub(crate) fn get_lsn_lease_length_for_ts(&self) -> Duration {
2557 0 : let tenant_conf = self.tenant_conf.load();
2558 0 : tenant_conf
2559 0 : .tenant_conf
2560 0 : .lsn_lease_length_for_ts
2561 0 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length_for_ts)
2562 0 : }
2563 :
2564 0 : pub(crate) fn is_gc_blocked_by_lsn_lease_deadline(&self) -> bool {
2565 0 : let tenant_conf = self.tenant_conf.load();
2566 0 : tenant_conf.is_gc_blocked_by_lsn_lease_deadline()
2567 0 : }
2568 :
2569 0 : pub(crate) fn get_lazy_slru_download(&self) -> bool {
2570 0 : let tenant_conf = self.tenant_conf.load();
2571 0 : tenant_conf
2572 0 : .tenant_conf
2573 0 : .lazy_slru_download
2574 0 : .unwrap_or(self.conf.default_tenant_conf.lazy_slru_download)
2575 0 : }
2576 :
2577 : /// Checks if a get page request should get perf tracing
2578 : ///
2579 : /// The configuration priority is: tenant config override, default tenant config,
2580 : /// pageserver config.
2581 0 : pub(crate) fn is_get_page_request_sampled(&self) -> bool {
2582 0 : let tenant_conf = self.tenant_conf.load();
2583 0 : let ratio = tenant_conf
2584 0 : .tenant_conf
2585 0 : .sampling_ratio
2586 0 : .flatten()
2587 0 : .or(self.conf.default_tenant_conf.sampling_ratio)
2588 0 : .or(self.conf.tracing.as_ref().map(|t| t.sampling_ratio));
2589 0 :
2590 0 : match ratio {
2591 0 : Some(r) => {
2592 0 : if r.numerator == 0 {
2593 0 : false
2594 : } else {
2595 0 : rand::thread_rng().gen_range(0..r.denominator) < r.numerator
2596 : }
2597 : }
2598 0 : None => false,
2599 : }
2600 0 : }
2601 :
2602 2402297 : fn get_checkpoint_distance(&self) -> u64 {
2603 2402297 : let tenant_conf = self.tenant_conf.load();
2604 2402297 : tenant_conf
2605 2402297 : .tenant_conf
2606 2402297 : .checkpoint_distance
2607 2402297 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
2608 2402297 : }
2609 :
2610 2401466 : fn get_checkpoint_timeout(&self) -> Duration {
2611 2401466 : let tenant_conf = self.tenant_conf.load();
2612 2401466 : tenant_conf
2613 2401466 : .tenant_conf
2614 2401466 : .checkpoint_timeout
2615 2401466 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
2616 2401466 : }
2617 :
2618 0 : pub(crate) fn get_pitr_interval(&self) -> Duration {
2619 0 : let tenant_conf = &self.tenant_conf.load().tenant_conf;
2620 0 : tenant_conf
2621 0 : .pitr_interval
2622 0 : .unwrap_or(self.conf.default_tenant_conf.pitr_interval)
2623 0 : }
2624 :
2625 1271 : fn get_compaction_period(&self) -> Duration {
2626 1271 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2627 1271 : tenant_conf
2628 1271 : .compaction_period
2629 1271 : .unwrap_or(self.conf.default_tenant_conf.compaction_period)
2630 1271 : }
2631 :
2632 351 : fn get_compaction_target_size(&self) -> u64 {
2633 351 : let tenant_conf = self.tenant_conf.load();
2634 351 : tenant_conf
2635 351 : .tenant_conf
2636 351 : .compaction_target_size
2637 351 : .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
2638 351 : }
2639 :
2640 817 : fn get_compaction_threshold(&self) -> usize {
2641 817 : let tenant_conf = self.tenant_conf.load();
2642 817 : tenant_conf
2643 817 : .tenant_conf
2644 817 : .compaction_threshold
2645 817 : .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
2646 817 : }
2647 :
2648 : /// Returns `true` if the rel_size_v2 config is enabled. NOTE: the write path and read path
2649 : /// should look at `get_rel_size_v2_status()` to get the actual status of the timeline. It is
2650 : /// possible that the index part persists the state while the config doesn't get persisted.
2651 973 : pub(crate) fn get_rel_size_v2_enabled(&self) -> bool {
2652 973 : let tenant_conf = self.tenant_conf.load();
2653 973 : tenant_conf
2654 973 : .tenant_conf
2655 973 : .rel_size_v2_enabled
2656 973 : .unwrap_or(self.conf.default_tenant_conf.rel_size_v2_enabled)
2657 973 : }
2658 :
2659 1099 : pub(crate) fn get_rel_size_v2_status(&self) -> RelSizeMigration {
2660 1099 : self.rel_size_v2_status
2661 1099 : .load()
2662 1099 : .as_ref()
2663 1099 : .map(|s| s.as_ref().clone())
2664 1099 : .unwrap_or(RelSizeMigration::Legacy)
2665 1099 : }
2666 :
2667 23 : fn get_compaction_upper_limit(&self) -> usize {
2668 23 : let tenant_conf = self.tenant_conf.load();
2669 23 : tenant_conf
2670 23 : .tenant_conf
2671 23 : .compaction_upper_limit
2672 23 : .unwrap_or(self.conf.default_tenant_conf.compaction_upper_limit)
2673 23 : }
2674 :
2675 0 : pub fn get_compaction_l0_first(&self) -> bool {
2676 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2677 0 : tenant_conf
2678 0 : .compaction_l0_first
2679 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_l0_first)
2680 0 : }
2681 :
2682 0 : pub fn get_compaction_l0_semaphore(&self) -> bool {
2683 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2684 0 : tenant_conf
2685 0 : .compaction_l0_semaphore
2686 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_l0_semaphore)
2687 0 : }
2688 :
2689 635 : fn get_l0_flush_delay_threshold(&self) -> Option<usize> {
2690 : // By default, delay L0 flushes at 3x the compaction threshold. The compaction threshold
2691 : // defaults to 10, and L0 compaction is generally able to keep L0 counts below 30.
2692 : const DEFAULT_L0_FLUSH_DELAY_FACTOR: usize = 3;
2693 :
2694 : // If compaction is disabled, don't delay.
2695 635 : if self.get_compaction_period() == Duration::ZERO {
2696 632 : return None;
2697 3 : }
2698 3 :
2699 3 : let compaction_threshold = self.get_compaction_threshold();
2700 3 : let tenant_conf = self.tenant_conf.load();
2701 3 : let l0_flush_delay_threshold = tenant_conf
2702 3 : .tenant_conf
2703 3 : .l0_flush_delay_threshold
2704 3 : .or(self.conf.default_tenant_conf.l0_flush_delay_threshold)
2705 3 : .unwrap_or(DEFAULT_L0_FLUSH_DELAY_FACTOR * compaction_threshold);
2706 3 :
2707 3 : // 0 disables backpressure.
2708 3 : if l0_flush_delay_threshold == 0 {
2709 0 : return None;
2710 3 : }
2711 3 :
2712 3 : // Clamp the flush delay threshold to the compaction threshold; it doesn't make sense to
2713 3 : // backpressure flushes below this.
2714 3 : // TODO: the tenant config should have validation to prevent this instead.
2715 3 : debug_assert!(l0_flush_delay_threshold >= compaction_threshold);
2716 3 : Some(max(l0_flush_delay_threshold, compaction_threshold))
2717 635 : }
2718 :
2719 636 : fn get_l0_flush_stall_threshold(&self) -> Option<usize> {
2720 : // Disable L0 stalls by default. Stalling can cause unavailability if L0 compaction isn't
2721 : // responsive, and it can e.g. block on other compaction via the compaction semaphore or
2722 : // sibling timelines. We need more confidence before enabling this.
2723 : const DEFAULT_L0_FLUSH_STALL_FACTOR: usize = 0; // TODO: default to e.g. 5
2724 :
2725 : // If compaction is disabled, don't stall.
2726 636 : if self.get_compaction_period() == Duration::ZERO {
2727 632 : return None;
2728 4 : }
2729 4 :
2730 4 : // If compaction is failing, don't stall and try to keep the tenant alive. This may not be a
2731 4 : // good idea: read amp can grow unbounded, leading to terrible performance, and we may take
2732 4 : // on unbounded compaction debt that can take a long time to fix once compaction comes back
2733 4 : // online. At least we'll delay flushes, slowing down the growth and buying some time.
2734 4 : if self.compaction_failed.load(AtomicOrdering::Relaxed) {
2735 0 : return None;
2736 4 : }
2737 4 :
2738 4 : let compaction_threshold = self.get_compaction_threshold();
2739 4 : let tenant_conf = self.tenant_conf.load();
2740 4 : let l0_flush_stall_threshold = tenant_conf
2741 4 : .tenant_conf
2742 4 : .l0_flush_stall_threshold
2743 4 : .or(self.conf.default_tenant_conf.l0_flush_stall_threshold);
2744 4 :
2745 4 : // Tests sometimes set compaction_threshold=1 to generate lots of layer files, and don't
2746 4 : // handle the 20-second compaction delay. Some (e.g. `test_backward_compatibility`) can't
2747 4 : // easily adjust the L0 backpressure settings, so just disable stalls in this case.
2748 4 : if cfg!(feature = "testing")
2749 4 : && compaction_threshold == 1
2750 0 : && l0_flush_stall_threshold.is_none()
2751 : {
2752 0 : return None;
2753 4 : }
2754 4 :
2755 4 : let l0_flush_stall_threshold = l0_flush_stall_threshold
2756 4 : .unwrap_or(DEFAULT_L0_FLUSH_STALL_FACTOR * compaction_threshold);
2757 4 :
2758 4 : // 0 disables backpressure.
2759 4 : if l0_flush_stall_threshold == 0 {
2760 4 : return None;
2761 0 : }
2762 0 :
2763 0 : // Clamp the flush stall threshold to the compaction threshold; it doesn't make sense to
2764 0 : // backpressure flushes below this.
2765 0 : // TODO: the tenant config should have validation to prevent this instead.
2766 0 : debug_assert!(l0_flush_stall_threshold >= compaction_threshold);
2767 0 : Some(max(l0_flush_stall_threshold, compaction_threshold))
2768 636 : }
2769 :
2770 7 : fn get_image_creation_threshold(&self) -> usize {
2771 7 : let tenant_conf = self.tenant_conf.load();
2772 7 : tenant_conf
2773 7 : .tenant_conf
2774 7 : .image_creation_threshold
2775 7 : .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
2776 7 : }
2777 :
2778 192 : fn get_compaction_algorithm_settings(&self) -> CompactionAlgorithmSettings {
2779 192 : let tenant_conf = &self.tenant_conf.load();
2780 192 : tenant_conf
2781 192 : .tenant_conf
2782 192 : .compaction_algorithm
2783 192 : .as_ref()
2784 192 : .unwrap_or(&self.conf.default_tenant_conf.compaction_algorithm)
2785 192 : .clone()
2786 192 : }
2787 :
2788 192 : pub fn get_compaction_shard_ancestor(&self) -> bool {
2789 192 : let tenant_conf = self.tenant_conf.load();
2790 192 : tenant_conf
2791 192 : .tenant_conf
2792 192 : .compaction_shard_ancestor
2793 192 : .unwrap_or(self.conf.default_tenant_conf.compaction_shard_ancestor)
2794 192 : }
2795 :
2796 0 : fn get_eviction_policy(&self) -> EvictionPolicy {
2797 0 : let tenant_conf = self.tenant_conf.load();
2798 0 : tenant_conf
2799 0 : .tenant_conf
2800 0 : .eviction_policy
2801 0 : .unwrap_or(self.conf.default_tenant_conf.eviction_policy)
2802 0 : }
2803 :
2804 234 : fn get_evictions_low_residence_duration_metric_threshold(
2805 234 : tenant_conf: &pageserver_api::models::TenantConfig,
2806 234 : default_tenant_conf: &pageserver_api::config::TenantConfigToml,
2807 234 : ) -> Duration {
2808 234 : tenant_conf
2809 234 : .evictions_low_residence_duration_metric_threshold
2810 234 : .unwrap_or(default_tenant_conf.evictions_low_residence_duration_metric_threshold)
2811 234 : }
2812 :
2813 302 : fn get_image_layer_creation_check_threshold(&self) -> u8 {
2814 302 : let tenant_conf = self.tenant_conf.load();
2815 302 : tenant_conf
2816 302 : .tenant_conf
2817 302 : .image_layer_creation_check_threshold
2818 302 : .unwrap_or(
2819 302 : self.conf
2820 302 : .default_tenant_conf
2821 302 : .image_layer_creation_check_threshold,
2822 302 : )
2823 302 : }
2824 :
2825 27 : fn get_gc_compaction_settings(&self) -> GcCompactionCombinedSettings {
2826 27 : let tenant_conf = &self.tenant_conf.load();
2827 27 : let gc_compaction_enabled = tenant_conf
2828 27 : .tenant_conf
2829 27 : .gc_compaction_enabled
2830 27 : .unwrap_or(self.conf.default_tenant_conf.gc_compaction_enabled);
2831 27 : let gc_compaction_verification = tenant_conf
2832 27 : .tenant_conf
2833 27 : .gc_compaction_verification
2834 27 : .unwrap_or(self.conf.default_tenant_conf.gc_compaction_verification);
2835 27 : let gc_compaction_initial_threshold_kb = tenant_conf
2836 27 : .tenant_conf
2837 27 : .gc_compaction_initial_threshold_kb
2838 27 : .unwrap_or(
2839 27 : self.conf
2840 27 : .default_tenant_conf
2841 27 : .gc_compaction_initial_threshold_kb,
2842 27 : );
2843 27 : let gc_compaction_ratio_percent = tenant_conf
2844 27 : .tenant_conf
2845 27 : .gc_compaction_ratio_percent
2846 27 : .unwrap_or(self.conf.default_tenant_conf.gc_compaction_ratio_percent);
2847 27 : GcCompactionCombinedSettings {
2848 27 : gc_compaction_enabled,
2849 27 : gc_compaction_verification,
2850 27 : gc_compaction_initial_threshold_kb,
2851 27 : gc_compaction_ratio_percent,
2852 27 : }
2853 27 : }
2854 :
2855 0 : fn get_image_creation_preempt_threshold(&self) -> usize {
2856 0 : let tenant_conf = self.tenant_conf.load();
2857 0 : tenant_conf
2858 0 : .tenant_conf
2859 0 : .image_creation_preempt_threshold
2860 0 : .unwrap_or(
2861 0 : self.conf
2862 0 : .default_tenant_conf
2863 0 : .image_creation_preempt_threshold,
2864 0 : )
2865 0 : }
2866 :
2867 0 : pub(super) fn tenant_conf_updated(&self, new_conf: &AttachedTenantConf) {
2868 0 : // NB: Most tenant conf options are read by background loops, so,
2869 0 : // changes will automatically be picked up.
2870 0 :
2871 0 : // The threshold is embedded in the metric. So, we need to update it.
2872 0 : {
2873 0 : let new_threshold = Self::get_evictions_low_residence_duration_metric_threshold(
2874 0 : &new_conf.tenant_conf,
2875 0 : &self.conf.default_tenant_conf,
2876 0 : );
2877 0 :
2878 0 : let tenant_id_str = self.tenant_shard_id.tenant_id.to_string();
2879 0 : let shard_id_str = format!("{}", self.tenant_shard_id.shard_slug());
2880 0 :
2881 0 : let timeline_id_str = self.timeline_id.to_string();
2882 0 :
2883 0 : self.remote_client.update_config(&new_conf.location);
2884 0 :
2885 0 : let mut rel_size_cache = self.rel_size_snapshot_cache.lock().unwrap();
2886 0 : if let Some(new_capacity) = new_conf.tenant_conf.relsize_snapshot_cache_capacity {
2887 0 : if new_capacity != rel_size_cache.capacity() {
2888 0 : rel_size_cache.set_capacity(new_capacity);
2889 0 : }
2890 0 : }
2891 :
2892 0 : self.metrics
2893 0 : .evictions_with_low_residence_duration
2894 0 : .write()
2895 0 : .unwrap()
2896 0 : .change_threshold(
2897 0 : &tenant_id_str,
2898 0 : &shard_id_str,
2899 0 : &timeline_id_str,
2900 0 : new_threshold,
2901 0 : );
2902 0 : }
2903 0 : }
2904 :
2905 : /// Open a Timeline handle.
2906 : ///
2907 : /// Loads the metadata for the timeline into memory, but not the layer map.
2908 : #[allow(clippy::too_many_arguments)]
2909 234 : pub(super) fn new(
2910 234 : conf: &'static PageServerConf,
2911 234 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
2912 234 : metadata: &TimelineMetadata,
2913 234 : previous_heatmap: Option<PreviousHeatmap>,
2914 234 : ancestor: Option<Arc<Timeline>>,
2915 234 : timeline_id: TimelineId,
2916 234 : tenant_shard_id: TenantShardId,
2917 234 : generation: Generation,
2918 234 : shard_identity: ShardIdentity,
2919 234 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
2920 234 : resources: TimelineResources,
2921 234 : pg_version: u32,
2922 234 : state: TimelineState,
2923 234 : attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
2924 234 : create_idempotency: crate::tenant::CreateTimelineIdempotency,
2925 234 : gc_compaction_state: Option<GcCompactionState>,
2926 234 : rel_size_v2_status: Option<RelSizeMigration>,
2927 234 : cancel: CancellationToken,
2928 234 : ) -> Arc<Self> {
2929 234 : let disk_consistent_lsn = metadata.disk_consistent_lsn();
2930 234 : let (state, _) = watch::channel(state);
2931 234 :
2932 234 : let (layer_flush_start_tx, _) = tokio::sync::watch::channel((0, disk_consistent_lsn));
2933 234 : let (layer_flush_done_tx, _) = tokio::sync::watch::channel((0, Ok(())));
2934 234 :
2935 234 : let evictions_low_residence_duration_metric_threshold = {
2936 234 : let loaded_tenant_conf = tenant_conf.load();
2937 234 : Self::get_evictions_low_residence_duration_metric_threshold(
2938 234 : &loaded_tenant_conf.tenant_conf,
2939 234 : &conf.default_tenant_conf,
2940 234 : )
2941 : };
2942 :
2943 234 : if let Some(ancestor) = &ancestor {
2944 118 : let mut ancestor_gc_info = ancestor.gc_info.write().unwrap();
2945 118 : // If we construct an explicit timeline object, it's obviously not offloaded
2946 118 : let is_offloaded = MaybeOffloaded::No;
2947 118 : ancestor_gc_info.insert_child(timeline_id, metadata.ancestor_lsn(), is_offloaded);
2948 118 : }
2949 :
2950 234 : let relsize_snapshot_cache_capacity = {
2951 234 : let loaded_tenant_conf = tenant_conf.load();
2952 234 : loaded_tenant_conf
2953 234 : .tenant_conf
2954 234 : .relsize_snapshot_cache_capacity
2955 234 : .unwrap_or(conf.default_tenant_conf.relsize_snapshot_cache_capacity)
2956 234 : };
2957 234 :
2958 234 : Arc::new_cyclic(|myself| {
2959 234 : let metrics = Arc::new(TimelineMetrics::new(
2960 234 : &tenant_shard_id,
2961 234 : &timeline_id,
2962 234 : crate::metrics::EvictionsWithLowResidenceDurationBuilder::new(
2963 234 : "mtime",
2964 234 : evictions_low_residence_duration_metric_threshold,
2965 234 : ),
2966 234 : ));
2967 234 : let aux_file_metrics = metrics.aux_file_size_gauge.clone();
2968 :
2969 234 : let mut result = Timeline {
2970 234 : conf,
2971 234 : tenant_conf,
2972 234 : myself: myself.clone(),
2973 234 : timeline_id,
2974 234 : tenant_shard_id,
2975 234 : generation,
2976 234 : shard_identity,
2977 234 : pg_version,
2978 234 : layers: Default::default(),
2979 234 : gc_compaction_layer_update_lock: tokio::sync::RwLock::new(()),
2980 234 :
2981 234 : walredo_mgr,
2982 234 : walreceiver: Mutex::new(None),
2983 234 :
2984 234 : remote_client: Arc::new(resources.remote_client),
2985 234 :
2986 234 : // initialize in-memory 'last_record_lsn' from 'disk_consistent_lsn'.
2987 234 : last_record_lsn: SeqWait::new(RecordLsn {
2988 234 : last: disk_consistent_lsn,
2989 234 : prev: metadata.prev_record_lsn().unwrap_or(Lsn(0)),
2990 234 : }),
2991 234 : disk_consistent_lsn: AtomicLsn::new(disk_consistent_lsn.0),
2992 234 :
2993 234 : gc_compaction_state: ArcSwap::new(Arc::new(gc_compaction_state)),
2994 234 :
2995 234 : last_freeze_at: AtomicLsn::new(disk_consistent_lsn.0),
2996 234 : last_freeze_ts: RwLock::new(Instant::now()),
2997 234 :
2998 234 : loaded_at: (disk_consistent_lsn, SystemTime::now()),
2999 234 :
3000 234 : ancestor_timeline: ancestor,
3001 234 : ancestor_lsn: metadata.ancestor_lsn(),
3002 234 :
3003 234 : metrics,
3004 234 :
3005 234 : query_metrics: crate::metrics::SmgrQueryTimePerTimeline::new(
3006 234 : &tenant_shard_id,
3007 234 : &timeline_id,
3008 234 : resources.pagestream_throttle_metrics,
3009 234 : ),
3010 234 :
3011 1872 : directory_metrics: array::from_fn(|_| AtomicU64::new(0)),
3012 1872 : directory_metrics_inited: array::from_fn(|_| AtomicBool::new(false)),
3013 234 :
3014 234 : flush_loop_state: Mutex::new(FlushLoopState::NotStarted),
3015 234 :
3016 234 : layer_flush_start_tx,
3017 234 : layer_flush_done_tx,
3018 234 :
3019 234 : write_lock: tokio::sync::Mutex::new(None),
3020 234 :
3021 234 : gc_info: std::sync::RwLock::new(GcInfo::default()),
3022 234 :
3023 234 : last_image_layer_creation_status: ArcSwap::new(Arc::new(
3024 234 : LastImageLayerCreationStatus::default(),
3025 234 : )),
3026 234 :
3027 234 : applied_gc_cutoff_lsn: Rcu::new(metadata.latest_gc_cutoff_lsn()),
3028 234 : initdb_lsn: metadata.initdb_lsn(),
3029 234 :
3030 234 : current_logical_size: if disk_consistent_lsn.is_valid() {
3031 : // we're creating timeline data with some layer files existing locally,
3032 : // need to recalculate timeline's logical size based on data in the layers.
3033 120 : LogicalSize::deferred_initial(disk_consistent_lsn)
3034 : } else {
3035 : // we're creating timeline data without any layers existing locally,
3036 : // initial logical size is 0.
3037 114 : LogicalSize::empty_initial()
3038 : },
3039 :
3040 234 : partitioning: GuardArcSwap::new((
3041 234 : (KeyPartitioning::new(), KeyPartitioning::new().into_sparse()),
3042 234 : Lsn(0),
3043 234 : )),
3044 234 : repartition_threshold: 0,
3045 234 : last_image_layer_creation_check_at: AtomicLsn::new(0),
3046 234 : last_image_layer_creation_check_instant: Mutex::new(None),
3047 234 :
3048 234 : last_received_wal: Mutex::new(None),
3049 234 : rel_size_latest_cache: RwLock::new(HashMap::new()),
3050 234 : rel_size_snapshot_cache: Mutex::new(LruCache::new(relsize_snapshot_cache_capacity)),
3051 234 :
3052 234 : download_all_remote_layers_task_info: RwLock::new(None),
3053 234 :
3054 234 : state,
3055 234 :
3056 234 : eviction_task_timeline_state: tokio::sync::Mutex::new(
3057 234 : EvictionTaskTimelineState::default(),
3058 234 : ),
3059 234 : delete_progress: TimelineDeleteProgress::default(),
3060 234 :
3061 234 : cancel,
3062 234 : gate: Gate::default(),
3063 234 :
3064 234 : compaction_lock: tokio::sync::Mutex::default(),
3065 234 : compaction_failed: AtomicBool::default(),
3066 234 : l0_compaction_trigger: resources.l0_compaction_trigger,
3067 234 : gc_lock: tokio::sync::Mutex::default(),
3068 234 :
3069 234 : standby_horizon: AtomicLsn::new(0),
3070 234 :
3071 234 : pagestream_throttle: resources.pagestream_throttle,
3072 234 :
3073 234 : aux_file_size_estimator: AuxFileSizeEstimator::new(aux_file_metrics),
3074 234 :
3075 234 : #[cfg(test)]
3076 234 : extra_test_dense_keyspace: ArcSwap::new(Arc::new(KeySpace::default())),
3077 234 :
3078 234 : l0_flush_global_state: resources.l0_flush_global_state,
3079 234 :
3080 234 : handles: Default::default(),
3081 234 :
3082 234 : attach_wal_lag_cooldown,
3083 234 :
3084 234 : create_idempotency,
3085 234 :
3086 234 : page_trace: Default::default(),
3087 234 :
3088 234 : previous_heatmap: ArcSwapOption::from_pointee(previous_heatmap),
3089 234 :
3090 234 : heatmap_layers_downloader: Mutex::new(None),
3091 234 :
3092 234 : rel_size_v2_status: ArcSwapOption::from_pointee(rel_size_v2_status),
3093 234 :
3094 234 : wait_lsn_log_slow: tokio::sync::Semaphore::new(1),
3095 234 :
3096 234 : basebackup_prepare_sender: resources.basebackup_prepare_sender,
3097 234 :
3098 234 : feature_resolver: resources.feature_resolver,
3099 234 : };
3100 234 :
3101 234 : result.repartition_threshold =
3102 234 : result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE;
3103 234 :
3104 234 : result
3105 234 : .metrics
3106 234 : .last_record_lsn_gauge
3107 234 : .set(disk_consistent_lsn.0 as i64);
3108 234 : result
3109 234 : })
3110 234 : }
3111 :
3112 341 : pub(super) fn maybe_spawn_flush_loop(self: &Arc<Self>) {
3113 341 : let Ok(guard) = self.gate.enter() else {
3114 0 : info!("cannot start flush loop when the timeline gate has already been closed");
3115 0 : return;
3116 : };
3117 341 : let mut flush_loop_state = self.flush_loop_state.lock().unwrap();
3118 341 : match *flush_loop_state {
3119 231 : FlushLoopState::NotStarted => (),
3120 : FlushLoopState::Running { .. } => {
3121 110 : info!(
3122 0 : "skipping attempt to start flush_loop twice {}/{}",
3123 0 : self.tenant_shard_id, self.timeline_id
3124 : );
3125 110 : return;
3126 : }
3127 : FlushLoopState::Exited => {
3128 0 : info!(
3129 0 : "ignoring attempt to restart exited flush_loop {}/{}",
3130 0 : self.tenant_shard_id, self.timeline_id
3131 : );
3132 0 : return;
3133 : }
3134 : }
3135 :
3136 231 : let layer_flush_start_rx = self.layer_flush_start_tx.subscribe();
3137 231 : let self_clone = Arc::clone(self);
3138 231 :
3139 231 : debug!("spawning flush loop");
3140 231 : *flush_loop_state = FlushLoopState::Running {
3141 231 : #[cfg(test)]
3142 231 : expect_initdb_optimization: false,
3143 231 : #[cfg(test)]
3144 231 : initdb_optimization_count: 0,
3145 231 : };
3146 231 : task_mgr::spawn(
3147 231 : task_mgr::BACKGROUND_RUNTIME.handle(),
3148 231 : task_mgr::TaskKind::LayerFlushTask,
3149 231 : self.tenant_shard_id,
3150 231 : Some(self.timeline_id),
3151 231 : "layer flush task",
3152 231 : async move {
3153 231 : let _guard = guard;
3154 231 : let background_ctx = RequestContext::todo_child(TaskKind::LayerFlushTask, DownloadBehavior::Error).with_scope_timeline(&self_clone);
3155 231 : self_clone.flush_loop(layer_flush_start_rx, &background_ctx).await;
3156 5 : let mut flush_loop_state = self_clone.flush_loop_state.lock().unwrap();
3157 5 : assert!(matches!(*flush_loop_state, FlushLoopState::Running{..}));
3158 5 : *flush_loop_state = FlushLoopState::Exited;
3159 5 : Ok(())
3160 5 : }
3161 231 : .instrument(info_span!(parent: None, "layer flush task", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
3162 : );
3163 341 : }
3164 :
3165 0 : pub(crate) fn update_gc_compaction_state(
3166 0 : &self,
3167 0 : gc_compaction_state: GcCompactionState,
3168 0 : ) -> anyhow::Result<()> {
3169 0 : self.gc_compaction_state
3170 0 : .store(Arc::new(Some(gc_compaction_state.clone())));
3171 0 : self.remote_client
3172 0 : .schedule_index_upload_for_gc_compaction_state_update(gc_compaction_state)
3173 0 : }
3174 :
3175 0 : pub(crate) fn update_rel_size_v2_status(
3176 0 : &self,
3177 0 : rel_size_v2_status: RelSizeMigration,
3178 0 : ) -> anyhow::Result<()> {
3179 0 : self.rel_size_v2_status
3180 0 : .store(Some(Arc::new(rel_size_v2_status.clone())));
3181 0 : self.remote_client
3182 0 : .schedule_index_upload_for_rel_size_v2_status_update(rel_size_v2_status)
3183 0 : }
3184 :
3185 0 : pub(crate) fn get_gc_compaction_state(&self) -> Option<GcCompactionState> {
3186 0 : self.gc_compaction_state.load_full().as_ref().clone()
3187 0 : }
3188 :
3189 : /// Creates and starts the wal receiver.
3190 : ///
3191 : /// This function is expected to be called at most once per Timeline's lifecycle
3192 : /// when the timeline is activated.
3193 0 : fn launch_wal_receiver(
3194 0 : self: &Arc<Self>,
3195 0 : ctx: &RequestContext,
3196 0 : broker_client: BrokerClientChannel,
3197 0 : ) {
3198 0 : info!(
3199 0 : "launching WAL receiver for timeline {} of tenant {}",
3200 0 : self.timeline_id, self.tenant_shard_id
3201 : );
3202 :
3203 0 : let tenant_conf = self.tenant_conf.load();
3204 0 : let wal_connect_timeout = tenant_conf
3205 0 : .tenant_conf
3206 0 : .walreceiver_connect_timeout
3207 0 : .unwrap_or(self.conf.default_tenant_conf.walreceiver_connect_timeout);
3208 0 : let lagging_wal_timeout = tenant_conf
3209 0 : .tenant_conf
3210 0 : .lagging_wal_timeout
3211 0 : .unwrap_or(self.conf.default_tenant_conf.lagging_wal_timeout);
3212 0 : let max_lsn_wal_lag = tenant_conf
3213 0 : .tenant_conf
3214 0 : .max_lsn_wal_lag
3215 0 : .unwrap_or(self.conf.default_tenant_conf.max_lsn_wal_lag);
3216 0 :
3217 0 : let mut guard = self.walreceiver.lock().unwrap();
3218 0 : assert!(
3219 0 : guard.is_none(),
3220 0 : "multiple launches / re-launches of WAL receiver are not supported"
3221 : );
3222 :
3223 0 : let protocol = PostgresClientProtocol::Interpreted {
3224 0 : format: utils::postgres_client::InterpretedFormat::Protobuf,
3225 0 : compression: Some(utils::postgres_client::Compression::Zstd { level: 1 }),
3226 0 : };
3227 0 :
3228 0 : *guard = Some(WalReceiver::start(
3229 0 : Arc::clone(self),
3230 0 : WalReceiverConf {
3231 0 : protocol,
3232 0 : wal_connect_timeout,
3233 0 : lagging_wal_timeout,
3234 0 : max_lsn_wal_lag,
3235 0 : auth_token: crate::config::SAFEKEEPER_AUTH_TOKEN.get().cloned(),
3236 0 : availability_zone: self.conf.availability_zone.clone(),
3237 0 : ingest_batch_size: self.conf.ingest_batch_size,
3238 0 : validate_wal_contiguity: self.conf.validate_wal_contiguity,
3239 0 : },
3240 0 : broker_client,
3241 0 : ctx,
3242 0 : ));
3243 0 : }
3244 :
3245 : /// Initialize with an empty layer map. Used when creating a new timeline.
3246 231 : pub(super) fn init_empty_layer_map(&self, start_lsn: Lsn) {
3247 231 : let mut layers = self.layers.try_write(LayerManagerLockHolder::Init).expect(
3248 231 : "in the context where we call this function, no other task has access to the object",
3249 231 : );
3250 231 : layers
3251 231 : .open_mut()
3252 231 : .expect("in this context the LayerManager must still be open")
3253 231 : .initialize_empty(Lsn(start_lsn.0));
3254 231 : }
3255 :
3256 : /// Scan the timeline directory, cleanup, populate the layer map, and schedule uploads for local-only
3257 : /// files.
3258 3 : pub(super) async fn load_layer_map(
3259 3 : &self,
3260 3 : disk_consistent_lsn: Lsn,
3261 3 : index_part: IndexPart,
3262 3 : ) -> anyhow::Result<()> {
3263 : use LayerName::*;
3264 : use init::Decision::*;
3265 : use init::{Discovered, DismissedLayer};
3266 :
3267 3 : let mut guard = self
3268 3 : .layers
3269 3 : .write(LayerManagerLockHolder::LoadLayerMap)
3270 3 : .await;
3271 :
3272 3 : let timer = self.metrics.load_layer_map_histo.start_timer();
3273 3 :
3274 3 : // Scan timeline directory and create ImageLayerName and DeltaFilename
3275 3 : // structs representing all files on disk
3276 3 : let timeline_path = self
3277 3 : .conf
3278 3 : .timeline_path(&self.tenant_shard_id, &self.timeline_id);
3279 3 : let conf = self.conf;
3280 3 : let span = tracing::Span::current();
3281 3 :
3282 3 : // Copy to move into the task we're about to spawn
3283 3 : let this = self.myself.upgrade().expect("&self method holds the arc");
3284 :
3285 3 : let (loaded_layers, needs_cleanup, total_physical_size) = tokio::task::spawn_blocking({
3286 3 : move || {
3287 3 : let _g = span.entered();
3288 3 : let discovered = init::scan_timeline_dir(&timeline_path)?;
3289 3 : let mut discovered_layers = Vec::with_capacity(discovered.len());
3290 3 : let mut unrecognized_files = Vec::new();
3291 3 :
3292 3 : let mut path = timeline_path;
3293 :
3294 11 : for discovered in discovered {
3295 8 : let (name, kind) = match discovered {
3296 8 : Discovered::Layer(layer_file_name, local_metadata) => {
3297 8 : discovered_layers.push((layer_file_name, local_metadata));
3298 8 : continue;
3299 : }
3300 0 : Discovered::IgnoredBackup(path) => {
3301 0 : std::fs::remove_file(path)
3302 0 : .or_else(fs_ext::ignore_not_found)
3303 0 : .fatal_err("Removing .old file");
3304 0 : continue;
3305 : }
3306 0 : Discovered::Unknown(file_name) => {
3307 0 : // we will later error if there are any
3308 0 : unrecognized_files.push(file_name);
3309 0 : continue;
3310 : }
3311 0 : Discovered::Ephemeral(name) => (name, "old ephemeral file"),
3312 0 : Discovered::Temporary(name) => (name, "temporary timeline file"),
3313 0 : Discovered::TemporaryDownload(name) => (name, "temporary download"),
3314 : };
3315 0 : path.push(Utf8Path::new(&name));
3316 0 : init::cleanup(&path, kind)?;
3317 0 : path.pop();
3318 : }
3319 :
3320 3 : if !unrecognized_files.is_empty() {
3321 : // assume that if there are any, there are many of them.
3322 0 : let n = unrecognized_files.len();
3323 0 : let first = &unrecognized_files[..n.min(10)];
3324 0 : anyhow::bail!(
3325 0 : "unrecognized files in timeline dir (total {n}), first 10: {first:?}"
3326 0 : );
3327 3 : }
3328 3 :
3329 3 : let decided = init::reconcile(discovered_layers, &index_part, disk_consistent_lsn);
3330 3 :
3331 3 : let mut loaded_layers = Vec::new();
3332 3 : let mut needs_cleanup = Vec::new();
3333 3 : let mut total_physical_size = 0;
3334 :
3335 11 : for (name, decision) in decided {
3336 8 : let decision = match decision {
3337 8 : Ok(decision) => decision,
3338 0 : Err(DismissedLayer::Future { local }) => {
3339 0 : if let Some(local) = local {
3340 0 : init::cleanup_future_layer(
3341 0 : &local.local_path,
3342 0 : &name,
3343 0 : disk_consistent_lsn,
3344 0 : )?;
3345 0 : }
3346 0 : needs_cleanup.push(name);
3347 0 : continue;
3348 : }
3349 0 : Err(DismissedLayer::LocalOnly(local)) => {
3350 0 : init::cleanup_local_only_file(&name, &local)?;
3351 : // this file never existed remotely, we will have to do rework
3352 0 : continue;
3353 : }
3354 0 : Err(DismissedLayer::BadMetadata(local)) => {
3355 0 : init::cleanup_local_file_for_remote(&local)?;
3356 : // this file never existed remotely, we will have to do rework
3357 0 : continue;
3358 : }
3359 : };
3360 :
3361 8 : match &name {
3362 6 : Delta(d) => assert!(d.lsn_range.end <= disk_consistent_lsn + 1),
3363 2 : Image(i) => assert!(i.lsn <= disk_consistent_lsn),
3364 : }
3365 :
3366 8 : tracing::debug!(layer=%name, ?decision, "applied");
3367 :
3368 8 : let layer = match decision {
3369 8 : Resident { local, remote } => {
3370 8 : total_physical_size += local.file_size;
3371 8 : Layer::for_resident(conf, &this, local.local_path, name, remote)
3372 8 : .drop_eviction_guard()
3373 : }
3374 0 : Evicted(remote) => Layer::for_evicted(conf, &this, name, remote),
3375 : };
3376 :
3377 8 : loaded_layers.push(layer);
3378 : }
3379 3 : Ok((loaded_layers, needs_cleanup, total_physical_size))
3380 3 : }
3381 3 : })
3382 3 : .await
3383 3 : .map_err(anyhow::Error::new)
3384 3 : .and_then(|x| x)?;
3385 :
3386 3 : let num_layers = loaded_layers.len();
3387 3 :
3388 3 : guard
3389 3 : .open_mut()
3390 3 : .expect("layermanager must be open during init")
3391 3 : .initialize_local_layers(loaded_layers, disk_consistent_lsn + 1);
3392 3 :
3393 3 : self.remote_client
3394 3 : .schedule_layer_file_deletion(&needs_cleanup)?;
3395 3 : self.remote_client
3396 3 : .schedule_index_upload_for_file_changes()?;
3397 : // This barrier orders above DELETEs before any later operations.
3398 : // This is critical because code executing after the barrier might
3399 : // create again objects with the same key that we just scheduled for deletion.
3400 : // For example, if we just scheduled deletion of an image layer "from the future",
3401 : // later compaction might run again and re-create the same image layer.
3402 : // "from the future" here means an image layer whose LSN is > IndexPart::disk_consistent_lsn.
3403 : // "same" here means same key range and LSN.
3404 : //
3405 : // Without a barrier between above DELETEs and the re-creation's PUTs,
3406 : // the upload queue may execute the PUT first, then the DELETE.
3407 : // In our example, we will end up with an IndexPart referencing a non-existent object.
3408 : //
3409 : // 1. a future image layer is created and uploaded
3410 : // 2. ps restart
3411 : // 3. the future layer from (1) is deleted during load layer map
3412 : // 4. image layer is re-created and uploaded
3413 : // 5. deletion queue would like to delete (1) but actually deletes (4)
3414 : // 6. delete by name works as expected, but it now deletes the wrong (later) version
3415 : //
3416 : // See https://github.com/neondatabase/neon/issues/5878
3417 : //
3418 : // NB: generation numbers naturally protect against this because they disambiguate
3419 : // (1) and (4)
3420 : // TODO: this is basically a no-op now, should we remove it?
3421 3 : self.remote_client.schedule_barrier()?;
3422 : // TenantShard::create_timeline will wait for these uploads to happen before returning, or
3423 : // on retry.
3424 :
3425 : // Now that we have the full layer map, we may calculate the visibility of layers within it (a global scan)
3426 3 : drop(guard); // drop write lock, update_layer_visibility will take a read lock.
3427 3 : self.update_layer_visibility().await?;
3428 :
3429 3 : info!(
3430 0 : "loaded layer map with {} layers at {}, total physical size: {}",
3431 : num_layers, disk_consistent_lsn, total_physical_size
3432 : );
3433 :
3434 3 : timer.stop_and_record();
3435 3 : Ok(())
3436 3 : }
3437 :
3438 : /// Retrieve current logical size of the timeline.
3439 : ///
3440 : /// The size could be lagging behind the actual number, in case
3441 : /// the initial size calculation has not been run (gets triggered on the first size access).
3442 : ///
3443 : /// Returns the size and a boolean flag that shows whether the size is exact.
3444 0 : pub(crate) fn get_current_logical_size(
3445 0 : self: &Arc<Self>,
3446 0 : priority: GetLogicalSizePriority,
3447 0 : ctx: &RequestContext,
3448 0 : ) -> logical_size::CurrentLogicalSize {
3449 0 : if !self.tenant_shard_id.is_shard_zero() {
3450 : // Logical size is only accurately maintained on shard zero: when called elsewhere, for example
3451 : // when HTTP API is serving a GET for timeline zero, return zero
3452 0 : return logical_size::CurrentLogicalSize::Approximate(logical_size::Approximate::zero());
3453 0 : }
3454 0 :
3455 0 : let current_size = self.current_logical_size.current_size();
3456 0 : debug!("Current size: {current_size:?}");
3457 :
3458 0 : match (current_size.accuracy(), priority) {
3459 0 : (logical_size::Accuracy::Exact, _) => (), // nothing to do
3460 0 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::Background) => {
3461 0 : // background task will eventually deliver an exact value, we're in no rush
3462 0 : }
3463 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::User) => {
3464 : // background task is not ready, but user is asking for it now;
3465 : // => make the background task skip the line
3466 : // (The alternative would be to calculate the size here, but
3467 : // it can actually take a long time if the user has a lot of rels.
3468 : // And we'll inevitably need it again, so let the background task do the work.)
3469 0 : match self
3470 0 : .current_logical_size
3471 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
3472 0 : .get()
3473 : {
3474 0 : Some(cancel) => cancel.cancel(),
3475 : None => {
3476 0 : match self.current_state() {
3477 0 : TimelineState::Broken { .. } | TimelineState::Stopping => {
3478 0 : // Can happen when timeline detail endpoint is used when deletion is ongoing (or its broken).
3479 0 : // Don't make noise.
3480 0 : }
3481 : TimelineState::Loading => {
3482 : // Import does not return an activated timeline.
3483 0 : info!(
3484 0 : "discarding priority boost for logical size calculation because timeline is not yet active"
3485 : );
3486 : }
3487 : TimelineState::Active => {
3488 : // activation should be setting the once cell
3489 0 : warn!(
3490 0 : "unexpected: cancel_wait_for_background_loop_concurrency_limit_semaphore not set, priority-boosting of logical size calculation will not work"
3491 : );
3492 0 : debug_assert!(false);
3493 : }
3494 : }
3495 : }
3496 : }
3497 : }
3498 : }
3499 :
3500 0 : if let CurrentLogicalSize::Approximate(_) = ¤t_size {
3501 0 : if ctx.task_kind() == TaskKind::WalReceiverConnectionHandler {
3502 0 : let first = self
3503 0 : .current_logical_size
3504 0 : .did_return_approximate_to_walreceiver
3505 0 : .compare_exchange(
3506 0 : false,
3507 0 : true,
3508 0 : AtomicOrdering::Relaxed,
3509 0 : AtomicOrdering::Relaxed,
3510 0 : )
3511 0 : .is_ok();
3512 0 : if first {
3513 0 : crate::metrics::initial_logical_size::TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE.inc();
3514 0 : }
3515 0 : }
3516 0 : }
3517 :
3518 0 : current_size
3519 0 : }
3520 :
3521 0 : fn spawn_initial_logical_size_computation_task(self: &Arc<Self>, ctx: &RequestContext) {
3522 0 : let Some(initial_part_end) = self.current_logical_size.initial_part_end else {
3523 : // nothing to do for freshly created timelines;
3524 0 : assert_eq!(
3525 0 : self.current_logical_size.current_size().accuracy(),
3526 0 : logical_size::Accuracy::Exact,
3527 0 : );
3528 0 : self.current_logical_size.initialized.add_permits(1);
3529 0 : return;
3530 : };
3531 :
3532 0 : let cancel_wait_for_background_loop_concurrency_limit_semaphore = CancellationToken::new();
3533 0 : let token = cancel_wait_for_background_loop_concurrency_limit_semaphore.clone();
3534 0 : self.current_logical_size
3535 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore.set(token)
3536 0 : .expect("initial logical size calculation task must be spawned exactly once per Timeline object");
3537 0 :
3538 0 : let self_clone = Arc::clone(self);
3539 0 : let background_ctx = ctx.detached_child(
3540 0 : TaskKind::InitialLogicalSizeCalculation,
3541 0 : DownloadBehavior::Download,
3542 0 : );
3543 0 : task_mgr::spawn(
3544 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
3545 0 : task_mgr::TaskKind::InitialLogicalSizeCalculation,
3546 0 : self.tenant_shard_id,
3547 0 : Some(self.timeline_id),
3548 0 : "initial size calculation",
3549 : // NB: don't log errors here, task_mgr will do that.
3550 0 : async move {
3551 0 : self_clone
3552 0 : .initial_logical_size_calculation_task(
3553 0 : initial_part_end,
3554 0 : cancel_wait_for_background_loop_concurrency_limit_semaphore,
3555 0 : background_ctx,
3556 0 : )
3557 0 : .await;
3558 0 : Ok(())
3559 0 : }
3560 0 : .instrument(info_span!(parent: None, "initial_size_calculation", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id)),
3561 : );
3562 0 : }
3563 :
3564 : /// # Cancellation
3565 : ///
3566 : /// This method is sensitive to `Timeline::cancel`.
3567 : ///
3568 : /// It is _not_ sensitive to task_mgr::shutdown_token().
3569 : ///
3570 : /// # Cancel-Safety
3571 : ///
3572 : /// It does Timeline IO, hence this should be polled to completion because
3573 : /// we could be leaving in-flight IOs behind, which is safe, but annoying
3574 : /// to reason about.
3575 0 : async fn initial_logical_size_calculation_task(
3576 0 : self: Arc<Self>,
3577 0 : initial_part_end: Lsn,
3578 0 : skip_concurrency_limiter: CancellationToken,
3579 0 : background_ctx: RequestContext,
3580 0 : ) {
3581 0 : scopeguard::defer! {
3582 0 : // Irrespective of the outcome of this operation, we should unblock anyone waiting for it.
3583 0 : self.current_logical_size.initialized.add_permits(1);
3584 0 : }
3585 0 :
3586 0 : let try_once = |attempt: usize| {
3587 0 : let background_ctx = &background_ctx;
3588 0 : let self_ref = &self;
3589 0 : let skip_concurrency_limiter = &skip_concurrency_limiter;
3590 0 : async move {
3591 0 : let wait_for_permit = super::tasks::acquire_concurrency_permit(
3592 0 : BackgroundLoopKind::InitialLogicalSizeCalculation,
3593 0 : background_ctx,
3594 0 : );
3595 :
3596 : use crate::metrics::initial_logical_size::StartCircumstances;
3597 0 : let (_maybe_permit, circumstances) = tokio::select! {
3598 0 : permit = wait_for_permit => {
3599 0 : (Some(permit), StartCircumstances::AfterBackgroundTasksRateLimit)
3600 : }
3601 0 : _ = self_ref.cancel.cancelled() => {
3602 0 : return Err(CalculateLogicalSizeError::Cancelled);
3603 : }
3604 0 : () = skip_concurrency_limiter.cancelled() => {
3605 : // Some action that is part of an end-user interaction requested the logical size
3606 : // => break out of the rate limit
3607 : // TODO: ideally we'd not run on BackgroundRuntime but the requester's runtime;
3608 : // but then again what happens if they cancel; also, we should just be using
3609 : // one runtime across the entire process, so, let's leave this for now.
3610 0 : (None, StartCircumstances::SkippedConcurrencyLimiter)
3611 : }
3612 : };
3613 :
3614 0 : let metrics_guard = if attempt == 1 {
3615 0 : crate::metrics::initial_logical_size::START_CALCULATION.first(circumstances)
3616 : } else {
3617 0 : crate::metrics::initial_logical_size::START_CALCULATION.retry(circumstances)
3618 : };
3619 :
3620 0 : let io_concurrency = IoConcurrency::spawn_from_conf(
3621 0 : self_ref.conf.get_vectored_concurrent_io,
3622 0 : self_ref
3623 0 : .gate
3624 0 : .enter()
3625 0 : .map_err(|_| CalculateLogicalSizeError::Cancelled)?,
3626 : );
3627 :
3628 0 : let calculated_size = self_ref
3629 0 : .logical_size_calculation_task(
3630 0 : initial_part_end,
3631 0 : LogicalSizeCalculationCause::Initial,
3632 0 : background_ctx,
3633 0 : )
3634 0 : .await?;
3635 :
3636 0 : self_ref
3637 0 : .trigger_aux_file_size_computation(
3638 0 : initial_part_end,
3639 0 : background_ctx,
3640 0 : io_concurrency,
3641 0 : )
3642 0 : .await?;
3643 :
3644 : // TODO: add aux file size to logical size
3645 :
3646 0 : Ok((calculated_size, metrics_guard))
3647 0 : }
3648 0 : };
3649 :
3650 0 : let retrying = async {
3651 0 : let mut attempt = 0;
3652 : loop {
3653 0 : attempt += 1;
3654 0 :
3655 0 : match try_once(attempt).await {
3656 0 : Ok(res) => return ControlFlow::Continue(res),
3657 0 : Err(CalculateLogicalSizeError::Cancelled) => return ControlFlow::Break(()),
3658 : Err(
3659 0 : e @ (CalculateLogicalSizeError::Decode(_)
3660 0 : | CalculateLogicalSizeError::PageRead(_)),
3661 0 : ) => {
3662 0 : warn!(attempt, "initial size calculation failed: {e:?}");
3663 : // exponential back-off doesn't make sense at these long intervals;
3664 : // use fixed retry interval with generous jitter instead
3665 0 : let sleep_duration = Duration::from_secs(
3666 0 : u64::try_from(
3667 0 : // 1hour base
3668 0 : (60_i64 * 60_i64)
3669 0 : // 10min jitter
3670 0 : + rand::thread_rng().gen_range(-10 * 60..10 * 60),
3671 0 : )
3672 0 : .expect("10min < 1hour"),
3673 0 : );
3674 0 : tokio::select! {
3675 0 : _ = tokio::time::sleep(sleep_duration) => {}
3676 0 : _ = self.cancel.cancelled() => return ControlFlow::Break(()),
3677 : }
3678 : }
3679 : }
3680 : }
3681 0 : };
3682 :
3683 0 : let (calculated_size, metrics_guard) = match retrying.await {
3684 0 : ControlFlow::Continue(calculated_size) => calculated_size,
3685 0 : ControlFlow::Break(()) => return,
3686 : };
3687 :
3688 : // we cannot query current_logical_size.current_size() to know the current
3689 : // *negative* value, only truncated to u64.
3690 0 : let added = self
3691 0 : .current_logical_size
3692 0 : .size_added_after_initial
3693 0 : .load(AtomicOrdering::Relaxed);
3694 0 :
3695 0 : let sum = calculated_size.saturating_add_signed(added);
3696 0 :
3697 0 : // set the gauge value before it can be set in `update_current_logical_size`.
3698 0 : self.metrics.current_logical_size_gauge.set(sum);
3699 0 :
3700 0 : self.current_logical_size
3701 0 : .initial_logical_size
3702 0 : .set((calculated_size, metrics_guard.calculation_result_saved()))
3703 0 : .ok()
3704 0 : .expect("only this task sets it");
3705 0 : }
3706 :
3707 7 : pub(crate) fn spawn_ondemand_logical_size_calculation(
3708 7 : self: &Arc<Self>,
3709 7 : lsn: Lsn,
3710 7 : cause: LogicalSizeCalculationCause,
3711 7 : ctx: RequestContext,
3712 7 : ) -> oneshot::Receiver<Result<u64, CalculateLogicalSizeError>> {
3713 7 : let (sender, receiver) = oneshot::channel();
3714 7 : let self_clone = Arc::clone(self);
3715 7 : // XXX if our caller loses interest, i.e., ctx is cancelled,
3716 7 : // we should stop the size calculation work and return an error.
3717 7 : // That would require restructuring this function's API to
3718 7 : // return the result directly, instead of a Receiver for the result.
3719 7 : let ctx = ctx.detached_child(
3720 7 : TaskKind::OndemandLogicalSizeCalculation,
3721 7 : DownloadBehavior::Download,
3722 7 : );
3723 7 : task_mgr::spawn(
3724 7 : task_mgr::BACKGROUND_RUNTIME.handle(),
3725 7 : task_mgr::TaskKind::OndemandLogicalSizeCalculation,
3726 7 : self.tenant_shard_id,
3727 7 : Some(self.timeline_id),
3728 7 : "ondemand logical size calculation",
3729 7 : async move {
3730 7 : let res = self_clone
3731 7 : .logical_size_calculation_task(lsn, cause, &ctx)
3732 7 : .await;
3733 7 : let _ = sender.send(res).ok();
3734 7 : Ok(()) // Receiver is responsible for handling errors
3735 7 : }
3736 7 : .in_current_span(),
3737 7 : );
3738 7 : receiver
3739 7 : }
3740 :
3741 : #[instrument(skip_all)]
3742 : async fn logical_size_calculation_task(
3743 : self: &Arc<Self>,
3744 : lsn: Lsn,
3745 : cause: LogicalSizeCalculationCause,
3746 : ctx: &RequestContext,
3747 : ) -> Result<u64, CalculateLogicalSizeError> {
3748 : crate::span::debug_assert_current_span_has_tenant_and_timeline_id();
3749 : // We should never be calculating logical sizes on shard !=0, because these shards do not have
3750 : // accurate relation sizes, and they do not emit consumption metrics.
3751 : debug_assert!(self.tenant_shard_id.is_shard_zero());
3752 :
3753 : let guard = self
3754 : .gate
3755 : .enter()
3756 0 : .map_err(|_| CalculateLogicalSizeError::Cancelled)?;
3757 :
3758 : self.calculate_logical_size(lsn, cause, &guard, ctx).await
3759 : }
3760 :
3761 : /// Calculate the logical size of the database at the latest LSN.
3762 : ///
3763 : /// NOTE: counted incrementally, includes ancestors. This can be a slow operation,
3764 : /// especially if we need to download remote layers.
3765 7 : async fn calculate_logical_size(
3766 7 : &self,
3767 7 : up_to_lsn: Lsn,
3768 7 : cause: LogicalSizeCalculationCause,
3769 7 : _guard: &GateGuard,
3770 7 : ctx: &RequestContext,
3771 7 : ) -> Result<u64, CalculateLogicalSizeError> {
3772 7 : info!(
3773 0 : "Calculating logical size for timeline {} at {}",
3774 : self.timeline_id, up_to_lsn
3775 : );
3776 :
3777 7 : if let Err(()) = pausable_failpoint!("timeline-calculate-logical-size-pause", &self.cancel)
3778 : {
3779 0 : return Err(CalculateLogicalSizeError::Cancelled);
3780 7 : }
3781 :
3782 : // See if we've already done the work for initial size calculation.
3783 : // This is a short-cut for timelines that are mostly unused.
3784 7 : if let Some(size) = self.current_logical_size.initialized_size(up_to_lsn) {
3785 0 : return Ok(size);
3786 7 : }
3787 7 : let storage_time_metrics = match cause {
3788 : LogicalSizeCalculationCause::Initial
3789 : | LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize
3790 0 : | LogicalSizeCalculationCause::TenantSizeHandler => &self.metrics.logical_size_histo,
3791 : LogicalSizeCalculationCause::EvictionTaskImitation => {
3792 7 : &self.metrics.imitate_logical_size_histo
3793 : }
3794 : };
3795 7 : let timer = storage_time_metrics.start_timer();
3796 7 : let logical_size = self
3797 7 : .get_current_logical_size_non_incremental(up_to_lsn, ctx)
3798 7 : .await?;
3799 7 : debug!("calculated logical size: {logical_size}");
3800 7 : timer.stop_and_record();
3801 7 : Ok(logical_size)
3802 7 : }
3803 :
3804 : /// Update current logical size, adding `delta' to the old value.
3805 135285 : fn update_current_logical_size(&self, delta: i64) {
3806 135285 : let logical_size = &self.current_logical_size;
3807 135285 : logical_size.increment_size(delta);
3808 135285 :
3809 135285 : // Also set the value in the prometheus gauge. Note that
3810 135285 : // there is a race condition here: if this is called by two
3811 135285 : // threads concurrently, the prometheus gauge might be set to
3812 135285 : // one value while current_logical_size is set to the
3813 135285 : // other.
3814 135285 : match logical_size.current_size() {
3815 135285 : CurrentLogicalSize::Exact(ref new_current_size) => self
3816 135285 : .metrics
3817 135285 : .current_logical_size_gauge
3818 135285 : .set(new_current_size.into()),
3819 0 : CurrentLogicalSize::Approximate(_) => {
3820 0 : // don't update the gauge yet, this allows us not to update the gauge back and
3821 0 : // forth between the initial size calculation task.
3822 0 : }
3823 : }
3824 135285 : }
3825 :
3826 1522 : pub(crate) fn update_directory_entries_count(&self, kind: DirectoryKind, count: MetricsUpdate) {
3827 1522 : // TODO: this directory metric is not correct -- we could have multiple reldirs in the system
3828 1522 : // for each of the databases, but we only store one value, and therefore each pgdir modification
3829 1522 : // would overwrite the previous value if they modify different databases.
3830 1522 :
3831 1522 : match count {
3832 561 : MetricsUpdate::Set(count) => {
3833 561 : self.directory_metrics[kind.offset()].store(count, AtomicOrdering::Relaxed);
3834 561 : self.directory_metrics_inited[kind.offset()].store(true, AtomicOrdering::Relaxed);
3835 561 : }
3836 960 : MetricsUpdate::Add(count) => {
3837 960 : // TODO: these operations are not atomic; but we only have one writer to the metrics, so
3838 960 : // it's fine.
3839 960 : if self.directory_metrics_inited[kind.offset()].load(AtomicOrdering::Relaxed) {
3840 960 : // The metric has been initialized with `MetricsUpdate::Set` before, so we can add/sub
3841 960 : // the value reliably.
3842 960 : self.directory_metrics[kind.offset()].fetch_add(count, AtomicOrdering::Relaxed);
3843 960 : }
3844 : // Otherwise, ignore this update
3845 : }
3846 1 : MetricsUpdate::Sub(count) => {
3847 1 : // TODO: these operations are not atomic; but we only have one writer to the metrics, so
3848 1 : // it's fine.
3849 1 : if self.directory_metrics_inited[kind.offset()].load(AtomicOrdering::Relaxed) {
3850 1 : // The metric has been initialized with `MetricsUpdate::Set` before.
3851 1 : // The operation could overflow so we need to normalize the value.
3852 1 : let prev_val =
3853 1 : self.directory_metrics[kind.offset()].load(AtomicOrdering::Relaxed);
3854 1 : let res = prev_val.saturating_sub(count);
3855 1 : self.directory_metrics[kind.offset()].store(res, AtomicOrdering::Relaxed);
3856 1 : }
3857 : // Otherwise, ignore this update
3858 : }
3859 : };
3860 :
3861 : // TODO: remove this, there's no place in the code that updates this aux metric.
3862 1522 : let aux_metric =
3863 1522 : self.directory_metrics[DirectoryKind::AuxFiles.offset()].load(AtomicOrdering::Relaxed);
3864 1522 :
3865 1522 : let sum_of_entries = self
3866 1522 : .directory_metrics
3867 1522 : .iter()
3868 12176 : .map(|v| v.load(AtomicOrdering::Relaxed))
3869 1522 : .sum();
3870 : // Set a high general threshold and a lower threshold for the auxiliary files,
3871 : // as we can have large numbers of relations in the db directory.
3872 : const SUM_THRESHOLD: u64 = 5000;
3873 : const AUX_THRESHOLD: u64 = 1000;
3874 1522 : if sum_of_entries >= SUM_THRESHOLD || aux_metric >= AUX_THRESHOLD {
3875 0 : self.metrics
3876 0 : .directory_entries_count_gauge
3877 0 : .set(sum_of_entries);
3878 1522 : } else if let Some(metric) = Lazy::get(&self.metrics.directory_entries_count_gauge) {
3879 0 : metric.set(sum_of_entries);
3880 1522 : }
3881 1522 : }
3882 :
3883 0 : async fn find_layer(
3884 0 : &self,
3885 0 : layer_name: &LayerName,
3886 0 : ) -> Result<Option<Layer>, layer_manager::Shutdown> {
3887 0 : let guard = self
3888 0 : .layers
3889 0 : .read(LayerManagerLockHolder::GetLayerMapInfo)
3890 0 : .await;
3891 0 : let layer = guard
3892 0 : .layer_map()?
3893 0 : .iter_historic_layers()
3894 0 : .find(|l| &l.layer_name() == layer_name)
3895 0 : .map(|found| guard.get_from_desc(&found));
3896 0 : Ok(layer)
3897 0 : }
3898 :
3899 0 : pub(super) fn should_keep_previous_heatmap(&self, new_heatmap_end_lsn: Lsn) -> bool {
3900 0 : let crnt = self.previous_heatmap.load();
3901 0 : match crnt.as_deref() {
3902 0 : Some(PreviousHeatmap::Active { end_lsn, .. }) => match end_lsn {
3903 0 : Some(crnt_end_lsn) => *crnt_end_lsn > new_heatmap_end_lsn,
3904 0 : None => true,
3905 : },
3906 0 : Some(PreviousHeatmap::Obsolete) => false,
3907 0 : None => false,
3908 : }
3909 0 : }
3910 :
3911 : /// The timeline heatmap is a hint to secondary locations from the primary location,
3912 : /// indicating which layers are currently on-disk on the primary.
3913 : ///
3914 : /// None is returned if the Timeline is in a state where uploading a heatmap
3915 : /// doesn't make sense, such as shutting down or initializing. The caller
3916 : /// should treat this as a cue to simply skip doing any heatmap uploading
3917 : /// for this timeline.
3918 8 : pub(crate) async fn generate_heatmap(&self) -> Option<HeatMapTimeline> {
3919 8 : if !self.is_active() {
3920 0 : return None;
3921 8 : }
3922 :
3923 8 : let guard = self
3924 8 : .layers
3925 8 : .read(LayerManagerLockHolder::GenerateHeatmap)
3926 8 : .await;
3927 :
3928 : // Firstly, if there's any heatmap left over from when this location
3929 : // was a secondary, take that into account. Keep layers that are:
3930 : // * present in the layer map
3931 : // * visible
3932 : // * non-resident
3933 : // * not evicted since we read the heatmap
3934 : //
3935 : // Without this, a new cold, attached location would clobber the previous
3936 : // heatmap.
3937 8 : let previous_heatmap = self.previous_heatmap.load();
3938 8 : let visible_non_resident = match previous_heatmap.as_deref() {
3939 : Some(PreviousHeatmap::Active {
3940 6 : heatmap, read_at, ..
3941 23 : }) => Some(heatmap.all_layers().filter_map(|hl| {
3942 23 : let desc: PersistentLayerDesc = hl.name.clone().into();
3943 23 : let layer = guard.try_get_from_key(&desc.key())?;
3944 :
3945 23 : if layer.visibility() == LayerVisibilityHint::Covered {
3946 0 : return None;
3947 23 : }
3948 23 :
3949 23 : if layer.is_likely_resident() {
3950 10 : return None;
3951 13 : }
3952 13 :
3953 13 : if layer.last_evicted_at().happened_after(*read_at) {
3954 3 : return None;
3955 10 : }
3956 10 :
3957 10 : Some((desc, hl.metadata.clone(), hl.access_time, hl.cold))
3958 23 : })),
3959 0 : Some(PreviousHeatmap::Obsolete) => None,
3960 2 : None => None,
3961 : };
3962 :
3963 : // Secondly, all currently visible, resident layers are included.
3964 18 : let resident = guard.likely_resident_layers().filter_map(|layer| {
3965 18 : match layer.visibility() {
3966 : LayerVisibilityHint::Visible => {
3967 : // Layer is visible to one or more read LSNs: eligible for inclusion in the heatmap
3968 17 : let last_activity_ts = layer.latest_activity();
3969 17 : Some((
3970 17 : layer.layer_desc().clone(),
3971 17 : layer.metadata(),
3972 17 : last_activity_ts,
3973 17 : false, // these layers are not cold
3974 17 : ))
3975 : }
3976 : LayerVisibilityHint::Covered => {
3977 : // Layer is resident but unlikely to be read: not eligible for inclusion in the heatmap.
3978 1 : None
3979 : }
3980 : }
3981 18 : });
3982 :
3983 8 : let mut layers = match visible_non_resident {
3984 6 : Some(non_resident) => {
3985 6 : let mut non_resident = non_resident.peekable();
3986 6 : if non_resident.peek().is_none() {
3987 2 : tracing::info!(timeline_id=%self.timeline_id, "Previous heatmap now obsolete");
3988 2 : self.previous_heatmap
3989 2 : .store(Some(PreviousHeatmap::Obsolete.into()));
3990 4 : }
3991 :
3992 6 : non_resident.chain(resident).collect::<Vec<_>>()
3993 : }
3994 2 : None => resident.collect::<Vec<_>>(),
3995 : };
3996 :
3997 : // Sort layers in order of which to download first. For a large set of layers to download, we
3998 : // want to prioritize those layers which are most likely to still be resident many minutes
3999 : // or hours later:
4000 : // - Cold layers go last for convenience when a human inspects the heatmap.
4001 : // - Download L0s last, because they churn the fastest: L0s on a fast-writing tenant might
4002 : // only exist for a few minutes before being compacted into L1s.
4003 : // - For L1 & image layers, download most recent LSNs first: the older the LSN, the sooner
4004 : // the layer is likely to be covered by an image layer during compaction.
4005 56 : layers.sort_by_key(|(desc, _meta, _atime, cold)| {
4006 56 : std::cmp::Reverse((
4007 56 : *cold,
4008 56 : !LayerMap::is_l0(&desc.key_range, desc.is_delta),
4009 56 : desc.lsn_range.end,
4010 56 : ))
4011 56 : });
4012 8 :
4013 8 : let layers = layers
4014 8 : .into_iter()
4015 27 : .map(|(desc, meta, atime, cold)| {
4016 27 : HeatMapLayer::new(desc.layer_name(), meta, atime, cold)
4017 27 : })
4018 8 : .collect();
4019 8 :
4020 8 : Some(HeatMapTimeline::new(self.timeline_id, layers))
4021 8 : }
4022 :
4023 0 : pub(super) async fn generate_unarchival_heatmap(&self, end_lsn: Lsn) -> PreviousHeatmap {
4024 0 : let guard = self
4025 0 : .layers
4026 0 : .read(LayerManagerLockHolder::GenerateHeatmap)
4027 0 : .await;
4028 :
4029 0 : let now = SystemTime::now();
4030 0 : let mut heatmap_layers = Vec::default();
4031 0 : for vl in guard.visible_layers() {
4032 0 : if vl.layer_desc().get_lsn_range().start >= end_lsn {
4033 0 : continue;
4034 0 : }
4035 0 :
4036 0 : let hl = HeatMapLayer {
4037 0 : name: vl.layer_desc().layer_name(),
4038 0 : metadata: vl.metadata(),
4039 0 : access_time: now,
4040 0 : cold: true,
4041 0 : };
4042 0 : heatmap_layers.push(hl);
4043 : }
4044 :
4045 0 : tracing::info!(
4046 0 : "Generating unarchival heatmap with {} layers",
4047 0 : heatmap_layers.len()
4048 : );
4049 :
4050 0 : let heatmap = HeatMapTimeline::new(self.timeline_id, heatmap_layers);
4051 0 : PreviousHeatmap::Active {
4052 0 : heatmap,
4053 0 : read_at: Instant::now(),
4054 0 : end_lsn: Some(end_lsn),
4055 0 : }
4056 0 : }
4057 :
4058 : /// Returns true if the given lsn is or was an ancestor branchpoint.
4059 0 : pub(crate) fn is_ancestor_lsn(&self, lsn: Lsn) -> bool {
4060 0 : // upon timeline detach, we set the ancestor_lsn to Lsn::INVALID and store the original
4061 0 : // branchpoint in IndexPart::lineage
4062 0 : self.ancestor_lsn == lsn
4063 0 : || (self.ancestor_lsn == Lsn::INVALID
4064 0 : && self.remote_client.is_previous_ancestor_lsn(lsn))
4065 0 : }
4066 : }
4067 :
4068 : #[derive(Clone)]
4069 : /// Type representing a query in the ([`Lsn`], [`Key`]) space.
4070 : /// In other words, a set of segments in a 2D space.
4071 : ///
4072 : /// This representation has the advantage of avoiding hash map
4073 : /// allocations for uniform queries.
4074 : pub(crate) enum VersionedKeySpaceQuery {
4075 : /// Variant for queries at a single [`Lsn`]
4076 : Uniform { keyspace: KeySpace, lsn: Lsn },
4077 : /// Variant for queries at multiple [`Lsn`]s
4078 : Scattered {
4079 : keyspaces_at_lsn: Vec<(Lsn, KeySpace)>,
4080 : },
4081 : }
4082 :
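// Editor's illustrative sketch (assumption, not part of the original source): how a
// caller might build the two variants. The key spaces and LSNs here are placeholders;
// `uniform` and `scattered` are the constructors defined in the impl below.
#[cfg(any())] // never compiled; illustration only
fn example_versioned_queries(ks_a: KeySpace, ks_b: KeySpace, lsn_a: Lsn, lsn_b: Lsn) {
    // Every key in `ks_a` is read at the same LSN: no per-range LSN bookkeeping needed.
    let _uniform = VersionedKeySpaceQuery::uniform(ks_a.clone(), lsn_a);
    // Different key ranges are read at different LSNs, but planned as one vectored read.
    let _scattered = VersionedKeySpaceQuery::scattered(vec![(lsn_a, ks_a), (lsn_b, ks_b)]);
}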
4083 : impl VersionedKeySpaceQuery {
4084 302151 : pub(crate) fn uniform(keyspace: KeySpace, lsn: Lsn) -> Self {
4085 302151 : Self::Uniform { keyspace, lsn }
4086 302151 : }
4087 :
4088 10192 : pub(crate) fn scattered(keyspaces_at_lsn: Vec<(Lsn, KeySpace)>) -> Self {
4089 10192 : Self::Scattered { keyspaces_at_lsn }
4090 10192 : }
4091 :
4092 : /// Returns the most recent (largest) LSN included in the query.
4093 : /// If any of the LSNs included in the query are invalid, returns
4094 : /// an error instead.
4095 624686 : fn high_watermark_lsn(&self) -> Result<Lsn, GetVectoredError> {
4096 624686 : match self {
4097 604302 : Self::Uniform { lsn, .. } => {
4098 604302 : if !lsn.is_valid() {
4099 0 : return Err(GetVectoredError::InvalidLsn(*lsn));
4100 604302 : }
4101 604302 :
4102 604302 : Ok(*lsn)
4103 : }
4104 20384 : Self::Scattered { keyspaces_at_lsn } => {
4105 20384 : let mut max_lsn = None;
4106 42218 : for (lsn, _keyspace) in keyspaces_at_lsn.iter() {
4107 42218 : if !lsn.is_valid() {
4108 0 : return Err(GetVectoredError::InvalidLsn(*lsn));
4109 42218 : }
4110 42218 : max_lsn = std::cmp::max(max_lsn, Some(lsn));
4111 : }
4112 :
4113 20384 : if let Some(computed) = max_lsn {
4114 20384 : Ok(*computed)
4115 : } else {
4116 0 : Err(GetVectoredError::Other(anyhow!("empty input")))
4117 : }
4118 : }
4119 : }
4120 624686 : }
4121 :
4122 : /// Returns the total keyspace being queried: the result of projecting
4123 : /// everything in the key dimensions onto the key axis.
4124 323363 : fn total_keyspace(&self) -> KeySpace {
4125 323363 : match self {
4126 302979 : Self::Uniform { keyspace, .. } => keyspace.clone(),
4127 20384 : Self::Scattered { keyspaces_at_lsn } => keyspaces_at_lsn
4128 20384 : .iter()
4129 42218 : .map(|(_lsn, keyspace)| keyspace)
4130 42218 : .fold(KeySpace::default(), |mut acc, v| {
4131 42218 : acc.merge(v);
4132 42218 : acc
4133 42218 : }),
4134 : }
4135 323363 : }
4136 :
4137 : /// Returns LSN for a specific key.
4138 : ///
4139 : /// Invariant: requested key must be part of [`Self::total_keyspace`]
4140 395514 : pub(super) fn map_key_to_lsn(&self, key: &Key) -> Lsn {
4141 395514 : match self {
4142 322322 : Self::Uniform { lsn, .. } => *lsn,
4143 73192 : Self::Scattered { keyspaces_at_lsn } => {
4144 73192 : keyspaces_at_lsn
4145 73192 : .iter()
4146 420104 : .find(|(_lsn, keyspace)| keyspace.contains(key))
4147 73192 : .expect("Returned key was requested")
4148 73192 : .0
4149 : }
4150 : }
4151 395514 : }
4152 :
4153 : /// Remove any parts of the query (segments) which overlap with the provided
4154 : /// key space (also segments).
4155 963249 : fn remove_overlapping_with(&mut self, to_remove: &KeySpace) -> KeySpace {
4156 963249 : match self {
4157 942865 : Self::Uniform { keyspace, .. } => keyspace.remove_overlapping_with(to_remove),
4158 20384 : Self::Scattered { keyspaces_at_lsn } => {
4159 20384 : let mut removed_accum = KeySpaceRandomAccum::new();
4160 42218 : keyspaces_at_lsn.iter_mut().for_each(|(_lsn, keyspace)| {
4161 42218 : let removed = keyspace.remove_overlapping_with(to_remove);
4162 42218 : removed_accum.add_keyspace(removed);
4163 42218 : });
4164 20384 :
4165 20384 : removed_accum.to_keyspace()
4166 : }
4167 : }
4168 963249 : }
4169 :
4170 737540 : fn is_empty(&self) -> bool {
4171 737540 : match self {
4172 717156 : Self::Uniform { keyspace, .. } => keyspace.is_empty(),
4173 20384 : Self::Scattered { keyspaces_at_lsn } => keyspaces_at_lsn
4174 20384 : .iter()
4175 31301 : .all(|(_lsn, keyspace)| keyspace.is_empty()),
4176 : }
4177 737540 : }
4178 :
4179 : /// "Lower" the query on the LSN dimension
4180 112855 : fn lower(&mut self, to: Lsn) {
4181 112855 : match self {
4182 112855 : Self::Uniform { lsn, .. } => {
4183 112855 : // If the originally requested LSN is smaller than the starting
4184 112855 : // LSN of the ancestor we are descending into, we need to respect that.
4185 112855 : // Hence the min.
4186 112855 : *lsn = std::cmp::min(*lsn, to);
4187 112855 : }
4188 0 : Self::Scattered { keyspaces_at_lsn } => {
4189 0 : keyspaces_at_lsn.iter_mut().for_each(|(lsn, _keyspace)| {
4190 0 : *lsn = std::cmp::min(*lsn, to);
4191 0 : });
4192 0 : }
4193 : }
4194 112855 : }
4195 : }
4196 :
4197 : impl std::fmt::Display for VersionedKeySpaceQuery {
4198 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
4199 0 : write!(f, "[")?;
4200 :
4201 0 : match self {
4202 0 : VersionedKeySpaceQuery::Uniform { keyspace, lsn } => {
4203 0 : write!(f, "{keyspace} @ {lsn}")?;
4204 : }
4205 0 : VersionedKeySpaceQuery::Scattered { keyspaces_at_lsn } => {
4206 0 : for (lsn, keyspace) in keyspaces_at_lsn.iter() {
4207 0 : write!(f, "{keyspace} @ {lsn},")?;
4208 : }
4209 : }
4210 : }
4211 :
4212 0 : write!(f, "]")
4213 0 : }
4214 : }
4215 :
4216 : impl Timeline {
4217 : #[allow(clippy::doc_lazy_continuation)]
4218 : /// Get the data needed to reconstruct all keys in the provided keyspace
4219 : ///
4220 : /// The algorithm is as follows:
4221 : /// 1. While some keys are still not done and there's a timeline to visit:
4222 : /// 2. Visit the timeline (see [`Timeline::get_vectored_reconstruct_data_timeline`]:
4223 : /// 2.1. Build the fringe for the current keyspace
4224 : /// 2.2. Visit the newest layer from the fringe to collect all values for the range it
4225 : /// intersects
4226 : /// 2.3. Pop the timeline from the fringe
4227 : /// 2.4. If the fringe is empty, go back to 1
4228 312343 : async fn get_vectored_reconstruct_data(
4229 312343 : &self,
4230 312343 : mut query: VersionedKeySpaceQuery,
4231 312343 : reconstruct_state: &mut ValuesReconstructState,
4232 312343 : ctx: &RequestContext,
4233 312343 : ) -> Result<(), GetVectoredError> {
4234 312343 : let original_hwm_lsn = query.high_watermark_lsn().unwrap();
4235 312343 :
4236 312343 : let mut timeline_owned: Arc<Timeline>;
4237 312343 : let mut timeline = self;
4238 :
4239 312342 : let missing_keyspace = loop {
4240 425197 : if self.cancel.is_cancelled() {
4241 0 : return Err(GetVectoredError::Cancelled);
4242 425197 : }
4243 :
4244 : let TimelineVisitOutcome {
4245 425197 : completed_keyspace: completed,
4246 425197 : image_covered_keyspace,
4247 : } = {
4248 425197 : let ctx = RequestContextBuilder::from(ctx)
4249 425197 : .perf_span(|crnt_perf_span| {
4250 0 : info_span!(
4251 : target: PERF_TRACE_TARGET,
4252 0 : parent: crnt_perf_span,
4253 : "PLAN_IO_TIMELINE",
4254 : timeline = %timeline.timeline_id,
4255 0 : high_watermark_lsn = %query.high_watermark_lsn().unwrap(),
4256 : )
4257 425197 : })
4258 425197 : .attached_child();
4259 425197 :
4260 425197 : Self::get_vectored_reconstruct_data_timeline(
4261 425197 : timeline,
4262 425197 : &query,
4263 425197 : reconstruct_state,
4264 425197 : &self.cancel,
4265 425197 : &ctx,
4266 425197 : )
4267 425197 : .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone())
4268 425197 : .await?
4269 : };
4270 :
4271 425197 : query.remove_overlapping_with(&completed);
4272 425197 :
4273 425197 : // Do not descend into the ancestor timeline for aux files.
4274 425197 : // We don't return a blanket [`GetVectoredError::MissingKey`] to avoid
4275 425197 : // stalling compaction.
4276 425197 : query.remove_overlapping_with(&KeySpace {
4277 425197 : ranges: vec![NON_INHERITED_RANGE, Key::sparse_non_inherited_keyspace()],
4278 425197 : });
4279 425197 :
4280 425197 : // Keyspace is fully retrieved
4281 425197 : if query.is_empty() {
4282 312210 : break None;
4283 112987 : }
4284 :
4285 112987 : let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() else {
4286 : // Not fully retrieved but no ancestor timeline.
4287 132 : break Some(query.total_keyspace());
4288 : };
4289 :
4290 : // Now we check whether there are keys that are covered by an image layer but do not exist in that
4291 : // image layer, which means that those keys do not exist.
4292 :
4293 : // The block below will stop the vectored search if any of the keys encountered an image layer
4294 : // which did not contain a snapshot for said key. Since we have already removed all completed
4295 : // keys from `keyspace`, we expect there to be no overlap between it and the image covered key
4296 : // space. If that's not the case, we had at least one key encounter a gap in the image layer
4297 : // and stop the search as a result of that.
4298 112855 : let mut removed = query.remove_overlapping_with(&image_covered_keyspace);
4299 112855 : // Do not fire a missing key error and end early for sparse keys. Note that we have already removed
4300 112855 : // the non-inherited keyspaces before, so we can safely do a full `SPARSE_RANGE` remove instead of
4301 112855 : // figuring out what the inherited key range is and doing fine-grained pruning.
4302 112855 : removed.remove_overlapping_with(&KeySpace {
4303 112855 : ranges: vec![SPARSE_RANGE],
4304 112855 : });
4305 112855 : if !removed.is_empty() {
4306 0 : break Some(removed);
4307 112855 : }
4308 112855 :
4309 112855 : // Each key range in the original query is at some point in the LSN space.
4310 112855 : // When descending into the ancestor, lower all ranges in the LSN space
4311 112855 : // such that new changes on the parent timeline are not visible.
4312 112855 : query.lower(timeline.ancestor_lsn);
4313 112855 :
4314 112855 : let ctx = RequestContextBuilder::from(ctx)
4315 112855 : .perf_span(|crnt_perf_span| {
4316 0 : info_span!(
4317 : target: PERF_TRACE_TARGET,
4318 0 : parent: crnt_perf_span,
4319 : "GET_ANCESTOR",
4320 : timeline = %timeline.timeline_id,
4321 0 : ancestor = %ancestor_timeline.timeline_id,
4322 : ancestor_lsn = %timeline.ancestor_lsn
4323 : )
4324 112855 : })
4325 112855 : .attached_child();
4326 :
4327 112855 : timeline_owned = timeline
4328 112855 : .get_ready_ancestor_timeline(ancestor_timeline, &ctx)
4329 112855 : .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone())
4330 112855 : .await?;
4331 112854 : timeline = &*timeline_owned;
4332 : };
4333 :
4334 : // Remove sparse keys from the keyspace so that it doesn't fire errors.
4335 312342 : let missing_keyspace = if let Some(missing_keyspace) = missing_keyspace {
4336 132 : let mut missing_keyspace = missing_keyspace;
4337 132 : missing_keyspace.remove_overlapping_with(&KeySpace {
4338 132 : ranges: vec![SPARSE_RANGE],
4339 132 : });
4340 132 : if missing_keyspace.is_empty() {
4341 125 : None
4342 : } else {
4343 7 : Some(missing_keyspace)
4344 : }
4345 : } else {
4346 312210 : None
4347 : };
4348 :
4349 312342 : if let Some(missing_keyspace) = missing_keyspace {
4350 7 : return Err(GetVectoredError::MissingKey(Box::new(MissingKeyError {
4351 7 : keyspace: missing_keyspace, /* better if we can store the full keyspace */
4352 7 : shard: self.shard_identity.number,
4353 7 : original_hwm_lsn,
4354 7 : ancestor_lsn: Some(timeline.ancestor_lsn),
4355 7 : backtrace: None,
4356 7 : read_path: std::mem::take(&mut reconstruct_state.read_path),
4357 7 : query: None,
4358 7 : })));
4359 312335 : }
4360 312335 :
4361 312335 : Ok(())
4362 312343 : }
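// Illustrative sketch (not part of the original source or coverage data): the shape of the
// ancestor descent loop in `get_vectored_reconstruct_data` above, reduced to plain data.
// `SketchTimeline`, u32 keys and the in-memory "visit" are simplified stand-ins, not
// pageserver types; the real code also lowers LSNs and handles image coverage.
fn sketch_descend_through_ancestors() {
    struct SketchTimeline {
        // Keys this timeline can fully resolve on its own.
        local_keys: std::collections::BTreeSet<u32>,
        ancestor: Option<Box<SketchTimeline>>,
    }

    // Keys the caller still needs; visiting a timeline removes what it completed.
    let mut pending: std::collections::BTreeSet<u32> = [1, 2, 3].into();

    let chain = SketchTimeline {
        local_keys: [3].into(),
        ancestor: Some(Box::new(SketchTimeline {
            local_keys: [1, 2].into(),
            ancestor: None,
        })),
    };

    let mut current = &chain;
    let missing = loop {
        // "Visit" the timeline: everything it holds locally is now complete.
        pending.retain(|k| !current.local_keys.contains(k));
        if pending.is_empty() {
            break None; // fully resolved, nothing missing
        }
        match &current.ancestor {
            // Descend into the ancestor and try to resolve the remaining keys there.
            Some(parent) => current = parent.as_ref(),
            // No ancestor left: whatever is still pending is genuinely missing.
            None => break Some(pending.clone()),
        }
    };
    assert_eq!(missing, None);
}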
4363 :
4364 425197 : async fn get_vectored_init_fringe(
4365 425197 : &self,
4366 425197 : query: &VersionedKeySpaceQuery,
4367 425197 : ) -> Result<LayerFringe, GetVectoredError> {
4368 425197 : let mut fringe = LayerFringe::new();
4369 425197 : let guard = self.layers.read(LayerManagerLockHolder::GetPage).await;
4370 :
4371 425197 : match query {
4372 415005 : VersionedKeySpaceQuery::Uniform { keyspace, lsn } => {
4373 415005 : // LSNs requested by the compute or determined by the pageserver
4374 415005 : // are inclusive. Queries to the layer map use exclusive LSNs.
4375 415005 : // Hence, bump the value before the query - same in the other
4376 415005 : // match arm.
4377 415005 : let cont_lsn = Lsn(lsn.0 + 1);
4378 415005 : guard.update_search_fringe(keyspace, cont_lsn, &mut fringe)?;
4379 : }
4380 10192 : VersionedKeySpaceQuery::Scattered { keyspaces_at_lsn } => {
4381 21109 : for (lsn, keyspace) in keyspaces_at_lsn.iter() {
4382 21109 : let cont_lsn_for_keyspace = Lsn(lsn.0 + 1);
4383 21109 : guard.update_search_fringe(keyspace, cont_lsn_for_keyspace, &mut fringe)?;
4384 : }
4385 : }
4386 : }
4387 :
4388 425197 : Ok(fringe)
4389 425197 : }
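// Illustrative sketch (not part of the original source or coverage data): why
// `get_vectored_init_fringe` above adds 1 to the request LSN. Request LSNs are inclusive
// ("the page as of LSN X"), while the layer-map search takes an exclusive upper bound,
// so the search starts at X + 1. Plain u64s stand in for `Lsn`.
fn sketch_exclusive_search_lsn(inclusive_request_lsn: u64) -> u64 {
    // A layer whose LSN range starts strictly below this bound may contain data
    // visible at the requested (inclusive) LSN.
    inclusive_request_lsn + 1
}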
4390 :
4391 : /// Collect the reconstruct data for a keyspace from the specified timeline.
4392 : ///
4393 : /// Maintain a fringe [`LayerFringe`] which tracks all the layers that intersect
4394 : /// the current keyspace. The current keyspace of the search at any given timeline
4395 : /// is the original keyspace minus all the keys that have been completed minus
4396 : /// any keys for which we couldn't find an intersecting layer. It's not tracked explicitly,
4397 : /// but if you merge all the keyspaces in the fringe, you get the "current keyspace".
4398 : ///
4399 : /// This is basically a depth-first search visitor implementation where a vertex
4400 : /// is the (layer, lsn range, key space) tuple. The fringe acts as the stack.
4401 : ///
4402 : /// At each iteration pop the top of the fringe (the layer with the highest Lsn)
4403 : /// and get all the required reconstruct data from the layer in one go.
4404 : ///
4405 : /// Returns the completed keyspace and the keyspaces with image coverage. The caller
4406 : /// decides how to deal with these two keyspaces.
4407 425197 : async fn get_vectored_reconstruct_data_timeline(
4408 425197 : timeline: &Timeline,
4409 425197 : query: &VersionedKeySpaceQuery,
4410 425197 : reconstruct_state: &mut ValuesReconstructState,
4411 425197 : cancel: &CancellationToken,
4412 425197 : ctx: &RequestContext,
4413 425197 : ) -> Result<TimelineVisitOutcome, GetVectoredError> {
4414 425197 : // Prevent GC from progressing while visiting the current timeline.
4415 425197 : // If we are GC-ing because a new image layer was added while traversing
4416 425197 : // the timeline, then it will remove layers that are required for fulfilling
4417 425197 : // the current get request (read-path cannot "look back" and notice the new
4418 425197 : // image layer).
4419 425197 : let _gc_cutoff_holder = timeline.get_applied_gc_cutoff_lsn();
4420 :
4421 : // See `compaction::compact_with_gc` for why we need this.
4422 425197 : let _guard = timeline.gc_compaction_layer_update_lock.read().await;
4423 :
4424 : // Initialize the fringe
4425 425197 : let mut fringe = timeline.get_vectored_init_fringe(query).await?;
4426 :
4427 425197 : let mut completed_keyspace = KeySpace::default();
4428 425197 : let mut image_covered_keyspace = KeySpaceRandomAccum::new();
4429 :
4430 870865 : while let Some((layer_to_read, keyspace_to_read, lsn_range)) = fringe.next_layer() {
4431 445668 : if cancel.is_cancelled() {
4432 0 : return Err(GetVectoredError::Cancelled);
4433 445668 : }
4434 :
4435 445668 : if let Some(ref mut read_path) = reconstruct_state.read_path {
4436 445668 : read_path.record_layer_visit(&layer_to_read, &keyspace_to_read, &lsn_range);
4437 445668 : }
4438 :
4439 : // Visit the layer and plan IOs for it
4440 445668 : let next_cont_lsn = lsn_range.start;
4441 445668 : layer_to_read
4442 445668 : .get_values_reconstruct_data(
4443 445668 : keyspace_to_read.clone(),
4444 445668 : lsn_range,
4445 445668 : reconstruct_state,
4446 445668 : ctx,
4447 445668 : )
4448 445668 : .await?;
4449 :
4450 445668 : let mut unmapped_keyspace = keyspace_to_read;
4451 445668 : let cont_lsn = next_cont_lsn;
4452 445668 :
4453 445668 : reconstruct_state.on_layer_visited(&layer_to_read);
4454 445668 :
4455 445668 : let (keys_done_last_step, keys_with_image_coverage) =
4456 445668 : reconstruct_state.consume_done_keys();
4457 445668 : unmapped_keyspace.remove_overlapping_with(&keys_done_last_step);
4458 445668 : completed_keyspace.merge(&keys_done_last_step);
4459 445668 : if let Some(keys_with_image_coverage) = keys_with_image_coverage {
4460 14124 : unmapped_keyspace
4461 14124 : .remove_overlapping_with(&KeySpace::single(keys_with_image_coverage.clone()));
4462 14124 : image_covered_keyspace.add_range(keys_with_image_coverage);
4463 431544 : }
4464 :
4465 : // Query the layer map for the next layers to read.
4466 : //
4467 : // Do not descend any further if the last layer we visited
4468 : // completed all keys in the keyspace it inspected. This is not
4469 : // required for correctness, but avoids visiting extra layers
4470 : // which turns out to be a perf bottleneck in some cases.
4471 445668 : if !unmapped_keyspace.is_empty() {
4472 129002 : let guard = timeline.layers.read(LayerManagerLockHolder::GetPage).await;
4473 129002 : guard.update_search_fringe(&unmapped_keyspace, cont_lsn, &mut fringe)?;
4474 :
4475 : // It's safe to drop the layer map lock after planning the next round of reads.
4476 : // The fringe keeps readable handles for the layers which are safe to read even
4477 : // if layers were compacted or flushed.
4478 : //
4479 : // The more interesting consideration is: "Why is the read algorithm still correct
4480 : // if the layer map changes while it is operating?". Doing a vectored read on a
4481 : // timeline boils down to pushing an imaginary lsn boundary downwards for each range
4482 : // covered by the read. The layer map tells us how to move the lsn downwards for a
4483 : // range at *a particular point in time*. It is fine for the answer to be different
4484 : // at two different time points.
4485 129002 : drop(guard);
4486 316666 : }
4487 : }
4488 :
4489 425197 : Ok(TimelineVisitOutcome {
4490 425197 : completed_keyspace,
4491 425197 : image_covered_keyspace: image_covered_keyspace.consume_keyspace(),
4492 425197 : })
4493 425197 : }
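// Illustrative sketch (not part of the original source or coverage data): the fringe in
// `get_vectored_reconstruct_data_timeline` above behaves like a max-heap keyed on the
// layer's end LSN: always visit the newest intersecting layer first, then re-query the
// layer map for whatever keys that layer did not complete. The heap of (end_lsn, name)
// tuples here is a simplified stand-in for the real `LayerFringe`.
fn sketch_fringe_visit_order() {
    use std::collections::BinaryHeap;

    // BinaryHeap pops the largest end_lsn first, i.e. the newest layer.
    let mut fringe: BinaryHeap<(u64, &str)> = BinaryHeap::new();
    fringe.push((80, "delta 60..80"));
    fringe.push((100, "image @ 100"));
    fringe.push((60, "delta 40..60"));

    let mut visited = Vec::new();
    while let Some((_end_lsn, name)) = fringe.pop() {
        // In the real code this is where get_values_reconstruct_data() runs and
        // completed keys are removed before pushing new work back onto the fringe.
        visited.push(name);
    }
    assert_eq!(visited, ["image @ 100", "delta 60..80", "delta 40..60"]);
}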
4494 :
4495 112855 : async fn get_ready_ancestor_timeline(
4496 112855 : &self,
4497 112855 : ancestor: &Arc<Timeline>,
4498 112855 : ctx: &RequestContext,
4499 112855 : ) -> Result<Arc<Timeline>, GetReadyAncestorError> {
4500 112855 : // It's possible that the ancestor timeline isn't active yet, or
4501 112855 : // is active but hasn't yet caught up to the branch point. Wait
4502 112855 : // for it.
4503 112855 : //
4504 112855 : // This cannot happen while the pageserver is running normally,
4505 112855 : // because you cannot create a branch from a point that isn't
4506 112855 : // present in the pageserver yet. However, we don't wait for the
4507 112855 : // branch point to be uploaded to cloud storage before creating
4508 112855 : // a branch. I.e., the branch LSN need not be remote consistent
4509 112855 : // for the branching operation to succeed.
4510 112855 : //
4511 112855 : // Hence, if we try to load a tenant in such a state where
4512 112855 : // 1. the existence of the branch was persisted (in IndexPart and/or locally)
4513 112855 : // 2. but the ancestor state is behind branch_lsn because it was not yet persisted
4514 112855 : // then we will need to wait for the ancestor timeline to
4515 112855 : // re-stream WAL up to branch_lsn before we access it.
4516 112855 : //
4517 112855 : // How can a tenant get in such a state?
4518 112855 : // - ungraceful pageserver process exit
4519 112855 : // - detach+attach => this is a bug, https://github.com/neondatabase/neon/issues/4219
4520 112855 : //
4521 112855 : // NB: this could be avoided by requiring
4522 112855 : // branch_lsn >= remote_consistent_lsn
4523 112855 : // during branch creation.
4524 112855 : match ancestor.wait_to_become_active(ctx).await {
4525 112854 : Ok(()) => {}
4526 : Err(TimelineState::Stopping) => {
4527 : // If an ancestor is stopping, it means the tenant is stopping: handle this the same as if this timeline was stopping.
4528 0 : return Err(GetReadyAncestorError::Cancelled);
4529 : }
4530 1 : Err(state) => {
4531 1 : return Err(GetReadyAncestorError::BadState {
4532 1 : timeline_id: ancestor.timeline_id,
4533 1 : state,
4534 1 : });
4535 : }
4536 : }
4537 112854 : ancestor
4538 112854 : .wait_lsn(
4539 112854 : self.ancestor_lsn,
4540 112854 : WaitLsnWaiter::Timeline(self),
4541 112854 : WaitLsnTimeout::Default,
4542 112854 : ctx,
4543 112854 : )
4544 112854 : .await
4545 112854 : .map_err(|e| match e {
4546 0 : e @ WaitLsnError::Timeout(_) => GetReadyAncestorError::AncestorLsnTimeout(e),
4547 0 : WaitLsnError::Shutdown => GetReadyAncestorError::Cancelled,
4548 0 : WaitLsnError::BadState(state) => GetReadyAncestorError::BadState {
4549 0 : timeline_id: ancestor.timeline_id,
4550 0 : state,
4551 0 : },
4552 112854 : })?;
4553 :
4554 112854 : Ok(ancestor.clone())
4555 112855 : }
4556 :
4557 148592 : pub(crate) fn get_shard_identity(&self) -> &ShardIdentity {
4558 148592 : &self.shard_identity
4559 148592 : }
4560 :
4561 : #[inline(always)]
4562 0 : pub(crate) fn shard_timeline_id(&self) -> ShardTimelineId {
4563 0 : ShardTimelineId {
4564 0 : shard_index: ShardIndex {
4565 0 : shard_number: self.shard_identity.number,
4566 0 : shard_count: self.shard_identity.count,
4567 0 : },
4568 0 : timeline_id: self.timeline_id,
4569 0 : }
4570 0 : }
4571 :
4572 : /// Returns a non-frozen open in-memory layer for ingestion.
4573 : ///
4574 : /// Takes a witness of timeline writer state lock being held, because it makes no sense to call
4575 : /// this function without holding the mutex.
4576 659 : async fn get_layer_for_write(
4577 659 : &self,
4578 659 : lsn: Lsn,
4579 659 : _guard: &tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
4580 659 : ctx: &RequestContext,
4581 659 : ) -> anyhow::Result<Arc<InMemoryLayer>> {
4582 659 : let mut guard = self
4583 659 : .layers
4584 659 : .write(LayerManagerLockHolder::GetLayerForWrite)
4585 659 : .await;
4586 :
4587 659 : let last_record_lsn = self.get_last_record_lsn();
4588 659 : ensure!(
4589 659 : lsn > last_record_lsn,
4590 0 : "cannot modify relation after advancing last_record_lsn (incoming_lsn={}, last_record_lsn={})",
4591 : lsn,
4592 : last_record_lsn,
4593 : );
4594 :
4595 659 : let layer = guard
4596 659 : .open_mut()?
4597 659 : .get_layer_for_write(
4598 659 : lsn,
4599 659 : self.conf,
4600 659 : self.timeline_id,
4601 659 : self.tenant_shard_id,
4602 659 : &self.gate,
4603 659 : &self.cancel,
4604 659 : ctx,
4605 659 : )
4606 659 : .await?;
4607 659 : Ok(layer)
4608 659 : }
4609 :
4610 2639558 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
4611 2639558 : assert!(new_lsn.is_aligned());
4612 :
4613 2639558 : self.metrics.last_record_lsn_gauge.set(new_lsn.0 as i64);
4614 2639558 : self.last_record_lsn.advance(new_lsn);
4615 2639558 : }
4616 :
4617 : /// Freeze any existing open in-memory layer and unconditionally notify the flush loop.
4618 : ///
4619 : /// Unconditional flush loop notification is given because in sharded cases we will want to
4620 : /// leave an Lsn gap. Unsharded tenants do not have Lsn gaps.
4621 610 : async fn freeze_inmem_layer_at(
4622 610 : &self,
4623 610 : at: Lsn,
4624 610 : write_lock: &mut tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
4625 610 : ) -> Result<u64, FlushLayerError> {
4626 610 : let frozen = {
4627 610 : let mut guard = self
4628 610 : .layers
4629 610 : .write(LayerManagerLockHolder::TryFreezeLayer)
4630 610 : .await;
4631 610 : guard
4632 610 : .open_mut()?
4633 610 : .try_freeze_in_memory_layer(at, &self.last_freeze_at, write_lock, &self.metrics)
4634 610 : .await
4635 : };
4636 :
4637 610 : if frozen {
4638 596 : let now = Instant::now();
4639 596 : *(self.last_freeze_ts.write().unwrap()) = now;
4640 596 : }
4641 :
4642 : // Increment the flush cycle counter and wake up the flush task.
4643 : // Remember the new value, so that when we listen for the flush
4644 : // to finish, we know when the flush that we initiated has
4645 : // finished, instead of some other flush that was started earlier.
4646 610 : let mut my_flush_request = 0;
4647 610 :
4648 610 : let flush_loop_state = { *self.flush_loop_state.lock().unwrap() };
4649 610 : if !matches!(flush_loop_state, FlushLoopState::Running { .. }) {
4650 0 : return Err(FlushLayerError::NotRunning(flush_loop_state));
4651 610 : }
4652 610 :
4653 610 : self.layer_flush_start_tx.send_modify(|(counter, lsn)| {
4654 610 : my_flush_request = *counter + 1;
4655 610 : *counter = my_flush_request;
4656 610 : *lsn = std::cmp::max(at, *lsn);
4657 610 : });
4658 610 :
4659 610 : assert_ne!(my_flush_request, 0);
4660 :
4661 610 : Ok(my_flush_request)
4662 610 : }
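// Illustrative sketch (not part of the original source or coverage data): the flush
// "ticket" protocol used by `freeze_inmem_layer_at` above and `wait_flush_completion`
// below. The requester bumps a counter in a watch channel and remembers the new value as
// its ticket; a waiter is done once the published counter reaches that ticket. The sketch
// omits LSNs, the separate done-channel, and error propagation.
fn sketch_flush_ticket() {
    let (start_tx, start_rx) = tokio::sync::watch::channel(0u64);

    // Requester side: atomically bump the counter and keep the new value as a ticket.
    let mut my_ticket = 0;
    start_tx.send_modify(|counter| {
        my_ticket = *counter + 1;
        *counter = my_ticket;
    });

    // Waiter side: completion is simply "published counter >= my ticket".
    let published = *start_rx.borrow();
    assert!(published >= my_ticket);
}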
4663 :
4664 : /// Layer flusher task's main loop.
4665 231 : async fn flush_loop(
4666 231 : self: &Arc<Self>,
4667 231 : mut layer_flush_start_rx: tokio::sync::watch::Receiver<(u64, Lsn)>,
4668 231 : ctx: &RequestContext,
4669 231 : ) {
4670 : // Subscribe to L0 delta layer updates, for compaction backpressure.
4671 231 : let mut watch_l0 = match self
4672 231 : .layers
4673 231 : .read(LayerManagerLockHolder::FlushLoop)
4674 231 : .await
4675 231 : .layer_map()
4676 : {
4677 231 : Ok(lm) => lm.watch_level0_deltas(),
4678 0 : Err(Shutdown) => return,
4679 : };
4680 :
4681 231 : info!("started flush loop");
4682 : loop {
4683 833 : tokio::select! {
4684 833 : _ = self.cancel.cancelled() => {
4685 5 : info!("shutting down layer flush task due to Timeline::cancel");
4686 5 : break;
4687 : },
4688 833 : _ = layer_flush_start_rx.changed() => {}
4689 602 : }
4690 602 : trace!("waking up");
4691 602 : let (flush_counter, frozen_to_lsn) = *layer_flush_start_rx.borrow();
4692 602 :
4693 602 : // The highest LSN to which we flushed in the loop over frozen layers
4694 602 : let mut flushed_to_lsn = Lsn(0);
4695 :
4696 602 : let result = loop {
4697 1197 : if self.cancel.is_cancelled() {
4698 0 : info!("dropping out of flush loop for timeline shutdown");
4699 : // Note: we do not bother transmitting into [`layer_flush_done_tx`], because
4700 : // anyone waiting on that will respect self.cancel as well: they will stop
4701 : // waiting at the same time as we drop out of this loop.
4702 0 : return;
4703 1197 : }
4704 1197 :
4705 1197 : // Break to notify potential waiters as soon as we've flushed the requested LSN. If
4706 1197 : // more requests have arrived in the meanwhile, we'll resume flushing afterwards.
4707 1197 : if flushed_to_lsn >= frozen_to_lsn {
4708 587 : break Ok(());
4709 610 : }
4710 :
4711 : // Fetch the next layer to flush, if any.
4712 610 : let (layer, l0_count, frozen_count, frozen_size) = {
4713 610 : let layers = self.layers.read(LayerManagerLockHolder::FlushLoop).await;
4714 610 : let Ok(lm) = layers.layer_map() else {
4715 0 : info!("dropping out of flush loop for timeline shutdown");
4716 0 : return;
4717 : };
4718 610 : let l0_count = lm.level0_deltas().len();
4719 610 : let frozen_count = lm.frozen_layers.len();
4720 610 : let frozen_size: u64 = lm
4721 610 : .frozen_layers
4722 610 : .iter()
4723 610 : .map(|l| l.estimated_in_mem_size())
4724 610 : .sum();
4725 610 : let layer = lm.frozen_layers.front().cloned();
4726 610 : (layer, l0_count, frozen_count, frozen_size)
4727 610 : // drop 'layers' lock
4728 610 : };
4729 610 : let Some(layer) = layer else {
4730 14 : break Ok(());
4731 : };
4732 :
4733 : // Stall flushes to backpressure if compaction can't keep up. This is propagated up
4734 : // to WAL ingestion by having ephemeral layer rolls wait for flushes.
4735 596 : if let Some(stall_threshold) = self.get_l0_flush_stall_threshold() {
4736 0 : if l0_count >= stall_threshold {
4737 0 : warn!(
4738 0 : "stalling layer flushes for compaction backpressure at {l0_count} \
4739 0 : L0 layers ({frozen_count} frozen layers with {frozen_size} bytes)"
4740 : );
4741 0 : let stall_timer = self
4742 0 : .metrics
4743 0 : .flush_delay_histo
4744 0 : .start_timer()
4745 0 : .record_on_drop();
4746 0 : tokio::select! {
4747 0 : result = watch_l0.wait_for(|l0| *l0 < stall_threshold) => {
4748 0 : if let Ok(l0) = result.as_deref() {
4749 0 : let delay = stall_timer.elapsed().as_secs_f64();
4750 0 : info!("resuming layer flushes at {l0} L0 layers after {delay:.3}s");
4751 0 : }
4752 : },
4753 0 : _ = self.cancel.cancelled() => {},
4754 : }
4755 0 : continue; // check again
4756 0 : }
4757 596 : }
4758 :
4759 : // Flush the layer.
4760 596 : let flush_timer = self.metrics.flush_time_histo.start_timer();
4761 596 : match self.flush_frozen_layer(layer, ctx).await {
4762 595 : Ok(layer_lsn) => flushed_to_lsn = max(flushed_to_lsn, layer_lsn),
4763 : Err(FlushLayerError::Cancelled) => {
4764 0 : info!("dropping out of flush loop for timeline shutdown");
4765 0 : return;
4766 : }
4767 1 : err @ Err(
4768 1 : FlushLayerError::NotRunning(_)
4769 1 : | FlushLayerError::Other(_)
4770 1 : | FlushLayerError::CreateImageLayersError(_),
4771 1 : ) => {
4772 1 : error!("could not flush frozen layer: {err:?}");
4773 1 : break err.map(|_| ());
4774 : }
4775 : }
4776 595 : let flush_duration = flush_timer.stop_and_record();
4777 595 :
4778 595 : // Notify the tenant compaction loop if L0 compaction is needed.
4779 595 : let l0_count = *watch_l0.borrow();
4780 595 : if l0_count >= self.get_compaction_threshold() {
4781 238 : self.l0_compaction_trigger.notify_one();
4782 357 : }
4783 :
4784 : // Delay the next flush to backpressure if compaction can't keep up. We delay by the
4785 : // flush duration such that the flush takes 2x as long. This is propagated up to WAL
4786 : // ingestion by having ephemeral layer rolls wait for flushes.
4787 595 : if let Some(delay_threshold) = self.get_l0_flush_delay_threshold() {
4788 3 : if l0_count >= delay_threshold {
4789 0 : let delay = flush_duration.as_secs_f64();
4790 0 : info!(
4791 0 : "delaying layer flush by {delay:.3}s for compaction backpressure at \
4792 0 : {l0_count} L0 layers ({frozen_count} frozen layers with {frozen_size} bytes)"
4793 : );
4794 0 : let _delay_timer = self
4795 0 : .metrics
4796 0 : .flush_delay_histo
4797 0 : .start_timer()
4798 0 : .record_on_drop();
4799 0 : tokio::select! {
4800 0 : _ = tokio::time::sleep(flush_duration) => {},
4801 0 : _ = watch_l0.wait_for(|l0| *l0 < delay_threshold) => {},
4802 0 : _ = self.cancel.cancelled() => {},
4803 : }
4804 3 : }
4805 592 : }
4806 : };
4807 :
4808 : // Unsharded tenants should never advance their LSN beyond the end of the
4809 : // highest layer they write: such gaps between layer data and the frozen LSN
4810 : // are only legal on sharded tenants.
4811 602 : debug_assert!(
4812 602 : self.shard_identity.count.count() > 1
4813 595 : || flushed_to_lsn >= frozen_to_lsn
4814 14 : || !flushed_to_lsn.is_valid()
4815 : );
4816 :
4817 602 : if flushed_to_lsn < frozen_to_lsn
4818 15 : && self.shard_identity.count.count() > 1
4819 1 : && result.is_ok()
4820 : {
4821 : // If our layer flushes didn't carry disk_consistent_lsn up to the `to_lsn` advertised
4822 : // to us via layer_flush_start_rx, then advance it here.
4823 : //
4824 : // This path is only taken for tenants with multiple shards: single sharded tenants should
4825 : // never encounter a gap in the wal.
4826 0 : let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
4827 0 : tracing::debug!(
4828 0 : "Advancing disk_consistent_lsn across layer gap {old_disk_consistent_lsn}->{frozen_to_lsn}"
4829 : );
4830 0 : if self.set_disk_consistent_lsn(frozen_to_lsn) {
4831 0 : if let Err(e) = self.schedule_uploads(frozen_to_lsn, vec![]) {
4832 0 : tracing::warn!(
4833 0 : "Failed to schedule metadata upload after updating disk_consistent_lsn: {e}"
4834 : );
4835 0 : }
4836 0 : }
4837 602 : }
4838 :
4839 : // Notify any listeners that we're done
4840 602 : let _ = self
4841 602 : .layer_flush_done_tx
4842 602 : .send_replace((flush_counter, result));
4843 : }
4844 5 : }
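// Illustrative sketch (not part of the original source or coverage data): the two L0
// backpressure levels applied in `flush_loop` above. Below the delay threshold flushes
// run freely; at or above it each flush is slowed down by roughly its own duration; at
// or above the stall threshold flushes wait for compaction to catch up. The string labels
// and threshold parameters are simplified stand-ins for the real config knobs.
fn sketch_l0_backpressure_label(
    l0_count: usize,
    delay_threshold: Option<usize>,
    stall_threshold: Option<usize>,
) -> &'static str {
    if stall_threshold.is_some_and(|t| l0_count >= t) {
        "stall: wait until compaction brings the L0 count below the threshold"
    } else if delay_threshold.is_some_and(|t| l0_count >= t) {
        "delay: sleep for roughly one flush duration before the next flush"
    } else {
        "none: flush at full speed"
    }
}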
4845 :
4846 : /// Waits for any flush request created by [`Self::freeze_inmem_layer_at`] to complete.
4847 570 : async fn wait_flush_completion(&self, request: u64) -> Result<(), FlushLayerError> {
4848 570 : let mut rx = self.layer_flush_done_tx.subscribe();
4849 : loop {
4850 : {
4851 1156 : let (last_result_counter, last_result) = &*rx.borrow();
4852 1156 : if *last_result_counter >= request {
4853 570 : if let Err(err) = last_result {
4854 : // We already logged the original error in
4855 : // flush_loop. We cannot propagate it to the caller
4856 : // here, because it might not be Cloneable
4857 1 : return Err(err.clone());
4858 : } else {
4859 569 : return Ok(());
4860 : }
4861 586 : }
4862 586 : }
4863 586 : trace!("waiting for flush to complete");
4864 586 : tokio::select! {
4865 586 : rx_e = rx.changed() => {
4866 586 : rx_e.map_err(|_| FlushLayerError::NotRunning(*self.flush_loop_state.lock().unwrap()))?;
4867 : },
4868 : // Cancellation safety: we are not leaving an I/O in-flight for the flush, we're just ignoring
4869 : // the notification from [`flush_loop`] that it completed.
4870 586 : _ = self.cancel.cancelled() => {
4871 0 : tracing::info!("Cancelled layer flush due on timeline shutdown");
4872 0 : return Ok(())
4873 : }
4874 : };
4875 586 : trace!("done")
4876 : }
4877 570 : }
4878 :
4879 : /// Flush one frozen in-memory layer to disk, as a new delta layer.
4880 : ///
4881 : /// Return value is the last lsn (inclusive) of the layer that was frozen.
4882 : #[instrument(skip_all, fields(layer=%frozen_layer))]
4883 : async fn flush_frozen_layer(
4884 : self: &Arc<Self>,
4885 : frozen_layer: Arc<InMemoryLayer>,
4886 : ctx: &RequestContext,
4887 : ) -> Result<Lsn, FlushLayerError> {
4888 : debug_assert_current_span_has_tenant_and_timeline_id();
4889 :
4890 : // As a special case, when we have just imported an image into the repository,
4891 : // instead of writing out an L0 delta layer, we directly write out image layer
4892 : // files. This is possible as long as *all* the data imported into the
4893 : // repository have the same LSN.
4894 : let lsn_range = frozen_layer.get_lsn_range();
4895 :
4896 : // Whether to directly create image layers for this flush, or flush them as delta layers
4897 : let create_image_layer =
4898 : lsn_range.start == self.initdb_lsn && lsn_range.end == Lsn(self.initdb_lsn.0 + 1);
4899 :
4900 : #[cfg(test)]
4901 : {
4902 : match &mut *self.flush_loop_state.lock().unwrap() {
4903 : FlushLoopState::NotStarted | FlushLoopState::Exited => {
4904 : panic!("flush loop not running")
4905 : }
4906 : FlushLoopState::Running {
4907 : expect_initdb_optimization,
4908 : initdb_optimization_count,
4909 : ..
4910 : } => {
4911 : if create_image_layer {
4912 : *initdb_optimization_count += 1;
4913 : } else {
4914 : assert!(!*expect_initdb_optimization, "expected initdb optimization");
4915 : }
4916 : }
4917 : }
4918 : }
4919 :
4920 : let (layers_to_upload, delta_layer_to_add) = if create_image_layer {
4921 : // Note: The 'ctx' in use here has DownloadBehavior::Error. We should not
4922 : // require downloading anything during initial import.
4923 : let ((rel_partition, metadata_partition), _lsn) = self
4924 : .repartition(
4925 : self.initdb_lsn,
4926 : self.get_compaction_target_size(),
4927 : EnumSet::empty(),
4928 : ctx,
4929 : )
4930 : .await
4931 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e.into()))?;
4932 :
4933 : if self.cancel.is_cancelled() {
4934 : return Err(FlushLayerError::Cancelled);
4935 : }
4936 :
4937 : // Ensure that we have a single call to `create_image_layers` with a combined dense keyspace.
4938 : // So that the key ranges don't overlap.
4939 : let mut partitions = KeyPartitioning::default();
4940 : partitions.parts.extend(rel_partition.parts);
4941 : if !metadata_partition.parts.is_empty() {
4942 : assert_eq!(
4943 : metadata_partition.parts.len(),
4944 : 1,
4945 : "currently sparse keyspace should only contain a single metadata keyspace"
4946 : );
4947 : // Safety: create_image_layers treats sparse keyspaces differently in that it does not scan
4948 : // every single key within the keyspace, and therefore, it's safe to force converting it
4949 : // into a dense keyspace before calling this function.
4950 : partitions
4951 : .parts
4952 : .extend(metadata_partition.into_dense().parts);
4953 : }
4954 :
4955 : let mut layers_to_upload = Vec::new();
4956 : let (generated_image_layers, is_complete) = self
4957 : .create_image_layers(
4958 : &partitions,
4959 : self.initdb_lsn,
4960 : ImageLayerCreationMode::Initial,
4961 : ctx,
4962 : LastImageLayerCreationStatus::Initial,
4963 : false, // don't yield for L0, we're flushing L0
4964 : )
4965 : .instrument(info_span!("create_image_layers", mode = %ImageLayerCreationMode::Initial, partition_mode = "initial", lsn = %self.initdb_lsn))
4966 : .await?;
4967 : debug_assert!(
4968 : matches!(is_complete, LastImageLayerCreationStatus::Complete),
4969 : "init image generation mode must fully cover the keyspace"
4970 : );
4971 : layers_to_upload.extend(generated_image_layers);
4972 :
4973 : (layers_to_upload, None)
4974 : } else {
4975 : // Normal case, write out a L0 delta layer file.
4976 : // `create_delta_layer` will not modify the layer map.
4977 : // We will remove frozen layer and add delta layer in one atomic operation later.
4978 : let Some(layer) = self
4979 : .create_delta_layer(&frozen_layer, None, ctx)
4980 : .await
4981 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?
4982 : else {
4983 : panic!("delta layer cannot be empty if no filter is applied");
4984 : };
4985 : (
4986 : // FIXME: even though we have a single image and single delta layer assumption
4987 : // we push them to vec
4988 : vec![layer.clone()],
4989 : Some(layer),
4990 : )
4991 : };
4992 :
4993 : pausable_failpoint!("flush-layer-cancel-after-writing-layer-out-pausable");
4994 :
4995 : if self.cancel.is_cancelled() {
4996 : return Err(FlushLayerError::Cancelled);
4997 : }
4998 :
4999 1 : fail_point!("flush-layer-before-update-remote-consistent-lsn", |_| {
5000 1 : Err(FlushLayerError::Other(anyhow!("failpoint").into()))
5001 1 : });
5002 :
5003 : let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1);
5004 :
5005 : // The new on-disk layers are now in the layer map. We can remove the
5006 : // in-memory layer from the map now. The flushed layer is stored in
5007 : // the mapping in `create_delta_layer`.
5008 : {
5009 : let mut guard = self
5010 : .layers
5011 : .write(LayerManagerLockHolder::FlushFrozenLayer)
5012 : .await;
5013 :
5014 : guard.open_mut()?.finish_flush_l0_layer(
5015 : delta_layer_to_add.as_ref(),
5016 : &frozen_layer,
5017 : &self.metrics,
5018 : );
5019 :
5020 : if self.set_disk_consistent_lsn(disk_consistent_lsn) {
5021 : // Schedule remote uploads that will reflect our new disk_consistent_lsn
5022 : self.schedule_uploads(disk_consistent_lsn, layers_to_upload)
5023 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?;
5024 : }
5025 : // release lock on 'layers'
5026 : };
5027 :
5028 : // FIXME: between create_delta_layer and the scheduling of the upload in `update_metadata_file`,
5029 : // a compaction can delete the file and then it won't be available for uploads any more.
5030 : // We still schedule the upload, resulting in an error, but ideally we'd somehow avoid this
5031 : // race situation.
5032 : // See https://github.com/neondatabase/neon/issues/4526
5033 : pausable_failpoint!("flush-frozen-pausable");
5034 :
5035 : // This failpoint is used by another test case `test_pageserver_recovery`.
5036 : fail_point!("flush-frozen-exit");
5037 :
5038 : Ok(Lsn(lsn_range.end.0 - 1))
5039 : }
5040 :
5041 : /// Return true if the value changed
5042 : ///
5043 : /// This function must only be used from the layer flush task.
5044 595 : fn set_disk_consistent_lsn(&self, new_value: Lsn) -> bool {
5045 595 : let old_value = self.disk_consistent_lsn.fetch_max(new_value);
5046 595 : assert!(
5047 595 : new_value >= old_value,
5048 0 : "disk_consistent_lsn must be growing monotonously at runtime; current {old_value}, offered {new_value}"
5049 : );
5050 :
5051 595 : self.metrics
5052 595 : .disk_consistent_lsn_gauge
5053 595 : .set(new_value.0 as i64);
5054 595 : new_value != old_value
5055 595 : }
5056 :
5057 : /// Update metadata file
5058 620 : fn schedule_uploads(
5059 620 : &self,
5060 620 : disk_consistent_lsn: Lsn,
5061 620 : layers_to_upload: impl IntoIterator<Item = ResidentLayer>,
5062 620 : ) -> anyhow::Result<()> {
5063 620 : // We can only save a valid 'prev_record_lsn' value on disk if we
5064 620 : // flushed *all* in-memory changes to disk. We only track
5065 620 : // 'prev_record_lsn' in memory for the latest processed record, so we
5066 620 : // don't remember what the correct value that corresponds to some old
5067 620 : // LSN is. But if we flush everything, then the value corresponding to the
5068 620 : // current 'last_record_lsn' is correct and we can store it on disk.
5069 620 : let RecordLsn {
5070 620 : last: last_record_lsn,
5071 620 : prev: prev_record_lsn,
5072 620 : } = self.last_record_lsn.load();
5073 620 : let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
5074 557 : Some(prev_record_lsn)
5075 : } else {
5076 63 : None
5077 : };
5078 :
5079 620 : let update = crate::tenant::metadata::MetadataUpdate::new(
5080 620 : disk_consistent_lsn,
5081 620 : ondisk_prev_record_lsn,
5082 620 : *self.applied_gc_cutoff_lsn.read(),
5083 620 : );
5084 620 :
5085 620 : fail_point!("checkpoint-before-saving-metadata", |x| bail!(
5086 0 : "{}",
5087 0 : x.unwrap()
5088 620 : ));
5089 :
5090 1221 : for layer in layers_to_upload {
5091 601 : self.remote_client.schedule_layer_file_upload(layer)?;
5092 : }
5093 620 : self.remote_client
5094 620 : .schedule_index_upload_for_metadata_update(&update)?;
5095 :
5096 620 : Ok(())
5097 620 : }
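// Illustrative sketch (not part of the original source or coverage data): the rule used
// by `schedule_uploads` above for persisting `prev_record_lsn`. The in-memory value is
// only known for the latest processed record, so it may only be written out when the
// flush covered everything up to `last_record_lsn`. Plain u64s stand in for `Lsn`.
fn sketch_ondisk_prev_record_lsn(
    disk_consistent_lsn: u64,
    last_record_lsn: u64,
    prev_record_lsn: u64,
) -> Option<u64> {
    if disk_consistent_lsn == last_record_lsn {
        // Everything in memory has been flushed: the tracked prev value matches the
        // record that disk_consistent_lsn points at, so it is safe to persist.
        Some(prev_record_lsn)
    } else {
        // A partial flush: we do not know the prev LSN for the flushed position.
        None
    }
}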
5098 :
5099 0 : pub(crate) async fn preserve_initdb_archive(&self) -> anyhow::Result<()> {
5100 0 : self.remote_client
5101 0 : .preserve_initdb_archive(
5102 0 : &self.tenant_shard_id.tenant_id,
5103 0 : &self.timeline_id,
5104 0 : &self.cancel,
5105 0 : )
5106 0 : .await
5107 0 : }
5108 :
5109 : // Write out the given frozen in-memory layer as a new L0 delta file. This L0 file will not be tracked
5110 : // in layer map immediately. The caller is responsible to put it into the layer map.
5111 486 : async fn create_delta_layer(
5112 486 : self: &Arc<Self>,
5113 486 : frozen_layer: &Arc<InMemoryLayer>,
5114 486 : key_range: Option<Range<Key>>,
5115 486 : ctx: &RequestContext,
5116 486 : ) -> anyhow::Result<Option<ResidentLayer>> {
5117 486 : let self_clone = Arc::clone(self);
5118 486 : let frozen_layer = Arc::clone(frozen_layer);
5119 486 : let ctx = ctx.attached_child();
5120 486 : let work = async move {
5121 486 : let Some((desc, path)) = frozen_layer
5122 486 : .write_to_disk(
5123 486 : &ctx,
5124 486 : key_range,
5125 486 : self_clone.l0_flush_global_state.inner(),
5126 486 : &self_clone.gate,
5127 486 : self_clone.cancel.clone(),
5128 486 : )
5129 486 : .await?
5130 : else {
5131 0 : return Ok(None);
5132 : };
5133 486 : let new_delta = Layer::finish_creating(self_clone.conf, &self_clone, desc, &path)?;
5134 :
5135 : // The write_to_disk() above calls writer.finish() which already did the fsync of the inodes.
5136 : // We just need to fsync the directory in which these inodes are linked,
5137 : // which we know to be the timeline directory.
5138 : //
5139 : // We use fatal_err() below because after write_to_disk returns with success,
5140 : // the in-memory state of the filesystem already has the layer file in its final place,
5141 : // and subsequent pageserver code could think it's durable while it really isn't.
5142 486 : let timeline_dir = VirtualFile::open(
5143 486 : &self_clone
5144 486 : .conf
5145 486 : .timeline_path(&self_clone.tenant_shard_id, &self_clone.timeline_id),
5146 486 : &ctx,
5147 486 : )
5148 486 : .await
5149 486 : .fatal_err("VirtualFile::open for timeline dir fsync");
5150 486 : timeline_dir
5151 486 : .sync_all()
5152 486 : .await
5153 486 : .fatal_err("VirtualFile::sync_all timeline dir");
5154 486 : anyhow::Ok(Some(new_delta))
5155 486 : };
5156 : // Before tokio-epoll-uring, we ran write_to_disk & the sync_all inside spawn_blocking.
5157 : // Preserve that behavior to maintain the same behavior for `virtual_file_io_engine=std-fs`.
5158 : use crate::virtual_file::io_engine::IoEngine;
5159 486 : match crate::virtual_file::io_engine::get() {
5160 0 : IoEngine::NotSet => panic!("io engine not set"),
5161 : IoEngine::StdFs => {
5162 0 : let span = tracing::info_span!("blocking");
5163 0 : tokio::task::spawn_blocking({
5164 0 : move || Handle::current().block_on(work.instrument(span))
5165 0 : })
5166 0 : .await
5167 0 : .context("spawn_blocking")
5168 0 : .and_then(|x| x)
5169 : }
5170 : #[cfg(target_os = "linux")]
5171 486 : IoEngine::TokioEpollUring => work.await,
5172 : }
5173 486 : }
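// Illustrative sketch (not part of the original source or coverage data): the durability
// pattern behind the comment in `create_delta_layer` above. Writing and fsyncing a file
// makes its *contents* durable; making the new directory entry durable additionally
// requires fsyncing the containing directory (on Linux). Uses only std; the file name is
// a hypothetical placeholder.
fn sketch_fsync_new_file_and_dir(dir: &std::path::Path) -> std::io::Result<()> {
    use std::io::Write;

    let file_path = dir.join("new-layer-file");
    let mut file = std::fs::File::create(&file_path)?;
    file.write_all(b"layer bytes")?;
    file.sync_all()?; // the file's contents and inode are now durable

    // Without this, a crash could lose the directory entry even though the
    // file data itself was flushed.
    std::fs::File::open(dir)?.sync_all()?;
    Ok(())
}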
5174 :
5175 302 : async fn repartition(
5176 302 : &self,
5177 302 : lsn: Lsn,
5178 302 : partition_size: u64,
5179 302 : flags: EnumSet<CompactFlags>,
5180 302 : ctx: &RequestContext,
5181 302 : ) -> Result<((KeyPartitioning, SparseKeyPartitioning), Lsn), CompactionError> {
5182 302 : let Ok(mut guard) = self.partitioning.try_write_guard() else {
5183 : // NB: there are two callers, one is the compaction task, of which there is only one per struct Tenant and hence Timeline.
5184 : // The other is the initdb optimization in flush_frozen_layer, used by `bootstrap_timeline`, which runs before `.activate()`
5185 : // and hence before the compaction task starts.
5186 0 : return Err(CompactionError::Other(anyhow!(
5187 0 : "repartition() called concurrently"
5188 0 : )));
5189 : };
5190 302 : let ((dense_partition, sparse_partition), partition_lsn) = &*guard.read();
5191 302 : if lsn < *partition_lsn {
5192 0 : return Err(CompactionError::Other(anyhow!(
5193 0 : "repartition() called with LSN going backwards, this should not happen"
5194 0 : )));
5195 302 : }
5196 302 :
5197 302 : let distance = lsn.0 - partition_lsn.0;
5198 302 : if *partition_lsn != Lsn(0)
5199 141 : && distance <= self.repartition_threshold
5200 141 : && !flags.contains(CompactFlags::ForceRepartition)
5201 : {
5202 134 : debug!(
5203 : distance,
5204 : threshold = self.repartition_threshold,
5205 0 : "no repartitioning needed"
5206 : );
5207 134 : return Ok((
5208 134 : (dense_partition.clone(), sparse_partition.clone()),
5209 134 : *partition_lsn,
5210 134 : ));
5211 168 : }
5212 :
5213 168 : let (dense_ks, sparse_ks) = self.collect_keyspace(lsn, ctx).await?;
5214 168 : let dense_partitioning = dense_ks.partition(&self.shard_identity, partition_size);
5215 168 : let sparse_partitioning = SparseKeyPartitioning {
5216 168 : parts: vec![sparse_ks],
5217 168 : }; // no partitioning for metadata keys for now
5218 168 : let result = ((dense_partitioning, sparse_partitioning), lsn);
5219 168 : guard.write(result.clone());
5220 168 : Ok(result)
5221 302 : }
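// Illustrative sketch (not part of the original source or coverage data): the reuse
// condition checked by `repartition` above. An existing partitioning is reused when it is
// not the very first one, the LSN has not advanced past the threshold, and repartitioning
// is not forced. Plain u64s stand in for `Lsn`, and `force` stands in for the
// ForceRepartition flag; the real code treats a backwards LSN as an error.
fn sketch_should_reuse_partitioning(
    partition_lsn: u64,
    request_lsn: u64,
    repartition_threshold: u64,
    force: bool,
) -> bool {
    let distance = request_lsn.saturating_sub(partition_lsn);
    partition_lsn != 0 && distance <= repartition_threshold && !force
}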
5222 :
5223 : // Is it time to create a new image layer for the given partition? True if we want to generate.
5224 7 : async fn time_for_new_image_layer(&self, partition: &KeySpace, lsn: Lsn) -> bool {
5225 7 : let threshold = self.get_image_creation_threshold();
5226 :
5227 7 : let guard = self.layers.read(LayerManagerLockHolder::Compaction).await;
5228 7 : let Ok(layers) = guard.layer_map() else {
5229 0 : return false;
5230 : };
5231 :
5232 7 : let mut max_deltas = 0;
5233 14 : for part_range in &partition.ranges {
5234 7 : let image_coverage = layers.image_coverage(part_range, lsn);
5235 14 : for (img_range, last_img) in image_coverage {
5236 7 : let img_lsn = if let Some(last_img) = last_img {
5237 0 : last_img.get_lsn_range().end
5238 : } else {
5239 7 : Lsn(0)
5240 : };
5241 : // Let's consider an example:
5242 : //
5243 : // delta layer with LSN range 71-81
5244 : // delta layer with LSN range 81-91
5245 : // delta layer with LSN range 91-101
5246 : // image layer at LSN 100
5247 : //
5248 : // If 'lsn' is still 100, i.e. no new WAL has been processed since the last image layer,
5249 : // there's no need to create a new one. We check this case explicitly, to avoid passing
5250 : // a bogus range to count_deltas below, with start > end. It's even possible that there
5251 : // are some delta layers *later* than current 'lsn', if more WAL was processed and flushed
5252 : // after we read last_record_lsn, which is passed here in the 'lsn' argument.
5253 7 : if img_lsn < lsn {
5254 7 : let num_deltas =
5255 7 : layers.count_deltas(&img_range, &(img_lsn..lsn), Some(threshold));
5256 7 :
5257 7 : max_deltas = max_deltas.max(num_deltas);
5258 7 : if num_deltas >= threshold {
5259 0 : debug!(
5260 0 : "key range {}-{}, has {} deltas on this timeline in LSN range {}..{}",
5261 : img_range.start, img_range.end, num_deltas, img_lsn, lsn
5262 : );
5263 0 : return true;
5264 7 : }
5265 0 : }
5266 : }
5267 : }
5268 :
5269 7 : debug!(
5270 : max_deltas,
5271 0 : "none of the partitioned ranges had >= {threshold} deltas"
5272 : );
5273 7 : false
5274 7 : }
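// Illustrative sketch (not part of the original source or coverage data): the heuristic
// in `time_for_new_image_layer` above. For a key range, count how many delta layers sit
// between the newest image (or LSN 0 if none) and the read LSN; once that count reaches
// the image creation threshold, a new image layer is worth writing. Deltas are simplified
// to (start_lsn, end_lsn) pairs that all cover the whole key range.
fn sketch_needs_new_image_layer(
    last_image_lsn: Option<u64>,
    current_lsn: u64,
    delta_lsn_ranges: &[(u64, u64)],
    threshold: usize,
) -> bool {
    let img_lsn = last_image_lsn.unwrap_or(0);
    if img_lsn >= current_lsn {
        // No WAL ingested since the last image layer: nothing to do.
        return false;
    }
    let num_deltas = delta_lsn_ranges
        .iter()
        .filter(|(start, end)| *end > img_lsn && *start < current_lsn)
        .count();
    num_deltas >= threshold
}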
5275 :
5276 : /// Create image layers for Postgres data. Assumes the caller passes a partition that is not too large,
5277 : /// so that at most one image layer will be produced from this function.
5278 : #[allow(clippy::too_many_arguments)]
5279 123 : async fn create_image_layer_for_rel_blocks(
5280 123 : self: &Arc<Self>,
5281 123 : partition: &KeySpace,
5282 123 : mut image_layer_writer: ImageLayerWriter,
5283 123 : lsn: Lsn,
5284 123 : ctx: &RequestContext,
5285 123 : img_range: Range<Key>,
5286 123 : io_concurrency: IoConcurrency,
5287 123 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
5288 123 : let mut wrote_keys = false;
5289 123 :
5290 123 : let mut key_request_accum = KeySpaceAccum::new();
5291 817 : for range in &partition.ranges {
5292 694 : let mut key = range.start;
5293 1504 : while key < range.end {
5294 : // Decide whether to retain this key: usually we do, but sharded tenants may
5295 : // need to drop keys that don't belong to them. If we retain the key, add it
5296 : // to `key_request_accum` for later issuing a vectored get
5297 810 : if self.shard_identity.is_key_disposable(&key) {
5298 0 : debug!(
5299 0 : "Dropping key {} during compaction (it belongs on shard {:?})",
5300 0 : key,
5301 0 : self.shard_identity.get_shard_number(&key)
5302 : );
5303 810 : } else {
5304 810 : key_request_accum.add_key(key);
5305 810 : }
5306 :
5307 810 : let last_key_in_range = key.next() == range.end;
5308 810 : key = key.next();
5309 810 :
5310 810 : // Maybe flush `key_rest_accum`
5311 810 : if key_request_accum.raw_size() >= self.conf.max_get_vectored_keys.get() as u64
5312 810 : || (last_key_in_range && key_request_accum.raw_size() > 0)
5313 : {
5314 694 : let query =
5315 694 : VersionedKeySpaceQuery::uniform(key_request_accum.consume_keyspace(), lsn);
5316 :
5317 694 : let results = self
5318 694 : .get_vectored(query, io_concurrency.clone(), ctx)
5319 694 : .await?;
5320 :
5321 694 : if self.cancel.is_cancelled() {
5322 0 : return Err(CreateImageLayersError::Cancelled);
5323 694 : }
5324 :
5325 1504 : for (img_key, img) in results {
5326 810 : let img = match img {
5327 810 : Ok(img) => img,
5328 0 : Err(err) => {
5329 0 : // If we fail to reconstruct a VM or FSM page, we can zero the
5330 0 : // page without losing any actual user data. That seems better
5331 0 : // than failing repeatedly and getting stuck.
5332 0 : //
5333 0 : // We had a bug at one point, where we truncated the FSM and VM
5334 0 : // in the pageserver, but the Postgres didn't know about that
5335 0 : // and continued to generate incremental WAL records for pages
5336 0 : // that didn't exist in the pageserver. Trying to replay those
5337 0 : // WAL records failed to find the previous image of the page.
5338 0 : // This special case allows us to recover from that situation.
5339 0 : // See https://github.com/neondatabase/neon/issues/2601.
5340 0 : //
5341 0 : // Unfortunately we cannot do this for the main fork, or for
5342 0 : // any metadata keys, as that would lead to actual data
5343 0 : // loss.
5344 0 : if img_key.is_rel_fsm_block_key() || img_key.is_rel_vm_block_key() {
5345 0 : warn!(
5346 0 : "could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}"
5347 : );
5348 0 : ZERO_PAGE.clone()
5349 : } else {
5350 0 : return Err(CreateImageLayersError::from(err));
5351 : }
5352 : }
5353 : };
5354 :
5355 : // Write all the keys we just read into our new image layer.
5356 810 : image_layer_writer.put_image(img_key, img, ctx).await?;
5357 810 : wrote_keys = true;
5358 : }
5359 116 : }
5360 : }
5361 : }
5362 :
5363 123 : if wrote_keys {
5364 : // Normal path: we have written some data into the new image layer for this
5365 : // partition, so flush it to disk.
5366 123 : info!(
5367 0 : "produced image layer for rel {}",
5368 0 : ImageLayerName {
5369 0 : key_range: img_range.clone(),
5370 0 : lsn
5371 0 : },
5372 : );
5373 123 : Ok(ImageLayerCreationOutcome::Generated {
5374 123 : unfinished_image_layer: image_layer_writer,
5375 123 : })
5376 : } else {
5377 0 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
5378 0 : Ok(ImageLayerCreationOutcome::Empty)
5379 : }
5380 123 : }
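// Illustrative sketch (not part of the original source or coverage data): the batching
// pattern in `create_image_layer_for_rel_blocks` above. Keys are accumulated until the
// batch reaches the vectored-get limit or the key range ends, then the batch is issued as
// one read. Keys are simplified to u32 and the "read" just records the batches.
fn sketch_batch_keys_for_vectored_get(
    range: std::ops::Range<u32>,
    max_batch: usize,
) -> Vec<Vec<u32>> {
    let mut batches = Vec::new();
    let mut accum: Vec<u32> = Vec::new();
    for key in range.clone() {
        accum.push(key);
        let last_key_in_range = key + 1 == range.end;
        if accum.len() >= max_batch || (last_key_in_range && !accum.is_empty()) {
            // In the real code this is where get_vectored() is awaited and the
            // resulting images are written into the image layer.
            batches.push(std::mem::take(&mut accum));
        }
    }
    batches
}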
5381 :
5382 : /// Create an image layer for metadata keys. This function produces one image layer for all metadata
5383 : /// keys for now. Because metadata keys cannot exceed the basebackup size limit, all of them
5384 : /// fit comfortably into a single image layer.
5385 : ///
5386 : /// Creating image layers for metadata keys differs from relational keys. Firstly, instead of
5387 : /// iterating over each key and getting an image for each of them, we do a `vectored_get` scan over the sparse
5388 : /// keyspace to get all images in one run. Secondly, we use a different image layer generation metric
5389 : /// for metadata keys than for relational keys: the number of delta files visited during the scan.
5390 : #[allow(clippy::too_many_arguments)]
5391 118 : async fn create_image_layer_for_metadata_keys(
5392 118 : self: &Arc<Self>,
5393 118 : partition: &KeySpace,
5394 118 : mut image_layer_writer: ImageLayerWriter,
5395 118 : lsn: Lsn,
5396 118 : ctx: &RequestContext,
5397 118 : img_range: Range<Key>,
5398 118 : mode: ImageLayerCreationMode,
5399 118 : io_concurrency: IoConcurrency,
5400 118 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
5401 118 : // Metadata keys image layer creation.
5402 118 : let mut reconstruct_state = ValuesReconstructState::new(io_concurrency);
5403 118 : let begin = Instant::now();
5404 : // Directly use `get_vectored_impl` to skip the max_vectored_read_key limit check. Note that the keyspace should
5405 : // not contain too many keys, otherwise this takes a lot of memory.
5406 118 : let data = self
5407 118 : .get_vectored_impl(
5408 118 : VersionedKeySpaceQuery::uniform(partition.clone(), lsn),
5409 118 : &mut reconstruct_state,
5410 118 : ctx,
5411 118 : )
5412 118 : .await?;
5413 118 : let (data, total_kb_retrieved, total_keys_retrieved) = {
5414 118 : let mut new_data = BTreeMap::new();
5415 118 : let mut total_kb_retrieved = 0;
5416 118 : let mut total_keys_retrieved = 0;
5417 5124 : for (k, v) in data {
5418 5006 : let v = v?;
5419 5006 : total_kb_retrieved += KEY_SIZE + v.len();
5420 5006 : total_keys_retrieved += 1;
5421 5006 : new_data.insert(k, v);
5422 : }
5423 118 : (new_data, total_kb_retrieved / 1024, total_keys_retrieved)
5424 118 : };
5425 118 : let delta_files_accessed = reconstruct_state.get_delta_layers_visited();
5426 118 : let elapsed = begin.elapsed();
5427 118 :
5428 118 : let trigger_generation = delta_files_accessed as usize >= MAX_AUX_FILE_V2_DELTAS;
5429 118 : info!(
5430 0 : "metadata key compaction: trigger_generation={trigger_generation}, delta_files_accessed={delta_files_accessed}, total_kb_retrieved={total_kb_retrieved}, total_keys_retrieved={total_keys_retrieved}, read_time={}s",
5431 0 : elapsed.as_secs_f64()
5432 : );
5433 :
5434 118 : if !trigger_generation && mode == ImageLayerCreationMode::Try {
5435 1 : return Ok(ImageLayerCreationOutcome::Skip);
5436 117 : }
5437 117 : if self.cancel.is_cancelled() {
5438 0 : return Err(CreateImageLayersError::Cancelled);
5439 117 : }
5440 117 : let mut wrote_any_image = false;
5441 5123 : for (k, v) in data {
5442 5006 : if v.is_empty() {
5443 : // the key has been deleted, it does not need an image
5444 : // in metadata keyspace, an empty image == tombstone
5445 4 : continue;
5446 5002 : }
5447 5002 : wrote_any_image = true;
5448 5002 :
5449 5002 : // No need to handle sharding b/c metadata keys are always on the 0-th shard.
5450 5002 :
5451 5002 : // TODO: split image layers to avoid too large layer files. Too large image files are not handled
5452 5002 : // on the normal data path either.
5453 5002 : image_layer_writer.put_image(k, v, ctx).await?;
5454 : }
5455 :
5456 117 : if wrote_any_image {
5457 : // Normal path: we have written some data into the new image layer for this
5458 : // partition, so flush it to disk.
5459 6 : info!(
5460 0 : "created image layer for metadata {}",
5461 0 : ImageLayerName {
5462 0 : key_range: img_range.clone(),
5463 0 : lsn
5464 0 : }
5465 : );
5466 6 : Ok(ImageLayerCreationOutcome::Generated {
5467 6 : unfinished_image_layer: image_layer_writer,
5468 6 : })
5469 : } else {
5470 111 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
5471 111 : Ok(ImageLayerCreationOutcome::Empty)
5472 : }
5473 118 : }
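// Illustrative sketch (not part of the original source or coverage data): the trigger
// used by `create_image_layer_for_metadata_keys` above. In "try" mode a metadata image
// layer is only generated once the scan had to visit enough delta files; initial/force
// modes always generate. `max_aux_deltas` stands in for MAX_AUX_FILE_V2_DELTAS, and the
// real code additionally skips writing when no image data was produced.
fn sketch_should_generate_metadata_image(
    delta_files_accessed: usize,
    max_aux_deltas: usize,
    is_try_mode: bool,
) -> bool {
    let trigger_generation = delta_files_accessed >= max_aux_deltas;
    trigger_generation || !is_try_mode
}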
5474 :
5475 : /// Predicate function which indicates whether we should check if new image layers
5476 : /// are required. Since checking if new image layers are required is expensive in
5477 : /// terms of CPU, we only do it in the following cases:
5478 : /// 1. If the timeline has ingested sufficient WAL to justify the cost
5479 : /// 2. If enough time has passed since the last check:
5480 : /// 1. For large tenants, we wish to perform the check more often since they
5481 : /// suffer from the lack of image layers
5482 : /// 2. For small tenants (that can mostly fit in RAM), we use a much longer interval
5483 302 : fn should_check_if_image_layers_required(self: &Arc<Timeline>, lsn: Lsn) -> bool {
5484 : const LARGE_TENANT_THRESHOLD: u64 = 2 * 1024 * 1024 * 1024;
5485 :
5486 302 : let last_checks_at = self.last_image_layer_creation_check_at.load();
5487 302 : let distance = lsn
5488 302 : .checked_sub(last_checks_at)
5489 302 : .expect("Attempt to compact with LSN going backwards");
5490 302 : let min_distance =
5491 302 : self.get_image_layer_creation_check_threshold() as u64 * self.get_checkpoint_distance();
5492 302 :
5493 302 : let distance_based_decision = distance.0 >= min_distance;
5494 302 :
5495 302 : let mut time_based_decision = false;
5496 302 : let mut last_check_instant = self.last_image_layer_creation_check_instant.lock().unwrap();
5497 302 : if let CurrentLogicalSize::Exact(logical_size) = self.current_logical_size.current_size() {
5498 251 : let check_required_after = if Into::<u64>::into(&logical_size) >= LARGE_TENANT_THRESHOLD
5499 : {
5500 0 : self.get_checkpoint_timeout()
5501 : } else {
5502 251 : Duration::from_secs(3600 * 48)
5503 : };
5504 :
5505 251 : time_based_decision = match *last_check_instant {
5506 141 : Some(last_check) => {
5507 141 : let elapsed = last_check.elapsed();
5508 141 : elapsed >= check_required_after
5509 : }
5510 110 : None => true,
5511 : };
5512 51 : }
5513 :
5514 : // Do the expensive delta layer counting only if this timeline has ingested sufficient
5515 : // WAL since the last check or a checkpoint timeout interval has elapsed since the last
5516 : // check.
5517 302 : let decision = distance_based_decision || time_based_decision;
5518 :
5519 302 : if decision {
5520 111 : self.last_image_layer_creation_check_at.store(lsn);
5521 111 : *last_check_instant = Some(Instant::now());
5522 191 : }
5523 :
5524 302 : decision
5525 302 : }
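// Illustrative sketch (not part of the original source or coverage data): the two-part
// predicate in `should_check_if_image_layers_required` above. The expensive check runs
// either when enough WAL has been ingested since the last check, or when enough
// wall-clock time has passed; the interval itself is chosen elsewhere (shorter for large
// tenants). A plain u64 LSN distance and a Duration stand in for the real config values.
fn sketch_should_check_image_layers(
    lsn_distance_since_last_check: u64,
    min_lsn_distance: u64,
    elapsed_since_last_check: Option<std::time::Duration>,
    check_interval: std::time::Duration,
) -> bool {
    let distance_based = lsn_distance_since_last_check >= min_lsn_distance;
    let time_based = match elapsed_since_last_check {
        Some(elapsed) => elapsed >= check_interval,
        None => true, // never checked before
    };
    distance_based || time_based
}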
5526 :
5527 : /// Returns the image layers generated and an enum indicating whether the process is fully completed.
5528 : /// true = we have generated all image layers, false = we preempted the process for L0 compaction.
5529 : ///
5530 : /// `partition_mode` is only for logging purpose and is not used anywhere in this function.
5531 302 : async fn create_image_layers(
5532 302 : self: &Arc<Timeline>,
5533 302 : partitioning: &KeyPartitioning,
5534 302 : lsn: Lsn,
5535 302 : mode: ImageLayerCreationMode,
5536 302 : ctx: &RequestContext,
5537 302 : last_status: LastImageLayerCreationStatus,
5538 302 : yield_for_l0: bool,
5539 302 : ) -> Result<(Vec<ResidentLayer>, LastImageLayerCreationStatus), CreateImageLayersError> {
5540 302 : let timer = self.metrics.create_images_time_histo.start_timer();
5541 302 :
5542 302 : if partitioning.parts.is_empty() {
5543 0 : warn!("no partitions to create image layers for");
5544 0 : return Ok((vec![], LastImageLayerCreationStatus::Complete));
5545 302 : }
5546 302 :
5547 302 : // We need to avoid holes between generated image layers.
5548 302 : // Otherwise LayerMap::image_layer_exists will return false if the key range of some layer is covered by more than one
5549 302 : // image layer with a hole between them. In that case such a layer cannot be utilized by GC.
5550 302 : //
5551 302 : // How can such a hole between partitions appear?
5552 302 : // If we have a relation with relid=1 and size 100 and a relation with relid=2 and size 200, then the result of
5553 302 : // KeySpace::partition may contain partitions <100000000..100000099> and <200000000..200000199>.
5554 302 : // If there is a delta layer <100000000..300000000> then it will never be garbage collected, because
5555 302 : // the image layers <100000000..100000099> and <200000000..200000199> do not completely cover it.
5556 302 : let mut start = Key::MIN;
5557 :
5558 302 : let check_for_image_layers =
5559 302 : if let LastImageLayerCreationStatus::Incomplete { last_key } = last_status {
5560 0 : info!(
5561 0 : "resuming image layer creation: last_status=incomplete, continue from {}",
5562 : last_key
5563 : );
5564 0 : true
5565 : } else {
5566 302 : self.should_check_if_image_layers_required(lsn)
5567 : };
5568 :
5569 302 : let mut batch_image_writer = BatchLayerWriter::new(self.conf).await?;
5570 :
5571 302 : let mut all_generated = true;
5572 302 :
5573 302 : let mut partition_processed = 0;
5574 302 : let mut total_partitions = partitioning.parts.len();
5575 302 : let mut last_partition_processed = None;
5576 302 : let mut partition_parts = partitioning.parts.clone();
5577 :
5578 302 : if let LastImageLayerCreationStatus::Incomplete { last_key } = last_status {
5579 : // We need to skip the partitions that have already been processed.
5580 0 : let mut found = false;
5581 0 : for (i, partition) in partition_parts.iter().enumerate() {
5582 0 : if last_key <= partition.end().unwrap() {
5583 : // ```plain
5584 : // |------|--------|----------|------|
5585 : // ^last_key
5586 : // ^start from this partition
5587 : // ```
5588 : // Why `i+1` instead of `i`?
5589 : // It is possible that the user did some writes after the previous image layer creation attempt so that
5590 : // a relation grows in size, and the last_key is now in the middle of the partition. In this case, we
5591 : // still want to skip this partition, so that we can make progress and avoid generating image layers over
5592 : // the same partition. The `i + 1 >= total_partitions` check ensures we don't end up with an empty vec.
5593 0 : if i + 1 >= total_partitions {
5594 : // In general, this case should not happen -- if last_key is on the last partition, the previous
5595 : // iteration of image layer creation should return a complete status.
5596 0 : break; // with found=false
5597 0 : }
5598 0 : partition_parts = partition_parts.split_off(i + 1); // Remove the first i + 1 elements
5599 0 : total_partitions = partition_parts.len();
5600 0 : // Update the start key to the partition start.
5601 0 : start = partition_parts[0].start().unwrap();
5602 0 : found = true;
5603 0 : break;
5604 0 : }
5605 : }
5606 0 : if !found {
5607 : // Last key is within the last partition, or larger than all partitions.
5608 0 : return Ok((vec![], LastImageLayerCreationStatus::Complete));
5609 0 : }
5610 302 : }
5611 :
5612 616 : for partition in partition_parts.iter() {
5613 616 : if self.cancel.is_cancelled() {
5614 0 : return Err(CreateImageLayersError::Cancelled);
5615 616 : }
5616 616 : partition_processed += 1;
5617 616 : let img_range = start..partition.ranges.last().unwrap().end;
5618 616 : let compact_metadata = partition.overlaps(&Key::metadata_key_range());
5619 616 : if compact_metadata {
5620 1208 : for range in &partition.ranges {
5621 906 : assert!(
5622 906 : range.start.field1 >= METADATA_KEY_BEGIN_PREFIX
5623 906 : && range.end.field1 <= METADATA_KEY_END_PREFIX,
5624 0 : "metadata keys must be partitioned separately"
5625 : );
5626 : }
5627 302 : if mode == ImageLayerCreationMode::Try && !check_for_image_layers {
5628 : // Skip compaction if there are not enough updates. Metadata compaction will do a scan and
5629 : // might interfere with evictions.
5630 184 : start = img_range.end;
5631 184 : continue;
5632 118 : }
5633 : // For initial and force modes, we always generate image layers for metadata keys.
5634 314 : } else if let ImageLayerCreationMode::Try = mode {
5635 : // check_for_image_layers = false -> skip
5636 : // check_for_image_layers = true -> check time_for_new_image_layer -> skip/generate
5637 191 : if !check_for_image_layers || !self.time_for_new_image_layer(partition, lsn).await {
5638 191 : start = img_range.end;
5639 191 : continue;
5640 0 : }
5641 123 : }
5642 241 : if let ImageLayerCreationMode::Force = mode {
5643 : // When forced to create image layers, we might try to create them where they already
5644 : // exist. This mode is only used in tests/debug.
5645 14 : let layers = self.layers.read(LayerManagerLockHolder::Compaction).await;
5646 14 : if layers.contains_key(&PersistentLayerKey {
5647 14 : key_range: img_range.clone(),
5648 14 : lsn_range: PersistentLayerDesc::image_layer_lsn_range(lsn),
5649 14 : is_delta: false,
5650 14 : }) {
5651 : // TODO: this can be processed with the BatchLayerWriter::finish_with_discard
5652 : // in the future.
5653 0 : tracing::info!(
5654 0 : "Skipping image layer at {lsn} {}..{}, already exists",
5655 : img_range.start,
5656 : img_range.end
5657 : );
5658 0 : start = img_range.end;
5659 0 : continue;
5660 14 : }
5661 227 : }
5662 :
5663 241 : let image_layer_writer = ImageLayerWriter::new(
5664 241 : self.conf,
5665 241 : self.timeline_id,
5666 241 : self.tenant_shard_id,
5667 241 : &img_range,
5668 241 : lsn,
5669 241 : &self.gate,
5670 241 : self.cancel.clone(),
5671 241 : ctx,
5672 241 : )
5673 241 : .await?;
5674 :
5675 241 : fail_point!("image-layer-writer-fail-before-finish", |_| {
5676 0 : Err(CreateImageLayersError::Other(anyhow::anyhow!(
5677 0 : "failpoint image-layer-writer-fail-before-finish"
5678 0 : )))
5679 241 : });
5680 :
5681 241 : let io_concurrency = IoConcurrency::spawn_from_conf(
5682 241 : self.conf.get_vectored_concurrent_io,
5683 241 : self.gate
5684 241 : .enter()
5685 241 : .map_err(|_| CreateImageLayersError::Cancelled)?,
5686 : );
5687 :
5688 241 : let outcome = if !compact_metadata {
5689 123 : self.create_image_layer_for_rel_blocks(
5690 123 : partition,
5691 123 : image_layer_writer,
5692 123 : lsn,
5693 123 : ctx,
5694 123 : img_range.clone(),
5695 123 : io_concurrency,
5696 123 : )
5697 123 : .await?
5698 : } else {
5699 118 : self.create_image_layer_for_metadata_keys(
5700 118 : partition,
5701 118 : image_layer_writer,
5702 118 : lsn,
5703 118 : ctx,
5704 118 : img_range.clone(),
5705 118 : mode,
5706 118 : io_concurrency,
5707 118 : )
5708 118 : .await?
5709 : };
5710 241 : match outcome {
5711 111 : ImageLayerCreationOutcome::Empty => {
5712 111 : // No data in this partition, so we don't need to create an image layer (for now).
5713 111 : // The next image layer should cover this key range, so we don't advance the `start`
5714 111 : // key.
5715 111 : }
5716 : ImageLayerCreationOutcome::Generated {
5717 129 : unfinished_image_layer,
5718 129 : } => {
5719 129 : batch_image_writer.add_unfinished_image_writer(
5720 129 : unfinished_image_layer,
5721 129 : img_range.clone(),
5722 129 : lsn,
5723 129 : );
5724 129 : // The next image layer should be generated right after this one.
5725 129 : start = img_range.end;
5726 129 : }
5727 1 : ImageLayerCreationOutcome::Skip => {
5728 1 : // We don't need to create an image layer for this partition.
5729 1 : // The next image layer should NOT cover this range, otherwise
5730 1 : // the keyspace becomes empty (reads don't go past image layers).
5731 1 : start = img_range.end;
5732 1 : }
5733 : }
5734 :
5735 241 : if let ImageLayerCreationMode::Try = mode {
5736 : // We have at least made some progress
5737 1 : if yield_for_l0 && batch_image_writer.pending_layer_num() >= 1 {
5738 : // The `Try` mode is currently only used on the compaction path. We want to avoid
5739 : // image layer generation taking too long and blocking L0 compaction. So in this
5740 : // mode, we also inspect the current number of L0 layers and skip image layer generation
5741 : // if there are too many of them.
5742 0 : let image_preempt_threshold = self.get_image_creation_preempt_threshold()
5743 0 : * self.get_compaction_threshold();
5744 0 : // TODO: currently we do not respect `get_image_creation_preempt_threshold` and always yield
5745 0 : // when there is a single timeline with more L0 layers than the L0 threshold. As long as the
5746 0 : // `get_image_creation_preempt_threshold` is set to a value greater than 0, we will yield for L0 compaction.
5747 0 : if image_preempt_threshold != 0 {
5748 0 : let should_yield = self
5749 0 : .l0_compaction_trigger
5750 0 : .notified()
5751 0 : .now_or_never()
5752 0 : .is_some();
5753 0 : if should_yield {
5754 0 : tracing::info!(
5755 0 : "preempt image layer generation at {lsn} when processing partition {}..{}: too many L0 layers",
5756 0 : partition.start().unwrap(),
5757 0 : partition.end().unwrap()
5758 : );
5759 0 : last_partition_processed = Some(partition.clone());
5760 0 : all_generated = false;
5761 0 : break;
5762 0 : }
5763 0 : }
5764 1 : }
5765 240 : }
5766 : }
5767 :
5768 302 : let image_layers = batch_image_writer.finish(self, ctx).await?;
5769 :
5770 302 : let mut guard = self.layers.write(LayerManagerLockHolder::Compaction).await;
5771 :
5772 : // FIXME: we could add the images to be uploaded *before* returning from here, but right
5773 : // now they are being scheduled outside of write lock; current way is inconsistent with
5774 : // compaction lock order.
5775 302 : guard
5776 302 : .open_mut()?
5777 302 : .track_new_image_layers(&image_layers, &self.metrics);
5778 302 : drop_layer_manager_wlock(guard);
5779 302 : let duration = timer.stop_and_record();
5780 302 :
5781 302 : // Creating image layers may have caused some previously visible layers to be covered
5782 302 : if !image_layers.is_empty() {
5783 117 : self.update_layer_visibility().await?;
5784 185 : }
5785 :
5786 302 : let total_layer_size = image_layers
5787 302 : .iter()
5788 302 : .map(|l| l.metadata().file_size)
5789 302 : .sum::<u64>();
5790 302 :
5791 302 : if !image_layers.is_empty() {
5792 117 : info!(
5793 0 : "created {} image layers ({} bytes) in {}s, processed {} out of {} partitions",
5794 0 : image_layers.len(),
5795 0 : total_layer_size,
5796 0 : duration.as_secs_f64(),
5797 : partition_processed,
5798 : total_partitions
5799 : );
5800 185 : }
5801 :
5802 : Ok((
5803 302 : image_layers,
5804 302 : if all_generated {
5805 302 : LastImageLayerCreationStatus::Complete
5806 : } else {
5807 : LastImageLayerCreationStatus::Incomplete {
5808 0 : last_key: if let Some(last_partition_processed) = last_partition_processed {
5809 0 : last_partition_processed.end().unwrap_or(Key::MIN)
5810 : } else {
5811 : // This branch should be unreachable, but in case it happens, we can just return the start key.
5812 0 : Key::MIN
5813 : },
5814 : }
5815 : },
5816 : ))
5817 302 : }
5818 :
5819 : /// Wait until the background initial logical size calculation is complete, or
5820 : /// this Timeline is shut down. Calling this function will cause the initial
5821 : /// logical size calculation to skip waiting for the background jobs barrier.
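: ///
: /// A minimal usage sketch (the calling context here is hypothetical, not from this file):
: ///
: /// ```ignore
: /// // Block until the initial logical size is known, or the timeline shuts down.
: /// Arc::clone(&timeline).await_initial_logical_size().await;
: /// ```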
5822 0 : pub(crate) async fn await_initial_logical_size(self: Arc<Self>) {
5823 0 : if !self.shard_identity.is_shard_zero() {
5824 : // We don't populate logical size on shard >0: skip waiting for it.
5825 0 : return;
5826 0 : }
5827 0 :
5828 0 : if self.remote_client.is_deleting() {
5829 : // The timeline was created in a deletion-resume state, we don't expect logical size to be populated
5830 0 : return;
5831 0 : }
5832 0 :
5833 0 : if self.current_logical_size.current_size().is_exact() {
5834 : // root timelines are initialized with exact count, but never start the background
5835 : // calculation
5836 0 : return;
5837 0 : }
5838 0 :
5839 0 : if self.cancel.is_cancelled() {
5840 : // We already requested stopping the tenant, so we cannot wait for the logical size
5841 : // calculation to complete given the task might have been already cancelled.
5842 0 : return;
5843 0 : }
5844 :
5845 0 : if let Some(await_bg_cancel) = self
5846 0 : .current_logical_size
5847 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
5848 0 : .get()
5849 0 : {
5850 0 : await_bg_cancel.cancel();
5851 0 : } else {
5852 : // We should not wait if we were not able to explicitly instruct
5853 : // the logical size cancellation to skip the concurrency limit semaphore.
5854 : // TODO: this is an unexpected case. We should restructure so that it
5855 : // can't happen.
5856 0 : tracing::warn!(
5857 0 : "await_initial_logical_size: can't get semaphore cancel token, skipping"
5858 : );
5859 0 : debug_assert!(false);
5860 : }
5861 :
5862 0 : tokio::select!(
5863 0 : _ = self.current_logical_size.initialized.acquire() => {},
5864 0 : _ = self.cancel.cancelled() => {}
5865 : )
5866 0 : }
5867 :
5868 : /// Detach this timeline from its ancestor by copying all of the ancestor's layers as this
5869 : /// Timeline's layers, up to the ancestor_lsn.
5870 : ///
5871 : /// Requires a timeline that:
5872 : /// - has an ancestor to detach from
5873 : /// - the ancestor does not have an ancestor -- follows from the original RFC limitations, not
5874 : /// a technical requirement
5875 : ///
5876 : /// After the operation has been started, it cannot be canceled. Upon restart it needs to be
5877 : /// polled again until completion.
5878 : ///
5879 : /// During the operation all timelines sharing the data with this timeline will be reparented
5880 : /// from our ancestor to be branches of this timeline.
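: ///
: /// A rough, non-compiling sketch of the full three-step flow; the plumbing that turns the
: /// result of one step into the input of the next is elided:
: ///
: /// ```ignore
: /// // Step 1: copy the ancestor's layers up to ancestor_lsn (poll until it completes).
: /// let _progress = timeline
: ///     .prepare_to_detach_from_ancestor(tenant, options, behavior, ctx)
: ///     .await?;
: /// // Step 2: while holding the tenant slot, detach and reparent, then reload the tenant
: /// //         (see `detach_from_ancestor_and_reparent`).
: /// // Step 3: `complete_detaching_timeline_ancestor` unblocks GC.
: /// ```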
5881 0 : pub(crate) async fn prepare_to_detach_from_ancestor(
5882 0 : self: &Arc<Timeline>,
5883 0 : tenant: &crate::tenant::TenantShard,
5884 0 : options: detach_ancestor::Options,
5885 0 : behavior: DetachBehavior,
5886 0 : ctx: &RequestContext,
5887 0 : ) -> Result<detach_ancestor::Progress, detach_ancestor::Error> {
5888 0 : detach_ancestor::prepare(self, tenant, behavior, options, ctx).await
5889 0 : }
5890 :
5891 : /// Second step of detach from ancestor; detaches `self` from its current ancestor and
5892 : /// reparents any reparentable children of previous ancestor.
5893 : ///
5894 : /// This method is to be called while holding the TenantManager's tenant slot, so during this
5895 : /// method we cannot be deleted nor can any timeline be deleted. After this method returns
5896 : /// successfully, the tenant must be reloaded.
5897 : ///
5898 : /// The final step will be to [`Self::complete_detaching_timeline_ancestor`] after optionally
5899 : /// resetting the tenant.
5900 0 : pub(crate) async fn detach_from_ancestor_and_reparent(
5901 0 : self: &Arc<Timeline>,
5902 0 : tenant: &crate::tenant::TenantShard,
5903 0 : prepared: detach_ancestor::PreparedTimelineDetach,
5904 0 : ancestor_timeline_id: TimelineId,
5905 0 : ancestor_lsn: Lsn,
5906 0 : behavior: DetachBehavior,
5907 0 : ctx: &RequestContext,
5908 0 : ) -> Result<detach_ancestor::DetachingAndReparenting, detach_ancestor::Error> {
5909 0 : detach_ancestor::detach_and_reparent(
5910 0 : self,
5911 0 : tenant,
5912 0 : prepared,
5913 0 : ancestor_timeline_id,
5914 0 : ancestor_lsn,
5915 0 : behavior,
5916 0 : ctx,
5917 0 : )
5918 0 : .await
5919 0 : }
5920 :
5921 : /// Final step which unblocks the GC.
5922 : ///
5923 : /// The tenant must've been reset if ancestry was modified previously (in tenant manager).
5924 0 : pub(crate) async fn complete_detaching_timeline_ancestor(
5925 0 : self: &Arc<Timeline>,
5926 0 : tenant: &crate::tenant::TenantShard,
5927 0 : attempt: detach_ancestor::Attempt,
5928 0 : ctx: &RequestContext,
5929 0 : ) -> Result<(), detach_ancestor::Error> {
5930 0 : detach_ancestor::complete(self, tenant, attempt, ctx).await
5931 0 : }
5932 : }
5933 :
5934 : impl Drop for Timeline {
5935 5 : fn drop(&mut self) {
5936 5 : if let Some(ancestor) = &self.ancestor_timeline {
5937 : // This lock should never be poisoned, but in case it is we ignore the error instead of
5938 : // unwrapping, to avoid panicking in a destructor and thereby aborting the process.
5939 2 : if let Ok(mut gc_info) = ancestor.gc_info.write() {
5940 2 : if !gc_info.remove_child_not_offloaded(self.timeline_id) {
5941 0 : tracing::error!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id,
5942 0 : "Couldn't remove retain_lsn entry from offloaded timeline's parent: already removed");
5943 2 : }
5944 0 : }
5945 3 : }
5946 5 : info!(
5947 0 : "Timeline {} for tenant {} is being dropped",
5948 : self.timeline_id, self.tenant_shard_id.tenant_id
5949 : );
5950 5 : }
5951 : }
5952 :
5953 : /// Top-level failure to compact.
5954 : #[derive(Debug, thiserror::Error)]
5955 : pub(crate) enum CompactionError {
5956 : #[error("The timeline or pageserver is shutting down")]
5957 : ShuttingDown,
5958 : /// Compaction tried to offload a timeline and failed
5959 : #[error("Failed to offload timeline: {0}")]
5960 : Offload(OffloadError),
5961 : /// Compaction could not collect the keyspace, e.g. due to a page reconstruction failure.
5962 : #[error("Failed to collect keyspace: {0}")]
5963 : CollectKeySpaceError(#[from] CollectKeySpaceError),
5964 : #[error(transparent)]
5965 : Other(anyhow::Error),
5966 : #[error("Compaction already running: {0}")]
5967 : AlreadyRunning(&'static str),
5968 : }
5969 :
5970 : impl CompactionError {
5971 : /// Errors that can be ignored, i.e., cancel and shutdown.
5972 0 : pub fn is_cancel(&self) -> bool {
5973 0 : matches!(
5974 0 : self,
5975 : Self::ShuttingDown
5976 : | Self::AlreadyRunning(_)
5977 : | Self::CollectKeySpaceError(CollectKeySpaceError::Cancelled)
5978 : | Self::CollectKeySpaceError(CollectKeySpaceError::PageRead(
5979 : PageReconstructError::Cancelled
5980 : ))
5981 : | Self::Offload(OffloadError::Cancelled)
5982 : )
5983 0 : }
5984 :
5985 : /// Critical errors that indicate data corruption.
5986 0 : pub fn is_critical(&self) -> bool {
5987 0 : matches!(
5988 0 : self,
5989 : Self::CollectKeySpaceError(
5990 : CollectKeySpaceError::Decode(_)
5991 : | CollectKeySpaceError::PageRead(
5992 : PageReconstructError::MissingKey(_) | PageReconstructError::WalRedo(_),
5993 : )
5994 : )
5995 : )
5996 0 : }
5997 : }
5998 :
5999 : impl From<OffloadError> for CompactionError {
6000 0 : fn from(e: OffloadError) -> Self {
6001 0 : match e {
6002 0 : OffloadError::Cancelled => Self::ShuttingDown,
6003 0 : _ => Self::Offload(e),
6004 : }
6005 0 : }
6006 : }
6007 :
6008 : impl From<super::upload_queue::NotInitialized> for CompactionError {
6009 0 : fn from(value: super::upload_queue::NotInitialized) -> Self {
6010 0 : match value {
6011 : super::upload_queue::NotInitialized::Uninitialized => {
6012 0 : CompactionError::Other(anyhow::anyhow!(value))
6013 : }
6014 : super::upload_queue::NotInitialized::ShuttingDown
6015 0 : | super::upload_queue::NotInitialized::Stopped => CompactionError::ShuttingDown,
6016 : }
6017 0 : }
6018 : }
6019 :
6020 : impl From<super::storage_layer::layer::DownloadError> for CompactionError {
6021 0 : fn from(e: super::storage_layer::layer::DownloadError) -> Self {
6022 0 : match e {
6023 : super::storage_layer::layer::DownloadError::TimelineShutdown
6024 : | super::storage_layer::layer::DownloadError::DownloadCancelled => {
6025 0 : CompactionError::ShuttingDown
6026 : }
6027 : super::storage_layer::layer::DownloadError::ContextAndConfigReallyDeniesDownloads
6028 : | super::storage_layer::layer::DownloadError::DownloadRequired
6029 : | super::storage_layer::layer::DownloadError::NotFile(_)
6030 : | super::storage_layer::layer::DownloadError::DownloadFailed
6031 : | super::storage_layer::layer::DownloadError::PreStatFailed(_) => {
6032 0 : CompactionError::Other(anyhow::anyhow!(e))
6033 : }
6034 : #[cfg(test)]
6035 : super::storage_layer::layer::DownloadError::Failpoint(_) => {
6036 0 : CompactionError::Other(anyhow::anyhow!(e))
6037 : }
6038 : }
6039 0 : }
6040 : }
6041 :
6042 : impl From<layer_manager::Shutdown> for CompactionError {
6043 0 : fn from(_: layer_manager::Shutdown) -> Self {
6044 0 : CompactionError::ShuttingDown
6045 0 : }
6046 : }
6047 :
6048 : impl From<super::storage_layer::errors::PutError> for CompactionError {
6049 0 : fn from(e: super::storage_layer::errors::PutError) -> Self {
6050 0 : if e.is_cancel() {
6051 0 : CompactionError::ShuttingDown
6052 : } else {
6053 0 : CompactionError::Other(e.into_anyhow())
6054 : }
6055 0 : }
6056 : }
6057 :
6058 : #[serde_as]
6059 161 : #[derive(serde::Serialize)]
6060 : struct RecordedDuration(#[serde_as(as = "serde_with::DurationMicroSeconds")] Duration);
6061 :
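: /// Measures the elapsed time between successive phase boundaries: each call to `till_now`
: /// yields the duration since the previous recording, and `into_recorded` extracts the
: /// recorded duration (if any) for reporting.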
6062 : #[derive(Default)]
6063 : enum DurationRecorder {
6064 : #[default]
6065 : NotStarted,
6066 : Recorded(RecordedDuration, tokio::time::Instant),
6067 : }
6068 :
6069 : impl DurationRecorder {
6070 307 : fn till_now(&self) -> DurationRecorder {
6071 307 : match self {
6072 : DurationRecorder::NotStarted => {
6073 0 : panic!("must only call on recorded measurements")
6074 : }
6075 307 : DurationRecorder::Recorded(_, ended) => {
6076 307 : let now = tokio::time::Instant::now();
6077 307 : DurationRecorder::Recorded(RecordedDuration(now - *ended), now)
6078 307 : }
6079 307 : }
6080 307 : }
6081 161 : fn into_recorded(self) -> Option<RecordedDuration> {
6082 161 : match self {
6083 0 : DurationRecorder::NotStarted => None,
6084 161 : DurationRecorder::Recorded(recorded, _) => Some(recorded),
6085 : }
6086 161 : }
6087 : }
6088 :
6089 : /// Descriptor for a delta layer used in testing infra. The start/end key/lsn range of the
6090 : /// delta layer might be different from the min/max key/lsn in the delta layer. Therefore,
6091 : /// the layer descriptor requires the user to provide the ranges, which should cover all
6092 : /// keys specified in the `data` field.
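: ///
: /// A sketch of how a test might construct one (the keys, values and LSNs are placeholders):
: ///
: /// ```ignore
: /// // Provide the ranges explicitly; they must cover every (key, lsn) in `data` ...
: /// let desc = DeltaLayerTestDesc::new(Lsn(0x10)..Lsn(0x20), key_a..key_b, data.clone());
: /// // ... or let the key range be inferred from the data itself.
: /// let desc = DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x10)..Lsn(0x20), data);
: /// ```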
6093 : #[cfg(test)]
6094 : #[derive(Clone)]
6095 : pub struct DeltaLayerTestDesc {
6096 : pub lsn_range: Range<Lsn>,
6097 : pub key_range: Range<Key>,
6098 : pub data: Vec<(Key, Lsn, Value)>,
6099 : }
6100 :
6101 : #[cfg(test)]
6102 : #[derive(Clone)]
6103 : pub struct InMemoryLayerTestDesc {
6104 : pub lsn_range: Range<Lsn>,
6105 : pub data: Vec<(Key, Lsn, Value)>,
6106 : pub is_open: bool,
6107 : }
6108 :
6109 : #[cfg(test)]
6110 : impl DeltaLayerTestDesc {
6111 2 : pub fn new(lsn_range: Range<Lsn>, key_range: Range<Key>, data: Vec<(Key, Lsn, Value)>) -> Self {
6112 2 : Self {
6113 2 : lsn_range,
6114 2 : key_range,
6115 2 : data,
6116 2 : }
6117 2 : }
6118 :
6119 45 : pub fn new_with_inferred_key_range(
6120 45 : lsn_range: Range<Lsn>,
6121 45 : data: Vec<(Key, Lsn, Value)>,
6122 45 : ) -> Self {
6123 116 : let key_min = data.iter().map(|(key, _, _)| key).min().unwrap();
6124 116 : let key_max = data.iter().map(|(key, _, _)| key).max().unwrap();
6125 45 : Self {
6126 45 : key_range: (*key_min)..(key_max.next()),
6127 45 : lsn_range,
6128 45 : data,
6129 45 : }
6130 45 : }
6131 :
6132 5 : pub(crate) fn layer_name(&self) -> LayerName {
6133 5 : LayerName::Delta(super::storage_layer::DeltaLayerName {
6134 5 : key_range: self.key_range.clone(),
6135 5 : lsn_range: self.lsn_range.clone(),
6136 5 : })
6137 5 : }
6138 : }
6139 :
6140 : impl Timeline {
6141 23 : async fn finish_compact_batch(
6142 23 : self: &Arc<Self>,
6143 23 : new_deltas: &[ResidentLayer],
6144 23 : new_images: &[ResidentLayer],
6145 23 : layers_to_remove: &[Layer],
6146 23 : ) -> Result<(), CompactionError> {
6147 23 : let mut guard = tokio::select! {
6148 23 : guard = self.layers.write(LayerManagerLockHolder::Compaction) => guard,
6149 23 : _ = self.cancel.cancelled() => {
6150 0 : return Err(CompactionError::ShuttingDown);
6151 : }
6152 : };
6153 :
6154 23 : let mut duplicated_layers = HashSet::new();
6155 23 :
6156 23 : let mut insert_layers = Vec::with_capacity(new_deltas.len());
6157 :
6158 186 : for l in new_deltas {
6159 163 : if guard.contains(l.as_ref()) {
6160 : // expected in tests
6161 0 : tracing::error!(layer=%l, "duplicated L1 layer");
6162 :
6163 : // good ways to cause a duplicate: we repeatedly error after taking the writelock
6164 : // `guard` on self.layers. as of writing this, there are no error returns except
6165 : // for compact_level0_phase1 creating an L0, which does not happen in practice
6166 : // because we have not implemented L0 => L0 compaction.
6167 0 : duplicated_layers.insert(l.layer_desc().key());
6168 163 : } else if LayerMap::is_l0(&l.layer_desc().key_range, l.layer_desc().is_delta) {
6169 0 : return Err(CompactionError::Other(anyhow::anyhow!(
6170 0 : "compaction generates a L0 layer file as output, which will cause infinite compaction."
6171 0 : )));
6172 163 : } else {
6173 163 : insert_layers.push(l.clone());
6174 163 : }
6175 : }
6176 :
6177 : // only remove those inputs which were not outputs
6178 23 : let remove_layers: Vec<Layer> = layers_to_remove
6179 23 : .iter()
6180 201 : .filter(|l| !duplicated_layers.contains(&l.layer_desc().key()))
6181 23 : .cloned()
6182 23 : .collect();
6183 23 :
6184 23 : if !new_images.is_empty() {
6185 0 : guard
6186 0 : .open_mut()?
6187 0 : .track_new_image_layers(new_images, &self.metrics);
6188 23 : }
6189 :
6190 23 : guard
6191 23 : .open_mut()?
6192 23 : .finish_compact_l0(&remove_layers, &insert_layers, &self.metrics);
6193 23 :
6194 23 : self.remote_client
6195 23 : .schedule_compaction_update(&remove_layers, new_deltas)?;
6196 :
6197 23 : drop_layer_manager_wlock(guard);
6198 23 :
6199 23 : Ok(())
6200 23 : }
6201 :
6202 0 : async fn rewrite_layers(
6203 0 : self: &Arc<Self>,
6204 0 : mut replace_layers: Vec<(Layer, ResidentLayer)>,
6205 0 : mut drop_layers: Vec<Layer>,
6206 0 : ) -> Result<(), CompactionError> {
6207 0 : let mut guard = self.layers.write(LayerManagerLockHolder::Compaction).await;
6208 :
6209 : // Trim our lists in case our caller (compaction) raced with someone else (GC) removing layers: we want
6210 : // to avoid double-removing, and avoid rewriting something that was removed.
6211 0 : replace_layers.retain(|(l, _)| guard.contains(l));
6212 0 : drop_layers.retain(|l| guard.contains(l));
6213 0 :
6214 0 : guard
6215 0 : .open_mut()?
6216 0 : .rewrite_layers(&replace_layers, &drop_layers, &self.metrics);
6217 0 :
6218 0 : let upload_layers: Vec<_> = replace_layers.into_iter().map(|r| r.1).collect();
6219 0 :
6220 0 : self.remote_client
6221 0 : .schedule_compaction_update(&drop_layers, &upload_layers)?;
6222 :
6223 0 : Ok(())
6224 0 : }
6225 :
6226 : /// Schedules the uploads of the given image layers
6227 192 : fn upload_new_image_layers(
6228 192 : self: &Arc<Self>,
6229 192 : new_images: impl IntoIterator<Item = ResidentLayer>,
6230 192 : ) -> Result<(), super::upload_queue::NotInitialized> {
6231 205 : for layer in new_images {
6232 13 : self.remote_client.schedule_layer_file_upload(layer)?;
6233 : }
6234 : // Should any new image layer have been created, not uploading index_part would
6235 : // result in a mismatch between remote_physical_size and the layermap-calculated
6236 : // size, which would fail some tests but should not be an issue otherwise.
6237 192 : self.remote_client
6238 192 : .schedule_index_upload_for_file_changes()?;
6239 192 : Ok(())
6240 192 : }
6241 :
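: /// Computes the time-based GC cutoff LSN. On shard zero this maps `now - pitr` to an LSN
: /// via a timestamp lookup; other shards instead learn the cutoff from shard zero's remote
: /// index (see the comments in the body below).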
6242 0 : async fn find_gc_time_cutoff(
6243 0 : &self,
6244 0 : now: SystemTime,
6245 0 : pitr: Duration,
6246 0 : cancel: &CancellationToken,
6247 0 : ctx: &RequestContext,
6248 0 : ) -> Result<Option<Lsn>, PageReconstructError> {
6249 0 : debug_assert_current_span_has_tenant_and_timeline_id();
6250 0 : if self.shard_identity.is_shard_zero() {
6251 : // Shard Zero has SLRU data and can calculate the PITR time -> LSN mapping itself
6252 0 : let time_range = if pitr == Duration::ZERO {
6253 0 : humantime::parse_duration(DEFAULT_PITR_INTERVAL).expect("constant is invalid")
6254 : } else {
6255 0 : pitr
6256 : };
6257 :
6258 : // If PITR is so large or `now` is so small that this underflows, we will retain no history (highly unexpected case)
6259 0 : let time_cutoff = now.checked_sub(time_range).unwrap_or(now);
6260 0 : let timestamp = to_pg_timestamp(time_cutoff);
6261 :
6262 0 : let time_cutoff = match self.find_lsn_for_timestamp(timestamp, cancel, ctx).await? {
6263 0 : LsnForTimestamp::Present(lsn) => Some(lsn),
6264 0 : LsnForTimestamp::Future(lsn) => {
6265 0 : // The timestamp is in the future. That sounds impossible,
6266 0 : // but what it really means is that there haven't been
6267 0 : // any commits since the cutoff timestamp.
6268 0 : //
6269 0 : // In this case we should use the LSN of the most recent commit,
6270 0 : // which is implicitly the last LSN in the log.
6271 0 : debug!("future({})", lsn);
6272 0 : Some(self.get_last_record_lsn())
6273 : }
6274 0 : LsnForTimestamp::Past(lsn) => {
6275 0 : debug!("past({})", lsn);
6276 0 : None
6277 : }
6278 0 : LsnForTimestamp::NoData(lsn) => {
6279 0 : debug!("nodata({})", lsn);
6280 0 : None
6281 : }
6282 : };
6283 0 : Ok(time_cutoff)
6284 : } else {
6285 : // Shards other than shard zero cannot do timestamp->lsn lookups, and must instead learn their GC cutoff
6286 : // from shard zero's index. The index doesn't explicitly tell us the time cutoff, but we may assume that
6287 : // the point up to which shard zero's last_gc_cutoff has advanced will either be the time cutoff, or a
6288 : // space cutoff that we would also have respected ourselves.
6289 0 : match self
6290 0 : .remote_client
6291 0 : .download_foreign_index(ShardNumber(0), cancel)
6292 0 : .await
6293 : {
6294 0 : Ok((index_part, index_generation, _index_mtime)) => {
6295 0 : tracing::info!(
6296 0 : "GC loaded shard zero metadata (gen {index_generation:?}): latest_gc_cutoff_lsn: {}",
6297 0 : index_part.metadata.latest_gc_cutoff_lsn()
6298 : );
6299 0 : Ok(Some(index_part.metadata.latest_gc_cutoff_lsn()))
6300 : }
6301 : Err(DownloadError::NotFound) => {
6302 : // This is unexpected, because during timeline creations shard zero persists to remote
6303 : // storage before other shards are called, and during timeline deletion non-zeroth shards are
6304 : // deleted before the zeroth one. However, it should be harmless: if we somehow end up in this
6305 : // state, then shard zero should _eventually_ write an index when it GCs.
6306 0 : tracing::warn!("GC couldn't find shard zero's index for timeline");
6307 0 : Ok(None)
6308 : }
6309 0 : Err(e) => {
6310 0 : // TODO: this function should return a different error type than page reconstruct error
6311 0 : Err(PageReconstructError::Other(anyhow::anyhow!(e)))
6312 : }
6313 : }
6314 :
6315 : // TODO: after reading shard zero's GC cutoff, we should validate its generation with the storage
6316 : // controller. Otherwise, it is possible that we see the GC cutoff go backwards while shard zero
6317 : // is going through a migration if we read the old location's index and it has GC'd ahead of the
6318 : // new location. This is legal in principle, but problematic in practice because it might result
6319 : // in a timeline creation succeeding on shard zero ('s new location) but then failing on other shards
6320 : // because they have GC'd past the branch point.
6321 : }
6322 0 : }
6323 :
6324 : /// Find the Lsns above which layer files need to be retained on
6325 : /// garbage collection.
6326 : ///
6327 : /// We calculate two cutoffs, one based on time and one based on WAL size. `pitr`
6328 : /// controls the time cutoff (or ZERO to disable time-based retention), and `space_cutoff` controls
6329 : /// the space-based retention.
6330 : ///
6331 : /// This function doesn't simply calculate time & space based retention: it treats time-based
6332 : /// retention as authoritative if enabled, and falls back to space-based retention if calculating
6333 : /// the LSN for a time point isn't possible. Therefore the space cutoff in the response might
6334 : /// be different from the `space_cutoff` input. Callers should treat the min() of the two cutoffs
6335 : /// in the response as the GC cutoff point for the timeline.
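: ///
: /// The resulting combinations, mirroring the match in the body:
: ///
: /// ```text
: /// (pitr, time_cutoff)  => GcCutoffs { time, space }
: /// (ZERO, Some(t))      => { last_record_lsn,   max(t, space_cutoff) }
: /// (ZERO, None)         => { last_record_lsn,   space_cutoff }
: /// (set,  None)         => { applied_gc_cutoff, space_cutoff }
: /// (set,  Some(t))      => { t,                 t }
: /// ```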
6336 : #[instrument(skip_all, fields(timeline_id=%self.timeline_id))]
6337 : pub(super) async fn find_gc_cutoffs(
6338 : &self,
6339 : now: SystemTime,
6340 : space_cutoff: Lsn,
6341 : pitr: Duration,
6342 : cancel: &CancellationToken,
6343 : ctx: &RequestContext,
6344 : ) -> Result<GcCutoffs, PageReconstructError> {
6345 : let _timer = self
6346 : .metrics
6347 : .find_gc_cutoffs_histo
6348 : .start_timer()
6349 : .record_on_drop();
6350 :
6351 : pausable_failpoint!("Timeline::find_gc_cutoffs-pausable");
6352 :
6353 : if cfg!(test) && pitr == Duration::ZERO {
6354 : // Unit tests which specify zero PITR interval expect to avoid doing any I/O for timestamp lookup
6355 : return Ok(GcCutoffs {
6356 : time: Some(self.get_last_record_lsn()),
6357 : space: space_cutoff,
6358 : });
6359 : }
6360 :
6361 : // Calculate a time-based limit on how much to retain:
6362 : // - if PITR interval is set, then this is our cutoff.
6363 : // - if PITR interval is not set, then we do a lookup
6364 : // based on DEFAULT_PITR_INTERVAL, so that size-based retention does not result in keeping history around permanently on idle databases.
6365 : let time_cutoff = self.find_gc_time_cutoff(now, pitr, cancel, ctx).await?;
6366 :
6367 : Ok(match (pitr, time_cutoff) {
6368 : (Duration::ZERO, Some(time_cutoff)) => {
6369 : // PITR is not set. Retain the size-based limit, or the default time retention,
6370 : // whichever requires less data.
6371 : GcCutoffs {
6372 : time: Some(self.get_last_record_lsn()),
6373 : space: std::cmp::max(time_cutoff, space_cutoff),
6374 : }
6375 : }
6376 : (Duration::ZERO, None) => {
6377 : // PITR is not set, and time lookup failed
6378 : GcCutoffs {
6379 : time: Some(self.get_last_record_lsn()),
6380 : space: space_cutoff,
6381 : }
6382 : }
6383 : (_, None) => {
6384 : // PITR interval is set & we didn't look up a timestamp successfully. Conservatively assume PITR
6385 : // cannot advance beyond what was already GC'd, and respect space-based retention
6386 : GcCutoffs {
6387 : time: Some(*self.get_applied_gc_cutoff_lsn()),
6388 : space: space_cutoff,
6389 : }
6390 : }
6391 : (_, Some(time_cutoff)) => {
6392 : // PITR interval is set and we looked up timestamp successfully. Ignore
6393 : // size based retention and make time cutoff authoritative
6394 : GcCutoffs {
6395 : time: Some(time_cutoff),
6396 : space: time_cutoff,
6397 : }
6398 : }
6399 : })
6400 : }
6401 :
6402 : /// Garbage collect layer files on a timeline that are no longer needed.
6403 : ///
6404 : /// Currently, we don't make any attempt at removing unneeded page versions
6405 : /// within a layer file. We can only remove the whole file if it's fully
6406 : /// obsolete.
6407 2 : pub(super) async fn gc(&self) -> Result<GcResult, GcError> {
6408 : // this is most likely the background tasks, but it might be the spawned task from
6409 : // immediate_gc
6410 2 : let _g = tokio::select! {
6411 2 : guard = self.gc_lock.lock() => guard,
6412 2 : _ = self.cancel.cancelled() => return Ok(GcResult::default()),
6413 : };
6414 2 : let timer = self.metrics.garbage_collect_histo.start_timer();
6415 2 :
6416 2 : fail_point!("before-timeline-gc");
6417 2 :
6418 2 : // Is the timeline being deleted?
6419 2 : if self.is_stopping() {
6420 0 : return Err(GcError::TimelineCancelled);
6421 2 : }
6422 2 :
6423 2 : let (space_cutoff, time_cutoff, retain_lsns, max_lsn_with_valid_lease) = {
6424 2 : let gc_info = self.gc_info.read().unwrap();
6425 2 :
6426 2 : let space_cutoff = min(gc_info.cutoffs.space, self.get_disk_consistent_lsn());
6427 2 : let time_cutoff = gc_info.cutoffs.time;
6428 2 : let retain_lsns = gc_info
6429 2 : .retain_lsns
6430 2 : .iter()
6431 2 : .map(|(lsn, _child_id, _is_offloaded)| *lsn)
6432 2 : .collect();
6433 2 :
6434 2 : // Gets the maximum LSN that holds the valid lease.
6435 2 : //
6436 2 : // Caveat: `refresh_gc_info` is in charged of updating the lease map.
6437 2 : // Here, we do not check for stale leases again.
6438 2 : let max_lsn_with_valid_lease = gc_info.leases.last_key_value().map(|(lsn, _)| *lsn);
6439 2 :
6440 2 : (
6441 2 : space_cutoff,
6442 2 : time_cutoff,
6443 2 : retain_lsns,
6444 2 : max_lsn_with_valid_lease,
6445 2 : )
6446 2 : };
6447 2 :
6448 2 : let mut new_gc_cutoff = space_cutoff.min(time_cutoff.unwrap_or_default());
6449 2 : let standby_horizon = self.standby_horizon.load();
6450 2 : // Hold GC for the standby, but as a safety guard do it only within some
6451 2 : // reasonable lag.
6452 2 : if standby_horizon != Lsn::INVALID {
6453 0 : if let Some(standby_lag) = new_gc_cutoff.checked_sub(standby_horizon) {
6454 : const MAX_ALLOWED_STANDBY_LAG: u64 = 10u64 << 30; // 10 GB
6455 0 : if standby_lag.0 < MAX_ALLOWED_STANDBY_LAG {
6456 0 : new_gc_cutoff = Lsn::min(standby_horizon, new_gc_cutoff);
6457 0 : trace!("holding off GC for standby apply LSN {}", standby_horizon);
6458 : } else {
6459 0 : warn!(
6460 0 : "standby is lagging for more than {}MB, not holding gc for it",
6461 0 : MAX_ALLOWED_STANDBY_LAG / 1024 / 1024
6462 : )
6463 : }
6464 0 : }
6465 2 : }
6466 :
6467 : // Reset the standby horizon to ignore it if it is not updated before the next GC.
6468 : // This is an easy way to unset it when the standby disappears, without adding
6469 : // more conf options.
6470 2 : self.standby_horizon.store(Lsn::INVALID);
6471 2 : self.metrics
6472 2 : .standby_horizon_gauge
6473 2 : .set(Lsn::INVALID.0 as i64);
6474 :
6475 2 : let res = self
6476 2 : .gc_timeline(
6477 2 : space_cutoff,
6478 2 : time_cutoff,
6479 2 : retain_lsns,
6480 2 : max_lsn_with_valid_lease,
6481 2 : new_gc_cutoff,
6482 2 : )
6483 2 : .instrument(
6484 2 : info_span!("gc_timeline", timeline_id = %self.timeline_id, cutoff = %new_gc_cutoff),
6485 : )
6486 2 : .await?;
6487 :
6488 : // only record successes
6489 2 : timer.stop_and_record();
6490 2 :
6491 2 : Ok(res)
6492 2 : }
6493 :
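: /// Applies the GC decision to the layer map: a layer is removed only if it is older than
: /// both cutoffs, not pinned by `retain_lsns` or a valid lease, and fully covered by newer
: /// image layers (see the numbered conditions in the body).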
6494 2 : async fn gc_timeline(
6495 2 : &self,
6496 2 : space_cutoff: Lsn,
6497 2 : time_cutoff: Option<Lsn>, // None if uninitialized
6498 2 : retain_lsns: Vec<Lsn>,
6499 2 : max_lsn_with_valid_lease: Option<Lsn>,
6500 2 : new_gc_cutoff: Lsn,
6501 2 : ) -> Result<GcResult, GcError> {
6502 2 : // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc
6503 2 :
6504 2 : let now = SystemTime::now();
6505 2 : let mut result: GcResult = GcResult::default();
6506 2 :
6507 2 : // Nothing to GC. Return early.
6508 2 : let latest_gc_cutoff = *self.get_applied_gc_cutoff_lsn();
6509 2 : if latest_gc_cutoff >= new_gc_cutoff {
6510 0 : info!(
6511 0 : "Nothing to GC: new_gc_cutoff_lsn {new_gc_cutoff}, latest_gc_cutoff_lsn {latest_gc_cutoff}",
6512 : );
6513 0 : return Ok(result);
6514 2 : }
6515 :
6516 2 : let Some(time_cutoff) = time_cutoff else {
6517 : // The GC cutoff should have been computed by now, but let's be defensive.
6518 0 : info!("Nothing to GC: time_cutoff not yet computed");
6519 0 : return Ok(result);
6520 : };
6521 :
6522 : // We need to ensure that no one tries to read page versions or create
6523 : // branches at a point before latest_gc_cutoff_lsn. See branch_timeline()
6524 : // for details. This will block until the old value is no longer in use.
6525 : //
6526 : // The GC cutoff should only ever move forwards.
6527 2 : let waitlist = {
6528 2 : let write_guard = self.applied_gc_cutoff_lsn.lock_for_write();
6529 2 : if *write_guard > new_gc_cutoff {
6530 0 : return Err(GcError::BadLsn {
6531 0 : why: format!(
6532 0 : "Cannot move GC cutoff LSN backwards (was {}, new {})",
6533 0 : *write_guard, new_gc_cutoff
6534 0 : ),
6535 0 : });
6536 2 : }
6537 2 :
6538 2 : write_guard.store_and_unlock(new_gc_cutoff)
6539 2 : };
6540 2 : waitlist.wait().await;
6541 :
6542 2 : info!("GC starting");
6543 :
6544 2 : debug!("retain_lsns: {:?}", retain_lsns);
6545 :
6546 2 : let mut layers_to_remove = Vec::new();
6547 :
6548 : // Scan all layers in the timeline (remote or on-disk).
6549 : //
6550 : // Garbage collect the layer if all conditions are satisfied:
6551 : // 1. it is older than cutoff LSN;
6552 : // 2. it is older than PITR interval;
6553 : // 3. it doesn't need to be retained for 'retain_lsns';
6554 : // 4. it does not need to be kept for LSNs holding valid leases.
6555 : // 5. newer on-disk image layers cover the layer's whole key range
6556 : //
6557 : // TODO holding a write lock is too aggressive and avoidable
6558 2 : let mut guard = self
6559 2 : .layers
6560 2 : .write(LayerManagerLockHolder::GarbageCollection)
6561 2 : .await;
6562 2 : let layers = guard.layer_map()?;
6563 12 : 'outer: for l in layers.iter_historic_layers() {
6564 12 : result.layers_total += 1;
6565 12 :
6566 12 : // 1. Is it newer than GC horizon cutoff point?
6567 12 : if l.get_lsn_range().end > space_cutoff {
6568 1 : info!(
6569 0 : "keeping {} because it's newer than space_cutoff {}",
6570 0 : l.layer_name(),
6571 : space_cutoff,
6572 : );
6573 1 : result.layers_needed_by_cutoff += 1;
6574 1 : continue 'outer;
6575 11 : }
6576 11 :
6577 11 : // 2. Is it newer than the PITR cutoff point?
6578 11 : if l.get_lsn_range().end > time_cutoff {
6579 0 : info!(
6580 0 : "keeping {} because it's newer than time_cutoff {}",
6581 0 : l.layer_name(),
6582 : time_cutoff,
6583 : );
6584 0 : result.layers_needed_by_pitr += 1;
6585 0 : continue 'outer;
6586 11 : }
6587 :
6588 : // 3. Is it needed by a child branch?
6589 : // NOTE With that we would keep data that
6590 : // might be referenced by child branches forever.
6591 : // We can track this in child timeline GC and delete parent layers when
6592 : // they are no longer needed. This might be complicated with long inheritance chains.
6593 : //
6594 : // TODO Vec is not a great choice for `retain_lsns`
6595 11 : for retain_lsn in &retain_lsns {
6596 : // start_lsn is inclusive
6597 0 : if &l.get_lsn_range().start <= retain_lsn {
6598 0 : info!(
6599 0 : "keeping {} because it's still might be referenced by child branch forked at {} is_dropped: xx is_incremental: {}",
6600 0 : l.layer_name(),
6601 0 : retain_lsn,
6602 0 : l.is_incremental(),
6603 : );
6604 0 : result.layers_needed_by_branches += 1;
6605 0 : continue 'outer;
6606 0 : }
6607 : }
6608 :
6609 : // 4. Is there a valid lease that requires us to keep this layer?
6610 11 : if let Some(lsn) = &max_lsn_with_valid_lease {
6611 : // keep the layer if its start LSN <= the maximum leased LSN
6612 9 : if &l.get_lsn_range().start <= lsn {
6613 7 : info!(
6614 0 : "keeping {} because there is a valid lease preventing GC at {}",
6615 0 : l.layer_name(),
6616 : lsn,
6617 : );
6618 7 : result.layers_needed_by_leases += 1;
6619 7 : continue 'outer;
6620 2 : }
6621 2 : }
6622 :
6623 : // 5. Is there a later on-disk layer for this relation?
6624 : //
6625 : // The end-LSN is exclusive, while disk_consistent_lsn is
6626 : // inclusive. For example, if disk_consistent_lsn is 100, it is
6627 : // OK for a delta layer to have end LSN 101, but if the end LSN
6628 : // is 102, then it might not have been fully flushed to disk
6629 : // before crash.
6630 : //
6631 : // For example, imagine that the following layers exist:
6632 : //
6633 : // 1000 - image (A)
6634 : // 1000-2000 - delta (B)
6635 : // 2000 - image (C)
6636 : // 2000-3000 - delta (D)
6637 : // 3000 - image (E)
6638 : //
6639 : // If GC horizon is at 2500, we can remove layers A and B, but
6640 : // we cannot remove C, even though it's older than 2500, because
6641 : // the delta layer 2000-3000 depends on it.
6642 4 : if !layers
6643 4 : .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff))
6644 : {
6645 3 : info!("keeping {} because it is the latest layer", l.layer_name());
6646 3 : result.layers_not_updated += 1;
6647 3 : continue 'outer;
6648 1 : }
6649 1 :
6650 1 : // We didn't find any reason to keep this file, so remove it.
6651 1 : info!(
6652 0 : "garbage collecting {} is_dropped: xx is_incremental: {}",
6653 0 : l.layer_name(),
6654 0 : l.is_incremental(),
6655 : );
6656 1 : layers_to_remove.push(l);
6657 : }
6658 :
6659 2 : if !layers_to_remove.is_empty() {
6660 : // Persist the new GC cutoff value before we actually remove anything.
6661 : // This also unconditionally schedules an index_part.json update, even though we will
6662 : // be doing one a bit later with the unlinked gc'd layers.
6663 1 : let disk_consistent_lsn = self.disk_consistent_lsn.load();
6664 1 : self.schedule_uploads(disk_consistent_lsn, None)
6665 1 : .map_err(|e| {
6666 0 : if self.cancel.is_cancelled() {
6667 0 : GcError::TimelineCancelled
6668 : } else {
6669 0 : GcError::Remote(e)
6670 : }
6671 1 : })?;
6672 :
6673 1 : let gc_layers = layers_to_remove
6674 1 : .iter()
6675 1 : .map(|x| guard.get_from_desc(x))
6676 1 : .collect::<Vec<Layer>>();
6677 1 :
6678 1 : result.layers_removed = gc_layers.len() as u64;
6679 1 :
6680 1 : self.remote_client.schedule_gc_update(&gc_layers)?;
6681 :
6682 1 : guard.open_mut()?.finish_gc_timeline(&gc_layers);
6683 1 :
6684 1 : #[cfg(feature = "testing")]
6685 1 : {
6686 1 : result.doomed_layers = gc_layers;
6687 1 : }
6688 1 : }
6689 :
6690 2 : info!(
6691 0 : "GC completed removing {} layers, cutoff {}",
6692 : result.layers_removed, new_gc_cutoff
6693 : );
6694 :
6695 2 : result.elapsed = now.elapsed().unwrap_or(Duration::ZERO);
6696 2 : Ok(result)
6697 2 : }
6698 :
6699 : /// Reconstruct a value, using the given base image and WAL records in 'data'.
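: ///
: /// If `data` holds only a page image, that image is returned as-is; otherwise the WAL
: /// records (plus the base image, if any) are passed to the WAL redo manager to
: /// materialize the page.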
6700 364847 : async fn reconstruct_value(
6701 364847 : &self,
6702 364847 : key: Key,
6703 364847 : request_lsn: Lsn,
6704 364847 : mut data: ValueReconstructState,
6705 364847 : redo_attempt_type: RedoAttemptType,
6706 364847 : ) -> Result<Bytes, PageReconstructError> {
6707 364847 : // Perform WAL redo if needed
6708 364847 : data.records.reverse();
6709 :
6710 364847 : let fire_critical_error = match redo_attempt_type {
6711 363514 : RedoAttemptType::ReadPage => true,
6712 0 : RedoAttemptType::LegacyCompaction => true,
6713 1333 : RedoAttemptType::GcCompaction => false,
6714 : };
6715 :
6716 : // If we have a page image, and no WAL, we're all set
6717 364847 : if data.records.is_empty() {
6718 338073 : if let Some((img_lsn, img)) = &data.img {
6719 338073 : trace!(
6720 0 : "found page image for key {} at {}, no WAL redo required, req LSN {}",
6721 : key, img_lsn, request_lsn,
6722 : );
6723 338073 : Ok(img.clone())
6724 : } else {
6725 0 : Err(PageReconstructError::from(anyhow!(
6726 0 : "base image for {key} at {request_lsn} not found"
6727 0 : )))
6728 : }
6729 : } else {
6730 : // We need to do WAL redo.
6731 : //
6732 : // If we don't have a base image, then the oldest WAL record better initialize
6733 : // the page
6734 26774 : if data.img.is_none() && !data.records.first().unwrap().1.will_init() {
6735 0 : Err(PageReconstructError::from(anyhow!(
6736 0 : "Base image for {} at {} not found, but got {} WAL records",
6737 0 : key,
6738 0 : request_lsn,
6739 0 : data.records.len()
6740 0 : )))
6741 : } else {
6742 26774 : if data.img.is_some() {
6743 13029 : trace!(
6744 0 : "found {} WAL records and a base image for {} at {}, performing WAL redo",
6745 0 : data.records.len(),
6746 : key,
6747 : request_lsn
6748 : );
6749 : } else {
6750 13745 : trace!(
6751 0 : "found {} WAL records that will init the page for {} at {}, performing WAL redo",
6752 0 : data.records.len(),
6753 : key,
6754 : request_lsn
6755 : );
6756 : };
6757 26774 : let res = self
6758 26774 : .walredo_mgr
6759 26774 : .as_ref()
6760 26774 : .context("timeline has no walredo manager")
6761 26774 : .map_err(PageReconstructError::WalRedo)?
6762 26774 : .request_redo(
6763 26774 : key,
6764 26774 : request_lsn,
6765 26774 : data.img,
6766 26774 : data.records,
6767 26774 : self.pg_version,
6768 26774 : redo_attempt_type,
6769 26774 : )
6770 26774 : .await;
6771 26773 : let img = match res {
6772 26773 : Ok(img) => img,
6773 0 : Err(walredo::Error::Cancelled) => return Err(PageReconstructError::Cancelled),
6774 1 : Err(walredo::Error::Other(err)) => {
6775 1 : if fire_critical_error {
6776 0 : critical!("walredo failure during page reconstruction: {err:?}");
6777 1 : }
6778 1 : return Err(PageReconstructError::WalRedo(
6779 1 : err.context("reconstruct a page image"),
6780 1 : ));
6781 : }
6782 : };
6783 26773 : Ok(img)
6784 : }
6785 : }
6786 364847 : }
6787 :
6788 0 : pub(crate) async fn spawn_download_all_remote_layers(
6789 0 : self: Arc<Self>,
6790 0 : request: DownloadRemoteLayersTaskSpawnRequest,
6791 0 : ctx: &RequestContext,
6792 0 : ) -> Result<DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskInfo> {
6793 : use pageserver_api::models::DownloadRemoteLayersTaskState;
6794 :
6795 : // This is not really needed anymore; there are tests which check the return value from the
6796 : // HTTP API. It would be better not to maintain this anymore.
6797 :
6798 0 : let mut status_guard = self.download_all_remote_layers_task_info.write().unwrap();
6799 0 : if let Some(st) = &*status_guard {
6800 0 : match &st.state {
6801 : DownloadRemoteLayersTaskState::Running => {
6802 0 : return Err(st.clone());
6803 : }
6804 : DownloadRemoteLayersTaskState::ShutDown
6805 0 : | DownloadRemoteLayersTaskState::Completed => {
6806 0 : *status_guard = None;
6807 0 : }
6808 : }
6809 0 : }
6810 :
6811 0 : let self_clone = Arc::clone(&self);
6812 0 : let task_ctx = ctx.detached_child(
6813 0 : TaskKind::DownloadAllRemoteLayers,
6814 0 : DownloadBehavior::Download,
6815 0 : );
6816 0 : let task_id = task_mgr::spawn(
6817 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
6818 0 : task_mgr::TaskKind::DownloadAllRemoteLayers,
6819 0 : self.tenant_shard_id,
6820 0 : Some(self.timeline_id),
6821 0 : "download all remote layers task",
6822 0 : async move {
6823 0 : self_clone.download_all_remote_layers(request, &task_ctx).await;
6824 0 : let mut status_guard = self_clone.download_all_remote_layers_task_info.write().unwrap();
6825 0 : match &mut *status_guard {
6826 : None => {
6827 0 : warn!("tasks status is supposed to be Some(), since we are running");
6828 : }
6829 0 : Some(st) => {
6830 0 : let exp_task_id = format!("{}", task_mgr::current_task_id().unwrap());
6831 0 : if st.task_id != exp_task_id {
6832 0 : warn!("task id changed while we were still running, expecting {} but have {}", exp_task_id, st.task_id);
6833 0 : } else {
6834 0 : st.state = DownloadRemoteLayersTaskState::Completed;
6835 0 : }
6836 : }
6837 : };
6838 0 : Ok(())
6839 0 : }
6840 0 : .instrument(info_span!(parent: None, "download_all_remote_layers", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
6841 : );
6842 :
6843 0 : let initial_info = DownloadRemoteLayersTaskInfo {
6844 0 : task_id: format!("{task_id}"),
6845 0 : state: DownloadRemoteLayersTaskState::Running,
6846 0 : total_layer_count: 0,
6847 0 : successful_download_count: 0,
6848 0 : failed_download_count: 0,
6849 0 : };
6850 0 : *status_guard = Some(initial_info.clone());
6851 0 :
6852 0 : Ok(initial_info)
6853 0 : }
6854 :
6855 0 : async fn download_all_remote_layers(
6856 0 : self: &Arc<Self>,
6857 0 : request: DownloadRemoteLayersTaskSpawnRequest,
6858 0 : ctx: &RequestContext,
6859 0 : ) {
6860 : use pageserver_api::models::DownloadRemoteLayersTaskState;
6861 :
6862 0 : let remaining = {
6863 0 : let guard = self
6864 0 : .layers
6865 0 : .read(LayerManagerLockHolder::GetLayerMapInfo)
6866 0 : .await;
6867 0 : let Ok(lm) = guard.layer_map() else {
6868 : // technically here we could look into iterating accessible layers, but downloading
6869 : // all layers of a shutdown timeline makes no sense regardless.
6870 0 : tracing::info!("attempted to download all layers of shutdown timeline");
6871 0 : return;
6872 : };
6873 0 : lm.iter_historic_layers()
6874 0 : .map(|desc| guard.get_from_desc(&desc))
6875 0 : .collect::<Vec<_>>()
6876 0 : };
6877 0 : let total_layer_count = remaining.len();
6878 :
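: // Acquire the task-info write lock and bind it to `$st`, asserting that the stored task id
: // matches the currently running task.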
6879 : macro_rules! lock_status {
6880 : ($st:ident) => {
6881 : let mut st = self.download_all_remote_layers_task_info.write().unwrap();
6882 : let st = st
6883 : .as_mut()
6884 : .expect("this function is only called after the task has been spawned");
6885 : assert_eq!(
6886 : st.task_id,
6887 : format!(
6888 : "{}",
6889 : task_mgr::current_task_id().expect("we run inside a task_mgr task")
6890 : )
6891 : );
6892 : let $st = st;
6893 : };
6894 : }
6895 :
6896 : {
6897 0 : lock_status!(st);
6898 0 : st.total_layer_count = total_layer_count as u64;
6899 0 : }
6900 0 :
6901 0 : let mut remaining = remaining.into_iter();
6902 0 : let mut have_remaining = true;
6903 0 : let mut js = tokio::task::JoinSet::new();
6904 0 :
6905 0 : let cancel = task_mgr::shutdown_token();
6906 0 :
6907 0 : let limit = request.max_concurrent_downloads;
6908 :
6909 : loop {
6910 0 : while js.len() < limit.get() && have_remaining && !cancel.is_cancelled() {
6911 0 : let Some(next) = remaining.next() else {
6912 0 : have_remaining = false;
6913 0 : break;
6914 : };
6915 :
6916 0 : let span = tracing::info_span!("download", layer = %next);
6917 :
6918 0 : let ctx = ctx.attached_child();
6919 0 : js.spawn(
6920 0 : async move {
6921 0 : let res = next.download(&ctx).await;
6922 0 : (next, res)
6923 0 : }
6924 0 : .instrument(span),
6925 0 : );
6926 0 : }
6927 :
6928 0 : while let Some(res) = js.join_next().await {
6929 0 : match res {
6930 : Ok((_, Ok(_))) => {
6931 0 : lock_status!(st);
6932 0 : st.successful_download_count += 1;
6933 : }
6934 0 : Ok((layer, Err(e))) => {
6935 0 : tracing::error!(%layer, "download failed: {e:#}");
6936 0 : lock_status!(st);
6937 0 : st.failed_download_count += 1;
6938 : }
6939 0 : Err(je) if je.is_cancelled() => unreachable!("not used here"),
6940 0 : Err(je) if je.is_panic() => {
6941 0 : lock_status!(st);
6942 0 : st.failed_download_count += 1;
6943 : }
6944 0 : Err(je) => tracing::warn!("unknown joinerror: {je:?}"),
6945 : }
6946 : }
6947 :
6948 0 : if js.is_empty() && (!have_remaining || cancel.is_cancelled()) {
6949 0 : break;
6950 0 : }
6951 : }
6952 :
6953 : {
6954 0 : lock_status!(st);
6955 0 : st.state = DownloadRemoteLayersTaskState::Completed;
6956 : }
6957 0 : }
6958 :
6959 0 : pub(crate) fn get_download_all_remote_layers_task_info(
6960 0 : &self,
6961 0 : ) -> Option<DownloadRemoteLayersTaskInfo> {
6962 0 : self.download_all_remote_layers_task_info
6963 0 : .read()
6964 0 : .unwrap()
6965 0 : .clone()
6966 0 : }
6967 : }
6968 :
6969 : impl Timeline {
6970 : /// Returns non-remote layers for eviction.
6971 0 : pub(crate) async fn get_local_layers_for_disk_usage_eviction(&self) -> DiskUsageEvictionInfo {
6972 0 : let guard = self.layers.read(LayerManagerLockHolder::Eviction).await;
6973 0 : let mut max_layer_size: Option<u64> = None;
6974 0 :
6975 0 : let resident_layers = guard
6976 0 : .likely_resident_layers()
6977 0 : .map(|layer| {
6978 0 : let file_size = layer.layer_desc().file_size;
6979 0 : max_layer_size = max_layer_size.map_or(Some(file_size), |m| Some(m.max(file_size)));
6980 0 :
6981 0 : let last_activity_ts = layer.latest_activity();
6982 0 :
6983 0 : EvictionCandidate {
6984 0 : layer: layer.to_owned().into(),
6985 0 : last_activity_ts,
6986 0 : relative_last_activity: finite_f32::FiniteF32::ZERO,
6987 0 : visibility: layer.visibility(),
6988 0 : }
6989 0 : })
6990 0 : .collect();
6991 0 :
6992 0 : DiskUsageEvictionInfo {
6993 0 : max_layer_size,
6994 0 : resident_layers,
6995 0 : }
6996 0 : }
6997 :
6998 961 : pub(crate) fn get_shard_index(&self) -> ShardIndex {
6999 961 : ShardIndex {
7000 961 : shard_number: self.tenant_shard_id.shard_number,
7001 961 : shard_count: self.tenant_shard_id.shard_count,
7002 961 : }
7003 961 : }
7004 :
7005 : /// Persistently blocks gc for `Manual` reason.
7006 : ///
7007 : /// Returns true if no such block existed before, false otherwise.
7008 0 : pub(crate) async fn block_gc(&self, tenant: &super::TenantShard) -> anyhow::Result<bool> {
7009 : use crate::tenant::remote_timeline_client::index::GcBlockingReason;
7010 0 : assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
7011 0 : tenant.gc_block.insert(self, GcBlockingReason::Manual).await
7012 0 : }
7013 :
7014 : /// Persistently unblocks gc for `Manual` reason.
7015 0 : pub(crate) async fn unblock_gc(&self, tenant: &super::TenantShard) -> anyhow::Result<()> {
7016 : use crate::tenant::remote_timeline_client::index::GcBlockingReason;
7017 0 : assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
7018 0 : tenant.gc_block.remove(self, GcBlockingReason::Manual).await
7019 0 : }
7020 :
7021 : #[cfg(test)]
7022 31 : pub(super) fn force_advance_lsn(self: &Arc<Timeline>, new_lsn: Lsn) {
7023 31 : self.last_record_lsn.advance(new_lsn);
7024 31 : }
7025 :
7026 : #[cfg(test)]
7027 2 : pub(super) fn force_set_disk_consistent_lsn(&self, new_value: Lsn) {
7028 2 : self.disk_consistent_lsn.store(new_value);
7029 2 : }
7030 :
7031 : /// Force create an image layer and place it into the layer map.
7032 : ///
7033 : /// DO NOT use this function directly. Use [`TenantShard::branch_timeline_test_with_layers`]
7034 : /// or [`TenantShard::create_test_timeline_with_layers`] to ensure all these layers are
7035 : /// placed into the layer map in one run AND validated.
7036 : #[cfg(test)]
7037 36 : pub(super) async fn force_create_image_layer(
7038 36 : self: &Arc<Timeline>,
7039 36 : lsn: Lsn,
7040 36 : mut images: Vec<(Key, Bytes)>,
7041 36 : check_start_lsn: Option<Lsn>,
7042 36 : ctx: &RequestContext,
7043 36 : ) -> anyhow::Result<()> {
7044 36 : let last_record_lsn = self.get_last_record_lsn();
7045 36 : assert!(
7046 36 : lsn <= last_record_lsn,
7047 0 : "advance last record lsn before inserting a layer, lsn={lsn}, last_record_lsn={last_record_lsn}"
7048 : );
7049 36 : if let Some(check_start_lsn) = check_start_lsn {
7050 36 : assert!(lsn >= check_start_lsn);
7051 0 : }
7052 240 : images.sort_unstable_by(|(ka, _), (kb, _)| ka.cmp(kb));
7053 36 : let min_key = *images.first().map(|(k, _)| k).unwrap();
7054 36 : let end_key = images.last().map(|(k, _)| k).unwrap().next();
7055 36 : let mut image_layer_writer = ImageLayerWriter::new(
7056 36 : self.conf,
7057 36 : self.timeline_id,
7058 36 : self.tenant_shard_id,
7059 36 : &(min_key..end_key),
7060 36 : lsn,
7061 36 : &self.gate,
7062 36 : self.cancel.clone(),
7063 36 : ctx,
7064 36 : )
7065 36 : .await?;
7066 312 : for (key, img) in images {
7067 276 : image_layer_writer.put_image(key, img, ctx).await?;
7068 : }
7069 36 : let (desc, path) = image_layer_writer.finish(ctx).await?;
7070 36 : let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
7071 36 : info!("force created image layer {}", image_layer.local_path());
7072 : {
7073 36 : let mut guard = self.layers.write(LayerManagerLockHolder::Testing).await;
7074 36 : guard
7075 36 : .open_mut()
7076 36 : .unwrap()
7077 36 : .force_insert_layer(image_layer.clone());
7078 36 : }
7079 36 :
7080 36 : // Update remote_timeline_client state to reflect existence of this layer
7081 36 : self.remote_client
7082 36 : .schedule_layer_file_upload(image_layer)
7083 36 : .unwrap();
7084 36 :
7085 36 : Ok(())
7086 36 : }
7087 :
7088 : /// Force create a delta layer and place it into the layer map.
7089 : ///
7090 : /// DO NOT use this function directly. Use [`TenantShard::branch_timeline_test_with_layers`]
7091 : /// or [`TenantShard::create_test_timeline_with_layers`] to ensure all these layers are
7092 : /// placed into the layer map in one run AND validated.
7093 : #[cfg(test)]
7094 50 : pub(super) async fn force_create_delta_layer(
7095 50 : self: &Arc<Timeline>,
7096 50 : mut deltas: DeltaLayerTestDesc,
7097 50 : check_start_lsn: Option<Lsn>,
7098 50 : ctx: &RequestContext,
7099 50 : ) -> anyhow::Result<()> {
7100 50 : let last_record_lsn = self.get_last_record_lsn();
7101 50 : deltas
7102 50 : .data
7103 124364 : .sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb)));
7104 50 : assert!(deltas.data.first().unwrap().0 >= deltas.key_range.start);
7105 50 : assert!(deltas.data.last().unwrap().0 < deltas.key_range.end);
7106 10464 : for (_, lsn, _) in &deltas.data {
7107 10414 : assert!(deltas.lsn_range.start <= *lsn && *lsn < deltas.lsn_range.end);
7108 : }
7109 50 : assert!(
7110 50 : deltas.lsn_range.end <= last_record_lsn,
7111 0 : "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}",
7112 : deltas.lsn_range.end,
7113 : last_record_lsn
7114 : );
7115 50 : if let Some(check_start_lsn) = check_start_lsn {
7116 50 : assert!(deltas.lsn_range.start >= check_start_lsn);
7117 0 : }
7118 50 : let mut delta_layer_writer = DeltaLayerWriter::new(
7119 50 : self.conf,
7120 50 : self.timeline_id,
7121 50 : self.tenant_shard_id,
7122 50 : deltas.key_range.start,
7123 50 : deltas.lsn_range,
7124 50 : &self.gate,
7125 50 : self.cancel.clone(),
7126 50 : ctx,
7127 50 : )
7128 50 : .await?;
7129 10464 : for (key, lsn, val) in deltas.data {
7130 10414 : delta_layer_writer.put_value(key, lsn, val, ctx).await?;
7131 : }
7132 50 : let (desc, path) = delta_layer_writer.finish(deltas.key_range.end, ctx).await?;
7133 50 : let delta_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
7134 50 : info!("force created delta layer {}", delta_layer.local_path());
7135 : {
7136 50 : let mut guard = self.layers.write(LayerManagerLockHolder::Testing).await;
7137 50 : guard
7138 50 : .open_mut()
7139 50 : .unwrap()
7140 50 : .force_insert_layer(delta_layer.clone());
7141 50 : }
7142 50 :
7143 50 : // Update remote_timeline_client state to reflect existence of this layer
7144 50 : self.remote_client
7145 50 : .schedule_layer_file_upload(delta_layer)
7146 50 : .unwrap();
7147 50 :
7148 50 : Ok(())
7149 50 : }
7150 :
7151 : /// Force create an in-memory layer and place it into the layer map.
7152 : #[cfg(test)]
7153 4 : pub(super) async fn force_create_in_memory_layer(
7154 4 : self: &Arc<Timeline>,
7155 4 : mut in_memory: InMemoryLayerTestDesc,
7156 4 : check_start_lsn: Option<Lsn>,
7157 4 : ctx: &RequestContext,
7158 4 : ) -> anyhow::Result<()> {
7159 : use utils::bin_ser::BeSer;
7160 :
7161 : // Validate LSNs
7162 4 : if let Some(check_start_lsn) = check_start_lsn {
7163 4 : assert!(in_memory.lsn_range.start >= check_start_lsn);
7164 0 : }
7165 :
7166 4 : let last_record_lsn = self.get_last_record_lsn();
7167 4 : let layer_end_lsn = if in_memory.is_open {
7168 1 : in_memory
7169 1 : .data
7170 1 : .iter()
7171 10 : .map(|(_key, lsn, _value)| lsn)
7172 1 : .max()
7173 1 : .cloned()
7174 : } else {
7175 3 : Some(in_memory.lsn_range.end)
7176 : };
7177 :
7178 4 : if let Some(end) = layer_end_lsn {
7179 4 : assert!(
7180 4 : end <= last_record_lsn,
7181 0 : "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}",
7182 : end,
7183 : last_record_lsn,
7184 : );
7185 0 : }
7186 :
7187 19820 : in_memory.data.iter().for_each(|(_key, lsn, _value)| {
7188 19820 : assert!(*lsn >= in_memory.lsn_range.start);
7189 19820 : assert!(*lsn < in_memory.lsn_range.end);
7190 19820 : });
7191 4 :
7192 4 : // Build the batch
7193 4 : in_memory
7194 4 : .data
7195 273384 : .sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb)));
7196 4 :
7197 4 : let data = in_memory
7198 4 : .data
7199 4 : .into_iter()
7200 19820 : .map(|(key, lsn, value)| {
7201 19820 : let value_size = value.serialized_size().unwrap() as usize;
7202 19820 : (key.to_compact(), lsn, value_size, value)
7203 19820 : })
7204 4 : .collect::<Vec<_>>();
7205 4 :
7206 4 : let batch = SerializedValueBatch::from_values(data);
7207 :
7208 : // Create the in-memory layer and write the batch into it
7209 4 : let layer = InMemoryLayer::create(
7210 4 : self.conf,
7211 4 : self.timeline_id,
7212 4 : self.tenant_shard_id,
7213 4 : in_memory.lsn_range.start,
7214 4 : &self.gate,
7215 4 : // TODO: if we ever use this function in production code, we need to pass the real cancellation token
7216 4 : &CancellationToken::new(),
7217 4 : ctx,
7218 4 : )
7219 4 : .await
7220 4 : .unwrap();
7221 4 :
7222 4 : layer.put_batch(batch, ctx).await.unwrap();
7223 4 : if !in_memory.is_open {
7224 3 : layer.freeze(in_memory.lsn_range.end).await;
7225 1 : }
7226 :
7227 4 : info!("force created in-memory layer {:?}", in_memory.lsn_range);
7228 :
7229 : // Link the layer to the layer map
7230 : {
7231 4 : let mut guard = self.layers.write(LayerManagerLockHolder::Testing).await;
7232 4 : let layer_map = guard.open_mut().unwrap();
7233 4 : layer_map.force_insert_in_memory_layer(Arc::new(layer));
7234 4 : }
7235 4 :
7236 4 : Ok(())
7237 4 : }
7238 :
7239 : /// Return all key/image pairs stored at the given LSN in the image layers
7240 : #[cfg(test)]
7241 3 : pub(crate) async fn inspect_image_layers(
7242 3 : self: &Arc<Timeline>,
7243 3 : lsn: Lsn,
7244 3 : ctx: &RequestContext,
7245 3 : io_concurrency: IoConcurrency,
7246 3 : ) -> anyhow::Result<Vec<(Key, Bytes)>> {
7247 3 : let mut all_data = Vec::new();
7248 3 : let guard = self.layers.read(LayerManagerLockHolder::Testing).await;
7249 17 : for layer in guard.layer_map()?.iter_historic_layers() {
7250 17 : if !layer.is_delta() && layer.image_layer_lsn() == lsn {
7251 4 : let layer = guard.get_from_desc(&layer);
7252 4 : let mut reconstruct_data = ValuesReconstructState::new(io_concurrency.clone());
7253 4 : layer
7254 4 : .get_values_reconstruct_data(
7255 4 : KeySpace::single(Key::MIN..Key::MAX),
7256 4 : lsn..Lsn(lsn.0 + 1),
7257 4 : &mut reconstruct_data,
7258 4 : ctx,
7259 4 : )
7260 4 : .await?;
7261 33 : for (k, v) in std::mem::take(&mut reconstruct_data.keys) {
7262 33 : let v = v.collect_pending_ios().await?;
7263 33 : all_data.push((k, v.img.unwrap().1));
7264 : }
7265 13 : }
7266 : }
7267 3 : all_data.sort();
7268 3 : Ok(all_data)
7269 3 : }
7270 :
7271 : /// Get all historic layer descriptors in the layer map
7272 : #[cfg(test)]
7273 12 : pub(crate) async fn inspect_historic_layers(
7274 12 : self: &Arc<Timeline>,
7275 12 : ) -> anyhow::Result<Vec<super::storage_layer::PersistentLayerKey>> {
7276 12 : let mut layers = Vec::new();
7277 12 : let guard = self.layers.read(LayerManagerLockHolder::Testing).await;
7278 57 : for layer in guard.layer_map()?.iter_historic_layers() {
7279 57 : layers.push(layer.key());
7280 57 : }
7281 12 : Ok(layers)
7282 12 : }
7283 :
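 : /// Merge `ks` into this timeline's extra test-only dense keyspace.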
7284 : #[cfg(test)]
7285 5 : pub(crate) fn add_extra_test_dense_keyspace(&self, ks: KeySpace) {
7286 5 : let mut keyspace = self.extra_test_dense_keyspace.load().as_ref().clone();
7287 5 : keyspace.merge(&ks);
7288 5 : self.extra_test_dense_keyspace.store(Arc::new(keyspace));
7289 5 : }
7290 : }
7291 :
7292 : /// Tracks the writes that ingestion makes to a particular in-memory layer.
7293 : ///
7294 : /// Cleared upon freezing a layer.
7295 : pub(crate) struct TimelineWriterState {
7296 : open_layer: Arc<InMemoryLayer>,
7297 : current_size: u64,
7298 : // Previous Lsn which passed through
7299 : prev_lsn: Option<Lsn>,
7300 : // Largest Lsn which passed through the current writer
7301 : max_lsn: Option<Lsn>,
7302 : // Cached details of the last freeze. Avoids going trough the atomic/lock on every put.
7303 : cached_last_freeze_at: Lsn,
7304 : }
7305 :
7306 : impl TimelineWriterState {
7307 659 : fn new(open_layer: Arc<InMemoryLayer>, current_size: u64, last_freeze_at: Lsn) -> Self {
7308 659 : Self {
7309 659 : open_layer,
7310 659 : current_size,
7311 659 : prev_lsn: None,
7312 659 : max_lsn: None,
7313 659 : cached_last_freeze_at: last_freeze_at,
7314 659 : }
7315 659 : }
7316 : }
7317 :
7318 : /// Various functions to mutate the timeline.
7319 : // TODO Currently, Deref is used to allow easy access to the read methods of the underlying Timeline.
7320 : // This is generally considered a bad practice in Rust and should be fixed eventually,
7321 : // but doing so would require large code changes.
7322 : pub(crate) struct TimelineWriter<'a> {
7323 : tl: &'a Timeline,
7324 : write_guard: tokio::sync::MutexGuard<'a, Option<TimelineWriterState>>,
7325 : }
7326 :
7327 : impl Deref for TimelineWriter<'_> {
7328 : type Target = Timeline;
7329 :
7330 4949213 : fn deref(&self) -> &Self::Target {
7331 4949213 : self.tl
7332 4949213 : }
7333 : }
7334 :
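 : /// What to do with the open in-memory layer before servicing a write: freeze the
 : /// current layer and open a new one (`Roll`), open a fresh layer when none is open
 : /// (`Open`), or keep writing into the current one (`None`).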
7335 : #[derive(PartialEq)]
7336 : enum OpenLayerAction {
7337 : Roll,
7338 : Open,
7339 : None,
7340 : }
7341 :
7342 : impl TimelineWriter<'_> {
7343 2402128 : async fn handle_open_layer_action(
7344 2402128 : &mut self,
7345 2402128 : at: Lsn,
7346 2402128 : action: OpenLayerAction,
7347 2402128 : ctx: &RequestContext,
7348 2402128 : ) -> anyhow::Result<&Arc<InMemoryLayer>> {
7349 2402128 : match action {
7350 : OpenLayerAction::Roll => {
7351 40 : let freeze_at = self.write_guard.as_ref().unwrap().max_lsn.unwrap();
7352 40 : self.roll_layer(freeze_at).await?;
7353 40 : self.open_layer(at, ctx).await?;
7354 : }
7355 619 : OpenLayerAction::Open => self.open_layer(at, ctx).await?,
7356 : OpenLayerAction::None => {
7357 2401469 : assert!(self.write_guard.is_some());
7358 : }
7359 : }
7360 :
7361 2402128 : Ok(&self.write_guard.as_ref().unwrap().open_layer)
7362 2402128 : }
7363 :
7364 659 : async fn open_layer(&mut self, at: Lsn, ctx: &RequestContext) -> anyhow::Result<()> {
7365 659 : let layer = self
7366 659 : .tl
7367 659 : .get_layer_for_write(at, &self.write_guard, ctx)
7368 659 : .await?;
7369 659 : let initial_size = layer.size().await?;
7370 :
7371 659 : let last_freeze_at = self.last_freeze_at.load();
7372 659 : self.write_guard.replace(TimelineWriterState::new(
7373 659 : layer,
7374 659 : initial_size,
7375 659 : last_freeze_at,
7376 659 : ));
7377 659 :
7378 659 : Ok(())
7379 659 : }
7380 :
7381 40 : async fn roll_layer(&mut self, freeze_at: Lsn) -> Result<(), FlushLayerError> {
7382 40 : let current_size = self.write_guard.as_ref().unwrap().current_size;
7383 :
7384 : // If layer flushes are backpressured due to compaction not keeping up, wait for the flush
7385 : // to propagate the backpressure up into WAL ingestion.
7386 40 : let l0_count = self
7387 40 : .tl
7388 40 : .layers
7389 40 : .read(LayerManagerLockHolder::GetLayerMapInfo)
7390 40 : .await
7391 40 : .layer_map()?
7392 40 : .level0_deltas()
7393 40 : .len();
7394 40 : let wait_thresholds = [
7395 40 : self.get_l0_flush_delay_threshold(),
7396 40 : self.get_l0_flush_stall_threshold(),
7397 40 : ];
7398 40 : let wait_threshold = wait_thresholds.into_iter().flatten().min();
7399 :
7400 : // self.write_guard will be taken by the freeze operation below
7401 40 : let flush_id = self
7402 40 : .tl
7403 40 : .freeze_inmem_layer_at(freeze_at, &mut self.write_guard)
7404 40 : .await?;
7405 :
7406 40 : assert!(self.write_guard.is_none());
7407 :
7408 40 : if let Some(wait_threshold) = wait_threshold {
7409 0 : if l0_count >= wait_threshold {
7410 0 : debug!(
7411 0 : "layer roll waiting for flush due to compaction backpressure at {l0_count} L0 layers"
7412 : );
7413 0 : self.tl.wait_flush_completion(flush_id).await?;
7414 0 : }
7415 40 : }
7416 :
7417 40 : if current_size >= self.get_checkpoint_distance() * 2 {
7418 0 : warn!("Flushed oversized open layer with size {}", current_size)
7419 40 : }
7420 :
7421 40 : Ok(())
7422 40 : }
7423 :
7424 2402128 : fn get_open_layer_action(&self, lsn: Lsn, new_value_size: u64) -> OpenLayerAction {
7425 2402128 : let state = &*self.write_guard;
7426 2402128 : let Some(state) = &state else {
7427 619 : return OpenLayerAction::Open;
7428 : };
7429 :
7430 : #[cfg(feature = "testing")]
7431 2401509 : if state.cached_last_freeze_at < self.tl.last_freeze_at.load() {
7432 : // This check and assertion are not strictly needed, because
7433 : // LayerManager::try_freeze_in_memory_layer always clears out the
7434 : // TimelineWriterState when something is frozen. However, last_freeze_at can also
7435 : // advance while there is no TimelineWriterState.
7436 0 : assert!(
7437 0 : state.open_layer.end_lsn.get().is_some(),
7438 0 : "our open_layer must be outdated"
7439 : );
7440 :
7441 : // this would be a memory leak waiting to happen because the in-memory layer always has
7442 : // an index
7443 0 : panic!("BUG: TimelineWriterState held on to frozen in-memory layer.");
7444 2401509 : }
7445 2401509 :
7446 2401509 : if state.prev_lsn == Some(lsn) {
7447 : // Rolling mid LSN is not supported by [downstream code].
7448 : // Hence, only roll at LSN boundaries.
7449 : //
7450 : // [downstream code]: https://github.com/neondatabase/neon/pull/7993#discussion_r1633345422
7451 3 : return OpenLayerAction::None;
7452 2401506 : }
7453 2401506 :
7454 2401506 : if state.current_size == 0 {
7455 : // Don't roll empty layers
7456 0 : return OpenLayerAction::None;
7457 2401506 : }
7458 2401506 :
7459 2401506 : if self.tl.should_roll(
7460 2401506 : state.current_size,
7461 2401506 : state.current_size + new_value_size,
7462 2401506 : self.get_checkpoint_distance(),
7463 2401506 : lsn,
7464 2401506 : state.cached_last_freeze_at,
7465 2401506 : state.open_layer.get_opened_at(),
7466 2401506 : ) {
7467 40 : OpenLayerAction::Roll
7468 : } else {
7469 2401466 : OpenLayerAction::None
7470 : }
7471 2402128 : }
7472 :
7473 : /// Put a batch of keys at the specified Lsns.
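 : ///
 : /// A minimal sketch of composing a single-value batch, mirroring the `put` test
 : /// helper below (assumes `utils::bin_ser::BeSer` is in scope for `serialized_size`):
 : ///
 : /// ```ignore
 : /// let val_ser_size = value.serialized_size().unwrap() as usize;
 : /// let batch = SerializedValueBatch::from_values(vec![(
 : ///     key.to_compact(),
 : ///     lsn,
 : ///     val_ser_size,
 : ///     value.clone(),
 : /// )]);
 : /// writer.put_batch(batch, ctx).await?;
 : /// ```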
7474 2402127 : pub(crate) async fn put_batch(
7475 2402127 : &mut self,
7476 2402127 : batch: SerializedValueBatch,
7477 2402127 : ctx: &RequestContext,
7478 2402127 : ) -> anyhow::Result<()> {
7479 2402127 : if !batch.has_data() {
7480 0 : return Ok(());
7481 2402127 : }
7482 2402127 :
7483 2402127 : // In debug builds, assert that we don't write any keys that don't belong to this shard.
7484 2402127 : // We don't assert this in release builds, since key ownership policies may change over
7485 2402127 : // time. Stray keys will be removed during compaction.
7486 2402127 : if cfg!(debug_assertions) {
7487 4947521 : for metadata in &batch.metadata {
7488 2545394 : if let ValueMeta::Serialized(metadata) = metadata {
7489 2545394 : let key = Key::from_compact(metadata.key);
7490 2545394 : assert!(
7491 2545394 : self.shard_identity.is_key_local(&key)
7492 12 : || self.shard_identity.is_key_global(&key),
7493 0 : "key {key} does not belong on shard {}",
7494 0 : self.shard_identity.shard_index()
7495 : );
7496 0 : }
7497 : }
7498 0 : }
7499 :
7500 2402127 : let batch_max_lsn = batch.max_lsn;
7501 2402127 : let buf_size: u64 = batch.buffer_size() as u64;
7502 2402127 :
7503 2402127 : let action = self.get_open_layer_action(batch_max_lsn, buf_size);
7504 2402127 : let layer = self
7505 2402127 : .handle_open_layer_action(batch_max_lsn, action, ctx)
7506 2402127 : .await?;
7507 :
7508 2402127 : let res = layer.put_batch(batch, ctx).await;
7509 :
7510 2402127 : if res.is_ok() {
7511 2402127 : // Update the current size only when the entire write was ok.
7512 2402127 : // In case of failures, we may have had partial writes which
7513 2402127 : // render the size tracking out of sync. That's ok because
7514 2402127 : // the checkpoint distance should be significantly smaller
7515 2402127 : // than the S3 single shot upload limit of 5GiB.
7516 2402127 : let state = self.write_guard.as_mut().unwrap();
7517 2402127 :
7518 2402127 : state.current_size += buf_size;
7519 2402127 : state.prev_lsn = Some(batch_max_lsn);
7520 2402127 : state.max_lsn = std::cmp::max(state.max_lsn, Some(batch_max_lsn));
7521 2402127 : }
7522 :
7523 2402127 : res
7524 2402127 : }
7525 :
7526 : #[cfg(test)]
7527 : /// Test helper for tests that want to write individual values without composing a batch
7528 2195079 : pub(crate) async fn put(
7529 2195079 : &mut self,
7530 2195079 : key: Key,
7531 2195079 : lsn: Lsn,
7532 2195079 : value: &Value,
7533 2195079 : ctx: &RequestContext,
7534 2195079 : ) -> anyhow::Result<()> {
7535 : use utils::bin_ser::BeSer;
7536 2195079 : if !key.is_valid_key_on_write_path() {
7537 0 : bail!(
7538 0 : "the request contains data not supported by pageserver at TimelineWriter::put: {}",
7539 0 : key
7540 0 : );
7541 2195079 : }
7542 2195079 : let val_ser_size = value.serialized_size().unwrap() as usize;
7543 2195079 : let batch = SerializedValueBatch::from_values(vec![(
7544 2195079 : key.to_compact(),
7545 2195079 : lsn,
7546 2195079 : val_ser_size,
7547 2195079 : value.clone(),
7548 2195079 : )]);
7549 2195079 :
7550 2195079 : self.put_batch(batch, ctx).await
7551 2195079 : }
7552 :
7553 1 : pub(crate) async fn delete_batch(
7554 1 : &mut self,
7555 1 : batch: &[(Range<Key>, Lsn)],
7556 1 : ctx: &RequestContext,
7557 1 : ) -> anyhow::Result<()> {
7558 1 : if let Some((_, lsn)) = batch.first() {
7559 1 : let action = self.get_open_layer_action(*lsn, 0);
7560 1 : let layer = self.handle_open_layer_action(*lsn, action, ctx).await?;
7561 1 : layer.put_tombstones(batch).await?;
7562 0 : }
7563 :
7564 1 : Ok(())
7565 1 : }
7566 :
7567 : /// Track the end of the latest digested WAL record.
7568 : /// This records the (end of the) last valid WAL record known to the timeline.
7569 : ///
7570 : /// Call this after you have finished writing all the WAL up to 'lsn'.
7571 : ///
7572 : /// 'lsn' must be aligned. This wakes up any wait_lsn() callers waiting for
7573 : /// the 'lsn' or anything older. The previous last record LSN is stored alongside
7574 : /// the latest and can be read.
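 : ///
 : /// Typical ingest sequence (a sketch; `record_end_lsn` stands in for the aligned
 : /// end LSN of the last fully written record):
 : ///
 : /// ```ignore
 : /// writer.put_batch(batch, ctx).await?;
 : /// // Advance the last record LSN only after the whole record has been written.
 : /// writer.finish_write(record_end_lsn);
 : /// ```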
7575 2639558 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
7576 2639558 : self.tl.finish_write(new_lsn);
7577 2639558 : }
7578 :
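 : /// Apply a logical-size delta (may be negative) to the underlying timeline's
 : /// current logical size tracking.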
7579 135285 : pub(crate) fn update_current_logical_size(&self, delta: i64) {
7580 135285 : self.tl.update_current_logical_size(delta)
7581 135285 : }
7582 : }
7583 :
7584 : // We need TimelineWriter to be `Send` for the upcoming conversion of
7585 : // Timeline::layers to tokio::sync::RwLock.
7586 : #[test]
7587 1 : fn is_send() {
7588 1 : fn _assert_send<T: Send>() {}
7589 1 : _assert_send::<TimelineWriter<'_>>();
7590 1 : }
7591 :
7592 : #[cfg(test)]
7593 : mod tests {
7594 : use std::sync::Arc;
7595 :
7596 : use pageserver_api::key::Key;
7597 : use pageserver_api::value::Value;
7598 : use std::iter::Iterator;
7599 : use tracing::Instrument;
7600 : use utils::id::TimelineId;
7601 : use utils::lsn::Lsn;
7602 :
7603 : use super::HeatMapTimeline;
7604 : use crate::context::RequestContextBuilder;
7605 : use crate::tenant::harness::{TenantHarness, test_img};
7606 : use crate::tenant::layer_map::LayerMap;
7607 : use crate::tenant::storage_layer::{Layer, LayerName, LayerVisibilityHint};
7608 : use crate::tenant::timeline::layer_manager::LayerManagerLockHolder;
7609 : use crate::tenant::timeline::{DeltaLayerTestDesc, EvictionError};
7610 : use crate::tenant::{PreviousHeatmap, Timeline};
7611 :
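 : /// Assert that two heatmaps list the same layers, in the same order, with
 : /// identical names and metadata.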
7612 5 : fn assert_heatmaps_have_same_layers(lhs: &HeatMapTimeline, rhs: &HeatMapTimeline) {
7613 5 : assert_eq!(lhs.all_layers().count(), rhs.all_layers().count());
7614 5 : let lhs_rhs = lhs.all_layers().zip(rhs.all_layers());
7615 25 : for (l, r) in lhs_rhs {
7616 20 : assert_eq!(l.name, r.name);
7617 20 : assert_eq!(l.metadata, r.metadata);
7618 : }
7619 5 : }
7620 :
7621 : #[tokio::test]
7622 1 : async fn test_heatmap_generation() {
7623 1 : let harness = TenantHarness::create("heatmap_generation").await.unwrap();
7624 1 :
7625 1 : let covered_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
7626 1 : Lsn(0x10)..Lsn(0x20),
7627 1 : vec![(
7628 1 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
7629 1 : Lsn(0x11),
7630 1 : Value::Image(test_img("foo")),
7631 1 : )],
7632 1 : );
7633 1 : let visible_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
7634 1 : Lsn(0x10)..Lsn(0x20),
7635 1 : vec![(
7636 1 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
7637 1 : Lsn(0x11),
7638 1 : Value::Image(test_img("foo")),
7639 1 : )],
7640 1 : );
7641 1 : let l0_delta = DeltaLayerTestDesc::new(
7642 1 : Lsn(0x20)..Lsn(0x30),
7643 1 : Key::from_hex("000000000000000000000000000000000000").unwrap()
7644 1 : ..Key::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(),
7645 1 : vec![(
7646 1 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
7647 1 : Lsn(0x25),
7648 1 : Value::Image(test_img("foo")),
7649 1 : )],
7650 1 : );
7651 1 : let delta_layers = vec![
7652 1 : covered_delta.clone(),
7653 1 : visible_delta.clone(),
7654 1 : l0_delta.clone(),
7655 1 : ];
7656 1 :
7657 1 : let image_layer = (
7658 1 : Lsn(0x40),
7659 1 : vec![(
7660 1 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
7661 1 : test_img("bar"),
7662 1 : )],
7663 1 : );
7664 1 : let image_layers = vec![image_layer];
7665 1 :
7666 1 : let (tenant, ctx) = harness.load().await;
7667 1 : let timeline = tenant
7668 1 : .create_test_timeline_with_layers(
7669 1 : TimelineId::generate(),
7670 1 : Lsn(0x10),
7671 1 : 14,
7672 1 : &ctx,
7673 1 : Vec::new(), // in-memory layers
7674 1 : delta_layers,
7675 1 : image_layers,
7676 1 : Lsn(0x100),
7677 1 : )
7678 1 : .await
7679 1 : .unwrap();
7680 1 : let ctx = &ctx.with_scope_timeline(&timeline);
7681 1 :
7682 1 : // Layer visibility is an input to heatmap generation, so refresh it first
7683 1 : timeline.update_layer_visibility().await.unwrap();
7684 1 :
7685 1 : let heatmap = timeline
7686 1 : .generate_heatmap()
7687 1 : .await
7688 1 : .expect("Infallible while timeline is not shut down");
7689 1 :
7690 1 : assert_eq!(heatmap.timeline_id, timeline.timeline_id);
7691 1 :
7692 1 : // L0 should come last
7693 1 : let heatmap_layers = heatmap.all_layers().collect::<Vec<_>>();
7694 1 : assert_eq!(heatmap_layers.last().unwrap().name, l0_delta.layer_name());
7695 1 :
7696 1 : let mut last_lsn = Lsn::MAX;
7697 5 : for layer in heatmap_layers {
7698 1 : // Covered layer should be omitted
7699 4 : assert!(layer.name != covered_delta.layer_name());
7700 1 :
7701 4 : let layer_lsn = match &layer.name {
7702 2 : LayerName::Delta(d) => d.lsn_range.end,
7703 2 : LayerName::Image(i) => i.lsn,
7704 1 : };
7705 1 :
7706 1 : // Apart from L0s, newest Layers should come first
7707 4 : if !LayerMap::is_l0(layer.name.key_range(), layer.name.is_delta()) {
7708 3 : assert!(layer_lsn <= last_lsn);
7709 3 : last_lsn = layer_lsn;
7710 1 : }
7711 1 : }
7712 1 :
7713 1 : // Evict all the layers and stash the old heatmap in the timeline.
7714 1 : // This simulates a migration to a cold secondary location.
7715 1 :
7716 1 : let guard = timeline.layers.read(LayerManagerLockHolder::Testing).await;
7717 1 : let mut all_layers = Vec::new();
7718 1 : let forever = std::time::Duration::from_secs(120);
7719 5 : for layer in guard.likely_resident_layers() {
7720 5 : all_layers.push(layer.clone());
7721 5 : layer.evict_and_wait(forever).await.unwrap();
7722 1 : }
7723 1 : drop(guard);
7724 1 :
7725 1 : timeline
7726 1 : .previous_heatmap
7727 1 : .store(Some(Arc::new(PreviousHeatmap::Active {
7728 1 : heatmap: heatmap.clone(),
7729 1 : read_at: std::time::Instant::now(),
7730 1 : end_lsn: None,
7731 1 : })));
7732 1 :
7733 1 : // Generate a new heatmap and assert that it contains the same layers as the old one.
7734 1 : let post_migration_heatmap = timeline.generate_heatmap().await.unwrap();
7735 1 : assert_heatmaps_have_same_layers(&heatmap, &post_migration_heatmap);
7736 1 :
7737 1 : // Download each layer one by one. Generate the heatmap at each step and check
7738 1 : // that it's stable.
7739 6 : for layer in all_layers {
7740 5 : if layer.visibility() == LayerVisibilityHint::Covered {
7741 1 : continue;
7742 4 : }
7743 4 :
7744 4 : eprintln!("Downloading {layer} and re-generating heatmap");
7745 4 :
7746 4 : let ctx = &RequestContextBuilder::from(ctx)
7747 4 : .download_behavior(crate::context::DownloadBehavior::Download)
7748 4 : .attached_child();
7749 1 :
7750 4 : let _resident = layer
7751 4 : .download_and_keep_resident(ctx)
7752 4 : .instrument(tracing::info_span!(
7753 4 : parent: None,
7754 1 : "download_layer",
7755 1 : tenant_id = %timeline.tenant_shard_id.tenant_id,
7756 0 : shard_id = %timeline.tenant_shard_id.shard_slug(),
7757 0 : timeline_id = %timeline.timeline_id
7758 1 : ))
7759 4 : .await
7760 4 : .unwrap();
7761 1 :
7762 4 : let post_download_heatmap = timeline.generate_heatmap().await.unwrap();
7763 4 : assert_heatmaps_have_same_layers(&heatmap, &post_download_heatmap);
7764 1 : }
7765 1 :
7766 1 : // Everything from the post-migration heatmap is now resident.
7767 1 : // Check that we drop it from memory.
7768 1 : assert!(matches!(
7769 1 : timeline.previous_heatmap.load().as_deref(),
7770 1 : Some(PreviousHeatmap::Obsolete)
7771 1 : ));
7772 1 : }
7773 :
7774 : #[tokio::test]
7775 1 : async fn test_previous_heatmap_obsoletion() {
7776 1 : let harness = TenantHarness::create("heatmap_previous_heatmap_obsoletion")
7777 1 : .await
7778 1 : .unwrap();
7779 1 :
7780 1 : let l0_delta = DeltaLayerTestDesc::new(
7781 1 : Lsn(0x20)..Lsn(0x30),
7782 1 : Key::from_hex("000000000000000000000000000000000000").unwrap()
7783 1 : ..Key::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(),
7784 1 : vec![(
7785 1 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
7786 1 : Lsn(0x25),
7787 1 : Value::Image(test_img("foo")),
7788 1 : )],
7789 1 : );
7790 1 :
7791 1 : let image_layer = (
7792 1 : Lsn(0x40),
7793 1 : vec![(
7794 1 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
7795 1 : test_img("bar"),
7796 1 : )],
7797 1 : );
7798 1 :
7799 1 : let delta_layers = vec![l0_delta];
7800 1 : let image_layers = vec![image_layer];
7801 1 :
7802 1 : let (tenant, ctx) = harness.load().await;
7803 1 : let timeline = tenant
7804 1 : .create_test_timeline_with_layers(
7805 1 : TimelineId::generate(),
7806 1 : Lsn(0x10),
7807 1 : 14,
7808 1 : &ctx,
7809 1 : Vec::new(), // in-memory layers
7810 1 : delta_layers,
7811 1 : image_layers,
7812 1 : Lsn(0x100),
7813 1 : )
7814 1 : .await
7815 1 : .unwrap();
7816 1 :
7817 1 : // Layer visibility is an input to heatmap generation, so refresh it first
7818 1 : timeline.update_layer_visibility().await.unwrap();
7819 1 :
7820 1 : let heatmap = timeline
7821 1 : .generate_heatmap()
7822 1 : .await
7823 1 : .expect("Infallible while timeline is not shut down");
7824 1 :
7825 1 : // Both layers should be in the heatmap
7826 1 : assert!(heatmap.all_layers().count() > 0);
7827 1 :
7828 1 : // Now simulate a migration.
7829 1 : timeline
7830 1 : .previous_heatmap
7831 1 : .store(Some(Arc::new(PreviousHeatmap::Active {
7832 1 : heatmap: heatmap.clone(),
7833 1 : read_at: std::time::Instant::now(),
7834 1 : end_lsn: None,
7835 1 : })));
7836 1 :
7837 1 : // Evict all the layers in the previous heatmap
7838 1 : let guard = timeline.layers.read(LayerManagerLockHolder::Testing).await;
7839 1 : let forever = std::time::Duration::from_secs(120);
7840 3 : for layer in guard.likely_resident_layers() {
7841 3 : layer.evict_and_wait(forever).await.unwrap();
7842 1 : }
7843 1 : drop(guard);
7844 1 :
7845 1 : // Generate a new heatmap and check that the previous heatmap
7846 1 : // has been marked obsolete.
7847 1 : let post_eviction_heatmap = timeline
7848 1 : .generate_heatmap()
7849 1 : .await
7850 1 : .expect("Infallible while timeline is not shut down");
7851 1 :
7852 1 : assert_eq!(post_eviction_heatmap.all_layers().count(), 0);
7853 1 : assert!(matches!(
7854 1 : timeline.previous_heatmap.load().as_deref(),
7855 1 : Some(PreviousHeatmap::Obsolete)
7856 1 : ));
7857 1 : }
7858 :
7859 : #[tokio::test]
7860 1 : async fn two_layer_eviction_attempts_at_the_same_time() {
7861 1 : let harness = TenantHarness::create("two_layer_eviction_attempts_at_the_same_time")
7862 1 : .await
7863 1 : .unwrap();
7864 1 :
7865 1 : let (tenant, ctx) = harness.load().await;
7866 1 : let timeline = tenant
7867 1 : .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
7868 1 : .await
7869 1 : .unwrap();
7870 1 :
7871 1 : let layer = find_some_layer(&timeline).await;
7872 1 : let layer = layer
7873 1 : .keep_resident()
7874 1 : .await
7875 1 : .expect("no download => no downloading errors")
7876 1 : .drop_eviction_guard();
7877 1 :
7878 1 : let forever = std::time::Duration::from_secs(120);
7879 1 :
7880 1 : let first = layer.evict_and_wait(forever);
7881 1 : let second = layer.evict_and_wait(forever);
7882 1 :
7883 1 : let (first, second) = tokio::join!(first, second);
7884 1 :
7885 1 : let res = layer.keep_resident().await;
7886 1 : assert!(res.is_none(), "{res:?}");
7887 1 :
7888 1 : match (first, second) {
7889 1 : (Ok(()), Ok(())) => {
7890 1 : // because there are no more timeline locks being taken on eviction path, we can
7891 1 : // witness all three outcomes here.
7892 1 : }
7893 1 : (Ok(()), Err(EvictionError::NotFound)) | (Err(EvictionError::NotFound), Ok(())) => {
7894 0 : // if one completes before the other, this is fine just as well.
7895 0 : }
7896 1 : other => unreachable!("unexpected {:?}", other),
7897 1 : }
7898 1 : }
7899 :
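 : /// Pick an arbitrary historic layer from the timeline's layer map; panics if the
 : /// layer map is empty.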
7900 1 : async fn find_some_layer(timeline: &Timeline) -> Layer {
7901 1 : let layers = timeline
7902 1 : .layers
7903 1 : .read(LayerManagerLockHolder::GetLayerMapInfo)
7904 1 : .await;
7905 1 : let desc = layers
7906 1 : .layer_map()
7907 1 : .unwrap()
7908 1 : .iter_historic_layers()
7909 1 : .next()
7910 1 : .expect("must find one layer to evict");
7911 1 :
7912 1 : layers.get_from_desc(&desc)
7913 1 : }
7914 : }