Line data Source code
1 : pub(crate) mod analysis;
2 : mod compaction;
3 : pub mod delete;
4 : pub(crate) mod detach_ancestor;
5 : mod eviction_task;
6 : mod init;
7 : pub mod layer_manager;
8 : pub(crate) mod logical_size;
9 : pub mod span;
10 : pub mod uninit;
11 : mod walreceiver;
12 :
13 : use anyhow::{anyhow, bail, ensure, Context, Result};
14 : use arc_swap::ArcSwap;
15 : use bytes::Bytes;
16 : use camino::Utf8Path;
17 : use enumset::EnumSet;
18 : use fail::fail_point;
19 : use once_cell::sync::Lazy;
20 : use pageserver_api::{
21 : key::{
22 : AUX_FILES_KEY, KEY_SIZE, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX,
23 : NON_INHERITED_RANGE, NON_INHERITED_SPARSE_RANGE,
24 : },
25 : keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning},
26 : models::{
27 : AtomicAuxFilePolicy, AuxFilePolicy, CompactionAlgorithm, CompactionAlgorithmSettings,
28 : DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy,
29 : InMemoryLayerInfo, LayerMapInfo, LsnLease, TimelineState,
30 : },
31 : reltag::BlockNumber,
32 : shard::{ShardIdentity, ShardNumber, TenantShardId},
33 : };
34 : use rand::Rng;
35 : use serde_with::serde_as;
36 : use storage_broker::BrokerClientChannel;
37 : use tokio::{
38 : runtime::Handle,
39 : sync::{oneshot, watch},
40 : };
41 : use tokio_util::sync::CancellationToken;
42 : use tracing::*;
43 : use utils::{
44 : bin_ser::BeSer,
45 : fs_ext, pausable_failpoint,
46 : sync::gate::{Gate, GateGuard},
47 : vec_map::VecMap,
48 : };
49 :
50 : use std::pin::pin;
51 : use std::sync::atomic::Ordering as AtomicOrdering;
52 : use std::sync::{Arc, Mutex, RwLock, Weak};
53 : use std::time::{Duration, Instant, SystemTime};
54 : use std::{
55 : array,
56 : collections::{BTreeMap, HashMap, HashSet},
57 : sync::atomic::AtomicU64,
58 : };
59 : use std::{
60 : cmp::{max, min, Ordering},
61 : ops::ControlFlow,
62 : };
63 : use std::{
64 : collections::btree_map::Entry,
65 : ops::{Deref, Range},
66 : };
67 :
68 : use crate::metrics::GetKind;
69 : use crate::pgdatadir_mapping::MAX_AUX_FILE_V2_DELTAS;
70 : use crate::{
71 : aux_file::AuxFileSizeEstimator,
72 : tenant::{
73 : layer_map::{LayerMap, SearchResult},
74 : metadata::TimelineMetadata,
75 : },
76 : };
77 : use crate::{
78 : context::{DownloadBehavior, RequestContext},
79 : disk_usage_eviction_task::DiskUsageEvictionInfo,
80 : pgdatadir_mapping::CollectKeySpaceError,
81 : };
82 : use crate::{
83 : disk_usage_eviction_task::finite_f32,
84 : tenant::storage_layer::{
85 : AsLayerDesc, DeltaLayerWriter, EvictionError, ImageLayerWriter, InMemoryLayer, Layer,
86 : LayerAccessStatsReset, LayerName, ResidentLayer, ValueReconstructResult,
87 : ValueReconstructState, ValuesReconstructState,
88 : },
89 : };
90 : use crate::{
91 : disk_usage_eviction_task::EvictionCandidate, tenant::storage_layer::delta_layer::DeltaEntry,
92 : };
93 : use crate::{
94 : metrics::ScanLatencyOngoingRecording, tenant::timeline::logical_size::CurrentLogicalSize,
95 : };
96 : use crate::{pgdatadir_mapping::LsnForTimestamp, tenant::tasks::BackgroundLoopKind};
97 : use crate::{
98 : pgdatadir_mapping::{AuxFilesDirectory, DirectoryKind},
99 : virtual_file::{MaybeFatalIo, VirtualFile},
100 : };
101 :
102 : use crate::config::PageServerConf;
103 : use crate::keyspace::{KeyPartitioning, KeySpace};
104 : use crate::metrics::{
105 : TimelineMetrics, MATERIALIZED_PAGE_CACHE_HIT, MATERIALIZED_PAGE_CACHE_HIT_DIRECT,
106 : };
107 : use crate::pgdatadir_mapping::CalculateLogicalSizeError;
108 : use crate::tenant::config::TenantConfOpt;
109 : use pageserver_api::reltag::RelTag;
110 : use pageserver_api::shard::ShardIndex;
111 :
112 : use postgres_connection::PgConnectionConfig;
113 : use postgres_ffi::to_pg_timestamp;
114 : use utils::{
115 : completion,
116 : generation::Generation,
117 : id::TimelineId,
118 : lsn::{AtomicLsn, Lsn, RecordLsn},
119 : seqwait::SeqWait,
120 : simple_rcu::{Rcu, RcuReadGuard},
121 : };
122 :
123 : use crate::page_cache;
124 : use crate::repository::GcResult;
125 : use crate::repository::{Key, Value};
126 : use crate::task_mgr;
127 : use crate::task_mgr::TaskKind;
128 : use crate::ZERO_PAGE;
129 :
130 : use self::delete::DeleteTimelineFlow;
131 : pub(super) use self::eviction_task::EvictionTaskTenantState;
132 : use self::eviction_task::EvictionTaskTimelineState;
133 : use self::layer_manager::LayerManager;
134 : use self::logical_size::LogicalSize;
135 : use self::walreceiver::{WalReceiver, WalReceiverConf};
136 :
137 : use super::{config::TenantConf, storage_layer::VectoredValueReconstructState};
138 : use super::{debug_assert_current_span_has_tenant_and_timeline_id, AttachedTenantConf};
139 : use super::{remote_timeline_client::index::IndexPart, storage_layer::LayerFringe};
140 : use super::{remote_timeline_client::RemoteTimelineClient, storage_layer::ReadableLayer};
141 : use super::{
142 : secondary::heatmap::{HeatMapLayer, HeatMapTimeline},
143 : GcError,
144 : };
145 :
146 : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
147 : pub(crate) enum FlushLoopState {
148 : NotStarted,
149 : Running {
150 : #[cfg(test)]
151 : expect_initdb_optimization: bool,
152 : #[cfg(test)]
153 : initdb_optimization_count: usize,
154 : },
155 : Exited,
156 : }
157 :
158 : #[derive(Debug, Copy, Clone, PartialEq, Eq)]
159 : pub enum ImageLayerCreationMode {
160 : /// Try to create image layers based on `time_for_new_image_layer`. Used in compaction code path.
161 : Try,
162 : /// Force creating the image layers if possible. For now, no image layers will be created
163 : /// for metadata keys. Used in compaction code path with force flag enabled.
164 : Force,
165 : /// Initial ingestion of the data, and no data should be dropped in this function. This
166 : /// means that no metadata keys should be included in the partitions. Used in flush frozen layer
167 : /// code path.
168 : Initial,
169 : }
170 :
171 : impl std::fmt::Display for ImageLayerCreationMode {
172 504 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
173 504 : write!(f, "{:?}", self)
174 504 : }
175 : }
176 :
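// The `Display` impl above simply defers to `Debug`, so the variant name is
// what shows up in logs and metrics labels; a quick check (sketch, not part
// of the original file):
#[cfg(test)]
#[test]
fn image_layer_creation_mode_displays_variant_name() {
    assert_eq!(ImageLayerCreationMode::Try.to_string(), "Try");
    assert_eq!(ImageLayerCreationMode::Force.to_string(), "Force");
}
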
177 : /// Wrapper for key range to provide reverse ordering by range length for BinaryHeap
178 : #[derive(Debug, Clone, PartialEq, Eq)]
179 : pub(crate) struct Hole {
180 : key_range: Range<Key>,
181 : coverage_size: usize,
182 : }
183 :
184 : impl Ord for Hole {
185 0 : fn cmp(&self, other: &Self) -> Ordering {
186 0 : other.coverage_size.cmp(&self.coverage_size) // inverse order
187 0 : }
188 : }
189 :
190 : impl PartialOrd for Hole {
191 0 : fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
192 0 : Some(self.cmp(other))
193 0 : }
194 : }
195 :
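// A minimal sketch (hypothetical helper, not part of this file) of what the
// inverted ordering buys us: `BinaryHeap` is a max-heap, so with `Ord`
// reversed, `pop` removes the hole with the *smallest* `coverage_size`.
// Pushing candidates and popping on overflow therefore retains only the
// `max_holes` largest holes.
#[cfg(test)]
#[allow(dead_code)]
fn keep_largest_holes(candidates: impl Iterator<Item = Hole>, max_holes: usize) -> Vec<Hole> {
    let mut heap = std::collections::BinaryHeap::new();
    for hole in candidates {
        heap.push(hole);
        if heap.len() > max_holes {
            // Evicts the smallest hole, thanks to the inverted `Ord` impl.
            heap.pop();
        }
    }
    heap.into_vec()
}
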
196 : /// Temporary function for immutable storage state refactor; ensures we drop the lock guard and not something else.
197 : /// Can be removed after all refactors are done.
198 28 : fn drop_rlock<T>(rlock: tokio::sync::OwnedRwLockReadGuard<T>) {
199 28 : drop(rlock)
200 28 : }
201 :
202 : /// Temporary function for immutable storage state refactor; ensures we drop the lock guard and not something else.
203 : /// Can be removed after all refactors are done.
204 532 : fn drop_wlock<T>(wlock: tokio::sync::RwLockWriteGuard<'_, T>) {
205 532 : drop(wlock)
206 532 : }
207 :
208 : /// The outward-facing resources required to build a Timeline
209 : pub struct TimelineResources {
210 : pub remote_client: RemoteTimelineClient,
211 : pub timeline_get_throttle: Arc<
212 : crate::tenant::throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>,
213 : >,
214 : }
215 :
216 : pub(crate) struct AuxFilesState {
217 : pub(crate) dir: Option<AuxFilesDirectory>,
218 : pub(crate) n_deltas: usize,
219 : }
220 :
221 : /// The relation size cache caches relation sizes at the end of the timeline. It speeds up WAL
222 : /// ingestion considerably, because WAL ingestion needs to check on most records if the record
223 : /// implicitly extends the relation. At startup, `complete_as_of` is initialized to the current end
224 : /// of the timeline (disk_consistent_lsn). It's used on reads of relation sizes to check if the
225 : /// value can be used to also update the cache, see [`Timeline::update_cached_rel_size`].
226 : pub(crate) struct RelSizeCache {
227 : pub(crate) complete_as_of: Lsn,
228 : pub(crate) map: HashMap<RelTag, (Lsn, BlockNumber)>,
229 : }
230 :
231 : pub struct Timeline {
232 : conf: &'static PageServerConf,
233 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
234 :
235 : myself: Weak<Self>,
236 :
237 : pub(crate) tenant_shard_id: TenantShardId,
238 : pub timeline_id: TimelineId,
239 :
240 : /// The generation of the tenant that instantiated us: this is used for safety when writing remote objects.
241 : /// Never changes for the lifetime of this [`Timeline`] object.
242 : ///
243 : /// This duplicates the generation stored in LocationConf, but that structure is mutable:
244 : /// this copy enforces the invariant that generation doesn't change during a Tenant's lifetime.
245 : pub(crate) generation: Generation,
246 :
247 : /// The detailed sharding information from our parent Tenant. This enables us to map keys
248 : /// to shards, and is constant through the lifetime of this Timeline.
249 : shard_identity: ShardIdentity,
250 :
251 : pub pg_version: u32,
252 :
253 : /// The tuple has two elements.
254 : /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote).
255 : /// 2. `LayerMap`, the acceleration data structure for `get_reconstruct_data`.
256 : ///
257 : /// `LayerMap` maps out the `(PAGE,LSN) / (KEY,LSN)` space, which is composed of `(KeyRange, LsnRange)` rectangles.
258 : /// We describe these rectangles through the `PersistentLayerDesc` struct.
259 : ///
260 : /// When we want to reconstruct a page, we first find the `PersistentLayerDesc`'s that we need for page reconstruction,
261 : /// using `LayerMap`. Then, we use `LayerFileManager` to get the `PersistentLayer`'s that correspond to the
262 : /// `PersistentLayerDesc`'s.
263 : ///
264 : /// Hence, it's important to keep things coherent. The `LayerFileManager` must always have an entry for all
265 : /// `PersistentLayerDesc`'s in the `LayerMap`. If it doesn't, `LayerFileManager::get_from_desc` will panic at
266 : /// runtime, e.g., during page reconstruction.
267 : ///
268 : /// In the future, we'll be able to split up the tuple of LayerMap and `LayerFileManager`,
269 : /// so that e.g. on-demand-download/eviction, and layer spreading, can operate just on `LayerFileManager`.
270 : pub(crate) layers: Arc<tokio::sync::RwLock<LayerManager>>,
271 :
272 : last_freeze_at: AtomicLsn,
273 : // Atomic would be more appropriate here.
274 : last_freeze_ts: RwLock<Instant>,
275 :
276 : pub(crate) standby_horizon: AtomicLsn,
277 :
278 : // WAL redo manager. `None` only for broken tenants.
279 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
280 :
281 : /// Remote storage client.
282 : /// See [`remote_timeline_client`](super::remote_timeline_client) module comment for details.
283 : pub remote_client: Arc<RemoteTimelineClient>,
284 :
285 : // What page versions do we hold in the repository? If we get a
286 : // request > last_record_lsn, we need to wait until we receive all
287 : // the WAL up to the request. The SeqWait provides functions for
288 : // that. TODO: If we get a request for an old LSN, such that the
289 : // versions have already been garbage collected away, we should
290 : // throw an error, but we don't track that currently.
291 : //
292 : // last_record_lsn.load().last points to the end of last processed WAL record.
293 : //
294 : // We also remember the starting point of the previous record in
295 : // 'last_record_lsn.load().prev'. It's used to set the xl_prev pointer of the
296 : // first WAL record when the node is started up. But here, we just
297 : // keep track of it.
298 : last_record_lsn: SeqWait<RecordLsn, Lsn>,
299 :
300 : // All WAL records have been processed and stored durably on files on
301 : // local disk, up to this LSN. On crash and restart, we need to re-process
302 : // the WAL starting from this point.
303 : //
304 : // Some later WAL records might have been processed and also flushed to disk
305 : // already, so don't be surprised to see some, but there's no guarantee on
306 : // them yet.
307 : disk_consistent_lsn: AtomicLsn,
308 :
309 : // Parent timeline that this timeline was branched from, and the LSN
310 : // of the branch point.
311 : ancestor_timeline: Option<Arc<Timeline>>,
312 : ancestor_lsn: Lsn,
313 :
314 : pub(super) metrics: TimelineMetrics,
315 :
316 : // `Timeline` doesn't write these metrics itself, but it manages the lifetime. Code
317 : // in `crate::page_service` writes these metrics.
318 : pub(crate) query_metrics: crate::metrics::SmgrQueryTimePerTimeline,
319 :
320 : directory_metrics: [AtomicU64; DirectoryKind::KINDS_NUM],
321 :
322 : /// Ensures layers aren't frozen by checkpointer between
323 : /// [`Timeline::get_layer_for_write`] and layer reads.
324 : /// Locked automatically by [`TimelineWriter`] and checkpointer.
325 : /// Must always be acquired before the layer map/individual layer lock
326 : /// to avoid deadlock.
327 : ///
328 : /// The state is cleared upon freezing.
329 : write_lock: tokio::sync::Mutex<Option<TimelineWriterState>>,
330 :
331 : /// Used to avoid multiple `flush_loop` tasks running
332 : pub(super) flush_loop_state: Mutex<FlushLoopState>,
333 :
334 : /// layer_flush_start_tx can be used to wake up the layer-flushing task.
335 : /// - The u64 value is a counter, incremented every time a new flush cycle is requested.
336 : /// The flush cycle counter is sent back on the layer_flush_done channel when
337 : /// the flush finishes. You can use that to wait for the flush to finish.
338 : /// - The LSN is updated to max() of its current value and the latest disk_consistent_lsn
339 : /// read by whoever sends an update; a condensed sketch of this request/acknowledge protocol follows the struct definition
340 : layer_flush_start_tx: tokio::sync::watch::Sender<(u64, Lsn)>,
341 : /// to be notified when layer flushing has finished, subscribe to the layer_flush_done channel
342 : layer_flush_done_tx: tokio::sync::watch::Sender<(u64, Result<(), FlushLayerError>)>,
343 :
344 : // Needed to ensure that we can't create a branch at a point that was already garbage collected
345 : pub latest_gc_cutoff_lsn: Rcu<Lsn>,
346 :
347 : // List of child timelines and their branch points. This is needed to avoid
348 : // garbage collecting data that is still needed by the child timelines.
349 : pub(crate) gc_info: std::sync::RwLock<GcInfo>,
350 :
351 : // It may change across major Postgres versions, so for simplicity we
352 : // record it after running initdb for a timeline.
353 : // It is needed in checks when we want to error on some operations
354 : // when they are requested for pre-initdb lsn.
355 : // It can be unified with latest_gc_cutoff_lsn under some "first_valid_lsn",
356 : // though let's keep them both for better error visibility.
357 : pub initdb_lsn: Lsn,
358 :
359 : /// When did we last calculate the partitioning? Make it pub to test cases.
360 : pub(super) partitioning: tokio::sync::Mutex<((KeyPartitioning, SparseKeyPartitioning), Lsn)>,
361 :
362 : /// Configuration: how often should the partitioning be recalculated.
363 : repartition_threshold: u64,
364 :
365 : last_image_layer_creation_check_at: AtomicLsn,
366 :
367 : /// Current logical size of the "datadir", at the last LSN.
368 : current_logical_size: LogicalSize,
369 :
370 : /// Information about the last processed message by the WAL receiver,
371 : /// or None if WAL receiver has not received anything for this timeline
372 : /// yet.
373 : pub last_received_wal: Mutex<Option<WalReceiverInfo>>,
374 : pub walreceiver: Mutex<Option<WalReceiver>>,
375 :
376 : /// Relation size cache
377 : pub(crate) rel_size_cache: RwLock<RelSizeCache>,
378 :
379 : download_all_remote_layers_task_info: RwLock<Option<DownloadRemoteLayersTaskInfo>>,
380 :
381 : state: watch::Sender<TimelineState>,
382 :
383 : /// Prevent two tasks from deleting the timeline at the same time. If held, the
384 : /// timeline is being deleted; the contained [`DeleteTimelineFlow`] records whether the deletion has already finished.
385 : pub delete_progress: Arc<tokio::sync::Mutex<DeleteTimelineFlow>>,
386 :
387 : eviction_task_timeline_state: tokio::sync::Mutex<EvictionTaskTimelineState>,
388 :
389 : /// Load or creation time information about the disk_consistent_lsn and when the loading
390 : /// happened. Used for consumption metrics.
391 : pub(crate) loaded_at: (Lsn, SystemTime),
392 :
393 : /// Gate to prevent shutdown completing while I/O is still happening to this timeline's data
394 : pub(crate) gate: Gate,
395 :
396 : /// Cancellation token scoped to this timeline: anything doing long-running work relating
397 : /// to the timeline should drop out when this token fires.
398 : pub(crate) cancel: CancellationToken,
399 :
400 : /// Make sure we only have one running compaction at a time in tests.
401 : ///
402 : /// Must only be taken in two places:
403 : /// - [`Timeline::compact`] (this file)
404 : /// - [`delete::delete_local_timeline_directory`]
405 : ///
406 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
407 : compaction_lock: tokio::sync::Mutex<()>,
408 :
409 : /// Make sure we only have one running gc at a time.
410 : ///
411 : /// Must only be taken in two places:
412 : /// - [`Timeline::gc`] (this file)
413 : /// - [`delete::delete_local_timeline_directory`]
414 : ///
415 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
416 : gc_lock: tokio::sync::Mutex<()>,
417 :
418 : /// Cloned from [`super::Tenant::timeline_get_throttle`] on construction.
419 : timeline_get_throttle: Arc<
420 : crate::tenant::throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>,
421 : >,
422 :
423 : /// Keep aux directory cache to avoid it's reconstruction on each update
424 : pub(crate) aux_files: tokio::sync::Mutex<AuxFilesState>,
425 :
426 : /// Size estimator for aux file v2
427 : pub(crate) aux_file_size_estimator: AuxFileSizeEstimator,
428 :
429 : /// Indicate whether aux file v2 storage is enabled.
430 : pub(crate) last_aux_file_policy: AtomicAuxFilePolicy,
431 :
432 : /// Some test cases directly place keys into the timeline without actually modifying the directory
433 : /// keys (i.e., DB_DIR). The test cases creating such keys will put the keyspaces here, so that
434 : /// these keys won't get garbage-collected during compaction/GC. This field only modifies the dense
435 : /// keyspace return value of `collect_keyspace`. For sparse keyspaces, use AUX keys for testing, and
436 : /// in the future, add `extra_test_sparse_keyspace` if necessary.
437 : #[cfg(test)]
438 : pub(crate) extra_test_dense_keyspace: ArcSwap<KeySpace>,
439 : }
440 :
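// A condensed sketch (hypothetical helper, not part of this file) of the
// request/acknowledge protocol described for `layer_flush_start_tx` and
// `layer_flush_done_tx` above: the requester bumps the counter and raises the
// LSN, then waits until the flush loop echoes back a counter >= its request.
#[cfg(test)]
#[allow(dead_code)]
async fn request_flush_and_wait(
    start_tx: &tokio::sync::watch::Sender<(u64, Lsn)>,
    mut done_rx: tokio::sync::watch::Receiver<(u64, Result<(), FlushLayerError>)>,
    flush_to: Lsn,
) -> Result<(), FlushLayerError> {
    let mut my_flush_request = 0;
    start_tx.send_modify(|(counter, lsn)| {
        *counter += 1;
        *lsn = std::cmp::max(*lsn, flush_to);
        my_flush_request = *counter;
    });
    loop {
        let (last_result_counter, last_result) = done_rx.borrow_and_update().clone();
        if last_result_counter >= my_flush_request {
            return last_result;
        }
        // The flush loop has not acknowledged our cycle yet; wait for the
        // next update on the done channel.
        if done_rx.changed().await.is_err() {
            // The flush loop is gone; treat it as a shutdown.
            return Err(FlushLayerError::Cancelled);
        }
    }
}
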
441 : pub struct WalReceiverInfo {
442 : pub wal_source_connconf: PgConnectionConfig,
443 : pub last_received_msg_lsn: Lsn,
444 : pub last_received_msg_ts: u128,
445 : }
446 :
447 : /// Information about how much history needs to be retained, needed by
448 : /// Garbage Collection.
449 : #[derive(Default)]
450 : pub(crate) struct GcInfo {
451 : /// Specific LSNs that are needed.
452 : ///
453 : /// Currently, this includes all points where child branches have
454 : /// been forked off from. In the future, could also include
455 : /// explicit user-defined snapshot points.
456 : pub(crate) retain_lsns: Vec<Lsn>,
457 :
458 : /// The cutoff coordinates, which are combined by selecting the minimum.
459 : pub(crate) cutoffs: GcCutoffs,
460 :
461 : /// Leases granted to particular LSNs.
462 : pub(crate) leases: BTreeMap<Lsn, LsnLease>,
463 : }
464 :
465 : impl GcInfo {
466 224 : pub(crate) fn min_cutoff(&self) -> Lsn {
467 224 : self.cutoffs.select_min()
468 224 : }
469 : }
470 :
471 : /// The `GcInfo` component describing which Lsns need to be retained.
472 : #[derive(Debug)]
473 : pub(crate) struct GcCutoffs {
474 : /// Keep everything newer than this point.
475 : ///
476 : /// This is calculated by subtracting the 'gc_horizon' setting from
477 : /// the last-record LSN.
478 : ///
479 : /// FIXME: is this inclusive or exclusive?
480 : pub(crate) horizon: Lsn,
481 :
482 : /// In addition to 'retain_lsns' and 'horizon_cutoff', keep everything newer than this
483 : /// point.
484 : ///
485 : /// This is calculated by finding a number such that a record is needed for PITR
486 : /// if and only if its LSN is larger than 'pitr_cutoff'.
487 : pub(crate) pitr: Lsn,
488 : }
489 :
490 : impl Default for GcCutoffs {
491 379 : fn default() -> Self {
492 379 : Self {
493 379 : horizon: Lsn::INVALID,
494 379 : pitr: Lsn::INVALID,
495 379 : }
496 379 : }
497 : }
498 :
499 : impl GcCutoffs {
500 224 : fn select_min(&self) -> Lsn {
501 224 : std::cmp::min(self.horizon, self.pitr)
502 224 : }
503 : }
504 :
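// A small worked example (sketch, not part of the original file) of how the
// two cutoffs combine: the effective GC cutoff must satisfy both the
// space-based horizon and the time-based PITR constraint, so the more
// conservative (smaller) LSN wins.
#[cfg(test)]
#[test]
fn gc_cutoffs_combine_by_minimum() {
    let cutoffs = GcCutoffs {
        horizon: Lsn(0x80),
        pitr: Lsn(0x40),
    };
    assert_eq!(cutoffs.select_min(), Lsn(0x40));
}
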
505 : pub(crate) struct TimelineVisitOutcome {
506 : completed_keyspace: KeySpace,
507 : image_covered_keyspace: KeySpace,
508 : }
509 :
510 : /// An error happened in a get() operation.
511 2 : #[derive(thiserror::Error, Debug)]
512 : pub(crate) enum PageReconstructError {
513 : #[error(transparent)]
514 : Other(#[from] anyhow::Error),
515 :
516 : #[error("Ancestor LSN wait error: {0}")]
517 : AncestorLsnTimeout(WaitLsnError),
518 :
519 : #[error("timeline shutting down")]
520 : Cancelled,
521 :
522 : /// An error happened replaying WAL records
523 : #[error(transparent)]
524 : WalRedo(anyhow::Error),
525 :
526 : #[error("{0}")]
527 : MissingKey(MissingKeyError),
528 : }
529 :
530 : impl GetVectoredError {
531 : #[cfg(test)]
532 6 : pub(crate) fn is_missing_key_error(&self) -> bool {
533 6 : matches!(self, Self::MissingKey(_))
534 6 : }
535 : }
536 :
537 : #[derive(Debug)]
538 : pub struct MissingKeyError {
539 : key: Key,
540 : shard: ShardNumber,
541 : cont_lsn: Lsn,
542 : request_lsn: Lsn,
543 : ancestor_lsn: Option<Lsn>,
544 : traversal_path: Vec<TraversalPathItem>,
545 : backtrace: Option<std::backtrace::Backtrace>,
546 : }
547 :
548 : impl std::fmt::Display for MissingKeyError {
549 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
550 0 : write!(
551 0 : f,
552 0 : "could not find data for key {} (shard {:?}) at LSN {}, request LSN {}",
553 0 : self.key, self.shard, self.cont_lsn, self.request_lsn
554 0 : )?;
555 0 : if let Some(ref ancestor_lsn) = self.ancestor_lsn {
556 0 : write!(f, ", ancestor {}", ancestor_lsn)?;
557 0 : }
558 :
559 0 : if !self.traversal_path.is_empty() {
560 0 : writeln!(f)?;
561 0 : }
562 :
563 0 : for (r, c, l) in &self.traversal_path {
564 0 : writeln!(
565 0 : f,
566 0 : "layer traversal: result {:?}, cont_lsn {}, layer: {}",
567 0 : r, c, l,
568 0 : )?;
569 : }
570 :
571 0 : if let Some(ref backtrace) = self.backtrace {
572 0 : write!(f, "\n{}", backtrace)?;
573 0 : }
574 :
575 0 : Ok(())
576 0 : }
577 : }
578 :
579 : impl PageReconstructError {
580 : /// Returns true if this error indicates a tenant/timeline shutdown-like situation
581 0 : pub(crate) fn is_stopping(&self) -> bool {
582 0 : use PageReconstructError::*;
583 0 : match self {
584 0 : Other(_) => false,
585 0 : AncestorLsnTimeout(_) => false,
586 0 : Cancelled => true,
587 0 : WalRedo(_) => false,
588 0 : MissingKey { .. } => false,
589 : }
590 0 : }
591 : }
592 :
593 0 : #[derive(thiserror::Error, Debug)]
594 : pub(crate) enum CreateImageLayersError {
595 : #[error("timeline shutting down")]
596 : Cancelled,
597 :
598 : #[error(transparent)]
599 : GetVectoredError(GetVectoredError),
600 :
601 : #[error(transparent)]
602 : PageReconstructError(PageReconstructError),
603 :
604 : #[error(transparent)]
605 : Other(#[from] anyhow::Error),
606 : }
607 :
608 0 : #[derive(thiserror::Error, Debug, Clone)]
609 : pub(crate) enum FlushLayerError {
610 : /// Timeline cancellation token was cancelled
611 : #[error("timeline shutting down")]
612 : Cancelled,
613 :
614 : /// We tried to flush a layer while the Timeline is in an unexpected state
615 : #[error("cannot flush frozen layers when flush_loop is not running, state is {0:?}")]
616 : NotRunning(FlushLoopState),
617 :
618 : // Arc<> the following non-clonable error types: we must be Clone-able because the flush error is propagated from the flush
619 : // loop via a watch channel, where we can only borrow it.
620 : #[error(transparent)]
621 : CreateImageLayersError(Arc<CreateImageLayersError>),
622 :
623 : #[error(transparent)]
624 : Other(#[from] Arc<anyhow::Error>),
625 : }
626 :
627 : impl FlushLayerError {
628 : // When crossing from generic anyhow errors to this error type, we explicitly check
629 : // for timeline cancellation to avoid logging inoffensive shutdown errors as warn/err.
630 0 : fn from_anyhow(timeline: &Timeline, err: anyhow::Error) -> Self {
631 0 : if timeline.cancel.is_cancelled() {
632 0 : Self::Cancelled
633 : } else {
634 0 : Self::Other(Arc::new(err))
635 : }
636 0 : }
637 : }
638 :
639 0 : #[derive(thiserror::Error, Debug)]
640 : pub(crate) enum GetVectoredError {
641 : #[error("timeline shutting down")]
642 : Cancelled,
643 :
644 : #[error("Requested too many keys: {0} > {}", Timeline::MAX_GET_VECTORED_KEYS)]
645 : Oversized(u64),
646 :
647 : #[error("Requested at invalid LSN: {0}")]
648 : InvalidLsn(Lsn),
649 :
650 : #[error("Requested key not found: {0}")]
651 : MissingKey(MissingKeyError),
652 :
653 : #[error(transparent)]
654 : GetReadyAncestorError(GetReadyAncestorError),
655 :
656 : #[error(transparent)]
657 : Other(#[from] anyhow::Error),
658 : }
659 :
660 2 : #[derive(thiserror::Error, Debug)]
661 : pub(crate) enum GetReadyAncestorError {
662 : #[error("Ancestor LSN wait error: {0}")]
663 : AncestorLsnTimeout(#[from] WaitLsnError),
664 :
665 : #[error("Bad state on timeline {timeline_id}: {state:?}")]
666 : BadState {
667 : timeline_id: TimelineId,
668 : state: TimelineState,
669 : },
670 :
671 : #[error("Cancelled")]
672 : Cancelled,
673 : }
674 :
675 : #[derive(Clone, Copy)]
676 : pub enum LogicalSizeCalculationCause {
677 : Initial,
678 : ConsumptionMetricsSyntheticSize,
679 : EvictionTaskImitation,
680 : TenantSizeHandler,
681 : }
682 :
683 : pub enum GetLogicalSizePriority {
684 : User,
685 : Background,
686 : }
687 :
688 0 : #[derive(enumset::EnumSetType)]
689 : pub(crate) enum CompactFlags {
690 : ForceRepartition,
691 : ForceImageLayerCreation,
692 : }
693 :
694 : impl std::fmt::Debug for Timeline {
695 0 : fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
696 0 : write!(f, "Timeline<{}>", self.timeline_id)
697 0 : }
698 : }
699 :
700 0 : #[derive(thiserror::Error, Debug)]
701 : pub(crate) enum WaitLsnError {
702 : // Called on a timeline which is shutting down
703 : #[error("Shutdown")]
704 : Shutdown,
705 :
706 : // Called on a timeline that is not in an active state or is shutting down
707 : #[error("Bad timeline state: {0:?}")]
708 : BadState(TimelineState),
709 :
710 : // Timeout expired while waiting for LSN to catch up with goal.
711 : #[error("{0}")]
712 : Timeout(String),
713 : }
714 :
715 : // The impls below achieve cancellation mapping for errors.
716 : // Perhaps there's a way of achieving this with less cruft.
717 :
718 : impl From<CreateImageLayersError> for CompactionError {
719 0 : fn from(e: CreateImageLayersError) -> Self {
720 0 : match e {
721 0 : CreateImageLayersError::Cancelled => CompactionError::ShuttingDown,
722 0 : _ => CompactionError::Other(e.into()),
723 : }
724 0 : }
725 : }
726 :
727 : impl From<CreateImageLayersError> for FlushLayerError {
728 0 : fn from(e: CreateImageLayersError) -> Self {
729 0 : match e {
730 0 : CreateImageLayersError::Cancelled => FlushLayerError::Cancelled,
731 0 : any => FlushLayerError::CreateImageLayersError(Arc::new(any)),
732 : }
733 0 : }
734 : }
735 :
736 : impl From<PageReconstructError> for CreateImageLayersError {
737 0 : fn from(e: PageReconstructError) -> Self {
738 0 : match e {
739 0 : PageReconstructError::Cancelled => CreateImageLayersError::Cancelled,
740 0 : _ => CreateImageLayersError::PageReconstructError(e),
741 : }
742 0 : }
743 : }
744 :
745 : impl From<GetVectoredError> for CreateImageLayersError {
746 0 : fn from(e: GetVectoredError) -> Self {
747 0 : match e {
748 0 : GetVectoredError::Cancelled => CreateImageLayersError::Cancelled,
749 0 : _ => CreateImageLayersError::GetVectoredError(e),
750 : }
751 0 : }
752 : }
753 :
754 : impl From<GetVectoredError> for PageReconstructError {
755 0 : fn from(e: GetVectoredError) -> Self {
756 0 : match e {
757 0 : GetVectoredError::Cancelled => PageReconstructError::Cancelled,
758 0 : GetVectoredError::InvalidLsn(_) => PageReconstructError::Other(anyhow!("Invalid LSN")),
759 0 : err @ GetVectoredError::Oversized(_) => PageReconstructError::Other(err.into()),
760 0 : GetVectoredError::MissingKey(err) => PageReconstructError::MissingKey(err),
761 0 : GetVectoredError::GetReadyAncestorError(err) => PageReconstructError::from(err),
762 0 : GetVectoredError::Other(err) => PageReconstructError::Other(err),
763 : }
764 0 : }
765 : }
766 :
767 : impl From<GetReadyAncestorError> for PageReconstructError {
768 2 : fn from(e: GetReadyAncestorError) -> Self {
769 2 : use GetReadyAncestorError::*;
770 2 : match e {
771 0 : AncestorLsnTimeout(wait_err) => PageReconstructError::AncestorLsnTimeout(wait_err),
772 2 : bad_state @ BadState { .. } => PageReconstructError::Other(anyhow::anyhow!(bad_state)),
773 0 : Cancelled => PageReconstructError::Cancelled,
774 : }
775 2 : }
776 : }
777 :
778 : #[derive(
779 : Eq,
780 : PartialEq,
781 : Debug,
782 : Copy,
783 : Clone,
784 187 : strum_macros::EnumString,
785 0 : strum_macros::Display,
786 0 : serde_with::DeserializeFromStr,
787 : serde_with::SerializeDisplay,
788 : )]
789 : #[strum(serialize_all = "kebab-case")]
790 : pub enum GetVectoredImpl {
791 : Sequential,
792 : Vectored,
793 : }
794 :
795 : #[derive(
796 : Eq,
797 : PartialEq,
798 : Debug,
799 : Copy,
800 : Clone,
801 187 : strum_macros::EnumString,
802 0 : strum_macros::Display,
803 0 : serde_with::DeserializeFromStr,
804 : serde_with::SerializeDisplay,
805 : )]
806 : #[strum(serialize_all = "kebab-case")]
807 : pub enum GetImpl {
808 : Legacy,
809 : Vectored,
810 : }
811 :
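// Both enums above are parsed from configuration strings via strum's
// `EnumString` derive with kebab-case serialization; a quick check (sketch,
// not part of the original file):
#[cfg(test)]
#[test]
fn get_impl_names_parse_as_kebab_case() {
    use std::str::FromStr;
    assert_eq!(
        GetVectoredImpl::from_str("sequential").unwrap(),
        GetVectoredImpl::Sequential
    );
    assert_eq!(GetImpl::from_str("vectored").unwrap(), GetImpl::Vectored);
}
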
812 : pub(crate) enum WaitLsnWaiter<'a> {
813 : Timeline(&'a Timeline),
814 : Tenant,
815 : PageService,
816 : }
817 :
818 : /// Argument to [`Timeline::shutdown`].
819 : #[derive(Debug, Clone, Copy)]
820 : pub(crate) enum ShutdownMode {
821 : /// Graceful shutdown, may do a lot of I/O as we flush any open layers to disk and then
822 : /// also to remote storage. This method can easily take multiple seconds for a busy timeline.
823 : ///
824 : /// While we are flushing, we continue to accept read I/O for LSNs ingested before
825 : /// the call to [`Timeline::shutdown`].
826 : FreezeAndFlush,
827 : /// Shut down immediately, without waiting for any open layers to flush.
828 : Hard,
829 : }
830 :
831 : struct ImageLayerCreationOutcome {
832 : image: Option<ResidentLayer>,
833 : next_start_key: Key,
834 : }
835 :
836 : /// Public interface functions
837 : impl Timeline {
838 : /// Get the LSN where this branch was created
839 8 : pub(crate) fn get_ancestor_lsn(&self) -> Lsn {
840 8 : self.ancestor_lsn
841 8 : }
842 :
843 : /// Get the ancestor's timeline id
844 3310 : pub(crate) fn get_ancestor_timeline_id(&self) -> Option<TimelineId> {
845 3310 : self.ancestor_timeline
846 3310 : .as_ref()
847 3310 : .map(|ancestor| ancestor.timeline_id)
848 3310 : }
849 :
850 : /// Lock and get timeline's GC cutoff
851 992 : pub(crate) fn get_latest_gc_cutoff_lsn(&self) -> RcuReadGuard<Lsn> {
852 992 : self.latest_gc_cutoff_lsn.read()
853 992 : }
854 :
855 : /// Look up given page version.
856 : ///
857 : /// If a remote layer file is needed, it is downloaded as part of this
858 : /// call.
859 : ///
860 : /// This method enforces [`Self::timeline_get_throttle`] internally.
861 : ///
862 : /// NOTE: It is considered an error to 'get' a key that doesn't exist. The
863 : /// abstraction above this needs to store suitable metadata to track what
864 : /// data exists with what keys, in separate metadata entries. If a
865 : /// non-existent key is requested, we may incorrectly return a value from
866 : /// an ancestor branch, for example, or waste a lot of cycles chasing the
867 : /// non-existing key.
868 : ///
869 : /// # Cancel-Safety
870 : ///
871 : /// This method is cancellation-safe.
872 : #[inline(always)]
873 624102 : pub(crate) async fn get(
874 624102 : &self,
875 624102 : key: Key,
876 624102 : lsn: Lsn,
877 624102 : ctx: &RequestContext,
878 624102 : ) -> Result<Bytes, PageReconstructError> {
879 624102 : if !lsn.is_valid() {
880 0 : return Err(PageReconstructError::Other(anyhow::anyhow!("Invalid LSN")));
881 624102 : }
882 624102 :
883 624102 : // This check is debug-only because of the cost of hashing, and because it's a double-check: we
884 624102 : // already checked the key against the shard_identity when looking up the Timeline from
885 624102 : // page_service.
886 624102 : debug_assert!(!self.shard_identity.is_key_disposable(&key));
887 :
888 624102 : self.timeline_get_throttle.throttle(ctx, 1).await;
889 :
890 : // Check the page cache. We will get back the most recent page with lsn <= `lsn`.
891 : // The cached image can be returned directly if there is no WAL between the cached image
892 : // and requested LSN. The cached image can also be used to reduce the amount of WAL needed
893 : // for redo.
894 624102 : let cached_page_img = match self.lookup_cached_page(&key, lsn, ctx).await {
895 0 : Some((cached_lsn, cached_img)) => {
896 0 : match cached_lsn.cmp(&lsn) {
897 0 : Ordering::Less => {} // there might be WAL between cached_lsn and lsn, we need to check
898 : Ordering::Equal => {
899 0 : MATERIALIZED_PAGE_CACHE_HIT_DIRECT.inc();
900 0 : return Ok(cached_img); // exact LSN match, return the image
901 : }
902 : Ordering::Greater => {
903 0 : unreachable!("the returned lsn should never be after the requested lsn")
904 : }
905 : }
906 0 : Some((cached_lsn, cached_img))
907 : }
908 624102 : None => None,
909 : };
910 :
911 624102 : match self.conf.get_impl {
912 : GetImpl::Legacy => {
913 624102 : let reconstruct_state = ValueReconstructState {
914 624102 : records: Vec::new(),
915 624102 : img: cached_page_img,
916 624102 : };
917 624102 :
918 624102 : self.get_impl(key, lsn, reconstruct_state, ctx).await
919 : }
920 : GetImpl::Vectored => {
921 0 : let keyspace = KeySpace {
922 0 : ranges: vec![key..key.next()],
923 0 : };
924 0 :
925 0 : // Initialise the reconstruct state for the key with the cache
926 0 : // entry returned above.
927 0 : let mut reconstruct_state = ValuesReconstructState::new();
928 0 :
929 0 : // Only add the cached image to the reconstruct state when it exists.
930 0 : if cached_page_img.is_some() {
931 0 : let mut key_state = VectoredValueReconstructState::default();
932 0 : key_state.img = cached_page_img;
933 0 : reconstruct_state.keys.insert(key, Ok(key_state));
934 0 : }
935 :
936 0 : let vectored_res = self
937 0 : .get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx)
938 0 : .await;
939 :
940 0 : if self.conf.validate_vectored_get {
941 0 : self.validate_get_vectored_impl(&vectored_res, keyspace, lsn, ctx)
942 0 : .await;
943 0 : }
944 :
945 0 : let key_value = vectored_res?.pop_first();
946 0 : match key_value {
947 0 : Some((got_key, value)) => {
948 0 : if got_key != key {
949 0 : error!(
950 0 : "Expected {}, but singular vectored get returned {}",
951 : key, got_key
952 : );
953 0 : Err(PageReconstructError::Other(anyhow!(
954 0 : "Singular vectored get returned wrong key"
955 0 : )))
956 : } else {
957 0 : value
958 : }
959 : }
960 0 : None => Err(PageReconstructError::MissingKey(MissingKeyError {
961 0 : key,
962 0 : shard: self.shard_identity.get_shard_number(&key),
963 0 : cont_lsn: Lsn(0),
964 0 : request_lsn: lsn,
965 0 : ancestor_lsn: None,
966 0 : traversal_path: Vec::new(),
967 0 : backtrace: None,
968 0 : })),
969 : }
970 : }
971 : }
972 624102 : }
973 :
974 : /// Not subject to [`Self::timeline_get_throttle`].
975 625654 : async fn get_impl(
976 625654 : &self,
977 625654 : key: Key,
978 625654 : lsn: Lsn,
979 625654 : mut reconstruct_state: ValueReconstructState,
980 625654 : ctx: &RequestContext,
981 625654 : ) -> Result<Bytes, PageReconstructError> {
982 625654 : // XXX: structured stats collection for layer eviction here.
983 625654 : trace!(
984 0 : "get page request for {}@{} from task kind {:?}",
985 0 : key,
986 0 : lsn,
987 0 : ctx.task_kind()
988 : );
989 :
990 625654 : let timer = crate::metrics::GET_RECONSTRUCT_DATA_TIME
991 625654 : .for_get_kind(GetKind::Singular)
992 625654 : .start_timer();
993 625654 : let path = self
994 625654 : .get_reconstruct_data(key, lsn, &mut reconstruct_state, ctx)
995 45928 : .await?;
996 625524 : timer.stop_and_record();
997 625524 :
998 625524 : let start = Instant::now();
999 625524 : let res = self.reconstruct_value(key, lsn, reconstruct_state).await;
1000 625524 : let elapsed = start.elapsed();
1001 625524 : crate::metrics::RECONSTRUCT_TIME
1002 625524 : .for_get_kind(GetKind::Singular)
1003 625524 : .observe(elapsed.as_secs_f64());
1004 625524 :
1005 625524 : if cfg!(feature = "testing") && res.is_err() {
1006 : // it can only be walredo issue
1007 : use std::fmt::Write;
1008 :
1009 0 : let mut msg = String::new();
1010 0 :
1011 0 : path.into_iter().for_each(|(res, cont_lsn, layer)| {
1012 0 : writeln!(
1013 0 : msg,
1014 0 : "- layer traversal: result {res:?}, cont_lsn {cont_lsn}, layer: {}",
1015 0 : layer,
1016 0 : )
1017 0 : .expect("string grows")
1018 0 : });
1019 0 :
1020 0 : // this is to rule out or provide evidence that we could in some cases read a duplicate
1021 0 : // walrecord
1022 0 : tracing::info!("walredo failed, path:\n{msg}");
1023 625524 : }
1024 :
1025 625524 : res
1026 625654 : }
1027 :
1028 : pub(crate) const MAX_GET_VECTORED_KEYS: u64 = 32;
1029 :
1030 : /// Look up multiple page versions at a given LSN
1031 : ///
1032 : /// This naive implementation will be replaced with a more efficient one
1033 : /// which actually vectorizes the read path.
1034 928 : pub(crate) async fn get_vectored(
1035 928 : &self,
1036 928 : keyspace: KeySpace,
1037 928 : lsn: Lsn,
1038 928 : ctx: &RequestContext,
1039 928 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1040 928 : if !lsn.is_valid() {
1041 0 : return Err(GetVectoredError::InvalidLsn(lsn));
1042 928 : }
1043 928 :
1044 928 : let key_count = keyspace.total_raw_size().try_into().unwrap();
1045 928 : if key_count > Timeline::MAX_GET_VECTORED_KEYS {
1046 0 : return Err(GetVectoredError::Oversized(key_count));
1047 928 : }
1048 :
1049 1856 : for range in &keyspace.ranges {
1050 928 : let mut key = range.start;
1051 2158 : while key != range.end {
1052 1230 : assert!(!self.shard_identity.is_key_disposable(&key));
1053 1230 : key = key.next();
1054 : }
1055 : }
1056 :
1057 928 : trace!(
1058 0 : "get vectored request for {:?}@{} from task kind {:?} will use {} implementation",
1059 0 : keyspace,
1060 0 : lsn,
1061 0 : ctx.task_kind(),
1062 : self.conf.get_vectored_impl
1063 : );
1064 :
1065 928 : let start = crate::metrics::GET_VECTORED_LATENCY
1066 928 : .for_task_kind(ctx.task_kind())
1067 928 : .map(|metric| (metric, Instant::now()));
1068 :
1069 : // start counting after throttle so that throttle time
1070 : // is always less than observation time
1071 928 : let throttled = self
1072 928 : .timeline_get_throttle
1073 928 : .throttle(ctx, key_count as usize)
1074 0 : .await;
1075 :
1076 928 : let res = match self.conf.get_vectored_impl {
1077 : GetVectoredImpl::Sequential => {
1078 928 : self.get_vectored_sequential_impl(keyspace, lsn, ctx).await
1079 : }
1080 : GetVectoredImpl::Vectored => {
1081 0 : let vectored_res = self
1082 0 : .get_vectored_impl(
1083 0 : keyspace.clone(),
1084 0 : lsn,
1085 0 : &mut ValuesReconstructState::new(),
1086 0 : ctx,
1087 0 : )
1088 0 : .await;
1089 :
1090 0 : if self.conf.validate_vectored_get {
1091 0 : self.validate_get_vectored_impl(&vectored_res, keyspace, lsn, ctx)
1092 0 : .await;
1093 0 : }
1094 :
1095 0 : vectored_res
1096 : }
1097 : };
1098 :
1099 928 : if let Some((metric, start)) = start {
1100 0 : let elapsed = start.elapsed();
1101 0 : let ex_throttled = if let Some(throttled) = throttled {
1102 0 : elapsed.checked_sub(throttled)
1103 : } else {
1104 0 : Some(elapsed)
1105 : };
1106 :
1107 0 : if let Some(ex_throttled) = ex_throttled {
1108 0 : metric.observe(ex_throttled.as_secs_f64());
1109 0 : } else {
1110 0 : use utils::rate_limit::RateLimit;
1111 0 : static LOGGED: Lazy<Mutex<RateLimit>> =
1112 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(10))));
1113 0 : let mut rate_limit = LOGGED.lock().unwrap();
1114 0 : rate_limit.call(|| {
1115 0 : warn!("error deducting time spent throttled; this message is logged at a global rate limit");
1116 0 : });
1117 0 : }
1118 928 : }
1119 :
1120 928 : res
1121 928 : }
1122 :
1123 : /// Scan the keyspace and return all existing key-value pairs in it. This currently uses vectored
1124 : /// get under the hood. A normal vectored get throws an error when a key in the keyspace is not found
1125 : /// during the search, but the scan interface returns all existing key-value pairs and does not
1126 : /// expect every single key in the keyspace to be found. The semantics are closer to RocksDB's
1127 : /// scan iterator interface. We could later optimize this interface to avoid some of the checks the
1128 : /// vectored get path performs to maintain and split the probing and to-be-probed keyspaces. We also
1129 : /// need to ensure that the scan operation will not cause OOM in the future.
1130 : #[allow(dead_code)]
1131 12 : pub(crate) async fn scan(
1132 12 : &self,
1133 12 : keyspace: KeySpace,
1134 12 : lsn: Lsn,
1135 12 : ctx: &RequestContext,
1136 12 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1137 12 : if !lsn.is_valid() {
1138 0 : return Err(GetVectoredError::InvalidLsn(lsn));
1139 12 : }
1140 12 :
1141 12 : trace!(
1142 0 : "key-value scan request for {:?}@{} from task kind {:?}",
1143 0 : keyspace,
1144 0 : lsn,
1145 0 : ctx.task_kind()
1146 : );
1147 :
1148 : // We should generalize this into Keyspace::contains in the future.
1149 24 : for range in &keyspace.ranges {
1150 12 : if range.start.field1 < METADATA_KEY_BEGIN_PREFIX
1151 12 : || range.end.field1 > METADATA_KEY_END_PREFIX
1152 : {
1153 0 : return Err(GetVectoredError::Other(anyhow::anyhow!(
1154 0 : "only metadata keyspace can be scanned"
1155 0 : )));
1156 12 : }
1157 : }
1158 :
1159 12 : let start = crate::metrics::SCAN_LATENCY
1160 12 : .for_task_kind(ctx.task_kind())
1161 12 : .map(ScanLatencyOngoingRecording::start_recording);
1162 :
1163 : // start counting after throttle so that throttle time
1164 : // is always less than observation time
1165 12 : let throttled = self
1166 12 : .timeline_get_throttle
1167 12 : // assume scan = 1 quota for now until we find a better way to process this
1168 12 : .throttle(ctx, 1)
1169 0 : .await;
1170 :
1171 12 : let vectored_res = self
1172 12 : .get_vectored_impl(
1173 12 : keyspace.clone(),
1174 12 : lsn,
1175 12 : &mut ValuesReconstructState::default(),
1176 12 : ctx,
1177 12 : )
1178 0 : .await;
1179 :
1180 12 : if let Some(recording) = start {
1181 0 : recording.observe(throttled);
1182 12 : }
1183 :
1184 12 : vectored_res
1185 12 : }
1186 :
1187 : /// Not subject to [`Self::timeline_get_throttle`].
1188 940 : pub(super) async fn get_vectored_sequential_impl(
1189 940 : &self,
1190 940 : keyspace: KeySpace,
1191 940 : lsn: Lsn,
1192 940 : ctx: &RequestContext,
1193 940 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1194 940 : let mut values = BTreeMap::new();
1195 :
1196 1880 : for range in keyspace.ranges {
1197 940 : let mut key = range.start;
1198 2492 : while key != range.end {
1199 1552 : let block = self
1200 1552 : .get_impl(key, lsn, ValueReconstructState::default(), ctx)
1201 27 : .await;
1202 :
1203 : use PageReconstructError::*;
1204 0 : match block {
1205 0 : Err(Cancelled) => return Err(GetVectoredError::Cancelled),
1206 : Err(MissingKey(_))
1207 2 : if NON_INHERITED_RANGE.contains(&key)
1208 2 : || NON_INHERITED_SPARSE_RANGE.contains(&key) =>
1209 2 : {
1210 2 : // Ignore missing key error for aux key range. TODO: currently, we assume non_inherited_range == aux_key_range.
1211 2 : // When we add more types of keys into the page server, we should revisit this part of code and throw errors
1212 2 : // accordingly.
1213 2 : key = key.next();
1214 2 : }
1215 0 : Err(MissingKey(err)) => {
1216 0 : return Err(GetVectoredError::MissingKey(err));
1217 : }
1218 0 : Err(Other(err))
1219 0 : if err
1220 0 : .to_string()
1221 0 : .contains("downloading evicted layer file failed") =>
1222 0 : {
1223 0 : return Err(GetVectoredError::Other(err))
1224 : }
1225 0 : Err(Other(err))
1226 0 : if err
1227 0 : .chain()
1228 0 : .any(|cause| cause.to_string().contains("layer loading failed")) =>
1229 0 : {
1230 0 : // The intent here is to achieve error parity with the vectored read path.
1231 0 : // When vectored read fails to load a layer it fails the whole read, hence
1232 0 : // we mimic this behaviour here to keep the validation happy.
1233 0 : return Err(GetVectoredError::Other(err));
1234 : }
1235 1550 : _ => {
1236 1550 : values.insert(key, block);
1237 1550 : key = key.next();
1238 1550 : }
1239 : }
1240 : }
1241 : }
1242 :
1243 940 : Ok(values)
1244 940 : }
1245 :
1246 138 : pub(super) async fn get_vectored_impl(
1247 138 : &self,
1248 138 : keyspace: KeySpace,
1249 138 : lsn: Lsn,
1250 138 : reconstruct_state: &mut ValuesReconstructState,
1251 138 : ctx: &RequestContext,
1252 138 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1253 138 : let get_kind = if keyspace.total_raw_size() == 1 {
1254 68 : GetKind::Singular
1255 : } else {
1256 70 : GetKind::Vectored
1257 : };
1258 :
1259 138 : let get_data_timer = crate::metrics::GET_RECONSTRUCT_DATA_TIME
1260 138 : .for_get_kind(get_kind)
1261 138 : .start_timer();
1262 138 : self.get_vectored_reconstruct_data(keyspace, lsn, reconstruct_state, ctx)
1263 11414 : .await?;
1264 128 : get_data_timer.stop_and_record();
1265 128 :
1266 128 : let reconstruct_timer = crate::metrics::RECONSTRUCT_TIME
1267 128 : .for_get_kind(get_kind)
1268 128 : .start_timer();
1269 128 : let mut results: BTreeMap<Key, Result<Bytes, PageReconstructError>> = BTreeMap::new();
1270 128 : let layers_visited = reconstruct_state.get_layers_visited();
1271 :
1272 40436 : for (key, res) in std::mem::take(&mut reconstruct_state.keys) {
1273 40436 : match res {
1274 0 : Err(err) => {
1275 0 : results.insert(key, Err(err));
1276 0 : }
1277 40436 : Ok(state) => {
1278 40436 : let state = ValueReconstructState::from(state);
1279 :
1280 40436 : let reconstruct_res = self.reconstruct_value(key, lsn, state).await;
1281 40436 : results.insert(key, reconstruct_res);
1282 : }
1283 : }
1284 : }
1285 128 : reconstruct_timer.stop_and_record();
1286 128 :
1287 128 : // For aux file keys (v1 or v2) the vectored read path does not return an error
1288 128 : // when they're missing. Instead they are omitted from the resulting btree
1289 128 : // (this is a requirement, not a bug). Skip updating the metric in these cases
1290 128 : // to avoid a division by zero below (which would record an infinite value).
1291 128 : if !results.is_empty() {
1292 110 : // Note that this is an approximation. Tracking the exact number of layers visited
1293 110 : // per key requires virtually unbounded memory usage and is inefficient
1294 110 : // (i.e. segment tree tracking each range queried from a layer)
1295 110 : crate::metrics::VEC_READ_NUM_LAYERS_VISITED
1296 110 : .observe(layers_visited as f64 / results.len() as f64);
1297 110 : }
1298 :
1299 128 : Ok(results)
1300 138 : }
1301 :
1302 : /// Not subject to [`Self::timeline_get_throttle`].
1303 12 : pub(super) async fn validate_get_vectored_impl(
1304 12 : &self,
1305 12 : vectored_res: &Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError>,
1306 12 : keyspace: KeySpace,
1307 12 : lsn: Lsn,
1308 12 : ctx: &RequestContext,
1309 12 : ) {
1310 12 : if keyspace.overlaps(&Key::metadata_key_range()) {
1311 : // skip validation for metadata key range
1312 0 : return;
1313 12 : }
1314 :
1315 12 : let sequential_res = self
1316 12 : .get_vectored_sequential_impl(keyspace.clone(), lsn, ctx)
1317 19 : .await;
1318 :
1319 0 : fn errors_match(lhs: &GetVectoredError, rhs: &GetVectoredError) -> bool {
1320 0 : use GetVectoredError::*;
1321 0 : match (lhs, rhs) {
1322 0 : (Oversized(l), Oversized(r)) => l == r,
1323 0 : (InvalidLsn(l), InvalidLsn(r)) => l == r,
1324 0 : (MissingKey(l), MissingKey(r)) => l.key == r.key,
1325 0 : (GetReadyAncestorError(_), GetReadyAncestorError(_)) => true,
1326 0 : (Other(_), Other(_)) => true,
1327 0 : _ => false,
1328 : }
1329 0 : }
1330 :
1331 12 : match (&sequential_res, vectored_res) {
1332 0 : (Err(GetVectoredError::Cancelled), _) => {},
1333 0 : (_, Err(GetVectoredError::Cancelled)) => {},
1334 0 : (Err(seq_err), Ok(_)) => {
1335 0 : panic!(concat!("Sequential get failed with {}, but vectored get did not",
1336 0 : " - keyspace={:?} lsn={}"),
1337 0 : seq_err, keyspace, lsn) },
1338 0 : (Ok(_), Err(GetVectoredError::GetReadyAncestorError(GetReadyAncestorError::AncestorLsnTimeout(_)))) => {
1339 0 : // Sequential get runs after vectored get, so it is possible for the latter
1340 0 : // to time out while waiting for its ancestor's Lsn to become ready and for the
1341 0 : // former to succeed (it essentially has a doubled wait time).
1342 0 : },
1343 0 : (Ok(_), Err(vec_err)) => {
1344 0 : panic!(concat!("Vectored get failed with {}, but sequential get did not",
1345 0 : " - keyspace={:?} lsn={}"),
1346 0 : vec_err, keyspace, lsn) },
1347 0 : (Err(seq_err), Err(vec_err)) => {
1348 0 : assert!(errors_match(seq_err, vec_err),
1349 0 : "Mismatched errors: {seq_err} != {vec_err} - keyspace={keyspace:?} lsn={lsn}")},
1350 12 : (Ok(seq_values), Ok(vec_values)) => {
1351 320 : seq_values.iter().zip(vec_values.iter()).for_each(|((seq_key, seq_res), (vec_key, vec_res))| {
1352 320 : assert_eq!(seq_key, vec_key);
1353 320 : match (seq_res, vec_res) {
1354 320 : (Ok(seq_blob), Ok(vec_blob)) => {
1355 320 : Self::validate_key_equivalence(seq_key, &keyspace, lsn, seq_blob, vec_blob);
1356 320 : },
1357 0 : (Err(err), Ok(_)) => {
1358 0 : panic!(
1359 0 : concat!("Sequential get failed with {} for key {}, but vectored get did not",
1360 0 : " - keyspace={:?} lsn={}"),
1361 0 : err, seq_key, keyspace, lsn) },
1362 0 : (Ok(_), Err(err)) => {
1363 0 : panic!(
1364 0 : concat!("Vectored get failed with {} for key {}, but sequential get did not",
1365 0 : " - keyspace={:?} lsn={}"),
1366 0 : err, seq_key, keyspace, lsn) },
1367 0 : (Err(_), Err(_)) => {}
1368 : }
1369 320 : })
1370 : }
1371 : }
1372 12 : }
1373 :
1374 320 : fn validate_key_equivalence(
1375 320 : key: &Key,
1376 320 : keyspace: &KeySpace,
1377 320 : lsn: Lsn,
1378 320 : seq: &Bytes,
1379 320 : vec: &Bytes,
1380 320 : ) {
1381 320 : if *key == AUX_FILES_KEY {
1382 : // The value reconstruct of AUX_FILES_KEY from records is not deterministic
1383 : // since it uses a hash map under the hood. Hence, deserialise both results
1384 : // before comparing.
1385 0 : let seq_aux_dir_res = AuxFilesDirectory::des(seq);
1386 0 : let vec_aux_dir_res = AuxFilesDirectory::des(vec);
1387 0 : match (&seq_aux_dir_res, &vec_aux_dir_res) {
1388 0 : (Ok(seq_aux_dir), Ok(vec_aux_dir)) => {
1389 0 : assert_eq!(
1390 : seq_aux_dir, vec_aux_dir,
1391 0 : "Mismatch for key {} - keyspace={:?} lsn={}",
1392 : key, keyspace, lsn
1393 : );
1394 : }
1395 0 : (Err(_), Err(_)) => {}
1396 : _ => {
1397 0 : panic!("Mismatch for {key}: {seq_aux_dir_res:?} != {vec_aux_dir_res:?}");
1398 : }
1399 : }
1400 : } else {
1401 : // All other keys should reconstruct deterministically, so we simply compare the blobs.
1402 320 : assert_eq!(
1403 : seq, vec,
1404 0 : "Image mismatch for key {key} - keyspace={keyspace:?} lsn={lsn}"
1405 : );
1406 : }
1407 320 : }
1408 :
1409 : /// Get last or prev record separately. Same as get_last_record_rlsn().last/prev.
1410 275432 : pub(crate) fn get_last_record_lsn(&self) -> Lsn {
1411 275432 : self.last_record_lsn.load().last
1412 275432 : }
1413 :
1414 0 : pub(crate) fn get_prev_record_lsn(&self) -> Lsn {
1415 0 : self.last_record_lsn.load().prev
1416 0 : }
1417 :
1418 : /// Atomically get both last and prev.
1419 224 : pub(crate) fn get_last_record_rlsn(&self) -> RecordLsn {
1420 224 : self.last_record_lsn.load()
1421 224 : }
1422 :
1423 : /// Subscribe to callers of wait_lsn(). The value of the channel is None if there are no
1424 : /// wait_lsn() calls in progress, and Some(Lsn) if there is an active waiter for wait_lsn().
1425 0 : pub(crate) fn subscribe_for_wait_lsn_updates(&self) -> watch::Receiver<Option<Lsn>> {
1426 0 : self.last_record_lsn.status_receiver()
1427 0 : }
1428 :
1429 1122 : pub(crate) fn get_disk_consistent_lsn(&self) -> Lsn {
1430 1122 : self.disk_consistent_lsn.load()
1431 1122 : }
1432 :
1433 : /// remote_consistent_lsn from the perspective of the tenant's current generation,
1434 : /// not validated with control plane yet.
1435 : /// See [`Self::get_remote_consistent_lsn_visible`].
1436 0 : pub(crate) fn get_remote_consistent_lsn_projected(&self) -> Option<Lsn> {
1437 0 : self.remote_client.remote_consistent_lsn_projected()
1438 0 : }
1439 :
1440 : /// remote_consistent_lsn which the tenant is guaranteed not to go backward from,
1441 : /// i.e. a value of remote_consistent_lsn_projected which has undergone
1442 : /// generation validation in the deletion queue.
1443 0 : pub(crate) fn get_remote_consistent_lsn_visible(&self) -> Option<Lsn> {
1444 0 : self.remote_client.remote_consistent_lsn_visible()
1445 0 : }
1446 :
1447 : /// The sum of the file size of all historic layers in the layer map.
1448 : /// This method makes no distinction between local and remote layers.
1449 : /// Hence, the result **does not represent local filesystem usage**.
1450 0 : pub(crate) async fn layer_size_sum(&self) -> u64 {
1451 0 : let guard = self.layers.read().await;
1452 0 : let layer_map = guard.layer_map();
1453 0 : let mut size = 0;
1454 0 : for l in layer_map.iter_historic_layers() {
1455 0 : size += l.file_size;
1456 0 : }
1457 0 : size
1458 0 : }
1459 :
1460 0 : pub(crate) fn resident_physical_size(&self) -> u64 {
1461 0 : self.metrics.resident_physical_size_get()
1462 0 : }
1463 :
1464 0 : pub(crate) fn get_directory_metrics(&self) -> [u64; DirectoryKind::KINDS_NUM] {
1465 0 : array::from_fn(|idx| self.directory_metrics[idx].load(AtomicOrdering::Relaxed))
1466 0 : }
1467 :
1468 : ///
1469 : /// Wait until WAL has been received and processed up to this LSN.
1470 : ///
1471 : /// You should call this before any of the other get_* or list_* functions. Calling
1472 : /// those functions with an LSN that hasn't been processed yet is an error.
1473 : ///
1474 223661 : pub(crate) async fn wait_lsn(
1475 223661 : &self,
1476 223661 : lsn: Lsn,
1477 223661 : who_is_waiting: WaitLsnWaiter<'_>,
1478 223661 : ctx: &RequestContext, /* Prepare for use by cancellation */
1479 223661 : ) -> Result<(), WaitLsnError> {
1480 223661 : let state = self.current_state();
1481 223661 : if self.cancel.is_cancelled() || matches!(state, TimelineState::Stopping) {
1482 0 : return Err(WaitLsnError::Shutdown);
1483 223661 : } else if !matches!(state, TimelineState::Active) {
1484 0 : return Err(WaitLsnError::BadState(state));
1485 223661 : }
1486 223661 :
1487 223661 : if cfg!(debug_assertions) {
1488 223661 : match ctx.task_kind() {
1489 : TaskKind::WalReceiverManager
1490 : | TaskKind::WalReceiverConnectionHandler
1491 : | TaskKind::WalReceiverConnectionPoller => {
1492 0 : let is_myself = match who_is_waiting {
1493 0 : WaitLsnWaiter::Timeline(waiter) => Weak::ptr_eq(&waiter.myself, &self.myself),
1494 0 : WaitLsnWaiter::Tenant | WaitLsnWaiter::PageService => unreachable!("tenant or page_service context are not expected to have task kind {:?}", ctx.task_kind()),
1495 : };
1496 0 : if is_myself {
1497 0 : if let Err(current) = self.last_record_lsn.would_wait_for(lsn) {
1498 : // walingest is the only one that can advance last_record_lsn; it should make sure to never reach here
1499 0 : panic!("this timeline's walingest task is calling wait_lsn({lsn}) but we only have last_record_lsn={current}; would deadlock");
1500 0 : }
1501 0 : } else {
1502 0 : // if another timeline's is waiting for us, there's no deadlock risk because
1503 0 : // our walreceiver task can make progress independent of theirs
1504 0 : }
1505 : }
1506 223661 : _ => {}
1507 : }
1508 0 : }
1509 :
1510 223661 : let _timer = crate::metrics::WAIT_LSN_TIME.start_timer();
1511 223661 :
1512 223661 : match self
1513 223661 : .last_record_lsn
1514 223661 : .wait_for_timeout(lsn, self.conf.wait_lsn_timeout)
1515 0 : .await
1516 : {
1517 223661 : Ok(()) => Ok(()),
1518 0 : Err(e) => {
1519 0 : use utils::seqwait::SeqWaitError::*;
1520 0 : match e {
1521 0 : Shutdown => Err(WaitLsnError::Shutdown),
1522 : Timeout => {
1523 : // don't count the time spent waiting for the lock below, nor the time spent in walreceiver.status(), towards the wait_lsn_time_histo
1524 0 : drop(_timer);
1525 0 : let walreceiver_status = self.walreceiver_status();
1526 0 : Err(WaitLsnError::Timeout(format!(
1527 0 : "Timed out while waiting for WAL record at LSN {} to arrive, last_record_lsn {} disk consistent LSN={}, WalReceiver status: {}",
1528 0 : lsn,
1529 0 : self.get_last_record_lsn(),
1530 0 : self.get_disk_consistent_lsn(),
1531 0 : walreceiver_status,
1532 0 : )))
1533 : }
1534 : }
1535 : }
1536 : }
1537 223661 : }
1538 :
1539 0 : pub(crate) fn walreceiver_status(&self) -> String {
1540 0 : match &*self.walreceiver.lock().unwrap() {
1541 0 : None => "stopping or stopped".to_string(),
1542 0 : Some(walreceiver) => match walreceiver.status() {
1543 0 : Some(status) => status.to_human_readable_string(),
1544 0 : None => "Not active".to_string(),
1545 : },
1546 : }
1547 0 : }
1548 :
1549 : /// Check that it is valid to request operations at that LSN.
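///
/// A minimal sketch of the intended call pattern (hypothetical caller):
///
/// ```ignore
/// let latest_gc_cutoff_lsn = timeline.get_latest_gc_cutoff_lsn();
/// timeline.check_lsn_is_in_scope(lsn, &latest_gc_cutoff_lsn)?;
/// // `lsn` has not been garbage collected; reads at it are valid.
/// ```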
1550 228 : pub(crate) fn check_lsn_is_in_scope(
1551 228 : &self,
1552 228 : lsn: Lsn,
1553 228 : latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
1554 228 : ) -> anyhow::Result<()> {
1555 228 : ensure!(
1556 228 : lsn >= **latest_gc_cutoff_lsn,
1557 4 : "LSN {} is earlier than latest GC horizon {} (we might've already garbage collected needed data)",
1558 4 : lsn,
1559 4 : **latest_gc_cutoff_lsn,
1560 : );
1561 224 : Ok(())
1562 228 : }
1563 :
1564 : /// Obtains a temporary lease blocking garbage collection for the given LSN.
1565 : ///
1566 : /// This function will error if the requesting LSN is less than the `latest_gc_cutoff_lsn` and there is also
1567 : /// no existing lease to renew. If there is an existing lease in the map, the lease will be renewed only if
1568 : /// the request extends the lease. The returned lease is therefore the maximum between the existing lease and
1569 : /// the requesting lease.
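///
/// For example (hypothetical values): if an existing lease for `lsn` is valid until
/// T+10s, a request for a lease valid until T+5s leaves it at T+10s and returns the
/// existing lease, while a request valid until T+20s extends it to T+20s.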
1570 14 : pub(crate) fn make_lsn_lease(
1571 14 : &self,
1572 14 : lsn: Lsn,
1573 14 : length: Duration,
1574 14 : _ctx: &RequestContext,
1575 14 : ) -> anyhow::Result<LsnLease> {
1576 12 : let lease = {
1577 14 : let mut gc_info = self.gc_info.write().unwrap();
1578 14 :
1579 14 : let valid_until = SystemTime::now() + length;
1580 14 :
1581 14 : let entry = gc_info.leases.entry(lsn);
1582 :
1583 12 : let lease = {
1584 14 : if let Entry::Occupied(mut occupied) = entry {
1585 6 : let existing_lease = occupied.get_mut();
1586 6 : if valid_until > existing_lease.valid_until {
1587 2 : existing_lease.valid_until = valid_until;
1588 4 : }
1589 6 : existing_lease.clone()
1590 : } else {
1591 : // Reject already GC-ed LSN (lsn < latest_gc_cutoff)
1592 8 : let latest_gc_cutoff_lsn = self.get_latest_gc_cutoff_lsn();
1593 8 : if lsn < *latest_gc_cutoff_lsn {
1594 2 : bail!("tried to request a page version that was garbage collected. requested at {} gc cutoff {}", lsn, *latest_gc_cutoff_lsn);
1595 6 : }
1596 6 :
1597 6 : entry.or_insert(LsnLease { valid_until }).clone()
1598 : }
1599 : };
1600 :
1601 12 : lease
1602 12 : };
1603 12 :
1604 12 : Ok(lease)
1605 14 : }
1606 :
1607 : /// Flush to disk all data that was written with the put_* functions
1608 2112 : #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
1609 : pub(crate) async fn freeze_and_flush(&self) -> Result<(), FlushLayerError> {
1610 : self.freeze_and_flush0().await
1611 : }
1612 :
1613 : // This exists to provide a non-span creating version of `freeze_and_flush` we can call without
1614 : // polluting the span hierarchy.
1615 1056 : pub(crate) async fn freeze_and_flush0(&self) -> Result<(), FlushLayerError> {
1616 1056 : let to_lsn = {
1617 : // Freeze the current open in-memory layer. It will be written to disk on next
1618 : // iteration.
1619 1056 : let mut g = self.write_lock.lock().await;
1620 :
1621 1056 : let to_lsn = self.get_last_record_lsn();
1622 1056 : self.freeze_inmem_layer_at(to_lsn, &mut g).await;
1623 1056 : to_lsn
1624 1056 : };
1625 1056 : self.flush_frozen_layers_and_wait(to_lsn).await
1626 1056 : }
1627 :
1628 : // Check if an open ephemeral layer should be closed: this provides
1629 : // background enforcement of checkpoint interval if there is no active WAL receiver, to avoid keeping
1630 : // an ephemeral layer open forever when idle. It also freezes layers if the global limit on
1631 : // ephemeral layer bytes has been breached.
1632 0 : pub(super) async fn maybe_freeze_ephemeral_layer(&self) {
1633 0 : let Ok(mut write_guard) = self.write_lock.try_lock() else {
1634 : // If the write lock is held, there is an active wal receiver: rolling open layers
1635 : // is their responsibility while they hold this lock.
1636 0 : return;
1637 : };
1638 :
1639 0 : let Ok(layers_guard) = self.layers.try_read() else {
1640 : // Don't block if the layer lock is busy
1641 0 : return;
1642 : };
1643 :
1644 0 : let Some(open_layer) = &layers_guard.layer_map().open_layer else {
1645 : // If there is no open layer, we have no layer freezing to do. However, we might need to generate
1646 : // some updates to disk_consistent_lsn and remote_consistent_lsn, in case we ingested some WAL regions
1647 : // that didn't result in writes to this shard.
1648 :
1649 : // Must not hold the layers lock while waiting for a flush.
1650 0 : drop(layers_guard);
1651 0 :
1652 0 : let last_record_lsn = self.get_last_record_lsn();
1653 0 : let disk_consistent_lsn = self.get_disk_consistent_lsn();
1654 0 : if last_record_lsn > disk_consistent_lsn {
1655 : // We have no open layer, but disk_consistent_lsn is behind the last record: this indicates
1656 : // we are a sharded tenant and have skipped some WAL
1657 0 : let last_freeze_ts = *self.last_freeze_ts.read().unwrap();
1658 0 : if last_freeze_ts.elapsed() >= self.get_checkpoint_timeout() {
1659 : // Only do this if we have been layer-less for longer than get_checkpoint_timeout, so that
1660 : // a shard without any data ingested (yet) doesn't write a remote index as soon as it
1661 : // sees its LSN advance.
1663 0 : tracing::debug!(
1664 0 : "Advancing disk_consistent_lsn past WAL ingest gap {} -> {}",
1665 : disk_consistent_lsn,
1666 : last_record_lsn
1667 : );
1668 :
1669 : // The flush loop will update remote consistent LSN as well as disk consistent LSN.
1670 0 : self.flush_frozen_layers_and_wait(last_record_lsn)
1671 0 : .await
1672 0 : .ok();
1673 0 : }
1674 0 : }
1675 :
1676 0 : return;
1677 : };
1678 :
1679 0 : let Some(current_size) = open_layer.try_len() else {
1680 : // Unexpected: since we hold the write guard, nobody else should be writing to this layer, so
1681 : // taking the read lock to get its size should always succeed.
1682 0 : tracing::warn!("Lock conflict while reading size of open layer");
1683 0 : return;
1684 : };
1685 :
1686 0 : let current_lsn = self.get_last_record_lsn();
1687 :
1688 0 : let checkpoint_distance_override = open_layer.tick().await;
1689 :
1690 0 : if let Some(size_override) = checkpoint_distance_override {
1691 0 : if current_size > size_override {
1692 : // This is not harmful, but it only happens in relatively rare cases where
1693 : // time-based checkpoints are not happening fast enough to keep the amount of
1694 : // ephemeral data within configured limits. It's a sign of stress on the system.
1695 0 : tracing::info!("Early-rolling open layer at size {current_size} (limit {size_override}) due to dirty data pressure");
1696 0 : }
1697 0 : }
1698 :
1699 0 : let checkpoint_distance =
1700 0 : checkpoint_distance_override.unwrap_or(self.get_checkpoint_distance());
1701 0 :
1702 0 : if self.should_roll(
1703 0 : current_size,
1704 0 : current_size,
1705 0 : checkpoint_distance,
1706 0 : self.get_last_record_lsn(),
1707 0 : self.last_freeze_at.load(),
1708 0 : open_layer.get_opened_at(),
1709 0 : ) {
1710 0 : let at_lsn = match open_layer.info() {
1711 0 : InMemoryLayerInfo::Frozen { lsn_start, lsn_end } => {
1712 0 : // We may reach this point if the layer was already frozen but not yet flushed: flushing
1713 0 : // happens asynchronously in the background.
1714 0 : tracing::debug!(
1715 0 : "Not freezing open layer, it's already frozen ({lsn_start}..{lsn_end})"
1716 : );
1717 0 : None
1718 : }
1719 : InMemoryLayerInfo::Open { .. } => {
1720 : // Upgrade to a write lock and freeze the layer
1721 0 : drop(layers_guard);
1722 0 : let mut layers_guard = self.layers.write().await;
1723 0 : let froze = layers_guard
1724 0 : .try_freeze_in_memory_layer(
1725 0 : current_lsn,
1726 0 : &self.last_freeze_at,
1727 0 : &mut write_guard,
1728 0 : )
1729 0 : .await;
1730 0 : Some(current_lsn).filter(|_| froze)
1731 : }
1732 : };
1733 0 : if let Some(lsn) = at_lsn {
1734 0 : let res: Result<u64, _> = self.flush_frozen_layers(lsn);
1735 0 : if let Err(e) = res {
1736 0 : tracing::info!("failed to flush frozen layer after background freeze: {e:#}");
1737 0 : }
1738 0 : }
1739 0 : }
1740 0 : }
1741 :
1742 : /// Outermost timeline compaction operation; downloads needed layers.
1743 364 : pub(crate) async fn compact(
1744 364 : self: &Arc<Self>,
1745 364 : cancel: &CancellationToken,
1746 364 : flags: EnumSet<CompactFlags>,
1747 364 : ctx: &RequestContext,
1748 364 : ) -> Result<(), CompactionError> {
1749 364 : // most likely the cancellation token is from a background task, but in tests it could be the
1750 364 : // request task as well.
1751 364 :
1752 364 : let prepare = async move {
1753 364 : let guard = self.compaction_lock.lock().await;
1754 :
1755 364 : let permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
1756 364 : BackgroundLoopKind::Compaction,
1757 364 : ctx,
1758 364 : )
1759 0 : .await;
1760 :
1761 364 : (guard, permit)
1762 364 : };
1763 :
1764 : // this wait probably never needs any "long time spent" logging, because we already nag if
1765 : // the compaction task goes over its period (20s), which is quite often in production.
1766 364 : let (_guard, _permit) = tokio::select! {
1767 : tuple = prepare => { tuple },
1768 : _ = self.cancel.cancelled() => return Ok(()),
1769 : _ = cancel.cancelled() => return Ok(()),
1770 : };
1771 :
1772 364 : let last_record_lsn = self.get_last_record_lsn();
1773 364 :
1774 364 : // The last record LSN could be zero if the timeline was just created
1775 364 : if !last_record_lsn.is_valid() {
1776 0 : warn!("Skipping compaction for potentially just initialized timeline, it has invalid last record lsn: {last_record_lsn}");
1777 0 : return Ok(());
1778 364 : }
1779 364 :
1780 364 : match self.get_compaction_algorithm_settings().kind {
1781 0 : CompactionAlgorithm::Tiered => self.compact_tiered(cancel, ctx).await,
1782 71143 : CompactionAlgorithm::Legacy => self.compact_legacy(cancel, flags, ctx).await,
1783 : }
1784 364 : }
1785 :
1786 : /// Mutate the timeline with a [`TimelineWriter`].
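///
/// A minimal usage sketch (hypothetical write sequence; see [`TimelineWriter`] for
/// the actual write methods):
///
/// ```ignore
/// let writer = timeline.writer().await; // takes the write lock
/// // ... apply put_* operations through `writer` ...
/// // dropping `writer` releases the write lock
/// ```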
1787 5133142 : pub(crate) async fn writer(&self) -> TimelineWriter<'_> {
1788 5133142 : TimelineWriter {
1789 5133142 : tl: self,
1790 5133142 : write_guard: self.write_lock.lock().await,
1791 : }
1792 5133142 : }
1793 :
1794 0 : pub(crate) fn activate(
1795 0 : self: &Arc<Self>,
1796 0 : parent: Arc<crate::tenant::Tenant>,
1797 0 : broker_client: BrokerClientChannel,
1798 0 : background_jobs_can_start: Option<&completion::Barrier>,
1799 0 : ctx: &RequestContext,
1800 0 : ) {
1801 0 : if self.tenant_shard_id.is_shard_zero() {
1802 0 : // Logical size is only maintained accurately on shard zero.
1803 0 : self.spawn_initial_logical_size_computation_task(ctx);
1804 0 : }
1805 0 : self.launch_wal_receiver(ctx, broker_client);
1806 0 : self.set_state(TimelineState::Active);
1807 0 : self.launch_eviction_task(parent, background_jobs_can_start);
1808 0 : }
1809 :
1810 : /// After this function returns, no timeline-scoped tasks are left running.
1811 : ///
1812 : /// The preferred pattern is (see the sketch at the end of this comment):
1813 : /// - in any spawned tasks, keep Timeline::guard open + Timeline::cancel / child token
1814 : /// - if early shutdown (not just cancellation) of a sub-tree of tasks is required,
1815 : /// go the extra mile and keep track of JoinHandles
1816 : /// - Keep track of JoinHandles using a passed-down `Arc<Mutex<Option<JoinSet>>>` or similar,
1817 : /// instead of spawning directly on a runtime. It is a more composable / testable pattern.
1818 : ///
1819 : /// For legacy reasons, we still have multiple tasks spawned using
1820 : /// `task_mgr::spawn(X, Some(tenant_id), Some(timeline_id))`.
1821 : /// We refer to these as "timeline-scoped task_mgr tasks".
1822 : /// Some of these tasks are already sensitive to Timeline::cancel while others are
1823 : /// not sensitive to Timeline::cancel and instead respect [`task_mgr::shutdown_token`]
1824 : /// or [`task_mgr::shutdown_watcher`].
1825 : /// We want to gradually convert the code base away from these.
1826 : ///
1827 : /// Here is an inventory of timeline-scoped task_mgr tasks that are still sensitive to
1828 : /// `task_mgr::shutdown_{token,watcher}` (there are also tenant-scoped and global-scoped
1829 : /// ones that aren't mentioned here):
1830 : /// - [`TaskKind::TimelineDeletionWorker`]
1831 : /// - NB: also used for tenant deletion
1832 : /// - [`TaskKind::RemoteUploadTask`]
1833 : /// - [`TaskKind::InitialLogicalSizeCalculation`]
1834 : /// - [`TaskKind::DownloadAllRemoteLayers`] (can we get rid of it?)
1835 : /// Inventory of timeline-scoped task_mgr tasks that use spawn but aren't sensitive:
1836 : /// - [`TaskKind::Eviction`]
1837 : /// - [`TaskKind::LayerFlushTask`]
1838 : /// - [`TaskKind::OndemandLogicalSizeCalculation`]
1839 : /// - [`TaskKind::GarbageCollector`] (immediate_gc is timeline-scoped)
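///
/// A minimal sketch of the preferred pattern above (hypothetical task body;
/// `do_work` stands in for the task's real work):
///
/// ```ignore
/// let Ok(_guard) = timeline.gate.enter() else { return };
/// tokio::select! {
///     _ = timeline.cancel.cancelled() => {} // drop the gate guard promptly on shutdown
///     _ = do_work() => {}
/// }
/// ```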
1840 8 : pub(crate) async fn shutdown(&self, mode: ShutdownMode) {
1841 8 : debug_assert_current_span_has_tenant_and_timeline_id();
1842 :
1843 8 : let try_freeze_and_flush = match mode {
1844 6 : ShutdownMode::FreezeAndFlush => true,
1845 2 : ShutdownMode::Hard => false,
1846 : };
1847 :
1848 : // Regardless of whether we're going to try_freeze_and_flush
1849 : // or not, stop ingesting any more data. Walreceiver only provides
1850 : // cancellation but no "wait until gone", because it uses the Timeline::gate.
1851 : // So, only after the self.gate.close() below will we know for sure that
1852 : // no walreceiver tasks are left.
1853 : // For `try_freeze_and_flush=true`, this means that we might still be ingesting
1854 : // data during the call to `self.freeze_and_flush()` below.
1855 : // That's not ideal, but, we don't have the concept of a ChildGuard,
1856 : // which is what we'd need to properly model early shutdown of the walreceiver
1857 : // task sub-tree before the other Timeline task sub-trees.
1858 8 : let walreceiver = self.walreceiver.lock().unwrap().take();
1859 8 : tracing::debug!(
1860 0 : is_some = walreceiver.is_some(),
1861 0 : "Waiting for WalReceiverManager..."
1862 : );
1863 8 : if let Some(walreceiver) = walreceiver {
1864 0 : walreceiver.cancel();
1865 8 : }
1866 : // ... and inform any waiters for newer LSNs that there won't be any.
1867 8 : self.last_record_lsn.shutdown();
1868 8 :
1869 8 : if try_freeze_and_flush {
1870 : // we shut down walreceiver above, so, we won't add anything more
1871 : // to the InMemoryLayer; freeze it and wait for all frozen layers
1872 : // to reach the disk & upload queue, then shut the upload queue and
1873 : // wait for it to drain.
1874 6 : match self.freeze_and_flush().await {
1875 : Ok(_) => {
1876 : // drain the upload queue
1877 : // if we did not wait for completion here, our shutdown process might not
1878 : // wait for remote uploads to complete at all, as new tasks can be
1879 : // spawned forever.
1880 : //
1881 : // what is problematic is the shutting down of RemoteTimelineClient, because
1882 : // obviously it does not make sense to stop while we wait for it, but what
1883 : // about corner cases like s3 suddenly hanging up?
1884 6 : self.remote_client.shutdown().await;
1885 : }
1886 0 : Err(e) => {
1887 0 : // Non-fatal. Shutdown is infallible. Failures to flush just mean that
1888 0 : // we have some extra WAL replay to do next time the timeline starts.
1889 0 : warn!("failed to freeze and flush: {e:#}");
1890 : }
1891 : }
1892 2 : }
1893 :
1894 : // Signal any subscribers to our cancellation token to drop out
1895 8 : tracing::debug!("Cancelling CancellationToken");
1896 8 : self.cancel.cancel();
1897 8 :
1898 8 : // Transition the remote_client into a state where it's only useful for timeline deletion.
1899 8 : // (The deletion use case is why we can't just hook up remote_client to Self::cancel).)
1900 8 : self.remote_client.stop();
1901 8 : // As documented in remote_client.stop()'s doc comment, it's our responsibility
1902 8 : // to shut down the upload queue tasks.
1903 8 : // TODO: fix that, task management should be encapsulated inside remote_client.
1904 8 : task_mgr::shutdown_tasks(
1905 8 : Some(TaskKind::RemoteUploadTask),
1906 8 : Some(self.tenant_shard_id),
1907 8 : Some(self.timeline_id),
1908 8 : )
1909 0 : .await;
1910 :
1911 : // TODO: work toward making this a no-op. See this funciton's doc comment for more context.
1912 8 : tracing::debug!("Waiting for tasks...");
1913 8 : task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), Some(self.timeline_id)).await;
1914 :
1915 : // Finally wait until any gate-holders are complete.
1916 : //
1917 : // TODO: once above shutdown_tasks is a no-op, we can close the gate before calling shutdown_tasks
1918 : // and use a TBD variant of shutdown_tasks that asserts that there were no tasks left.
1919 8 : self.gate.close().await;
1920 :
1921 8 : self.metrics.shutdown();
1922 8 : }
1923 :
1924 378 : pub(crate) fn set_state(&self, new_state: TimelineState) {
1925 378 : match (self.current_state(), new_state) {
1926 378 : (equal_state_1, equal_state_2) if equal_state_1 == equal_state_2 => {
1927 2 : info!("Ignoring new state, equal to the existing one: {equal_state_2:?}");
1928 : }
1929 0 : (st, TimelineState::Loading) => {
1930 0 : error!("ignoring transition from {st:?} into Loading state");
1931 : }
1932 0 : (TimelineState::Broken { .. }, new_state) => {
1933 0 : error!("Ignoring state update {new_state:?} for broken timeline");
1934 : }
1935 : (TimelineState::Stopping, TimelineState::Active) => {
1936 0 : error!("Not activating a Stopping timeline");
1937 : }
1938 376 : (_, new_state) => {
1939 376 : self.state.send_replace(new_state);
1940 376 : }
1941 : }
1942 378 : }
1943 :
1944 2 : pub(crate) fn set_broken(&self, reason: String) {
1945 2 : let backtrace_str: String = format!("{}", std::backtrace::Backtrace::force_capture());
1946 2 : let broken_state = TimelineState::Broken {
1947 2 : reason,
1948 2 : backtrace: backtrace_str,
1949 2 : };
1950 2 : self.set_state(broken_state);
1951 2 :
1952 2 : // Although the Broken state is not equivalent to shutdown() (shutdown will be called
1953 2 : // later when this tenant is detached or the process shuts down), firing the cancellation token
1954 2 : // here avoids the need for other tasks to watch for the Broken state explicitly.
1955 2 : self.cancel.cancel();
1956 2 : }
1957 :
1958 225375 : pub(crate) fn current_state(&self) -> TimelineState {
1959 225375 : self.state.borrow().clone()
1960 225375 : }
1961 :
1962 6 : pub(crate) fn is_broken(&self) -> bool {
1963 6 : matches!(&*self.state.borrow(), TimelineState::Broken { .. })
1964 6 : }
1965 :
1966 220 : pub(crate) fn is_active(&self) -> bool {
1967 220 : self.current_state() == TimelineState::Active
1968 220 : }
1969 :
1970 1116 : pub(crate) fn is_stopping(&self) -> bool {
1971 1116 : self.current_state() == TimelineState::Stopping
1972 1116 : }
1973 :
1974 0 : pub(crate) fn subscribe_for_state_updates(&self) -> watch::Receiver<TimelineState> {
1975 0 : self.state.subscribe()
1976 0 : }
1977 :
1978 223663 : pub(crate) async fn wait_to_become_active(
1979 223663 : &self,
1980 223663 : _ctx: &RequestContext, // Prepare for use by cancellation
1981 223663 : ) -> Result<(), TimelineState> {
1982 223663 : let mut receiver = self.state.subscribe();
1983 223663 : loop {
1984 223663 : let current_state = receiver.borrow().clone();
1985 223663 : match current_state {
1986 : TimelineState::Loading => {
1987 0 : receiver
1988 0 : .changed()
1989 0 : .await
1990 0 : .expect("holding a reference to self");
1991 : }
1992 : TimelineState::Active { .. } => {
1993 223661 : return Ok(());
1994 : }
1995 : TimelineState::Broken { .. } | TimelineState::Stopping => {
1996 : // There's no chance the timeline can transition back into ::Active
1997 2 : return Err(current_state);
1998 : }
1999 : }
2000 : }
2001 223663 : }
2002 :
2003 0 : pub(crate) async fn layer_map_info(&self, reset: LayerAccessStatsReset) -> LayerMapInfo {
2004 0 : let guard = self.layers.read().await;
2005 0 : let layer_map = guard.layer_map();
2006 0 : let mut in_memory_layers = Vec::with_capacity(layer_map.frozen_layers.len() + 1);
2007 0 : if let Some(open_layer) = &layer_map.open_layer {
2008 0 : in_memory_layers.push(open_layer.info());
2009 0 : }
2010 0 : for frozen_layer in &layer_map.frozen_layers {
2011 0 : in_memory_layers.push(frozen_layer.info());
2012 0 : }
2013 :
2014 0 : let mut historic_layers = Vec::new();
2015 0 : for historic_layer in layer_map.iter_historic_layers() {
2016 0 : let historic_layer = guard.get_from_desc(&historic_layer);
2017 0 : historic_layers.push(historic_layer.info(reset));
2018 0 : }
2019 :
2020 0 : LayerMapInfo {
2021 0 : in_memory_layers,
2022 0 : historic_layers,
2023 0 : }
2024 0 : }
2025 :
2026 0 : #[instrument(skip_all, fields(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))]
2027 : pub(crate) async fn download_layer(
2028 : &self,
2029 : layer_file_name: &LayerName,
2030 : ) -> anyhow::Result<Option<bool>> {
2031 : let Some(layer) = self.find_layer(layer_file_name).await else {
2032 : return Ok(None);
2033 : };
2034 :
2035 : layer.download().await?;
2036 :
2037 : Ok(Some(true))
2038 : }
2039 :
2040 : /// Evict just one layer.
2041 : ///
2042 : /// Returns `Ok(None)` in the case where the layer could not be found by its `layer_file_name`.
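///
/// A hypothetical call site, illustrating the return contract:
///
/// ```ignore
/// match timeline.evict_layer(&layer_file_name).await? {
///     None => { /* no layer by that name */ }
///     Some(true) => { /* evicted */ }
///     Some(false) => { /* found, but eviction failed (e.g. it raced with a download) */ }
/// }
/// ```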
2043 0 : pub(crate) async fn evict_layer(
2044 0 : &self,
2045 0 : layer_file_name: &LayerName,
2046 0 : ) -> anyhow::Result<Option<bool>> {
2047 0 : let _gate = self
2048 0 : .gate
2049 0 : .enter()
2050 0 : .map_err(|_| anyhow::anyhow!("Shutting down"))?;
2051 :
2052 0 : let Some(local_layer) = self.find_layer(layer_file_name).await else {
2053 0 : return Ok(None);
2054 : };
2055 :
2056 : // curl uses this (120s) request timeout by default
2057 0 : let timeout = std::time::Duration::from_secs(120);
2058 0 :
2059 0 : match local_layer.evict_and_wait(timeout).await {
2060 0 : Ok(()) => Ok(Some(true)),
2061 0 : Err(EvictionError::NotFound) => Ok(Some(false)),
2062 0 : Err(EvictionError::Downloaded) => Ok(Some(false)),
2063 0 : Err(EvictionError::Timeout) => Ok(Some(false)),
2064 : }
2065 0 : }
2066 :
2067 4803026 : fn should_roll(
2068 4803026 : &self,
2069 4803026 : layer_size: u64,
2070 4803026 : projected_layer_size: u64,
2071 4803026 : checkpoint_distance: u64,
2072 4803026 : projected_lsn: Lsn,
2073 4803026 : last_freeze_at: Lsn,
2074 4803026 : opened_at: Instant,
2075 4803026 : ) -> bool {
2076 4803026 : let distance = projected_lsn.widening_sub(last_freeze_at);
2077 4803026 :
2078 4803026 : // Rolling the open layer can be triggered by:
2079 4803026 : // 1. The distance from the last LSN we rolled at. This bounds the amount of WAL that
2080 4803026 : // the safekeepers need to store. For sharded tenants, we multiply by shard count to
2081 4803026 : // account for how writes are distributed across shards: we expect each node to consume
2082 4803026 : // 1/count of the LSN on average.
2083 4803026 : // 2. The size of the currently open layer.
2084 4803026 : // 3. The time since the last roll: it lets safekeepers regard the pageserver as caught
2085 4803026 : // up and suspend activity.
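//
// A worked example with hypothetical numbers for trigger (1): with
// checkpoint_distance = 256 MiB and a shard count of 4, the layer is rolled once the
// LSN distance since the last roll exceeds 4 * 256 MiB = 1 GiB, i.e. once this shard
// has likely ingested its ~256 MiB share of that WAL.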
2086 4803026 : if distance >= checkpoint_distance as i128 * self.shard_identity.count.count() as i128 {
2087 0 : info!(
2088 0 : "Will roll layer at {} with layer size {} due to LSN distance ({})",
2089 : projected_lsn, layer_size, distance
2090 : );
2091 :
2092 0 : true
2093 4803026 : } else if projected_layer_size >= checkpoint_distance {
2094 80 : info!(
2095 0 : "Will roll layer at {} with layer size {} due to layer size ({})",
2096 : projected_lsn, layer_size, projected_layer_size
2097 : );
2098 :
2099 80 : true
2100 4802946 : } else if distance > 0 && opened_at.elapsed() >= self.get_checkpoint_timeout() {
2101 0 : info!(
2102 0 : "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})",
2103 0 : projected_lsn,
2104 0 : layer_size,
2105 0 : opened_at.elapsed()
2106 : );
2107 :
2108 0 : true
2109 : } else {
2110 4802946 : false
2111 : }
2112 4803026 : }
2113 : }
2114 :
2115 : /// Number of times we will compute partition within a checkpoint distance.
2116 : const REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE: u64 = 10;
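// For example (hypothetical value): with a checkpoint_distance of 256 MiB, the
// repartition_threshold computed in `Timeline::new` is 256 MiB / 10 ≈ 25.6 MiB of WAL
// between repartitioning attempts.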
2117 :
2118 : // Private functions
2119 : impl Timeline {
2120 12 : pub(crate) fn get_lsn_lease_length(&self) -> Duration {
2121 12 : let tenant_conf = self.tenant_conf.load();
2122 12 : tenant_conf
2123 12 : .tenant_conf
2124 12 : .lsn_lease_length
2125 12 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
2126 12 : }
2127 :
2128 : // TODO(yuchen): remove unused flag after implementing https://github.com/neondatabase/neon/issues/8072
2129 : #[allow(unused)]
2130 0 : pub(crate) fn get_lsn_lease_length_for_ts(&self) -> Duration {
2131 0 : let tenant_conf = self.tenant_conf.load();
2132 0 : tenant_conf
2133 0 : .tenant_conf
2134 0 : .lsn_lease_length_for_ts
2135 0 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length_for_ts)
2136 0 : }
2137 :
2138 198 : pub(crate) fn get_switch_aux_file_policy(&self) -> AuxFilePolicy {
2139 198 : let tenant_conf = self.tenant_conf.load();
2140 198 : tenant_conf
2141 198 : .tenant_conf
2142 198 : .switch_aux_file_policy
2143 198 : .unwrap_or(self.conf.default_tenant_conf.switch_aux_file_policy)
2144 198 : }
2145 :
2146 0 : pub(crate) fn get_lazy_slru_download(&self) -> bool {
2147 0 : let tenant_conf = self.tenant_conf.load();
2148 0 : tenant_conf
2149 0 : .tenant_conf
2150 0 : .lazy_slru_download
2151 0 : .unwrap_or(self.conf.default_tenant_conf.lazy_slru_download)
2152 0 : }
2153 :
2154 4804353 : fn get_checkpoint_distance(&self) -> u64 {
2155 4804353 : let tenant_conf = self.tenant_conf.load();
2156 4804353 : tenant_conf
2157 4804353 : .tenant_conf
2158 4804353 : .checkpoint_distance
2159 4804353 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
2160 4804353 : }
2161 :
2162 4802946 : fn get_checkpoint_timeout(&self) -> Duration {
2163 4802946 : let tenant_conf = self.tenant_conf.load();
2164 4802946 : tenant_conf
2165 4802946 : .tenant_conf
2166 4802946 : .checkpoint_timeout
2167 4802946 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
2168 4802946 : }
2169 :
2170 504 : fn get_compaction_target_size(&self) -> u64 {
2171 504 : let tenant_conf = self.tenant_conf.load();
2172 504 : tenant_conf
2173 504 : .tenant_conf
2174 504 : .compaction_target_size
2175 504 : .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
2176 504 : }
2177 :
2178 364 : fn get_compaction_threshold(&self) -> usize {
2179 364 : let tenant_conf = self.tenant_conf.load();
2180 364 : tenant_conf
2181 364 : .tenant_conf
2182 364 : .compaction_threshold
2183 364 : .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
2184 364 : }
2185 :
2186 14 : fn get_image_creation_threshold(&self) -> usize {
2187 14 : let tenant_conf = self.tenant_conf.load();
2188 14 : tenant_conf
2189 14 : .tenant_conf
2190 14 : .image_creation_threshold
2191 14 : .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
2192 14 : }
2193 :
2194 364 : fn get_compaction_algorithm_settings(&self) -> CompactionAlgorithmSettings {
2195 364 : let tenant_conf = &self.tenant_conf.load();
2196 364 : tenant_conf
2197 364 : .tenant_conf
2198 364 : .compaction_algorithm
2199 364 : .as_ref()
2200 364 : .unwrap_or(&self.conf.default_tenant_conf.compaction_algorithm)
2201 364 : .clone()
2202 364 : }
2203 :
2204 0 : fn get_eviction_policy(&self) -> EvictionPolicy {
2205 0 : let tenant_conf = self.tenant_conf.load();
2206 0 : tenant_conf
2207 0 : .tenant_conf
2208 0 : .eviction_policy
2209 0 : .unwrap_or(self.conf.default_tenant_conf.eviction_policy)
2210 0 : }
2211 :
2212 387 : fn get_evictions_low_residence_duration_metric_threshold(
2213 387 : tenant_conf: &TenantConfOpt,
2214 387 : default_tenant_conf: &TenantConf,
2215 387 : ) -> Duration {
2216 387 : tenant_conf
2217 387 : .evictions_low_residence_duration_metric_threshold
2218 387 : .unwrap_or(default_tenant_conf.evictions_low_residence_duration_metric_threshold)
2219 387 : }
2220 :
2221 504 : fn get_image_layer_creation_check_threshold(&self) -> u8 {
2222 504 : let tenant_conf = self.tenant_conf.load();
2223 504 : tenant_conf
2224 504 : .tenant_conf
2225 504 : .image_layer_creation_check_threshold
2226 504 : .unwrap_or(
2227 504 : self.conf
2228 504 : .default_tenant_conf
2229 504 : .image_layer_creation_check_threshold,
2230 504 : )
2231 504 : }
2232 :
2233 8 : pub(super) fn tenant_conf_updated(&self, new_conf: &TenantConfOpt) {
2234 8 : // NB: most tenant conf options are read by background loops, so changes
2235 8 : // will be picked up automatically.
2236 8 :
2237 8 : // The threshold is embedded in the metric. So, we need to update it.
2238 8 : {
2239 8 : let new_threshold = Self::get_evictions_low_residence_duration_metric_threshold(
2240 8 : new_conf,
2241 8 : &self.conf.default_tenant_conf,
2242 8 : );
2243 8 :
2244 8 : let tenant_id_str = self.tenant_shard_id.tenant_id.to_string();
2245 8 : let shard_id_str = format!("{}", self.tenant_shard_id.shard_slug());
2246 8 :
2247 8 : let timeline_id_str = self.timeline_id.to_string();
2248 8 : self.metrics
2249 8 : .evictions_with_low_residence_duration
2250 8 : .write()
2251 8 : .unwrap()
2252 8 : .change_threshold(
2253 8 : &tenant_id_str,
2254 8 : &shard_id_str,
2255 8 : &timeline_id_str,
2256 8 : new_threshold,
2257 8 : );
2258 8 : }
2259 8 : }
2260 :
2261 : /// Open a Timeline handle.
2262 : ///
2263 : /// Loads the metadata for the timeline into memory, but not the layer map.
2264 : #[allow(clippy::too_many_arguments)]
2265 379 : pub(super) fn new(
2266 379 : conf: &'static PageServerConf,
2267 379 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
2268 379 : metadata: &TimelineMetadata,
2269 379 : ancestor: Option<Arc<Timeline>>,
2270 379 : timeline_id: TimelineId,
2271 379 : tenant_shard_id: TenantShardId,
2272 379 : generation: Generation,
2273 379 : shard_identity: ShardIdentity,
2274 379 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
2275 379 : resources: TimelineResources,
2276 379 : pg_version: u32,
2277 379 : state: TimelineState,
2278 379 : aux_file_policy: Option<AuxFilePolicy>,
2279 379 : cancel: CancellationToken,
2280 379 : ) -> Arc<Self> {
2281 379 : let disk_consistent_lsn = metadata.disk_consistent_lsn();
2282 379 : let (state, _) = watch::channel(state);
2283 379 :
2284 379 : let (layer_flush_start_tx, _) = tokio::sync::watch::channel((0, disk_consistent_lsn));
2285 379 : let (layer_flush_done_tx, _) = tokio::sync::watch::channel((0, Ok(())));
2286 379 :
2287 379 : let evictions_low_residence_duration_metric_threshold = {
2288 379 : let loaded_tenant_conf = tenant_conf.load();
2289 379 : Self::get_evictions_low_residence_duration_metric_threshold(
2290 379 : &loaded_tenant_conf.tenant_conf,
2291 379 : &conf.default_tenant_conf,
2292 379 : )
2293 379 : };
2294 379 :
2295 379 : Arc::new_cyclic(|myself| {
2296 379 : let metrics = TimelineMetrics::new(
2297 379 : &tenant_shard_id,
2298 379 : &timeline_id,
2299 379 : crate::metrics::EvictionsWithLowResidenceDurationBuilder::new(
2300 379 : "mtime",
2301 379 : evictions_low_residence_duration_metric_threshold,
2302 379 : ),
2303 379 : );
2304 379 : let aux_file_metrics = metrics.aux_file_size_gauge.clone();
2305 :
2306 379 : let mut result = Timeline {
2307 379 : conf,
2308 379 : tenant_conf,
2309 379 : myself: myself.clone(),
2310 379 : timeline_id,
2311 379 : tenant_shard_id,
2312 379 : generation,
2313 379 : shard_identity,
2314 379 : pg_version,
2315 379 : layers: Default::default(),
2316 379 :
2317 379 : walredo_mgr,
2318 379 : walreceiver: Mutex::new(None),
2319 379 :
2320 379 : remote_client: Arc::new(resources.remote_client),
2321 379 :
2322 379 : // initialize in-memory 'last_record_lsn' from 'disk_consistent_lsn'.
2323 379 : last_record_lsn: SeqWait::new(RecordLsn {
2324 379 : last: disk_consistent_lsn,
2325 379 : prev: metadata.prev_record_lsn().unwrap_or(Lsn(0)),
2326 379 : }),
2327 379 : disk_consistent_lsn: AtomicLsn::new(disk_consistent_lsn.0),
2328 379 :
2329 379 : last_freeze_at: AtomicLsn::new(disk_consistent_lsn.0),
2330 379 : last_freeze_ts: RwLock::new(Instant::now()),
2331 379 :
2332 379 : loaded_at: (disk_consistent_lsn, SystemTime::now()),
2333 379 :
2334 379 : ancestor_timeline: ancestor,
2335 379 : ancestor_lsn: metadata.ancestor_lsn(),
2336 379 :
2337 379 : metrics,
2338 379 :
2339 379 : query_metrics: crate::metrics::SmgrQueryTimePerTimeline::new(
2340 379 : &tenant_shard_id,
2341 379 : &timeline_id,
2342 379 : ),
2343 379 :
2344 2653 : directory_metrics: array::from_fn(|_| AtomicU64::new(0)),
2345 379 :
2346 379 : flush_loop_state: Mutex::new(FlushLoopState::NotStarted),
2347 379 :
2348 379 : layer_flush_start_tx,
2349 379 : layer_flush_done_tx,
2350 379 :
2351 379 : write_lock: tokio::sync::Mutex::new(None),
2352 379 :
2353 379 : gc_info: std::sync::RwLock::new(GcInfo::default()),
2354 379 :
2355 379 : latest_gc_cutoff_lsn: Rcu::new(metadata.latest_gc_cutoff_lsn()),
2356 379 : initdb_lsn: metadata.initdb_lsn(),
2357 379 :
2358 379 : current_logical_size: if disk_consistent_lsn.is_valid() {
2359 : // we're creating timeline data with some layer files existing locally,
2360 : // so we need to recalculate the timeline's logical size based on data in the layers.
2361 230 : LogicalSize::deferred_initial(disk_consistent_lsn)
2362 : } else {
2363 : // we're creating timeline data without any layers existing locally,
2364 : // initial logical size is 0.
2365 149 : LogicalSize::empty_initial()
2366 : },
2367 379 : partitioning: tokio::sync::Mutex::new((
2368 379 : (KeyPartitioning::new(), KeyPartitioning::new().into_sparse()),
2369 379 : Lsn(0),
2370 379 : )),
2371 379 : repartition_threshold: 0,
2372 379 : last_image_layer_creation_check_at: AtomicLsn::new(0),
2373 379 :
2374 379 : last_received_wal: Mutex::new(None),
2375 379 : rel_size_cache: RwLock::new(RelSizeCache {
2376 379 : complete_as_of: disk_consistent_lsn,
2377 379 : map: HashMap::new(),
2378 379 : }),
2379 379 :
2380 379 : download_all_remote_layers_task_info: RwLock::new(None),
2381 379 :
2382 379 : state,
2383 379 :
2384 379 : eviction_task_timeline_state: tokio::sync::Mutex::new(
2385 379 : EvictionTaskTimelineState::default(),
2386 379 : ),
2387 379 : delete_progress: Arc::new(tokio::sync::Mutex::new(DeleteTimelineFlow::default())),
2388 379 :
2389 379 : cancel,
2390 379 : gate: Gate::default(),
2391 379 :
2392 379 : compaction_lock: tokio::sync::Mutex::default(),
2393 379 : gc_lock: tokio::sync::Mutex::default(),
2394 379 :
2395 379 : standby_horizon: AtomicLsn::new(0),
2396 379 :
2397 379 : timeline_get_throttle: resources.timeline_get_throttle,
2398 379 :
2399 379 : aux_files: tokio::sync::Mutex::new(AuxFilesState {
2400 379 : dir: None,
2401 379 : n_deltas: 0,
2402 379 : }),
2403 379 :
2404 379 : aux_file_size_estimator: AuxFileSizeEstimator::new(aux_file_metrics),
2405 379 :
2406 379 : last_aux_file_policy: AtomicAuxFilePolicy::new(aux_file_policy),
2407 379 :
2408 379 : #[cfg(test)]
2409 379 : extra_test_dense_keyspace: ArcSwap::new(Arc::new(KeySpace::default())),
2410 379 : };
2411 379 : result.repartition_threshold =
2412 379 : result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE;
2413 379 :
2414 379 : result
2415 379 : .metrics
2416 379 : .last_record_gauge
2417 379 : .set(disk_consistent_lsn.0 as i64);
2418 379 : result
2419 379 : })
2420 379 : }
2421 :
2422 512 : pub(super) fn maybe_spawn_flush_loop(self: &Arc<Self>) {
2423 512 : let Ok(guard) = self.gate.enter() else {
2424 0 : info!("cannot start flush loop when the timeline gate has already been closed");
2425 0 : return;
2426 : };
2427 512 : let mut flush_loop_state = self.flush_loop_state.lock().unwrap();
2428 512 : match *flush_loop_state {
2429 372 : FlushLoopState::NotStarted => (),
2430 : FlushLoopState::Running { .. } => {
2431 140 : info!(
2432 0 : "skipping attempt to start flush_loop twice {}/{}",
2433 0 : self.tenant_shard_id, self.timeline_id
2434 : );
2435 140 : return;
2436 : }
2437 : FlushLoopState::Exited => {
2438 0 : warn!(
2439 0 : "ignoring attempt to restart exited flush_loop {}/{}",
2440 0 : self.tenant_shard_id, self.timeline_id
2441 : );
2442 0 : return;
2443 : }
2444 : }
2445 :
2446 372 : let layer_flush_start_rx = self.layer_flush_start_tx.subscribe();
2447 372 : let self_clone = Arc::clone(self);
2448 372 :
2449 372 : debug!("spawning flush loop");
2450 372 : *flush_loop_state = FlushLoopState::Running {
2451 372 : #[cfg(test)]
2452 372 : expect_initdb_optimization: false,
2453 372 : #[cfg(test)]
2454 372 : initdb_optimization_count: 0,
2455 372 : };
2456 372 : task_mgr::spawn(
2457 372 : task_mgr::BACKGROUND_RUNTIME.handle(),
2458 372 : task_mgr::TaskKind::LayerFlushTask,
2459 372 : Some(self.tenant_shard_id),
2460 372 : Some(self.timeline_id),
2461 372 : "layer flush task",
2462 : false,
2463 372 : async move {
2464 372 : let _guard = guard;
2465 372 : let background_ctx = RequestContext::todo_child(TaskKind::LayerFlushTask, DownloadBehavior::Error);
2466 60145 : self_clone.flush_loop(layer_flush_start_rx, &background_ctx).await;
2467 8 : let mut flush_loop_state = self_clone.flush_loop_state.lock().unwrap();
2468 8 : assert!(matches!(*flush_loop_state, FlushLoopState::Running{..}));
2469 8 : *flush_loop_state = FlushLoopState::Exited;
2470 8 : Ok(())
2471 8 : }
2472 372 : .instrument(info_span!(parent: None, "layer flush task", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
2473 : );
2474 512 : }
2475 :
2476 : /// Creates and starts the wal receiver.
2477 : ///
2478 : /// This function is expected to be called at most once per Timeline's lifecycle
2479 : /// when the timeline is activated.
2480 0 : fn launch_wal_receiver(
2481 0 : self: &Arc<Self>,
2482 0 : ctx: &RequestContext,
2483 0 : broker_client: BrokerClientChannel,
2484 0 : ) {
2485 0 : info!(
2486 0 : "launching WAL receiver for timeline {} of tenant {}",
2487 0 : self.timeline_id, self.tenant_shard_id
2488 : );
2489 :
2490 0 : let tenant_conf = self.tenant_conf.load();
2491 0 : let wal_connect_timeout = tenant_conf
2492 0 : .tenant_conf
2493 0 : .walreceiver_connect_timeout
2494 0 : .unwrap_or(self.conf.default_tenant_conf.walreceiver_connect_timeout);
2495 0 : let lagging_wal_timeout = tenant_conf
2496 0 : .tenant_conf
2497 0 : .lagging_wal_timeout
2498 0 : .unwrap_or(self.conf.default_tenant_conf.lagging_wal_timeout);
2499 0 : let max_lsn_wal_lag = tenant_conf
2500 0 : .tenant_conf
2501 0 : .max_lsn_wal_lag
2502 0 : .unwrap_or(self.conf.default_tenant_conf.max_lsn_wal_lag);
2503 0 :
2504 0 : let mut guard = self.walreceiver.lock().unwrap();
2505 0 : assert!(
2506 0 : guard.is_none(),
2507 0 : "multiple launches / re-launches of WAL receiver are not supported"
2508 : );
2509 0 : *guard = Some(WalReceiver::start(
2510 0 : Arc::clone(self),
2511 0 : WalReceiverConf {
2512 0 : wal_connect_timeout,
2513 0 : lagging_wal_timeout,
2514 0 : max_lsn_wal_lag,
2515 0 : auth_token: crate::config::SAFEKEEPER_AUTH_TOKEN.get().cloned(),
2516 0 : availability_zone: self.conf.availability_zone.clone(),
2517 0 : ingest_batch_size: self.conf.ingest_batch_size,
2518 0 : },
2519 0 : broker_client,
2520 0 : ctx,
2521 0 : ));
2522 0 : }
2523 :
2524 : /// Initialize with an empty layer map. Used when creating a new timeline.
2525 373 : pub(super) fn init_empty_layer_map(&self, start_lsn: Lsn) {
2526 373 : let mut layers = self.layers.try_write().expect(
2527 373 : "in the context where we call this function, no other task has access to the object",
2528 373 : );
2529 373 : layers.initialize_empty(Lsn(start_lsn.0));
2530 373 : }
2531 :
2532 : /// Scan the timeline directory, clean up, populate the layer map, and schedule uploads for local-only
2533 : /// files.
2534 6 : pub(super) async fn load_layer_map(
2535 6 : &self,
2536 6 : disk_consistent_lsn: Lsn,
2537 6 : index_part: Option<IndexPart>,
2538 6 : ) -> anyhow::Result<()> {
2539 : use init::{Decision::*, Discovered, DismissedLayer};
2540 : use LayerName::*;
2541 :
2542 6 : let mut guard = self.layers.write().await;
2543 :
2544 6 : let timer = self.metrics.load_layer_map_histo.start_timer();
2545 6 :
2546 6 : // Scan timeline directory and create ImageLayerName and DeltaFilename
2547 6 : // structs representing all files on disk
2548 6 : let timeline_path = self
2549 6 : .conf
2550 6 : .timeline_path(&self.tenant_shard_id, &self.timeline_id);
2551 6 : let conf = self.conf;
2552 6 : let span = tracing::Span::current();
2553 6 :
2554 6 : // Copy to move into the task we're about to spawn
2555 6 : let this = self.myself.upgrade().expect("&self method holds the arc");
2556 :
2557 6 : let (loaded_layers, needs_cleanup, total_physical_size) = tokio::task::spawn_blocking({
2558 6 : move || {
2559 6 : let _g = span.entered();
2560 6 : let discovered = init::scan_timeline_dir(&timeline_path)?;
2561 6 : let mut discovered_layers = Vec::with_capacity(discovered.len());
2562 6 : let mut unrecognized_files = Vec::new();
2563 6 :
2564 6 : let mut path = timeline_path;
2565 :
2566 22 : for discovered in discovered {
2567 16 : let (name, kind) = match discovered {
2568 16 : Discovered::Layer(layer_file_name, local_metadata) => {
2569 16 : discovered_layers.push((layer_file_name, local_metadata));
2570 16 : continue;
2571 : }
2572 0 : Discovered::IgnoredBackup(path) => {
2573 0 : std::fs::remove_file(path)
2574 0 : .or_else(fs_ext::ignore_not_found)
2575 0 : .fatal_err("Removing .old file");
2576 0 : continue;
2577 : }
2578 0 : Discovered::Unknown(file_name) => {
2579 0 : // we will later error if there are any
2580 0 : unrecognized_files.push(file_name);
2581 0 : continue;
2582 : }
2583 0 : Discovered::Ephemeral(name) => (name, "old ephemeral file"),
2584 0 : Discovered::Temporary(name) => (name, "temporary timeline file"),
2585 0 : Discovered::TemporaryDownload(name) => (name, "temporary download"),
2586 : };
2587 0 : path.push(Utf8Path::new(&name));
2588 0 : init::cleanup(&path, kind)?;
2589 0 : path.pop();
2590 : }
2591 :
2592 6 : if !unrecognized_files.is_empty() {
2593 : // assume that if there are any, there are many.
2594 0 : let n = unrecognized_files.len();
2595 0 : let first = &unrecognized_files[..n.min(10)];
2596 0 : anyhow::bail!(
2597 0 : "unrecognized files in timeline dir (total {n}), first 10: {first:?}"
2598 0 : );
2599 6 : }
2600 6 :
2601 6 : let decided =
2602 6 : init::reconcile(discovered_layers, index_part.as_ref(), disk_consistent_lsn);
2603 6 :
2604 6 : let mut loaded_layers = Vec::new();
2605 6 : let mut needs_cleanup = Vec::new();
2606 6 : let mut total_physical_size = 0;
2607 :
2608 22 : for (name, decision) in decided {
2609 16 : let decision = match decision {
2610 16 : Ok(decision) => decision,
2611 0 : Err(DismissedLayer::Future { local }) => {
2612 0 : if let Some(local) = local {
2613 0 : init::cleanup_future_layer(
2614 0 : &local.local_path,
2615 0 : &name,
2616 0 : disk_consistent_lsn,
2617 0 : )?;
2618 0 : }
2619 0 : needs_cleanup.push(name);
2620 0 : continue;
2621 : }
2622 0 : Err(DismissedLayer::LocalOnly(local)) => {
2623 0 : init::cleanup_local_only_file(&name, &local)?;
2624 : // this file never existed remotely, we will have to do rework
2625 0 : continue;
2626 : }
2627 0 : Err(DismissedLayer::BadMetadata(local)) => {
2628 0 : init::cleanup_local_file_for_remote(&local)?;
2629 : // this file never existed remotely, we will have to do rework
2630 0 : continue;
2631 : }
2632 : };
2633 :
2634 16 : match &name {
2635 12 : Delta(d) => assert!(d.lsn_range.end <= disk_consistent_lsn + 1),
2636 4 : Image(i) => assert!(i.lsn <= disk_consistent_lsn),
2637 : }
2638 :
2639 16 : tracing::debug!(layer=%name, ?decision, "applied");
2640 :
2641 16 : let layer = match decision {
2642 16 : Resident { local, remote } => {
2643 16 : total_physical_size += local.file_size;
2644 16 : Layer::for_resident(conf, &this, local.local_path, name, remote)
2645 16 : .drop_eviction_guard()
2646 : }
2647 0 : Evicted(remote) => Layer::for_evicted(conf, &this, name, remote),
2648 : };
2649 :
2650 16 : loaded_layers.push(layer);
2651 : }
2652 6 : Ok((loaded_layers, needs_cleanup, total_physical_size))
2653 6 : }
2654 6 : })
2655 6 : .await
2656 6 : .map_err(anyhow::Error::new)
2657 6 : .and_then(|x| x)?;
2658 :
2659 6 : let num_layers = loaded_layers.len();
2660 6 :
2661 6 : guard.initialize_local_layers(loaded_layers, disk_consistent_lsn + 1);
2662 6 :
2663 6 : self.remote_client
2664 6 : .schedule_layer_file_deletion(&needs_cleanup)?;
2665 6 : self.remote_client
2666 6 : .schedule_index_upload_for_file_changes()?;
2667 : // This barrier orders above DELETEs before any later operations.
2668 : // This is critical because code executing after the barrier might
2669 : // create again objects with the same key that we just scheduled for deletion.
2670 : // For example, if we just scheduled deletion of an image layer "from the future",
2671 : // later compaction might run again and re-create the same image layer.
2672 : // "from the future" here means an image layer whose LSN is > IndexPart::disk_consistent_lsn.
2673 : // "same" here means same key range and LSN.
2674 : //
2675 : // Without a barrier between above DELETEs and the re-creation's PUTs,
2676 : // the upload queue may execute the PUT first, then the DELETE.
2677 : // In our example, we will end up with an IndexPart referencing a non-existent object.
2678 : //
2679 : // 1. a future image layer is created and uploaded
2680 : // 2. ps restart
2681 : // 3. the future layer from (1) is deleted during load layer map
2682 : // 4. image layer is re-created and uploaded
2683 : // 5. deletion queue would like to delete (1) but actually deletes (4)
2684 : // 6. delete by name works as expected, but it now deletes the wrong (later) version
2685 : //
2686 : // See https://github.com/neondatabase/neon/issues/5878
2687 : //
2688 : // NB: generation numbers naturally protect against this because they disambiguate
2689 : // (1) and (4)
2690 6 : self.remote_client.schedule_barrier()?;
2691 : // Tenant::create_timeline will wait for these uploads to happen before returning, or
2692 : // on retry.
2693 :
2694 6 : info!(
2695 0 : "loaded layer map with {} layers at {}, total physical size: {}",
2696 : num_layers, disk_consistent_lsn, total_physical_size
2697 : );
2698 :
2699 6 : timer.stop_and_record();
2700 6 : Ok(())
2701 6 : }
2702 :
2703 : /// Retrieve current logical size of the timeline.
2704 : ///
2705 : /// The size may lag behind the actual number if the initial size calculation
2706 : /// has not run yet (it is triggered on the first size access).
2707 : ///
2708 : /// Returns the size and a flag indicating whether it is exact.
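///
/// A hypothetical consumer, distinguishing the two accuracy levels:
///
/// ```ignore
/// let size = timeline.get_current_logical_size(GetLogicalSizePriority::User, &ctx);
/// match size.accuracy() {
///     logical_size::Accuracy::Exact => { /* safe to report as-is */ }
///     logical_size::Accuracy::Approximate => { /* initial calculation still pending */ }
/// }
/// ```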
2709 0 : pub(crate) fn get_current_logical_size(
2710 0 : self: &Arc<Self>,
2711 0 : priority: GetLogicalSizePriority,
2712 0 : ctx: &RequestContext,
2713 0 : ) -> logical_size::CurrentLogicalSize {
2714 0 : if !self.tenant_shard_id.is_shard_zero() {
2715 : // Logical size is only accurately maintained on shard zero: when called elsewhere, for example
2716 : // when the HTTP API is serving a GET for timeline zero, return zero
2717 0 : return logical_size::CurrentLogicalSize::Approximate(logical_size::Approximate::zero());
2718 0 : }
2719 0 :
2720 0 : let current_size = self.current_logical_size.current_size();
2721 0 : debug!("Current size: {current_size:?}");
2722 :
2723 0 : match (current_size.accuracy(), priority) {
2724 0 : (logical_size::Accuracy::Exact, _) => (), // nothing to do
2725 0 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::Background) => {
2726 0 : // the background task will eventually deliver an exact value; we're in no rush
2727 0 : }
2728 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::User) => {
2729 : // background task is not ready, but user is asking for it now;
2730 : // => make the background task skip the line
2731 : // (The alternative would be to calculate the size here, but,
2732 : // it can actually take a long time if the user has a lot of rels.
2733 0 : // And we'll inevitably need it again, so let the background task do the work.)
2734 0 : match self
2735 0 : .current_logical_size
2736 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
2737 0 : .get()
2738 : {
2739 0 : Some(cancel) => cancel.cancel(),
2740 : None => {
2741 0 : let state = self.current_state();
2742 0 : if matches!(
2743 0 : state,
2744 : TimelineState::Broken { .. } | TimelineState::Stopping
2745 0 : ) {
2746 0 :
2747 0 : // Can happen when the timeline detail endpoint is used while deletion is ongoing (or the timeline is broken).
2748 0 : // Don't make noise.
2749 0 : } else {
2750 0 : warn!("unexpected: cancel_wait_for_background_loop_concurrency_limit_semaphore not set, priority-boosting of logical size calculation will not work");
2751 0 : debug_assert!(false);
2752 : }
2753 : }
2754 : };
2755 : }
2756 : }
2757 :
2758 0 : if let CurrentLogicalSize::Approximate(_) = ¤t_size {
2759 0 : if ctx.task_kind() == TaskKind::WalReceiverConnectionHandler {
2760 0 : let first = self
2761 0 : .current_logical_size
2762 0 : .did_return_approximate_to_walreceiver
2763 0 : .compare_exchange(
2764 0 : false,
2765 0 : true,
2766 0 : AtomicOrdering::Relaxed,
2767 0 : AtomicOrdering::Relaxed,
2768 0 : )
2769 0 : .is_ok();
2770 0 : if first {
2771 0 : crate::metrics::initial_logical_size::TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE.inc();
2772 0 : }
2773 0 : }
2774 0 : }
2775 :
2776 0 : current_size
2777 0 : }
2778 :
2779 0 : fn spawn_initial_logical_size_computation_task(self: &Arc<Self>, ctx: &RequestContext) {
2780 0 : let Some(initial_part_end) = self.current_logical_size.initial_part_end else {
2781 : // nothing to do for freshly created timelines;
2782 0 : assert_eq!(
2783 0 : self.current_logical_size.current_size().accuracy(),
2784 0 : logical_size::Accuracy::Exact,
2785 0 : );
2786 0 : self.current_logical_size.initialized.add_permits(1);
2787 0 : return;
2788 : };
2789 :
2790 0 : let cancel_wait_for_background_loop_concurrency_limit_semaphore = CancellationToken::new();
2791 0 : let token = cancel_wait_for_background_loop_concurrency_limit_semaphore.clone();
2792 0 : self.current_logical_size
2793 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore.set(token)
2794 0 : .expect("initial logical size calculation task must be spawned exactly once per Timeline object");
2795 0 :
2796 0 : let self_clone = Arc::clone(self);
2797 0 : let background_ctx = ctx.detached_child(
2798 0 : TaskKind::InitialLogicalSizeCalculation,
2799 0 : DownloadBehavior::Download,
2800 0 : );
2801 0 : task_mgr::spawn(
2802 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
2803 0 : task_mgr::TaskKind::InitialLogicalSizeCalculation,
2804 0 : Some(self.tenant_shard_id),
2805 0 : Some(self.timeline_id),
2806 0 : "initial size calculation",
2807 : false,
2808 : // NB: don't log errors here, task_mgr will do that.
2809 0 : async move {
2810 0 : let cancel = task_mgr::shutdown_token();
2811 0 : self_clone
2812 0 : .initial_logical_size_calculation_task(
2813 0 : initial_part_end,
2814 0 : cancel_wait_for_background_loop_concurrency_limit_semaphore,
2815 0 : cancel,
2816 0 : background_ctx,
2817 0 : )
2818 0 : .await;
2819 0 : Ok(())
2820 0 : }
2821 0 : .instrument(info_span!(parent: None, "initial_size_calculation", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id)),
2822 : );
2823 0 : }
2824 :
2825 0 : async fn initial_logical_size_calculation_task(
2826 0 : self: Arc<Self>,
2827 0 : initial_part_end: Lsn,
2828 0 : skip_concurrency_limiter: CancellationToken,
2829 0 : cancel: CancellationToken,
2830 0 : background_ctx: RequestContext,
2831 0 : ) {
2832 : scopeguard::defer! {
2833 : // Irrespective of the outcome of this operation, we should unblock anyone waiting for it.
2834 : self.current_logical_size.initialized.add_permits(1);
2835 : }
2836 :
2837 0 : let try_once = |attempt: usize| {
2838 0 : let background_ctx = &background_ctx;
2839 0 : let self_ref = &self;
2840 0 : let skip_concurrency_limiter = &skip_concurrency_limiter;
2841 0 : async move {
2842 0 : let cancel = task_mgr::shutdown_token();
2843 0 : let wait_for_permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
2844 0 : BackgroundLoopKind::InitialLogicalSizeCalculation,
2845 0 : background_ctx,
2846 0 : );
2847 :
2848 : use crate::metrics::initial_logical_size::StartCircumstances;
2849 0 : let (_maybe_permit, circumstances) = tokio::select! {
2850 : permit = wait_for_permit => {
2851 : (Some(permit), StartCircumstances::AfterBackgroundTasksRateLimit)
2852 : }
2853 : _ = self_ref.cancel.cancelled() => {
2854 : return Err(CalculateLogicalSizeError::Cancelled);
2855 : }
2856 : _ = cancel.cancelled() => {
2857 : return Err(CalculateLogicalSizeError::Cancelled);
2858 : },
2859 : () = skip_concurrency_limiter.cancelled() => {
2860 : // Some action that is part of an end user interaction requested the logical size
2861 : // => break out of the rate limit
2862 : // TODO: ideally we'd not run on BackgroundRuntime but the requester's runtime;
2863 : // but then again what happens if they cancel; also, we should just be using
2864 : // one runtime across the entire process, so, let's leave this for now.
2865 : (None, StartCircumstances::SkippedConcurrencyLimiter)
2866 : }
2867 : };
2868 :
2869 0 : let metrics_guard = if attempt == 1 {
2870 0 : crate::metrics::initial_logical_size::START_CALCULATION.first(circumstances)
2871 : } else {
2872 0 : crate::metrics::initial_logical_size::START_CALCULATION.retry(circumstances)
2873 : };
2874 :
2875 0 : let calculated_size = self_ref
2876 0 : .logical_size_calculation_task(
2877 0 : initial_part_end,
2878 0 : LogicalSizeCalculationCause::Initial,
2879 0 : background_ctx,
2880 0 : )
2881 0 : .await?;
2882 :
2883 0 : self_ref
2884 0 : .trigger_aux_file_size_computation(initial_part_end, background_ctx)
2885 0 : .await?;
2886 :
2887 : // TODO: add aux file size to logical size
2888 :
2889 0 : Ok((calculated_size, metrics_guard))
2890 0 : }
2891 0 : };
2892 :
2893 0 : let retrying = async {
2894 0 : let mut attempt = 0;
2895 0 : loop {
2896 0 : attempt += 1;
2897 0 :
2898 0 : match try_once(attempt).await {
2899 0 : Ok(res) => return ControlFlow::Continue(res),
2900 0 : Err(CalculateLogicalSizeError::Cancelled) => return ControlFlow::Break(()),
2901 : Err(
2902 0 : e @ (CalculateLogicalSizeError::Decode(_)
2903 0 : | CalculateLogicalSizeError::PageRead(_)),
2904 0 : ) => {
2905 0 : warn!(attempt, "initial size calculation failed: {e:?}");
2906 : // exponential back-off doesn't make sense at these long intervals;
2907 : // use fixed retry interval with generous jitter instead
2908 0 : let sleep_duration = Duration::from_secs(
2909 0 : u64::try_from(
2910 0 : // 1hour base
2911 0 : (60_i64 * 60_i64)
2912 0 : // 10min jitter
2913 0 : + rand::thread_rng().gen_range(-10 * 60..10 * 60),
2914 0 : )
2915 0 : .expect("10min < 1hour"),
2916 0 : );
2917 0 : tokio::time::sleep(sleep_duration).await;
2918 : }
2919 : }
2920 : }
2921 0 : };
2922 :
2923 0 : let (calculated_size, metrics_guard) = tokio::select! {
2924 : res = retrying => {
2925 : match res {
2926 : ControlFlow::Continue(calculated_size) => calculated_size,
2927 : ControlFlow::Break(()) => return,
2928 : }
2929 : }
2930 : _ = cancel.cancelled() => {
2931 : return;
2932 : }
2933 : };
2934 :
2935 : // we cannot query current_logical_size.current_size() to learn the current
2936 : // *negative* value; it only yields a value truncated to u64.
2937 0 : let added = self
2938 0 : .current_logical_size
2939 0 : .size_added_after_initial
2940 0 : .load(AtomicOrdering::Relaxed);
2941 0 :
2942 0 : let sum = calculated_size.saturating_add_signed(added);
2943 0 :
2944 0 : // set the gauge value before it can be set in `update_current_logical_size`.
2945 0 : self.metrics.current_logical_size_gauge.set(sum);
2946 0 :
2947 0 : self.current_logical_size
2948 0 : .initial_logical_size
2949 0 : .set((calculated_size, metrics_guard.calculation_result_saved()))
2950 0 : .ok()
2951 0 : .expect("only this task sets it");
2952 0 : }
2953 :
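     : /// Spawn a background task that calculates the logical size at `lsn` and
     : /// delivers the result through the returned oneshot receiver.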
2954 0 : pub(crate) fn spawn_ondemand_logical_size_calculation(
2955 0 : self: &Arc<Self>,
2956 0 : lsn: Lsn,
2957 0 : cause: LogicalSizeCalculationCause,
2958 0 : ctx: RequestContext,
2959 0 : ) -> oneshot::Receiver<Result<u64, CalculateLogicalSizeError>> {
2960 0 : let (sender, receiver) = oneshot::channel();
2961 0 : let self_clone = Arc::clone(self);
2962 0 : // XXX if our caller loses interest, i.e., ctx is cancelled,
2963 0 : // we should stop the size calculation work and return an error.
2964 0 : // That would require restructuring this function's API to
2965 0 : // return the result directly, instead of a Receiver for the result.
2966 0 : let ctx = ctx.detached_child(
2967 0 : TaskKind::OndemandLogicalSizeCalculation,
2968 0 : DownloadBehavior::Download,
2969 0 : );
2970 0 : task_mgr::spawn(
2971 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
2972 0 : task_mgr::TaskKind::OndemandLogicalSizeCalculation,
2973 0 : Some(self.tenant_shard_id),
2974 0 : Some(self.timeline_id),
2975 0 : "ondemand logical size calculation",
2976 0 : false,
2977 0 : async move {
2978 0 : let res = self_clone
2979 0 : .logical_size_calculation_task(lsn, cause, &ctx)
2980 0 : .await;
2981 0 : let _ = sender.send(res).ok();
2982 0 : Ok(()) // Receiver is responsible for handling errors
2983 0 : }
2984 0 : .in_current_span(),
2985 0 : );
2986 0 : receiver
2987 0 : }
2988 :
2989 : /// # Cancel-Safety
2990 : ///
2991 : /// This method is cancellation-safe.
2992 0 : #[instrument(skip_all)]
2993 : async fn logical_size_calculation_task(
2994 : self: &Arc<Self>,
2995 : lsn: Lsn,
2996 : cause: LogicalSizeCalculationCause,
2997 : ctx: &RequestContext,
2998 : ) -> Result<u64, CalculateLogicalSizeError> {
2999 : crate::span::debug_assert_current_span_has_tenant_and_timeline_id();
3000 : // We should never be calculating logical sizes on shard !=0, because these shards do not have
3001 : // accurate relation sizes, and they do not emit consumption metrics.
3002 : debug_assert!(self.tenant_shard_id.is_shard_zero());
3003 :
3004 : let guard = self
3005 : .gate
3006 : .enter()
3007 0 : .map_err(|_| CalculateLogicalSizeError::Cancelled)?;
3008 :
3009 : let self_calculation = Arc::clone(self);
3010 :
3011 0 : let mut calculation = pin!(async {
3012 0 : let ctx = ctx.attached_child();
3013 0 : self_calculation
3014 0 : .calculate_logical_size(lsn, cause, &guard, &ctx)
3015 0 : .await
3016 0 : });
3017 :
3018 : tokio::select! {
3019 : res = &mut calculation => { res }
3020 : _ = self.cancel.cancelled() => {
3021 : debug!("cancelling logical size calculation for timeline shutdown");
3022 : calculation.await
3023 : }
3024 : }
3025 : }
3026 :
3027 : /// Calculate the logical size of the database at the given LSN (`up_to_lsn`).
3028 : ///
3029 : /// NOTE: counted incrementally, includes ancestors. This can be a slow operation,
3030 : /// especially if we need to download remote layers.
3031 : ///
3032 : /// # Cancel-Safety
3033 : ///
3034 : /// This method is cancellation-safe.
3035 0 : async fn calculate_logical_size(
3036 0 : &self,
3037 0 : up_to_lsn: Lsn,
3038 0 : cause: LogicalSizeCalculationCause,
3039 0 : _guard: &GateGuard,
3040 0 : ctx: &RequestContext,
3041 0 : ) -> Result<u64, CalculateLogicalSizeError> {
3042 0 : info!(
3043 0 : "Calculating logical size for timeline {} at {}",
3044 : self.timeline_id, up_to_lsn
3045 : );
3046 :
3047 : pausable_failpoint!("timeline-calculate-logical-size-pause");
3048 :
3049 : // See if we've already done the work for initial size calculation.
3050 : // This is a short-cut for timelines that are mostly unused.
3051 0 : if let Some(size) = self.current_logical_size.initialized_size(up_to_lsn) {
3052 0 : return Ok(size);
3053 0 : }
3054 0 : let storage_time_metrics = match cause {
3055 : LogicalSizeCalculationCause::Initial
3056 : | LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize
3057 0 : | LogicalSizeCalculationCause::TenantSizeHandler => &self.metrics.logical_size_histo,
3058 : LogicalSizeCalculationCause::EvictionTaskImitation => {
3059 0 : &self.metrics.imitate_logical_size_histo
3060 : }
3061 : };
3062 0 : let timer = storage_time_metrics.start_timer();
3063 0 : let logical_size = self
3064 0 : .get_current_logical_size_non_incremental(up_to_lsn, ctx)
3065 0 : .await?;
3066 0 : debug!("calculated logical size: {logical_size}");
3067 0 : timer.stop_and_record();
3068 0 : Ok(logical_size)
3069 0 : }
3070 :
3071 : /// Update current logical size, adding `delta` to the old value.
3072 270570 : fn update_current_logical_size(&self, delta: i64) {
3073 270570 : let logical_size = &self.current_logical_size;
3074 270570 : logical_size.increment_size(delta);
3075 270570 :
3076 270570 : // Also set the value in the prometheus gauge. Note that
3077 270570 : // there is a race condition here: if this is called by two
3078 270570 : // threads concurrently, the prometheus gauge might be set to
3079 270570 : // one value while current_logical_size is set to the
3080 270570 : // other.
3081 270570 : match logical_size.current_size() {
3082 270570 : CurrentLogicalSize::Exact(ref new_current_size) => self
3083 270570 : .metrics
3084 270570 : .current_logical_size_gauge
3085 270570 : .set(new_current_size.into()),
3086 0 : CurrentLogicalSize::Approximate(_) => {
3087 0 : // don't update the gauge yet, this allows us not to update the gauge back and
3088 0 : // forth between the initial size calculation task.
3089 0 : }
3090 : }
3091 270570 : }
3092 :
3093 2832 : pub(crate) fn update_directory_entries_count(&self, kind: DirectoryKind, count: u64) {
3094 2832 : self.directory_metrics[kind.offset()].store(count, AtomicOrdering::Relaxed);
3095 2832 : let aux_metric =
3096 2832 : self.directory_metrics[DirectoryKind::AuxFiles.offset()].load(AtomicOrdering::Relaxed);
3097 2832 :
3098 2832 : let sum_of_entries = self
3099 2832 : .directory_metrics
3100 2832 : .iter()
3101 19824 : .map(|v| v.load(AtomicOrdering::Relaxed))
3102 2832 : .sum();
3103 2832 : // Set a high general threshold and a lower threshold for the auxiliary files,
3104 2832 : // as we can have large numbers of relations in the db directory.
3105 2832 : const SUM_THRESHOLD: u64 = 5000;
3106 2832 : const AUX_THRESHOLD: u64 = 1000;
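     : // e.g. 6000 total entries or 1200 aux-file entries both publish the gauge;
     : // below both thresholds the gauge is only refreshed if already initialized.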
3107 2832 : if sum_of_entries >= SUM_THRESHOLD || aux_metric >= AUX_THRESHOLD {
3108 0 : self.metrics
3109 0 : .directory_entries_count_gauge
3110 0 : .set(sum_of_entries);
3111 2832 : } else if let Some(metric) = Lazy::get(&self.metrics.directory_entries_count_gauge) {
3112 0 : metric.set(sum_of_entries);
3113 2832 : }
3114 2832 : }
3115 :
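     : /// Linearly scan the historic layer map for a layer with the given name.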
3116 0 : async fn find_layer(&self, layer_name: &LayerName) -> Option<Layer> {
3117 0 : let guard = self.layers.read().await;
3118 0 : for historic_layer in guard.layer_map().iter_historic_layers() {
3119 0 : let historic_layer_name = historic_layer.layer_name();
3120 0 : if layer_name == &historic_layer_name {
3121 0 : return Some(guard.get_from_desc(&historic_layer));
3122 0 : }
3123 : }
3124 :
3125 0 : None
3126 0 : }
3127 :
3128 : /// The timeline heatmap is a hint to secondary locations from the primary location,
3129 : /// indicating which layers are currently on-disk on the primary.
3130 : ///
3131 : /// None is returned if the Timeline is in a state where uploading a heatmap
3132 : /// doesn't make sense, such as shutting down or initializing. The caller
3133 : /// should treat this as a cue to simply skip doing any heatmap uploading
3134 : /// for this timeline.
3135 0 : pub(crate) async fn generate_heatmap(&self) -> Option<HeatMapTimeline> {
3136 0 : if !self.is_active() {
3137 0 : return None;
3138 0 : }
3139 :
3140 0 : let guard = self.layers.read().await;
3141 :
3142 0 : let resident = guard.likely_resident_layers().map(|layer| {
3143 0 : let last_activity_ts = layer.access_stats().latest_activity_or_now();
3144 0 :
3145 0 : HeatMapLayer::new(
3146 0 : layer.layer_desc().layer_name(),
3147 0 : layer.metadata(),
3148 0 : last_activity_ts,
3149 0 : )
3150 0 : });
3151 0 :
3152 0 : let layers = resident.collect();
3153 0 :
3154 0 : Some(HeatMapTimeline::new(self.timeline_id, layers))
3155 0 : }
3156 :
3157 : /// Returns true if the given lsn is or was an ancestor branchpoint.
3158 0 : pub(crate) fn is_ancestor_lsn(&self, lsn: Lsn) -> bool {
3159 0 : // upon timeline detach, we set the ancestor_lsn to Lsn::INVALID and store the
3160 0 : // original branchpoint in IndexPart::lineage
3161 0 : self.ancestor_lsn == lsn
3162 0 : || (self.ancestor_lsn == Lsn::INVALID
3163 0 : && self.remote_client.is_previous_ancestor_lsn(lsn))
3164 0 : }
3165 : }
3166 :
3167 : type TraversalId = Arc<str>;
3168 :
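     : /// Human-readable layer identifier recorded in the traversal path for error reporting.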
3169 : trait TraversalLayerExt {
3170 : fn traversal_id(&self) -> TraversalId;
3171 : }
3172 :
3173 : impl TraversalLayerExt for Layer {
3174 211376 : fn traversal_id(&self) -> TraversalId {
3175 211376 : Arc::clone(self.debug_str())
3176 211376 : }
3177 : }
3178 :
3179 : impl TraversalLayerExt for Arc<InMemoryLayer> {
3180 606205 : fn traversal_id(&self) -> TraversalId {
3181 606205 : Arc::clone(self.local_path_str())
3182 606205 : }
3183 : }
3184 :
3185 : impl Timeline {
3186 : ///
3187 : /// Collect the data needed to reconstruct the value of `key` at `request_lsn`
3188 : /// into `reconstruct_state`.
3189 : ///
3190 : /// The data might come from an ancestor timeline, if the key hasn't
3191 : /// been updated on this timeline yet.
3192 : ///
3193 : /// On success, returns the path of layers traversed, for debugging purposes.
3194 : ///
3195 : /// # Cancel-Safety
3196 : ///
3197 : /// This method is cancellation-safe.
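     : ///
     : /// Per iteration, layers are visited newest-first: the open in-memory layer,
     : /// then frozen layers, then the historic layer map; for inherited keys the
     : /// search recurses into the ancestor timeline once the continuation LSN
     : /// reaches the branch point.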
3198 625654 : async fn get_reconstruct_data(
3199 625654 : &self,
3200 625654 : key: Key,
3201 625654 : request_lsn: Lsn,
3202 625654 : reconstruct_state: &mut ValueReconstructState,
3203 625654 : ctx: &RequestContext,
3204 625654 : ) -> Result<Vec<TraversalPathItem>, PageReconstructError> {
3205 625654 : // Start from the current timeline.
3206 625654 : let mut timeline_owned;
3207 625654 : let mut timeline = self;
3208 625654 :
3209 625654 : let mut read_count = scopeguard::guard(0, |cnt| {
3210 625654 : crate::metrics::READ_NUM_LAYERS_VISITED.observe(cnt as f64)
3211 625654 : });
3212 625654 :
3213 625654 : // For debugging purposes, collect the path of layers that we traversed
3214 625654 : // through. It's included in the error message if we fail to find the key.
3215 625654 : let mut traversal_path = Vec::<TraversalPathItem>::new();
3216 :
3217 625654 : let cached_lsn = if let Some((cached_lsn, _)) = &reconstruct_state.img {
3218 0 : *cached_lsn
3219 : } else {
3220 625654 : Lsn(0)
3221 : };
3222 :
3223 : // 'prev_lsn' tracks the last LSN that we were at in our search. It's used
3224 : // to check that each iteration makes some progress, to break out of
3225 : // infinite looping if something goes wrong.
3226 625654 : let mut prev_lsn = None;
3227 625654 :
3228 625654 : let mut result = ValueReconstructResult::Continue;
3229 625654 : let mut cont_lsn = Lsn(request_lsn.0 + 1);
3230 :
3231 1666984 : 'outer: loop {
3232 1666984 : if self.cancel.is_cancelled() {
3233 0 : return Err(PageReconstructError::Cancelled);
3234 1666984 : }
3235 1666984 :
3236 1666984 : // The function should have updated 'state'
3237 1666984 : //info!("CALLED for {} at {}: {:?} with {} records, cached {}", key, cont_lsn, result, reconstruct_state.records.len(), cached_lsn);
3238 1666984 : match result {
3239 625524 : ValueReconstructResult::Complete => return Ok(traversal_path),
3240 : ValueReconstructResult::Continue => {
3241 : // If we reached an earlier cached page image, we're done.
3242 1041438 : if cont_lsn == cached_lsn + 1 {
3243 0 : MATERIALIZED_PAGE_CACHE_HIT.inc_by(1);
3244 0 : return Ok(traversal_path);
3245 1041438 : }
3246 1041438 : if let Some(prev) = prev_lsn {
3247 192159 : if prev <= cont_lsn {
3248 : // Didn't make any progress in last iteration. Error out to avoid
3249 : // getting stuck in the loop.
3250 106 : return Err(PageReconstructError::MissingKey(MissingKeyError {
3251 106 : key,
3252 106 : shard: self.shard_identity.get_shard_number(&key),
3253 106 : cont_lsn: Lsn(cont_lsn.0 - 1),
3254 106 : request_lsn,
3255 106 : ancestor_lsn: Some(timeline.ancestor_lsn),
3256 106 : traversal_path,
3257 106 : backtrace: None,
3258 106 : }));
3259 192053 : }
3260 849279 : }
3261 1041332 : prev_lsn = Some(cont_lsn);
3262 : }
3263 : ValueReconstructResult::Missing => {
3264 : return Err(PageReconstructError::MissingKey(MissingKeyError {
3265 22 : key,
3266 22 : shard: self.shard_identity.get_shard_number(&key),
3267 22 : cont_lsn,
3268 22 : request_lsn,
3269 22 : ancestor_lsn: None,
3270 22 : traversal_path,
3271 22 : backtrace: if cfg!(test) {
3272 22 : Some(std::backtrace::Backtrace::force_capture())
3273 : } else {
3274 0 : None
3275 : },
3276 : }));
3277 : }
3278 : }
3279 :
3280 : // Recurse into ancestor if needed
3281 1041332 : if let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() {
3282 399605 : if key.is_inherited_key() && Lsn(cont_lsn.0 - 1) <= timeline.ancestor_lsn {
3283 223627 : trace!(
3284 0 : "going into ancestor {}, cont_lsn is {}",
3285 : timeline.ancestor_lsn,
3286 : cont_lsn
3287 : );
3288 :
3289 223627 : timeline_owned = timeline
3290 223627 : .get_ready_ancestor_timeline(ancestor_timeline, ctx)
3291 2 : .await?;
3292 223625 : timeline = &*timeline_owned;
3293 223625 : prev_lsn = None;
3294 223625 : continue 'outer;
3295 175978 : }
3296 641727 : }
3297 :
3298 817705 : let guard = timeline.layers.read().await;
3299 817705 : let layers = guard.layer_map();
3300 :
3301 : // Check the open and frozen in-memory layers first, in order from newest
3302 : // to oldest.
3303 817705 : if let Some(open_layer) = &layers.open_layer {
3304 718004 : let start_lsn = open_layer.get_lsn_range().start;
3305 718004 : if cont_lsn > start_lsn {
3306 : //info!("CHECKING for {} at {} on open layer {}", key, cont_lsn, open_layer.layer_name().display());
3307 : // Get all the data needed to reconstruct the page version from this layer.
3308 : // But if we have an older cached page image, no need to go past that.
3309 604247 : let lsn_floor = max(cached_lsn + 1, start_lsn);
3310 604247 :
3311 604247 : let open_layer = open_layer.clone();
3312 604247 : drop(guard);
3313 604247 :
3314 604247 : result = match open_layer
3315 604247 : .get_value_reconstruct_data(
3316 604247 : key,
3317 604247 : lsn_floor..cont_lsn,
3318 604247 : reconstruct_state,
3319 604247 : ctx,
3320 604247 : )
3321 10815 : .await
3322 : {
3323 604247 : Ok(result) => result,
3324 0 : Err(e) => return Err(PageReconstructError::from(e)),
3325 : };
3326 604247 : cont_lsn = lsn_floor;
3327 604247 : *read_count += 1;
3328 604247 : traversal_path.push((result, cont_lsn, open_layer.traversal_id()));
3329 604247 : continue 'outer;
3330 113757 : }
3331 99701 : }
3332 213458 : for frozen_layer in layers.frozen_layers.iter().rev() {
3333 1960 : let start_lsn = frozen_layer.get_lsn_range().start;
3334 1960 : if cont_lsn > start_lsn {
3335 : //info!("CHECKING for {} at {} on frozen layer {}", key, cont_lsn, frozen_layer.layer_name().display());
3336 1958 : let lsn_floor = max(cached_lsn + 1, start_lsn);
3337 1958 :
3338 1958 : let frozen_layer = frozen_layer.clone();
3339 1958 : drop(guard);
3340 1958 :
3341 1958 : result = match frozen_layer
3342 1958 : .get_value_reconstruct_data(
3343 1958 : key,
3344 1958 : lsn_floor..cont_lsn,
3345 1958 : reconstruct_state,
3346 1958 : ctx,
3347 1958 : )
3348 0 : .await
3349 : {
3350 1958 : Ok(result) => result,
3351 0 : Err(e) => return Err(PageReconstructError::from(e)),
3352 : };
3353 1958 : cont_lsn = lsn_floor;
3354 1958 : *read_count += 1;
3355 1958 : traversal_path.push((result, cont_lsn, frozen_layer.traversal_id()));
3356 1958 : continue 'outer;
3357 2 : }
3358 : }
3359 :
3360 211500 : if let Some(SearchResult { lsn_floor, layer }) = layers.search(key, cont_lsn) {
3361 211376 : let layer = guard.get_from_desc(&layer);
3362 211376 : drop(guard);
3363 211376 : // Get all the data needed to reconstruct the page version from this layer.
3364 211376 : // But if we have an older cached page image, no need to go past that.
3365 211376 : let lsn_floor = max(cached_lsn + 1, lsn_floor);
3366 211376 : result = match layer
3367 211376 : .get_value_reconstruct_data(key, lsn_floor..cont_lsn, reconstruct_state, ctx)
3368 30777 : .await
3369 : {
3370 211376 : Ok(result) => result,
3371 0 : Err(e) => return Err(PageReconstructError::from(e)),
3372 : };
3373 211376 : cont_lsn = lsn_floor;
3374 211376 : *read_count += 1;
3375 211376 : traversal_path.push((result, cont_lsn, layer.traversal_id()));
3376 211376 : continue 'outer;
3377 124 : } else if timeline.ancestor_timeline.is_some() {
3378 : // Nothing on this timeline. Traverse to parent
3379 106 : result = ValueReconstructResult::Continue;
3380 106 : cont_lsn = Lsn(timeline.ancestor_lsn.0 + 1);
3381 106 : continue 'outer;
3382 : } else {
3383 : // Nothing found
3384 18 : result = ValueReconstructResult::Missing;
3385 18 : continue 'outer;
3386 : }
3387 : }
3388 625654 : }
3389 :
3390 : /// Get the data needed to reconstruct all keys in the provided keyspace
3391 : ///
3392 : /// The algorithm is as follows:
3393 : /// 1. While some keys are still not done and there's a timeline to visit:
3394 : /// 2. Visit the timeline (see [`Timeline::get_vectored_reconstruct_data_timeline`]:
3395 : /// 2.1: Build the fringe for the current keyspace
3396 : /// 2.2 Visit the newest layer from the fringe to collect all values for the range it
3397 : /// intersects
3398 : /// 2.3. Pop the timeline from the fringe
3399 : /// 2.4. If the fringe is empty, go back to 1
3400 138 : async fn get_vectored_reconstruct_data(
3401 138 : &self,
3402 138 : mut keyspace: KeySpace,
3403 138 : request_lsn: Lsn,
3404 138 : reconstruct_state: &mut ValuesReconstructState,
3405 138 : ctx: &RequestContext,
3406 138 : ) -> Result<(), GetVectoredError> {
3407 138 : let mut timeline_owned: Arc<Timeline>;
3408 138 : let mut timeline = self;
3409 138 :
3410 138 : let mut cont_lsn = Lsn(request_lsn.0 + 1);
3411 :
3412 138 : let missing_keyspace = loop {
3413 174 : if self.cancel.is_cancelled() {
3414 0 : return Err(GetVectoredError::Cancelled);
3415 174 : }
3416 :
3417 : let TimelineVisitOutcome {
3418 174 : completed_keyspace: completed,
3419 174 : image_covered_keyspace,
3420 174 : } = Self::get_vectored_reconstruct_data_timeline(
3421 174 : timeline,
3422 174 : keyspace.clone(),
3423 174 : cont_lsn,
3424 174 : reconstruct_state,
3425 174 : &self.cancel,
3426 174 : ctx,
3427 174 : )
3428 11414 : .await?;
3429 :
3430 174 : keyspace.remove_overlapping_with(&completed);
3431 174 :
3432 174 : // Do not descend into the ancestor timeline for aux files.
3433 174 : // We don't return a blanket [`GetVectoredError::MissingKey`] to avoid
3434 174 : // stalling compaction.
3435 174 : keyspace.remove_overlapping_with(&KeySpace {
3436 174 : ranges: vec![NON_INHERITED_RANGE, NON_INHERITED_SPARSE_RANGE],
3437 174 : });
3438 174 :
3439 174 : // Keyspace is fully retrieved
3440 174 : if keyspace.is_empty() {
3441 128 : break None;
3442 46 : }
3443 :
3444 46 : let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() else {
3445 : // Not fully retrieved but no ancestor timeline.
3446 10 : break Some(keyspace);
3447 : };
3448 :
3449 : // Now we check whether any keys are covered by an image layer but missing
3450 : // from it, which means those keys do not exist.
3451 :
3452 : // The block below will stop the vectored search if any of the keys encountered an image layer
3453 : // which did not contain a snapshot for said key. Since we have already removed all completed
3454 : // keys from `keyspace`, we expect there to be no overlap between it and the image covered key
3455 : // space. If that's not the case, we had at least one key encounter a gap in the image layer
3456 : // and stop the search as a result of that.
3457 36 : let removed = keyspace.remove_overlapping_with(&image_covered_keyspace);
3458 36 : if !removed.is_empty() {
3459 0 : break Some(removed);
3460 36 : }
3461 36 : // If we reached this point, `remove_overlapping_with` should not have made any change to the
3462 36 : // keyspace.
3463 36 :
3464 36 : // Take the min to avoid reconstructing a page with data newer than request Lsn.
3465 36 : cont_lsn = std::cmp::min(Lsn(request_lsn.0 + 1), Lsn(timeline.ancestor_lsn.0 + 1));
3466 36 : timeline_owned = timeline
3467 36 : .get_ready_ancestor_timeline(ancestor_timeline, ctx)
3468 0 : .await
3469 36 : .map_err(GetVectoredError::GetReadyAncestorError)?;
3470 36 : timeline = &*timeline_owned;
3471 : };
3472 :
3473 138 : if let Some(missing_keyspace) = missing_keyspace {
3474 10 : return Err(GetVectoredError::MissingKey(MissingKeyError {
3475 10 : key: missing_keyspace.start().unwrap(), /* better if we can store the full keyspace */
3476 10 : shard: self
3477 10 : .shard_identity
3478 10 : .get_shard_number(&missing_keyspace.start().unwrap()),
3479 10 : cont_lsn,
3480 10 : request_lsn,
3481 10 : ancestor_lsn: Some(timeline.ancestor_lsn),
3482 10 : traversal_path: vec![],
3483 10 : backtrace: None,
3484 10 : }));
3485 128 : }
3486 128 :
3487 128 : Ok(())
3488 138 : }
3489 :
3490 : /// Collect the reconstruct data for a keyspace from the specified timeline.
3491 : ///
3492 : /// Maintain a fringe [`LayerFringe`] which tracks all the layers that intersect
3493 : /// the current keyspace. The current keyspace of the search at any given timeline
3494 : /// is the original keyspace minus all the keys that have been completed minus
3495 : /// any keys for which we couldn't find an intersecting layer. It's not tracked explicitly,
3496 : /// but if you merge all the keyspaces in the fringe, you get the "current keyspace".
3497 : ///
3498 : /// This is basically a depth-first search visitor implementation where a vertex
3499 : /// is the (layer, lsn range, key space) tuple. The fringe acts as the stack.
3500 : ///
3501 : /// At each iteration pop the top of the fringe (the layer with the highest Lsn)
3502 : /// and get all the required reconstruct data from the layer in one go.
3503 : ///
3504 : /// Returns the completed keyspace and the keyspaces with image coverage. The caller
3505 : /// decides how to deal with these two keyspaces.
3506 174 : async fn get_vectored_reconstruct_data_timeline(
3507 174 : timeline: &Timeline,
3508 174 : keyspace: KeySpace,
3509 174 : mut cont_lsn: Lsn,
3510 174 : reconstruct_state: &mut ValuesReconstructState,
3511 174 : cancel: &CancellationToken,
3512 174 : ctx: &RequestContext,
3513 174 : ) -> Result<TimelineVisitOutcome, GetVectoredError> {
3514 174 : let mut unmapped_keyspace = keyspace.clone();
3515 174 : let mut fringe = LayerFringe::new();
3516 174 :
3517 174 : let mut completed_keyspace = KeySpace::default();
3518 174 : let mut image_covered_keyspace = KeySpaceRandomAccum::new();
3519 :
3520 408 : loop {
3521 408 : if cancel.is_cancelled() {
3522 0 : return Err(GetVectoredError::Cancelled);
3523 408 : }
3524 408 :
3525 408 : let (keys_done_last_step, keys_with_image_coverage) =
3526 408 : reconstruct_state.consume_done_keys();
3527 408 : unmapped_keyspace.remove_overlapping_with(&keys_done_last_step);
3528 408 : completed_keyspace.merge(&keys_done_last_step);
3529 408 : if let Some(keys_with_image_coverage) = keys_with_image_coverage {
3530 66 : unmapped_keyspace
3531 66 : .remove_overlapping_with(&KeySpace::single(keys_with_image_coverage.clone()));
3532 66 : image_covered_keyspace.add_range(keys_with_image_coverage);
3533 342 : }
3534 :
3535 : // Do not descend any further if the last layer we visited
3536 : // completed all keys in the keyspace it inspected. This is not
3537 : // required for correctness, but avoids visiting extra layers
3538 : // which turns out to be a perf bottleneck in some cases.
3539 408 : if !unmapped_keyspace.is_empty() {
3540 288 : let guard = timeline.layers.read().await;
3541 288 : let layers = guard.layer_map();
3542 288 :
3543 288 : let in_memory_layer = layers.find_in_memory_layer(|l| {
3544 28 : let start_lsn = l.get_lsn_range().start;
3545 28 : cont_lsn > start_lsn
3546 288 : });
3547 288 :
3548 288 : match in_memory_layer {
3549 14 : Some(l) => {
3550 14 : let lsn_range = l.get_lsn_range().start..cont_lsn;
3551 14 : fringe.update(
3552 14 : ReadableLayer::InMemoryLayer(l),
3553 14 : unmapped_keyspace.clone(),
3554 14 : lsn_range,
3555 14 : );
3556 14 : }
3557 : None => {
3558 71995 : for range in unmapped_keyspace.ranges.iter() {
3559 71995 : let results = layers.range_search(range.clone(), cont_lsn);
3560 71995 :
3561 71995 : results
3562 71995 : .found
3563 71995 : .into_iter()
3564 71995 : .map(|(SearchResult { layer, lsn_floor }, keyspace_accum)| {
3565 63916 : (
3566 63916 : ReadableLayer::PersistentLayer(guard.get_from_desc(&layer)),
3567 63916 : keyspace_accum.to_keyspace(),
3568 63916 : lsn_floor..cont_lsn,
3569 63916 : )
3570 71995 : })
3571 71995 : .for_each(|(layer, keyspace, lsn_range)| {
3572 63916 : fringe.update(layer, keyspace, lsn_range)
3573 71995 : });
3574 71995 : }
3575 : }
3576 : }
3577 :
3578 : // It's safe to drop the layer map lock after planning the next round of reads.
3579 : // The fringe keeps readable handles for the layers which are safe to read even
3580 : // if layers were compacted or flushed.
3581 : //
3582 : // The more interesting consideration is: "Why is the read algorithm still correct
3583 : // if the layer map changes while it is operating?". Doing a vectored read on a
3584 : // timeline boils down to pushing an imaginary lsn boundary downwards for each range
3585 : // covered by the read. The layer map tells us how to move the lsn downwards for a
3586 : // range at *a particular point in time*. It is fine for the answer to be different
3587 : // at two different time points.
3588 288 : drop(guard);
3589 120 : }
3590 :
3591 408 : if let Some((layer_to_read, keyspace_to_read, lsn_range)) = fringe.next_layer() {
3592 234 : let next_cont_lsn = lsn_range.start;
3593 234 : layer_to_read
3594 234 : .get_values_reconstruct_data(
3595 234 : keyspace_to_read.clone(),
3596 234 : lsn_range,
3597 234 : reconstruct_state,
3598 234 : ctx,
3599 234 : )
3600 11413 : .await?;
3601 :
3602 234 : unmapped_keyspace = keyspace_to_read;
3603 234 : cont_lsn = next_cont_lsn;
3604 234 :
3605 234 : reconstruct_state.on_layer_visited(&layer_to_read);
3606 : } else {
3607 174 : break;
3608 174 : }
3609 174 : }
3610 174 :
3611 174 : Ok(TimelineVisitOutcome {
3612 174 : completed_keyspace,
3613 174 : image_covered_keyspace: image_covered_keyspace.consume_keyspace(),
3614 174 : })
3615 174 : }
3616 :
3617 : /// # Cancel-Safety
3618 : ///
3619 : /// This method is cancellation-safe.
3620 624102 : async fn lookup_cached_page(
3621 624102 : &self,
3622 624102 : key: &Key,
3623 624102 : lsn: Lsn,
3624 624102 : ctx: &RequestContext,
3625 624102 : ) -> Option<(Lsn, Bytes)> {
3626 624102 : let cache = page_cache::get();
3627 :
3628 : // FIXME: It's pointless to check the cache for things that are not 8kB pages.
3629 : // We should look at the key to determine if it's a cacheable object
3630 624102 : let (lsn, read_guard) = cache
3631 624102 : .lookup_materialized_page(self.tenant_shard_id, self.timeline_id, key, lsn, ctx)
3632 624102 : .await?;
3633 0 : let img = Bytes::from(read_guard.to_vec());
3634 0 : Some((lsn, img))
3635 624102 : }
3636 :
3637 223663 : async fn get_ready_ancestor_timeline(
3638 223663 : &self,
3639 223663 : ancestor: &Arc<Timeline>,
3640 223663 : ctx: &RequestContext,
3641 223663 : ) -> Result<Arc<Timeline>, GetReadyAncestorError> {
3642 223663 : // It's possible that the ancestor timeline isn't active yet, or
3643 223663 : // is active but hasn't yet caught up to the branch point. Wait
3644 223663 : // for it.
3645 223663 : //
3646 223663 : // This cannot happen while the pageserver is running normally,
3647 223663 : // because you cannot create a branch from a point that isn't
3648 223663 : // present in the pageserver yet. However, we don't wait for the
3649 223663 : // branch point to be uploaded to cloud storage before creating
3650 223663 : // a branch. I.e., the branch LSN need not be remote consistent
3651 223663 : // for the branching operation to succeed.
3652 223663 : //
3653 223663 : // Hence, if we try to load a tenant in such a state where
3654 223663 : // 1. the existence of the branch was persisted (in IndexPart and/or locally)
3655 223663 : // 2. but the ancestor state is behind branch_lsn because it was not yet persisted
3656 223663 : // then we will need to wait for the ancestor timeline to
3657 223663 : // re-stream WAL up to branch_lsn before we access it.
3658 223663 : //
3659 223663 : // How can a tenant get in such a state?
3660 223663 : // - ungraceful pageserver process exit
3661 223663 : // - detach+attach => this is a bug, https://github.com/neondatabase/neon/issues/4219
3662 223663 : //
3663 223663 : // NB: this could be avoided by requiring
3664 223663 : // branch_lsn >= remote_consistent_lsn
3665 223663 : // during branch creation.
3666 223663 : match ancestor.wait_to_become_active(ctx).await {
3667 223661 : Ok(()) => {}
3668 : Err(TimelineState::Stopping) => {
3669 : // If an ancestor is stopping, it means the tenant is stopping: handle this the same as if this timeline was stopping.
3670 0 : return Err(GetReadyAncestorError::Cancelled);
3671 : }
3672 2 : Err(state) => {
3673 2 : return Err(GetReadyAncestorError::BadState {
3674 2 : timeline_id: ancestor.timeline_id,
3675 2 : state,
3676 2 : });
3677 : }
3678 : }
3679 223661 : ancestor
3680 223661 : .wait_lsn(self.ancestor_lsn, WaitLsnWaiter::Timeline(self), ctx)
3681 0 : .await
3682 223661 : .map_err(|e| match e {
3683 0 : e @ WaitLsnError::Timeout(_) => GetReadyAncestorError::AncestorLsnTimeout(e),
3684 0 : WaitLsnError::Shutdown => GetReadyAncestorError::Cancelled,
3685 0 : WaitLsnError::BadState(state) => GetReadyAncestorError::BadState {
3686 0 : timeline_id: ancestor.timeline_id,
3687 0 : state,
3688 0 : },
3689 223661 : })?;
3690 :
3691 223661 : Ok(ancestor.clone())
3692 223663 : }
3693 :
3694 0 : pub(crate) fn get_ancestor_timeline(&self) -> Option<Arc<Timeline>> {
3695 0 : self.ancestor_timeline.clone()
3696 0 : }
3697 :
3698 5452 : pub(crate) fn get_shard_identity(&self) -> &ShardIdentity {
3699 5452 : &self.shard_identity
3700 5452 : }
3701 :
3702 : ///
3703 : /// Get a handle to the latest layer for appending.
3704 : ///
3705 1238 : async fn get_layer_for_write(
3706 1238 : &self,
3707 1238 : lsn: Lsn,
3708 1238 : ctx: &RequestContext,
3709 1238 : ) -> anyhow::Result<Arc<InMemoryLayer>> {
3710 1238 : let mut guard = self.layers.write().await;
3711 1238 : let layer = guard
3712 1238 : .get_layer_for_write(
3713 1238 : lsn,
3714 1238 : self.get_last_record_lsn(),
3715 1238 : self.conf,
3716 1238 : self.timeline_id,
3717 1238 : self.tenant_shard_id,
3718 1238 : ctx,
3719 1238 : )
3720 686 : .await?;
3721 1238 : Ok(layer)
3722 1238 : }
3723 :
3724 5279044 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
3725 5279044 : assert!(new_lsn.is_aligned());
3726 :
3727 5279044 : self.metrics.last_record_gauge.set(new_lsn.0 as i64);
3728 5279044 : self.last_record_lsn.advance(new_lsn);
3729 5279044 : }
3730 :
3731 1136 : async fn freeze_inmem_layer_at(
3732 1136 : &self,
3733 1136 : at: Lsn,
3734 1136 : write_lock: &mut tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
3735 1136 : ) {
3736 1136 : let frozen = {
3737 1136 : let mut guard = self.layers.write().await;
3738 1136 : guard
3739 1136 : .try_freeze_in_memory_layer(at, &self.last_freeze_at, write_lock)
3740 4 : .await
3741 : };
3742 1136 : if frozen {
3743 1108 : let now = Instant::now();
3744 1108 : *(self.last_freeze_ts.write().unwrap()) = now;
3745 1108 : }
3746 1136 : }
3747 :
3748 : /// Layer flusher task's main loop.
3749 372 : async fn flush_loop(
3750 372 : self: &Arc<Self>,
3751 372 : mut layer_flush_start_rx: tokio::sync::watch::Receiver<(u64, Lsn)>,
3752 372 : ctx: &RequestContext,
3753 372 : ) {
3754 372 : info!("started flush loop");
3755 1100 : loop {
3756 1100 : tokio::select! {
3757 : _ = self.cancel.cancelled() => {
3758 : info!("shutting down layer flush task due to Timeline::cancel");
3759 : break;
3760 : },
3761 : _ = layer_flush_start_rx.changed() => {}
3762 : }
3763 1100 : trace!("waking up");
3764 1100 : let (flush_counter, frozen_to_lsn) = *layer_flush_start_rx.borrow();
3765 1100 :
3766 1100 : // The highest LSN to which we flushed in the loop over frozen layers
3767 1100 : let mut flushed_to_lsn = Lsn(0);
3768 :
3769 1100 : let result = loop {
3770 2208 : if self.cancel.is_cancelled() {
3771 0 : info!("dropping out of flush loop for timeline shutdown");
3772 : // Note: we do not bother transmitting into [`layer_flush_done_tx`], because
3773 : // anyone waiting on that will respect self.cancel as well: they will stop
3774 : // waiting at the same time we as drop out of this loop.
3775 0 : return;
3776 2208 : }
3777 2208 :
3778 2208 : let timer = self.metrics.flush_time_histo.start_timer();
3779 :
3780 2208 : let layer_to_flush = {
3781 2208 : let guard = self.layers.read().await;
3782 2208 : guard.layer_map().frozen_layers.front().cloned()
3783 : // drop 'layers' lock to allow concurrent reads and writes
3784 : };
3785 2208 : let Some(layer_to_flush) = layer_to_flush else {
3786 1100 : break Ok(());
3787 : };
3788 59172 : match self.flush_frozen_layer(layer_to_flush, ctx).await {
3789 1108 : Ok(this_layer_to_lsn) => {
3790 1108 : flushed_to_lsn = std::cmp::max(flushed_to_lsn, this_layer_to_lsn);
3791 1108 : }
3792 : Err(FlushLayerError::Cancelled) => {
3793 0 : info!("dropping out of flush loop for timeline shutdown");
3794 0 : return;
3795 : }
3796 0 : err @ Err(
3797 0 : FlushLayerError::NotRunning(_)
3798 0 : | FlushLayerError::Other(_)
3799 0 : | FlushLayerError::CreateImageLayersError(_),
3800 0 : ) => {
3801 0 : error!("could not flush frozen layer: {err:?}");
3802 0 : break err.map(|_| ());
3803 : }
3804 : }
3805 1108 : timer.stop_and_record();
3806 : };
3807 :
3808 : // Unsharded tenants should never advance their LSN beyond the end of the
3809 : // highest layer they write: such gaps between layer data and the frozen LSN
3810 : // are only legal on sharded tenants.
3811 1100 : debug_assert!(
3812 1100 : self.shard_identity.count.count() > 1
3813 1100 : || flushed_to_lsn >= frozen_to_lsn
3814 68 : || !flushed_to_lsn.is_valid()
3815 : );
3816 :
3817 1100 : if flushed_to_lsn < frozen_to_lsn && self.shard_identity.count.count() > 1 {
3818 : // If our layer flushes didn't carry disk_consistent_lsn up to the `to_lsn` advertised
3819 : // to us via layer_flush_start_rx, then advance it here.
3820 : //
3821 : // This path is only taken for tenants with multiple shards: single sharded tenants should
3822 : // never encounter a gap in the wal.
3823 0 : let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
3824 0 : tracing::debug!("Advancing disk_consistent_lsn across layer gap {old_disk_consistent_lsn}->{frozen_to_lsn}");
3825 0 : if self.set_disk_consistent_lsn(frozen_to_lsn) {
3826 0 : if let Err(e) = self.schedule_uploads(frozen_to_lsn, vec![]) {
3827 0 : tracing::warn!("Failed to schedule metadata upload after updating disk_consistent_lsn: {e}");
3828 0 : }
3829 0 : }
3830 1100 : }
3831 :
3832 : // Notify any listeners that we're done
3833 1100 : let _ = self
3834 1100 : .layer_flush_done_tx
3835 1100 : .send_replace((flush_counter, result));
3836 : }
3837 8 : }
3838 :
3839 : /// Request the flush loop to write out all frozen layers up to `at_lsn` as Delta L0 files to disk.
3840 : /// The caller is responsible for the freezing, e.g., [`Self::freeze_inmem_layer_at`].
3841 : ///
3842 : /// `at_lsn` may be higher than the highest LSN of a frozen layer: if this is the
3843 : /// case, it means no data will be written between the top of the highest frozen layer and
3844 : /// `at_lsn`, e.g. because this tenant shard has ingested up to `at_lsn` and not written any data
3845 : /// locally for that part of the WAL.
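     : ///
     : /// Returns a request token that can be awaited via [`Self::wait_flush_completion`].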
3846 1136 : fn flush_frozen_layers(&self, at_lsn: Lsn) -> Result<u64, FlushLayerError> {
3847 1136 : // Increment the flush cycle counter and wake up the flush task.
3848 1136 : // Remember the new value, so that when we listen for the flush
3849 1136 : // to finish, we know when the flush that we initiated has
3850 1136 : // finished, instead of some other flush that was started earlier.
3851 1136 : let mut my_flush_request = 0;
3852 1136 :
3853 1136 : let flush_loop_state = { *self.flush_loop_state.lock().unwrap() };
3854 1136 : if !matches!(flush_loop_state, FlushLoopState::Running { .. }) {
3855 0 : return Err(FlushLayerError::NotRunning(flush_loop_state));
3856 1136 : }
3857 1136 :
3858 1136 : self.layer_flush_start_tx.send_modify(|(counter, lsn)| {
3859 1136 : my_flush_request = *counter + 1;
3860 1136 : *counter = my_flush_request;
3861 1136 : *lsn = std::cmp::max(at_lsn, *lsn);
3862 1136 : });
3863 1136 :
3864 1136 : Ok(my_flush_request)
3865 1136 : }
3866 :
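     : /// Wait until the flush loop has completed the flush identified by `request`,
     : /// a token obtained from [`Self::flush_frozen_layers`].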
3867 1056 : async fn wait_flush_completion(&self, request: u64) -> Result<(), FlushLayerError> {
3868 1056 : let mut rx = self.layer_flush_done_tx.subscribe();
3869 2111 : loop {
3870 2111 : {
3871 2111 : let (last_result_counter, last_result) = &*rx.borrow();
3872 2111 : if *last_result_counter >= request {
3873 1056 : if let Err(err) = last_result {
3874 : // We already logged the original error in
3875 : // flush_loop. We cannot propagate it to the caller
3876 : // here, because it might not be Cloneable
3877 0 : return Err(err.clone());
3878 : } else {
3879 1056 : return Ok(());
3880 : }
3881 1055 : }
3882 1055 : }
3883 1055 : trace!("waiting for flush to complete");
3884 : tokio::select! {
3885 : rx_e = rx.changed() => {
3886 0 : rx_e.map_err(|_| FlushLayerError::NotRunning(*self.flush_loop_state.lock().unwrap()))?;
3887 : },
3888 : // Cancellation safety: we are not leaving an I/O in-flight for the flush, we're just ignoring
3889 : // the notification from [`flush_loop`] that it completed.
3890 : _ = self.cancel.cancelled() => {
3891 : tracing::info!("Cancelled layer flush due on timeline shutdown");
3892 : return Ok(())
3893 : }
3894 : };
3895 1055 : trace!("done")
3896 : }
3897 1056 : }
3898 :
3899 1056 : async fn flush_frozen_layers_and_wait(&self, at_lsn: Lsn) -> Result<(), FlushLayerError> {
3900 1056 : let token = self.flush_frozen_layers(at_lsn)?;
3901 1056 : self.wait_flush_completion(token).await
3902 1056 : }
3903 :
3904 : /// Flush one frozen in-memory layer to disk, as a new delta layer.
3905 : ///
3906 : /// Return value is the last lsn (inclusive) of the layer that was frozen.
3907 2216 : #[instrument(skip_all, fields(layer=%frozen_layer))]
3908 : async fn flush_frozen_layer(
3909 : self: &Arc<Self>,
3910 : frozen_layer: Arc<InMemoryLayer>,
3911 : ctx: &RequestContext,
3912 : ) -> Result<Lsn, FlushLayerError> {
3913 : debug_assert_current_span_has_tenant_and_timeline_id();
3914 :
3915 : // As a special case, when we have just imported an image into the repository,
3916 : // instead of writing out an L0 delta layer, we directly write out image layer
3917 : // files. This is possible as long as *all* the data imported into the
3918 : // repository have the same LSN.
3919 : let lsn_range = frozen_layer.get_lsn_range();
3920 :
3921 : // Whether to directly create image layers for this flush, or flush them as delta layers
3922 : let create_image_layer =
3923 : lsn_range.start == self.initdb_lsn && lsn_range.end == Lsn(self.initdb_lsn.0 + 1);
3924 :
3925 : #[cfg(test)]
3926 : {
3927 : match &mut *self.flush_loop_state.lock().unwrap() {
3928 : FlushLoopState::NotStarted | FlushLoopState::Exited => {
3929 : panic!("flush loop not running")
3930 : }
3931 : FlushLoopState::Running {
3932 : expect_initdb_optimization,
3933 : initdb_optimization_count,
3934 : ..
3935 : } => {
3936 : if create_image_layer {
3937 : *initdb_optimization_count += 1;
3938 : } else {
3939 : assert!(!*expect_initdb_optimization, "expected initdb optimization");
3940 : }
3941 : }
3942 : }
3943 : }
3944 :
3945 : let (layers_to_upload, delta_layer_to_add) = if create_image_layer {
3946 : // Note: The 'ctx' in use here has DownloadBehavior::Error. We should not
3947 : // require downloading anything during initial import.
3948 : let ((rel_partition, metadata_partition), _lsn) = self
3949 : .repartition(
3950 : self.initdb_lsn,
3951 : self.get_compaction_target_size(),
3952 : EnumSet::empty(),
3953 : ctx,
3954 : )
3955 : .await
3956 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?;
3957 :
3958 : if self.cancel.is_cancelled() {
3959 : return Err(FlushLayerError::Cancelled);
3960 : }
3961 :
3962 : // FIXME(auxfilesv2): support multiple metadata key partitions might need initdb support as well?
3963 : // This code path will not be hit during regression tests. After #7099 we have a single partition
3964 : // with two key ranges. If someone wants to fix initdb optimization in the future, this might need
3965 : // to be fixed.
3966 :
3967 : // For metadata, always create delta layers.
3968 : let delta_layer = if !metadata_partition.parts.is_empty() {
3969 : assert_eq!(
3970 : metadata_partition.parts.len(),
3971 : 1,
3972 : "currently sparse keyspace should only contain a single metadata keyspace"
3973 : );
3974 : let metadata_keyspace = &metadata_partition.parts[0];
3975 : self.create_delta_layer(
3976 : &frozen_layer,
3977 : Some(
3978 : metadata_keyspace.0.ranges.first().unwrap().start
3979 : ..metadata_keyspace.0.ranges.last().unwrap().end,
3980 : ),
3981 : ctx,
3982 : )
3983 : .await
3984 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?
3985 : } else {
3986 : None
3987 : };
3988 :
3989 : // For image layers, we add them immediately into the layer map.
3990 : let mut layers_to_upload = Vec::new();
3991 : layers_to_upload.extend(
3992 : self.create_image_layers(
3993 : &rel_partition,
3994 : self.initdb_lsn,
3995 : ImageLayerCreationMode::Initial,
3996 : ctx,
3997 : )
3998 : .await?,
3999 : );
4000 :
4001 : if let Some(delta_layer) = delta_layer {
4002 : layers_to_upload.push(delta_layer.clone());
4003 : (layers_to_upload, Some(delta_layer))
4004 : } else {
4005 : (layers_to_upload, None)
4006 : }
4007 : } else {
4008 : // Normal case, write out a L0 delta layer file.
4009 : // `create_delta_layer` will not modify the layer map.
4010 : // We will remove frozen layer and add delta layer in one atomic operation later.
4011 : let Some(layer) = self
4012 : .create_delta_layer(&frozen_layer, None, ctx)
4013 : .await
4014 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?
4015 : else {
4016 : panic!("delta layer cannot be empty if no filter is applied");
4017 : };
4018 : (
4019 : // FIXME: even though we assume a single image and a single delta layer,
4020 : // we push them into a vec
4021 : vec![layer.clone()],
4022 : Some(layer),
4023 : )
4024 : };
4025 :
4026 : pausable_failpoint!("flush-layer-cancel-after-writing-layer-out-pausable");
4027 :
4028 : if self.cancel.is_cancelled() {
4029 : return Err(FlushLayerError::Cancelled);
4030 : }
4031 :
4032 : let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1);
4033 :
4034 : // The new on-disk layers are now in the layer map. We can remove the
4035 : // in-memory layer from the map now. The flushed layer is stored in
4036 : // the mapping in `create_delta_layer`.
4037 : {
4038 : let mut guard = self.layers.write().await;
4039 :
4040 : if self.cancel.is_cancelled() {
4041 : return Err(FlushLayerError::Cancelled);
4042 : }
4043 :
4044 : guard.finish_flush_l0_layer(delta_layer_to_add.as_ref(), &frozen_layer, &self.metrics);
4045 :
4046 : if self.set_disk_consistent_lsn(disk_consistent_lsn) {
4047 : // Schedule remote uploads that will reflect our new disk_consistent_lsn
4048 : self.schedule_uploads(disk_consistent_lsn, layers_to_upload)
4049 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?;
4050 : }
4051 : // release lock on 'layers'
4052 : };
4053 :
4054 : // FIXME: between create_delta_layer and the scheduling of the upload in `update_metadata_file`,
4055 : // a compaction can delete the file and then it won't be available for uploads any more.
4056 : // We still schedule the upload, resulting in an error, but ideally we'd somehow avoid this
4057 : // race situation.
4058 : // See https://github.com/neondatabase/neon/issues/4526
4059 : pausable_failpoint!("flush-frozen-pausable");
4060 :
4061 : // This failpoint is used by another test case `test_pageserver_recovery`.
4062 : fail_point!("flush-frozen-exit");
4063 :
4064 : Ok(Lsn(lsn_range.end.0 - 1))
4065 : }
4066 :
4067 : /// Return true if the value changed
4068 : ///
4069 : /// This function must only be used from the layer flush task, and may not be called concurrently.
4070 1108 : fn set_disk_consistent_lsn(&self, new_value: Lsn) -> bool {
4071 1108 : // We do a simple load/store cycle: that's why this function isn't safe for concurrent use.
4072 1108 : let old_value = self.disk_consistent_lsn.load();
4073 1108 : if new_value != old_value {
4074 1108 : assert!(new_value >= old_value);
4075 1108 : self.disk_consistent_lsn.store(new_value);
4076 1108 : true
4077 : } else {
4078 0 : false
4079 : }
4080 1108 : }
4081 :
4082 : /// Update metadata file
4083 1114 : fn schedule_uploads(
4084 1114 : &self,
4085 1114 : disk_consistent_lsn: Lsn,
4086 1114 : layers_to_upload: impl IntoIterator<Item = ResidentLayer>,
4087 1114 : ) -> anyhow::Result<()> {
4088 1114 : // We can only save a valid 'prev_record_lsn' value on disk if we
4089 1114 : // flushed *all* in-memory changes to disk. We only track
4090 1114 : // 'prev_record_lsn' in memory for the latest processed record, so we
4091 1114 : // don't remember what the correct value that corresponds to some old
4092 1114 : // LSN is. But if we flush everything, then the value corresponding
4093 1114 : // current 'last_record_lsn' is correct and we can store it on disk.
4094 1114 : let RecordLsn {
4095 1114 : last: last_record_lsn,
4096 1114 : prev: prev_record_lsn,
4097 1114 : } = self.last_record_lsn.load();
4098 1114 : let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
4099 1035 : Some(prev_record_lsn)
4100 : } else {
4101 79 : None
4102 : };
4103 :
4104 1114 : let update = crate::tenant::metadata::MetadataUpdate::new(
4105 1114 : disk_consistent_lsn,
4106 1114 : ondisk_prev_record_lsn,
4107 1114 : *self.latest_gc_cutoff_lsn.read(),
4108 1114 : );
4109 1114 :
4110 1114 : fail_point!("checkpoint-before-saving-metadata", |x| bail!(
4111 0 : "{}",
4112 0 : x.unwrap()
4113 1114 : ));
4114 :
4115 2236 : for layer in layers_to_upload {
4116 1122 : self.remote_client.schedule_layer_file_upload(layer)?;
4117 : }
4118 1114 : self.remote_client
4119 1114 : .schedule_index_upload_for_metadata_update(&update)?;
4120 :
4121 1114 : Ok(())
4122 1114 : }
4123 :
4124 0 : pub(crate) async fn preserve_initdb_archive(&self) -> anyhow::Result<()> {
4125 0 : self.remote_client
4126 0 : .preserve_initdb_archive(
4127 0 : &self.tenant_shard_id.tenant_id,
4128 0 : &self.timeline_id,
4129 0 : &self.cancel,
4130 0 : )
4131 0 : .await
4132 0 : }
4133 :
4134 : // Write out the given frozen in-memory layer as a new L0 delta file. This L0 file will not be tracked
4135 : // in the layer map immediately. The caller is responsible for putting it into the layer map.
4136 1108 : async fn create_delta_layer(
4137 1108 : self: &Arc<Self>,
4138 1108 : frozen_layer: &Arc<InMemoryLayer>,
4139 1108 : key_range: Option<Range<Key>>,
4140 1108 : ctx: &RequestContext,
4141 1108 : ) -> anyhow::Result<Option<ResidentLayer>> {
4142 1108 : let self_clone = Arc::clone(self);
4143 1108 : let frozen_layer = Arc::clone(frozen_layer);
4144 1108 : let ctx = ctx.attached_child();
4145 1108 : let work = async move {
4146 1108 : let Some(new_delta) = frozen_layer
4147 1108 : .write_to_disk(&self_clone, &ctx, key_range)
4148 85742 : .await?
4149 : else {
4150 140 : return Ok(None);
4151 : };
4152 : // The write_to_disk() above calls writer.finish() which already did the fsync of the inodes.
4153 : // We just need to fsync the directory in which these inodes are linked,
4154 : // which we know to be the timeline directory.
4155 : //
4156 : // We use fatal_err() below because after write_to_disk returns with success,
4157 : // the in-memory state of the filesystem already has the layer file in its final place,
4158 : // and subsequent pageserver code could think it's durable while it really isn't.
4159 968 : let timeline_dir = VirtualFile::open(
4160 968 : &self_clone
4161 968 : .conf
4162 968 : .timeline_path(&self_clone.tenant_shard_id, &self_clone.timeline_id),
4163 968 : &ctx,
4164 968 : )
4165 487 : .await
4166 968 : .fatal_err("VirtualFile::open for timeline dir fsync");
4167 968 : timeline_dir
4168 968 : .sync_all()
4169 484 : .await
4170 968 : .fatal_err("VirtualFile::sync_all timeline dir");
4171 968 : anyhow::Ok(Some(new_delta))
4172 1108 : };
4173 : // Before tokio-epoll-uring, we ran write_to_disk & the sync_all inside spawn_blocking.
4174 : // Preserve that behavior for `virtual_file_io_engine=std-fs` so it behaves the same way.
4175 : use crate::virtual_file::io_engine::IoEngine;
4176 1108 : match crate::virtual_file::io_engine::get() {
4177 0 : IoEngine::NotSet => panic!("io engine not set"),
4178 : IoEngine::StdFs => {
4179 554 : let span = tracing::info_span!("blocking");
4180 554 : tokio::task::spawn_blocking({
4181 554 : move || Handle::current().block_on(work.instrument(span))
4182 554 : })
4183 554 : .await
4184 554 : .context("spawn_blocking")
4185 554 : .and_then(|x| x)
4186 : }
4187 : #[cfg(target_os = "linux")]
4188 54627 : IoEngine::TokioEpollUring => work.await,
4189 : }
4190 1108 : }
4191 :
4192 504 : async fn repartition(
4193 504 : &self,
4194 504 : lsn: Lsn,
4195 504 : partition_size: u64,
4196 504 : flags: EnumSet<CompactFlags>,
4197 504 : ctx: &RequestContext,
4198 504 : ) -> anyhow::Result<((KeyPartitioning, SparseKeyPartitioning), Lsn)> {
4199 504 : let Ok(mut partitioning_guard) = self.partitioning.try_lock() else {
4200 : // NB: there are two callers, one is the compaction task, of which there is only one per struct Tenant and hence Timeline.
4201 : // The other is the initdb optimization in flush_frozen_layer, used by `boostrap_timeline`, which runs before `.activate()`
4202 : // and hence before the compaction task starts.
4203 0 : anyhow::bail!("repartition() called concurrently, this should not happen");
4204 : };
4205 504 : let ((dense_partition, sparse_partition), partition_lsn) = &*partitioning_guard;
4206 504 : if lsn < *partition_lsn {
4207 0 : anyhow::bail!("repartition() called with LSN going backwards, this should not happen");
4208 504 : }
4209 504 :
4210 504 : let distance = lsn.0 - partition_lsn.0;
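     : // Reuse the cached partitioning if we have not advanced far enough since
     : // the last repartition and the caller did not force one.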
4211 504 : if *partition_lsn != Lsn(0)
4212 262 : && distance <= self.repartition_threshold
4213 262 : && !flags.contains(CompactFlags::ForceRepartition)
4214 : {
4215 248 : debug!(
4216 : distance,
4217 : threshold = self.repartition_threshold,
4218 0 : "no repartitioning needed"
4219 : );
4220 248 : return Ok((
4221 248 : (dense_partition.clone(), sparse_partition.clone()),
4222 248 : *partition_lsn,
4223 248 : ));
4224 256 : }
4225 :
4226 14054 : let (dense_ks, sparse_ks) = self.collect_keyspace(lsn, ctx).await?;
4227 256 : let dense_partitioning = dense_ks.partition(&self.shard_identity, partition_size);
4228 256 : let sparse_partitioning = SparseKeyPartitioning {
4229 256 : parts: vec![sparse_ks],
4230 256 : }; // no partitioning for metadata keys for now
4231 256 : *partitioning_guard = ((dense_partitioning, sparse_partitioning), lsn);
4232 256 :
4233 256 : Ok((partitioning_guard.0.clone(), partitioning_guard.1))
4234 504 : }
4235 :
4236 : // Is it time to create a new image layer for the given partition?
4237 14 : async fn time_for_new_image_layer(&self, partition: &KeySpace, lsn: Lsn) -> bool {
4238 14 : let threshold = self.get_image_creation_threshold();
4239 :
4240 14 : let guard = self.layers.read().await;
4241 14 : let layers = guard.layer_map();
4242 14 :
4243 14 : let mut max_deltas = 0;
4244 28 : for part_range in &partition.ranges {
4245 14 : let image_coverage = layers.image_coverage(part_range, lsn);
4246 28 : for (img_range, last_img) in image_coverage {
4247 14 : let img_lsn = if let Some(last_img) = last_img {
4248 0 : last_img.get_lsn_range().end
4249 : } else {
4250 14 : Lsn(0)
4251 : };
4252 : // Let's consider an example:
4253 : //
4254 : // delta layer with LSN range 71-81
4255 : // delta layer with LSN range 81-91
4256 : // delta layer with LSN range 91-101
4257 : // image layer at LSN 100
4258 : //
4259 : // If 'lsn' is still 100, i.e. no new WAL has been processed since the last image layer,
4260 : // there's no need to create a new one. We check this case explicitly, to avoid passing
4261 : // a bogus range to count_deltas below, with start > end. It's even possible that there
4262 : // are some delta layers *later* than current 'lsn', if more WAL was processed and flushed
4263 : // after we read last_record_lsn, which is passed here in the 'lsn' argument.
4264 14 : if img_lsn < lsn {
4265 14 : let num_deltas =
4266 14 : layers.count_deltas(&img_range, &(img_lsn..lsn), Some(threshold));
4267 14 :
4268 14 : max_deltas = max_deltas.max(num_deltas);
4269 14 : if num_deltas >= threshold {
4270 0 : debug!(
4271 0 : "key range {}-{}, has {} deltas on this timeline in LSN range {}..{}",
4272 : img_range.start, img_range.end, num_deltas, img_lsn, lsn
4273 : );
4274 0 : return true;
4275 14 : }
4276 0 : }
4277 : }
4278 : }
4279 :
4280 14 : debug!(
4281 : max_deltas,
4282 0 : "none of the partitioned ranges had >= {threshold} deltas"
4283 : );
4284 14 : false
4285 14 : }
4286 :
4287 : /// Create image layers for Postgres data. Assumes the caller passes a partition that is not too large,
4288 : /// so that at most one image layer will be produced from this function.
4289 168 : async fn create_image_layer_for_rel_blocks(
4290 168 : self: &Arc<Self>,
4291 168 : partition: &KeySpace,
4292 168 : mut image_layer_writer: ImageLayerWriter,
4293 168 : lsn: Lsn,
4294 168 : ctx: &RequestContext,
4295 168 : img_range: Range<Key>,
4296 168 : start: Key,
4297 168 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
4298 168 : let mut wrote_keys = false;
4299 168 :
4300 168 : let mut key_request_accum = KeySpaceAccum::new();
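     : // Accumulate keys and issue one vectored get per batch (up to
     : // MAX_GET_VECTORED_KEYS keys, or whatever remains at a range boundary)
     : // rather than one get per key.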
4301 1096 : for range in &partition.ranges {
4302 928 : let mut key = range.start;
4303 2158 : while key < range.end {
4304 : // Decide whether to retain this key: usually we do, but sharded tenants may
4305 : // need to drop keys that don't belong to them. If we retain the key, add it
4306 : // to `key_request_accum` for later issuing a vectored get
4307 1230 : if self.shard_identity.is_key_disposable(&key) {
4308 0 : debug!(
4309 0 : "Dropping key {} during compaction (it belongs on shard {:?})",
4310 0 : key,
4311 0 : self.shard_identity.get_shard_number(&key)
4312 : );
4313 1230 : } else {
4314 1230 : key_request_accum.add_key(key);
4315 1230 : }
4316 :
4317 1230 : let last_key_in_range = key.next() == range.end;
4318 1230 : key = key.next();
4319 1230 :
4320 1230 : // Maybe flush `key_rest_accum`
4321 1230 : if key_request_accum.raw_size() >= Timeline::MAX_GET_VECTORED_KEYS
4322 1230 : || (last_key_in_range && key_request_accum.raw_size() > 0)
4323 : {
4324 928 : let results = self
4325 928 : .get_vectored(key_request_accum.consume_keyspace(), lsn, ctx)
4326 8 : .await?;
4327 :
4328 2158 : for (img_key, img) in results {
4329 1230 : let img = match img {
4330 1230 : Ok(img) => img,
4331 0 : Err(err) => {
4332 0 : // If we fail to reconstruct a VM or FSM page, we can zero the
4333 0 : // page without losing any actual user data. That seems better
4334 0 : // than failing repeatedly and getting stuck.
4335 0 : //
4336 0 : // We had a bug at one point, where we truncated the FSM and VM
4337 0 : // in the pageserver, but the Postgres didn't know about that
4338 0 : // and continued to generate incremental WAL records for pages
4339 0 : // that didn't exist in the pageserver. Trying to replay those
4340 0 : // WAL records failed to find the previous image of the page.
4341 0 : // This special case allows us to recover from that situation.
4342 0 : // See https://github.com/neondatabase/neon/issues/2601.
4343 0 : //
4344 0 : // Unfortunately we cannot do this for the main fork, or for
4345 0 : // any metadata keys, as that would lead to actual data
4346 0 : // loss.
4347 0 : if img_key.is_rel_fsm_block_key() || img_key.is_rel_vm_block_key() {
4348 0 : warn!("could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}");
4349 0 : ZERO_PAGE.clone()
4350 : } else {
4351 0 : return Err(CreateImageLayersError::PageReconstructError(err));
4352 : }
4353 : }
4354 : };
4355 :
4356 : // Write all the keys we just read into our new image layer.
4357 1301 : image_layer_writer.put_image(img_key, img, ctx).await?;
4358 1230 : wrote_keys = true;
4359 : }
4360 302 : }
4361 : }
4362 : }
4363 :
4364 168 : if wrote_keys {
4365 : // Normal path: we have written some data into the new image layer for this
4366 : // partition, so flush it to disk.
4367 336 : let image_layer = image_layer_writer.finish(self, ctx).await?;
4368 168 : Ok(ImageLayerCreationOutcome {
4369 168 : image: Some(image_layer),
4370 168 : next_start_key: img_range.end,
4371 168 : })
4372 : } else {
4373 : // Special case: the image layer may be empty if this is a sharded tenant and the
4374 : // partition does not cover any keys owned by this shard. In this case, to ensure
4375 : // we don't leave gaps between image layers, leave `start` where it is, so that the next
4376 : // layer we write will cover the key range that we just scanned.
4377 0 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
4378 0 : Ok(ImageLayerCreationOutcome {
4379 0 : image: None,
4380 0 : next_start_key: start,
4381 0 : })
4382 : }
4383 168 : }
4384 :
4385 : /// Create an image layer for metadata keys. For now, this function produces one image layer for all
4386 : /// metadata keys. Because the total size of metadata keys cannot exceed the basebackup size limit,
4387 : /// they fit comfortably into a single image layer.
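     : ///
     : /// A rough sketch of the decision this function makes (names as in the body below):
     : ///
     : /// ```ignore
     : /// // Only generate metadata images once enough delta layers have piled up,
     : /// // unless generation is forced (i.e. `mode` is not `Try`):
     : /// let trigger_generation = delta_files_accessed as usize >= MAX_AUX_FILE_V2_DELTAS;
     : /// if !trigger_generation && mode == ImageLayerCreationMode::Try {
     : ///     return Ok(ImageLayerCreationOutcome { image: None, next_start_key: img_range.end });
     : /// }
     : /// ```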
4388 : #[allow(clippy::too_many_arguments)]
4389 16 : async fn create_image_layer_for_metadata_keys(
4390 16 : self: &Arc<Self>,
4391 16 : partition: &KeySpace,
4392 16 : mut image_layer_writer: ImageLayerWriter,
4393 16 : lsn: Lsn,
4394 16 : ctx: &RequestContext,
4395 16 : img_range: Range<Key>,
4396 16 : mode: ImageLayerCreationMode,
4397 16 : start: Key,
4398 16 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
4399 16 : assert!(!matches!(mode, ImageLayerCreationMode::Initial));
4400 :
4401 : // Metadata keys image layer creation.
4402 16 : let mut reconstruct_state = ValuesReconstructState::default();
4403 16 : let data = self
4404 16 : .get_vectored_impl(partition.clone(), lsn, &mut reconstruct_state, ctx)
4405 4066 : .await?;
4406 16 : let (data, total_kb_retrieved, total_keys_retrieved) = {
4407 16 : let mut new_data = BTreeMap::new();
4408 16 : let mut total_kb_retrieved = 0;
4409 16 : let mut total_keys_retrieved = 0;
4410 10028 : for (k, v) in data {
4411 10012 : let v = v.map_err(CreateImageLayersError::PageReconstructError)?;
4412 10012 : total_kb_retrieved += KEY_SIZE + v.len();
4413 10012 : total_keys_retrieved += 1;
4414 10012 : new_data.insert(k, v);
4415 : }
4416 16 : (new_data, total_kb_retrieved / 1024, total_keys_retrieved)
4417 16 : };
4418 16 : let delta_files_accessed = reconstruct_state.get_delta_layers_visited();
4419 16 :
4420 16 : let trigger_generation = delta_files_accessed as usize >= MAX_AUX_FILE_V2_DELTAS;
4421 16 : debug!(
4422 : trigger_generation,
4423 : delta_files_accessed,
4424 : total_kb_retrieved,
4425 : total_keys_retrieved,
4426 0 : "generate metadata images"
4427 : );
4428 :
4429 16 : if !trigger_generation && mode == ImageLayerCreationMode::Try {
4430 2 : return Ok(ImageLayerCreationOutcome {
4431 2 : image: None,
4432 2 : next_start_key: img_range.end,
4433 2 : });
4434 14 : }
4435 14 : let mut wrote_any_image = false;
4436 10026 : for (k, v) in data {
4437 10012 : if v.is_empty() {
4438 : // the key has been deleted, it does not need an image
4439 : // in metadata keyspace, an empty image == tombstone
4440 8 : continue;
4441 10004 : }
4442 10004 : wrote_any_image = true;
4443 10004 :
4444 10004 : // No need to handle sharding b/c metadata keys are always on the 0-th shard.
4445 10004 :
4446 10004 : // TODO: split image layers to avoid overly large layer files. Overly large image files are not handled
4447 10004 : // on the normal data path either.
4448 10161 : image_layer_writer.put_image(k, v, ctx).await?;
4449 : }
4450 :
4451 14 : if wrote_any_image {
4452 : // Normal path: we have written some data into the new image layer for this
4453 : // partition, so flush it to disk.
4454 24 : let image_layer = image_layer_writer.finish(self, ctx).await?;
4455 12 : Ok(ImageLayerCreationOutcome {
4456 12 : image: Some(image_layer),
4457 12 : next_start_key: img_range.end,
4458 12 : })
4459 : } else {
4460 : // Special case: the image layer may be empty, for example if every metadata key
4461 : // in this partition turned out to be a tombstone. In this case, to ensure
4462 : // we don't leave gaps between image layers, leave `start` where it is, so that the next
4463 : // layer we write will cover the key range that we just scanned.
4464 2 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
4465 2 : Ok(ImageLayerCreationOutcome {
4466 2 : image: None,
4467 2 : next_start_key: start,
4468 2 : })
4469 : }
4470 16 : }
4471 :
4472 1008 : #[tracing::instrument(skip_all, fields(%lsn, %mode))]
4473 : async fn create_image_layers(
4474 : self: &Arc<Timeline>,
4475 : partitioning: &KeyPartitioning,
4476 : lsn: Lsn,
4477 : mode: ImageLayerCreationMode,
4478 : ctx: &RequestContext,
4479 : ) -> Result<Vec<ResidentLayer>, CreateImageLayersError> {
4480 : let timer = self.metrics.create_images_time_histo.start_timer();
4481 : let mut image_layers = Vec::new();
4482 :
4483 : // We need to avoid holes between generated image layers.
4484 : // Otherwise LayerMap::image_layer_exists will return false if the key range of some layer is covered
4485 : // by more than one image layer with a hole between them. In that case such a layer cannot be utilized by GC.
4486 : //
4487 : // How can such a hole between partitions appear?
4488 : // If we have a relation with relid=1 and size 100, and a relation with relid=2 and size 200, then
4489 : // KeySpace::partition may produce partitions <100000000..100000099> and <200000000..200000199>.
4490 : // If there is a delta layer <100000000..300000000>, it will never be garbage collected, because the
4491 : // image layers <100000000..100000099> and <200000000..200000199> do not completely cover it.
4492 : let mut start = Key::MIN;
4493 :
4494 : let check_for_image_layers = {
4495 : let last_checks_at = self.last_image_layer_creation_check_at.load();
4496 : let distance = lsn
4497 : .checked_sub(last_checks_at)
4498 : .expect("Attempt to compact with LSN going backwards");
4499 : let min_distance = self.get_image_layer_creation_check_threshold() as u64
4500 : * self.get_checkpoint_distance();
4501 :
4502 : // Skip the expensive delta layer counting if this timeline has not ingested sufficient
4503 : // WAL since the last check.
4504 : distance.0 >= min_distance
4505 : };
4506 :
4507 : if check_for_image_layers {
4508 : self.last_image_layer_creation_check_at.store(lsn);
4509 : }
4510 :
4511 : for partition in partitioning.parts.iter() {
4512 : let img_range = start..partition.ranges.last().unwrap().end;
4513 : let compact_metadata = partition.overlaps(&Key::metadata_key_range());
4514 : if compact_metadata {
4515 : for range in &partition.ranges {
4516 : assert!(
4517 : range.start.field1 >= METADATA_KEY_BEGIN_PREFIX
4518 : && range.end.field1 <= METADATA_KEY_END_PREFIX,
4519 : "metadata keys must be partitioned separately"
4520 : );
4521 : }
4522 : if mode == ImageLayerCreationMode::Initial {
4523 : return Err(CreateImageLayersError::Other(anyhow::anyhow!("no image layer should be created for metadata keys when flushing frozen layers")));
4524 : }
4525 : if mode == ImageLayerCreationMode::Try && !check_for_image_layers {
4526 : // Skip compaction if there are not enough updates. Metadata compaction will do a scan and
4527 : // might interfere with evictions.
4528 : start = img_range.end;
4529 : continue;
4530 : }
4531 : } else if let ImageLayerCreationMode::Try = mode {
4532 : // check_for_image_layers = false -> skip
4533 : // check_for_image_layers = true -> check time_for_new_image_layer -> skip/generate
4534 : if !check_for_image_layers || !self.time_for_new_image_layer(partition, lsn).await {
4535 : start = img_range.end;
4536 : continue;
4537 : }
4538 : }
4539 :
4540 : let image_layer_writer = ImageLayerWriter::new(
4541 : self.conf,
4542 : self.timeline_id,
4543 : self.tenant_shard_id,
4544 : &img_range,
4545 : lsn,
4546 : ctx,
4547 : )
4548 : .await?;
4549 :
4550 0 : fail_point!("image-layer-writer-fail-before-finish", |_| {
4551 0 : Err(CreateImageLayersError::Other(anyhow::anyhow!(
4552 0 : "failpoint image-layer-writer-fail-before-finish"
4553 0 : )))
4554 0 : });
4555 :
4556 : if !compact_metadata {
4557 : let ImageLayerCreationOutcome {
4558 : image,
4559 : next_start_key,
4560 : } = self
4561 : .create_image_layer_for_rel_blocks(
4562 : partition,
4563 : image_layer_writer,
4564 : lsn,
4565 : ctx,
4566 : img_range,
4567 : start,
4568 : )
4569 : .await?;
4570 :
4571 : start = next_start_key;
4572 : image_layers.extend(image);
4573 : } else {
4574 : let ImageLayerCreationOutcome {
4575 : image,
4576 : next_start_key,
4577 : } = self
4578 : .create_image_layer_for_metadata_keys(
4579 : partition,
4580 : image_layer_writer,
4581 : lsn,
4582 : ctx,
4583 : img_range,
4584 : mode,
4585 : start,
4586 : )
4587 : .await?;
4588 : start = next_start_key;
4589 : image_layers.extend(image);
4590 : }
4591 : }
4592 :
4593 : // The writer.finish() above already did the fsync of the inodes.
4594 : // We just need to fsync the directory in which these inodes are linked,
4595 : // which we know to be the timeline directory.
4596 : if !image_layers.is_empty() {
4597 : // We use fatal_err() below because, after writer.finish() returns with success,
4598 : // the in-memory state of the filesystem already has the layer file in its final place,
4599 : // and subsequent pageserver code could think it's durable while it really isn't.
4600 : let timeline_dir = VirtualFile::open(
4601 : &self
4602 : .conf
4603 : .timeline_path(&self.tenant_shard_id, &self.timeline_id),
4604 : ctx,
4605 : )
4606 : .await
4607 : .fatal_err("VirtualFile::open for timeline dir fsync");
4608 : timeline_dir
4609 : .sync_all()
4610 : .await
4611 : .fatal_err("VirtualFile::sync_all timeline dir");
4612 : }
4613 :
4614 : let mut guard = self.layers.write().await;
4615 :
4616 : // FIXME: we could add the images to be uploaded *before* returning from here, but right
4617 : // now they are being scheduled outside of the write lock
4618 : guard.track_new_image_layers(&image_layers, &self.metrics);
4619 : drop_wlock(guard);
4620 : timer.stop_and_record();
4621 :
4622 : Ok(image_layers)
4623 : }
4624 :
4625 : /// Wait until the background initial logical size calculation is complete, or
4626 : /// this Timeline is shut down. Calling this function will cause the initial
4627 : /// logical size calculation to skip waiting for the background jobs barrier.
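     : ///
     : /// Illustrative call shape (a sketch; the caller holds an `Arc<Timeline>`):
     : ///
     : /// ```ignore
     : /// // e.g. a caller that wants an accurate logical size before reporting it:
     : /// timeline.clone().await_initial_logical_size().await;
     : /// ```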
4628 0 : pub(crate) async fn await_initial_logical_size(self: Arc<Self>) {
4629 0 : if !self.shard_identity.is_shard_zero() {
4630 : // We don't populate logical size on shard >0: skip waiting for it.
4631 0 : return;
4632 0 : }
4633 0 :
4634 0 : if self.remote_client.is_deleting() {
4635 : // The timeline was created in a deletion-resume state, we don't expect logical size to be populated
4636 0 : return;
4637 0 : }
4638 :
4639 0 : if let Some(await_bg_cancel) = self
4640 0 : .current_logical_size
4641 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
4642 0 : .get()
4643 0 : {
4644 0 : await_bg_cancel.cancel();
4645 0 : } else {
4646 : // We should not wait if we were not able to explicitly instruct
4647 : // the logical size cancellation to skip the concurrency limit semaphore.
4648 : // TODO: this is an unexpected case. We should restructure so that it
4649 : // can't happen.
4650 0 : tracing::warn!(
4651 0 : "await_initial_logical_size: can't get semaphore cancel token, skipping"
4652 : );
4653 0 : debug_assert!(false);
4654 : }
4655 :
4656 : tokio::select!(
4657 : _ = self.current_logical_size.initialized.acquire() => {},
4658 : _ = self.cancel.cancelled() => {}
4659 : )
4660 0 : }
4661 :
4662 : /// Detach this timeline from its ancestor by copying all of the ancestor's layers up to
4663 : /// the ancestor_lsn as this timeline's own layers.
4664 : ///
4665 : /// Requires a timeline that:
4666 : /// - has an ancestor to detach from
4667 : /// - the ancestor does not have an ancestor -- follows from the original RFC limitations, not
4668 : /// a technical requirement
4669 : ///
4670 : /// After the operation has been started, it cannot be canceled. Upon restart it needs to be
4671 : /// polled again until completion.
4672 : ///
4673 : /// During the operation all timelines sharing the data with this timeline will be reparented
4674 : /// from our ancestor to be branches of this timeline.
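     : ///
     : /// Sketch of the two-step flow together with
     : /// [`Self::complete_detaching_timeline_ancestor`] (error handling elided):
     : ///
     : /// ```ignore
     : /// let (_completion, prepared) = timeline
     : ///     .prepare_to_detach_from_ancestor(&tenant, options, &ctx)
     : ///     .await?;
     : /// let reparented = timeline
     : ///     .complete_detaching_timeline_ancestor(&tenant, prepared, &ctx)
     : ///     .await?;
     : /// ```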
4675 0 : pub(crate) async fn prepare_to_detach_from_ancestor(
4676 0 : self: &Arc<Timeline>,
4677 0 : tenant: &crate::tenant::Tenant,
4678 0 : options: detach_ancestor::Options,
4679 0 : ctx: &RequestContext,
4680 0 : ) -> Result<
4681 0 : (
4682 0 : completion::Completion,
4683 0 : detach_ancestor::PreparedTimelineDetach,
4684 0 : ),
4685 0 : detach_ancestor::Error,
4686 0 : > {
4687 0 : detach_ancestor::prepare(self, tenant, options, ctx).await
4688 0 : }
4689 :
4690 : /// Completes the ancestor detach. This method is to be called while holding the
4691 : /// TenantManager's tenant slot, so during this method we cannot be deleted nor can any
4692 : /// timeline be deleted. After this method returns successfully, tenant must be reloaded.
4693 : ///
4694 : /// Pageserver receiving a SIGKILL during this operation is not supported (yet).
4695 0 : pub(crate) async fn complete_detaching_timeline_ancestor(
4696 0 : self: &Arc<Timeline>,
4697 0 : tenant: &crate::tenant::Tenant,
4698 0 : prepared: detach_ancestor::PreparedTimelineDetach,
4699 0 : ctx: &RequestContext,
4700 0 : ) -> Result<Vec<TimelineId>, anyhow::Error> {
4701 0 : detach_ancestor::complete(self, tenant, prepared, ctx).await
4702 0 : }
4703 :
4704 : /// Switch aux file policy and schedule upload to the index part.
4705 16 : pub(crate) fn do_switch_aux_policy(&self, policy: AuxFilePolicy) -> anyhow::Result<()> {
4706 16 : self.last_aux_file_policy.store(Some(policy));
4707 16 : self.remote_client
4708 16 : .schedule_index_upload_for_aux_file_policy_update(Some(policy))?;
4709 16 : Ok(())
4710 16 : }
4711 : }
4712 :
4713 : /// Top-level failure to compact.
4714 0 : #[derive(Debug, thiserror::Error)]
4715 : pub(crate) enum CompactionError {
4716 : #[error("The timeline or pageserver is shutting down")]
4717 : ShuttingDown,
4718 : /// Compaction cannot be done right now; e.g. page reconstruction failed.
4719 : #[error(transparent)]
4720 : Other(#[from] anyhow::Error),
4721 : }
4722 :
4723 : impl From<CollectKeySpaceError> for CompactionError {
4724 0 : fn from(err: CollectKeySpaceError) -> Self {
4725 0 : match err {
4726 : CollectKeySpaceError::Cancelled
4727 : | CollectKeySpaceError::PageRead(PageReconstructError::Cancelled) => {
4728 0 : CompactionError::ShuttingDown
4729 : }
4730 0 : e => CompactionError::Other(e.into()),
4731 : }
4732 0 : }
4733 : }
4734 :
4735 : #[serde_as]
4736 196 : #[derive(serde::Serialize)]
4737 : struct RecordedDuration(#[serde_as(as = "serde_with::DurationMicroSeconds")] Duration);
4738 :
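     : /// Measures the time between consecutive recordings; compaction uses this to time its phases.
     : ///
     : /// A minimal usage sketch (illustrative; needs a tokio runtime because of `tokio::time::Instant`):
     : ///
     : /// ```ignore
     : /// let mut rec = DurationRecorder::Recorded(
     : ///     RecordedDuration(Duration::ZERO),
     : ///     tokio::time::Instant::now(),
     : /// );
     : /// // ... do some work ...
     : /// rec = rec.till_now(); // duration since the previous recording
     : /// let elapsed: Option<RecordedDuration> = rec.into_recorded();
     : /// ```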
4739 : #[derive(Default)]
4740 : enum DurationRecorder {
4741 : #[default]
4742 : NotStarted,
4743 : Recorded(RecordedDuration, tokio::time::Instant),
4744 : }
4745 :
4746 : impl DurationRecorder {
4747 504 : fn till_now(&self) -> DurationRecorder {
4748 504 : match self {
4749 : DurationRecorder::NotStarted => {
4750 0 : panic!("must only call on recorded measurements")
4751 : }
4752 504 : DurationRecorder::Recorded(_, ended) => {
4753 504 : let now = tokio::time::Instant::now();
4754 504 : DurationRecorder::Recorded(RecordedDuration(now - *ended), now)
4755 504 : }
4756 504 : }
4757 504 : }
4758 196 : fn into_recorded(self) -> Option<RecordedDuration> {
4759 196 : match self {
4760 0 : DurationRecorder::NotStarted => None,
4761 196 : DurationRecorder::Recorded(recorded, _) => Some(recorded),
4762 : }
4763 196 : }
4764 : }
4765 :
4766 : impl Timeline {
4767 28 : async fn finish_compact_batch(
4768 28 : self: &Arc<Self>,
4769 28 : new_deltas: &[ResidentLayer],
4770 28 : new_images: &[ResidentLayer],
4771 28 : layers_to_remove: &[Layer],
4772 28 : ) -> anyhow::Result<()> {
4773 28 : let mut guard = self.layers.write().await;
4774 :
4775 28 : let mut duplicated_layers = HashSet::new();
4776 28 :
4777 28 : let mut insert_layers = Vec::with_capacity(new_deltas.len());
4778 :
4779 336 : for l in new_deltas {
4780 308 : if guard.contains(l.as_ref()) {
4781 : // expected in tests
4782 0 : tracing::error!(layer=%l, "duplicated L1 layer");
4783 :
4784 : // A good way to cause a duplicate: repeatedly erroring after taking the write lock
4785 : // `guard` on self.layers. As of this writing, there are no error returns except
4786 : // for compact_level0_phase1 creating an L0, which does not happen in practice
4787 : // because we have not implemented L0 => L0 compaction.
4788 0 : duplicated_layers.insert(l.layer_desc().key());
4789 308 : } else if LayerMap::is_l0(l.layer_desc()) {
4790 0 : bail!("compaction generates a L0 layer file as output, which will cause infinite compaction.");
4791 308 : } else {
4792 308 : insert_layers.push(l.clone());
4793 308 : }
4794 : }
4795 :
4796 : // only remove those inputs which were not outputs
4797 28 : let remove_layers: Vec<Layer> = layers_to_remove
4798 28 : .iter()
4799 402 : .filter(|l| !duplicated_layers.contains(&l.layer_desc().key()))
4800 28 : .cloned()
4801 28 : .collect();
4802 28 :
4803 28 : if !new_images.is_empty() {
4804 0 : guard.track_new_image_layers(new_images, &self.metrics);
4805 28 : }
4806 :
4807 : // deletion will happen later, the layer file manager calls garbage_collect_on_drop
4808 28 : guard.finish_compact_l0(&remove_layers, &insert_layers, &self.metrics);
4809 28 :
4810 28 : self.remote_client
4811 28 : .schedule_compaction_update(&remove_layers, new_deltas)?;
4812 :
4813 28 : drop_wlock(guard);
4814 28 :
4815 28 : Ok(())
4816 28 : }
4817 :
4818 0 : async fn rewrite_layers(
4819 0 : self: &Arc<Self>,
4820 0 : mut replace_layers: Vec<(Layer, ResidentLayer)>,
4821 0 : mut drop_layers: Vec<Layer>,
4822 0 : ) -> anyhow::Result<()> {
4823 0 : let mut guard = self.layers.write().await;
4824 :
4825 : // Trim our lists in case our caller (compaction) raced with someone else (GC) removing layers: we want
4826 : // to avoid double-removing, and avoid rewriting something that was removed.
4827 0 : replace_layers.retain(|(l, _)| guard.contains(l));
4828 0 : drop_layers.retain(|l| guard.contains(l));
4829 0 :
4830 0 : guard.rewrite_layers(&replace_layers, &drop_layers, &self.metrics);
4831 0 :
4832 0 : let upload_layers: Vec<_> = replace_layers.into_iter().map(|r| r.1).collect();
4833 0 :
4834 0 : self.remote_client
4835 0 : .schedule_compaction_update(&drop_layers, &upload_layers)?;
4836 :
4837 0 : Ok(())
4838 0 : }
4839 :
4840 : /// Schedules the uploads of the given image layers
4841 364 : fn upload_new_image_layers(
4842 364 : self: &Arc<Self>,
4843 364 : new_images: impl IntoIterator<Item = ResidentLayer>,
4844 364 : ) -> anyhow::Result<()> {
4845 390 : for layer in new_images {
4846 26 : self.remote_client.schedule_layer_file_upload(layer)?;
4847 : }
4848 : // should any new image layer been created, not uploading index_part will
4849 : // result in a mismatch between remote_physical_size and layermap calculated
4850 : // size, which will fail some tests, but should not be an issue otherwise.
4851 364 : self.remote_client
4852 364 : .schedule_index_upload_for_file_changes()?;
4853 364 : Ok(())
4854 364 : }
4855 :
4856 : /// Find the Lsns above which layer files need to be retained on
4857 : /// garbage collection. This is separate from actually performing the GC,
4858 : /// and is updated more frequently, so that compaction can remove obsolete
4859 : /// page versions more aggressively.
4860 : ///
4861 : /// TODO: that's wishful thinking, compaction doesn't actually do that
4862 : /// currently.
4863 : ///
4864 : /// The 'cutoff_horizon' point is used to retain recent versions that might still be
4865 : /// needed by read-only nodes. (As of this writing, the caller just passes
4866 : /// the latest LSN minus a constant, and doesn't do anything smart
4867 : /// to figure out what read-only nodes might actually need.)
4868 : ///
4869 : /// The 'pitr' duration is used to calculate a 'pitr_cutoff', which can be used to determine
4870 : /// whether a record is needed for PITR.
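     : ///
     : /// Sketch of how the result is consumed by [`Self::gc`] (see below); the effective
     : /// cutoff is the older of the two:
     : ///
     : /// ```ignore
     : /// let cutoffs = timeline.find_gc_cutoffs(cutoff_horizon, pitr, &cancel, &ctx).await?;
     : /// let new_gc_cutoff = Lsn::min(cutoffs.horizon, cutoffs.pitr);
     : /// ```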
4871 1508 : #[instrument(skip_all, fields(timeline_id=%self.timeline_id))]
4872 : pub(super) async fn find_gc_cutoffs(
4873 : &self,
4874 : cutoff_horizon: Lsn,
4875 : pitr: Duration,
4876 : cancel: &CancellationToken,
4877 : ctx: &RequestContext,
4878 : ) -> Result<GcCutoffs, PageReconstructError> {
4879 : let _timer = self
4880 : .metrics
4881 : .find_gc_cutoffs_histo
4882 : .start_timer()
4883 : .record_on_drop();
4884 :
4885 : pausable_failpoint!("Timeline::find_gc_cutoffs-pausable");
4886 :
4887 : // First, calculate pitr_cutoff_timestamp and then convert it to LSN.
4888 : //
4889 : // Some unit tests depend on garbage-collection working even when
4890 : // CLOG data is missing, so that find_lsn_for_timestamp() doesn't
4891 : // work, so avoid calling it altogether if time-based retention is not
4892 : // configured. It would be pointless anyway.
4893 : let pitr_cutoff = if pitr != Duration::ZERO {
4894 : let now = SystemTime::now();
4895 : if let Some(pitr_cutoff_timestamp) = now.checked_sub(pitr) {
4896 : let pitr_timestamp = to_pg_timestamp(pitr_cutoff_timestamp);
4897 :
4898 : match self
4899 : .find_lsn_for_timestamp(pitr_timestamp, cancel, ctx)
4900 : .await?
4901 : {
4902 : LsnForTimestamp::Present(lsn) => lsn,
4903 : LsnForTimestamp::Future(lsn) => {
4904 : // The timestamp is in the future. That sounds impossible,
4905 : // but what it really means is that there haven't been
4906 : // any commits since the cutoff timestamp.
4907 : //
4908 : // In this case we should use the LSN of the most recent commit,
4909 : // which is implicitly the last LSN in the log.
4910 : debug!("future({})", lsn);
4911 : self.get_last_record_lsn()
4912 : }
4913 : LsnForTimestamp::Past(lsn) => {
4914 : debug!("past({})", lsn);
4915 : // conservative, safe default is to remove nothing, when we
4916 : // have no commit timestamp data available
4917 : *self.get_latest_gc_cutoff_lsn()
4918 : }
4919 : LsnForTimestamp::NoData(lsn) => {
4920 : debug!("nodata({})", lsn);
4921 : // conservative, safe default is to remove nothing, when we
4922 : // have no commit timestamp data available
4923 : *self.get_latest_gc_cutoff_lsn()
4924 : }
4925 : }
4926 : } else {
4927 : // If we don't have enough data to convert to LSN,
4928 : // play safe and don't remove any layers.
4929 : *self.get_latest_gc_cutoff_lsn()
4930 : }
4931 : } else {
4932 : // No time-based retention was configured. Interpret this as "keep no history".
4933 : self.get_last_record_lsn()
4934 : };
4935 :
4936 : Ok(GcCutoffs {
4937 : horizon: cutoff_horizon,
4938 : pitr: pitr_cutoff,
4939 : })
4940 : }
4941 :
4942 : /// Garbage collect layer files on a timeline that are no longer needed.
4943 : ///
4944 : /// Currently, we don't make any attempt at removing unneeded page versions
4945 : /// within a layer file. We can only remove the whole file if it's fully
4946 : /// obsolete.
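     : ///
     : /// Sketch of the standby guard applied inside (see the body below): the new cutoff is
     : /// clamped to the standby horizon, but only within a bounded lag:
     : ///
     : /// ```ignore
     : /// if standby_horizon != Lsn::INVALID && standby_lag.0 < MAX_ALLOWED_STANDBY_LAG {
     : ///     new_gc_cutoff = Lsn::min(standby_horizon, new_gc_cutoff);
     : /// }
     : /// ```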
4947 754 : pub(super) async fn gc(&self) -> Result<GcResult, GcError> {
4948 : // this is most likely the background task, but it might be the spawned task from
4949 : // immediate_gc
4950 752 : let _g = tokio::select! {
4951 : guard = self.gc_lock.lock() => guard,
4952 : _ = self.cancel.cancelled() => return Ok(GcResult::default()),
4953 : };
4954 752 : let timer = self.metrics.garbage_collect_histo.start_timer();
4955 :
4956 : fail_point!("before-timeline-gc");
4957 :
4958 : // Is the timeline being deleted?
4959 752 : if self.is_stopping() {
4960 0 : return Err(GcError::TimelineCancelled);
4961 752 : }
4962 752 :
4963 752 : let (horizon_cutoff, pitr_cutoff, retain_lsns, max_lsn_with_valid_lease) = {
4964 752 : let gc_info = self.gc_info.read().unwrap();
4965 752 :
4966 752 : let horizon_cutoff = min(gc_info.cutoffs.horizon, self.get_disk_consistent_lsn());
4967 752 : let pitr_cutoff = gc_info.cutoffs.pitr;
4968 752 : let retain_lsns = gc_info.retain_lsns.clone();
4969 752 :
4970 752 : // Gets the maximum LSN that holds the valid lease.
4971 752 : //
4972 752 : // Caveat: `refresh_gc_info` is in charged of updating the lease map.
4973 752 : // Here, we do not check for stale leases again.
4974 752 : let max_lsn_with_valid_lease = gc_info.leases.last_key_value().map(|(lsn, _)| *lsn);
4975 752 :
4976 752 : (
4977 752 : horizon_cutoff,
4978 752 : pitr_cutoff,
4979 752 : retain_lsns,
4980 752 : max_lsn_with_valid_lease,
4981 752 : )
4982 752 : };
4983 752 :
4984 752 : let mut new_gc_cutoff = Lsn::min(horizon_cutoff, pitr_cutoff);
4985 752 : let standby_horizon = self.standby_horizon.load();
4986 752 : // Hold GC for the standby, but as a safety guard do it only within some
4987 752 : // reasonable lag.
4988 752 : if standby_horizon != Lsn::INVALID {
4989 0 : if let Some(standby_lag) = new_gc_cutoff.checked_sub(standby_horizon) {
4990 : const MAX_ALLOWED_STANDBY_LAG: u64 = 10u64 << 30; // 10 GB
4991 0 : if standby_lag.0 < MAX_ALLOWED_STANDBY_LAG {
4992 0 : new_gc_cutoff = Lsn::min(standby_horizon, new_gc_cutoff);
4993 0 : trace!("holding off GC for standby apply LSN {}", standby_horizon);
4994 : } else {
4995 0 : warn!(
4996 0 : "standby is lagging for more than {}MB, not holding gc for it",
4997 0 : MAX_ALLOWED_STANDBY_LAG / 1024 / 1024
4998 : )
4999 : }
5000 0 : }
5001 752 : }
5002 :
5003 : // Reset the standby horizon so it is ignored unless it is updated again before the next GC.
5004 : // This is an easy way to unset it when the standby disappears, without adding
5005 : // more conf options.
5006 752 : self.standby_horizon.store(Lsn::INVALID);
5007 752 : self.metrics
5008 752 : .standby_horizon_gauge
5009 752 : .set(Lsn::INVALID.0 as i64);
5010 :
5011 752 : let res = self
5012 752 : .gc_timeline(
5013 752 : horizon_cutoff,
5014 752 : pitr_cutoff,
5015 752 : retain_lsns,
5016 752 : max_lsn_with_valid_lease,
5017 752 : new_gc_cutoff,
5018 752 : )
5019 752 : .instrument(
5020 752 : info_span!("gc_timeline", timeline_id = %self.timeline_id, cutoff = %new_gc_cutoff),
5021 : )
5022 0 : .await?;
5023 :
5024 : // only record successes
5025 752 : timer.stop_and_record();
5026 752 :
5027 752 : Ok(res)
5028 754 : }
5029 :
5030 752 : async fn gc_timeline(
5031 752 : &self,
5032 752 : horizon_cutoff: Lsn,
5033 752 : pitr_cutoff: Lsn,
5034 752 : retain_lsns: Vec<Lsn>,
5035 752 : max_lsn_with_valid_lease: Option<Lsn>,
5036 752 : new_gc_cutoff: Lsn,
5037 752 : ) -> Result<GcResult, GcError> {
5038 752 : // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc
5039 752 :
5040 752 : let now = SystemTime::now();
5041 752 : let mut result: GcResult = GcResult::default();
5042 752 :
5043 752 : // Nothing to GC. Return early.
5044 752 : let latest_gc_cutoff = *self.get_latest_gc_cutoff_lsn();
5045 752 : if latest_gc_cutoff >= new_gc_cutoff {
5046 22 : info!(
5047 0 : "Nothing to GC: new_gc_cutoff_lsn {new_gc_cutoff}, latest_gc_cutoff_lsn {latest_gc_cutoff}",
5048 : );
5049 22 : return Ok(result);
5050 730 : }
5051 :
5052 : // We need to ensure that no one tries to read page versions or create
5053 : // branches at a point before latest_gc_cutoff_lsn. See branch_timeline()
5054 : // for details. This will block until the old value is no longer in use.
5055 : //
5056 : // The GC cutoff should only ever move forwards.
5057 730 : let waitlist = {
5058 730 : let write_guard = self.latest_gc_cutoff_lsn.lock_for_write();
5059 730 : if *write_guard > new_gc_cutoff {
5060 0 : return Err(GcError::BadLsn {
5061 0 : why: format!(
5062 0 : "Cannot move GC cutoff LSN backwards (was {}, new {})",
5063 0 : *write_guard, new_gc_cutoff
5064 0 : ),
5065 0 : });
5066 730 : }
5067 730 :
5068 730 : write_guard.store_and_unlock(new_gc_cutoff)
5069 730 : };
5070 730 : waitlist.wait().await;
5071 :
5072 730 : info!("GC starting");
5073 :
5074 730 : debug!("retain_lsns: {:?}", retain_lsns);
5075 :
5076 730 : let mut layers_to_remove = Vec::new();
5077 :
5078 : // Scan all layers in the timeline (remote or on-disk).
5079 : //
5080 : // Garbage collect the layer if all conditions are satisfied:
5081 : // 1. it is older than cutoff LSN;
5082 : // 2. it is older than PITR interval;
5083 : // 3. it doesn't need to be retained for 'retain_lsns';
5084 : // 4. it does not need to be kept for LSNs holding valid leases.
5085 : // 5. newer on-disk image layers cover the layer's whole key range
5086 : //
5087 : // TODO holding a write lock is too agressive and avoidable
5088 730 : let mut guard = self.layers.write().await;
5089 730 : let layers = guard.layer_map();
5090 12412 : 'outer: for l in layers.iter_historic_layers() {
5091 12412 : result.layers_total += 1;
5092 12412 :
5093 12412 : // 1. Is it newer than GC horizon cutoff point?
5094 12412 : if l.get_lsn_range().end > horizon_cutoff {
5095 740 : debug!(
5096 0 : "keeping {} because it's newer than horizon_cutoff {}",
5097 0 : l.layer_name(),
5098 : horizon_cutoff,
5099 : );
5100 740 : result.layers_needed_by_cutoff += 1;
5101 740 : continue 'outer;
5102 11672 : }
5103 11672 :
5104 11672 : // 2. It is newer than PiTR cutoff point?
5105 11672 : if l.get_lsn_range().end > pitr_cutoff {
5106 0 : debug!(
5107 0 : "keeping {} because it's newer than pitr_cutoff {}",
5108 0 : l.layer_name(),
5109 : pitr_cutoff,
5110 : );
5111 0 : result.layers_needed_by_pitr += 1;
5112 0 : continue 'outer;
5113 11672 : }
5114 :
5115 : // 3. Is it needed by a child branch?
5116 : // NOTE With that we would keep data that
5117 : // might be referenced by child branches forever.
5118 : // We can track this in child timeline GC and delete parent layers when
5119 : // they are no longer needed. This might be complicated with long inheritance chains.
5120 : //
5121 : // TODO Vec is not a great choice for `retain_lsns`
5122 11672 : for retain_lsn in &retain_lsns {
5123 : // start_lsn is inclusive
5124 8 : if &l.get_lsn_range().start <= retain_lsn {
5125 8 : debug!(
5126 0 : "keeping {} because it's still might be referenced by child branch forked at {} is_dropped: xx is_incremental: {}",
5127 0 : l.layer_name(),
5128 0 : retain_lsn,
5129 0 : l.is_incremental(),
5130 : );
5131 8 : result.layers_needed_by_branches += 1;
5132 8 : continue 'outer;
5133 0 : }
5134 : }
5135 :
5136 : // 4. Is there a valid lease that requires us to keep this layer?
5137 11664 : if let Some(lsn) = &max_lsn_with_valid_lease {
5138 : // keep if the layer's start LSN is <= the largest leased LSN
5139 18 : if &l.get_lsn_range().start <= lsn {
5140 14 : debug!(
5141 0 : "keeping {} because there is a valid lease preventing GC at {}",
5142 0 : l.layer_name(),
5143 : lsn,
5144 : );
5145 14 : result.layers_needed_by_leases += 1;
5146 14 : continue 'outer;
5147 4 : }
5148 11646 : }
5149 :
5150 : // 5. Is there a later on-disk layer for this relation?
5151 : //
5152 : // The end-LSN is exclusive, while disk_consistent_lsn is
5153 : // inclusive. For example, if disk_consistent_lsn is 100, it is
5154 : // OK for a delta layer to have end LSN 101, but if the end LSN
5155 : // is 102, then it might not have been fully flushed to disk
5156 : // before crash.
5157 : //
5158 : // For example, imagine that the following layers exist:
5159 : //
5160 : // 1000 - image (A)
5161 : // 1000-2000 - delta (B)
5162 : // 2000 - image (C)
5163 : // 2000-3000 - delta (D)
5164 : // 3000 - image (E)
5165 : //
5166 : // If GC horizon is at 2500, we can remove layers A and B, but
5167 : // we cannot remove C, even though it's older than 2500, because
5168 : // the delta layer 2000-3000 depends on it.
5169 11650 : if !layers
5170 11650 : .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff))
5171 : {
5172 11642 : debug!("keeping {} because it is the latest layer", l.layer_name());
5173 11642 : result.layers_not_updated += 1;
5174 11642 : continue 'outer;
5175 8 : }
5176 8 :
5177 8 : // We didn't find any reason to keep this file, so remove it.
5178 8 : debug!(
5179 0 : "garbage collecting {} is_dropped: xx is_incremental: {}",
5180 0 : l.layer_name(),
5181 0 : l.is_incremental(),
5182 : );
5183 8 : layers_to_remove.push(l);
5184 : }
5185 :
5186 730 : if !layers_to_remove.is_empty() {
5187 : // Persist the new GC cutoff value before we actually remove anything.
5188 : // This unconditionally schedules also an index_part.json update, even though, we will
5189 : // be doing one a bit later with the unlinked gc'd layers.
5190 6 : let disk_consistent_lsn = self.disk_consistent_lsn.load();
5191 6 : self.schedule_uploads(disk_consistent_lsn, None)
5192 6 : .map_err(|e| {
5193 0 : if self.cancel.is_cancelled() {
5194 0 : GcError::TimelineCancelled
5195 : } else {
5196 0 : GcError::Remote(e)
5197 : }
5198 6 : })?;
5199 :
5200 6 : let gc_layers = layers_to_remove
5201 6 : .iter()
5202 8 : .map(|x| guard.get_from_desc(x))
5203 6 : .collect::<Vec<Layer>>();
5204 6 :
5205 6 : result.layers_removed = gc_layers.len() as u64;
5206 6 :
5207 6 : self.remote_client
5208 6 : .schedule_gc_update(&gc_layers)
5209 6 : .map_err(|e| {
5210 0 : if self.cancel.is_cancelled() {
5211 0 : GcError::TimelineCancelled
5212 : } else {
5213 0 : GcError::Remote(e)
5214 : }
5215 6 : })?;
5216 :
5217 6 : guard.finish_gc_timeline(&gc_layers);
5218 6 :
5219 6 : #[cfg(feature = "testing")]
5220 6 : {
5221 6 : result.doomed_layers = gc_layers;
5222 6 : }
5223 724 : }
5224 :
5225 730 : info!(
5226 0 : "GC completed removing {} layers, cutoff {}",
5227 : result.layers_removed, new_gc_cutoff
5228 : );
5229 :
5230 730 : result.elapsed = now.elapsed().unwrap_or(Duration::ZERO);
5231 730 : Ok(result)
5232 752 : }
5233 :
5234 : /// Reconstruct a value, using the given base image and WAL records in 'data'.
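     : ///
     : /// Conceptually (a sketch; `data.records` arrives newest-first and is reversed before redo):
     : ///
     : /// ```ignore
     : /// // data.img:     Some((lsn0, base_image))           -- optional starting point
     : /// // data.records: [(lsn2, rec2), (lsn1, rec1), ...]  -- WAL records to apply on top
     : /// let page: Bytes = timeline.reconstruct_value(key, request_lsn, data).await?;
     : /// ```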
5235 665996 : async fn reconstruct_value(
5236 665996 : &self,
5237 665996 : key: Key,
5238 665996 : request_lsn: Lsn,
5239 665996 : mut data: ValueReconstructState,
5240 665996 : ) -> Result<Bytes, PageReconstructError> {
5241 665996 : // Perform WAL redo if needed
5242 665996 : data.records.reverse();
5243 665996 :
5244 665996 : // If we have a page image, and no WAL, we're all set
5245 665996 : if data.records.is_empty() {
5246 665978 : if let Some((img_lsn, img)) = &data.img {
5247 665978 : trace!(
5248 0 : "found page image for key {} at {}, no WAL redo required, req LSN {}",
5249 : key,
5250 : img_lsn,
5251 : request_lsn,
5252 : );
5253 665978 : Ok(img.clone())
5254 : } else {
5255 0 : Err(PageReconstructError::from(anyhow!(
5256 0 : "base image for {key} at {request_lsn} not found"
5257 0 : )))
5258 : }
5259 : } else {
5260 : // We need to do WAL redo.
5261 : //
5262 : // If we don't have a base image, then the oldest WAL record had better initialize
5263 : // the page
5264 18 : if data.img.is_none() && !data.records.first().unwrap().1.will_init() {
5265 0 : Err(PageReconstructError::from(anyhow!(
5266 0 : "Base image for {} at {} not found, but got {} WAL records",
5267 0 : key,
5268 0 : request_lsn,
5269 0 : data.records.len()
5270 0 : )))
5271 : } else {
5272 18 : if data.img.is_some() {
5273 18 : trace!(
5274 0 : "found {} WAL records and a base image for {} at {}, performing WAL redo",
5275 0 : data.records.len(),
5276 : key,
5277 : request_lsn
5278 : );
5279 : } else {
5280 0 : trace!("found {} WAL records that will init the page for {} at {}, performing WAL redo", data.records.len(), key, request_lsn);
5281 : };
5282 :
5283 18 : let last_rec_lsn = data.records.last().unwrap().0;
5284 :
5285 18 : let img = match self
5286 18 : .walredo_mgr
5287 18 : .as_ref()
5288 18 : .context("timeline has no walredo manager")
5289 18 : .map_err(PageReconstructError::WalRedo)?
5290 18 : .request_redo(key, request_lsn, data.img, data.records, self.pg_version)
5291 0 : .await
5292 18 : .context("reconstruct a page image")
5293 : {
5294 18 : Ok(img) => img,
5295 0 : Err(e) => return Err(PageReconstructError::WalRedo(e)),
5296 : };
5297 :
5298 18 : if img.len() == page_cache::PAGE_SZ {
5299 0 : let cache = page_cache::get();
5300 0 : if let Err(e) = cache
5301 0 : .memorize_materialized_page(
5302 0 : self.tenant_shard_id,
5303 0 : self.timeline_id,
5304 0 : key,
5305 0 : last_rec_lsn,
5306 0 : &img,
5307 0 : )
5308 0 : .await
5309 0 : .context("Materialized page memoization failed")
5310 : {
5311 0 : return Err(PageReconstructError::from(e));
5312 0 : }
5313 18 : }
5314 :
5315 18 : Ok(img)
5316 : }
5317 : }
5318 665996 : }
5319 :
5320 0 : pub(crate) async fn spawn_download_all_remote_layers(
5321 0 : self: Arc<Self>,
5322 0 : request: DownloadRemoteLayersTaskSpawnRequest,
5323 0 : ) -> Result<DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskInfo> {
5324 0 : use pageserver_api::models::DownloadRemoteLayersTaskState;
5325 0 :
5326 0 : // This is not really needed anymore; only tests actually check the return value via the
5327 0 : // HTTP API. It would be better not to maintain this at all.
5328 0 :
5329 0 : let mut status_guard = self.download_all_remote_layers_task_info.write().unwrap();
5330 0 : if let Some(st) = &*status_guard {
5331 0 : match &st.state {
5332 : DownloadRemoteLayersTaskState::Running => {
5333 0 : return Err(st.clone());
5334 : }
5335 : DownloadRemoteLayersTaskState::ShutDown
5336 0 : | DownloadRemoteLayersTaskState::Completed => {
5337 0 : *status_guard = None;
5338 0 : }
5339 : }
5340 0 : }
5341 :
5342 0 : let self_clone = Arc::clone(&self);
5343 0 : let task_id = task_mgr::spawn(
5344 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
5345 0 : task_mgr::TaskKind::DownloadAllRemoteLayers,
5346 0 : Some(self.tenant_shard_id),
5347 0 : Some(self.timeline_id),
5348 0 : "download all remote layers task",
5349 : false,
5350 0 : async move {
5351 0 : self_clone.download_all_remote_layers(request).await;
5352 0 : let mut status_guard = self_clone.download_all_remote_layers_task_info.write().unwrap();
5353 0 : match &mut *status_guard {
5354 : None => {
5355 0 : warn!("tasks status is supposed to be Some(), since we are running");
5356 : }
5357 0 : Some(st) => {
5358 0 : let exp_task_id = format!("{}", task_mgr::current_task_id().unwrap());
5359 0 : if st.task_id != exp_task_id {
5360 0 : warn!("task id changed while we were still running, expecting {} but have {}", exp_task_id, st.task_id);
5361 0 : } else {
5362 0 : st.state = DownloadRemoteLayersTaskState::Completed;
5363 0 : }
5364 : }
5365 : };
5366 0 : Ok(())
5367 0 : }
5368 0 : .instrument(info_span!(parent: None, "download_all_remote_layers", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
5369 : );
5370 :
5371 0 : let initial_info = DownloadRemoteLayersTaskInfo {
5372 0 : task_id: format!("{task_id}"),
5373 0 : state: DownloadRemoteLayersTaskState::Running,
5374 0 : total_layer_count: 0,
5375 0 : successful_download_count: 0,
5376 0 : failed_download_count: 0,
5377 0 : };
5378 0 : *status_guard = Some(initial_info.clone());
5379 0 :
5380 0 : Ok(initial_info)
5381 0 : }
5382 :
5383 0 : async fn download_all_remote_layers(
5384 0 : self: &Arc<Self>,
5385 0 : request: DownloadRemoteLayersTaskSpawnRequest,
5386 0 : ) {
5387 : use pageserver_api::models::DownloadRemoteLayersTaskState;
5388 :
5389 0 : let remaining = {
5390 0 : let guard = self.layers.read().await;
5391 0 : guard
5392 0 : .layer_map()
5393 0 : .iter_historic_layers()
5394 0 : .map(|desc| guard.get_from_desc(&desc))
5395 0 : .collect::<Vec<_>>()
5396 0 : };
5397 0 : let total_layer_count = remaining.len();
5398 0 :
5399 0 : macro_rules! lock_status {
5400 0 : ($st:ident) => {
5401 0 : let mut st = self.download_all_remote_layers_task_info.write().unwrap();
5402 0 : let st = st
5403 0 : .as_mut()
5404 0 : .expect("this function is only called after the task has been spawned");
5405 0 : assert_eq!(
5406 0 : st.task_id,
5407 0 : format!(
5408 0 : "{}",
5409 0 : task_mgr::current_task_id().expect("we run inside a task_mgr task")
5410 0 : )
5411 0 : );
5412 0 : let $st = st;
5413 0 : };
5414 0 : }
5415 0 :
5416 0 : {
5417 0 : lock_status!(st);
5418 0 : st.total_layer_count = total_layer_count as u64;
5419 0 : }
5420 0 :
5421 0 : let mut remaining = remaining.into_iter();
5422 0 : let mut have_remaining = true;
5423 0 : let mut js = tokio::task::JoinSet::new();
5424 0 :
5425 0 : let cancel = task_mgr::shutdown_token();
5426 0 :
5427 0 : let limit = request.max_concurrent_downloads;
5428 :
5429 : loop {
5430 0 : while js.len() < limit.get() && have_remaining && !cancel.is_cancelled() {
5431 0 : let Some(next) = remaining.next() else {
5432 0 : have_remaining = false;
5433 0 : break;
5434 : };
5435 :
5436 0 : let span = tracing::info_span!("download", layer = %next);
5437 :
5438 0 : js.spawn(
5439 0 : async move {
5440 0 : let res = next.download().await;
5441 0 : (next, res)
5442 0 : }
5443 0 : .instrument(span),
5444 0 : );
5445 : }
5446 :
5447 0 : while let Some(res) = js.join_next().await {
5448 0 : match res {
5449 : Ok((_, Ok(_))) => {
5450 0 : lock_status!(st);
5451 0 : st.successful_download_count += 1;
5452 : }
5453 0 : Ok((layer, Err(e))) => {
5454 0 : tracing::error!(%layer, "download failed: {e:#}");
5455 0 : lock_status!(st);
5456 0 : st.failed_download_count += 1;
5457 : }
5458 0 : Err(je) if je.is_cancelled() => unreachable!("not used here"),
5459 0 : Err(je) if je.is_panic() => {
5460 0 : lock_status!(st);
5461 0 : st.failed_download_count += 1;
5462 : }
5463 0 : Err(je) => tracing::warn!("unknown joinerror: {je:?}"),
5464 : }
5465 : }
5466 :
5467 0 : if js.is_empty() && (!have_remaining || cancel.is_cancelled()) {
5468 0 : break;
5469 0 : }
5470 : }
5471 :
5472 : {
5473 0 : lock_status!(st);
5474 0 : st.state = DownloadRemoteLayersTaskState::Completed;
5475 0 : }
5476 0 : }
5477 :
5478 0 : pub(crate) fn get_download_all_remote_layers_task_info(
5479 0 : &self,
5480 0 : ) -> Option<DownloadRemoteLayersTaskInfo> {
5481 0 : self.download_all_remote_layers_task_info
5482 0 : .read()
5483 0 : .unwrap()
5484 0 : .clone()
5485 0 : }
5486 : }
5487 :
5488 : impl Timeline {
5489 : /// Returns non-remote layers for eviction.
5490 0 : pub(crate) async fn get_local_layers_for_disk_usage_eviction(&self) -> DiskUsageEvictionInfo {
5491 0 : let guard = self.layers.read().await;
5492 0 : let mut max_layer_size: Option<u64> = None;
5493 0 :
5494 0 : let resident_layers = guard
5495 0 : .likely_resident_layers()
5496 0 : .map(|layer| {
5497 0 : let file_size = layer.layer_desc().file_size;
5498 0 : max_layer_size = max_layer_size.map_or(Some(file_size), |m| Some(m.max(file_size)));
5499 0 :
5500 0 : let last_activity_ts = layer.access_stats().latest_activity_or_now();
5501 0 :
5502 0 : EvictionCandidate {
5503 0 : layer: layer.into(),
5504 0 : last_activity_ts,
5505 0 : relative_last_activity: finite_f32::FiniteF32::ZERO,
5506 0 : }
5507 0 : })
5508 0 : .collect();
5509 0 :
5510 0 : DiskUsageEvictionInfo {
5511 0 : max_layer_size,
5512 0 : resident_layers,
5513 0 : }
5514 0 : }
5515 :
5516 1544 : pub(crate) fn get_shard_index(&self) -> ShardIndex {
5517 1544 : ShardIndex {
5518 1544 : shard_number: self.tenant_shard_id.shard_number,
5519 1544 : shard_count: self.tenant_shard_id.shard_count,
5520 1544 : }
5521 1544 : }
5522 :
5523 : #[cfg(test)]
5524 20 : pub(super) fn force_advance_lsn(self: &Arc<Timeline>, new_lsn: Lsn) {
5525 20 : self.last_record_lsn.advance(new_lsn);
5526 20 : }
5527 :
5528 : #[cfg(test)]
5529 2 : pub(super) fn force_set_disk_consistent_lsn(&self, new_value: Lsn) {
5530 2 : self.disk_consistent_lsn.store(new_value);
5531 2 : }
5532 :
5533 : /// Force create an image layer and place it into the layer map.
5534 : ///
5535 : /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
5536 : /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are placed into the layer map in one run.
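     : ///
     : /// Shape of the test data it expects (a sketch):
     : ///
     : /// ```ignore
     : /// let images = vec![
     : ///     (key_a, Bytes::from_static(b"page contents a")),
     : ///     (key_b, Bytes::from_static(b"page contents b")),
     : /// ];
     : /// tline.force_create_image_layer(lsn, images, Some(start_lsn), &ctx).await?;
     : /// ```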
5537 : #[cfg(test)]
5538 36 : pub(super) async fn force_create_image_layer(
5539 36 : self: &Arc<Timeline>,
5540 36 : lsn: Lsn,
5541 36 : mut images: Vec<(Key, Bytes)>,
5542 36 : check_start_lsn: Option<Lsn>,
5543 36 : ctx: &RequestContext,
5544 36 : ) -> anyhow::Result<()> {
5545 36 : let last_record_lsn = self.get_last_record_lsn();
5546 36 : assert!(
5547 36 : lsn <= last_record_lsn,
5548 0 : "advance last record lsn before inserting a layer, lsn={lsn}, last_record_lsn={last_record_lsn}"
5549 : );
5550 36 : if let Some(check_start_lsn) = check_start_lsn {
5551 36 : assert!(lsn >= check_start_lsn);
5552 0 : }
5553 36 : images.sort_unstable_by(|(ka, _), (kb, _)| ka.cmp(kb));
5554 36 : let min_key = *images.first().map(|(k, _)| k).unwrap();
5555 36 : let max_key = images.last().map(|(k, _)| k).unwrap().next();
5556 36 : let mut image_layer_writer = ImageLayerWriter::new(
5557 36 : self.conf,
5558 36 : self.timeline_id,
5559 36 : self.tenant_shard_id,
5560 36 : &(min_key..max_key),
5561 36 : lsn,
5562 36 : ctx,
5563 36 : )
5564 18 : .await?;
5565 92 : for (key, img) in images {
5566 56 : image_layer_writer.put_image(key, img, ctx).await?;
5567 : }
5568 72 : let image_layer = image_layer_writer.finish(self, ctx).await?;
5569 :
5570 36 : {
5571 36 : let mut guard = self.layers.write().await;
5572 36 : guard.force_insert_layer(image_layer);
5573 36 : }
5574 36 :
5575 36 : Ok(())
5576 36 : }
5577 :
5578 : /// Force create a delta layer and place it into the layer map.
5579 : ///
5580 : /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
5581 : /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are placed into the layer map in one run.
5582 : #[cfg(test)]
5583 28 : pub(super) async fn force_create_delta_layer(
5584 28 : self: &Arc<Timeline>,
5585 28 : mut deltas: Vec<(Key, Lsn, Value)>,
5586 28 : check_start_lsn: Option<Lsn>,
5587 28 : ctx: &RequestContext,
5588 28 : ) -> anyhow::Result<()> {
5589 28 : let last_record_lsn = self.get_last_record_lsn();
5590 28 : deltas.sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb)));
5591 28 : let min_key = *deltas.first().map(|(k, _, _)| k).unwrap();
5592 28 : let max_key = deltas.last().map(|(k, _, _)| k).unwrap().next();
5593 54 : let min_lsn = *deltas.iter().map(|(_, lsn, _)| lsn).min().unwrap();
5594 54 : let max_lsn = *deltas.iter().map(|(_, lsn, _)| lsn).max().unwrap();
5595 28 : assert!(
5596 28 : max_lsn <= last_record_lsn,
5597 0 : "advance last record lsn before inserting a layer, max_lsn={max_lsn}, last_record_lsn={last_record_lsn}"
5598 : );
5599 28 : let end_lsn = Lsn(max_lsn.0 + 1);
5600 28 : if let Some(check_start_lsn) = check_start_lsn {
5601 28 : assert!(min_lsn >= check_start_lsn);
5602 0 : }
5603 28 : let mut delta_layer_writer = DeltaLayerWriter::new(
5604 28 : self.conf,
5605 28 : self.timeline_id,
5606 28 : self.tenant_shard_id,
5607 28 : min_key,
5608 28 : min_lsn..end_lsn,
5609 28 : ctx,
5610 28 : )
5611 14 : .await?;
5612 82 : for (key, lsn, val) in deltas {
5613 54 : delta_layer_writer.put_value(key, lsn, val, ctx).await?;
5614 : }
5615 70 : let delta_layer = delta_layer_writer.finish(max_key, self, ctx).await?;
5616 :
5617 28 : {
5618 28 : let mut guard = self.layers.write().await;
5619 28 : guard.force_insert_layer(delta_layer);
5620 28 : }
5621 28 :
5622 28 : Ok(())
5623 28 : }
5624 :
5625 : /// Return all keys at the LSN in the image layers
5626 : #[cfg(test)]
5627 6 : pub(crate) async fn inspect_image_layers(
5628 6 : self: &Arc<Timeline>,
5629 6 : lsn: Lsn,
5630 6 : ctx: &RequestContext,
5631 6 : ) -> anyhow::Result<Vec<(Key, Bytes)>> {
5632 6 : let mut all_data = Vec::new();
5633 6 : let guard = self.layers.read().await;
5634 34 : for layer in guard.layer_map().iter_historic_layers() {
5635 34 : if !layer.is_delta() && layer.image_layer_lsn() == lsn {
5636 8 : let layer = guard.get_from_desc(&layer);
5637 8 : let mut reconstruct_data = ValuesReconstructState::default();
5638 8 : layer
5639 8 : .get_values_reconstruct_data(
5640 8 : KeySpace::single(Key::MIN..Key::MAX),
5641 8 : lsn..Lsn(lsn.0 + 1),
5642 8 : &mut reconstruct_data,
5643 8 : ctx,
5644 8 : )
5645 17 : .await?;
5646 80 : for (k, v) in reconstruct_data.keys {
5647 72 : all_data.push((k, v?.img.unwrap().1));
5648 : }
5649 26 : }
5650 : }
5651 6 : all_data.sort();
5652 6 : Ok(all_data)
5653 6 : }
5654 :
5655 : /// Get all historic layer descriptors in the layer map
5656 : #[cfg(test)]
5657 2 : pub(crate) async fn inspect_historic_layers(
5658 2 : self: &Arc<Timeline>,
5659 2 : ) -> anyhow::Result<Vec<super::storage_layer::PersistentLayerKey>> {
5660 2 : let mut layers = Vec::new();
5661 2 : let guard = self.layers.read().await;
5662 6 : for layer in guard.layer_map().iter_historic_layers() {
5663 6 : layers.push(layer.key());
5664 6 : }
5665 2 : Ok(layers)
5666 2 : }
5667 :
5668 : #[cfg(test)]
5669 6 : pub(crate) fn add_extra_test_dense_keyspace(&self, ks: KeySpace) {
5670 6 : let mut keyspace = self.extra_test_dense_keyspace.load().as_ref().clone();
5671 6 : keyspace.merge(&ks);
5672 6 : self.extra_test_dense_keyspace.store(Arc::new(keyspace));
5673 6 : }
5674 : }
5675 :
5676 : type TraversalPathItem = (ValueReconstructResult, Lsn, TraversalId);
5677 :
5678 : /// Tracks the writes that ingestion performs against a particular in-memory layer.
5679 : ///
5680 : /// Cleared upon freezing a layer.
5681 : struct TimelineWriterState {
5682 : open_layer: Arc<InMemoryLayer>,
5683 : current_size: u64,
5684 : // Previous Lsn which passed through
5685 : prev_lsn: Option<Lsn>,
5686 : // Largest Lsn which passed through the current writer
5687 : max_lsn: Option<Lsn>,
5688 : // Cached details of the last freeze. Avoids going trough the atomic/lock on every put.
5689 : cached_last_freeze_at: Lsn,
5690 : }
5691 :
5692 : impl TimelineWriterState {
5693 1238 : fn new(open_layer: Arc<InMemoryLayer>, current_size: u64, last_freeze_at: Lsn) -> Self {
5694 1238 : Self {
5695 1238 : open_layer,
5696 1238 : current_size,
5697 1238 : prev_lsn: None,
5698 1238 : max_lsn: None,
5699 1238 : cached_last_freeze_at: last_freeze_at,
5700 1238 : }
5701 1238 : }
5702 : }
5703 :
5704 : /// Various functions to mutate the timeline.
5705 : // TODO Currently, Deref is used to allow easy access to the Timeline's read methods.
5706 : // This is probably considered a bad practice in Rust and should be fixed eventually,
5707 : // but will cause large code changes.
5708 : pub(crate) struct TimelineWriter<'a> {
5709 : tl: &'a Timeline,
5710 : write_guard: tokio::sync::MutexGuard<'a, Option<TimelineWriterState>>,
5711 : }
5712 :
5713 : impl Deref for TimelineWriter<'_> {
5714 : type Target = Timeline;
5715 :
5716 4807176 : fn deref(&self) -> &Self::Target {
5717 4807176 : self.tl
5718 4807176 : }
5719 : }
5720 :
5721 : #[derive(PartialEq)]
5722 : enum OpenLayerAction {
5723 : Roll,
5724 : Open,
5725 : None,
5726 : }
5727 :
5728 : impl<'a> TimelineWriter<'a> {
5729 : /// Put a new page version that can be constructed from a WAL record
5730 : ///
5731 : /// This will implicitly extend the relation, if the page is beyond the
5732 : /// current end-of-file.
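     : ///
     : /// Sketch of a single write (illustrative; `writer` is a `TimelineWriter` obtained
     : /// elsewhere, and `value` is a `Value` wrapping a page image or a WAL record):
     : ///
     : /// ```ignore
     : /// writer.put(key, lsn, &value, &ctx).await?;
     : /// writer.finish_write(lsn);
     : /// ```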
5733 5090402 : pub(crate) async fn put(
5734 5090402 : &mut self,
5735 5090402 : key: Key,
5736 5090402 : lsn: Lsn,
5737 5090402 : value: &Value,
5738 5090402 : ctx: &RequestContext,
5739 5090402 : ) -> anyhow::Result<()> {
5740 5090402 : // Avoid doing allocations for "small" values.
5741 5090402 : // In the regression test suite, the limit of 256 avoided allocations in 95% of cases:
5742 5090402 : // https://github.com/neondatabase/neon/pull/5056#discussion_r1301975061
5743 5090402 : let mut buf = smallvec::SmallVec::<[u8; 256]>::new();
5744 5090402 : value.ser_into(&mut buf)?;
5745 5090402 : let buf_size: u64 = buf.len().try_into().expect("oversized value buf");
5746 5090402 :
5747 5090402 : let action = self.get_open_layer_action(lsn, buf_size);
5748 5090402 : let layer = self.handle_open_layer_action(lsn, action, ctx).await?;
5749 5090402 : let res = layer.put_value(key, lsn, &buf, ctx).await;
5750 :
5751 5090402 : if res.is_ok() {
5752 5090402 : // Update the current size only when the entire write was ok.
5753 5090402 : // In case of failures, we may have had partial writes which
5754 5090402 : // render the size tracking out of sync. That's ok because
5755 5090402 : // the checkpoint distance should be significantly smaller
5756 5090402 : // than the S3 single shot upload limit of 5GiB.
5757 5090402 : let state = self.write_guard.as_mut().unwrap();
5758 5090402 :
5759 5090402 : state.current_size += buf_size;
5760 5090402 : state.prev_lsn = Some(lsn);
5761 5090402 : state.max_lsn = std::cmp::max(state.max_lsn, Some(lsn));
5762 5090402 : }
5763 :
5764 5090402 : res
5765 5090402 : }
5766 :
5767 5090404 : async fn handle_open_layer_action(
5768 5090404 : &mut self,
5769 5090404 : at: Lsn,
5770 5090404 : action: OpenLayerAction,
5771 5090404 : ctx: &RequestContext,
5772 5090404 : ) -> anyhow::Result<&Arc<InMemoryLayer>> {
5773 5090404 : match action {
5774 : OpenLayerAction::Roll => {
5775 80 : let freeze_at = self.write_guard.as_ref().unwrap().max_lsn.unwrap();
5776 80 : self.roll_layer(freeze_at).await?;
5777 80 : self.open_layer(at, ctx).await?;
5778 : }
5779 1158 : OpenLayerAction::Open => self.open_layer(at, ctx).await?,
5780 : OpenLayerAction::None => {
5781 5089166 : assert!(self.write_guard.is_some());
5782 : }
5783 : }
5784 :
5785 5090404 : Ok(&self.write_guard.as_ref().unwrap().open_layer)
5786 5090404 : }
5787 :
5788 1238 : async fn open_layer(&mut self, at: Lsn, ctx: &RequestContext) -> anyhow::Result<()> {
5789 1238 : let layer = self.tl.get_layer_for_write(at, ctx).await?;
5790 1238 : let initial_size = layer.size().await?;
5791 :
5792 1238 : let last_freeze_at = self.last_freeze_at.load();
5793 1238 : self.write_guard.replace(TimelineWriterState::new(
5794 1238 : layer,
5795 1238 : initial_size,
5796 1238 : last_freeze_at,
5797 1238 : ));
5798 1238 :
5799 1238 : Ok(())
5800 1238 : }
5801 :
5802 80 : async fn roll_layer(&mut self, freeze_at: Lsn) -> anyhow::Result<()> {
5803 80 : let current_size = self.write_guard.as_ref().unwrap().current_size;
5804 80 :
5805 80 : // self.write_guard will be taken by the freezing
5806 80 : self.tl
5807 80 : .freeze_inmem_layer_at(freeze_at, &mut self.write_guard)
5808 4 : .await;
5809 :
5810 80 : self.tl.flush_frozen_layers(freeze_at)?;
5811 :
5812 80 : if current_size >= self.get_checkpoint_distance() * 2 {
5813 0 : warn!("Flushed oversized open layer with size {}", current_size)
5814 80 : }
5815 :
5816 80 : Ok(())
5817 80 : }
5818 :
5819 5090404 : fn get_open_layer_action(&self, lsn: Lsn, new_value_size: u64) -> OpenLayerAction {
5820 5090404 : let state = &*self.write_guard;
5821 5090404 : let Some(state) = &state else {
5822 1158 : return OpenLayerAction::Open;
5823 : };
5824 :
5825 : #[cfg(feature = "testing")]
5826 5089246 : if state.cached_last_freeze_at < self.tl.last_freeze_at.load() {
5827 : // this check and assertion are not really needed because
5828 : // LayerManager::try_freeze_in_memory_layer will always clear out the
5829 : // TimelineWriterState if something is frozen. however, we can advance last_freeze_at when there
5830 : // is no TimelineWriterState.
5831 0 : assert!(
5832 0 : state.open_layer.end_lsn.get().is_some(),
5833 0 : "our open_layer must be outdated"
5834 : );
5835 :
5836 : // this would be a memory leak waiting to happen because the in-memory layer always has
5837 : // an index
5838 0 : panic!("BUG: TimelineWriterState held on to frozen in-memory layer.");
5839 5089246 : }
5840 5089246 :
5841 5089246 : if state.prev_lsn == Some(lsn) {
5842 : // Rolling mid LSN is not supported by [downstream code].
5843 : // Hence, only roll at LSN boundaries.
5844 : //
5845 : // [downstream code]: https://github.com/neondatabase/neon/pull/7993#discussion_r1633345422
5846 286220 : return OpenLayerAction::None;
5847 4803026 : }
5848 4803026 :
5849 4803026 : if state.current_size == 0 {
5850 : // Don't roll empty layers
5851 0 : return OpenLayerAction::None;
5852 4803026 : }
5853 4803026 :
5854 4803026 : if self.tl.should_roll(
5855 4803026 : state.current_size,
5856 4803026 : state.current_size + new_value_size,
5857 4803026 : self.get_checkpoint_distance(),
5858 4803026 : lsn,
5859 4803026 : state.cached_last_freeze_at,
5860 4803026 : state.open_layer.get_opened_at(),
5861 4803026 : ) {
5862 80 : OpenLayerAction::Roll
5863 : } else {
5864 4802946 : OpenLayerAction::None
5865 : }
5866 5090404 : }
5867 :
5868 : /// Put a batch of keys at the specified Lsns.
5869 : ///
5870 : /// The batch is sorted by Lsn (enforced by the use of [`utils::vec_map::VecMap`]).
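     : ///
     : /// Assembly sketch (illustrative; assumes `VecMap`'s in-order `append`, which rejects
     : /// out-of-order keys):
     : ///
     : /// ```ignore
     : /// let mut batch: VecMap<Lsn, (Key, Value)> = VecMap::default();
     : /// batch.append(lsn, (key, value)).expect("lsns must be appended in order");
     : /// writer.put_batch(batch, &ctx).await?;
     : /// ```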
5871 414028 : pub(crate) async fn put_batch(
5872 414028 : &mut self,
5873 414028 : batch: VecMap<Lsn, (Key, Value)>,
5874 414028 : ctx: &RequestContext,
5875 414028 : ) -> anyhow::Result<()> {
5876 1114276 : for (lsn, (key, val)) in batch {
5877 700248 : self.put(key, lsn, &val, ctx).await?
5878 : }
5879 :
5880 414028 : Ok(())
5881 414028 : }
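// Hedged usage sketch for `put_batch` (assumes `VecMap::append`, which only
// accepts keys in ascending order and thereby enforces the Lsn-sorted
// invariant documented above; `key` and `value` are placeholders):
//
//     let mut batch: VecMap<Lsn, (Key, Value)> = VecMap::default();
//     batch.append(Lsn(0x10), (key, value.clone())).expect("LSNs must ascend");
//     batch.append(Lsn(0x20), (key, value)).expect("LSNs must ascend");
//     writer.put_batch(batch, &ctx).await?;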
5882 :
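// The open/roll decision below is made once, from the first entry's LSN; all
// tombstones in the batch then go into that same open in-memory layer.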
5883 2 : pub(crate) async fn delete_batch(
5884 2 : &mut self,
5885 2 : batch: &[(Range<Key>, Lsn)],
5886 2 : ctx: &RequestContext,
5887 2 : ) -> anyhow::Result<()> {
5888 2 : if let Some((_, lsn)) = batch.first() {
5889 2 : let action = self.get_open_layer_action(*lsn, 0);
5890 2 : let layer = self.handle_open_layer_action(*lsn, action, ctx).await?;
5891 2 : layer.put_tombstones(batch).await?;
5892 0 : }
5893 :
5894 2 : Ok(())
5895 2 : }
5896 :
5897 : /// Track the end of the latest digested WAL record: remember the (end of
5898 : /// the) last valid WAL record applied to this timeline.
5899 : ///
5900 : /// Call this after you have finished writing all the WAL up to 'new_lsn'.
5901 : ///
5902 : /// 'new_lsn' must be aligned. This wakes up any wait_lsn() callers waiting
5903 : /// for 'new_lsn' or anything older. The previous last record LSN is stored
5904 : /// alongside the latest and can be read back.
5905 5279044 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
5906 5279044 : self.tl.finish_write(new_lsn);
5907 5279044 : }
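// Hedged sketch of the ingest sequence implied by the doc comment above:
// apply everything up to the record boundary, then publish the LSN so that
// wait_lsn() callers are woken (batch construction elided):
//
//     writer.put_batch(batch, &ctx).await?;
//     writer.finish_write(end_lsn); // end_lsn: aligned end of the last record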
5908 :
5909 270570 : pub(crate) fn update_current_logical_size(&self, delta: i64) {
5910 270570 : self.tl.update_current_logical_size(delta)
5911 270570 : }
5912 : }
5913 :
5914 : // We need TimelineWriter to be Send for the upcoming conversion of
5915 : // Timeline::layers to tokio::sync::RwLock.
5916 : #[test]
5917 2 : fn is_send() {
5918 2 : fn _assert_send<T: Send>() {}
5919 2 : _assert_send::<TimelineWriter<'_>>();
5920 2 : }
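// `_assert_send` is the usual compile-time trick: the test only type-checks
// if `TimelineWriter<'_>` is `Send`, so a regression fails the build rather
// than failing at runtime.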
5921 :
5922 : #[cfg(test)]
5923 : mod tests {
5924 : use utils::{id::TimelineId, lsn::Lsn};
5925 :
5926 : use crate::tenant::{
5927 : harness::TenantHarness, storage_layer::Layer, timeline::EvictionError, Timeline,
5928 : };
5929 :
5930 : #[tokio::test]
5931 2 : async fn two_layer_eviction_attempts_at_the_same_time() {
5932 2 : let harness =
5933 2 : TenantHarness::create("two_layer_eviction_attempts_at_the_same_time").unwrap();
5934 2 :
5935 8 : let (tenant, ctx) = harness.load().await;
5936 2 : let timeline = tenant
5937 2 : .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
5938 6 : .await
5939 2 : .unwrap();
5940 2 :
5941 2 : let layer = find_some_layer(&timeline).await;
5942 2 : let layer = layer
5943 2 : .keep_resident()
5944 2 : .await
5945 2 : .expect("no download => no downloading errors")
5946 2 : .drop_eviction_guard();
5947 2 :
5948 2 : let forever = std::time::Duration::from_secs(120);
5949 2 :
5950 2 : let first = layer.evict_and_wait(forever);
5951 2 : let second = layer.evict_and_wait(forever);
5952 2 :
5953 2 : let (first, second) = tokio::join!(first, second);
5954 2 :
5955 2 : let res = layer.keep_resident().await;
5956 2 : assert!(res.is_none(), "{res:?}");
5957 2 :
5958 2 : match (first, second) {
5959 2 : (Ok(()), Ok(())) => {
5960 2 : // because there are no more timeline locks being taken on the eviction
5961 2 : // path, we can witness all three outcomes here.
5962 2 : }
5963 2 : (Ok(()), Err(EvictionError::NotFound)) | (Err(EvictionError::NotFound), Ok(())) => {
5964 0 : // if one completes before the other, this is fine just as well.
5965 0 : }
5966 2 : other => unreachable!("unexpected {:?}", other),
5967 2 : }
5968 2 : }
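// The pattern above (two `evict_and_wait` futures against the same layer,
// polled together via `tokio::join!`) exercises concurrent eviction attempts
// without any timeline-wide lock serializing them.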
5969 :
5970 2 : async fn find_some_layer(timeline: &Timeline) -> Layer {
5971 2 : let layers = timeline.layers.read().await;
5972 2 : let desc = layers
5973 2 : .layer_map()
5974 2 : .iter_historic_layers()
5975 2 : .next()
5976 2 : .expect("must find one layer to evict");
5977 2 :
5978 2 : layers.get_from_desc(&desc)
5979 2 : }
5980 : }
|