Line data Source code
1 : pub(crate) mod analysis;
2 : mod compaction;
3 : pub mod delete;
4 : pub(crate) mod detach_ancestor;
5 : mod eviction_task;
6 : mod init;
7 : pub mod layer_manager;
8 : pub(crate) mod logical_size;
9 : pub mod span;
10 : pub mod uninit;
11 : mod walreceiver;
12 :
13 : use anyhow::{anyhow, bail, ensure, Context, Result};
14 : use arc_swap::ArcSwap;
15 : use bytes::Bytes;
16 : use camino::Utf8Path;
17 : use chrono::{DateTime, Utc};
18 : use enumset::EnumSet;
19 : use fail::fail_point;
20 : use once_cell::sync::Lazy;
21 : use pageserver_api::{
22 : key::{
23 : AUX_FILES_KEY, KEY_SIZE, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX,
24 : NON_INHERITED_RANGE, NON_INHERITED_SPARSE_RANGE,
25 : },
26 : keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning},
27 : models::{
28 : AtomicAuxFilePolicy, AuxFilePolicy, CompactionAlgorithm, CompactionAlgorithmSettings,
29 : DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy,
30 : InMemoryLayerInfo, LayerMapInfo, LsnLease, TimelineState,
31 : },
32 : reltag::BlockNumber,
33 : shard::{ShardIdentity, ShardNumber, TenantShardId},
34 : };
35 : use rand::Rng;
36 : use serde_with::serde_as;
37 : use storage_broker::BrokerClientChannel;
38 : use tokio::{
39 : runtime::Handle,
40 : sync::{oneshot, watch},
41 : };
42 : use tokio_util::sync::CancellationToken;
43 : use tracing::*;
44 : use utils::{
45 : bin_ser::BeSer,
46 : fs_ext, pausable_failpoint,
47 : sync::gate::{Gate, GateGuard},
48 : vec_map::VecMap,
49 : };
50 :
51 : use std::pin::pin;
52 : use std::sync::atomic::Ordering as AtomicOrdering;
53 : use std::sync::{Arc, Mutex, RwLock, Weak};
54 : use std::time::{Duration, Instant, SystemTime};
55 : use std::{
56 : array,
57 : collections::{BTreeMap, HashMap, HashSet},
58 : sync::atomic::AtomicU64,
59 : };
60 : use std::{
61 : cmp::{max, min, Ordering},
62 : ops::ControlFlow,
63 : };
64 : use std::{
65 : collections::btree_map::Entry,
66 : ops::{Deref, Range},
67 : };
68 :
69 : use crate::{
70 : aux_file::AuxFileSizeEstimator,
71 : tenant::{
72 : config::defaults::DEFAULT_PITR_INTERVAL,
73 : layer_map::{LayerMap, SearchResult},
74 : metadata::TimelineMetadata,
75 : storage_layer::PersistentLayerDesc,
76 : },
77 : };
78 : use crate::{
79 : context::{DownloadBehavior, RequestContext},
80 : disk_usage_eviction_task::DiskUsageEvictionInfo,
81 : pgdatadir_mapping::CollectKeySpaceError,
82 : };
83 : use crate::{
84 : disk_usage_eviction_task::finite_f32,
85 : tenant::storage_layer::{
86 : AsLayerDesc, DeltaLayerWriter, EvictionError, ImageLayerWriter, InMemoryLayer, Layer,
87 : LayerAccessStatsReset, LayerName, ResidentLayer, ValueReconstructResult,
88 : ValueReconstructState, ValuesReconstructState,
89 : },
90 : };
91 : use crate::{
92 : disk_usage_eviction_task::EvictionCandidate, tenant::storage_layer::delta_layer::DeltaEntry,
93 : };
94 : use crate::{
95 : l0_flush::{self, L0FlushGlobalState},
96 : metrics::GetKind,
97 : };
98 : use crate::{
99 : metrics::ScanLatencyOngoingRecording, tenant::timeline::logical_size::CurrentLogicalSize,
100 : };
101 : use crate::{pgdatadir_mapping::LsnForTimestamp, tenant::tasks::BackgroundLoopKind};
102 : use crate::{pgdatadir_mapping::MAX_AUX_FILE_V2_DELTAS, tenant::storage_layer::PersistentLayerKey};
103 : use crate::{
104 : pgdatadir_mapping::{AuxFilesDirectory, DirectoryKind},
105 : virtual_file::{MaybeFatalIo, VirtualFile},
106 : };
107 :
108 : use crate::config::PageServerConf;
109 : use crate::keyspace::{KeyPartitioning, KeySpace};
110 : use crate::metrics::TimelineMetrics;
111 : use crate::pgdatadir_mapping::CalculateLogicalSizeError;
112 : use crate::tenant::config::TenantConfOpt;
113 : use pageserver_api::reltag::RelTag;
114 : use pageserver_api::shard::ShardIndex;
115 :
116 : use postgres_connection::PgConnectionConfig;
117 : use postgres_ffi::to_pg_timestamp;
118 : use utils::{
119 : completion,
120 : generation::Generation,
121 : id::TimelineId,
122 : lsn::{AtomicLsn, Lsn, RecordLsn},
123 : seqwait::SeqWait,
124 : simple_rcu::{Rcu, RcuReadGuard},
125 : };
126 :
127 : use crate::repository::GcResult;
128 : use crate::repository::{Key, Value};
129 : use crate::task_mgr;
130 : use crate::task_mgr::TaskKind;
131 : use crate::ZERO_PAGE;
132 :
133 : use self::delete::DeleteTimelineFlow;
134 : pub(super) use self::eviction_task::EvictionTaskTenantState;
135 : use self::eviction_task::EvictionTaskTimelineState;
136 : use self::layer_manager::LayerManager;
137 : use self::logical_size::LogicalSize;
138 : use self::walreceiver::{WalReceiver, WalReceiverConf};
139 :
140 : use super::config::TenantConf;
141 : use super::{debug_assert_current_span_has_tenant_and_timeline_id, AttachedTenantConf};
142 : use super::{remote_timeline_client::index::IndexPart, storage_layer::LayerFringe};
143 : use super::{remote_timeline_client::RemoteTimelineClient, storage_layer::ReadableLayer};
144 : use super::{
145 : secondary::heatmap::{HeatMapLayer, HeatMapTimeline},
146 : GcError,
147 : };
148 :
149 : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
150 : pub(crate) enum FlushLoopState {
151 : NotStarted,
152 : Running {
153 : #[cfg(test)]
154 : expect_initdb_optimization: bool,
155 : #[cfg(test)]
156 : initdb_optimization_count: usize,
157 : },
158 : Exited,
159 : }
160 :
161 : #[derive(Debug, Copy, Clone, PartialEq, Eq)]
162 : pub enum ImageLayerCreationMode {
163 : /// Try to create image layers based on `time_for_new_image_layer`. Used in compaction code path.
164 : Try,
165 : /// Force creating the image layers if possible. For now, no image layers will be created
166 : /// for metadata keys. Used in compaction code path with force flag enabled.
167 : Force,
168 : /// Initial ingestion of the data, and no data should be dropped in this function. This
169 : /// means that no metadata keys should be included in the partitions. Used in flush frozen layer
170 : /// code path.
171 : Initial,
172 : }
173 :
174 : impl std::fmt::Display for ImageLayerCreationMode {
175 516 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
176 516 : write!(f, "{:?}", self)
177 516 : }
178 : }
179 :
180 : /// Wrapper for key range to provide reverse ordering by range length for BinaryHeap
181 : #[derive(Debug, Clone, PartialEq, Eq)]
182 : pub(crate) struct Hole {
183 : key_range: Range<Key>,
184 : coverage_size: usize,
185 : }
186 :
187 : impl Ord for Hole {
188 0 : fn cmp(&self, other: &Self) -> Ordering {
189 0 : other.coverage_size.cmp(&self.coverage_size) // inverse order
190 0 : }
191 : }
192 :
193 : impl PartialOrd for Hole {
194 0 : fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
195 0 : Some(self.cmp(other))
196 0 : }
197 : }
198 :
199 : /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things.
200 : /// Can be removed after all refactors are done.
201 28 : fn drop_rlock<T>(rlock: tokio::sync::RwLockReadGuard<T>) {
202 28 : drop(rlock)
203 28 : }
204 :
205 : /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things.
206 : /// Can be removed after all refactors are done.
207 544 : fn drop_wlock<T>(rlock: tokio::sync::RwLockWriteGuard<'_, T>) {
208 544 : drop(rlock)
209 544 : }
210 :
211 : /// The outward-facing resources required to build a Timeline
212 : pub struct TimelineResources {
213 : pub remote_client: RemoteTimelineClient,
214 : pub timeline_get_throttle: Arc<
215 : crate::tenant::throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>,
216 : >,
217 : pub l0_flush_global_state: l0_flush::L0FlushGlobalState,
218 : }
219 :
220 : pub(crate) struct AuxFilesState {
221 : pub(crate) dir: Option<AuxFilesDirectory>,
222 : pub(crate) n_deltas: usize,
223 : }
224 :
225 : /// The relation size cache caches relation sizes at the end of the timeline. It speeds up WAL
226 : /// ingestion considerably, because WAL ingestion needs to check on most records if the record
227 : /// implicitly extends the relation. At startup, `complete_as_of` is initialized to the current end
228 : /// of the timeline (disk_consistent_lsn). It's used on reads of relation sizes to check if the
229 : /// value can be used to also update the cache, see [`Timeline::update_cached_rel_size`].
230 : pub(crate) struct RelSizeCache {
231 : pub(crate) complete_as_of: Lsn,
232 : pub(crate) map: HashMap<RelTag, (Lsn, BlockNumber)>,
233 : }
234 :
235 : pub struct Timeline {
236 : conf: &'static PageServerConf,
237 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
238 :
239 : myself: Weak<Self>,
240 :
241 : pub(crate) tenant_shard_id: TenantShardId,
242 : pub timeline_id: TimelineId,
243 :
244 : /// The generation of the tenant that instantiated us: this is used for safety when writing remote objects.
245 : /// Never changes for the lifetime of this [`Timeline`] object.
246 : ///
247 : /// This duplicates the generation stored in LocationConf, but that structure is mutable:
248 : /// this copy enforces the invariant that generatio doesn't change during a Tenant's lifetime.
249 : pub(crate) generation: Generation,
250 :
251 : /// The detailed sharding information from our parent Tenant. This enables us to map keys
252 : /// to shards, and is constant through the lifetime of this Timeline.
253 : shard_identity: ShardIdentity,
254 :
255 : pub pg_version: u32,
256 :
257 : /// The tuple has two elements.
258 : /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote).
259 : /// 2. `LayerMap`, the acceleration data structure for `get_reconstruct_data`.
260 : ///
261 : /// `LayerMap` maps out the `(PAGE,LSN) / (KEY,LSN)` space, which is composed of `(KeyRange, LsnRange)` rectangles.
262 : /// We describe these rectangles through the `PersistentLayerDesc` struct.
263 : ///
264 : /// When we want to reconstruct a page, we first find the `PersistentLayerDesc`'s that we need for page reconstruction,
265 : /// using `LayerMap`. Then, we use `LayerFileManager` to get the `PersistentLayer`'s that correspond to the
266 : /// `PersistentLayerDesc`'s.
267 : ///
268 : /// Hence, it's important to keep things coherent. The `LayerFileManager` must always have an entry for all
269 : /// `PersistentLayerDesc`'s in the `LayerMap`. If it doesn't, `LayerFileManager::get_from_desc` will panic at
270 : /// runtime, e.g., during page reconstruction.
271 : ///
272 : /// In the future, we'll be able to split up the tuple of LayerMap and `LayerFileManager`,
273 : /// so that e.g. on-demand-download/eviction, and layer spreading, can operate just on `LayerFileManager`.
274 : pub(crate) layers: tokio::sync::RwLock<LayerManager>,
275 :
276 : last_freeze_at: AtomicLsn,
277 : // Atomic would be more appropriate here.
278 : last_freeze_ts: RwLock<Instant>,
279 :
280 : pub(crate) standby_horizon: AtomicLsn,
281 :
282 : // WAL redo manager. `None` only for broken tenants.
283 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
284 :
285 : /// Remote storage client.
286 : /// See [`remote_timeline_client`](super::remote_timeline_client) module comment for details.
287 : pub remote_client: Arc<RemoteTimelineClient>,
288 :
289 : // What page versions do we hold in the repository? If we get a
290 : // request > last_record_lsn, we need to wait until we receive all
291 : // the WAL up to the request. The SeqWait provides functions for
292 : // that. TODO: If we get a request for an old LSN, such that the
293 : // versions have already been garbage collected away, we should
294 : // throw an error, but we don't track that currently.
295 : //
296 : // last_record_lsn.load().last points to the end of last processed WAL record.
297 : //
298 : // We also remember the starting point of the previous record in
299 : // 'last_record_lsn.load().prev'. It's used to set the xl_prev pointer of the
300 : // first WAL record when the node is started up. But here, we just
301 : // keep track of it.
302 : last_record_lsn: SeqWait<RecordLsn, Lsn>,
303 :
304 : // All WAL records have been processed and stored durably on files on
305 : // local disk, up to this LSN. On crash and restart, we need to re-process
306 : // the WAL starting from this point.
307 : //
308 : // Some later WAL records might have been processed and also flushed to disk
309 : // already, so don't be surprised to see some, but there's no guarantee on
310 : // them yet.
311 : disk_consistent_lsn: AtomicLsn,
312 :
313 : // Parent timeline that this timeline was branched from, and the LSN
314 : // of the branch point.
315 : ancestor_timeline: Option<Arc<Timeline>>,
316 : ancestor_lsn: Lsn,
317 :
318 : pub(super) metrics: TimelineMetrics,
319 :
320 : // `Timeline` doesn't write these metrics itself, but it manages the lifetime. Code
321 : // in `crate::page_service` writes these metrics.
322 : pub(crate) query_metrics: crate::metrics::SmgrQueryTimePerTimeline,
323 :
324 : directory_metrics: [AtomicU64; DirectoryKind::KINDS_NUM],
325 :
326 : /// Ensures layers aren't frozen by checkpointer between
327 : /// [`Timeline::get_layer_for_write`] and layer reads.
328 : /// Locked automatically by [`TimelineWriter`] and checkpointer.
329 : /// Must always be acquired before the layer map/individual layer lock
330 : /// to avoid deadlock.
331 : ///
332 : /// The state is cleared upon freezing.
333 : write_lock: tokio::sync::Mutex<Option<TimelineWriterState>>,
334 :
335 : /// Used to avoid multiple `flush_loop` tasks running
336 : pub(super) flush_loop_state: Mutex<FlushLoopState>,
337 :
338 : /// layer_flush_start_tx can be used to wake up the layer-flushing task.
339 : /// - The u64 value is a counter, incremented every time a new flush cycle is requested.
340 : /// The flush cycle counter is sent back on the layer_flush_done channel when
341 : /// the flush finishes. You can use that to wait for the flush to finish.
342 : /// - The LSN is updated to max() of its current value and the latest disk_consistent_lsn
343 : /// read by whoever sends an update
344 : layer_flush_start_tx: tokio::sync::watch::Sender<(u64, Lsn)>,
345 : /// to be notified when layer flushing has finished, subscribe to the layer_flush_done channel
346 : layer_flush_done_tx: tokio::sync::watch::Sender<(u64, Result<(), FlushLayerError>)>,
347 :
348 : // Needed to ensure that we can't create a branch at a point that was already garbage collected
349 : pub latest_gc_cutoff_lsn: Rcu<Lsn>,
350 :
351 : // List of child timelines and their branch points. This is needed to avoid
352 : // garbage collecting data that is still needed by the child timelines.
353 : pub(crate) gc_info: std::sync::RwLock<GcInfo>,
354 :
355 : // It may change across major versions so for simplicity
356 : // keep it after running initdb for a timeline.
357 : // It is needed in checks when we want to error on some operations
358 : // when they are requested for pre-initdb lsn.
359 : // It can be unified with latest_gc_cutoff_lsn under some "first_valid_lsn",
360 : // though let's keep them both for better error visibility.
361 : pub initdb_lsn: Lsn,
362 :
363 : /// When did we last calculate the partitioning? Make it pub to test cases.
364 : pub(super) partitioning: tokio::sync::Mutex<((KeyPartitioning, SparseKeyPartitioning), Lsn)>,
365 :
366 : /// Configuration: how often should the partitioning be recalculated.
367 : repartition_threshold: u64,
368 :
369 : last_image_layer_creation_check_at: AtomicLsn,
370 : last_image_layer_creation_check_instant: std::sync::Mutex<Option<Instant>>,
371 :
372 : /// Current logical size of the "datadir", at the last LSN.
373 : current_logical_size: LogicalSize,
374 :
375 : /// Information about the last processed message by the WAL receiver,
376 : /// or None if WAL receiver has not received anything for this timeline
377 : /// yet.
378 : pub last_received_wal: Mutex<Option<WalReceiverInfo>>,
379 : pub walreceiver: Mutex<Option<WalReceiver>>,
380 :
381 : /// Relation size cache
382 : pub(crate) rel_size_cache: RwLock<RelSizeCache>,
383 :
384 : download_all_remote_layers_task_info: RwLock<Option<DownloadRemoteLayersTaskInfo>>,
385 :
386 : state: watch::Sender<TimelineState>,
387 :
388 : /// Prevent two tasks from deleting the timeline at the same time. If held, the
389 : /// timeline is being deleted. If 'true', the timeline has already been deleted.
390 : pub delete_progress: Arc<tokio::sync::Mutex<DeleteTimelineFlow>>,
391 :
392 : eviction_task_timeline_state: tokio::sync::Mutex<EvictionTaskTimelineState>,
393 :
394 : /// Load or creation time information about the disk_consistent_lsn and when the loading
395 : /// happened. Used for consumption metrics.
396 : pub(crate) loaded_at: (Lsn, SystemTime),
397 :
398 : /// Gate to prevent shutdown completing while I/O is still happening to this timeline's data
399 : pub(crate) gate: Gate,
400 :
401 : /// Cancellation token scoped to this timeline: anything doing long-running work relating
402 : /// to the timeline should drop out when this token fires.
403 : pub(crate) cancel: CancellationToken,
404 :
405 : /// Make sure we only have one running compaction at a time in tests.
406 : ///
407 : /// Must only be taken in two places:
408 : /// - [`Timeline::compact`] (this file)
409 : /// - [`delete::delete_local_timeline_directory`]
410 : ///
411 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
412 : compaction_lock: tokio::sync::Mutex<()>,
413 :
414 : /// Make sure we only have one running gc at a time.
415 : ///
416 : /// Must only be taken in two places:
417 : /// - [`Timeline::gc`] (this file)
418 : /// - [`delete::delete_local_timeline_directory`]
419 : ///
420 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
421 : gc_lock: tokio::sync::Mutex<()>,
422 :
423 : /// Cloned from [`super::Tenant::timeline_get_throttle`] on construction.
424 : timeline_get_throttle: Arc<
425 : crate::tenant::throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>,
426 : >,
427 :
428 : /// Keep aux directory cache to avoid it's reconstruction on each update
429 : pub(crate) aux_files: tokio::sync::Mutex<AuxFilesState>,
430 :
431 : /// Size estimator for aux file v2
432 : pub(crate) aux_file_size_estimator: AuxFileSizeEstimator,
433 :
434 : /// Indicate whether aux file v2 storage is enabled.
435 : pub(crate) last_aux_file_policy: AtomicAuxFilePolicy,
436 :
437 : /// Some test cases directly place keys into the timeline without actually modifying the directory
438 : /// keys (i.e., DB_DIR). The test cases creating such keys will put the keyspaces here, so that
439 : /// these keys won't get garbage-collected during compaction/GC. This field only modifies the dense
440 : /// keyspace return value of `collect_keyspace`. For sparse keyspaces, use AUX keys for testing, and
441 : /// in the future, add `extra_test_sparse_keyspace` if necessary.
442 : #[cfg(test)]
443 : pub(crate) extra_test_dense_keyspace: ArcSwap<KeySpace>,
444 :
445 : pub(crate) l0_flush_global_state: L0FlushGlobalState,
446 : }
447 :
448 : pub struct WalReceiverInfo {
449 : pub wal_source_connconf: PgConnectionConfig,
450 : pub last_received_msg_lsn: Lsn,
451 : pub last_received_msg_ts: u128,
452 : }
453 :
454 : /// Information about how much history needs to be retained, needed by
455 : /// Garbage Collection.
456 : #[derive(Default)]
457 : pub(crate) struct GcInfo {
458 : /// Specific LSNs that are needed.
459 : ///
460 : /// Currently, this includes all points where child branches have
461 : /// been forked off from. In the future, could also include
462 : /// explicit user-defined snapshot points.
463 : pub(crate) retain_lsns: Vec<Lsn>,
464 :
465 : /// The cutoff coordinates, which are combined by selecting the minimum.
466 : pub(crate) cutoffs: GcCutoffs,
467 :
468 : /// Leases granted to particular LSNs.
469 : pub(crate) leases: BTreeMap<Lsn, LsnLease>,
470 :
471 : /// Whether our branch point is within our ancestor's PITR interval (for cost estimation)
472 : pub(crate) within_ancestor_pitr: bool,
473 : }
474 :
475 : impl GcInfo {
476 224 : pub(crate) fn min_cutoff(&self) -> Lsn {
477 224 : self.cutoffs.select_min()
478 224 : }
479 : }
480 :
481 : /// The `GcInfo` component describing which Lsns need to be retained. Functionally, this
482 : /// is a single number (the oldest LSN which we must retain), but it internally distinguishes
483 : /// between time-based and space-based retention for observability and consumption metrics purposes.
484 : #[derive(Debug)]
485 : pub(crate) struct GcCutoffs {
486 : /// Calculated from the [`TenantConf::gc_horizon`], this LSN indicates how much
487 : /// history we must keep to retain a specified number of bytes of WAL.
488 : pub(crate) space: Lsn,
489 :
490 : /// Calculated from [`TenantConf::pitr_interval`], this LSN indicates how much
491 : /// history we must keep to enable reading back at least the PITR interval duration.
492 : pub(crate) time: Lsn,
493 : }
494 :
495 : impl Default for GcCutoffs {
496 390 : fn default() -> Self {
497 390 : Self {
498 390 : space: Lsn::INVALID,
499 390 : time: Lsn::INVALID,
500 390 : }
501 390 : }
502 : }
503 :
504 : impl GcCutoffs {
505 228 : fn select_min(&self) -> Lsn {
506 228 : std::cmp::min(self.space, self.time)
507 228 : }
508 : }
509 :
510 : pub(crate) struct TimelineVisitOutcome {
511 : completed_keyspace: KeySpace,
512 : image_covered_keyspace: KeySpace,
513 : }
514 :
515 : /// An error happened in a get() operation.
516 2 : #[derive(thiserror::Error, Debug)]
517 : pub(crate) enum PageReconstructError {
518 : #[error(transparent)]
519 : Other(#[from] anyhow::Error),
520 :
521 : #[error("Ancestor LSN wait error: {0}")]
522 : AncestorLsnTimeout(WaitLsnError),
523 :
524 : #[error("timeline shutting down")]
525 : Cancelled,
526 :
527 : /// An error happened replaying WAL records
528 : #[error(transparent)]
529 : WalRedo(anyhow::Error),
530 :
531 : #[error("{0}")]
532 : MissingKey(MissingKeyError),
533 : }
534 :
535 : impl GetVectoredError {
536 : #[cfg(test)]
537 6 : pub(crate) fn is_missing_key_error(&self) -> bool {
538 6 : matches!(self, Self::MissingKey(_))
539 6 : }
540 : }
541 :
542 : #[derive(Debug)]
543 : pub struct MissingKeyError {
544 : key: Key,
545 : shard: ShardNumber,
546 : cont_lsn: Lsn,
547 : request_lsn: Lsn,
548 : ancestor_lsn: Option<Lsn>,
549 : traversal_path: Vec<TraversalPathItem>,
550 : backtrace: Option<std::backtrace::Backtrace>,
551 : }
552 :
553 : impl std::fmt::Display for MissingKeyError {
554 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
555 0 : write!(
556 0 : f,
557 0 : "could not find data for key {} (shard {:?}) at LSN {}, request LSN {}",
558 0 : self.key, self.shard, self.cont_lsn, self.request_lsn
559 0 : )?;
560 0 : if let Some(ref ancestor_lsn) = self.ancestor_lsn {
561 0 : write!(f, ", ancestor {}", ancestor_lsn)?;
562 0 : }
563 :
564 0 : if !self.traversal_path.is_empty() {
565 0 : writeln!(f)?;
566 0 : }
567 :
568 0 : for (r, c, l) in &self.traversal_path {
569 0 : writeln!(
570 0 : f,
571 0 : "layer traversal: result {:?}, cont_lsn {}, layer: {}",
572 0 : r, c, l,
573 0 : )?;
574 : }
575 :
576 0 : if let Some(ref backtrace) = self.backtrace {
577 0 : write!(f, "\n{}", backtrace)?;
578 0 : }
579 :
580 0 : Ok(())
581 0 : }
582 : }
583 :
584 : impl PageReconstructError {
585 : /// Returns true if this error indicates a tenant/timeline shutdown alike situation
586 0 : pub(crate) fn is_stopping(&self) -> bool {
587 0 : use PageReconstructError::*;
588 0 : match self {
589 0 : Other(_) => false,
590 0 : AncestorLsnTimeout(_) => false,
591 0 : Cancelled => true,
592 0 : WalRedo(_) => false,
593 0 : MissingKey { .. } => false,
594 : }
595 0 : }
596 : }
597 :
598 0 : #[derive(thiserror::Error, Debug)]
599 : pub(crate) enum CreateImageLayersError {
600 : #[error("timeline shutting down")]
601 : Cancelled,
602 :
603 : #[error(transparent)]
604 : GetVectoredError(GetVectoredError),
605 :
606 : #[error(transparent)]
607 : PageReconstructError(PageReconstructError),
608 :
609 : #[error(transparent)]
610 : Other(#[from] anyhow::Error),
611 : }
612 :
613 0 : #[derive(thiserror::Error, Debug, Clone)]
614 : pub(crate) enum FlushLayerError {
615 : /// Timeline cancellation token was cancelled
616 : #[error("timeline shutting down")]
617 : Cancelled,
618 :
619 : /// We tried to flush a layer while the Timeline is in an unexpected state
620 : #[error("cannot flush frozen layers when flush_loop is not running, state is {0:?}")]
621 : NotRunning(FlushLoopState),
622 :
623 : // Arc<> the following non-clonable error types: we must be Clone-able because the flush error is propagated from the flush
624 : // loop via a watch channel, where we can only borrow it.
625 : #[error(transparent)]
626 : CreateImageLayersError(Arc<CreateImageLayersError>),
627 :
628 : #[error(transparent)]
629 : Other(#[from] Arc<anyhow::Error>),
630 : }
631 :
632 : impl FlushLayerError {
633 : // When crossing from generic anyhow errors to this error type, we explicitly check
634 : // for timeline cancellation to avoid logging inoffensive shutdown errors as warn/err.
635 0 : fn from_anyhow(timeline: &Timeline, err: anyhow::Error) -> Self {
636 0 : if timeline.cancel.is_cancelled() {
637 0 : Self::Cancelled
638 : } else {
639 0 : Self::Other(Arc::new(err))
640 : }
641 0 : }
642 : }
643 :
644 0 : #[derive(thiserror::Error, Debug)]
645 : pub(crate) enum GetVectoredError {
646 : #[error("timeline shutting down")]
647 : Cancelled,
648 :
649 : #[error("Requested too many keys: {0} > {}", Timeline::MAX_GET_VECTORED_KEYS)]
650 : Oversized(u64),
651 :
652 : #[error("Requested at invalid LSN: {0}")]
653 : InvalidLsn(Lsn),
654 :
655 : #[error("Requested key not found: {0}")]
656 : MissingKey(MissingKeyError),
657 :
658 : #[error(transparent)]
659 : GetReadyAncestorError(GetReadyAncestorError),
660 :
661 : #[error(transparent)]
662 : Other(#[from] anyhow::Error),
663 : }
664 :
665 2 : #[derive(thiserror::Error, Debug)]
666 : pub(crate) enum GetReadyAncestorError {
667 : #[error("Ancestor LSN wait error: {0}")]
668 : AncestorLsnTimeout(#[from] WaitLsnError),
669 :
670 : #[error("Bad state on timeline {timeline_id}: {state:?}")]
671 : BadState {
672 : timeline_id: TimelineId,
673 : state: TimelineState,
674 : },
675 :
676 : #[error("Cancelled")]
677 : Cancelled,
678 : }
679 :
680 : #[derive(Clone, Copy)]
681 : pub enum LogicalSizeCalculationCause {
682 : Initial,
683 : ConsumptionMetricsSyntheticSize,
684 : EvictionTaskImitation,
685 : TenantSizeHandler,
686 : }
687 :
688 : pub enum GetLogicalSizePriority {
689 : User,
690 : Background,
691 : }
692 :
693 0 : #[derive(enumset::EnumSetType)]
694 : pub(crate) enum CompactFlags {
695 : ForceRepartition,
696 : ForceImageLayerCreation,
697 : EnhancedGcBottomMostCompaction,
698 : }
699 :
700 : impl std::fmt::Debug for Timeline {
701 0 : fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
702 0 : write!(f, "Timeline<{}>", self.timeline_id)
703 0 : }
704 : }
705 :
706 0 : #[derive(thiserror::Error, Debug)]
707 : pub(crate) enum WaitLsnError {
708 : // Called on a timeline which is shutting down
709 : #[error("Shutdown")]
710 : Shutdown,
711 :
712 : // Called on an timeline not in active state or shutting down
713 : #[error("Bad timeline state: {0:?}")]
714 : BadState(TimelineState),
715 :
716 : // Timeout expired while waiting for LSN to catch up with goal.
717 : #[error("{0}")]
718 : Timeout(String),
719 : }
720 :
721 : // The impls below achieve cancellation mapping for errors.
722 : // Perhaps there's a way of achieving this with less cruft.
723 :
724 : impl From<CreateImageLayersError> for CompactionError {
725 0 : fn from(e: CreateImageLayersError) -> Self {
726 0 : match e {
727 0 : CreateImageLayersError::Cancelled => CompactionError::ShuttingDown,
728 0 : CreateImageLayersError::Other(e) => {
729 0 : CompactionError::Other(e.context("create image layers"))
730 : }
731 0 : _ => CompactionError::Other(e.into()),
732 : }
733 0 : }
734 : }
735 :
736 : impl From<CreateImageLayersError> for FlushLayerError {
737 0 : fn from(e: CreateImageLayersError) -> Self {
738 0 : match e {
739 0 : CreateImageLayersError::Cancelled => FlushLayerError::Cancelled,
740 0 : any => FlushLayerError::CreateImageLayersError(Arc::new(any)),
741 : }
742 0 : }
743 : }
744 :
745 : impl From<PageReconstructError> for CreateImageLayersError {
746 0 : fn from(e: PageReconstructError) -> Self {
747 0 : match e {
748 0 : PageReconstructError::Cancelled => CreateImageLayersError::Cancelled,
749 0 : _ => CreateImageLayersError::PageReconstructError(e),
750 : }
751 0 : }
752 : }
753 :
754 : impl From<GetVectoredError> for CreateImageLayersError {
755 0 : fn from(e: GetVectoredError) -> Self {
756 0 : match e {
757 0 : GetVectoredError::Cancelled => CreateImageLayersError::Cancelled,
758 0 : _ => CreateImageLayersError::GetVectoredError(e),
759 : }
760 0 : }
761 : }
762 :
763 : impl From<GetVectoredError> for PageReconstructError {
764 0 : fn from(e: GetVectoredError) -> Self {
765 0 : match e {
766 0 : GetVectoredError::Cancelled => PageReconstructError::Cancelled,
767 0 : GetVectoredError::InvalidLsn(_) => PageReconstructError::Other(anyhow!("Invalid LSN")),
768 0 : err @ GetVectoredError::Oversized(_) => PageReconstructError::Other(err.into()),
769 0 : GetVectoredError::MissingKey(err) => PageReconstructError::MissingKey(err),
770 0 : GetVectoredError::GetReadyAncestorError(err) => PageReconstructError::from(err),
771 0 : GetVectoredError::Other(err) => PageReconstructError::Other(err),
772 : }
773 0 : }
774 : }
775 :
776 : impl From<GetReadyAncestorError> for PageReconstructError {
777 2 : fn from(e: GetReadyAncestorError) -> Self {
778 2 : use GetReadyAncestorError::*;
779 2 : match e {
780 0 : AncestorLsnTimeout(wait_err) => PageReconstructError::AncestorLsnTimeout(wait_err),
781 2 : bad_state @ BadState { .. } => PageReconstructError::Other(anyhow::anyhow!(bad_state)),
782 0 : Cancelled => PageReconstructError::Cancelled,
783 : }
784 2 : }
785 : }
786 :
787 : #[derive(
788 : Eq,
789 : PartialEq,
790 : Debug,
791 : Copy,
792 : Clone,
793 196 : strum_macros::EnumString,
794 0 : strum_macros::Display,
795 0 : serde_with::DeserializeFromStr,
796 : serde_with::SerializeDisplay,
797 : )]
798 : #[strum(serialize_all = "kebab-case")]
799 : pub enum GetVectoredImpl {
800 : Sequential,
801 : Vectored,
802 : }
803 :
804 : #[derive(
805 : Eq,
806 : PartialEq,
807 : Debug,
808 : Copy,
809 : Clone,
810 196 : strum_macros::EnumString,
811 0 : strum_macros::Display,
812 0 : serde_with::DeserializeFromStr,
813 : serde_with::SerializeDisplay,
814 : )]
815 : #[strum(serialize_all = "kebab-case")]
816 : pub enum GetImpl {
817 : Legacy,
818 : Vectored,
819 : }
820 :
821 : pub(crate) enum WaitLsnWaiter<'a> {
822 : Timeline(&'a Timeline),
823 : Tenant,
824 : PageService,
825 : }
826 :
827 : /// Argument to [`Timeline::shutdown`].
828 : #[derive(Debug, Clone, Copy)]
829 : pub(crate) enum ShutdownMode {
830 : /// Graceful shutdown, may do a lot of I/O as we flush any open layers to disk and then
831 : /// also to remote storage. This method can easily take multiple seconds for a busy timeline.
832 : ///
833 : /// While we are flushing, we continue to accept read I/O for LSNs ingested before
834 : /// the call to [`Timeline::shutdown`].
835 : FreezeAndFlush,
836 : /// Shut down immediately, without waiting for any open layers to flush.
837 : Hard,
838 : }
839 :
840 : struct ImageLayerCreationOutcome {
841 : image: Option<ResidentLayer>,
842 : next_start_key: Key,
843 : }
844 :
845 : /// Public interface functions
846 : impl Timeline {
847 : /// Get the LSN where this branch was created
848 8 : pub(crate) fn get_ancestor_lsn(&self) -> Lsn {
849 8 : self.ancestor_lsn
850 8 : }
851 :
852 : /// Get the ancestor's timeline id
853 4064 : pub(crate) fn get_ancestor_timeline_id(&self) -> Option<TimelineId> {
854 4064 : self.ancestor_timeline
855 4064 : .as_ref()
856 4064 : .map(|ancestor| ancestor.timeline_id)
857 4064 : }
858 :
859 : /// Get the bytes written since the PITR cutoff on this branch, and
860 : /// whether this branch's ancestor_lsn is within its parent's PITR.
861 0 : pub(crate) fn get_pitr_history_stats(&self) -> (u64, bool) {
862 0 : let gc_info = self.gc_info.read().unwrap();
863 0 : let history = self
864 0 : .get_last_record_lsn()
865 0 : .checked_sub(gc_info.cutoffs.time)
866 0 : .unwrap_or(Lsn(0))
867 0 : .0;
868 0 : (history, gc_info.within_ancestor_pitr)
869 0 : }
870 :
871 : /// Lock and get timeline's GC cutoff
872 993 : pub(crate) fn get_latest_gc_cutoff_lsn(&self) -> RcuReadGuard<Lsn> {
873 993 : self.latest_gc_cutoff_lsn.read()
874 993 : }
875 :
876 : /// Look up given page version.
877 : ///
878 : /// If a remote layer file is needed, it is downloaded as part of this
879 : /// call.
880 : ///
881 : /// This method enforces [`Self::timeline_get_throttle`] internally.
882 : ///
883 : /// NOTE: It is considered an error to 'get' a key that doesn't exist. The
884 : /// abstraction above this needs to store suitable metadata to track what
885 : /// data exists with what keys, in separate metadata entries. If a
886 : /// non-existent key is requested, we may incorrectly return a value from
887 : /// an ancestor branch, for example, or waste a lot of cycles chasing the
888 : /// non-existing key.
889 : ///
890 : /// # Cancel-Safety
891 : ///
892 : /// This method is cancellation-safe.
893 : #[inline(always)]
894 624352 : pub(crate) async fn get(
895 624352 : &self,
896 624352 : key: Key,
897 624352 : lsn: Lsn,
898 624352 : ctx: &RequestContext,
899 624352 : ) -> Result<Bytes, PageReconstructError> {
900 624352 : if !lsn.is_valid() {
901 0 : return Err(PageReconstructError::Other(anyhow::anyhow!("Invalid LSN")));
902 624352 : }
903 624352 :
904 624352 : // This check is debug-only because of the cost of hashing, and because it's a double-check: we
905 624352 : // already checked the key against the shard_identity when looking up the Timeline from
906 624352 : // page_service.
907 624352 : debug_assert!(!self.shard_identity.is_key_disposable(&key));
908 :
909 624352 : self.timeline_get_throttle.throttle(ctx, 1).await;
910 :
911 624352 : match self.conf.get_impl {
912 : GetImpl::Legacy => {
913 624352 : let reconstruct_state = ValueReconstructState {
914 624352 : records: Vec::new(),
915 624352 : img: None,
916 624352 : };
917 624352 :
918 624352 : self.get_impl(key, lsn, reconstruct_state, ctx).await
919 : }
920 : GetImpl::Vectored => {
921 0 : let keyspace = KeySpace {
922 0 : ranges: vec![key..key.next()],
923 0 : };
924 0 :
925 0 : // Initialise the reconstruct state for the key with the cache
926 0 : // entry returned above.
927 0 : let mut reconstruct_state = ValuesReconstructState::new();
928 :
929 0 : let vectored_res = self
930 0 : .get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx)
931 0 : .await;
932 :
933 0 : if self.conf.validate_vectored_get {
934 0 : self.validate_get_vectored_impl(&vectored_res, keyspace, lsn, ctx)
935 0 : .await;
936 0 : }
937 :
938 0 : let key_value = vectored_res?.pop_first();
939 0 : match key_value {
940 0 : Some((got_key, value)) => {
941 0 : if got_key != key {
942 0 : error!(
943 0 : "Expected {}, but singular vectored get returned {}",
944 : key, got_key
945 : );
946 0 : Err(PageReconstructError::Other(anyhow!(
947 0 : "Singular vectored get returned wrong key"
948 0 : )))
949 : } else {
950 0 : value
951 : }
952 : }
953 0 : None => Err(PageReconstructError::MissingKey(MissingKeyError {
954 0 : key,
955 0 : shard: self.shard_identity.get_shard_number(&key),
956 0 : cont_lsn: Lsn(0),
957 0 : request_lsn: lsn,
958 0 : ancestor_lsn: None,
959 0 : traversal_path: Vec::new(),
960 0 : backtrace: None,
961 0 : })),
962 : }
963 : }
964 : }
965 624352 : }
966 :
967 : /// Not subject to [`Self::timeline_get_throttle`].
968 626000 : async fn get_impl(
969 626000 : &self,
970 626000 : key: Key,
971 626000 : lsn: Lsn,
972 626000 : mut reconstruct_state: ValueReconstructState,
973 626000 : ctx: &RequestContext,
974 626000 : ) -> Result<Bytes, PageReconstructError> {
975 626000 : // XXX: structured stats collection for layer eviction here.
976 626000 : trace!(
977 0 : "get page request for {}@{} from task kind {:?}",
978 0 : key,
979 0 : lsn,
980 0 : ctx.task_kind()
981 : );
982 :
983 626000 : let timer = crate::metrics::GET_RECONSTRUCT_DATA_TIME
984 626000 : .for_get_kind(GetKind::Singular)
985 626000 : .start_timer();
986 626000 : let path = self
987 626000 : .get_reconstruct_data(key, lsn, &mut reconstruct_state, ctx)
988 45464 : .await?;
989 625870 : timer.stop_and_record();
990 625870 :
991 625870 : let start = Instant::now();
992 625870 : let res = self.reconstruct_value(key, lsn, reconstruct_state).await;
993 625870 : let elapsed = start.elapsed();
994 625870 : crate::metrics::RECONSTRUCT_TIME
995 625870 : .for_get_kind(GetKind::Singular)
996 625870 : .observe(elapsed.as_secs_f64());
997 625870 :
998 625870 : if cfg!(feature = "testing") && res.is_err() {
999 : // it can only be walredo issue
1000 : use std::fmt::Write;
1001 :
1002 0 : let mut msg = String::new();
1003 0 :
1004 0 : path.into_iter().for_each(|(res, cont_lsn, layer)| {
1005 0 : writeln!(
1006 0 : msg,
1007 0 : "- layer traversal: result {res:?}, cont_lsn {cont_lsn}, layer: {}",
1008 0 : layer,
1009 0 : )
1010 0 : .expect("string grows")
1011 0 : });
1012 0 :
1013 0 : // this is to rule out or provide evidence that we could in some cases read a duplicate
1014 0 : // walrecord
1015 0 : tracing::info!("walredo failed, path:\n{msg}");
1016 625870 : }
1017 :
1018 625870 : res
1019 626000 : }
1020 :
1021 : pub(crate) const MAX_GET_VECTORED_KEYS: u64 = 32;
1022 : pub(crate) const VEC_GET_LAYERS_VISITED_WARN_THRESH: f64 = 512.0;
1023 :
1024 : /// Look up multiple page versions at a given LSN
1025 : ///
1026 : /// This naive implementation will be replaced with a more efficient one
1027 : /// which actually vectorizes the read path.
1028 1000 : pub(crate) async fn get_vectored(
1029 1000 : &self,
1030 1000 : keyspace: KeySpace,
1031 1000 : lsn: Lsn,
1032 1000 : ctx: &RequestContext,
1033 1000 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1034 1000 : if !lsn.is_valid() {
1035 0 : return Err(GetVectoredError::InvalidLsn(lsn));
1036 1000 : }
1037 1000 :
1038 1000 : let key_count = keyspace.total_raw_size().try_into().unwrap();
1039 1000 : if key_count > Timeline::MAX_GET_VECTORED_KEYS {
1040 0 : return Err(GetVectoredError::Oversized(key_count));
1041 1000 : }
1042 :
1043 2000 : for range in &keyspace.ranges {
1044 1000 : let mut key = range.start;
1045 2326 : while key != range.end {
1046 1326 : assert!(!self.shard_identity.is_key_disposable(&key));
1047 1326 : key = key.next();
1048 : }
1049 : }
1050 :
1051 1000 : trace!(
1052 0 : "get vectored request for {:?}@{} from task kind {:?} will use {} implementation",
1053 0 : keyspace,
1054 0 : lsn,
1055 0 : ctx.task_kind(),
1056 : self.conf.get_vectored_impl
1057 : );
1058 :
1059 1000 : let start = crate::metrics::GET_VECTORED_LATENCY
1060 1000 : .for_task_kind(ctx.task_kind())
1061 1000 : .map(|metric| (metric, Instant::now()));
1062 :
1063 : // start counting after throttle so that throttle time
1064 : // is always less than observation time
1065 1000 : let throttled = self
1066 1000 : .timeline_get_throttle
1067 1000 : .throttle(ctx, key_count as usize)
1068 0 : .await;
1069 :
1070 1000 : let res = match self.conf.get_vectored_impl {
1071 : GetVectoredImpl::Sequential => {
1072 1000 : self.get_vectored_sequential_impl(keyspace, lsn, ctx).await
1073 : }
1074 : GetVectoredImpl::Vectored => {
1075 0 : let vectored_res = self
1076 0 : .get_vectored_impl(
1077 0 : keyspace.clone(),
1078 0 : lsn,
1079 0 : &mut ValuesReconstructState::new(),
1080 0 : ctx,
1081 0 : )
1082 0 : .await;
1083 :
1084 0 : if self.conf.validate_vectored_get {
1085 0 : self.validate_get_vectored_impl(&vectored_res, keyspace, lsn, ctx)
1086 0 : .await;
1087 0 : }
1088 :
1089 0 : vectored_res
1090 : }
1091 : };
1092 :
1093 1000 : if let Some((metric, start)) = start {
1094 0 : let elapsed = start.elapsed();
1095 0 : let ex_throttled = if let Some(throttled) = throttled {
1096 0 : elapsed.checked_sub(throttled)
1097 : } else {
1098 0 : Some(elapsed)
1099 : };
1100 :
1101 0 : if let Some(ex_throttled) = ex_throttled {
1102 0 : metric.observe(ex_throttled.as_secs_f64());
1103 0 : } else {
1104 0 : use utils::rate_limit::RateLimit;
1105 0 : static LOGGED: Lazy<Mutex<RateLimit>> =
1106 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(10))));
1107 0 : let mut rate_limit = LOGGED.lock().unwrap();
1108 0 : rate_limit.call(|| {
1109 0 : warn!("error deducting time spent throttled; this message is logged at a global rate limit");
1110 0 : });
1111 0 : }
1112 1000 : }
1113 :
1114 1000 : res
1115 1000 : }
1116 :
1117 : /// Scan the keyspace and return all existing key-values in the keyspace. This currently uses vectored
1118 : /// get underlying. Normal vectored get would throw an error when a key in the keyspace is not found
1119 : /// during the search, but for the scan interface, it returns all existing key-value pairs, and does
1120 : /// not expect each single key in the key space will be found. The semantics is closer to the RocksDB
1121 : /// scan iterator interface. We could optimize this interface later to avoid some checks in the vectored
1122 : /// get path to maintain and split the probing and to-be-probe keyspace. We also need to ensure that
1123 : /// the scan operation will not cause OOM in the future.
1124 12 : pub(crate) async fn scan(
1125 12 : &self,
1126 12 : keyspace: KeySpace,
1127 12 : lsn: Lsn,
1128 12 : ctx: &RequestContext,
1129 12 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1130 12 : if !lsn.is_valid() {
1131 0 : return Err(GetVectoredError::InvalidLsn(lsn));
1132 12 : }
1133 12 :
1134 12 : trace!(
1135 0 : "key-value scan request for {:?}@{} from task kind {:?}",
1136 0 : keyspace,
1137 0 : lsn,
1138 0 : ctx.task_kind()
1139 : );
1140 :
1141 : // We should generalize this into Keyspace::contains in the future.
1142 24 : for range in &keyspace.ranges {
1143 12 : if range.start.field1 < METADATA_KEY_BEGIN_PREFIX
1144 12 : || range.end.field1 > METADATA_KEY_END_PREFIX
1145 : {
1146 0 : return Err(GetVectoredError::Other(anyhow::anyhow!(
1147 0 : "only metadata keyspace can be scanned"
1148 0 : )));
1149 12 : }
1150 : }
1151 :
1152 12 : let start = crate::metrics::SCAN_LATENCY
1153 12 : .for_task_kind(ctx.task_kind())
1154 12 : .map(ScanLatencyOngoingRecording::start_recording);
1155 :
1156 : // start counting after throttle so that throttle time
1157 : // is always less than observation time
1158 12 : let throttled = self
1159 12 : .timeline_get_throttle
1160 12 : // assume scan = 1 quota for now until we find a better way to process this
1161 12 : .throttle(ctx, 1)
1162 0 : .await;
1163 :
1164 12 : let vectored_res = self
1165 12 : .get_vectored_impl(
1166 12 : keyspace.clone(),
1167 12 : lsn,
1168 12 : &mut ValuesReconstructState::default(),
1169 12 : ctx,
1170 12 : )
1171 0 : .await;
1172 :
1173 12 : if let Some(recording) = start {
1174 0 : recording.observe(throttled);
1175 12 : }
1176 :
1177 12 : vectored_res
1178 12 : }
1179 :
1180 : /// Not subject to [`Self::timeline_get_throttle`].
1181 1012 : pub(super) async fn get_vectored_sequential_impl(
1182 1012 : &self,
1183 1012 : keyspace: KeySpace,
1184 1012 : lsn: Lsn,
1185 1012 : ctx: &RequestContext,
1186 1012 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1187 1012 : let mut values = BTreeMap::new();
1188 :
1189 2024 : for range in keyspace.ranges {
1190 1012 : let mut key = range.start;
1191 2660 : while key != range.end {
1192 1648 : let block = self
1193 1648 : .get_impl(key, lsn, ValueReconstructState::default(), ctx)
1194 29 : .await;
1195 :
1196 : use PageReconstructError::*;
1197 0 : match block {
1198 0 : Err(Cancelled) => return Err(GetVectoredError::Cancelled),
1199 : Err(MissingKey(_))
1200 2 : if NON_INHERITED_RANGE.contains(&key)
1201 2 : || NON_INHERITED_SPARSE_RANGE.contains(&key) =>
1202 2 : {
1203 2 : // Ignore missing key error for aux key range. TODO: currently, we assume non_inherited_range == aux_key_range.
1204 2 : // When we add more types of keys into the page server, we should revisit this part of code and throw errors
1205 2 : // accordingly.
1206 2 : key = key.next();
1207 2 : }
1208 0 : Err(MissingKey(err)) => {
1209 0 : return Err(GetVectoredError::MissingKey(err));
1210 : }
1211 0 : Err(Other(err))
1212 0 : if err
1213 0 : .to_string()
1214 0 : .contains("downloading evicted layer file failed") =>
1215 0 : {
1216 0 : return Err(GetVectoredError::Other(err))
1217 : }
1218 0 : Err(Other(err))
1219 0 : if err
1220 0 : .chain()
1221 0 : .any(|cause| cause.to_string().contains("layer loading failed")) =>
1222 0 : {
1223 0 : // The intent here is to achieve error parity with the vectored read path.
1224 0 : // When vectored read fails to load a layer it fails the whole read, hence
1225 0 : // we mimic this behaviour here to keep the validation happy.
1226 0 : return Err(GetVectoredError::Other(err));
1227 : }
1228 1646 : _ => {
1229 1646 : values.insert(key, block);
1230 1646 : key = key.next();
1231 1646 : }
1232 : }
1233 : }
1234 : }
1235 :
1236 1012 : Ok(values)
1237 1012 : }
1238 :
1239 138 : pub(super) async fn get_vectored_impl(
1240 138 : &self,
1241 138 : keyspace: KeySpace,
1242 138 : lsn: Lsn,
1243 138 : reconstruct_state: &mut ValuesReconstructState,
1244 138 : ctx: &RequestContext,
1245 138 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1246 138 : let get_kind = if keyspace.total_raw_size() == 1 {
1247 68 : GetKind::Singular
1248 : } else {
1249 70 : GetKind::Vectored
1250 : };
1251 :
1252 138 : let get_data_timer = crate::metrics::GET_RECONSTRUCT_DATA_TIME
1253 138 : .for_get_kind(get_kind)
1254 138 : .start_timer();
1255 138 : self.get_vectored_reconstruct_data(keyspace.clone(), lsn, reconstruct_state, ctx)
1256 11428 : .await?;
1257 128 : get_data_timer.stop_and_record();
1258 128 :
1259 128 : let reconstruct_timer = crate::metrics::RECONSTRUCT_TIME
1260 128 : .for_get_kind(get_kind)
1261 128 : .start_timer();
1262 128 : let mut results: BTreeMap<Key, Result<Bytes, PageReconstructError>> = BTreeMap::new();
1263 128 : let layers_visited = reconstruct_state.get_layers_visited();
1264 :
1265 40436 : for (key, res) in std::mem::take(&mut reconstruct_state.keys) {
1266 40436 : match res {
1267 0 : Err(err) => {
1268 0 : results.insert(key, Err(err));
1269 0 : }
1270 40436 : Ok(state) => {
1271 40436 : let state = ValueReconstructState::from(state);
1272 :
1273 40436 : let reconstruct_res = self.reconstruct_value(key, lsn, state).await;
1274 40436 : results.insert(key, reconstruct_res);
1275 : }
1276 : }
1277 : }
1278 128 : reconstruct_timer.stop_and_record();
1279 128 :
1280 128 : // For aux file keys (v1 or v2) the vectored read path does not return an error
1281 128 : // when they're missing. Instead they are omitted from the resulting btree
1282 128 : // (this is a requirement, not a bug). Skip updating the metric in these cases
1283 128 : // to avoid infinite results.
1284 128 : if !results.is_empty() {
1285 110 : let avg = layers_visited as f64 / results.len() as f64;
1286 110 : if avg >= Self::VEC_GET_LAYERS_VISITED_WARN_THRESH {
1287 0 : use utils::rate_limit::RateLimit;
1288 0 : static LOGGED: Lazy<Mutex<RateLimit>> =
1289 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(60))));
1290 0 : let mut rate_limit = LOGGED.lock().unwrap();
1291 0 : rate_limit.call(|| {
1292 0 : tracing::info!(
1293 0 : shard_id = %self.tenant_shard_id.shard_slug(),
1294 0 : lsn = %lsn,
1295 0 : "Vectored read for {} visited {} layers on average per key and {} in total. {}/{} pages were returned",
1296 0 : keyspace, avg, layers_visited, results.len(), keyspace.total_raw_size());
1297 0 : });
1298 110 : }
1299 :
1300 : // Note that this is an approximation. Tracking the exact number of layers visited
1301 : // per key requires virtually unbounded memory usage and is inefficient
1302 : // (i.e. segment tree tracking each range queried from a layer)
1303 110 : crate::metrics::VEC_READ_NUM_LAYERS_VISITED.observe(avg);
1304 18 : }
1305 :
1306 128 : Ok(results)
1307 138 : }
1308 :
1309 : /// Not subject to [`Self::timeline_get_throttle`].
1310 12 : pub(super) async fn validate_get_vectored_impl(
1311 12 : &self,
1312 12 : vectored_res: &Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError>,
1313 12 : keyspace: KeySpace,
1314 12 : lsn: Lsn,
1315 12 : ctx: &RequestContext,
1316 12 : ) {
1317 12 : if keyspace.overlaps(&Key::metadata_key_range()) {
1318 : // skip validation for metadata key range
1319 0 : return;
1320 12 : }
1321 :
1322 12 : let sequential_res = self
1323 12 : .get_vectored_sequential_impl(keyspace.clone(), lsn, ctx)
1324 19 : .await;
1325 :
1326 0 : fn errors_match(lhs: &GetVectoredError, rhs: &GetVectoredError) -> bool {
1327 0 : use GetVectoredError::*;
1328 0 : match (lhs, rhs) {
1329 0 : (Oversized(l), Oversized(r)) => l == r,
1330 0 : (InvalidLsn(l), InvalidLsn(r)) => l == r,
1331 0 : (MissingKey(l), MissingKey(r)) => l.key == r.key,
1332 0 : (GetReadyAncestorError(_), GetReadyAncestorError(_)) => true,
1333 0 : (Other(_), Other(_)) => true,
1334 0 : _ => false,
1335 : }
1336 0 : }
1337 :
1338 12 : match (&sequential_res, vectored_res) {
1339 0 : (Err(GetVectoredError::Cancelled), _) => {},
1340 0 : (_, Err(GetVectoredError::Cancelled)) => {},
1341 0 : (Err(seq_err), Ok(_)) => {
1342 0 : panic!(concat!("Sequential get failed with {}, but vectored get did not",
1343 0 : " - keyspace={:?} lsn={}"),
1344 0 : seq_err, keyspace, lsn) },
1345 0 : (Ok(_), Err(GetVectoredError::GetReadyAncestorError(GetReadyAncestorError::AncestorLsnTimeout(_)))) => {
1346 0 : // Sequential get runs after vectored get, so it is possible for the later
1347 0 : // to time out while waiting for its ancestor's Lsn to become ready and for the
1348 0 : // former to succeed (it essentially has a doubled wait time).
1349 0 : },
1350 0 : (Ok(_), Err(vec_err)) => {
1351 0 : panic!(concat!("Vectored get failed with {}, but sequential get did not",
1352 0 : " - keyspace={:?} lsn={}"),
1353 0 : vec_err, keyspace, lsn) },
1354 0 : (Err(seq_err), Err(vec_err)) => {
1355 0 : assert!(errors_match(seq_err, vec_err),
1356 0 : "Mismatched errors: {seq_err} != {vec_err} - keyspace={keyspace:?} lsn={lsn}")},
1357 12 : (Ok(seq_values), Ok(vec_values)) => {
1358 320 : seq_values.iter().zip(vec_values.iter()).for_each(|((seq_key, seq_res), (vec_key, vec_res))| {
1359 320 : assert_eq!(seq_key, vec_key);
1360 320 : match (seq_res, vec_res) {
1361 320 : (Ok(seq_blob), Ok(vec_blob)) => {
1362 320 : Self::validate_key_equivalence(seq_key, &keyspace, lsn, seq_blob, vec_blob);
1363 320 : },
1364 0 : (Err(err), Ok(_)) => {
1365 0 : panic!(
1366 0 : concat!("Sequential get failed with {} for key {}, but vectored get did not",
1367 0 : " - keyspace={:?} lsn={}"),
1368 0 : err, seq_key, keyspace, lsn) },
1369 0 : (Ok(_), Err(err)) => {
1370 0 : panic!(
1371 0 : concat!("Vectored get failed with {} for key {}, but sequential get did not",
1372 0 : " - keyspace={:?} lsn={}"),
1373 0 : err, seq_key, keyspace, lsn) },
1374 0 : (Err(_), Err(_)) => {}
1375 : }
1376 320 : })
1377 : }
1378 : }
1379 12 : }
1380 :
1381 320 : fn validate_key_equivalence(
1382 320 : key: &Key,
1383 320 : keyspace: &KeySpace,
1384 320 : lsn: Lsn,
1385 320 : seq: &Bytes,
1386 320 : vec: &Bytes,
1387 320 : ) {
1388 320 : if *key == AUX_FILES_KEY {
1389 : // The value reconstruct of AUX_FILES_KEY from records is not deterministic
1390 : // since it uses a hash map under the hood. Hence, deserialise both results
1391 : // before comparing.
1392 0 : let seq_aux_dir_res = AuxFilesDirectory::des(seq);
1393 0 : let vec_aux_dir_res = AuxFilesDirectory::des(vec);
1394 0 : match (&seq_aux_dir_res, &vec_aux_dir_res) {
1395 0 : (Ok(seq_aux_dir), Ok(vec_aux_dir)) => {
1396 0 : assert_eq!(
1397 : seq_aux_dir, vec_aux_dir,
1398 0 : "Mismatch for key {} - keyspace={:?} lsn={}",
1399 : key, keyspace, lsn
1400 : );
1401 : }
1402 0 : (Err(_), Err(_)) => {}
1403 : _ => {
1404 0 : panic!("Mismatch for {key}: {seq_aux_dir_res:?} != {vec_aux_dir_res:?}");
1405 : }
1406 : }
1407 : } else {
1408 : // All other keys should reconstruct deterministically, so we simply compare the blobs.
1409 320 : assert_eq!(
1410 : seq, vec,
1411 0 : "Image mismatch for key {key} - keyspace={keyspace:?} lsn={lsn}"
1412 : );
1413 : }
1414 320 : }
1415 :
1416 : /// Get last or prev record separately. Same as get_last_record_rlsn().last/prev.
1417 276230 : pub(crate) fn get_last_record_lsn(&self) -> Lsn {
1418 276230 : self.last_record_lsn.load().last
1419 276230 : }
1420 :
1421 0 : pub(crate) fn get_prev_record_lsn(&self) -> Lsn {
1422 0 : self.last_record_lsn.load().prev
1423 0 : }
1424 :
1425 : /// Atomically get both last and prev.
1426 224 : pub(crate) fn get_last_record_rlsn(&self) -> RecordLsn {
1427 224 : self.last_record_lsn.load()
1428 224 : }
1429 :
1430 : /// Subscribe to callers of wait_lsn(). The value of the channel is None if there are no
1431 : /// wait_lsn() calls in progress, and Some(Lsn) if there is an active waiter for wait_lsn().
1432 0 : pub(crate) fn subscribe_for_wait_lsn_updates(&self) -> watch::Receiver<Option<Lsn>> {
1433 0 : self.last_record_lsn.status_receiver()
1434 0 : }
1435 :
1436 1135 : pub(crate) fn get_disk_consistent_lsn(&self) -> Lsn {
1437 1135 : self.disk_consistent_lsn.load()
1438 1135 : }
1439 :
1440 : /// remote_consistent_lsn from the perspective of the tenant's current generation,
1441 : /// not validated with control plane yet.
1442 : /// See [`Self::get_remote_consistent_lsn_visible`].
1443 0 : pub(crate) fn get_remote_consistent_lsn_projected(&self) -> Option<Lsn> {
1444 0 : self.remote_client.remote_consistent_lsn_projected()
1445 0 : }
1446 :
1447 : /// remote_consistent_lsn which the tenant is guaranteed not to go backward from,
1448 : /// i.e. a value of remote_consistent_lsn_projected which has undergone
1449 : /// generation validation in the deletion queue.
1450 0 : pub(crate) fn get_remote_consistent_lsn_visible(&self) -> Option<Lsn> {
1451 0 : self.remote_client.remote_consistent_lsn_visible()
1452 0 : }
1453 :
1454 : /// The sum of the file size of all historic layers in the layer map.
1455 : /// This method makes no distinction between local and remote layers.
1456 : /// Hence, the result **does not represent local filesystem usage**.
1457 0 : pub(crate) async fn layer_size_sum(&self) -> u64 {
1458 0 : let guard = self.layers.read().await;
1459 0 : let layer_map = guard.layer_map();
1460 0 : let mut size = 0;
1461 0 : for l in layer_map.iter_historic_layers() {
1462 0 : size += l.file_size;
1463 0 : }
1464 0 : size
1465 0 : }
1466 :
1467 0 : pub(crate) fn resident_physical_size(&self) -> u64 {
1468 0 : self.metrics.resident_physical_size_get()
1469 0 : }
1470 :
1471 0 : pub(crate) fn get_directory_metrics(&self) -> [u64; DirectoryKind::KINDS_NUM] {
1472 0 : array::from_fn(|idx| self.directory_metrics[idx].load(AtomicOrdering::Relaxed))
1473 0 : }
1474 :
1475 : ///
1476 : /// Wait until WAL has been received and processed up to this LSN.
1477 : ///
1478 : /// You should call this before any of the other get_* or list_* functions. Calling
1479 : /// those functions with an LSN that has been processed yet is an error.
1480 : ///
1481 226280 : pub(crate) async fn wait_lsn(
1482 226280 : &self,
1483 226280 : lsn: Lsn,
1484 226280 : who_is_waiting: WaitLsnWaiter<'_>,
1485 226280 : ctx: &RequestContext, /* Prepare for use by cancellation */
1486 226280 : ) -> Result<(), WaitLsnError> {
1487 226280 : let state = self.current_state();
1488 226280 : if self.cancel.is_cancelled() || matches!(state, TimelineState::Stopping) {
1489 0 : return Err(WaitLsnError::Shutdown);
1490 226280 : } else if !matches!(state, TimelineState::Active) {
1491 0 : return Err(WaitLsnError::BadState(state));
1492 226280 : }
1493 226280 :
1494 226280 : if cfg!(debug_assertions) {
1495 226280 : match ctx.task_kind() {
1496 : TaskKind::WalReceiverManager
1497 : | TaskKind::WalReceiverConnectionHandler
1498 : | TaskKind::WalReceiverConnectionPoller => {
1499 0 : let is_myself = match who_is_waiting {
1500 0 : WaitLsnWaiter::Timeline(waiter) => Weak::ptr_eq(&waiter.myself, &self.myself),
1501 0 : WaitLsnWaiter::Tenant | WaitLsnWaiter::PageService => unreachable!("tenant or page_service context are not expected to have task kind {:?}", ctx.task_kind()),
1502 : };
1503 0 : if is_myself {
1504 0 : if let Err(current) = self.last_record_lsn.would_wait_for(lsn) {
1505 : // walingest is the only one that can advance last_record_lsn; it should make sure to never reach here
1506 0 : panic!("this timeline's walingest task is calling wait_lsn({lsn}) but we only have last_record_lsn={current}; would deadlock");
1507 0 : }
1508 0 : } else {
1509 0 : // if another timeline's is waiting for us, there's no deadlock risk because
1510 0 : // our walreceiver task can make progress independent of theirs
1511 0 : }
1512 : }
1513 226280 : _ => {}
1514 : }
1515 0 : }
1516 :
1517 226280 : let _timer = crate::metrics::WAIT_LSN_TIME.start_timer();
1518 226280 :
1519 226280 : match self
1520 226280 : .last_record_lsn
1521 226280 : .wait_for_timeout(lsn, self.conf.wait_lsn_timeout)
1522 0 : .await
1523 : {
1524 226280 : Ok(()) => Ok(()),
1525 0 : Err(e) => {
1526 0 : use utils::seqwait::SeqWaitError::*;
1527 0 : match e {
1528 0 : Shutdown => Err(WaitLsnError::Shutdown),
1529 : Timeout => {
1530 : // don't count the time spent waiting for lock below, and also in walreceiver.status(), towards the wait_lsn_time_histo
1531 0 : drop(_timer);
1532 0 : let walreceiver_status = self.walreceiver_status();
1533 0 : Err(WaitLsnError::Timeout(format!(
1534 0 : "Timed out while waiting for WAL record at LSN {} to arrive, last_record_lsn {} disk consistent LSN={}, WalReceiver status: {}",
1535 0 : lsn,
1536 0 : self.get_last_record_lsn(),
1537 0 : self.get_disk_consistent_lsn(),
1538 0 : walreceiver_status,
1539 0 : )))
1540 : }
1541 : }
1542 : }
1543 : }
1544 226280 : }
1545 :
1546 0 : pub(crate) fn walreceiver_status(&self) -> String {
1547 0 : match &*self.walreceiver.lock().unwrap() {
1548 0 : None => "stopping or stopped".to_string(),
1549 0 : Some(walreceiver) => match walreceiver.status() {
1550 0 : Some(status) => status.to_human_readable_string(),
1551 0 : None => "Not active".to_string(),
1552 : },
1553 : }
1554 0 : }
1555 :
1556 : /// Check that it is valid to request operations with that lsn.
1557 228 : pub(crate) fn check_lsn_is_in_scope(
1558 228 : &self,
1559 228 : lsn: Lsn,
1560 228 : latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
1561 228 : ) -> anyhow::Result<()> {
1562 228 : ensure!(
1563 228 : lsn >= **latest_gc_cutoff_lsn,
1564 4 : "LSN {} is earlier than latest GC cutoff {} (we might've already garbage collected needed data)",
1565 4 : lsn,
1566 4 : **latest_gc_cutoff_lsn,
1567 : );
1568 224 : Ok(())
1569 228 : }
1570 :
1571 : /// Obtains a temporary lease blocking garbage collection for the given LSN.
1572 : ///
1573 : /// This function will error if the requesting LSN is less than the `latest_gc_cutoff_lsn` and there is also
1574 : /// no existing lease to renew. If there is an existing lease in the map, the lease will be renewed only if
1575 : /// the request extends the lease. The returned lease is therefore the maximum between the existing lease and
1576 : /// the requesting lease.
1577 14 : pub(crate) fn make_lsn_lease(
1578 14 : &self,
1579 14 : lsn: Lsn,
1580 14 : length: Duration,
1581 14 : _ctx: &RequestContext,
1582 14 : ) -> anyhow::Result<LsnLease> {
1583 12 : let lease = {
1584 14 : let mut gc_info = self.gc_info.write().unwrap();
1585 14 :
1586 14 : let valid_until = SystemTime::now() + length;
1587 14 :
1588 14 : let entry = gc_info.leases.entry(lsn);
1589 :
1590 12 : let lease = {
1591 14 : if let Entry::Occupied(mut occupied) = entry {
1592 6 : let existing_lease = occupied.get_mut();
1593 6 : if valid_until > existing_lease.valid_until {
1594 2 : existing_lease.valid_until = valid_until;
1595 2 : let dt: DateTime<Utc> = valid_until.into();
1596 2 : info!("lease extended to {}", dt);
1597 : } else {
1598 4 : let dt: DateTime<Utc> = existing_lease.valid_until.into();
1599 4 : info!("existing lease covers greater length, valid until {}", dt);
1600 : }
1601 :
1602 6 : existing_lease.clone()
1603 : } else {
1604 : // Reject already GC-ed LSN (lsn < latest_gc_cutoff)
1605 8 : let latest_gc_cutoff_lsn = self.get_latest_gc_cutoff_lsn();
1606 8 : if lsn < *latest_gc_cutoff_lsn {
1607 2 : bail!("tried to request a page version that was garbage collected. requested at {} gc cutoff {}", lsn, *latest_gc_cutoff_lsn);
1608 6 : }
1609 6 :
1610 6 : let dt: DateTime<Utc> = valid_until.into();
1611 6 : info!("lease created, valid until {}", dt);
1612 6 : entry.or_insert(LsnLease { valid_until }).clone()
1613 : }
1614 : };
1615 :
1616 12 : lease
1617 12 : };
1618 12 :
1619 12 : Ok(lease)
1620 14 : }
1621 :
1622 : /// Flush to disk all data that was written with the put_* functions
1623 2136 : #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
1624 : pub(crate) async fn freeze_and_flush(&self) -> Result<(), FlushLayerError> {
1625 : self.freeze_and_flush0().await
1626 : }
1627 :
1628 : // This exists to provide a non-span creating version of `freeze_and_flush` we can call without
1629 : // polluting the span hierarchy.
1630 1068 : pub(crate) async fn freeze_and_flush0(&self) -> Result<(), FlushLayerError> {
1631 1068 : let to_lsn = {
1632 : // Freeze the current open in-memory layer. It will be written to disk on next
1633 : // iteration.
1634 1068 : let mut g = self.write_lock.lock().await;
1635 :
1636 1068 : let to_lsn = self.get_last_record_lsn();
1637 1068 : self.freeze_inmem_layer_at(to_lsn, &mut g).await;
1638 1068 : to_lsn
1639 1068 : };
1640 1068 : self.flush_frozen_layers_and_wait(to_lsn).await
1641 1068 : }
1642 :
1643 : // Check if an open ephemeral layer should be closed: this provides
1644 : // background enforcement of checkpoint interval if there is no active WAL receiver, to avoid keeping
1645 : // an ephemeral layer open forever when idle. It also freezes layers if the global limit on
1646 : // ephemeral layer bytes has been breached.
1647 0 : pub(super) async fn maybe_freeze_ephemeral_layer(&self) {
1648 0 : let Ok(mut write_guard) = self.write_lock.try_lock() else {
1649 : // If the write lock is held, there is an active wal receiver: rolling open layers
1650 : // is their responsibility while they hold this lock.
1651 0 : return;
1652 : };
1653 :
1654 0 : let Ok(layers_guard) = self.layers.try_read() else {
1655 : // Don't block if the layer lock is busy
1656 0 : return;
1657 : };
1658 :
1659 0 : let Some(open_layer) = &layers_guard.layer_map().open_layer else {
1660 : // If there is no open layer, we have no layer freezing to do. However, we might need to generate
1661 : // some updates to disk_consistent_lsn and remote_consistent_lsn, in case we ingested some WAL regions
1662 : // that didn't result in writes to this shard.
1663 :
1664 : // Must not hold the layers lock while waiting for a flush.
1665 0 : drop(layers_guard);
1666 0 :
1667 0 : let last_record_lsn = self.get_last_record_lsn();
1668 0 : let disk_consistent_lsn = self.get_disk_consistent_lsn();
1669 0 : if last_record_lsn > disk_consistent_lsn {
1670 : // We have no open layer, but disk_consistent_lsn is behind the last record: this indicates
1671 : // we are a sharded tenant and have skipped some WAL
1672 0 : let last_freeze_ts = *self.last_freeze_ts.read().unwrap();
1673 0 : if last_freeze_ts.elapsed() >= self.get_checkpoint_timeout() {
1674 : // Only do this if have been layer-less longer than get_checkpoint_timeout, so that a shard
1675 : // without any data ingested (yet) doesn't write a remote index as soon as it
1676 : // sees its LSN advance: we only do this if we've been layer-less
1677 : // for some time.
1678 0 : tracing::debug!(
1679 0 : "Advancing disk_consistent_lsn past WAL ingest gap {} -> {}",
1680 : disk_consistent_lsn,
1681 : last_record_lsn
1682 : );
1683 :
1684 : // The flush loop will update remote consistent LSN as well as disk consistent LSN.
1685 0 : self.flush_frozen_layers_and_wait(last_record_lsn)
1686 0 : .await
1687 0 : .ok();
1688 0 : }
1689 0 : }
1690 :
1691 0 : return;
1692 : };
1693 :
1694 0 : let Some(current_size) = open_layer.try_len() else {
1695 : // Unexpected: since we hold the write guard, nobody else should be writing to this layer, so
1696 : // read lock to get size should always succeed.
1697 0 : tracing::warn!("Lock conflict while reading size of open layer");
1698 0 : return;
1699 : };
1700 :
1701 0 : let current_lsn = self.get_last_record_lsn();
1702 :
1703 0 : let checkpoint_distance_override = open_layer.tick().await;
1704 :
1705 0 : if let Some(size_override) = checkpoint_distance_override {
1706 0 : if current_size > size_override {
1707 : // This is not harmful, but it only happens in relatively rare cases where
1708 : // time-based checkpoints are not happening fast enough to keep the amount of
1709 : // ephemeral data within configured limits. It's a sign of stress on the system.
1710 0 : tracing::info!("Early-rolling open layer at size {current_size} (limit {size_override}) due to dirty data pressure");
1711 0 : }
1712 0 : }
1713 :
1714 0 : let checkpoint_distance =
1715 0 : checkpoint_distance_override.unwrap_or(self.get_checkpoint_distance());
1716 0 :
1717 0 : if self.should_roll(
1718 0 : current_size,
1719 0 : current_size,
1720 0 : checkpoint_distance,
1721 0 : self.get_last_record_lsn(),
1722 0 : self.last_freeze_at.load(),
1723 0 : open_layer.get_opened_at(),
1724 0 : ) {
1725 0 : let at_lsn = match open_layer.info() {
1726 0 : InMemoryLayerInfo::Frozen { lsn_start, lsn_end } => {
1727 0 : // We may reach this point if the layer was already frozen by not yet flushed: flushing
1728 0 : // happens asynchronously in the background.
1729 0 : tracing::debug!(
1730 0 : "Not freezing open layer, it's already frozen ({lsn_start}..{lsn_end})"
1731 : );
1732 0 : None
1733 : }
1734 : InMemoryLayerInfo::Open { .. } => {
1735 : // Upgrade to a write lock and freeze the layer
1736 0 : drop(layers_guard);
1737 0 : let mut layers_guard = self.layers.write().await;
1738 0 : let froze = layers_guard
1739 0 : .try_freeze_in_memory_layer(
1740 0 : current_lsn,
1741 0 : &self.last_freeze_at,
1742 0 : &mut write_guard,
1743 0 : )
1744 0 : .await;
1745 0 : Some(current_lsn).filter(|_| froze)
1746 : }
1747 : };
1748 0 : if let Some(lsn) = at_lsn {
1749 0 : let res: Result<u64, _> = self.flush_frozen_layers(lsn);
1750 0 : if let Err(e) = res {
1751 0 : tracing::info!("failed to flush frozen layer after background freeze: {e:#}");
1752 0 : }
1753 0 : }
1754 0 : }
1755 0 : }
1756 :
1757 : /// Outermost timeline compaction operation; downloads needed layers.
1758 364 : pub(crate) async fn compact(
1759 364 : self: &Arc<Self>,
1760 364 : cancel: &CancellationToken,
1761 364 : flags: EnumSet<CompactFlags>,
1762 364 : ctx: &RequestContext,
1763 364 : ) -> Result<(), CompactionError> {
1764 364 : // most likely the cancellation token is from background task, but in tests it could be the
1765 364 : // request task as well.
1766 364 :
1767 364 : let prepare = async move {
1768 364 : let guard = self.compaction_lock.lock().await;
1769 :
1770 364 : let permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
1771 364 : BackgroundLoopKind::Compaction,
1772 364 : ctx,
1773 364 : )
1774 0 : .await;
1775 :
1776 364 : (guard, permit)
1777 364 : };
1778 :
1779 : // this wait probably never needs any "long time spent" logging, because we already nag if
1780 : // compaction task goes over it's period (20s) which is quite often in production.
1781 364 : let (_guard, _permit) = tokio::select! {
1782 : tuple = prepare => { tuple },
1783 : _ = self.cancel.cancelled() => return Ok(()),
1784 : _ = cancel.cancelled() => return Ok(()),
1785 : };
1786 :
1787 364 : let last_record_lsn = self.get_last_record_lsn();
1788 364 :
1789 364 : // Last record Lsn could be zero in case the timeline was just created
1790 364 : if !last_record_lsn.is_valid() {
1791 0 : warn!("Skipping compaction for potentially just initialized timeline, it has invalid last record lsn: {last_record_lsn}");
1792 0 : return Ok(());
1793 364 : }
1794 364 :
1795 364 : match self.get_compaction_algorithm_settings().kind {
1796 0 : CompactionAlgorithm::Tiered => self.compact_tiered(cancel, ctx).await,
1797 71197 : CompactionAlgorithm::Legacy => self.compact_legacy(cancel, flags, ctx).await,
1798 : }
1799 364 : }
1800 :
1801 : /// Mutate the timeline with a [`TimelineWriter`].
1802 5133154 : pub(crate) async fn writer(&self) -> TimelineWriter<'_> {
1803 5133154 : TimelineWriter {
1804 5133154 : tl: self,
1805 5133154 : write_guard: self.write_lock.lock().await,
1806 : }
1807 5133154 : }
1808 :
1809 0 : pub(crate) fn activate(
1810 0 : self: &Arc<Self>,
1811 0 : parent: Arc<crate::tenant::Tenant>,
1812 0 : broker_client: BrokerClientChannel,
1813 0 : background_jobs_can_start: Option<&completion::Barrier>,
1814 0 : ctx: &RequestContext,
1815 0 : ) {
1816 0 : if self.tenant_shard_id.is_shard_zero() {
1817 0 : // Logical size is only maintained accurately on shard zero.
1818 0 : self.spawn_initial_logical_size_computation_task(ctx);
1819 0 : }
1820 0 : self.launch_wal_receiver(ctx, broker_client);
1821 0 : self.set_state(TimelineState::Active);
1822 0 : self.launch_eviction_task(parent, background_jobs_can_start);
1823 0 : }
1824 :
1825 : /// After this function returns, there are no timeline-scoped tasks are left running.
1826 : ///
1827 : /// The preferred pattern for is:
1828 : /// - in any spawned tasks, keep Timeline::guard open + Timeline::cancel / child token
1829 : /// - if early shutdown (not just cancellation) of a sub-tree of tasks is required,
1830 : /// go the extra mile and keep track of JoinHandles
1831 : /// - Keep track of JoinHandles using a passed-down `Arc<Mutex<Option<JoinSet>>>` or similar,
1832 : /// instead of spawning directly on a runtime. It is a more composable / testable pattern.
1833 : ///
1834 : /// For legacy reasons, we still have multiple tasks spawned using
1835 : /// `task_mgr::spawn(X, Some(tenant_id), Some(timeline_id))`.
1836 : /// We refer to these as "timeline-scoped task_mgr tasks".
1837 : /// Some of these tasks are already sensitive to Timeline::cancel while others are
1838 : /// not sensitive to Timeline::cancel and instead respect [`task_mgr::shutdown_token`]
1839 : /// or [`task_mgr::shutdown_watcher`].
1840 : /// We want to gradually convert the code base away from these.
1841 : ///
1842 : /// Here is an inventory of timeline-scoped task_mgr tasks that are still sensitive to
1843 : /// `task_mgr::shutdown_{token,watcher}` (there are also tenant-scoped and global-scoped
1844 : /// ones that aren't mentioned here):
1845 : /// - [`TaskKind::TimelineDeletionWorker`]
1846 : /// - NB: also used for tenant deletion
1847 : /// - [`TaskKind::RemoteUploadTask`]`
1848 : /// - [`TaskKind::InitialLogicalSizeCalculation`]
1849 : /// - [`TaskKind::DownloadAllRemoteLayers`] (can we get rid of it?)
1850 : // Inventory of timeline-scoped task_mgr tasks that use spawn but aren't sensitive:
1851 : /// - [`TaskKind::Eviction`]
1852 : /// - [`TaskKind::LayerFlushTask`]
1853 : /// - [`TaskKind::OndemandLogicalSizeCalculation`]
1854 : /// - [`TaskKind::GarbageCollector`] (immediate_gc is timeline-scoped)
1855 8 : pub(crate) async fn shutdown(&self, mode: ShutdownMode) {
1856 8 : debug_assert_current_span_has_tenant_and_timeline_id();
1857 :
1858 8 : let try_freeze_and_flush = match mode {
1859 6 : ShutdownMode::FreezeAndFlush => true,
1860 2 : ShutdownMode::Hard => false,
1861 : };
1862 :
1863 : // Regardless of whether we're going to try_freeze_and_flush
1864 : // or not, stop ingesting any more data. Walreceiver only provides
1865 : // cancellation but no "wait until gone", because it uses the Timeline::gate.
1866 : // So, only after the self.gate.close() below will we know for sure that
1867 : // no walreceiver tasks are left.
1868 : // For `try_freeze_and_flush=true`, this means that we might still be ingesting
1869 : // data during the call to `self.freeze_and_flush()` below.
1870 : // That's not ideal, but, we don't have the concept of a ChildGuard,
1871 : // which is what we'd need to properly model early shutdown of the walreceiver
1872 : // task sub-tree before the other Timeline task sub-trees.
1873 8 : let walreceiver = self.walreceiver.lock().unwrap().take();
1874 8 : tracing::debug!(
1875 0 : is_some = walreceiver.is_some(),
1876 0 : "Waiting for WalReceiverManager..."
1877 : );
1878 8 : if let Some(walreceiver) = walreceiver {
1879 0 : walreceiver.cancel();
1880 8 : }
1881 : // ... and inform any waiters for newer LSNs that there won't be any.
1882 8 : self.last_record_lsn.shutdown();
1883 8 :
1884 8 : if try_freeze_and_flush {
1885 : // we shut down walreceiver above, so, we won't add anything more
1886 : // to the InMemoryLayer; freeze it and wait for all frozen layers
1887 : // to reach the disk & upload queue, then shut the upload queue and
1888 : // wait for it to drain.
1889 6 : match self.freeze_and_flush().await {
1890 : Ok(_) => {
1891 : // drain the upload queue
1892 : // if we did not wait for completion here, it might be our shutdown process
1893 : // didn't wait for remote uploads to complete at all, as new tasks can forever
1894 : // be spawned.
1895 : //
1896 : // what is problematic is the shutting down of RemoteTimelineClient, because
1897 : // obviously it does not make sense to stop while we wait for it, but what
1898 : // about corner cases like s3 suddenly hanging up?
1899 6 : self.remote_client.shutdown().await;
1900 : }
1901 0 : Err(e) => {
1902 0 : // Non-fatal. Shutdown is infallible. Failures to flush just mean that
1903 0 : // we have some extra WAL replay to do next time the timeline starts.
1904 0 : warn!("failed to freeze and flush: {e:#}");
1905 : }
1906 : }
1907 2 : }
1908 :
1909 : // Signal any subscribers to our cancellation token to drop out
1910 8 : tracing::debug!("Cancelling CancellationToken");
1911 8 : self.cancel.cancel();
1912 8 :
1913 8 : // Transition the remote_client into a state where it's only useful for timeline deletion.
1914 8 : // (The deletion use case is why we can't just hook up remote_client to Self::cancel).)
1915 8 : self.remote_client.stop();
1916 8 : // As documented in remote_client.stop()'s doc comment, it's our responsibility
1917 8 : // to shut down the upload queue tasks.
1918 8 : // TODO: fix that, task management should be encapsulated inside remote_client.
1919 8 : task_mgr::shutdown_tasks(
1920 8 : Some(TaskKind::RemoteUploadTask),
1921 8 : Some(self.tenant_shard_id),
1922 8 : Some(self.timeline_id),
1923 8 : )
1924 0 : .await;
1925 :
1926 : // TODO: work toward making this a no-op. See this funciton's doc comment for more context.
1927 8 : tracing::debug!("Waiting for tasks...");
1928 8 : task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), Some(self.timeline_id)).await;
1929 :
1930 : // Finally wait until any gate-holders are complete.
1931 : //
1932 : // TODO: once above shutdown_tasks is a no-op, we can close the gate before calling shutdown_tasks
1933 : // and use a TBD variant of shutdown_tasks that asserts that there were no tasks left.
1934 8 : self.gate.close().await;
1935 :
1936 8 : self.metrics.shutdown();
1937 8 : }
1938 :
1939 390 : pub(crate) fn set_state(&self, new_state: TimelineState) {
1940 390 : match (self.current_state(), new_state) {
1941 390 : (equal_state_1, equal_state_2) if equal_state_1 == equal_state_2 => {
1942 2 : info!("Ignoring new state, equal to the existing one: {equal_state_2:?}");
1943 : }
1944 0 : (st, TimelineState::Loading) => {
1945 0 : error!("ignoring transition from {st:?} into Loading state");
1946 : }
1947 0 : (TimelineState::Broken { .. }, new_state) => {
1948 0 : error!("Ignoring state update {new_state:?} for broken timeline");
1949 : }
1950 : (TimelineState::Stopping, TimelineState::Active) => {
1951 0 : error!("Not activating a Stopping timeline");
1952 : }
1953 388 : (_, new_state) => {
1954 388 : self.state.send_replace(new_state);
1955 388 : }
1956 : }
1957 390 : }
1958 :
1959 2 : pub(crate) fn set_broken(&self, reason: String) {
1960 2 : let backtrace_str: String = format!("{}", std::backtrace::Backtrace::force_capture());
1961 2 : let broken_state = TimelineState::Broken {
1962 2 : reason,
1963 2 : backtrace: backtrace_str,
1964 2 : };
1965 2 : self.set_state(broken_state);
1966 2 :
1967 2 : // Although the Broken state is not equivalent to shutdown() (shutdown will be called
1968 2 : // later when this tenant is detach or the process shuts down), firing the cancellation token
1969 2 : // here avoids the need for other tasks to watch for the Broken state explicitly.
1970 2 : self.cancel.cancel();
1971 2 : }
1972 :
1973 228007 : pub(crate) fn current_state(&self) -> TimelineState {
1974 228007 : self.state.borrow().clone()
1975 228007 : }
1976 :
1977 6 : pub(crate) fn is_broken(&self) -> bool {
1978 6 : matches!(&*self.state.borrow(), TimelineState::Broken { .. })
1979 6 : }
1980 :
1981 220 : pub(crate) fn is_active(&self) -> bool {
1982 220 : self.current_state() == TimelineState::Active
1983 220 : }
1984 :
1985 1117 : pub(crate) fn is_stopping(&self) -> bool {
1986 1117 : self.current_state() == TimelineState::Stopping
1987 1117 : }
1988 :
1989 0 : pub(crate) fn subscribe_for_state_updates(&self) -> watch::Receiver<TimelineState> {
1990 0 : self.state.subscribe()
1991 0 : }
1992 :
1993 226282 : pub(crate) async fn wait_to_become_active(
1994 226282 : &self,
1995 226282 : _ctx: &RequestContext, // Prepare for use by cancellation
1996 226282 : ) -> Result<(), TimelineState> {
1997 226282 : let mut receiver = self.state.subscribe();
1998 226282 : loop {
1999 226282 : let current_state = receiver.borrow().clone();
2000 226282 : match current_state {
2001 : TimelineState::Loading => {
2002 0 : receiver
2003 0 : .changed()
2004 0 : .await
2005 0 : .expect("holding a reference to self");
2006 : }
2007 : TimelineState::Active { .. } => {
2008 226280 : return Ok(());
2009 : }
2010 : TimelineState::Broken { .. } | TimelineState::Stopping => {
2011 : // There's no chance the timeline can transition back into ::Active
2012 2 : return Err(current_state);
2013 : }
2014 : }
2015 : }
2016 226282 : }
2017 :
2018 0 : pub(crate) async fn layer_map_info(&self, reset: LayerAccessStatsReset) -> LayerMapInfo {
2019 0 : let guard = self.layers.read().await;
2020 0 : let layer_map = guard.layer_map();
2021 0 : let mut in_memory_layers = Vec::with_capacity(layer_map.frozen_layers.len() + 1);
2022 0 : if let Some(open_layer) = &layer_map.open_layer {
2023 0 : in_memory_layers.push(open_layer.info());
2024 0 : }
2025 0 : for frozen_layer in &layer_map.frozen_layers {
2026 0 : in_memory_layers.push(frozen_layer.info());
2027 0 : }
2028 :
2029 0 : let mut historic_layers = Vec::new();
2030 0 : for historic_layer in layer_map.iter_historic_layers() {
2031 0 : let historic_layer = guard.get_from_desc(&historic_layer);
2032 0 : historic_layers.push(historic_layer.info(reset));
2033 0 : }
2034 :
2035 0 : LayerMapInfo {
2036 0 : in_memory_layers,
2037 0 : historic_layers,
2038 0 : }
2039 0 : }
2040 :
2041 0 : #[instrument(skip_all, fields(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))]
2042 : pub(crate) async fn download_layer(
2043 : &self,
2044 : layer_file_name: &LayerName,
2045 : ) -> anyhow::Result<Option<bool>> {
2046 : let Some(layer) = self.find_layer(layer_file_name).await else {
2047 : return Ok(None);
2048 : };
2049 :
2050 : layer.download().await?;
2051 :
2052 : Ok(Some(true))
2053 : }
2054 :
2055 : /// Evict just one layer.
2056 : ///
2057 : /// Returns `Ok(None)` in the case where the layer could not be found by its `layer_file_name`.
2058 0 : pub(crate) async fn evict_layer(
2059 0 : &self,
2060 0 : layer_file_name: &LayerName,
2061 0 : ) -> anyhow::Result<Option<bool>> {
2062 0 : let _gate = self
2063 0 : .gate
2064 0 : .enter()
2065 0 : .map_err(|_| anyhow::anyhow!("Shutting down"))?;
2066 :
2067 0 : let Some(local_layer) = self.find_layer(layer_file_name).await else {
2068 0 : return Ok(None);
2069 : };
2070 :
2071 : // curl has this by default
2072 0 : let timeout = std::time::Duration::from_secs(120);
2073 0 :
2074 0 : match local_layer.evict_and_wait(timeout).await {
2075 0 : Ok(()) => Ok(Some(true)),
2076 0 : Err(EvictionError::NotFound) => Ok(Some(false)),
2077 0 : Err(EvictionError::Downloaded) => Ok(Some(false)),
2078 0 : Err(EvictionError::Timeout) => Ok(Some(false)),
2079 : }
2080 0 : }
2081 :
2082 4803026 : fn should_roll(
2083 4803026 : &self,
2084 4803026 : layer_size: u64,
2085 4803026 : projected_layer_size: u64,
2086 4803026 : checkpoint_distance: u64,
2087 4803026 : projected_lsn: Lsn,
2088 4803026 : last_freeze_at: Lsn,
2089 4803026 : opened_at: Instant,
2090 4803026 : ) -> bool {
2091 4803026 : let distance = projected_lsn.widening_sub(last_freeze_at);
2092 4803026 :
2093 4803026 : // Rolling the open layer can be triggered by:
2094 4803026 : // 1. The distance from the last LSN we rolled at. This bounds the amount of WAL that
2095 4803026 : // the safekeepers need to store. For sharded tenants, we multiply by shard count to
2096 4803026 : // account for how writes are distributed across shards: we expect each node to consume
2097 4803026 : // 1/count of the LSN on average.
2098 4803026 : // 2. The size of the currently open layer.
2099 4803026 : // 3. The time since the last roll. It helps safekeepers to regard pageserver as caught
2100 4803026 : // up and suspend activity.
2101 4803026 : if distance >= checkpoint_distance as i128 * self.shard_identity.count.count() as i128 {
2102 0 : info!(
2103 0 : "Will roll layer at {} with layer size {} due to LSN distance ({})",
2104 : projected_lsn, layer_size, distance
2105 : );
2106 :
2107 0 : true
2108 4803026 : } else if projected_layer_size >= checkpoint_distance {
2109 80 : info!(
2110 0 : "Will roll layer at {} with layer size {} due to layer size ({})",
2111 : projected_lsn, layer_size, projected_layer_size
2112 : );
2113 :
2114 80 : true
2115 4802946 : } else if distance > 0 && opened_at.elapsed() >= self.get_checkpoint_timeout() {
2116 0 : info!(
2117 0 : "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})",
2118 0 : projected_lsn,
2119 0 : layer_size,
2120 0 : opened_at.elapsed()
2121 : );
2122 :
2123 0 : true
2124 : } else {
2125 4802946 : false
2126 : }
2127 4803026 : }
2128 : }
2129 :
2130 : /// Number of times we will compute partition within a checkpoint distance.
2131 : const REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE: u64 = 10;
2132 :
2133 : // Private functions
2134 : impl Timeline {
2135 12 : pub(crate) fn get_lsn_lease_length(&self) -> Duration {
2136 12 : let tenant_conf = self.tenant_conf.load();
2137 12 : tenant_conf
2138 12 : .tenant_conf
2139 12 : .lsn_lease_length
2140 12 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
2141 12 : }
2142 :
2143 : // TODO(yuchen): remove unused flag after implementing https://github.com/neondatabase/neon/issues/8072
2144 : #[allow(unused)]
2145 0 : pub(crate) fn get_lsn_lease_length_for_ts(&self) -> Duration {
2146 0 : let tenant_conf = self.tenant_conf.load();
2147 0 : tenant_conf
2148 0 : .tenant_conf
2149 0 : .lsn_lease_length_for_ts
2150 0 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length_for_ts)
2151 0 : }
2152 :
2153 210 : pub(crate) fn get_switch_aux_file_policy(&self) -> AuxFilePolicy {
2154 210 : let tenant_conf = self.tenant_conf.load();
2155 210 : tenant_conf
2156 210 : .tenant_conf
2157 210 : .switch_aux_file_policy
2158 210 : .unwrap_or(self.conf.default_tenant_conf.switch_aux_file_policy)
2159 210 : }
2160 :
2161 0 : pub(crate) fn get_lazy_slru_download(&self) -> bool {
2162 0 : let tenant_conf = self.tenant_conf.load();
2163 0 : tenant_conf
2164 0 : .tenant_conf
2165 0 : .lazy_slru_download
2166 0 : .unwrap_or(self.conf.default_tenant_conf.lazy_slru_download)
2167 0 : }
2168 :
2169 4804404 : fn get_checkpoint_distance(&self) -> u64 {
2170 4804404 : let tenant_conf = self.tenant_conf.load();
2171 4804404 : tenant_conf
2172 4804404 : .tenant_conf
2173 4804404 : .checkpoint_distance
2174 4804404 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
2175 4804404 : }
2176 :
2177 4802946 : fn get_checkpoint_timeout(&self) -> Duration {
2178 4802946 : let tenant_conf = self.tenant_conf.load();
2179 4802946 : tenant_conf
2180 4802946 : .tenant_conf
2181 4802946 : .checkpoint_timeout
2182 4802946 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
2183 4802946 : }
2184 :
2185 516 : fn get_compaction_target_size(&self) -> u64 {
2186 516 : let tenant_conf = self.tenant_conf.load();
2187 516 : tenant_conf
2188 516 : .tenant_conf
2189 516 : .compaction_target_size
2190 516 : .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
2191 516 : }
2192 :
2193 392 : fn get_compaction_threshold(&self) -> usize {
2194 392 : let tenant_conf = self.tenant_conf.load();
2195 392 : tenant_conf
2196 392 : .tenant_conf
2197 392 : .compaction_threshold
2198 392 : .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
2199 392 : }
2200 :
2201 14 : fn get_image_creation_threshold(&self) -> usize {
2202 14 : let tenant_conf = self.tenant_conf.load();
2203 14 : tenant_conf
2204 14 : .tenant_conf
2205 14 : .image_creation_threshold
2206 14 : .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
2207 14 : }
2208 :
2209 364 : fn get_compaction_algorithm_settings(&self) -> CompactionAlgorithmSettings {
2210 364 : let tenant_conf = &self.tenant_conf.load();
2211 364 : tenant_conf
2212 364 : .tenant_conf
2213 364 : .compaction_algorithm
2214 364 : .as_ref()
2215 364 : .unwrap_or(&self.conf.default_tenant_conf.compaction_algorithm)
2216 364 : .clone()
2217 364 : }
2218 :
2219 0 : fn get_eviction_policy(&self) -> EvictionPolicy {
2220 0 : let tenant_conf = self.tenant_conf.load();
2221 0 : tenant_conf
2222 0 : .tenant_conf
2223 0 : .eviction_policy
2224 0 : .unwrap_or(self.conf.default_tenant_conf.eviction_policy)
2225 0 : }
2226 :
2227 398 : fn get_evictions_low_residence_duration_metric_threshold(
2228 398 : tenant_conf: &TenantConfOpt,
2229 398 : default_tenant_conf: &TenantConf,
2230 398 : ) -> Duration {
2231 398 : tenant_conf
2232 398 : .evictions_low_residence_duration_metric_threshold
2233 398 : .unwrap_or(default_tenant_conf.evictions_low_residence_duration_metric_threshold)
2234 398 : }
2235 :
2236 516 : fn get_image_layer_creation_check_threshold(&self) -> u8 {
2237 516 : let tenant_conf = self.tenant_conf.load();
2238 516 : tenant_conf
2239 516 : .tenant_conf
2240 516 : .image_layer_creation_check_threshold
2241 516 : .unwrap_or(
2242 516 : self.conf
2243 516 : .default_tenant_conf
2244 516 : .image_layer_creation_check_threshold,
2245 516 : )
2246 516 : }
2247 :
2248 8 : pub(super) fn tenant_conf_updated(&self, new_conf: &TenantConfOpt) {
2249 8 : // NB: Most tenant conf options are read by background loops, so,
2250 8 : // changes will automatically be picked up.
2251 8 :
2252 8 : // The threshold is embedded in the metric. So, we need to update it.
2253 8 : {
2254 8 : let new_threshold = Self::get_evictions_low_residence_duration_metric_threshold(
2255 8 : new_conf,
2256 8 : &self.conf.default_tenant_conf,
2257 8 : );
2258 8 :
2259 8 : let tenant_id_str = self.tenant_shard_id.tenant_id.to_string();
2260 8 : let shard_id_str = format!("{}", self.tenant_shard_id.shard_slug());
2261 8 :
2262 8 : let timeline_id_str = self.timeline_id.to_string();
2263 8 : self.metrics
2264 8 : .evictions_with_low_residence_duration
2265 8 : .write()
2266 8 : .unwrap()
2267 8 : .change_threshold(
2268 8 : &tenant_id_str,
2269 8 : &shard_id_str,
2270 8 : &timeline_id_str,
2271 8 : new_threshold,
2272 8 : );
2273 8 : }
2274 8 : }
2275 :
2276 : /// Open a Timeline handle.
2277 : ///
2278 : /// Loads the metadata for the timeline into memory, but not the layer map.
2279 : #[allow(clippy::too_many_arguments)]
2280 390 : pub(super) fn new(
2281 390 : conf: &'static PageServerConf,
2282 390 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
2283 390 : metadata: &TimelineMetadata,
2284 390 : ancestor: Option<Arc<Timeline>>,
2285 390 : timeline_id: TimelineId,
2286 390 : tenant_shard_id: TenantShardId,
2287 390 : generation: Generation,
2288 390 : shard_identity: ShardIdentity,
2289 390 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
2290 390 : resources: TimelineResources,
2291 390 : pg_version: u32,
2292 390 : state: TimelineState,
2293 390 : aux_file_policy: Option<AuxFilePolicy>,
2294 390 : cancel: CancellationToken,
2295 390 : ) -> Arc<Self> {
2296 390 : let disk_consistent_lsn = metadata.disk_consistent_lsn();
2297 390 : let (state, _) = watch::channel(state);
2298 390 :
2299 390 : let (layer_flush_start_tx, _) = tokio::sync::watch::channel((0, disk_consistent_lsn));
2300 390 : let (layer_flush_done_tx, _) = tokio::sync::watch::channel((0, Ok(())));
2301 390 :
2302 390 : let evictions_low_residence_duration_metric_threshold = {
2303 390 : let loaded_tenant_conf = tenant_conf.load();
2304 390 : Self::get_evictions_low_residence_duration_metric_threshold(
2305 390 : &loaded_tenant_conf.tenant_conf,
2306 390 : &conf.default_tenant_conf,
2307 390 : )
2308 390 : };
2309 390 :
2310 390 : Arc::new_cyclic(|myself| {
2311 390 : let metrics = TimelineMetrics::new(
2312 390 : &tenant_shard_id,
2313 390 : &timeline_id,
2314 390 : crate::metrics::EvictionsWithLowResidenceDurationBuilder::new(
2315 390 : "mtime",
2316 390 : evictions_low_residence_duration_metric_threshold,
2317 390 : ),
2318 390 : );
2319 390 : let aux_file_metrics = metrics.aux_file_size_gauge.clone();
2320 :
2321 390 : let mut result = Timeline {
2322 390 : conf,
2323 390 : tenant_conf,
2324 390 : myself: myself.clone(),
2325 390 : timeline_id,
2326 390 : tenant_shard_id,
2327 390 : generation,
2328 390 : shard_identity,
2329 390 : pg_version,
2330 390 : layers: Default::default(),
2331 390 :
2332 390 : walredo_mgr,
2333 390 : walreceiver: Mutex::new(None),
2334 390 :
2335 390 : remote_client: Arc::new(resources.remote_client),
2336 390 :
2337 390 : // initialize in-memory 'last_record_lsn' from 'disk_consistent_lsn'.
2338 390 : last_record_lsn: SeqWait::new(RecordLsn {
2339 390 : last: disk_consistent_lsn,
2340 390 : prev: metadata.prev_record_lsn().unwrap_or(Lsn(0)),
2341 390 : }),
2342 390 : disk_consistent_lsn: AtomicLsn::new(disk_consistent_lsn.0),
2343 390 :
2344 390 : last_freeze_at: AtomicLsn::new(disk_consistent_lsn.0),
2345 390 : last_freeze_ts: RwLock::new(Instant::now()),
2346 390 :
2347 390 : loaded_at: (disk_consistent_lsn, SystemTime::now()),
2348 390 :
2349 390 : ancestor_timeline: ancestor,
2350 390 : ancestor_lsn: metadata.ancestor_lsn(),
2351 390 :
2352 390 : metrics,
2353 390 :
2354 390 : query_metrics: crate::metrics::SmgrQueryTimePerTimeline::new(
2355 390 : &tenant_shard_id,
2356 390 : &timeline_id,
2357 390 : ),
2358 390 :
2359 2730 : directory_metrics: array::from_fn(|_| AtomicU64::new(0)),
2360 390 :
2361 390 : flush_loop_state: Mutex::new(FlushLoopState::NotStarted),
2362 390 :
2363 390 : layer_flush_start_tx,
2364 390 : layer_flush_done_tx,
2365 390 :
2366 390 : write_lock: tokio::sync::Mutex::new(None),
2367 390 :
2368 390 : gc_info: std::sync::RwLock::new(GcInfo::default()),
2369 390 :
2370 390 : latest_gc_cutoff_lsn: Rcu::new(metadata.latest_gc_cutoff_lsn()),
2371 390 : initdb_lsn: metadata.initdb_lsn(),
2372 390 :
2373 390 : current_logical_size: if disk_consistent_lsn.is_valid() {
2374 : // we're creating timeline data with some layer files existing locally,
2375 : // need to recalculate timeline's logical size based on data in the layers.
2376 230 : LogicalSize::deferred_initial(disk_consistent_lsn)
2377 : } else {
2378 : // we're creating timeline data without any layers existing locally,
2379 : // initial logical size is 0.
2380 160 : LogicalSize::empty_initial()
2381 : },
2382 390 : partitioning: tokio::sync::Mutex::new((
2383 390 : (KeyPartitioning::new(), KeyPartitioning::new().into_sparse()),
2384 390 : Lsn(0),
2385 390 : )),
2386 390 : repartition_threshold: 0,
2387 390 : last_image_layer_creation_check_at: AtomicLsn::new(0),
2388 390 : last_image_layer_creation_check_instant: Mutex::new(None),
2389 390 :
2390 390 : last_received_wal: Mutex::new(None),
2391 390 : rel_size_cache: RwLock::new(RelSizeCache {
2392 390 : complete_as_of: disk_consistent_lsn,
2393 390 : map: HashMap::new(),
2394 390 : }),
2395 390 :
2396 390 : download_all_remote_layers_task_info: RwLock::new(None),
2397 390 :
2398 390 : state,
2399 390 :
2400 390 : eviction_task_timeline_state: tokio::sync::Mutex::new(
2401 390 : EvictionTaskTimelineState::default(),
2402 390 : ),
2403 390 : delete_progress: Arc::new(tokio::sync::Mutex::new(DeleteTimelineFlow::default())),
2404 390 :
2405 390 : cancel,
2406 390 : gate: Gate::default(),
2407 390 :
2408 390 : compaction_lock: tokio::sync::Mutex::default(),
2409 390 : gc_lock: tokio::sync::Mutex::default(),
2410 390 :
2411 390 : standby_horizon: AtomicLsn::new(0),
2412 390 :
2413 390 : timeline_get_throttle: resources.timeline_get_throttle,
2414 390 :
2415 390 : aux_files: tokio::sync::Mutex::new(AuxFilesState {
2416 390 : dir: None,
2417 390 : n_deltas: 0,
2418 390 : }),
2419 390 :
2420 390 : aux_file_size_estimator: AuxFileSizeEstimator::new(aux_file_metrics),
2421 390 :
2422 390 : last_aux_file_policy: AtomicAuxFilePolicy::new(aux_file_policy),
2423 390 :
2424 390 : #[cfg(test)]
2425 390 : extra_test_dense_keyspace: ArcSwap::new(Arc::new(KeySpace::default())),
2426 390 :
2427 390 : l0_flush_global_state: resources.l0_flush_global_state,
2428 390 : };
2429 390 : result.repartition_threshold =
2430 390 : result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE;
2431 390 :
2432 390 : result
2433 390 : .metrics
2434 390 : .last_record_gauge
2435 390 : .set(disk_consistent_lsn.0 as i64);
2436 390 : result
2437 390 : })
2438 390 : }
2439 :
2440 536 : pub(super) fn maybe_spawn_flush_loop(self: &Arc<Self>) {
2441 536 : let Ok(guard) = self.gate.enter() else {
2442 0 : info!("cannot start flush loop when the timeline gate has already been closed");
2443 0 : return;
2444 : };
2445 536 : let mut flush_loop_state = self.flush_loop_state.lock().unwrap();
2446 536 : match *flush_loop_state {
2447 384 : FlushLoopState::NotStarted => (),
2448 : FlushLoopState::Running { .. } => {
2449 152 : info!(
2450 0 : "skipping attempt to start flush_loop twice {}/{}",
2451 0 : self.tenant_shard_id, self.timeline_id
2452 : );
2453 152 : return;
2454 : }
2455 : FlushLoopState::Exited => {
2456 0 : warn!(
2457 0 : "ignoring attempt to restart exited flush_loop {}/{}",
2458 0 : self.tenant_shard_id, self.timeline_id
2459 : );
2460 0 : return;
2461 : }
2462 : }
2463 :
2464 384 : let layer_flush_start_rx = self.layer_flush_start_tx.subscribe();
2465 384 : let self_clone = Arc::clone(self);
2466 384 :
2467 384 : debug!("spawning flush loop");
2468 384 : *flush_loop_state = FlushLoopState::Running {
2469 384 : #[cfg(test)]
2470 384 : expect_initdb_optimization: false,
2471 384 : #[cfg(test)]
2472 384 : initdb_optimization_count: 0,
2473 384 : };
2474 384 : task_mgr::spawn(
2475 384 : task_mgr::BACKGROUND_RUNTIME.handle(),
2476 384 : task_mgr::TaskKind::LayerFlushTask,
2477 384 : Some(self.tenant_shard_id),
2478 384 : Some(self.timeline_id),
2479 384 : "layer flush task",
2480 : false,
2481 384 : async move {
2482 384 : let _guard = guard;
2483 384 : let background_ctx = RequestContext::todo_child(TaskKind::LayerFlushTask, DownloadBehavior::Error);
2484 60411 : self_clone.flush_loop(layer_flush_start_rx, &background_ctx).await;
2485 8 : let mut flush_loop_state = self_clone.flush_loop_state.lock().unwrap();
2486 8 : assert!(matches!(*flush_loop_state, FlushLoopState::Running{..}));
2487 8 : *flush_loop_state = FlushLoopState::Exited;
2488 8 : Ok(())
2489 8 : }
2490 384 : .instrument(info_span!(parent: None, "layer flush task", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
2491 : );
2492 536 : }
2493 :
2494 : /// Creates and starts the wal receiver.
2495 : ///
2496 : /// This function is expected to be called at most once per Timeline's lifecycle
2497 : /// when the timeline is activated.
2498 0 : fn launch_wal_receiver(
2499 0 : self: &Arc<Self>,
2500 0 : ctx: &RequestContext,
2501 0 : broker_client: BrokerClientChannel,
2502 0 : ) {
2503 0 : info!(
2504 0 : "launching WAL receiver for timeline {} of tenant {}",
2505 0 : self.timeline_id, self.tenant_shard_id
2506 : );
2507 :
2508 0 : let tenant_conf = self.tenant_conf.load();
2509 0 : let wal_connect_timeout = tenant_conf
2510 0 : .tenant_conf
2511 0 : .walreceiver_connect_timeout
2512 0 : .unwrap_or(self.conf.default_tenant_conf.walreceiver_connect_timeout);
2513 0 : let lagging_wal_timeout = tenant_conf
2514 0 : .tenant_conf
2515 0 : .lagging_wal_timeout
2516 0 : .unwrap_or(self.conf.default_tenant_conf.lagging_wal_timeout);
2517 0 : let max_lsn_wal_lag = tenant_conf
2518 0 : .tenant_conf
2519 0 : .max_lsn_wal_lag
2520 0 : .unwrap_or(self.conf.default_tenant_conf.max_lsn_wal_lag);
2521 0 :
2522 0 : let mut guard = self.walreceiver.lock().unwrap();
2523 0 : assert!(
2524 0 : guard.is_none(),
2525 0 : "multiple launches / re-launches of WAL receiver are not supported"
2526 : );
2527 0 : *guard = Some(WalReceiver::start(
2528 0 : Arc::clone(self),
2529 0 : WalReceiverConf {
2530 0 : wal_connect_timeout,
2531 0 : lagging_wal_timeout,
2532 0 : max_lsn_wal_lag,
2533 0 : auth_token: crate::config::SAFEKEEPER_AUTH_TOKEN.get().cloned(),
2534 0 : availability_zone: self.conf.availability_zone.clone(),
2535 0 : ingest_batch_size: self.conf.ingest_batch_size,
2536 0 : },
2537 0 : broker_client,
2538 0 : ctx,
2539 0 : ));
2540 0 : }
2541 :
2542 : /// Initialize with an empty layer map. Used when creating a new timeline.
2543 384 : pub(super) fn init_empty_layer_map(&self, start_lsn: Lsn) {
2544 384 : let mut layers = self.layers.try_write().expect(
2545 384 : "in the context where we call this function, no other task has access to the object",
2546 384 : );
2547 384 : layers.initialize_empty(Lsn(start_lsn.0));
2548 384 : }
2549 :
2550 : /// Scan the timeline directory, cleanup, populate the layer map, and schedule uploads for local-only
2551 : /// files.
2552 6 : pub(super) async fn load_layer_map(
2553 6 : &self,
2554 6 : disk_consistent_lsn: Lsn,
2555 6 : index_part: Option<IndexPart>,
2556 6 : ) -> anyhow::Result<()> {
2557 : use init::{Decision::*, Discovered, DismissedLayer};
2558 : use LayerName::*;
2559 :
2560 6 : let mut guard = self.layers.write().await;
2561 :
2562 6 : let timer = self.metrics.load_layer_map_histo.start_timer();
2563 6 :
2564 6 : // Scan timeline directory and create ImageLayerName and DeltaFilename
2565 6 : // structs representing all files on disk
2566 6 : let timeline_path = self
2567 6 : .conf
2568 6 : .timeline_path(&self.tenant_shard_id, &self.timeline_id);
2569 6 : let conf = self.conf;
2570 6 : let span = tracing::Span::current();
2571 6 :
2572 6 : // Copy to move into the task we're about to spawn
2573 6 : let this = self.myself.upgrade().expect("&self method holds the arc");
2574 :
2575 6 : let (loaded_layers, needs_cleanup, total_physical_size) = tokio::task::spawn_blocking({
2576 6 : move || {
2577 6 : let _g = span.entered();
2578 6 : let discovered = init::scan_timeline_dir(&timeline_path)?;
2579 6 : let mut discovered_layers = Vec::with_capacity(discovered.len());
2580 6 : let mut unrecognized_files = Vec::new();
2581 6 :
2582 6 : let mut path = timeline_path;
2583 :
2584 22 : for discovered in discovered {
2585 16 : let (name, kind) = match discovered {
2586 16 : Discovered::Layer(layer_file_name, local_metadata) => {
2587 16 : discovered_layers.push((layer_file_name, local_metadata));
2588 16 : continue;
2589 : }
2590 0 : Discovered::IgnoredBackup(path) => {
2591 0 : std::fs::remove_file(path)
2592 0 : .or_else(fs_ext::ignore_not_found)
2593 0 : .fatal_err("Removing .old file");
2594 0 : continue;
2595 : }
2596 0 : Discovered::Unknown(file_name) => {
2597 0 : // we will later error if there are any
2598 0 : unrecognized_files.push(file_name);
2599 0 : continue;
2600 : }
2601 0 : Discovered::Ephemeral(name) => (name, "old ephemeral file"),
2602 0 : Discovered::Temporary(name) => (name, "temporary timeline file"),
2603 0 : Discovered::TemporaryDownload(name) => (name, "temporary download"),
2604 : };
2605 0 : path.push(Utf8Path::new(&name));
2606 0 : init::cleanup(&path, kind)?;
2607 0 : path.pop();
2608 : }
2609 :
2610 6 : if !unrecognized_files.is_empty() {
2611 : // assume that if there are any there are many many.
2612 0 : let n = unrecognized_files.len();
2613 0 : let first = &unrecognized_files[..n.min(10)];
2614 0 : anyhow::bail!(
2615 0 : "unrecognized files in timeline dir (total {n}), first 10: {first:?}"
2616 0 : );
2617 6 : }
2618 6 :
2619 6 : let decided =
2620 6 : init::reconcile(discovered_layers, index_part.as_ref(), disk_consistent_lsn);
2621 6 :
2622 6 : let mut loaded_layers = Vec::new();
2623 6 : let mut needs_cleanup = Vec::new();
2624 6 : let mut total_physical_size = 0;
2625 :
2626 22 : for (name, decision) in decided {
2627 16 : let decision = match decision {
2628 16 : Ok(decision) => decision,
2629 0 : Err(DismissedLayer::Future { local }) => {
2630 0 : if let Some(local) = local {
2631 0 : init::cleanup_future_layer(
2632 0 : &local.local_path,
2633 0 : &name,
2634 0 : disk_consistent_lsn,
2635 0 : )?;
2636 0 : }
2637 0 : needs_cleanup.push(name);
2638 0 : continue;
2639 : }
2640 0 : Err(DismissedLayer::LocalOnly(local)) => {
2641 0 : init::cleanup_local_only_file(&name, &local)?;
2642 : // this file never existed remotely, we will have to do rework
2643 0 : continue;
2644 : }
2645 0 : Err(DismissedLayer::BadMetadata(local)) => {
2646 0 : init::cleanup_local_file_for_remote(&local)?;
2647 : // this file never existed remotely, we will have to do rework
2648 0 : continue;
2649 : }
2650 : };
2651 :
2652 16 : match &name {
2653 12 : Delta(d) => assert!(d.lsn_range.end <= disk_consistent_lsn + 1),
2654 4 : Image(i) => assert!(i.lsn <= disk_consistent_lsn),
2655 : }
2656 :
2657 16 : tracing::debug!(layer=%name, ?decision, "applied");
2658 :
2659 16 : let layer = match decision {
2660 16 : Resident { local, remote } => {
2661 16 : total_physical_size += local.file_size;
2662 16 : Layer::for_resident(conf, &this, local.local_path, name, remote)
2663 16 : .drop_eviction_guard()
2664 : }
2665 0 : Evicted(remote) => Layer::for_evicted(conf, &this, name, remote),
2666 : };
2667 :
2668 16 : loaded_layers.push(layer);
2669 : }
2670 6 : Ok((loaded_layers, needs_cleanup, total_physical_size))
2671 6 : }
2672 6 : })
2673 6 : .await
2674 6 : .map_err(anyhow::Error::new)
2675 6 : .and_then(|x| x)?;
2676 :
2677 6 : let num_layers = loaded_layers.len();
2678 6 :
2679 6 : guard.initialize_local_layers(loaded_layers, disk_consistent_lsn + 1);
2680 6 :
2681 6 : self.remote_client
2682 6 : .schedule_layer_file_deletion(&needs_cleanup)?;
2683 6 : self.remote_client
2684 6 : .schedule_index_upload_for_file_changes()?;
2685 : // This barrier orders above DELETEs before any later operations.
2686 : // This is critical because code executing after the barrier might
2687 : // create again objects with the same key that we just scheduled for deletion.
2688 : // For example, if we just scheduled deletion of an image layer "from the future",
2689 : // later compaction might run again and re-create the same image layer.
2690 : // "from the future" here means an image layer whose LSN is > IndexPart::disk_consistent_lsn.
2691 : // "same" here means same key range and LSN.
2692 : //
2693 : // Without a barrier between above DELETEs and the re-creation's PUTs,
2694 : // the upload queue may execute the PUT first, then the DELETE.
2695 : // In our example, we will end up with an IndexPart referencing a non-existent object.
2696 : //
2697 : // 1. a future image layer is created and uploaded
2698 : // 2. ps restart
2699 : // 3. the future layer from (1) is deleted during load layer map
2700 : // 4. image layer is re-created and uploaded
2701 : // 5. deletion queue would like to delete (1) but actually deletes (4)
2702 : // 6. delete by name works as expected, but it now deletes the wrong (later) version
2703 : //
2704 : // See https://github.com/neondatabase/neon/issues/5878
2705 : //
2706 : // NB: generation numbers naturally protect against this because they disambiguate
2707 : // (1) and (4)
2708 6 : self.remote_client.schedule_barrier()?;
2709 : // Tenant::create_timeline will wait for these uploads to happen before returning, or
2710 : // on retry.
2711 :
2712 6 : info!(
2713 0 : "loaded layer map with {} layers at {}, total physical size: {}",
2714 : num_layers, disk_consistent_lsn, total_physical_size
2715 : );
2716 :
2717 6 : timer.stop_and_record();
2718 6 : Ok(())
2719 6 : }
2720 :
2721 : /// Retrieve current logical size of the timeline.
2722 : ///
2723 : /// The size could be lagging behind the actual number, in case
2724 : /// the initial size calculation has not been run (gets triggered on the first size access).
2725 : ///
2726 : /// return size and boolean flag that shows if the size is exact
2727 0 : pub(crate) fn get_current_logical_size(
2728 0 : self: &Arc<Self>,
2729 0 : priority: GetLogicalSizePriority,
2730 0 : ctx: &RequestContext,
2731 0 : ) -> logical_size::CurrentLogicalSize {
2732 0 : if !self.tenant_shard_id.is_shard_zero() {
2733 : // Logical size is only accurately maintained on shard zero: when called elsewhere, for example
2734 : // when HTTP API is serving a GET for timeline zero, return zero
2735 0 : return logical_size::CurrentLogicalSize::Approximate(logical_size::Approximate::zero());
2736 0 : }
2737 0 :
2738 0 : let current_size = self.current_logical_size.current_size();
2739 0 : debug!("Current size: {current_size:?}");
2740 :
2741 0 : match (current_size.accuracy(), priority) {
2742 0 : (logical_size::Accuracy::Exact, _) => (), // nothing to do
2743 0 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::Background) => {
2744 0 : // background task will eventually deliver an exact value, we're in no rush
2745 0 : }
2746 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::User) => {
2747 : // background task is not ready, but user is asking for it now;
2748 : // => make the background task skip the line
2749 : // (The alternative would be to calculate the size here, but,
2750 : // it can actually take a long time if the user has a lot of rels.
2751 : // And we'll inevitable need it again; So, let the background task do the work.)
2752 0 : match self
2753 0 : .current_logical_size
2754 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
2755 0 : .get()
2756 : {
2757 0 : Some(cancel) => cancel.cancel(),
2758 : None => {
2759 0 : let state = self.current_state();
2760 0 : if matches!(
2761 0 : state,
2762 : TimelineState::Broken { .. } | TimelineState::Stopping
2763 0 : ) {
2764 0 :
2765 0 : // Can happen when timeline detail endpoint is used when deletion is ongoing (or its broken).
2766 0 : // Don't make noise.
2767 0 : } else {
2768 0 : warn!("unexpected: cancel_wait_for_background_loop_concurrency_limit_semaphore not set, priority-boosting of logical size calculation will not work");
2769 0 : debug_assert!(false);
2770 : }
2771 : }
2772 : };
2773 : }
2774 : }
2775 :
2776 0 : if let CurrentLogicalSize::Approximate(_) = ¤t_size {
2777 0 : if ctx.task_kind() == TaskKind::WalReceiverConnectionHandler {
2778 0 : let first = self
2779 0 : .current_logical_size
2780 0 : .did_return_approximate_to_walreceiver
2781 0 : .compare_exchange(
2782 0 : false,
2783 0 : true,
2784 0 : AtomicOrdering::Relaxed,
2785 0 : AtomicOrdering::Relaxed,
2786 0 : )
2787 0 : .is_ok();
2788 0 : if first {
2789 0 : crate::metrics::initial_logical_size::TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE.inc();
2790 0 : }
2791 0 : }
2792 0 : }
2793 :
2794 0 : current_size
2795 0 : }
2796 :
2797 0 : fn spawn_initial_logical_size_computation_task(self: &Arc<Self>, ctx: &RequestContext) {
2798 0 : let Some(initial_part_end) = self.current_logical_size.initial_part_end else {
2799 : // nothing to do for freshly created timelines;
2800 0 : assert_eq!(
2801 0 : self.current_logical_size.current_size().accuracy(),
2802 0 : logical_size::Accuracy::Exact,
2803 0 : );
2804 0 : self.current_logical_size.initialized.add_permits(1);
2805 0 : return;
2806 : };
2807 :
2808 0 : let cancel_wait_for_background_loop_concurrency_limit_semaphore = CancellationToken::new();
2809 0 : let token = cancel_wait_for_background_loop_concurrency_limit_semaphore.clone();
2810 0 : self.current_logical_size
2811 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore.set(token)
2812 0 : .expect("initial logical size calculation task must be spawned exactly once per Timeline object");
2813 0 :
2814 0 : let self_clone = Arc::clone(self);
2815 0 : let background_ctx = ctx.detached_child(
2816 0 : TaskKind::InitialLogicalSizeCalculation,
2817 0 : DownloadBehavior::Download,
2818 0 : );
2819 0 : task_mgr::spawn(
2820 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
2821 0 : task_mgr::TaskKind::InitialLogicalSizeCalculation,
2822 0 : Some(self.tenant_shard_id),
2823 0 : Some(self.timeline_id),
2824 0 : "initial size calculation",
2825 : false,
2826 : // NB: don't log errors here, task_mgr will do that.
2827 0 : async move {
2828 0 : let cancel = task_mgr::shutdown_token();
2829 0 : self_clone
2830 0 : .initial_logical_size_calculation_task(
2831 0 : initial_part_end,
2832 0 : cancel_wait_for_background_loop_concurrency_limit_semaphore,
2833 0 : cancel,
2834 0 : background_ctx,
2835 0 : )
2836 0 : .await;
2837 0 : Ok(())
2838 0 : }
2839 0 : .instrument(info_span!(parent: None, "initial_size_calculation", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id)),
2840 : );
2841 0 : }
2842 :
2843 0 : async fn initial_logical_size_calculation_task(
2844 0 : self: Arc<Self>,
2845 0 : initial_part_end: Lsn,
2846 0 : skip_concurrency_limiter: CancellationToken,
2847 0 : cancel: CancellationToken,
2848 0 : background_ctx: RequestContext,
2849 0 : ) {
2850 : scopeguard::defer! {
2851 : // Irrespective of the outcome of this operation, we should unblock anyone waiting for it.
2852 : self.current_logical_size.initialized.add_permits(1);
2853 : }
2854 :
2855 0 : let try_once = |attempt: usize| {
2856 0 : let background_ctx = &background_ctx;
2857 0 : let self_ref = &self;
2858 0 : let skip_concurrency_limiter = &skip_concurrency_limiter;
2859 0 : async move {
2860 0 : let cancel = task_mgr::shutdown_token();
2861 0 : let wait_for_permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
2862 0 : BackgroundLoopKind::InitialLogicalSizeCalculation,
2863 0 : background_ctx,
2864 0 : );
2865 :
2866 : use crate::metrics::initial_logical_size::StartCircumstances;
2867 0 : let (_maybe_permit, circumstances) = tokio::select! {
2868 : permit = wait_for_permit => {
2869 : (Some(permit), StartCircumstances::AfterBackgroundTasksRateLimit)
2870 : }
2871 : _ = self_ref.cancel.cancelled() => {
2872 : return Err(CalculateLogicalSizeError::Cancelled);
2873 : }
2874 : _ = cancel.cancelled() => {
2875 : return Err(CalculateLogicalSizeError::Cancelled);
2876 : },
2877 : () = skip_concurrency_limiter.cancelled() => {
2878 : // Some action that is part of a end user interaction requested logical size
2879 : // => break out of the rate limit
2880 : // TODO: ideally we'd not run on BackgroundRuntime but the requester's runtime;
2881 : // but then again what happens if they cancel; also, we should just be using
2882 : // one runtime across the entire process, so, let's leave this for now.
2883 : (None, StartCircumstances::SkippedConcurrencyLimiter)
2884 : }
2885 : };
2886 :
2887 0 : let metrics_guard = if attempt == 1 {
2888 0 : crate::metrics::initial_logical_size::START_CALCULATION.first(circumstances)
2889 : } else {
2890 0 : crate::metrics::initial_logical_size::START_CALCULATION.retry(circumstances)
2891 : };
2892 :
2893 0 : let calculated_size = self_ref
2894 0 : .logical_size_calculation_task(
2895 0 : initial_part_end,
2896 0 : LogicalSizeCalculationCause::Initial,
2897 0 : background_ctx,
2898 0 : )
2899 0 : .await?;
2900 :
2901 0 : self_ref
2902 0 : .trigger_aux_file_size_computation(initial_part_end, background_ctx)
2903 0 : .await?;
2904 :
2905 : // TODO: add aux file size to logical size
2906 :
2907 0 : Ok((calculated_size, metrics_guard))
2908 0 : }
2909 0 : };
2910 :
2911 0 : let retrying = async {
2912 0 : let mut attempt = 0;
2913 0 : loop {
2914 0 : attempt += 1;
2915 0 :
2916 0 : match try_once(attempt).await {
2917 0 : Ok(res) => return ControlFlow::Continue(res),
2918 0 : Err(CalculateLogicalSizeError::Cancelled) => return ControlFlow::Break(()),
2919 : Err(
2920 0 : e @ (CalculateLogicalSizeError::Decode(_)
2921 0 : | CalculateLogicalSizeError::PageRead(_)),
2922 0 : ) => {
2923 0 : warn!(attempt, "initial size calculation failed: {e:?}");
2924 : // exponential back-off doesn't make sense at these long intervals;
2925 : // use fixed retry interval with generous jitter instead
2926 0 : let sleep_duration = Duration::from_secs(
2927 0 : u64::try_from(
2928 0 : // 1hour base
2929 0 : (60_i64 * 60_i64)
2930 0 : // 10min jitter
2931 0 : + rand::thread_rng().gen_range(-10 * 60..10 * 60),
2932 0 : )
2933 0 : .expect("10min < 1hour"),
2934 0 : );
2935 0 : tokio::time::sleep(sleep_duration).await;
2936 : }
2937 : }
2938 : }
2939 0 : };
2940 :
2941 0 : let (calculated_size, metrics_guard) = tokio::select! {
2942 : res = retrying => {
2943 : match res {
2944 : ControlFlow::Continue(calculated_size) => calculated_size,
2945 : ControlFlow::Break(()) => return,
2946 : }
2947 : }
2948 : _ = cancel.cancelled() => {
2949 : return;
2950 : }
2951 : };
2952 :
2953 : // we cannot query current_logical_size.current_size() to know the current
2954 : // *negative* value, only truncated to u64.
2955 0 : let added = self
2956 0 : .current_logical_size
2957 0 : .size_added_after_initial
2958 0 : .load(AtomicOrdering::Relaxed);
2959 0 :
2960 0 : let sum = calculated_size.saturating_add_signed(added);
2961 0 :
2962 0 : // set the gauge value before it can be set in `update_current_logical_size`.
2963 0 : self.metrics.current_logical_size_gauge.set(sum);
2964 0 :
2965 0 : self.current_logical_size
2966 0 : .initial_logical_size
2967 0 : .set((calculated_size, metrics_guard.calculation_result_saved()))
2968 0 : .ok()
2969 0 : .expect("only this task sets it");
2970 0 : }
2971 :
2972 0 : pub(crate) fn spawn_ondemand_logical_size_calculation(
2973 0 : self: &Arc<Self>,
2974 0 : lsn: Lsn,
2975 0 : cause: LogicalSizeCalculationCause,
2976 0 : ctx: RequestContext,
2977 0 : ) -> oneshot::Receiver<Result<u64, CalculateLogicalSizeError>> {
2978 0 : let (sender, receiver) = oneshot::channel();
2979 0 : let self_clone = Arc::clone(self);
2980 0 : // XXX if our caller loses interest, i.e., ctx is cancelled,
2981 0 : // we should stop the size calculation work and return an error.
2982 0 : // That would require restructuring this function's API to
2983 0 : // return the result directly, instead of a Receiver for the result.
2984 0 : let ctx = ctx.detached_child(
2985 0 : TaskKind::OndemandLogicalSizeCalculation,
2986 0 : DownloadBehavior::Download,
2987 0 : );
2988 0 : task_mgr::spawn(
2989 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
2990 0 : task_mgr::TaskKind::OndemandLogicalSizeCalculation,
2991 0 : Some(self.tenant_shard_id),
2992 0 : Some(self.timeline_id),
2993 0 : "ondemand logical size calculation",
2994 0 : false,
2995 0 : async move {
2996 0 : let res = self_clone
2997 0 : .logical_size_calculation_task(lsn, cause, &ctx)
2998 0 : .await;
2999 0 : let _ = sender.send(res).ok();
3000 0 : Ok(()) // Receiver is responsible for handling errors
3001 0 : }
3002 0 : .in_current_span(),
3003 0 : );
3004 0 : receiver
3005 0 : }
3006 :
3007 : /// # Cancel-Safety
3008 : ///
3009 : /// This method is cancellation-safe.
3010 0 : #[instrument(skip_all)]
3011 : async fn logical_size_calculation_task(
3012 : self: &Arc<Self>,
3013 : lsn: Lsn,
3014 : cause: LogicalSizeCalculationCause,
3015 : ctx: &RequestContext,
3016 : ) -> Result<u64, CalculateLogicalSizeError> {
3017 : crate::span::debug_assert_current_span_has_tenant_and_timeline_id();
3018 : // We should never be calculating logical sizes on shard !=0, because these shards do not have
3019 : // accurate relation sizes, and they do not emit consumption metrics.
3020 : debug_assert!(self.tenant_shard_id.is_shard_zero());
3021 :
3022 : let guard = self
3023 : .gate
3024 : .enter()
3025 0 : .map_err(|_| CalculateLogicalSizeError::Cancelled)?;
3026 :
3027 : let self_calculation = Arc::clone(self);
3028 :
3029 0 : let mut calculation = pin!(async {
3030 0 : let ctx = ctx.attached_child();
3031 0 : self_calculation
3032 0 : .calculate_logical_size(lsn, cause, &guard, &ctx)
3033 0 : .await
3034 0 : });
3035 :
3036 : tokio::select! {
3037 : res = &mut calculation => { res }
3038 : _ = self.cancel.cancelled() => {
3039 : debug!("cancelling logical size calculation for timeline shutdown");
3040 : calculation.await
3041 : }
3042 : }
3043 : }
3044 :
3045 : /// Calculate the logical size of the database at the latest LSN.
3046 : ///
3047 : /// NOTE: counted incrementally, includes ancestors. This can be a slow operation,
3048 : /// especially if we need to download remote layers.
3049 : ///
3050 : /// # Cancel-Safety
3051 : ///
3052 : /// This method is cancellation-safe.
3053 0 : async fn calculate_logical_size(
3054 0 : &self,
3055 0 : up_to_lsn: Lsn,
3056 0 : cause: LogicalSizeCalculationCause,
3057 0 : _guard: &GateGuard,
3058 0 : ctx: &RequestContext,
3059 0 : ) -> Result<u64, CalculateLogicalSizeError> {
3060 0 : info!(
3061 0 : "Calculating logical size for timeline {} at {}",
3062 : self.timeline_id, up_to_lsn
3063 : );
3064 :
3065 : pausable_failpoint!("timeline-calculate-logical-size-pause");
3066 :
3067 : // See if we've already done the work for initial size calculation.
3068 : // This is a short-cut for timelines that are mostly unused.
3069 0 : if let Some(size) = self.current_logical_size.initialized_size(up_to_lsn) {
3070 0 : return Ok(size);
3071 0 : }
3072 0 : let storage_time_metrics = match cause {
3073 : LogicalSizeCalculationCause::Initial
3074 : | LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize
3075 0 : | LogicalSizeCalculationCause::TenantSizeHandler => &self.metrics.logical_size_histo,
3076 : LogicalSizeCalculationCause::EvictionTaskImitation => {
3077 0 : &self.metrics.imitate_logical_size_histo
3078 : }
3079 : };
3080 0 : let timer = storage_time_metrics.start_timer();
3081 0 : let logical_size = self
3082 0 : .get_current_logical_size_non_incremental(up_to_lsn, ctx)
3083 0 : .await?;
3084 0 : debug!("calculated logical size: {logical_size}");
3085 0 : timer.stop_and_record();
3086 0 : Ok(logical_size)
3087 0 : }
3088 :
3089 : /// Update current logical size, adding `delta' to the old value.
3090 270570 : fn update_current_logical_size(&self, delta: i64) {
3091 270570 : let logical_size = &self.current_logical_size;
3092 270570 : logical_size.increment_size(delta);
3093 270570 :
3094 270570 : // Also set the value in the prometheus gauge. Note that
3095 270570 : // there is a race condition here: if this is is called by two
3096 270570 : // threads concurrently, the prometheus gauge might be set to
3097 270570 : // one value while current_logical_size is set to the
3098 270570 : // other.
3099 270570 : match logical_size.current_size() {
3100 270570 : CurrentLogicalSize::Exact(ref new_current_size) => self
3101 270570 : .metrics
3102 270570 : .current_logical_size_gauge
3103 270570 : .set(new_current_size.into()),
3104 0 : CurrentLogicalSize::Approximate(_) => {
3105 0 : // don't update the gauge yet, this allows us not to update the gauge back and
3106 0 : // forth between the initial size calculation task.
3107 0 : }
3108 : }
3109 270570 : }
3110 :
3111 2904 : pub(crate) fn update_directory_entries_count(&self, kind: DirectoryKind, count: u64) {
3112 2904 : self.directory_metrics[kind.offset()].store(count, AtomicOrdering::Relaxed);
3113 2904 : let aux_metric =
3114 2904 : self.directory_metrics[DirectoryKind::AuxFiles.offset()].load(AtomicOrdering::Relaxed);
3115 2904 :
3116 2904 : let sum_of_entries = self
3117 2904 : .directory_metrics
3118 2904 : .iter()
3119 20328 : .map(|v| v.load(AtomicOrdering::Relaxed))
3120 2904 : .sum();
3121 2904 : // Set a high general threshold and a lower threshold for the auxiliary files,
3122 2904 : // as we can have large numbers of relations in the db directory.
3123 2904 : const SUM_THRESHOLD: u64 = 5000;
3124 2904 : const AUX_THRESHOLD: u64 = 1000;
3125 2904 : if sum_of_entries >= SUM_THRESHOLD || aux_metric >= AUX_THRESHOLD {
3126 0 : self.metrics
3127 0 : .directory_entries_count_gauge
3128 0 : .set(sum_of_entries);
3129 2904 : } else if let Some(metric) = Lazy::get(&self.metrics.directory_entries_count_gauge) {
3130 0 : metric.set(sum_of_entries);
3131 2904 : }
3132 2904 : }
3133 :
3134 0 : async fn find_layer(&self, layer_name: &LayerName) -> Option<Layer> {
3135 0 : let guard = self.layers.read().await;
3136 0 : for historic_layer in guard.layer_map().iter_historic_layers() {
3137 0 : let historic_layer_name = historic_layer.layer_name();
3138 0 : if layer_name == &historic_layer_name {
3139 0 : return Some(guard.get_from_desc(&historic_layer));
3140 0 : }
3141 : }
3142 :
3143 0 : None
3144 0 : }
3145 :
3146 : /// The timeline heatmap is a hint to secondary locations from the primary location,
3147 : /// indicating which layers are currently on-disk on the primary.
3148 : ///
3149 : /// None is returned if the Timeline is in a state where uploading a heatmap
3150 : /// doesn't make sense, such as shutting down or initializing. The caller
3151 : /// should treat this as a cue to simply skip doing any heatmap uploading
3152 : /// for this timeline.
3153 0 : pub(crate) async fn generate_heatmap(&self) -> Option<HeatMapTimeline> {
3154 0 : if !self.is_active() {
3155 0 : return None;
3156 0 : }
3157 :
3158 0 : let guard = self.layers.read().await;
3159 :
3160 0 : let resident = guard.likely_resident_layers().map(|layer| {
3161 0 : let last_activity_ts = layer.access_stats().latest_activity_or_now();
3162 0 :
3163 0 : HeatMapLayer::new(
3164 0 : layer.layer_desc().layer_name(),
3165 0 : layer.metadata(),
3166 0 : last_activity_ts,
3167 0 : )
3168 0 : });
3169 0 :
3170 0 : let layers = resident.collect();
3171 0 :
3172 0 : Some(HeatMapTimeline::new(self.timeline_id, layers))
3173 0 : }
3174 :
3175 : /// Returns true if the given lsn is or was an ancestor branchpoint.
3176 0 : pub(crate) fn is_ancestor_lsn(&self, lsn: Lsn) -> bool {
3177 0 : // upon timeline detach, we set the ancestor_lsn to Lsn::INVALID and the store the original
3178 0 : // branchpoint in the value in IndexPart::lineage
3179 0 : self.ancestor_lsn == lsn
3180 0 : || (self.ancestor_lsn == Lsn::INVALID
3181 0 : && self.remote_client.is_previous_ancestor_lsn(lsn))
3182 0 : }
3183 : }
3184 :
3185 : type TraversalId = Arc<str>;
3186 :
3187 : trait TraversalLayerExt {
3188 : fn traversal_id(&self) -> TraversalId;
3189 : }
3190 :
3191 : impl TraversalLayerExt for Layer {
3192 211498 : fn traversal_id(&self) -> TraversalId {
3193 211498 : Arc::clone(self.debug_str())
3194 211498 : }
3195 : }
3196 :
3197 : impl TraversalLayerExt for Arc<InMemoryLayer> {
3198 606340 : fn traversal_id(&self) -> TraversalId {
3199 606340 : Arc::clone(self.local_path_str())
3200 606340 : }
3201 : }
3202 :
3203 : impl Timeline {
3204 : ///
3205 : /// Get a handle to a Layer for reading.
3206 : ///
3207 : /// The returned Layer might be from an ancestor timeline, if the
3208 : /// segment hasn't been updated on this timeline yet.
3209 : ///
3210 : /// This function takes the current timeline's locked LayerMap as an argument,
3211 : /// so callers can avoid potential race conditions.
3212 : ///
3213 : /// # Cancel-Safety
3214 : ///
3215 : /// This method is cancellation-safe.
3216 626000 : async fn get_reconstruct_data(
3217 626000 : &self,
3218 626000 : key: Key,
3219 626000 : request_lsn: Lsn,
3220 626000 : reconstruct_state: &mut ValueReconstructState,
3221 626000 : ctx: &RequestContext,
3222 626000 : ) -> Result<Vec<TraversalPathItem>, PageReconstructError> {
3223 626000 : // Start from the current timeline.
3224 626000 : let mut timeline_owned;
3225 626000 : let mut timeline = self;
3226 626000 :
3227 626000 : let mut read_count = scopeguard::guard(0, |cnt| {
3228 626000 : crate::metrics::READ_NUM_LAYERS_VISITED.observe(cnt as f64)
3229 626000 : });
3230 626000 :
3231 626000 : // For debugging purposes, collect the path of layers that we traversed
3232 626000 : // through. It's included in the error message if we fail to find the key.
3233 626000 : let mut traversal_path = Vec::<TraversalPathItem>::new();
3234 :
3235 626000 : let cached_lsn = if let Some((cached_lsn, _)) = &reconstruct_state.img {
3236 0 : *cached_lsn
3237 : } else {
3238 626000 : Lsn(0)
3239 : };
3240 :
3241 : // 'prev_lsn' tracks the last LSN that we were at in our search. It's used
3242 : // to check that each iteration make some progress, to break infinite
3243 : // looping if something goes wrong.
3244 626000 : let mut prev_lsn = None;
3245 626000 :
3246 626000 : let mut result = ValueReconstructResult::Continue;
3247 626000 : let mut cont_lsn = Lsn(request_lsn.0 + 1);
3248 :
3249 1670206 : 'outer: loop {
3250 1670206 : if self.cancel.is_cancelled() {
3251 0 : return Err(PageReconstructError::Cancelled);
3252 1670206 : }
3253 1670206 :
3254 1670206 : // The function should have updated 'state'
3255 1670206 : //info!("CALLED for {} at {}: {:?} with {} records, cached {}", key, cont_lsn, result, reconstruct_state.records.len(), cached_lsn);
3256 1670206 : match result {
3257 625870 : ValueReconstructResult::Complete => return Ok(traversal_path),
3258 : ValueReconstructResult::Continue => {
3259 : // If we reached an earlier cached page image, we're done.
3260 1044314 : if cont_lsn == cached_lsn + 1 {
3261 0 : return Ok(traversal_path);
3262 1044314 : }
3263 1044314 : if let Some(prev) = prev_lsn {
3264 192070 : if prev <= cont_lsn {
3265 : // Didn't make any progress in last iteration. Error out to avoid
3266 : // getting stuck in the loop.
3267 106 : return Err(PageReconstructError::MissingKey(MissingKeyError {
3268 106 : key,
3269 106 : shard: self.shard_identity.get_shard_number(&key),
3270 106 : cont_lsn: Lsn(cont_lsn.0 - 1),
3271 106 : request_lsn,
3272 106 : ancestor_lsn: Some(timeline.ancestor_lsn),
3273 106 : traversal_path,
3274 106 : backtrace: None,
3275 106 : }));
3276 191964 : }
3277 852244 : }
3278 1044208 : prev_lsn = Some(cont_lsn);
3279 : }
3280 : ValueReconstructResult::Missing => {
3281 : return Err(PageReconstructError::MissingKey(MissingKeyError {
3282 22 : key,
3283 22 : shard: self.shard_identity.get_shard_number(&key),
3284 22 : cont_lsn,
3285 22 : request_lsn,
3286 22 : ancestor_lsn: None,
3287 22 : traversal_path,
3288 22 : backtrace: if cfg!(test) {
3289 22 : Some(std::backtrace::Backtrace::force_capture())
3290 : } else {
3291 0 : None
3292 : },
3293 : }));
3294 : }
3295 : }
3296 :
3297 : // Recurse into ancestor if needed
3298 1044208 : if let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() {
3299 402943 : if key.is_inherited_key() && Lsn(cont_lsn.0 - 1) <= timeline.ancestor_lsn {
3300 226246 : trace!(
3301 0 : "going into ancestor {}, cont_lsn is {}",
3302 : timeline.ancestor_lsn,
3303 : cont_lsn
3304 : );
3305 :
3306 226246 : timeline_owned = timeline
3307 226246 : .get_ready_ancestor_timeline(ancestor_timeline, ctx)
3308 2 : .await?;
3309 226244 : timeline = &*timeline_owned;
3310 226244 : prev_lsn = None;
3311 226244 : continue 'outer;
3312 176697 : }
3313 641265 : }
3314 :
3315 817962 : let guard = timeline.layers.read().await;
3316 817962 : let layers = guard.layer_map();
3317 :
3318 : // Check the open and frozen in-memory layers first, in order from newest
3319 : // to oldest.
3320 817962 : if let Some(open_layer) = &layers.open_layer {
3321 717344 : let start_lsn = open_layer.get_lsn_range().start;
3322 717344 : if cont_lsn > start_lsn {
3323 : //info!("CHECKING for {} at {} on open layer {}", key, cont_lsn, open_layer.layer_name().display());
3324 : // Get all the data needed to reconstruct the page version from this layer.
3325 : // But if we have an older cached page image, no need to go past that.
3326 604214 : let lsn_floor = max(cached_lsn + 1, start_lsn);
3327 604214 :
3328 604214 : let open_layer = open_layer.clone();
3329 604214 : drop(guard);
3330 604214 :
3331 604214 : result = match open_layer
3332 604214 : .get_value_reconstruct_data(
3333 604214 : key,
3334 604214 : lsn_floor..cont_lsn,
3335 604214 : reconstruct_state,
3336 604214 : ctx,
3337 604214 : )
3338 9655 : .await
3339 : {
3340 604214 : Ok(result) => result,
3341 0 : Err(e) => return Err(PageReconstructError::from(e)),
3342 : };
3343 604214 : cont_lsn = lsn_floor;
3344 604214 : *read_count += 1;
3345 604214 : traversal_path.push((result, cont_lsn, open_layer.traversal_id()));
3346 604214 : continue 'outer;
3347 113130 : }
3348 100618 : }
3349 213748 : for frozen_layer in layers.frozen_layers.iter().rev() {
3350 2128 : let start_lsn = frozen_layer.get_lsn_range().start;
3351 2128 : if cont_lsn > start_lsn {
3352 : //info!("CHECKING for {} at {} on frozen layer {}", key, cont_lsn, frozen_layer.layer_name().display());
3353 2126 : let lsn_floor = max(cached_lsn + 1, start_lsn);
3354 2126 :
3355 2126 : let frozen_layer = frozen_layer.clone();
3356 2126 : drop(guard);
3357 2126 :
3358 2126 : result = match frozen_layer
3359 2126 : .get_value_reconstruct_data(
3360 2126 : key,
3361 2126 : lsn_floor..cont_lsn,
3362 2126 : reconstruct_state,
3363 2126 : ctx,
3364 2126 : )
3365 0 : .await
3366 : {
3367 2126 : Ok(result) => result,
3368 0 : Err(e) => return Err(PageReconstructError::from(e)),
3369 : };
3370 2126 : cont_lsn = lsn_floor;
3371 2126 : *read_count += 1;
3372 2126 : traversal_path.push((result, cont_lsn, frozen_layer.traversal_id()));
3373 2126 : continue 'outer;
3374 2 : }
3375 : }
3376 :
3377 211622 : if let Some(SearchResult { lsn_floor, layer }) = layers.search(key, cont_lsn) {
3378 211498 : let layer = guard.get_from_desc(&layer);
3379 211498 : drop(guard);
3380 211498 : // Get all the data needed to reconstruct the page version from this layer.
3381 211498 : // But if we have an older cached page image, no need to go past that.
3382 211498 : let lsn_floor = max(cached_lsn + 1, lsn_floor);
3383 211498 : result = match layer
3384 211498 : .get_value_reconstruct_data(key, lsn_floor..cont_lsn, reconstruct_state, ctx)
3385 30352 : .await
3386 : {
3387 211498 : Ok(result) => result,
3388 0 : Err(e) => return Err(PageReconstructError::from(e)),
3389 : };
3390 211498 : cont_lsn = lsn_floor;
3391 211498 : *read_count += 1;
3392 211498 : traversal_path.push((result, cont_lsn, layer.traversal_id()));
3393 211498 : continue 'outer;
3394 124 : } else if timeline.ancestor_timeline.is_some() {
3395 : // Nothing on this timeline. Traverse to parent
3396 106 : result = ValueReconstructResult::Continue;
3397 106 : cont_lsn = Lsn(timeline.ancestor_lsn.0 + 1);
3398 106 : continue 'outer;
3399 : } else {
3400 : // Nothing found
3401 18 : result = ValueReconstructResult::Missing;
3402 18 : continue 'outer;
3403 : }
3404 : }
3405 626000 : }
3406 :
3407 : #[allow(unknown_lints)] // doc_lazy_continuation is still a new lint
3408 : #[allow(clippy::doc_lazy_continuation)]
3409 : /// Get the data needed to reconstruct all keys in the provided keyspace
3410 : ///
3411 : /// The algorithm is as follows:
3412 : /// 1. While some keys are still not done and there's a timeline to visit:
3413 : /// 2. Visit the timeline (see [`Timeline::get_vectored_reconstruct_data_timeline`]:
3414 : /// 2.1: Build the fringe for the current keyspace
3415 : /// 2.2 Visit the newest layer from the fringe to collect all values for the range it
3416 : /// intersects
3417 : /// 2.3. Pop the timeline from the fringe
3418 : /// 2.4. If the fringe is empty, go back to 1
3419 138 : async fn get_vectored_reconstruct_data(
3420 138 : &self,
3421 138 : mut keyspace: KeySpace,
3422 138 : request_lsn: Lsn,
3423 138 : reconstruct_state: &mut ValuesReconstructState,
3424 138 : ctx: &RequestContext,
3425 138 : ) -> Result<(), GetVectoredError> {
3426 138 : let mut timeline_owned: Arc<Timeline>;
3427 138 : let mut timeline = self;
3428 138 :
3429 138 : let mut cont_lsn = Lsn(request_lsn.0 + 1);
3430 :
3431 138 : let missing_keyspace = loop {
3432 174 : if self.cancel.is_cancelled() {
3433 0 : return Err(GetVectoredError::Cancelled);
3434 174 : }
3435 :
3436 : let TimelineVisitOutcome {
3437 174 : completed_keyspace: completed,
3438 174 : image_covered_keyspace,
3439 174 : } = Self::get_vectored_reconstruct_data_timeline(
3440 174 : timeline,
3441 174 : keyspace.clone(),
3442 174 : cont_lsn,
3443 174 : reconstruct_state,
3444 174 : &self.cancel,
3445 174 : ctx,
3446 174 : )
3447 11428 : .await?;
3448 :
3449 174 : keyspace.remove_overlapping_with(&completed);
3450 174 :
3451 174 : // Do not descend into the ancestor timeline for aux files.
3452 174 : // We don't return a blanket [`GetVectoredError::MissingKey`] to avoid
3453 174 : // stalling compaction.
3454 174 : keyspace.remove_overlapping_with(&KeySpace {
3455 174 : ranges: vec![NON_INHERITED_RANGE, NON_INHERITED_SPARSE_RANGE],
3456 174 : });
3457 174 :
3458 174 : // Keyspace is fully retrieved
3459 174 : if keyspace.is_empty() {
3460 128 : break None;
3461 46 : }
3462 :
3463 46 : let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() else {
3464 : // Not fully retrieved but no ancestor timeline.
3465 10 : break Some(keyspace);
3466 : };
3467 :
3468 : // Now we see if there are keys covered by the image layer but does not exist in the
3469 : // image layer, which means that the key does not exist.
3470 :
3471 : // The block below will stop the vectored search if any of the keys encountered an image layer
3472 : // which did not contain a snapshot for said key. Since we have already removed all completed
3473 : // keys from `keyspace`, we expect there to be no overlap between it and the image covered key
3474 : // space. If that's not the case, we had at least one key encounter a gap in the image layer
3475 : // and stop the search as a result of that.
3476 36 : let removed = keyspace.remove_overlapping_with(&image_covered_keyspace);
3477 36 : if !removed.is_empty() {
3478 0 : break Some(removed);
3479 36 : }
3480 36 : // If we reached this point, `remove_overlapping_with` should not have made any change to the
3481 36 : // keyspace.
3482 36 :
3483 36 : // Take the min to avoid reconstructing a page with data newer than request Lsn.
3484 36 : cont_lsn = std::cmp::min(Lsn(request_lsn.0 + 1), Lsn(timeline.ancestor_lsn.0 + 1));
3485 36 : timeline_owned = timeline
3486 36 : .get_ready_ancestor_timeline(ancestor_timeline, ctx)
3487 0 : .await
3488 36 : .map_err(GetVectoredError::GetReadyAncestorError)?;
3489 36 : timeline = &*timeline_owned;
3490 : };
3491 :
3492 138 : if let Some(missing_keyspace) = missing_keyspace {
3493 10 : return Err(GetVectoredError::MissingKey(MissingKeyError {
3494 10 : key: missing_keyspace.start().unwrap(), /* better if we can store the full keyspace */
3495 10 : shard: self
3496 10 : .shard_identity
3497 10 : .get_shard_number(&missing_keyspace.start().unwrap()),
3498 10 : cont_lsn,
3499 10 : request_lsn,
3500 10 : ancestor_lsn: Some(timeline.ancestor_lsn),
3501 10 : traversal_path: vec![],
3502 10 : backtrace: None,
3503 10 : }));
3504 128 : }
3505 128 :
3506 128 : Ok(())
3507 138 : }
3508 :
3509 : /// Collect the reconstruct data for a keyspace from the specified timeline.
3510 : ///
3511 : /// Maintain a fringe [`LayerFringe`] which tracks all the layers that intersect
3512 : /// the current keyspace. The current keyspace of the search at any given timeline
3513 : /// is the original keyspace minus all the keys that have been completed minus
3514 : /// any keys for which we couldn't find an intersecting layer. It's not tracked explicitly,
3515 : /// but if you merge all the keyspaces in the fringe, you get the "current keyspace".
3516 : ///
3517 : /// This is basically a depth-first search visitor implementation where a vertex
3518 : /// is the (layer, lsn range, key space) tuple. The fringe acts as the stack.
3519 : ///
3520 : /// At each iteration pop the top of the fringe (the layer with the highest Lsn)
3521 : /// and get all the required reconstruct data from the layer in one go.
3522 : ///
3523 : /// Returns the completed keyspace and the keyspaces with image coverage. The caller
3524 : /// decides how to deal with these two keyspaces.
3525 174 : async fn get_vectored_reconstruct_data_timeline(
3526 174 : timeline: &Timeline,
3527 174 : keyspace: KeySpace,
3528 174 : mut cont_lsn: Lsn,
3529 174 : reconstruct_state: &mut ValuesReconstructState,
3530 174 : cancel: &CancellationToken,
3531 174 : ctx: &RequestContext,
3532 174 : ) -> Result<TimelineVisitOutcome, GetVectoredError> {
3533 174 : let mut unmapped_keyspace = keyspace.clone();
3534 174 : let mut fringe = LayerFringe::new();
3535 174 :
3536 174 : let mut completed_keyspace = KeySpace::default();
3537 174 : let mut image_covered_keyspace = KeySpaceRandomAccum::new();
3538 :
3539 406 : loop {
3540 406 : if cancel.is_cancelled() {
3541 0 : return Err(GetVectoredError::Cancelled);
3542 406 : }
3543 406 :
3544 406 : let (keys_done_last_step, keys_with_image_coverage) =
3545 406 : reconstruct_state.consume_done_keys();
3546 406 : unmapped_keyspace.remove_overlapping_with(&keys_done_last_step);
3547 406 : completed_keyspace.merge(&keys_done_last_step);
3548 406 : if let Some(keys_with_image_coverage) = keys_with_image_coverage {
3549 66 : unmapped_keyspace
3550 66 : .remove_overlapping_with(&KeySpace::single(keys_with_image_coverage.clone()));
3551 66 : image_covered_keyspace.add_range(keys_with_image_coverage);
3552 340 : }
3553 :
3554 : // Do not descent any further if the last layer we visited
3555 : // completed all keys in the keyspace it inspected. This is not
3556 : // required for correctness, but avoids visiting extra layers
3557 : // which turns out to be a perf bottleneck in some cases.
3558 406 : if !unmapped_keyspace.is_empty() {
3559 286 : let guard = timeline.layers.read().await;
3560 286 : let layers = guard.layer_map();
3561 286 :
3562 286 : let in_memory_layer = layers.find_in_memory_layer(|l| {
3563 28 : let start_lsn = l.get_lsn_range().start;
3564 28 : cont_lsn > start_lsn
3565 286 : });
3566 286 :
3567 286 : match in_memory_layer {
3568 14 : Some(l) => {
3569 14 : let lsn_range = l.get_lsn_range().start..cont_lsn;
3570 14 : fringe.update(
3571 14 : ReadableLayer::InMemoryLayer(l),
3572 14 : unmapped_keyspace.clone(),
3573 14 : lsn_range,
3574 14 : );
3575 14 : }
3576 : None => {
3577 72113 : for range in unmapped_keyspace.ranges.iter() {
3578 72113 : let results = layers.range_search(range.clone(), cont_lsn);
3579 72113 :
3580 72113 : results
3581 72113 : .found
3582 72113 : .into_iter()
3583 72113 : .map(|(SearchResult { layer, lsn_floor }, keyspace_accum)| {
3584 64036 : (
3585 64036 : ReadableLayer::PersistentLayer(guard.get_from_desc(&layer)),
3586 64036 : keyspace_accum.to_keyspace(),
3587 64036 : lsn_floor..cont_lsn,
3588 64036 : )
3589 72113 : })
3590 72113 : .for_each(|(layer, keyspace, lsn_range)| {
3591 64036 : fringe.update(layer, keyspace, lsn_range)
3592 72113 : });
3593 72113 : }
3594 : }
3595 : }
3596 :
3597 : // It's safe to drop the layer map lock after planning the next round of reads.
3598 : // The fringe keeps readable handles for the layers which are safe to read even
3599 : // if layers were compacted or flushed.
3600 : //
3601 : // The more interesting consideration is: "Why is the read algorithm still correct
3602 : // if the layer map changes while it is operating?". Doing a vectored read on a
3603 : // timeline boils down to pushing an imaginary lsn boundary downwards for each range
3604 : // covered by the read. The layer map tells us how to move the lsn downwards for a
3605 : // range at *a particular point in time*. It is fine for the answer to be different
3606 : // at two different time points.
3607 286 : drop(guard);
3608 120 : }
3609 :
3610 406 : if let Some((layer_to_read, keyspace_to_read, lsn_range)) = fringe.next_layer() {
3611 232 : let next_cont_lsn = lsn_range.start;
3612 232 : layer_to_read
3613 232 : .get_values_reconstruct_data(
3614 232 : keyspace_to_read.clone(),
3615 232 : lsn_range,
3616 232 : reconstruct_state,
3617 232 : ctx,
3618 232 : )
3619 11428 : .await?;
3620 :
3621 232 : unmapped_keyspace = keyspace_to_read;
3622 232 : cont_lsn = next_cont_lsn;
3623 232 :
3624 232 : reconstruct_state.on_layer_visited(&layer_to_read);
3625 : } else {
3626 174 : break;
3627 174 : }
3628 174 : }
3629 174 :
3630 174 : Ok(TimelineVisitOutcome {
3631 174 : completed_keyspace,
3632 174 : image_covered_keyspace: image_covered_keyspace.consume_keyspace(),
3633 174 : })
3634 174 : }
3635 :
3636 226282 : async fn get_ready_ancestor_timeline(
3637 226282 : &self,
3638 226282 : ancestor: &Arc<Timeline>,
3639 226282 : ctx: &RequestContext,
3640 226282 : ) -> Result<Arc<Timeline>, GetReadyAncestorError> {
3641 226282 : // It's possible that the ancestor timeline isn't active yet, or
3642 226282 : // is active but hasn't yet caught up to the branch point. Wait
3643 226282 : // for it.
3644 226282 : //
3645 226282 : // This cannot happen while the pageserver is running normally,
3646 226282 : // because you cannot create a branch from a point that isn't
3647 226282 : // present in the pageserver yet. However, we don't wait for the
3648 226282 : // branch point to be uploaded to cloud storage before creating
3649 226282 : // a branch. I.e., the branch LSN need not be remote consistent
3650 226282 : // for the branching operation to succeed.
3651 226282 : //
3652 226282 : // Hence, if we try to load a tenant in such a state where
3653 226282 : // 1. the existence of the branch was persisted (in IndexPart and/or locally)
3654 226282 : // 2. but the ancestor state is behind branch_lsn because it was not yet persisted
3655 226282 : // then we will need to wait for the ancestor timeline to
3656 226282 : // re-stream WAL up to branch_lsn before we access it.
3657 226282 : //
3658 226282 : // How can a tenant get in such a state?
3659 226282 : // - ungraceful pageserver process exit
3660 226282 : // - detach+attach => this is a bug, https://github.com/neondatabase/neon/issues/4219
3661 226282 : //
3662 226282 : // NB: this could be avoided by requiring
3663 226282 : // branch_lsn >= remote_consistent_lsn
3664 226282 : // during branch creation.
3665 226282 : match ancestor.wait_to_become_active(ctx).await {
3666 226280 : Ok(()) => {}
3667 : Err(TimelineState::Stopping) => {
3668 : // If an ancestor is stopping, it means the tenant is stopping: handle this the same as if this timeline was stopping.
3669 0 : return Err(GetReadyAncestorError::Cancelled);
3670 : }
3671 2 : Err(state) => {
3672 2 : return Err(GetReadyAncestorError::BadState {
3673 2 : timeline_id: ancestor.timeline_id,
3674 2 : state,
3675 2 : });
3676 : }
3677 : }
3678 226280 : ancestor
3679 226280 : .wait_lsn(self.ancestor_lsn, WaitLsnWaiter::Timeline(self), ctx)
3680 0 : .await
3681 226280 : .map_err(|e| match e {
3682 0 : e @ WaitLsnError::Timeout(_) => GetReadyAncestorError::AncestorLsnTimeout(e),
3683 0 : WaitLsnError::Shutdown => GetReadyAncestorError::Cancelled,
3684 0 : WaitLsnError::BadState(state) => GetReadyAncestorError::BadState {
3685 0 : timeline_id: ancestor.timeline_id,
3686 0 : state,
3687 0 : },
3688 226280 : })?;
3689 :
3690 226280 : Ok(ancestor.clone())
3691 226282 : }
3692 :
3693 0 : pub(crate) fn get_ancestor_timeline(&self) -> Option<Arc<Timeline>> {
3694 0 : self.ancestor_timeline.clone()
3695 0 : }
3696 :
3697 5452 : pub(crate) fn get_shard_identity(&self) -> &ShardIdentity {
3698 5452 : &self.shard_identity
3699 5452 : }
3700 :
3701 : ///
3702 : /// Get a handle to the latest layer for appending.
3703 : ///
3704 1250 : async fn get_layer_for_write(
3705 1250 : &self,
3706 1250 : lsn: Lsn,
3707 1250 : ctx: &RequestContext,
3708 1250 : ) -> anyhow::Result<Arc<InMemoryLayer>> {
3709 1250 : let mut guard = self.layers.write().await;
3710 1250 : let layer = guard
3711 1250 : .get_layer_for_write(
3712 1250 : lsn,
3713 1250 : self.get_last_record_lsn(),
3714 1250 : self.conf,
3715 1250 : self.timeline_id,
3716 1250 : self.tenant_shard_id,
3717 1250 : ctx,
3718 1250 : )
3719 698 : .await?;
3720 1250 : Ok(layer)
3721 1250 : }
3722 :
3723 5279056 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
3724 5279056 : assert!(new_lsn.is_aligned());
3725 :
3726 5279056 : self.metrics.last_record_gauge.set(new_lsn.0 as i64);
3727 5279056 : self.last_record_lsn.advance(new_lsn);
3728 5279056 : }
3729 :
3730 1148 : async fn freeze_inmem_layer_at(
3731 1148 : &self,
3732 1148 : at: Lsn,
3733 1148 : write_lock: &mut tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
3734 1148 : ) {
3735 1148 : let frozen = {
3736 1148 : let mut guard = self.layers.write().await;
3737 1148 : guard
3738 1148 : .try_freeze_in_memory_layer(at, &self.last_freeze_at, write_lock)
3739 3 : .await
3740 : };
3741 1148 : if frozen {
3742 1120 : let now = Instant::now();
3743 1120 : *(self.last_freeze_ts.write().unwrap()) = now;
3744 1120 : }
3745 1148 : }
3746 :
3747 : /// Layer flusher task's main loop.
3748 384 : async fn flush_loop(
3749 384 : self: &Arc<Self>,
3750 384 : mut layer_flush_start_rx: tokio::sync::watch::Receiver<(u64, Lsn)>,
3751 384 : ctx: &RequestContext,
3752 384 : ) {
3753 384 : info!("started flush loop");
3754 1112 : loop {
3755 1112 : tokio::select! {
3756 : _ = self.cancel.cancelled() => {
3757 : info!("shutting down layer flush task due to Timeline::cancel");
3758 : break;
3759 : },
3760 : _ = layer_flush_start_rx.changed() => {}
3761 : }
3762 1112 : trace!("waking up");
3763 1112 : let (flush_counter, frozen_to_lsn) = *layer_flush_start_rx.borrow();
3764 1112 :
3765 1112 : // The highest LSN to which we flushed in the loop over frozen layers
3766 1112 : let mut flushed_to_lsn = Lsn(0);
3767 :
3768 1112 : let result = loop {
3769 2232 : if self.cancel.is_cancelled() {
3770 0 : info!("dropping out of flush loop for timeline shutdown");
3771 : // Note: we do not bother transmitting into [`layer_flush_done_tx`], because
3772 : // anyone waiting on that will respect self.cancel as well: they will stop
3773 : // waiting at the same time we as drop out of this loop.
3774 0 : return;
3775 2232 : }
3776 2232 :
3777 2232 : let timer = self.metrics.flush_time_histo.start_timer();
3778 :
3779 2232 : let layer_to_flush = {
3780 2232 : let guard = self.layers.read().await;
3781 2232 : guard.layer_map().frozen_layers.front().cloned()
3782 : // drop 'layers' lock to allow concurrent reads and writes
3783 : };
3784 2232 : let Some(layer_to_flush) = layer_to_flush else {
3785 1112 : break Ok(());
3786 : };
3787 59440 : match self.flush_frozen_layer(layer_to_flush, ctx).await {
3788 1120 : Ok(this_layer_to_lsn) => {
3789 1120 : flushed_to_lsn = std::cmp::max(flushed_to_lsn, this_layer_to_lsn);
3790 1120 : }
3791 : Err(FlushLayerError::Cancelled) => {
3792 0 : info!("dropping out of flush loop for timeline shutdown");
3793 0 : return;
3794 : }
3795 0 : err @ Err(
3796 0 : FlushLayerError::NotRunning(_)
3797 0 : | FlushLayerError::Other(_)
3798 0 : | FlushLayerError::CreateImageLayersError(_),
3799 0 : ) => {
3800 0 : error!("could not flush frozen layer: {err:?}");
3801 0 : break err.map(|_| ());
3802 : }
3803 : }
3804 1120 : timer.stop_and_record();
3805 : };
3806 :
3807 : // Unsharded tenants should never advance their LSN beyond the end of the
3808 : // highest layer they write: such gaps between layer data and the frozen LSN
3809 : // are only legal on sharded tenants.
3810 1112 : debug_assert!(
3811 1112 : self.shard_identity.count.count() > 1
3812 1112 : || flushed_to_lsn >= frozen_to_lsn
3813 68 : || !flushed_to_lsn.is_valid()
3814 : );
3815 :
3816 1112 : if flushed_to_lsn < frozen_to_lsn && self.shard_identity.count.count() > 1 {
3817 : // If our layer flushes didn't carry disk_consistent_lsn up to the `to_lsn` advertised
3818 : // to us via layer_flush_start_rx, then advance it here.
3819 : //
3820 : // This path is only taken for tenants with multiple shards: single sharded tenants should
3821 : // never encounter a gap in the wal.
3822 0 : let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
3823 0 : tracing::debug!("Advancing disk_consistent_lsn across layer gap {old_disk_consistent_lsn}->{frozen_to_lsn}");
3824 0 : if self.set_disk_consistent_lsn(frozen_to_lsn) {
3825 0 : if let Err(e) = self.schedule_uploads(frozen_to_lsn, vec![]) {
3826 0 : tracing::warn!("Failed to schedule metadata upload after updating disk_consistent_lsn: {e}");
3827 0 : }
3828 0 : }
3829 1112 : }
3830 :
3831 : // Notify any listeners that we're done
3832 1112 : let _ = self
3833 1112 : .layer_flush_done_tx
3834 1112 : .send_replace((flush_counter, result));
3835 : }
3836 8 : }
3837 :
3838 : /// Request the flush loop to write out all frozen layers up to `at_lsn` as Delta L0 files to disk.
3839 : /// The caller is responsible for the freezing, e.g., [`Self::freeze_inmem_layer_at`].
3840 : ///
3841 : /// `at_lsn` may be higher than the highest LSN of a frozen layer: if this is the
3842 : /// case, it means no data will be written between the top of the highest frozen layer and
3843 : /// to_lsn, e.g. because this tenant shard has ingested up to to_lsn and not written any data
3844 : /// locally for that part of the WAL.
3845 1148 : fn flush_frozen_layers(&self, at_lsn: Lsn) -> Result<u64, FlushLayerError> {
3846 1148 : // Increment the flush cycle counter and wake up the flush task.
3847 1148 : // Remember the new value, so that when we listen for the flush
3848 1148 : // to finish, we know when the flush that we initiated has
3849 1148 : // finished, instead of some other flush that was started earlier.
3850 1148 : let mut my_flush_request = 0;
3851 1148 :
3852 1148 : let flush_loop_state = { *self.flush_loop_state.lock().unwrap() };
3853 1148 : if !matches!(flush_loop_state, FlushLoopState::Running { .. }) {
3854 0 : return Err(FlushLayerError::NotRunning(flush_loop_state));
3855 1148 : }
3856 1148 :
3857 1148 : self.layer_flush_start_tx.send_modify(|(counter, lsn)| {
3858 1148 : my_flush_request = *counter + 1;
3859 1148 : *counter = my_flush_request;
3860 1148 : *lsn = std::cmp::max(at_lsn, *lsn);
3861 1148 : });
3862 1148 :
3863 1148 : Ok(my_flush_request)
3864 1148 : }
3865 :
3866 1068 : async fn wait_flush_completion(&self, request: u64) -> Result<(), FlushLayerError> {
3867 1068 : let mut rx = self.layer_flush_done_tx.subscribe();
3868 2136 : loop {
3869 2136 : {
3870 2136 : let (last_result_counter, last_result) = &*rx.borrow();
3871 2136 : if *last_result_counter >= request {
3872 1068 : if let Err(err) = last_result {
3873 : // We already logged the original error in
3874 : // flush_loop. We cannot propagate it to the caller
3875 : // here, because it might not be Cloneable
3876 0 : return Err(err.clone());
3877 : } else {
3878 1068 : return Ok(());
3879 : }
3880 1068 : }
3881 1068 : }
3882 1068 : trace!("waiting for flush to complete");
3883 : tokio::select! {
3884 : rx_e = rx.changed() => {
3885 0 : rx_e.map_err(|_| FlushLayerError::NotRunning(*self.flush_loop_state.lock().unwrap()))?;
3886 : },
3887 : // Cancellation safety: we are not leaving an I/O in-flight for the flush, we're just ignoring
3888 : // the notification from [`flush_loop`] that it completed.
3889 : _ = self.cancel.cancelled() => {
3890 : tracing::info!("Cancelled layer flush due on timeline shutdown");
3891 : return Ok(())
3892 : }
3893 : };
3894 1068 : trace!("done")
3895 : }
3896 1068 : }
3897 :
3898 1068 : async fn flush_frozen_layers_and_wait(&self, at_lsn: Lsn) -> Result<(), FlushLayerError> {
3899 1068 : let token = self.flush_frozen_layers(at_lsn)?;
3900 1068 : self.wait_flush_completion(token).await
3901 1068 : }
3902 :
3903 : /// Flush one frozen in-memory layer to disk, as a new delta layer.
3904 : ///
3905 : /// Return value is the last lsn (inclusive) of the layer that was frozen.
3906 2240 : #[instrument(skip_all, fields(layer=%frozen_layer))]
3907 : async fn flush_frozen_layer(
3908 : self: &Arc<Self>,
3909 : frozen_layer: Arc<InMemoryLayer>,
3910 : ctx: &RequestContext,
3911 : ) -> Result<Lsn, FlushLayerError> {
3912 : debug_assert_current_span_has_tenant_and_timeline_id();
3913 :
3914 : // As a special case, when we have just imported an image into the repository,
3915 : // instead of writing out a L0 delta layer, we directly write out image layer
3916 : // files instead. This is possible as long as *all* the data imported into the
3917 : // repository have the same LSN.
3918 : let lsn_range = frozen_layer.get_lsn_range();
3919 :
3920 : // Whether to directly create image layers for this flush, or flush them as delta layers
3921 : let create_image_layer =
3922 : lsn_range.start == self.initdb_lsn && lsn_range.end == Lsn(self.initdb_lsn.0 + 1);
3923 :
3924 : #[cfg(test)]
3925 : {
3926 : match &mut *self.flush_loop_state.lock().unwrap() {
3927 : FlushLoopState::NotStarted | FlushLoopState::Exited => {
3928 : panic!("flush loop not running")
3929 : }
3930 : FlushLoopState::Running {
3931 : expect_initdb_optimization,
3932 : initdb_optimization_count,
3933 : ..
3934 : } => {
3935 : if create_image_layer {
3936 : *initdb_optimization_count += 1;
3937 : } else {
3938 : assert!(!*expect_initdb_optimization, "expected initdb optimization");
3939 : }
3940 : }
3941 : }
3942 : }
3943 :
3944 : let (layers_to_upload, delta_layer_to_add) = if create_image_layer {
3945 : // Note: The 'ctx' in use here has DownloadBehavior::Error. We should not
3946 : // require downloading anything during initial import.
3947 : let ((rel_partition, metadata_partition), _lsn) = self
3948 : .repartition(
3949 : self.initdb_lsn,
3950 : self.get_compaction_target_size(),
3951 : EnumSet::empty(),
3952 : ctx,
3953 : )
3954 : .await
3955 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?;
3956 :
3957 : if self.cancel.is_cancelled() {
3958 : return Err(FlushLayerError::Cancelled);
3959 : }
3960 :
3961 : // FIXME(auxfilesv2): support multiple metadata key partitions might need initdb support as well?
3962 : // This code path will not be hit during regression tests. After #7099 we have a single partition
3963 : // with two key ranges. If someone wants to fix initdb optimization in the future, this might need
3964 : // to be fixed.
3965 :
3966 : // For metadata, always create delta layers.
3967 : let delta_layer = if !metadata_partition.parts.is_empty() {
3968 : assert_eq!(
3969 : metadata_partition.parts.len(),
3970 : 1,
3971 : "currently sparse keyspace should only contain a single metadata keyspace"
3972 : );
3973 : let metadata_keyspace = &metadata_partition.parts[0];
3974 : self.create_delta_layer(
3975 : &frozen_layer,
3976 : Some(
3977 : metadata_keyspace.0.ranges.first().unwrap().start
3978 : ..metadata_keyspace.0.ranges.last().unwrap().end,
3979 : ),
3980 : ctx,
3981 : )
3982 : .await
3983 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?
3984 : } else {
3985 : None
3986 : };
3987 :
3988 : // For image layers, we add them immediately into the layer map.
3989 : let mut layers_to_upload = Vec::new();
3990 : layers_to_upload.extend(
3991 : self.create_image_layers(
3992 : &rel_partition,
3993 : self.initdb_lsn,
3994 : ImageLayerCreationMode::Initial,
3995 : ctx,
3996 : )
3997 : .await?,
3998 : );
3999 :
4000 : if let Some(delta_layer) = delta_layer {
4001 : layers_to_upload.push(delta_layer.clone());
4002 : (layers_to_upload, Some(delta_layer))
4003 : } else {
4004 : (layers_to_upload, None)
4005 : }
4006 : } else {
4007 : // Normal case, write out a L0 delta layer file.
4008 : // `create_delta_layer` will not modify the layer map.
4009 : // We will remove frozen layer and add delta layer in one atomic operation later.
4010 : let Some(layer) = self
4011 : .create_delta_layer(&frozen_layer, None, ctx)
4012 : .await
4013 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?
4014 : else {
4015 : panic!("delta layer cannot be empty if no filter is applied");
4016 : };
4017 : (
4018 : // FIXME: even though we have a single image and single delta layer assumption
4019 : // we push them to vec
4020 : vec![layer.clone()],
4021 : Some(layer),
4022 : )
4023 : };
4024 :
4025 : pausable_failpoint!("flush-layer-cancel-after-writing-layer-out-pausable");
4026 :
4027 : if self.cancel.is_cancelled() {
4028 : return Err(FlushLayerError::Cancelled);
4029 : }
4030 :
4031 : let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1);
4032 :
4033 : // The new on-disk layers are now in the layer map. We can remove the
4034 : // in-memory layer from the map now. The flushed layer is stored in
4035 : // the mapping in `create_delta_layer`.
4036 : {
4037 : let mut guard = self.layers.write().await;
4038 :
4039 : if self.cancel.is_cancelled() {
4040 : return Err(FlushLayerError::Cancelled);
4041 : }
4042 :
4043 : guard.finish_flush_l0_layer(delta_layer_to_add.as_ref(), &frozen_layer, &self.metrics);
4044 :
4045 : if self.set_disk_consistent_lsn(disk_consistent_lsn) {
4046 : // Schedule remote uploads that will reflect our new disk_consistent_lsn
4047 : self.schedule_uploads(disk_consistent_lsn, layers_to_upload)
4048 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?;
4049 : }
4050 : // release lock on 'layers'
4051 : };
4052 :
4053 : // FIXME: between create_delta_layer and the scheduling of the upload in `update_metadata_file`,
4054 : // a compaction can delete the file and then it won't be available for uploads any more.
4055 : // We still schedule the upload, resulting in an error, but ideally we'd somehow avoid this
4056 : // race situation.
4057 : // See https://github.com/neondatabase/neon/issues/4526
4058 : pausable_failpoint!("flush-frozen-pausable");
4059 :
4060 : // This failpoint is used by another test case `test_pageserver_recovery`.
4061 : fail_point!("flush-frozen-exit");
4062 :
4063 : Ok(Lsn(lsn_range.end.0 - 1))
4064 : }
4065 :
4066 : /// Return true if the value changed
4067 : ///
4068 : /// This function must only be used from the layer flush task, and may not be called concurrently.
4069 1120 : fn set_disk_consistent_lsn(&self, new_value: Lsn) -> bool {
4070 1120 : // We do a simple load/store cycle: that's why this function isn't safe for concurrent use.
4071 1120 : let old_value = self.disk_consistent_lsn.load();
4072 1120 : if new_value != old_value {
4073 1120 : assert!(new_value >= old_value);
4074 1120 : self.disk_consistent_lsn.store(new_value);
4075 1120 : true
4076 : } else {
4077 0 : false
4078 : }
4079 1120 : }
4080 :
4081 : /// Update metadata file
4082 1126 : fn schedule_uploads(
4083 1126 : &self,
4084 1126 : disk_consistent_lsn: Lsn,
4085 1126 : layers_to_upload: impl IntoIterator<Item = ResidentLayer>,
4086 1126 : ) -> anyhow::Result<()> {
4087 1126 : // We can only save a valid 'prev_record_lsn' value on disk if we
4088 1126 : // flushed *all* in-memory changes to disk. We only track
4089 1126 : // 'prev_record_lsn' in memory for the latest processed record, so we
4090 1126 : // don't remember what the correct value that corresponds to some old
4091 1126 : // LSN is. But if we flush everything, then the value corresponding
4092 1126 : // current 'last_record_lsn' is correct and we can store it on disk.
4093 1126 : let RecordLsn {
4094 1126 : last: last_record_lsn,
4095 1126 : prev: prev_record_lsn,
4096 1126 : } = self.last_record_lsn.load();
4097 1126 : let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
4098 1046 : Some(prev_record_lsn)
4099 : } else {
4100 80 : None
4101 : };
4102 :
4103 1126 : let update = crate::tenant::metadata::MetadataUpdate::new(
4104 1126 : disk_consistent_lsn,
4105 1126 : ondisk_prev_record_lsn,
4106 1126 : *self.latest_gc_cutoff_lsn.read(),
4107 1126 : );
4108 1126 :
4109 1126 : fail_point!("checkpoint-before-saving-metadata", |x| bail!(
4110 0 : "{}",
4111 0 : x.unwrap()
4112 1126 : ));
4113 :
4114 2260 : for layer in layers_to_upload {
4115 1134 : self.remote_client.schedule_layer_file_upload(layer)?;
4116 : }
4117 1126 : self.remote_client
4118 1126 : .schedule_index_upload_for_metadata_update(&update)?;
4119 :
4120 1126 : Ok(())
4121 1126 : }
4122 :
4123 0 : pub(crate) async fn preserve_initdb_archive(&self) -> anyhow::Result<()> {
4124 0 : self.remote_client
4125 0 : .preserve_initdb_archive(
4126 0 : &self.tenant_shard_id.tenant_id,
4127 0 : &self.timeline_id,
4128 0 : &self.cancel,
4129 0 : )
4130 0 : .await
4131 0 : }
4132 :
4133 : // Write out the given frozen in-memory layer as a new L0 delta file. This L0 file will not be tracked
4134 : // in layer map immediately. The caller is responsible to put it into the layer map.
4135 1120 : async fn create_delta_layer(
4136 1120 : self: &Arc<Self>,
4137 1120 : frozen_layer: &Arc<InMemoryLayer>,
4138 1120 : key_range: Option<Range<Key>>,
4139 1120 : ctx: &RequestContext,
4140 1120 : ) -> anyhow::Result<Option<ResidentLayer>> {
4141 1120 : let self_clone = Arc::clone(self);
4142 1120 : let frozen_layer = Arc::clone(frozen_layer);
4143 1120 : let ctx = ctx.attached_child();
4144 1120 : let work = async move {
4145 1120 : let Some(new_delta) = frozen_layer
4146 1120 : .write_to_disk(&self_clone, &ctx, key_range)
4147 85770 : .await?
4148 : else {
4149 152 : return Ok(None);
4150 : };
4151 : // The write_to_disk() above calls writer.finish() which already did the fsync of the inodes.
4152 : // We just need to fsync the directory in which these inodes are linked,
4153 : // which we know to be the timeline directory.
4154 : //
4155 : // We use fatal_err() below because the after write_to_disk returns with success,
4156 : // the in-memory state of the filesystem already has the layer file in its final place,
4157 : // and subsequent pageserver code could think it's durable while it really isn't.
4158 968 : let timeline_dir = VirtualFile::open(
4159 968 : &self_clone
4160 968 : .conf
4161 968 : .timeline_path(&self_clone.tenant_shard_id, &self_clone.timeline_id),
4162 968 : &ctx,
4163 968 : )
4164 487 : .await
4165 968 : .fatal_err("VirtualFile::open for timeline dir fsync");
4166 968 : timeline_dir
4167 968 : .sync_all()
4168 484 : .await
4169 968 : .fatal_err("VirtualFile::sync_all timeline dir");
4170 968 : anyhow::Ok(Some(new_delta))
4171 1120 : };
4172 : // Before tokio-epoll-uring, we ran write_to_disk & the sync_all inside spawn_blocking.
4173 : // Preserve that behavior to maintain the same behavior for `virtual_file_io_engine=std-fs`.
4174 : use crate::virtual_file::io_engine::IoEngine;
4175 1120 : match crate::virtual_file::io_engine::get() {
4176 0 : IoEngine::NotSet => panic!("io engine not set"),
4177 : IoEngine::StdFs => {
4178 560 : let span = tracing::info_span!("blocking");
4179 560 : tokio::task::spawn_blocking({
4180 560 : move || Handle::current().block_on(work.instrument(span))
4181 560 : })
4182 560 : .await
4183 560 : .context("spawn_blocking")
4184 560 : .and_then(|x| x)
4185 : }
4186 : #[cfg(target_os = "linux")]
4187 54655 : IoEngine::TokioEpollUring => work.await,
4188 : }
4189 1120 : }
4190 :
4191 516 : async fn repartition(
4192 516 : &self,
4193 516 : lsn: Lsn,
4194 516 : partition_size: u64,
4195 516 : flags: EnumSet<CompactFlags>,
4196 516 : ctx: &RequestContext,
4197 516 : ) -> anyhow::Result<((KeyPartitioning, SparseKeyPartitioning), Lsn)> {
4198 516 : let Ok(mut partitioning_guard) = self.partitioning.try_lock() else {
4199 : // NB: there are two callers, one is the compaction task, of which there is only one per struct Tenant and hence Timeline.
4200 : // The other is the initdb optimization in flush_frozen_layer, used by `boostrap_timeline`, which runs before `.activate()`
4201 : // and hence before the compaction task starts.
4202 0 : anyhow::bail!("repartition() called concurrently, this should not happen");
4203 : };
4204 516 : let ((dense_partition, sparse_partition), partition_lsn) = &*partitioning_guard;
4205 516 : if lsn < *partition_lsn {
4206 0 : anyhow::bail!("repartition() called with LSN going backwards, this should not happen");
4207 516 : }
4208 516 :
4209 516 : let distance = lsn.0 - partition_lsn.0;
4210 516 : if *partition_lsn != Lsn(0)
4211 262 : && distance <= self.repartition_threshold
4212 262 : && !flags.contains(CompactFlags::ForceRepartition)
4213 : {
4214 248 : debug!(
4215 : distance,
4216 : threshold = self.repartition_threshold,
4217 0 : "no repartitioning needed"
4218 : );
4219 248 : return Ok((
4220 248 : (dense_partition.clone(), sparse_partition.clone()),
4221 248 : *partition_lsn,
4222 248 : ));
4223 268 : }
4224 :
4225 14135 : let (dense_ks, sparse_ks) = self.collect_keyspace(lsn, ctx).await?;
4226 268 : let dense_partitioning = dense_ks.partition(&self.shard_identity, partition_size);
4227 268 : let sparse_partitioning = SparseKeyPartitioning {
4228 268 : parts: vec![sparse_ks],
4229 268 : }; // no partitioning for metadata keys for now
4230 268 : *partitioning_guard = ((dense_partitioning, sparse_partitioning), lsn);
4231 268 :
4232 268 : Ok((partitioning_guard.0.clone(), partitioning_guard.1))
4233 516 : }
4234 :
4235 : // Is it time to create a new image layer for the given partition?
4236 14 : async fn time_for_new_image_layer(&self, partition: &KeySpace, lsn: Lsn) -> bool {
4237 14 : let threshold = self.get_image_creation_threshold();
4238 :
4239 14 : let guard = self.layers.read().await;
4240 14 : let layers = guard.layer_map();
4241 14 :
4242 14 : let mut max_deltas = 0;
4243 28 : for part_range in &partition.ranges {
4244 14 : let image_coverage = layers.image_coverage(part_range, lsn);
4245 28 : for (img_range, last_img) in image_coverage {
4246 14 : let img_lsn = if let Some(last_img) = last_img {
4247 0 : last_img.get_lsn_range().end
4248 : } else {
4249 14 : Lsn(0)
4250 : };
4251 : // Let's consider an example:
4252 : //
4253 : // delta layer with LSN range 71-81
4254 : // delta layer with LSN range 81-91
4255 : // delta layer with LSN range 91-101
4256 : // image layer at LSN 100
4257 : //
4258 : // If 'lsn' is still 100, i.e. no new WAL has been processed since the last image layer,
4259 : // there's no need to create a new one. We check this case explicitly, to avoid passing
4260 : // a bogus range to count_deltas below, with start > end. It's even possible that there
4261 : // are some delta layers *later* than current 'lsn', if more WAL was processed and flushed
4262 : // after we read last_record_lsn, which is passed here in the 'lsn' argument.
4263 14 : if img_lsn < lsn {
4264 14 : let num_deltas =
4265 14 : layers.count_deltas(&img_range, &(img_lsn..lsn), Some(threshold));
4266 14 :
4267 14 : max_deltas = max_deltas.max(num_deltas);
4268 14 : if num_deltas >= threshold {
4269 0 : debug!(
4270 0 : "key range {}-{}, has {} deltas on this timeline in LSN range {}..{}",
4271 : img_range.start, img_range.end, num_deltas, img_lsn, lsn
4272 : );
4273 0 : return true;
4274 14 : }
4275 0 : }
4276 : }
4277 : }
4278 :
4279 14 : debug!(
4280 : max_deltas,
4281 0 : "none of the partitioned ranges had >= {threshold} deltas"
4282 : );
4283 14 : false
4284 14 : }
4285 :
4286 : /// Create image layers for Postgres data. Assumes the caller passes a partition that is not too large,
4287 : /// so that at most one image layer will be produced from this function.
4288 180 : async fn create_image_layer_for_rel_blocks(
4289 180 : self: &Arc<Self>,
4290 180 : partition: &KeySpace,
4291 180 : mut image_layer_writer: ImageLayerWriter,
4292 180 : lsn: Lsn,
4293 180 : ctx: &RequestContext,
4294 180 : img_range: Range<Key>,
4295 180 : start: Key,
4296 180 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
4297 180 : let mut wrote_keys = false;
4298 180 :
4299 180 : let mut key_request_accum = KeySpaceAccum::new();
4300 1180 : for range in &partition.ranges {
4301 1000 : let mut key = range.start;
4302 2326 : while key < range.end {
4303 : // Decide whether to retain this key: usually we do, but sharded tenants may
4304 : // need to drop keys that don't belong to them. If we retain the key, add it
4305 : // to `key_request_accum` for later issuing a vectored get
4306 1326 : if self.shard_identity.is_key_disposable(&key) {
4307 0 : debug!(
4308 0 : "Dropping key {} during compaction (it belongs on shard {:?})",
4309 0 : key,
4310 0 : self.shard_identity.get_shard_number(&key)
4311 : );
4312 1326 : } else {
4313 1326 : key_request_accum.add_key(key);
4314 1326 : }
4315 :
4316 1326 : let last_key_in_range = key.next() == range.end;
4317 1326 : key = key.next();
4318 1326 :
4319 1326 : // Maybe flush `key_rest_accum`
4320 1326 : if key_request_accum.raw_size() >= Timeline::MAX_GET_VECTORED_KEYS
4321 1326 : || (last_key_in_range && key_request_accum.raw_size() > 0)
4322 : {
4323 1000 : let results = self
4324 1000 : .get_vectored(key_request_accum.consume_keyspace(), lsn, ctx)
4325 10 : .await?;
4326 :
4327 2326 : for (img_key, img) in results {
4328 1326 : let img = match img {
4329 1326 : Ok(img) => img,
4330 0 : Err(err) => {
4331 0 : // If we fail to reconstruct a VM or FSM page, we can zero the
4332 0 : // page without losing any actual user data. That seems better
4333 0 : // than failing repeatedly and getting stuck.
4334 0 : //
4335 0 : // We had a bug at one point, where we truncated the FSM and VM
4336 0 : // in the pageserver, but the Postgres didn't know about that
4337 0 : // and continued to generate incremental WAL records for pages
4338 0 : // that didn't exist in the pageserver. Trying to replay those
4339 0 : // WAL records failed to find the previous image of the page.
4340 0 : // This special case allows us to recover from that situation.
4341 0 : // See https://github.com/neondatabase/neon/issues/2601.
4342 0 : //
4343 0 : // Unfortunately we cannot do this for the main fork, or for
4344 0 : // any metadata keys, keys, as that would lead to actual data
4345 0 : // loss.
4346 0 : if img_key.is_rel_fsm_block_key() || img_key.is_rel_vm_block_key() {
4347 0 : warn!("could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}");
4348 0 : ZERO_PAGE.clone()
4349 : } else {
4350 0 : return Err(CreateImageLayersError::PageReconstructError(err));
4351 : }
4352 : }
4353 : };
4354 :
4355 : // Write all the keys we just read into our new image layer.
4356 1463 : image_layer_writer.put_image(img_key, img, ctx).await?;
4357 1326 : wrote_keys = true;
4358 : }
4359 326 : }
4360 : }
4361 : }
4362 :
4363 180 : if wrote_keys {
4364 : // Normal path: we have written some data into the new image layer for this
4365 : // partition, so flush it to disk.
4366 371 : let image_layer = image_layer_writer.finish(self, ctx).await?;
4367 180 : Ok(ImageLayerCreationOutcome {
4368 180 : image: Some(image_layer),
4369 180 : next_start_key: img_range.end,
4370 180 : })
4371 : } else {
4372 : // Special case: the image layer may be empty if this is a sharded tenant and the
4373 : // partition does not cover any keys owned by this shard. In this case, to ensure
4374 : // we don't leave gaps between image layers, leave `start` where it is, so that the next
4375 : // layer we write will cover the key range that we just scanned.
4376 0 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
4377 0 : Ok(ImageLayerCreationOutcome {
4378 0 : image: None,
4379 0 : next_start_key: start,
4380 0 : })
4381 : }
4382 180 : }
4383 :
4384 : /// Create an image layer for metadata keys. This function produces one image layer for all metadata
4385 : /// keys for now. Because metadata keys cannot exceed basebackup size limit, the image layer for it
4386 : /// would not be too large to fit in a single image layer.
4387 : #[allow(clippy::too_many_arguments)]
4388 16 : async fn create_image_layer_for_metadata_keys(
4389 16 : self: &Arc<Self>,
4390 16 : partition: &KeySpace,
4391 16 : mut image_layer_writer: ImageLayerWriter,
4392 16 : lsn: Lsn,
4393 16 : ctx: &RequestContext,
4394 16 : img_range: Range<Key>,
4395 16 : mode: ImageLayerCreationMode,
4396 16 : start: Key,
4397 16 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
4398 16 : assert!(!matches!(mode, ImageLayerCreationMode::Initial));
4399 :
4400 : // Metadata keys image layer creation.
4401 16 : let mut reconstruct_state = ValuesReconstructState::default();
4402 16 : let data = self
4403 16 : .get_vectored_impl(partition.clone(), lsn, &mut reconstruct_state, ctx)
4404 4047 : .await?;
4405 16 : let (data, total_kb_retrieved, total_keys_retrieved) = {
4406 16 : let mut new_data = BTreeMap::new();
4407 16 : let mut total_kb_retrieved = 0;
4408 16 : let mut total_keys_retrieved = 0;
4409 10028 : for (k, v) in data {
4410 10012 : let v = v.map_err(CreateImageLayersError::PageReconstructError)?;
4411 10012 : total_kb_retrieved += KEY_SIZE + v.len();
4412 10012 : total_keys_retrieved += 1;
4413 10012 : new_data.insert(k, v);
4414 : }
4415 16 : (new_data, total_kb_retrieved / 1024, total_keys_retrieved)
4416 16 : };
4417 16 : let delta_files_accessed = reconstruct_state.get_delta_layers_visited();
4418 16 :
4419 16 : let trigger_generation = delta_files_accessed as usize >= MAX_AUX_FILE_V2_DELTAS;
4420 16 : debug!(
4421 : trigger_generation,
4422 : delta_files_accessed,
4423 : total_kb_retrieved,
4424 : total_keys_retrieved,
4425 0 : "generate metadata images"
4426 : );
4427 :
4428 16 : if !trigger_generation && mode == ImageLayerCreationMode::Try {
4429 2 : return Ok(ImageLayerCreationOutcome {
4430 2 : image: None,
4431 2 : next_start_key: img_range.end,
4432 2 : });
4433 14 : }
4434 14 : let mut wrote_any_image = false;
4435 10026 : for (k, v) in data {
4436 10012 : if v.is_empty() {
4437 : // the key has been deleted, it does not need an image
4438 : // in metadata keyspace, an empty image == tombstone
4439 8 : continue;
4440 10004 : }
4441 10004 : wrote_any_image = true;
4442 10004 :
4443 10004 : // No need to handle sharding b/c metadata keys are always on the 0-th shard.
4444 10004 :
4445 10004 : // TODO: split image layers to avoid too large layer files. Too large image files are not handled
4446 10004 : // on the normal data path either.
4447 10163 : image_layer_writer.put_image(k, v, ctx).await?;
4448 : }
4449 :
4450 14 : if wrote_any_image {
4451 : // Normal path: we have written some data into the new image layer for this
4452 : // partition, so flush it to disk.
4453 24 : let image_layer = image_layer_writer.finish(self, ctx).await?;
4454 12 : Ok(ImageLayerCreationOutcome {
4455 12 : image: Some(image_layer),
4456 12 : next_start_key: img_range.end,
4457 12 : })
4458 : } else {
4459 : // Special case: the image layer may be empty if this is a sharded tenant and the
4460 : // partition does not cover any keys owned by this shard. In this case, to ensure
4461 : // we don't leave gaps between image layers, leave `start` where it is, so that the next
4462 : // layer we write will cover the key range that we just scanned.
4463 2 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
4464 2 : Ok(ImageLayerCreationOutcome {
4465 2 : image: None,
4466 2 : next_start_key: start,
4467 2 : })
4468 : }
4469 16 : }
4470 :
4471 : /// Predicate function which indicates whether we should check if new image layers
4472 : /// are required. Since checking if new image layers are required is expensive in
4473 : /// terms of CPU, we only do it in the following cases:
4474 : /// 1. If the timeline has ingested sufficient WAL to justify the cost
4475 : /// 2. If enough time has passed since the last check:
4476 : /// 1. For large tenants, we wish to perform the check more often since they
4477 : /// suffer from the lack of image layers
4478 : /// 2. For small tenants (that can mostly fit in RAM), we use a much longer interval
4479 516 : fn should_check_if_image_layers_required(self: &Arc<Timeline>, lsn: Lsn) -> bool {
4480 516 : const LARGE_TENANT_THRESHOLD: u64 = 2 * 1024 * 1024 * 1024;
4481 516 :
4482 516 : let last_checks_at = self.last_image_layer_creation_check_at.load();
4483 516 : let distance = lsn
4484 516 : .checked_sub(last_checks_at)
4485 516 : .expect("Attempt to compact with LSN going backwards");
4486 516 : let min_distance =
4487 516 : self.get_image_layer_creation_check_threshold() as u64 * self.get_checkpoint_distance();
4488 516 :
4489 516 : let distance_based_decision = distance.0 >= min_distance;
4490 516 :
4491 516 : let mut time_based_decision = false;
4492 516 : let mut last_check_instant = self.last_image_layer_creation_check_instant.lock().unwrap();
4493 516 : if let CurrentLogicalSize::Exact(logical_size) = self.current_logical_size.current_size() {
4494 414 : let check_required_after = if Into::<u64>::into(&logical_size) >= LARGE_TENANT_THRESHOLD
4495 : {
4496 0 : self.get_checkpoint_timeout()
4497 : } else {
4498 414 : Duration::from_secs(3600 * 48)
4499 : };
4500 :
4501 414 : time_based_decision = match *last_check_instant {
4502 262 : Some(last_check) => {
4503 262 : let elapsed = last_check.elapsed();
4504 262 : elapsed >= check_required_after
4505 : }
4506 152 : None => true,
4507 : };
4508 102 : }
4509 :
4510 : // Do the expensive delta layer counting only if this timeline has ingested sufficient
4511 : // WAL since the last check or a checkpoint timeout interval has elapsed since the last
4512 : // check.
4513 516 : let decision = distance_based_decision || time_based_decision;
4514 :
4515 516 : if decision {
4516 154 : self.last_image_layer_creation_check_at.store(lsn);
4517 154 : *last_check_instant = Some(Instant::now());
4518 362 : }
4519 :
4520 516 : decision
4521 516 : }
4522 :
4523 1032 : #[tracing::instrument(skip_all, fields(%lsn, %mode))]
4524 : async fn create_image_layers(
4525 : self: &Arc<Timeline>,
4526 : partitioning: &KeyPartitioning,
4527 : lsn: Lsn,
4528 : mode: ImageLayerCreationMode,
4529 : ctx: &RequestContext,
4530 : ) -> Result<Vec<ResidentLayer>, CreateImageLayersError> {
4531 : let timer = self.metrics.create_images_time_histo.start_timer();
4532 : let mut image_layers = Vec::new();
4533 :
4534 : // We need to avoid holes between generated image layers.
4535 : // Otherwise LayerMap::image_layer_exists will return false if key range of some layer is covered by more than one
4536 : // image layer with hole between them. In this case such layer can not be utilized by GC.
4537 : //
4538 : // How such hole between partitions can appear?
4539 : // if we have relation with relid=1 and size 100 and relation with relid=2 with size 200 then result of
4540 : // KeySpace::partition may contain partitions <100000000..100000099> and <200000000..200000199>.
4541 : // If there is delta layer <100000000..300000000> then it never be garbage collected because
4542 : // image layers <100000000..100000099> and <200000000..200000199> are not completely covering it.
4543 : let mut start = Key::MIN;
4544 :
4545 : let check_for_image_layers = self.should_check_if_image_layers_required(lsn);
4546 :
4547 : for partition in partitioning.parts.iter() {
4548 : let img_range = start..partition.ranges.last().unwrap().end;
4549 : let compact_metadata = partition.overlaps(&Key::metadata_key_range());
4550 : if compact_metadata {
4551 : for range in &partition.ranges {
4552 : assert!(
4553 : range.start.field1 >= METADATA_KEY_BEGIN_PREFIX
4554 : && range.end.field1 <= METADATA_KEY_END_PREFIX,
4555 : "metadata keys must be partitioned separately"
4556 : );
4557 : }
4558 : if mode == ImageLayerCreationMode::Initial {
4559 : return Err(CreateImageLayersError::Other(anyhow::anyhow!("no image layer should be created for metadata keys when flushing frozen layers")));
4560 : }
4561 : if mode == ImageLayerCreationMode::Try && !check_for_image_layers {
4562 : // Skip compaction if there are not enough updates. Metadata compaction will do a scan and
4563 : // might mess up with evictions.
4564 : start = img_range.end;
4565 : continue;
4566 : }
4567 : } else if let ImageLayerCreationMode::Try = mode {
4568 : // check_for_image_layers = false -> skip
4569 : // check_for_image_layers = true -> check time_for_new_image_layer -> skip/generate
4570 : if !check_for_image_layers || !self.time_for_new_image_layer(partition, lsn).await {
4571 : start = img_range.end;
4572 : continue;
4573 : }
4574 : } else if let ImageLayerCreationMode::Force = mode {
4575 : // When forced to create image layers, we might try and create them where they already
4576 : // exist. This mode is only used in tests/debug.
4577 : let layers = self.layers.read().await;
4578 : if layers.contains_key(&PersistentLayerKey {
4579 : key_range: img_range.clone(),
4580 : lsn_range: PersistentLayerDesc::image_layer_lsn_range(lsn),
4581 : is_delta: false,
4582 : }) {
4583 : tracing::info!(
4584 : "Skipping image layer at {lsn} {}..{}, already exists",
4585 : img_range.start,
4586 : img_range.end
4587 : );
4588 : continue;
4589 : }
4590 : }
4591 :
4592 : let image_layer_writer = ImageLayerWriter::new(
4593 : self.conf,
4594 : self.timeline_id,
4595 : self.tenant_shard_id,
4596 : &img_range,
4597 : lsn,
4598 : ctx,
4599 : )
4600 : .await?;
4601 :
4602 0 : fail_point!("image-layer-writer-fail-before-finish", |_| {
4603 0 : Err(CreateImageLayersError::Other(anyhow::anyhow!(
4604 0 : "failpoint image-layer-writer-fail-before-finish"
4605 0 : )))
4606 0 : });
4607 :
4608 : if !compact_metadata {
4609 : let ImageLayerCreationOutcome {
4610 : image,
4611 : next_start_key,
4612 : } = self
4613 : .create_image_layer_for_rel_blocks(
4614 : partition,
4615 : image_layer_writer,
4616 : lsn,
4617 : ctx,
4618 : img_range,
4619 : start,
4620 : )
4621 : .await?;
4622 :
4623 : start = next_start_key;
4624 : image_layers.extend(image);
4625 : } else {
4626 : let ImageLayerCreationOutcome {
4627 : image,
4628 : next_start_key,
4629 : } = self
4630 : .create_image_layer_for_metadata_keys(
4631 : partition,
4632 : image_layer_writer,
4633 : lsn,
4634 : ctx,
4635 : img_range,
4636 : mode,
4637 : start,
4638 : )
4639 : .await?;
4640 : start = next_start_key;
4641 : image_layers.extend(image);
4642 : }
4643 : }
4644 :
4645 : // The writer.finish() above already did the fsync of the inodes.
4646 : // We just need to fsync the directory in which these inodes are linked,
4647 : // which we know to be the timeline directory.
4648 : if !image_layers.is_empty() {
4649 : // We use fatal_err() below because the after writer.finish() returns with success,
4650 : // the in-memory state of the filesystem already has the layer file in its final place,
4651 : // and subsequent pageserver code could think it's durable while it really isn't.
4652 : let timeline_dir = VirtualFile::open(
4653 : &self
4654 : .conf
4655 : .timeline_path(&self.tenant_shard_id, &self.timeline_id),
4656 : ctx,
4657 : )
4658 : .await
4659 : .fatal_err("VirtualFile::open for timeline dir fsync");
4660 : timeline_dir
4661 : .sync_all()
4662 : .await
4663 : .fatal_err("VirtualFile::sync_all timeline dir");
4664 : }
4665 :
4666 : let mut guard = self.layers.write().await;
4667 :
4668 : // FIXME: we could add the images to be uploaded *before* returning from here, but right
4669 : // now they are being scheduled outside of write lock
4670 : guard.track_new_image_layers(&image_layers, &self.metrics);
4671 : drop_wlock(guard);
4672 : timer.stop_and_record();
4673 :
4674 : Ok(image_layers)
4675 : }
4676 :
4677 : /// Wait until the background initial logical size calculation is complete, or
4678 : /// this Timeline is shut down. Calling this function will cause the initial
4679 : /// logical size calculation to skip waiting for the background jobs barrier.
4680 0 : pub(crate) async fn await_initial_logical_size(self: Arc<Self>) {
4681 0 : if !self.shard_identity.is_shard_zero() {
4682 : // We don't populate logical size on shard >0: skip waiting for it.
4683 0 : return;
4684 0 : }
4685 0 :
4686 0 : if self.remote_client.is_deleting() {
4687 : // The timeline was created in a deletion-resume state, we don't expect logical size to be populated
4688 0 : return;
4689 0 : }
4690 :
4691 0 : if let Some(await_bg_cancel) = self
4692 0 : .current_logical_size
4693 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
4694 0 : .get()
4695 0 : {
4696 0 : await_bg_cancel.cancel();
4697 0 : } else {
4698 : // We should not wait if we were not able to explicitly instruct
4699 : // the logical size cancellation to skip the concurrency limit semaphore.
4700 : // TODO: this is an unexpected case. We should restructure so that it
4701 : // can't happen.
4702 0 : tracing::warn!(
4703 0 : "await_initial_logical_size: can't get semaphore cancel token, skipping"
4704 : );
4705 0 : debug_assert!(false);
4706 : }
4707 :
4708 : tokio::select!(
4709 : _ = self.current_logical_size.initialized.acquire() => {},
4710 : _ = self.cancel.cancelled() => {}
4711 : )
4712 0 : }
4713 :
4714 : /// Detach this timeline from its ancestor by copying all of ancestors layers as this
4715 : /// Timelines layers up to the ancestor_lsn.
4716 : ///
4717 : /// Requires a timeline that:
4718 : /// - has an ancestor to detach from
4719 : /// - the ancestor does not have an ancestor -- follows from the original RFC limitations, not
4720 : /// a technical requirement
4721 : ///
4722 : /// After the operation has been started, it cannot be canceled. Upon restart it needs to be
4723 : /// polled again until completion.
4724 : ///
4725 : /// During the operation all timelines sharing the data with this timeline will be reparented
4726 : /// from our ancestor to be branches of this timeline.
4727 0 : pub(crate) async fn prepare_to_detach_from_ancestor(
4728 0 : self: &Arc<Timeline>,
4729 0 : tenant: &crate::tenant::Tenant,
4730 0 : options: detach_ancestor::Options,
4731 0 : ctx: &RequestContext,
4732 0 : ) -> Result<detach_ancestor::Progress, detach_ancestor::Error> {
4733 0 : detach_ancestor::prepare(self, tenant, options, ctx).await
4734 0 : }
4735 :
4736 : /// Completes the ancestor detach. This method is to be called while holding the
4737 : /// TenantManager's tenant slot, so during this method we cannot be deleted nor can any
4738 : /// timeline be deleted. After this method returns successfully, tenant must be reloaded.
4739 : ///
4740 : /// Pageserver receiving a SIGKILL during this operation is not supported (yet).
4741 0 : pub(crate) async fn complete_detaching_timeline_ancestor(
4742 0 : self: &Arc<Timeline>,
4743 0 : tenant: &crate::tenant::Tenant,
4744 0 : prepared: detach_ancestor::PreparedTimelineDetach,
4745 0 : ctx: &RequestContext,
4746 0 : ) -> Result<Vec<TimelineId>, anyhow::Error> {
4747 0 : detach_ancestor::complete(self, tenant, prepared, ctx).await
4748 0 : }
4749 :
4750 : /// Switch aux file policy and schedule upload to the index part.
4751 16 : pub(crate) fn do_switch_aux_policy(&self, policy: AuxFilePolicy) -> anyhow::Result<()> {
4752 16 : self.last_aux_file_policy.store(Some(policy));
4753 16 : self.remote_client
4754 16 : .schedule_index_upload_for_aux_file_policy_update(Some(policy))?;
4755 16 : Ok(())
4756 16 : }
4757 : }
4758 :
4759 : /// Top-level failure to compact.
4760 0 : #[derive(Debug, thiserror::Error)]
4761 : pub(crate) enum CompactionError {
4762 : #[error("The timeline or pageserver is shutting down")]
4763 : ShuttingDown,
4764 : /// Compaction cannot be done right now; page reconstruction and so on.
4765 : #[error(transparent)]
4766 : Other(#[from] anyhow::Error),
4767 : }
4768 :
4769 : impl From<CollectKeySpaceError> for CompactionError {
4770 0 : fn from(err: CollectKeySpaceError) -> Self {
4771 0 : match err {
4772 : CollectKeySpaceError::Cancelled
4773 : | CollectKeySpaceError::PageRead(PageReconstructError::Cancelled) => {
4774 0 : CompactionError::ShuttingDown
4775 : }
4776 0 : e => CompactionError::Other(e.into()),
4777 : }
4778 0 : }
4779 : }
4780 :
4781 : #[serde_as]
4782 196 : #[derive(serde::Serialize)]
4783 : struct RecordedDuration(#[serde_as(as = "serde_with::DurationMicroSeconds")] Duration);
4784 :
4785 : #[derive(Default)]
4786 : enum DurationRecorder {
4787 : #[default]
4788 : NotStarted,
4789 : Recorded(RecordedDuration, tokio::time::Instant),
4790 : }
4791 :
4792 : impl DurationRecorder {
4793 504 : fn till_now(&self) -> DurationRecorder {
4794 504 : match self {
4795 : DurationRecorder::NotStarted => {
4796 0 : panic!("must only call on recorded measurements")
4797 : }
4798 504 : DurationRecorder::Recorded(_, ended) => {
4799 504 : let now = tokio::time::Instant::now();
4800 504 : DurationRecorder::Recorded(RecordedDuration(now - *ended), now)
4801 504 : }
4802 504 : }
4803 504 : }
4804 196 : fn into_recorded(self) -> Option<RecordedDuration> {
4805 196 : match self {
4806 0 : DurationRecorder::NotStarted => None,
4807 196 : DurationRecorder::Recorded(recorded, _) => Some(recorded),
4808 : }
4809 196 : }
4810 : }
4811 :
4812 : /// Descriptor for a delta layer used in testing infra. The start/end key/lsn range of the
4813 : /// delta layer might be different from the min/max key/lsn in the delta layer. Therefore,
4814 : /// the layer descriptor requires the user to provide the ranges, which should cover all
4815 : /// keys specified in the `data` field.
4816 : #[cfg(test)]
4817 : pub struct DeltaLayerTestDesc {
4818 : pub lsn_range: Range<Lsn>,
4819 : pub key_range: Range<Key>,
4820 : pub data: Vec<(Key, Lsn, Value)>,
4821 : }
4822 :
4823 : #[cfg(test)]
4824 : impl DeltaLayerTestDesc {
4825 : #[allow(dead_code)]
4826 0 : pub fn new(lsn_range: Range<Lsn>, key_range: Range<Key>, data: Vec<(Key, Lsn, Value)>) -> Self {
4827 0 : Self {
4828 0 : lsn_range,
4829 0 : key_range,
4830 0 : data,
4831 0 : }
4832 0 : }
4833 :
4834 34 : pub fn new_with_inferred_key_range(
4835 34 : lsn_range: Range<Lsn>,
4836 34 : data: Vec<(Key, Lsn, Value)>,
4837 34 : ) -> Self {
4838 72 : let key_min = data.iter().map(|(key, _, _)| key).min().unwrap();
4839 72 : let key_max = data.iter().map(|(key, _, _)| key).max().unwrap();
4840 34 : Self {
4841 34 : key_range: (*key_min)..(key_max.next()),
4842 34 : lsn_range,
4843 34 : data,
4844 34 : }
4845 34 : }
4846 : }
4847 :
4848 : impl Timeline {
4849 28 : async fn finish_compact_batch(
4850 28 : self: &Arc<Self>,
4851 28 : new_deltas: &[ResidentLayer],
4852 28 : new_images: &[ResidentLayer],
4853 28 : layers_to_remove: &[Layer],
4854 28 : ) -> anyhow::Result<()> {
4855 28 : let mut guard = self.layers.write().await;
4856 :
4857 28 : let mut duplicated_layers = HashSet::new();
4858 28 :
4859 28 : let mut insert_layers = Vec::with_capacity(new_deltas.len());
4860 :
4861 336 : for l in new_deltas {
4862 308 : if guard.contains(l.as_ref()) {
4863 : // expected in tests
4864 0 : tracing::error!(layer=%l, "duplicated L1 layer");
4865 :
4866 : // good ways to cause a duplicate: we repeatedly error after taking the writelock
4867 : // `guard` on self.layers. as of writing this, there are no error returns except
4868 : // for compact_level0_phase1 creating an L0, which does not happen in practice
4869 : // because we have not implemented L0 => L0 compaction.
4870 0 : duplicated_layers.insert(l.layer_desc().key());
4871 308 : } else if LayerMap::is_l0(l.layer_desc()) {
4872 0 : bail!("compaction generates a L0 layer file as output, which will cause infinite compaction.");
4873 308 : } else {
4874 308 : insert_layers.push(l.clone());
4875 308 : }
4876 : }
4877 :
4878 : // only remove those inputs which were not outputs
4879 28 : let remove_layers: Vec<Layer> = layers_to_remove
4880 28 : .iter()
4881 402 : .filter(|l| !duplicated_layers.contains(&l.layer_desc().key()))
4882 28 : .cloned()
4883 28 : .collect();
4884 28 :
4885 28 : if !new_images.is_empty() {
4886 0 : guard.track_new_image_layers(new_images, &self.metrics);
4887 28 : }
4888 :
4889 : // deletion will happen later, the layer file manager calls garbage_collect_on_drop
4890 28 : guard.finish_compact_l0(&remove_layers, &insert_layers, &self.metrics);
4891 28 :
4892 28 : self.remote_client
4893 28 : .schedule_compaction_update(&remove_layers, new_deltas)?;
4894 :
4895 28 : drop_wlock(guard);
4896 28 :
4897 28 : Ok(())
4898 28 : }
4899 :
4900 0 : async fn rewrite_layers(
4901 0 : self: &Arc<Self>,
4902 0 : mut replace_layers: Vec<(Layer, ResidentLayer)>,
4903 0 : mut drop_layers: Vec<Layer>,
4904 0 : ) -> anyhow::Result<()> {
4905 0 : let mut guard = self.layers.write().await;
4906 :
4907 : // Trim our lists in case our caller (compaction) raced with someone else (GC) removing layers: we want
4908 : // to avoid double-removing, and avoid rewriting something that was removed.
4909 0 : replace_layers.retain(|(l, _)| guard.contains(l));
4910 0 : drop_layers.retain(|l| guard.contains(l));
4911 0 :
4912 0 : guard.rewrite_layers(&replace_layers, &drop_layers, &self.metrics);
4913 0 :
4914 0 : let upload_layers: Vec<_> = replace_layers.into_iter().map(|r| r.1).collect();
4915 0 :
4916 0 : self.remote_client
4917 0 : .schedule_compaction_update(&drop_layers, &upload_layers)?;
4918 :
4919 0 : Ok(())
4920 0 : }
4921 :
4922 : /// Schedules the uploads of the given image layers
4923 364 : fn upload_new_image_layers(
4924 364 : self: &Arc<Self>,
4925 364 : new_images: impl IntoIterator<Item = ResidentLayer>,
4926 364 : ) -> anyhow::Result<()> {
4927 390 : for layer in new_images {
4928 26 : self.remote_client.schedule_layer_file_upload(layer)?;
4929 : }
4930 : // should any new image layer been created, not uploading index_part will
4931 : // result in a mismatch between remote_physical_size and layermap calculated
4932 : // size, which will fail some tests, but should not be an issue otherwise.
4933 364 : self.remote_client
4934 364 : .schedule_index_upload_for_file_changes()?;
4935 364 : Ok(())
4936 364 : }
4937 :
4938 : /// Find the Lsns above which layer files need to be retained on
4939 : /// garbage collection.
4940 : ///
4941 : /// We calculate two cutoffs, one based on time and one based on WAL size. `pitr`
4942 : /// controls the time cutoff (or ZERO to disable time-based retention), and `space_cutoff` controls
4943 : /// the space-based retention.
4944 : ///
4945 : /// This function doesn't simply to calculate time & space based retention: it treats time-based
4946 : /// retention as authoritative if enabled, and falls back to space-based retention if calculating
4947 : /// the LSN for a time point isn't possible. Therefore the GcCutoffs::horizon in the response might
4948 : /// be different to the `space_cutoff` input. Callers should treat the min() of the two cutoffs
4949 : /// in the response as the GC cutoff point for the timeline.
4950 1508 : #[instrument(skip_all, fields(timeline_id=%self.timeline_id))]
4951 : pub(super) async fn find_gc_cutoffs(
4952 : &self,
4953 : space_cutoff: Lsn,
4954 : pitr: Duration,
4955 : cancel: &CancellationToken,
4956 : ctx: &RequestContext,
4957 : ) -> Result<GcCutoffs, PageReconstructError> {
4958 : let _timer = self
4959 : .metrics
4960 : .find_gc_cutoffs_histo
4961 : .start_timer()
4962 : .record_on_drop();
4963 :
4964 : pausable_failpoint!("Timeline::find_gc_cutoffs-pausable");
4965 :
4966 : if cfg!(test) {
4967 : // Unit tests which specify zero PITR interval expect to avoid doing any I/O for timestamp lookup
4968 : if pitr == Duration::ZERO {
4969 : return Ok(GcCutoffs {
4970 : time: self.get_last_record_lsn(),
4971 : space: space_cutoff,
4972 : });
4973 : }
4974 : }
4975 :
4976 : // Calculate a time-based limit on how much to retain:
4977 : // - if PITR interval is set, then this is our cutoff.
4978 : // - if PITR interval is not set, then we do a lookup
4979 : // based on DEFAULT_PITR_INTERVAL, so that size-based retention does not result in keeping history around permanently on idle databases.
4980 : let time_cutoff = {
4981 : let now = SystemTime::now();
4982 : let time_range = if pitr == Duration::ZERO {
4983 : humantime::parse_duration(DEFAULT_PITR_INTERVAL).expect("constant is invalid")
4984 : } else {
4985 : pitr
4986 : };
4987 :
4988 : // If PITR is so large or `now` is so small that this underflows, we will retain no history (highly unexpected case)
4989 : let time_cutoff = now.checked_sub(time_range).unwrap_or(now);
4990 : let timestamp = to_pg_timestamp(time_cutoff);
4991 :
4992 : match self.find_lsn_for_timestamp(timestamp, cancel, ctx).await? {
4993 : LsnForTimestamp::Present(lsn) => Some(lsn),
4994 : LsnForTimestamp::Future(lsn) => {
4995 : // The timestamp is in the future. That sounds impossible,
4996 : // but what it really means is that there hasn't been
4997 : // any commits since the cutoff timestamp.
4998 : //
4999 : // In this case we should use the LSN of the most recent commit,
5000 : // which is implicitly the last LSN in the log.
5001 : debug!("future({})", lsn);
5002 : Some(self.get_last_record_lsn())
5003 : }
5004 : LsnForTimestamp::Past(lsn) => {
5005 : debug!("past({})", lsn);
5006 : None
5007 : }
5008 : LsnForTimestamp::NoData(lsn) => {
5009 : debug!("nodata({})", lsn);
5010 : None
5011 : }
5012 : }
5013 : };
5014 :
5015 : Ok(match (pitr, time_cutoff) {
5016 : (Duration::ZERO, Some(time_cutoff)) => {
5017 : // PITR is not set. Retain the size-based limit, or the default time retention,
5018 : // whichever requires less data.
5019 : GcCutoffs {
5020 : time: self.get_last_record_lsn(),
5021 : space: std::cmp::max(time_cutoff, space_cutoff),
5022 : }
5023 : }
5024 : (Duration::ZERO, None) => {
5025 : // PITR is not set, and time lookup failed
5026 : GcCutoffs {
5027 : time: self.get_last_record_lsn(),
5028 : space: space_cutoff,
5029 : }
5030 : }
5031 : (_, None) => {
5032 : // PITR interval is set & we didn't look up a timestamp successfully. Conservatively assume PITR
5033 : // cannot advance beyond what was already GC'd, and respect space-based retention
5034 : GcCutoffs {
5035 : time: *self.get_latest_gc_cutoff_lsn(),
5036 : space: space_cutoff,
5037 : }
5038 : }
5039 : (_, Some(time_cutoff)) => {
5040 : // PITR interval is set and we looked up timestamp successfully. Ignore
5041 : // size based retention and make time cutoff authoritative
5042 : GcCutoffs {
5043 : time: time_cutoff,
5044 : space: time_cutoff,
5045 : }
5046 : }
5047 : })
5048 : }
5049 :
5050 : /// Garbage collect layer files on a timeline that are no longer needed.
5051 : ///
5052 : /// Currently, we don't make any attempt at removing unneeded page versions
5053 : /// within a layer file. We can only remove the whole file if it's fully
5054 : /// obsolete.
5055 754 : pub(super) async fn gc(&self) -> Result<GcResult, GcError> {
5056 : // this is most likely the background tasks, but it might be the spawned task from
5057 : // immediate_gc
5058 753 : let _g = tokio::select! {
5059 : guard = self.gc_lock.lock() => guard,
5060 : _ = self.cancel.cancelled() => return Ok(GcResult::default()),
5061 : };
5062 753 : let timer = self.metrics.garbage_collect_histo.start_timer();
5063 :
5064 : fail_point!("before-timeline-gc");
5065 :
5066 : // Is the timeline being deleted?
5067 753 : if self.is_stopping() {
5068 0 : return Err(GcError::TimelineCancelled);
5069 753 : }
5070 753 :
5071 753 : let (space_cutoff, time_cutoff, retain_lsns, max_lsn_with_valid_lease) = {
5072 753 : let gc_info = self.gc_info.read().unwrap();
5073 753 :
5074 753 : let space_cutoff = min(gc_info.cutoffs.space, self.get_disk_consistent_lsn());
5075 753 : let time_cutoff = gc_info.cutoffs.time;
5076 753 : let retain_lsns = gc_info.retain_lsns.clone();
5077 753 :
5078 753 : // Gets the maximum LSN that holds the valid lease.
5079 753 : //
5080 753 : // Caveat: `refresh_gc_info` is in charged of updating the lease map.
5081 753 : // Here, we do not check for stale leases again.
5082 753 : let max_lsn_with_valid_lease = gc_info.leases.last_key_value().map(|(lsn, _)| *lsn);
5083 753 :
5084 753 : (
5085 753 : space_cutoff,
5086 753 : time_cutoff,
5087 753 : retain_lsns,
5088 753 : max_lsn_with_valid_lease,
5089 753 : )
5090 753 : };
5091 753 :
5092 753 : let mut new_gc_cutoff = Lsn::min(space_cutoff, time_cutoff);
5093 753 : let standby_horizon = self.standby_horizon.load();
5094 753 : // Hold GC for the standby, but as a safety guard do it only within some
5095 753 : // reasonable lag.
5096 753 : if standby_horizon != Lsn::INVALID {
5097 0 : if let Some(standby_lag) = new_gc_cutoff.checked_sub(standby_horizon) {
5098 : const MAX_ALLOWED_STANDBY_LAG: u64 = 10u64 << 30; // 10 GB
5099 0 : if standby_lag.0 < MAX_ALLOWED_STANDBY_LAG {
5100 0 : new_gc_cutoff = Lsn::min(standby_horizon, new_gc_cutoff);
5101 0 : trace!("holding off GC for standby apply LSN {}", standby_horizon);
5102 : } else {
5103 0 : warn!(
5104 0 : "standby is lagging for more than {}MB, not holding gc for it",
5105 0 : MAX_ALLOWED_STANDBY_LAG / 1024 / 1024
5106 : )
5107 : }
5108 0 : }
5109 753 : }
5110 :
5111 : // Reset standby horizon to ignore it if it is not updated till next GC.
5112 : // It is an easy way to unset it when standby disappears without adding
5113 : // more conf options.
5114 753 : self.standby_horizon.store(Lsn::INVALID);
5115 753 : self.metrics
5116 753 : .standby_horizon_gauge
5117 753 : .set(Lsn::INVALID.0 as i64);
5118 :
5119 753 : let res = self
5120 753 : .gc_timeline(
5121 753 : space_cutoff,
5122 753 : time_cutoff,
5123 753 : retain_lsns,
5124 753 : max_lsn_with_valid_lease,
5125 753 : new_gc_cutoff,
5126 753 : )
5127 753 : .instrument(
5128 753 : info_span!("gc_timeline", timeline_id = %self.timeline_id, cutoff = %new_gc_cutoff),
5129 : )
5130 0 : .await?;
5131 :
5132 : // only record successes
5133 753 : timer.stop_and_record();
5134 753 :
5135 753 : Ok(res)
5136 754 : }
5137 :
5138 753 : async fn gc_timeline(
5139 753 : &self,
5140 753 : space_cutoff: Lsn,
5141 753 : time_cutoff: Lsn,
5142 753 : retain_lsns: Vec<Lsn>,
5143 753 : max_lsn_with_valid_lease: Option<Lsn>,
5144 753 : new_gc_cutoff: Lsn,
5145 753 : ) -> Result<GcResult, GcError> {
5146 753 : // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc
5147 753 :
5148 753 : let now = SystemTime::now();
5149 753 : let mut result: GcResult = GcResult::default();
5150 753 :
5151 753 : // Nothing to GC. Return early.
5152 753 : let latest_gc_cutoff = *self.get_latest_gc_cutoff_lsn();
5153 753 : if latest_gc_cutoff >= new_gc_cutoff {
5154 22 : info!(
5155 0 : "Nothing to GC: new_gc_cutoff_lsn {new_gc_cutoff}, latest_gc_cutoff_lsn {latest_gc_cutoff}",
5156 : );
5157 22 : return Ok(result);
5158 731 : }
5159 :
5160 : // We need to ensure that no one tries to read page versions or create
5161 : // branches at a point before latest_gc_cutoff_lsn. See branch_timeline()
5162 : // for details. This will block until the old value is no longer in use.
5163 : //
5164 : // The GC cutoff should only ever move forwards.
5165 731 : let waitlist = {
5166 731 : let write_guard = self.latest_gc_cutoff_lsn.lock_for_write();
5167 731 : if *write_guard > new_gc_cutoff {
5168 0 : return Err(GcError::BadLsn {
5169 0 : why: format!(
5170 0 : "Cannot move GC cutoff LSN backwards (was {}, new {})",
5171 0 : *write_guard, new_gc_cutoff
5172 0 : ),
5173 0 : });
5174 731 : }
5175 731 :
5176 731 : write_guard.store_and_unlock(new_gc_cutoff)
5177 731 : };
5178 731 : waitlist.wait().await;
5179 :
5180 731 : info!("GC starting");
5181 :
5182 731 : debug!("retain_lsns: {:?}", retain_lsns);
5183 :
5184 731 : let mut layers_to_remove = Vec::new();
5185 :
5186 : // Scan all layers in the timeline (remote or on-disk).
5187 : //
5188 : // Garbage collect the layer if all conditions are satisfied:
5189 : // 1. it is older than cutoff LSN;
5190 : // 2. it is older than PITR interval;
5191 : // 3. it doesn't need to be retained for 'retain_lsns';
5192 : // 4. it does not need to be kept for LSNs holding valid leases.
5193 : // 5. newer on-disk image layers cover the layer's whole key range
5194 : //
5195 : // TODO holding a write lock is too agressive and avoidable
5196 731 : let mut guard = self.layers.write().await;
5197 731 : let layers = guard.layer_map();
5198 12415 : 'outer: for l in layers.iter_historic_layers() {
5199 12415 : result.layers_total += 1;
5200 12415 :
5201 12415 : // 1. Is it newer than GC horizon cutoff point?
5202 12415 : if l.get_lsn_range().end > space_cutoff {
5203 741 : debug!(
5204 0 : "keeping {} because it's newer than space_cutoff {}",
5205 0 : l.layer_name(),
5206 : space_cutoff,
5207 : );
5208 741 : result.layers_needed_by_cutoff += 1;
5209 741 : continue 'outer;
5210 11674 : }
5211 11674 :
5212 11674 : // 2. It is newer than PiTR cutoff point?
5213 11674 : if l.get_lsn_range().end > time_cutoff {
5214 0 : debug!(
5215 0 : "keeping {} because it's newer than time_cutoff {}",
5216 0 : l.layer_name(),
5217 : time_cutoff,
5218 : );
5219 0 : result.layers_needed_by_pitr += 1;
5220 0 : continue 'outer;
5221 11674 : }
5222 :
5223 : // 3. Is it needed by a child branch?
5224 : // NOTE With that we would keep data that
5225 : // might be referenced by child branches forever.
5226 : // We can track this in child timeline GC and delete parent layers when
5227 : // they are no longer needed. This might be complicated with long inheritance chains.
5228 : //
5229 : // TODO Vec is not a great choice for `retain_lsns`
5230 11674 : for retain_lsn in &retain_lsns {
5231 : // start_lsn is inclusive
5232 10 : if &l.get_lsn_range().start <= retain_lsn {
5233 10 : debug!(
5234 0 : "keeping {} because it's still might be referenced by child branch forked at {} is_dropped: xx is_incremental: {}",
5235 0 : l.layer_name(),
5236 0 : retain_lsn,
5237 0 : l.is_incremental(),
5238 : );
5239 10 : result.layers_needed_by_branches += 1;
5240 10 : continue 'outer;
5241 0 : }
5242 : }
5243 :
5244 : // 4. Is there a valid lease that requires us to keep this layer?
5245 11664 : if let Some(lsn) = &max_lsn_with_valid_lease {
5246 : // keep if layer start <= any of the lease
5247 18 : if &l.get_lsn_range().start <= lsn {
5248 14 : debug!(
5249 0 : "keeping {} because there is a valid lease preventing GC at {}",
5250 0 : l.layer_name(),
5251 : lsn,
5252 : );
5253 14 : result.layers_needed_by_leases += 1;
5254 14 : continue 'outer;
5255 4 : }
5256 11646 : }
5257 :
5258 : // 5. Is there a later on-disk layer for this relation?
5259 : //
5260 : // The end-LSN is exclusive, while disk_consistent_lsn is
5261 : // inclusive. For example, if disk_consistent_lsn is 100, it is
5262 : // OK for a delta layer to have end LSN 101, but if the end LSN
5263 : // is 102, then it might not have been fully flushed to disk
5264 : // before crash.
5265 : //
5266 : // For example, imagine that the following layers exist:
5267 : //
5268 : // 1000 - image (A)
5269 : // 1000-2000 - delta (B)
5270 : // 2000 - image (C)
5271 : // 2000-3000 - delta (D)
5272 : // 3000 - image (E)
5273 : //
5274 : // If GC horizon is at 2500, we can remove layers A and B, but
5275 : // we cannot remove C, even though it's older than 2500, because
5276 : // the delta layer 2000-3000 depends on it.
5277 11650 : if !layers
5278 11650 : .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff))
5279 : {
5280 11642 : debug!("keeping {} because it is the latest layer", l.layer_name());
5281 11642 : result.layers_not_updated += 1;
5282 11642 : continue 'outer;
5283 8 : }
5284 8 :
5285 8 : // We didn't find any reason to keep this file, so remove it.
5286 8 : debug!(
5287 0 : "garbage collecting {} is_dropped: xx is_incremental: {}",
5288 0 : l.layer_name(),
5289 0 : l.is_incremental(),
5290 : );
5291 8 : layers_to_remove.push(l);
5292 : }
5293 :
5294 731 : if !layers_to_remove.is_empty() {
5295 : // Persist the new GC cutoff value before we actually remove anything.
5296 : // This unconditionally schedules also an index_part.json update, even though, we will
5297 : // be doing one a bit later with the unlinked gc'd layers.
5298 6 : let disk_consistent_lsn = self.disk_consistent_lsn.load();
5299 6 : self.schedule_uploads(disk_consistent_lsn, None)
5300 6 : .map_err(|e| {
5301 0 : if self.cancel.is_cancelled() {
5302 0 : GcError::TimelineCancelled
5303 : } else {
5304 0 : GcError::Remote(e)
5305 : }
5306 6 : })?;
5307 :
5308 6 : let gc_layers = layers_to_remove
5309 6 : .iter()
5310 8 : .map(|x| guard.get_from_desc(x))
5311 6 : .collect::<Vec<Layer>>();
5312 6 :
5313 6 : result.layers_removed = gc_layers.len() as u64;
5314 6 :
5315 6 : self.remote_client
5316 6 : .schedule_gc_update(&gc_layers)
5317 6 : .map_err(|e| {
5318 0 : if self.cancel.is_cancelled() {
5319 0 : GcError::TimelineCancelled
5320 : } else {
5321 0 : GcError::Remote(e)
5322 : }
5323 6 : })?;
5324 :
5325 6 : guard.finish_gc_timeline(&gc_layers);
5326 6 :
5327 6 : #[cfg(feature = "testing")]
5328 6 : {
5329 6 : result.doomed_layers = gc_layers;
5330 6 : }
5331 725 : }
5332 :
5333 731 : info!(
5334 0 : "GC completed removing {} layers, cutoff {}",
5335 : result.layers_removed, new_gc_cutoff
5336 : );
5337 :
5338 731 : result.elapsed = now.elapsed().unwrap_or(Duration::ZERO);
5339 731 : Ok(result)
5340 753 : }
5341 :
5342 : /// Reconstruct a value, using the given base image and WAL records in 'data'.
5343 666378 : async fn reconstruct_value(
5344 666378 : &self,
5345 666378 : key: Key,
5346 666378 : request_lsn: Lsn,
5347 666378 : mut data: ValueReconstructState,
5348 666378 : ) -> Result<Bytes, PageReconstructError> {
5349 666378 : // Perform WAL redo if needed
5350 666378 : data.records.reverse();
5351 666378 :
5352 666378 : // If we have a page image, and no WAL, we're all set
5353 666378 : if data.records.is_empty() {
5354 666320 : if let Some((img_lsn, img)) = &data.img {
5355 666320 : trace!(
5356 0 : "found page image for key {} at {}, no WAL redo required, req LSN {}",
5357 : key,
5358 : img_lsn,
5359 : request_lsn,
5360 : );
5361 666320 : Ok(img.clone())
5362 : } else {
5363 0 : Err(PageReconstructError::from(anyhow!(
5364 0 : "base image for {key} at {request_lsn} not found"
5365 0 : )))
5366 : }
5367 : } else {
5368 : // We need to do WAL redo.
5369 : //
5370 : // If we don't have a base image, then the oldest WAL record better initialize
5371 : // the page
5372 58 : if data.img.is_none() && !data.records.first().unwrap().1.will_init() {
5373 0 : Err(PageReconstructError::from(anyhow!(
5374 0 : "Base image for {} at {} not found, but got {} WAL records",
5375 0 : key,
5376 0 : request_lsn,
5377 0 : data.records.len()
5378 0 : )))
5379 : } else {
5380 58 : if data.img.is_some() {
5381 58 : trace!(
5382 0 : "found {} WAL records and a base image for {} at {}, performing WAL redo",
5383 0 : data.records.len(),
5384 : key,
5385 : request_lsn
5386 : );
5387 : } else {
5388 0 : trace!("found {} WAL records that will init the page for {} at {}, performing WAL redo", data.records.len(), key, request_lsn);
5389 : };
5390 :
5391 58 : let img = match self
5392 58 : .walredo_mgr
5393 58 : .as_ref()
5394 58 : .context("timeline has no walredo manager")
5395 58 : .map_err(PageReconstructError::WalRedo)?
5396 58 : .request_redo(key, request_lsn, data.img, data.records, self.pg_version)
5397 0 : .await
5398 58 : .context("reconstruct a page image")
5399 : {
5400 58 : Ok(img) => img,
5401 0 : Err(e) => return Err(PageReconstructError::WalRedo(e)),
5402 : };
5403 :
5404 58 : Ok(img)
5405 : }
5406 : }
5407 666378 : }
5408 :
5409 0 : pub(crate) async fn spawn_download_all_remote_layers(
5410 0 : self: Arc<Self>,
5411 0 : request: DownloadRemoteLayersTaskSpawnRequest,
5412 0 : ) -> Result<DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskInfo> {
5413 0 : use pageserver_api::models::DownloadRemoteLayersTaskState;
5414 0 :
5415 0 : // this is not really needed anymore; it has tests which really check the return value from
5416 0 : // http api. it would be better not to maintain this anymore.
5417 0 :
5418 0 : let mut status_guard = self.download_all_remote_layers_task_info.write().unwrap();
5419 0 : if let Some(st) = &*status_guard {
5420 0 : match &st.state {
5421 : DownloadRemoteLayersTaskState::Running => {
5422 0 : return Err(st.clone());
5423 : }
5424 : DownloadRemoteLayersTaskState::ShutDown
5425 0 : | DownloadRemoteLayersTaskState::Completed => {
5426 0 : *status_guard = None;
5427 0 : }
5428 : }
5429 0 : }
5430 :
5431 0 : let self_clone = Arc::clone(&self);
5432 0 : let task_id = task_mgr::spawn(
5433 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
5434 0 : task_mgr::TaskKind::DownloadAllRemoteLayers,
5435 0 : Some(self.tenant_shard_id),
5436 0 : Some(self.timeline_id),
5437 0 : "download all remote layers task",
5438 : false,
5439 0 : async move {
5440 0 : self_clone.download_all_remote_layers(request).await;
5441 0 : let mut status_guard = self_clone.download_all_remote_layers_task_info.write().unwrap();
5442 0 : match &mut *status_guard {
5443 : None => {
5444 0 : warn!("tasks status is supposed to be Some(), since we are running");
5445 : }
5446 0 : Some(st) => {
5447 0 : let exp_task_id = format!("{}", task_mgr::current_task_id().unwrap());
5448 0 : if st.task_id != exp_task_id {
5449 0 : warn!("task id changed while we were still running, expecting {} but have {}", exp_task_id, st.task_id);
5450 0 : } else {
5451 0 : st.state = DownloadRemoteLayersTaskState::Completed;
5452 0 : }
5453 : }
5454 : };
5455 0 : Ok(())
5456 0 : }
5457 0 : .instrument(info_span!(parent: None, "download_all_remote_layers", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
5458 : );
5459 :
5460 0 : let initial_info = DownloadRemoteLayersTaskInfo {
5461 0 : task_id: format!("{task_id}"),
5462 0 : state: DownloadRemoteLayersTaskState::Running,
5463 0 : total_layer_count: 0,
5464 0 : successful_download_count: 0,
5465 0 : failed_download_count: 0,
5466 0 : };
5467 0 : *status_guard = Some(initial_info.clone());
5468 0 :
5469 0 : Ok(initial_info)
5470 0 : }
5471 :
5472 0 : async fn download_all_remote_layers(
5473 0 : self: &Arc<Self>,
5474 0 : request: DownloadRemoteLayersTaskSpawnRequest,
5475 0 : ) {
5476 : use pageserver_api::models::DownloadRemoteLayersTaskState;
5477 :
5478 0 : let remaining = {
5479 0 : let guard = self.layers.read().await;
5480 0 : guard
5481 0 : .layer_map()
5482 0 : .iter_historic_layers()
5483 0 : .map(|desc| guard.get_from_desc(&desc))
5484 0 : .collect::<Vec<_>>()
5485 0 : };
5486 0 : let total_layer_count = remaining.len();
5487 0 :
5488 0 : macro_rules! lock_status {
5489 0 : ($st:ident) => {
5490 0 : let mut st = self.download_all_remote_layers_task_info.write().unwrap();
5491 0 : let st = st
5492 0 : .as_mut()
5493 0 : .expect("this function is only called after the task has been spawned");
5494 0 : assert_eq!(
5495 0 : st.task_id,
5496 0 : format!(
5497 0 : "{}",
5498 0 : task_mgr::current_task_id().expect("we run inside a task_mgr task")
5499 0 : )
5500 0 : );
5501 0 : let $st = st;
5502 0 : };
5503 0 : }
5504 0 :
5505 0 : {
5506 0 : lock_status!(st);
5507 0 : st.total_layer_count = total_layer_count as u64;
5508 0 : }
5509 0 :
5510 0 : let mut remaining = remaining.into_iter();
5511 0 : let mut have_remaining = true;
5512 0 : let mut js = tokio::task::JoinSet::new();
5513 0 :
5514 0 : let cancel = task_mgr::shutdown_token();
5515 0 :
5516 0 : let limit = request.max_concurrent_downloads;
5517 :
5518 : loop {
5519 0 : while js.len() < limit.get() && have_remaining && !cancel.is_cancelled() {
5520 0 : let Some(next) = remaining.next() else {
5521 0 : have_remaining = false;
5522 0 : break;
5523 : };
5524 :
5525 0 : let span = tracing::info_span!("download", layer = %next);
5526 :
5527 0 : js.spawn(
5528 0 : async move {
5529 0 : let res = next.download().await;
5530 0 : (next, res)
5531 0 : }
5532 0 : .instrument(span),
5533 0 : );
5534 : }
5535 :
5536 0 : while let Some(res) = js.join_next().await {
5537 0 : match res {
5538 : Ok((_, Ok(_))) => {
5539 0 : lock_status!(st);
5540 0 : st.successful_download_count += 1;
5541 : }
5542 0 : Ok((layer, Err(e))) => {
5543 0 : tracing::error!(%layer, "download failed: {e:#}");
5544 0 : lock_status!(st);
5545 0 : st.failed_download_count += 1;
5546 : }
5547 0 : Err(je) if je.is_cancelled() => unreachable!("not used here"),
5548 0 : Err(je) if je.is_panic() => {
5549 0 : lock_status!(st);
5550 0 : st.failed_download_count += 1;
5551 : }
5552 0 : Err(je) => tracing::warn!("unknown joinerror: {je:?}"),
5553 : }
5554 : }
5555 :
5556 0 : if js.is_empty() && (!have_remaining || cancel.is_cancelled()) {
5557 0 : break;
5558 0 : }
5559 : }
5560 :
5561 : {
5562 0 : lock_status!(st);
5563 0 : st.state = DownloadRemoteLayersTaskState::Completed;
5564 0 : }
5565 0 : }
5566 :
5567 0 : pub(crate) fn get_download_all_remote_layers_task_info(
5568 0 : &self,
5569 0 : ) -> Option<DownloadRemoteLayersTaskInfo> {
5570 0 : self.download_all_remote_layers_task_info
5571 0 : .read()
5572 0 : .unwrap()
5573 0 : .clone()
5574 0 : }
5575 : }
5576 :
5577 : impl Timeline {
5578 : /// Returns non-remote layers for eviction.
5579 0 : pub(crate) async fn get_local_layers_for_disk_usage_eviction(&self) -> DiskUsageEvictionInfo {
5580 0 : let guard = self.layers.read().await;
5581 0 : let mut max_layer_size: Option<u64> = None;
5582 0 :
5583 0 : let resident_layers = guard
5584 0 : .likely_resident_layers()
5585 0 : .map(|layer| {
5586 0 : let file_size = layer.layer_desc().file_size;
5587 0 : max_layer_size = max_layer_size.map_or(Some(file_size), |m| Some(m.max(file_size)));
5588 0 :
5589 0 : let last_activity_ts = layer.access_stats().latest_activity_or_now();
5590 0 :
5591 0 : EvictionCandidate {
5592 0 : layer: layer.into(),
5593 0 : last_activity_ts,
5594 0 : relative_last_activity: finite_f32::FiniteF32::ZERO,
5595 0 : }
5596 0 : })
5597 0 : .collect();
5598 0 :
5599 0 : DiskUsageEvictionInfo {
5600 0 : max_layer_size,
5601 0 : resident_layers,
5602 0 : }
5603 0 : }
5604 :
5605 1590 : pub(crate) fn get_shard_index(&self) -> ShardIndex {
5606 1590 : ShardIndex {
5607 1590 : shard_number: self.tenant_shard_id.shard_number,
5608 1590 : shard_count: self.tenant_shard_id.shard_count,
5609 1590 : }
5610 1590 : }
5611 :
5612 : #[cfg(test)]
5613 22 : pub(super) fn force_advance_lsn(self: &Arc<Timeline>, new_lsn: Lsn) {
5614 22 : self.last_record_lsn.advance(new_lsn);
5615 22 : }
5616 :
5617 : #[cfg(test)]
5618 2 : pub(super) fn force_set_disk_consistent_lsn(&self, new_value: Lsn) {
5619 2 : self.disk_consistent_lsn.store(new_value);
5620 2 : }
5621 :
5622 : /// Force create an image layer and place it into the layer map.
5623 : ///
5624 : /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
5625 : /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are placed into the layer map in one run.
5626 : #[cfg(test)]
5627 38 : pub(super) async fn force_create_image_layer(
5628 38 : self: &Arc<Timeline>,
5629 38 : lsn: Lsn,
5630 38 : mut images: Vec<(Key, Bytes)>,
5631 38 : check_start_lsn: Option<Lsn>,
5632 38 : ctx: &RequestContext,
5633 38 : ) -> anyhow::Result<()> {
5634 38 : let last_record_lsn = self.get_last_record_lsn();
5635 38 : assert!(
5636 38 : lsn <= last_record_lsn,
5637 0 : "advance last record lsn before inserting a layer, lsn={lsn}, last_record_lsn={last_record_lsn}"
5638 : );
5639 38 : if let Some(check_start_lsn) = check_start_lsn {
5640 38 : assert!(lsn >= check_start_lsn);
5641 0 : }
5642 38 : images.sort_unstable_by(|(ka, _), (kb, _)| ka.cmp(kb));
5643 38 : let min_key = *images.first().map(|(k, _)| k).unwrap();
5644 38 : let end_key = images.last().map(|(k, _)| k).unwrap().next();
5645 38 : let mut image_layer_writer = ImageLayerWriter::new(
5646 38 : self.conf,
5647 38 : self.timeline_id,
5648 38 : self.tenant_shard_id,
5649 38 : &(min_key..end_key),
5650 38 : lsn,
5651 38 : ctx,
5652 38 : )
5653 19 : .await?;
5654 114 : for (key, img) in images {
5655 76 : image_layer_writer.put_image(key, img, ctx).await?;
5656 : }
5657 76 : let image_layer = image_layer_writer.finish(self, ctx).await?;
5658 :
5659 38 : {
5660 38 : let mut guard = self.layers.write().await;
5661 38 : guard.force_insert_layer(image_layer);
5662 38 : }
5663 38 :
5664 38 : Ok(())
5665 38 : }
5666 :
5667 : /// Force create a delta layer and place it into the layer map.
5668 : ///
5669 : /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
5670 : /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are placed into the layer map in one run.
5671 : #[cfg(test)]
5672 34 : pub(super) async fn force_create_delta_layer(
5673 34 : self: &Arc<Timeline>,
5674 34 : mut deltas: DeltaLayerTestDesc,
5675 34 : check_start_lsn: Option<Lsn>,
5676 34 : ctx: &RequestContext,
5677 34 : ) -> anyhow::Result<()> {
5678 34 : let last_record_lsn = self.get_last_record_lsn();
5679 34 : deltas
5680 34 : .data
5681 38 : .sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb)));
5682 34 : assert!(deltas.data.first().unwrap().0 >= deltas.key_range.start);
5683 34 : assert!(deltas.data.last().unwrap().0 < deltas.key_range.end);
5684 106 : for (_, lsn, _) in &deltas.data {
5685 72 : assert!(deltas.lsn_range.start <= *lsn && *lsn < deltas.lsn_range.end);
5686 : }
5687 34 : assert!(
5688 34 : deltas.lsn_range.end <= last_record_lsn,
5689 0 : "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}",
5690 : deltas.lsn_range.end,
5691 : last_record_lsn
5692 : );
5693 34 : if let Some(check_start_lsn) = check_start_lsn {
5694 34 : assert!(deltas.lsn_range.start >= check_start_lsn);
5695 0 : }
5696 : // check if the delta layer does not violate the LSN invariant, the legacy compaction should always produce a batch of
5697 : // layers of the same start/end LSN, and so should the force inserted layer
5698 : {
5699 : /// Checks if a overlaps with b, assume a/b = [start, end).
5700 36 : pub fn overlaps_with<T: Ord>(a: &Range<T>, b: &Range<T>) -> bool {
5701 36 : !(a.end <= b.start || b.end <= a.start)
5702 36 : }
5703 :
5704 34 : let guard = self.layers.read().await;
5705 70 : for layer in guard.layer_map().iter_historic_layers() {
5706 70 : if layer.is_delta()
5707 36 : && overlaps_with(&layer.lsn_range, &deltas.lsn_range)
5708 10 : && layer.lsn_range != deltas.lsn_range
5709 : {
5710 : // If a delta layer overlaps with another delta layer AND their LSN range is not the same, panic
5711 0 : panic!(
5712 0 : "inserted layer violates delta layer LSN invariant: current_lsn_range={}..{}, conflict_lsn_range={}..{}",
5713 0 : deltas.lsn_range.start, deltas.lsn_range.end, layer.lsn_range.start, layer.lsn_range.end
5714 0 : );
5715 70 : }
5716 : }
5717 : }
5718 34 : let mut delta_layer_writer = DeltaLayerWriter::new(
5719 34 : self.conf,
5720 34 : self.timeline_id,
5721 34 : self.tenant_shard_id,
5722 34 : deltas.key_range.start,
5723 34 : deltas.lsn_range,
5724 34 : ctx,
5725 34 : )
5726 17 : .await?;
5727 106 : for (key, lsn, val) in deltas.data {
5728 72 : delta_layer_writer.put_value(key, lsn, val, ctx).await?;
5729 : }
5730 34 : let delta_layer = delta_layer_writer
5731 34 : .finish(deltas.key_range.end, self, ctx)
5732 85 : .await?;
5733 :
5734 34 : {
5735 34 : let mut guard = self.layers.write().await;
5736 34 : guard.force_insert_layer(delta_layer);
5737 34 : }
5738 34 :
5739 34 : Ok(())
5740 34 : }
5741 :
5742 : /// Return all keys at the LSN in the image layers
5743 : #[cfg(test)]
5744 6 : pub(crate) async fn inspect_image_layers(
5745 6 : self: &Arc<Timeline>,
5746 6 : lsn: Lsn,
5747 6 : ctx: &RequestContext,
5748 6 : ) -> anyhow::Result<Vec<(Key, Bytes)>> {
5749 6 : let mut all_data = Vec::new();
5750 6 : let guard = self.layers.read().await;
5751 34 : for layer in guard.layer_map().iter_historic_layers() {
5752 34 : if !layer.is_delta() && layer.image_layer_lsn() == lsn {
5753 8 : let layer = guard.get_from_desc(&layer);
5754 8 : let mut reconstruct_data = ValuesReconstructState::default();
5755 8 : layer
5756 8 : .get_values_reconstruct_data(
5757 8 : KeySpace::single(Key::MIN..Key::MAX),
5758 8 : lsn..Lsn(lsn.0 + 1),
5759 8 : &mut reconstruct_data,
5760 8 : ctx,
5761 8 : )
5762 13 : .await?;
5763 80 : for (k, v) in reconstruct_data.keys {
5764 72 : all_data.push((k, v?.img.unwrap().1));
5765 : }
5766 26 : }
5767 : }
5768 6 : all_data.sort();
5769 6 : Ok(all_data)
5770 6 : }
5771 :
5772 : /// Get all historic layer descriptors in the layer map
5773 : #[cfg(test)]
5774 2 : pub(crate) async fn inspect_historic_layers(
5775 2 : self: &Arc<Timeline>,
5776 2 : ) -> anyhow::Result<Vec<super::storage_layer::PersistentLayerKey>> {
5777 2 : let mut layers = Vec::new();
5778 2 : let guard = self.layers.read().await;
5779 6 : for layer in guard.layer_map().iter_historic_layers() {
5780 6 : layers.push(layer.key());
5781 6 : }
5782 2 : Ok(layers)
5783 2 : }
5784 :
5785 : #[cfg(test)]
5786 6 : pub(crate) fn add_extra_test_dense_keyspace(&self, ks: KeySpace) {
5787 6 : let mut keyspace = self.extra_test_dense_keyspace.load().as_ref().clone();
5788 6 : keyspace.merge(&ks);
5789 6 : self.extra_test_dense_keyspace.store(Arc::new(keyspace));
5790 6 : }
5791 : }
5792 :
5793 : type TraversalPathItem = (ValueReconstructResult, Lsn, TraversalId);
5794 :
5795 : /// Tracking writes ingestion does to a particular in-memory layer.
5796 : ///
5797 : /// Cleared upon freezing a layer.
5798 : struct TimelineWriterState {
5799 : open_layer: Arc<InMemoryLayer>,
5800 : current_size: u64,
5801 : // Previous Lsn which passed through
5802 : prev_lsn: Option<Lsn>,
5803 : // Largest Lsn which passed through the current writer
5804 : max_lsn: Option<Lsn>,
5805 : // Cached details of the last freeze. Avoids going trough the atomic/lock on every put.
5806 : cached_last_freeze_at: Lsn,
5807 : }
5808 :
5809 : impl TimelineWriterState {
5810 1250 : fn new(open_layer: Arc<InMemoryLayer>, current_size: u64, last_freeze_at: Lsn) -> Self {
5811 1250 : Self {
5812 1250 : open_layer,
5813 1250 : current_size,
5814 1250 : prev_lsn: None,
5815 1250 : max_lsn: None,
5816 1250 : cached_last_freeze_at: last_freeze_at,
5817 1250 : }
5818 1250 : }
5819 : }
5820 :
5821 : /// Various functions to mutate the timeline.
5822 : // TODO Currently, Deref is used to allow easy access to read methods from this trait.
5823 : // This is probably considered a bad practice in Rust and should be fixed eventually,
5824 : // but will cause large code changes.
5825 : pub(crate) struct TimelineWriter<'a> {
5826 : tl: &'a Timeline,
5827 : write_guard: tokio::sync::MutexGuard<'a, Option<TimelineWriterState>>,
5828 : }
5829 :
5830 : impl Deref for TimelineWriter<'_> {
5831 : type Target = Timeline;
5832 :
5833 4807260 : fn deref(&self) -> &Self::Target {
5834 4807260 : self.tl
5835 4807260 : }
5836 : }
5837 :
5838 : #[derive(PartialEq)]
5839 : enum OpenLayerAction {
5840 : Roll,
5841 : Open,
5842 : None,
5843 : }
5844 :
5845 : impl<'a> TimelineWriter<'a> {
5846 : /// Put a new page version that can be constructed from a WAL record
5847 : ///
5848 : /// This will implicitly extend the relation, if the page is beyond the
5849 : /// current end-of-file.
5850 5090498 : pub(crate) async fn put(
5851 5090498 : &mut self,
5852 5090498 : key: Key,
5853 5090498 : lsn: Lsn,
5854 5090498 : value: &Value,
5855 5090498 : ctx: &RequestContext,
5856 5090498 : ) -> anyhow::Result<()> {
5857 5090498 : // Avoid doing allocations for "small" values.
5858 5090498 : // In the regression test suite, the limit of 256 avoided allocations in 95% of cases:
5859 5090498 : // https://github.com/neondatabase/neon/pull/5056#discussion_r1301975061
5860 5090498 : let mut buf = smallvec::SmallVec::<[u8; 256]>::new();
5861 5090498 : value.ser_into(&mut buf)?;
5862 5090498 : let buf_size: u64 = buf.len().try_into().expect("oversized value buf");
5863 5090498 :
5864 5090498 : let action = self.get_open_layer_action(lsn, buf_size);
5865 5090498 : let layer = self.handle_open_layer_action(lsn, action, ctx).await?;
5866 5090498 : let res = layer.put_value(key, lsn, &buf, ctx).await;
5867 :
5868 5090498 : if res.is_ok() {
5869 5090498 : // Update the current size only when the entire write was ok.
5870 5090498 : // In case of failures, we may have had partial writes which
5871 5090498 : // render the size tracking out of sync. That's ok because
5872 5090498 : // the checkpoint distance should be significantly smaller
5873 5090498 : // than the S3 single shot upload limit of 5GiB.
5874 5090498 : let state = self.write_guard.as_mut().unwrap();
5875 5090498 :
5876 5090498 : state.current_size += buf_size;
5877 5090498 : state.prev_lsn = Some(lsn);
5878 5090498 : state.max_lsn = std::cmp::max(state.max_lsn, Some(lsn));
5879 5090498 : }
5880 :
5881 5090498 : res
5882 5090498 : }
5883 :
5884 5090500 : async fn handle_open_layer_action(
5885 5090500 : &mut self,
5886 5090500 : at: Lsn,
5887 5090500 : action: OpenLayerAction,
5888 5090500 : ctx: &RequestContext,
5889 5090500 : ) -> anyhow::Result<&Arc<InMemoryLayer>> {
5890 5090500 : match action {
5891 : OpenLayerAction::Roll => {
5892 80 : let freeze_at = self.write_guard.as_ref().unwrap().max_lsn.unwrap();
5893 80 : self.roll_layer(freeze_at).await?;
5894 80 : self.open_layer(at, ctx).await?;
5895 : }
5896 1170 : OpenLayerAction::Open => self.open_layer(at, ctx).await?,
5897 : OpenLayerAction::None => {
5898 5089250 : assert!(self.write_guard.is_some());
5899 : }
5900 : }
5901 :
5902 5090500 : Ok(&self.write_guard.as_ref().unwrap().open_layer)
5903 5090500 : }
5904 :
5905 1250 : async fn open_layer(&mut self, at: Lsn, ctx: &RequestContext) -> anyhow::Result<()> {
5906 1250 : let layer = self.tl.get_layer_for_write(at, ctx).await?;
5907 1250 : let initial_size = layer.size().await?;
5908 :
5909 1250 : let last_freeze_at = self.last_freeze_at.load();
5910 1250 : self.write_guard.replace(TimelineWriterState::new(
5911 1250 : layer,
5912 1250 : initial_size,
5913 1250 : last_freeze_at,
5914 1250 : ));
5915 1250 :
5916 1250 : Ok(())
5917 1250 : }
5918 :
5919 80 : async fn roll_layer(&mut self, freeze_at: Lsn) -> anyhow::Result<()> {
5920 80 : let current_size = self.write_guard.as_ref().unwrap().current_size;
5921 80 :
5922 80 : // self.write_guard will be taken by the freezing
5923 80 : self.tl
5924 80 : .freeze_inmem_layer_at(freeze_at, &mut self.write_guard)
5925 5 : .await;
5926 :
5927 80 : self.tl.flush_frozen_layers(freeze_at)?;
5928 :
5929 80 : if current_size >= self.get_checkpoint_distance() * 2 {
5930 0 : warn!("Flushed oversized open layer with size {}", current_size)
5931 80 : }
5932 :
5933 80 : Ok(())
5934 80 : }
5935 :
5936 5090500 : fn get_open_layer_action(&self, lsn: Lsn, new_value_size: u64) -> OpenLayerAction {
5937 5090500 : let state = &*self.write_guard;
5938 5090500 : let Some(state) = &state else {
5939 1170 : return OpenLayerAction::Open;
5940 : };
5941 :
5942 : #[cfg(feature = "testing")]
5943 5089330 : if state.cached_last_freeze_at < self.tl.last_freeze_at.load() {
5944 : // this check and assertion are not really needed because
5945 : // LayerManager::try_freeze_in_memory_layer will always clear out the
5946 : // TimelineWriterState if something is frozen. however, we can advance last_freeze_at when there
5947 : // is no TimelineWriterState.
5948 0 : assert!(
5949 0 : state.open_layer.end_lsn.get().is_some(),
5950 0 : "our open_layer must be outdated"
5951 : );
5952 :
5953 : // this would be a memory leak waiting to happen because the in-memory layer always has
5954 : // an index
5955 0 : panic!("BUG: TimelineWriterState held on to frozen in-memory layer.");
5956 5089330 : }
5957 5089330 :
5958 5089330 : if state.prev_lsn == Some(lsn) {
5959 : // Rolling mid LSN is not supported by [downstream code].
5960 : // Hence, only roll at LSN boundaries.
5961 : //
5962 : // [downstream code]: https://github.com/neondatabase/neon/pull/7993#discussion_r1633345422
5963 286304 : return OpenLayerAction::None;
5964 4803026 : }
5965 4803026 :
5966 4803026 : if state.current_size == 0 {
5967 : // Don't roll empty layers
5968 0 : return OpenLayerAction::None;
5969 4803026 : }
5970 4803026 :
5971 4803026 : if self.tl.should_roll(
5972 4803026 : state.current_size,
5973 4803026 : state.current_size + new_value_size,
5974 4803026 : self.get_checkpoint_distance(),
5975 4803026 : lsn,
5976 4803026 : state.cached_last_freeze_at,
5977 4803026 : state.open_layer.get_opened_at(),
5978 4803026 : ) {
5979 80 : OpenLayerAction::Roll
5980 : } else {
5981 4802946 : OpenLayerAction::None
5982 : }
5983 5090500 : }
5984 :
5985 : /// Put a batch of keys at the specified Lsns.
5986 : ///
5987 : /// The batch is sorted by Lsn (enforced by usage of [`utils::vec_map::VecMap`].
5988 414040 : pub(crate) async fn put_batch(
5989 414040 : &mut self,
5990 414040 : batch: VecMap<Lsn, (Key, Value)>,
5991 414040 : ctx: &RequestContext,
5992 414040 : ) -> anyhow::Result<()> {
5993 1114384 : for (lsn, (key, val)) in batch {
5994 700344 : self.put(key, lsn, &val, ctx).await?
5995 : }
5996 :
5997 414040 : Ok(())
5998 414040 : }
5999 :
6000 2 : pub(crate) async fn delete_batch(
6001 2 : &mut self,
6002 2 : batch: &[(Range<Key>, Lsn)],
6003 2 : ctx: &RequestContext,
6004 2 : ) -> anyhow::Result<()> {
6005 2 : if let Some((_, lsn)) = batch.first() {
6006 2 : let action = self.get_open_layer_action(*lsn, 0);
6007 2 : let layer = self.handle_open_layer_action(*lsn, action, ctx).await?;
6008 2 : layer.put_tombstones(batch).await?;
6009 0 : }
6010 :
6011 2 : Ok(())
6012 2 : }
6013 :
6014 : /// Track the end of the latest digested WAL record.
6015 : /// Remember the (end of) last valid WAL record remembered in the timeline.
6016 : ///
6017 : /// Call this after you have finished writing all the WAL up to 'lsn'.
6018 : ///
6019 : /// 'lsn' must be aligned. This wakes up any wait_lsn() callers waiting for
6020 : /// the 'lsn' or anything older. The previous last record LSN is stored alongside
6021 : /// the latest and can be read.
6022 5279056 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
6023 5279056 : self.tl.finish_write(new_lsn);
6024 5279056 : }
6025 :
6026 270570 : pub(crate) fn update_current_logical_size(&self, delta: i64) {
6027 270570 : self.tl.update_current_logical_size(delta)
6028 270570 : }
6029 : }
6030 :
6031 : // We need TimelineWriter to be send in upcoming conversion of
6032 : // Timeline::layers to tokio::sync::RwLock.
6033 : #[test]
6034 2 : fn is_send() {
6035 2 : fn _assert_send<T: Send>() {}
6036 2 : _assert_send::<TimelineWriter<'_>>();
6037 2 : }
6038 :
6039 : #[cfg(test)]
6040 : mod tests {
6041 : use utils::{id::TimelineId, lsn::Lsn};
6042 :
6043 : use crate::tenant::{
6044 : harness::TenantHarness, storage_layer::Layer, timeline::EvictionError, Timeline,
6045 : };
6046 :
6047 : #[tokio::test]
6048 2 : async fn two_layer_eviction_attempts_at_the_same_time() {
6049 2 : let harness = TenantHarness::create("two_layer_eviction_attempts_at_the_same_time")
6050 2 : .await
6051 2 : .unwrap();
6052 2 :
6053 8 : let (tenant, ctx) = harness.load().await;
6054 2 : let timeline = tenant
6055 2 : .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
6056 6 : .await
6057 2 : .unwrap();
6058 2 :
6059 2 : let layer = find_some_layer(&timeline).await;
6060 2 : let layer = layer
6061 2 : .keep_resident()
6062 2 : .await
6063 2 : .expect("no download => no downloading errors")
6064 2 : .drop_eviction_guard();
6065 2 :
6066 2 : let forever = std::time::Duration::from_secs(120);
6067 2 :
6068 2 : let first = layer.evict_and_wait(forever);
6069 2 : let second = layer.evict_and_wait(forever);
6070 2 :
6071 2 : let (first, second) = tokio::join!(first, second);
6072 2 :
6073 2 : let res = layer.keep_resident().await;
6074 2 : assert!(res.is_none(), "{res:?}");
6075 2 :
6076 2 : match (first, second) {
6077 2 : (Ok(()), Ok(())) => {
6078 2 : // because there are no more timeline locks being taken on eviction path, we can
6079 2 : // witness all three outcomes here.
6080 2 : }
6081 2 : (Ok(()), Err(EvictionError::NotFound)) | (Err(EvictionError::NotFound), Ok(())) => {
6082 0 : // if one completes before the other, this is fine just as well.
6083 0 : }
6084 2 : other => unreachable!("unexpected {:?}", other),
6085 2 : }
6086 2 : }
6087 :
6088 2 : async fn find_some_layer(timeline: &Timeline) -> Layer {
6089 2 : let layers = timeline.layers.read().await;
6090 2 : let desc = layers
6091 2 : .layer_map()
6092 2 : .iter_historic_layers()
6093 2 : .next()
6094 2 : .expect("must find one layer to evict");
6095 2 :
6096 2 : layers.get_from_desc(&desc)
6097 2 : }
6098 : }
|