Line data Source code
1 : pub(crate) mod analysis;
2 : pub(crate) mod compaction;
3 : pub mod delete;
4 : pub(crate) mod detach_ancestor;
5 : mod eviction_task;
6 : pub(crate) mod handle;
7 : pub(crate) mod import_pgdata;
8 : mod init;
9 : pub mod layer_manager;
10 : pub(crate) mod logical_size;
11 : pub mod offload;
12 : pub mod span;
13 : pub mod uninit;
14 : mod walreceiver;
15 :
16 : use anyhow::{anyhow, bail, ensure, Context, Result};
17 : use arc_swap::ArcSwap;
18 : use bytes::Bytes;
19 : use camino::Utf8Path;
20 : use chrono::{DateTime, Utc};
21 : use enumset::EnumSet;
22 : use fail::fail_point;
23 : use handle::ShardTimelineId;
24 : use offload::OffloadError;
25 : use once_cell::sync::Lazy;
26 : use pageserver_api::{
27 : config::tenant_conf_defaults::DEFAULT_COMPACTION_THRESHOLD,
28 : key::{
29 : KEY_SIZE, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX, NON_INHERITED_RANGE,
30 : NON_INHERITED_SPARSE_RANGE,
31 : },
32 : keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning},
33 : models::{
34 : CompactKeyRange, CompactLsnRange, CompactionAlgorithm, CompactionAlgorithmSettings,
35 : DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy,
36 : InMemoryLayerInfo, LayerMapInfo, LsnLease, TimelineState,
37 : },
38 : reltag::BlockNumber,
39 : shard::{ShardIdentity, ShardNumber, TenantShardId},
40 : };
41 : use rand::Rng;
42 : use remote_storage::DownloadError;
43 : use serde_with::serde_as;
44 : use storage_broker::BrokerClientChannel;
45 : use tokio::{
46 : runtime::Handle,
47 : sync::{oneshot, watch},
48 : };
49 : use tokio_util::sync::CancellationToken;
50 : use tracing::*;
51 : use utils::{
52 : fs_ext, pausable_failpoint,
53 : postgres_client::PostgresClientProtocol,
54 : sync::gate::{Gate, GateGuard},
55 : };
56 : use wal_decoder::serialized_batch::{SerializedValueBatch, ValueMeta};
57 :
58 : use std::sync::atomic::Ordering as AtomicOrdering;
59 : use std::sync::{Arc, Mutex, RwLock, Weak};
60 : use std::time::{Duration, Instant, SystemTime};
61 : use std::{
62 : array,
63 : collections::{BTreeMap, HashMap, HashSet},
64 : sync::atomic::AtomicU64,
65 : };
66 : use std::{cmp::min, ops::ControlFlow};
67 : use std::{
68 : collections::btree_map::Entry,
69 : ops::{Deref, Range},
70 : };
71 : use std::{pin::pin, sync::OnceLock};
72 :
73 : use crate::{
74 : aux_file::AuxFileSizeEstimator,
75 : tenant::{
76 : config::AttachmentMode,
77 : layer_map::{LayerMap, SearchResult},
78 : metadata::TimelineMetadata,
79 : storage_layer::{inmemory_layer::IndexEntry, PersistentLayerDesc},
80 : },
81 : walingest::WalLagCooldown,
82 : walredo,
83 : };
84 : use crate::{
85 : context::{DownloadBehavior, RequestContext},
86 : disk_usage_eviction_task::DiskUsageEvictionInfo,
87 : pgdatadir_mapping::CollectKeySpaceError,
88 : };
89 : use crate::{
90 : disk_usage_eviction_task::finite_f32,
91 : tenant::storage_layer::{
92 : AsLayerDesc, DeltaLayerWriter, EvictionError, ImageLayerWriter, InMemoryLayer, Layer,
93 : LayerAccessStatsReset, LayerName, ResidentLayer, ValueReconstructState,
94 : ValuesReconstructState,
95 : },
96 : };
97 : use crate::{
98 : disk_usage_eviction_task::EvictionCandidate, tenant::storage_layer::delta_layer::DeltaEntry,
99 : };
100 : use crate::{
101 : l0_flush::{self, L0FlushGlobalState},
102 : metrics::GetKind,
103 : };
104 : use crate::{
105 : metrics::ScanLatencyOngoingRecording, tenant::timeline::logical_size::CurrentLogicalSize,
106 : };
107 : use crate::{
108 : pgdatadir_mapping::DirectoryKind,
109 : virtual_file::{MaybeFatalIo, VirtualFile},
110 : };
111 : use crate::{pgdatadir_mapping::LsnForTimestamp, tenant::tasks::BackgroundLoopKind};
112 : use crate::{pgdatadir_mapping::MAX_AUX_FILE_V2_DELTAS, tenant::storage_layer::PersistentLayerKey};
113 : use pageserver_api::config::tenant_conf_defaults::DEFAULT_PITR_INTERVAL;
114 :
115 : use crate::config::PageServerConf;
116 : use crate::keyspace::{KeyPartitioning, KeySpace};
117 : use crate::metrics::TimelineMetrics;
118 : use crate::pgdatadir_mapping::CalculateLogicalSizeError;
119 : use crate::tenant::config::TenantConfOpt;
120 : use pageserver_api::reltag::RelTag;
121 : use pageserver_api::shard::ShardIndex;
122 :
123 : use postgres_connection::PgConnectionConfig;
124 : use postgres_ffi::{to_pg_timestamp, v14::xlog_utils, WAL_SEGMENT_SIZE};
125 : use utils::{
126 : completion,
127 : generation::Generation,
128 : id::TimelineId,
129 : lsn::{AtomicLsn, Lsn, RecordLsn},
130 : seqwait::SeqWait,
131 : simple_rcu::{Rcu, RcuReadGuard},
132 : };
133 :
134 : use crate::task_mgr;
135 : use crate::task_mgr::TaskKind;
136 : use crate::tenant::gc_result::GcResult;
137 : use crate::ZERO_PAGE;
138 : use pageserver_api::key::Key;
139 :
140 : use self::delete::DeleteTimelineFlow;
141 : pub(super) use self::eviction_task::EvictionTaskTenantState;
142 : use self::eviction_task::EvictionTaskTimelineState;
143 : use self::layer_manager::LayerManager;
144 : use self::logical_size::LogicalSize;
145 : use self::walreceiver::{WalReceiver, WalReceiverConf};
146 :
147 : use super::{
148 : config::TenantConf, storage_layer::LayerVisibilityHint, upload_queue::NotInitialized,
149 : MaybeOffloaded,
150 : };
151 : use super::{debug_assert_current_span_has_tenant_and_timeline_id, AttachedTenantConf};
152 : use super::{remote_timeline_client::index::IndexPart, storage_layer::LayerFringe};
153 : use super::{
154 : remote_timeline_client::RemoteTimelineClient, remote_timeline_client::WaitCompletionError,
155 : storage_layer::ReadableLayer,
156 : };
157 : use super::{
158 : secondary::heatmap::{HeatMapLayer, HeatMapTimeline},
159 : GcError,
160 : };
161 :
162 : #[cfg(test)]
163 : use pageserver_api::value::Value;
164 :
165 : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
166 : pub(crate) enum FlushLoopState {
167 : NotStarted,
168 : Running {
169 : #[cfg(test)]
170 : expect_initdb_optimization: bool,
171 : #[cfg(test)]
172 : initdb_optimization_count: usize,
173 : },
174 : Exited,
175 : }
176 :
177 : #[derive(Debug, Copy, Clone, PartialEq, Eq)]
178 : pub enum ImageLayerCreationMode {
179 : /// Try to create image layers based on `time_for_new_image_layer`. Used in compaction code path.
180 : Try,
181 : /// Force creating the image layers if possible. For now, no image layers will be created
182 : /// for metadata keys. Used in compaction code path with force flag enabled.
183 : Force,
184 : /// Initial ingestion of the data; no data should be dropped in this function. This
185 : /// means that no metadata keys should be included in the partitions. Used in the
186 : /// flush-frozen-layer code path.
187 : Initial,
188 : }
189 :
190 : impl std::fmt::Display for ImageLayerCreationMode {
191 724 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
192 724 : write!(f, "{:?}", self)
193 724 : }
194 : }
195 :
196 : /// Temporary function for the immutable storage state refactor; ensures we drop the read guard rather than something else.
197 : /// Can be removed after all refactors are done.
198 28 : fn drop_rlock<T>(rlock: tokio::sync::RwLockReadGuard<T>) {
199 28 : drop(rlock)
200 28 : }
201 :
202 : /// Temporary function for the immutable storage state refactor; ensures we drop the write guard rather than something else.
203 : /// Can be removed after all refactors are done.
204 752 : fn drop_wlock<T>(wlock: tokio::sync::RwLockWriteGuard<'_, T>) {
205 752 : drop(wlock)
206 752 : }
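// Illustrative sketch, not part of the original source: these typed helpers
// exist so that a refactor which changes the guarded type fails to compile,
// instead of a bare `drop(...)` silently dropping the wrong value.
#[cfg(test)]
async fn drop_wlock_usage_sketch(lock: &tokio::sync::RwLock<()>) {
    let guard = lock.write().await;
    // Compiles only because `guard` really is a `RwLockWriteGuard`.
    drop_wlock(guard);
}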
207 :
208 : /// The outward-facing resources required to build a Timeline
209 : pub struct TimelineResources {
210 : pub remote_client: RemoteTimelineClient,
211 : pub pagestream_throttle:
212 : Arc<crate::tenant::throttle::Throttle<crate::metrics::tenant_throttling::Pagestream>>,
213 : pub l0_flush_global_state: l0_flush::L0FlushGlobalState,
214 : }
215 :
216 : /// The relation size cache caches relation sizes at the end of the timeline. It speeds up WAL
217 : /// ingestion considerably, because WAL ingestion needs to check, for most records, whether the record
218 : /// implicitly extends the relation. At startup, `complete_as_of` is initialized to the current end
219 : /// of the timeline (disk_consistent_lsn). It's used on reads of relation sizes to check if the
220 : /// value can be used to also update the cache, see [`Timeline::update_cached_rel_size`].
221 : pub(crate) struct RelSizeCache {
222 : pub(crate) complete_as_of: Lsn,
223 : pub(crate) map: HashMap<RelTag, (Lsn, BlockNumber)>,
224 : }
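// Illustrative sketch, not part of the original source: how a read might
// consult this cache. `lookup_cached_rel_size` is a hypothetical helper; the
// real logic lives in `Timeline`'s rel-size accessors such as
// [`Timeline::update_cached_rel_size`]. A cached entry `(cached_lsn, nblocks)`
// records the size as of `cached_lsn`, unchanged since, so it can serve any
// read at or after that LSN.
#[cfg(test)]
fn lookup_cached_rel_size(cache: &RelSizeCache, tag: &RelTag, lsn: Lsn) -> Option<BlockNumber> {
    match cache.map.get(tag) {
        Some((cached_lsn, nblocks)) if lsn >= *cached_lsn => Some(*nblocks),
        _ => None,
    }
}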
225 :
226 : pub struct Timeline {
227 : pub(crate) conf: &'static PageServerConf,
228 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
229 :
230 : myself: Weak<Self>,
231 :
232 : pub(crate) tenant_shard_id: TenantShardId,
233 : pub timeline_id: TimelineId,
234 :
235 : /// The generation of the tenant that instantiated us: this is used for safety when writing remote objects.
236 : /// Never changes for the lifetime of this [`Timeline`] object.
237 : ///
238 : /// This duplicates the generation stored in LocationConf, but that structure is mutable:
239 : /// this copy enforces the invariant that the generation doesn't change during a Tenant's lifetime.
240 : pub(crate) generation: Generation,
241 :
242 : /// The detailed sharding information from our parent Tenant. This enables us to map keys
243 : /// to shards, and is constant through the lifetime of this Timeline.
244 : shard_identity: ShardIdentity,
245 :
246 : pub pg_version: u32,
247 :
248 : /// The layer manager, which combines two parts:
249 : /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote).
250 : /// 2. `LayerMap`, the acceleration data structure for `get_reconstruct_data`.
251 : ///
252 : /// `LayerMap` maps out the `(PAGE,LSN) / (KEY,LSN)` space, which is composed of `(KeyRange, LsnRange)` rectangles.
253 : /// We describe these rectangles through the `PersistentLayerDesc` struct.
254 : ///
255 : /// When we want to reconstruct a page, we first find the `PersistentLayerDesc`'s that we need for page reconstruction,
256 : /// using `LayerMap`. Then, we use `LayerFileManager` to get the `PersistentLayer`'s that correspond to the
257 : /// `PersistentLayerDesc`'s.
258 : ///
259 : /// Hence, it's important to keep things coherent. The `LayerFileManager` must always have an entry for all
260 : /// `PersistentLayerDesc`'s in the `LayerMap`. If it doesn't, `LayerFileManager::get_from_desc` will panic at
261 : /// runtime, e.g., during page reconstruction.
262 : ///
263 : /// In the future, we'll be able to split up the tuple of LayerMap and `LayerFileManager`,
264 : /// so that e.g. on-demand-download/eviction, and layer spreading, can operate just on `LayerFileManager`.
265 : pub(crate) layers: tokio::sync::RwLock<LayerManager>,
266 :
267 : last_freeze_at: AtomicLsn,
268 : // An atomic (rather than an RwLock) would be more appropriate here.
269 : last_freeze_ts: RwLock<Instant>,
270 :
271 : pub(crate) standby_horizon: AtomicLsn,
272 :
273 : // WAL redo manager. `None` only for broken tenants.
274 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
275 :
276 : /// Remote storage client.
277 : /// See [`remote_timeline_client`](super::remote_timeline_client) module comment for details.
278 : pub(crate) remote_client: Arc<RemoteTimelineClient>,
279 :
280 : // What page versions do we hold in the repository? If we get a
281 : // request > last_record_lsn, we need to wait until we receive all
282 : // the WAL up to the request. The SeqWait provides functions for
283 : // that. TODO: If we get a request for an old LSN, such that the
284 : // versions have already been garbage collected away, we should
285 : // throw an error, but we don't track that currently.
286 : //
287 : // last_record_lsn.load().last points to the end of last processed WAL record.
288 : //
289 : // We also remember the starting point of the previous record in
290 : // 'last_record_lsn.load().prev'. It's used to set the xl_prev pointer of the
291 : // first WAL record when the node is started up. But here, we just
292 : // keep track of it.
293 : last_record_lsn: SeqWait<RecordLsn, Lsn>,
294 :
295 : // All WAL records have been processed and stored durably on files on
296 : // local disk, up to this LSN. On crash and restart, we need to re-process
297 : // the WAL starting from this point.
298 : //
299 : // Some later WAL records might have been processed and also flushed to disk
300 : // already, so don't be surprised to see some, but there's no guarantee on
301 : // them yet.
302 : disk_consistent_lsn: AtomicLsn,
303 :
304 : // Parent timeline that this timeline was branched from, and the LSN
305 : // of the branch point.
306 : ancestor_timeline: Option<Arc<Timeline>>,
307 : ancestor_lsn: Lsn,
308 :
309 : pub(super) metrics: TimelineMetrics,
310 :
311 : // `Timeline` doesn't write these metrics itself, but it manages the lifetime. Code
312 : // in `crate::page_service` writes these metrics.
313 : pub(crate) query_metrics: crate::metrics::SmgrQueryTimePerTimeline,
314 :
315 : directory_metrics: [AtomicU64; DirectoryKind::KINDS_NUM],
316 :
317 : /// Ensures layers aren't frozen by checkpointer between
318 : /// [`Timeline::get_layer_for_write`] and layer reads.
319 : /// Locked automatically by [`TimelineWriter`] and checkpointer.
320 : /// Must always be acquired before the layer map/individual layer lock
321 : /// to avoid deadlock.
322 : ///
323 : /// The state is cleared upon freezing.
324 : write_lock: tokio::sync::Mutex<Option<TimelineWriterState>>,
325 :
326 : /// Used to avoid multiple `flush_loop` tasks running
327 : pub(super) flush_loop_state: Mutex<FlushLoopState>,
328 :
329 : /// layer_flush_start_tx can be used to wake up the layer-flushing task.
330 : /// - The u64 value is a counter, incremented every time a new flush cycle is requested.
331 : /// The flush cycle counter is sent back on the layer_flush_done channel when
332 : /// the flush finishes. You can use that to wait for the flush to finish.
333 : /// - The LSN is updated to max() of its current value and the latest disk_consistent_lsn
334 : /// read by whoever sends an update
335 : layer_flush_start_tx: tokio::sync::watch::Sender<(u64, Lsn)>,
336 : /// to be notified when layer flushing has finished, subscribe to the layer_flush_done channel
337 : layer_flush_done_tx: tokio::sync::watch::Sender<(u64, Result<(), FlushLayerError>)>,
338 :
339 : // Needed to ensure that we can't create a branch at a point that was already garbage collected
340 : pub latest_gc_cutoff_lsn: Rcu<Lsn>,
341 :
342 : // List of child timelines and their branch points. This is needed to avoid
343 : // garbage collecting data that is still needed by the child timelines.
344 : pub(crate) gc_info: std::sync::RwLock<GcInfo>,
345 :
346 : // It may change across major versions so for simplicity
347 : // keep it after running initdb for a timeline.
348 : // It is needed in checks when we want to error on some operations
349 : // when they are requested for pre-initdb lsn.
350 : // It can be unified with latest_gc_cutoff_lsn under some "first_valid_lsn",
351 : // though let's keep them both for better error visibility.
352 : pub initdb_lsn: Lsn,
353 :
354 : /// When did we last calculate the partitioning? Made pub(super) so test cases can access it.
355 : pub(super) partitioning: tokio::sync::Mutex<((KeyPartitioning, SparseKeyPartitioning), Lsn)>,
356 :
357 : /// Configuration: how often should the partitioning be recalculated.
358 : repartition_threshold: u64,
359 :
360 : last_image_layer_creation_check_at: AtomicLsn,
361 : last_image_layer_creation_check_instant: std::sync::Mutex<Option<Instant>>,
362 :
363 : /// Current logical size of the "datadir", at the last LSN.
364 : current_logical_size: LogicalSize,
365 :
366 : /// Information about the last processed message by the WAL receiver,
367 : /// or None if WAL receiver has not received anything for this timeline
368 : /// yet.
369 : pub last_received_wal: Mutex<Option<WalReceiverInfo>>,
370 : pub walreceiver: Mutex<Option<WalReceiver>>,
371 :
372 : /// Relation size cache
373 : pub(crate) rel_size_cache: RwLock<RelSizeCache>,
374 :
375 : download_all_remote_layers_task_info: RwLock<Option<DownloadRemoteLayersTaskInfo>>,
376 :
377 : state: watch::Sender<TimelineState>,
378 :
379 : /// Prevent two tasks from deleting the timeline at the same time. If held, the
380 : /// timeline is being deleted. If 'true', the timeline has already been deleted.
381 : pub delete_progress: TimelineDeleteProgress,
382 :
383 : eviction_task_timeline_state: tokio::sync::Mutex<EvictionTaskTimelineState>,
384 :
385 : /// Load or creation time information about the disk_consistent_lsn and when the loading
386 : /// happened. Used for consumption metrics.
387 : pub(crate) loaded_at: (Lsn, SystemTime),
388 :
389 : /// Gate to prevent shutdown completing while I/O is still happening to this timeline's data
390 : pub(crate) gate: Gate,
391 :
392 : /// Cancellation token scoped to this timeline: anything doing long-running work relating
393 : /// to the timeline should drop out when this token fires.
394 : pub(crate) cancel: CancellationToken,
395 :
396 : /// Make sure we only have one running compaction at a time in tests.
397 : ///
398 : /// Must only be taken in two places:
399 : /// - [`Timeline::compact`] (this file)
400 : /// - [`delete::delete_local_timeline_directory`]
401 : ///
402 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
403 : compaction_lock: tokio::sync::Mutex<()>,
404 :
405 : /// Make sure we only have one running gc at a time.
406 : ///
407 : /// Must only be taken in two places:
408 : /// - [`Timeline::gc`] (this file)
409 : /// - [`delete::delete_local_timeline_directory`]
410 : ///
411 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
412 : gc_lock: tokio::sync::Mutex<()>,
413 :
414 : /// Cloned from [`super::Tenant::pagestream_throttle`] on construction.
415 : pub(crate) pagestream_throttle:
416 : Arc<crate::tenant::throttle::Throttle<crate::metrics::tenant_throttling::Pagestream>>,
417 :
418 : /// Size estimator for aux file v2
419 : pub(crate) aux_file_size_estimator: AuxFileSizeEstimator,
420 :
421 : /// Some test cases directly place keys into the timeline without actually modifying the directory
422 : /// keys (i.e., DB_DIR). The test cases creating such keys will put the keyspaces here, so that
423 : /// these keys won't get garbage-collected during compaction/GC. This field only modifies the dense
424 : /// keyspace return value of `collect_keyspace`. For sparse keyspaces, use AUX keys for testing, and
425 : /// in the future, add `extra_test_sparse_keyspace` if necessary.
426 : #[cfg(test)]
427 : pub(crate) extra_test_dense_keyspace: ArcSwap<KeySpace>,
428 :
429 : pub(crate) l0_flush_global_state: L0FlushGlobalState,
430 :
431 : pub(crate) handles: handle::PerTimelineState<crate::page_service::TenantManagerTypes>,
432 :
433 : pub(crate) attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
434 :
435 : /// Cf. [`crate::tenant::CreateTimelineIdempotency`].
436 : pub(crate) create_idempotency: crate::tenant::CreateTimelineIdempotency,
437 : }
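// Illustrative sketch, not part of the original source: the flush
// request/acknowledge protocol described on `layer_flush_start_tx` above,
// reduced to plain watch channels. Names here are hypothetical; the real
// logic lives in the flush loop and its callers, and the real done channel
// also carries a `Result<(), FlushLayerError>`.
#[cfg(test)]
async fn flush_protocol_sketch(
    start_tx: &watch::Sender<(u64, Lsn)>,
    done_rx: &mut watch::Receiver<u64>,
    to_lsn: Lsn,
) {
    // Requester side: bump the request counter and fold in our LSN with max().
    let mut my_ticket = 0u64;
    start_tx.send_modify(|(counter, lsn)| {
        *counter += 1;
        *lsn = std::cmp::max(*lsn, to_lsn);
        my_ticket = *counter;
    });
    // Wait until the flush loop echoes a counter >= our ticket on the done channel.
    while *done_rx.borrow_and_update() < my_ticket {
        if done_rx.changed().await.is_err() {
            break; // flush loop has shut down
        }
    }
}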
438 :
439 : pub type TimelineDeleteProgress = Arc<tokio::sync::Mutex<DeleteTimelineFlow>>;
440 :
441 : pub struct WalReceiverInfo {
442 : pub wal_source_connconf: PgConnectionConfig,
443 : pub last_received_msg_lsn: Lsn,
444 : pub last_received_msg_ts: u128,
445 : }
446 :
447 : /// Information about how much history needs to be retained, needed by
448 : /// Garbage Collection.
449 : #[derive(Default)]
450 : pub(crate) struct GcInfo {
451 : /// Specific LSNs that are needed.
452 : ///
453 : /// Currently, this includes all points where child branches have
454 : /// been forked off from. In the future, could also include
455 : /// explicit user-defined snapshot points.
456 : pub(crate) retain_lsns: Vec<(Lsn, TimelineId, MaybeOffloaded)>,
457 :
458 : /// The cutoff coordinates, which are combined by selecting the minimum.
459 : pub(crate) cutoffs: GcCutoffs,
460 :
461 : /// Leases granted to particular LSNs.
462 : pub(crate) leases: BTreeMap<Lsn, LsnLease>,
463 :
464 : /// Whether our branch point is within our ancestor's PITR interval (for cost estimation)
465 : pub(crate) within_ancestor_pitr: bool,
466 : }
467 :
468 : impl GcInfo {
469 282 : pub(crate) fn min_cutoff(&self) -> Lsn {
470 282 : self.cutoffs.select_min()
471 282 : }
472 :
473 232 : pub(super) fn insert_child(
474 232 : &mut self,
475 232 : child_id: TimelineId,
476 232 : child_lsn: Lsn,
477 232 : is_offloaded: MaybeOffloaded,
478 232 : ) {
479 232 : self.retain_lsns.push((child_lsn, child_id, is_offloaded));
480 232 : self.retain_lsns.sort_by_key(|i| i.0);
481 232 : }
482 :
483 4 : pub(super) fn remove_child_maybe_offloaded(
484 4 : &mut self,
485 4 : child_id: TimelineId,
486 4 : maybe_offloaded: MaybeOffloaded,
487 4 : ) -> bool {
488 4 : // Remove at most one element. Needed for correctness if there is two live `Timeline` objects referencing
489 4 : // the same timeline. Shouldn't but maybe can occur when Arc's live longer than intended.
490 4 : let mut removed = false;
491 6 : self.retain_lsns.retain(|i| {
492 6 : if removed {
493 2 : return true;
494 4 : }
495 4 : let remove = i.1 == child_id && i.2 == maybe_offloaded;
496 4 : removed |= remove;
497 4 : !remove
498 6 : });
499 4 : removed
500 4 : }
501 :
502 4 : pub(super) fn remove_child_not_offloaded(&mut self, child_id: TimelineId) -> bool {
503 4 : self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::No)
504 4 : }
505 :
506 0 : pub(super) fn remove_child_offloaded(&mut self, child_id: TimelineId) -> bool {
507 0 : self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::Yes)
508 0 : }
509 : }
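// Illustrative sketch, not part of the original source: the `retain_lsns`
// bookkeeping above in action. Removal intentionally takes out at most one
// entry, so a duplicate registration survives a single removal.
#[cfg(test)]
fn gc_info_children_sketch() {
    let mut gc_info = GcInfo::default();
    let child = TimelineId::generate();
    gc_info.insert_child(child, Lsn(0x40), MaybeOffloaded::No);
    gc_info.insert_child(child, Lsn(0x40), MaybeOffloaded::No);
    assert!(gc_info.remove_child_not_offloaded(child));
    assert_eq!(gc_info.retain_lsns.len(), 1);
}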
510 :
511 : /// The `GcInfo` component describing which Lsns need to be retained. Functionally, this
512 : /// is a single number (the oldest LSN which we must retain), but it internally distinguishes
513 : /// between time-based and space-based retention for observability and consumption metrics purposes.
514 : #[derive(Debug, Clone)]
515 : pub(crate) struct GcCutoffs {
516 : /// Calculated from the [`TenantConf::gc_horizon`], this LSN indicates how much
517 : /// history we must keep to retain a specified number of bytes of WAL.
518 : pub(crate) space: Lsn,
519 :
520 : /// Calculated from [`TenantConf::pitr_interval`], this LSN indicates how much
521 : /// history we must keep to enable reading back at least the PITR interval duration.
522 : pub(crate) time: Lsn,
523 : }
524 :
525 : impl Default for GcCutoffs {
526 422 : fn default() -> Self {
527 422 : Self {
528 422 : space: Lsn::INVALID,
529 422 : time: Lsn::INVALID,
530 422 : }
531 422 : }
532 : }
533 :
534 : impl GcCutoffs {
535 282 : fn select_min(&self) -> Lsn {
536 282 : std::cmp::min(self.space, self.time)
537 282 : }
538 : }
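// Illustrative sketch, not part of the original source: the effective cutoff
// is whichever policy retains more history, i.e. the smaller LSN.
#[cfg(test)]
fn gc_cutoffs_sketch() {
    let cutoffs = GcCutoffs {
        space: Lsn(0x80), // the space-based policy would allow GC up to here
        time: Lsn(0x40),  // the PITR policy requires history back to here
    };
    assert_eq!(cutoffs.select_min(), Lsn(0x40));
}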
539 :
540 : pub(crate) struct TimelineVisitOutcome {
541 : completed_keyspace: KeySpace,
542 : image_covered_keyspace: KeySpace,
543 : }
544 :
545 : /// An error happened in a get() operation.
546 : #[derive(thiserror::Error, Debug)]
547 : pub(crate) enum PageReconstructError {
548 : #[error(transparent)]
549 : Other(anyhow::Error),
550 :
551 : #[error("Ancestor LSN wait error: {0}")]
552 : AncestorLsnTimeout(WaitLsnError),
553 :
554 : #[error("timeline shutting down")]
555 : Cancelled,
556 :
557 : /// An error happened replaying WAL records
558 : #[error(transparent)]
559 : WalRedo(anyhow::Error),
560 :
561 : #[error("{0}")]
562 : MissingKey(MissingKeyError),
563 : }
564 :
565 : impl From<anyhow::Error> for PageReconstructError {
566 0 : fn from(value: anyhow::Error) -> Self {
567 0 : // In walingest.rs, many PageReconstructErrors are wrapped in anyhow::Error
568 0 : match value.downcast::<PageReconstructError>() {
569 0 : Ok(pre) => pre,
570 0 : Err(other) => PageReconstructError::Other(other),
571 : }
572 0 : }
573 : }
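// Illustrative sketch, not part of the original source: the `From` impl above
// recovers a `PageReconstructError` that crossed an anyhow boundary instead of
// wrapping it a second time.
#[cfg(test)]
fn page_reconstruct_error_roundtrip_sketch() {
    let wrapped = anyhow::Error::new(PageReconstructError::Cancelled);
    let recovered = PageReconstructError::from(wrapped);
    assert!(matches!(recovered, PageReconstructError::Cancelled));
}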
574 :
575 : impl From<utils::bin_ser::DeserializeError> for PageReconstructError {
576 0 : fn from(value: utils::bin_ser::DeserializeError) -> Self {
577 0 : PageReconstructError::Other(anyhow::Error::new(value).context("deserialization failure"))
578 0 : }
579 : }
580 :
581 : impl From<layer_manager::Shutdown> for PageReconstructError {
582 0 : fn from(_: layer_manager::Shutdown) -> Self {
583 0 : PageReconstructError::Cancelled
584 0 : }
585 : }
586 :
587 : impl GetVectoredError {
588 : #[cfg(test)]
589 6 : pub(crate) fn is_missing_key_error(&self) -> bool {
590 6 : matches!(self, Self::MissingKey(_))
591 6 : }
592 : }
593 :
594 : impl From<layer_manager::Shutdown> for GetVectoredError {
595 0 : fn from(_: layer_manager::Shutdown) -> Self {
596 0 : GetVectoredError::Cancelled
597 0 : }
598 : }
599 :
600 : #[derive(thiserror::Error)]
601 : pub struct MissingKeyError {
602 : key: Key,
603 : shard: ShardNumber,
604 : cont_lsn: Lsn,
605 : request_lsn: Lsn,
606 : ancestor_lsn: Option<Lsn>,
607 : backtrace: Option<std::backtrace::Backtrace>,
608 : }
609 :
610 : impl std::fmt::Debug for MissingKeyError {
611 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
612 0 : write!(f, "{}", self)
613 0 : }
614 : }
615 :
616 : impl std::fmt::Display for MissingKeyError {
617 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
618 0 : write!(
619 0 : f,
620 0 : "could not find data for key {} (shard {:?}) at LSN {}, request LSN {}",
621 0 : self.key, self.shard, self.cont_lsn, self.request_lsn
622 0 : )?;
623 0 : if let Some(ref ancestor_lsn) = self.ancestor_lsn {
624 0 : write!(f, ", ancestor {}", ancestor_lsn)?;
625 0 : }
626 :
627 0 : if let Some(ref backtrace) = self.backtrace {
628 0 : write!(f, "\n{}", backtrace)?;
629 0 : }
630 :
631 0 : Ok(())
632 0 : }
633 : }
634 :
635 : impl PageReconstructError {
636 : /// Returns true if this error indicates a tenant/timeline shutdown alike situation
637 0 : pub(crate) fn is_stopping(&self) -> bool {
638 : use PageReconstructError::*;
639 0 : match self {
640 0 : Cancelled => true,
641 0 : Other(_) | AncestorLsnTimeout(_) | WalRedo(_) | MissingKey(_) => false,
642 : }
643 0 : }
644 : }
645 :
646 : #[derive(thiserror::Error, Debug)]
647 : pub(crate) enum CreateImageLayersError {
648 : #[error("timeline shutting down")]
649 : Cancelled,
650 :
651 : #[error("read failed")]
652 : GetVectoredError(#[source] GetVectoredError),
653 :
654 : #[error("reconstruction failed")]
655 : PageReconstructError(#[source] PageReconstructError),
656 :
657 : #[error(transparent)]
658 : Other(#[from] anyhow::Error),
659 : }
660 :
661 : impl From<layer_manager::Shutdown> for CreateImageLayersError {
662 0 : fn from(_: layer_manager::Shutdown) -> Self {
663 0 : CreateImageLayersError::Cancelled
664 0 : }
665 : }
666 :
667 : #[derive(thiserror::Error, Debug, Clone)]
668 : pub(crate) enum FlushLayerError {
669 : /// Timeline cancellation token was cancelled
670 : #[error("timeline shutting down")]
671 : Cancelled,
672 :
673 : /// We tried to flush a layer while the Timeline is in an unexpected state
674 : #[error("cannot flush frozen layers when flush_loop is not running, state is {0:?}")]
675 : NotRunning(FlushLoopState),
676 :
677 : // The following non-cloneable error types are wrapped in Arc<>: we must be Clone-able because the flush error is propagated from the flush
678 : // loop via a watch channel, where we can only borrow it.
679 : #[error("create image layers (shared)")]
680 : CreateImageLayersError(Arc<CreateImageLayersError>),
681 :
682 : #[error("other (shared)")]
683 : Other(#[from] Arc<anyhow::Error>),
684 : }
685 :
686 : impl FlushLayerError {
687 : // When crossing from generic anyhow errors to this error type, we explicitly check
688 : // for timeline cancellation to avoid logging inoffensive shutdown errors as warn/err.
689 0 : fn from_anyhow(timeline: &Timeline, err: anyhow::Error) -> Self {
690 0 : let cancelled = timeline.cancel.is_cancelled()
691 : // The upload queue might have been shut down before the official cancellation of the timeline.
692 0 : || err
693 0 : .downcast_ref::<NotInitialized>()
694 0 : .map(NotInitialized::is_stopping)
695 0 : .unwrap_or_default();
696 0 : if cancelled {
697 0 : Self::Cancelled
698 : } else {
699 0 : Self::Other(Arc::new(err))
700 : }
701 0 : }
702 : }
703 :
704 : impl From<layer_manager::Shutdown> for FlushLayerError {
705 0 : fn from(_: layer_manager::Shutdown) -> Self {
706 0 : FlushLayerError::Cancelled
707 0 : }
708 : }
709 :
710 : #[derive(thiserror::Error, Debug)]
711 : pub(crate) enum GetVectoredError {
712 : #[error("timeline shutting down")]
713 : Cancelled,
714 :
715 : #[error("requested too many keys: {0} > {}", Timeline::MAX_GET_VECTORED_KEYS)]
716 : Oversized(u64),
717 :
718 : #[error("requested at invalid LSN: {0}")]
719 : InvalidLsn(Lsn),
720 :
721 : #[error("requested key not found: {0}")]
722 : MissingKey(MissingKeyError),
723 :
724 : #[error("ancestry walk")]
725 : GetReadyAncestorError(#[source] GetReadyAncestorError),
726 :
727 : #[error(transparent)]
728 : Other(#[from] anyhow::Error),
729 : }
730 :
731 : impl From<GetReadyAncestorError> for GetVectoredError {
732 2 : fn from(value: GetReadyAncestorError) -> Self {
733 : use GetReadyAncestorError::*;
734 2 : match value {
735 0 : Cancelled => GetVectoredError::Cancelled,
736 : AncestorLsnTimeout(_) | BadState { .. } => {
737 2 : GetVectoredError::GetReadyAncestorError(value)
738 : }
739 : }
740 2 : }
741 : }
742 :
743 : #[derive(thiserror::Error, Debug)]
744 : pub(crate) enum GetReadyAncestorError {
745 : #[error("ancestor LSN wait error")]
746 : AncestorLsnTimeout(#[from] WaitLsnError),
747 :
748 : #[error("bad state on timeline {timeline_id}: {state:?}")]
749 : BadState {
750 : timeline_id: TimelineId,
751 : state: TimelineState,
752 : },
753 :
754 : #[error("cancelled")]
755 : Cancelled,
756 : }
757 :
758 : #[derive(Clone, Copy)]
759 : pub enum LogicalSizeCalculationCause {
760 : Initial,
761 : ConsumptionMetricsSyntheticSize,
762 : EvictionTaskImitation,
763 : TenantSizeHandler,
764 : }
765 :
766 : pub enum GetLogicalSizePriority {
767 : User,
768 : Background,
769 : }
770 :
771 0 : #[derive(Debug, enumset::EnumSetType)]
772 : pub(crate) enum CompactFlags {
773 : ForceRepartition,
774 : ForceImageLayerCreation,
775 : ForceL0Compaction,
776 : EnhancedGcBottomMostCompaction,
777 : DryRun,
778 : }
779 :
780 : #[serde_with::serde_as]
781 0 : #[derive(Debug, Clone, serde::Deserialize)]
782 : pub(crate) struct CompactRequest {
783 : pub compact_key_range: Option<CompactKeyRange>,
784 : pub compact_lsn_range: Option<CompactLsnRange>,
785 : /// Whether the compaction job should be scheduled.
786 : #[serde(default)]
787 : pub scheduled: bool,
788 : /// Whether the compaction job should be split across key ranges.
789 : #[serde(default)]
790 : pub sub_compaction: bool,
791 : /// Max job size for each subcompaction job.
792 : pub sub_compaction_max_job_size_mb: Option<u64>,
793 : }
794 :
795 : #[derive(Debug, Clone, Default)]
796 : pub(crate) struct CompactOptions {
797 : pub flags: EnumSet<CompactFlags>,
798 : /// If set, the compaction will only compact the key range specified by this option.
799 : /// This option is only used by GC compaction. For the full explanation, see [`compaction::GcCompactJob`].
800 : pub compact_key_range: Option<CompactKeyRange>,
801 : /// If set, the compaction will only compact the LSN within this value.
802 : /// This option is only used by GC compaction. For the full explanation, see [`compaction::GcCompactJob`].
803 : pub compact_lsn_range: Option<CompactLsnRange>,
804 : /// Enable sub-compaction (split compaction job across key ranges).
805 : /// This option is only used by GC compaction.
806 : pub sub_compaction: bool,
807 : /// Set job size for the GC compaction.
808 : /// This option is only used by GC compaction.
809 : pub sub_compaction_max_job_size_mb: Option<u64>,
810 : }
811 :
812 : impl std::fmt::Debug for Timeline {
813 0 : fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
814 0 : write!(f, "Timeline<{}>", self.timeline_id)
815 0 : }
816 : }
817 :
818 : #[derive(thiserror::Error, Debug)]
819 : pub(crate) enum WaitLsnError {
820 : // Called on a timeline which is shutting down
821 : #[error("Shutdown")]
822 : Shutdown,
823 :
824 : // Called on a timeline that is not in an active state or is shutting down
825 : #[error("Bad timeline state: {0:?}")]
826 : BadState(TimelineState),
827 :
828 : // Timeout expired while waiting for LSN to catch up with goal.
829 : #[error("{0}")]
830 : Timeout(String),
831 : }
832 :
833 : // The impls below achieve cancellation mapping for errors.
834 : // Perhaps there's a way of achieving this with less cruft.
835 :
836 : impl From<CreateImageLayersError> for CompactionError {
837 0 : fn from(e: CreateImageLayersError) -> Self {
838 0 : match e {
839 0 : CreateImageLayersError::Cancelled => CompactionError::ShuttingDown,
840 0 : CreateImageLayersError::Other(e) => {
841 0 : CompactionError::Other(e.context("create image layers"))
842 : }
843 0 : _ => CompactionError::Other(e.into()),
844 : }
845 0 : }
846 : }
847 :
848 : impl From<CreateImageLayersError> for FlushLayerError {
849 0 : fn from(e: CreateImageLayersError) -> Self {
850 0 : match e {
851 0 : CreateImageLayersError::Cancelled => FlushLayerError::Cancelled,
852 0 : any => FlushLayerError::CreateImageLayersError(Arc::new(any)),
853 : }
854 0 : }
855 : }
856 :
857 : impl From<PageReconstructError> for CreateImageLayersError {
858 0 : fn from(e: PageReconstructError) -> Self {
859 0 : match e {
860 0 : PageReconstructError::Cancelled => CreateImageLayersError::Cancelled,
861 0 : _ => CreateImageLayersError::PageReconstructError(e),
862 : }
863 0 : }
864 : }
865 :
866 : impl From<GetVectoredError> for CreateImageLayersError {
867 0 : fn from(e: GetVectoredError) -> Self {
868 0 : match e {
869 0 : GetVectoredError::Cancelled => CreateImageLayersError::Cancelled,
870 0 : _ => CreateImageLayersError::GetVectoredError(e),
871 : }
872 0 : }
873 : }
874 :
875 : impl From<GetVectoredError> for PageReconstructError {
876 6 : fn from(e: GetVectoredError) -> Self {
877 6 : match e {
878 0 : GetVectoredError::Cancelled => PageReconstructError::Cancelled,
879 0 : GetVectoredError::InvalidLsn(_) => PageReconstructError::Other(anyhow!("Invalid LSN")),
880 0 : err @ GetVectoredError::Oversized(_) => PageReconstructError::Other(err.into()),
881 4 : GetVectoredError::MissingKey(err) => PageReconstructError::MissingKey(err),
882 2 : GetVectoredError::GetReadyAncestorError(err) => PageReconstructError::from(err),
883 0 : GetVectoredError::Other(err) => PageReconstructError::Other(err),
884 : }
885 6 : }
886 : }
887 :
888 : impl From<GetReadyAncestorError> for PageReconstructError {
889 2 : fn from(e: GetReadyAncestorError) -> Self {
890 : use GetReadyAncestorError::*;
891 2 : match e {
892 0 : AncestorLsnTimeout(wait_err) => PageReconstructError::AncestorLsnTimeout(wait_err),
893 2 : bad_state @ BadState { .. } => PageReconstructError::Other(anyhow::anyhow!(bad_state)),
894 0 : Cancelled => PageReconstructError::Cancelled,
895 : }
896 2 : }
897 : }
898 :
899 : pub(crate) enum WaitLsnWaiter<'a> {
900 : Timeline(&'a Timeline),
901 : Tenant,
902 : PageService,
903 : }
904 :
905 : /// Argument to [`Timeline::shutdown`].
906 : #[derive(Debug, Clone, Copy)]
907 : pub(crate) enum ShutdownMode {
908 : /// Graceful shutdown, may do a lot of I/O as we flush any open layers to disk and then
909 : /// also to remote storage. This method can easily take multiple seconds for a busy timeline.
910 : ///
911 : /// While we are flushing, we continue to accept read I/O for LSNs ingested before
912 : /// the call to [`Timeline::shutdown`].
913 : FreezeAndFlush,
914 : /// Only flush the layers to the remote storage without freezing any open layers. Flush the deletion
915 : /// queue. This is the mode used by ancestor detach and any other operations that reloads a tenant
916 : /// but not increasing the generation number. Note that this mode cannot be used at tenant shutdown,
917 : /// as flushing the deletion queue at that time will cause shutdown-in-progress errors.
918 : Reload,
919 : /// Shut down immediately, without waiting for any open layers to flush.
920 : Hard,
921 : }
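// Illustrative sketch, not part of the original source: how a hypothetical
// caller might choose a mode based on the semantics documented above.
#[cfg(test)]
fn choose_shutdown_mode_sketch(detaching_ancestor: bool, process_exiting: bool) -> ShutdownMode {
    if process_exiting {
        // Don't spend seconds flushing when the whole process is going away.
        ShutdownMode::Hard
    } else if detaching_ancestor {
        // Flush to remote storage without freezing open layers; safe because
        // the tenant is reloaded under the same generation.
        ShutdownMode::Reload
    } else {
        ShutdownMode::FreezeAndFlush
    }
}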
922 :
923 : struct ImageLayerCreationOutcome {
924 : image: Option<ResidentLayer>,
925 : next_start_key: Key,
926 : }
927 :
928 : /// Public interface functions
929 : impl Timeline {
930 : /// Get the LSN where this branch was created
931 4 : pub(crate) fn get_ancestor_lsn(&self) -> Lsn {
932 4 : self.ancestor_lsn
933 4 : }
934 :
935 : /// Get the ancestor's timeline id
936 12 : pub(crate) fn get_ancestor_timeline_id(&self) -> Option<TimelineId> {
937 12 : self.ancestor_timeline
938 12 : .as_ref()
939 12 : .map(|ancestor| ancestor.timeline_id)
940 12 : }
941 :
942 : /// Get the ancestor timeline
943 2 : pub(crate) fn ancestor_timeline(&self) -> Option<&Arc<Timeline>> {
944 2 : self.ancestor_timeline.as_ref()
945 2 : }
946 :
947 : /// Get the bytes written since the PITR cutoff on this branch, and
948 : /// whether this branch's ancestor_lsn is within its parent's PITR.
949 0 : pub(crate) fn get_pitr_history_stats(&self) -> (u64, bool) {
950 0 : let gc_info = self.gc_info.read().unwrap();
951 0 : let history = self
952 0 : .get_last_record_lsn()
953 0 : .checked_sub(gc_info.cutoffs.time)
954 0 : .unwrap_or(Lsn(0))
955 0 : .0;
956 0 : (history, gc_info.within_ancestor_pitr)
957 0 : }
958 :
959 : /// Lock and get the timeline's GC cutoff.
960 302 : pub(crate) fn get_latest_gc_cutoff_lsn(&self) -> RcuReadGuard<Lsn> {
961 302 : self.latest_gc_cutoff_lsn.read()
962 302 : }
963 :
964 : /// Look up given page version.
965 : ///
966 : /// If a remote layer file is needed, it is downloaded as part of this
967 : /// call.
968 : ///
969 : /// This method enforces [`Self::pagestream_throttle`] internally.
970 : ///
971 : /// NOTE: It is considered an error to 'get' a key that doesn't exist. The
972 : /// abstraction above this needs to store suitable metadata to track what
973 : /// data exists with what keys, in separate metadata entries. If a
974 : /// non-existent key is requested, we may incorrectly return a value from
975 : /// an ancestor branch, for example, or waste a lot of cycles chasing the
976 : /// non-existing key.
977 : ///
978 : /// # Cancel-Safety
979 : ///
980 : /// This method is cancellation-safe.
981 : #[inline(always)]
982 607565 : pub(crate) async fn get(
983 607565 : &self,
984 607565 : key: Key,
985 607565 : lsn: Lsn,
986 607565 : ctx: &RequestContext,
987 607565 : ) -> Result<Bytes, PageReconstructError> {
988 607565 : if !lsn.is_valid() {
989 0 : return Err(PageReconstructError::Other(anyhow::anyhow!("Invalid LSN")));
990 607565 : }
991 607565 :
992 607565 : // This check is debug-only because of the cost of hashing, and because it's a double-check: we
993 607565 : // already checked the key against the shard_identity when looking up the Timeline from
994 607565 : // page_service.
995 607565 : debug_assert!(!self.shard_identity.is_key_disposable(&key));
996 :
997 607565 : let keyspace = KeySpace {
998 607565 : ranges: vec![key..key.next()],
999 607565 : };
1000 607565 :
1001 607565 : // Initialise the reconstruct state for the key with the cache
1002 607565 : // entry returned above.
1003 607565 : let mut reconstruct_state = ValuesReconstructState::new();
1004 :
1005 607565 : let vectored_res = self
1006 607565 : .get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx)
1007 607565 : .await;
1008 :
1009 607565 : let key_value = vectored_res?.pop_first();
1010 607559 : match key_value {
1011 607547 : Some((got_key, value)) => {
1012 607547 : if got_key != key {
1013 0 : error!(
1014 0 : "Expected {}, but singular vectored get returned {}",
1015 : key, got_key
1016 : );
1017 0 : Err(PageReconstructError::Other(anyhow!(
1018 0 : "Singular vectored get returned wrong key"
1019 0 : )))
1020 : } else {
1021 607547 : value
1022 : }
1023 : }
1024 12 : None => Err(PageReconstructError::MissingKey(MissingKeyError {
1025 12 : key,
1026 12 : shard: self.shard_identity.get_shard_number(&key),
1027 12 : cont_lsn: Lsn(0),
1028 12 : request_lsn: lsn,
1029 12 : ancestor_lsn: None,
1030 12 : backtrace: None,
1031 12 : })),
1032 : }
1033 607565 : }
1034 :
1035 : pub(crate) const MAX_GET_VECTORED_KEYS: u64 = 32;
1036 : pub(crate) const VEC_GET_LAYERS_VISITED_WARN_THRESH: f64 = 512.0;
1037 :
1038 : /// Look up multiple page versions at a given LSN
1039 : ///
1040 : /// This naive implementation will be replaced with a more efficient one
1041 : /// which actually vectorizes the read path.
1042 19534 : pub(crate) async fn get_vectored(
1043 19534 : &self,
1044 19534 : keyspace: KeySpace,
1045 19534 : lsn: Lsn,
1046 19534 : ctx: &RequestContext,
1047 19534 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1048 19534 : if !lsn.is_valid() {
1049 0 : return Err(GetVectoredError::InvalidLsn(lsn));
1050 19534 : }
1051 19534 :
1052 19534 : let key_count = keyspace.total_raw_size().try_into().unwrap();
1053 19534 : if key_count > Timeline::MAX_GET_VECTORED_KEYS {
1054 0 : return Err(GetVectoredError::Oversized(key_count));
1055 19534 : }
1056 :
1057 39068 : for range in &keyspace.ranges {
1058 19534 : let mut key = range.start;
1059 39278 : while key != range.end {
1060 19744 : assert!(!self.shard_identity.is_key_disposable(&key));
1061 19744 : key = key.next();
1062 : }
1063 : }
1064 :
1065 19534 : trace!(
1066 0 : "get vectored request for {:?}@{} from task kind {:?}",
1067 0 : keyspace,
1068 0 : lsn,
1069 0 : ctx.task_kind(),
1070 : );
1071 :
1072 19534 : let start = crate::metrics::GET_VECTORED_LATENCY
1073 19534 : .for_task_kind(ctx.task_kind())
1074 19534 : .map(|metric| (metric, Instant::now()));
1075 :
1076 19534 : let res = self
1077 19534 : .get_vectored_impl(
1078 19534 : keyspace.clone(),
1079 19534 : lsn,
1080 19534 : &mut ValuesReconstructState::new(),
1081 19534 : ctx,
1082 19534 : )
1083 19534 : .await;
1084 :
1085 19534 : if let Some((metric, start)) = start {
1086 0 : let elapsed = start.elapsed();
1087 0 : metric.observe(elapsed.as_secs_f64());
1088 19534 : }
1089 :
1090 19534 : res
1091 19534 : }
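    /// Illustrative sketch, not part of the original source: callers must keep
    /// a single `get_vectored` request within `MAX_GET_VECTORED_KEYS` keys.
    /// This assumes `KeySpaceAccum`'s accumulator API for building a keyspace.
    #[cfg(test)]
    fn bounded_keyspace_sketch(start: Key) -> KeySpace {
        let mut accum = KeySpaceAccum::default();
        let mut key = start;
        for _ in 0..Self::MAX_GET_VECTORED_KEYS {
            accum.add_key(key);
            key = key.next();
        }
        accum.to_keyspace()
    }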
1092 :
1093 : /// Scan the keyspace and return all existing key-value pairs in it. This is currently implemented
1094 : /// on top of vectored get. A normal vectored get throws an error when a key in the keyspace is not
1095 : /// found during the search; the scan interface instead returns all existing key-value pairs and does
1096 : /// not expect every single key in the keyspace to be found. The semantics are closer to the RocksDB
1097 : /// scan iterator interface. We could later optimize this interface to avoid some checks in the vectored
1098 : /// get path that maintain and split the probing and to-be-probed keyspaces. We also need to ensure that
1099 : /// the scan operation will not cause OOM in the future.
1100 12 : pub(crate) async fn scan(
1101 12 : &self,
1102 12 : keyspace: KeySpace,
1103 12 : lsn: Lsn,
1104 12 : ctx: &RequestContext,
1105 12 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1106 12 : if !lsn.is_valid() {
1107 0 : return Err(GetVectoredError::InvalidLsn(lsn));
1108 12 : }
1109 12 :
1110 12 : trace!(
1111 0 : "key-value scan request for {:?}@{} from task kind {:?}",
1112 0 : keyspace,
1113 0 : lsn,
1114 0 : ctx.task_kind()
1115 : );
1116 :
1117 : // We should generalize this into Keyspace::contains in the future.
1118 24 : for range in &keyspace.ranges {
1119 12 : if range.start.field1 < METADATA_KEY_BEGIN_PREFIX
1120 12 : || range.end.field1 > METADATA_KEY_END_PREFIX
1121 : {
1122 0 : return Err(GetVectoredError::Other(anyhow::anyhow!(
1123 0 : "only metadata keyspace can be scanned"
1124 0 : )));
1125 12 : }
1126 : }
1127 :
1128 12 : let start = crate::metrics::SCAN_LATENCY
1129 12 : .for_task_kind(ctx.task_kind())
1130 12 : .map(ScanLatencyOngoingRecording::start_recording);
1131 :
1132 12 : let vectored_res = self
1133 12 : .get_vectored_impl(
1134 12 : keyspace.clone(),
1135 12 : lsn,
1136 12 : &mut ValuesReconstructState::default(),
1137 12 : ctx,
1138 12 : )
1139 12 : .await;
1140 :
1141 12 : if let Some(recording) = start {
1142 0 : recording.observe();
1143 12 : }
1144 :
1145 12 : vectored_res
1146 12 : }
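    /// Illustrative sketch, not part of the original source: a hypothetical
    /// caller of `scan`. Unlike a plain vectored get, keys absent from the
    /// timeline are simply omitted from the result instead of surfacing as
    /// `MissingKey` errors.
    #[cfg(test)]
    async fn scan_found_keys_sketch(
        &self,
        keyspace: KeySpace,
        lsn: Lsn,
        ctx: &RequestContext,
    ) -> anyhow::Result<usize> {
        let results = self.scan(keyspace, lsn, ctx).await?;
        // Count only the keys that were actually found and reconstructed.
        Ok(results.into_values().filter(|v| v.is_ok()).count())
    }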
1147 :
1148 627417 : pub(super) async fn get_vectored_impl(
1149 627417 : &self,
1150 627417 : keyspace: KeySpace,
1151 627417 : lsn: Lsn,
1152 627417 : reconstruct_state: &mut ValuesReconstructState,
1153 627417 : ctx: &RequestContext,
1154 627417 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1155 627417 : let get_kind = if keyspace.total_raw_size() == 1 {
1156 626973 : GetKind::Singular
1157 : } else {
1158 444 : GetKind::Vectored
1159 : };
1160 :
1161 627417 : let get_data_timer = crate::metrics::GET_RECONSTRUCT_DATA_TIME
1162 627417 : .for_get_kind(get_kind)
1163 627417 : .start_timer();
1164 627417 : self.get_vectored_reconstruct_data(keyspace.clone(), lsn, reconstruct_state, ctx)
1165 627417 : .await?;
1166 627401 : get_data_timer.stop_and_record();
1167 627401 :
1168 627401 : let reconstruct_timer = crate::metrics::RECONSTRUCT_TIME
1169 627401 : .for_get_kind(get_kind)
1170 627401 : .start_timer();
1171 627401 : let mut results: BTreeMap<Key, Result<Bytes, PageReconstructError>> = BTreeMap::new();
1172 627401 : let layers_visited = reconstruct_state.get_layers_visited();
1173 :
1174 667727 : for (key, res) in std::mem::take(&mut reconstruct_state.keys) {
1175 667727 : match res {
1176 0 : Err(err) => {
1177 0 : results.insert(key, Err(err));
1178 0 : }
1179 667727 : Ok(state) => {
1180 667727 : let state = ValueReconstructState::from(state);
1181 :
1182 667727 : let reconstruct_res = self.reconstruct_value(key, lsn, state).await;
1183 667727 : results.insert(key, reconstruct_res);
1184 : }
1185 : }
1186 : }
1187 627401 : reconstruct_timer.stop_and_record();
1188 627401 :
1189 627401 : // For aux file keys (v1 or v2) the vectored read path does not return an error
1190 627401 : // when they're missing. Instead they are omitted from the resulting btree
1191 627401 : // (this is a requirement, not a bug). Skip updating the metric in these cases
1192 627401 : // to avoid a division by zero yielding an infinite average.
1193 627401 : if !results.is_empty() {
1194 627191 : let avg = layers_visited as f64 / results.len() as f64;
1195 627191 : if avg >= Self::VEC_GET_LAYERS_VISITED_WARN_THRESH {
1196 0 : use utils::rate_limit::RateLimit;
1197 0 : static LOGGED: Lazy<Mutex<RateLimit>> =
1198 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(60))));
1199 0 : let mut rate_limit = LOGGED.lock().unwrap();
1200 0 : rate_limit.call(|| {
1201 0 : tracing::info!(
1202 0 : shard_id = %self.tenant_shard_id.shard_slug(),
1203 0 : lsn = %lsn,
1204 0 : "Vectored read for {} visited {} layers on average per key and {} in total. {}/{} pages were returned",
1205 0 : keyspace, avg, layers_visited, results.len(), keyspace.total_raw_size());
1206 0 : });
1207 627191 : }
1208 :
1209 : // Note that this is an approximation. Tracking the exact number of layers visited
1210 : // per key requires virtually unbounded memory usage and is inefficient
1211 : // (i.e. segment tree tracking each range queried from a layer)
1212 627191 : crate::metrics::VEC_READ_NUM_LAYERS_VISITED.observe(avg);
1213 210 : }
1214 :
1215 627401 : Ok(results)
1216 627417 : }
1217 :
1218 : /// Get last or prev record separately. Same as get_last_record_rlsn().last/prev.
1219 274326 : pub(crate) fn get_last_record_lsn(&self) -> Lsn {
1220 274326 : self.last_record_lsn.load().last
1221 274326 : }
1222 :
1223 0 : pub(crate) fn get_prev_record_lsn(&self) -> Lsn {
1224 0 : self.last_record_lsn.load().prev
1225 0 : }
1226 :
1227 : /// Atomically get both last and prev.
1228 228 : pub(crate) fn get_last_record_rlsn(&self) -> RecordLsn {
1229 228 : self.last_record_lsn.load()
1230 228 : }
1231 :
1232 : /// Subscribe to callers of wait_lsn(). The value of the channel is None if there are no
1233 : /// wait_lsn() calls in progress, and Some(Lsn) if there is an active waiter for wait_lsn().
1234 0 : pub(crate) fn subscribe_for_wait_lsn_updates(&self) -> watch::Receiver<Option<Lsn>> {
1235 0 : self.last_record_lsn.status_receiver()
1236 0 : }
1237 :
1238 418 : pub(crate) fn get_disk_consistent_lsn(&self) -> Lsn {
1239 418 : self.disk_consistent_lsn.load()
1240 418 : }
1241 :
1242 : /// remote_consistent_lsn from the perspective of the tenant's current generation,
1243 : /// not validated with control plane yet.
1244 : /// See [`Self::get_remote_consistent_lsn_visible`].
1245 0 : pub(crate) fn get_remote_consistent_lsn_projected(&self) -> Option<Lsn> {
1246 0 : self.remote_client.remote_consistent_lsn_projected()
1247 0 : }
1248 :
1249 : /// remote_consistent_lsn which the tenant is guaranteed not to go backward from,
1250 : /// i.e. a value of remote_consistent_lsn_projected which has undergone
1251 : /// generation validation in the deletion queue.
1252 0 : pub(crate) fn get_remote_consistent_lsn_visible(&self) -> Option<Lsn> {
1253 0 : self.remote_client.remote_consistent_lsn_visible()
1254 0 : }
1255 :
1256 : /// The sum of the file size of all historic layers in the layer map.
1257 : /// This method makes no distinction between local and remote layers.
1258 : /// Hence, the result **does not represent local filesystem usage**.
1259 0 : pub(crate) async fn layer_size_sum(&self) -> u64 {
1260 0 : let guard = self.layers.read().await;
1261 0 : guard.layer_size_sum()
1262 0 : }
1263 :
1264 0 : pub(crate) fn resident_physical_size(&self) -> u64 {
1265 0 : self.metrics.resident_physical_size_get()
1266 0 : }
1267 :
1268 0 : pub(crate) fn get_directory_metrics(&self) -> [u64; DirectoryKind::KINDS_NUM] {
1269 0 : array::from_fn(|idx| self.directory_metrics[idx].load(AtomicOrdering::Relaxed))
1270 0 : }
1271 :
1272 : ///
1273 : /// Wait until WAL has been received and processed up to this LSN.
1274 : ///
1275 : /// You should call this before any of the other get_* or list_* functions. Calling
1276 : /// those functions with an LSN that has not been processed yet is an error.
1277 : ///
1278 228428 : pub(crate) async fn wait_lsn(
1279 228428 : &self,
1280 228428 : lsn: Lsn,
1281 228428 : who_is_waiting: WaitLsnWaiter<'_>,
1282 228428 : ctx: &RequestContext, /* Prepare for use by cancellation */
1283 228428 : ) -> Result<(), WaitLsnError> {
1284 228428 : let state = self.current_state();
1285 228428 : if self.cancel.is_cancelled() || matches!(state, TimelineState::Stopping) {
1286 0 : return Err(WaitLsnError::Shutdown);
1287 228428 : } else if !matches!(state, TimelineState::Active) {
1288 0 : return Err(WaitLsnError::BadState(state));
1289 228428 : }
1290 228428 :
1291 228428 : if cfg!(debug_assertions) {
1292 228428 : match ctx.task_kind() {
1293 : TaskKind::WalReceiverManager
1294 : | TaskKind::WalReceiverConnectionHandler
1295 : | TaskKind::WalReceiverConnectionPoller => {
1296 0 : let is_myself = match who_is_waiting {
1297 0 : WaitLsnWaiter::Timeline(waiter) => Weak::ptr_eq(&waiter.myself, &self.myself),
1298 0 : WaitLsnWaiter::Tenant | WaitLsnWaiter::PageService => unreachable!("tenant or page_service context are not expected to have task kind {:?}", ctx.task_kind()),
1299 : };
1300 0 : if is_myself {
1301 0 : if let Err(current) = self.last_record_lsn.would_wait_for(lsn) {
1302 : // walingest is the only one that can advance last_record_lsn; it should make sure to never reach here
1303 0 : panic!("this timeline's walingest task is calling wait_lsn({lsn}) but we only have last_record_lsn={current}; would deadlock");
1304 0 : }
1305 0 : } else {
1306 0 : // if another timeline's walreceiver is waiting for us, there's no deadlock risk because
1307 0 : // our walreceiver task can make progress independent of theirs
1308 0 : }
1309 : }
1310 228428 : _ => {}
1311 : }
1312 0 : }
1313 :
1314 228428 : let _timer = crate::metrics::WAIT_LSN_TIME.start_timer();
1315 228428 :
1316 228428 : match self
1317 228428 : .last_record_lsn
1318 228428 : .wait_for_timeout(lsn, self.conf.wait_lsn_timeout)
1319 228428 : .await
1320 : {
1321 228428 : Ok(()) => Ok(()),
1322 0 : Err(e) => {
1323 : use utils::seqwait::SeqWaitError::*;
1324 0 : match e {
1325 0 : Shutdown => Err(WaitLsnError::Shutdown),
1326 : Timeout => {
1327 : // don't count the time spent waiting for lock below, and also in walreceiver.status(), towards the wait_lsn_time_histo
1328 0 : drop(_timer);
1329 0 : let walreceiver_status = self.walreceiver_status();
1330 0 : Err(WaitLsnError::Timeout(format!(
1331 0 : "Timed out while waiting for WAL record at LSN {} to arrive, last_record_lsn {} disk consistent LSN={}, WalReceiver status: {}",
1332 0 : lsn,
1333 0 : self.get_last_record_lsn(),
1334 0 : self.get_disk_consistent_lsn(),
1335 0 : walreceiver_status,
1336 0 : )))
1337 : }
1338 : }
1339 : }
1340 : }
1341 228428 : }
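    /// Illustrative sketch, not part of the original source: the calling
    /// convention documented on [`Self::wait_lsn`]. Wait for the LSN to
    /// arrive before reading at it, so the read cannot observe a
    /// not-yet-ingested LSN.
    #[cfg(test)]
    async fn wait_then_get_sketch(
        &self,
        key: Key,
        lsn: Lsn,
        ctx: &RequestContext,
    ) -> anyhow::Result<Bytes> {
        self.wait_lsn(lsn, WaitLsnWaiter::PageService, ctx).await?;
        Ok(self.get(key, lsn, ctx).await?)
    }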
1342 :
1343 0 : pub(crate) fn walreceiver_status(&self) -> String {
1344 0 : match &*self.walreceiver.lock().unwrap() {
1345 0 : None => "stopping or stopped".to_string(),
1346 0 : Some(walreceiver) => match walreceiver.status() {
1347 0 : Some(status) => status.to_human_readable_string(),
1348 0 : None => "Not active".to_string(),
1349 : },
1350 : }
1351 0 : }
1352 :
1353 : /// Check that it is valid to request operations with that lsn.
1354 232 : pub(crate) fn check_lsn_is_in_scope(
1355 232 : &self,
1356 232 : lsn: Lsn,
1357 232 : latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
1358 232 : ) -> anyhow::Result<()> {
1359 232 : ensure!(
1360 232 : lsn >= **latest_gc_cutoff_lsn,
1361 4 : "LSN {} is earlier than latest GC cutoff {} (we might've already garbage collected needed data)",
1362 4 : lsn,
1363 4 : **latest_gc_cutoff_lsn,
1364 : );
1365 228 : Ok(())
1366 232 : }
1367 :
1368 : /// Initializes an LSN lease. The function will return an error if the requested LSN is less than the `latest_gc_cutoff_lsn`.
1369 10 : pub(crate) fn init_lsn_lease(
1370 10 : &self,
1371 10 : lsn: Lsn,
1372 10 : length: Duration,
1373 10 : ctx: &RequestContext,
1374 10 : ) -> anyhow::Result<LsnLease> {
1375 10 : self.make_lsn_lease(lsn, length, true, ctx)
1376 10 : }
1377 :
1378 : /// Renews a lease at a particular LSN. The requested LSN is not validated against the `latest_gc_cutoff_lsn` when we are in the grace period.
1379 4 : pub(crate) fn renew_lsn_lease(
1380 4 : &self,
1381 4 : lsn: Lsn,
1382 4 : length: Duration,
1383 4 : ctx: &RequestContext,
1384 4 : ) -> anyhow::Result<LsnLease> {
1385 4 : self.make_lsn_lease(lsn, length, false, ctx)
1386 4 : }
1387 :
1388 : /// Obtains a temporary lease blocking garbage collection for the given LSN.
1389 : ///
1390 : /// If we are in `AttachedSingle` mode and is not blocked by the lsn lease deadline, this function will error
1391 : /// if the requesting LSN is less than the `latest_gc_cutoff_lsn` and there is no existing request present.
1392 : ///
1393 : /// If there is an existing lease in the map, the lease will be renewed only if the request extends the lease.
1394 : /// The returned lease is therefore the maximum between the existing lease and the requesting lease.
1395 14 : fn make_lsn_lease(
1396 14 : &self,
1397 14 : lsn: Lsn,
1398 14 : length: Duration,
1399 14 : init: bool,
1400 14 : _ctx: &RequestContext,
1401 14 : ) -> anyhow::Result<LsnLease> {
1402 12 : let lease = {
1403 : // Normalize the requested LSN to be aligned, and move to the first record
1404 : // if it points to the beginning of the page (header).
1405 14 : let lsn = xlog_utils::normalize_lsn(lsn, WAL_SEGMENT_SIZE);
1406 14 :
1407 14 : let mut gc_info = self.gc_info.write().unwrap();
1408 14 :
1409 14 : let valid_until = SystemTime::now() + length;
1410 14 :
1411 14 : let entry = gc_info.leases.entry(lsn);
1412 14 :
1413 14 : match entry {
1414 6 : Entry::Occupied(mut occupied) => {
1415 6 : let existing_lease = occupied.get_mut();
1416 6 : if valid_until > existing_lease.valid_until {
1417 2 : existing_lease.valid_until = valid_until;
1418 2 : let dt: DateTime<Utc> = valid_until.into();
1419 2 : info!("lease extended to {}", dt);
1420 : } else {
1421 4 : let dt: DateTime<Utc> = existing_lease.valid_until.into();
1422 4 : info!("existing lease covers greater length, valid until {}", dt);
1423 : }
1424 :
1425 6 : existing_lease.clone()
1426 : }
1427 8 : Entry::Vacant(vacant) => {
1428 : // Reject already GC-ed LSN (lsn < latest_gc_cutoff) if we are in AttachedSingle and
1429 : // not blocked by the lsn lease deadline.
1430 8 : let validate = {
1431 8 : let conf = self.tenant_conf.load();
1432 8 : conf.location.attach_mode == AttachmentMode::Single
1433 8 : && !conf.is_gc_blocked_by_lsn_lease_deadline()
1434 : };
1435 :
1436 8 : if init || validate {
1437 8 : let latest_gc_cutoff_lsn = self.get_latest_gc_cutoff_lsn();
1438 8 : if lsn < *latest_gc_cutoff_lsn {
1439 2 : bail!("tried to request a page version that was garbage collected. requested at {} gc cutoff {}", lsn, *latest_gc_cutoff_lsn);
1440 6 : }
1441 0 : }
1442 :
1443 6 : let dt: DateTime<Utc> = valid_until.into();
1444 6 : info!("lease created, valid until {}", dt);
1445 6 : vacant.insert(LsnLease { valid_until }).clone()
1446 : }
1447 : }
1448 : };
1449 :
1450 12 : Ok(lease)
1451 14 : }
1452 :
1453 : /// Freeze the current open in-memory layer. It will be written to disk on next iteration.
1454 : /// Returns the flush request ID which can be awaited with wait_flush_completion().
1455 0 : #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
1456 : pub(crate) async fn freeze(&self) -> Result<u64, FlushLayerError> {
1457 : self.freeze0().await
1458 : }
1459 :
1460 : /// Freeze and flush the open in-memory layer, waiting for it to be written to disk.
1461 1096 : #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
1462 : pub(crate) async fn freeze_and_flush(&self) -> Result<(), FlushLayerError> {
1463 : self.freeze_and_flush0().await
1464 : }
1465 :
1466 : /// Freeze the current open in-memory layer. It will be written to disk on next iteration.
1467 : /// Returns the flush request ID which can be awaited with wait_flush_completion().
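     : ///
     : /// Illustrative usage; this two-step sequence is exactly what
     : /// [`Self::freeze_and_flush0`] does:
     : ///
     : /// ```ignore
     : /// let token = timeline.freeze0().await?;
     : /// timeline.wait_flush_completion(token).await?;
     : /// ```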
1468 1096 : pub(crate) async fn freeze0(&self) -> Result<u64, FlushLayerError> {
1469 1096 : let mut g = self.write_lock.lock().await;
1470 1096 : let to_lsn = self.get_last_record_lsn();
1471 1096 : self.freeze_inmem_layer_at(to_lsn, &mut g).await
1472 1096 : }
1473 :
1474 : // This exists to provide a non-span creating version of `freeze_and_flush` we can call without
1475 : // polluting the span hierarchy.
1476 1096 : pub(crate) async fn freeze_and_flush0(&self) -> Result<(), FlushLayerError> {
1477 1096 : let token = self.freeze0().await?;
1478 1096 : self.wait_flush_completion(token).await
1479 1096 : }
1480 :
1481 : // Check if an open ephemeral layer should be closed: this provides
1482 : // background enforcement of checkpoint interval if there is no active WAL receiver, to avoid keeping
1483 : // an ephemeral layer open forever when idle. It also freezes layers if the global limit on
1484 : // ephemeral layer bytes has been breached.
1485 0 : pub(super) async fn maybe_freeze_ephemeral_layer(&self) {
1486 0 : let Ok(mut write_guard) = self.write_lock.try_lock() else {
1487 : // If the write lock is held, there is an active wal receiver: rolling open layers
1488 : // is their responsibility while they hold this lock.
1489 0 : return;
1490 : };
1491 :
1492 : // FIXME: why not early exit? because before #7927 the state would have been cleared every
1493 : // time, and this was missed.
1494 : // if write_guard.is_none() { return; }
1495 :
1496 0 : let Ok(layers_guard) = self.layers.try_read() else {
1497 : // Don't block if the layer lock is busy
1498 0 : return;
1499 : };
1500 :
1501 0 : let Ok(lm) = layers_guard.layer_map() else {
1502 0 : return;
1503 : };
1504 :
1505 0 : let Some(open_layer) = &lm.open_layer else {
1506 : // If there is no open layer, we have no layer freezing to do. However, we might need to generate
1507 : // some updates to disk_consistent_lsn and remote_consistent_lsn, in case we ingested some WAL regions
1508 : // that didn't result in writes to this shard.
1509 :
1510 : // Must not hold the layers lock while waiting for a flush.
1511 0 : drop(layers_guard);
1512 0 :
1513 0 : let last_record_lsn = self.get_last_record_lsn();
1514 0 : let disk_consistent_lsn = self.get_disk_consistent_lsn();
1515 0 : if last_record_lsn > disk_consistent_lsn {
1516 : // We have no open layer, but disk_consistent_lsn is behind the last record: this indicates
1517 : // we are a sharded tenant and have skipped some WAL
1518 0 : let last_freeze_ts = *self.last_freeze_ts.read().unwrap();
1519 0 : if last_freeze_ts.elapsed() >= self.get_checkpoint_timeout() {
1520 : // Only do this if we have been layer-less longer than get_checkpoint_timeout,
1521 : // so that a shard without any data ingested (yet) doesn't write a remote
1522 : // index as soon as it sees its LSN advance; we wait until we have been
1523 : // layer-less for some time.
1524 0 : tracing::debug!(
1525 0 : "Advancing disk_consistent_lsn past WAL ingest gap {} -> {}",
1526 : disk_consistent_lsn,
1527 : last_record_lsn
1528 : );
1529 :
1530 : // The flush loop will update remote consistent LSN as well as disk consistent LSN.
1531 : // We know there is no open layer, so we can request freezing without actually
1532 : // freezing anything. This is true even if we have dropped the layers_guard, we
1533 : // still hold the write_guard.
1534 0 : let _ = async {
1535 0 : let token = self
1536 0 : .freeze_inmem_layer_at(last_record_lsn, &mut write_guard)
1537 0 : .await?;
1538 0 : self.wait_flush_completion(token).await
1539 0 : }
1540 0 : .await;
1541 0 : }
1542 0 : }
1543 :
1544 0 : return;
1545 : };
1546 :
1547 0 : let Some(current_size) = open_layer.try_len() else {
1548 : // Unexpected: since we hold the write guard, nobody else should be writing to this layer, so
1549 : // taking the read lock to get its size should always succeed.
1550 0 : tracing::warn!("Lock conflict while reading size of open layer");
1551 0 : return;
1552 : };
1553 :
1554 0 : let current_lsn = self.get_last_record_lsn();
1555 :
1556 0 : let checkpoint_distance_override = open_layer.tick().await;
1557 :
1558 0 : if let Some(size_override) = checkpoint_distance_override {
1559 0 : if current_size > size_override {
1560 : // This is not harmful, but it only happens in relatively rare cases where
1561 : // time-based checkpoints are not happening fast enough to keep the amount of
1562 : // ephemeral data within configured limits. It's a sign of stress on the system.
1563 0 : tracing::info!("Early-rolling open layer at size {current_size} (limit {size_override}) due to dirty data pressure");
1564 0 : }
1565 0 : }
1566 :
1567 0 : let checkpoint_distance =
1568 0 : checkpoint_distance_override.unwrap_or(self.get_checkpoint_distance());
1569 0 :
1570 0 : if self.should_roll(
1571 0 : current_size,
1572 0 : current_size,
1573 0 : checkpoint_distance,
1574 0 : self.get_last_record_lsn(),
1575 0 : self.last_freeze_at.load(),
1576 0 : open_layer.get_opened_at(),
1577 0 : ) {
1578 0 : match open_layer.info() {
1579 0 : InMemoryLayerInfo::Frozen { lsn_start, lsn_end } => {
1580 0 : // We may reach this point if the layer was already frozen but not yet flushed: flushing
1581 0 : // happens asynchronously in the background.
1582 0 : tracing::debug!(
1583 0 : "Not freezing open layer, it's already frozen ({lsn_start}..{lsn_end})"
1584 : );
1585 : }
1586 : InMemoryLayerInfo::Open { .. } => {
1587 : // Upgrade to a write lock and freeze the layer
1588 0 : drop(layers_guard);
1589 0 : let res = self
1590 0 : .freeze_inmem_layer_at(current_lsn, &mut write_guard)
1591 0 : .await;
1592 :
1593 0 : if let Err(e) = res {
1594 0 : tracing::info!(
1595 0 : "failed to flush frozen layer after background freeze: {e:#}"
1596 : );
1597 0 : }
1598 : }
1599 : }
1600 0 : }
1601 0 : }
1602 :
1603 : /// Checks if the internal state of the timeline is consistent with it being able to be offloaded.
1604 : ///
1605 : /// This is necessary but not sufficient for offloading the timeline, as it might have
1606 : /// child timelines that are not offloaded yet.
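     : ///
     : /// Illustrative caller-side sketch (not a doctest):
     : ///
     : /// ```ignore
     : /// let (ok, reason) = timeline.can_offload();
     : /// if !ok {
     : ///     tracing::info!("not offloading timeline: {reason}");
     : /// }
     : /// ```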
1607 0 : pub(crate) fn can_offload(&self) -> (bool, &'static str) {
1608 0 : if self.remote_client.is_archived() != Some(true) {
1609 0 : return (false, "the timeline is not archived");
1610 0 : }
1611 0 : if !self.remote_client.no_pending_work() {
1612 : // if the remote client is still processing some work, we can't offload
1613 0 : return (false, "the upload queue is not drained yet");
1614 0 : }
1615 0 :
1616 0 : (true, "ok")
1617 0 : }
1618 :
1619 : /// Outermost timeline compaction operation; downloads needed layers. Returns whether we have pending
1620 : /// compaction tasks.
1621 364 : pub(crate) async fn compact(
1622 364 : self: &Arc<Self>,
1623 364 : cancel: &CancellationToken,
1624 364 : flags: EnumSet<CompactFlags>,
1625 364 : ctx: &RequestContext,
1626 364 : ) -> Result<bool, CompactionError> {
1627 364 : self.compact_with_options(
1628 364 : cancel,
1629 364 : CompactOptions {
1630 364 : flags,
1631 364 : compact_key_range: None,
1632 364 : compact_lsn_range: None,
1633 364 : sub_compaction: false,
1634 364 : sub_compaction_max_job_size_mb: None,
1635 364 : },
1636 364 : ctx,
1637 364 : )
1638 364 : .await
1639 364 : }
1640 :
1641 : /// Outermost timeline compaction operation; downloads needed layers. Returns whether we have pending
1642 : /// compaction tasks.
1643 364 : pub(crate) async fn compact_with_options(
1644 364 : self: &Arc<Self>,
1645 364 : cancel: &CancellationToken,
1646 364 : options: CompactOptions,
1647 364 : ctx: &RequestContext,
1648 364 : ) -> Result<bool, CompactionError> {
1649 364 : // most likely the cancellation token is from a background task, but in tests it could be the
1650 364 : // request task as well.
1651 364 :
1652 364 : let prepare = async move {
1653 364 : let guard = self.compaction_lock.lock().await;
1654 :
1655 364 : let permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
1656 364 : BackgroundLoopKind::Compaction,
1657 364 : ctx,
1658 364 : )
1659 364 : .await;
1660 :
1661 364 : (guard, permit)
1662 364 : };
1663 :
1664 : // this wait probably never needs any "long time spent" logging, because we already nag if
1665 : // the compaction task goes over its period (20s), which happens quite often in production.
1666 364 : let (_guard, _permit) = tokio::select! {
1667 364 : tuple = prepare => { tuple },
1668 364 : _ = self.cancel.cancelled() => return Ok(false),
1669 364 : _ = cancel.cancelled() => return Ok(false),
1670 : };
1671 :
1672 364 : let last_record_lsn = self.get_last_record_lsn();
1673 364 :
1674 364 : // The last record LSN could be zero if the timeline was just created
1675 364 : if !last_record_lsn.is_valid() {
1676 0 : warn!("Skipping compaction for potentially just initialized timeline, it has invalid last record lsn: {last_record_lsn}");
1677 0 : return Ok(false);
1678 364 : }
1679 364 :
1680 364 : match self.get_compaction_algorithm_settings().kind {
1681 : CompactionAlgorithm::Tiered => {
1682 0 : self.compact_tiered(cancel, ctx).await?;
1683 0 : Ok(false)
1684 : }
1685 364 : CompactionAlgorithm::Legacy => self.compact_legacy(cancel, options, ctx).await,
1686 : }
1687 364 : }
1688 :
1689 : /// Mutate the timeline with a [`TimelineWriter`].
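     : ///
     : /// The returned writer holds `write_lock` until it is dropped, so at most one
     : /// writer exists at a time. Illustrative usage:
     : ///
     : /// ```ignore
     : /// let writer = timeline.writer().await; // acquires write_lock
     : /// // ... ingest values through `writer` ...
     : /// drop(writer); // releases the lock
     : /// ```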
1690 5133166 : pub(crate) async fn writer(&self) -> TimelineWriter<'_> {
1691 5133166 : TimelineWriter {
1692 5133166 : tl: self,
1693 5133166 : write_guard: self.write_lock.lock().await,
1694 : }
1695 5133166 : }
1696 :
1697 0 : pub(crate) fn activate(
1698 0 : self: &Arc<Self>,
1699 0 : parent: Arc<crate::tenant::Tenant>,
1700 0 : broker_client: BrokerClientChannel,
1701 0 : background_jobs_can_start: Option<&completion::Barrier>,
1702 0 : ctx: &RequestContext,
1703 0 : ) {
1704 0 : if self.tenant_shard_id.is_shard_zero() {
1705 0 : // Logical size is only maintained accurately on shard zero.
1706 0 : self.spawn_initial_logical_size_computation_task(ctx);
1707 0 : }
1708 0 : self.launch_wal_receiver(ctx, broker_client);
1709 0 : self.set_state(TimelineState::Active);
1710 0 : self.launch_eviction_task(parent, background_jobs_can_start);
1711 0 : }
1712 :
1713 : /// After this function returns, no timeline-scoped tasks are left running.
1714 : ///
1715 : /// The preferred pattern is:
1716 : /// - in any spawned tasks, keep Timeline::guard open + Timeline::cancel / child token
1717 : /// - if early shutdown (not just cancellation) of a sub-tree of tasks is required,
1718 : /// go the extra mile and keep track of JoinHandles
1719 : /// - Keep track of JoinHandles using a passed-down `Arc<Mutex<Option<JoinSet>>>` or similar,
1720 : /// instead of spawning directly on a runtime. It is a more composable / testable pattern (see the sketch at the end of this comment).
1721 : ///
1722 : /// For legacy reasons, we still have multiple tasks spawned using
1723 : /// `task_mgr::spawn(X, Some(tenant_id), Some(timeline_id))`.
1724 : /// We refer to these as "timeline-scoped task_mgr tasks".
1725 : /// Some of these tasks are already sensitive to Timeline::cancel while others are
1726 : /// not sensitive to Timeline::cancel and instead respect [`task_mgr::shutdown_token`]
1727 : /// or [`task_mgr::shutdown_watcher`].
1728 : /// We want to gradually convert the code base away from these.
1729 : ///
1730 : /// Here is an inventory of timeline-scoped task_mgr tasks that are still sensitive to
1731 : /// `task_mgr::shutdown_{token,watcher}` (there are also tenant-scoped and global-scoped
1732 : /// ones that aren't mentioned here):
1733 : /// - [`TaskKind::TimelineDeletionWorker`]
1734 : /// - NB: also used for tenant deletion
1735 : /// - [`TaskKind::RemoteUploadTask`]
1736 : /// - [`TaskKind::InitialLogicalSizeCalculation`]
1737 : /// - [`TaskKind::DownloadAllRemoteLayers`] (can we get rid of it?)
1738 : /// Inventory of timeline-scoped task_mgr tasks that use spawn but aren't sensitive:
1739 : /// - [`TaskKind::Eviction`]
1740 : /// - [`TaskKind::LayerFlushTask`]
1741 : /// - [`TaskKind::OndemandLogicalSizeCalculation`]
1742 : /// - [`TaskKind::GarbageCollector`] (immediate_gc is timeline-scoped)
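     : ///
     : /// A minimal sketch of the `JoinSet` pattern recommended above (illustrative
     : /// only, not the actual API used here):
     : ///
     : /// ```ignore
     : /// let tasks = Arc::new(Mutex::new(Some(tokio::task::JoinSet::new())));
     : /// // Spawning side: refuse to spawn once shutdown has taken the set.
     : /// if let Some(set) = tasks.lock().unwrap().as_mut() {
     : ///     set.spawn(async { /* timeline-scoped work */ });
     : /// }
     : /// // Shutdown side: take ownership, then await every handle.
     : /// let taken = tasks.lock().unwrap().take();
     : /// if let Some(mut set) = taken {
     : ///     while set.join_next().await.is_some() {}
     : /// }
     : /// ```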
1743 10 : pub(crate) async fn shutdown(&self, mode: ShutdownMode) {
1744 10 : debug_assert_current_span_has_tenant_and_timeline_id();
1745 10 :
1746 10 : // Regardless of whether we're going to try_freeze_and_flush
1747 10 : // or not, stop ingesting any more data. Walreceiver only provides
1748 10 : // cancellation but no "wait until gone", because it uses the Timeline::gate.
1749 10 : // So, only after the self.gate.close() below will we know for sure that
1750 10 : // no walreceiver tasks are left.
1751 10 : // For `try_freeze_and_flush=true`, this means that we might still be ingesting
1752 10 : // data during the call to `self.freeze_and_flush()` below.
1753 10 : // That's not ideal, but we don't have the concept of a ChildGuard,
1754 10 : // which is what we'd need to properly model early shutdown of the walreceiver
1755 10 : // task sub-tree before the other Timeline task sub-trees.
1756 10 : let walreceiver = self.walreceiver.lock().unwrap().take();
1757 10 : tracing::debug!(
1758 0 : is_some = walreceiver.is_some(),
1759 0 : "Waiting for WalReceiverManager..."
1760 : );
1761 10 : if let Some(walreceiver) = walreceiver {
1762 0 : walreceiver.cancel();
1763 10 : }
1764 : // ... and inform any waiters for newer LSNs that there won't be any.
1765 10 : self.last_record_lsn.shutdown();
1766 10 :
1767 10 : if let ShutdownMode::FreezeAndFlush = mode {
1768 6 : if let Some((open, frozen)) = self
1769 6 : .layers
1770 6 : .read()
1771 6 : .await
1772 6 : .layer_map()
1773 6 : .map(|lm| (lm.open_layer.is_some(), lm.frozen_layers.len()))
1774 6 : .ok()
1775 6 : .filter(|(open, frozen)| *open || *frozen > 0)
1776 : {
1777 0 : tracing::info!(?open, frozen, "flushing and freezing on shutdown");
1778 6 : } else {
1779 6 : // this is double-shutdown, ignore it
1780 6 : }
1781 :
1782 : // we shut down walreceiver above, so, we won't add anything more
1783 : // to the InMemoryLayer; freeze it and wait for all frozen layers
1784 : // to reach the disk & upload queue, then shut the upload queue and
1785 : // wait for it to drain.
1786 6 : match self.freeze_and_flush().await {
1787 : Ok(_) => {
1788 : // drain the upload queue
1789 : // if we did not wait for completion here, it might be our shutdown process
1790 : // didn't wait for remote uploads to complete at all, as new tasks can forever
1791 : // be spawned.
1792 : //
1793 : // what is problematic is the shutting down of RemoteTimelineClient, because
1794 : // obviously it does not make sense to stop while we wait for it, but what
1795 : // about corner cases like s3 suddenly hanging up?
1796 6 : self.remote_client.shutdown().await;
1797 : }
1798 : Err(FlushLayerError::Cancelled) => {
1799 : // this is likely the second shutdown, ignore silently.
1800 : // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080 is fixed.
1801 0 : debug_assert!(self.cancel.is_cancelled());
1802 : }
1803 0 : Err(e) => {
1804 0 : // Non-fatal. Shutdown is infallible. Failures to flush just mean that
1805 0 : // we have some extra WAL replay to do next time the timeline starts.
1806 0 : warn!("failed to freeze and flush: {e:#}");
1807 : }
1808 : }
1809 :
1810 : // `self.remote_client.shutdown().await` above should have already flushed everything from the queue, but
1811 : // we also do a final check here to ensure that the queue is empty.
1812 6 : if !self.remote_client.no_pending_work() {
1813 0 : warn!("still have pending work in remote upload queue, but continuing to shut down anyway");
1814 6 : }
1815 4 : }
1816 :
1817 10 : if let ShutdownMode::Reload = mode {
1818 : // drain the upload queue
1819 2 : self.remote_client.shutdown().await;
1820 2 : if !self.remote_client.no_pending_work() {
1821 0 : warn!("still have pending work in remote upload queue, but continuing to shut down anyway");
1822 2 : }
1823 8 : }
1824 :
1825 : // Signal any subscribers to our cancellation token to drop out
1826 10 : tracing::debug!("Cancelling CancellationToken");
1827 10 : self.cancel.cancel();
1828 10 :
1829 10 : // Prevent new page service requests from starting.
1830 10 : self.handles.shutdown();
1831 10 :
1832 10 : // Transition the remote_client into a state where it's only useful for timeline deletion.
1833 10 : // (The deletion use case is why we can't just hook up remote_client to Self::cancel.)
1834 10 : self.remote_client.stop();
1835 10 :
1836 10 : // As documented in remote_client.stop()'s doc comment, it's our responsibility
1837 10 : // to shut down the upload queue tasks.
1838 10 : // TODO: fix that, task management should be encapsulated inside remote_client.
1839 10 : task_mgr::shutdown_tasks(
1840 10 : Some(TaskKind::RemoteUploadTask),
1841 10 : Some(self.tenant_shard_id),
1842 10 : Some(self.timeline_id),
1843 10 : )
1844 10 : .await;
1845 :
1846 : // TODO: work toward making this a no-op. See this function's doc comment for more context.
1847 10 : tracing::debug!("Waiting for tasks...");
1848 10 : task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), Some(self.timeline_id)).await;
1849 :
1850 : {
1851 : // Allow any remaining in-memory layers to do cleanup -- until that, they hold the gate
1852 : // open.
1853 10 : let mut write_guard = self.write_lock.lock().await;
1854 10 : self.layers.write().await.shutdown(&mut write_guard);
1855 10 : }
1856 10 :
1857 10 : // Finally wait until any gate-holders are complete.
1858 10 : //
1859 10 : // TODO: once above shutdown_tasks is a no-op, we can close the gate before calling shutdown_tasks
1860 10 : // and use a TBD variant of shutdown_tasks that asserts that there were no tasks left.
1861 10 : self.gate.close().await;
1862 :
1863 10 : self.metrics.shutdown();
1864 10 : }
1865 :
1866 424 : pub(crate) fn set_state(&self, new_state: TimelineState) {
1867 424 : match (self.current_state(), new_state) {
1868 424 : (equal_state_1, equal_state_2) if equal_state_1 == equal_state_2 => {
1869 2 : info!("Ignoring new state, equal to the existing one: {equal_state_2:?}");
1870 : }
1871 0 : (st, TimelineState::Loading) => {
1872 0 : error!("ignoring transition from {st:?} into Loading state");
1873 : }
1874 0 : (TimelineState::Broken { .. }, new_state) => {
1875 0 : error!("Ignoring state update {new_state:?} for broken timeline");
1876 : }
1877 : (TimelineState::Stopping, TimelineState::Active) => {
1878 0 : error!("Not activating a Stopping timeline");
1879 : }
1880 422 : (_, new_state) => {
1881 422 : self.state.send_replace(new_state);
1882 422 : }
1883 : }
1884 424 : }
1885 :
1886 2 : pub(crate) fn set_broken(&self, reason: String) {
1887 2 : let backtrace_str: String = format!("{}", std::backtrace::Backtrace::force_capture());
1888 2 : let broken_state = TimelineState::Broken {
1889 2 : reason,
1890 2 : backtrace: backtrace_str,
1891 2 : };
1892 2 : self.set_state(broken_state);
1893 2 :
1894 2 : // Although the Broken state is not equivalent to shutdown() (shutdown will be called
1895 2 : // later when this tenant is detached or the process shuts down), firing the cancellation token
1896 2 : // here avoids the need for other tasks to watch for the Broken state explicitly.
1897 2 : self.cancel.cancel();
1898 2 : }
1899 :
1900 229442 : pub(crate) fn current_state(&self) -> TimelineState {
1901 229442 : self.state.borrow().clone()
1902 229442 : }
1903 :
1904 6 : pub(crate) fn is_broken(&self) -> bool {
1905 6 : matches!(&*self.state.borrow(), TimelineState::Broken { .. })
1906 6 : }
1907 :
1908 222 : pub(crate) fn is_active(&self) -> bool {
1909 222 : self.current_state() == TimelineState::Active
1910 222 : }
1911 :
1912 2 : pub(crate) fn is_archived(&self) -> Option<bool> {
1913 2 : self.remote_client.is_archived()
1914 2 : }
1915 :
1916 368 : pub(crate) fn is_stopping(&self) -> bool {
1917 368 : self.current_state() == TimelineState::Stopping
1918 368 : }
1919 :
1920 0 : pub(crate) fn subscribe_for_state_updates(&self) -> watch::Receiver<TimelineState> {
1921 0 : self.state.subscribe()
1922 0 : }
1923 :
1924 228430 : pub(crate) async fn wait_to_become_active(
1925 228430 : &self,
1926 228430 : _ctx: &RequestContext, // Prepare for use by cancellation
1927 228430 : ) -> Result<(), TimelineState> {
1928 228430 : let mut receiver = self.state.subscribe();
1929 : loop {
1930 228430 : let current_state = receiver.borrow().clone();
1931 228430 : match current_state {
1932 : TimelineState::Loading => {
1933 0 : receiver
1934 0 : .changed()
1935 0 : .await
1936 0 : .expect("holding a reference to self");
1937 : }
1938 : TimelineState::Active { .. } => {
1939 228428 : return Ok(());
1940 : }
1941 : TimelineState::Broken { .. } | TimelineState::Stopping => {
1942 : // There's no chance the timeline can transition back into ::Active
1943 2 : return Err(current_state);
1944 : }
1945 : }
1946 : }
1947 228430 : }
1948 :
1949 0 : pub(crate) async fn layer_map_info(
1950 0 : &self,
1951 0 : reset: LayerAccessStatsReset,
1952 0 : ) -> Result<LayerMapInfo, layer_manager::Shutdown> {
1953 0 : let guard = self.layers.read().await;
1954 0 : let layer_map = guard.layer_map()?;
1955 0 : let mut in_memory_layers = Vec::with_capacity(layer_map.frozen_layers.len() + 1);
1956 0 : if let Some(open_layer) = &layer_map.open_layer {
1957 0 : in_memory_layers.push(open_layer.info());
1958 0 : }
1959 0 : for frozen_layer in &layer_map.frozen_layers {
1960 0 : in_memory_layers.push(frozen_layer.info());
1961 0 : }
1962 :
1963 0 : let historic_layers = layer_map
1964 0 : .iter_historic_layers()
1965 0 : .map(|desc| guard.get_from_desc(&desc).info(reset))
1966 0 : .collect();
1967 0 :
1968 0 : Ok(LayerMapInfo {
1969 0 : in_memory_layers,
1970 0 : historic_layers,
1971 0 : })
1972 0 : }
1973 :
1974 0 : #[instrument(skip_all, fields(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))]
1975 : pub(crate) async fn download_layer(
1976 : &self,
1977 : layer_file_name: &LayerName,
1978 : ) -> anyhow::Result<Option<bool>> {
1979 : let Some(layer) = self.find_layer(layer_file_name).await? else {
1980 : return Ok(None);
1981 : };
1982 :
1983 : layer.download().await?;
1984 :
1985 : Ok(Some(true))
1986 : }
1987 :
1988 : /// Evict just one layer.
1989 : ///
1990 : /// Returns `Ok(None)` in the case where the layer could not be found by its `layer_file_name`.
1991 0 : pub(crate) async fn evict_layer(
1992 0 : &self,
1993 0 : layer_file_name: &LayerName,
1994 0 : ) -> anyhow::Result<Option<bool>> {
1995 0 : let _gate = self
1996 0 : .gate
1997 0 : .enter()
1998 0 : .map_err(|_| anyhow::anyhow!("Shutting down"))?;
1999 :
2000 0 : let Some(local_layer) = self.find_layer(layer_file_name).await? else {
2001 0 : return Ok(None);
2002 : };
2003 :
2004 : // curl has this by default
2005 0 : let timeout = std::time::Duration::from_secs(120);
2006 0 :
2007 0 : match local_layer.evict_and_wait(timeout).await {
2008 0 : Ok(()) => Ok(Some(true)),
2009 0 : Err(EvictionError::NotFound) => Ok(Some(false)),
2010 0 : Err(EvictionError::Downloaded) => Ok(Some(false)),
2011 0 : Err(EvictionError::Timeout) => Ok(Some(false)),
2012 : }
2013 0 : }
2014 :
2015 4803010 : fn should_roll(
2016 4803010 : &self,
2017 4803010 : layer_size: u64,
2018 4803010 : projected_layer_size: u64,
2019 4803010 : checkpoint_distance: u64,
2020 4803010 : projected_lsn: Lsn,
2021 4803010 : last_freeze_at: Lsn,
2022 4803010 : opened_at: Instant,
2023 4803010 : ) -> bool {
2024 4803010 : let distance = projected_lsn.widening_sub(last_freeze_at);
2025 4803010 :
2026 4803010 : // Rolling the open layer can be triggered by:
2027 4803010 : // 1. The distance from the last LSN we rolled at. This bounds the amount of WAL that
2028 4803010 : // the safekeepers need to store. For sharded tenants, we multiply by shard count to
2029 4803010 : // account for how writes are distributed across shards: we expect each node to consume
2030 4803010 : // 1/count of the LSN on average.
2031 4803010 : // 2. The size of the currently open layer.
2032 4803010 : // 3. The time since the last roll. It helps safekeepers to regard pageserver as caught
2033 4803010 : // up and suspend activity.
2034 4803010 : if distance >= checkpoint_distance as i128 * self.shard_identity.count.count() as i128 {
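     :         // Worked example for (1), with illustrative numbers: at a checkpoint_distance
     :         // of 256 MiB and a shard count of 4, the open layer rolls once the LSN has
     :         // advanced 4 * 256 MiB = 1 GiB past last_freeze_at.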
2035 0 : info!(
2036 0 : "Will roll layer at {} with layer size {} due to LSN distance ({})",
2037 : projected_lsn, layer_size, distance
2038 : );
2039 :
2040 0 : true
2041 4803010 : } else if projected_layer_size >= checkpoint_distance {
2042 : // NB: this check is relied upon by:
2043 80 : let _ = IndexEntry::validate_checkpoint_distance;
2044 80 : info!(
2045 0 : "Will roll layer at {} with layer size {} due to layer size ({})",
2046 : projected_lsn, layer_size, projected_layer_size
2047 : );
2048 :
2049 80 : true
2050 4802930 : } else if distance > 0 && opened_at.elapsed() >= self.get_checkpoint_timeout() {
2051 0 : info!(
2052 0 : "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})",
2053 0 : projected_lsn,
2054 0 : layer_size,
2055 0 : opened_at.elapsed()
2056 : );
2057 :
2058 0 : true
2059 : } else {
2060 4802930 : false
2061 : }
2062 4803010 : }
2063 : }
2064 :
2065 : /// Number of times we will compute partition within a checkpoint distance.
2066 : const REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE: u64 = 10;
2067 :
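     : // For example, with an (illustrative) checkpoint_distance of 256 MiB, the
     : // repartition threshold computed in `Timeline::new` is 256 MiB / 10 = 25.6 MiB
     : // of WAL between repartitioning attempts.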
2068 : // Private functions
2069 : impl Timeline {
2070 12 : pub(crate) fn get_lsn_lease_length(&self) -> Duration {
2071 12 : let tenant_conf = self.tenant_conf.load();
2072 12 : tenant_conf
2073 12 : .tenant_conf
2074 12 : .lsn_lease_length
2075 12 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
2076 12 : }
2077 :
2078 0 : pub(crate) fn get_lsn_lease_length_for_ts(&self) -> Duration {
2079 0 : let tenant_conf = self.tenant_conf.load();
2080 0 : tenant_conf
2081 0 : .tenant_conf
2082 0 : .lsn_lease_length_for_ts
2083 0 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length_for_ts)
2084 0 : }
2085 :
2086 0 : pub(crate) fn is_gc_blocked_by_lsn_lease_deadline(&self) -> bool {
2087 0 : let tenant_conf = self.tenant_conf.load();
2088 0 : tenant_conf.is_gc_blocked_by_lsn_lease_deadline()
2089 0 : }
2090 :
2091 0 : pub(crate) fn get_lazy_slru_download(&self) -> bool {
2092 0 : let tenant_conf = self.tenant_conf.load();
2093 0 : tenant_conf
2094 0 : .tenant_conf
2095 0 : .lazy_slru_download
2096 0 : .unwrap_or(self.conf.default_tenant_conf.lazy_slru_download)
2097 0 : }
2098 :
2099 4804628 : fn get_checkpoint_distance(&self) -> u64 {
2100 4804628 : let tenant_conf = self.tenant_conf.load();
2101 4804628 : tenant_conf
2102 4804628 : .tenant_conf
2103 4804628 : .checkpoint_distance
2104 4804628 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
2105 4804628 : }
2106 :
2107 4802930 : fn get_checkpoint_timeout(&self) -> Duration {
2108 4802930 : let tenant_conf = self.tenant_conf.load();
2109 4802930 : tenant_conf
2110 4802930 : .tenant_conf
2111 4802930 : .checkpoint_timeout
2112 4802930 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
2113 4802930 : }
2114 :
2115 638 : fn get_compaction_target_size(&self) -> u64 {
2116 638 : let tenant_conf = self.tenant_conf.load();
2117 638 : tenant_conf
2118 638 : .tenant_conf
2119 638 : .compaction_target_size
2120 638 : .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
2121 638 : }
2122 :
2123 1540 : fn get_compaction_threshold(&self) -> usize {
2124 1540 : let tenant_conf = self.tenant_conf.load();
2125 1540 : tenant_conf
2126 1540 : .tenant_conf
2127 1540 : .compaction_threshold
2128 1540 : .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
2129 1540 : }
2130 :
2131 14 : fn get_image_creation_threshold(&self) -> usize {
2132 14 : let tenant_conf = self.tenant_conf.load();
2133 14 : tenant_conf
2134 14 : .tenant_conf
2135 14 : .image_creation_threshold
2136 14 : .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
2137 14 : }
2138 :
2139 364 : fn get_compaction_algorithm_settings(&self) -> CompactionAlgorithmSettings {
2140 364 : let tenant_conf = &self.tenant_conf.load();
2141 364 : tenant_conf
2142 364 : .tenant_conf
2143 364 : .compaction_algorithm
2144 364 : .as_ref()
2145 364 : .unwrap_or(&self.conf.default_tenant_conf.compaction_algorithm)
2146 364 : .clone()
2147 364 : }
2148 :
2149 0 : fn get_eviction_policy(&self) -> EvictionPolicy {
2150 0 : let tenant_conf = self.tenant_conf.load();
2151 0 : tenant_conf
2152 0 : .tenant_conf
2153 0 : .eviction_policy
2154 0 : .unwrap_or(self.conf.default_tenant_conf.eviction_policy)
2155 0 : }
2156 :
2157 422 : fn get_evictions_low_residence_duration_metric_threshold(
2158 422 : tenant_conf: &TenantConfOpt,
2159 422 : default_tenant_conf: &TenantConf,
2160 422 : ) -> Duration {
2161 422 : tenant_conf
2162 422 : .evictions_low_residence_duration_metric_threshold
2163 422 : .unwrap_or(default_tenant_conf.evictions_low_residence_duration_metric_threshold)
2164 422 : }
2165 :
2166 724 : fn get_image_layer_creation_check_threshold(&self) -> u8 {
2167 724 : let tenant_conf = self.tenant_conf.load();
2168 724 : tenant_conf
2169 724 : .tenant_conf
2170 724 : .image_layer_creation_check_threshold
2171 724 : .unwrap_or(
2172 724 : self.conf
2173 724 : .default_tenant_conf
2174 724 : .image_layer_creation_check_threshold,
2175 724 : )
2176 724 : }
2177 :
2178 : /// Resolve the effective WAL receiver protocol to use for this tenant.
2179 : ///
2180 : /// Priority order is:
2181 : /// 1. Tenant config override
2182 : /// 2. Default value for tenant config override
2183 : /// 3. Pageserver config override
2184 : /// 4. Pageserver config default
2185 0 : pub fn resolve_wal_receiver_protocol(&self) -> PostgresClientProtocol {
2186 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2187 0 : tenant_conf
2188 0 : .wal_receiver_protocol_override
2189 0 : .or(self.conf.default_tenant_conf.wal_receiver_protocol_override)
2190 0 : .unwrap_or(self.conf.wal_receiver_protocol)
2191 0 : }
2192 :
2193 0 : pub(super) fn tenant_conf_updated(&self, new_conf: &AttachedTenantConf) {
2194 0 : // NB: Most tenant conf options are read by background loops, so,
2195 0 : // changes will automatically be picked up.
2196 0 :
2197 0 : // The threshold is embedded in the metric. So, we need to update it.
2198 0 : {
2199 0 : let new_threshold = Self::get_evictions_low_residence_duration_metric_threshold(
2200 0 : &new_conf.tenant_conf,
2201 0 : &self.conf.default_tenant_conf,
2202 0 : );
2203 0 :
2204 0 : let tenant_id_str = self.tenant_shard_id.tenant_id.to_string();
2205 0 : let shard_id_str = format!("{}", self.tenant_shard_id.shard_slug());
2206 0 :
2207 0 : let timeline_id_str = self.timeline_id.to_string();
2208 0 :
2209 0 : self.remote_client.update_config(&new_conf.location);
2210 0 :
2211 0 : self.metrics
2212 0 : .evictions_with_low_residence_duration
2213 0 : .write()
2214 0 : .unwrap()
2215 0 : .change_threshold(
2216 0 : &tenant_id_str,
2217 0 : &shard_id_str,
2218 0 : &timeline_id_str,
2219 0 : new_threshold,
2220 0 : );
2221 0 : }
2222 0 : }
2223 :
2224 : /// Open a Timeline handle.
2225 : ///
2226 : /// Loads the metadata for the timeline into memory, but not the layer map.
2227 : #[allow(clippy::too_many_arguments)]
2228 422 : pub(super) fn new(
2229 422 : conf: &'static PageServerConf,
2230 422 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
2231 422 : metadata: &TimelineMetadata,
2232 422 : ancestor: Option<Arc<Timeline>>,
2233 422 : timeline_id: TimelineId,
2234 422 : tenant_shard_id: TenantShardId,
2235 422 : generation: Generation,
2236 422 : shard_identity: ShardIdentity,
2237 422 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
2238 422 : resources: TimelineResources,
2239 422 : pg_version: u32,
2240 422 : state: TimelineState,
2241 422 : attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
2242 422 : create_idempotency: crate::tenant::CreateTimelineIdempotency,
2243 422 : cancel: CancellationToken,
2244 422 : ) -> Arc<Self> {
2245 422 : let disk_consistent_lsn = metadata.disk_consistent_lsn();
2246 422 : let (state, _) = watch::channel(state);
2247 422 :
2248 422 : let (layer_flush_start_tx, _) = tokio::sync::watch::channel((0, disk_consistent_lsn));
2249 422 : let (layer_flush_done_tx, _) = tokio::sync::watch::channel((0, Ok(())));
2250 422 :
2251 422 : let evictions_low_residence_duration_metric_threshold = {
2252 422 : let loaded_tenant_conf = tenant_conf.load();
2253 422 : Self::get_evictions_low_residence_duration_metric_threshold(
2254 422 : &loaded_tenant_conf.tenant_conf,
2255 422 : &conf.default_tenant_conf,
2256 422 : )
2257 : };
2258 :
2259 422 : if let Some(ancestor) = &ancestor {
2260 230 : let mut ancestor_gc_info = ancestor.gc_info.write().unwrap();
2261 230 : // If we construct an explicit timeline object, it's obviously not offloaded
2262 230 : let is_offloaded = MaybeOffloaded::No;
2263 230 : ancestor_gc_info.insert_child(timeline_id, metadata.ancestor_lsn(), is_offloaded);
2264 230 : }
2265 :
2266 422 : Arc::new_cyclic(|myself| {
2267 422 : let metrics = TimelineMetrics::new(
2268 422 : &tenant_shard_id,
2269 422 : &timeline_id,
2270 422 : crate::metrics::EvictionsWithLowResidenceDurationBuilder::new(
2271 422 : "mtime",
2272 422 : evictions_low_residence_duration_metric_threshold,
2273 422 : ),
2274 422 : );
2275 422 : let aux_file_metrics = metrics.aux_file_size_gauge.clone();
2276 :
2277 422 : let mut result = Timeline {
2278 422 : conf,
2279 422 : tenant_conf,
2280 422 : myself: myself.clone(),
2281 422 : timeline_id,
2282 422 : tenant_shard_id,
2283 422 : generation,
2284 422 : shard_identity,
2285 422 : pg_version,
2286 422 : layers: Default::default(),
2287 422 :
2288 422 : walredo_mgr,
2289 422 : walreceiver: Mutex::new(None),
2290 422 :
2291 422 : remote_client: Arc::new(resources.remote_client),
2292 422 :
2293 422 : // initialize in-memory 'last_record_lsn' from 'disk_consistent_lsn'.
2294 422 : last_record_lsn: SeqWait::new(RecordLsn {
2295 422 : last: disk_consistent_lsn,
2296 422 : prev: metadata.prev_record_lsn().unwrap_or(Lsn(0)),
2297 422 : }),
2298 422 : disk_consistent_lsn: AtomicLsn::new(disk_consistent_lsn.0),
2299 422 :
2300 422 : last_freeze_at: AtomicLsn::new(disk_consistent_lsn.0),
2301 422 : last_freeze_ts: RwLock::new(Instant::now()),
2302 422 :
2303 422 : loaded_at: (disk_consistent_lsn, SystemTime::now()),
2304 422 :
2305 422 : ancestor_timeline: ancestor,
2306 422 : ancestor_lsn: metadata.ancestor_lsn(),
2307 422 :
2308 422 : metrics,
2309 422 :
2310 422 : query_metrics: crate::metrics::SmgrQueryTimePerTimeline::new(
2311 422 : &tenant_shard_id,
2312 422 : &timeline_id,
2313 422 : ),
2314 422 :
2315 2954 : directory_metrics: array::from_fn(|_| AtomicU64::new(0)),
2316 422 :
2317 422 : flush_loop_state: Mutex::new(FlushLoopState::NotStarted),
2318 422 :
2319 422 : layer_flush_start_tx,
2320 422 : layer_flush_done_tx,
2321 422 :
2322 422 : write_lock: tokio::sync::Mutex::new(None),
2323 422 :
2324 422 : gc_info: std::sync::RwLock::new(GcInfo::default()),
2325 422 :
2326 422 : latest_gc_cutoff_lsn: Rcu::new(metadata.latest_gc_cutoff_lsn()),
2327 422 : initdb_lsn: metadata.initdb_lsn(),
2328 422 :
2329 422 : current_logical_size: if disk_consistent_lsn.is_valid() {
2330 : // we're creating timeline data with some layer files existing locally,
2331 : // need to recalculate timeline's logical size based on data in the layers.
2332 234 : LogicalSize::deferred_initial(disk_consistent_lsn)
2333 : } else {
2334 : // we're creating timeline data without any layers existing locally,
2335 : // initial logical size is 0.
2336 188 : LogicalSize::empty_initial()
2337 : },
2338 422 : partitioning: tokio::sync::Mutex::new((
2339 422 : (KeyPartitioning::new(), KeyPartitioning::new().into_sparse()),
2340 422 : Lsn(0),
2341 422 : )),
2342 422 : repartition_threshold: 0,
2343 422 : last_image_layer_creation_check_at: AtomicLsn::new(0),
2344 422 : last_image_layer_creation_check_instant: Mutex::new(None),
2345 422 :
2346 422 : last_received_wal: Mutex::new(None),
2347 422 : rel_size_cache: RwLock::new(RelSizeCache {
2348 422 : complete_as_of: disk_consistent_lsn,
2349 422 : map: HashMap::new(),
2350 422 : }),
2351 422 :
2352 422 : download_all_remote_layers_task_info: RwLock::new(None),
2353 422 :
2354 422 : state,
2355 422 :
2356 422 : eviction_task_timeline_state: tokio::sync::Mutex::new(
2357 422 : EvictionTaskTimelineState::default(),
2358 422 : ),
2359 422 : delete_progress: TimelineDeleteProgress::default(),
2360 422 :
2361 422 : cancel,
2362 422 : gate: Gate::default(),
2363 422 :
2364 422 : compaction_lock: tokio::sync::Mutex::default(),
2365 422 : gc_lock: tokio::sync::Mutex::default(),
2366 422 :
2367 422 : standby_horizon: AtomicLsn::new(0),
2368 422 :
2369 422 : pagestream_throttle: resources.pagestream_throttle,
2370 422 :
2371 422 : aux_file_size_estimator: AuxFileSizeEstimator::new(aux_file_metrics),
2372 422 :
2373 422 : #[cfg(test)]
2374 422 : extra_test_dense_keyspace: ArcSwap::new(Arc::new(KeySpace::default())),
2375 422 :
2376 422 : l0_flush_global_state: resources.l0_flush_global_state,
2377 422 :
2378 422 : handles: Default::default(),
2379 422 :
2380 422 : attach_wal_lag_cooldown,
2381 422 :
2382 422 : create_idempotency,
2383 422 : };
2384 422 :
2385 422 : result.repartition_threshold =
2386 422 : result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE;
2387 422 :
2388 422 : result
2389 422 : .metrics
2390 422 : .last_record_lsn_gauge
2391 422 : .set(disk_consistent_lsn.0 as i64);
2392 422 : result
2393 422 : })
2394 422 : }
2395 :
2396 596 : pub(super) fn maybe_spawn_flush_loop(self: &Arc<Self>) {
2397 596 : let Ok(guard) = self.gate.enter() else {
2398 0 : info!("cannot start flush loop when the timeline gate has already been closed");
2399 0 : return;
2400 : };
2401 596 : let mut flush_loop_state = self.flush_loop_state.lock().unwrap();
2402 596 : match *flush_loop_state {
2403 416 : FlushLoopState::NotStarted => (),
2404 : FlushLoopState::Running { .. } => {
2405 180 : info!(
2406 0 : "skipping attempt to start flush_loop twice {}/{}",
2407 0 : self.tenant_shard_id, self.timeline_id
2408 : );
2409 180 : return;
2410 : }
2411 : FlushLoopState::Exited => {
2412 0 : warn!(
2413 0 : "ignoring attempt to restart exited flush_loop {}/{}",
2414 0 : self.tenant_shard_id, self.timeline_id
2415 : );
2416 0 : return;
2417 : }
2418 : }
2419 :
2420 416 : let layer_flush_start_rx = self.layer_flush_start_tx.subscribe();
2421 416 : let self_clone = Arc::clone(self);
2422 416 :
2423 416 : debug!("spawning flush loop");
2424 416 : *flush_loop_state = FlushLoopState::Running {
2425 416 : #[cfg(test)]
2426 416 : expect_initdb_optimization: false,
2427 416 : #[cfg(test)]
2428 416 : initdb_optimization_count: 0,
2429 416 : };
2430 416 : task_mgr::spawn(
2431 416 : task_mgr::BACKGROUND_RUNTIME.handle(),
2432 416 : task_mgr::TaskKind::LayerFlushTask,
2433 416 : self.tenant_shard_id,
2434 416 : Some(self.timeline_id),
2435 416 : "layer flush task",
2436 416 : async move {
2437 416 : let _guard = guard;
2438 416 : let background_ctx = RequestContext::todo_child(TaskKind::LayerFlushTask, DownloadBehavior::Error);
2439 416 : self_clone.flush_loop(layer_flush_start_rx, &background_ctx).await;
2440 10 : let mut flush_loop_state = self_clone.flush_loop_state.lock().unwrap();
2441 10 : assert!(matches!(*flush_loop_state, FlushLoopState::Running{..}));
2442 10 : *flush_loop_state = FlushLoopState::Exited;
2443 10 : Ok(())
2444 10 : }
2445 416 : .instrument(info_span!(parent: None, "layer flush task", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
2446 : );
2447 596 : }
2448 :
2449 : /// Creates and starts the wal receiver.
2450 : ///
2451 : /// This function is expected to be called at most once per Timeline's lifecycle
2452 : /// when the timeline is activated.
2453 0 : fn launch_wal_receiver(
2454 0 : self: &Arc<Self>,
2455 0 : ctx: &RequestContext,
2456 0 : broker_client: BrokerClientChannel,
2457 0 : ) {
2458 0 : info!(
2459 0 : "launching WAL receiver for timeline {} of tenant {}",
2460 0 : self.timeline_id, self.tenant_shard_id
2461 : );
2462 :
2463 0 : let tenant_conf = self.tenant_conf.load();
2464 0 : let wal_connect_timeout = tenant_conf
2465 0 : .tenant_conf
2466 0 : .walreceiver_connect_timeout
2467 0 : .unwrap_or(self.conf.default_tenant_conf.walreceiver_connect_timeout);
2468 0 : let lagging_wal_timeout = tenant_conf
2469 0 : .tenant_conf
2470 0 : .lagging_wal_timeout
2471 0 : .unwrap_or(self.conf.default_tenant_conf.lagging_wal_timeout);
2472 0 : let max_lsn_wal_lag = tenant_conf
2473 0 : .tenant_conf
2474 0 : .max_lsn_wal_lag
2475 0 : .unwrap_or(self.conf.default_tenant_conf.max_lsn_wal_lag);
2476 0 :
2477 0 : let mut guard = self.walreceiver.lock().unwrap();
2478 0 : assert!(
2479 0 : guard.is_none(),
2480 0 : "multiple launches / re-launches of WAL receiver are not supported"
2481 : );
2482 0 : *guard = Some(WalReceiver::start(
2483 0 : Arc::clone(self),
2484 0 : WalReceiverConf {
2485 0 : protocol: self.resolve_wal_receiver_protocol(),
2486 0 : wal_connect_timeout,
2487 0 : lagging_wal_timeout,
2488 0 : max_lsn_wal_lag,
2489 0 : auth_token: crate::config::SAFEKEEPER_AUTH_TOKEN.get().cloned(),
2490 0 : availability_zone: self.conf.availability_zone.clone(),
2491 0 : ingest_batch_size: self.conf.ingest_batch_size,
2492 0 : },
2493 0 : broker_client,
2494 0 : ctx,
2495 0 : ));
2496 0 : }
2497 :
2498 : /// Initialize with an empty layer map. Used when creating a new timeline.
2499 416 : pub(super) fn init_empty_layer_map(&self, start_lsn: Lsn) {
2500 416 : let mut layers = self.layers.try_write().expect(
2501 416 : "in the context where we call this function, no other task has access to the object",
2502 416 : );
2503 416 : layers
2504 416 : .open_mut()
2505 416 : .expect("in this context the LayerManager must still be open")
2506 416 : .initialize_empty(Lsn(start_lsn.0));
2507 416 : }
2508 :
2509 : /// Scan the timeline directory, cleanup, populate the layer map, and schedule uploads for local-only
2510 : /// files.
2511 6 : pub(super) async fn load_layer_map(
2512 6 : &self,
2513 6 : disk_consistent_lsn: Lsn,
2514 6 : index_part: IndexPart,
2515 6 : ) -> anyhow::Result<()> {
2516 : use init::{Decision::*, Discovered, DismissedLayer};
2517 : use LayerName::*;
2518 :
2519 6 : let mut guard = self.layers.write().await;
2520 :
2521 6 : let timer = self.metrics.load_layer_map_histo.start_timer();
2522 6 :
2523 6 : // Scan the timeline directory and create ImageLayerName and DeltaLayerName
2524 6 : // structs representing all files on disk
2525 6 : let timeline_path = self
2526 6 : .conf
2527 6 : .timeline_path(&self.tenant_shard_id, &self.timeline_id);
2528 6 : let conf = self.conf;
2529 6 : let span = tracing::Span::current();
2530 6 :
2531 6 : // Copy to move into the task we're about to spawn
2532 6 : let this = self.myself.upgrade().expect("&self method holds the arc");
2533 :
2534 6 : let (loaded_layers, needs_cleanup, total_physical_size) = tokio::task::spawn_blocking({
2535 6 : move || {
2536 6 : let _g = span.entered();
2537 6 : let discovered = init::scan_timeline_dir(&timeline_path)?;
2538 6 : let mut discovered_layers = Vec::with_capacity(discovered.len());
2539 6 : let mut unrecognized_files = Vec::new();
2540 6 :
2541 6 : let mut path = timeline_path;
2542 :
2543 22 : for discovered in discovered {
2544 16 : let (name, kind) = match discovered {
2545 16 : Discovered::Layer(layer_file_name, local_metadata) => {
2546 16 : discovered_layers.push((layer_file_name, local_metadata));
2547 16 : continue;
2548 : }
2549 0 : Discovered::IgnoredBackup(path) => {
2550 0 : std::fs::remove_file(path)
2551 0 : .or_else(fs_ext::ignore_not_found)
2552 0 : .fatal_err("Removing .old file");
2553 0 : continue;
2554 : }
2555 0 : Discovered::Unknown(file_name) => {
2556 0 : // we will later error if there are any
2557 0 : unrecognized_files.push(file_name);
2558 0 : continue;
2559 : }
2560 0 : Discovered::Ephemeral(name) => (name, "old ephemeral file"),
2561 0 : Discovered::Temporary(name) => (name, "temporary timeline file"),
2562 0 : Discovered::TemporaryDownload(name) => (name, "temporary download"),
2563 : };
2564 0 : path.push(Utf8Path::new(&name));
2565 0 : init::cleanup(&path, kind)?;
2566 0 : path.pop();
2567 : }
2568 :
2569 6 : if !unrecognized_files.is_empty() {
2570 : // assume that if there are any, there are many.
2571 0 : let n = unrecognized_files.len();
2572 0 : let first = &unrecognized_files[..n.min(10)];
2573 0 : anyhow::bail!(
2574 0 : "unrecognized files in timeline dir (total {n}), first 10: {first:?}"
2575 0 : );
2576 6 : }
2577 6 :
2578 6 : let decided = init::reconcile(discovered_layers, &index_part, disk_consistent_lsn);
2579 6 :
2580 6 : let mut loaded_layers = Vec::new();
2581 6 : let mut needs_cleanup = Vec::new();
2582 6 : let mut total_physical_size = 0;
2583 :
2584 22 : for (name, decision) in decided {
2585 16 : let decision = match decision {
2586 16 : Ok(decision) => decision,
2587 0 : Err(DismissedLayer::Future { local }) => {
2588 0 : if let Some(local) = local {
2589 0 : init::cleanup_future_layer(
2590 0 : &local.local_path,
2591 0 : &name,
2592 0 : disk_consistent_lsn,
2593 0 : )?;
2594 0 : }
2595 0 : needs_cleanup.push(name);
2596 0 : continue;
2597 : }
2598 0 : Err(DismissedLayer::LocalOnly(local)) => {
2599 0 : init::cleanup_local_only_file(&name, &local)?;
2600 : // this file never existed remotely, we will have to do rework
2601 0 : continue;
2602 : }
2603 0 : Err(DismissedLayer::BadMetadata(local)) => {
2604 0 : init::cleanup_local_file_for_remote(&local)?;
2605 : // this file never existed remotely, we will have to do rework
2606 0 : continue;
2607 : }
2608 : };
2609 :
2610 16 : match &name {
2611 12 : Delta(d) => assert!(d.lsn_range.end <= disk_consistent_lsn + 1),
2612 4 : Image(i) => assert!(i.lsn <= disk_consistent_lsn),
2613 : }
2614 :
2615 16 : tracing::debug!(layer=%name, ?decision, "applied");
2616 :
2617 16 : let layer = match decision {
2618 16 : Resident { local, remote } => {
2619 16 : total_physical_size += local.file_size;
2620 16 : Layer::for_resident(conf, &this, local.local_path, name, remote)
2621 16 : .drop_eviction_guard()
2622 : }
2623 0 : Evicted(remote) => Layer::for_evicted(conf, &this, name, remote),
2624 : };
2625 :
2626 16 : loaded_layers.push(layer);
2627 : }
2628 6 : Ok((loaded_layers, needs_cleanup, total_physical_size))
2629 6 : }
2630 6 : })
2631 6 : .await
2632 6 : .map_err(anyhow::Error::new)
2633 6 : .and_then(|x| x)?;
2634 :
2635 6 : let num_layers = loaded_layers.len();
2636 6 :
2637 6 : guard
2638 6 : .open_mut()
2639 6 : .expect("layermanager must be open during init")
2640 6 : .initialize_local_layers(loaded_layers, disk_consistent_lsn + 1);
2641 6 :
2642 6 : self.remote_client
2643 6 : .schedule_layer_file_deletion(&needs_cleanup)?;
2644 6 : self.remote_client
2645 6 : .schedule_index_upload_for_file_changes()?;
2646 : // This barrier orders the above DELETEs before any later operations.
2647 : // This is critical because code executing after the barrier might
2648 : // create again objects with the same key that we just scheduled for deletion.
2649 : // For example, if we just scheduled deletion of an image layer "from the future",
2650 : // later compaction might run again and re-create the same image layer.
2651 : // "from the future" here means an image layer whose LSN is > IndexPart::disk_consistent_lsn.
2652 : // "same" here means same key range and LSN.
2653 : //
2654 : // Without a barrier between above DELETEs and the re-creation's PUTs,
2655 : // the upload queue may execute the PUT first, then the DELETE.
2656 : // In our example, we will end up with an IndexPart referencing a non-existent object.
2657 : //
2658 : // 1. a future image layer is created and uploaded
2659 : // 2. ps restart
2660 : // 3. the future layer from (1) is deleted during load layer map
2661 : // 4. image layer is re-created and uploaded
2662 : // 5. deletion queue would like to delete (1) but actually deletes (4)
2663 : // 6. delete by name works as expected, but it now deletes the wrong (later) version
2664 : //
2665 : // See https://github.com/neondatabase/neon/issues/5878
2666 : //
2667 : // NB: generation numbers naturally protect against this because they disambiguate
2668 : // (1) and (4)
2669 : // TODO: this is basically a no-op now, should we remove it?
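     : //
     : // Sketch of the upload-queue ordering the barrier was meant to enforce
     : // (illustrative):
     : //   1. schedule_layer_file_deletion(needs_cleanup)  -- DELETEs
     : //   2. schedule_barrier()                           -- fence
     : //   3. any later PUT that re-creates the same layer name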
2670 6 : self.remote_client.schedule_barrier()?;
2671 : // Tenant::create_timeline will wait for these uploads to happen before returning, or
2672 : // on retry.
2673 :
2674 : // Now that we have the full layer map, we may calculate the visibility of layers within it (a global scan)
2675 6 : drop(guard); // drop write lock, update_layer_visibility will take a read lock.
2676 6 : self.update_layer_visibility().await?;
2677 :
2678 6 : info!(
2679 0 : "loaded layer map with {} layers at {}, total physical size: {}",
2680 : num_layers, disk_consistent_lsn, total_physical_size
2681 : );
2682 :
2683 6 : timer.stop_and_record();
2684 6 : Ok(())
2685 6 : }
2686 :
2687 : /// Retrieve current logical size of the timeline.
2688 : ///
2689 : /// The size may lag behind the actual number if the initial size
2690 : /// calculation has not run yet (it is triggered on the first size access).
2691 : ///
2692 : /// Returns the size together with an indication of whether it is exact or approximate.
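     : ///
     : /// Sketch of how a caller can consume the accuracy information (illustrative):
     : ///
     : /// ```ignore
     : /// let size = timeline.get_current_logical_size(GetLogicalSizePriority::User, &ctx);
     : /// match size.accuracy() {
     : ///     logical_size::Accuracy::Exact => { /* the value is final */ }
     : ///     logical_size::Accuracy::Approximate => { /* an exact value arrives later */ }
     : /// }
     : /// ```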
2693 0 : pub(crate) fn get_current_logical_size(
2694 0 : self: &Arc<Self>,
2695 0 : priority: GetLogicalSizePriority,
2696 0 : ctx: &RequestContext,
2697 0 : ) -> logical_size::CurrentLogicalSize {
2698 0 : if !self.tenant_shard_id.is_shard_zero() {
2699 : // Logical size is only accurately maintained on shard zero: when called elsewhere, for example
2700 : // when HTTP API is serving a GET for timeline zero, return zero
2701 0 : return logical_size::CurrentLogicalSize::Approximate(logical_size::Approximate::zero());
2702 0 : }
2703 0 :
2704 0 : let current_size = self.current_logical_size.current_size();
2705 0 : debug!("Current size: {current_size:?}");
2706 :
2707 0 : match (current_size.accuracy(), priority) {
2708 0 : (logical_size::Accuracy::Exact, _) => (), // nothing to do
2709 0 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::Background) => {
2710 0 : // background task will eventually deliver an exact value, we're in no rush
2711 0 : }
2712 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::User) => {
2713 : // background task is not ready, but user is asking for it now;
2714 : // => make the background task skip the line
2715 : // (The alternative would be to calculate the size here, but
2716 : // it can actually take a long time if the user has a lot of rels.
2717 : // And we'll inevitably need it again; so, let the background task do the work.)
2718 0 : match self
2719 0 : .current_logical_size
2720 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
2721 0 : .get()
2722 : {
2723 0 : Some(cancel) => cancel.cancel(),
2724 : None => {
2725 0 : match self.current_state() {
2726 0 : TimelineState::Broken { .. } | TimelineState::Stopping => {
2727 0 : // Can happen when timeline detail endpoint is used when deletion is ongoing (or its broken).
2728 0 : // Don't make noise.
2729 0 : }
2730 : TimelineState::Loading => {
2731 : // Import does not return an activated timeline.
2732 0 : info!("discarding priority boost for logical size calculation because timeline is not yet active");
2733 : }
2734 : TimelineState::Active => {
2735 : // activation should be setting the once cell
2736 0 : warn!("unexpected: cancel_wait_for_background_loop_concurrency_limit_semaphore not set, priority-boosting of logical size calculation will not work");
2737 0 : debug_assert!(false);
2738 : }
2739 : }
2740 : }
2741 : }
2742 : }
2743 : }
2744 :
2745 0 : if let CurrentLogicalSize::Approximate(_) = &current_size {
2746 0 : if ctx.task_kind() == TaskKind::WalReceiverConnectionHandler {
2747 0 : let first = self
2748 0 : .current_logical_size
2749 0 : .did_return_approximate_to_walreceiver
2750 0 : .compare_exchange(
2751 0 : false,
2752 0 : true,
2753 0 : AtomicOrdering::Relaxed,
2754 0 : AtomicOrdering::Relaxed,
2755 0 : )
2756 0 : .is_ok();
2757 0 : if first {
2758 0 : crate::metrics::initial_logical_size::TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE.inc();
2759 0 : }
2760 0 : }
2761 0 : }
2762 :
2763 0 : current_size
2764 0 : }
2765 :
2766 0 : fn spawn_initial_logical_size_computation_task(self: &Arc<Self>, ctx: &RequestContext) {
2767 0 : let Some(initial_part_end) = self.current_logical_size.initial_part_end else {
2768 : // nothing to do for freshly created timelines;
2769 0 : assert_eq!(
2770 0 : self.current_logical_size.current_size().accuracy(),
2771 0 : logical_size::Accuracy::Exact,
2772 0 : );
2773 0 : self.current_logical_size.initialized.add_permits(1);
2774 0 : return;
2775 : };
2776 :
2777 0 : let cancel_wait_for_background_loop_concurrency_limit_semaphore = CancellationToken::new();
2778 0 : let token = cancel_wait_for_background_loop_concurrency_limit_semaphore.clone();
2779 0 : self.current_logical_size
2780 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore.set(token)
2781 0 : .expect("initial logical size calculation task must be spawned exactly once per Timeline object");
2782 0 :
2783 0 : let self_clone = Arc::clone(self);
2784 0 : let background_ctx = ctx.detached_child(
2785 0 : TaskKind::InitialLogicalSizeCalculation,
2786 0 : DownloadBehavior::Download,
2787 0 : );
2788 0 : task_mgr::spawn(
2789 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
2790 0 : task_mgr::TaskKind::InitialLogicalSizeCalculation,
2791 0 : self.tenant_shard_id,
2792 0 : Some(self.timeline_id),
2793 0 : "initial size calculation",
2794 : // NB: don't log errors here, task_mgr will do that.
2795 0 : async move {
2796 0 : let cancel = task_mgr::shutdown_token();
2797 0 : self_clone
2798 0 : .initial_logical_size_calculation_task(
2799 0 : initial_part_end,
2800 0 : cancel_wait_for_background_loop_concurrency_limit_semaphore,
2801 0 : cancel,
2802 0 : background_ctx,
2803 0 : )
2804 0 : .await;
2805 0 : Ok(())
2806 0 : }
2807 0 : .instrument(info_span!(parent: None, "initial_size_calculation", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id)),
2808 : );
2809 0 : }
2810 :
2811 0 : async fn initial_logical_size_calculation_task(
2812 0 : self: Arc<Self>,
2813 0 : initial_part_end: Lsn,
2814 0 : skip_concurrency_limiter: CancellationToken,
2815 0 : cancel: CancellationToken,
2816 0 : background_ctx: RequestContext,
2817 0 : ) {
2818 0 : scopeguard::defer! {
2819 0 : // Irrespective of the outcome of this operation, we should unblock anyone waiting for it.
2820 0 : self.current_logical_size.initialized.add_permits(1);
2821 0 : }
2822 0 :
2823 0 : let try_once = |attempt: usize| {
2824 0 : let background_ctx = &background_ctx;
2825 0 : let self_ref = &self;
2826 0 : let skip_concurrency_limiter = &skip_concurrency_limiter;
2827 0 : async move {
2828 0 : let cancel = task_mgr::shutdown_token();
2829 0 : let wait_for_permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
2830 0 : BackgroundLoopKind::InitialLogicalSizeCalculation,
2831 0 : background_ctx,
2832 0 : );
2833 :
2834 : use crate::metrics::initial_logical_size::StartCircumstances;
2835 0 : let (_maybe_permit, circumstances) = tokio::select! {
2836 0 : permit = wait_for_permit => {
2837 0 : (Some(permit), StartCircumstances::AfterBackgroundTasksRateLimit)
2838 : }
2839 0 : _ = self_ref.cancel.cancelled() => {
2840 0 : return Err(CalculateLogicalSizeError::Cancelled);
2841 : }
2842 0 : _ = cancel.cancelled() => {
2843 0 : return Err(CalculateLogicalSizeError::Cancelled);
2844 : },
2845 0 : () = skip_concurrency_limiter.cancelled() => {
2846 : // Some action that is part of an end-user interaction requested the logical size
2847 : // => break out of the rate limit
2848 : // TODO: ideally we'd run on the requester's runtime rather than BackgroundRuntime,
2849 : // but then it's unclear what happens if they cancel; also, we should just be using
2850 : // one runtime across the entire process, so let's leave this for now.
2851 0 : (None, StartCircumstances::SkippedConcurrencyLimiter)
2852 : }
2853 : };
2854 :
2855 0 : let metrics_guard = if attempt == 1 {
2856 0 : crate::metrics::initial_logical_size::START_CALCULATION.first(circumstances)
2857 : } else {
2858 0 : crate::metrics::initial_logical_size::START_CALCULATION.retry(circumstances)
2859 : };
2860 :
2861 0 : let calculated_size = self_ref
2862 0 : .logical_size_calculation_task(
2863 0 : initial_part_end,
2864 0 : LogicalSizeCalculationCause::Initial,
2865 0 : background_ctx,
2866 0 : )
2867 0 : .await?;
2868 :
2869 0 : self_ref
2870 0 : .trigger_aux_file_size_computation(initial_part_end, background_ctx)
2871 0 : .await?;
2872 :
2873 : // TODO: add aux file size to logical size
2874 :
2875 0 : Ok((calculated_size, metrics_guard))
2876 0 : }
2877 0 : };
2878 :
2879 0 : let retrying = async {
2880 0 : let mut attempt = 0;
2881 : loop {
2882 0 : attempt += 1;
2883 0 :
2884 0 : match try_once(attempt).await {
2885 0 : Ok(res) => return ControlFlow::Continue(res),
2886 0 : Err(CalculateLogicalSizeError::Cancelled) => return ControlFlow::Break(()),
2887 : Err(
2888 0 : e @ (CalculateLogicalSizeError::Decode(_)
2889 0 : | CalculateLogicalSizeError::PageRead(_)),
2890 0 : ) => {
2891 0 : warn!(attempt, "initial size calculation failed: {e:?}");
2892 : // exponential back-off doesn't make sense at these long intervals;
2893 : // use a fixed retry interval with generous jitter instead
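 : // (each retry thus sleeps a uniform draw from roughly the [50 min, 70 min] interval)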
2894 0 : let sleep_duration = Duration::from_secs(
2895 0 : u64::try_from(
2896 0 : // 1hour base
2897 0 : (60_i64 * 60_i64)
2898 0 : // 10min jitter
2899 0 : + rand::thread_rng().gen_range(-10 * 60..10 * 60),
2900 0 : )
2901 0 : .expect("10min < 1hour"),
2902 0 : );
2903 0 : tokio::time::sleep(sleep_duration).await;
2904 : }
2905 : }
2906 : }
2907 0 : };
2908 :
2909 0 : let (calculated_size, metrics_guard) = tokio::select! {
2910 0 : res = retrying => {
2911 0 : match res {
2912 0 : ControlFlow::Continue(calculated_size) => calculated_size,
2913 0 : ControlFlow::Break(()) => return,
2914 : }
2915 : }
2916 0 : _ = cancel.cancelled() => {
2917 0 : return;
2918 : }
2919 : };
2920 :
2921 : // We cannot query current_logical_size.current_size() to learn the delta accumulated
2922 : // so far: it may be *negative*, and current_size() only exposes a u64-truncated value.
2923 0 : let added = self
2924 0 : .current_logical_size
2925 0 : .size_added_after_initial
2926 0 : .load(AtomicOrdering::Relaxed);
2927 0 :
2928 0 : let sum = calculated_size.saturating_add_signed(added);
2929 0 :
2930 0 : // set the gauge value before it can be set in `update_current_logical_size`.
2931 0 : self.metrics.current_logical_size_gauge.set(sum);
2932 0 :
2933 0 : self.current_logical_size
2934 0 : .initial_logical_size
2935 0 : .set((calculated_size, metrics_guard.calculation_result_saved()))
2936 0 : .ok()
2937 0 : .expect("only this task sets it");
2938 0 : }
2939 :
2940 0 : pub(crate) fn spawn_ondemand_logical_size_calculation(
2941 0 : self: &Arc<Self>,
2942 0 : lsn: Lsn,
2943 0 : cause: LogicalSizeCalculationCause,
2944 0 : ctx: RequestContext,
2945 0 : ) -> oneshot::Receiver<Result<u64, CalculateLogicalSizeError>> {
2946 0 : let (sender, receiver) = oneshot::channel();
2947 0 : let self_clone = Arc::clone(self);
2948 0 : // XXX if our caller loses interest, i.e., ctx is cancelled,
2949 0 : // we should stop the size calculation work and return an error.
2950 0 : // That would require restructuring this function's API to
2951 0 : // return the result directly, instead of a Receiver for the result.
2952 0 : let ctx = ctx.detached_child(
2953 0 : TaskKind::OndemandLogicalSizeCalculation,
2954 0 : DownloadBehavior::Download,
2955 0 : );
2956 0 : task_mgr::spawn(
2957 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
2958 0 : task_mgr::TaskKind::OndemandLogicalSizeCalculation,
2959 0 : self.tenant_shard_id,
2960 0 : Some(self.timeline_id),
2961 0 : "ondemand logical size calculation",
2962 0 : async move {
2963 0 : let res = self_clone
2964 0 : .logical_size_calculation_task(lsn, cause, &ctx)
2965 0 : .await;
2966 0 : let _ = sender.send(res).ok();
2967 0 : Ok(()) // Receiver is responsible for handling errors
2968 0 : }
2969 0 : .in_current_span(),
2970 0 : );
2971 0 : receiver
2972 0 : }
2973 :
2974 : /// # Cancel-Safety
2975 : ///
2976 : /// This method is cancellation-safe.
2977 0 : #[instrument(skip_all)]
2978 : async fn logical_size_calculation_task(
2979 : self: &Arc<Self>,
2980 : lsn: Lsn,
2981 : cause: LogicalSizeCalculationCause,
2982 : ctx: &RequestContext,
2983 : ) -> Result<u64, CalculateLogicalSizeError> {
2984 : crate::span::debug_assert_current_span_has_tenant_and_timeline_id();
2985 : // We should never be calculating logical sizes on shard !=0, because these shards do not have
2986 : // accurate relation sizes, and they do not emit consumption metrics.
2987 : debug_assert!(self.tenant_shard_id.is_shard_zero());
2988 :
2989 : let guard = self
2990 : .gate
2991 : .enter()
2992 0 : .map_err(|_| CalculateLogicalSizeError::Cancelled)?;
2993 :
2994 : let self_calculation = Arc::clone(self);
2995 :
2996 0 : let mut calculation = pin!(async {
2997 0 : let ctx = ctx.attached_child();
2998 0 : self_calculation
2999 0 : .calculate_logical_size(lsn, cause, &guard, &ctx)
3000 0 : .await
3001 0 : });
3002 :
3003 : tokio::select! {
3004 : res = &mut calculation => { res }
3005 : _ = self.cancel.cancelled() => {
3006 : debug!("cancelling logical size calculation for timeline shutdown");
3007 : calculation.await
3008 : }
3009 : }
3010 : }
3011 :
3012 : /// Calculate the logical size of the database at the given LSN.
3013 : ///
3014 : /// NOTE: counted incrementally, includes ancestors. This can be a slow operation,
3015 : /// especially if we need to download remote layers.
3016 : ///
3017 : /// # Cancel-Safety
3018 : ///
3019 : /// This method is cancellation-safe.
3020 0 : async fn calculate_logical_size(
3021 0 : &self,
3022 0 : up_to_lsn: Lsn,
3023 0 : cause: LogicalSizeCalculationCause,
3024 0 : _guard: &GateGuard,
3025 0 : ctx: &RequestContext,
3026 0 : ) -> Result<u64, CalculateLogicalSizeError> {
3027 0 : info!(
3028 0 : "Calculating logical size for timeline {} at {}",
3029 : self.timeline_id, up_to_lsn
3030 : );
3031 :
3032 0 : pausable_failpoint!("timeline-calculate-logical-size-pause");
3033 :
3034 : // See if we've already done the work for initial size calculation.
3035 : // This is a short-cut for timelines that are mostly unused.
3036 0 : if let Some(size) = self.current_logical_size.initialized_size(up_to_lsn) {
3037 0 : return Ok(size);
3038 0 : }
3039 0 : let storage_time_metrics = match cause {
3040 : LogicalSizeCalculationCause::Initial
3041 : | LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize
3042 0 : | LogicalSizeCalculationCause::TenantSizeHandler => &self.metrics.logical_size_histo,
3043 : LogicalSizeCalculationCause::EvictionTaskImitation => {
3044 0 : &self.metrics.imitate_logical_size_histo
3045 : }
3046 : };
3047 0 : let timer = storage_time_metrics.start_timer();
3048 0 : let logical_size = self
3049 0 : .get_current_logical_size_non_incremental(up_to_lsn, ctx)
3050 0 : .await?;
3051 0 : debug!("calculated logical size: {logical_size}");
3052 0 : timer.stop_and_record();
3053 0 : Ok(logical_size)
3054 0 : }
3055 :
3056 : /// Update the current logical size, adding `delta` to the old value.
3057 270570 : fn update_current_logical_size(&self, delta: i64) {
3058 270570 : let logical_size = &self.current_logical_size;
3059 270570 : logical_size.increment_size(delta);
3060 270570 :
3061 270570 : // Also set the value in the prometheus gauge. Note that
3062 270570 : // there is a race condition here: if this is called by two
3063 270570 : // threads concurrently, the prometheus gauge might be set to
3064 270570 : // one value while current_logical_size is set to the
3065 270570 : // other.
3066 270570 : match logical_size.current_size() {
3067 270570 : CurrentLogicalSize::Exact(ref new_current_size) => self
3068 270570 : .metrics
3069 270570 : .current_logical_size_gauge
3070 270570 : .set(new_current_size.into()),
3071 0 : CurrentLogicalSize::Approximate(_) => {
3072 0 : // don't update the gauge yet, this allows us not to update the gauge back and
3073 0 : // forth between the initial size calculation task.
3074 0 : }
3075 : }
3076 270570 : }
3077 :
3078 2836 : pub(crate) fn update_directory_entries_count(&self, kind: DirectoryKind, count: u64) {
3079 2836 : self.directory_metrics[kind.offset()].store(count, AtomicOrdering::Relaxed);
3080 2836 : let aux_metric =
3081 2836 : self.directory_metrics[DirectoryKind::AuxFiles.offset()].load(AtomicOrdering::Relaxed);
3082 2836 :
3083 2836 : let sum_of_entries = self
3084 2836 : .directory_metrics
3085 2836 : .iter()
3086 19852 : .map(|v| v.load(AtomicOrdering::Relaxed))
3087 2836 : .sum();
3088 : // Set a high general threshold and a lower threshold for the auxiliary files,
3089 : // as we can have large numbers of relations in the db directory.
3090 : const SUM_THRESHOLD: u64 = 5000;
3091 : const AUX_THRESHOLD: u64 = 1000;
3092 2836 : if sum_of_entries >= SUM_THRESHOLD || aux_metric >= AUX_THRESHOLD {
3093 0 : self.metrics
3094 0 : .directory_entries_count_gauge
3095 0 : .set(sum_of_entries);
3096 2836 : } else if let Some(metric) = Lazy::get(&self.metrics.directory_entries_count_gauge) {
3097 0 : metric.set(sum_of_entries);
3098 2836 : }
3099 2836 : }
3100 :
3101 0 : async fn find_layer(
3102 0 : &self,
3103 0 : layer_name: &LayerName,
3104 0 : ) -> Result<Option<Layer>, layer_manager::Shutdown> {
3105 0 : let guard = self.layers.read().await;
3106 0 : let layer = guard
3107 0 : .layer_map()?
3108 0 : .iter_historic_layers()
3109 0 : .find(|l| &l.layer_name() == layer_name)
3110 0 : .map(|found| guard.get_from_desc(&found));
3111 0 : Ok(layer)
3112 0 : }
3113 :
3114 : /// The timeline heatmap is a hint to secondary locations from the primary location,
3115 : /// indicating which layers are currently on-disk on the primary.
3116 : ///
3117 : /// None is returned if the Timeline is in a state where uploading a heatmap
3118 : /// doesn't make sense, such as shutting down or initializing. The caller
3119 : /// should treat this as a cue to simply skip doing any heatmap uploading
3120 : /// for this timeline.
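 : ///
 : /// A minimal usage sketch; `upload_heatmap` is a hypothetical helper, not an
 : /// API of this crate:
 : ///
 : /// ```ignore
 : /// if let Some(heatmap) = timeline.generate_heatmap().await {
 : /// upload_heatmap(&heatmap).await?;
 : /// } // else: timeline is shutting down or initializing; skip this cycle
 : /// ```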
3121 2 : pub(crate) async fn generate_heatmap(&self) -> Option<HeatMapTimeline> {
3122 2 : if !self.is_active() {
3123 0 : return None;
3124 2 : }
3125 :
3126 2 : let guard = self.layers.read().await;
3127 :
3128 10 : let resident = guard.likely_resident_layers().filter_map(|layer| {
3129 10 : match layer.visibility() {
3130 : LayerVisibilityHint::Visible => {
3131 : // Layer is visible to one or more read LSNs: eligible for inclusion in the heatmap
3132 8 : let last_activity_ts = layer.latest_activity();
3133 8 : Some((layer.layer_desc(), layer.metadata(), last_activity_ts))
3134 : }
3135 : LayerVisibilityHint::Covered => {
3136 : // Layer is resident but unlikely to be read: not elegible for inclusion in heatmap.
3137 2 : None
3138 : }
3139 : }
3140 10 : });
3141 2 :
3142 2 : let mut layers = resident.collect::<Vec<_>>();
3143 2 :
3144 2 : // Sort layers in order of which to download first. For a large set of layers to download, we
3145 2 : // want to prioritize those layers which are most likely to still be resident many minutes
3146 2 : // or hours later:
3147 2 : // - Download L0s last, because they churn the fastest: L0s on a fast-writing tenant might
3148 2 : // only exist for a few minutes before being compacted into L1s.
3149 2 : // - For L1 & image layers, download most recent LSNs first: the older the LSN, the sooner
3150 2 : // the layer is likely to be covered by an image layer during compaction.
3151 20 : layers.sort_by_key(|(desc, _meta, _atime)| {
3152 20 : std::cmp::Reverse((
3153 20 : !LayerMap::is_l0(&desc.key_range, desc.is_delta),
3154 20 : desc.lsn_range.end,
3155 20 : ))
3156 20 : });
3157 2 :
3158 2 : let layers = layers
3159 2 : .into_iter()
3160 8 : .map(|(desc, meta, atime)| HeatMapLayer::new(desc.layer_name(), meta, atime))
3161 2 : .collect();
3162 2 :
3163 2 : Some(HeatMapTimeline::new(self.timeline_id, layers))
3164 2 : }
3165 :
3166 : /// Returns true if the given lsn is or was an ancestor branchpoint.
3167 0 : pub(crate) fn is_ancestor_lsn(&self, lsn: Lsn) -> bool {
3168 0 : // upon timeline detach, we set the ancestor_lsn to Lsn::INVALID and the store the original
3169 0 : // branchpoint in the value in IndexPart::lineage
3170 0 : self.ancestor_lsn == lsn
3171 0 : || (self.ancestor_lsn == Lsn::INVALID
3172 0 : && self.remote_client.is_previous_ancestor_lsn(lsn))
3173 0 : }
3174 : }
3175 :
3176 : impl Timeline {
3177 : #[allow(clippy::doc_lazy_continuation)]
3178 : /// Get the data needed to reconstruct all keys in the provided keyspace
3179 : ///
3180 : /// The algorithm is as follows:
3181 : /// 1. While some keys are still not done and there's a timeline to visit:
3182 : /// 2. Visit the timeline (see [`Timeline::get_vectored_reconstruct_data_timeline`]):
3183 : /// 2.1. Build the fringe for the current keyspace
3184 : /// 2.2. Visit the newest layer from the fringe to collect all values for the range it
3185 : /// intersects
3186 : /// 2.3. Pop the layer from the fringe
3187 : /// 2.4. If the fringe is empty, go back to 1
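 : ///
 : /// A condensed sketch of the ancestor walk; `visit_timeline` is a stand-in for
 : /// [`Timeline::get_vectored_reconstruct_data_timeline`], and error handling
 : /// plus the image-coverage check are elided:
 : ///
 : /// ```ignore
 : /// let mut cont_lsn = Lsn(request_lsn.0 + 1);
 : /// let missing = loop {
 : /// let outcome = visit_timeline(timeline, keyspace.clone(), cont_lsn).await?;
 : /// keyspace.remove_overlapping_with(&outcome.completed_keyspace);
 : /// if keyspace.is_empty() { break None; }
 : /// let Some(ancestor) = timeline.ancestor_timeline.as_ref() else {
 : /// break Some(keyspace); // keys remain but there is no ancestor: missing
 : /// };
 : /// cont_lsn = Lsn(request_lsn.0 + 1).min(Lsn(timeline.ancestor_lsn.0 + 1));
 : /// timeline = ancestor;
 : /// };
 : /// // `missing`, if Some, is then turned into a MissingKey error.
 : /// ```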
3188 627417 : async fn get_vectored_reconstruct_data(
3189 627417 : &self,
3190 627417 : mut keyspace: KeySpace,
3191 627417 : request_lsn: Lsn,
3192 627417 : reconstruct_state: &mut ValuesReconstructState,
3193 627417 : ctx: &RequestContext,
3194 627417 : ) -> Result<(), GetVectoredError> {
3195 627417 : let mut timeline_owned: Arc<Timeline>;
3196 627417 : let mut timeline = self;
3197 627417 :
3198 627417 : let mut cont_lsn = Lsn(request_lsn.0 + 1);
3199 :
3200 627415 : let missing_keyspace = loop {
3201 855845 : if self.cancel.is_cancelled() {
3202 0 : return Err(GetVectoredError::Cancelled);
3203 855845 : }
3204 :
3205 : let TimelineVisitOutcome {
3206 855845 : completed_keyspace: completed,
3207 855845 : image_covered_keyspace,
3208 855845 : } = Self::get_vectored_reconstruct_data_timeline(
3209 855845 : timeline,
3210 855845 : keyspace.clone(),
3211 855845 : cont_lsn,
3212 855845 : reconstruct_state,
3213 855845 : &self.cancel,
3214 855845 : ctx,
3215 855845 : )
3216 855845 : .await?;
3217 :
3218 855845 : keyspace.remove_overlapping_with(&completed);
3219 855845 :
3220 855845 : // Do not descend into the ancestor timeline for aux files.
3221 855845 : // We don't return a blanket [`GetVectoredError::MissingKey`] to avoid
3222 855845 : // stalling compaction.
3223 855845 : keyspace.remove_overlapping_with(&KeySpace {
3224 855845 : ranges: vec![NON_INHERITED_RANGE, NON_INHERITED_SPARSE_RANGE],
3225 855845 : });
3226 855845 :
3227 855845 : // Keyspace is fully retrieved
3228 855845 : if keyspace.is_empty() {
3229 627401 : break None;
3230 228444 : }
3231 :
3232 228444 : let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() else {
3233 : // Not fully retrieved but no ancestor timeline.
3234 14 : break Some(keyspace);
3235 : };
3236 :
3237 : // Now check whether any keys fall within an image layer's coverage yet were
3238 : // not found in that image layer; such keys do not exist.
3239 :
3240 : // The block below will stop the vectored search if any of the keys encountered an image layer
3241 : // which did not contain a snapshot for said key. Since we have already removed all completed
3242 : // keys from `keyspace`, we expect there to be no overlap between it and the image covered key
3243 : // space. If that's not the case, we had at least one key encounter a gap in the image layer
3244 : // and stop the search as a result of that.
3245 228430 : let removed = keyspace.remove_overlapping_with(&image_covered_keyspace);
3246 228430 : if !removed.is_empty() {
3247 0 : break Some(removed);
3248 228430 : }
3249 228430 : // If we reached this point, `remove_overlapping_with` should not have made any change to the
3250 228430 : // keyspace.
3251 228430 :
3252 228430 : // Take the min to avoid reconstructing a page with data newer than request Lsn.
3253 228430 : cont_lsn = std::cmp::min(Lsn(request_lsn.0 + 1), Lsn(timeline.ancestor_lsn.0 + 1));
3254 228430 : timeline_owned = timeline
3255 228430 : .get_ready_ancestor_timeline(ancestor_timeline, ctx)
3256 228430 : .await?;
3257 228428 : timeline = &*timeline_owned;
3258 : };
3259 :
3260 627415 : if let Some(missing_keyspace) = missing_keyspace {
3261 14 : return Err(GetVectoredError::MissingKey(MissingKeyError {
3262 14 : key: missing_keyspace.start().unwrap(), /* better if we can store the full keyspace */
3263 14 : shard: self
3264 14 : .shard_identity
3265 14 : .get_shard_number(&missing_keyspace.start().unwrap()),
3266 14 : cont_lsn,
3267 14 : request_lsn,
3268 14 : ancestor_lsn: Some(timeline.ancestor_lsn),
3269 14 : backtrace: None,
3270 14 : }));
3271 627401 : }
3272 627401 :
3273 627401 : Ok(())
3274 627417 : }
3275 :
3276 : /// Collect the reconstruct data for a keyspace from the specified timeline.
3277 : ///
3278 : /// Maintain a fringe [`LayerFringe`] which tracks all the layers that intersect
3279 : /// the current keyspace. The current keyspace of the search at any given timeline
3280 : /// is the original keyspace minus all the keys that have been completed minus
3281 : /// any keys for which we couldn't find an intersecting layer. It's not tracked explicitly,
3282 : /// but if you merge all the keyspaces in the fringe, you get the "current keyspace".
3283 : ///
3284 : /// This is basically a depth-first search visitor implementation where a vertex
3285 : /// is the (layer, lsn range, key space) tuple. The fringe acts as the stack.
3286 : ///
3287 : /// At each iteration pop the top of the fringe (the layer with the highest Lsn)
3288 : /// and get all the required reconstruct data from the layer in one go.
3289 : ///
3290 : /// Returns the completed keyspace and the keyspaces with image coverage. The caller
3291 : /// decides how to deal with these two keyspaces.
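 : ///
 : /// In miniature, the fringe is the DFS stack; `plan_reads` below is a
 : /// hypothetical stand-in for the layer-map lookups done under the lock:
 : ///
 : /// ```ignore
 : /// loop {
 : /// // Plan: under the layer map lock, map still-unmapped keys to layers.
 : /// for (layer, keys, lsns) in plan_reads(&layer_map, &unmapped, cont_lsn) {
 : /// fringe.update(layer, keys, lsns);
 : /// }
 : /// // Read: visit the newest layer first, with the lock already released.
 : /// let Some((layer, keys, lsns)) = fringe.next_layer() else { break };
 : /// layer.get_values_reconstruct_data(keys, lsns, state, ctx).await?;
 : /// }
 : /// ```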
3292 855845 : async fn get_vectored_reconstruct_data_timeline(
3293 855845 : timeline: &Timeline,
3294 855845 : keyspace: KeySpace,
3295 855845 : mut cont_lsn: Lsn,
3296 855845 : reconstruct_state: &mut ValuesReconstructState,
3297 855845 : cancel: &CancellationToken,
3298 855845 : ctx: &RequestContext,
3299 855845 : ) -> Result<TimelineVisitOutcome, GetVectoredError> {
3300 855845 : let mut unmapped_keyspace = keyspace.clone();
3301 855845 : let mut fringe = LayerFringe::new();
3302 855845 :
3303 855845 : let mut completed_keyspace = KeySpace::default();
3304 855845 : let mut image_covered_keyspace = KeySpaceRandomAccum::new();
3305 :
3306 : loop {
3307 1703522 : if cancel.is_cancelled() {
3308 0 : return Err(GetVectoredError::Cancelled);
3309 1703522 : }
3310 1703522 :
3311 1703522 : let (keys_done_last_step, keys_with_image_coverage) =
3312 1703522 : reconstruct_state.consume_done_keys();
3313 1703522 : unmapped_keyspace.remove_overlapping_with(&keys_done_last_step);
3314 1703522 : completed_keyspace.merge(&keys_done_last_step);
3315 1703522 : if let Some(keys_with_image_coverage) = keys_with_image_coverage {
3316 22512 : unmapped_keyspace
3317 22512 : .remove_overlapping_with(&KeySpace::single(keys_with_image_coverage.clone()));
3318 22512 : image_covered_keyspace.add_range(keys_with_image_coverage);
3319 1681010 : }
3320 :
3321 : // Do not descend any further if the last layer we visited
3322 : // completed all keys in the keyspace it inspected. This is not
3323 : // required for correctness, but avoids visiting extra layers
3324 : // which turns out to be a perf bottleneck in some cases.
3325 1703522 : if !unmapped_keyspace.is_empty() {
3326 1078369 : let guard = timeline.layers.read().await;
3327 1078369 : let layers = guard.layer_map()?;
3328 :
3329 1078369 : let in_memory_layer = layers.find_in_memory_layer(|l| {
3330 916184 : let start_lsn = l.get_lsn_range().start;
3331 916184 : cont_lsn > start_lsn
3332 1078369 : });
3333 1078369 :
3334 1078369 : match in_memory_layer {
3335 606484 : Some(l) => {
3336 606484 : let lsn_range = l.get_lsn_range().start..cont_lsn;
3337 606484 : fringe.update(
3338 606484 : ReadableLayer::InMemoryLayer(l),
3339 606484 : unmapped_keyspace.clone(),
3340 606484 : lsn_range,
3341 606484 : );
3342 606484 : }
3343 : None => {
3344 471907 : for range in unmapped_keyspace.ranges.iter() {
3345 471907 : let results = layers.range_search(range.clone(), cont_lsn);
3346 471907 :
3347 471907 : results
3348 471907 : .found
3349 471907 : .into_iter()
3350 471907 : .map(|(SearchResult { layer, lsn_floor }, keyspace_accum)| {
3351 241207 : (
3352 241207 : ReadableLayer::PersistentLayer(guard.get_from_desc(&layer)),
3353 241207 : keyspace_accum.to_keyspace(),
3354 241207 : lsn_floor..cont_lsn,
3355 241207 : )
3356 471907 : })
3357 471907 : .for_each(|(layer, keyspace, lsn_range)| {
3358 241207 : fringe.update(layer, keyspace, lsn_range)
3359 471907 : });
3360 471907 : }
3361 : }
3362 : }
3363 :
3364 : // It's safe to drop the layer map lock after planning the next round of reads.
3365 : // The fringe keeps readable handles for the layers which are safe to read even
3366 : // if layers were compacted or flushed.
3367 : //
3368 : // The more interesting consideration is: "Why is the read algorithm still correct
3369 : // if the layer map changes while it is operating?". Doing a vectored read on a
3370 : // timeline boils down to pushing an imaginary lsn boundary downwards for each range
3371 : // covered by the read. The layer map tells us how to move the lsn downwards for a
3372 : // range at *a particular point in time*. It is fine for the answer to be different
3373 : // at two different time points.
3374 1078369 : drop(guard);
3375 625153 : }
3376 :
3377 1703522 : if let Some((layer_to_read, keyspace_to_read, lsn_range)) = fringe.next_layer() {
3378 847677 : let next_cont_lsn = lsn_range.start;
3379 847677 : layer_to_read
3380 847677 : .get_values_reconstruct_data(
3381 847677 : keyspace_to_read.clone(),
3382 847677 : lsn_range,
3383 847677 : reconstruct_state,
3384 847677 : ctx,
3385 847677 : )
3386 847677 : .await?;
3387 :
3388 847677 : unmapped_keyspace = keyspace_to_read;
3389 847677 : cont_lsn = next_cont_lsn;
3390 847677 :
3391 847677 : reconstruct_state.on_layer_visited(&layer_to_read);
3392 : } else {
3393 855845 : break;
3394 855845 : }
3395 855845 : }
3396 855845 :
3397 855845 : Ok(TimelineVisitOutcome {
3398 855845 : completed_keyspace,
3399 855845 : image_covered_keyspace: image_covered_keyspace.consume_keyspace(),
3400 855845 : })
3401 855845 : }
3402 :
3403 228430 : async fn get_ready_ancestor_timeline(
3404 228430 : &self,
3405 228430 : ancestor: &Arc<Timeline>,
3406 228430 : ctx: &RequestContext,
3407 228430 : ) -> Result<Arc<Timeline>, GetReadyAncestorError> {
3408 228430 : // It's possible that the ancestor timeline isn't active yet, or
3409 228430 : // is active but hasn't yet caught up to the branch point. Wait
3410 228430 : // for it.
3411 228430 : //
3412 228430 : // This cannot happen while the pageserver is running normally,
3413 228430 : // because you cannot create a branch from a point that isn't
3414 228430 : // present in the pageserver yet. However, we don't wait for the
3415 228430 : // branch point to be uploaded to cloud storage before creating
3416 228430 : // a branch. I.e., the branch LSN need not be remote consistent
3417 228430 : // for the branching operation to succeed.
3418 228430 : //
3419 228430 : // Hence, if we try to load a tenant in such a state where
3420 228430 : // 1. the existence of the branch was persisted (in IndexPart and/or locally)
3421 228430 : // 2. but the ancestor state is behind branch_lsn because it was not yet persisted
3422 228430 : // then we will need to wait for the ancestor timeline to
3423 228430 : // re-stream WAL up to branch_lsn before we access it.
3424 228430 : //
3425 228430 : // How can a tenant get in such a state?
3426 228430 : // - ungraceful pageserver process exit
3427 228430 : // - detach+attach => this is a bug, https://github.com/neondatabase/neon/issues/4219
3428 228430 : //
3429 228430 : // NB: this could be avoided by requiring
3430 228430 : // branch_lsn >= remote_consistent_lsn
3431 228430 : // during branch creation.
3432 228430 : match ancestor.wait_to_become_active(ctx).await {
3433 228428 : Ok(()) => {}
3434 : Err(TimelineState::Stopping) => {
3435 : // If an ancestor is stopping, it means the tenant is stopping: handle this the same as if this timeline was stopping.
3436 0 : return Err(GetReadyAncestorError::Cancelled);
3437 : }
3438 2 : Err(state) => {
3439 2 : return Err(GetReadyAncestorError::BadState {
3440 2 : timeline_id: ancestor.timeline_id,
3441 2 : state,
3442 2 : });
3443 : }
3444 : }
3445 228428 : ancestor
3446 228428 : .wait_lsn(self.ancestor_lsn, WaitLsnWaiter::Timeline(self), ctx)
3447 228428 : .await
3448 228428 : .map_err(|e| match e {
3449 0 : e @ WaitLsnError::Timeout(_) => GetReadyAncestorError::AncestorLsnTimeout(e),
3450 0 : WaitLsnError::Shutdown => GetReadyAncestorError::Cancelled,
3451 0 : WaitLsnError::BadState(state) => GetReadyAncestorError::BadState {
3452 0 : timeline_id: ancestor.timeline_id,
3453 0 : state,
3454 0 : },
3455 228428 : })?;
3456 :
3457 228428 : Ok(ancestor.clone())
3458 228430 : }
3459 :
3460 151304 : pub(crate) fn get_shard_identity(&self) -> &ShardIdentity {
3461 151304 : &self.shard_identity
3462 151304 : }
3463 :
3464 : #[inline(always)]
3465 0 : pub(crate) fn shard_timeline_id(&self) -> ShardTimelineId {
3466 0 : ShardTimelineId {
3467 0 : shard_index: ShardIndex {
3468 0 : shard_number: self.shard_identity.number,
3469 0 : shard_count: self.shard_identity.count,
3470 0 : },
3471 0 : timeline_id: self.timeline_id,
3472 0 : }
3473 0 : }
3474 :
3475 : /// Returns a non-frozen open in-memory layer for ingestion.
3476 : ///
3477 : /// Takes a witness of timeline writer state lock being held, because it makes no sense to call
3478 : /// this function without holding the mutex.
3479 1272 : async fn get_layer_for_write(
3480 1272 : &self,
3481 1272 : lsn: Lsn,
3482 1272 : _guard: &tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
3483 1272 : ctx: &RequestContext,
3484 1272 : ) -> anyhow::Result<Arc<InMemoryLayer>> {
3485 1272 : let mut guard = self.layers.write().await;
3486 :
3487 1272 : let last_record_lsn = self.get_last_record_lsn();
3488 1272 : ensure!(
3489 1272 : lsn > last_record_lsn,
3490 0 : "cannot modify relation after advancing last_record_lsn (incoming_lsn={}, last_record_lsn={})",
3491 : lsn,
3492 : last_record_lsn,
3493 : );
3494 :
3495 1272 : let layer = guard
3496 1272 : .open_mut()?
3497 1272 : .get_layer_for_write(
3498 1272 : lsn,
3499 1272 : self.conf,
3500 1272 : self.timeline_id,
3501 1272 : self.tenant_shard_id,
3502 1272 : &self.gate,
3503 1272 : ctx,
3504 1272 : )
3505 1272 : .await?;
3506 1272 : Ok(layer)
3507 1272 : }
3508 :
3509 5279068 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
3510 5279068 : assert!(new_lsn.is_aligned());
3511 :
3512 5279068 : self.metrics.last_record_lsn_gauge.set(new_lsn.0 as i64);
3513 5279068 : self.last_record_lsn.advance(new_lsn);
3514 5279068 : }
3515 :
3516 : /// Freeze any existing open in-memory layer and unconditionally notify the flush loop.
3517 : ///
3518 : /// Unconditional flush loop notification is given because in sharded cases we will want to
3519 : /// leave an Lsn gap. Unsharded tenants do not have Lsn gaps.
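 : ///
 : /// The handshake with the flush loop is a pair of watch channels carrying
 : /// request/result counters. A self-contained model of that pattern (a sketch
 : /// with plain `u64` payloads, not the real channel types):
 : ///
 : /// ```ignore
 : /// use tokio::sync::watch;
 : ///
 : /// let (start_tx, _start_rx) = watch::channel(0u64); // receiver held by the flush loop
 : /// let (done_tx, mut done_rx) = watch::channel(0u64);
 : ///
 : /// // Requester: bump the request counter and remember our ticket.
 : /// let mut my_request = 0;
 : /// start_tx.send_modify(|counter| { *counter += 1; my_request = *counter; });
 : ///
 : /// // Flush loop (elsewhere): after flushing, done_tx.send_replace(my_request);
 : ///
 : /// // Requester: wait until the published result counter covers our ticket.
 : /// while *done_rx.borrow() < my_request {
 : /// done_rx.changed().await?;
 : /// }
 : /// ```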
3520 1176 : async fn freeze_inmem_layer_at(
3521 1176 : &self,
3522 1176 : at: Lsn,
3523 1176 : write_lock: &mut tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>,
3524 1176 : ) -> Result<u64, FlushLayerError> {
3525 1176 : let frozen = {
3526 1176 : let mut guard = self.layers.write().await;
3527 1176 : guard
3528 1176 : .open_mut()?
3529 1176 : .try_freeze_in_memory_layer(at, &self.last_freeze_at, write_lock)
3530 1176 : .await
3531 : };
3532 :
3533 1176 : if frozen {
3534 1148 : let now = Instant::now();
3535 1148 : *(self.last_freeze_ts.write().unwrap()) = now;
3536 1148 : }
3537 :
3538 : // Increment the flush cycle counter and wake up the flush task.
3539 : // Remember the new value, so that when we listen for the flush
3540 : // to finish, we know when the flush that we initiated has
3541 : // finished, instead of some other flush that was started earlier.
3542 1176 : let mut my_flush_request = 0;
3543 1176 :
3544 1176 : let flush_loop_state = { *self.flush_loop_state.lock().unwrap() };
3545 1176 : if !matches!(flush_loop_state, FlushLoopState::Running { .. }) {
3546 0 : return Err(FlushLayerError::NotRunning(flush_loop_state));
3547 1176 : }
3548 1176 :
3549 1176 : self.layer_flush_start_tx.send_modify(|(counter, lsn)| {
3550 1176 : my_flush_request = *counter + 1;
3551 1176 : *counter = my_flush_request;
3552 1176 : *lsn = std::cmp::max(at, *lsn);
3553 1176 : });
3554 1176 :
3555 1176 : assert_ne!(my_flush_request, 0);
3556 :
3557 1176 : Ok(my_flush_request)
3558 1176 : }
3559 :
3560 : /// Layer flusher task's main loop.
3561 416 : async fn flush_loop(
3562 416 : self: &Arc<Self>,
3563 416 : mut layer_flush_start_rx: tokio::sync::watch::Receiver<(u64, Lsn)>,
3564 416 : ctx: &RequestContext,
3565 416 : ) {
3566 416 : info!("started flush loop");
3567 : loop {
3568 1556 : tokio::select! {
3569 1556 : _ = self.cancel.cancelled() => {
3570 10 : info!("shutting down layer flush task due to Timeline::cancel");
3571 10 : break;
3572 : },
3573 1556 : _ = layer_flush_start_rx.changed() => {}
3574 1140 : }
3575 1140 : trace!("waking up");
3576 1140 : let (flush_counter, frozen_to_lsn) = *layer_flush_start_rx.borrow();
3577 1140 :
3578 1140 : // The highest LSN to which we flushed in the loop over frozen layers
3579 1140 : let mut flushed_to_lsn = Lsn(0);
3580 :
3581 1140 : let result = loop {
3582 2288 : if self.cancel.is_cancelled() {
3583 0 : info!("dropping out of flush loop for timeline shutdown");
3584 : // Note: we do not bother transmitting into [`layer_flush_done_tx`], because
3585 : // anyone waiting on that will respect self.cancel as well: they will stop
3586 : // waiting at the same time as we drop out of this loop.
3587 0 : return;
3588 2288 : }
3589 2288 :
3590 2288 : let timer = self.metrics.flush_time_histo.start_timer();
3591 :
3592 : let num_frozen_layers;
3593 : let frozen_layer_total_size;
3594 2288 : let layer_to_flush = {
3595 2288 : let guard = self.layers.read().await;
3596 2288 : let Ok(lm) = guard.layer_map() else {
3597 0 : info!("dropping out of flush loop for timeline shutdown");
3598 0 : return;
3599 : };
3600 2288 : num_frozen_layers = lm.frozen_layers.len();
3601 2288 : frozen_layer_total_size = lm
3602 2288 : .frozen_layers
3603 2288 : .iter()
3604 2288 : .map(|l| l.estimated_in_mem_size())
3605 2288 : .sum::<u64>();
3606 2288 : lm.frozen_layers.front().cloned()
3607 : // drop 'layers' lock to allow concurrent reads and writes
3608 : };
3609 2288 : let Some(layer_to_flush) = layer_to_flush else {
3610 1140 : break Ok(());
3611 : };
3612 1148 : if num_frozen_layers
3613 1148 : > std::cmp::max(
3614 1148 : self.get_compaction_threshold(),
3615 1148 : DEFAULT_COMPACTION_THRESHOLD,
3616 1148 : )
3617 0 : && frozen_layer_total_size >= /* 128 MB */ 128000000
3618 : {
3619 0 : tracing::warn!(
3620 0 : "too many frozen layers: {num_frozen_layers} layers with estimated in-mem size of {frozen_layer_total_size} bytes",
3621 : );
3622 1148 : }
3623 1148 : match self.flush_frozen_layer(layer_to_flush, ctx).await {
3624 1148 : Ok(this_layer_to_lsn) => {
3625 1148 : flushed_to_lsn = std::cmp::max(flushed_to_lsn, this_layer_to_lsn);
3626 1148 : }
3627 : Err(FlushLayerError::Cancelled) => {
3628 0 : info!("dropping out of flush loop for timeline shutdown");
3629 0 : return;
3630 : }
3631 0 : err @ Err(
3632 0 : FlushLayerError::NotRunning(_)
3633 0 : | FlushLayerError::Other(_)
3634 0 : | FlushLayerError::CreateImageLayersError(_),
3635 0 : ) => {
3636 0 : error!("could not flush frozen layer: {err:?}");
3637 0 : break err.map(|_| ());
3638 : }
3639 : }
3640 1148 : timer.stop_and_record();
3641 : };
3642 :
3643 : // Unsharded tenants should never advance their LSN beyond the end of the
3644 : // highest layer they write: such gaps between layer data and the frozen LSN
3645 : // are only legal on sharded tenants.
3646 1140 : debug_assert!(
3647 1140 : self.shard_identity.count.count() > 1
3648 1140 : || flushed_to_lsn >= frozen_to_lsn
3649 65 : || !flushed_to_lsn.is_valid()
3650 : );
3651 :
3652 1140 : if flushed_to_lsn < frozen_to_lsn && self.shard_identity.count.count() > 1 {
3653 : // If our layer flushes didn't carry disk_consistent_lsn up to the `to_lsn` advertised
3654 : // to us via layer_flush_start_rx, then advance it here.
3655 : //
3656 : // This path is only taken for tenants with multiple shards: single sharded tenants should
3657 : // never encounter a gap in the wal.
3658 0 : let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
3659 0 : tracing::debug!("Advancing disk_consistent_lsn across layer gap {old_disk_consistent_lsn}->{frozen_to_lsn}");
3660 0 : if self.set_disk_consistent_lsn(frozen_to_lsn) {
3661 0 : if let Err(e) = self.schedule_uploads(frozen_to_lsn, vec![]) {
3662 0 : tracing::warn!("Failed to schedule metadata upload after updating disk_consistent_lsn: {e}");
3663 0 : }
3664 0 : }
3665 1140 : }
3666 :
3667 : // Notify any listeners that we're done
3668 1140 : let _ = self
3669 1140 : .layer_flush_done_tx
3670 1140 : .send_replace((flush_counter, result));
3671 : }
3672 10 : }
3673 :
3674 : /// Waits for a flush request created by [`Self::freeze_inmem_layer_at`] to complete.
3675 1096 : async fn wait_flush_completion(&self, request: u64) -> Result<(), FlushLayerError> {
3676 1096 : let mut rx = self.layer_flush_done_tx.subscribe();
3677 : loop {
3678 : {
3679 2190 : let (last_result_counter, last_result) = &*rx.borrow();
3680 2190 : if *last_result_counter >= request {
3681 1096 : if let Err(err) = last_result {
3682 : // We already logged the original error in
3683 : // flush_loop. We cannot propagate it to the caller
3684 : // here, because it might not be Cloneable
3685 0 : return Err(err.clone());
3686 : } else {
3687 1096 : return Ok(());
3688 : }
3689 1094 : }
3690 1094 : }
3691 1094 : trace!("waiting for flush to complete");
3692 1094 : tokio::select! {
3693 1094 : rx_e = rx.changed() => {
3694 1094 : rx_e.map_err(|_| FlushLayerError::NotRunning(*self.flush_loop_state.lock().unwrap()))?;
3695 : },
3696 : // Cancellation safety: we are not leaving an I/O in-flight for the flush, we're just ignoring
3697 : // the notification from [`flush_loop`] that it completed.
3698 1094 : _ = self.cancel.cancelled() => {
3699 0 : tracing::info!("Cancelled layer flush due to timeline shutdown");
3700 0 : return Ok(())
3701 : }
3702 : };
3703 1094 : trace!("done")
3704 : }
3705 1096 : }
3706 :
3707 : /// Flush one frozen in-memory layer to disk, as a new delta layer.
3708 : ///
3709 : /// Return value is the last lsn (inclusive) of the layer that was frozen.
3710 1148 : #[instrument(skip_all, fields(layer=%frozen_layer))]
3711 : async fn flush_frozen_layer(
3712 : self: &Arc<Self>,
3713 : frozen_layer: Arc<InMemoryLayer>,
3714 : ctx: &RequestContext,
3715 : ) -> Result<Lsn, FlushLayerError> {
3716 : debug_assert_current_span_has_tenant_and_timeline_id();
3717 :
3718 : // As a special case, when we have just imported an image into the repository,
3719 : // instead of writing out a L0 delta layer, we directly write out image layer
3720 : // files instead. This is possible as long as *all* the data imported into the
3721 : // repository have the same LSN.
3722 : let lsn_range = frozen_layer.get_lsn_range();
3723 :
3724 : // Whether to directly create image layers for this flush, or flush them as delta layers
3725 : let create_image_layer =
3726 : lsn_range.start == self.initdb_lsn && lsn_range.end == Lsn(self.initdb_lsn.0 + 1);
3727 :
3728 : #[cfg(test)]
3729 : {
3730 : match &mut *self.flush_loop_state.lock().unwrap() {
3731 : FlushLoopState::NotStarted | FlushLoopState::Exited => {
3732 : panic!("flush loop not running")
3733 : }
3734 : FlushLoopState::Running {
3735 : expect_initdb_optimization,
3736 : initdb_optimization_count,
3737 : ..
3738 : } => {
3739 : if create_image_layer {
3740 : *initdb_optimization_count += 1;
3741 : } else {
3742 : assert!(!*expect_initdb_optimization, "expected initdb optimization");
3743 : }
3744 : }
3745 : }
3746 : }
3747 :
3748 : let (layers_to_upload, delta_layer_to_add) = if create_image_layer {
3749 : // Note: The 'ctx' in use here has DownloadBehavior::Error. We should not
3750 : // require downloading anything during initial import.
3751 : let ((rel_partition, metadata_partition), _lsn) = self
3752 : .repartition(
3753 : self.initdb_lsn,
3754 : self.get_compaction_target_size(),
3755 : EnumSet::empty(),
3756 : ctx,
3757 : )
3758 : .await
3759 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e.into()))?;
3760 :
3761 : if self.cancel.is_cancelled() {
3762 : return Err(FlushLayerError::Cancelled);
3763 : }
3764 :
3765 : let mut layers_to_upload = Vec::new();
3766 : layers_to_upload.extend(
3767 : self.create_image_layers(
3768 : &rel_partition,
3769 : self.initdb_lsn,
3770 : ImageLayerCreationMode::Initial,
3771 : ctx,
3772 : )
3773 : .await?,
3774 : );
3775 : if !metadata_partition.parts.is_empty() {
3776 : assert_eq!(
3777 : metadata_partition.parts.len(),
3778 : 1,
3779 : "currently sparse keyspace should only contain a single metadata keyspace"
3780 : );
3781 : layers_to_upload.extend(
3782 : self.create_image_layers(
3783 : // Safety: create_image_layers treats sparse keyspaces differently in that it does not
3784 : // scan every single key within the keyspace, and therefore it's safe to force-convert it
3785 : // into a dense keyspace before calling this function.
3786 : &metadata_partition.into_dense(),
3787 : self.initdb_lsn,
3788 : ImageLayerCreationMode::Initial,
3789 : ctx,
3790 : )
3791 : .await?,
3792 : );
3793 : }
3794 :
3795 : (layers_to_upload, None)
3796 : } else {
3797 : // Normal case, write out a L0 delta layer file.
3798 : // `create_delta_layer` will not modify the layer map.
3799 : // We will remove frozen layer and add delta layer in one atomic operation later.
3800 : let Some(layer) = self
3801 : .create_delta_layer(&frozen_layer, None, ctx)
3802 : .await
3803 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?
3804 : else {
3805 : panic!("delta layer cannot be empty if no filter is applied");
3806 : };
3807 : (
3808 : // FIXME: even though we assume a single image layer and a single delta layer,
3809 : // we push them into a Vec
3810 : vec![layer.clone()],
3811 : Some(layer),
3812 : )
3813 : };
3814 :
3815 : pausable_failpoint!("flush-layer-cancel-after-writing-layer-out-pausable");
3816 :
3817 : if self.cancel.is_cancelled() {
3818 : return Err(FlushLayerError::Cancelled);
3819 : }
3820 :
3821 : let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1);
3822 :
3823 : // The new on-disk layers are now in the layer map. We can remove the
3824 : // in-memory layer from the map now. The flushed layer is stored in
3825 : // the mapping in `create_delta_layer`.
3826 : {
3827 : let mut guard = self.layers.write().await;
3828 :
3829 : guard.open_mut()?.finish_flush_l0_layer(
3830 : delta_layer_to_add.as_ref(),
3831 : &frozen_layer,
3832 : &self.metrics,
3833 : );
3834 :
3835 : if self.set_disk_consistent_lsn(disk_consistent_lsn) {
3836 : // Schedule remote uploads that will reflect our new disk_consistent_lsn
3837 : self.schedule_uploads(disk_consistent_lsn, layers_to_upload)
3838 0 : .map_err(|e| FlushLayerError::from_anyhow(self, e))?;
3839 : }
3840 : // release lock on 'layers'
3841 : };
3842 :
3843 : // Backpressure mechanism: hold up the flush loop here until we have uploaded all layer files.
3844 : // This makes us refuse ingest until the new layers have been persisted to remote storage.
3845 : let start = Instant::now();
3846 : self.remote_client
3847 : .wait_completion()
3848 : .await
3849 0 : .map_err(|e| match e {
3850 : WaitCompletionError::UploadQueueShutDownOrStopped
3851 : | WaitCompletionError::NotInitialized(
3852 : NotInitialized::ShuttingDown | NotInitialized::Stopped,
3853 0 : ) => FlushLayerError::Cancelled,
3854 : WaitCompletionError::NotInitialized(NotInitialized::Uninitialized) => {
3855 0 : FlushLayerError::Other(anyhow!(e).into())
3856 : }
3857 0 : })?;
3858 : let duration = start.elapsed().as_secs_f64();
3859 : self.metrics.flush_wait_upload_time_gauge_add(duration);
3860 :
3861 : // FIXME: between create_delta_layer and the scheduling of the upload in `update_metadata_file`,
3862 : // a compaction can delete the file and then it won't be available for uploads any more.
3863 : // We still schedule the upload, resulting in an error, but ideally we'd somehow avoid this
3864 : // race situation.
3865 : // See https://github.com/neondatabase/neon/issues/4526
3866 : pausable_failpoint!("flush-frozen-pausable");
3867 :
3868 : // This failpoint is used by another test case `test_pageserver_recovery`.
3869 : fail_point!("flush-frozen-exit");
3870 :
3871 : Ok(Lsn(lsn_range.end.0 - 1))
3872 : }
3873 :
3874 : /// Return true if the value changed
3875 : ///
3876 : /// This function must only be used from the layer flush task.
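 : ///
 : /// A minimal model of the same monotonic-advance pattern on a bare
 : /// `AtomicU64` (a sketch, not the actual atomic LSN wrapper used here):
 : ///
 : /// ```ignore
 : /// use std::sync::atomic::{AtomicU64, Ordering};
 : ///
 : /// fn advance(lsn: &AtomicU64, new: u64) -> bool {
 : /// let old = lsn.fetch_max(new, Ordering::AcqRel);
 : /// assert!(new >= old, "must grow monotonically");
 : /// new != old // true iff the stored value changed
 : /// }
 : /// ```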
3877 1148 : fn set_disk_consistent_lsn(&self, new_value: Lsn) -> bool {
3878 1148 : let old_value = self.disk_consistent_lsn.fetch_max(new_value);
3879 1148 : assert!(new_value >= old_value, "disk_consistent_lsn must be growing monotonously at runtime; current {old_value}, offered {new_value}");
3880 :
3881 1148 : self.metrics
3882 1148 : .disk_consistent_lsn_gauge
3883 1148 : .set(new_value.0 as i64);
3884 1148 : new_value != old_value
3885 1148 : }
3886 :
3887 : /// Update metadata file
3888 1198 : fn schedule_uploads(
3889 1198 : &self,
3890 1198 : disk_consistent_lsn: Lsn,
3891 1198 : layers_to_upload: impl IntoIterator<Item = ResidentLayer>,
3892 1198 : ) -> anyhow::Result<()> {
3893 1198 : // We can only save a valid 'prev_record_lsn' value on disk if we
3894 1198 : // flushed *all* in-memory changes to disk. We only track
3895 1198 : // 'prev_record_lsn' in memory for the latest processed record, so we
3896 1198 : // don't remember what the correct value that corresponds to some old
3897 1198 : // LSN is. But if we flush everything, then the value corresponding
3898 1198 : // current 'last_record_lsn' is correct and we can store it on disk.
3899 1198 : let RecordLsn {
3900 1198 : last: last_record_lsn,
3901 1198 : prev: prev_record_lsn,
3902 1198 : } = self.last_record_lsn.load();
3903 1198 : let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
3904 1072 : Some(prev_record_lsn)
3905 : } else {
3906 126 : None
3907 : };
3908 :
3909 1198 : let update = crate::tenant::metadata::MetadataUpdate::new(
3910 1198 : disk_consistent_lsn,
3911 1198 : ondisk_prev_record_lsn,
3912 1198 : *self.latest_gc_cutoff_lsn.read(),
3913 1198 : );
3914 1198 :
3915 1198 : fail_point!("checkpoint-before-saving-metadata", |x| bail!(
3916 0 : "{}",
3917 0 : x.unwrap()
3918 1198 : ));
3919 :
3920 2358 : for layer in layers_to_upload {
3921 1160 : self.remote_client.schedule_layer_file_upload(layer)?;
3922 : }
3923 1198 : self.remote_client
3924 1198 : .schedule_index_upload_for_metadata_update(&update)?;
3925 :
3926 1198 : Ok(())
3927 1198 : }
3928 :
3929 0 : pub(crate) async fn preserve_initdb_archive(&self) -> anyhow::Result<()> {
3930 0 : self.remote_client
3931 0 : .preserve_initdb_archive(
3932 0 : &self.tenant_shard_id.tenant_id,
3933 0 : &self.timeline_id,
3934 0 : &self.cancel,
3935 0 : )
3936 0 : .await
3937 0 : }
3938 :
3939 : // Write out the given frozen in-memory layer as a new L0 delta file. This L0 file will not be tracked
3940 : // in the layer map immediately. The caller is responsible for putting it into the layer map.
3941 968 : async fn create_delta_layer(
3942 968 : self: &Arc<Self>,
3943 968 : frozen_layer: &Arc<InMemoryLayer>,
3944 968 : key_range: Option<Range<Key>>,
3945 968 : ctx: &RequestContext,
3946 968 : ) -> anyhow::Result<Option<ResidentLayer>> {
3947 968 : let self_clone = Arc::clone(self);
3948 968 : let frozen_layer = Arc::clone(frozen_layer);
3949 968 : let ctx = ctx.attached_child();
3950 968 : let work = async move {
3951 968 : let Some((desc, path)) = frozen_layer
3952 968 : .write_to_disk(&ctx, key_range, self_clone.l0_flush_global_state.inner())
3953 968 : .await?
3954 : else {
3955 0 : return Ok(None);
3956 : };
3957 968 : let new_delta = Layer::finish_creating(self_clone.conf, &self_clone, desc, &path)?;
3958 :
3959 : // The write_to_disk() above calls writer.finish() which already did the fsync of the inodes.
3960 : // We just need to fsync the directory in which these inodes are linked,
3961 : // which we know to be the timeline directory.
3962 : //
3963 : // We use fatal_err() below because, after write_to_disk returns with success,
3964 : // the in-memory state of the filesystem already has the layer file in its final place,
3965 : // and subsequent pageserver code could think it's durable while it really isn't.
3966 968 : let timeline_dir = VirtualFile::open(
3967 968 : &self_clone
3968 968 : .conf
3969 968 : .timeline_path(&self_clone.tenant_shard_id, &self_clone.timeline_id),
3970 968 : &ctx,
3971 968 : )
3972 968 : .await
3973 968 : .fatal_err("VirtualFile::open for timeline dir fsync");
3974 968 : timeline_dir
3975 968 : .sync_all()
3976 968 : .await
3977 968 : .fatal_err("VirtualFile::sync_all timeline dir");
3978 968 : anyhow::Ok(Some(new_delta))
3979 968 : };
3980 : // Before tokio-epoll-uring, we ran write_to_disk & the sync_all inside spawn_blocking.
3981 : // Preserve that behavior to maintain the same behavior for `virtual_file_io_engine=std-fs`.
3982 : use crate::virtual_file::io_engine::IoEngine;
3983 968 : match crate::virtual_file::io_engine::get() {
3984 0 : IoEngine::NotSet => panic!("io engine not set"),
3985 : IoEngine::StdFs => {
3986 484 : let span = tracing::info_span!("blocking");
3987 484 : tokio::task::spawn_blocking({
3988 484 : move || Handle::current().block_on(work.instrument(span))
3989 484 : })
3990 484 : .await
3991 484 : .context("spawn_blocking")
3992 484 : .and_then(|x| x)
3993 : }
3994 : #[cfg(target_os = "linux")]
3995 484 : IoEngine::TokioEpollUring => work.await,
3996 : }
3997 968 : }
3998 :
3999 544 : async fn repartition(
4000 544 : &self,
4001 544 : lsn: Lsn,
4002 544 : partition_size: u64,
4003 544 : flags: EnumSet<CompactFlags>,
4004 544 : ctx: &RequestContext,
4005 544 : ) -> Result<((KeyPartitioning, SparseKeyPartitioning), Lsn), CompactionError> {
4006 544 : let Ok(mut partitioning_guard) = self.partitioning.try_lock() else {
4007 : // NB: there are two callers, one is the compaction task, of which there is only one per struct Tenant and hence Timeline.
4008 : // The other is the initdb optimization in flush_frozen_layer, used by `bootstrap_timeline`, which runs before `.activate()`
4009 : // and hence before the compaction task starts.
4010 : // Note that there is a third "caller" that takes the `partitioning` lock: `gc_compaction_split_jobs` for
4011 : // gc-compaction, which uses the repartition data to determine the split jobs. In the future, it might use its own
4012 : // heuristics, but for now, we should allow concurrent access to it and let the caller retry compaction.
4013 0 : return Err(CompactionError::Other(anyhow!(
4014 0 : "repartition() called concurrently, this is rare and a retry should be fine"
4015 0 : )));
4016 : };
4017 544 : let ((dense_partition, sparse_partition), partition_lsn) = &*partitioning_guard;
4018 544 : if lsn < *partition_lsn {
4019 0 : return Err(CompactionError::Other(anyhow!(
4020 0 : "repartition() called with LSN going backwards, this should not happen"
4021 0 : )));
4022 544 : }
4023 544 :
4024 544 : let distance = lsn.0 - partition_lsn.0;
4025 544 : if *partition_lsn != Lsn(0)
4026 262 : && distance <= self.repartition_threshold
4027 262 : && !flags.contains(CompactFlags::ForceRepartition)
4028 : {
4029 248 : debug!(
4030 : distance,
4031 : threshold = self.repartition_threshold,
4032 0 : "no repartitioning needed"
4033 : );
4034 248 : return Ok((
4035 248 : (dense_partition.clone(), sparse_partition.clone()),
4036 248 : *partition_lsn,
4037 248 : ));
4038 296 : }
4039 :
4040 296 : let (dense_ks, sparse_ks) = self.collect_keyspace(lsn, ctx).await?;
4041 296 : let dense_partitioning = dense_ks.partition(&self.shard_identity, partition_size);
4042 296 : let sparse_partitioning = SparseKeyPartitioning {
4043 296 : parts: vec![sparse_ks],
4044 296 : }; // no partitioning for metadata keys for now
4045 296 : *partitioning_guard = ((dense_partitioning, sparse_partitioning), lsn);
4046 296 :
4047 296 : Ok((partitioning_guard.0.clone(), partitioning_guard.1))
4048 544 : }
4049 :
4050 : // Is it time to create a new image layer for the given partition?
4051 14 : async fn time_for_new_image_layer(&self, partition: &KeySpace, lsn: Lsn) -> bool {
4052 14 : let threshold = self.get_image_creation_threshold();
4053 :
4054 14 : let guard = self.layers.read().await;
4055 14 : let Ok(layers) = guard.layer_map() else {
4056 0 : return false;
4057 : };
4058 :
4059 14 : let mut max_deltas = 0;
4060 28 : for part_range in &partition.ranges {
4061 14 : let image_coverage = layers.image_coverage(part_range, lsn);
4062 28 : for (img_range, last_img) in image_coverage {
4063 14 : let img_lsn = if let Some(last_img) = last_img {
4064 0 : last_img.get_lsn_range().end
4065 : } else {
4066 14 : Lsn(0)
4067 : };
4068 : // Let's consider an example:
4069 : //
4070 : // delta layer with LSN range 71-81
4071 : // delta layer with LSN range 81-91
4072 : // delta layer with LSN range 91-101
4073 : // image layer at LSN 100
4074 : //
4075 : // If 'lsn' is still 100, i.e. no new WAL has been processed since the last image layer,
4076 : // there's no need to create a new one. We check this case explicitly, to avoid passing
4077 : // a bogus range to count_deltas below, with start > end. It's even possible that there
4078 : // are some delta layers *later* than current 'lsn', if more WAL was processed and flushed
4079 : // after we read last_record_lsn, which is passed here in the 'lsn' argument.
4080 14 : if img_lsn < lsn {
4081 14 : let num_deltas =
4082 14 : layers.count_deltas(&img_range, &(img_lsn..lsn), Some(threshold));
4083 14 :
4084 14 : max_deltas = max_deltas.max(num_deltas);
4085 14 : if num_deltas >= threshold {
4086 0 : debug!(
4087 0 : "key range {}-{}, has {} deltas on this timeline in LSN range {}..{}",
4088 : img_range.start, img_range.end, num_deltas, img_lsn, lsn
4089 : );
4090 0 : return true;
4091 14 : }
4092 0 : }
4093 : }
4094 : }
4095 :
4096 14 : debug!(
4097 : max_deltas,
4098 0 : "none of the partitioned ranges had >= {threshold} deltas"
4099 : );
4100 14 : false
4101 14 : }
4102 :
4103 : /// Create image layers for Postgres data. Assumes the caller passes a partition that is not too large,
4104 : /// so that at most one image layer will be produced from this function.
4105 206 : async fn create_image_layer_for_rel_blocks(
4106 206 : self: &Arc<Self>,
4107 206 : partition: &KeySpace,
4108 206 : mut image_layer_writer: ImageLayerWriter,
4109 206 : lsn: Lsn,
4110 206 : ctx: &RequestContext,
4111 206 : img_range: Range<Key>,
4112 206 : start: Key,
4113 206 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
4114 206 : let mut wrote_keys = false;
4115 206 :
4116 206 : let mut key_request_accum = KeySpaceAccum::new();
4117 1354 : for range in &partition.ranges {
4118 1148 : let mut key = range.start;
4119 2488 : while key < range.end {
4120 : // Decide whether to retain this key: usually we do, but sharded tenants may
4121 : // need to drop keys that don't belong to them. If we retain the key, add it
4122 : // to `key_request_accum` for later issuing a vectored get
4123 1340 : if self.shard_identity.is_key_disposable(&key) {
4124 0 : debug!(
4125 0 : "Dropping key {} during compaction (it belongs on shard {:?})",
4126 0 : key,
4127 0 : self.shard_identity.get_shard_number(&key)
4128 : );
4129 1340 : } else {
4130 1340 : key_request_accum.add_key(key);
4131 1340 : }
4132 :
4133 1340 : let last_key_in_range = key.next() == range.end;
4134 1340 : key = key.next();
4135 1340 :
4136 1340 : // Maybe flush `key_request_accum`
4137 1340 : if key_request_accum.raw_size() >= Timeline::MAX_GET_VECTORED_KEYS
4138 1340 : || (last_key_in_range && key_request_accum.raw_size() > 0)
4139 : {
4140 1148 : let results = self
4141 1148 : .get_vectored(key_request_accum.consume_keyspace(), lsn, ctx)
4142 1148 : .await?;
4143 :
4144 1148 : if self.cancel.is_cancelled() {
4145 0 : return Err(CreateImageLayersError::Cancelled);
4146 1148 : }
4147 :
4148 2488 : for (img_key, img) in results {
4149 1340 : let img = match img {
4150 1340 : Ok(img) => img,
4151 0 : Err(err) => {
4152 0 : // If we fail to reconstruct a VM or FSM page, we can zero the
4153 0 : // page without losing any actual user data. That seems better
4154 0 : // than failing repeatedly and getting stuck.
4155 0 : //
4156 0 : // We had a bug at one point, where we truncated the FSM and VM
4157 0 : // in the pageserver, but the Postgres didn't know about that
4158 0 : // and continued to generate incremental WAL records for pages
4159 0 : // that didn't exist in the pageserver. Trying to replay those
4160 0 : // WAL records failed to find the previous image of the page.
4161 0 : // This special case allows us to recover from that situation.
4162 0 : // See https://github.com/neondatabase/neon/issues/2601.
4163 0 : //
4164 0 : // Unfortunately we cannot do this for the main fork, or for
4165 0 : // any metadata keys, keys, as that would lead to actual data
4166 0 : // loss.
4167 0 : if img_key.is_rel_fsm_block_key() || img_key.is_rel_vm_block_key() {
4168 0 : warn!("could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}");
4169 0 : ZERO_PAGE.clone()
4170 : } else {
4171 0 : return Err(CreateImageLayersError::from(err));
4172 : }
4173 : }
4174 : };
4175 :
4176 : // Write all the keys we just read into our new image layer.
4177 1340 : image_layer_writer.put_image(img_key, img, ctx).await?;
4178 1340 : wrote_keys = true;
4179 : }
4180 192 : }
4181 : }
4182 : }
4183 :
4184 206 : if wrote_keys {
4185 : // Normal path: we have written some data into the new image layer for this
4186 : // partition, so flush it to disk.
4187 206 : let (desc, path) = image_layer_writer.finish(ctx).await?;
4188 206 : let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
4189 206 : info!("created image layer for rel {}", image_layer.local_path());
4190 206 : Ok(ImageLayerCreationOutcome {
4191 206 : image: Some(image_layer),
4192 206 : next_start_key: img_range.end,
4193 206 : })
4194 : } else {
4195 : // Special case: the image layer may be empty if this is a sharded tenant and the
4196 : // partition does not cover any keys owned by this shard. In this case, to ensure
4197 : // we don't leave gaps between image layers, leave `start` where it is, so that the next
4198 : // layer we write will cover the key range that we just scanned.
4199 0 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
4200 0 : Ok(ImageLayerCreationOutcome {
4201 0 : image: None,
4202 0 : next_start_key: start,
4203 0 : })
4204 : }
4205 206 : }
4206 :
4207 : /// Create an image layer for metadata keys. This function produces one image layer for all metadata
4208 : /// keys for now. Because the metadata keyspace cannot exceed the basebackup size limit, all of
4209 : /// it fits comfortably within a single image layer.
4210 : #[allow(clippy::too_many_arguments)]
4211 196 : async fn create_image_layer_for_metadata_keys(
4212 196 : self: &Arc<Self>,
4213 196 : partition: &KeySpace,
4214 196 : mut image_layer_writer: ImageLayerWriter,
4215 196 : lsn: Lsn,
4216 196 : ctx: &RequestContext,
4217 196 : img_range: Range<Key>,
4218 196 : mode: ImageLayerCreationMode,
4219 196 : start: Key,
4220 196 : ) -> Result<ImageLayerCreationOutcome, CreateImageLayersError> {
4221 196 : // Metadata keys image layer creation.
4222 196 : let mut reconstruct_state = ValuesReconstructState::default();
4223 196 : let begin = Instant::now();
4224 196 : let data = self
4225 196 : .get_vectored_impl(partition.clone(), lsn, &mut reconstruct_state, ctx)
4226 196 : .await?;
4227 196 : let (data, total_kb_retrieved, total_keys_retrieved) = {
4228 196 : let mut new_data = BTreeMap::new();
4229 196 : let mut total_kb_retrieved = 0;
4230 196 : let mut total_keys_retrieved = 0;
4231 10208 : for (k, v) in data {
4232 10012 : let v = v?;
4233 10012 : total_kb_retrieved += KEY_SIZE + v.len();
4234 10012 : total_keys_retrieved += 1;
4235 10012 : new_data.insert(k, v);
4236 : }
4237 196 : (new_data, total_kb_retrieved / 1024, total_keys_retrieved)
4238 196 : };
4239 196 : let delta_files_accessed = reconstruct_state.get_delta_layers_visited();
4240 196 : let elapsed = begin.elapsed();
4241 196 :
4242 196 : let trigger_generation = delta_files_accessed as usize >= MAX_AUX_FILE_V2_DELTAS;
4243 196 : info!(
4244 0 : "metadata key compaction: trigger_generation={trigger_generation}, delta_files_accessed={delta_files_accessed}, total_kb_retrieved={total_kb_retrieved}, total_keys_retrieved={total_keys_retrieved}, read_time={}s", elapsed.as_secs_f64()
4245 : );
4246 :
4247 196 : if !trigger_generation && mode == ImageLayerCreationMode::Try {
4248 2 : return Ok(ImageLayerCreationOutcome {
4249 2 : image: None,
4250 2 : next_start_key: img_range.end,
4251 2 : });
4252 194 : }
4253 194 : if self.cancel.is_cancelled() {
4254 0 : return Err(CreateImageLayersError::Cancelled);
4255 194 : }
4256 194 : let mut wrote_any_image = false;
4257 10206 : for (k, v) in data {
4258 10012 : if v.is_empty() {
4259 : // the key has been deleted, it does not need an image
4260 : // in metadata keyspace, an empty image == tombstone
4261 8 : continue;
4262 10004 : }
4263 10004 : wrote_any_image = true;
4264 10004 :
4265 10004 : // No need to handle sharding b/c metadata keys are always on the 0-th shard.
4266 10004 :
4267 10004 : // TODO: split image layers to avoid too large layer files. Too large image files are not handled
4268 10004 : // on the normal data path either.
4269 10004 : image_layer_writer.put_image(k, v, ctx).await?;
4270 : }
4271 :
4272 194 : if wrote_any_image {
4273 : // Normal path: we have written some data into the new image layer for this
4274 : // partition, so flush it to disk.
4275 12 : let (desc, path) = image_layer_writer.finish(ctx).await?;
4276 12 : let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
4277 12 : info!(
4278 0 : "created image layer for metadata {}",
4279 0 : image_layer.local_path()
4280 : );
4281 12 : Ok(ImageLayerCreationOutcome {
4282 12 : image: Some(image_layer),
4283 12 : next_start_key: img_range.end,
4284 12 : })
4285 : } else {
4286 : // Special case: the image layer may be empty if this is a sharded tenant and the
4287 : // partition does not cover any keys owned by this shard. In this case, to ensure
4288 : // we don't leave gaps between image layers, leave `start` where it is, so that the next
4289 : // layer we write will cover the key range that we just scanned.
4290 182 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
4291 182 : Ok(ImageLayerCreationOutcome {
4292 182 : image: None,
4293 182 : next_start_key: start,
4294 182 : })
4295 : }
4296 196 : }
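     :
     : // Hedged sketch (not from the source; `metadata_image_for` is a hypothetical helper): the
     : // tombstone convention used by the loop above, in isolation. In the metadata keyspace an
     : // empty value marks a deletion, so no image is written for it.
     : //
     : //     fn metadata_image_for(v: Bytes) -> Option<Bytes> {
     : //         // empty image == tombstone: emit no image at all
     : //         if v.is_empty() { None } else { Some(v) }
     : //     }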
4297 :
4298 : /// Predicate function which indicates whether we should check if new image layers
4299 : /// are required. Since checking if new image layers are required is expensive in
4300 : /// terms of CPU, we only do it in the following cases:
4301 : /// 1. If the timeline has ingested sufficient WAL to justify the cost
4302 : /// 2. If enough time has passed since the last check:
4303 : /// 1. For large tenants, we wish to perform the check more often since they
4304 : /// suffer from the lack of image layers
4305 : /// 2. For small tenants (that can mostly fit in RAM), we use a much longer interval
4306 724 : fn should_check_if_image_layers_required(self: &Arc<Timeline>, lsn: Lsn) -> bool {
4307 : const LARGE_TENANT_THRESHOLD: u64 = 2 * 1024 * 1024 * 1024;
4308 :
4309 724 : let last_checks_at = self.last_image_layer_creation_check_at.load();
4310 724 : let distance = lsn
4311 724 : .checked_sub(last_checks_at)
4312 724 : .expect("Attempt to compact with LSN going backwards");
4313 724 : let min_distance =
4314 724 : self.get_image_layer_creation_check_threshold() as u64 * self.get_checkpoint_distance();
4315 724 :
4316 724 : let distance_based_decision = distance.0 >= min_distance;
4317 724 :
4318 724 : let mut time_based_decision = false;
4319 724 : let mut last_check_instant = self.last_image_layer_creation_check_instant.lock().unwrap();
4320 724 : if let CurrentLogicalSize::Exact(logical_size) = self.current_logical_size.current_size() {
4321 622 : let check_required_after = if Into::<u64>::into(&logical_size) >= LARGE_TENANT_THRESHOLD
4322 : {
4323 0 : self.get_checkpoint_timeout()
4324 : } else {
4325 622 : Duration::from_secs(3600 * 48)
4326 : };
4327 :
4328 622 : time_based_decision = match *last_check_instant {
4329 442 : Some(last_check) => {
4330 442 : let elapsed = last_check.elapsed();
4331 442 : elapsed >= check_required_after
4332 : }
4333 180 : None => true,
4334 : };
4335 102 : }
4336 :
4337 : // Do the expensive delta layer counting only if this timeline has ingested sufficient
4338 : // WAL since the last check or a checkpoint timeout interval has elapsed since the last
4339 : // check.
4340 724 : let decision = distance_based_decision || time_based_decision;
4341 :
4342 724 : if decision {
4343 182 : self.last_image_layer_creation_check_at.store(lsn);
4344 182 : *last_check_instant = Some(Instant::now());
4345 542 : }
4346 :
4347 724 : decision
4348 724 : }
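     :
     : // Worked example (illustrative, not from the source; the parameter values are hypothetical):
     : // with get_image_layer_creation_check_threshold() == 2 and get_checkpoint_distance() == 256 MiB,
     : // min_distance == 512 MiB, so the distance-based branch fires once at least 512 MiB of WAL has
     : // been ingested since the last check. Independently, a tenant whose exact logical size is at or
     : // above LARGE_TENANT_THRESHOLD (2 GiB) uses the checkpoint timeout as its time-based interval,
     : // instead of the 48-hour default used for small tenants.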
4349 :
4350 724 : #[tracing::instrument(skip_all, fields(%lsn, %mode))]
4351 : async fn create_image_layers(
4352 : self: &Arc<Timeline>,
4353 : partitioning: &KeyPartitioning,
4354 : lsn: Lsn,
4355 : mode: ImageLayerCreationMode,
4356 : ctx: &RequestContext,
4357 : ) -> Result<Vec<ResidentLayer>, CreateImageLayersError> {
4358 : let timer = self.metrics.create_images_time_histo.start_timer();
4359 : let mut image_layers = Vec::new();
4360 :
4361 : // We need to avoid holes between generated image layers.
4362 : // Otherwise LayerMap::image_layer_exists will return false if the key range of some layer is covered
4363 : // by more than one image layer with a hole between them. In that case the layer can never be garbage collected.
4364 : //
4365 : // How can such a hole between partitions appear?
4366 : // If we have a relation with relid=1 and size 100 and a relation with relid=2 and size 200, then the result of
4367 : // KeySpace::partition may contain the partitions <100000000..100000099> and <200000000..200000199>.
4368 : // If there is a delta layer <100000000..300000000>, it will never be garbage collected, because the
4369 : // image layers <100000000..100000099> and <200000000..200000199> do not completely cover it.
4370 : let mut start = Key::MIN;
4371 :
4372 : let check_for_image_layers = self.should_check_if_image_layers_required(lsn);
4373 :
4374 : for partition in partitioning.parts.iter() {
4375 : if self.cancel.is_cancelled() {
4376 : return Err(CreateImageLayersError::Cancelled);
4377 : }
4378 :
4379 : let img_range = start..partition.ranges.last().unwrap().end;
4380 : let compact_metadata = partition.overlaps(&Key::metadata_key_range());
4381 : if compact_metadata {
4382 : for range in &partition.ranges {
4383 : assert!(
4384 : range.start.field1 >= METADATA_KEY_BEGIN_PREFIX
4385 : && range.end.field1 <= METADATA_KEY_END_PREFIX,
4386 : "metadata keys must be partitioned separately"
4387 : );
4388 : }
4389 : if mode == ImageLayerCreationMode::Try && !check_for_image_layers {
4390 : // Skip compaction if there are not enough updates. Metadata compaction will do a scan and
4391 : // might interfere with evictions.
4392 : start = img_range.end;
4393 : continue;
4394 : }
4395 : // For initial and force modes, we always generate image layers for metadata keys.
4396 : } else if let ImageLayerCreationMode::Try = mode {
4397 : // check_for_image_layers = false -> skip
4398 : // check_for_image_layers = true -> check time_for_new_image_layer -> skip/generate
4399 : if !check_for_image_layers || !self.time_for_new_image_layer(partition, lsn).await {
4400 : start = img_range.end;
4401 : continue;
4402 : }
4403 : }
4404 : if let ImageLayerCreationMode::Force = mode {
4405 : // When forced to create image layers, we might try to create them where they already
4406 : // exist. This mode is only used in tests/debug.
4407 : let layers = self.layers.read().await;
4408 : if layers.contains_key(&PersistentLayerKey {
4409 : key_range: img_range.clone(),
4410 : lsn_range: PersistentLayerDesc::image_layer_lsn_range(lsn),
4411 : is_delta: false,
4412 : }) {
4413 : tracing::info!(
4414 : "Skipping image layer at {lsn} {}..{}, already exists",
4415 : img_range.start,
4416 : img_range.end
4417 : );
4418 : start = img_range.end;
4419 : continue;
4420 : }
4421 : }
4422 :
4423 : let image_layer_writer = ImageLayerWriter::new(
4424 : self.conf,
4425 : self.timeline_id,
4426 : self.tenant_shard_id,
4427 : &img_range,
4428 : lsn,
4429 : ctx,
4430 : )
4431 : .await?;
4432 :
4433 0 : fail_point!("image-layer-writer-fail-before-finish", |_| {
4434 0 : Err(CreateImageLayersError::Other(anyhow::anyhow!(
4435 0 : "failpoint image-layer-writer-fail-before-finish"
4436 0 : )))
4437 0 : });
4438 :
4439 : if !compact_metadata {
4440 : let ImageLayerCreationOutcome {
4441 : image,
4442 : next_start_key,
4443 : } = self
4444 : .create_image_layer_for_rel_blocks(
4445 : partition,
4446 : image_layer_writer,
4447 : lsn,
4448 : ctx,
4449 : img_range,
4450 : start,
4451 : )
4452 : .await?;
4453 :
4454 : start = next_start_key;
4455 : image_layers.extend(image);
4456 : } else {
4457 : let ImageLayerCreationOutcome {
4458 : image,
4459 : next_start_key,
4460 : } = self
4461 : .create_image_layer_for_metadata_keys(
4462 : partition,
4463 : image_layer_writer,
4464 : lsn,
4465 : ctx,
4466 : img_range,
4467 : mode,
4468 : start,
4469 : )
4470 : .await?;
4471 : start = next_start_key;
4472 : image_layers.extend(image);
4473 : }
4474 : }
4475 :
4476 : let mut guard = self.layers.write().await;
4477 :
4478 : // FIXME: we could add the images to be uploaded *before* returning from here, but right
4479 : // now they are being scheduled outside of write lock; current way is inconsistent with
4480 : // compaction lock order.
4481 : guard
4482 : .open_mut()?
4483 : .track_new_image_layers(&image_layers, &self.metrics);
4484 : drop_wlock(guard);
4485 : timer.stop_and_record();
4486 :
4487 : // Creating image layers may have caused some previously visible layers to be covered
4488 : if !image_layers.is_empty() {
4489 : self.update_layer_visibility().await?;
4490 : }
4491 :
4492 : Ok(image_layers)
4493 : }
4494 :
4495 : /// Wait until the background initial logical size calculation is complete, or
4496 : /// this Timeline is shut down. Calling this function will cause the initial
4497 : /// logical size calculation to skip waiting for the background jobs barrier.
4498 0 : pub(crate) async fn await_initial_logical_size(self: Arc<Self>) {
4499 0 : if !self.shard_identity.is_shard_zero() {
4500 : // We don't populate logical size on shard >0: skip waiting for it.
4501 0 : return;
4502 0 : }
4503 0 :
4504 0 : if self.remote_client.is_deleting() {
4505 : // The timeline was created in a deletion-resume state, we don't expect logical size to be populated
4506 0 : return;
4507 0 : }
4508 0 :
4509 0 : if self.current_logical_size.current_size().is_exact() {
4510 : // root timelines are initialized with exact count, but never start the background
4511 : // calculation
4512 0 : return;
4513 0 : }
4514 :
4515 0 : if let Some(await_bg_cancel) = self
4516 0 : .current_logical_size
4517 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
4518 0 : .get()
4519 0 : {
4520 0 : await_bg_cancel.cancel();
4521 0 : } else {
4522 : // We should not wait if we were not able to explicitly instruct
4523 : // the logical size cancellation to skip the concurrency limit semaphore.
4524 : // TODO: this is an unexpected case. We should restructure so that it
4525 : // can't happen.
4526 0 : tracing::warn!(
4527 0 : "await_initial_logical_size: can't get semaphore cancel token, skipping"
4528 : );
4529 0 : debug_assert!(false);
4530 : }
4531 :
4532 0 : tokio::select!(
4533 0 : _ = self.current_logical_size.initialized.acquire() => {},
4534 0 : _ = self.cancel.cancelled() => {}
4535 : )
4536 0 : }
4537 :
4538 : /// Detach this timeline from its ancestor by copying all of the ancestor's layers as this
4539 : /// Timeline's layers, up to the ancestor_lsn.
4540 : ///
4541 : /// Requires a timeline that:
4542 : /// - has an ancestor to detach from
4543 : /// - the ancestor does not have an ancestor -- follows from the original RFC limitations, not
4544 : /// a technical requirement
4545 : ///
4546 : /// After the operation has been started, it cannot be canceled. Upon restart it needs to be
4547 : /// polled again until completion.
4548 : ///
4549 : /// During the operation all timelines sharing the data with this timeline will be reparented
4550 : /// from our ancestor to be branches of this timeline.
4551 0 : pub(crate) async fn prepare_to_detach_from_ancestor(
4552 0 : self: &Arc<Timeline>,
4553 0 : tenant: &crate::tenant::Tenant,
4554 0 : options: detach_ancestor::Options,
4555 0 : ctx: &RequestContext,
4556 0 : ) -> Result<detach_ancestor::Progress, detach_ancestor::Error> {
4557 0 : detach_ancestor::prepare(self, tenant, options, ctx).await
4558 0 : }
4559 :
4560 : /// Second step of detach from ancestor; detaches `self` from its current ancestor and
4561 : /// reparents any reparentable children of previous ancestor.
4562 : ///
4563 : /// This method is to be called while holding the TenantManager's tenant slot, so during this
4564 : /// method we cannot be deleted nor can any timeline be deleted. After this method returns
4565 : /// successfully, tenant must be reloaded.
4566 : ///
4567 : /// Final step will be to [`Self::complete_detaching_timeline_ancestor`] after optionally
4568 : /// resetting the tenant.
4569 0 : pub(crate) async fn detach_from_ancestor_and_reparent(
4570 0 : self: &Arc<Timeline>,
4571 0 : tenant: &crate::tenant::Tenant,
4572 0 : prepared: detach_ancestor::PreparedTimelineDetach,
4573 0 : ctx: &RequestContext,
4574 0 : ) -> Result<detach_ancestor::DetachingAndReparenting, detach_ancestor::Error> {
4575 0 : detach_ancestor::detach_and_reparent(self, tenant, prepared, ctx).await
4576 0 : }
4577 :
4578 : /// Final step which unblocks the GC.
4579 : ///
4580 : /// The tenant must've been reset if ancestry was modified previously (in tenant manager).
4581 0 : pub(crate) async fn complete_detaching_timeline_ancestor(
4582 0 : self: &Arc<Timeline>,
4583 0 : tenant: &crate::tenant::Tenant,
4584 0 : attempt: detach_ancestor::Attempt,
4585 0 : ctx: &RequestContext,
4586 0 : ) -> Result<(), detach_ancestor::Error> {
4587 0 : detach_ancestor::complete(self, tenant, attempt, ctx).await
4588 0 : }
4589 : }
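     :
     : // Hedged usage sketch (not from the source): the detach-from-ancestor flow above is three
     : // calls in order, with the tenant slot held, and a tenant reload between the second and
     : // third steps. How `prepared` and `attempt` are threaded between the steps is elided here.
     : //
     : //     let progress = timeline.prepare_to_detach_from_ancestor(&tenant, options, &ctx).await?;
     : //     // ... obtain a PreparedTimelineDetach from `progress` ...
     : //     let reparented = timeline.detach_from_ancestor_and_reparent(&tenant, prepared, &ctx).await?;
     : //     // ... reload/reset the tenant if ancestry was modified, then unblock GC:
     : //     timeline.complete_detaching_timeline_ancestor(&tenant, attempt, &ctx).await?;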
4590 :
4591 : impl Drop for Timeline {
4592 10 : fn drop(&mut self) {
4593 10 : if let Some(ancestor) = &self.ancestor_timeline {
4594 : // This lock should never be poisoned, but in case it is we do a .map() instead of
4595 : // an unwrap(), to avoid panicking in a destructor and thereby aborting the process.
4596 4 : if let Ok(mut gc_info) = ancestor.gc_info.write() {
4597 4 : if !gc_info.remove_child_not_offloaded(self.timeline_id) {
4598 0 : tracing::error!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id,
4599 0 : "Couldn't remove retain_lsn entry from offloaded timeline's parent: already removed");
4600 4 : }
4601 0 : }
4602 6 : }
4603 10 : }
4604 : }
4605 :
4606 : /// Top-level failure to compact.
4607 : #[derive(Debug, thiserror::Error)]
4608 : pub(crate) enum CompactionError {
4609 : #[error("The timeline or pageserver is shutting down")]
4610 : ShuttingDown,
4611 : /// Compaction tried to offload a timeline and failed
4612 : #[error("Failed to offload timeline: {0}")]
4613 : Offload(OffloadError),
4614 : /// Compaction could not be completed right now, e.g. due to a page reconstruction failure.
4615 : #[error(transparent)]
4616 : Other(anyhow::Error),
4617 : }
4618 :
4619 : impl From<OffloadError> for CompactionError {
4620 0 : fn from(e: OffloadError) -> Self {
4621 0 : match e {
4622 0 : OffloadError::Cancelled => Self::ShuttingDown,
4623 0 : _ => Self::Offload(e),
4624 : }
4625 0 : }
4626 : }
4627 :
4628 : impl CompactionError {
4629 0 : pub fn is_cancelled(&self) -> bool {
4630 0 : matches!(self, CompactionError::ShuttingDown)
4631 0 : }
4632 : }
4633 :
4634 : impl From<CollectKeySpaceError> for CompactionError {
4635 0 : fn from(err: CollectKeySpaceError) -> Self {
4636 0 : match err {
4637 : CollectKeySpaceError::Cancelled
4638 : | CollectKeySpaceError::PageRead(PageReconstructError::Cancelled) => {
4639 0 : CompactionError::ShuttingDown
4640 : }
4641 0 : e => CompactionError::Other(e.into()),
4642 : }
4643 0 : }
4644 : }
4645 :
4646 : impl From<super::upload_queue::NotInitialized> for CompactionError {
4647 0 : fn from(value: super::upload_queue::NotInitialized) -> Self {
4648 0 : match value {
4649 : super::upload_queue::NotInitialized::Uninitialized => {
4650 0 : CompactionError::Other(anyhow::anyhow!(value))
4651 : }
4652 : super::upload_queue::NotInitialized::ShuttingDown
4653 0 : | super::upload_queue::NotInitialized::Stopped => CompactionError::ShuttingDown,
4654 : }
4655 0 : }
4656 : }
4657 :
4658 : impl From<super::storage_layer::layer::DownloadError> for CompactionError {
4659 0 : fn from(e: super::storage_layer::layer::DownloadError) -> Self {
4660 0 : match e {
4661 : super::storage_layer::layer::DownloadError::TimelineShutdown
4662 : | super::storage_layer::layer::DownloadError::DownloadCancelled => {
4663 0 : CompactionError::ShuttingDown
4664 : }
4665 : super::storage_layer::layer::DownloadError::ContextAndConfigReallyDeniesDownloads
4666 : | super::storage_layer::layer::DownloadError::DownloadRequired
4667 : | super::storage_layer::layer::DownloadError::NotFile(_)
4668 : | super::storage_layer::layer::DownloadError::DownloadFailed
4669 : | super::storage_layer::layer::DownloadError::PreStatFailed(_) => {
4670 0 : CompactionError::Other(anyhow::anyhow!(e))
4671 : }
4672 : #[cfg(test)]
4673 : super::storage_layer::layer::DownloadError::Failpoint(_) => {
4674 0 : CompactionError::Other(anyhow::anyhow!(e))
4675 : }
4676 : }
4677 0 : }
4678 : }
4679 :
4680 : impl From<layer_manager::Shutdown> for CompactionError {
4681 0 : fn from(_: layer_manager::Shutdown) -> Self {
4682 0 : CompactionError::ShuttingDown
4683 0 : }
4684 : }
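     :
     : // Illustrative note (not from the source): the From impls above funnel all "we are shutting
     : // down"-style failures into CompactionError::ShuttingDown, so compaction code can use `?`
     : // freely and a caller only needs to special-case cancellation afterwards:
     : //
     : //     if let Err(e) = compaction_result {
     : //         if e.is_cancelled() {
     : //             // benign: timeline or pageserver shutdown
     : //         } else {
     : //             tracing::error!("compaction failed: {e:#}");
     : //         }
     : //     }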
4685 :
4686 : #[serde_as]
4687 196 : #[derive(serde::Serialize)]
4688 : struct RecordedDuration(#[serde_as(as = "serde_with::DurationMicroSeconds")] Duration);
4689 :
4690 : #[derive(Default)]
4691 : enum DurationRecorder {
4692 : #[default]
4693 : NotStarted,
4694 : Recorded(RecordedDuration, tokio::time::Instant),
4695 : }
4696 :
4697 : impl DurationRecorder {
4698 504 : fn till_now(&self) -> DurationRecorder {
4699 504 : match self {
4700 : DurationRecorder::NotStarted => {
4701 0 : panic!("must only call on recorded measurements")
4702 : }
4703 504 : DurationRecorder::Recorded(_, ended) => {
4704 504 : let now = tokio::time::Instant::now();
4705 504 : DurationRecorder::Recorded(RecordedDuration(now - *ended), now)
4706 504 : }
4707 504 : }
4708 504 : }
4709 196 : fn into_recorded(self) -> Option<RecordedDuration> {
4710 196 : match self {
4711 0 : DurationRecorder::NotStarted => None,
4712 196 : DurationRecorder::Recorded(recorded, _) => Some(recorded),
4713 : }
4714 196 : }
4715 : }
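     :
     : // Hedged usage sketch (not from the source): `till_now` measures phases by chaining; each
     : // call yields a recorder holding the time elapsed since the previous recording.
     : //
     : //     let t0 = DurationRecorder::Recorded(RecordedDuration(Duration::ZERO), tokio::time::Instant::now());
     : //     /* ... phase one ... */
     : //     let t1 = t0.till_now(); // holds the duration of phase one
     : //     /* ... phase two ... */
     : //     let t2 = t1.till_now(); // holds the duration of phase two
     : //     let phase_two: Option<RecordedDuration> = t2.into_recorded();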
4716 :
4717 : /// Descriptor for a delta layer used in testing infra. The start/end key/lsn range of the
4718 : /// delta layer might be different from the min/max key/lsn in the delta layer. Therefore,
4719 : /// the layer descriptor requires the user to provide the ranges, which should cover all
4720 : /// keys specified in the `data` field.
4721 : #[cfg(test)]
4722 : #[derive(Clone)]
4723 : pub struct DeltaLayerTestDesc {
4724 : pub lsn_range: Range<Lsn>,
4725 : pub key_range: Range<Key>,
4726 : pub data: Vec<(Key, Lsn, Value)>,
4727 : }
4728 :
4729 : #[cfg(test)]
4730 : impl DeltaLayerTestDesc {
4731 2 : pub fn new(lsn_range: Range<Lsn>, key_range: Range<Key>, data: Vec<(Key, Lsn, Value)>) -> Self {
4732 2 : Self {
4733 2 : lsn_range,
4734 2 : key_range,
4735 2 : data,
4736 2 : }
4737 2 : }
4738 :
4739 88 : pub fn new_with_inferred_key_range(
4740 88 : lsn_range: Range<Lsn>,
4741 88 : data: Vec<(Key, Lsn, Value)>,
4742 88 : ) -> Self {
4743 220 : let key_min = data.iter().map(|(key, _, _)| key).min().unwrap();
4744 220 : let key_max = data.iter().map(|(key, _, _)| key).max().unwrap();
4745 88 : Self {
4746 88 : key_range: (*key_min)..(key_max.next()),
4747 88 : lsn_range,
4748 88 : data,
4749 88 : }
4750 88 : }
4751 :
4752 10 : pub(crate) fn layer_name(&self) -> LayerName {
4753 10 : LayerName::Delta(super::storage_layer::DeltaLayerName {
4754 10 : key_range: self.key_range.clone(),
4755 10 : lsn_range: self.lsn_range.clone(),
4756 10 : })
4757 10 : }
4758 : }
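     :
     : // Hedged usage sketch (not from the source; keys and values are placeholders, and we assume
     : // key_a < key_b): letting the key range be inferred from the data.
     : //
     : //     let desc = DeltaLayerTestDesc::new_with_inferred_key_range(
     : //         Lsn(0x10)..Lsn(0x20),
     : //         vec![(key_a, Lsn(0x10), value_a), (key_b, Lsn(0x18), value_b)],
     : //     );
     : //     assert_eq!(desc.key_range, key_a..key_b.next()); // end key is exclusive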
4759 :
4760 : impl Timeline {
4761 28 : async fn finish_compact_batch(
4762 28 : self: &Arc<Self>,
4763 28 : new_deltas: &[ResidentLayer],
4764 28 : new_images: &[ResidentLayer],
4765 28 : layers_to_remove: &[Layer],
4766 28 : ) -> Result<(), CompactionError> {
4767 28 : let mut guard = tokio::select! {
4768 28 : guard = self.layers.write() => guard,
4769 28 : _ = self.cancel.cancelled() => {
4770 0 : return Err(CompactionError::ShuttingDown);
4771 : }
4772 : };
4773 :
4774 28 : let mut duplicated_layers = HashSet::new();
4775 28 :
4776 28 : let mut insert_layers = Vec::with_capacity(new_deltas.len());
4777 :
4778 336 : for l in new_deltas {
4779 308 : if guard.contains(l.as_ref()) {
4780 : // expected in tests
4781 0 : tracing::error!(layer=%l, "duplicated L1 layer");
4782 :
4783 : // Good ways to cause a duplicate: we repeatedly error after taking the writelock
4784 : // `guard` on self.layers. As of writing this, there are no error returns except
4785 : // for compact_level0_phase1 creating an L0, which does not happen in practice
4786 : // because we have not implemented L0 => L0 compaction.
4787 0 : duplicated_layers.insert(l.layer_desc().key());
4788 308 : } else if LayerMap::is_l0(&l.layer_desc().key_range, l.layer_desc().is_delta) {
4789 0 : return Err(CompactionError::Other(anyhow::anyhow!("compaction generates a L0 layer file as output, which will cause infinite compaction.")));
4790 308 : } else {
4791 308 : insert_layers.push(l.clone());
4792 308 : }
4793 : }
4794 :
4795 : // only remove those inputs which were not outputs
4796 28 : let remove_layers: Vec<Layer> = layers_to_remove
4797 28 : .iter()
4798 402 : .filter(|l| !duplicated_layers.contains(&l.layer_desc().key()))
4799 28 : .cloned()
4800 28 : .collect();
4801 28 :
4802 28 : if !new_images.is_empty() {
4803 0 : guard
4804 0 : .open_mut()?
4805 0 : .track_new_image_layers(new_images, &self.metrics);
4806 28 : }
4807 :
4808 28 : guard
4809 28 : .open_mut()?
4810 28 : .finish_compact_l0(&remove_layers, &insert_layers, &self.metrics);
4811 28 :
4812 28 : self.remote_client
4813 28 : .schedule_compaction_update(&remove_layers, new_deltas)?;
4814 :
4815 28 : drop_wlock(guard);
4816 28 :
4817 28 : Ok(())
4818 28 : }
4819 :
4820 0 : async fn rewrite_layers(
4821 0 : self: &Arc<Self>,
4822 0 : mut replace_layers: Vec<(Layer, ResidentLayer)>,
4823 0 : mut drop_layers: Vec<Layer>,
4824 0 : ) -> Result<(), CompactionError> {
4825 0 : let mut guard = self.layers.write().await;
4826 :
4827 : // Trim our lists in case our caller (compaction) raced with someone else (GC) removing layers: we want
4828 : // to avoid double-removing, and avoid rewriting something that was removed.
4829 0 : replace_layers.retain(|(l, _)| guard.contains(l));
4830 0 : drop_layers.retain(|l| guard.contains(l));
4831 0 :
4832 0 : guard
4833 0 : .open_mut()?
4834 0 : .rewrite_layers(&replace_layers, &drop_layers, &self.metrics);
4835 0 :
4836 0 : let upload_layers: Vec<_> = replace_layers.into_iter().map(|r| r.1).collect();
4837 0 :
4838 0 : self.remote_client
4839 0 : .schedule_compaction_update(&drop_layers, &upload_layers)?;
4840 :
4841 0 : Ok(())
4842 0 : }
4843 :
4844 : /// Schedules the uploads of the given image layers
4845 364 : fn upload_new_image_layers(
4846 364 : self: &Arc<Self>,
4847 364 : new_images: impl IntoIterator<Item = ResidentLayer>,
4848 364 : ) -> Result<(), super::upload_queue::NotInitialized> {
4849 390 : for layer in new_images {
4850 26 : self.remote_client.schedule_layer_file_upload(layer)?;
4851 : }
4852 : // Should any new image layer have been created, not uploading index_part would
4853 : // result in a mismatch between remote_physical_size and the layermap-calculated
4854 : // size, which fails some tests, but should not be an issue otherwise.
4855 364 : self.remote_client
4856 364 : .schedule_index_upload_for_file_changes()?;
4857 364 : Ok(())
4858 364 : }
4859 :
4860 0 : async fn find_gc_time_cutoff(
4861 0 : &self,
4862 0 : now: SystemTime,
4863 0 : pitr: Duration,
4864 0 : cancel: &CancellationToken,
4865 0 : ctx: &RequestContext,
4866 0 : ) -> Result<Option<Lsn>, PageReconstructError> {
4867 0 : debug_assert_current_span_has_tenant_and_timeline_id();
4868 0 : if self.shard_identity.is_shard_zero() {
4869 : // Shard Zero has SLRU data and can calculate the PITR time -> LSN mapping itself
4870 0 : let time_range = if pitr == Duration::ZERO {
4871 0 : humantime::parse_duration(DEFAULT_PITR_INTERVAL).expect("constant is invalid")
4872 : } else {
4873 0 : pitr
4874 : };
4875 :
4876 : // If PITR is so large or `now` is so small that this underflows, we will retain no history (highly unexpected case)
4877 0 : let time_cutoff = now.checked_sub(time_range).unwrap_or(now);
4878 0 : let timestamp = to_pg_timestamp(time_cutoff);
4879 :
4880 0 : let time_cutoff = match self.find_lsn_for_timestamp(timestamp, cancel, ctx).await? {
4881 0 : LsnForTimestamp::Present(lsn) => Some(lsn),
4882 0 : LsnForTimestamp::Future(lsn) => {
4883 0 : // The timestamp is in the future. That sounds impossible,
4884 0 : // but what it really means is that there haven't been
4885 0 : // any commits since the cutoff timestamp.
4886 0 : //
4887 0 : // In this case we should use the LSN of the most recent commit,
4888 0 : // which is implicitly the last LSN in the log.
4889 0 : debug!("future({})", lsn);
4890 0 : Some(self.get_last_record_lsn())
4891 : }
4892 0 : LsnForTimestamp::Past(lsn) => {
4893 0 : debug!("past({})", lsn);
4894 0 : None
4895 : }
4896 0 : LsnForTimestamp::NoData(lsn) => {
4897 0 : debug!("nodata({})", lsn);
4898 0 : None
4899 : }
4900 : };
4901 0 : Ok(time_cutoff)
4902 : } else {
4903 : // Shards other than shard zero cannot do timestamp->lsn lookups, and must instead learn their GC cutoff
4904 : // from shard zero's index. The index doesn't explicitly tell us the time cutoff, but we may assume that
4905 : // the point up to which shard zero's last_gc_cutoff has advanced will either be the time cutoff, or a
4906 : // space cutoff that we would also have respected ourselves.
4907 0 : match self
4908 0 : .remote_client
4909 0 : .download_foreign_index(ShardNumber(0), cancel)
4910 0 : .await
4911 : {
4912 0 : Ok((index_part, index_generation, _index_mtime)) => {
4913 0 : tracing::info!("GC loaded shard zero metadata (gen {index_generation:?}): latest_gc_cutoff_lsn: {}",
4914 0 : index_part.metadata.latest_gc_cutoff_lsn());
4915 0 : Ok(Some(index_part.metadata.latest_gc_cutoff_lsn()))
4916 : }
4917 : Err(DownloadError::NotFound) => {
4918 : // This is unexpected, because during timeline creations shard zero persists to remote
4919 : // storage before other shards are called, and during timeline deletion non-zeroth shards are
4920 : // deleted before the zeroth one. However, it should be harmless: if we somehow end up in this
4921 : // state, then shard zero should _eventually_ write an index when it GCs.
4922 0 : tracing::warn!("GC couldn't find shard zero's index for timeline");
4923 0 : Ok(None)
4924 : }
4925 0 : Err(e) => {
4926 0 : // TODO: this function should return a different error type than page reconstruct error
4927 0 : Err(PageReconstructError::Other(anyhow::anyhow!(e)))
4928 : }
4929 : }
4930 :
4931 : // TODO: after reading shard zero's GC cutoff, we should validate its generation with the storage
4932 : // controller. Otherwise, it is possible that we see the GC cutoff go backwards while shard zero
4933 : // is going through a migration if we read the old location's index and it has GC'd ahead of the
4934 : // new location. This is legal in principle, but problematic in practice because it might result
4935 : // in a timeline creation succeeding on shard zero ('s new location) but then failing on other shards
4936 : // because they have GC'd past the branch point.
4937 : }
4938 0 : }
4939 :
4940 : /// Find the Lsns above which layer files need to be retained on
4941 : /// garbage collection.
4942 : ///
4943 : /// We calculate two cutoffs, one based on time and one based on WAL size. `pitr`
4944 : /// controls the time cutoff (or ZERO to disable time-based retention), and `space_cutoff` controls
4945 : /// the space-based retention.
4946 : ///
4947 : /// This function doesn't simply calculate time- and space-based retention: it treats time-based
4948 : /// retention as authoritative if enabled, and falls back to space-based retention if calculating
4949 : /// the LSN for a time point isn't possible. Therefore the GcCutoffs::horizon in the response might
4950 : /// differ from the `space_cutoff` input. Callers should treat the min() of the two cutoffs
4951 : /// in the response as the GC cutoff point for the timeline.
4952 4 : #[instrument(skip_all, fields(timeline_id=%self.timeline_id))]
4953 : pub(super) async fn find_gc_cutoffs(
4954 : &self,
4955 : now: SystemTime,
4956 : space_cutoff: Lsn,
4957 : pitr: Duration,
4958 : cancel: &CancellationToken,
4959 : ctx: &RequestContext,
4960 : ) -> Result<GcCutoffs, PageReconstructError> {
4961 : let _timer = self
4962 : .metrics
4963 : .find_gc_cutoffs_histo
4964 : .start_timer()
4965 : .record_on_drop();
4966 :
4967 : pausable_failpoint!("Timeline::find_gc_cutoffs-pausable");
4968 :
4969 : if cfg!(test) {
4970 : // Unit tests which specify zero PITR interval expect to avoid doing any I/O for timestamp lookup
4971 : if pitr == Duration::ZERO {
4972 : return Ok(GcCutoffs {
4973 : time: self.get_last_record_lsn(),
4974 : space: space_cutoff,
4975 : });
4976 : }
4977 : }
4978 :
4979 : // Calculate a time-based limit on how much to retain:
4980 : // - if PITR interval is set, then this is our cutoff.
4981 : // - if PITR interval is not set, then we do a lookup
4982 : // based on DEFAULT_PITR_INTERVAL, so that size-based retention does not result in keeping history around permanently on idle databases.
4983 : let time_cutoff = self.find_gc_time_cutoff(now, pitr, cancel, ctx).await?;
4984 :
4985 : Ok(match (pitr, time_cutoff) {
4986 : (Duration::ZERO, Some(time_cutoff)) => {
4987 : // PITR is not set. Retain the size-based limit, or the default time retention,
4988 : // whichever requires less data.
4989 : GcCutoffs {
4990 : time: self.get_last_record_lsn(),
4991 : space: std::cmp::max(time_cutoff, space_cutoff),
4992 : }
4993 : }
4994 : (Duration::ZERO, None) => {
4995 : // PITR is not set, and time lookup failed
4996 : GcCutoffs {
4997 : time: self.get_last_record_lsn(),
4998 : space: space_cutoff,
4999 : }
5000 : }
5001 : (_, None) => {
5002 : // PITR interval is set & we didn't look up a timestamp successfully. Conservatively assume PITR
5003 : // cannot advance beyond what was already GC'd, and respect space-based retention
5004 : GcCutoffs {
5005 : time: *self.get_latest_gc_cutoff_lsn(),
5006 : space: space_cutoff,
5007 : }
5008 : }
5009 : (_, Some(time_cutoff)) => {
5010 : // PITR interval is set and we looked up timestamp successfully. Ignore
5011 : // size based retention and make time cutoff authoritative
5012 : GcCutoffs {
5013 : time: time_cutoff,
5014 : space: time_cutoff,
5015 : }
5016 : }
5017 : })
5018 : }
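     :
     : // Summary of the match above (illustrative, not from the source):
     : //
     : //   pitr        time_cutoff  ->  GcCutoffs { time, space }
     : //   ZERO        Some(t)      ->  { last_record_lsn, max(t, space_cutoff) }
     : //   ZERO        None         ->  { last_record_lsn, space_cutoff }
     : //   non-zero    None         ->  { latest_gc_cutoff, space_cutoff }
     : //   non-zero    Some(t)      ->  { t, t }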
5019 :
5020 : /// Garbage collect layer files on a timeline that are no longer needed.
5021 : ///
5022 : /// Currently, we don't make any attempt at removing unneeded page versions
5023 : /// within a layer file. We can only remove the whole file if it's fully
5024 : /// obsolete.
5025 4 : pub(super) async fn gc(&self) -> Result<GcResult, GcError> {
5026 : // this is most likely the background task, but it might be the spawned task from
5027 : // immediate_gc
5028 4 : let _g = tokio::select! {
5029 4 : guard = self.gc_lock.lock() => guard,
5030 4 : _ = self.cancel.cancelled() => return Ok(GcResult::default()),
5031 : };
5032 4 : let timer = self.metrics.garbage_collect_histo.start_timer();
5033 4 :
5034 4 : fail_point!("before-timeline-gc");
5035 4 :
5036 4 : // Is the timeline being deleted?
5037 4 : if self.is_stopping() {
5038 0 : return Err(GcError::TimelineCancelled);
5039 4 : }
5040 4 :
5041 4 : let (space_cutoff, time_cutoff, retain_lsns, max_lsn_with_valid_lease) = {
5042 4 : let gc_info = self.gc_info.read().unwrap();
5043 4 :
5044 4 : let space_cutoff = min(gc_info.cutoffs.space, self.get_disk_consistent_lsn());
5045 4 : let time_cutoff = gc_info.cutoffs.time;
5046 4 : let retain_lsns = gc_info
5047 4 : .retain_lsns
5048 4 : .iter()
5049 4 : .map(|(lsn, _child_id, _is_offloaded)| *lsn)
5050 4 : .collect();
5051 4 :
5052 4 : // Gets the maximum LSN that holds the valid lease.
5053 4 : //
5054 4 : // Caveat: `refresh_gc_info` is in charge of updating the lease map.
5055 4 : // Here, we do not check for stale leases again.
5056 4 : let max_lsn_with_valid_lease = gc_info.leases.last_key_value().map(|(lsn, _)| *lsn);
5057 4 :
5058 4 : (
5059 4 : space_cutoff,
5060 4 : time_cutoff,
5061 4 : retain_lsns,
5062 4 : max_lsn_with_valid_lease,
5063 4 : )
5064 4 : };
5065 4 :
5066 4 : let mut new_gc_cutoff = Lsn::min(space_cutoff, time_cutoff);
5067 4 : let standby_horizon = self.standby_horizon.load();
5068 4 : // Hold GC for the standby, but as a safety guard do it only within some
5069 4 : // reasonable lag.
5070 4 : if standby_horizon != Lsn::INVALID {
5071 0 : if let Some(standby_lag) = new_gc_cutoff.checked_sub(standby_horizon) {
5072 : const MAX_ALLOWED_STANDBY_LAG: u64 = 10u64 << 30; // 10 GB
5073 0 : if standby_lag.0 < MAX_ALLOWED_STANDBY_LAG {
5074 0 : new_gc_cutoff = Lsn::min(standby_horizon, new_gc_cutoff);
5075 0 : trace!("holding off GC for standby apply LSN {}", standby_horizon);
5076 : } else {
5077 0 : warn!(
5078 0 : "standby is lagging for more than {}MB, not holding gc for it",
5079 0 : MAX_ALLOWED_STANDBY_LAG / 1024 / 1024
5080 : )
5081 : }
5082 0 : }
5083 4 : }
5084 :
5085 : // Reset the standby horizon so it is ignored if it is not updated before the next GC.
5086 : // This is an easy way to unset it when the standby disappears, without adding
5087 : // more conf options.
5088 4 : self.standby_horizon.store(Lsn::INVALID);
5089 4 : self.metrics
5090 4 : .standby_horizon_gauge
5091 4 : .set(Lsn::INVALID.0 as i64);
5092 :
5093 4 : let res = self
5094 4 : .gc_timeline(
5095 4 : space_cutoff,
5096 4 : time_cutoff,
5097 4 : retain_lsns,
5098 4 : max_lsn_with_valid_lease,
5099 4 : new_gc_cutoff,
5100 4 : )
5101 4 : .instrument(
5102 4 : info_span!("gc_timeline", timeline_id = %self.timeline_id, cutoff = %new_gc_cutoff),
5103 : )
5104 4 : .await?;
5105 :
5106 : // only record successes
5107 4 : timer.stop_and_record();
5108 4 :
5109 4 : Ok(res)
5110 4 : }
5111 :
5112 4 : async fn gc_timeline(
5113 4 : &self,
5114 4 : space_cutoff: Lsn,
5115 4 : time_cutoff: Lsn,
5116 4 : retain_lsns: Vec<Lsn>,
5117 4 : max_lsn_with_valid_lease: Option<Lsn>,
5118 4 : new_gc_cutoff: Lsn,
5119 4 : ) -> Result<GcResult, GcError> {
5120 4 : // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc
5121 4 :
5122 4 : let now = SystemTime::now();
5123 4 : let mut result: GcResult = GcResult::default();
5124 4 :
5125 4 : // Nothing to GC. Return early.
5126 4 : let latest_gc_cutoff = *self.get_latest_gc_cutoff_lsn();
5127 4 : if latest_gc_cutoff >= new_gc_cutoff {
5128 0 : info!(
5129 0 : "Nothing to GC: new_gc_cutoff_lsn {new_gc_cutoff}, latest_gc_cutoff_lsn {latest_gc_cutoff}",
5130 : );
5131 0 : return Ok(result);
5132 4 : }
5133 :
5134 : // We need to ensure that no one tries to read page versions or create
5135 : // branches at a point before latest_gc_cutoff_lsn. See branch_timeline()
5136 : // for details. This will block until the old value is no longer in use.
5137 : //
5138 : // The GC cutoff should only ever move forwards.
5139 4 : let waitlist = {
5140 4 : let write_guard = self.latest_gc_cutoff_lsn.lock_for_write();
5141 4 : if *write_guard > new_gc_cutoff {
5142 0 : return Err(GcError::BadLsn {
5143 0 : why: format!(
5144 0 : "Cannot move GC cutoff LSN backwards (was {}, new {})",
5145 0 : *write_guard, new_gc_cutoff
5146 0 : ),
5147 0 : });
5148 4 : }
5149 4 :
5150 4 : write_guard.store_and_unlock(new_gc_cutoff)
5151 4 : };
5152 4 : waitlist.wait().await;
5153 :
5154 4 : info!("GC starting");
5155 :
5156 4 : debug!("retain_lsns: {:?}", retain_lsns);
5157 :
5158 4 : let mut layers_to_remove = Vec::new();
5159 :
5160 : // Scan all layers in the timeline (remote or on-disk).
5161 : //
5162 : // Garbage collect the layer if all conditions are satisfied:
5163 : // 1. it is older than cutoff LSN;
5164 : // 2. it is older than PITR interval;
5165 : // 3. it doesn't need to be retained for 'retain_lsns';
5166 : // 4. it does not need to be kept for LSNs holding valid leases.
5167 : // 5. newer on-disk image layers cover the layer's whole key range
5168 : //
5169 : // TODO: holding a write lock is too aggressive and avoidable
5170 4 : let mut guard = self.layers.write().await;
5171 4 : let layers = guard.layer_map()?;
5172 24 : 'outer: for l in layers.iter_historic_layers() {
5173 24 : result.layers_total += 1;
5174 24 :
5175 24 : // 1. Is it newer than GC horizon cutoff point?
5176 24 : if l.get_lsn_range().end > space_cutoff {
5177 2 : info!(
5178 0 : "keeping {} because it's newer than space_cutoff {}",
5179 0 : l.layer_name(),
5180 : space_cutoff,
5181 : );
5182 2 : result.layers_needed_by_cutoff += 1;
5183 2 : continue 'outer;
5184 22 : }
5185 22 :
5186 22 : // 2. Is it newer than the PITR cutoff point?
5187 22 : if l.get_lsn_range().end > time_cutoff {
5188 0 : info!(
5189 0 : "keeping {} because it's newer than time_cutoff {}",
5190 0 : l.layer_name(),
5191 : time_cutoff,
5192 : );
5193 0 : result.layers_needed_by_pitr += 1;
5194 0 : continue 'outer;
5195 22 : }
5196 :
5197 : // 3. Is it needed by a child branch?
5198 : // NOTE With that we would keep data that
5199 : // might be referenced by child branches forever.
5200 : // We can track this in child timeline GC and delete parent layers when
5201 : // they are no longer needed. This might be complicated with long inheritance chains.
5202 : //
5203 : // TODO Vec is not a great choice for `retain_lsns`
5204 22 : for retain_lsn in &retain_lsns {
5205 : // start_lsn is inclusive
5206 0 : if &l.get_lsn_range().start <= retain_lsn {
5207 0 : info!(
5208 0 : "keeping {} because it's still might be referenced by child branch forked at {} is_dropped: xx is_incremental: {}",
5209 0 : l.layer_name(),
5210 0 : retain_lsn,
5211 0 : l.is_incremental(),
5212 : );
5213 0 : result.layers_needed_by_branches += 1;
5214 0 : continue 'outer;
5215 0 : }
5216 : }
5217 :
5218 : // 4. Is there a valid lease that requires us to keep this layer?
5219 22 : if let Some(lsn) = &max_lsn_with_valid_lease {
5220 : // keep if layer start <= any of the lease
5221 18 : if &l.get_lsn_range().start <= lsn {
5222 14 : info!(
5223 0 : "keeping {} because there is a valid lease preventing GC at {}",
5224 0 : l.layer_name(),
5225 : lsn,
5226 : );
5227 14 : result.layers_needed_by_leases += 1;
5228 14 : continue 'outer;
5229 4 : }
5230 4 : }
5231 :
5232 : // 5. Is there a later on-disk layer for this relation?
5233 : //
5234 : // The end-LSN is exclusive, while disk_consistent_lsn is
5235 : // inclusive. For example, if disk_consistent_lsn is 100, it is
5236 : // OK for a delta layer to have end LSN 101, but if the end LSN
5237 : // is 102, then it might not have been fully flushed to disk
5238 : // before crash.
5239 : //
5240 : // For example, imagine that the following layers exist:
5241 : //
5242 : // 1000 - image (A)
5243 : // 1000-2000 - delta (B)
5244 : // 2000 - image (C)
5245 : // 2000-3000 - delta (D)
5246 : // 3000 - image (E)
5247 : //
5248 : // If GC horizon is at 2500, we can remove layers A and B, but
5249 : // we cannot remove C, even though it's older than 2500, because
5250 : // the delta layer 2000-3000 depends on it.
5251 8 : if !layers
5252 8 : .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff))
5253 : {
5254 6 : info!("keeping {} because it is the latest layer", l.layer_name());
5255 6 : result.layers_not_updated += 1;
5256 6 : continue 'outer;
5257 2 : }
5258 2 :
5259 2 : // We didn't find any reason to keep this file, so remove it.
5260 2 : info!(
5261 0 : "garbage collecting {} is_dropped: xx is_incremental: {}",
5262 0 : l.layer_name(),
5263 0 : l.is_incremental(),
5264 : );
5265 2 : layers_to_remove.push(l);
5266 : }
5267 :
5268 4 : if !layers_to_remove.is_empty() {
5269 : // Persist the new GC cutoff value before we actually remove anything.
5270 : // This unconditionally schedules also an index_part.json update, even though, we will
5271 : // be doing one a bit later with the unlinked gc'd layers.
5272 2 : let disk_consistent_lsn = self.disk_consistent_lsn.load();
5273 2 : self.schedule_uploads(disk_consistent_lsn, None)
5274 2 : .map_err(|e| {
5275 0 : if self.cancel.is_cancelled() {
5276 0 : GcError::TimelineCancelled
5277 : } else {
5278 0 : GcError::Remote(e)
5279 : }
5280 2 : })?;
5281 :
5282 2 : let gc_layers = layers_to_remove
5283 2 : .iter()
5284 2 : .map(|x| guard.get_from_desc(x))
5285 2 : .collect::<Vec<Layer>>();
5286 2 :
5287 2 : result.layers_removed = gc_layers.len() as u64;
5288 2 :
5289 2 : self.remote_client.schedule_gc_update(&gc_layers)?;
5290 :
5291 2 : guard.open_mut()?.finish_gc_timeline(&gc_layers);
5292 2 :
5293 2 : #[cfg(feature = "testing")]
5294 2 : {
5295 2 : result.doomed_layers = gc_layers;
5296 2 : }
5297 2 : }
5298 :
5299 4 : info!(
5300 0 : "GC completed removing {} layers, cutoff {}",
5301 : result.layers_removed, new_gc_cutoff
5302 : );
5303 :
5304 4 : result.elapsed = now.elapsed().unwrap_or(Duration::ZERO);
5305 4 : Ok(result)
5306 4 : }
5307 :
5308 : /// Reconstruct a value, using the given base image and WAL records in 'data'.
5309 668321 : async fn reconstruct_value(
5310 668321 : &self,
5311 668321 : key: Key,
5312 668321 : request_lsn: Lsn,
5313 668321 : mut data: ValueReconstructState,
5314 668321 : ) -> Result<Bytes, PageReconstructError> {
5315 668321 : // Perform WAL redo if needed
5316 668321 : data.records.reverse();
5317 668321 :
5318 668321 : // If we have a page image, and no WAL, we're all set
5319 668321 : if data.records.is_empty() {
5320 667801 : if let Some((img_lsn, img)) = &data.img {
5321 667801 : trace!(
5322 0 : "found page image for key {} at {}, no WAL redo required, req LSN {}",
5323 : key,
5324 : img_lsn,
5325 : request_lsn,
5326 : );
5327 667801 : Ok(img.clone())
5328 : } else {
5329 0 : Err(PageReconstructError::from(anyhow!(
5330 0 : "base image for {key} at {request_lsn} not found"
5331 0 : )))
5332 : }
5333 : } else {
5334 : // We need to do WAL redo.
5335 : //
5336 : // If we don't have a base image, then the oldest WAL record better initialize
5337 : // the page
5338 520 : if data.img.is_none() && !data.records.first().unwrap().1.will_init() {
5339 0 : Err(PageReconstructError::from(anyhow!(
5340 0 : "Base image for {} at {} not found, but got {} WAL records",
5341 0 : key,
5342 0 : request_lsn,
5343 0 : data.records.len()
5344 0 : )))
5345 : } else {
5346 520 : if data.img.is_some() {
5347 454 : trace!(
5348 0 : "found {} WAL records and a base image for {} at {}, performing WAL redo",
5349 0 : data.records.len(),
5350 : key,
5351 : request_lsn
5352 : );
5353 : } else {
5354 66 : trace!("found {} WAL records that will init the page for {} at {}, performing WAL redo", data.records.len(), key, request_lsn);
5355 : };
5356 520 : let res = self
5357 520 : .walredo_mgr
5358 520 : .as_ref()
5359 520 : .context("timeline has no walredo manager")
5360 520 : .map_err(PageReconstructError::WalRedo)?
5361 520 : .request_redo(key, request_lsn, data.img, data.records, self.pg_version)
5362 520 : .await;
5363 520 : let img = match res {
5364 520 : Ok(img) => img,
5365 0 : Err(walredo::Error::Cancelled) => return Err(PageReconstructError::Cancelled),
5366 0 : Err(walredo::Error::Other(e)) => {
5367 0 : return Err(PageReconstructError::WalRedo(
5368 0 : e.context("reconstruct a page image"),
5369 0 : ))
5370 : }
5371 : };
5372 520 : Ok(img)
5373 : }
5374 : }
5375 668321 : }
5376 :
5377 0 : pub(crate) async fn spawn_download_all_remote_layers(
5378 0 : self: Arc<Self>,
5379 0 : request: DownloadRemoteLayersTaskSpawnRequest,
5380 0 : ) -> Result<DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskInfo> {
5381 : use pageserver_api::models::DownloadRemoteLayersTaskState;
5382 :
5383 : // This is not really needed anymore; there are tests which check the return value from the
5384 : // HTTP API. It would be better not to maintain this anymore.
5385 :
5386 0 : let mut status_guard = self.download_all_remote_layers_task_info.write().unwrap();
5387 0 : if let Some(st) = &*status_guard {
5388 0 : match &st.state {
5389 : DownloadRemoteLayersTaskState::Running => {
5390 0 : return Err(st.clone());
5391 : }
5392 : DownloadRemoteLayersTaskState::ShutDown
5393 0 : | DownloadRemoteLayersTaskState::Completed => {
5394 0 : *status_guard = None;
5395 0 : }
5396 : }
5397 0 : }
5398 :
5399 0 : let self_clone = Arc::clone(&self);
5400 0 : let task_id = task_mgr::spawn(
5401 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
5402 0 : task_mgr::TaskKind::DownloadAllRemoteLayers,
5403 0 : self.tenant_shard_id,
5404 0 : Some(self.timeline_id),
5405 0 : "download all remote layers task",
5406 0 : async move {
5407 0 : self_clone.download_all_remote_layers(request).await;
5408 0 : let mut status_guard = self_clone.download_all_remote_layers_task_info.write().unwrap();
5409 0 : match &mut *status_guard {
5410 : None => {
5411 0 : warn!("tasks status is supposed to be Some(), since we are running");
5412 : }
5413 0 : Some(st) => {
5414 0 : let exp_task_id = format!("{}", task_mgr::current_task_id().unwrap());
5415 0 : if st.task_id != exp_task_id {
5416 0 : warn!("task id changed while we were still running, expecting {} but have {}", exp_task_id, st.task_id);
5417 0 : } else {
5418 0 : st.state = DownloadRemoteLayersTaskState::Completed;
5419 0 : }
5420 : }
5421 : };
5422 0 : Ok(())
5423 0 : }
5424 0 : .instrument(info_span!(parent: None, "download_all_remote_layers", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
5425 : );
5426 :
5427 0 : let initial_info = DownloadRemoteLayersTaskInfo {
5428 0 : task_id: format!("{task_id}"),
5429 0 : state: DownloadRemoteLayersTaskState::Running,
5430 0 : total_layer_count: 0,
5431 0 : successful_download_count: 0,
5432 0 : failed_download_count: 0,
5433 0 : };
5434 0 : *status_guard = Some(initial_info.clone());
5435 0 :
5436 0 : Ok(initial_info)
5437 0 : }
5438 :
5439 0 : async fn download_all_remote_layers(
5440 0 : self: &Arc<Self>,
5441 0 : request: DownloadRemoteLayersTaskSpawnRequest,
5442 0 : ) {
5443 : use pageserver_api::models::DownloadRemoteLayersTaskState;
5444 :
5445 0 : let remaining = {
5446 0 : let guard = self.layers.read().await;
5447 0 : let Ok(lm) = guard.layer_map() else {
5448 : // technically here we could look into iterating accessible layers, but downloading
5449 : // all layers of a shutdown timeline makes no sense regardless.
5450 0 : tracing::info!("attempted to download all layers of shutdown timeline");
5451 0 : return;
5452 : };
5453 0 : lm.iter_historic_layers()
5454 0 : .map(|desc| guard.get_from_desc(&desc))
5455 0 : .collect::<Vec<_>>()
5456 0 : };
5457 0 : let total_layer_count = remaining.len();
5458 :
5459 : macro_rules! lock_status {
5460 : ($st:ident) => {
5461 : let mut st = self.download_all_remote_layers_task_info.write().unwrap();
5462 : let st = st
5463 : .as_mut()
5464 : .expect("this function is only called after the task has been spawned");
5465 : assert_eq!(
5466 : st.task_id,
5467 : format!(
5468 : "{}",
5469 : task_mgr::current_task_id().expect("we run inside a task_mgr task")
5470 : )
5471 : );
5472 : let $st = st;
5473 : };
5474 : }
5475 :
5476 : {
5477 0 : lock_status!(st);
5478 0 : st.total_layer_count = total_layer_count as u64;
5479 0 : }
5480 0 :
5481 0 : let mut remaining = remaining.into_iter();
5482 0 : let mut have_remaining = true;
5483 0 : let mut js = tokio::task::JoinSet::new();
5484 0 :
5485 0 : let cancel = task_mgr::shutdown_token();
5486 0 :
5487 0 : let limit = request.max_concurrent_downloads;
5488 :
5489 : loop {
5490 0 : while js.len() < limit.get() && have_remaining && !cancel.is_cancelled() {
5491 0 : let Some(next) = remaining.next() else {
5492 0 : have_remaining = false;
5493 0 : break;
5494 : };
5495 :
5496 0 : let span = tracing::info_span!("download", layer = %next);
5497 :
5498 0 : js.spawn(
5499 0 : async move {
5500 0 : let res = next.download().await;
5501 0 : (next, res)
5502 0 : }
5503 0 : .instrument(span),
5504 0 : );
5505 0 : }
5506 :
5507 0 : while let Some(res) = js.join_next().await {
5508 0 : match res {
5509 : Ok((_, Ok(_))) => {
5510 0 : lock_status!(st);
5511 0 : st.successful_download_count += 1;
5512 : }
5513 0 : Ok((layer, Err(e))) => {
5514 0 : tracing::error!(%layer, "download failed: {e:#}");
5515 0 : lock_status!(st);
5516 0 : st.failed_download_count += 1;
5517 : }
5518 0 : Err(je) if je.is_cancelled() => unreachable!("not used here"),
5519 0 : Err(je) if je.is_panic() => {
5520 0 : lock_status!(st);
5521 0 : st.failed_download_count += 1;
5522 : }
5523 0 : Err(je) => tracing::warn!("unknown joinerror: {je:?}"),
5524 : }
5525 : }
5526 :
5527 0 : if js.is_empty() && (!have_remaining || cancel.is_cancelled()) {
5528 0 : break;
5529 0 : }
5530 : }
5531 :
5532 : {
5533 0 : lock_status!(st);
5534 0 : st.state = DownloadRemoteLayersTaskState::Completed;
5535 : }
5536 0 : }
5537 :
5538 0 : pub(crate) fn get_download_all_remote_layers_task_info(
5539 0 : &self,
5540 0 : ) -> Option<DownloadRemoteLayersTaskInfo> {
5541 0 : self.download_all_remote_layers_task_info
5542 0 : .read()
5543 0 : .unwrap()
5544 0 : .clone()
5545 0 : }
5546 : }
5547 :
5548 : impl Timeline {
5549 : /// Returns non-remote layers for eviction.
5550 0 : pub(crate) async fn get_local_layers_for_disk_usage_eviction(&self) -> DiskUsageEvictionInfo {
5551 0 : let guard = self.layers.read().await;
5552 0 : let mut max_layer_size: Option<u64> = None;
5553 0 :
5554 0 : let resident_layers = guard
5555 0 : .likely_resident_layers()
5556 0 : .map(|layer| {
5557 0 : let file_size = layer.layer_desc().file_size;
5558 0 : max_layer_size = max_layer_size.map_or(Some(file_size), |m| Some(m.max(file_size)));
5559 0 :
5560 0 : let last_activity_ts = layer.latest_activity();
5561 0 :
5562 0 : EvictionCandidate {
5563 0 : layer: layer.to_owned().into(),
5564 0 : last_activity_ts,
5565 0 : relative_last_activity: finite_f32::FiniteF32::ZERO,
5566 0 : visibility: layer.visibility(),
5567 0 : }
5568 0 : })
5569 0 : .collect();
5570 0 :
5571 0 : DiskUsageEvictionInfo {
5572 0 : max_layer_size,
5573 0 : resident_layers,
5574 0 : }
5575 0 : }
5576 :
5577 1768 : pub(crate) fn get_shard_index(&self) -> ShardIndex {
5578 1768 : ShardIndex {
5579 1768 : shard_number: self.tenant_shard_id.shard_number,
5580 1768 : shard_count: self.tenant_shard_id.shard_count,
5581 1768 : }
5582 1768 : }
5583 :
5584 : /// Persistently blocks gc for `Manual` reason.
5585 : ///
5586 : /// Returns true if no such block existed before, false otherwise.
5587 0 : pub(crate) async fn block_gc(&self, tenant: &super::Tenant) -> anyhow::Result<bool> {
5588 : use crate::tenant::remote_timeline_client::index::GcBlockingReason;
5589 0 : assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
5590 0 : tenant.gc_block.insert(self, GcBlockingReason::Manual).await
5591 0 : }
5592 :
5593 : /// Persistently unblocks gc for `Manual` reason.
5594 0 : pub(crate) async fn unblock_gc(&self, tenant: &super::Tenant) -> anyhow::Result<()> {
5595 : use crate::tenant::remote_timeline_client::index::GcBlockingReason;
5596 0 : assert_eq!(self.tenant_shard_id, tenant.tenant_shard_id);
5597 0 : tenant.gc_block.remove(self, GcBlockingReason::Manual).await
5598 0 : }
5599 :
5600 : #[cfg(test)]
5601 44 : pub(super) fn force_advance_lsn(self: &Arc<Timeline>, new_lsn: Lsn) {
5602 44 : self.last_record_lsn.advance(new_lsn);
5603 44 : }
5604 :
5605 : #[cfg(test)]
5606 2 : pub(super) fn force_set_disk_consistent_lsn(&self, new_value: Lsn) {
5607 2 : self.disk_consistent_lsn.store(new_value);
5608 2 : }
5609 :
5610 : /// Force create an image layer and place it into the layer map.
5611 : ///
5612 : /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
5613 : /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are
5614 : /// placed into the layer map in one run AND be validated.
5615 : #[cfg(test)]
5616 56 : pub(super) async fn force_create_image_layer(
5617 56 : self: &Arc<Timeline>,
5618 56 : lsn: Lsn,
5619 56 : mut images: Vec<(Key, Bytes)>,
5620 56 : check_start_lsn: Option<Lsn>,
5621 56 : ctx: &RequestContext,
5622 56 : ) -> anyhow::Result<()> {
5623 56 : let last_record_lsn = self.get_last_record_lsn();
5624 56 : assert!(
5625 56 : lsn <= last_record_lsn,
5626 0 : "advance last record lsn before inserting a layer, lsn={lsn}, last_record_lsn={last_record_lsn}"
5627 : );
5628 56 : if let Some(check_start_lsn) = check_start_lsn {
5629 56 : assert!(lsn >= check_start_lsn);
5630 0 : }
5631 162 : images.sort_unstable_by(|(ka, _), (kb, _)| ka.cmp(kb));
5632 56 : let min_key = *images.first().map(|(k, _)| k).unwrap();
5633 56 : let end_key = images.last().map(|(k, _)| k).unwrap().next();
5634 56 : let mut image_layer_writer = ImageLayerWriter::new(
5635 56 : self.conf,
5636 56 : self.timeline_id,
5637 56 : self.tenant_shard_id,
5638 56 : &(min_key..end_key),
5639 56 : lsn,
5640 56 : ctx,
5641 56 : )
5642 56 : .await?;
5643 274 : for (key, img) in images {
5644 218 : image_layer_writer.put_image(key, img, ctx).await?;
5645 : }
5646 56 : let (desc, path) = image_layer_writer.finish(ctx).await?;
5647 56 : let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
5648 56 : info!("force created image layer {}", image_layer.local_path());
5649 : {
5650 56 : let mut guard = self.layers.write().await;
5651 56 : guard.open_mut().unwrap().force_insert_layer(image_layer);
5652 56 : }
5653 56 :
5654 56 : Ok(())
5655 56 : }
5656 :
5657 : /// Force create a delta layer and place it into the layer map.
5658 : ///
5659 : /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`]
5660 : /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are
5661 : /// placed into the layer map in one run AND be validated.
5662 : #[cfg(test)]
5663 90 : pub(super) async fn force_create_delta_layer(
5664 90 : self: &Arc<Timeline>,
5665 90 : mut deltas: DeltaLayerTestDesc,
5666 90 : check_start_lsn: Option<Lsn>,
5667 90 : ctx: &RequestContext,
5668 90 : ) -> anyhow::Result<()> {
5669 90 : let last_record_lsn = self.get_last_record_lsn();
5670 90 : deltas
5671 90 : .data
5672 132 : .sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb)));
5673 90 : assert!(deltas.data.first().unwrap().0 >= deltas.key_range.start);
5674 90 : assert!(deltas.data.last().unwrap().0 < deltas.key_range.end);
5675 312 : for (_, lsn, _) in &deltas.data {
5676 222 : assert!(deltas.lsn_range.start <= *lsn && *lsn < deltas.lsn_range.end);
5677 : }
5678 90 : assert!(
5679 90 : deltas.lsn_range.end <= last_record_lsn,
5680 0 : "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}",
5681 : deltas.lsn_range.end,
5682 : last_record_lsn
5683 : );
5684 90 : if let Some(check_start_lsn) = check_start_lsn {
5685 90 : assert!(deltas.lsn_range.start >= check_start_lsn);
5686 0 : }
5687 90 : let mut delta_layer_writer = DeltaLayerWriter::new(
5688 90 : self.conf,
5689 90 : self.timeline_id,
5690 90 : self.tenant_shard_id,
5691 90 : deltas.key_range.start,
5692 90 : deltas.lsn_range,
5693 90 : ctx,
5694 90 : )
5695 90 : .await?;
5696 312 : for (key, lsn, val) in deltas.data {
5697 222 : delta_layer_writer.put_value(key, lsn, val, ctx).await?;
5698 : }
5699 90 : let (desc, path) = delta_layer_writer.finish(deltas.key_range.end, ctx).await?;
5700 90 : let delta_layer = Layer::finish_creating(self.conf, self, desc, &path)?;
5701 90 : info!("force created delta layer {}", delta_layer.local_path());
5702 : {
5703 90 : let mut guard = self.layers.write().await;
5704 90 : guard.open_mut().unwrap().force_insert_layer(delta_layer);
5705 90 : }
5706 90 :
5707 90 : Ok(())
5708 90 : }
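// A minimal, self-contained sketch (hypothetical helper, keys and LSNs
// simplified to u64) of the invariants the asserts above enforce on a
// DeltaLayerTestDesc before the layer is written: entries sorted by
// (key, lsn), all keys inside `key_range`, all LSNs inside `lsn_range`.
fn delta_invariants_hold(
    data: &[(u64, u64)],
    key_range: &std::ops::Range<u64>,
    lsn_range: &std::ops::Range<u64>,
) -> bool {
    // Tuple comparison is lexicographic, matching the sort key above.
    data.windows(2).all(|w| w[0] <= w[1])
        && data.iter().all(|(k, _)| key_range.contains(k))
        && data.iter().all(|(_, l)| lsn_range.contains(l))
}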
5709 :
5710 : /// Return all keys at the LSN in the image layers
5711 : #[cfg(test)]
5712 6 : pub(crate) async fn inspect_image_layers(
5713 6 : self: &Arc<Timeline>,
5714 6 : lsn: Lsn,
5715 6 : ctx: &RequestContext,
5716 6 : ) -> anyhow::Result<Vec<(Key, Bytes)>> {
5717 6 : let mut all_data = Vec::new();
5718 6 : let guard = self.layers.read().await;
5719 34 : for layer in guard.layer_map()?.iter_historic_layers() {
5720 34 : if !layer.is_delta() && layer.image_layer_lsn() == lsn {
5721 8 : let layer = guard.get_from_desc(&layer);
5722 8 : let mut reconstruct_data = ValuesReconstructState::default();
5723 8 : layer
5724 8 : .get_values_reconstruct_data(
5725 8 : KeySpace::single(Key::MIN..Key::MAX),
5726 8 : lsn..Lsn(lsn.0 + 1),
5727 8 : &mut reconstruct_data,
5728 8 : ctx,
5729 8 : )
5730 8 : .await?;
5731 74 : for (k, v) in reconstruct_data.keys {
5732 66 : all_data.push((k, v?.img.unwrap().1));
5733 : }
5734 26 : }
5735 : }
5736 6 : all_data.sort();
5737 6 : Ok(all_data)
5738 6 : }
5739 :
5740 : /// Get all historic layer descriptors in the layer map
5741 : #[cfg(test)]
5742 24 : pub(crate) async fn inspect_historic_layers(
5743 24 : self: &Arc<Timeline>,
5744 24 : ) -> anyhow::Result<Vec<super::storage_layer::PersistentLayerKey>> {
5745 24 : let mut layers = Vec::new();
5746 24 : let guard = self.layers.read().await;
5747 114 : for layer in guard.layer_map()?.iter_historic_layers() {
5748 114 : layers.push(layer.key());
5749 114 : }
5750 24 : Ok(layers)
5751 24 : }
5752 :
5753 : #[cfg(test)]
5754 10 : pub(crate) fn add_extra_test_dense_keyspace(&self, ks: KeySpace) {
5755 10 : let mut keyspace = self.extra_test_dense_keyspace.load().as_ref().clone();
5756 10 : keyspace.merge(&ks);
5757 10 : self.extra_test_dense_keyspace.store(Arc::new(keyspace));
5758 10 : }
5759 : }
5760 :
5761 : /// Tracks the writes that ingestion performs on a particular in-memory layer.
5762 : ///
5763 : /// Cleared upon freezing a layer.
5764 : pub(crate) struct TimelineWriterState {
5765 : open_layer: Arc<InMemoryLayer>,
5766 : current_size: u64,
5767 : // Previous Lsn which passed through
5768 : // Previous Lsn which passed through the current writer
5769 : // Largest Lsn which passed through the current writer
5770 : max_lsn: Option<Lsn>,
5771 : // Cached details of the last freeze. Avoids going through the atomic/lock on every put.
5772 : cached_last_freeze_at: Lsn,
5773 : }
5774 :
5775 : impl TimelineWriterState {
5776 1272 : fn new(open_layer: Arc<InMemoryLayer>, current_size: u64, last_freeze_at: Lsn) -> Self {
5777 1272 : Self {
5778 1272 : open_layer,
5779 1272 : current_size,
5780 1272 : prev_lsn: None,
5781 1272 : max_lsn: None,
5782 1272 : cached_last_freeze_at: last_freeze_at,
5783 1272 : }
5784 1272 : }
5785 : }
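// A minimal sketch (hypothetical type, `Lsn` simplified to u64) of the
// bookkeeping this state performs across puts: grow the tracked size and
// advance the LSN watermarks after each successful batch, exactly as
// `put_batch` below does on the real state.
struct WriterStateSketch {
    current_size: u64,
    prev_lsn: Option<u64>,
    max_lsn: Option<u64>,
}

impl WriterStateSketch {
    fn record_put(&mut self, batch_max_lsn: u64, buf_size: u64) {
        self.current_size += buf_size;
        self.prev_lsn = Some(batch_max_lsn);
        // Option<u64> orders None below Some(_), so max() works directly.
        self.max_lsn = std::cmp::max(self.max_lsn, Some(batch_max_lsn));
    }
}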
5786 :
5787 : /// Various functions to mutate the timeline.
5788 : // TODO Currently, Deref is used to allow easy access to read methods from this trait.
5789 : // This is probably considered a bad practice in Rust and should be fixed eventually,
5790 : // but will cause large code changes.
5791 : pub(crate) struct TimelineWriter<'a> {
5792 : tl: &'a Timeline,
5793 : write_guard: tokio::sync::MutexGuard<'a, Option<TimelineWriterState>>,
5794 : }
5795 :
5796 : impl Deref for TimelineWriter<'_> {
5797 : type Target = Timeline;
5798 :
5799 9897696 : fn deref(&self) -> &Self::Target {
5800 9897696 : self.tl
5801 9897696 : }
5802 : }
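// Sketch of the pattern used above, with hypothetical stand-in types: the
// writer wrapper derefs to its target so the target's read methods are
// reachable without explicit forwarding. As the TODO notes, implementing
// Deref for non-smart-pointer types is usually discouraged in Rust.
struct TargetSketch;
impl TargetSketch {
    fn read_only_query(&self) -> bool {
        true
    }
}

struct WriterSketch<'a>(&'a TargetSketch);
impl std::ops::Deref for WriterSketch<'_> {
    type Target = TargetSketch;
    fn deref(&self) -> &TargetSketch {
        self.0
    }
}
// `WriterSketch(&TargetSketch).read_only_query()` resolves through Deref.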
5803 :
5804 : #[derive(PartialEq)]
5805 : enum OpenLayerAction {
5806 : Roll,
5807 : Open,
5808 : None,
5809 : }
5810 :
5811 : impl TimelineWriter<'_> {
5812 4804208 : async fn handle_open_layer_action(
5813 4804208 : &mut self,
5814 4804208 : at: Lsn,
5815 4804208 : action: OpenLayerAction,
5816 4804208 : ctx: &RequestContext,
5817 4804208 : ) -> anyhow::Result<&Arc<InMemoryLayer>> {
5818 4804208 : match action {
5819 : OpenLayerAction::Roll => {
5820 80 : let freeze_at = self.write_guard.as_ref().unwrap().max_lsn.unwrap();
5821 80 : self.roll_layer(freeze_at).await?;
5822 80 : self.open_layer(at, ctx).await?;
5823 : }
5824 1192 : OpenLayerAction::Open => self.open_layer(at, ctx).await?,
5825 : OpenLayerAction::None => {
5826 4802936 : assert!(self.write_guard.is_some());
5827 : }
5828 : }
5829 :
5830 4804208 : Ok(&self.write_guard.as_ref().unwrap().open_layer)
5831 4804208 : }
5832 :
5833 1272 : async fn open_layer(&mut self, at: Lsn, ctx: &RequestContext) -> anyhow::Result<()> {
5834 1272 : let layer = self
5835 1272 : .tl
5836 1272 : .get_layer_for_write(at, &self.write_guard, ctx)
5837 1272 : .await?;
5838 1272 : let initial_size = layer.size().await?;
5839 :
5840 1272 : let last_freeze_at = self.last_freeze_at.load();
5841 1272 : self.write_guard.replace(TimelineWriterState::new(
5842 1272 : layer,
5843 1272 : initial_size,
5844 1272 : last_freeze_at,
5845 1272 : ));
5846 1272 :
5847 1272 : Ok(())
5848 1272 : }
5849 :
5850 80 : async fn roll_layer(&mut self, freeze_at: Lsn) -> Result<(), FlushLayerError> {
5851 80 : let current_size = self.write_guard.as_ref().unwrap().current_size;
5852 80 :
5853 80 : // self.write_guard will be taken by the freezing
5854 80 : // self.write_guard will be taken by the freeze below
5855 80 : .freeze_inmem_layer_at(freeze_at, &mut self.write_guard)
5856 80 : .await?;
5857 :
5858 80 : assert!(self.write_guard.is_none());
5859 :
5860 80 : if current_size >= self.get_checkpoint_distance() * 2 {
5861 0 : warn!("Flushed oversized open layer with size {}", current_size)
5862 80 : }
5863 :
5864 80 : Ok(())
5865 80 : }
5866 :
5867 4804208 : fn get_open_layer_action(&self, lsn: Lsn, new_value_size: u64) -> OpenLayerAction {
5868 4804208 : let state = &*self.write_guard;
5869 4804208 : let Some(state) = &state else {
5870 1192 : return OpenLayerAction::Open;
5871 : };
5872 :
5873 : #[cfg(feature = "testing")]
5874 4803016 : if state.cached_last_freeze_at < self.tl.last_freeze_at.load() {
5875 : // this check and assertion are not really needed because
5876 : // LayerManager::try_freeze_in_memory_layer will always clear out the
5877 : // TimelineWriterState if something is frozen. however, we can advance last_freeze_at when there
5878 : // is no TimelineWriterState.
5879 0 : assert!(
5880 0 : state.open_layer.end_lsn.get().is_some(),
5881 0 : "our open_layer must be outdated"
5882 : );
5883 :
5884 : // this would be a memory leak waiting to happen because the in-memory layer always has
5885 : // an index
5886 0 : panic!("BUG: TimelineWriterState held on to frozen in-memory layer.");
5887 4803016 : }
5888 4803016 :
5889 4803016 : if state.prev_lsn == Some(lsn) {
5890 : // Rolling mid LSN is not supported by [downstream code].
5891 : // Hence, only roll at LSN boundaries.
5892 : //
5893 : // [downstream code]: https://github.com/neondatabase/neon/pull/7993#discussion_r1633345422
5894 6 : return OpenLayerAction::None;
5895 4803010 : }
5896 4803010 :
5897 4803010 : if state.current_size == 0 {
5898 : // Don't roll empty layers
5899 0 : return OpenLayerAction::None;
5900 4803010 : }
5901 4803010 :
5902 4803010 : if self.tl.should_roll(
5903 4803010 : state.current_size,
5904 4803010 : state.current_size + new_value_size,
5905 4803010 : self.get_checkpoint_distance(),
5906 4803010 : lsn,
5907 4803010 : state.cached_last_freeze_at,
5908 4803010 : state.open_layer.get_opened_at(),
5909 4803010 : ) {
5910 80 : OpenLayerAction::Roll
5911 : } else {
5912 4802930 : OpenLayerAction::None
5913 : }
5914 4804208 : }
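// Condensed sketch of the decision above (hypothetical flags instead of
// the real writer state): no state => Open; a repeated LSN or an empty
// open layer => None, since mid-LSN rolls are unsupported; otherwise Roll
// exactly when the roll policy fires.
#[derive(Debug, PartialEq)]
enum ActionSketch {
    Roll,
    Open,
    None,
}

fn decide_sketch(
    has_state: bool,
    lsn_repeats: bool,
    current_size: u64,
    policy_says_roll: bool,
) -> ActionSketch {
    if !has_state {
        ActionSketch::Open
    } else if lsn_repeats || current_size == 0 || !policy_says_roll {
        ActionSketch::None
    } else {
        ActionSketch::Roll
    }
}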
5915 :
5916 : /// Put a batch of keys at the specified Lsns.
5917 4804206 : pub(crate) async fn put_batch(
5918 4804206 : &mut self,
5919 4804206 : batch: SerializedValueBatch,
5920 4804206 : ctx: &RequestContext,
5921 4804206 : ) -> anyhow::Result<()> {
5922 4804206 : if !batch.has_data() {
5923 0 : return Ok(());
5924 4804206 : }
5925 4804206 :
5926 4804206 : // In debug builds, assert that we don't write any keys that don't belong to this shard.
5927 4804206 : // We don't assert this in release builds, since key ownership policies may change over
5928 4804206 : // time. Stray keys will be removed during compaction.
5929 4804206 : if cfg!(debug_assertions) {
5930 9894704 : for metadata in &batch.metadata {
5931 5090498 : if let ValueMeta::Serialized(metadata) = metadata {
5932 5090498 : let key = Key::from_compact(metadata.key);
5933 5090498 : assert!(
5934 5090498 : self.shard_identity.is_key_local(&key)
5935 0 : || self.shard_identity.is_key_global(&key),
5936 0 : "key {key} does not belong on shard {}",
5937 0 : self.shard_identity.shard_index()
5938 : );
5939 0 : }
5940 : }
5941 0 : }
5942 :
5943 4804206 : let batch_max_lsn = batch.max_lsn;
5944 4804206 : let buf_size: u64 = batch.buffer_size() as u64;
5945 4804206 :
5946 4804206 : let action = self.get_open_layer_action(batch_max_lsn, buf_size);
5947 4804206 : let layer = self
5948 4804206 : .handle_open_layer_action(batch_max_lsn, action, ctx)
5949 4804206 : .await?;
5950 :
5951 4804206 : let res = layer.put_batch(batch, ctx).await;
5952 :
5953 4804206 : if res.is_ok() {
5954 4804206 : // Update the current size only when the entire write was ok.
5955 4804206 : // In case of failures, we may have had partial writes which
5956 4804206 : // render the size tracking out of sync. That's ok because
5957 4804206 : // the checkpoint distance should be significantly smaller
5958 4804206 : // than the S3 single shot upload limit of 5GiB.
5959 4804206 : let state = self.write_guard.as_mut().unwrap();
5960 4804206 :
5961 4804206 : state.current_size += buf_size;
5962 4804206 : state.prev_lsn = Some(batch_max_lsn);
5963 4804206 : state.max_lsn = std::cmp::max(state.max_lsn, Some(batch_max_lsn));
5964 4804206 : }
5965 :
5966 4804206 : res
5967 4804206 : }
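// Hypothetical illustration of the debug-only ownership check above; the
// real mapping lives in `ShardIdentity::is_key_local`. The idea is simply
// that every key resolves to exactly one shard, so a debug build can
// cheaply assert that no stray keys are written here.
fn key_is_local_sketch(key_hash: u32, shard_number: u32, shard_count: u32) -> bool {
    // Unsharded tenants own every key; otherwise compare the key's
    // assigned shard with ours.
    shard_count <= 1 || key_hash % shard_count == shard_number
}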
5968 :
5969 : #[cfg(test)]
5970 : /// Test helper, for tests that would like to poke individual values without composing a batch
5971 4390154 : pub(crate) async fn put(
5972 4390154 : &mut self,
5973 4390154 : key: Key,
5974 4390154 : lsn: Lsn,
5975 4390154 : value: &Value,
5976 4390154 : ctx: &RequestContext,
5977 4390154 : ) -> anyhow::Result<()> {
5978 : use utils::bin_ser::BeSer;
5979 4390154 : if !key.is_valid_key_on_write_path() {
5980 0 : bail!(
5981 0 : "the request contains data not supported by pageserver at TimelineWriter::put: {}",
5982 0 : key
5983 0 : );
5984 4390154 : }
5985 4390154 : let val_ser_size = value.serialized_size().unwrap() as usize;
5986 4390154 : let batch = SerializedValueBatch::from_values(vec![(
5987 4390154 : key.to_compact(),
5988 4390154 : lsn,
5989 4390154 : val_ser_size,
5990 4390154 : value.clone(),
5991 4390154 : )]);
5992 4390154 :
5993 4390154 : self.put_batch(batch, ctx).await
5994 4390154 : }
5995 :
5996 2 : pub(crate) async fn delete_batch(
5997 2 : &mut self,
5998 2 : batch: &[(Range<Key>, Lsn)],
5999 2 : ctx: &RequestContext,
6000 2 : ) -> anyhow::Result<()> {
6001 2 : if let Some((_, lsn)) = batch.first() {
6002 2 : let action = self.get_open_layer_action(*lsn, 0);
6003 2 : let layer = self.handle_open_layer_action(*lsn, action, ctx).await?;
6004 2 : layer.put_tombstones(batch).await?;
6005 0 : }
6006 :
6007 2 : Ok(())
6008 2 : }
6009 :
6010 : /// Track the end of the latest digested WAL record.
6011 : /// Remembers the (end of the) last valid WAL record in the timeline.
6012 : ///
6013 : /// Call this after you have finished writing all the WAL up to 'lsn'.
6014 : ///
6015 : /// 'lsn' must be aligned. This wakes up any wait_lsn() callers waiting for
6016 : /// the 'lsn' or anything older. The previous last record LSN is stored alongside
6017 : /// the latest and can be read.
6018 5279068 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
6019 5279068 : self.tl.finish_write(new_lsn);
6020 5279068 : }
6021 :
6022 270570 : pub(crate) fn update_current_logical_size(&self, delta: i64) {
6023 270570 : self.tl.update_current_logical_size(delta)
6024 270570 : }
6025 : }
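// A minimal sketch of the intended call order, assuming a caller that
// already holds a `TimelineWriter` and a prepared `SerializedValueBatch`
// (the function name is illustrative, not part of this module): write the
// batch first, then publish the aligned end LSN so wait_lsn() callers
// wake up.
async fn ingest_step_sketch(
    writer: &mut TimelineWriter<'_>,
    batch: SerializedValueBatch,
    end_lsn: Lsn,
    ctx: &RequestContext,
) -> anyhow::Result<()> {
    writer.put_batch(batch, ctx).await?;
    writer.finish_write(end_lsn);
    Ok(())
}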
6026 :
6027 : // We need TimelineWriter to be send in upcoming conversion of
6028 : // Timeline::layers to tokio::sync::RwLock.
6029 : #[test]
6030 2 : fn is_send() {
6031 2 : fn _assert_send<T: Send>() {}
6032 2 : _assert_send::<TimelineWriter<'_>>();
6033 2 : }
6034 :
6035 : #[cfg(test)]
6036 : mod tests {
6037 : use pageserver_api::key::Key;
6038 : use pageserver_api::value::Value;
6039 : use utils::{id::TimelineId, lsn::Lsn};
6040 :
6041 : use crate::tenant::{
6042 : harness::{test_img, TenantHarness},
6043 : layer_map::LayerMap,
6044 : storage_layer::{Layer, LayerName},
6045 : timeline::{DeltaLayerTestDesc, EvictionError},
6046 : Timeline,
6047 : };
6048 :
6049 : #[tokio::test]
6050 2 : async fn test_heatmap_generation() {
6051 2 : let harness = TenantHarness::create("heatmap_generation").await.unwrap();
6052 2 :
6053 2 : let covered_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
6054 2 : Lsn(0x10)..Lsn(0x20),
6055 2 : vec![(
6056 2 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
6057 2 : Lsn(0x11),
6058 2 : Value::Image(test_img("foo")),
6059 2 : )],
6060 2 : );
6061 2 : let visible_delta = DeltaLayerTestDesc::new_with_inferred_key_range(
6062 2 : Lsn(0x10)..Lsn(0x20),
6063 2 : vec![(
6064 2 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
6065 2 : Lsn(0x11),
6066 2 : Value::Image(test_img("foo")),
6067 2 : )],
6068 2 : );
6069 2 : let l0_delta = DeltaLayerTestDesc::new(
6070 2 : Lsn(0x20)..Lsn(0x30),
6071 2 : Key::from_hex("000000000000000000000000000000000000").unwrap()
6072 2 : ..Key::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(),
6073 2 : vec![(
6074 2 : Key::from_hex("720000000033333333444444445500000000").unwrap(),
6075 2 : Lsn(0x25),
6076 2 : Value::Image(test_img("foo")),
6077 2 : )],
6078 2 : );
6079 2 : let delta_layers = vec![
6080 2 : covered_delta.clone(),
6081 2 : visible_delta.clone(),
6082 2 : l0_delta.clone(),
6083 2 : ];
6084 2 :
6085 2 : let image_layer = (
6086 2 : Lsn(0x40),
6087 2 : vec![(
6088 2 : Key::from_hex("620000000033333333444444445500000000").unwrap(),
6089 2 : test_img("bar"),
6090 2 : )],
6091 2 : );
6092 2 : let image_layers = vec![image_layer];
6093 2 :
6094 2 : let (tenant, ctx) = harness.load().await;
6095 2 : let timeline = tenant
6096 2 : .create_test_timeline_with_layers(
6097 2 : TimelineId::generate(),
6098 2 : Lsn(0x10),
6099 2 : 14,
6100 2 : &ctx,
6101 2 : delta_layers,
6102 2 : image_layers,
6103 2 : Lsn(0x100),
6104 2 : )
6105 2 : .await
6106 2 : .unwrap();
6107 2 :
6108 2 : // Layer visibility is an input to heatmap generation, so refresh it first
6109 2 : timeline.update_layer_visibility().await.unwrap();
6110 2 :
6111 2 : let heatmap = timeline
6112 2 : .generate_heatmap()
6113 2 : .await
6114 2 : .expect("Infallible while timeline is not shut down");
6115 2 :
6116 2 : assert_eq!(heatmap.timeline_id, timeline.timeline_id);
6117 2 :
6118 2 : // L0 should come last
6119 2 : assert_eq!(heatmap.layers.last().unwrap().name, l0_delta.layer_name());
6120 2 :
6121 2 : let mut last_lsn = Lsn::MAX;
6122 10 : for layer in heatmap.layers {
6123 2 : // Covered layer should be omitted
6124 8 : assert!(layer.name != covered_delta.layer_name());
6125 2 :
6126 8 : let layer_lsn = match &layer.name {
6127 4 : LayerName::Delta(d) => d.lsn_range.end,
6128 4 : LayerName::Image(i) => i.lsn,
6129 2 : };
6130 2 :
6131 2 : // Apart from L0s, the newest layers should come first
6132 8 : if !LayerMap::is_l0(layer.name.key_range(), layer.name.is_delta()) {
6133 6 : assert!(layer_lsn <= last_lsn);
6134 6 : last_lsn = layer_lsn;
6135 2 : }
6136 2 : }
6137 2 : }
6138 :
6139 : #[tokio::test]
6140 2 : async fn two_layer_eviction_attempts_at_the_same_time() {
6141 2 : let harness = TenantHarness::create("two_layer_eviction_attempts_at_the_same_time")
6142 2 : .await
6143 2 : .unwrap();
6144 2 :
6145 2 : let (tenant, ctx) = harness.load().await;
6146 2 : let timeline = tenant
6147 2 : .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
6148 2 : .await
6149 2 : .unwrap();
6150 2 :
6151 2 : let layer = find_some_layer(&timeline).await;
6152 2 : let layer = layer
6153 2 : .keep_resident()
6154 2 : .await
6155 2 : .expect("no download => no downloading errors")
6156 2 : .drop_eviction_guard();
6157 2 :
6158 2 : let forever = std::time::Duration::from_secs(120);
6159 2 :
6160 2 : let first = layer.evict_and_wait(forever);
6161 2 : let second = layer.evict_and_wait(forever);
6162 2 :
6163 2 : let (first, second) = tokio::join!(first, second);
6164 2 :
6165 2 : let res = layer.keep_resident().await;
6166 2 : assert!(res.is_none(), "{res:?}");
6167 2 :
6168 2 : match (first, second) {
6169 2 : (Ok(()), Ok(())) => {
6170 2 : // because there are no more timeline locks being taken on the eviction path, we can
6171 2 : // witness all three outcomes here.
6172 2 : }
6173 2 : (Ok(()), Err(EvictionError::NotFound)) | (Err(EvictionError::NotFound), Ok(())) => {
6174 0 : // if one completes before the other, this is fine just as well.
6175 0 : }
6176 2 : other => unreachable!("unexpected {:?}", other),
6177 2 : }
6178 2 : }
6179 :
6180 2 : async fn find_some_layer(timeline: &Timeline) -> Layer {
6181 2 : let layers = timeline.layers.read().await;
6182 2 : let desc = layers
6183 2 : .layer_map()
6184 2 : .unwrap()
6185 2 : .iter_historic_layers()
6186 2 : .next()
6187 2 : .expect("must find one layer to evict");
6188 2 :
6189 2 : layers.get_from_desc(&desc)
6190 2 : }
6191 : }