Line data Source code
1 : mod compaction;
2 : pub mod delete;
3 : pub(crate) mod detach_ancestor;
4 : mod eviction_task;
5 : mod init;
6 : pub mod layer_manager;
7 : pub(crate) mod logical_size;
8 : pub mod span;
9 : pub mod uninit;
10 : mod walreceiver;
11 :
12 : use anyhow::{anyhow, bail, ensure, Context, Result};
13 : use arc_swap::ArcSwap;
14 : use bytes::Bytes;
15 : use camino::Utf8Path;
16 : use enumset::EnumSet;
17 : use fail::fail_point;
18 : use once_cell::sync::Lazy;
19 : use pageserver_api::{
20 : key::{
21 : AUX_FILES_KEY, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX, NON_INHERITED_RANGE,
22 : NON_INHERITED_SPARSE_RANGE,
23 : },
24 : keyspace::{KeySpaceAccum, SparseKeyPartitioning},
25 : models::{
26 : AuxFilePolicy, CompactionAlgorithm, DownloadRemoteLayersTaskInfo,
27 : DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy, InMemoryLayerInfo, LayerMapInfo,
28 : TimelineState,
29 : },
30 : reltag::BlockNumber,
31 : shard::{ShardIdentity, ShardNumber, TenantShardId},
32 : };
33 : use rand::Rng;
34 : use serde_with::serde_as;
35 : use storage_broker::BrokerClientChannel;
36 : use tokio::{
37 : runtime::Handle,
38 : sync::{oneshot, watch},
39 : };
40 : use tokio_util::sync::CancellationToken;
41 : use tracing::*;
42 : use utils::{
43 : bin_ser::BeSer,
44 : sync::gate::{Gate, GateGuard},
45 : vec_map::VecMap,
46 : };
47 :
48 : use std::ops::{Deref, Range};
49 : use std::pin::pin;
50 : use std::sync::atomic::Ordering as AtomicOrdering;
51 : use std::sync::{Arc, Mutex, RwLock, Weak};
52 : use std::time::{Duration, Instant, SystemTime};
53 : use std::{
54 : array,
55 : collections::{BTreeMap, HashMap, HashSet},
56 : sync::atomic::AtomicU64,
57 : };
58 : use std::{
59 : cmp::{max, min, Ordering},
60 : ops::ControlFlow,
61 : };
62 :
63 : use crate::tenant::timeline::init::LocalLayerFileMetadata;
64 : use crate::tenant::{
65 : layer_map::{LayerMap, SearchResult},
66 : metadata::TimelineMetadata,
67 : };
68 : use crate::{
69 : context::{DownloadBehavior, RequestContext},
70 : disk_usage_eviction_task::DiskUsageEvictionInfo,
71 : pgdatadir_mapping::CollectKeySpaceError,
72 : };
73 : use crate::{deletion_queue::DeletionQueueClient, metrics::GetKind};
74 : use crate::{
75 : disk_usage_eviction_task::finite_f32,
76 : tenant::storage_layer::{
77 : AsLayerDesc, DeltaLayerWriter, EvictionError, ImageLayerWriter, InMemoryLayer, Layer,
78 : LayerAccessStatsReset, LayerName, ResidentLayer, ValueReconstructResult,
79 : ValueReconstructState, ValuesReconstructState,
80 : },
81 : };
82 : use crate::{
83 : disk_usage_eviction_task::EvictionCandidate, tenant::storage_layer::delta_layer::DeltaEntry,
84 : };
85 : use crate::{
86 : metrics::ScanLatencyOngoingRecording, tenant::timeline::logical_size::CurrentLogicalSize,
87 : };
88 : use crate::{pgdatadir_mapping::LsnForTimestamp, tenant::tasks::BackgroundLoopKind};
89 : use crate::{
90 : pgdatadir_mapping::{AuxFilesDirectory, DirectoryKind},
91 : virtual_file::{MaybeFatalIo, VirtualFile},
92 : };
93 :
94 : use crate::config::PageServerConf;
95 : use crate::keyspace::{KeyPartitioning, KeySpace};
96 : use crate::metrics::{
97 : TimelineMetrics, MATERIALIZED_PAGE_CACHE_HIT, MATERIALIZED_PAGE_CACHE_HIT_DIRECT,
98 : };
99 : use crate::pgdatadir_mapping::CalculateLogicalSizeError;
100 : use crate::tenant::config::TenantConfOpt;
101 : use pageserver_api::key::{is_inherited_key, is_rel_fsm_block_key, is_rel_vm_block_key};
102 : use pageserver_api::reltag::RelTag;
103 : use pageserver_api::shard::ShardIndex;
104 :
105 : use postgres_connection::PgConnectionConfig;
106 : use postgres_ffi::to_pg_timestamp;
107 : use utils::{
108 : completion,
109 : generation::Generation,
110 : id::TimelineId,
111 : lsn::{AtomicLsn, Lsn, RecordLsn},
112 : seqwait::SeqWait,
113 : simple_rcu::{Rcu, RcuReadGuard},
114 : };
115 :
116 : use crate::page_cache;
117 : use crate::repository::GcResult;
118 : use crate::repository::{Key, Value};
119 : use crate::task_mgr;
120 : use crate::task_mgr::TaskKind;
121 : use crate::ZERO_PAGE;
122 :
123 : use self::delete::DeleteTimelineFlow;
124 : pub(super) use self::eviction_task::EvictionTaskTenantState;
125 : use self::eviction_task::EvictionTaskTimelineState;
126 : use self::layer_manager::LayerManager;
127 : use self::logical_size::LogicalSize;
128 : use self::walreceiver::{WalReceiver, WalReceiverConf};
129 :
130 : use super::secondary::heatmap::{HeatMapLayer, HeatMapTimeline};
131 : use super::{config::TenantConf, storage_layer::VectoredValueReconstructState};
132 : use super::{debug_assert_current_span_has_tenant_and_timeline_id, AttachedTenantConf};
133 : use super::{remote_timeline_client::index::IndexPart, storage_layer::LayerFringe};
134 : use super::{remote_timeline_client::RemoteTimelineClient, storage_layer::ReadableLayer};
135 :
136 : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
137 : pub(super) enum FlushLoopState {
138 : NotStarted,
139 : Running {
140 : #[cfg(test)]
141 : expect_initdb_optimization: bool,
142 : #[cfg(test)]
143 : initdb_optimization_count: usize,
144 : },
145 : Exited,
146 : }
147 :
148 : #[derive(Debug, Copy, Clone, PartialEq, Eq)]
149 : pub enum ImageLayerCreationMode {
150 : /// Try to create image layers based on `time_for_new_image_layer`. Used in compaction code path.
151 : Try,
152 : /// Force creating the image layers if possible. For now, no image layers will be created
153 : /// for metadata keys. Used in compaction code path with force flag enabled.
154 : Force,
155 : /// Initial ingestion of the data, and no data should be dropped in this function. This
156 : /// means that no metadata keys should be included in the partitions. Used in flush frozen layer
157 : /// code path.
158 : Initial,
159 : }
160 :
161 : impl std::fmt::Display for ImageLayerCreationMode {
162 762 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
163 762 : write!(f, "{:?}", self)
164 762 : }
165 : }
166 :
167 : /// Wrapper for key range to provide reverse ordering by range length for BinaryHeap
168 : #[derive(Debug, Clone, PartialEq, Eq)]
169 : pub(crate) struct Hole {
170 : key_range: Range<Key>,
171 : coverage_size: usize,
172 : }
173 :
174 : impl Ord for Hole {
175 0 : fn cmp(&self, other: &Self) -> Ordering {
176 0 : other.coverage_size.cmp(&self.coverage_size) // inverse order
177 0 : }
178 : }
179 :
180 : impl PartialOrd for Hole {
181 0 : fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
182 0 : Some(self.cmp(other))
183 0 : }
184 : }
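
// A minimal, self-contained sketch (u32 keys standing in for the pageserver
// `Key` type; MAX_HOLES is a hypothetical cap) of why the inverted `Ord` above
// is useful: it turns the max-heap `BinaryHeap` into a min-heap on
// `coverage_size`, so a size-bounded heap evicts its smallest hole on overflow
// and ends up retaining the largest ones.
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::ops::Range;

#[derive(Debug, Clone, PartialEq, Eq)]
struct Hole {
    key_range: Range<u32>,
    coverage_size: usize,
}

impl Ord for Hole {
    fn cmp(&self, other: &Self) -> Ordering {
        other.coverage_size.cmp(&self.coverage_size) // inverse order, as above
    }
}

impl PartialOrd for Hole {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    const MAX_HOLES: usize = 2;
    let mut heap = BinaryHeap::new();
    for (key_range, coverage_size) in [(0..10, 10), (10..13, 3), (13..20, 7)] {
        heap.push(Hole { key_range, coverage_size });
        if heap.len() > MAX_HOLES {
            // Under the inverted ordering, pop() removes the *smallest* hole.
            heap.pop();
        }
    }
    let mut sizes: Vec<_> = heap.into_iter().map(|h| h.coverage_size).collect();
    sizes.sort_unstable();
    assert_eq!(sizes, vec![7, 10]); // the two largest holes survive
}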
185 :
186 : /// Temporary function for immutable storage state refactor, ensures we are dropping the lock guard and not something else.
187 : /// Can be removed after all refactors are done.
188 24 : fn drop_rlock<T>(rlock: tokio::sync::OwnedRwLockReadGuard<T>) {
189 24 : drop(rlock)
190 24 : }
191 :
192 : /// Temporary function for immutable storage state refactor, ensures we are dropping the lock guard and not something else.
193 : /// Can be removed after all refactors are done.
194 786 : fn drop_wlock<T>(wlock: tokio::sync::RwLockWriteGuard<'_, T>) {
195 786 : drop(wlock)
196 786 : }
197 :
198 : /// The outward-facing resources required to build a Timeline
199 : pub struct TimelineResources {
200 : pub remote_client: Option<RemoteTimelineClient>,
201 : pub deletion_queue_client: DeletionQueueClient,
202 : pub timeline_get_throttle: Arc<
203 : crate::tenant::throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>,
204 : >,
205 : }
206 :
207 : pub(crate) struct AuxFilesState {
208 : pub(crate) dir: Option<AuxFilesDirectory>,
209 : pub(crate) n_deltas: usize,
210 : }
211 :
212 : /// The relation size cache caches relation sizes at the end of the timeline. It speeds up WAL
213 : /// ingestion considerably, because WAL ingestion needs to check, for most records, whether the record
214 : /// implicitly extends the relation. At startup, `complete_as_of` is initialized to the current end
215 : /// of the timeline (disk_consistent_lsn). It's used on reads of relation sizes to check if the
216 : /// value can be used to also update the cache, see [`Timeline::update_cached_rel_size`].
217 : pub(crate) struct RelSizeCache {
218 : pub(crate) complete_as_of: Lsn,
219 : pub(crate) map: HashMap<RelTag, (Lsn, BlockNumber)>,
220 : }
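
// A simplified, hypothetical sketch of the write-back rule described above
// (u64/u32 stand in for `Lsn`/`BlockNumber`, a tuple for `RelTag`; the real
// logic lives in the `Timeline` methods): because the cache describes sizes at
// the end of the timeline, a size observed by a read may be written back only
// if the read was at or past `complete_as_of`, and only if it is newer than
// the entry already cached.
use std::collections::HashMap;

type Lsn = u64;
type BlockNumber = u32;
type RelTag = (u32, u32, u32, u8); // (spcnode, dbnode, relnode, forknum)

struct RelSizeCache {
    complete_as_of: Lsn,
    map: HashMap<RelTag, (Lsn, BlockNumber)>,
}

impl RelSizeCache {
    /// Offer a relation size observed by a read at `read_lsn` to the cache.
    fn maybe_update(&mut self, rel: RelTag, read_lsn: Lsn, nblocks: BlockNumber) {
        if read_lsn < self.complete_as_of {
            // Historic read: it says nothing about the current end of the
            // timeline, so it must not overwrite the cache.
            return;
        }
        match self.map.get(&rel) {
            Some(&(cached_lsn, _)) if read_lsn < cached_lsn => {} // stale value
            _ => {
                self.map.insert(rel, (read_lsn, nblocks));
            }
        }
    }
}

fn main() {
    let mut cache = RelSizeCache { complete_as_of: 100, map: HashMap::new() };
    cache.maybe_update((1, 2, 3, 0), 90, 10); // historic: ignored
    cache.maybe_update((1, 2, 3, 0), 120, 12); // recent: cached
    assert_eq!(cache.map[&(1, 2, 3, 0)], (120, 12));
}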
221 :
222 : pub struct Timeline {
223 : conf: &'static PageServerConf,
224 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
225 :
226 : myself: Weak<Self>,
227 :
228 : pub(crate) tenant_shard_id: TenantShardId,
229 : pub timeline_id: TimelineId,
230 :
231 : /// The generation of the tenant that instantiated us: this is used for safety when writing remote objects.
232 : /// Never changes for the lifetime of this [`Timeline`] object.
233 : ///
234 : /// This duplicates the generation stored in LocationConf, but that structure is mutable:
235 : /// this copy enforces the invariant that the generation doesn't change during a Tenant's lifetime.
236 : pub(crate) generation: Generation,
237 :
238 : /// The detailed sharding information from our parent Tenant. This enables us to map keys
239 : /// to shards, and is constant through the lifetime of this Timeline.
240 : shard_identity: ShardIdentity,
241 :
242 : pub pg_version: u32,
243 :
244 : /// The tuple has two elements.
245 : /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote).
246 : /// 2. `LayerMap`, the acceleration data structure for `get_reconstruct_data`.
247 : ///
248 : /// `LayerMap` maps out the `(PAGE,LSN) / (KEY,LSN)` space, which is composed of `(KeyRange, LsnRange)` rectangles.
249 : /// We describe these rectangles through the `PersistentLayerDesc` struct.
250 : ///
251 : /// When we want to reconstruct a page, we first find the `PersistentLayerDesc`'s that we need for page reconstruction,
252 : /// using `LayerMap`. Then, we use `LayerFileManager` to get the `PersistentLayer`'s that correspond to the
253 : /// `PersistentLayerDesc`'s.
254 : ///
255 : /// Hence, it's important to keep things coherent. The `LayerFileManager` must always have an entry for all
256 : /// `PersistentLayerDesc`'s in the `LayerMap`. If it doesn't, `LayerFileManager::get_from_desc` will panic at
257 : /// runtime, e.g., during page reconstruction.
258 : ///
259 : /// In the future, we'll be able to split up the tuple of LayerMap and `LayerFileManager`,
260 : /// so that e.g. on-demand-download/eviction, and layer spreading, can operate just on `LayerFileManager`.
261 : pub(crate) layers: Arc<tokio::sync::RwLock<LayerManager>>,
262 :
263 : last_freeze_at: AtomicLsn,
264 : // Atomic would be more appropriate here.
265 : last_freeze_ts: RwLock<Instant>,
266 :
267 : // WAL redo manager. `None` only for broken tenants.
268 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
269 :
270 : /// Remote storage client.
271 : /// See [`remote_timeline_client`](super::remote_timeline_client) module comment for details.
272 : pub remote_client: Option<Arc<RemoteTimelineClient>>,
273 :
274 : // What page versions do we hold in the repository? If we get a
275 : // request > last_record_lsn, we need to wait until we receive all
276 : // the WAL up to the request. The SeqWait provides functions for
277 : // that. TODO: If we get a request for an old LSN, such that the
278 : // versions have already been garbage collected away, we should
279 : // throw an error, but we don't track that currently.
280 : //
281 : // last_record_lsn.load().last points to the end of last processed WAL record.
282 : //
283 : // We also remember the starting point of the previous record in
284 : // 'last_record_lsn.load().prev'. It's used to set the xl_prev pointer of the
285 : // first WAL record when the node is started up. But here, we just
286 : // keep track of it.
287 : last_record_lsn: SeqWait<RecordLsn, Lsn>,
288 :
289 : // All WAL records have been processed and stored durably on files on
290 : // local disk, up to this LSN. On crash and restart, we need to re-process
291 : // the WAL starting from this point.
292 : //
293 : // Some later WAL records might have been processed and also flushed to disk
294 : // already, so don't be surprised to see some, but there's no guarantee on
295 : // them yet.
296 : disk_consistent_lsn: AtomicLsn,
297 :
298 : // Parent timeline that this timeline was branched from, and the LSN
299 : // of the branch point.
300 : ancestor_timeline: Option<Arc<Timeline>>,
301 : ancestor_lsn: Lsn,
302 :
303 : pub(super) metrics: TimelineMetrics,
304 :
305 : // `Timeline` doesn't write these metrics itself, but it manages the lifetime. Code
306 : // in `crate::page_service` writes these metrics.
307 : pub(crate) query_metrics: crate::metrics::SmgrQueryTimePerTimeline,
308 :
309 : directory_metrics: [AtomicU64; DirectoryKind::KINDS_NUM],
310 :
311 : /// Ensures layers aren't frozen by checkpointer between
312 : /// [`Timeline::get_layer_for_write`] and layer reads.
313 : /// Locked automatically by [`TimelineWriter`] and checkpointer.
314 : /// Must always be acquired before the layer map/individual layer lock
315 : /// to avoid deadlock.
316 : write_lock: tokio::sync::Mutex<Option<TimelineWriterState>>,
317 :
318 : /// Used to avoid multiple `flush_loop` tasks running
319 : pub(super) flush_loop_state: Mutex<FlushLoopState>,
320 :
321 : /// layer_flush_start_tx can be used to wake up the layer-flushing task.
322 : /// - The u64 value is a counter, incremented every time a new flush cycle is requested.
323 : /// The flush cycle counter is sent back on the layer_flush_done channel when
324 : /// the flush finishes. You can use that to wait for the flush to finish.
325 : /// - The LSN is updated to max() of its current value and the latest disk_consistent_lsn
326 : /// read by whoever sends an update
327 : layer_flush_start_tx: tokio::sync::watch::Sender<(u64, Lsn)>,
328 : /// to be notified when layer flushing has finished, subscribe to the layer_flush_done channel
329 : layer_flush_done_tx: tokio::sync::watch::Sender<(u64, Result<(), FlushLayerError>)>,
330 :
331 : // Needed to ensure that we can't create a branch at a point that was already garbage collected
332 : pub latest_gc_cutoff_lsn: Rcu<Lsn>,
333 :
334 : // List of child timelines and their branch points. This is needed to avoid
335 : // garbage collecting data that is still needed by the child timelines.
336 : pub(crate) gc_info: std::sync::RwLock<GcInfo>,
337 :
338 : // It may change across major versions, so for simplicity
339 : // we record it right after running initdb for a timeline.
340 : // It is needed in checks when we want to error on some operations
341 : // when they are requested for pre-initdb lsn.
342 : // It can be unified with latest_gc_cutoff_lsn under some "first_valid_lsn",
343 : // though let's keep them both for better error visibility.
344 : pub initdb_lsn: Lsn,
345 :
346 : /// When did we last calculate the partitioning?
347 : partitioning: tokio::sync::Mutex<((KeyPartitioning, SparseKeyPartitioning), Lsn)>,
348 :
349 : /// Configuration: how often should the partitioning be recalculated.
350 : repartition_threshold: u64,
351 :
352 : last_image_layer_creation_check_at: AtomicLsn,
353 :
354 : /// Current logical size of the "datadir", at the last LSN.
355 : current_logical_size: LogicalSize,
356 :
357 : /// Information about the last processed message by the WAL receiver,
358 : /// or None if WAL receiver has not received anything for this timeline
359 : /// yet.
360 : pub last_received_wal: Mutex<Option<WalReceiverInfo>>,
361 : pub walreceiver: Mutex<Option<WalReceiver>>,
362 :
363 : /// Relation size cache
364 : pub(crate) rel_size_cache: RwLock<RelSizeCache>,
365 :
366 : download_all_remote_layers_task_info: RwLock<Option<DownloadRemoteLayersTaskInfo>>,
367 :
368 : state: watch::Sender<TimelineState>,
369 :
370 : /// Prevent two tasks from deleting the timeline at the same time. If held, the
371 : /// timeline is being deleted. If 'true', the timeline has already been deleted.
372 : pub delete_progress: Arc<tokio::sync::Mutex<DeleteTimelineFlow>>,
373 :
374 : eviction_task_timeline_state: tokio::sync::Mutex<EvictionTaskTimelineState>,
375 :
376 : /// Load or creation time information about the disk_consistent_lsn and when the loading
377 : /// happened. Used for consumption metrics.
378 : pub(crate) loaded_at: (Lsn, SystemTime),
379 :
380 : /// Gate to prevent shutdown completing while I/O is still happening to this timeline's data
381 : pub(crate) gate: Gate,
382 :
383 : /// Cancellation token scoped to this timeline: anything doing long-running work relating
384 : /// to the timeline should drop out when this token fires.
385 : pub(crate) cancel: CancellationToken,
386 :
387 : /// Make sure we only have one running compaction at a time in tests.
388 : ///
389 : /// Must only be taken in two places:
390 : /// - [`Timeline::compact`] (this file)
391 : /// - [`delete::delete_local_timeline_directory`]
392 : ///
393 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
394 : compaction_lock: tokio::sync::Mutex<()>,
395 :
396 : /// Make sure we only have one running gc at a time.
397 : ///
398 : /// Must only be taken in two places:
399 : /// - [`Timeline::gc`] (this file)
400 : /// - [`delete::delete_local_timeline_directory`]
401 : ///
402 : /// Timeline deletion will acquire both compaction and gc locks in whatever order.
403 : gc_lock: tokio::sync::Mutex<()>,
404 :
405 : /// Cloned from [`super::Tenant::timeline_get_throttle`] on construction.
406 : timeline_get_throttle: Arc<
407 : crate::tenant::throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>,
408 : >,
409 :
410 : /// Keep the aux directory cache to avoid reconstructing it on each update.
411 : pub(crate) aux_files: tokio::sync::Mutex<AuxFilesState>,
412 : }
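
// A standalone sketch of the `layer_flush_start_tx` / `layer_flush_done_tx`
// handshake documented inside the struct above, built on plain tokio watch
// channels. All names here are hypothetical, the error type is a plain String,
// and the Lsn component of the start channel is omitted for brevity; the real
// flush loop is more involved.
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (start_tx, mut start_rx) = watch::channel(0u64);
    let (done_tx, mut done_rx) = watch::channel((0u64, Ok::<(), String>(())));

    // Flush loop: wake up on a new ticket, do the work, echo the ticket back.
    tokio::spawn(async move {
        while start_rx.changed().await.is_ok() {
            let ticket = *start_rx.borrow();
            // ... freeze the open layer and flush frozen layers here ...
            done_tx.send((ticket, Ok(()))).ok();
        }
    });

    // Requester: bump the counter to take a ticket ...
    start_tx.send_modify(|counter| *counter += 1);
    let my_ticket = *start_tx.borrow();

    // ... then wait until the flush loop has acknowledged at least our ticket.
    loop {
        let (acked, result) = done_rx.borrow_and_update().clone();
        if acked >= my_ticket {
            result.expect("flush failed");
            break;
        }
        done_rx.changed().await.expect("flush loop exited");
    }
}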
413 :
414 : pub struct WalReceiverInfo {
415 : pub wal_source_connconf: PgConnectionConfig,
416 : pub last_received_msg_lsn: Lsn,
417 : pub last_received_msg_ts: u128,
418 : }
419 :
420 : /// Information about how much history needs to be retained, needed by
421 : /// Garbage Collection.
422 : #[derive(Default)]
423 : pub(crate) struct GcInfo {
424 : /// Specific LSNs that are needed.
425 : ///
426 : /// Currently, this includes all points where child branches have
427 : /// been forked off from. In the future, could also include
428 : /// explicit user-defined snapshot points.
429 : pub(crate) retain_lsns: Vec<Lsn>,
430 :
431 : /// The cutoff coordinates, which are combined by selecting the minimum.
432 : pub(crate) cutoffs: GcCutoffs,
433 : }
434 :
435 : impl GcInfo {
436 218 : pub(crate) fn min_cutoff(&self) -> Lsn {
437 218 : self.cutoffs.select_min()
438 218 : }
439 : }
440 :
441 : /// The `GcInfo` component describing which Lsns need to be retained.
442 : #[derive(Debug)]
443 : pub(crate) struct GcCutoffs {
444 : /// Keep everything newer than this point.
445 : ///
446 : /// This is calculated by subtracting the 'gc_horizon' setting from
447 : /// the last-record LSN.
448 : ///
449 : /// FIXME: is this inclusive or exclusive?
450 : pub(crate) horizon: Lsn,
451 :
452 : /// In addition to 'retain_lsns' and 'horizon', keep everything newer than this
453 : /// point.
454 : ///
455 : /// This is calculated by finding a number such that a record is needed for PITR
456 : /// if and only if its LSN is larger than 'pitr'.
457 : pub(crate) pitr: Lsn,
458 : }
459 :
460 : impl Default for GcCutoffs {
461 334 : fn default() -> Self {
462 334 : Self {
463 334 : horizon: Lsn::INVALID,
464 334 : pitr: Lsn::INVALID,
465 334 : }
466 334 : }
467 : }
468 :
469 : impl GcCutoffs {
470 218 : fn select_min(&self) -> Lsn {
471 218 : std::cmp::min(self.horizon, self.pitr)
472 218 : }
473 : }
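
// A tiny worked example of how the two cutoffs combine (u64 stand-ins for
// `Lsn`): GC must retain everything required by both the space-based horizon
// and the time-based PITR window, so the effective cutoff is the smaller, i.e.
// older, of the two LSNs. The concrete values below are made up.
fn main() {
    let horizon: u64 = 0x5000_0000; // last-record LSN minus 'gc_horizon'
    let pitr: u64 = 0x3000_0000;    // start of the PITR window
    let effective = std::cmp::min(horizon, pitr);
    assert_eq!(effective, pitr); // here the PITR window reaches further back
}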
474 :
475 : /// An error happened in a get() operation.
476 2 : #[derive(thiserror::Error, Debug)]
477 : pub(crate) enum PageReconstructError {
478 : #[error(transparent)]
479 : Other(#[from] anyhow::Error),
480 :
481 : #[error("Ancestor LSN wait error: {0}")]
482 : AncestorLsnTimeout(#[from] WaitLsnError),
483 :
484 : #[error("timeline shutting down")]
485 : Cancelled,
486 :
487 : /// The ancestor of this is being stopped
488 : #[error("ancestor timeline {0} is being stopped")]
489 : AncestorStopping(TimelineId),
490 :
491 : /// An error happened replaying WAL records
492 : #[error(transparent)]
493 : WalRedo(anyhow::Error),
494 :
495 : #[error("{0}")]
496 : MissingKey(MissingKeyError),
497 : }
498 :
499 : #[derive(Debug)]
500 : pub struct MissingKeyError {
501 : key: Key,
502 : shard: ShardNumber,
503 : cont_lsn: Lsn,
504 : request_lsn: Lsn,
505 : ancestor_lsn: Option<Lsn>,
506 : traversal_path: Vec<TraversalPathItem>,
507 : backtrace: Option<std::backtrace::Backtrace>,
508 : }
509 :
510 : impl std::fmt::Display for MissingKeyError {
511 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
512 0 : write!(
513 0 : f,
514 0 : "could not find data for key {} (shard {:?}) at LSN {}, request LSN {}",
515 0 : self.key, self.shard, self.cont_lsn, self.request_lsn
516 0 : )?;
517 0 : if let Some(ref ancestor_lsn) = self.ancestor_lsn {
518 0 : write!(f, ", ancestor {}", ancestor_lsn)?;
519 0 : }
520 :
521 0 : if !self.traversal_path.is_empty() {
522 0 : writeln!(f)?;
523 0 : }
524 :
525 0 : for (r, c, l) in &self.traversal_path {
526 0 : writeln!(
527 0 : f,
528 0 : "layer traversal: result {:?}, cont_lsn {}, layer: {}",
529 0 : r, c, l,
530 0 : )?;
531 : }
532 :
533 0 : if let Some(ref backtrace) = self.backtrace {
534 0 : write!(f, "\n{}", backtrace)?;
535 0 : }
536 :
537 0 : Ok(())
538 0 : }
539 : }
540 :
541 : impl PageReconstructError {
542 : /// Returns true if this error indicates a tenant/timeline shutdown-like situation
543 0 : pub(crate) fn is_stopping(&self) -> bool {
544 0 : use PageReconstructError::*;
545 0 : match self {
546 0 : Other(_) => false,
547 0 : AncestorLsnTimeout(_) => false,
548 0 : Cancelled | AncestorStopping(_) => true,
549 0 : WalRedo(_) => false,
550 0 : MissingKey { .. } => false,
551 : }
552 0 : }
553 : }
554 :
555 0 : #[derive(thiserror::Error, Debug)]
556 : enum CreateImageLayersError {
557 : #[error("timeline shutting down")]
558 : Cancelled,
559 :
560 : #[error(transparent)]
561 : GetVectoredError(GetVectoredError),
562 :
563 : #[error(transparent)]
564 : PageReconstructError(PageReconstructError),
565 :
566 : #[error(transparent)]
567 : Other(#[from] anyhow::Error),
568 : }
569 :
570 0 : #[derive(thiserror::Error, Debug)]
571 : enum FlushLayerError {
572 : /// Timeline cancellation token was cancelled
573 : #[error("timeline shutting down")]
574 : Cancelled,
575 :
576 : #[error(transparent)]
577 : CreateImageLayersError(CreateImageLayersError),
578 :
579 : #[error(transparent)]
580 : Other(#[from] anyhow::Error),
581 : }
582 :
583 0 : #[derive(thiserror::Error, Debug)]
584 : pub(crate) enum GetVectoredError {
585 : #[error("timeline shutting down")]
586 : Cancelled,
587 :
588 : #[error("Requested too many keys: {0} > {}", Timeline::MAX_GET_VECTORED_KEYS)]
589 : Oversized(u64),
590 :
591 : #[error("Requested at invalid LSN: {0}")]
592 : InvalidLsn(Lsn),
593 :
594 : #[error("Requested key not found: {0}")]
595 : MissingKey(MissingKeyError),
596 :
597 : #[error(transparent)]
598 : GetReadyAncestorError(GetReadyAncestorError),
599 :
600 : #[error(transparent)]
601 : Other(#[from] anyhow::Error),
602 : }
603 :
604 0 : #[derive(thiserror::Error, Debug)]
605 : pub(crate) enum GetReadyAncestorError {
606 : #[error("ancestor timeline {0} is being stopped")]
607 : AncestorStopping(TimelineId),
608 :
609 : #[error("Ancestor LSN wait error: {0}")]
610 : AncestorLsnTimeout(#[from] WaitLsnError),
611 :
612 : #[error("Cancelled")]
613 : Cancelled,
614 :
615 : #[error(transparent)]
616 : Other(#[from] anyhow::Error),
617 : }
618 :
619 : #[derive(Clone, Copy)]
620 : pub enum LogicalSizeCalculationCause {
621 : Initial,
622 : ConsumptionMetricsSyntheticSize,
623 : EvictionTaskImitation,
624 : TenantSizeHandler,
625 : }
626 :
627 : pub enum GetLogicalSizePriority {
628 : User,
629 : Background,
630 : }
631 :
632 0 : #[derive(enumset::EnumSetType)]
633 : pub(crate) enum CompactFlags {
634 : ForceRepartition,
635 : ForceImageLayerCreation,
636 : }
637 :
638 : impl std::fmt::Debug for Timeline {
639 0 : fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
640 0 : write!(f, "Timeline<{}>", self.timeline_id)
641 0 : }
642 : }
643 :
644 0 : #[derive(thiserror::Error, Debug)]
645 : pub(crate) enum WaitLsnError {
646 : // Called on a timeline which is shutting down
647 : #[error("Shutdown")]
648 : Shutdown,
649 :
650 : // Called on a timeline that is not in an active state or is shutting down
651 : #[error("Bad state (not active)")]
652 : BadState,
653 :
654 : // Timeout expired while waiting for LSN to catch up with goal.
655 : #[error("{0}")]
656 : Timeout(String),
657 : }
658 :
659 : // The impls below achieve cancellation mapping for errors.
660 : // Perhaps there's a way of achieving this with less cruft.
661 :
662 : impl From<CreateImageLayersError> for CompactionError {
663 0 : fn from(e: CreateImageLayersError) -> Self {
664 0 : match e {
665 0 : CreateImageLayersError::Cancelled => CompactionError::ShuttingDown,
666 0 : _ => CompactionError::Other(e.into()),
667 : }
668 0 : }
669 : }
670 :
671 : impl From<CreateImageLayersError> for FlushLayerError {
672 0 : fn from(e: CreateImageLayersError) -> Self {
673 0 : match e {
674 0 : CreateImageLayersError::Cancelled => FlushLayerError::Cancelled,
675 0 : any => FlushLayerError::CreateImageLayersError(any),
676 : }
677 0 : }
678 : }
679 :
680 : impl From<PageReconstructError> for CreateImageLayersError {
681 0 : fn from(e: PageReconstructError) -> Self {
682 0 : match e {
683 0 : PageReconstructError::Cancelled => CreateImageLayersError::Cancelled,
684 0 : _ => CreateImageLayersError::PageReconstructError(e),
685 : }
686 0 : }
687 : }
688 :
689 : impl From<GetVectoredError> for CreateImageLayersError {
690 0 : fn from(e: GetVectoredError) -> Self {
691 0 : match e {
692 0 : GetVectoredError::Cancelled => CreateImageLayersError::Cancelled,
693 0 : _ => CreateImageLayersError::GetVectoredError(e),
694 : }
695 0 : }
696 : }
697 :
698 : impl From<GetVectoredError> for PageReconstructError {
699 0 : fn from(e: GetVectoredError) -> Self {
700 0 : match e {
701 0 : GetVectoredError::Cancelled => PageReconstructError::Cancelled,
702 0 : GetVectoredError::InvalidLsn(_) => PageReconstructError::Other(anyhow!("Invalid LSN")),
703 0 : err @ GetVectoredError::Oversized(_) => PageReconstructError::Other(err.into()),
704 0 : GetVectoredError::MissingKey(err) => PageReconstructError::MissingKey(err),
705 0 : GetVectoredError::GetReadyAncestorError(err) => PageReconstructError::from(err),
706 0 : GetVectoredError::Other(err) => PageReconstructError::Other(err),
707 : }
708 0 : }
709 : }
710 :
711 : impl From<GetReadyAncestorError> for PageReconstructError {
712 2 : fn from(e: GetReadyAncestorError) -> Self {
713 2 : use GetReadyAncestorError::*;
714 2 : match e {
715 0 : AncestorStopping(tid) => PageReconstructError::AncestorStopping(tid),
716 0 : AncestorLsnTimeout(wait_err) => PageReconstructError::AncestorLsnTimeout(wait_err),
717 0 : Cancelled => PageReconstructError::Cancelled,
718 2 : Other(other) => PageReconstructError::Other(other),
719 : }
720 2 : }
721 : }
722 :
723 : #[derive(
724 : Eq,
725 : PartialEq,
726 : Debug,
727 : Copy,
728 : Clone,
729 148 : strum_macros::EnumString,
730 0 : strum_macros::Display,
731 0 : serde_with::DeserializeFromStr,
732 : serde_with::SerializeDisplay,
733 : )]
734 : #[strum(serialize_all = "kebab-case")]
735 : pub enum GetVectoredImpl {
736 : Sequential,
737 : Vectored,
738 : }
739 :
740 : #[derive(
741 : Eq,
742 : PartialEq,
743 : Debug,
744 : Copy,
745 : Clone,
746 148 : strum_macros::EnumString,
747 0 : strum_macros::Display,
748 0 : serde_with::DeserializeFromStr,
749 : serde_with::SerializeDisplay,
750 : )]
751 : #[strum(serialize_all = "kebab-case")]
752 : pub enum GetImpl {
753 : Legacy,
754 : Vectored,
755 : }
756 :
757 : pub(crate) enum WaitLsnWaiter<'a> {
758 : Timeline(&'a Timeline),
759 : Tenant,
760 : PageService,
761 : }
762 :
763 : /// Argument to [`Timeline::shutdown`].
764 : #[derive(Debug, Clone, Copy)]
765 : pub(crate) enum ShutdownMode {
766 : /// Graceful shutdown, may do a lot of I/O as we flush any open layers to disk and then
767 : /// also to remote storage. This method can easily take multiple seconds for a busy timeline.
768 : ///
769 : /// While we are flushing, we continue to accept read I/O for LSNs ingested before
770 : /// the call to [`Timeline::shutdown`].
771 : FreezeAndFlush,
772 : /// Shut down immediately, without waiting for any open layers to flush.
773 : Hard,
774 : }
775 :
776 : /// Public interface functions
777 : impl Timeline {
778 : /// Get the LSN where this branch was created
779 8 : pub(crate) fn get_ancestor_lsn(&self) -> Lsn {
780 8 : self.ancestor_lsn
781 8 : }
782 :
783 : /// Get the ancestor's timeline id
784 3284 : pub(crate) fn get_ancestor_timeline_id(&self) -> Option<TimelineId> {
785 3284 : self.ancestor_timeline
786 3284 : .as_ref()
787 3284 : .map(|ancestor| ancestor.timeline_id)
788 3284 : }
789 :
790 : /// Lock and get timeline's GC cutoff
791 950 : pub(crate) fn get_latest_gc_cutoff_lsn(&self) -> RcuReadGuard<Lsn> {
792 950 : self.latest_gc_cutoff_lsn.read()
793 950 : }
794 :
795 : /// Look up given page version.
796 : ///
797 : /// If a remote layer file is needed, it is downloaded as part of this
798 : /// call.
799 : ///
800 : /// This method enforces [`Self::timeline_get_throttle`] internally.
801 : ///
802 : /// NOTE: It is considered an error to 'get' a key that doesn't exist. The
803 : /// abstraction above this needs to store suitable metadata to track what
804 : /// data exists with what keys, in separate metadata entries. If a
805 : /// non-existent key is requested, we may incorrectly return a value from
806 : /// an ancestor branch, for example, or waste a lot of cycles chasing the
807 : /// non-existing key.
808 : ///
809 : /// # Cancel-Safety
810 : ///
811 : /// This method is cancellation-safe.
812 : #[inline(always)]
813 621765 : pub(crate) async fn get(
814 621765 : &self,
815 621765 : key: Key,
816 621765 : lsn: Lsn,
817 621765 : ctx: &RequestContext,
818 621765 : ) -> Result<Bytes, PageReconstructError> {
819 621765 : if !lsn.is_valid() {
820 0 : return Err(PageReconstructError::Other(anyhow::anyhow!("Invalid LSN")));
821 621765 : }
822 621765 :
823 621765 : // This check is debug-only because of the cost of hashing, and because it's a double-check: we
824 621765 : // already checked the key against the shard_identity when looking up the Timeline from
825 621765 : // page_service.
826 621765 : debug_assert!(!self.shard_identity.is_key_disposable(&key));
827 :
828 621765 : self.timeline_get_throttle.throttle(ctx, 1).await;
829 :
830 : // Check the page cache. We will get back the most recent page with lsn <= `lsn`.
831 : // The cached image can be returned directly if there is no WAL between the cached image
832 : // and requested LSN. The cached image can also be used to reduce the amount of WAL needed
833 : // for redo.
834 621765 : let cached_page_img = match self.lookup_cached_page(&key, lsn, ctx).await {
835 0 : Some((cached_lsn, cached_img)) => {
836 0 : match cached_lsn.cmp(&lsn) {
837 0 : Ordering::Less => {} // there might be WAL between cached_lsn and lsn, we need to check
838 : Ordering::Equal => {
839 0 : MATERIALIZED_PAGE_CACHE_HIT_DIRECT.inc();
840 0 : return Ok(cached_img); // exact LSN match, return the image
841 : }
842 : Ordering::Greater => {
843 0 : unreachable!("the returned lsn should never be after the requested lsn")
844 : }
845 : }
846 0 : Some((cached_lsn, cached_img))
847 : }
848 621765 : None => None,
849 : };
850 :
851 621765 : match self.conf.get_impl {
852 : GetImpl::Legacy => {
853 621765 : let reconstruct_state = ValueReconstructState {
854 621765 : records: Vec::new(),
855 621765 : img: cached_page_img,
856 621765 : };
857 621765 :
858 621765 : self.get_impl(key, lsn, reconstruct_state, ctx).await
859 : }
860 : GetImpl::Vectored => {
861 0 : let keyspace = KeySpace {
862 0 : ranges: vec![key..key.next()],
863 0 : };
864 0 :
865 0 : // Initialise the reconstruct state for the key with the cache
866 0 : // entry returned above.
867 0 : let mut reconstruct_state = ValuesReconstructState::new();
868 0 :
869 0 : // Only add the cached image to the reconstruct state when it exists.
870 0 : if cached_page_img.is_some() {
871 0 : let mut key_state = VectoredValueReconstructState::default();
872 0 : key_state.img = cached_page_img;
873 0 : reconstruct_state.keys.insert(key, Ok(key_state));
874 0 : }
875 :
876 0 : let vectored_res = self
877 0 : .get_vectored_impl(keyspace.clone(), lsn, reconstruct_state, ctx)
878 0 : .await;
879 :
880 0 : if self.conf.validate_vectored_get {
881 0 : self.validate_get_vectored_impl(&vectored_res, keyspace, lsn, ctx)
882 0 : .await;
883 0 : }
884 :
885 0 : let key_value = vectored_res?.pop_first();
886 0 : match key_value {
887 0 : Some((got_key, value)) => {
888 0 : if got_key != key {
889 0 : error!(
890 0 : "Expected {}, but singular vectored get returned {}",
891 : key, got_key
892 : );
893 0 : Err(PageReconstructError::Other(anyhow!(
894 0 : "Singular vectored get returned wrong key"
895 0 : )))
896 : } else {
897 0 : value
898 : }
899 : }
900 0 : None => Err(PageReconstructError::MissingKey(MissingKeyError {
901 0 : key,
902 0 : shard: self.shard_identity.get_shard_number(&key),
903 0 : cont_lsn: Lsn(0),
904 0 : request_lsn: lsn,
905 0 : ancestor_lsn: None,
906 0 : traversal_path: Vec::new(),
907 0 : backtrace: None,
908 0 : })),
909 : }
910 : }
911 : }
912 621765 : }
913 :
914 : /// Not subject to [`Self::timeline_get_throttle`].
915 622903 : async fn get_impl(
916 622903 : &self,
917 622903 : key: Key,
918 622903 : lsn: Lsn,
919 622903 : mut reconstruct_state: ValueReconstructState,
920 622903 : ctx: &RequestContext,
921 622903 : ) -> Result<Bytes, PageReconstructError> {
922 622903 : // XXX: structured stats collection for layer eviction here.
923 622903 : trace!(
924 0 : "get page request for {}@{} from task kind {:?}",
925 0 : key,
926 0 : lsn,
927 0 : ctx.task_kind()
928 : );
929 :
930 622903 : let timer = crate::metrics::GET_RECONSTRUCT_DATA_TIME
931 622903 : .for_get_kind(GetKind::Singular)
932 622903 : .start_timer();
933 622903 : let path = self
934 622903 : .get_reconstruct_data(key, lsn, &mut reconstruct_state, ctx)
935 42240 : .await?;
936 622789 : timer.stop_and_record();
937 622789 :
938 622789 : let start = Instant::now();
939 622789 : let res = self.reconstruct_value(key, lsn, reconstruct_state).await;
940 622789 : let elapsed = start.elapsed();
941 622789 : crate::metrics::RECONSTRUCT_TIME
942 622789 : .for_get_kind(GetKind::Singular)
943 622789 : .observe(elapsed.as_secs_f64());
944 622789 :
945 622789 : if cfg!(feature = "testing") && res.is_err() {
946 : // it can only be walredo issue
947 : use std::fmt::Write;
948 :
949 0 : let mut msg = String::new();
950 0 :
951 0 : path.into_iter().for_each(|(res, cont_lsn, layer)| {
952 0 : writeln!(
953 0 : msg,
954 0 : "- layer traversal: result {res:?}, cont_lsn {cont_lsn}, layer: {}",
955 0 : layer,
956 0 : )
957 0 : .expect("string grows")
958 0 : });
959 0 :
960 0 : // this is to rule out or provide evidence that we could in some cases read a duplicate
961 0 : // walrecord
962 0 : tracing::info!("walredo failed, path:\n{msg}");
963 622789 : }
964 :
965 622789 : res
966 622903 : }
967 :
968 : pub(crate) const MAX_GET_VECTORED_KEYS: u64 = 32;
969 :
970 : /// Look up multiple page versions at a given LSN
971 : ///
972 : /// This naive implementation will be replaced with a more efficient one
973 : /// which actually vectorizes the read path.
974 616 : pub(crate) async fn get_vectored(
975 616 : &self,
976 616 : keyspace: KeySpace,
977 616 : lsn: Lsn,
978 616 : ctx: &RequestContext,
979 616 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
980 616 : if !lsn.is_valid() {
981 0 : return Err(GetVectoredError::InvalidLsn(lsn));
982 616 : }
983 616 :
984 616 : let key_count = keyspace.total_raw_size().try_into().unwrap();
985 616 : if key_count > Timeline::MAX_GET_VECTORED_KEYS {
986 0 : return Err(GetVectoredError::Oversized(key_count));
987 616 : }
988 :
989 1232 : for range in &keyspace.ranges {
990 616 : let mut key = range.start;
991 1432 : while key != range.end {
992 816 : assert!(!self.shard_identity.is_key_disposable(&key));
993 816 : key = key.next();
994 : }
995 : }
996 :
997 616 : trace!(
998 0 : "get vectored request for {:?}@{} from task kind {:?} will use {} implementation",
999 0 : keyspace,
1000 0 : lsn,
1001 0 : ctx.task_kind(),
1002 : self.conf.get_vectored_impl
1003 : );
1004 :
1005 616 : let start = crate::metrics::GET_VECTORED_LATENCY
1006 616 : .for_task_kind(ctx.task_kind())
1007 616 : .map(|metric| (metric, Instant::now()));
1008 :
1009 : // start counting after throttle so that throttle time
1010 : // is always less than observation time
1011 616 : let throttled = self
1012 616 : .timeline_get_throttle
1013 616 : .throttle(ctx, key_count as usize)
1014 0 : .await;
1015 :
1016 616 : let res = match self.conf.get_vectored_impl {
1017 : GetVectoredImpl::Sequential => {
1018 616 : self.get_vectored_sequential_impl(keyspace, lsn, ctx).await
1019 : }
1020 : GetVectoredImpl::Vectored => {
1021 0 : let vectored_res = self
1022 0 : .get_vectored_impl(keyspace.clone(), lsn, ValuesReconstructState::new(), ctx)
1023 0 : .await;
1024 :
1025 0 : if self.conf.validate_vectored_get {
1026 0 : self.validate_get_vectored_impl(&vectored_res, keyspace, lsn, ctx)
1027 0 : .await;
1028 0 : }
1029 :
1030 0 : vectored_res
1031 : }
1032 : };
1033 :
1034 616 : if let Some((metric, start)) = start {
1035 0 : let elapsed = start.elapsed();
1036 0 : let ex_throttled = if let Some(throttled) = throttled {
1037 0 : elapsed.checked_sub(throttled)
1038 : } else {
1039 0 : Some(elapsed)
1040 : };
1041 :
1042 0 : if let Some(ex_throttled) = ex_throttled {
1043 0 : metric.observe(ex_throttled.as_secs_f64());
1044 0 : } else {
1045 0 : use utils::rate_limit::RateLimit;
1046 0 : static LOGGED: Lazy<Mutex<RateLimit>> =
1047 0 : Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(10))));
1048 0 : let mut rate_limit = LOGGED.lock().unwrap();
1049 0 : rate_limit.call(|| {
1050 0 : warn!("error deducting time spent throttled; this message is logged at a global rate limit");
1051 0 : });
1052 0 : }
1053 616 : }
1054 :
1055 616 : res
1056 616 : }
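
// A hedged caller-side sketch (u64 stand-ins for `Key`; `chunk_range` is a
// hypothetical helper, not pageserver code) of how a large contiguous range
// can be split into batches that respect MAX_GET_VECTORED_KEYS and therefore
// avoid GetVectoredError::Oversized.
const MAX_GET_VECTORED_KEYS: u64 = 32;

fn chunk_range(range: std::ops::Range<u64>) -> Vec<std::ops::Range<u64>> {
    let mut chunks = Vec::new();
    let mut start = range.start;
    while start < range.end {
        let end = std::cmp::min(start + MAX_GET_VECTORED_KEYS, range.end);
        chunks.push(start..end);
        start = end;
    }
    chunks
}

fn main() {
    // 70 keys become three requests of 32 + 32 + 6 keys.
    assert_eq!(chunk_range(0..70), vec![0..32, 32..64, 64..70]);
}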
1057 :
1058 : /// Scan the keyspace and return all existing key-value pairs in it. This is currently built on top
1059 : /// of vectored get. A normal vectored get returns an error when a key in the keyspace is not found
1060 : /// during the search, but the scan interface returns all existing key-value pairs and does not
1061 : /// expect every single key in the keyspace to be found. The semantics are closer to the RocksDB
1062 : /// scan iterator interface. We could later optimize this interface to avoid some checks in the
1063 : /// vectored get path that maintain and split the probing and to-be-probed keyspaces. We also need
1064 : /// to ensure that the scan operation will not cause OOM in the future.
1065 : #[allow(dead_code)]
1066 0 : pub(crate) async fn scan(
1067 0 : &self,
1068 0 : keyspace: KeySpace,
1069 0 : lsn: Lsn,
1070 0 : ctx: &RequestContext,
1071 0 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1072 0 : if !lsn.is_valid() {
1073 0 : return Err(GetVectoredError::InvalidLsn(lsn));
1074 0 : }
1075 0 :
1076 0 : trace!(
1077 0 : "key-value scan request for {:?}@{} from task kind {:?}",
1078 0 : keyspace,
1079 0 : lsn,
1080 0 : ctx.task_kind()
1081 : );
1082 :
1083 : // We should generalize this into Keyspace::contains in the future.
1084 0 : for range in &keyspace.ranges {
1085 0 : if range.start.field1 < METADATA_KEY_BEGIN_PREFIX
1086 0 : || range.end.field1 > METADATA_KEY_END_PREFIX
1087 : {
1088 0 : return Err(GetVectoredError::Other(anyhow::anyhow!(
1089 0 : "only metadata keyspace can be scanned"
1090 0 : )));
1091 0 : }
1092 : }
1093 :
1094 0 : let start = crate::metrics::SCAN_LATENCY
1095 0 : .for_task_kind(ctx.task_kind())
1096 0 : .map(ScanLatencyOngoingRecording::start_recording);
1097 :
1098 : // start counting after throttle so that throttle time
1099 : // is always less than observation time
1100 0 : let throttled = self
1101 0 : .timeline_get_throttle
1102 0 : // assume scan = 1 quota for now until we find a better way to process this
1103 0 : .throttle(ctx, 1)
1104 0 : .await;
1105 :
1106 0 : let vectored_res = self
1107 0 : .get_vectored_impl(
1108 0 : keyspace.clone(),
1109 0 : lsn,
1110 0 : ValuesReconstructState::default(),
1111 0 : ctx,
1112 0 : )
1113 0 : .await;
1114 :
1115 0 : if let Some(recording) = start {
1116 0 : recording.observe(throttled);
1117 0 : }
1118 :
1119 0 : vectored_res
1120 0 : }
1121 :
1122 : /// Not subject to [`Self::timeline_get_throttle`].
1123 628 : pub(super) async fn get_vectored_sequential_impl(
1124 628 : &self,
1125 628 : keyspace: KeySpace,
1126 628 : lsn: Lsn,
1127 628 : ctx: &RequestContext,
1128 628 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1129 628 : let mut values = BTreeMap::new();
1130 :
1131 1256 : for range in keyspace.ranges {
1132 628 : let mut key = range.start;
1133 1766 : while key != range.end {
1134 1138 : let block = self
1135 1138 : .get_impl(key, lsn, ValueReconstructState::default(), ctx)
1136 19 : .await;
1137 :
1138 : use PageReconstructError::*;
1139 0 : match block {
1140 : Err(Cancelled | AncestorStopping(_)) => {
1141 0 : return Err(GetVectoredError::Cancelled)
1142 : }
1143 : Err(MissingKey(_))
1144 2 : if NON_INHERITED_RANGE.contains(&key)
1145 2 : || NON_INHERITED_SPARSE_RANGE.contains(&key) =>
1146 2 : {
1147 2 : // Ignore missing key error for aux key range. TODO: currently, we assume non_inherited_range == aux_key_range.
1148 2 : // When we add more types of keys into the page server, we should revisit this part of code and throw errors
1149 2 : // accordingly.
1150 2 : key = key.next();
1151 2 : }
1152 0 : Err(MissingKey(err)) => {
1153 0 : return Err(GetVectoredError::MissingKey(err));
1154 : }
1155 0 : Err(Other(err))
1156 0 : if err
1157 0 : .to_string()
1158 0 : .contains("downloading evicted layer file failed") =>
1159 0 : {
1160 0 : return Err(GetVectoredError::Other(err))
1161 : }
1162 0 : Err(Other(err))
1163 0 : if err
1164 0 : .chain()
1165 0 : .any(|cause| cause.to_string().contains("layer loading failed")) =>
1166 0 : {
1167 0 : // The intent here is to achieve error parity with the vectored read path.
1168 0 : // When vectored read fails to load a layer it fails the whole read, hence
1169 0 : // we mimic this behaviour here to keep the validation happy.
1170 0 : return Err(GetVectoredError::Other(err));
1171 : }
1172 1136 : _ => {
1173 1136 : values.insert(key, block);
1174 1136 : key = key.next();
1175 1136 : }
1176 : }
1177 : }
1178 : }
1179 :
1180 628 : Ok(values)
1181 628 : }
1182 :
1183 64 : pub(super) async fn get_vectored_impl(
1184 64 : &self,
1185 64 : keyspace: KeySpace,
1186 64 : lsn: Lsn,
1187 64 : mut reconstruct_state: ValuesReconstructState,
1188 64 : ctx: &RequestContext,
1189 64 : ) -> Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError> {
1190 64 : let get_kind = if keyspace.total_raw_size() == 1 {
1191 32 : GetKind::Singular
1192 : } else {
1193 32 : GetKind::Vectored
1194 : };
1195 :
1196 64 : let get_data_timer = crate::metrics::GET_RECONSTRUCT_DATA_TIME
1197 64 : .for_get_kind(get_kind)
1198 64 : .start_timer();
1199 64 : self.get_vectored_reconstruct_data(keyspace, lsn, &mut reconstruct_state, ctx)
1200 7413 : .await?;
1201 60 : get_data_timer.stop_and_record();
1202 60 :
1203 60 : let reconstruct_timer = crate::metrics::RECONSTRUCT_TIME
1204 60 : .for_get_kind(get_kind)
1205 60 : .start_timer();
1206 60 : let mut results: BTreeMap<Key, Result<Bytes, PageReconstructError>> = BTreeMap::new();
1207 60 : let layers_visited = reconstruct_state.get_layers_visited();
1208 20448 : for (key, res) in reconstruct_state.keys {
1209 20388 : match res {
1210 0 : Err(err) => {
1211 0 : results.insert(key, Err(err));
1212 0 : }
1213 20388 : Ok(state) => {
1214 20388 : let state = ValueReconstructState::from(state);
1215 :
1216 20388 : let reconstruct_res = self.reconstruct_value(key, lsn, state).await;
1217 20388 : results.insert(key, reconstruct_res);
1218 : }
1219 : }
1220 : }
1221 60 : reconstruct_timer.stop_and_record();
1222 60 :
1223 60 : // For aux file keys (v1 or v2) the vectored read path does not return an error
1224 60 : // when they're missing. Instead they are omitted from the resulting btree
1225 60 : // (this is a requirement, not a bug). Skip updating the metric in these cases
1226 60 : // to avoid a division by zero producing an infinite result.
1227 60 : if !results.is_empty() {
1228 58 : // Note that this is an approximation. Tracking the exact number of layers visited
1229 58 : // per key requires virtually unbounded memory usage and is inefficient
1230 58 : // (i.e. segment tree tracking each range queried from a layer)
1231 58 : crate::metrics::VEC_READ_NUM_LAYERS_VISITED
1232 58 : .observe(layers_visited as f64 / results.len() as f64);
1233 58 : }
1234 :
1235 60 : Ok(results)
1236 64 : }
1237 :
1238 : /// Not subject to [`Self::timeline_get_throttle`].
1239 12 : pub(super) async fn validate_get_vectored_impl(
1240 12 : &self,
1241 12 : vectored_res: &Result<BTreeMap<Key, Result<Bytes, PageReconstructError>>, GetVectoredError>,
1242 12 : keyspace: KeySpace,
1243 12 : lsn: Lsn,
1244 12 : ctx: &RequestContext,
1245 12 : ) {
1246 12 : if keyspace.overlaps(&Key::metadata_key_range()) {
1247 : // skip validation for metadata key range
1248 0 : return;
1249 12 : }
1250 :
1251 12 : let sequential_res = self
1252 12 : .get_vectored_sequential_impl(keyspace.clone(), lsn, ctx)
1253 19 : .await;
1254 :
1255 0 : fn errors_match(lhs: &GetVectoredError, rhs: &GetVectoredError) -> bool {
1256 0 : use GetVectoredError::*;
1257 0 : match (lhs, rhs) {
1258 0 : (Oversized(l), Oversized(r)) => l == r,
1259 0 : (InvalidLsn(l), InvalidLsn(r)) => l == r,
1260 0 : (MissingKey(l), MissingKey(r)) => l.key == r.key,
1261 0 : (GetReadyAncestorError(_), GetReadyAncestorError(_)) => true,
1262 0 : (Other(_), Other(_)) => true,
1263 0 : _ => false,
1264 : }
1265 0 : }
1266 :
1267 12 : match (&sequential_res, vectored_res) {
1268 0 : (Err(GetVectoredError::Cancelled), _) => {},
1269 0 : (_, Err(GetVectoredError::Cancelled)) => {},
1270 0 : (Err(seq_err), Ok(_)) => {
1271 0 : panic!(concat!("Sequential get failed with {}, but vectored get did not",
1272 0 : " - keyspace={:?} lsn={}"),
1273 0 : seq_err, keyspace, lsn) },
1274 0 : (Ok(_), Err(GetVectoredError::GetReadyAncestorError(GetReadyAncestorError::AncestorLsnTimeout(_)))) => {
1275 0 : // Sequential get runs after vectored get, so it is possible for the latter
1276 0 : // to time out while waiting for its ancestor's Lsn to become ready and for the
1277 0 : // former to succeed (it essentially has a doubled wait time).
1278 0 : },
1279 0 : (Ok(_), Err(vec_err)) => {
1280 0 : panic!(concat!("Vectored get failed with {}, but sequential get did not",
1281 0 : " - keyspace={:?} lsn={}"),
1282 0 : vec_err, keyspace, lsn) },
1283 0 : (Err(seq_err), Err(vec_err)) => {
1284 0 : assert!(errors_match(seq_err, vec_err),
1285 0 : "Mismatched errors: {seq_err} != {vec_err} - keyspace={keyspace:?} lsn={lsn}")},
1286 12 : (Ok(seq_values), Ok(vec_values)) => {
1287 320 : seq_values.iter().zip(vec_values.iter()).for_each(|((seq_key, seq_res), (vec_key, vec_res))| {
1288 320 : assert_eq!(seq_key, vec_key);
1289 320 : match (seq_res, vec_res) {
1290 320 : (Ok(seq_blob), Ok(vec_blob)) => {
1291 320 : Self::validate_key_equivalence(seq_key, &keyspace, lsn, seq_blob, vec_blob);
1292 320 : },
1293 0 : (Err(err), Ok(_)) => {
1294 0 : panic!(
1295 0 : concat!("Sequential get failed with {} for key {}, but vectored get did not",
1296 0 : " - keyspace={:?} lsn={}"),
1297 0 : err, seq_key, keyspace, lsn) },
1298 0 : (Ok(_), Err(err)) => {
1299 0 : panic!(
1300 0 : concat!("Vectored get failed with {} for key {}, but sequential get did not",
1301 0 : " - keyspace={:?} lsn={}"),
1302 0 : err, seq_key, keyspace, lsn) },
1303 0 : (Err(_), Err(_)) => {}
1304 : }
1305 320 : })
1306 : }
1307 : }
1308 12 : }
1309 :
1310 320 : fn validate_key_equivalence(
1311 320 : key: &Key,
1312 320 : keyspace: &KeySpace,
1313 320 : lsn: Lsn,
1314 320 : seq: &Bytes,
1315 320 : vec: &Bytes,
1316 320 : ) {
1317 320 : if *key == AUX_FILES_KEY {
1318 : // The value reconstruct of AUX_FILES_KEY from records is not deterministic
1319 : // since it uses a hash map under the hood. Hence, deserialise both results
1320 : // before comparing.
1321 0 : let seq_aux_dir_res = AuxFilesDirectory::des(seq);
1322 0 : let vec_aux_dir_res = AuxFilesDirectory::des(vec);
1323 0 : match (&seq_aux_dir_res, &vec_aux_dir_res) {
1324 0 : (Ok(seq_aux_dir), Ok(vec_aux_dir)) => {
1325 0 : assert_eq!(
1326 : seq_aux_dir, vec_aux_dir,
1327 0 : "Mismatch for key {} - keyspace={:?} lsn={}",
1328 : key, keyspace, lsn
1329 : );
1330 : }
1331 0 : (Err(_), Err(_)) => {}
1332 : _ => {
1333 0 : panic!("Mismatch for {key}: {seq_aux_dir_res:?} != {vec_aux_dir_res:?}");
1334 : }
1335 : }
1336 : } else {
1337 : // All other keys should reconstruct deterministically, so we simply compare the blobs.
1338 320 : assert_eq!(
1339 : seq, vec,
1340 0 : "Image mismatch for key {key} - keyspace={keyspace:?} lsn={lsn}"
1341 : );
1342 : }
1343 320 : }
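
// A standalone illustration of the nondeterminism the AUX_FILES_KEY branch
// above guards against (file names are made up; serde/bincode serialisation is
// elided): two HashMaps with identical contents compare equal as maps, but
// iterating them, and hence serialising them, can emit entries in different
// orders, so comparing raw blobs could report spurious mismatches.
// Deserialising first compares the logical content instead.
use std::collections::HashMap;

fn main() {
    let mut a = HashMap::new();
    a.insert("pg_logical/mappings/x", vec![1u8, 2, 3]);
    a.insert("pg_stat/pgstat.stat", vec![4u8]);

    let mut b = HashMap::new();
    b.insert("pg_stat/pgstat.stat", vec![4u8]);
    b.insert("pg_logical/mappings/x", vec![1u8, 2, 3]);

    // Equal as maps, whatever the iteration order:
    assert_eq!(a, b);

    // But the key orders seen by a serialiser are not guaranteed to match:
    let a_keys: Vec<_> = a.keys().collect();
    let b_keys: Vec<_> = b.keys().collect();
    println!("{a_keys:?} vs {b_keys:?}");
}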
1344 :
1345 : /// Get last or prev record separately. Same as get_last_record_rlsn().last/prev.
1346 5053844 : pub(crate) fn get_last_record_lsn(&self) -> Lsn {
1347 5053844 : self.last_record_lsn.load().last
1348 5053844 : }
1349 :
1350 0 : pub(crate) fn get_prev_record_lsn(&self) -> Lsn {
1351 0 : self.last_record_lsn.load().prev
1352 0 : }
1353 :
1354 : /// Atomically get both last and prev.
1355 218 : pub(crate) fn get_last_record_rlsn(&self) -> RecordLsn {
1356 218 : self.last_record_lsn.load()
1357 218 : }
1358 :
1359 : /// Subscribe to callers of wait_lsn(). The value of the channel is None if there are no
1360 : /// wait_lsn() calls in progress, and Some(Lsn) if there is an active waiter for wait_lsn().
1361 0 : pub(crate) fn subscribe_for_wait_lsn_updates(&self) -> watch::Receiver<Option<Lsn>> {
1362 0 : self.last_record_lsn.status_receiver()
1363 0 : }
1364 :
1365 1054 : pub(crate) fn get_disk_consistent_lsn(&self) -> Lsn {
1366 1054 : self.disk_consistent_lsn.load()
1367 1054 : }
1368 :
1369 : /// remote_consistent_lsn from the perspective of the tenant's current generation,
1370 : /// not validated with control plane yet.
1371 : /// See [`Self::get_remote_consistent_lsn_visible`].
1372 0 : pub(crate) fn get_remote_consistent_lsn_projected(&self) -> Option<Lsn> {
1373 0 : if let Some(remote_client) = &self.remote_client {
1374 0 : remote_client.remote_consistent_lsn_projected()
1375 : } else {
1376 0 : None
1377 : }
1378 0 : }
1379 :
1380 : /// remote_consistent_lsn which the tenant is guaranteed not to go backward from,
1381 : /// i.e. a value of remote_consistent_lsn_projected which has undergone
1382 : /// generation validation in the deletion queue.
1383 0 : pub(crate) fn get_remote_consistent_lsn_visible(&self) -> Option<Lsn> {
1384 0 : if let Some(remote_client) = &self.remote_client {
1385 0 : remote_client.remote_consistent_lsn_visible()
1386 : } else {
1387 0 : None
1388 : }
1389 0 : }
1390 :
1391 : /// The sum of the file size of all historic layers in the layer map.
1392 : /// This method makes no distinction between local and remote layers.
1393 : /// Hence, the result **does not represent local filesystem usage**.
1394 0 : pub(crate) async fn layer_size_sum(&self) -> u64 {
1395 0 : let guard = self.layers.read().await;
1396 0 : let layer_map = guard.layer_map();
1397 0 : let mut size = 0;
1398 0 : for l in layer_map.iter_historic_layers() {
1399 0 : size += l.file_size();
1400 0 : }
1401 0 : size
1402 0 : }
1403 :
1404 0 : pub(crate) fn resident_physical_size(&self) -> u64 {
1405 0 : self.metrics.resident_physical_size_get()
1406 0 : }
1407 :
1408 0 : pub(crate) fn get_directory_metrics(&self) -> [u64; DirectoryKind::KINDS_NUM] {
1409 0 : array::from_fn(|idx| self.directory_metrics[idx].load(AtomicOrdering::Relaxed))
1410 0 : }
1411 :
1412 : ///
1413 : /// Wait until WAL has been received and processed up to this LSN.
1414 : ///
1415 : /// You should call this before any of the other get_* or list_* functions. Calling
1416 : /// those functions with an LSN that has not been processed yet is an error.
1417 : ///
1418 224283 : pub(crate) async fn wait_lsn(
1419 224283 : &self,
1420 224283 : lsn: Lsn,
1421 224283 : who_is_waiting: WaitLsnWaiter<'_>,
1422 224283 : ctx: &RequestContext, /* Prepare for use by cancellation */
1423 224283 : ) -> Result<(), WaitLsnError> {
1424 224283 : if self.cancel.is_cancelled() {
1425 0 : return Err(WaitLsnError::Shutdown);
1426 224283 : } else if !self.is_active() {
1427 0 : return Err(WaitLsnError::BadState);
1428 224283 : }
1429 224283 :
1430 224283 : if cfg!(debug_assertions) {
1431 224283 : match ctx.task_kind() {
1432 : TaskKind::WalReceiverManager
1433 : | TaskKind::WalReceiverConnectionHandler
1434 : | TaskKind::WalReceiverConnectionPoller => {
1435 0 : let is_myself = match who_is_waiting {
1436 0 : WaitLsnWaiter::Timeline(waiter) => Weak::ptr_eq(&waiter.myself, &self.myself),
1437 0 : WaitLsnWaiter::Tenant | WaitLsnWaiter::PageService => unreachable!("tenant or page_service context are not expected to have task kind {:?}", ctx.task_kind()),
1438 : };
1439 0 : if is_myself {
1440 0 : if let Err(current) = self.last_record_lsn.would_wait_for(lsn) {
1441 : // walingest is the only one that can advance last_record_lsn; it should make sure to never reach here
1442 0 : panic!("this timeline's walingest task is calling wait_lsn({lsn}) but we only have last_record_lsn={current}; would deadlock");
1443 0 : }
1444 0 : } else {
1445 0 : // if another timeline's is waiting for us, there's no deadlock risk because
1446 0 : // our walreceiver task can make progress independent of theirs
1447 0 : }
1448 : }
1449 224283 : _ => {}
1450 : }
1451 0 : }
1452 :
1453 224283 : let _timer = crate::metrics::WAIT_LSN_TIME.start_timer();
1454 224283 :
1455 224283 : match self
1456 224283 : .last_record_lsn
1457 224283 : .wait_for_timeout(lsn, self.conf.wait_lsn_timeout)
1458 0 : .await
1459 : {
1460 224283 : Ok(()) => Ok(()),
1461 0 : Err(e) => {
1462 0 : use utils::seqwait::SeqWaitError::*;
1463 0 : match e {
1464 0 : Shutdown => Err(WaitLsnError::Shutdown),
1465 : Timeout => {
1466 : // don't count the time spent waiting for lock below, and also in walreceiver.status(), towards the wait_lsn_time_histo
1467 0 : drop(_timer);
1468 0 : let walreceiver_status = self.walreceiver_status();
1469 0 : Err(WaitLsnError::Timeout(format!(
1470 0 : "Timed out while waiting for WAL record at LSN {} to arrive, last_record_lsn {} disk consistent LSN={}, WalReceiver status: {}",
1471 0 : lsn,
1472 0 : self.get_last_record_lsn(),
1473 0 : self.get_disk_consistent_lsn(),
1474 0 : walreceiver_status,
1475 0 : )))
1476 : }
1477 : }
1478 : }
1479 : }
1480 224283 : }
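
// A hedged usage sketch of the contract documented above. `read_page_at` is a
// hypothetical in-crate caller, not pageserver code, and assumes the items
// defined in this file are in scope; the `?` relies on the existing
// `#[from] WaitLsnError` conversion on PageReconstructError.
async fn read_page_at(
    timeline: &Timeline,
    key: Key,
    lsn: Lsn,
    ctx: &RequestContext,
) -> Result<Bytes, PageReconstructError> {
    // First make sure the WAL has been ingested up to `lsn` ...
    timeline.wait_lsn(lsn, WaitLsnWaiter::PageService, ctx).await?;
    // ... only then is it valid to read at `lsn`.
    timeline.get(key, lsn, ctx).await
}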
1481 :
1482 0 : pub(crate) fn walreceiver_status(&self) -> String {
1483 0 : match &*self.walreceiver.lock().unwrap() {
1484 0 : None => "stopping or stopped".to_string(),
1485 0 : Some(walreceiver) => match walreceiver.status() {
1486 0 : Some(status) => status.to_human_readable_string(),
1487 0 : None => "Not active".to_string(),
1488 : },
1489 : }
1490 0 : }
1491 :
1492 : /// Check that it is valid to request operations with that lsn.
1493 222 : pub(crate) fn check_lsn_is_in_scope(
1494 222 : &self,
1495 222 : lsn: Lsn,
1496 222 : latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
1497 222 : ) -> anyhow::Result<()> {
1498 222 : ensure!(
1499 222 : lsn >= **latest_gc_cutoff_lsn,
1500 4 : "LSN {} is earlier than latest GC horizon {} (we might've already garbage collected needed data)",
1501 4 : lsn,
1502 4 : **latest_gc_cutoff_lsn,
1503 : );
1504 218 : Ok(())
1505 222 : }
1506 :
1507 : /// Flush to disk all data that was written with the put_* functions
1508 1868 : #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))]
1509 : pub(crate) async fn freeze_and_flush(&self) -> anyhow::Result<()> {
1510 : self.freeze_and_flush0().await
1511 : }
1512 :
1513 : // This exists to provide a non-span creating version of `freeze_and_flush` we can call without
1514 : // polluting the span hierarchy.
1515 934 : pub(crate) async fn freeze_and_flush0(&self) -> anyhow::Result<()> {
1516 934 : let to_lsn = self.freeze_inmem_layer(false).await;
1517 934 : self.flush_frozen_layers_and_wait(to_lsn).await
1518 934 : }
1519 :
1520 : // Check if an open ephemeral layer should be closed: this provides
1521 : // background enforcement of the checkpoint interval if there is no active WAL receiver, to avoid keeping
1522 : // an ephemeral layer open forever when idle. It also freezes layers if the global limit on
1523 : // ephemeral layer bytes has been breached.
1524 0 : pub(super) async fn maybe_freeze_ephemeral_layer(&self) {
1525 0 : let Ok(_write_guard) = self.write_lock.try_lock() else {
1526 : // If the write lock is held, there is an active wal receiver: rolling open layers
1527 : // is their responsibility while they hold this lock.
1528 0 : return;
1529 : };
1530 :
1531 0 : let Ok(layers_guard) = self.layers.try_read() else {
1532 : // Don't block if the layer lock is busy
1533 0 : return;
1534 : };
1535 :
1536 0 : let Some(open_layer) = &layers_guard.layer_map().open_layer else {
1537 : // If there is no open layer, we have no layer freezing to do. However, we might need to generate
1538 : // some updates to disk_consistent_lsn and remote_consistent_lsn, in case we ingested some WAL regions
1539 : // that didn't result in writes to this shard.
1540 :
1541 : // Must not hold the layers lock while waiting for a flush.
1542 0 : drop(layers_guard);
1543 0 :
1544 0 : let last_record_lsn = self.get_last_record_lsn();
1545 0 : let disk_consistent_lsn = self.get_disk_consistent_lsn();
1546 0 : if last_record_lsn > disk_consistent_lsn {
1547 : // We have no open layer, but disk_consistent_lsn is behind the last record: this indicates
1548 : // we are a sharded tenant and have skipped some WAL
1549 0 : let last_freeze_ts = *self.last_freeze_ts.read().unwrap();
1550 0 : if last_freeze_ts.elapsed() >= self.get_checkpoint_timeout() {
1551 : // Only do this if we have been layer-less for longer than get_checkpoint_timeout,
1552 : // so that a shard without any data ingested (yet) doesn't write a remote index
1553 : // as soon as it sees its LSN advance: we only do this after we've been
1554 : // layer-less for some time.
1555 0 : tracing::debug!(
1556 0 : "Advancing disk_consistent_lsn past WAL ingest gap {} -> {}",
1557 : disk_consistent_lsn,
1558 : last_record_lsn
1559 : );
1560 :
1561 : // The flush loop will update remote consistent LSN as well as disk consistent LSN.
1562 0 : self.flush_frozen_layers_and_wait(last_record_lsn)
1563 0 : .await
1564 0 : .ok();
1565 0 : }
1566 0 : }
1567 :
1568 0 : return;
1569 : };
1570 :
1571 0 : let Some(current_size) = open_layer.try_len() else {
1572 : // Unexpected: since we hold the write guard, nobody else should be writing to this layer, so
1573 : // the read lock to get its size should always succeed.
1574 0 : tracing::warn!("Lock conflict while reading size of open layer");
1575 0 : return;
1576 : };
1577 :
1578 0 : let current_lsn = self.get_last_record_lsn();
1579 :
1580 0 : let checkpoint_distance_override = open_layer.tick().await;
1581 :
1582 0 : if let Some(size_override) = checkpoint_distance_override {
1583 0 : if current_size > size_override {
1584 : // This is not harmful, but it only happens in relatively rare cases where
1585 : // time-based checkpoints are not happening fast enough to keep the amount of
1586 : // ephemeral data within configured limits. It's a sign of stress on the system.
1587 0 : tracing::info!("Early-rolling open layer at size {current_size} (limit {size_override}) due to dirty data pressure");
1588 0 : }
1589 0 : }
1590 :
1591 0 : let checkpoint_distance =
1592 0 : checkpoint_distance_override.unwrap_or(self.get_checkpoint_distance());
1593 0 :
1594 0 : if self.should_roll(
1595 0 : current_size,
1596 0 : current_size,
1597 0 : checkpoint_distance,
1598 0 : self.get_last_record_lsn(),
1599 0 : self.last_freeze_at.load(),
1600 0 : open_layer.get_opened_at(),
1601 0 : ) {
1602 0 : match open_layer.info() {
1603 0 : InMemoryLayerInfo::Frozen { lsn_start, lsn_end } => {
1604 0 : // We may reach this point if the layer was already frozen but not yet flushed: flushing
1605 0 : // happens asynchronously in the background.
1606 0 : tracing::debug!(
1607 0 : "Not freezing open layer, it's already frozen ({lsn_start}..{lsn_end})"
1608 : );
1609 : }
1610 : InMemoryLayerInfo::Open { .. } => {
1611 : // Upgrade to a write lock and freeze the layer
1612 0 : drop(layers_guard);
1613 0 : let mut layers_guard = self.layers.write().await;
1614 0 : layers_guard
1615 0 : .try_freeze_in_memory_layer(current_lsn, &self.last_freeze_at)
1616 0 : .await;
1617 : }
1618 : }
1619 0 : self.flush_frozen_layers();
1620 0 : }
1621 0 : }
1622 :
1623 : /// Outermost timeline compaction operation; downloads needed layers.
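 : ///
 : /// A sketch of invoking compaction directly (the flag set shown is illustrative;
 : /// the background compaction loop normally drives this):
 : /// ```ignore
 : /// let flags = enumset::EnumSet::<CompactFlags>::empty();
 : /// timeline.compact(&cancel, flags, &ctx).await?;
 : /// ```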
1624 330 : pub(crate) async fn compact(
1625 330 : self: &Arc<Self>,
1626 330 : cancel: &CancellationToken,
1627 330 : flags: EnumSet<CompactFlags>,
1628 330 : ctx: &RequestContext,
1629 330 : ) -> Result<(), CompactionError> {
1630 330 : // most likely the cancellation token is from background task, but in tests it could be the
1631 330 : // request task as well.
1632 330 :
1633 330 : let prepare = async move {
1634 330 : let guard = self.compaction_lock.lock().await;
1635 :
1636 330 : let permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
1637 330 : BackgroundLoopKind::Compaction,
1638 330 : ctx,
1639 330 : )
1640 0 : .await;
1641 :
1642 330 : (guard, permit)
1643 330 : };
1644 :
1645 : // this wait probably never needs any "long time spent" logging, because we already nag if
1646 : // the compaction task goes over its period (20s), which happens quite often in production.
1647 330 : let (_guard, _permit) = tokio::select! {
1648 : tuple = prepare => { tuple },
1649 : _ = self.cancel.cancelled() => return Ok(()),
1650 : _ = cancel.cancelled() => return Ok(()),
1651 : };
1652 :
1653 330 : let last_record_lsn = self.get_last_record_lsn();
1654 330 :
1655 330 : // Last record Lsn could be zero in case the timeline was just created
1656 330 : if !last_record_lsn.is_valid() {
1657 0 : warn!("Skipping compaction for potentially just initialized timeline, it has invalid last record lsn: {last_record_lsn}");
1658 0 : return Ok(());
1659 330 : }
1660 330 :
1661 330 : match self.get_compaction_algorithm() {
1662 0 : CompactionAlgorithm::Tiered => self.compact_tiered(cancel, ctx).await,
1663 54933 : CompactionAlgorithm::Legacy => self.compact_legacy(cancel, flags, ctx).await,
1664 : }
1665 330 : }
1666 :
1667 : /// Mutate the timeline with a [`TimelineWriter`].
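 : ///
 : /// A sketch of typical use (the `put` call and its arguments are illustrative):
 : /// ```ignore
 : /// let mut writer = timeline.writer().await;
 : /// writer.put(key, lsn, &value, &ctx).await?;
 : /// drop(writer); // releases the timeline's write lock
 : /// ```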
1668 5109044 : pub(crate) async fn writer(&self) -> TimelineWriter<'_> {
1669 5109044 : TimelineWriter {
1670 5109044 : tl: self,
1671 5109044 : write_guard: self.write_lock.lock().await,
1672 : }
1673 5109044 : }
1674 :
1675 0 : pub(crate) fn activate(
1676 0 : self: &Arc<Self>,
1677 0 : parent: Arc<crate::tenant::Tenant>,
1678 0 : broker_client: BrokerClientChannel,
1679 0 : background_jobs_can_start: Option<&completion::Barrier>,
1680 0 : ctx: &RequestContext,
1681 0 : ) {
1682 0 : if self.tenant_shard_id.is_shard_zero() {
1683 0 : // Logical size is only maintained accurately on shard zero.
1684 0 : self.spawn_initial_logical_size_computation_task(ctx);
1685 0 : }
1686 0 : self.launch_wal_receiver(ctx, broker_client);
1687 0 : self.set_state(TimelineState::Active);
1688 0 : self.launch_eviction_task(parent, background_jobs_can_start);
1689 0 : }
1690 :
1691 : /// After this function returns, no timeline-scoped tasks are left running.
1692 : ///
1693 : /// The preferred pattern is (see the sketch below):
1694 : /// - in any spawned tasks, keep Timeline::guard open + Timeline::cancel / child token
1695 : /// - if early shutdown (not just cancellation) of a sub-tree of tasks is required,
1696 : /// go the extra mile and keep track of JoinHandles
1697 : /// - Keep track of JoinHandles using a passed-down `Arc<Mutex<Option<JoinSet>>>` or similar,
1698 : /// instead of spawning directly on a runtime. It is a more composable / testable pattern.
1699 : ///
1700 : /// For legacy reasons, we still have multiple tasks spawned using
1701 : /// `task_mgr::spawn(X, Some(tenant_id), Some(timeline_id))`.
1702 : /// We refer to these as "timeline-scoped task_mgr tasks".
1703 : /// Some of these tasks are already sensitive to Timeline::cancel while others are
1704 : /// not sensitive to Timeline::cancel and instead respect [`task_mgr::shutdown_token`]
1705 : /// or [`task_mgr::shutdown_watcher`].
1706 : /// We want to gradually convert the code base away from these.
1707 : ///
1708 : /// Here is an inventory of timeline-scoped task_mgr tasks that are still sensitive to
1709 : /// `task_mgr::shutdown_{token,watcher}` (there are also tenant-scoped and global-scoped
1710 : /// ones that aren't mentioned here):
1711 : /// - [`TaskKind::TimelineDeletionWorker`]
1712 : /// - NB: also used for tenant deletion
1713 : /// - [`TaskKind::RemoteUploadTask`]
1714 : /// - [`TaskKind::InitialLogicalSizeCalculation`]
1715 : /// - [`TaskKind::DownloadAllRemoteLayers`] (can we get rid of it?)
1716 : // Inventory of timeline-scoped task_mgr tasks that use spawn but aren't sensitive:
1717 : /// - [`TaskKind::Eviction`]
1718 : /// - [`TaskKind::LayerFlushTask`]
1719 : /// - [`TaskKind::OndemandLogicalSizeCalculation`]
1720 : /// - [`TaskKind::GarbageCollector`] (immediate_gc is timeline-scoped)
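 : ///
 : /// A sketch of that preferred pattern for a spawned task (names illustrative;
 : /// `do_work` is hypothetical):
 : /// ```ignore
 : /// let Ok(_guard) = timeline.gate.enter() else { return };
 : /// tokio::select! {
 : ///     _ = timeline.cancel.cancelled() => { /* bail out promptly */ }
 : ///     _ = do_work() => { /* normal completion */ }
 : /// }
 : /// // dropping `_guard` lets shutdown()'s `gate.close().await` proceed
 : /// ```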
1721 8 : pub(crate) async fn shutdown(&self, mode: ShutdownMode) {
1722 8 : debug_assert_current_span_has_tenant_and_timeline_id();
1723 :
1724 8 : let try_freeze_and_flush = match mode {
1725 6 : ShutdownMode::FreezeAndFlush => true,
1726 2 : ShutdownMode::Hard => false,
1727 : };
1728 :
1729 : // Regardless of whether we're going to try_freeze_and_flush
1730 : // or not, stop ingesting any more data. Walreceiver only provides
1731 : // cancellation but no "wait until gone", because it uses the Timeline::gate.
1732 : // So, only after the self.gate.close() below will we know for sure that
1733 : // no walreceiver tasks are left.
1734 : // For `try_freeze_and_flush=true`, this means that we might still be ingesting
1735 : // data during the call to `self.freeze_and_flush()` below.
1736 : // That's not ideal, but, we don't have the concept of a ChildGuard,
1737 : // which is what we'd need to properly model early shutdown of the walreceiver
1738 : // task sub-tree before the other Timeline task sub-trees.
1739 8 : let walreceiver = self.walreceiver.lock().unwrap().take();
1740 8 : tracing::debug!(
1741 0 : is_some = walreceiver.is_some(),
1742 0 : "Waiting for WalReceiverManager..."
1743 : );
1744 8 : if let Some(walreceiver) = walreceiver {
1745 0 : walreceiver.cancel();
1746 8 : }
1747 : // ... and inform any waiters for newer LSNs that there won't be any.
1748 8 : self.last_record_lsn.shutdown();
1749 8 :
1750 8 : if try_freeze_and_flush {
1751 : // we shut down walreceiver above, so, we won't add anything more
1752 : // to the InMemoryLayer; freeze it and wait for all frozen layers
1753 : // to reach the disk & upload queue, then shut the upload queue and
1754 : // wait for it to drain.
1755 6 : match self.freeze_and_flush().await {
1756 : Ok(_) => {
1757 : // drain the upload queue
1758 6 : if let Some(client) = self.remote_client.as_ref() {
1759 : // if we did not wait for completion here, our shutdown process might not
1760 : // wait for remote uploads to complete at all, as new tasks can forever
1761 : // be spawned.
1762 : //
1763 : // what is problematic is shutting down the RemoteTimelineClient, because
1764 : // obviously it does not make sense to stop while we wait for it, but what
1765 : // about corner cases like s3 suddenly hanging up?
1766 6 : client.shutdown().await;
1767 0 : }
1768 : }
1769 0 : Err(e) => {
1770 0 : // Non-fatal. Shutdown is infallible. Failures to flush just mean that
1771 0 : // we have some extra WAL replay to do next time the timeline starts.
1772 0 : warn!("failed to freeze and flush: {e:#}");
1773 : }
1774 : }
1775 2 : }
1776 :
1777 : // Signal any subscribers to our cancellation token to drop out
1778 8 : tracing::debug!("Cancelling CancellationToken");
1779 8 : self.cancel.cancel();
1780 :
1781 : // Transition the remote_client into a state where it's only useful for timeline deletion.
1782 : // (The deletion use case is why we can't just hook up remote_client to Self::cancel.)
1783 8 : if let Some(remote_client) = self.remote_client.as_ref() {
1784 8 : remote_client.stop();
1785 8 : // As documented in remote_client.stop()'s doc comment, it's our responsibility
1786 8 : // to shut down the upload queue tasks.
1787 8 : // TODO: fix that, task management should be encapsulated inside remote_client.
1788 8 : task_mgr::shutdown_tasks(
1789 8 : Some(TaskKind::RemoteUploadTask),
1790 8 : Some(self.tenant_shard_id),
1791 8 : Some(self.timeline_id),
1792 8 : )
1793 0 : .await;
1794 0 : }
1795 :
1796 : // TODO: work toward making this a no-op. See this function's doc comment for more context.
1797 8 : tracing::debug!("Waiting for tasks...");
1798 8 : task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), Some(self.timeline_id)).await;
1799 :
1800 : // Finally wait until any gate-holders are complete.
1801 : //
1802 : // TODO: once above shutdown_tasks is a no-op, we can close the gate before calling shutdown_tasks
1803 : // and use a TBD variant of shutdown_tasks that asserts that there were no tasks left.
1804 8 : self.gate.close().await;
1805 :
1806 8 : self.metrics.shutdown();
1807 8 : }
1808 :
1809 334 : pub(crate) fn set_state(&self, new_state: TimelineState) {
1810 334 : match (self.current_state(), new_state) {
1811 334 : (equal_state_1, equal_state_2) if equal_state_1 == equal_state_2 => {
1812 2 : info!("Ignoring new state, equal to the existing one: {equal_state_2:?}");
1813 : }
1814 0 : (st, TimelineState::Loading) => {
1815 0 : error!("ignoring transition from {st:?} into Loading state");
1816 : }
1817 0 : (TimelineState::Broken { .. }, new_state) => {
1818 0 : error!("Ignoring state update {new_state:?} for broken timeline");
1819 : }
1820 : (TimelineState::Stopping, TimelineState::Active) => {
1821 0 : error!("Not activating a Stopping timeline");
1822 : }
1823 332 : (_, new_state) => {
1824 332 : self.state.send_replace(new_state);
1825 332 : }
1826 : }
1827 334 : }
1828 :
1829 2 : pub(crate) fn set_broken(&self, reason: String) {
1830 2 : let backtrace_str: String = format!("{}", std::backtrace::Backtrace::force_capture());
1831 2 : let broken_state = TimelineState::Broken {
1832 2 : reason,
1833 2 : backtrace: backtrace_str,
1834 2 : };
1835 2 : self.set_state(broken_state);
1836 2 :
1837 2 : // Although the Broken state is not equivalent to shutdown() (shutdown will be called
1838 2 : // later when this tenant is detach or the process shuts down), firing the cancellation token
1839 2 : // here avoids the need for other tasks to watch for the Broken state explicitly.
1840 2 : self.cancel.cancel();
1841 2 : }
1842 :
1843 225895 : pub(crate) fn current_state(&self) -> TimelineState {
1844 225895 : self.state.borrow().clone()
1845 225895 : }
1846 :
1847 6 : pub(crate) fn is_broken(&self) -> bool {
1848 6 : matches!(&*self.state.borrow(), TimelineState::Broken { .. })
1849 6 : }
1850 :
1851 224503 : pub(crate) fn is_active(&self) -> bool {
1852 224503 : self.current_state() == TimelineState::Active
1853 224503 : }
1854 :
1855 1058 : pub(crate) fn is_stopping(&self) -> bool {
1856 1058 : self.current_state() == TimelineState::Stopping
1857 1058 : }
1858 :
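 : /// A sketch of waiting for a particular state via the returned watch channel
 : /// (illustrative; `wait_to_become_active` below does this with more care):
 : /// ```ignore
 : /// let mut rx = timeline.subscribe_for_state_updates();
 : /// while *rx.borrow() != TimelineState::Active {
 : ///     rx.changed().await?;
 : /// }
 : /// ```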
1859 0 : pub(crate) fn subscribe_for_state_updates(&self) -> watch::Receiver<TimelineState> {
1860 0 : self.state.subscribe()
1861 0 : }
1862 :
1863 224285 : pub(crate) async fn wait_to_become_active(
1864 224285 : &self,
1865 224285 : _ctx: &RequestContext, // Prepare for use by cancellation
1866 224285 : ) -> Result<(), TimelineState> {
1867 224285 : let mut receiver = self.state.subscribe();
1868 224285 : loop {
1869 224285 : let current_state = receiver.borrow().clone();
1870 224285 : match current_state {
1871 : TimelineState::Loading => {
1872 0 : receiver
1873 0 : .changed()
1874 0 : .await
1875 0 : .expect("holding a reference to self");
1876 : }
1877 : TimelineState::Active { .. } => {
1878 224283 : return Ok(());
1879 : }
1880 : TimelineState::Broken { .. } | TimelineState::Stopping => {
1881 : // There's no chance the timeline can transition back into ::Active
1882 2 : return Err(current_state);
1883 : }
1884 : }
1885 : }
1886 224285 : }
1887 :
1888 0 : pub(crate) async fn layer_map_info(&self, reset: LayerAccessStatsReset) -> LayerMapInfo {
1889 0 : let guard = self.layers.read().await;
1890 0 : let layer_map = guard.layer_map();
1891 0 : let mut in_memory_layers = Vec::with_capacity(layer_map.frozen_layers.len() + 1);
1892 0 : if let Some(open_layer) = &layer_map.open_layer {
1893 0 : in_memory_layers.push(open_layer.info());
1894 0 : }
1895 0 : for frozen_layer in &layer_map.frozen_layers {
1896 0 : in_memory_layers.push(frozen_layer.info());
1897 0 : }
1898 :
1899 0 : let mut historic_layers = Vec::new();
1900 0 : for historic_layer in layer_map.iter_historic_layers() {
1901 0 : let historic_layer = guard.get_from_desc(&historic_layer);
1902 0 : historic_layers.push(historic_layer.info(reset));
1903 0 : }
1904 :
1905 0 : LayerMapInfo {
1906 0 : in_memory_layers,
1907 0 : historic_layers,
1908 0 : }
1909 0 : }
1910 :
1911 0 : #[instrument(skip_all, fields(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))]
1912 : pub(crate) async fn download_layer(
1913 : &self,
1914 : layer_file_name: &LayerName,
1915 : ) -> anyhow::Result<Option<bool>> {
1916 : let Some(layer) = self.find_layer(layer_file_name).await else {
1917 : return Ok(None);
1918 : };
1919 :
1920 : if self.remote_client.is_none() {
1921 : return Ok(Some(false));
1922 : }
1923 :
1924 : layer.download().await?;
1925 :
1926 : Ok(Some(true))
1927 : }
1928 :
1929 : /// Evict just one layer.
1930 : ///
1931 : /// Returns `Ok(None)` in the case where the layer could not be found by its `layer_file_name`.
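 : ///
 : /// A sketch of a call site (names illustrative):
 : /// ```ignore
 : /// match timeline.evict_layer(&layer_name).await? {
 : ///     Some(true) => tracing::info!("layer evicted"),
 : ///     Some(false) => tracing::info!("layer not evicted (e.g. not resident, or timed out)"),
 : ///     None => tracing::info!("no layer by that name"),
 : /// }
 : /// ```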
1932 0 : pub(crate) async fn evict_layer(
1933 0 : &self,
1934 0 : layer_file_name: &LayerName,
1935 0 : ) -> anyhow::Result<Option<bool>> {
1936 0 : let _gate = self
1937 0 : .gate
1938 0 : .enter()
1939 0 : .map_err(|_| anyhow::anyhow!("Shutting down"))?;
1940 :
1941 0 : let Some(local_layer) = self.find_layer(layer_file_name).await else {
1942 0 : return Ok(None);
1943 : };
1944 :
1945 : // curl has this by default
1946 0 : let timeout = std::time::Duration::from_secs(120);
1947 0 :
1948 0 : match local_layer.evict_and_wait(timeout).await {
1949 0 : Ok(()) => Ok(Some(true)),
1950 0 : Err(EvictionError::NotFound) => Ok(Some(false)),
1951 0 : Err(EvictionError::Downloaded) => Ok(Some(false)),
1952 0 : Err(EvictionError::Timeout) => Ok(Some(false)),
1953 : }
1954 0 : }
1955 :
1956 58 : fn should_roll(
1957 58 : &self,
1958 58 : layer_size: u64,
1959 58 : projected_layer_size: u64,
1960 58 : checkpoint_distance: u64,
1961 58 : projected_lsn: Lsn,
1962 58 : last_freeze_at: Lsn,
1963 58 : opened_at: Instant,
1964 58 : ) -> bool {
1965 58 : let distance = projected_lsn.widening_sub(last_freeze_at);
1966 58 :
1967 58 : // Rolling the open layer can be triggered by:
1968 58 : // 1. The distance from the last LSN we rolled at. This bounds the amount of WAL that
1969 58 : // the safekeepers need to store. For sharded tenants, we multiply by shard count to
1970 58 : // account for how writes are distributed across shards: we expect each node to consume
1971 58 : // 1/count of the LSN on average.
1972 58 : // 2. The size of the currently open layer.
1973 58 : // 3. The time since the last roll. It helps safekeepers to regard pageserver as caught
1974 58 : // up and suspend activity.
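 : // Worked example (illustrative numbers): with checkpoint_distance = 256 MiB
 : // and a shard count of 4, trigger (1) fires once `distance` reaches 1 GiB of
 : // WAL since `last_freeze_at`; trigger (2) fires once the projected layer size
 : // reaches 256 MiB; trigger (3) fires once `checkpoint_timeout` has elapsed
 : // since the layer was opened, provided at least some WAL arrived (distance > 0).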
1975 58 : if distance >= checkpoint_distance as i128 * self.shard_identity.count.count() as i128 {
1976 0 : info!(
1977 0 : "Will roll layer at {} with layer size {} due to LSN distance ({})",
1978 : projected_lsn, layer_size, distance
1979 : );
1980 :
1981 0 : true
1982 58 : } else if projected_layer_size >= checkpoint_distance {
1983 0 : info!(
1984 0 : "Will roll layer at {} with layer size {} due to layer size ({})",
1985 : projected_lsn, layer_size, projected_layer_size
1986 : );
1987 :
1988 0 : true
1989 58 : } else if distance > 0 && opened_at.elapsed() >= self.get_checkpoint_timeout() {
1990 0 : info!(
1991 0 : "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})",
1992 0 : projected_lsn,
1993 0 : layer_size,
1994 0 : opened_at.elapsed()
1995 : );
1996 :
1997 0 : true
1998 : } else {
1999 58 : false
2000 : }
2001 58 : }
2002 : }
2003 :
2004 : /// Number of times we will recompute the partitioning within one checkpoint distance.
2005 : const REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE: u64 = 10;
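 : // For example, with a checkpoint_distance of 256 MiB this puts the repartition
 : // threshold at roughly 25 MiB of WAL (see the initialization of
 : // `repartition_threshold` in `Timeline::new`).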
2006 :
2007 : // Private functions
2008 : impl Timeline {
2009 138 : pub(crate) fn get_switch_aux_file_policy(&self) -> AuxFilePolicy {
2010 138 : let tenant_conf = self.tenant_conf.load();
2011 138 : tenant_conf
2012 138 : .tenant_conf
2013 138 : .switch_aux_file_policy
2014 138 : .unwrap_or(self.conf.default_tenant_conf.switch_aux_file_policy)
2015 138 : }
2016 :
2017 0 : pub(crate) fn get_lazy_slru_download(&self) -> bool {
2018 0 : let tenant_conf = self.tenant_conf.load();
2019 0 : tenant_conf
2020 0 : .tenant_conf
2021 0 : .lazy_slru_download
2022 0 : .unwrap_or(self.conf.default_tenant_conf.lazy_slru_download)
2023 0 : }
2024 :
2025 1484 : fn get_checkpoint_distance(&self) -> u64 {
2026 1484 : let tenant_conf = self.tenant_conf.load();
2027 1484 : tenant_conf
2028 1484 : .tenant_conf
2029 1484 : .checkpoint_distance
2030 1484 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
2031 1484 : }
2032 :
2033 58 : fn get_checkpoint_timeout(&self) -> Duration {
2034 58 : let tenant_conf = self.tenant_conf.load();
2035 58 : tenant_conf
2036 58 : .tenant_conf
2037 58 : .checkpoint_timeout
2038 58 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
2039 58 : }
2040 :
2041 432 : fn get_compaction_target_size(&self) -> u64 {
2042 432 : let tenant_conf = self.tenant_conf.load();
2043 432 : tenant_conf
2044 432 : .tenant_conf
2045 432 : .compaction_target_size
2046 432 : .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
2047 432 : }
2048 :
2049 330 : fn get_compaction_threshold(&self) -> usize {
2050 330 : let tenant_conf = self.tenant_conf.load();
2051 330 : tenant_conf
2052 330 : .tenant_conf
2053 330 : .compaction_threshold
2054 330 : .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
2055 330 : }
2056 :
2057 14 : fn get_image_creation_threshold(&self) -> usize {
2058 14 : let tenant_conf = self.tenant_conf.load();
2059 14 : tenant_conf
2060 14 : .tenant_conf
2061 14 : .image_creation_threshold
2062 14 : .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
2063 14 : }
2064 :
2065 330 : fn get_compaction_algorithm(&self) -> CompactionAlgorithm {
2066 330 : let tenant_conf = &self.tenant_conf.load();
2067 330 : tenant_conf
2068 330 : .tenant_conf
2069 330 : .compaction_algorithm
2070 330 : .unwrap_or(self.conf.default_tenant_conf.compaction_algorithm)
2071 330 : }
2072 :
2073 0 : fn get_eviction_policy(&self) -> EvictionPolicy {
2074 0 : let tenant_conf = self.tenant_conf.load();
2075 0 : tenant_conf
2076 0 : .tenant_conf
2077 0 : .eviction_policy
2078 0 : .unwrap_or(self.conf.default_tenant_conf.eviction_policy)
2079 0 : }
2080 :
2081 334 : fn get_evictions_low_residence_duration_metric_threshold(
2082 334 : tenant_conf: &TenantConfOpt,
2083 334 : default_tenant_conf: &TenantConf,
2084 334 : ) -> Duration {
2085 334 : tenant_conf
2086 334 : .evictions_low_residence_duration_metric_threshold
2087 334 : .unwrap_or(default_tenant_conf.evictions_low_residence_duration_metric_threshold)
2088 334 : }
2089 :
2090 762 : fn get_image_layer_creation_check_threshold(&self) -> u8 {
2091 762 : let tenant_conf = self.tenant_conf.load();
2092 762 : tenant_conf
2093 762 : .tenant_conf
2094 762 : .image_layer_creation_check_threshold
2095 762 : .unwrap_or(
2096 762 : self.conf
2097 762 : .default_tenant_conf
2098 762 : .image_layer_creation_check_threshold,
2099 762 : )
2100 762 : }
2101 :
2102 0 : pub(super) fn tenant_conf_updated(&self, new_conf: &TenantConfOpt) {
2103 0 : // NB: Most tenant conf options are read by background loops, so,
2104 0 : // changes will automatically be picked up.
2105 0 :
2106 0 : // The threshold is embedded in the metric. So, we need to update it.
2107 0 : {
2108 0 : let new_threshold = Self::get_evictions_low_residence_duration_metric_threshold(
2109 0 : new_conf,
2110 0 : &self.conf.default_tenant_conf,
2111 0 : );
2112 0 :
2113 0 : let tenant_id_str = self.tenant_shard_id.tenant_id.to_string();
2114 0 : let shard_id_str = format!("{}", self.tenant_shard_id.shard_slug());
2115 0 :
2116 0 : let timeline_id_str = self.timeline_id.to_string();
2117 0 : self.metrics
2118 0 : .evictions_with_low_residence_duration
2119 0 : .write()
2120 0 : .unwrap()
2121 0 : .change_threshold(
2122 0 : &tenant_id_str,
2123 0 : &shard_id_str,
2124 0 : &timeline_id_str,
2125 0 : new_threshold,
2126 0 : );
2127 0 : }
2128 0 : }
2129 :
2130 : /// Open a Timeline handle.
2131 : ///
2132 : /// Loads the metadata for the timeline into memory, but not the layer map.
2133 : #[allow(clippy::too_many_arguments)]
2134 334 : pub(super) fn new(
2135 334 : conf: &'static PageServerConf,
2136 334 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
2137 334 : metadata: &TimelineMetadata,
2138 334 : ancestor: Option<Arc<Timeline>>,
2139 334 : timeline_id: TimelineId,
2140 334 : tenant_shard_id: TenantShardId,
2141 334 : generation: Generation,
2142 334 : shard_identity: ShardIdentity,
2143 334 : walredo_mgr: Option<Arc<super::WalRedoManager>>,
2144 334 : resources: TimelineResources,
2145 334 : pg_version: u32,
2146 334 : state: TimelineState,
2147 334 : cancel: CancellationToken,
2148 334 : ) -> Arc<Self> {
2149 334 : let disk_consistent_lsn = metadata.disk_consistent_lsn();
2150 334 : let (state, _) = watch::channel(state);
2151 334 :
2152 334 : let (layer_flush_start_tx, _) = tokio::sync::watch::channel((0, disk_consistent_lsn));
2153 334 : let (layer_flush_done_tx, _) = tokio::sync::watch::channel((0, Ok(())));
2154 334 :
2155 334 : let evictions_low_residence_duration_metric_threshold = {
2156 334 : let loaded_tenant_conf = tenant_conf.load();
2157 334 : Self::get_evictions_low_residence_duration_metric_threshold(
2158 334 : &loaded_tenant_conf.tenant_conf,
2159 334 : &conf.default_tenant_conf,
2160 334 : )
2161 334 : };
2162 334 :
2163 334 : Arc::new_cyclic(|myself| {
2164 334 : let mut result = Timeline {
2165 334 : conf,
2166 334 : tenant_conf,
2167 334 : myself: myself.clone(),
2168 334 : timeline_id,
2169 334 : tenant_shard_id,
2170 334 : generation,
2171 334 : shard_identity,
2172 334 : pg_version,
2173 334 : layers: Default::default(),
2174 334 :
2175 334 : walredo_mgr,
2176 334 : walreceiver: Mutex::new(None),
2177 334 :
2178 334 : remote_client: resources.remote_client.map(Arc::new),
2179 334 :
2180 334 : // initialize in-memory 'last_record_lsn' from 'disk_consistent_lsn'.
2181 334 : last_record_lsn: SeqWait::new(RecordLsn {
2182 334 : last: disk_consistent_lsn,
2183 334 : prev: metadata.prev_record_lsn().unwrap_or(Lsn(0)),
2184 334 : }),
2185 334 : disk_consistent_lsn: AtomicLsn::new(disk_consistent_lsn.0),
2186 334 :
2187 334 : last_freeze_at: AtomicLsn::new(disk_consistent_lsn.0),
2188 334 : last_freeze_ts: RwLock::new(Instant::now()),
2189 334 :
2190 334 : loaded_at: (disk_consistent_lsn, SystemTime::now()),
2191 334 :
2192 334 : ancestor_timeline: ancestor,
2193 334 : ancestor_lsn: metadata.ancestor_lsn(),
2194 334 :
2195 334 : metrics: TimelineMetrics::new(
2196 334 : &tenant_shard_id,
2197 334 : &timeline_id,
2198 334 : crate::metrics::EvictionsWithLowResidenceDurationBuilder::new(
2199 334 : "mtime",
2200 334 : evictions_low_residence_duration_metric_threshold,
2201 334 : ),
2202 334 : ),
2203 334 :
2204 334 : query_metrics: crate::metrics::SmgrQueryTimePerTimeline::new(
2205 334 : &tenant_shard_id,
2206 334 : &timeline_id,
2207 334 : ),
2208 334 :
2209 2338 : directory_metrics: array::from_fn(|_| AtomicU64::new(0)),
2210 334 :
2211 334 : flush_loop_state: Mutex::new(FlushLoopState::NotStarted),
2212 334 :
2213 334 : layer_flush_start_tx,
2214 334 : layer_flush_done_tx,
2215 334 :
2216 334 : write_lock: tokio::sync::Mutex::new(None),
2217 334 :
2218 334 : gc_info: std::sync::RwLock::new(GcInfo::default()),
2219 334 :
2220 334 : latest_gc_cutoff_lsn: Rcu::new(metadata.latest_gc_cutoff_lsn()),
2221 334 : initdb_lsn: metadata.initdb_lsn(),
2222 334 :
2223 334 : current_logical_size: if disk_consistent_lsn.is_valid() {
2224 : // we're creating timeline data with some layer files existing locally,
2225 : // need to recalculate timeline's logical size based on data in the layers.
2226 224 : LogicalSize::deferred_initial(disk_consistent_lsn)
2227 : } else {
2228 : // we're creating timeline data without any layers existing locally,
2229 : // initial logical size is 0.
2230 110 : LogicalSize::empty_initial()
2231 : },
2232 334 : partitioning: tokio::sync::Mutex::new((
2233 334 : (KeyPartitioning::new(), KeyPartitioning::new().into_sparse()),
2234 334 : Lsn(0),
2235 334 : )),
2236 334 : repartition_threshold: 0,
2237 334 : last_image_layer_creation_check_at: AtomicLsn::new(0),
2238 334 :
2239 334 : last_received_wal: Mutex::new(None),
2240 334 : rel_size_cache: RwLock::new(RelSizeCache {
2241 334 : complete_as_of: disk_consistent_lsn,
2242 334 : map: HashMap::new(),
2243 334 : }),
2244 334 :
2245 334 : download_all_remote_layers_task_info: RwLock::new(None),
2246 334 :
2247 334 : state,
2248 334 :
2249 334 : eviction_task_timeline_state: tokio::sync::Mutex::new(
2250 334 : EvictionTaskTimelineState::default(),
2251 334 : ),
2252 334 : delete_progress: Arc::new(tokio::sync::Mutex::new(DeleteTimelineFlow::default())),
2253 334 :
2254 334 : cancel,
2255 334 : gate: Gate::default(),
2256 334 :
2257 334 : compaction_lock: tokio::sync::Mutex::default(),
2258 334 : gc_lock: tokio::sync::Mutex::default(),
2259 334 :
2260 334 : timeline_get_throttle: resources.timeline_get_throttle,
2261 334 :
2262 334 : aux_files: tokio::sync::Mutex::new(AuxFilesState {
2263 334 : dir: None,
2264 334 : n_deltas: 0,
2265 334 : }),
2266 334 : };
2267 334 : result.repartition_threshold =
2268 334 : result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE;
2269 334 :
2270 334 : result
2271 334 : .metrics
2272 334 : .last_record_gauge
2273 334 : .set(disk_consistent_lsn.0 as i64);
2274 334 : result
2275 334 : })
2276 334 : }
2277 :
2278 430 : pub(super) fn maybe_spawn_flush_loop(self: &Arc<Self>) {
2279 430 : let Ok(guard) = self.gate.enter() else {
2280 0 : info!("cannot start flush loop when the timeline gate has already been closed");
2281 0 : return;
2282 : };
2283 430 : let mut flush_loop_state = self.flush_loop_state.lock().unwrap();
2284 430 : match *flush_loop_state {
2285 328 : FlushLoopState::NotStarted => (),
2286 : FlushLoopState::Running { .. } => {
2287 102 : info!(
2288 0 : "skipping attempt to start flush_loop twice {}/{}",
2289 0 : self.tenant_shard_id, self.timeline_id
2290 : );
2291 102 : return;
2292 : }
2293 : FlushLoopState::Exited => {
2294 0 : warn!(
2295 0 : "ignoring attempt to restart exited flush_loop {}/{}",
2296 0 : self.tenant_shard_id, self.timeline_id
2297 : );
2298 0 : return;
2299 : }
2300 : }
2301 :
2302 328 : let layer_flush_start_rx = self.layer_flush_start_tx.subscribe();
2303 328 : let self_clone = Arc::clone(self);
2304 328 :
2305 328 : debug!("spawning flush loop");
2306 328 : *flush_loop_state = FlushLoopState::Running {
2307 328 : #[cfg(test)]
2308 328 : expect_initdb_optimization: false,
2309 328 : #[cfg(test)]
2310 328 : initdb_optimization_count: 0,
2311 328 : };
2312 328 : task_mgr::spawn(
2313 328 : task_mgr::BACKGROUND_RUNTIME.handle(),
2314 328 : task_mgr::TaskKind::LayerFlushTask,
2315 328 : Some(self.tenant_shard_id),
2316 328 : Some(self.timeline_id),
2317 328 : "layer flush task",
2318 : false,
2319 328 : async move {
2320 328 : let _guard = guard;
2321 328 : let background_ctx = RequestContext::todo_child(TaskKind::LayerFlushTask, DownloadBehavior::Error);
2322 58339 : self_clone.flush_loop(layer_flush_start_rx, &background_ctx).await;
2323 8 : let mut flush_loop_state = self_clone.flush_loop_state.lock().unwrap();
2324 8 : assert!(matches!(*flush_loop_state, FlushLoopState::Running{ ..}));
2325 8 : *flush_loop_state = FlushLoopState::Exited;
2326 8 : Ok(())
2327 8 : }
2328 328 : .instrument(info_span!(parent: None, "layer flush task", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
2329 : );
2330 430 : }
2331 :
2332 : /// Creates and starts the wal receiver.
2333 : ///
2334 : /// This function is expected to be called at most once per Timeline's lifecycle
2335 : /// when the timeline is activated.
2336 0 : fn launch_wal_receiver(
2337 0 : self: &Arc<Self>,
2338 0 : ctx: &RequestContext,
2339 0 : broker_client: BrokerClientChannel,
2340 0 : ) {
2341 0 : info!(
2342 0 : "launching WAL receiver for timeline {} of tenant {}",
2343 0 : self.timeline_id, self.tenant_shard_id
2344 : );
2345 :
2346 0 : let tenant_conf = self.tenant_conf.load();
2347 0 : let wal_connect_timeout = tenant_conf
2348 0 : .tenant_conf
2349 0 : .walreceiver_connect_timeout
2350 0 : .unwrap_or(self.conf.default_tenant_conf.walreceiver_connect_timeout);
2351 0 : let lagging_wal_timeout = tenant_conf
2352 0 : .tenant_conf
2353 0 : .lagging_wal_timeout
2354 0 : .unwrap_or(self.conf.default_tenant_conf.lagging_wal_timeout);
2355 0 : let max_lsn_wal_lag = tenant_conf
2356 0 : .tenant_conf
2357 0 : .max_lsn_wal_lag
2358 0 : .unwrap_or(self.conf.default_tenant_conf.max_lsn_wal_lag);
2359 0 :
2360 0 : let mut guard = self.walreceiver.lock().unwrap();
2361 0 : assert!(
2362 0 : guard.is_none(),
2363 0 : "multiple launches / re-launches of WAL receiver are not supported"
2364 : );
2365 0 : *guard = Some(WalReceiver::start(
2366 0 : Arc::clone(self),
2367 0 : WalReceiverConf {
2368 0 : wal_connect_timeout,
2369 0 : lagging_wal_timeout,
2370 0 : max_lsn_wal_lag,
2371 0 : auth_token: crate::config::SAFEKEEPER_AUTH_TOKEN.get().cloned(),
2372 0 : availability_zone: self.conf.availability_zone.clone(),
2373 0 : ingest_batch_size: self.conf.ingest_batch_size,
2374 0 : },
2375 0 : broker_client,
2376 0 : ctx,
2377 0 : ));
2378 0 : }
2379 :
2380 : /// Initialize with an empty layer map. Used when creating a new timeline.
2381 328 : pub(super) fn init_empty_layer_map(&self, start_lsn: Lsn) {
2382 328 : let mut layers = self.layers.try_write().expect(
2383 328 : "in the context where we call this function, no other task has access to the object",
2384 328 : );
2385 328 : layers.initialize_empty(Lsn(start_lsn.0));
2386 328 : }
2387 :
2388 : /// Scan the timeline directory, clean up temporary files, populate the layer map, and
2389 : /// schedule uploads for local-only files.
2390 6 : pub(super) async fn load_layer_map(
2391 6 : &self,
2392 6 : disk_consistent_lsn: Lsn,
2393 6 : index_part: Option<IndexPart>,
2394 6 : ) -> anyhow::Result<()> {
2395 : use init::{Decision::*, Discovered, DismissedLayer};
2396 : use LayerName::*;
2397 :
2398 6 : let mut guard = self.layers.write().await;
2399 :
2400 6 : let timer = self.metrics.load_layer_map_histo.start_timer();
2401 6 :
2402 6 : // Scan timeline directory and create ImageLayerName and DeltaLayerName
2403 6 : // structs representing all files on disk
2404 6 : let timeline_path = self
2405 6 : .conf
2406 6 : .timeline_path(&self.tenant_shard_id, &self.timeline_id);
2407 6 : let conf = self.conf;
2408 6 : let span = tracing::Span::current();
2409 6 :
2410 6 : // Copy to move into the task we're about to spawn
2411 6 : let generation = self.generation;
2412 6 : let shard = self.get_shard_index();
2413 6 : let this = self.myself.upgrade().expect("&self method holds the arc");
2414 :
2415 6 : let (loaded_layers, needs_cleanup, total_physical_size) = tokio::task::spawn_blocking({
2416 6 : move || {
2417 6 : let _g = span.entered();
2418 6 : let discovered = init::scan_timeline_dir(&timeline_path)?;
2419 6 : let mut discovered_layers = Vec::with_capacity(discovered.len());
2420 6 : let mut unrecognized_files = Vec::new();
2421 6 :
2422 6 : let mut path = timeline_path;
2423 :
2424 22 : for discovered in discovered {
2425 16 : let (name, kind) = match discovered {
2426 16 : Discovered::Layer(layer_file_name, local_path, file_size) => {
2427 16 : discovered_layers.push((layer_file_name, local_path, file_size));
2428 16 : continue;
2429 : }
2430 : Discovered::Metadata => {
2431 0 : warn!("found legacy metadata file, these should have been removed in load_tenant_config");
2432 0 : continue;
2433 : }
2434 : Discovered::IgnoredBackup => {
2435 0 : continue;
2436 : }
2437 0 : Discovered::Unknown(file_name) => {
2438 0 : // we will later error if there are any
2439 0 : unrecognized_files.push(file_name);
2440 0 : continue;
2441 : }
2442 0 : Discovered::Ephemeral(name) => (name, "old ephemeral file"),
2443 0 : Discovered::Temporary(name) => (name, "temporary timeline file"),
2444 0 : Discovered::TemporaryDownload(name) => (name, "temporary download"),
2445 : };
2446 0 : path.push(Utf8Path::new(&name));
2447 0 : init::cleanup(&path, kind)?;
2448 0 : path.pop();
2449 : }
2450 :
2451 6 : if !unrecognized_files.is_empty() {
2452 : // assume that if there are any, there are many.
2453 0 : let n = unrecognized_files.len();
2454 0 : let first = &unrecognized_files[..n.min(10)];
2455 0 : anyhow::bail!(
2456 0 : "unrecognized files in timeline dir (total {n}), first 10: {first:?}"
2457 0 : );
2458 6 : }
2459 6 :
2460 6 : let decided = init::reconcile(
2461 6 : discovered_layers,
2462 6 : index_part.as_ref(),
2463 6 : disk_consistent_lsn,
2464 6 : generation,
2465 6 : shard,
2466 6 : );
2467 6 :
2468 6 : let mut loaded_layers = Vec::new();
2469 6 : let mut needs_cleanup = Vec::new();
2470 6 : let mut total_physical_size = 0;
2471 :
2472 22 : for (name, decision) in decided {
2473 16 : let decision = match decision {
2474 0 : Ok(UseRemote { local, remote }) => {
2475 0 : // Remote is authoritative, but we may still choose to retain
2476 0 : // the local file if the contents appear to match
2477 0 : if local.metadata.file_size() == remote.file_size() {
2478 : // Use the local file, but take the remote metadata so that we pick up
2479 : // the correct generation.
2480 0 : UseLocal(
2481 0 : LocalLayerFileMetadata {
2482 0 : metadata: remote,
2483 0 : local_path: local.local_path
2484 0 : }
2485 0 : )
2486 : } else {
2487 0 : init::cleanup_local_file_for_remote(&local, &remote)?;
2488 0 : UseRemote { local, remote }
2489 : }
2490 : }
2491 16 : Ok(decision) => decision,
2492 0 : Err(DismissedLayer::Future { local }) => {
2493 0 : if let Some(local) = local {
2494 0 : init::cleanup_future_layer(&local.local_path, &name, disk_consistent_lsn)?;
2495 0 : }
2496 0 : needs_cleanup.push(name);
2497 0 : continue;
2498 : }
2499 0 : Err(DismissedLayer::LocalOnly(local)) => {
2500 0 : init::cleanup_local_only_file(&name, &local)?;
2501 : // this file never existed remotely, we will have to do rework
2502 0 : continue;
2503 : }
2504 : };
2505 :
2506 16 : match &name {
2507 12 : Delta(d) => assert!(d.lsn_range.end <= disk_consistent_lsn + 1),
2508 4 : Image(i) => assert!(i.lsn <= disk_consistent_lsn),
2509 : }
2510 :
2511 16 : tracing::debug!(layer=%name, ?decision, "applied");
2512 :
2513 16 : let layer = match decision {
2514 16 : UseLocal(local) => {
2515 16 : total_physical_size += local.metadata.file_size();
2516 16 : Layer::for_resident(conf, &this, local.local_path, name, local.metadata).drop_eviction_guard()
2517 : }
2518 0 : Evicted(remote) | UseRemote { remote, .. } => {
2519 0 : Layer::for_evicted(conf, &this, name, remote)
2520 : }
2521 : };
2522 :
2523 16 : loaded_layers.push(layer);
2524 : }
2525 6 : Ok((loaded_layers, needs_cleanup, total_physical_size))
2526 6 : }
2527 6 : })
2528 6 : .await
2529 6 : .map_err(anyhow::Error::new)
2530 6 : .and_then(|x| x)?;
2531 :
2532 6 : let num_layers = loaded_layers.len();
2533 6 :
2534 6 : guard.initialize_local_layers(loaded_layers, disk_consistent_lsn + 1);
2535 :
2536 6 : if let Some(rtc) = self.remote_client.as_ref() {
2537 6 : rtc.schedule_layer_file_deletion(&needs_cleanup)?;
2538 6 : rtc.schedule_index_upload_for_file_changes()?;
2539 : // This barrier orders above DELETEs before any later operations.
2540 : // This is critical because code executing after the barrier might
2541 : // create again objects with the same key that we just scheduled for deletion.
2542 : // For example, if we just scheduled deletion of an image layer "from the future",
2543 : // later compaction might run again and re-create the same image layer.
2544 : // "from the future" here means an image layer whose LSN is > IndexPart::disk_consistent_lsn.
2545 : // "same" here means same key range and LSN.
2546 : //
2547 : // Without a barrier between above DELETEs and the re-creation's PUTs,
2548 : // the upload queue may execute the PUT first, then the DELETE.
2549 : // In our example, we will end up with an IndexPart referencing a non-existent object.
2550 : //
2551 : // 1. a future image layer is created and uploaded
2552 : // 2. ps restart
2553 : // 3. the future layer from (1) is deleted during load layer map
2554 : // 4. image layer is re-created and uploaded
2555 : // 5. deletion queue would like to delete (1) but actually deletes (4)
2556 : // 6. delete by name works as expected, but it now deletes the wrong (later) version
2557 : //
2558 : // See https://github.com/neondatabase/neon/issues/5878
2559 : //
2560 : // NB: generation numbers naturally protect against this because they disambiguate
2561 : // (1) and (4)
2562 6 : rtc.schedule_barrier()?;
2563 : // Tenant::create_timeline will wait for these uploads to happen before returning, or
2564 : // on retry.
2565 0 : }
2566 :
2567 6 : info!(
2568 0 : "loaded layer map with {} layers at {}, total physical size: {}",
2569 : num_layers, disk_consistent_lsn, total_physical_size
2570 : );
2571 :
2572 6 : timer.stop_and_record();
2573 6 : Ok(())
2574 6 : }
2575 :
2576 : /// Retrieve the current logical size of the timeline.
2577 : ///
2578 : /// The size may lag behind the actual value if the initial size calculation
2579 : /// has not yet run (it is triggered on the first size access).
2580 : ///
2581 : /// Returns the size together with a flag that shows whether the size is exact.
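 : ///
 : /// A sketch of interpreting the result (illustrative):
 : /// ```ignore
 : /// match timeline.get_current_logical_size(GetLogicalSizePriority::User, &ctx) {
 : ///     CurrentLogicalSize::Exact(size) => { /* initial calculation has completed */ }
 : ///     CurrentLogicalSize::Approximate(size) => { /* may still be lagging */ }
 : /// }
 : /// ```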
2582 0 : pub(crate) fn get_current_logical_size(
2583 0 : self: &Arc<Self>,
2584 0 : priority: GetLogicalSizePriority,
2585 0 : ctx: &RequestContext,
2586 0 : ) -> logical_size::CurrentLogicalSize {
2587 0 : if !self.tenant_shard_id.is_shard_zero() {
2588 : // Logical size is only accurately maintained on shard zero: when called elsewhere, for example
2589 : // when HTTP API is serving a GET for timeline zero, return zero
2590 0 : return logical_size::CurrentLogicalSize::Approximate(logical_size::Approximate::zero());
2591 0 : }
2592 0 :
2593 0 : let current_size = self.current_logical_size.current_size();
2594 0 : debug!("Current size: {current_size:?}");
2595 :
2596 0 : match (current_size.accuracy(), priority) {
2597 0 : (logical_size::Accuracy::Exact, _) => (), // nothing to do
2598 0 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::Background) => {
2599 0 : // background task will eventually deliver an exact value, we're in no rush
2600 0 : }
2601 : (logical_size::Accuracy::Approximate, GetLogicalSizePriority::User) => {
2602 : // background task is not ready, but user is asking for it now;
2603 : // => make the background task skip the line
2604 : // (The alternative would be to calculate the size here, but
2605 : // that can actually take a long time if the user has a lot of rels.
2606 : // And we'll inevitably need it again; so, let the background task do the work.)
2607 0 : match self
2608 0 : .current_logical_size
2609 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
2610 0 : .get()
2611 : {
2612 0 : Some(cancel) => cancel.cancel(),
2613 : None => {
2614 0 : let state = self.current_state();
2615 0 : if matches!(
2616 0 : state,
2617 : TimelineState::Broken { .. } | TimelineState::Stopping
2618 0 : ) {
2619 0 :
2620 0 : // Can happen when timeline detail endpoint is used when deletion is ongoing (or its broken).
2621 0 : // Don't make noise.
2622 0 : } else {
2623 0 : warn!("unexpected: cancel_wait_for_background_loop_concurrency_limit_semaphore not set, priority-boosting of logical size calculation will not work");
2624 : }
2625 : }
2626 : };
2627 : }
2628 : }
2629 :
2630 0 : if let CurrentLogicalSize::Approximate(_) = &current_size {
2631 0 : if ctx.task_kind() == TaskKind::WalReceiverConnectionHandler {
2632 0 : let first = self
2633 0 : .current_logical_size
2634 0 : .did_return_approximate_to_walreceiver
2635 0 : .compare_exchange(
2636 0 : false,
2637 0 : true,
2638 0 : AtomicOrdering::Relaxed,
2639 0 : AtomicOrdering::Relaxed,
2640 0 : )
2641 0 : .is_ok();
2642 0 : if first {
2643 0 : crate::metrics::initial_logical_size::TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE.inc();
2644 0 : }
2645 0 : }
2646 0 : }
2647 :
2648 0 : current_size
2649 0 : }
2650 :
2651 0 : fn spawn_initial_logical_size_computation_task(self: &Arc<Self>, ctx: &RequestContext) {
2652 0 : let Some(initial_part_end) = self.current_logical_size.initial_part_end else {
2653 : // nothing to do for freshly created timelines;
2654 0 : assert_eq!(
2655 0 : self.current_logical_size.current_size().accuracy(),
2656 0 : logical_size::Accuracy::Exact,
2657 0 : );
2658 0 : self.current_logical_size.initialized.add_permits(1);
2659 0 : return;
2660 : };
2661 :
2662 0 : let cancel_wait_for_background_loop_concurrency_limit_semaphore = CancellationToken::new();
2663 0 : let token = cancel_wait_for_background_loop_concurrency_limit_semaphore.clone();
2664 0 : self.current_logical_size
2665 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore.set(token)
2666 0 : .expect("initial logical size calculation task must be spawned exactly once per Timeline object");
2667 0 :
2668 0 : let self_clone = Arc::clone(self);
2669 0 : let background_ctx = ctx.detached_child(
2670 0 : TaskKind::InitialLogicalSizeCalculation,
2671 0 : DownloadBehavior::Download,
2672 0 : );
2673 0 : task_mgr::spawn(
2674 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
2675 0 : task_mgr::TaskKind::InitialLogicalSizeCalculation,
2676 0 : Some(self.tenant_shard_id),
2677 0 : Some(self.timeline_id),
2678 0 : "initial size calculation",
2679 : false,
2680 : // NB: don't log errors here, task_mgr will do that.
2681 0 : async move {
2682 0 : let cancel = task_mgr::shutdown_token();
2683 0 : self_clone
2684 0 : .initial_logical_size_calculation_task(
2685 0 : initial_part_end,
2686 0 : cancel_wait_for_background_loop_concurrency_limit_semaphore,
2687 0 : cancel,
2688 0 : background_ctx,
2689 0 : )
2690 0 : .await;
2691 0 : Ok(())
2692 0 : }
2693 0 : .instrument(info_span!(parent: None, "initial_size_calculation", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id)),
2694 : );
2695 0 : }
2696 :
2697 0 : async fn initial_logical_size_calculation_task(
2698 0 : self: Arc<Self>,
2699 0 : initial_part_end: Lsn,
2700 0 : skip_concurrency_limiter: CancellationToken,
2701 0 : cancel: CancellationToken,
2702 0 : background_ctx: RequestContext,
2703 0 : ) {
2704 : scopeguard::defer! {
2705 : // Irrespective of the outcome of this operation, we should unblock anyone waiting for it.
2706 : self.current_logical_size.initialized.add_permits(1);
2707 : }
2708 :
2709 : enum BackgroundCalculationError {
2710 : Cancelled,
2711 : Other(anyhow::Error),
2712 : }
2713 :
2714 0 : let try_once = |attempt: usize| {
2715 0 : let background_ctx = &background_ctx;
2716 0 : let self_ref = &self;
2717 0 : let skip_concurrency_limiter = &skip_concurrency_limiter;
2718 0 : async move {
2719 0 : let cancel = task_mgr::shutdown_token();
2720 0 : let wait_for_permit = super::tasks::concurrent_background_tasks_rate_limit_permit(
2721 0 : BackgroundLoopKind::InitialLogicalSizeCalculation,
2722 0 : background_ctx,
2723 0 : );
2724 :
2725 : use crate::metrics::initial_logical_size::StartCircumstances;
2726 0 : let (_maybe_permit, circumstances) = tokio::select! {
2727 : permit = wait_for_permit => {
2728 : (Some(permit), StartCircumstances::AfterBackgroundTasksRateLimit)
2729 : }
2730 : _ = self_ref.cancel.cancelled() => {
2731 : return Err(BackgroundCalculationError::Cancelled);
2732 : }
2733 : _ = cancel.cancelled() => {
2734 : return Err(BackgroundCalculationError::Cancelled);
2735 : },
2736 : () = skip_concurrency_limiter.cancelled() => {
2737 : // Some action that is part of an end-user interaction requested the logical size
2738 : // => break out of the rate limit
2739 : // TODO: ideally we'd not run on BackgroundRuntime but the requester's runtime;
2740 : // but then again what happens if they cancel; also, we should just be using
2741 : // one runtime across the entire process, so, let's leave this for now.
2742 : (None, StartCircumstances::SkippedConcurrencyLimiter)
2743 : }
2744 : };
2745 :
2746 0 : let metrics_guard = if attempt == 1 {
2747 0 : crate::metrics::initial_logical_size::START_CALCULATION.first(circumstances)
2748 : } else {
2749 0 : crate::metrics::initial_logical_size::START_CALCULATION.retry(circumstances)
2750 : };
2751 :
2752 0 : match self_ref
2753 0 : .logical_size_calculation_task(
2754 0 : initial_part_end,
2755 0 : LogicalSizeCalculationCause::Initial,
2756 0 : background_ctx,
2757 0 : )
2758 0 : .await
2759 : {
2760 0 : Ok(calculated_size) => Ok((calculated_size, metrics_guard)),
2761 : Err(CalculateLogicalSizeError::Cancelled) => {
2762 0 : Err(BackgroundCalculationError::Cancelled)
2763 : }
2764 0 : Err(CalculateLogicalSizeError::Other(err)) => {
2765 0 : if let Some(PageReconstructError::AncestorStopping(_)) =
2766 0 : err.root_cause().downcast_ref()
2767 : {
2768 0 : Err(BackgroundCalculationError::Cancelled)
2769 : } else {
2770 0 : Err(BackgroundCalculationError::Other(err))
2771 : }
2772 : }
2773 : }
2774 0 : }
2775 0 : };
2776 :
2777 0 : let retrying = async {
2778 0 : let mut attempt = 0;
2779 0 : loop {
2780 0 : attempt += 1;
2781 0 :
2782 0 : match try_once(attempt).await {
2783 0 : Ok(res) => return ControlFlow::Continue(res),
2784 0 : Err(BackgroundCalculationError::Cancelled) => return ControlFlow::Break(()),
2785 0 : Err(BackgroundCalculationError::Other(e)) => {
2786 0 : warn!(attempt, "initial size calculation failed: {e:?}");
2787 : // exponential back-off doesn't make sense at these long intervals;
2788 : // use fixed retry interval with generous jitter instead
2789 0 : let sleep_duration = Duration::from_secs(
2790 0 : u64::try_from(
2791 0 : // 1hour base
2792 0 : (60_i64 * 60_i64)
2793 0 : // 10min jitter
2794 0 : + rand::thread_rng().gen_range(-10 * 60..10 * 60),
2795 0 : )
2796 0 : .expect("10min < 1hour"),
2797 0 : );
2798 0 : tokio::time::sleep(sleep_duration).await;
2799 : }
2800 : }
2801 : }
2802 0 : };
2803 :
2804 0 : let (calculated_size, metrics_guard) = tokio::select! {
2805 : res = retrying => {
2806 : match res {
2807 : ControlFlow::Continue(calculated_size) => calculated_size,
2808 : ControlFlow::Break(()) => return,
2809 : }
2810 : }
2811 : _ = cancel.cancelled() => {
2812 : return;
2813 : }
2814 : };
2815 :
2816 : // we cannot query current_logical_size.current_size() to learn the current
2817 : // *negative* delta, because it is truncated to u64.
2818 0 : let added = self
2819 0 : .current_logical_size
2820 0 : .size_added_after_initial
2821 0 : .load(AtomicOrdering::Relaxed);
2822 0 :
2823 0 : let sum = calculated_size.saturating_add_signed(added);
2824 0 :
2825 0 : // set the gauge value before it can be set in `update_current_logical_size`.
2826 0 : self.metrics.current_logical_size_gauge.set(sum);
2827 0 :
2828 0 : self.current_logical_size
2829 0 : .initial_logical_size
2830 0 : .set((calculated_size, metrics_guard.calculation_result_saved()))
2831 0 : .ok()
2832 0 : .expect("only this task sets it");
2833 0 : }
2834 :
2835 0 : pub(crate) fn spawn_ondemand_logical_size_calculation(
2836 0 : self: &Arc<Self>,
2837 0 : lsn: Lsn,
2838 0 : cause: LogicalSizeCalculationCause,
2839 0 : ctx: RequestContext,
2840 0 : ) -> oneshot::Receiver<Result<u64, CalculateLogicalSizeError>> {
2841 0 : let (sender, receiver) = oneshot::channel();
2842 0 : let self_clone = Arc::clone(self);
2843 0 : // XXX if our caller loses interest, i.e., ctx is cancelled,
2844 0 : // we should stop the size calculation work and return an error.
2845 0 : // That would require restructuring this function's API to
2846 0 : // return the result directly, instead of a Receiver for the result.
2847 0 : let ctx = ctx.detached_child(
2848 0 : TaskKind::OndemandLogicalSizeCalculation,
2849 0 : DownloadBehavior::Download,
2850 0 : );
2851 0 : task_mgr::spawn(
2852 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
2853 0 : task_mgr::TaskKind::OndemandLogicalSizeCalculation,
2854 0 : Some(self.tenant_shard_id),
2855 0 : Some(self.timeline_id),
2856 0 : "ondemand logical size calculation",
2857 0 : false,
2858 0 : async move {
2859 0 : let res = self_clone
2860 0 : .logical_size_calculation_task(lsn, cause, &ctx)
2861 0 : .await;
2862 0 : let _ = sender.send(res).ok();
2863 0 : Ok(()) // Receiver is responsible for handling errors
2864 0 : }
2865 0 : .in_current_span(),
2866 0 : );
2867 0 : receiver
2868 0 : }
2869 :
2870 : /// # Cancel-Safety
2871 : ///
2872 : /// This method is cancellation-safe.
2873 0 : #[instrument(skip_all)]
2874 : async fn logical_size_calculation_task(
2875 : self: &Arc<Self>,
2876 : lsn: Lsn,
2877 : cause: LogicalSizeCalculationCause,
2878 : ctx: &RequestContext,
2879 : ) -> Result<u64, CalculateLogicalSizeError> {
2880 : crate::span::debug_assert_current_span_has_tenant_and_timeline_id();
2881 : // We should never be calculating logical sizes on shard !=0, because these shards do not have
2882 : // accurate relation sizes, and they do not emit consumption metrics.
2883 : debug_assert!(self.tenant_shard_id.is_shard_zero());
2884 :
2885 : let guard = self
2886 : .gate
2887 : .enter()
2888 0 : .map_err(|_| CalculateLogicalSizeError::Cancelled)?;
2889 :
2890 : let self_calculation = Arc::clone(self);
2891 :
2892 0 : let mut calculation = pin!(async {
2893 0 : let ctx = ctx.attached_child();
2894 0 : self_calculation
2895 0 : .calculate_logical_size(lsn, cause, &guard, &ctx)
2896 0 : .await
2897 0 : });
2898 :
2899 : tokio::select! {
2900 : res = &mut calculation => { res }
2901 : _ = self.cancel.cancelled() => {
2902 : debug!("cancelling logical size calculation for timeline shutdown");
2903 : calculation.await
2904 : }
2905 : }
2906 : }
2907 :
2908 : /// Calculate the logical size of the database at the latest LSN.
2909 : ///
2910 : /// NOTE: counted incrementally, includes ancestors. This can be a slow operation,
2911 : /// especially if we need to download remote layers.
2912 : ///
2913 : /// # Cancel-Safety
2914 : ///
2915 : /// This method is cancellation-safe.
2916 0 : async fn calculate_logical_size(
2917 0 : &self,
2918 0 : up_to_lsn: Lsn,
2919 0 : cause: LogicalSizeCalculationCause,
2920 0 : _guard: &GateGuard,
2921 0 : ctx: &RequestContext,
2922 0 : ) -> Result<u64, CalculateLogicalSizeError> {
2923 0 : info!(
2924 0 : "Calculating logical size for timeline {} at {}",
2925 : self.timeline_id, up_to_lsn
2926 : );
2927 :
2928 : pausable_failpoint!("timeline-calculate-logical-size-pause");
2929 :
2930 : // See if we've already done the work for initial size calculation.
2931 : // This is a short-cut for timelines that are mostly unused.
2932 0 : if let Some(size) = self.current_logical_size.initialized_size(up_to_lsn) {
2933 0 : return Ok(size);
2934 0 : }
2935 0 : let storage_time_metrics = match cause {
2936 : LogicalSizeCalculationCause::Initial
2937 : | LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize
2938 0 : | LogicalSizeCalculationCause::TenantSizeHandler => &self.metrics.logical_size_histo,
2939 : LogicalSizeCalculationCause::EvictionTaskImitation => {
2940 0 : &self.metrics.imitate_logical_size_histo
2941 : }
2942 : };
2943 0 : let timer = storage_time_metrics.start_timer();
2944 0 : let logical_size = self
2945 0 : .get_current_logical_size_non_incremental(up_to_lsn, ctx)
2946 0 : .await?;
2947 0 : debug!("calculated logical size: {logical_size}");
2948 0 : timer.stop_and_record();
2949 0 : Ok(logical_size)
2950 0 : }
2951 :
2952 : /// Update the current logical size, adding `delta` to the old value.
2953 270570 : fn update_current_logical_size(&self, delta: i64) {
2954 270570 : let logical_size = &self.current_logical_size;
2955 270570 : logical_size.increment_size(delta);
2956 270570 :
2957 270570 : // Also set the value in the prometheus gauge. Note that
2958 270570 : // there is a race condition here: if this is called by two
2959 270570 : // threads concurrently, the prometheus gauge might be set to
2960 270570 : // one value while current_logical_size is set to the
2961 270570 : // other.
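// (Illustrative interleaving, an assumed scenario: writer A increments and
// snapshots the size; writer B then increments, snapshots, and sets the gauge;
// finally A sets the gauge from its earlier, stale snapshot. The gauge
// self-corrects on the next update, so this is benign.)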
2962 270570 : match logical_size.current_size() {
2963 270570 : CurrentLogicalSize::Exact(ref new_current_size) => self
2964 270570 : .metrics
2965 270570 : .current_logical_size_gauge
2966 270570 : .set(new_current_size.into()),
2967 0 : CurrentLogicalSize::Approximate(_) => {
2968 0 : // don't update the gauge yet, this allows us not to update the gauge back and
2969 0 : // forth between the initial size calculation task.
2970 0 : }
2971 : }
2972 270570 : }
2973 :
2974 2596 : pub(crate) fn update_directory_entries_count(&self, kind: DirectoryKind, count: u64) {
2975 2596 : self.directory_metrics[kind.offset()].store(count, AtomicOrdering::Relaxed);
2976 2596 : let aux_metric =
2977 2596 : self.directory_metrics[DirectoryKind::AuxFiles.offset()].load(AtomicOrdering::Relaxed);
2978 2596 :
2979 2596 : let sum_of_entries = self
2980 2596 : .directory_metrics
2981 2596 : .iter()
2982 18172 : .map(|v| v.load(AtomicOrdering::Relaxed))
2983 2596 : .sum();
2984 2596 : // Set a high general threshold and a lower threshold for the auxiliary files,
2985 2596 : // as we can have large numbers of relations in the db directory.
2986 2596 : const SUM_THRESHOLD: u64 = 5000;
2987 2596 : const AUX_THRESHOLD: u64 = 1000;
2988 2596 : if sum_of_entries >= SUM_THRESHOLD || aux_metric >= AUX_THRESHOLD {
2989 0 : self.metrics
2990 0 : .directory_entries_count_gauge
2991 0 : .set(sum_of_entries);
2992 2596 : } else if let Some(metric) = Lazy::get(&self.metrics.directory_entries_count_gauge) {
2993 0 : metric.set(sum_of_entries);
2994 2596 : }
2995 2596 : }
2996 :
2997 0 : async fn find_layer(&self, layer_name: &LayerName) -> Option<Layer> {
2998 0 : let guard = self.layers.read().await;
2999 0 : for historic_layer in guard.layer_map().iter_historic_layers() {
3000 0 : let historic_layer_name = historic_layer.layer_name();
3001 0 : if layer_name == &historic_layer_name {
3002 0 : return Some(guard.get_from_desc(&historic_layer));
3003 0 : }
3004 : }
3005 :
3006 0 : None
3007 0 : }
3008 :
3009 : /// The timeline heatmap is a hint to secondary locations from the primary location,
3010 : /// indicating which layers are currently on-disk on the primary.
3011 : ///
3012 : /// None is returned if the Timeline is in a state where uploading a heatmap
3013 : /// doesn't make sense, such as shutting down or initializing. The caller
3014 : /// should treat this as a cue to simply skip doing any heatmap uploading
3015 : /// for this timeline.
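///
/// Hypothetical caller sketch (`upload_heatmap` is an assumed name, not an API
/// of this module):
///
/// ```ignore
/// if let Some(heatmap) = timeline.generate_heatmap().await {
///     upload_heatmap(heatmap).await?;
/// } // on None: skip heatmap uploading for this timeline for now
/// ```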
3016 0 : pub(crate) async fn generate_heatmap(&self) -> Option<HeatMapTimeline> {
3017 : // no point in heatmaps without remote client
3018 0 : let _remote_client = self.remote_client.as_ref()?;
3019 :
3020 0 : if !self.is_active() {
3021 0 : return None;
3022 0 : }
3023 :
3024 0 : let guard = self.layers.read().await;
3025 :
3026 0 : let resident = guard.likely_resident_layers().map(|layer| {
3027 0 : let last_activity_ts = layer.access_stats().latest_activity_or_now();
3028 0 :
3029 0 : HeatMapLayer::new(
3030 0 : layer.layer_desc().layer_name(),
3031 0 : (&layer.metadata()).into(),
3032 0 : last_activity_ts,
3033 0 : )
3034 0 : });
3035 0 :
3036 0 : let layers = resident.collect();
3037 0 :
3038 0 : Some(HeatMapTimeline::new(self.timeline_id, layers))
3039 0 : }
3040 : }
3041 :
3042 : type TraversalId = Arc<str>;
3043 :
3044 : trait TraversalLayerExt {
3045 : fn traversal_id(&self) -> TraversalId;
3046 : }
3047 :
3048 : impl TraversalLayerExt for Layer {
3049 209304 : fn traversal_id(&self) -> TraversalId {
3050 209304 : Arc::clone(self.debug_str())
3051 209304 : }
3052 : }
3053 :
3054 : impl TraversalLayerExt for Arc<InMemoryLayer> {
3055 605583 : fn traversal_id(&self) -> TraversalId {
3056 605583 : Arc::clone(self.local_path_str())
3057 605583 : }
3058 : }
3059 :
3060 : impl Timeline {
3061 : ///
3062 : /// Gather the data needed to reconstruct the value of a key at a given LSN.
3063 : ///
3064 : /// The data might come from a layer of an ancestor timeline, if the
3065 : /// key hasn't been updated on this timeline yet.
3066 : ///
3067 : /// This function acquires the timeline's layer map lock internally while
3068 : /// searching, and drops it before reading reconstruct data from any layer.
3069 : ///
3070 : /// # Cancel-Safety
3071 : ///
3072 : /// This method is cancellation-safe.
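///
/// A minimal sketch of the traversal loop below (illustrative; error handling
/// and metrics elided):
///
/// ```ignore
/// let mut cont_lsn = Lsn(request_lsn.0 + 1);
/// loop {
///     // 1. Act on the result of the previous layer visit: Complete -> return,
///     //    Missing or no progress since last iteration -> error out.
///     // 2. If the key is inherited and cont_lsn has reached the branch point,
///     //    switch `timeline` to its ancestor and restart the loop.
///     // 3. Otherwise search open -> frozen -> historic layers, collect
///     //    reconstruct data, and lower cont_lsn to the layer's LSN floor.
/// }
/// ```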
3073 622903 : async fn get_reconstruct_data(
3074 622903 : &self,
3075 622903 : key: Key,
3076 622903 : request_lsn: Lsn,
3077 622903 : reconstruct_state: &mut ValueReconstructState,
3078 622903 : ctx: &RequestContext,
3079 622903 : ) -> Result<Vec<TraversalPathItem>, PageReconstructError> {
3080 622903 : // Start from the current timeline.
3081 622903 : let mut timeline_owned;
3082 622903 : let mut timeline = self;
3083 622903 :
3084 622903 : let mut read_count = scopeguard::guard(0, |cnt| {
3085 622903 : crate::metrics::READ_NUM_LAYERS_VISITED.observe(cnt as f64)
3086 622903 : });
3087 622903 :
3088 622903 : // For debugging purposes, collect the path of layers that we traversed
3089 622903 : // through. It's included in the error message if we fail to find the key.
3090 622903 : let mut traversal_path = Vec::<TraversalPathItem>::new();
3091 :
3092 622903 : let cached_lsn = if let Some((cached_lsn, _)) = &reconstruct_state.img {
3093 0 : *cached_lsn
3094 : } else {
3095 622903 : Lsn(0)
3096 : };
3097 :
3098 : // 'prev_lsn' tracks the last LSN that we were at in our search. It's used
3099 : // to check that each iteration makes some progress, and to break out of
3100 : // infinite looping if something goes wrong.
3101 622903 : let mut prev_lsn = None;
3102 622903 :
3103 622903 : let mut result = ValueReconstructResult::Continue;
3104 622903 : let mut cont_lsn = Lsn(request_lsn.0 + 1);
3105 :
3106 1662149 : 'outer: loop {
3107 1662149 : if self.cancel.is_cancelled() {
3108 0 : return Err(PageReconstructError::Cancelled);
3109 1662149 : }
3110 1662149 :
3111 1662149 : // The function should have updated 'state'
3112 1662149 : //info!("CALLED for {} at {}: {:?} with {} records, cached {}", key, cont_lsn, result, reconstruct_state.records.len(), cached_lsn);
3113 1662149 : match result {
3114 622789 : ValueReconstructResult::Complete => return Ok(traversal_path),
3115 : ValueReconstructResult::Continue => {
3116 : // If we reached an earlier cached page image, we're done.
3117 1039352 : if cont_lsn == cached_lsn + 1 {
3118 0 : MATERIALIZED_PAGE_CACHE_HIT.inc_by(1);
3119 0 : return Ok(traversal_path);
3120 1039352 : }
3121 1039352 : if let Some(prev) = prev_lsn {
3122 192198 : if prev <= cont_lsn {
3123 : // Didn't make any progress in last iteration. Error out to avoid
3124 : // getting stuck in the loop.
3125 104 : return Err(PageReconstructError::MissingKey(MissingKeyError {
3126 104 : key,
3127 104 : shard: self.shard_identity.get_shard_number(&key),
3128 104 : cont_lsn: Lsn(cont_lsn.0 - 1),
3129 104 : request_lsn,
3130 104 : ancestor_lsn: Some(timeline.ancestor_lsn),
3131 104 : traversal_path,
3132 104 : backtrace: None,
3133 104 : }));
3134 192094 : }
3135 847154 : }
3136 1039248 : prev_lsn = Some(cont_lsn);
3137 : }
3138 : ValueReconstructResult::Missing => {
3139 : return Err(PageReconstructError::MissingKey(MissingKeyError {
3140 8 : key,
3141 8 : shard: self.shard_identity.get_shard_number(&key),
3142 8 : cont_lsn,
3143 8 : request_lsn,
3144 8 : ancestor_lsn: None,
3145 8 : traversal_path,
3146 8 : backtrace: if cfg!(test) {
3147 8 : Some(std::backtrace::Backtrace::force_capture())
3148 : } else {
3149 0 : None
3150 : },
3151 : }));
3152 : }
3153 : }
3154 :
3155 : // Recurse into ancestor if needed
3156 1039248 : if is_inherited_key(key) && Lsn(cont_lsn.0 - 1) <= timeline.ancestor_lsn {
3157 224253 : trace!(
3158 0 : "going into ancestor {}, cont_lsn is {}",
3159 : timeline.ancestor_lsn,
3160 : cont_lsn
3161 : );
3162 :
3163 224253 : timeline_owned = timeline.get_ready_ancestor_timeline(ctx).await?;
3164 224251 : timeline = &*timeline_owned;
3165 224251 : prev_lsn = None;
3166 224251 : continue 'outer;
3167 814995 : }
3168 :
3169 814995 : let guard = timeline.layers.read().await;
3170 814995 : let layers = guard.layer_map();
3171 :
3172 : // Check the open and frozen in-memory layers first, in order from newest
3173 : // to oldest.
3174 814995 : if let Some(open_layer) = &layers.open_layer {
3175 717171 : let start_lsn = open_layer.get_lsn_range().start;
3176 717171 : if cont_lsn > start_lsn {
3177 : //info!("CHECKING for {} at {} on open layer {}", key, cont_lsn, open_layer.layer_name().display());
3178 : // Get all the data needed to reconstruct the page version from this layer.
3179 : // But if we have an older cached page image, no need to go past that.
3180 604155 : let lsn_floor = max(cached_lsn + 1, start_lsn);
3181 604155 :
3182 604155 : let open_layer = open_layer.clone();
3183 604155 : drop(guard);
3184 604155 :
3185 604155 : result = match open_layer
3186 604155 : .get_value_reconstruct_data(
3187 604155 : key,
3188 604155 : lsn_floor..cont_lsn,
3189 604155 : reconstruct_state,
3190 604155 : ctx,
3191 604155 : )
3192 9452 : .await
3193 : {
3194 604155 : Ok(result) => result,
3195 0 : Err(e) => return Err(PageReconstructError::from(e)),
3196 : };
3197 604155 : cont_lsn = lsn_floor;
3198 604155 : *read_count += 1;
3199 604155 : traversal_path.push((result, cont_lsn, open_layer.traversal_id()));
3200 604155 : continue 'outer;
3201 113016 : }
3202 97824 : }
3203 210840 : for frozen_layer in layers.frozen_layers.iter().rev() {
3204 1428 : let start_lsn = frozen_layer.get_lsn_range().start;
3205 1428 : if cont_lsn > start_lsn {
3206 : //info!("CHECKING for {} at {} on frozen layer {}", key, cont_lsn, frozen_layer.layer_name().display());
3207 1428 : let lsn_floor = max(cached_lsn + 1, start_lsn);
3208 1428 :
3209 1428 : let frozen_layer = frozen_layer.clone();
3210 1428 : drop(guard);
3211 1428 :
3212 1428 : result = match frozen_layer
3213 1428 : .get_value_reconstruct_data(
3214 1428 : key,
3215 1428 : lsn_floor..cont_lsn,
3216 1428 : reconstruct_state,
3217 1428 : ctx,
3218 1428 : )
3219 0 : .await
3220 : {
3221 1428 : Ok(result) => result,
3222 0 : Err(e) => return Err(PageReconstructError::from(e)),
3223 : };
3224 1428 : cont_lsn = lsn_floor;
3225 1428 : *read_count += 1;
3226 1428 : traversal_path.push((result, cont_lsn, frozen_layer.traversal_id()));
3227 1428 : continue 'outer;
3228 0 : }
3229 : }
3230 :
3231 209412 : if let Some(SearchResult { lsn_floor, layer }) = layers.search(key, cont_lsn) {
3232 209304 : let layer = guard.get_from_desc(&layer);
3233 209304 : drop(guard);
3234 209304 : // Get all the data needed to reconstruct the page version from this layer.
3235 209304 : // But if we have an older cached page image, no need to go past that.
3236 209304 : let lsn_floor = max(cached_lsn + 1, lsn_floor);
3237 209304 : result = match layer
3238 209304 : .get_value_reconstruct_data(key, lsn_floor..cont_lsn, reconstruct_state, ctx)
3239 29489 : .await
3240 : {
3241 209304 : Ok(result) => result,
3242 0 : Err(e) => return Err(PageReconstructError::from(e)),
3243 : };
3244 209304 : cont_lsn = lsn_floor;
3245 209304 : *read_count += 1;
3246 209304 : traversal_path.push((result, cont_lsn, layer.traversal_id()));
3247 209304 : continue 'outer;
3248 108 : } else if timeline.ancestor_timeline.is_some() {
3249 : // Nothing on this timeline. Traverse to parent
3250 104 : result = ValueReconstructResult::Continue;
3251 104 : cont_lsn = Lsn(timeline.ancestor_lsn.0 + 1);
3252 104 : continue 'outer;
3253 : } else {
3254 : // Nothing found
3255 4 : result = ValueReconstructResult::Missing;
3256 4 : continue 'outer;
3257 : }
3258 : }
3259 622903 : }
3260 :
3261 : /// Get the data needed to reconstruct all keys in the provided keyspace
3262 : ///
3263 : /// The algorithm is as follows:
3264 : /// 1. While some keys are still not done and there's a timeline to visit:
3265 : /// 2. Visit the timeline (see [`Timeline::get_vectored_reconstruct_data_timeline`]):
3266 : /// 2.1. Build the fringe for the current keyspace
3267 : /// 2.2. Visit the newest layer from the fringe to collect all values for the range it
3268 : /// intersects
3269 : /// 2.3. Pop the layer from the fringe
3270 : /// 2.4. If the fringe is empty, go back to 1
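///
/// The loop described above, as a hedged sketch (`visit_timeline` and
/// `ancestor()` are stand-ins for the calls used in the body, not real APIs):
///
/// ```ignore
/// while keyspace.total_raw_size() > 0 {
///     let completed = visit_timeline(timeline, keyspace.clone(), cont_lsn).await?;
///     keyspace.remove_overlapping_with(&completed);
///     let Some(ancestor) = timeline.ancestor() else { break };
///     cont_lsn = min(cont_lsn, Lsn(timeline.ancestor_lsn.0 + 1));
///     timeline = ancestor; // descend and repeat
/// }
/// ```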
3271 64 : async fn get_vectored_reconstruct_data(
3272 64 : &self,
3273 64 : mut keyspace: KeySpace,
3274 64 : request_lsn: Lsn,
3275 64 : reconstruct_state: &mut ValuesReconstructState,
3276 64 : ctx: &RequestContext,
3277 64 : ) -> Result<(), GetVectoredError> {
3278 64 : let mut timeline_owned: Arc<Timeline>;
3279 64 : let mut timeline = self;
3280 64 :
3281 64 : let mut cont_lsn = Lsn(request_lsn.0 + 1);
3282 :
3283 : loop {
3284 96 : if self.cancel.is_cancelled() {
3285 0 : return Err(GetVectoredError::Cancelled);
3286 96 : }
3287 :
3288 96 : let completed = Self::get_vectored_reconstruct_data_timeline(
3289 96 : timeline,
3290 96 : keyspace.clone(),
3291 96 : cont_lsn,
3292 96 : reconstruct_state,
3293 96 : &self.cancel,
3294 96 : ctx,
3295 96 : )
3296 7413 : .await?;
3297 :
3298 96 : keyspace.remove_overlapping_with(&completed);
3299 96 :
3300 96 : // Do not descend into the ancestor timeline for aux files.
3301 96 : // We don't return a blanket [`GetVectoredError::MissingKey`] to avoid
3302 96 : // stalling compaction.
3303 96 : keyspace.remove_overlapping_with(&KeySpace {
3304 96 : ranges: vec![NON_INHERITED_RANGE, NON_INHERITED_SPARSE_RANGE],
3305 96 : });
3306 96 :
3307 96 : // Keyspace is fully retrieved, no ancestor timeline, or metadata scan (where we do not look
3308 96 : // into ancestor timelines). TODO: is there any other metadata which we want to inherit?
3309 96 : if keyspace.total_raw_size() == 0 || timeline.ancestor_timeline.is_none() {
3310 64 : break;
3311 32 : }
3312 32 :
3313 32 : // Take the min to avoid reconstructing a page with data newer than request Lsn.
3314 32 : cont_lsn = std::cmp::min(Lsn(request_lsn.0 + 1), Lsn(timeline.ancestor_lsn.0 + 1));
3315 32 : timeline_owned = timeline
3316 32 : .get_ready_ancestor_timeline(ctx)
3317 0 : .await
3318 32 : .map_err(GetVectoredError::GetReadyAncestorError)?;
3319 32 : timeline = &*timeline_owned;
3320 : }
3321 :
3322 64 : if keyspace.total_raw_size() != 0 {
3323 4 : return Err(GetVectoredError::MissingKey(MissingKeyError {
3324 4 : key: keyspace.start().unwrap(), /* better if we can store the full keyspace */
3325 4 : shard: self
3326 4 : .shard_identity
3327 4 : .get_shard_number(&keyspace.start().unwrap()),
3328 4 : cont_lsn,
3329 4 : request_lsn,
3330 4 : ancestor_lsn: Some(timeline.ancestor_lsn),
3331 4 : traversal_path: vec![],
3332 4 : backtrace: None,
3333 4 : }));
3334 60 : }
3335 60 :
3336 60 : Ok(())
3337 64 : }
3338 :
3339 : /// Collect the reconstruct data for a keyspace from the specified timeline.
3340 : ///
3341 : /// Maintain a fringe [`LayerFringe`] which tracks all the layers that intersect
3342 : /// the current keyspace. The current keyspace of the search at any given timeline
3343 : /// is the original keyspace minus all the keys that have been completed minus
3344 : /// any keys for which we couldn't find an intersecting layer. It's not tracked explicitly,
3345 : /// but if you merge all the keyspaces in the fringe, you get the "current keyspace".
3346 : ///
3347 : /// This is basically a depth-first search visitor implementation where a vertex
3348 : /// is the (layer, lsn range, key space) tuple. The fringe acts as the stack.
3349 : ///
3350 : /// At each iteration pop the top of the fringe (the layer with the highest Lsn)
3351 : /// and get all the required reconstruct data from the layer in one go.
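///
/// Schematically (a sketch; `plan_reads` is a stand-in for the in-memory and
/// historic layer searches performed in the body):
///
/// ```ignore
/// loop {
///     // fold keys completed by the previous visit out of the search keyspace
///     unmapped.remove_overlapping_with(&reconstruct_state.consume_done_keys());
///     // map still-unmapped keys onto intersecting layers, pushing onto the fringe
///     plan_reads(&layer_map, &unmapped, cont_lsn, &mut fringe);
///     // pop the layer with the highest LSN and read all its values in one go
///     let Some((layer, keyspace, lsn_range)) = fringe.next_layer() else { break };
///     layer.get_values_reconstruct_data(keyspace, lsn_range, state, ctx).await?;
/// }
/// ```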
3352 96 : async fn get_vectored_reconstruct_data_timeline(
3353 96 : timeline: &Timeline,
3354 96 : keyspace: KeySpace,
3355 96 : mut cont_lsn: Lsn,
3356 96 : reconstruct_state: &mut ValuesReconstructState,
3357 96 : cancel: &CancellationToken,
3358 96 : ctx: &RequestContext,
3359 96 : ) -> Result<KeySpace, GetVectoredError> {
3360 96 : let mut unmapped_keyspace = keyspace.clone();
3361 96 : let mut fringe = LayerFringe::new();
3362 96 :
3363 96 : let mut completed_keyspace = KeySpace::default();
3364 :
3365 248 : loop {
3366 248 : if cancel.is_cancelled() {
3367 0 : return Err(GetVectoredError::Cancelled);
3368 248 : }
3369 248 :
3370 248 : let keys_done_last_step = reconstruct_state.consume_done_keys();
3371 248 : unmapped_keyspace.remove_overlapping_with(&keys_done_last_step);
3372 248 : completed_keyspace.merge(&keys_done_last_step);
3373 248 :
3374 248 : // Do not descend any further if the last layer we visited
3375 248 : // completed all keys in the keyspace it inspected. This is not
3376 248 : // required for correctness, but avoids visiting extra layers
3377 248 : // which turns out to be a perf bottleneck in some cases.
3378 248 : if !unmapped_keyspace.is_empty() {
3379 206 : let guard = timeline.layers.read().await;
3380 206 : let layers = guard.layer_map();
3381 206 :
3382 206 : let in_memory_layer = layers.find_in_memory_layer(|l| {
3383 4 : let start_lsn = l.get_lsn_range().start;
3384 4 : cont_lsn > start_lsn
3385 206 : });
3386 206 :
3387 206 : match in_memory_layer {
3388 2 : Some(l) => {
3389 2 : let lsn_range = l.get_lsn_range().start..cont_lsn;
3390 2 : fringe.update(
3391 2 : ReadableLayer::InMemoryLayer(l),
3392 2 : unmapped_keyspace.clone(),
3393 2 : lsn_range,
3394 2 : );
3395 2 : }
3396 : None => {
3397 83560 : for range in unmapped_keyspace.ranges.iter() {
3398 83560 : let results = layers.range_search(range.clone(), cont_lsn);
3399 83560 :
3400 83560 : results
3401 83560 : .found
3402 83560 : .into_iter()
3403 83560 : .map(|(SearchResult { layer, lsn_floor }, keyspace_accum)| {
3404 63524 : (
3405 63524 : ReadableLayer::PersistentLayer(guard.get_from_desc(&layer)),
3406 63524 : keyspace_accum.to_keyspace(),
3407 63524 : lsn_floor..cont_lsn,
3408 63524 : )
3409 83560 : })
3410 83560 : .for_each(|(layer, keyspace, lsn_range)| {
3411 63524 : fringe.update(layer, keyspace, lsn_range)
3412 83560 : });
3413 83560 : }
3414 : }
3415 : }
3416 :
3417 : // It's safe to drop the layer map lock after planning the next round of reads.
3418 : // The fringe keeps readable handles for the layers which are safe to read even
3419 : // if layers were compacted or flushed.
3420 : //
3421 : // The more interesting consideration is: "Why is the read algorithm still correct
3422 : // if the layer map changes while it is operating?". Doing a vectored read on a
3423 : // timeline boils down to pushing an imaginary lsn boundary downwards for each range
3424 : // covered by the read. The layer map tells us how to move the lsn downwards for a
3425 : // range at *a particular point in time*. It is fine for the answer to be different
3426 : // at two different time points.
3427 206 : drop(guard);
3428 42 : }
3429 :
3430 248 : if let Some((layer_to_read, keyspace_to_read, lsn_range)) = fringe.next_layer() {
3431 152 : let next_cont_lsn = lsn_range.start;
3432 152 : layer_to_read
3433 152 : .get_values_reconstruct_data(
3434 152 : keyspace_to_read.clone(),
3435 152 : lsn_range,
3436 152 : reconstruct_state,
3437 152 : ctx,
3438 152 : )
3439 7411 : .await?;
3440 :
3441 152 : unmapped_keyspace = keyspace_to_read;
3442 152 : cont_lsn = next_cont_lsn;
3443 152 :
3444 152 : reconstruct_state.on_layer_visited();
3445 : } else {
3446 96 : break;
3447 96 : }
3448 96 : }
3449 96 :
3450 96 : Ok(completed_keyspace)
3451 96 : }
3452 :
3453 : /// # Cancel-safety
3454 : ///
3455 : /// This method is cancellation-safe.
3456 621765 : async fn lookup_cached_page(
3457 621765 : &self,
3458 621765 : key: &Key,
3459 621765 : lsn: Lsn,
3460 621765 : ctx: &RequestContext,
3461 621765 : ) -> Option<(Lsn, Bytes)> {
3462 621765 : let cache = page_cache::get();
3463 :
3464 : // FIXME: It's pointless to check the cache for things that are not 8kB pages.
3465 : // We should look at the key to determine if it's a cacheable object
3466 621765 : let (lsn, read_guard) = cache
3467 621765 : .lookup_materialized_page(self.tenant_shard_id, self.timeline_id, key, lsn, ctx)
3468 621765 : .await?;
3469 0 : let img = Bytes::from(read_guard.to_vec());
3470 0 : Some((lsn, img))
3471 621765 : }
3472 :
3473 224285 : async fn get_ready_ancestor_timeline(
3474 224285 : &self,
3475 224285 : ctx: &RequestContext,
3476 224285 : ) -> Result<Arc<Timeline>, GetReadyAncestorError> {
3477 224285 : let ancestor = match self.get_ancestor_timeline() {
3478 224285 : Ok(timeline) => timeline,
3479 0 : Err(e) => return Err(GetReadyAncestorError::from(e)),
3480 : };
3481 :
3482 : // It's possible that the ancestor timeline isn't active yet, or
3483 : // is active but hasn't yet caught up to the branch point. Wait
3484 : // for it.
3485 : //
3486 : // This cannot happen while the pageserver is running normally,
3487 : // because you cannot create a branch from a point that isn't
3488 : // present in the pageserver yet. However, we don't wait for the
3489 : // branch point to be uploaded to cloud storage before creating
3490 : // a branch. I.e., the branch LSN need not be remote consistent
3491 : // for the branching operation to succeed.
3492 : //
3493 : // Hence, if we try to load a tenant in such a state where
3494 : // 1. the existence of the branch was persisted (in IndexPart and/or locally)
3495 : // 2. but the ancestor state is behind branch_lsn because it was not yet persisted
3496 : // then we will need to wait for the ancestor timeline to
3497 : // re-stream WAL up to branch_lsn before we access it.
3498 : //
3499 : // How can a tenant get in such a state?
3500 : // - ungraceful pageserver process exit
3501 : // - detach+attach => this is a bug, https://github.com/neondatabase/neon/issues/4219
3502 : //
3503 : // NB: this could be avoided by requiring
3504 : // branch_lsn >= remote_consistent_lsn
3505 : // during branch creation.
3506 224285 : match ancestor.wait_to_become_active(ctx).await {
3507 224283 : Ok(()) => {}
3508 : Err(TimelineState::Stopping) => {
3509 0 : return Err(GetReadyAncestorError::AncestorStopping(
3510 0 : ancestor.timeline_id,
3511 0 : ));
3512 : }
3513 2 : Err(state) => {
3514 2 : return Err(GetReadyAncestorError::Other(anyhow::anyhow!(
3515 2 : "Timeline {} will not become active. Current state: {:?}",
3516 2 : ancestor.timeline_id,
3517 2 : &state,
3518 2 : )));
3519 : }
3520 : }
3521 224283 : ancestor
3522 224283 : .wait_lsn(self.ancestor_lsn, WaitLsnWaiter::Timeline(self), ctx)
3523 0 : .await
3524 224283 : .map_err(|e| match e {
3525 0 : e @ WaitLsnError::Timeout(_) => GetReadyAncestorError::AncestorLsnTimeout(e),
3526 0 : WaitLsnError::Shutdown => GetReadyAncestorError::Cancelled,
3527 0 : e @ WaitLsnError::BadState => GetReadyAncestorError::Other(anyhow::anyhow!(e)),
3528 224283 : })?;
3529 :
3530 224283 : Ok(ancestor)
3531 224285 : }
3532 :
3533 224285 : pub(crate) fn get_ancestor_timeline(&self) -> anyhow::Result<Arc<Timeline>> {
3534 224285 : let ancestor = self.ancestor_timeline.as_ref().with_context(|| {
3535 0 : format!(
3536 0 : "Ancestor is missing. Timeline id: {} Ancestor id {:?}",
3537 0 : self.timeline_id,
3538 0 : self.get_ancestor_timeline_id(),
3539 0 : )
3540 224285 : })?;
3541 224285 : Ok(Arc::clone(ancestor))
3542 224285 : }
3543 :
3544 5452 : pub(crate) fn get_shard_identity(&self) -> &ShardIdentity {
3545 5452 : &self.shard_identity
3546 5452 : }
3547 :
3548 : ///
3549 : /// Get a handle to the latest layer for appending.
3550 : ///
3551 4780028 : async fn get_layer_for_write(&self, lsn: Lsn) -> anyhow::Result<Arc<InMemoryLayer>> {
3552 4780028 : let mut guard = self.layers.write().await;
3553 4780028 : let layer = guard
3554 4780028 : .get_layer_for_write(
3555 4780028 : lsn,
3556 4780028 : self.get_last_record_lsn(),
3557 4780028 : self.conf,
3558 4780028 : self.timeline_id,
3559 4780028 : self.tenant_shard_id,
3560 4780028 : )
3561 577 : .await?;
3562 4780028 : Ok(layer)
3563 4780028 : }
3564 :
3565 5254946 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
3566 5254946 : assert!(new_lsn.is_aligned());
3567 :
3568 5254946 : self.metrics.last_record_gauge.set(new_lsn.0 as i64);
3569 5254946 : self.last_record_lsn.advance(new_lsn);
3570 5254946 : }
3571 :
3572 : /// Whether or not there was a layer to freeze, returns the value of get_last_record_lsn
3573 : /// from before the freeze was attempted: this guarantees that ingested data up to this LSN (inclusive) is frozen.
3574 934 : async fn freeze_inmem_layer(&self, write_lock_held: bool) -> Lsn {
3575 : // Freeze the current open in-memory layer. It will be written to disk on next
3576 : // iteration.
3577 :
3578 934 : let _write_guard = if write_lock_held {
3579 0 : None
3580 : } else {
3581 934 : Some(self.write_lock.lock().await)
3582 : };
3583 :
3584 934 : let to_lsn = self.get_last_record_lsn();
3585 934 : self.freeze_inmem_layer_at(to_lsn).await;
3586 934 : to_lsn
3587 934 : }
3588 :
3589 934 : async fn freeze_inmem_layer_at(&self, at: Lsn) {
3590 934 : let mut guard = self.layers.write().await;
3591 934 : guard
3592 934 : .try_freeze_in_memory_layer(at, &self.last_freeze_at)
3593 3 : .await;
3594 934 : }
3595 :
3596 : /// Layer flusher task's main loop.
3597 328 : async fn flush_loop(
3598 328 : self: &Arc<Self>,
3599 328 : mut layer_flush_start_rx: tokio::sync::watch::Receiver<(u64, Lsn)>,
3600 328 : ctx: &RequestContext,
3601 328 : ) {
3602 328 : info!("started flush loop");
3603 934 : loop {
3604 934 : tokio::select! {
3605 : _ = self.cancel.cancelled() => {
3606 : info!("shutting down layer flush task due to Timeline::cancel");
3607 : break;
3608 : },
3609 : _ = layer_flush_start_rx.changed() => {}
3610 : }
3611 934 : trace!("waking up");
3612 934 : let (flush_counter, frozen_to_lsn) = *layer_flush_start_rx.borrow();
3613 934 :
3614 934 : // The highest LSN to which we flushed in the loop over frozen layers
3615 934 : let mut flushed_to_lsn = Lsn(0);
3616 :
3617 934 : let result = loop {
3618 1862 : if self.cancel.is_cancelled() {
3619 0 : info!("dropping out of flush loop for timeline shutdown");
3620 : // Note: we do not bother transmitting into [`layer_flush_done_tx`], because
3621 : // anyone waiting on that will respect self.cancel as well: they will stop
3622 : // waiting at the same time as we drop out of this loop.
3623 0 : return;
3624 1862 : }
3625 1862 :
3626 1862 : let timer = self.metrics.flush_time_histo.start_timer();
3627 :
3628 1862 : let layer_to_flush = {
3629 1862 : let guard = self.layers.read().await;
3630 1862 : guard.layer_map().frozen_layers.front().cloned()
3631 : // drop 'layers' lock to allow concurrent reads and writes
3632 : };
3633 1862 : let Some(layer_to_flush) = layer_to_flush else {
3634 934 : break Ok(());
3635 : };
3636 57496 : match self.flush_frozen_layer(layer_to_flush, ctx).await {
3637 928 : Ok(this_layer_to_lsn) => {
3638 928 : flushed_to_lsn = std::cmp::max(flushed_to_lsn, this_layer_to_lsn);
3639 928 : }
3640 : Err(FlushLayerError::Cancelled) => {
3641 0 : info!("dropping out of flush loop for timeline shutdown");
3642 0 : return;
3643 : }
3644 0 : err @ Err(
3645 : FlushLayerError::Other(_) | FlushLayerError::CreateImageLayersError(_),
3646 : ) => {
3647 0 : error!("could not flush frozen layer: {err:?}");
3648 0 : break err.map(|_| ());
3649 : }
3650 : }
3651 928 : timer.stop_and_record();
3652 : };
3653 :
3654 : // Unsharded tenants should never advance their LSN beyond the end of the
3655 : // highest layer they write: such gaps between layer data and the frozen LSN
3656 : // are only legal on sharded tenants.
3657 934 : debug_assert!(
3658 934 : self.shard_identity.count.count() > 1
3659 934 : || flushed_to_lsn >= frozen_to_lsn
3660 6 : || !flushed_to_lsn.is_valid()
3661 : );
3662 :
3663 934 : if flushed_to_lsn < frozen_to_lsn && self.shard_identity.count.count() > 1 {
3664 : // If our layer flushes didn't carry disk_consistent_lsn up to the `to_lsn` advertised
3665 : // to us via layer_flush_start_rx, then advance it here.
3666 : //
3667 : // This path is only taken for tenants with multiple shards: single sharded tenants should
3668 : // never encounter a gap in the wal.
3669 0 : let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
3670 0 : tracing::debug!("Advancing disk_consistent_lsn across layer gap {old_disk_consistent_lsn}->{frozen_to_lsn}");
3671 0 : if self.set_disk_consistent_lsn(frozen_to_lsn) {
3672 0 : if let Err(e) = self.schedule_uploads(frozen_to_lsn, vec![]) {
3673 0 : tracing::warn!("Failed to schedule metadata upload after updating disk_consistent_lsn: {e}");
3674 0 : }
3675 0 : }
3676 934 : }
3677 :
3678 : // Notify any listeners that we're done
3679 934 : let _ = self
3680 934 : .layer_flush_done_tx
3681 934 : .send_replace((flush_counter, result));
3682 : }
3683 8 : }
3684 :
3685 : /// Request the flush loop to write out all frozen layers up to `to_lsn` as Delta L0 files to disk.
3686 : /// The caller is responsible for the freezing, e.g., [`Self::freeze_inmem_layer`].
3687 : ///
3688 : /// `last_record_lsn` may be higher than the highest LSN of a frozen layer: if this is the case,
3689 : /// it means no data will be written between the top of the highest frozen layer and to_lsn,
3690 : /// e.g. because this tenant shard has ingested up to to_lsn and not written any data locally for that part of the WAL.
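///
/// The handshake with the flush task is a counter carried over a pair of watch
/// channels; a hedged sketch of the requester's side (not verbatim):
///
/// ```ignore
/// // publish a new request id and the target LSN
/// start_tx.send_modify(|(counter, lsn)| {
///     my_request = *counter + 1;
///     *counter = my_request;
///     *lsn = max(*lsn, to_lsn);
/// });
/// // the flush task flushes, then acknowledges via done_tx.send_replace((counter, result));
/// // we wait until done_rx reports a counter >= my_request
/// ```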
3691 934 : async fn flush_frozen_layers_and_wait(&self, last_record_lsn: Lsn) -> anyhow::Result<()> {
3692 934 : let mut rx = self.layer_flush_done_tx.subscribe();
3693 934 :
3694 934 : // Increment the flush cycle counter and wake up the flush task.
3695 934 : // Remember the new value, so that when we listen for the flush
3696 934 : // to finish, we know when the flush that we initiated has
3697 934 : // finished, instead of some other flush that was started earlier.
3698 934 : let mut my_flush_request = 0;
3699 934 :
3700 934 : let flush_loop_state = { *self.flush_loop_state.lock().unwrap() };
3701 934 : if !matches!(flush_loop_state, FlushLoopState::Running { .. }) {
3702 0 : anyhow::bail!("cannot flush frozen layers when flush_loop is not running, state is {flush_loop_state:?}")
3703 934 : }
3704 934 :
3705 934 : self.layer_flush_start_tx.send_modify(|(counter, lsn)| {
3706 934 : my_flush_request = *counter + 1;
3707 934 : *counter = my_flush_request;
3708 934 : *lsn = std::cmp::max(last_record_lsn, *lsn);
3709 934 : });
3710 :
3711 1868 : loop {
3712 1868 : {
3713 1868 : let (last_result_counter, last_result) = &*rx.borrow();
3714 1868 : if *last_result_counter >= my_flush_request {
3715 934 : if let Err(_err) = last_result {
3716 : // We already logged the original error in
3717 : // flush_loop. We cannot propagate it to the caller
3718 : // here, because it might not be Cloneable
3719 0 : anyhow::bail!(
3720 0 : "Could not flush frozen layer. Request id: {}",
3721 0 : my_flush_request
3722 0 : );
3723 : } else {
3724 934 : return Ok(());
3725 : }
3726 934 : }
3727 934 : }
3728 934 : trace!("waiting for flush to complete");
3729 : tokio::select! {
3730 : rx_e = rx.changed() => {
3731 : rx_e?;
3732 : },
3733 : // Cancellation safety: we are not leaving an I/O in-flight for the flush, we're just ignoring
3734 : // the notification from [`flush_loop`] that it completed.
3735 : _ = self.cancel.cancelled() => {
3736 : tracing::info!("Cancelled layer flush due to timeline shutdown");
3737 : return Ok(())
3738 : }
3739 : };
3740 934 : trace!("done")
3741 : }
3742 934 : }
3743 :
3744 0 : fn flush_frozen_layers(&self) {
3745 0 : self.layer_flush_start_tx.send_modify(|(counter, lsn)| {
3746 0 : *counter += 1;
3747 0 :
3748 0 : *lsn = std::cmp::max(*lsn, Lsn(self.last_freeze_at.load().0 - 1));
3749 0 : });
3750 0 : }
3751 :
3752 : /// Flush one frozen in-memory layer to disk, as a new delta layer.
3753 : ///
3754 : /// The return value is the last LSN (inclusive) of the layer that was flushed.
3755 1856 : #[instrument(skip_all, fields(layer=%frozen_layer))]
3756 : async fn flush_frozen_layer(
3757 : self: &Arc<Self>,
3758 : frozen_layer: Arc<InMemoryLayer>,
3759 : ctx: &RequestContext,
3760 : ) -> Result<Lsn, FlushLayerError> {
3761 : debug_assert_current_span_has_tenant_and_timeline_id();
3762 :
3763 : // As a special case, when we have just imported an image into the repository,
3764 : // instead of writing out an L0 delta layer, we directly write out image layer
3765 : // files. This is possible as long as *all* the data imported into the
3766 : // repository have the same LSN.
3767 : let lsn_range = frozen_layer.get_lsn_range();
3768 :
3769 : // Whether to directly create image layers for this flush, or flush them as delta layers
3770 : let create_image_layer =
3771 : lsn_range.start == self.initdb_lsn && lsn_range.end == Lsn(self.initdb_lsn.0 + 1);
3772 :
3773 : #[cfg(test)]
3774 : {
3775 : match &mut *self.flush_loop_state.lock().unwrap() {
3776 : FlushLoopState::NotStarted | FlushLoopState::Exited => {
3777 : panic!("flush loop not running")
3778 : }
3779 : FlushLoopState::Running {
3780 : expect_initdb_optimization,
3781 : initdb_optimization_count,
3782 : ..
3783 : } => {
3784 : if create_image_layer {
3785 : *initdb_optimization_count += 1;
3786 : } else {
3787 : assert!(!*expect_initdb_optimization, "expected initdb optimization");
3788 : }
3789 : }
3790 : }
3791 : }
3792 :
3793 : let (layers_to_upload, delta_layer_to_add) = if create_image_layer {
3794 : // Note: The 'ctx' in use here has DownloadBehavior::Error. We should not
3795 : // require downloading anything during initial import.
3796 : let ((rel_partition, metadata_partition), _lsn) = self
3797 : .repartition(
3798 : self.initdb_lsn,
3799 : self.get_compaction_target_size(),
3800 : EnumSet::empty(),
3801 : ctx,
3802 : )
3803 : .await?;
3804 :
3805 : if self.cancel.is_cancelled() {
3806 : return Err(FlushLayerError::Cancelled);
3807 : }
3808 :
3809 : // For metadata, always create delta layers.
3810 : let delta_layer = if !metadata_partition.parts.is_empty() {
3811 : assert_eq!(
3812 : metadata_partition.parts.len(),
3813 : 1,
3814 : "currently sparse keyspace should only contain a single aux file keyspace"
3815 : );
3816 : let metadata_keyspace = &metadata_partition.parts[0];
3817 : assert_eq!(
3818 : metadata_keyspace.0.ranges.len(),
3819 : 1,
3820 : "aux file keyspace should be a single range"
3821 : );
3822 : self.create_delta_layer(
3823 : &frozen_layer,
3824 : ctx,
3825 : Some(metadata_keyspace.0.ranges[0].clone()),
3826 : )
3827 : .await?
3828 : } else {
3829 : None
3830 : };
3831 :
3832 : // For image layers, we add them immediately into the layer map.
3833 : let mut layers_to_upload = Vec::new();
3834 : layers_to_upload.extend(
3835 : self.create_image_layers(
3836 : &rel_partition,
3837 : self.initdb_lsn,
3838 : ImageLayerCreationMode::Initial,
3839 : ctx,
3840 : )
3841 : .await?,
3842 : );
3843 :
3844 : if let Some(delta_layer) = delta_layer {
3845 : layers_to_upload.push(delta_layer.clone());
3846 : (layers_to_upload, Some(delta_layer))
3847 : } else {
3848 : (layers_to_upload, None)
3849 : }
3850 : } else {
3851 : // Normal case, write out a L0 delta layer file.
3852 : // `create_delta_layer` will not modify the layer map.
3853 : // We will remove frozen layer and add delta layer in one atomic operation later.
3854 : let Some(layer) = self.create_delta_layer(&frozen_layer, ctx, None).await? else {
3855 : panic!("delta layer cannot be empty if no filter is applied");
3856 : };
3857 : (
3858 : // FIXME: even though we assume a single image and a single delta layer here,
3859 : // we still push them into a vec
3860 : vec![layer.clone()],
3861 : Some(layer),
3862 : )
3863 : };
3864 :
3865 : pausable_failpoint!("flush-layer-cancel-after-writing-layer-out-pausable");
3866 :
3867 : if self.cancel.is_cancelled() {
3868 : return Err(FlushLayerError::Cancelled);
3869 : }
3870 :
3871 : let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1);
3872 :
3873 : // The new on-disk layers are now in the layer map. We can remove the
3874 : // in-memory layer from the map now. The flushed layer is stored in
3875 : // the mapping in `create_delta_layer`.
3876 : {
3877 : let mut guard = self.layers.write().await;
3878 :
3879 : if self.cancel.is_cancelled() {
3880 : return Err(FlushLayerError::Cancelled);
3881 : }
3882 :
3883 : guard.finish_flush_l0_layer(delta_layer_to_add.as_ref(), &frozen_layer, &self.metrics);
3884 :
3885 : if self.set_disk_consistent_lsn(disk_consistent_lsn) {
3886 : // Schedule remote uploads that will reflect our new disk_consistent_lsn
3887 : self.schedule_uploads(disk_consistent_lsn, layers_to_upload)?;
3888 : }
3889 : // release lock on 'layers'
3890 : };
3891 :
3892 : // FIXME: between create_delta_layer and the scheduling of the upload in `update_metadata_file`,
3893 : // a compaction can delete the file and then it won't be available for uploads any more.
3894 : // We still schedule the upload, resulting in an error, but ideally we'd somehow avoid this
3895 : // race situation.
3896 : // See https://github.com/neondatabase/neon/issues/4526
3897 : pausable_failpoint!("flush-frozen-pausable");
3898 :
3899 : // This failpoint is used by another test case `test_pageserver_recovery`.
3900 : fail_point!("flush-frozen-exit");
3901 :
3902 : Ok(Lsn(lsn_range.end.0 - 1))
3903 : }
3904 :
3905 : /// Return true if the value changed
3906 : ///
3907 : /// This function must only be used from the layer flush task, and may not be called concurrently.
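///
/// Illustrative note (an assumption, not code from this module): if concurrent
/// callers had to be supported, the plain load/store below would need to become
/// a compare-and-swap loop over the underlying atomic, roughly:
///
/// ```ignore
/// loop {
///     let old = atomic_lsn.load();
///     if new_value <= old { return false; }
///     if atomic_lsn.compare_exchange(old, new_value).is_ok() { return true; }
/// }
/// ```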
3908 928 : fn set_disk_consistent_lsn(&self, new_value: Lsn) -> bool {
3909 928 : // We do a simple load/store cycle: that's why this function isn't safe for concurrent use.
3910 928 : let old_value = self.disk_consistent_lsn.load();
3911 928 : if new_value != old_value {
3912 928 : assert!(new_value >= old_value);
3913 928 : self.disk_consistent_lsn.store(new_value);
3914 928 : true
3915 : } else {
3916 0 : false
3917 : }
3918 928 : }
3919 :
3920 : /// Update metadata file
3921 928 : fn schedule_uploads(
3922 928 : &self,
3923 928 : disk_consistent_lsn: Lsn,
3924 928 : layers_to_upload: impl IntoIterator<Item = ResidentLayer>,
3925 928 : ) -> anyhow::Result<()> {
3926 928 : // We can only save a valid 'prev_record_lsn' value on disk if we
3927 928 : // flushed *all* in-memory changes to disk. We only track
3928 928 : // 'prev_record_lsn' in memory for the latest processed record, so we
3929 928 : // don't remember what the correct value for some old LSN would be.
3930 928 : // But if we flush everything, then the value corresponding to the
3931 928 : // current 'last_record_lsn' is correct and we can store it on disk.
3932 928 : let RecordLsn {
3933 928 : last: last_record_lsn,
3934 928 : prev: prev_record_lsn,
3935 928 : } = self.last_record_lsn.load();
3936 928 : let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
3937 928 : Some(prev_record_lsn)
3938 : } else {
3939 0 : None
3940 : };
3941 :
3942 928 : let update = crate::tenant::metadata::MetadataUpdate::new(
3943 928 : disk_consistent_lsn,
3944 928 : ondisk_prev_record_lsn,
3945 928 : *self.latest_gc_cutoff_lsn.read(),
3946 928 : );
3947 928 :
3948 928 : fail_point!("checkpoint-before-saving-metadata", |x| bail!(
3949 0 : "{}",
3950 0 : x.unwrap()
3951 928 : ));
3952 :
3953 928 : if let Some(remote_client) = &self.remote_client {
3954 1870 : for layer in layers_to_upload {
3955 942 : remote_client.schedule_layer_file_upload(layer)?;
3956 : }
3957 928 : remote_client.schedule_index_upload_for_metadata_update(&update)?;
3958 0 : }
3959 :
3960 928 : Ok(())
3961 928 : }
3962 :
3963 0 : pub(crate) async fn preserve_initdb_archive(&self) -> anyhow::Result<()> {
3964 0 : if let Some(remote_client) = &self.remote_client {
3965 0 : remote_client
3966 0 : .preserve_initdb_archive(
3967 0 : &self.tenant_shard_id.tenant_id,
3968 0 : &self.timeline_id,
3969 0 : &self.cancel,
3970 0 : )
3971 0 : .await?;
3972 : } else {
3973 0 : bail!("No remote storage configured, but was asked to backup the initdb archive for {} / {}", self.tenant_shard_id.tenant_id, self.timeline_id);
3974 : }
3975 0 : Ok(())
3976 0 : }
3977 :
3978 : // Write out the given frozen in-memory layer as a new L0 delta file. This L0 file will not be tracked
3979 : // in the layer map immediately. The caller is responsible for putting it into the layer map.
3980 928 : async fn create_delta_layer(
3981 928 : self: &Arc<Self>,
3982 928 : frozen_layer: &Arc<InMemoryLayer>,
3983 928 : ctx: &RequestContext,
3984 928 : key_range: Option<Range<Key>>,
3985 928 : ) -> anyhow::Result<Option<ResidentLayer>> {
3986 928 : let self_clone = Arc::clone(self);
3987 928 : let frozen_layer = Arc::clone(frozen_layer);
3988 928 : let ctx = ctx.attached_child();
3989 928 : let work = async move {
3990 928 : let Some(new_delta) = frozen_layer
3991 928 : .write_to_disk(&self_clone, &ctx, key_range)
3992 84976 : .await?
3993 : else {
3994 102 : return Ok(None);
3995 : };
3996 : // The write_to_disk() above calls writer.finish() which already did the fsync of the inodes.
3997 : // We just need to fsync the directory in which these inodes are linked,
3998 : // which we know to be the timeline directory.
3999 : //
4000 : // We use fatal_err() below because, after write_to_disk() returns with success,
4001 : // the in-memory state of the filesystem already has the layer file in its final place,
4002 : // and subsequent pageserver code could think it's durable while it really isn't.
4003 826 : let timeline_dir = VirtualFile::open(
4004 826 : &self_clone
4005 826 : .conf
4006 826 : .timeline_path(&self_clone.tenant_shard_id, &self_clone.timeline_id),
4007 826 : )
4008 415 : .await
4009 826 : .fatal_err("VirtualFile::open for timeline dir fsync");
4010 826 : timeline_dir
4011 826 : .sync_all()
4012 413 : .await
4013 826 : .fatal_err("VirtualFile::sync_all timeline dir");
4014 826 : anyhow::Ok(Some(new_delta))
4015 928 : };
4016 : // Before tokio-epoll-uring, we ran write_to_disk & the sync_all inside spawn_blocking.
4017 : // Preserve that behavior here so that `virtual_file_io_engine=std-fs` keeps behaving the same way.
4018 : use crate::virtual_file::io_engine::IoEngine;
4019 928 : match crate::virtual_file::io_engine::get() {
4020 0 : IoEngine::NotSet => panic!("io engine not set"),
4021 : IoEngine::StdFs => {
4022 464 : let span = tracing::info_span!("blocking");
4023 464 : tokio::task::spawn_blocking({
4024 464 : move || Handle::current().block_on(work.instrument(span))
4025 464 : })
4026 464 : .await
4027 464 : .context("spawn_blocking")
4028 464 : .and_then(|x| x)
4029 : }
4030 : #[cfg(target_os = "linux")]
4031 53875 : IoEngine::TokioEpollUring => work.await,
4032 : }
4033 928 : }
4034 :
4035 432 : async fn repartition(
4036 432 : &self,
4037 432 : lsn: Lsn,
4038 432 : partition_size: u64,
4039 432 : flags: EnumSet<CompactFlags>,
4040 432 : ctx: &RequestContext,
4041 432 : ) -> anyhow::Result<((KeyPartitioning, SparseKeyPartitioning), Lsn)> {
4042 432 : let Ok(mut partitioning_guard) = self.partitioning.try_lock() else {
4043 : // NB: there are two callers, one is the compaction task, of which there is only one per struct Tenant and hence Timeline.
4044 : // The other is the initdb optimization in flush_frozen_layer, used by `bootstrap_timeline`, which runs before `.activate()`
4045 : // and hence before the compaction task starts.
4046 0 : anyhow::bail!("repartition() called concurrently, this should not happen");
4047 : };
4048 432 : let ((dense_partition, sparse_partition), partition_lsn) = &*partitioning_guard;
4049 432 : if lsn < *partition_lsn {
4050 0 : anyhow::bail!("repartition() called with LSN going backwards, this should not happen");
4051 432 : }
4052 432 :
4053 432 : let distance = lsn.0 - partition_lsn.0;
4054 432 : if *partition_lsn != Lsn(0)
4055 228 : && distance <= self.repartition_threshold
4056 228 : && !flags.contains(CompactFlags::ForceRepartition)
4057 : {
4058 228 : debug!(
4059 : distance,
4060 : threshold = self.repartition_threshold,
4061 0 : "no repartitioning needed"
4062 : );
4063 228 : return Ok((
4064 228 : (dense_partition.clone(), sparse_partition.clone()),
4065 228 : *partition_lsn,
4066 228 : ));
4067 204 : }
4068 :
4069 13269 : let (dense_ks, sparse_ks) = self.collect_keyspace(lsn, ctx).await?;
4070 204 : let dense_partitioning = dense_ks.partition(&self.shard_identity, partition_size);
4071 204 : let sparse_partitioning = SparseKeyPartitioning {
4072 204 : parts: vec![sparse_ks],
4073 204 : }; // no partitioning for metadata keys for now
4074 204 : *partitioning_guard = ((dense_partitioning, sparse_partitioning), lsn);
4075 204 :
4076 204 : Ok((partitioning_guard.0.clone(), partitioning_guard.1))
4077 432 : }
4078 :
4079 : // Is it time to create a new image layer for the given partition?
4080 14 : async fn time_for_new_image_layer(&self, partition: &KeySpace, lsn: Lsn) -> bool {
4081 14 : let threshold = self.get_image_creation_threshold();
4082 :
4083 14 : let guard = self.layers.read().await;
4084 14 : let layers = guard.layer_map();
4085 14 :
4086 14 : let mut max_deltas = 0;
4087 28 : for part_range in &partition.ranges {
4088 14 : let image_coverage = layers.image_coverage(part_range, lsn);
4089 28 : for (img_range, last_img) in image_coverage {
4090 14 : let img_lsn = if let Some(last_img) = last_img {
4091 0 : last_img.get_lsn_range().end
4092 : } else {
4093 14 : Lsn(0)
4094 : };
4095 : // Let's consider an example:
4096 : //
4097 : // delta layer with LSN range 71-81
4098 : // delta layer with LSN range 81-91
4099 : // delta layer with LSN range 91-101
4100 : // image layer at LSN 100
4101 : //
4102 : // If 'lsn' is still 100, i.e. no new WAL has been processed since the last image layer,
4103 : // there's no need to create a new one. We check this case explicitly, to avoid passing
4104 : // a bogus range to count_deltas below, with start > end. It's even possible that there
4105 : // are some delta layers *later* than current 'lsn', if more WAL was processed and flushed
4106 : // after we read last_record_lsn, which is passed here in the 'lsn' argument.
4107 14 : if img_lsn < lsn {
4108 14 : let num_deltas =
4109 14 : layers.count_deltas(&img_range, &(img_lsn..lsn), Some(threshold));
4110 14 :
4111 14 : max_deltas = max_deltas.max(num_deltas);
4112 14 : if num_deltas >= threshold {
4113 0 : debug!(
4114 0 : "key range {}-{}, has {} deltas on this timeline in LSN range {}..{}",
4115 : img_range.start, img_range.end, num_deltas, img_lsn, lsn
4116 : );
4117 0 : return true;
4118 14 : }
4119 0 : }
4120 : }
4121 : }
4122 :
4123 14 : debug!(
4124 : max_deltas,
4125 0 : "none of the partitioned ranges had >= {threshold} deltas"
4126 : );
4127 14 : false
4128 14 : }
4129 :
4130 1524 : #[tracing::instrument(skip_all, fields(%lsn, %mode))]
4131 : async fn create_image_layers(
4132 : self: &Arc<Timeline>,
4133 : partitioning: &KeyPartitioning,
4134 : lsn: Lsn,
4135 : mode: ImageLayerCreationMode,
4136 : ctx: &RequestContext,
4137 : ) -> Result<Vec<ResidentLayer>, CreateImageLayersError> {
4138 : let timer = self.metrics.create_images_time_histo.start_timer();
4139 : let mut image_layers = Vec::new();
4140 :
4141 : // We need to avoid holes between generated image layers.
4142 : // Otherwise LayerMap::image_layer_exists will return false if key range of some layer is covered by more than one
4143 : // image layer with hole between them. In this case such layer can not be utilized by GC.
4144 : //
4145 : // How such hole between partitions can appear?
4146 : // if we have relation with relid=1 and size 100 and relation with relid=2 with size 200 then result of
4147 : // KeySpace::partition may contain partitions <100000000..100000099> and <200000000..200000199>.
4148 : // If there is delta layer <100000000..300000000> then it never be garbage collected because
4149 : // image layers <100000000..100000099> and <200000000..200000199> are not completely covering it.
4150 : let mut start = Key::MIN;
4151 :
4152 : let check_for_image_layers = {
4153 : let last_checks_at = self.last_image_layer_creation_check_at.load();
4154 : let distance = lsn
4155 : .checked_sub(last_checks_at)
4156 : .expect("Attempt to compact with LSN going backwards");
4157 : let min_distance = self.get_image_layer_creation_check_threshold() as u64
4158 : * self.get_checkpoint_distance();
4159 :
4160 : // Skip the expensive delta layer counting if this timeline has not ingested sufficient
4161 : // WAL since the last check.
4162 : distance.0 >= min_distance
4163 : };
4164 :
4165 : if check_for_image_layers {
4166 : self.last_image_layer_creation_check_at.store(lsn);
4167 : }
4168 :
4169 : for partition in partitioning.parts.iter() {
4170 : let img_range = start..partition.ranges.last().unwrap().end;
4171 :
4172 : if partition.overlaps(&Key::metadata_key_range()) {
4173 : // TODO(chi): The next patch will correctly create image layers for metadata keys, and it would be a
4174 : // rather big change. Keep this patch small for now.
4175 : match mode {
4176 : ImageLayerCreationMode::Force | ImageLayerCreationMode::Try => {
4177 : // skip image layer creation anyways for metadata keys.
4178 : start = img_range.end;
4179 : continue;
4180 : }
4181 : ImageLayerCreationMode::Initial => {
4182 : return Err(CreateImageLayersError::Other(anyhow::anyhow!("no image layer should be created for metadata keys when flushing frozen layers")));
4183 : }
4184 : }
4185 : } else if let ImageLayerCreationMode::Try = mode {
4186 : // check_for_image_layers = false -> skip
4187 : // check_for_image_layers = true -> check time_for_new_image_layer -> skip/generate
4188 : if !check_for_image_layers || !self.time_for_new_image_layer(partition, lsn).await {
4189 : start = img_range.end;
4190 : continue;
4191 : }
4192 : }
4193 :
4194 : let mut image_layer_writer = ImageLayerWriter::new(
4195 : self.conf,
4196 : self.timeline_id,
4197 : self.tenant_shard_id,
4198 : &img_range,
4199 : lsn,
4200 : )
4201 : .await?;
4202 :
4203 0 : fail_point!("image-layer-writer-fail-before-finish", |_| {
4204 0 : Err(CreateImageLayersError::Other(anyhow::anyhow!(
4205 0 : "failpoint image-layer-writer-fail-before-finish"
4206 0 : )))
4207 0 : });
4208 :
4209 : let mut wrote_keys = false;
4210 :
4211 : let mut key_request_accum = KeySpaceAccum::new();
4212 : for range in &partition.ranges {
4213 : let mut key = range.start;
4214 : while key < range.end {
4215 : // Decide whether to retain this key: usually we do, but sharded tenants may
4216 : // need to drop keys that don't belong to them. If we retain the key, add it
4217 : // to `key_request_accum` for later issuing a vectored get
4218 : if self.shard_identity.is_key_disposable(&key) {
4219 : debug!(
4220 : "Dropping key {} during compaction (it belongs on shard {:?})",
4221 : key,
4222 : self.shard_identity.get_shard_number(&key)
4223 : );
4224 : } else {
4225 : key_request_accum.add_key(key);
4226 : }
4227 :
4228 : let last_key_in_range = key.next() == range.end;
4229 : key = key.next();
4230 :
4231 : // Maybe flush `key_request_accum`
4232 : if key_request_accum.raw_size() >= Timeline::MAX_GET_VECTORED_KEYS
4233 : || (last_key_in_range && key_request_accum.raw_size() > 0)
4234 : {
4235 : let results = self
4236 : .get_vectored(key_request_accum.consume_keyspace(), lsn, ctx)
4237 : .await?;
4238 :
4239 : for (img_key, img) in results {
4240 : let img = match img {
4241 : Ok(img) => img,
4242 : Err(err) => {
4243 : // If we fail to reconstruct a VM or FSM page, we can zero the
4244 : // page without losing any actual user data. That seems better
4245 : // than failing repeatedly and getting stuck.
4246 : //
4247 : // We had a bug at one point, where we truncated the FSM and VM
4248 : // in the pageserver, but Postgres didn't know about that
4249 : // and continued to generate incremental WAL records for pages
4250 : // that didn't exist in the pageserver. Trying to replay those
4251 : // WAL records failed to find the previous image of the page.
4252 : // This special case allows us to recover from that situation.
4253 : // See https://github.com/neondatabase/neon/issues/2601.
4254 : //
4255 : // Unfortunately we cannot do this for the main fork, or for
4256 : // any metadata keys, as that would lead to actual data
4257 : // loss.
4258 : if is_rel_fsm_block_key(img_key) || is_rel_vm_block_key(img_key)
4259 : {
4260 : warn!("could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}");
4261 : ZERO_PAGE.clone()
4262 : } else {
4263 : return Err(CreateImageLayersError::PageReconstructError(
4264 : err,
4265 : ));
4266 : }
4267 : }
4268 : };
4269 :
4270 : // Write all the keys we just read into our new image layer.
4271 : image_layer_writer.put_image(img_key, img, ctx).await?;
4272 : wrote_keys = true;
4273 : }
4274 : }
4275 : }
4276 : }
4277 :
4278 : if wrote_keys {
4279 : // Normal path: we have written some data into the new image layer for this
4280 : // partition, so flush it to disk.
4281 : start = img_range.end;
4282 : let image_layer = image_layer_writer.finish(self, ctx).await?;
4283 : image_layers.push(image_layer);
4284 : } else {
4285 : // Special case: the image layer may be empty if this is a sharded tenant and the
4286 : // partition does not cover any keys owned by this shard. In this case, to ensure
4287 : // we don't leave gaps between image layers, leave `start` where it is, so that the next
4288 : // layer we write will cover the key range that we just scanned.
4289 : tracing::debug!("no data in range {}-{}", img_range.start, img_range.end);
4290 : }
4291 : }
4292 :
4293 : // The writer.finish() above already did the fsync of the inodes.
4294 : // We just need to fsync the directory in which these inodes are linked,
4295 : // which we know to be the timeline directory.
4296 : if !image_layers.is_empty() {
4297 : // We use fatal_err() below because, after writer.finish() returns with success,
4298 : // the in-memory state of the filesystem already has the layer file in its final place,
4299 : // and subsequent pageserver code could think it's durable while it really isn't.
4300 : let timeline_dir = VirtualFile::open(
4301 : &self
4302 : .conf
4303 : .timeline_path(&self.tenant_shard_id, &self.timeline_id),
4304 : )
4305 : .await
4306 : .fatal_err("VirtualFile::open for timeline dir fsync");
4307 : timeline_dir
4308 : .sync_all()
4309 : .await
4310 : .fatal_err("VirtualFile::sync_all timeline dir");
4311 : }
4312 :
4313 : let mut guard = self.layers.write().await;
4314 :
4315 : // FIXME: we could add the images to be uploaded *before* returning from here, but right
4316 : // now they are being scheduled outside of the write lock
4317 : guard.track_new_image_layers(&image_layers, &self.metrics);
4318 : drop_wlock(guard);
4319 : timer.stop_and_record();
4320 :
4321 : Ok(image_layers)
4322 : }
4323 :
4324 : /// Wait until the background initial logical size calculation is complete, or
4325 : /// this Timeline is shut down. Calling this function will cause the initial
4326 : /// logical size calculation to skip waiting for the background jobs barrier.
4327 0 : pub(crate) async fn await_initial_logical_size(self: Arc<Self>) {
4328 0 : if let Some(await_bg_cancel) = self
4329 0 : .current_logical_size
4330 0 : .cancel_wait_for_background_loop_concurrency_limit_semaphore
4331 0 : .get()
4332 0 : {
4333 0 : await_bg_cancel.cancel();
4334 0 : } else {
4335 : // We should not wait if we were not able to explicitly instruct
4336 : // the logical size cancellation to skip the concurrency limit semaphore.
4337 : // TODO: this is an unexpected case. We should restructure so that it
4338 : // can't happen.
4339 0 : tracing::info!(
4340 0 : "await_initial_logical_size: can't get semaphore cancel token, skipping"
4341 : );
4342 : }
4343 :
4344 : tokio::select!(
4345 : _ = self.current_logical_size.initialized.acquire() => {},
4346 : _ = self.cancel.cancelled() => {}
4347 : )
4348 0 : }
4349 :
4350 : /// Detach this timeline from its ancestor by copying all of the ancestor's layers as this
4351 : /// timeline's layers, up to the ancestor_lsn.
4352 : ///
4353 : /// Requires a timeline that:
4354 : /// - has an ancestor to detach from
4355 : /// - the ancestor does not have an ancestor -- follows from the original RFC limitations, not
4356 : /// a technical requirement
4357 : /// - has prev_lsn in remote storage (temporary restriction)
4358 : ///
4359 : /// After the operation has been started, it cannot be canceled. Upon restart it needs to be
4360 : /// polled again until completion.
4361 : ///
4362 : /// During the operation all timelines sharing the data with this timeline will be reparented
4363 : /// from our ancestor to be branches of this timeline.
4364 0 : pub(crate) async fn prepare_to_detach_from_ancestor(
4365 0 : self: &Arc<Timeline>,
4366 0 : tenant: &crate::tenant::Tenant,
4367 0 : options: detach_ancestor::Options,
4368 0 : ctx: &RequestContext,
4369 0 : ) -> Result<
4370 0 : (
4371 0 : completion::Completion,
4372 0 : detach_ancestor::PreparedTimelineDetach,
4373 0 : ),
4374 0 : detach_ancestor::Error,
4375 0 : > {
4376 0 : detach_ancestor::prepare(self, tenant, options, ctx).await
4377 0 : }
4378 :
4379 : /// Completes the ancestor detach. This method is to be called while holding the
4380 : /// TenantManager's tenant slot, so during this method we cannot be deleted nor can any
4381 : /// timeline be deleted. After this method returns successfully, tenant must be reloaded.
4382 : ///
4383 : /// Pageserver receiving a SIGKILL during this operation is not supported (yet).
4384 0 : pub(crate) async fn complete_detaching_timeline_ancestor(
4385 0 : self: &Arc<Timeline>,
4386 0 : tenant: &crate::tenant::Tenant,
4387 0 : prepared: detach_ancestor::PreparedTimelineDetach,
4388 0 : ctx: &RequestContext,
4389 0 : ) -> Result<Vec<TimelineId>, anyhow::Error> {
4390 0 : detach_ancestor::complete(self, tenant, prepared, ctx).await
4391 0 : }
4392 : }
4393 :
4394 : /// Top-level failure to compact.
4395 0 : #[derive(Debug, thiserror::Error)]
4396 : pub(crate) enum CompactionError {
4397 : #[error("The timeline or pageserver is shutting down")]
4398 : ShuttingDown,
4399 : /// Compaction could not be completed, e.g. because of an error during page reconstruction.
4400 : #[error(transparent)]
4401 : Other(#[from] anyhow::Error),
4402 : }
4403 :
4404 : impl From<CollectKeySpaceError> for CompactionError {
4405 0 : fn from(err: CollectKeySpaceError) -> Self {
4406 0 : match err {
4407 : CollectKeySpaceError::Cancelled
4408 : | CollectKeySpaceError::PageRead(PageReconstructError::Cancelled) => {
4409 0 : CompactionError::ShuttingDown
4410 : }
4411 0 : e => CompactionError::Other(e.into()),
4412 : }
4413 0 : }
4414 : }
4415 :
4416 : #[serde_as]
4417 168 : #[derive(serde::Serialize)]
4418 : struct RecordedDuration(#[serde_as(as = "serde_with::DurationMicroSeconds")] Duration);
4419 :
4420 : #[derive(Default)]
4421 : enum DurationRecorder {
4422 : #[default]
4423 : NotStarted,
4424 : Recorded(RecordedDuration, tokio::time::Instant),
4425 : }
4426 :
4427 : impl DurationRecorder {
4428 450 : fn till_now(&self) -> DurationRecorder {
4429 450 : match self {
4430 : DurationRecorder::NotStarted => {
4431 0 : panic!("must only call on recorded measurements")
4432 : }
4433 450 : DurationRecorder::Recorded(_, ended) => {
4434 450 : let now = tokio::time::Instant::now();
4435 450 : DurationRecorder::Recorded(RecordedDuration(now - *ended), now)
4436 450 : }
4437 450 : }
4438 450 : }
4439 168 : fn into_recorded(self) -> Option<RecordedDuration> {
4440 168 : match self {
4441 0 : DurationRecorder::NotStarted => None,
4442 168 : DurationRecorder::Recorded(recorded, _) => Some(recorded),
4443 : }
4444 168 : }
4445 : }
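// The recorder above is designed for chaining: each `till_now()` measures the
// time elapsed since the previous recording ended, which is how per-phase
// durations are derived. The following is a minimal usage sketch added for
// illustration; it is not part of the original compaction code.
#[cfg(test)]
#[tokio::test]
async fn duration_recorder_chaining_sketch() {
    let start = DurationRecorder::Recorded(
        RecordedDuration(Duration::ZERO),
        tokio::time::Instant::now(),
    );
    tokio::time::sleep(Duration::from_millis(10)).await;
    // `till_now()` measures from the end of the previous recording.
    let phase1 = start.till_now();
    // `into_recorded()` yields the measured duration of the last phase.
    let recorded = phase1.into_recorded().expect("phase1 was recorded");
    assert!(recorded.0 >= Duration::from_millis(10));
}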
4446 :
4447 : impl Timeline {
4448 24 : async fn finish_compact_batch(
4449 24 : self: &Arc<Self>,
4450 24 : new_deltas: &[ResidentLayer],
4451 24 : new_images: &[ResidentLayer],
4452 24 : layers_to_remove: &[Layer],
4453 24 : ) -> anyhow::Result<()> {
4454 24 : let mut guard = self.layers.write().await;
4455 :
4456 24 : let mut duplicated_layers = HashSet::new();
4457 24 :
4458 24 : let mut insert_layers = Vec::with_capacity(new_deltas.len());
4459 :
4460 248 : for l in new_deltas {
4461 224 : if guard.contains(l.as_ref()) {
4462 : // expected in tests
4463 0 : tracing::error!(layer=%l, "duplicated L1 layer");
4464 :
4465 : // A good way to cause a duplicate: repeatedly erroring after taking the write lock
4466 : // `guard` on self.layers. As of this writing, there are no error returns except
4467 : // for compact_level0_phase1 creating an L0, which does not happen in practice
4468 : // because we have not implemented L0 => L0 compaction.
4469 0 : duplicated_layers.insert(l.layer_desc().key());
4470 224 : } else if LayerMap::is_l0(l.layer_desc()) {
4471 0 : bail!("compaction generates a L0 layer file as output, which will cause infinite compaction.");
4472 224 : } else {
4473 224 : insert_layers.push(l.clone());
4474 224 : }
4475 : }
4476 :
4477 : // only remove those inputs which were not outputs
4478 24 : let remove_layers: Vec<Layer> = layers_to_remove
4479 24 : .iter()
4480 262 : .filter(|l| !duplicated_layers.contains(&l.layer_desc().key()))
4481 24 : .cloned()
4482 24 : .collect();
4483 24 :
4484 24 : if !new_images.is_empty() {
4485 0 : guard.track_new_image_layers(new_images, &self.metrics);
4486 24 : }
4487 :
4488 : // deletion will happen later, the layer file manager calls garbage_collect_on_drop
4489 24 : guard.finish_compact_l0(&remove_layers, &insert_layers, &self.metrics);
4490 :
4491 24 : if let Some(remote_client) = self.remote_client.as_ref() {
4492 24 : remote_client.schedule_compaction_update(&remove_layers, new_deltas)?;
4493 0 : }
4494 :
4495 24 : drop_wlock(guard);
4496 24 :
4497 24 : Ok(())
4498 24 : }
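    // The dedup rule above, in isolation (an illustrative sketch, not code
    // that runs here): inputs that reappear as outputs must be dropped from
    // the removal set, or we would delete a layer we just (re-)produced.
    //
    //     let outputs: HashSet<&str> = ["b"].into_iter().collect();
    //     let inputs = ["a", "b", "c"];
    //     let removed: Vec<_> = inputs.iter().filter(|l| !outputs.contains(*l)).collect();
    //     assert_eq!(removed, [&"a", &"c"]);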
4499 :
4500 0 : async fn rewrite_layers(
4501 0 : self: &Arc<Self>,
4502 0 : replace_layers: Vec<(Layer, ResidentLayer)>,
4503 0 : drop_layers: Vec<Layer>,
4504 0 : ) -> anyhow::Result<()> {
4505 0 : let mut guard = self.layers.write().await;
4506 :
4507 0 : guard.rewrite_layers(&replace_layers, &drop_layers, &self.metrics);
4508 0 :
4509 0 : let upload_layers: Vec<_> = replace_layers.into_iter().map(|r| r.1).collect();
4510 :
4511 0 : if let Some(remote_client) = self.remote_client.as_ref() {
4512 0 : remote_client.schedule_compaction_update(&drop_layers, &upload_layers)?;
4513 0 : }
4514 :
4515 0 : Ok(())
4516 0 : }
4517 :
4518 : /// Schedules the uploads of the given image layers
4519 330 : fn upload_new_image_layers(
4520 330 : self: &Arc<Self>,
4521 330 : new_images: impl IntoIterator<Item = ResidentLayer>,
4522 330 : ) -> anyhow::Result<()> {
4523 330 : let Some(remote_client) = &self.remote_client else {
4524 0 : return Ok(());
4525 : };
4526 330 : for layer in new_images {
4527 0 : remote_client.schedule_layer_file_upload(layer)?;
4528 : }
4529 : // Should any new image layer have been created, not uploading index_part will
4530 : // result in a mismatch between remote_physical_size and the layer-map-calculated
4531 : // size, which will fail some tests but should not be an issue otherwise.
4532 330 : remote_client.schedule_index_upload_for_file_changes()?;
4533 330 : Ok(())
4534 330 : }
4535 :
4536 : /// Find the Lsns above which layer files need to be retained on
4537 : /// garbage collection. This is separate from actually performing the GC,
4538 : /// and is updated more frequently, so that compaction can remove obsolete
4539 : /// page versions more aggressively.
4540 : ///
4541 : /// TODO: that's wishful thinking, compaction doesn't actually do that
4542 : /// currently.
4543 : ///
4544 : /// The 'cutoff_horizon' point is used to retain recent versions that might still be
4545 : /// needed by read-only nodes. (As of this writing, the caller just passes
4546 : /// the latest LSN subtracted by a constant, and doesn't do anything smart
4547 : /// to figure out what read-only nodes might actually need.)
4548 : ///
4549 : /// The 'pitr' duration is used to calculate a 'pitr_cutoff', which can be used to determine
4550 : /// whether a record is needed for PITR.
4551 1456 : #[instrument(skip_all, fields(timeline_id=%self.timeline_id))]
4552 : pub(super) async fn find_gc_cutoffs(
4553 : &self,
4554 : cutoff_horizon: Lsn,
4555 : pitr: Duration,
4556 : cancel: &CancellationToken,
4557 : ctx: &RequestContext,
4558 : ) -> anyhow::Result<GcCutoffs> {
4559 : let _timer = self
4560 : .metrics
4561 : .find_gc_cutoffs_histo
4562 : .start_timer()
4563 : .record_on_drop();
4564 :
4565 : pausable_failpoint!("Timeline::find_gc_cutoffs-pausable");
4566 :
4567 : // First, calculate pitr_cutoff_timestamp and then convert it to LSN.
4568 : //
4569 : // Some unit tests depend on garbage-collection working even when
4570 : // CLOG data is missing, in which case find_lsn_for_timestamp() doesn't
4571 : // work. So avoid calling it altogether if time-based retention is not
4572 : // configured. It would be pointless anyway.
4573 : let pitr_cutoff = if pitr != Duration::ZERO {
4574 : let now = SystemTime::now();
4575 : if let Some(pitr_cutoff_timestamp) = now.checked_sub(pitr) {
4576 : let pitr_timestamp = to_pg_timestamp(pitr_cutoff_timestamp);
4577 :
4578 : match self
4579 : .find_lsn_for_timestamp(pitr_timestamp, cancel, ctx)
4580 : .await?
4581 : {
4582 : LsnForTimestamp::Present(lsn) => lsn,
4583 : LsnForTimestamp::Future(lsn) => {
4584 : // The timestamp is in the future. That sounds impossible,
4585 : // but what it really means is that there haven't been
4586 : // any commits since the cutoff timestamp.
4587 : //
4588 : // In this case we should use the LSN of the most recent commit,
4589 : // which is implicitly the last LSN in the log.
4590 : debug!("future({})", lsn);
4591 : self.get_last_record_lsn()
4592 : }
4593 : LsnForTimestamp::Past(lsn) => {
4594 : debug!("past({})", lsn);
4595 : // conservative, safe default is to remove nothing, when we
4596 : // have no commit timestamp data available
4597 : *self.get_latest_gc_cutoff_lsn()
4598 : }
4599 : LsnForTimestamp::NoData(lsn) => {
4600 : debug!("nodata({})", lsn);
4601 : // conservative, safe default is to remove nothing, when we
4602 : // have no commit timestamp data available
4603 : *self.get_latest_gc_cutoff_lsn()
4604 : }
4605 : }
4606 : } else {
4607 : // If we don't have enough data to convert to LSN,
4608 : // play safe and don't remove any layers.
4609 : *self.get_latest_gc_cutoff_lsn()
4610 : }
4611 : } else {
4612 : // No time-based retention was configured. Interpret this as "keep no history".
4613 : self.get_last_record_lsn()
4614 : };
4615 :
4616 : Ok(GcCutoffs {
4617 : horizon: cutoff_horizon,
4618 : pitr: pitr_cutoff,
4619 : })
4620 : }
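    // The cutoff-timestamp arithmetic above, spelled out (an illustrative
    // sketch only): the PITR cutoff is "now minus the retention window", and
    // `checked_sub` guards against a window reaching back past the epoch.
    //
    //     let pitr = Duration::from_secs(7 * 24 * 3600); // e.g. a seven-day window
    //     let now = SystemTime::now();
    //     if let Some(cutoff_timestamp) = now.checked_sub(pitr) {
    //         let pitr_timestamp = to_pg_timestamp(cutoff_timestamp);
    //         // ... then find_lsn_for_timestamp(pitr_timestamp, ...) as above
    //     }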
4621 :
4622 : /// Garbage collect layer files on a timeline that are no longer needed.
4623 : ///
4624 : /// Currently, we don't make any attempt at removing unneeded page versions
4625 : /// within a layer file. We can only remove the whole file if it's fully
4626 : /// obsolete.
4627 728 : pub(super) async fn gc(&self) -> anyhow::Result<GcResult> {
4628 728 : // this is most likely the background tasks, but it might be the spawned task from
4629 728 : // immediate_gc
4630 728 : let cancel = crate::task_mgr::shutdown_token();
4631 728 : let _g = tokio::select! {
4632 : guard = self.gc_lock.lock() => guard,
4633 : _ = self.cancel.cancelled() => return Ok(GcResult::default()),
4634 : _ = cancel.cancelled() => return Ok(GcResult::default()),
4635 : };
4636 728 : let timer = self.metrics.garbage_collect_histo.start_timer();
4637 :
4638 : fail_point!("before-timeline-gc");
4639 :
4640 : // Is the timeline being deleted?
4641 728 : if self.is_stopping() {
4642 0 : anyhow::bail!("timeline is Stopping");
4643 728 : }
4644 728 :
4645 728 : let (horizon_cutoff, pitr_cutoff, retain_lsns) = {
4646 728 : let gc_info = self.gc_info.read().unwrap();
4647 728 :
4648 728 : let horizon_cutoff = min(gc_info.cutoffs.horizon, self.get_disk_consistent_lsn());
4649 728 : let pitr_cutoff = gc_info.cutoffs.pitr;
4650 728 : let retain_lsns = gc_info.retain_lsns.clone();
4651 728 : (horizon_cutoff, pitr_cutoff, retain_lsns)
4652 728 : };
4653 728 :
4654 728 : let new_gc_cutoff = Lsn::min(horizon_cutoff, pitr_cutoff);
4655 :
4656 728 : let res = self
4657 728 : .gc_timeline(horizon_cutoff, pitr_cutoff, retain_lsns, new_gc_cutoff)
4658 728 : .instrument(
4659 728 : info_span!("gc_timeline", timeline_id = %self.timeline_id, cutoff = %new_gc_cutoff),
4660 : )
4661 0 : .await?;
4662 :
4663 : // only record successes
4664 728 : timer.stop_and_record();
4665 728 :
4666 728 : Ok(res)
4667 728 : }
4668 :
4669 728 : async fn gc_timeline(
4670 728 : &self,
4671 728 : horizon_cutoff: Lsn,
4672 728 : pitr_cutoff: Lsn,
4673 728 : retain_lsns: Vec<Lsn>,
4674 728 : new_gc_cutoff: Lsn,
4675 728 : ) -> anyhow::Result<GcResult> {
4676 728 : // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc
4677 728 :
4678 728 : let now = SystemTime::now();
4679 728 : let mut result: GcResult = GcResult::default();
4680 728 :
4681 728 : // Nothing to GC. Return early.
4682 728 : let latest_gc_cutoff = *self.get_latest_gc_cutoff_lsn();
4683 728 : if latest_gc_cutoff >= new_gc_cutoff {
4684 0 : info!(
4685 0 : "Nothing to GC: new_gc_cutoff_lsn {new_gc_cutoff}, latest_gc_cutoff_lsn {latest_gc_cutoff}",
4686 : );
4687 0 : return Ok(result);
4688 728 : }
4689 :
4690 : // We need to ensure that no one tries to read page versions or create
4691 : // branches at a point before latest_gc_cutoff_lsn. See branch_timeline()
4692 : // for details. This will block until the old value is no longer in use.
4693 : //
4694 : // The GC cutoff should only ever move forwards.
4695 728 : let waitlist = {
4696 728 : let write_guard = self.latest_gc_cutoff_lsn.lock_for_write();
4697 728 : ensure!(
4698 728 : *write_guard <= new_gc_cutoff,
4699 0 : "Cannot move GC cutoff LSN backwards (was {}, new {})",
4700 0 : *write_guard,
4701 : new_gc_cutoff
4702 : );
4703 728 : write_guard.store_and_unlock(new_gc_cutoff)
4704 728 : };
4705 728 : waitlist.wait().await;
4706 :
4707 728 : info!("GC starting");
4708 :
4709 728 : debug!("retain_lsns: {:?}", retain_lsns);
4710 :
4711 728 : let mut layers_to_remove = Vec::new();
4712 :
4713 : // Scan all layers in the timeline (remote or on-disk).
4714 : //
4715 : // Garbage collect the layer if all conditions are satisfied:
4716 : // 1. it is older than cutoff LSN;
4717 : // 2. it is older than PITR interval;
4718 : // 3. it doesn't need to be retained for 'retain_lsns';
4719 : // 4. newer on-disk image layers cover the layer's whole key range
4720 : //
4721 : // TODO: holding a write lock is too aggressive and avoidable
4722 728 : let mut guard = self.layers.write().await;
4723 728 : let layers = guard.layer_map();
4724 12356 : 'outer: for l in layers.iter_historic_layers() {
4725 12356 : result.layers_total += 1;
4726 12356 :
4727 12356 : // 1. Is it newer than the GC horizon cutoff point?
4728 12356 : if l.get_lsn_range().end > horizon_cutoff {
4729 728 : debug!(
4730 0 : "keeping {} because it's newer than horizon_cutoff {}",
4731 0 : l.layer_name(),
4732 : horizon_cutoff,
4733 : );
4734 728 : result.layers_needed_by_cutoff += 1;
4735 728 : continue 'outer;
4736 11628 : }
4737 11628 :
4738 11628 : // 2. Is it newer than the PiTR cutoff point?
4739 11628 : if l.get_lsn_range().end > pitr_cutoff {
4740 0 : debug!(
4741 0 : "keeping {} because it's newer than pitr_cutoff {}",
4742 0 : l.layer_name(),
4743 : pitr_cutoff,
4744 : );
4745 0 : result.layers_needed_by_pitr += 1;
4746 0 : continue 'outer;
4747 11628 : }
4748 :
4749 : // 3. Is it needed by a child branch?
4750 : // NOTE: with this check, we would keep data that
4751 : // might be referenced by child branches forever.
4752 : // We can track this in child timeline GC and delete parent layers when
4753 : // they are no longer needed. This might be complicated with long inheritance chains.
4754 : //
4755 : // TODO Vec is not a great choice for `retain_lsns`
4756 11628 : for retain_lsn in &retain_lsns {
4757 : // start_lsn is inclusive
4758 12 : if &l.get_lsn_range().start <= retain_lsn {
4759 12 : debug!(
4760 0 : "keeping {} because it's still might be referenced by child branch forked at {} is_dropped: xx is_incremental: {}",
4761 0 : l.layer_name(),
4762 0 : retain_lsn,
4763 0 : l.is_incremental(),
4764 : );
4765 12 : result.layers_needed_by_branches += 1;
4766 12 : continue 'outer;
4767 0 : }
4768 : }
4769 :
4770 : // 4. Is there a later on-disk layer for this relation?
4771 : //
4772 : // The end-LSN is exclusive, while disk_consistent_lsn is
4773 : // inclusive. For example, if disk_consistent_lsn is 100, it is
4774 : // OK for a delta layer to have end LSN 101, but if the end LSN
4775 : // is 102, then it might not have been fully flushed to disk
4776 : // before crash.
4777 : //
4778 : // For example, imagine that the following layers exist:
4779 : //
4780 : // 1000 - image (A)
4781 : // 1000-2000 - delta (B)
4782 : // 2000 - image (C)
4783 : // 2000-3000 - delta (D)
4784 : // 3000 - image (E)
4785 : //
4786 : // If GC horizon is at 2500, we can remove layers A and B, but
4787 : // we cannot remove C, even though it's older than 2500, because
4788 : // the delta layer 2000-3000 depends on it.
4789 11616 : if !layers
4790 11616 : .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff))
4791 : {
4792 11616 : debug!("keeping {} because it is the latest layer", l.layer_name());
4793 11616 : result.layers_not_updated += 1;
4794 11616 : continue 'outer;
4795 0 : }
4796 0 :
4797 0 : // We didn't find any reason to keep this file, so remove it.
4798 0 : debug!(
4799 0 : "garbage collecting {} is_dropped: xx is_incremental: {}",
4800 0 : l.layer_name(),
4801 0 : l.is_incremental(),
4802 : );
4803 0 : layers_to_remove.push(l);
4804 : }
4805 :
4806 728 : if !layers_to_remove.is_empty() {
4807 : // Persist the new GC cutoff value before we actually remove anything.
4808 : // This unconditionally schedules also an index_part.json update, even though, we will
4809 : // be doing one a bit later with the unlinked gc'd layers.
4810 0 : let disk_consistent_lsn = self.disk_consistent_lsn.load();
4811 0 : self.schedule_uploads(disk_consistent_lsn, None)?;
4812 :
4813 0 : let gc_layers = layers_to_remove
4814 0 : .iter()
4815 0 : .map(|x| guard.get_from_desc(x))
4816 0 : .collect::<Vec<Layer>>();
4817 0 :
4818 0 : result.layers_removed = gc_layers.len() as u64;
4819 :
4820 0 : if let Some(remote_client) = self.remote_client.as_ref() {
4821 0 : remote_client.schedule_gc_update(&gc_layers)?;
4822 0 : }
4823 :
4824 0 : guard.finish_gc_timeline(&gc_layers);
4825 0 :
4826 0 : #[cfg(feature = "testing")]
4827 0 : {
4828 0 : result.doomed_layers = gc_layers;
4829 0 : }
4830 728 : }
4831 :
4832 728 : info!(
4833 0 : "GC completed removing {} layers, cutoff {}",
4834 : result.layers_removed, new_gc_cutoff
4835 : );
4836 :
4837 728 : result.elapsed = now.elapsed()?;
4838 728 : Ok(result)
4839 728 : }
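    // Condition 4 above, reduced to the A..E example from the comments in
    // `gc_timeline` (an illustrative sketch that ignores key ranges and
    // retain_lsns; LSNs are the example's): a layer is only removable if it
    // ends at or below the cutoff AND a newer image exists below the cutoff.
    //
    //     let images = [1000u64, 2000, 3000]; // image layers A, C, E
    //     let cutoff = 2500u64; // new_gc_cutoff
    //     let removable = |end_lsn: u64| {
    //         end_lsn <= cutoff && images.iter().any(|&img| img >= end_lsn && img < cutoff)
    //     };
    //     assert!(removable(1001));  // image A: superseded by image C
    //     assert!(removable(2000));  // delta B (1000-2000): covered by image C
    //     assert!(!removable(2001)); // image C: delta D still depends on it
    //     assert!(!removable(3000)); // delta D: ends above the cutoff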
4840 :
4841 : /// Reconstruct a value, using the given base image and WAL records in 'data'.
4842 643177 : async fn reconstruct_value(
4843 643177 : &self,
4844 643177 : key: Key,
4845 643177 : request_lsn: Lsn,
4846 643177 : mut data: ValueReconstructState,
4847 643177 : ) -> Result<Bytes, PageReconstructError> {
4848 643177 : // Perform WAL redo if needed
4849 643177 : data.records.reverse();
4850 643177 :
4851 643177 : // If we have a page image, and no WAL, we're all set
4852 643177 : if data.records.is_empty() {
4853 643171 : if let Some((img_lsn, img)) = &data.img {
4854 643171 : trace!(
4855 0 : "found page image for key {} at {}, no WAL redo required, req LSN {}",
4856 : key,
4857 : img_lsn,
4858 : request_lsn,
4859 : );
4860 643171 : Ok(img.clone())
4861 : } else {
4862 0 : Err(PageReconstructError::from(anyhow!(
4863 0 : "base image for {key} at {request_lsn} not found"
4864 0 : )))
4865 : }
4866 : } else {
4867 : // We need to do WAL redo.
4868 : //
4869 : // If we don't have a base image, then the oldest WAL record better initialize
4870 : // the page
4871 6 : if data.img.is_none() && !data.records.first().unwrap().1.will_init() {
4872 0 : Err(PageReconstructError::from(anyhow!(
4873 0 : "Base image for {} at {} not found, but got {} WAL records",
4874 0 : key,
4875 0 : request_lsn,
4876 0 : data.records.len()
4877 0 : )))
4878 : } else {
4879 6 : if data.img.is_some() {
4880 6 : trace!(
4881 0 : "found {} WAL records and a base image for {} at {}, performing WAL redo",
4882 0 : data.records.len(),
4883 : key,
4884 : request_lsn
4885 : );
4886 : } else {
4887 0 : trace!("found {} WAL records that will init the page for {} at {}, performing WAL redo", data.records.len(), key, request_lsn);
4888 : };
4889 :
4890 6 : let last_rec_lsn = data.records.last().unwrap().0;
4891 :
4892 6 : let img = match self
4893 6 : .walredo_mgr
4894 6 : .as_ref()
4895 6 : .context("timeline has no walredo manager")
4896 6 : .map_err(PageReconstructError::WalRedo)?
4897 6 : .request_redo(key, request_lsn, data.img, data.records, self.pg_version)
4898 0 : .await
4899 6 : .context("reconstruct a page image")
4900 : {
4901 6 : Ok(img) => img,
4902 0 : Err(e) => return Err(PageReconstructError::WalRedo(e)),
4903 : };
4904 :
4905 6 : if img.len() == page_cache::PAGE_SZ {
4906 0 : let cache = page_cache::get();
4907 0 : if let Err(e) = cache
4908 0 : .memorize_materialized_page(
4909 0 : self.tenant_shard_id,
4910 0 : self.timeline_id,
4911 0 : key,
4912 0 : last_rec_lsn,
4913 0 : &img,
4914 0 : )
4915 0 : .await
4916 0 : .context("Materialized page memoization failed")
4917 : {
4918 0 : return Err(PageReconstructError::from(e));
4919 0 : }
4920 6 : }
4921 :
4922 6 : Ok(img)
4923 : }
4924 : }
4925 643177 : }
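    // The decision tree above, condensed (an illustrative sketch; names are
    // stand-ins, not the actual types): an image with no records is returned
    // as-is, records require either a base image or a first record that
    // initializes the page, and anything else is an error.
    //
    //     fn decide(have_img: bool, records: usize, first_will_init: bool) -> &'static str {
    //         match (records, have_img) {
    //             (0, true) => "use image as-is",
    //             (0, false) => "error: no base image",
    //             (_, true) => "wal redo on top of image",
    //             (_, false) if first_will_init => "wal redo from scratch",
    //             _ => "error: records but no base image",
    //         }
    //     }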
4926 :
4927 0 : pub(crate) async fn spawn_download_all_remote_layers(
4928 0 : self: Arc<Self>,
4929 0 : request: DownloadRemoteLayersTaskSpawnRequest,
4930 0 : ) -> Result<DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskInfo> {
4931 0 : use pageserver_api::models::DownloadRemoteLayersTaskState;
4932 0 :
4933 0 : // This is not really needed anymore; there are tests which check the return
4934 0 : // value through the HTTP API. It would be better not to maintain this.
4935 0 :
4936 0 : let mut status_guard = self.download_all_remote_layers_task_info.write().unwrap();
4937 0 : if let Some(st) = &*status_guard {
4938 0 : match &st.state {
4939 : DownloadRemoteLayersTaskState::Running => {
4940 0 : return Err(st.clone());
4941 : }
4942 : DownloadRemoteLayersTaskState::ShutDown
4943 0 : | DownloadRemoteLayersTaskState::Completed => {
4944 0 : *status_guard = None;
4945 0 : }
4946 : }
4947 0 : }
4948 :
4949 0 : let self_clone = Arc::clone(&self);
4950 0 : let task_id = task_mgr::spawn(
4951 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
4952 0 : task_mgr::TaskKind::DownloadAllRemoteLayers,
4953 0 : Some(self.tenant_shard_id),
4954 0 : Some(self.timeline_id),
4955 0 : "download all remote layers task",
4956 : false,
4957 0 : async move {
4958 0 : self_clone.download_all_remote_layers(request).await;
4959 0 : let mut status_guard = self_clone.download_all_remote_layers_task_info.write().unwrap();
4960 0 : match &mut *status_guard {
4961 : None => {
4962 0 : warn!("tasks status is supposed to be Some(), since we are running");
4963 : }
4964 0 : Some(st) => {
4965 0 : let exp_task_id = format!("{}", task_mgr::current_task_id().unwrap());
4966 0 : if st.task_id != exp_task_id {
4967 0 : warn!("task id changed while we were still running, expecting {} but have {}", exp_task_id, st.task_id);
4968 0 : } else {
4969 0 : st.state = DownloadRemoteLayersTaskState::Completed;
4970 0 : }
4971 : }
4972 : };
4973 0 : Ok(())
4974 0 : }
4975 0 : .instrument(info_span!(parent: None, "download_all_remote_layers", tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))
4976 : );
4977 :
4978 0 : let initial_info = DownloadRemoteLayersTaskInfo {
4979 0 : task_id: format!("{task_id}"),
4980 0 : state: DownloadRemoteLayersTaskState::Running,
4981 0 : total_layer_count: 0,
4982 0 : successful_download_count: 0,
4983 0 : failed_download_count: 0,
4984 0 : };
4985 0 : *status_guard = Some(initial_info.clone());
4986 0 :
4987 0 : Ok(initial_info)
4988 0 : }
4989 :
4990 0 : async fn download_all_remote_layers(
4991 0 : self: &Arc<Self>,
4992 0 : request: DownloadRemoteLayersTaskSpawnRequest,
4993 0 : ) {
4994 : use pageserver_api::models::DownloadRemoteLayersTaskState;
4995 :
4996 0 : let remaining = {
4997 0 : let guard = self.layers.read().await;
4998 0 : guard
4999 0 : .layer_map()
5000 0 : .iter_historic_layers()
5001 0 : .map(|desc| guard.get_from_desc(&desc))
5002 0 : .collect::<Vec<_>>()
5003 0 : };
5004 0 : let total_layer_count = remaining.len();
5005 0 :
5006 0 : macro_rules! lock_status {
5007 0 : ($st:ident) => {
5008 0 : let mut st = self.download_all_remote_layers_task_info.write().unwrap();
5009 0 : let st = st
5010 0 : .as_mut()
5011 0 : .expect("this function is only called after the task has been spawned");
5012 0 : assert_eq!(
5013 0 : st.task_id,
5014 0 : format!(
5015 0 : "{}",
5016 0 : task_mgr::current_task_id().expect("we run inside a task_mgr task")
5017 0 : )
5018 0 : );
5019 0 : let $st = st;
5020 0 : };
5021 0 : }
5022 0 :
5023 0 : {
5024 0 : lock_status!(st);
5025 0 : st.total_layer_count = total_layer_count as u64;
5026 0 : }
5027 0 :
5028 0 : let mut remaining = remaining.into_iter();
5029 0 : let mut have_remaining = true;
5030 0 : let mut js = tokio::task::JoinSet::new();
5031 0 :
5032 0 : let cancel = task_mgr::shutdown_token();
5033 0 :
5034 0 : let limit = request.max_concurrent_downloads;
5035 :
5036 : loop {
5037 0 : while js.len() < limit.get() && have_remaining && !cancel.is_cancelled() {
5038 0 : let Some(next) = remaining.next() else {
5039 0 : have_remaining = false;
5040 0 : break;
5041 : };
5042 :
5043 0 : let span = tracing::info_span!("download", layer = %next);
5044 :
5045 0 : js.spawn(
5046 0 : async move {
5047 0 : let res = next.download().await;
5048 0 : (next, res)
5049 0 : }
5050 0 : .instrument(span),
5051 0 : );
5052 : }
5053 :
5054 0 : while let Some(res) = js.join_next().await {
5055 0 : match res {
5056 : Ok((_, Ok(_))) => {
5057 0 : lock_status!(st);
5058 0 : st.successful_download_count += 1;
5059 : }
5060 0 : Ok((layer, Err(e))) => {
5061 0 : tracing::error!(%layer, "download failed: {e:#}");
5062 0 : lock_status!(st);
5063 0 : st.failed_download_count += 1;
5064 : }
5065 0 : Err(je) if je.is_cancelled() => unreachable!("not used here"),
5066 0 : Err(je) if je.is_panic() => {
5067 0 : lock_status!(st);
5068 0 : st.failed_download_count += 1;
5069 : }
5070 0 : Err(je) => tracing::warn!("unknown join error: {je:?}"),
5071 : }
5072 : }
5073 :
5074 0 : if js.is_empty() && (!have_remaining || cancel.is_cancelled()) {
5075 0 : break;
5076 0 : }
5077 : }
5078 :
5079 : {
5080 0 : lock_status!(st);
5081 0 : st.state = DownloadRemoteLayersTaskState::Completed;
5082 0 : }
5083 0 : }
5084 :
5085 0 : pub(crate) fn get_download_all_remote_layers_task_info(
5086 0 : &self,
5087 0 : ) -> Option<DownloadRemoteLayersTaskInfo> {
5088 0 : self.download_all_remote_layers_task_info
5089 0 : .read()
5090 0 : .unwrap()
5091 0 : .clone()
5092 0 : }
5093 : }
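// The download loop in `download_all_remote_layers` above is an instance of a
// simple bounded-concurrency pattern: keep at most `limit` tasks in a JoinSet,
// drain completions, and stop once both the input and the set are exhausted.
// A self-contained sketch of the same pattern follows (illustrative, added
// here; it is not used by the code above).
#[cfg(test)]
#[tokio::test]
async fn bounded_joinset_sketch() {
    let limit = 3;
    let mut inputs = (0..10u64).peekable();
    let mut js = tokio::task::JoinSet::new();
    let mut completed = 0;
    while inputs.peek().is_some() || !js.is_empty() {
        // Top up to the concurrency limit.
        while js.len() < limit {
            match inputs.next() {
                Some(i) => {
                    js.spawn(async move { i * 2 });
                }
                None => break,
            }
        }
        // Drain one completion before topping up again.
        if let Some(res) = js.join_next().await {
            res.expect("task panicked");
            completed += 1;
        }
    }
    assert_eq!(completed, 10);
}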
5094 :
5095 : impl Timeline {
5096 : /// Returns non-remote layers for eviction.
5097 0 : pub(crate) async fn get_local_layers_for_disk_usage_eviction(&self) -> DiskUsageEvictionInfo {
5098 0 : let guard = self.layers.read().await;
5099 0 : let mut max_layer_size: Option<u64> = None;
5100 0 :
5101 0 : let resident_layers = guard
5102 0 : .likely_resident_layers()
5103 0 : .map(|layer| {
5104 0 : let file_size = layer.layer_desc().file_size;
5105 0 : max_layer_size = max_layer_size.map_or(Some(file_size), |m| Some(m.max(file_size)));
5106 0 :
5107 0 : let last_activity_ts = layer.access_stats().latest_activity_or_now();
5108 0 :
5109 0 : EvictionCandidate {
5110 0 : layer: layer.into(),
5111 0 : last_activity_ts,
5112 0 : relative_last_activity: finite_f32::FiniteF32::ZERO,
5113 0 : }
5114 0 : })
5115 0 : .collect();
5116 0 :
5117 0 : DiskUsageEvictionInfo {
5118 0 : max_layer_size,
5119 0 : resident_layers,
5120 0 : }
5121 0 : }
5122 :
5123 1184 : pub(crate) fn get_shard_index(&self) -> ShardIndex {
5124 1184 : ShardIndex {
5125 1184 : shard_number: self.tenant_shard_id.shard_number,
5126 1184 : shard_count: self.tenant_shard_id.shard_count,
5127 1184 : }
5128 1184 : }
5129 : }
5130 :
5131 : type TraversalPathItem = (ValueReconstructResult, Lsn, TraversalId);
5132 :
5133 : struct TimelineWriterState {
5134 : open_layer: Arc<InMemoryLayer>,
5135 : current_size: u64,
5136 : // Previous Lsn which passed through
5137 : prev_lsn: Option<Lsn>,
5138 : // Largest Lsn which passed through the current writer
5139 : max_lsn: Option<Lsn>,
5140 : // Cached details of the last freeze. Avoids going through the atomic/lock on every put.
5141 : cached_last_freeze_at: Lsn,
5142 : }
5143 :
5144 : impl TimelineWriterState {
5145 4780028 : fn new(open_layer: Arc<InMemoryLayer>, current_size: u64, last_freeze_at: Lsn) -> Self {
5146 4780028 : Self {
5147 4780028 : open_layer,
5148 4780028 : current_size,
5149 4780028 : prev_lsn: None,
5150 4780028 : max_lsn: None,
5151 4780028 : cached_last_freeze_at: last_freeze_at,
5152 4780028 : }
5153 4780028 : }
5154 : }
5155 :
5156 : /// Various functions to mutate the timeline.
5157 : // TODO Currently, Deref is used to allow easy access to read methods from this trait.
5158 : // This is probably considered a bad practice in Rust and should be fixed eventually,
5159 : // but will cause large code changes.
5160 : pub(crate) struct TimelineWriter<'a> {
5161 : tl: &'a Timeline,
5162 : write_guard: tokio::sync::MutexGuard<'a, Option<TimelineWriterState>>,
5163 : }
5164 :
5165 : impl Deref for TimelineWriter<'_> {
5166 : type Target = Timeline;
5167 :
5168 4782682 : fn deref(&self) -> &Self::Target {
5169 4782682 : self.tl
5170 4782682 : }
5171 : }
5172 :
5173 : impl Drop for TimelineWriter<'_> {
5174 5109044 : fn drop(&mut self) {
5175 5109044 : self.write_guard.take();
5176 5109044 : }
5177 : }
5178 :
5179 : #[derive(PartialEq)]
5180 : enum OpenLayerAction {
5181 : Roll,
5182 : Open,
5183 : None,
5184 : }
5185 :
5186 : impl<'a> TimelineWriter<'a> {
5187 : /// Put a new page version that can be constructed from a WAL record
5188 : ///
5189 : /// This will implicitly extend the relation, if the page is beyond the
5190 : /// current end-of-file.
5191 5066038 : pub(crate) async fn put(
5192 5066038 : &mut self,
5193 5066038 : key: Key,
5194 5066038 : lsn: Lsn,
5195 5066038 : value: &Value,
5196 5066038 : ctx: &RequestContext,
5197 5066038 : ) -> anyhow::Result<()> {
5198 5066038 : // Avoid doing allocations for "small" values.
5199 5066038 : // In the regression test suite, the limit of 256 avoided allocations in 95% of cases:
5200 5066038 : // https://github.com/neondatabase/neon/pull/5056#discussion_r1301975061
5201 5066038 : let mut buf = smallvec::SmallVec::<[u8; 256]>::new();
5202 5066038 : value.ser_into(&mut buf)?;
5203 5066038 : let buf_size: u64 = buf.len().try_into().expect("oversized value buf");
5204 5066038 :
5205 5066038 : let action = self.get_open_layer_action(lsn, buf_size);
5206 5066038 : let layer = self.handle_open_layer_action(lsn, action).await?;
5207 5066038 : let res = layer.put_value(key, lsn, &buf, ctx).await;
5208 :
5209 5066038 : if res.is_ok() {
5210 5066038 : // Update the current size only when the entire write was ok.
5211 5066038 : // In case of failures, we may have had partial writes which
5212 5066038 : // render the size tracking out of sync. That's ok because
5213 5066038 : // the checkpoint distance should be significantly smaller
5214 5066038 : // than the S3 single shot upload limit of 5GiB.
5215 5066038 : let state = self.write_guard.as_mut().unwrap();
5216 5066038 :
5217 5066038 : state.current_size += buf_size;
5218 5066038 : state.prev_lsn = Some(lsn);
5219 5066038 : state.max_lsn = std::cmp::max(state.max_lsn, Some(lsn));
5220 5066038 : }
5221 :
5222 5066038 : res
5223 5066038 : }
5224 :
5225 5066040 : async fn handle_open_layer_action(
5226 5066040 : &mut self,
5227 5066040 : at: Lsn,
5228 5066040 : action: OpenLayerAction,
5229 5066040 : ) -> anyhow::Result<&Arc<InMemoryLayer>> {
5230 5066040 : match action {
5231 : OpenLayerAction::Roll => {
5232 0 : let freeze_at = self.write_guard.as_ref().unwrap().max_lsn.unwrap();
5233 0 : self.roll_layer(freeze_at).await?;
5234 0 : self.open_layer(at).await?;
5235 : }
5236 4780028 : OpenLayerAction::Open => self.open_layer(at).await?,
5237 : OpenLayerAction::None => {
5238 286012 : assert!(self.write_guard.is_some());
5239 : }
5240 : }
5241 :
5242 5066040 : Ok(&self.write_guard.as_ref().unwrap().open_layer)
5243 5066040 : }
5244 :
5245 4780028 : async fn open_layer(&mut self, at: Lsn) -> anyhow::Result<()> {
5246 4780028 : let layer = self.tl.get_layer_for_write(at).await?;
5247 4780028 : let initial_size = layer.size().await?;
5248 :
5249 4780028 : let last_freeze_at = self.last_freeze_at.load();
5250 4780028 : self.write_guard.replace(TimelineWriterState::new(
5251 4780028 : layer,
5252 4780028 : initial_size,
5253 4780028 : last_freeze_at,
5254 4780028 : ));
5255 4780028 :
5256 4780028 : Ok(())
5257 4780028 : }
5258 :
5259 0 : async fn roll_layer(&mut self, freeze_at: Lsn) -> anyhow::Result<()> {
5260 0 : assert!(self.write_guard.is_some());
5261 :
5262 0 : self.tl.freeze_inmem_layer_at(freeze_at).await;
5263 :
5264 0 : let now = Instant::now();
5265 0 : *(self.last_freeze_ts.write().unwrap()) = now;
5266 0 :
5267 0 : self.tl.flush_frozen_layers();
5268 0 :
5269 0 : let current_size = self.write_guard.as_ref().unwrap().current_size;
5270 0 : if current_size > self.get_checkpoint_distance() {
5271 0 : warn!("Flushed oversized open layer with size {}", current_size)
5272 0 : }
5273 :
5274 0 : Ok(())
5275 0 : }
5276 :
5277 5066040 : fn get_open_layer_action(&self, lsn: Lsn, new_value_size: u64) -> OpenLayerAction {
5278 5066040 : let state = &*self.write_guard;
5279 5066040 : let Some(state) = &state else {
5280 4780028 : return OpenLayerAction::Open;
5281 : };
5282 :
5283 286012 : if state.prev_lsn == Some(lsn) {
5284 : // Rolling mid LSN is not supported by downstream code.
5285 : // Hence, only roll at LSN boundaries.
5286 285954 : return OpenLayerAction::None;
5287 58 : }
5288 58 :
5289 58 : if state.current_size == 0 {
5290 : // Don't roll empty layers
5291 0 : return OpenLayerAction::None;
5292 58 : }
5293 58 :
5294 58 : if self.tl.should_roll(
5295 58 : state.current_size,
5296 58 : state.current_size + new_value_size,
5297 58 : self.get_checkpoint_distance(),
5298 58 : lsn,
5299 58 : state.cached_last_freeze_at,
5300 58 : state.open_layer.get_opened_at(),
5301 58 : ) {
5302 0 : OpenLayerAction::Roll
5303 : } else {
5304 58 : OpenLayerAction::None
5305 : }
5306 5066040 : }
5307 :
5308 : /// Put a batch of keys at the specified Lsns.
5309 : ///
5310 : /// The batch is sorted by Lsn (enforced by usage of [`utils::vec_map::VecMap`]).
5311 413970 : pub(crate) async fn put_batch(
5312 413970 : &mut self,
5313 413970 : batch: VecMap<Lsn, (Key, Value)>,
5314 413970 : ctx: &RequestContext,
5315 413970 : ) -> anyhow::Result<()> {
5316 1113894 : for (lsn, (key, val)) in batch {
5317 699924 : self.put(key, lsn, &val, ctx).await?
5318 : }
5319 :
5320 413970 : Ok(())
5321 413970 : }
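    // The ordering guarantee comes from the map type itself: `VecMap` is an
    // append-only sorted vector, so (assuming its `append` rejects
    // out-of-order keys, which is what the doc comment above relies on) a
    // batch can only be built in Lsn order. Illustrative sketch:
    //
    //     let mut batch: VecMap<Lsn, u32> = VecMap::default();
    //     batch.append(Lsn(0x10), 1).expect("first key appends");
    //     batch.append(Lsn(0x20), 2).expect("increasing keys append");
    //     assert!(batch.append(Lsn(0x18), 3).is_err()); // out-of-order is rejected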
5322 :
5323 2 : pub(crate) async fn delete_batch(&mut self, batch: &[(Range<Key>, Lsn)]) -> anyhow::Result<()> {
5324 2 : if let Some((_, lsn)) = batch.first() {
5325 2 : let action = self.get_open_layer_action(*lsn, 0);
5326 2 : let layer = self.handle_open_layer_action(*lsn, action).await?;
5327 2 : layer.put_tombstones(batch).await?;
5328 0 : }
5329 :
5330 2 : Ok(())
5331 2 : }
5332 :
5333 : /// Track the end of the latest digested WAL record.
5334 : /// Remember the (end of the) last valid WAL record in the timeline.
5335 : ///
5336 : /// Call this after you have finished writing all the WAL up to 'lsn'.
5337 : ///
5338 : /// 'lsn' must be aligned. This wakes up any wait_lsn() callers waiting for
5339 : /// the 'lsn' or anything older. The previous last record LSN is stored alongside
5340 : /// the latest and can be read.
5341 5254946 : pub(crate) fn finish_write(&self, new_lsn: Lsn) {
5342 5254946 : self.tl.finish_write(new_lsn);
5343 5254946 : }
5344 :
5345 270570 : pub(crate) fn update_current_logical_size(&self, delta: i64) {
5346 270570 : self.tl.update_current_logical_size(delta)
5347 270570 : }
5348 : }
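// The rules in `get_open_layer_action` in isolation (a simplified sketch added
// for illustration; `policy_says_roll` is a hypothetical stand-in for the
// `Timeline::should_roll` outcome): no writer state means a layer must be
// opened, writes at the same LSN or into an empty layer never roll, and
// otherwise the size/age policy decides.
#[cfg(test)]
#[test]
fn open_layer_action_rules_sketch() {
    // state is (prev_lsn, current_size), or None when no layer is open.
    fn action(state: Option<(Option<u64>, u64)>, lsn: u64, policy_says_roll: bool) -> OpenLayerAction {
        let Some((prev_lsn, current_size)) = state else {
            return OpenLayerAction::Open;
        };
        if prev_lsn == Some(lsn) || current_size == 0 {
            return OpenLayerAction::None;
        }
        if policy_says_roll {
            OpenLayerAction::Roll
        } else {
            OpenLayerAction::None
        }
    }
    assert!(matches!(action(None, 10, true), OpenLayerAction::Open));
    assert!(matches!(action(Some((Some(10), 100)), 10, true), OpenLayerAction::None));
    assert!(matches!(action(Some((Some(9), 0)), 10, true), OpenLayerAction::None));
    assert!(matches!(action(Some((Some(9), 100)), 10, true), OpenLayerAction::Roll));
}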
5349 :
5350 : // We need TimelineWriter to be send in upcoming conversion of
5351 : // Timeline::layers to tokio::sync::RwLock.
5352 : #[test]
5353 2 : fn is_send() {
5354 2 : fn _assert_send<T: Send>() {}
5355 2 : _assert_send::<TimelineWriter<'_>>();
5356 2 : }
5357 :
5358 : /// Add a suffix to a layer file's name: .{num}.old
5359 : /// Uses the first available num (starts at 0)
5360 0 : fn rename_to_backup(path: &Utf8Path) -> anyhow::Result<()> {
5361 0 : let filename = path
5362 0 : .file_name()
5363 0 : .ok_or_else(|| anyhow!("Path {path} doesn't have a file name"))?;
5364 0 : let mut new_path = path.to_owned();
5365 :
5366 0 : for i in 0u32.. {
5367 0 : new_path.set_file_name(format!("{filename}.{i}.old"));
5368 0 : if !new_path.exists() {
5369 0 : std::fs::rename(path, &new_path)
5370 0 : .with_context(|| format!("rename {path:?} to {new_path:?}"))?;
5371 0 : return Ok(());
5372 0 : }
5373 : }
5374 :
5375 0 : bail!("couldn't find an unused backup number for {:?}", path)
5376 0 : }
5377 :
5378 : #[cfg(test)]
5379 : mod tests {
5380 : use utils::{id::TimelineId, lsn::Lsn};
5381 :
5382 : use crate::tenant::{
5383 : harness::TenantHarness, storage_layer::Layer, timeline::EvictionError, Timeline,
5384 : };
5385 :
5386 : #[tokio::test]
5387 2 : async fn two_layer_eviction_attempts_at_the_same_time() {
5388 2 : let harness =
5389 2 : TenantHarness::create("two_layer_eviction_attempts_at_the_same_time").unwrap();
5390 2 :
5391 8 : let (tenant, ctx) = harness.load().await;
5392 2 : let timeline = tenant
5393 2 : .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
5394 6 : .await
5395 2 : .unwrap();
5396 2 :
5397 2 : let layer = find_some_layer(&timeline).await;
5398 2 : let layer = layer
5399 2 : .keep_resident()
5400 2 : .await
5401 2 : .expect("no download => no downloading errors")
5402 2 : .drop_eviction_guard();
5403 2 :
5404 2 : let forever = std::time::Duration::from_secs(120);
5405 2 :
5406 2 : let first = layer.evict_and_wait(forever);
5407 2 : let second = layer.evict_and_wait(forever);
5408 2 :
5409 2 : let (first, second) = tokio::join!(first, second);
5410 2 :
5411 2 : let res = layer.keep_resident().await;
5412 2 : assert!(res.is_none(), "{res:?}");
5413 2 :
5414 2 : match (first, second) {
5415 2 : (Ok(()), Ok(())) => {
5416 2 : // because there are no more timeline locks being taken on the eviction path, we can
5417 2 : // witness all three outcomes here.
5418 2 : }
5419 2 : (Ok(()), Err(EvictionError::NotFound)) | (Err(EvictionError::NotFound), Ok(())) => {
5420 0 : // if one completes before the other, this is fine just as well.
5421 0 : }
5422 2 : other => unreachable!("unexpected {:?}", other),
5423 2 : }
5424 2 : }
5425 :
5426 2 : async fn find_some_layer(timeline: &Timeline) -> Layer {
5427 2 : let layers = timeline.layers.read().await;
5428 2 : let desc = layers
5429 2 : .layer_map()
5430 2 : .iter_historic_layers()
5431 2 : .next()
5432 2 : .expect("must find one layer to evict");
5433 2 :
5434 2 : layers.get_from_desc(&desc)
5435 2 : }
5436 : }