Line data Source code
1 : //! Timeline repository implementation that keeps old data in layer files, and
2 : //! the recent changes in ephemeral files.
3 : //!
4 : //! See tenant/*_layer.rs files. The functions here are responsible for locating
5 : //! the correct layer for the get/put call, walking back the timeline branching
6 : //! history as needed.
7 : //!
8 : //! The files are stored in the .neon/tenants/<tenant_id>/timelines/<timeline_id>
9 : //! directory. See docs/pageserver-storage.md for how the files are managed.
10 : //! In addition to the layer files, there is a metadata file in the same
11 : //! directory that contains information about the timeline, in particular its
12 : //! parent timeline, and the last LSN that has been written to disk.
13 : //!
14 :
15 : use std::collections::hash_map::Entry;
16 : use std::collections::{BTreeMap, HashMap, HashSet};
17 : use std::fmt::{Debug, Display};
18 : use std::fs::File;
19 : use std::future::Future;
20 : use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
21 : use std::sync::{Arc, Mutex, Weak};
22 : use std::time::{Duration, Instant, SystemTime};
23 : use std::{fmt, fs};
24 :
25 : use anyhow::{Context, bail};
26 : use arc_swap::ArcSwap;
27 : use camino::{Utf8Path, Utf8PathBuf};
28 : use chrono::NaiveDateTime;
29 : use enumset::EnumSet;
30 : use futures::StreamExt;
31 : use futures::stream::FuturesUnordered;
32 : use itertools::Itertools as _;
33 : use once_cell::sync::Lazy;
34 : pub use pageserver_api::models::TenantState;
35 : use pageserver_api::models::{self, RelSizeMigration};
36 : use pageserver_api::models::{
37 : CompactInfoResponse, LsnLease, TimelineArchivalState, TimelineState, TopTenantShardItem,
38 : WalRedoManagerStatus,
39 : };
40 : use pageserver_api::shard::{ShardIdentity, ShardStripeSize, TenantShardId};
41 : use remote_storage::{DownloadError, GenericRemoteStorage, TimeoutOrCancel};
42 : use remote_timeline_client::index::GcCompactionState;
43 : use remote_timeline_client::manifest::{
44 : LATEST_TENANT_MANIFEST_VERSION, OffloadedTimelineManifest, TenantManifest,
45 : };
46 : use remote_timeline_client::{
47 : FAILED_REMOTE_OP_RETRIES, FAILED_UPLOAD_WARN_THRESHOLD, UploadQueueNotReadyError,
48 : download_tenant_manifest,
49 : };
50 : use secondary::heatmap::{HeatMapTenant, HeatMapTimeline};
51 : use storage_broker::BrokerClientChannel;
52 : use timeline::compaction::{CompactionOutcome, GcCompactionQueue};
53 : use timeline::import_pgdata::ImportingTimeline;
54 : use timeline::layer_manager::LayerManagerLockHolder;
55 : use timeline::offload::{OffloadError, offload_timeline};
56 : use timeline::{
57 : CompactFlags, CompactOptions, CompactionError, PreviousHeatmap, ShutdownMode, import_pgdata,
58 : };
59 : use tokio::io::BufReader;
60 : use tokio::sync::{Notify, Semaphore, watch};
61 : use tokio::task::JoinSet;
62 : use tokio_util::sync::CancellationToken;
63 : use tracing::*;
64 : use upload_queue::NotInitialized;
65 : use utils::circuit_breaker::CircuitBreaker;
66 : use utils::crashsafe::path_with_suffix_extension;
67 : use utils::sync::gate::{Gate, GateGuard};
68 : use utils::timeout::{TimeoutCancellableError, timeout_cancellable};
69 : use utils::try_rcu::ArcSwapExt;
70 : use utils::zstd::{create_zst_tarball, extract_zst_tarball};
71 : use utils::{backoff, completion, failpoint_support, fs_ext, pausable_failpoint};
72 :
73 : use self::config::{AttachedLocationConfig, AttachmentMode, LocationConf};
74 : use self::metadata::TimelineMetadata;
75 : use self::mgr::{GetActiveTenantError, GetTenantError};
76 : use self::remote_timeline_client::upload::{upload_index_part, upload_tenant_manifest};
77 : use self::remote_timeline_client::{RemoteTimelineClient, WaitCompletionError};
78 : use self::timeline::uninit::{TimelineCreateGuard, TimelineExclusionError, UninitializedTimeline};
79 : use self::timeline::{
80 : EvictionTaskTenantState, GcCutoffs, TimelineDeleteProgress, TimelineResources, WaitLsnError,
81 : };
82 : use crate::basebackup_cache::BasebackupPrepareSender;
83 : use crate::config::PageServerConf;
84 : use crate::context;
85 : use crate::context::RequestContextBuilder;
86 : use crate::context::{DownloadBehavior, RequestContext};
87 : use crate::deletion_queue::{DeletionQueueClient, DeletionQueueError};
88 : use crate::feature_resolver::FeatureResolver;
89 : use crate::l0_flush::L0FlushGlobalState;
90 : use crate::metrics::{
91 : BROKEN_TENANTS_SET, CIRCUIT_BREAKERS_BROKEN, CIRCUIT_BREAKERS_UNBROKEN, CONCURRENT_INITDBS,
92 : INITDB_RUN_TIME, INITDB_SEMAPHORE_ACQUISITION_TIME, TENANT, TENANT_OFFLOADED_TIMELINES,
93 : TENANT_STATE_METRIC, TENANT_SYNTHETIC_SIZE_METRIC, TIMELINE_STATE_METRIC,
94 : remove_tenant_metrics,
95 : };
96 : use crate::task_mgr::TaskKind;
97 : use crate::tenant::config::LocationMode;
98 : use crate::tenant::gc_result::GcResult;
99 : pub use crate::tenant::remote_timeline_client::index::IndexPart;
100 : use crate::tenant::remote_timeline_client::{
101 : INITDB_PATH, MaybeDeletedIndexPart, remote_initdb_archive_path,
102 : };
103 : use crate::tenant::storage_layer::{DeltaLayer, ImageLayer};
104 : use crate::tenant::timeline::delete::DeleteTimelineFlow;
105 : use crate::tenant::timeline::uninit::cleanup_timeline_directory;
106 : use crate::virtual_file::VirtualFile;
107 : use crate::walingest::WalLagCooldown;
108 : use crate::walredo::{PostgresRedoManager, RedoAttemptType};
109 : use crate::{InitializationOrder, TEMP_FILE_SUFFIX, import_datadir, span, task_mgr, walredo};
110 :
111 0 : static INIT_DB_SEMAPHORE: Lazy<Semaphore> = Lazy::new(|| Semaphore::new(8));
112 : use utils::crashsafe;
113 : use utils::generation::Generation;
114 : use utils::id::TimelineId;
115 : use utils::lsn::{Lsn, RecordLsn};
116 :
117 : pub mod blob_io;
118 : pub mod block_io;
119 : pub mod vectored_blob_io;
120 :
121 : pub mod disk_btree;
122 : pub(crate) mod ephemeral_file;
123 : pub mod layer_map;
124 :
125 : pub mod metadata;
126 : pub mod remote_timeline_client;
127 : pub mod storage_layer;
128 :
129 : pub mod checks;
130 : pub mod config;
131 : pub mod mgr;
132 : pub mod secondary;
133 : pub mod tasks;
134 : pub mod upload_queue;
135 :
136 : pub(crate) mod timeline;
137 :
138 : pub mod size;
139 :
140 : mod gc_block;
141 : mod gc_result;
142 : pub(crate) mod throttle;
143 :
144 : pub(crate) use timeline::{LogicalSizeCalculationCause, PageReconstructError, Timeline};
145 :
146 : pub(crate) use crate::span::debug_assert_current_span_has_tenant_and_timeline_id;
147 : // re-export for use in walreceiver
148 : pub use crate::tenant::timeline::WalReceiverInfo;
149 :
150 : /// The "tenants" part of `tenants/<tenant>/timelines...`
151 : pub const TENANTS_SEGMENT_NAME: &str = "tenants";
152 :
153 : /// Parts of the `.neon/tenants/<tenant_id>/timelines/<timeline_id>` directory prefix.
154 : pub const TIMELINES_SEGMENT_NAME: &str = "timelines";
155 :
156 : /// References to shared objects that are passed into each tenant, such
157 : /// as the shared remote storage client and process initialization state.
158 : #[derive(Clone)]
159 : pub struct TenantSharedResources {
160 : pub broker_client: storage_broker::BrokerClientChannel,
161 : pub remote_storage: GenericRemoteStorage,
162 : pub deletion_queue_client: DeletionQueueClient,
163 : pub l0_flush_global_state: L0FlushGlobalState,
164 : pub basebackup_prepare_sender: BasebackupPrepareSender,
165 : pub feature_resolver: FeatureResolver,
166 : }
167 :
168 : /// A [`TenantShard`] is really an _attached_ tenant. The configuration
169 : /// for an attached tenant is a subset of the [`LocationConf`], represented
170 : /// in this struct.
171 : #[derive(Clone)]
172 : pub(super) struct AttachedTenantConf {
173 : tenant_conf: pageserver_api::models::TenantConfig,
174 : location: AttachedLocationConfig,
175 : /// The deadline before which we are blocked from GC so that
176 : /// leases have a chance to be renewed.
177 : lsn_lease_deadline: Option<tokio::time::Instant>,
178 : }
179 :
180 : impl AttachedTenantConf {
181 118 : fn new(
182 118 : tenant_conf: pageserver_api::models::TenantConfig,
183 118 : location: AttachedLocationConfig,
184 118 : ) -> Self {
185 : // Sets a deadline before which we cannot proceed to GC due to lsn lease.
186 : //
187 : // We do this as the leases mapping are not persisted to disk. By delaying GC by lease
188 : // length, we guarantee that all the leases we granted before will have a chance to renew
189 : // when we run GC for the first time after restart / transition from AttachedMulti to AttachedSingle.
190 118 : let lsn_lease_deadline = if location.attach_mode == AttachmentMode::Single {
191 118 : Some(
192 118 : tokio::time::Instant::now()
193 118 : + tenant_conf
194 118 : .lsn_lease_length
195 118 : .unwrap_or(LsnLease::DEFAULT_LENGTH),
196 118 : )
197 : } else {
198 : // We don't use `lsn_lease_deadline` to delay GC in AttachedMulti and AttachedStale
199 : // because we don't do GC in these modes.
200 0 : None
201 : };
202 :
203 118 : Self {
204 118 : tenant_conf,
205 118 : location,
206 118 : lsn_lease_deadline,
207 118 : }
208 118 : }
209 :
210 118 : fn try_from(location_conf: LocationConf) -> anyhow::Result<Self> {
211 118 : match &location_conf.mode {
212 118 : LocationMode::Attached(attach_conf) => {
213 118 : Ok(Self::new(location_conf.tenant_conf, *attach_conf))
214 : }
215 : LocationMode::Secondary(_) => {
216 0 : anyhow::bail!(
217 0 : "Attempted to construct AttachedTenantConf from a LocationConf in secondary mode"
218 0 : )
219 : }
220 : }
221 118 : }
222 :
223 381 : fn is_gc_blocked_by_lsn_lease_deadline(&self) -> bool {
224 381 : self.lsn_lease_deadline
225 381 : .map(|d| tokio::time::Instant::now() < d)
226 381 : .unwrap_or(false)
227 381 : }
228 : }
229 : struct TimelinePreload {
230 : timeline_id: TimelineId,
231 : client: RemoteTimelineClient,
232 : index_part: Result<MaybeDeletedIndexPart, DownloadError>,
233 : previous_heatmap: Option<PreviousHeatmap>,
234 : }
235 :
236 : pub(crate) struct TenantPreload {
237 : /// The tenant manifest from remote storage, or None if no manifest was found.
238 : tenant_manifest: Option<TenantManifest>,
239 : /// Map from timeline ID to a possible timeline preload. It is None iff the timeline is offloaded according to the manifest.
240 : timelines: HashMap<TimelineId, Option<TimelinePreload>>,
241 : }
242 :
243 : /// When we spawn a tenant, there is a special mode for tenant creation that
244 : /// avoids trying to read anything from remote storage.
245 : pub(crate) enum SpawnMode {
246 : /// Activate as soon as possible
247 : Eager,
248 : /// Lazy activation in the background, with the option to skip the queue if the need comes up
249 : Lazy,
250 : }
251 :
252 : ///
253 : /// Tenant consists of multiple timelines. Keep them in a hash table.
254 : ///
255 : pub struct TenantShard {
256 : // Global pageserver config parameters
257 : pub conf: &'static PageServerConf,
258 :
259 : /// The value creation timestamp, used to measure activation delay, see:
260 : /// <https://github.com/neondatabase/neon/issues/4025>
261 : constructed_at: Instant,
262 :
263 : state: watch::Sender<TenantState>,
264 :
265 : // Overridden tenant-specific config parameters.
266 : // We keep pageserver_api::models::TenantConfig sturct here to preserve the information
267 : // about parameters that are not set.
268 : // This is necessary to allow global config updates.
269 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
270 :
271 : tenant_shard_id: TenantShardId,
272 :
273 : // The detailed sharding information, beyond the number/count in tenant_shard_id
274 : shard_identity: ShardIdentity,
275 :
276 : /// The remote storage generation, used to protect S3 objects from split-brain.
277 : /// Does not change over the lifetime of the [`TenantShard`] object.
278 : ///
279 : /// This duplicates the generation stored in LocationConf, but that structure is mutable:
280 : /// this copy enforces the invariant that generatio doesn't change during a Tenant's lifetime.
281 : generation: Generation,
282 :
283 : timelines: Mutex<HashMap<TimelineId, Arc<Timeline>>>,
284 :
285 : /// During timeline creation, we first insert the TimelineId to the
286 : /// creating map, then `timelines`, then remove it from the creating map.
287 : /// **Lock order**: if acquiring all (or a subset), acquire them in order `timelines`, `timelines_offloaded`, `timelines_creating`
288 : timelines_creating: std::sync::Mutex<HashSet<TimelineId>>,
289 :
290 : /// Possibly offloaded and archived timelines
291 : /// **Lock order**: if acquiring all (or a subset), acquire them in order `timelines`, `timelines_offloaded`, `timelines_creating`
292 : timelines_offloaded: Mutex<HashMap<TimelineId, Arc<OffloadedTimeline>>>,
293 :
294 : /// Tracks the timelines that are currently importing into this tenant shard.
295 : ///
296 : /// Note that importing timelines are also present in [`Self::timelines_creating`].
297 : /// Keep this in mind when ordering lock acquisition.
298 : ///
299 : /// Lifetime:
300 : /// * An imported timeline is created while scanning the bucket on tenant attach
301 : /// if the index part contains an `import_pgdata` entry and said field marks the import
302 : /// as in progress.
303 : /// * Imported timelines are removed when the storage controller calls the post timeline
304 : /// import activation endpoint.
305 : timelines_importing: std::sync::Mutex<HashMap<TimelineId, Arc<ImportingTimeline>>>,
306 :
307 : /// The last tenant manifest known to be in remote storage. None if the manifest has not yet
308 : /// been either downloaded or uploaded. Always Some after tenant attach.
309 : ///
310 : /// Initially populated during tenant attach, updated via `maybe_upload_tenant_manifest`.
311 : ///
312 : /// Do not modify this directly. It is used to check whether a new manifest needs to be
313 : /// uploaded. The manifest is constructed in `build_tenant_manifest`, and uploaded via
314 : /// `maybe_upload_tenant_manifest`.
315 : remote_tenant_manifest: tokio::sync::Mutex<Option<TenantManifest>>,
316 :
317 : // This mutex prevents creation of new timelines during GC.
318 : // Adding yet another mutex (in addition to `timelines`) is needed because holding
319 : // `timelines` mutex during all GC iteration
320 : // may block for a long time `get_timeline`, `get_timelines_state`,... and other operations
321 : // with timelines, which in turn may cause dropping replication connection, expiration of wait_for_lsn
322 : // timeout...
323 : gc_cs: tokio::sync::Mutex<()>,
324 : walredo_mgr: Option<Arc<WalRedoManager>>,
325 :
326 : /// Provides access to timeline data sitting in the remote storage.
327 : pub(crate) remote_storage: GenericRemoteStorage,
328 :
329 : /// Access to global deletion queue for when this tenant wants to schedule a deletion.
330 : deletion_queue_client: DeletionQueueClient,
331 :
332 : /// A channel to send async requests to prepare a basebackup for the basebackup cache.
333 : basebackup_prepare_sender: BasebackupPrepareSender,
334 :
335 : /// Cached logical sizes updated updated on each [`TenantShard::gather_size_inputs`].
336 : cached_logical_sizes: tokio::sync::Mutex<HashMap<(TimelineId, Lsn), u64>>,
337 : cached_synthetic_tenant_size: Arc<AtomicU64>,
338 :
339 : eviction_task_tenant_state: tokio::sync::Mutex<EvictionTaskTenantState>,
340 :
341 : /// Track repeated failures to compact, so that we can back off.
342 : /// Overhead of mutex is acceptable because compaction is done with a multi-second period.
343 : compaction_circuit_breaker: std::sync::Mutex<CircuitBreaker>,
344 :
345 : /// Signals the tenant compaction loop that there is L0 compaction work to be done.
346 : pub(crate) l0_compaction_trigger: Arc<Notify>,
347 :
348 : /// Scheduled gc-compaction tasks.
349 : scheduled_compaction_tasks: std::sync::Mutex<HashMap<TimelineId, Arc<GcCompactionQueue>>>,
350 :
351 : /// If the tenant is in Activating state, notify this to encourage it
352 : /// to proceed to Active as soon as possible, rather than waiting for lazy
353 : /// background warmup.
354 : pub(crate) activate_now_sem: tokio::sync::Semaphore,
355 :
356 : /// Time it took for the tenant to activate. Zero if not active yet.
357 : attach_wal_lag_cooldown: Arc<std::sync::OnceLock<WalLagCooldown>>,
358 :
359 : // Cancellation token fires when we have entered shutdown(). This is a parent of
360 : // Timelines' cancellation token.
361 : pub(crate) cancel: CancellationToken,
362 :
363 : // Users of the TenantShard such as the page service must take this Gate to avoid
364 : // trying to use a TenantShard which is shutting down.
365 : pub(crate) gate: Gate,
366 :
367 : /// Throttle applied at the top of [`Timeline::get`].
368 : /// All [`TenantShard::timelines`] of a given [`TenantShard`] instance share the same [`throttle::Throttle`] instance.
369 : pub(crate) pagestream_throttle: Arc<throttle::Throttle>,
370 :
371 : pub(crate) pagestream_throttle_metrics: Arc<crate::metrics::tenant_throttling::Pagestream>,
372 :
373 : /// An ongoing timeline detach concurrency limiter.
374 : ///
375 : /// As a tenant will likely be restarted as part of timeline detach ancestor it makes no sense
376 : /// to have two running at the same time. A different one can be started if an earlier one
377 : /// has failed for whatever reason.
378 : ongoing_timeline_detach: std::sync::Mutex<Option<(TimelineId, utils::completion::Barrier)>>,
379 :
380 : /// `index_part.json` based gc blocking reason tracking.
381 : ///
382 : /// New gc iterations must start a new iteration by acquiring `GcBlock::start` before
383 : /// proceeding.
384 : pub(crate) gc_block: gc_block::GcBlock,
385 :
386 : l0_flush_global_state: L0FlushGlobalState,
387 :
388 : pub(crate) feature_resolver: FeatureResolver,
389 : }
390 : impl std::fmt::Debug for TenantShard {
391 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
392 0 : write!(f, "{} ({})", self.tenant_shard_id, self.current_state())
393 0 : }
394 : }
395 :
396 : pub(crate) enum WalRedoManager {
397 : Prod(WalredoManagerId, PostgresRedoManager),
398 : #[cfg(test)]
399 : Test(harness::TestRedoManager),
400 : }
401 :
402 : #[derive(thiserror::Error, Debug)]
403 : #[error("pageserver is shutting down")]
404 : pub(crate) struct GlobalShutDown;
405 :
406 : impl WalRedoManager {
407 0 : pub(crate) fn new(mgr: PostgresRedoManager) -> Result<Arc<Self>, GlobalShutDown> {
408 0 : let id = WalredoManagerId::next();
409 0 : let arc = Arc::new(Self::Prod(id, mgr));
410 0 : let mut guard = WALREDO_MANAGERS.lock().unwrap();
411 0 : match &mut *guard {
412 0 : Some(map) => {
413 0 : map.insert(id, Arc::downgrade(&arc));
414 0 : Ok(arc)
415 : }
416 0 : None => Err(GlobalShutDown),
417 : }
418 0 : }
419 : }
420 :
421 : impl Drop for WalRedoManager {
422 5 : fn drop(&mut self) {
423 5 : match self {
424 0 : Self::Prod(id, _) => {
425 0 : let mut guard = WALREDO_MANAGERS.lock().unwrap();
426 0 : if let Some(map) = &mut *guard {
427 0 : map.remove(id).expect("new() registers, drop() unregisters");
428 0 : }
429 : }
430 : #[cfg(test)]
431 5 : Self::Test(_) => {
432 5 : // Not applicable to test redo manager
433 5 : }
434 : }
435 5 : }
436 : }
437 :
438 : /// Global registry of all walredo managers so that [`crate::shutdown_pageserver`] can shut down
439 : /// the walredo processes outside of the regular order.
440 : ///
441 : /// This is necessary to work around a systemd bug where it freezes if there are
442 : /// walredo processes left => <https://github.com/neondatabase/cloud/issues/11387>
443 : #[allow(clippy::type_complexity)]
444 : pub(crate) static WALREDO_MANAGERS: once_cell::sync::Lazy<
445 : Mutex<Option<HashMap<WalredoManagerId, Weak<WalRedoManager>>>>,
446 0 : > = once_cell::sync::Lazy::new(|| Mutex::new(Some(HashMap::new())));
447 : #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
448 : pub(crate) struct WalredoManagerId(u64);
449 : impl WalredoManagerId {
450 0 : pub fn next() -> Self {
451 : static NEXT: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(1);
452 0 : let id = NEXT.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
453 0 : if id == 0 {
454 0 : panic!(
455 0 : "WalredoManagerId::new() returned 0, indicating wraparound, risking it's no longer unique"
456 0 : );
457 0 : }
458 0 : Self(id)
459 0 : }
460 : }
461 :
462 : #[cfg(test)]
463 : impl From<harness::TestRedoManager> for WalRedoManager {
464 118 : fn from(mgr: harness::TestRedoManager) -> Self {
465 118 : Self::Test(mgr)
466 118 : }
467 : }
468 :
469 : impl WalRedoManager {
470 3 : pub(crate) async fn shutdown(&self) -> bool {
471 3 : match self {
472 0 : Self::Prod(_, mgr) => mgr.shutdown().await,
473 : #[cfg(test)]
474 : Self::Test(_) => {
475 : // Not applicable to test redo manager
476 3 : true
477 : }
478 : }
479 3 : }
480 :
481 0 : pub(crate) fn maybe_quiesce(&self, idle_timeout: Duration) {
482 0 : match self {
483 0 : Self::Prod(_, mgr) => mgr.maybe_quiesce(idle_timeout),
484 0 : #[cfg(test)]
485 0 : Self::Test(_) => {
486 0 : // Not applicable to test redo manager
487 0 : }
488 0 : }
489 0 : }
490 :
491 : /// # Cancel-Safety
492 : ///
493 : /// This method is cancellation-safe.
494 26774 : pub async fn request_redo(
495 26774 : &self,
496 26774 : key: pageserver_api::key::Key,
497 26774 : lsn: Lsn,
498 26774 : base_img: Option<(Lsn, bytes::Bytes)>,
499 26774 : records: Vec<(Lsn, pageserver_api::record::NeonWalRecord)>,
500 26774 : pg_version: u32,
501 26774 : redo_attempt_type: RedoAttemptType,
502 26774 : ) -> Result<bytes::Bytes, walredo::Error> {
503 26774 : match self {
504 0 : Self::Prod(_, mgr) => {
505 0 : mgr.request_redo(key, lsn, base_img, records, pg_version, redo_attempt_type)
506 0 : .await
507 : }
508 : #[cfg(test)]
509 26774 : Self::Test(mgr) => {
510 26774 : mgr.request_redo(key, lsn, base_img, records, pg_version, redo_attempt_type)
511 26774 : .await
512 : }
513 : }
514 26774 : }
515 :
516 0 : pub(crate) fn status(&self) -> Option<WalRedoManagerStatus> {
517 0 : match self {
518 0 : WalRedoManager::Prod(_, m) => Some(m.status()),
519 0 : #[cfg(test)]
520 0 : WalRedoManager::Test(_) => None,
521 0 : }
522 0 : }
523 : }
524 :
525 : /// A very lightweight memory representation of an offloaded timeline.
526 : ///
527 : /// We need to store the list of offloaded timelines so that we can perform operations on them,
528 : /// like unoffloading them, or (at a later date), decide to perform flattening.
529 : /// This type has a much smaller memory impact than [`Timeline`], and thus we can store many
530 : /// more offloaded timelines than we can manage ones that aren't.
531 : pub struct OffloadedTimeline {
532 : pub tenant_shard_id: TenantShardId,
533 : pub timeline_id: TimelineId,
534 : pub ancestor_timeline_id: Option<TimelineId>,
535 : /// Whether to retain the branch lsn at the ancestor or not
536 : pub ancestor_retain_lsn: Option<Lsn>,
537 :
538 : /// When the timeline was archived.
539 : ///
540 : /// Present for future flattening deliberations.
541 : pub archived_at: NaiveDateTime,
542 :
543 : /// Prevent two tasks from deleting the timeline at the same time. If held, the
544 : /// timeline is being deleted. If 'true', the timeline has already been deleted.
545 : pub delete_progress: TimelineDeleteProgress,
546 :
547 : /// Part of the `OffloadedTimeline` object's lifecycle: this needs to be set before we drop it
548 : pub deleted_from_ancestor: AtomicBool,
549 :
550 : _metrics_guard: OffloadedTimelineMetricsGuard,
551 : }
552 :
553 : /// Increases the offloaded timeline count metric when created, and decreases when dropped.
554 : struct OffloadedTimelineMetricsGuard;
555 :
556 : impl OffloadedTimelineMetricsGuard {
557 1 : fn new() -> Self {
558 1 : TIMELINE_STATE_METRIC
559 1 : .with_label_values(&["offloaded"])
560 1 : .inc();
561 1 : Self
562 1 : }
563 : }
564 :
565 : impl Drop for OffloadedTimelineMetricsGuard {
566 1 : fn drop(&mut self) {
567 1 : TIMELINE_STATE_METRIC
568 1 : .with_label_values(&["offloaded"])
569 1 : .dec();
570 1 : }
571 : }
572 :
573 : impl OffloadedTimeline {
574 : /// Obtains an offloaded timeline from a given timeline object.
575 : ///
576 : /// Returns `None` if the `archived_at` flag couldn't be obtained, i.e.
577 : /// the timeline is not in a stopped state.
578 : /// Panics if the timeline is not archived.
579 1 : fn from_timeline(timeline: &Timeline) -> Result<Self, UploadQueueNotReadyError> {
580 1 : let (ancestor_retain_lsn, ancestor_timeline_id) =
581 1 : if let Some(ancestor_timeline) = timeline.ancestor_timeline() {
582 1 : let ancestor_lsn = timeline.get_ancestor_lsn();
583 1 : let ancestor_timeline_id = ancestor_timeline.timeline_id;
584 1 : let mut gc_info = ancestor_timeline.gc_info.write().unwrap();
585 1 : gc_info.insert_child(timeline.timeline_id, ancestor_lsn, MaybeOffloaded::Yes);
586 1 : (Some(ancestor_lsn), Some(ancestor_timeline_id))
587 : } else {
588 0 : (None, None)
589 : };
590 1 : let archived_at = timeline
591 1 : .remote_client
592 1 : .archived_at_stopped_queue()?
593 1 : .expect("must be called on an archived timeline");
594 1 : Ok(Self {
595 1 : tenant_shard_id: timeline.tenant_shard_id,
596 1 : timeline_id: timeline.timeline_id,
597 1 : ancestor_timeline_id,
598 1 : ancestor_retain_lsn,
599 1 : archived_at,
600 1 :
601 1 : delete_progress: timeline.delete_progress.clone(),
602 1 : deleted_from_ancestor: AtomicBool::new(false),
603 1 :
604 1 : _metrics_guard: OffloadedTimelineMetricsGuard::new(),
605 1 : })
606 1 : }
607 0 : fn from_manifest(tenant_shard_id: TenantShardId, manifest: &OffloadedTimelineManifest) -> Self {
608 0 : // We expect to reach this case in tenant loading, where the `retain_lsn` is populated in the parent's `gc_info`
609 0 : // by the `initialize_gc_info` function.
610 0 : let OffloadedTimelineManifest {
611 0 : timeline_id,
612 0 : ancestor_timeline_id,
613 0 : ancestor_retain_lsn,
614 0 : archived_at,
615 0 : } = *manifest;
616 0 : Self {
617 0 : tenant_shard_id,
618 0 : timeline_id,
619 0 : ancestor_timeline_id,
620 0 : ancestor_retain_lsn,
621 0 : archived_at,
622 0 : delete_progress: TimelineDeleteProgress::default(),
623 0 : deleted_from_ancestor: AtomicBool::new(false),
624 0 : _metrics_guard: OffloadedTimelineMetricsGuard::new(),
625 0 : }
626 0 : }
627 1 : fn manifest(&self) -> OffloadedTimelineManifest {
628 1 : let Self {
629 1 : timeline_id,
630 1 : ancestor_timeline_id,
631 1 : ancestor_retain_lsn,
632 1 : archived_at,
633 1 : ..
634 1 : } = self;
635 1 : OffloadedTimelineManifest {
636 1 : timeline_id: *timeline_id,
637 1 : ancestor_timeline_id: *ancestor_timeline_id,
638 1 : ancestor_retain_lsn: *ancestor_retain_lsn,
639 1 : archived_at: *archived_at,
640 1 : }
641 1 : }
642 : /// Delete this timeline's retain_lsn from its ancestor, if present in the given tenant
643 0 : fn delete_from_ancestor_with_timelines(
644 0 : &self,
645 0 : timelines: &std::sync::MutexGuard<'_, HashMap<TimelineId, Arc<Timeline>>>,
646 0 : ) {
647 0 : if let (Some(_retain_lsn), Some(ancestor_timeline_id)) =
648 0 : (self.ancestor_retain_lsn, self.ancestor_timeline_id)
649 : {
650 0 : if let Some((_, ancestor_timeline)) = timelines
651 0 : .iter()
652 0 : .find(|(tid, _tl)| **tid == ancestor_timeline_id)
653 : {
654 0 : let removal_happened = ancestor_timeline
655 0 : .gc_info
656 0 : .write()
657 0 : .unwrap()
658 0 : .remove_child_offloaded(self.timeline_id);
659 0 : if !removal_happened {
660 0 : tracing::error!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id,
661 0 : "Couldn't remove retain_lsn entry from offloaded timeline's parent: already removed");
662 0 : }
663 0 : }
664 0 : }
665 0 : self.deleted_from_ancestor.store(true, Ordering::Release);
666 0 : }
667 : /// Call [`Self::delete_from_ancestor_with_timelines`] instead if possible.
668 : ///
669 : /// As the entire tenant is being dropped, don't bother deregistering the `retain_lsn` from the ancestor.
670 1 : fn defuse_for_tenant_drop(&self) {
671 1 : self.deleted_from_ancestor.store(true, Ordering::Release);
672 1 : }
673 : }
674 :
675 : impl fmt::Debug for OffloadedTimeline {
676 0 : fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
677 0 : write!(f, "OffloadedTimeline<{}>", self.timeline_id)
678 0 : }
679 : }
680 :
681 : impl Drop for OffloadedTimeline {
682 1 : fn drop(&mut self) {
683 1 : if !self.deleted_from_ancestor.load(Ordering::Acquire) {
684 0 : tracing::warn!(
685 0 : "offloaded timeline {} was dropped without having cleaned it up at the ancestor",
686 : self.timeline_id
687 : );
688 1 : }
689 1 : }
690 : }
691 :
692 : #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
693 : pub enum MaybeOffloaded {
694 : Yes,
695 : No,
696 : }
697 :
698 : #[derive(Clone, Debug)]
699 : pub enum TimelineOrOffloaded {
700 : Timeline(Arc<Timeline>),
701 : Offloaded(Arc<OffloadedTimeline>),
702 : Importing(Arc<ImportingTimeline>),
703 : }
704 :
705 : impl TimelineOrOffloaded {
706 0 : pub fn arc_ref(&self) -> TimelineOrOffloadedArcRef<'_> {
707 0 : match self {
708 0 : TimelineOrOffloaded::Timeline(timeline) => {
709 0 : TimelineOrOffloadedArcRef::Timeline(timeline)
710 : }
711 0 : TimelineOrOffloaded::Offloaded(offloaded) => {
712 0 : TimelineOrOffloadedArcRef::Offloaded(offloaded)
713 : }
714 0 : TimelineOrOffloaded::Importing(importing) => {
715 0 : TimelineOrOffloadedArcRef::Importing(importing)
716 : }
717 : }
718 0 : }
719 0 : pub fn tenant_shard_id(&self) -> TenantShardId {
720 0 : self.arc_ref().tenant_shard_id()
721 0 : }
722 0 : pub fn timeline_id(&self) -> TimelineId {
723 0 : self.arc_ref().timeline_id()
724 0 : }
725 1 : pub fn delete_progress(&self) -> &Arc<tokio::sync::Mutex<DeleteTimelineFlow>> {
726 1 : match self {
727 1 : TimelineOrOffloaded::Timeline(timeline) => &timeline.delete_progress,
728 0 : TimelineOrOffloaded::Offloaded(offloaded) => &offloaded.delete_progress,
729 0 : TimelineOrOffloaded::Importing(importing) => &importing.delete_progress,
730 : }
731 1 : }
732 0 : fn maybe_remote_client(&self) -> Option<Arc<RemoteTimelineClient>> {
733 0 : match self {
734 0 : TimelineOrOffloaded::Timeline(timeline) => Some(timeline.remote_client.clone()),
735 0 : TimelineOrOffloaded::Offloaded(_offloaded) => None,
736 0 : TimelineOrOffloaded::Importing(importing) => {
737 0 : Some(importing.timeline.remote_client.clone())
738 : }
739 : }
740 0 : }
741 : }
742 :
743 : pub enum TimelineOrOffloadedArcRef<'a> {
744 : Timeline(&'a Arc<Timeline>),
745 : Offloaded(&'a Arc<OffloadedTimeline>),
746 : Importing(&'a Arc<ImportingTimeline>),
747 : }
748 :
749 : impl TimelineOrOffloadedArcRef<'_> {
750 0 : pub fn tenant_shard_id(&self) -> TenantShardId {
751 0 : match self {
752 0 : TimelineOrOffloadedArcRef::Timeline(timeline) => timeline.tenant_shard_id,
753 0 : TimelineOrOffloadedArcRef::Offloaded(offloaded) => offloaded.tenant_shard_id,
754 0 : TimelineOrOffloadedArcRef::Importing(importing) => importing.timeline.tenant_shard_id,
755 : }
756 0 : }
757 0 : pub fn timeline_id(&self) -> TimelineId {
758 0 : match self {
759 0 : TimelineOrOffloadedArcRef::Timeline(timeline) => timeline.timeline_id,
760 0 : TimelineOrOffloadedArcRef::Offloaded(offloaded) => offloaded.timeline_id,
761 0 : TimelineOrOffloadedArcRef::Importing(importing) => importing.timeline.timeline_id,
762 : }
763 0 : }
764 : }
765 :
766 : impl<'a> From<&'a Arc<Timeline>> for TimelineOrOffloadedArcRef<'a> {
767 0 : fn from(timeline: &'a Arc<Timeline>) -> Self {
768 0 : Self::Timeline(timeline)
769 0 : }
770 : }
771 :
772 : impl<'a> From<&'a Arc<OffloadedTimeline>> for TimelineOrOffloadedArcRef<'a> {
773 0 : fn from(timeline: &'a Arc<OffloadedTimeline>) -> Self {
774 0 : Self::Offloaded(timeline)
775 0 : }
776 : }
777 :
778 : impl<'a> From<&'a Arc<ImportingTimeline>> for TimelineOrOffloadedArcRef<'a> {
779 0 : fn from(timeline: &'a Arc<ImportingTimeline>) -> Self {
780 0 : Self::Importing(timeline)
781 0 : }
782 : }
783 :
784 : #[derive(Debug, thiserror::Error, PartialEq, Eq)]
785 : pub enum GetTimelineError {
786 : #[error("Timeline is shutting down")]
787 : ShuttingDown,
788 : #[error("Timeline {tenant_id}/{timeline_id} is not active, state: {state:?}")]
789 : NotActive {
790 : tenant_id: TenantShardId,
791 : timeline_id: TimelineId,
792 : state: TimelineState,
793 : },
794 : #[error("Timeline {tenant_id}/{timeline_id} was not found")]
795 : NotFound {
796 : tenant_id: TenantShardId,
797 : timeline_id: TimelineId,
798 : },
799 : }
800 :
801 : #[derive(Debug, thiserror::Error)]
802 : pub enum LoadLocalTimelineError {
803 : #[error("FailedToLoad")]
804 : Load(#[source] anyhow::Error),
805 : #[error("FailedToResumeDeletion")]
806 : ResumeDeletion(#[source] anyhow::Error),
807 : }
808 :
809 : #[derive(thiserror::Error)]
810 : pub enum DeleteTimelineError {
811 : #[error("NotFound")]
812 : NotFound,
813 :
814 : #[error("HasChildren")]
815 : HasChildren(Vec<TimelineId>),
816 :
817 : #[error("Timeline deletion is already in progress")]
818 : AlreadyInProgress(Arc<tokio::sync::Mutex<DeleteTimelineFlow>>),
819 :
820 : #[error("Cancelled")]
821 : Cancelled,
822 :
823 : #[error(transparent)]
824 : Other(#[from] anyhow::Error),
825 : }
826 :
827 : impl Debug for DeleteTimelineError {
828 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
829 0 : match self {
830 0 : Self::NotFound => write!(f, "NotFound"),
831 0 : Self::HasChildren(c) => f.debug_tuple("HasChildren").field(c).finish(),
832 0 : Self::AlreadyInProgress(_) => f.debug_tuple("AlreadyInProgress").finish(),
833 0 : Self::Cancelled => f.debug_tuple("Cancelled").finish(),
834 0 : Self::Other(e) => f.debug_tuple("Other").field(e).finish(),
835 : }
836 0 : }
837 : }
838 :
839 : #[derive(thiserror::Error)]
840 : pub enum TimelineArchivalError {
841 : #[error("NotFound")]
842 : NotFound,
843 :
844 : #[error("Timeout")]
845 : Timeout,
846 :
847 : #[error("Cancelled")]
848 : Cancelled,
849 :
850 : #[error("ancestor is archived: {}", .0)]
851 : HasArchivedParent(TimelineId),
852 :
853 : #[error("HasUnarchivedChildren")]
854 : HasUnarchivedChildren(Vec<TimelineId>),
855 :
856 : #[error("Timeline archival is already in progress")]
857 : AlreadyInProgress,
858 :
859 : #[error(transparent)]
860 : Other(anyhow::Error),
861 : }
862 :
863 : #[derive(thiserror::Error, Debug)]
864 : pub(crate) enum TenantManifestError {
865 : #[error("Remote storage error: {0}")]
866 : RemoteStorage(anyhow::Error),
867 :
868 : #[error("Cancelled")]
869 : Cancelled,
870 : }
871 :
872 : impl From<TenantManifestError> for TimelineArchivalError {
873 0 : fn from(e: TenantManifestError) -> Self {
874 0 : match e {
875 0 : TenantManifestError::RemoteStorage(e) => Self::Other(e),
876 0 : TenantManifestError::Cancelled => Self::Cancelled,
877 : }
878 0 : }
879 : }
880 :
881 : impl Debug for TimelineArchivalError {
882 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
883 0 : match self {
884 0 : Self::NotFound => write!(f, "NotFound"),
885 0 : Self::Timeout => write!(f, "Timeout"),
886 0 : Self::Cancelled => write!(f, "Cancelled"),
887 0 : Self::HasArchivedParent(p) => f.debug_tuple("HasArchivedParent").field(p).finish(),
888 0 : Self::HasUnarchivedChildren(c) => {
889 0 : f.debug_tuple("HasUnarchivedChildren").field(c).finish()
890 : }
891 0 : Self::AlreadyInProgress => f.debug_tuple("AlreadyInProgress").finish(),
892 0 : Self::Other(e) => f.debug_tuple("Other").field(e).finish(),
893 : }
894 0 : }
895 : }
896 :
897 : pub enum SetStoppingError {
898 : AlreadyStopping(completion::Barrier),
899 : Broken,
900 : }
901 :
902 : impl Debug for SetStoppingError {
903 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
904 0 : match self {
905 0 : Self::AlreadyStopping(_) => f.debug_tuple("AlreadyStopping").finish(),
906 0 : Self::Broken => write!(f, "Broken"),
907 : }
908 0 : }
909 : }
910 :
911 : #[derive(thiserror::Error, Debug)]
912 : pub(crate) enum FinalizeTimelineImportError {
913 : #[error("Import task not done yet")]
914 : ImportTaskStillRunning,
915 : #[error("Shutting down")]
916 : ShuttingDown,
917 : }
918 :
919 : /// Arguments to [`TenantShard::create_timeline`].
920 : ///
921 : /// Not usable as an idempotency key for timeline creation because if [`CreateTimelineParamsBranch::ancestor_start_lsn`]
922 : /// is `None`, the result of the timeline create call is not deterministic.
923 : ///
924 : /// See [`CreateTimelineIdempotency`] for an idempotency key.
925 : #[derive(Debug)]
926 : pub(crate) enum CreateTimelineParams {
927 : Bootstrap(CreateTimelineParamsBootstrap),
928 : Branch(CreateTimelineParamsBranch),
929 : ImportPgdata(CreateTimelineParamsImportPgdata),
930 : }
931 :
932 : #[derive(Debug)]
933 : pub(crate) struct CreateTimelineParamsBootstrap {
934 : pub(crate) new_timeline_id: TimelineId,
935 : pub(crate) existing_initdb_timeline_id: Option<TimelineId>,
936 : pub(crate) pg_version: u32,
937 : }
938 :
939 : /// NB: See comment on [`CreateTimelineIdempotency::Branch`] for why there's no `pg_version` here.
940 : #[derive(Debug)]
941 : pub(crate) struct CreateTimelineParamsBranch {
942 : pub(crate) new_timeline_id: TimelineId,
943 : pub(crate) ancestor_timeline_id: TimelineId,
944 : pub(crate) ancestor_start_lsn: Option<Lsn>,
945 : }
946 :
947 : #[derive(Debug)]
948 : pub(crate) struct CreateTimelineParamsImportPgdata {
949 : pub(crate) new_timeline_id: TimelineId,
950 : pub(crate) location: import_pgdata::index_part_format::Location,
951 : pub(crate) idempotency_key: import_pgdata::index_part_format::IdempotencyKey,
952 : }
953 :
954 : /// What is used to determine idempotency of a [`TenantShard::create_timeline`] call in [`TenantShard::start_creating_timeline`] in [`TenantShard::start_creating_timeline`].
955 : ///
956 : /// Each [`Timeline`] object holds [`Self`] as an immutable property in [`Timeline::create_idempotency`].
957 : ///
958 : /// We lower timeline creation requests to [`Self`], and then use [`PartialEq::eq`] to compare [`Timeline::create_idempotency`] with the request.
959 : /// If they are equal, we return a reference to the existing timeline, otherwise it's an idempotency conflict.
960 : ///
961 : /// There is special treatment for [`Self::FailWithConflict`] to always return an idempotency conflict.
962 : /// It would be nice to have more advanced derive macros to make that special treatment declarative.
963 : ///
964 : /// Notes:
965 : /// - Unlike [`CreateTimelineParams`], ancestor LSN is fixed, so, branching will be at a deterministic LSN.
966 : /// - We make some trade-offs though, e.g., [`CreateTimelineParamsBootstrap::existing_initdb_timeline_id`]
967 : /// is not considered for idempotency. We can improve on this over time if we deem it necessary.
968 : ///
969 : #[derive(Debug, Clone, PartialEq, Eq)]
970 : pub(crate) enum CreateTimelineIdempotency {
971 : /// NB: special treatment, see comment in [`Self`].
972 : FailWithConflict,
973 : Bootstrap {
974 : pg_version: u32,
975 : },
976 : /// NB: branches always have the same `pg_version` as their ancestor.
977 : /// While [`pageserver_api::models::TimelineCreateRequestMode::Branch::pg_version`]
978 : /// exists as a field, and is set by cplane, it has always been ignored by pageserver when
979 : /// determining the child branch pg_version.
980 : Branch {
981 : ancestor_timeline_id: TimelineId,
982 : ancestor_start_lsn: Lsn,
983 : },
984 : ImportPgdata(CreatingTimelineIdempotencyImportPgdata),
985 : }
986 :
987 : #[derive(Debug, Clone, PartialEq, Eq)]
988 : pub(crate) struct CreatingTimelineIdempotencyImportPgdata {
989 : idempotency_key: import_pgdata::index_part_format::IdempotencyKey,
990 : }
991 :
992 : /// What is returned by [`TenantShard::start_creating_timeline`].
993 : #[must_use]
994 : enum StartCreatingTimelineResult {
995 : CreateGuard(TimelineCreateGuard),
996 : Idempotent(Arc<Timeline>),
997 : }
998 :
999 : #[allow(clippy::large_enum_variant, reason = "TODO")]
1000 : enum TimelineInitAndSyncResult {
1001 : ReadyToActivate,
1002 : NeedsSpawnImportPgdata(TimelineInitAndSyncNeedsSpawnImportPgdata),
1003 : }
1004 :
1005 : #[must_use]
1006 : struct TimelineInitAndSyncNeedsSpawnImportPgdata {
1007 : timeline: Arc<Timeline>,
1008 : import_pgdata: import_pgdata::index_part_format::Root,
1009 : guard: TimelineCreateGuard,
1010 : }
1011 :
1012 : /// What is returned by [`TenantShard::create_timeline`].
1013 : enum CreateTimelineResult {
1014 : Created(Arc<Timeline>),
1015 : Idempotent(Arc<Timeline>),
1016 : /// IMPORTANT: This [`Arc<Timeline>`] object is not in [`TenantShard::timelines`] when
1017 : /// we return this result, nor will this concrete object ever be added there.
1018 : /// Cf method comment on [`TenantShard::create_timeline_import_pgdata`].
1019 : ImportSpawned(Arc<Timeline>),
1020 : }
1021 :
1022 : impl CreateTimelineResult {
1023 0 : fn discriminant(&self) -> &'static str {
1024 0 : match self {
1025 0 : Self::Created(_) => "Created",
1026 0 : Self::Idempotent(_) => "Idempotent",
1027 0 : Self::ImportSpawned(_) => "ImportSpawned",
1028 : }
1029 0 : }
1030 0 : fn timeline(&self) -> &Arc<Timeline> {
1031 0 : match self {
1032 0 : Self::Created(t) | Self::Idempotent(t) | Self::ImportSpawned(t) => t,
1033 0 : }
1034 0 : }
1035 : /// Unit test timelines aren't activated, test has to do it if it needs to.
1036 : #[cfg(test)]
1037 118 : fn into_timeline_for_test(self) -> Arc<Timeline> {
1038 118 : match self {
1039 118 : Self::Created(t) | Self::Idempotent(t) | Self::ImportSpawned(t) => t,
1040 118 : }
1041 118 : }
1042 : }
1043 :
1044 : #[derive(thiserror::Error, Debug)]
1045 : pub enum CreateTimelineError {
1046 : #[error("creation of timeline with the given ID is in progress")]
1047 : AlreadyCreating,
1048 : #[error("timeline already exists with different parameters")]
1049 : Conflict,
1050 : #[error(transparent)]
1051 : AncestorLsn(anyhow::Error),
1052 : #[error("ancestor timeline is not active")]
1053 : AncestorNotActive,
1054 : #[error("ancestor timeline is archived")]
1055 : AncestorArchived,
1056 : #[error("tenant shutting down")]
1057 : ShuttingDown,
1058 : #[error(transparent)]
1059 : Other(#[from] anyhow::Error),
1060 : }
1061 :
1062 : #[derive(thiserror::Error, Debug)]
1063 : pub enum InitdbError {
1064 : #[error("Operation was cancelled")]
1065 : Cancelled,
1066 : #[error(transparent)]
1067 : Other(anyhow::Error),
1068 : #[error(transparent)]
1069 : Inner(postgres_initdb::Error),
1070 : }
1071 :
1072 : enum CreateTimelineCause {
1073 : Load,
1074 : Delete,
1075 : }
1076 :
1077 : #[allow(clippy::large_enum_variant, reason = "TODO")]
1078 : enum LoadTimelineCause {
1079 : Attach,
1080 : Unoffload,
1081 : }
1082 :
1083 : #[derive(thiserror::Error, Debug)]
1084 : pub(crate) enum GcError {
1085 : // The tenant is shutting down
1086 : #[error("tenant shutting down")]
1087 : TenantCancelled,
1088 :
1089 : // The tenant is shutting down
1090 : #[error("timeline shutting down")]
1091 : TimelineCancelled,
1092 :
1093 : // The tenant is in a state inelegible to run GC
1094 : #[error("not active")]
1095 : NotActive,
1096 :
1097 : // A requested GC cutoff LSN was invalid, for example it tried to move backwards
1098 : #[error("not active")]
1099 : BadLsn { why: String },
1100 :
1101 : // A remote storage error while scheduling updates after compaction
1102 : #[error(transparent)]
1103 : Remote(anyhow::Error),
1104 :
1105 : // An error reading while calculating GC cutoffs
1106 : #[error(transparent)]
1107 : GcCutoffs(PageReconstructError),
1108 :
1109 : // If GC was invoked for a particular timeline, this error means it didn't exist
1110 : #[error("timeline not found")]
1111 : TimelineNotFound,
1112 : }
1113 :
1114 : impl From<PageReconstructError> for GcError {
1115 0 : fn from(value: PageReconstructError) -> Self {
1116 0 : match value {
1117 0 : PageReconstructError::Cancelled => Self::TimelineCancelled,
1118 0 : other => Self::GcCutoffs(other),
1119 : }
1120 0 : }
1121 : }
1122 :
1123 : impl From<NotInitialized> for GcError {
1124 0 : fn from(value: NotInitialized) -> Self {
1125 0 : match value {
1126 0 : NotInitialized::Uninitialized => GcError::Remote(value.into()),
1127 0 : NotInitialized::Stopped | NotInitialized::ShuttingDown => GcError::TimelineCancelled,
1128 : }
1129 0 : }
1130 : }
1131 :
1132 : impl From<timeline::layer_manager::Shutdown> for GcError {
1133 0 : fn from(_: timeline::layer_manager::Shutdown) -> Self {
1134 0 : GcError::TimelineCancelled
1135 0 : }
1136 : }
1137 :
1138 : #[derive(thiserror::Error, Debug)]
1139 : pub(crate) enum LoadConfigError {
1140 : #[error("TOML deserialization error: '{0}'")]
1141 : DeserializeToml(#[from] toml_edit::de::Error),
1142 :
1143 : #[error("Config not found at {0}")]
1144 : NotFound(Utf8PathBuf),
1145 : }
1146 :
1147 : impl TenantShard {
1148 : /// Yet another helper for timeline initialization.
1149 : ///
1150 : /// - Initializes the Timeline struct and inserts it into the tenant's hash map
1151 : /// - Scans the local timeline directory for layer files and builds the layer map
1152 : /// - Downloads remote index file and adds remote files to the layer map
1153 : /// - Schedules remote upload tasks for any files that are present locally but missing from remote storage.
1154 : ///
1155 : /// If the operation fails, the timeline is left in the tenant's hash map in Broken state. On success,
1156 : /// it is marked as Active.
1157 : #[allow(clippy::too_many_arguments)]
1158 3 : async fn timeline_init_and_sync(
1159 3 : self: &Arc<Self>,
1160 3 : timeline_id: TimelineId,
1161 3 : resources: TimelineResources,
1162 3 : index_part: IndexPart,
1163 3 : metadata: TimelineMetadata,
1164 3 : previous_heatmap: Option<PreviousHeatmap>,
1165 3 : ancestor: Option<Arc<Timeline>>,
1166 3 : cause: LoadTimelineCause,
1167 3 : ctx: &RequestContext,
1168 3 : ) -> anyhow::Result<TimelineInitAndSyncResult> {
1169 3 : let tenant_id = self.tenant_shard_id;
1170 3 :
1171 3 : let import_pgdata = index_part.import_pgdata.clone();
1172 3 : let idempotency = match &import_pgdata {
1173 0 : Some(import_pgdata) => {
1174 0 : CreateTimelineIdempotency::ImportPgdata(CreatingTimelineIdempotencyImportPgdata {
1175 0 : idempotency_key: import_pgdata.idempotency_key().clone(),
1176 0 : })
1177 : }
1178 : None => {
1179 3 : if metadata.ancestor_timeline().is_none() {
1180 2 : CreateTimelineIdempotency::Bootstrap {
1181 2 : pg_version: metadata.pg_version(),
1182 2 : }
1183 : } else {
1184 1 : CreateTimelineIdempotency::Branch {
1185 1 : ancestor_timeline_id: metadata.ancestor_timeline().unwrap(),
1186 1 : ancestor_start_lsn: metadata.ancestor_lsn(),
1187 1 : }
1188 : }
1189 : }
1190 : };
1191 :
1192 3 : let (timeline, _timeline_ctx) = self.create_timeline_struct(
1193 3 : timeline_id,
1194 3 : &metadata,
1195 3 : previous_heatmap,
1196 3 : ancestor.clone(),
1197 3 : resources,
1198 3 : CreateTimelineCause::Load,
1199 3 : idempotency.clone(),
1200 3 : index_part.gc_compaction.clone(),
1201 3 : index_part.rel_size_migration.clone(),
1202 3 : ctx,
1203 3 : )?;
1204 3 : let disk_consistent_lsn = timeline.get_disk_consistent_lsn();
1205 3 :
1206 3 : if !disk_consistent_lsn.is_valid() {
1207 : // As opposed to normal timelines which get initialised with a disk consitent LSN
1208 : // via initdb, imported timelines start from 0. If the import task stops before
1209 : // it advances disk consitent LSN, allow it to resume.
1210 0 : let in_progress_import = import_pgdata
1211 0 : .as_ref()
1212 0 : .map(|import| !import.is_done())
1213 0 : .unwrap_or(false);
1214 0 : if !in_progress_import {
1215 0 : anyhow::bail!("Timeline {tenant_id}/{timeline_id} has invalid disk_consistent_lsn");
1216 0 : }
1217 3 : }
1218 :
1219 3 : assert_eq!(
1220 3 : disk_consistent_lsn,
1221 3 : metadata.disk_consistent_lsn(),
1222 0 : "these are used interchangeably"
1223 : );
1224 :
1225 3 : timeline.remote_client.init_upload_queue(&index_part)?;
1226 :
1227 3 : timeline
1228 3 : .load_layer_map(disk_consistent_lsn, index_part)
1229 3 : .await
1230 3 : .with_context(|| {
1231 0 : format!("Failed to load layermap for timeline {tenant_id}/{timeline_id}")
1232 3 : })?;
1233 :
1234 : // When unarchiving, we've mostly likely lost the heatmap generated prior
1235 : // to the archival operation. To allow warming this timeline up, generate
1236 : // a previous heatmap which contains all visible layers in the layer map.
1237 : // This previous heatmap will be used whenever a fresh heatmap is generated
1238 : // for the timeline.
1239 3 : if self.conf.generate_unarchival_heatmap && matches!(cause, LoadTimelineCause::Unoffload) {
1240 0 : let mut tline_ending_at = Some((&timeline, timeline.get_last_record_lsn()));
1241 0 : while let Some((tline, end_lsn)) = tline_ending_at {
1242 0 : let unarchival_heatmap = tline.generate_unarchival_heatmap(end_lsn).await;
1243 : // Another unearchived timeline might have generated a heatmap for this ancestor.
1244 : // If the current branch point greater than the previous one use the the heatmap
1245 : // we just generated - it should include more layers.
1246 0 : if !tline.should_keep_previous_heatmap(end_lsn) {
1247 0 : tline
1248 0 : .previous_heatmap
1249 0 : .store(Some(Arc::new(unarchival_heatmap)));
1250 0 : } else {
1251 0 : tracing::info!("Previous heatmap preferred. Dropping unarchival heatmap.")
1252 : }
1253 :
1254 0 : match tline.ancestor_timeline() {
1255 0 : Some(ancestor) => {
1256 0 : if ancestor.update_layer_visibility().await.is_err() {
1257 : // Ancestor timeline is shutting down.
1258 0 : break;
1259 0 : }
1260 0 :
1261 0 : tline_ending_at = Some((ancestor, tline.get_ancestor_lsn()));
1262 : }
1263 0 : None => {
1264 0 : tline_ending_at = None;
1265 0 : }
1266 : }
1267 : }
1268 3 : }
1269 :
1270 0 : match import_pgdata {
1271 0 : Some(import_pgdata) if !import_pgdata.is_done() => {
1272 0 : let mut guard = self.timelines_creating.lock().unwrap();
1273 0 : if !guard.insert(timeline_id) {
1274 : // We should never try and load the same timeline twice during startup
1275 0 : unreachable!("Timeline {tenant_id}/{timeline_id} is already being created")
1276 0 : }
1277 0 : let timeline_create_guard = TimelineCreateGuard {
1278 0 : _tenant_gate_guard: self.gate.enter()?,
1279 0 : owning_tenant: self.clone(),
1280 0 : timeline_id,
1281 0 : idempotency,
1282 0 : // The users of this specific return value don't need the timline_path in there.
1283 0 : timeline_path: timeline
1284 0 : .conf
1285 0 : .timeline_path(&timeline.tenant_shard_id, &timeline.timeline_id),
1286 0 : };
1287 0 : Ok(TimelineInitAndSyncResult::NeedsSpawnImportPgdata(
1288 0 : TimelineInitAndSyncNeedsSpawnImportPgdata {
1289 0 : timeline,
1290 0 : import_pgdata,
1291 0 : guard: timeline_create_guard,
1292 0 : },
1293 0 : ))
1294 : }
1295 : Some(_) | None => {
1296 : {
1297 3 : let mut timelines_accessor = self.timelines.lock().unwrap();
1298 3 : match timelines_accessor.entry(timeline_id) {
1299 : // We should never try and load the same timeline twice during startup
1300 : Entry::Occupied(_) => {
1301 0 : unreachable!(
1302 0 : "Timeline {tenant_id}/{timeline_id} already exists in the tenant map"
1303 0 : );
1304 : }
1305 3 : Entry::Vacant(v) => {
1306 3 : v.insert(Arc::clone(&timeline));
1307 3 : timeline.maybe_spawn_flush_loop();
1308 3 : }
1309 3 : }
1310 3 : }
1311 3 :
1312 3 : if disk_consistent_lsn.is_valid() {
1313 : // Sanity check: a timeline should have some content.
1314 : // Exception: importing timelines might not yet have any
1315 3 : anyhow::ensure!(
1316 3 : ancestor.is_some()
1317 2 : || timeline
1318 2 : .layers
1319 2 : .read(LayerManagerLockHolder::LoadLayerMap)
1320 2 : .await
1321 2 : .layer_map()
1322 2 : .expect(
1323 2 : "currently loading, layer manager cannot be shutdown already"
1324 2 : )
1325 2 : .iter_historic_layers()
1326 2 : .next()
1327 2 : .is_some(),
1328 0 : "Timeline has no ancestor and no layer files"
1329 : );
1330 0 : }
1331 :
1332 3 : Ok(TimelineInitAndSyncResult::ReadyToActivate)
1333 : }
1334 : }
1335 3 : }
1336 :
1337 : /// Attach a tenant that's available in cloud storage.
1338 : ///
1339 : /// This returns quickly, after just creating the in-memory object
1340 : /// Tenant struct and launching a background task to download
1341 : /// the remote index files. On return, the tenant is most likely still in
1342 : /// Attaching state, and it will become Active once the background task
1343 : /// finishes. You can use wait_until_active() to wait for the task to
1344 : /// complete.
1345 : ///
1346 : #[allow(clippy::too_many_arguments)]
1347 0 : pub(crate) fn spawn(
1348 0 : conf: &'static PageServerConf,
1349 0 : tenant_shard_id: TenantShardId,
1350 0 : resources: TenantSharedResources,
1351 0 : attached_conf: AttachedTenantConf,
1352 0 : shard_identity: ShardIdentity,
1353 0 : init_order: Option<InitializationOrder>,
1354 0 : mode: SpawnMode,
1355 0 : ctx: &RequestContext,
1356 0 : ) -> Result<Arc<TenantShard>, GlobalShutDown> {
1357 0 : let wal_redo_manager =
1358 0 : WalRedoManager::new(PostgresRedoManager::new(conf, tenant_shard_id))?;
1359 :
1360 : let TenantSharedResources {
1361 0 : broker_client,
1362 0 : remote_storage,
1363 0 : deletion_queue_client,
1364 0 : l0_flush_global_state,
1365 0 : basebackup_prepare_sender,
1366 0 : feature_resolver,
1367 0 : } = resources;
1368 0 :
1369 0 : let attach_mode = attached_conf.location.attach_mode;
1370 0 : let generation = attached_conf.location.generation;
1371 0 :
1372 0 : let tenant = Arc::new(TenantShard::new(
1373 0 : TenantState::Attaching,
1374 0 : conf,
1375 0 : attached_conf,
1376 0 : shard_identity,
1377 0 : Some(wal_redo_manager),
1378 0 : tenant_shard_id,
1379 0 : remote_storage.clone(),
1380 0 : deletion_queue_client,
1381 0 : l0_flush_global_state,
1382 0 : basebackup_prepare_sender,
1383 0 : feature_resolver,
1384 0 : ));
1385 0 :
1386 0 : // The attach task will carry a GateGuard, so that shutdown() reliably waits for it to drop out if
1387 0 : // we shut down while attaching.
1388 0 : let attach_gate_guard = tenant
1389 0 : .gate
1390 0 : .enter()
1391 0 : .expect("We just created the TenantShard: nothing else can have shut it down yet");
1392 0 :
1393 0 : // Do all the hard work in the background
1394 0 : let tenant_clone = Arc::clone(&tenant);
1395 0 : let ctx = ctx.detached_child(TaskKind::Attach, DownloadBehavior::Warn);
1396 0 : task_mgr::spawn(
1397 0 : &tokio::runtime::Handle::current(),
1398 0 : TaskKind::Attach,
1399 0 : tenant_shard_id,
1400 0 : None,
1401 0 : "attach tenant",
1402 0 : async move {
1403 0 :
1404 0 : info!(
1405 : ?attach_mode,
1406 0 : "Attaching tenant"
1407 : );
1408 :
1409 0 : let _gate_guard = attach_gate_guard;
1410 0 :
1411 0 : // Is this tenant being spawned as part of process startup?
1412 0 : let starting_up = init_order.is_some();
1413 0 : scopeguard::defer! {
1414 0 : if starting_up {
1415 0 : TENANT.startup_complete.inc();
1416 0 : }
1417 0 : }
1418 :
1419 0 : fn make_broken_or_stopping(t: &TenantShard, err: anyhow::Error) {
1420 0 : t.state.send_modify(|state| match state {
1421 : // TODO: the old code alluded to DeleteTenantFlow sometimes setting
1422 : // TenantState::Stopping before we get here, but this may be outdated.
1423 : // Let's find out with a testing assertion. If this doesn't fire, and the
1424 : // logs don't show this happening in production, remove the Stopping cases.
1425 0 : TenantState::Stopping{..} if cfg!(any(test, feature = "testing")) => {
1426 0 : panic!("unexpected TenantState::Stopping during attach")
1427 : }
1428 : // If the tenant is cancelled, assume the error was caused by cancellation.
1429 0 : TenantState::Attaching if t.cancel.is_cancelled() => {
1430 0 : info!("attach cancelled, setting tenant state to Stopping: {err}");
1431 : // NB: progress None tells `set_stopping` that attach has cancelled.
1432 0 : *state = TenantState::Stopping { progress: None };
1433 : }
1434 : // According to the old code, DeleteTenantFlow may already have set this to
1435 : // Stopping. Retain its progress.
1436 : // TODO: there is no DeleteTenantFlow. Is this still needed? See above.
1437 0 : TenantState::Stopping { progress } if t.cancel.is_cancelled() => {
1438 0 : assert!(progress.is_some(), "concurrent attach cancellation");
1439 0 : info!("attach cancelled, already Stopping: {err}");
1440 : }
1441 : // Mark the tenant as broken.
1442 : TenantState::Attaching | TenantState::Stopping { .. } => {
1443 0 : error!("attach failed, setting tenant state to Broken (was {state}): {err:?}");
1444 0 : *state = TenantState::broken_from_reason(err.to_string())
1445 : }
1446 : // The attach task owns the tenant state until activated.
1447 0 : state => panic!("invalid tenant state {state} during attach: {err:?}"),
1448 0 : });
1449 0 : }
1450 :
1451 : // TODO: should also be rejecting tenant conf changes that violate this check.
1452 0 : if let Err(e) = crate::tenant::storage_layer::inmemory_layer::IndexEntry::validate_checkpoint_distance(tenant_clone.get_checkpoint_distance()) {
1453 0 : make_broken_or_stopping(&tenant_clone, anyhow::anyhow!(e));
1454 0 : return Ok(());
1455 0 : }
1456 0 :
1457 0 : let mut init_order = init_order;
1458 0 : // take the completion because initial tenant loading will complete when all of
1459 0 : // these tasks complete.
1460 0 : let _completion = init_order
1461 0 : .as_mut()
1462 0 : .and_then(|x| x.initial_tenant_load.take());
1463 0 : let remote_load_completion = init_order
1464 0 : .as_mut()
1465 0 : .and_then(|x| x.initial_tenant_load_remote.take());
1466 :
1467 : enum AttachType<'a> {
1468 : /// We are attaching this tenant lazily in the background.
1469 : Warmup {
1470 : _permit: tokio::sync::SemaphorePermit<'a>,
1471 : during_startup: bool
1472 : },
1473 : /// We are attaching this tenant as soon as we can, because for example an
1474 : /// endpoint tried to access it.
1475 : OnDemand,
1476 : /// During normal operations after startup, we are attaching a tenant, and
1477 : /// eager attach was requested.
1478 : Normal,
1479 : }
1480 :
1481 0 : let attach_type = if matches!(mode, SpawnMode::Lazy) {
1482 : // Before doing any I/O, wait for at least one of:
1483 : // - A client attempting to access to this tenant (on-demand loading)
1484 : // - A permit becoming available in the warmup semaphore (background warmup)
1485 :
1486 0 : tokio::select!(
1487 0 : permit = tenant_clone.activate_now_sem.acquire() => {
1488 0 : let _ = permit.expect("activate_now_sem is never closed");
1489 0 : tracing::info!("Activating tenant (on-demand)");
1490 0 : AttachType::OnDemand
1491 : },
1492 0 : permit = conf.concurrent_tenant_warmup.inner().acquire() => {
1493 0 : let _permit = permit.expect("concurrent_tenant_warmup semaphore is never closed");
1494 0 : tracing::info!("Activating tenant (warmup)");
1495 0 : AttachType::Warmup {
1496 0 : _permit,
1497 0 : during_startup: init_order.is_some()
1498 0 : }
1499 : }
1500 0 : _ = tenant_clone.cancel.cancelled() => {
1501 : // This is safe, but should be pretty rare: it is interesting if a tenant
1502 : // stayed in Activating for such a long time that shutdown found it in
1503 : // that state.
1504 0 : tracing::info!(state=%tenant_clone.current_state(), "Tenant shut down before activation");
1505 : // Set the tenant to Stopping to signal `set_stopping` that we're done.
1506 0 : make_broken_or_stopping(&tenant_clone, anyhow::anyhow!("Shut down while Attaching"));
1507 0 : return Ok(());
1508 : },
1509 : )
1510 : } else {
1511 : // SpawnMode::{Create,Eager} always cause jumping ahead of the
1512 : // concurrent_tenant_warmup queue
1513 0 : AttachType::Normal
1514 : };
1515 :
1516 0 : let preload = match &mode {
1517 : SpawnMode::Eager | SpawnMode::Lazy => {
1518 0 : let _preload_timer = TENANT.preload.start_timer();
1519 0 : let res = tenant_clone
1520 0 : .preload(&remote_storage, task_mgr::shutdown_token())
1521 0 : .await;
1522 0 : match res {
1523 0 : Ok(p) => Some(p),
1524 0 : Err(e) => {
1525 0 : make_broken_or_stopping(&tenant_clone, anyhow::anyhow!(e));
1526 0 : return Ok(());
1527 : }
1528 : }
1529 : }
1530 :
1531 : };
1532 :
1533 : // Remote preload is complete.
1534 0 : drop(remote_load_completion);
1535 0 :
1536 0 :
1537 0 : // We will time the duration of the attach phase unless this is a creation (attach will do no work)
1538 0 : let attach_start = std::time::Instant::now();
1539 0 : let attached = {
1540 0 : let _attach_timer = Some(TENANT.attach.start_timer());
1541 0 : tenant_clone.attach(preload, &ctx).await
1542 : };
1543 0 : let attach_duration = attach_start.elapsed();
1544 0 : _ = tenant_clone.attach_wal_lag_cooldown.set(WalLagCooldown::new(attach_start, attach_duration));
1545 0 :
1546 0 : match attached {
1547 : Ok(()) => {
1548 0 : info!("attach finished, activating");
1549 0 : tenant_clone.activate(broker_client, None, &ctx);
1550 : }
1551 0 : Err(e) => make_broken_or_stopping(&tenant_clone, anyhow::anyhow!(e)),
1552 : }
1553 :
1554 : // If we are doing an opportunistic warmup attachment at startup, initialize
1555 : // logical size at the same time. This is better than starting a bunch of idle tenants
1556 : // with cold caches and then coming back later to initialize their logical sizes.
1557 : //
1558 : // It also prevents the warmup proccess competing with the concurrency limit on
1559 : // logical size calculations: if logical size calculation semaphore is saturated,
1560 : // then warmup will wait for that before proceeding to the next tenant.
1561 0 : if matches!(attach_type, AttachType::Warmup { during_startup: true, .. }) {
1562 0 : let mut futs: FuturesUnordered<_> = tenant_clone.timelines.lock().unwrap().values().cloned().map(|t| t.await_initial_logical_size()).collect();
1563 0 : tracing::info!("Waiting for initial logical sizes while warming up...");
1564 0 : while futs.next().await.is_some() {}
1565 0 : tracing::info!("Warm-up complete");
1566 0 : }
1567 :
1568 0 : Ok(())
1569 0 : }
1570 0 : .instrument(tracing::info_span!(parent: None, "attach", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), gen=?generation)),
1571 : );
1572 0 : Ok(tenant)
1573 0 : }
1574 :
1575 : #[instrument(skip_all)]
1576 : pub(crate) async fn preload(
1577 : self: &Arc<Self>,
1578 : remote_storage: &GenericRemoteStorage,
1579 : cancel: CancellationToken,
1580 : ) -> anyhow::Result<TenantPreload> {
1581 : span::debug_assert_current_span_has_tenant_id();
1582 : // Get list of remote timelines
1583 : // download index files for every tenant timeline
1584 : info!("listing remote timelines");
1585 : let (mut remote_timeline_ids, other_keys) = remote_timeline_client::list_remote_timelines(
1586 : remote_storage,
1587 : self.tenant_shard_id,
1588 : cancel.clone(),
1589 : )
1590 : .await?;
1591 :
1592 : let tenant_manifest = match download_tenant_manifest(
1593 : remote_storage,
1594 : &self.tenant_shard_id,
1595 : self.generation,
1596 : &cancel,
1597 : )
1598 : .await
1599 : {
1600 : Ok((tenant_manifest, _, _)) => Some(tenant_manifest),
1601 : Err(DownloadError::NotFound) => None,
1602 : Err(err) => return Err(err.into()),
1603 : };
1604 :
1605 : info!(
1606 : "found {} timelines ({} offloaded timelines)",
1607 : remote_timeline_ids.len(),
1608 : tenant_manifest
1609 : .as_ref()
1610 3 : .map(|m| m.offloaded_timelines.len())
1611 : .unwrap_or(0)
1612 : );
1613 :
1614 : for k in other_keys {
1615 : warn!("Unexpected non timeline key {k}");
1616 : }
1617 :
1618 : // Avoid downloading IndexPart of offloaded timelines.
1619 : let mut offloaded_with_prefix = HashSet::new();
1620 : if let Some(tenant_manifest) = &tenant_manifest {
1621 : for offloaded in tenant_manifest.offloaded_timelines.iter() {
1622 : if remote_timeline_ids.remove(&offloaded.timeline_id) {
1623 : offloaded_with_prefix.insert(offloaded.timeline_id);
1624 : } else {
1625 : // We'll take care later of timelines in the manifest without a prefix
1626 : }
1627 : }
1628 : }
1629 :
1630 : // TODO(vlad): Could go to S3 if the secondary is freezing cold and hasn't even
1631 : // pulled the first heatmap. Not entirely necessary since the storage controller
1632 : // will kick the secondary in any case and cause a download.
1633 : let maybe_heatmap_at = self.read_on_disk_heatmap().await;
1634 :
1635 : let timelines = self
1636 : .load_timelines_metadata(
1637 : remote_timeline_ids,
1638 : remote_storage,
1639 : maybe_heatmap_at,
1640 : cancel,
1641 : )
1642 : .await?;
1643 :
1644 : Ok(TenantPreload {
1645 : tenant_manifest,
1646 : timelines: timelines
1647 : .into_iter()
1648 3 : .map(|(id, tl)| (id, Some(tl)))
1649 0 : .chain(offloaded_with_prefix.into_iter().map(|id| (id, None)))
1650 : .collect(),
1651 : })
1652 : }
1653 :
1654 118 : async fn read_on_disk_heatmap(&self) -> Option<(HeatMapTenant, std::time::Instant)> {
1655 118 : if !self.conf.load_previous_heatmap {
1656 0 : return None;
1657 118 : }
1658 118 :
1659 118 : let on_disk_heatmap_path = self.conf.tenant_heatmap_path(&self.tenant_shard_id);
1660 118 : match tokio::fs::read_to_string(on_disk_heatmap_path).await {
1661 0 : Ok(heatmap) => match serde_json::from_str::<HeatMapTenant>(&heatmap) {
1662 0 : Ok(heatmap) => Some((heatmap, std::time::Instant::now())),
1663 0 : Err(err) => {
1664 0 : error!("Failed to deserialize old heatmap: {err}");
1665 0 : None
1666 : }
1667 : },
1668 118 : Err(err) => match err.kind() {
1669 118 : std::io::ErrorKind::NotFound => None,
1670 : _ => {
1671 0 : error!("Unexpected IO error reading old heatmap: {err}");
1672 0 : None
1673 : }
1674 : },
1675 : }
1676 118 : }
1677 :
1678 : ///
1679 : /// Background task that downloads all data for a tenant and brings it to Active state.
1680 : ///
1681 : /// No background tasks are started as part of this routine.
1682 : ///
1683 118 : async fn attach(
1684 118 : self: &Arc<TenantShard>,
1685 118 : preload: Option<TenantPreload>,
1686 118 : ctx: &RequestContext,
1687 118 : ) -> anyhow::Result<()> {
1688 118 : span::debug_assert_current_span_has_tenant_id();
1689 118 :
1690 118 : failpoint_support::sleep_millis_async!("before-attaching-tenant");
1691 :
1692 118 : let Some(preload) = preload else {
1693 0 : anyhow::bail!(
1694 0 : "local-only deployment is no longer supported, https://github.com/neondatabase/neon/issues/5624"
1695 0 : );
1696 : };
1697 :
1698 118 : let mut offloaded_timeline_ids = HashSet::new();
1699 118 : let mut offloaded_timelines_list = Vec::new();
1700 118 : if let Some(tenant_manifest) = &preload.tenant_manifest {
1701 3 : for timeline_manifest in tenant_manifest.offloaded_timelines.iter() {
1702 0 : let timeline_id = timeline_manifest.timeline_id;
1703 0 : let offloaded_timeline =
1704 0 : OffloadedTimeline::from_manifest(self.tenant_shard_id, timeline_manifest);
1705 0 : offloaded_timelines_list.push((timeline_id, Arc::new(offloaded_timeline)));
1706 0 : offloaded_timeline_ids.insert(timeline_id);
1707 0 : }
1708 115 : }
1709 : // Complete deletions for offloaded timeline id's from manifest.
1710 : // The manifest will be uploaded later in this function.
1711 118 : offloaded_timelines_list
1712 118 : .retain(|(offloaded_id, offloaded)| {
1713 0 : // Existence of a timeline is finally determined by the existence of an index-part.json in remote storage.
1714 0 : // If there is dangling references in another location, they need to be cleaned up.
1715 0 : let delete = !preload.timelines.contains_key(offloaded_id);
1716 0 : if delete {
1717 0 : tracing::info!("Removing offloaded timeline {offloaded_id} from manifest as no remote prefix was found");
1718 0 : offloaded.defuse_for_tenant_drop();
1719 0 : }
1720 0 : !delete
1721 118 : });
1722 118 :
1723 118 : let mut timelines_to_resume_deletions = vec![];
1724 118 :
1725 118 : let mut remote_index_and_client = HashMap::new();
1726 118 : let mut timeline_ancestors = HashMap::new();
1727 118 : let mut existent_timelines = HashSet::new();
1728 121 : for (timeline_id, preload) in preload.timelines {
1729 3 : let Some(preload) = preload else { continue };
1730 : // This is an invariant of the `preload` function's API
1731 3 : assert!(!offloaded_timeline_ids.contains(&timeline_id));
1732 3 : let index_part = match preload.index_part {
1733 3 : Ok(i) => {
1734 3 : debug!("remote index part exists for timeline {timeline_id}");
1735 : // We found index_part on the remote, this is the standard case.
1736 3 : existent_timelines.insert(timeline_id);
1737 3 : i
1738 : }
1739 : Err(DownloadError::NotFound) => {
1740 : // There is no index_part on the remote. We only get here
1741 : // if there is some prefix for the timeline in the remote storage.
1742 : // This can e.g. be the initdb.tar.zst archive, maybe a
1743 : // remnant from a prior incomplete creation or deletion attempt.
1744 : // Delete the local directory as the deciding criterion for a
1745 : // timeline's existence is presence of index_part.
1746 0 : info!(%timeline_id, "index_part not found on remote");
1747 0 : continue;
1748 : }
1749 0 : Err(DownloadError::Fatal(why)) => {
1750 0 : // If, while loading one remote timeline, we saw an indication that our generation
1751 0 : // number is likely invalid, then we should not load the whole tenant.
1752 0 : error!(%timeline_id, "Fatal error loading timeline: {why}");
1753 0 : anyhow::bail!(why.to_string());
1754 : }
1755 0 : Err(e) => {
1756 0 : // Some (possibly ephemeral) error happened during index_part download.
1757 0 : // Pretend the timeline exists to not delete the timeline directory,
1758 0 : // as it might be a temporary issue and we don't want to re-download
1759 0 : // everything after it resolves.
1760 0 : warn!(%timeline_id, "Failed to load index_part from remote storage, failed creation? ({e})");
1761 :
1762 0 : existent_timelines.insert(timeline_id);
1763 0 : continue;
1764 : }
1765 : };
1766 3 : match index_part {
1767 3 : MaybeDeletedIndexPart::IndexPart(index_part) => {
1768 3 : timeline_ancestors.insert(timeline_id, index_part.metadata.clone());
1769 3 : remote_index_and_client.insert(
1770 3 : timeline_id,
1771 3 : (index_part, preload.client, preload.previous_heatmap),
1772 3 : );
1773 3 : }
1774 0 : MaybeDeletedIndexPart::Deleted(index_part) => {
1775 0 : info!(
1776 0 : "timeline {} is deleted, picking to resume deletion",
1777 : timeline_id
1778 : );
1779 0 : timelines_to_resume_deletions.push((timeline_id, index_part, preload.client));
1780 : }
1781 : }
1782 : }
1783 :
1784 118 : let mut gc_blocks = HashMap::new();
1785 :
1786 : // For every timeline, download the metadata file, scan the local directory,
1787 : // and build a layer map that contains an entry for each remote and local
1788 : // layer file.
1789 118 : let sorted_timelines = tree_sort_timelines(timeline_ancestors, |m| m.ancestor_timeline())?;
1790 121 : for (timeline_id, remote_metadata) in sorted_timelines {
1791 3 : let (index_part, remote_client, previous_heatmap) = remote_index_and_client
1792 3 : .remove(&timeline_id)
1793 3 : .expect("just put it in above");
1794 :
1795 3 : if let Some(blocking) = index_part.gc_blocking.as_ref() {
1796 : // could just filter these away, but it helps while testing
1797 0 : anyhow::ensure!(
1798 0 : !blocking.reasons.is_empty(),
1799 0 : "index_part for {timeline_id} is malformed: it should not have gc blocking with zero reasons"
1800 : );
1801 0 : let prev = gc_blocks.insert(timeline_id, blocking.reasons);
1802 0 : assert!(prev.is_none());
1803 3 : }
1804 :
1805 : // TODO again handle early failure
1806 3 : let effect = self
1807 3 : .load_remote_timeline(
1808 3 : timeline_id,
1809 3 : index_part,
1810 3 : remote_metadata,
1811 3 : previous_heatmap,
1812 3 : self.get_timeline_resources_for(remote_client),
1813 3 : LoadTimelineCause::Attach,
1814 3 : ctx,
1815 3 : )
1816 3 : .await
1817 3 : .with_context(|| {
1818 0 : format!(
1819 0 : "failed to load remote timeline {} for tenant {}",
1820 0 : timeline_id, self.tenant_shard_id
1821 0 : )
1822 3 : })?;
1823 :
1824 3 : match effect {
1825 3 : TimelineInitAndSyncResult::ReadyToActivate => {
1826 3 : // activation happens later, on Tenant::activate
1827 3 : }
1828 : TimelineInitAndSyncResult::NeedsSpawnImportPgdata(
1829 : TimelineInitAndSyncNeedsSpawnImportPgdata {
1830 0 : timeline,
1831 0 : import_pgdata,
1832 0 : guard,
1833 0 : },
1834 0 : ) => {
1835 0 : let timeline_id = timeline.timeline_id;
1836 0 : let import_task_gate = Gate::default();
1837 0 : let import_task_guard = import_task_gate.enter().unwrap();
1838 0 : let import_task_handle =
1839 0 : tokio::task::spawn(self.clone().create_timeline_import_pgdata_task(
1840 0 : timeline.clone(),
1841 0 : import_pgdata,
1842 0 : guard,
1843 0 : import_task_guard,
1844 0 : ctx.detached_child(TaskKind::ImportPgdata, DownloadBehavior::Warn),
1845 0 : ));
1846 0 :
1847 0 : let prev = self.timelines_importing.lock().unwrap().insert(
1848 0 : timeline_id,
1849 0 : Arc::new(ImportingTimeline {
1850 0 : timeline: timeline.clone(),
1851 0 : import_task_handle,
1852 0 : import_task_gate,
1853 0 : delete_progress: TimelineDeleteProgress::default(),
1854 0 : }),
1855 0 : );
1856 0 :
1857 0 : assert!(prev.is_none());
1858 : }
1859 : }
1860 : }
1861 :
1862 : // Walk through deleted timelines, resume deletion
1863 118 : for (timeline_id, index_part, remote_timeline_client) in timelines_to_resume_deletions {
1864 0 : remote_timeline_client
1865 0 : .init_upload_queue_stopped_to_continue_deletion(&index_part)
1866 0 : .context("init queue stopped")
1867 0 : .map_err(LoadLocalTimelineError::ResumeDeletion)?;
1868 :
1869 0 : DeleteTimelineFlow::resume_deletion(
1870 0 : Arc::clone(self),
1871 0 : timeline_id,
1872 0 : &index_part.metadata,
1873 0 : remote_timeline_client,
1874 0 : ctx,
1875 0 : )
1876 0 : .instrument(tracing::info_span!("timeline_delete", %timeline_id))
1877 0 : .await
1878 0 : .context("resume_deletion")
1879 0 : .map_err(LoadLocalTimelineError::ResumeDeletion)?;
1880 : }
1881 118 : {
1882 118 : let mut offloaded_timelines_accessor = self.timelines_offloaded.lock().unwrap();
1883 118 : offloaded_timelines_accessor.extend(offloaded_timelines_list.into_iter());
1884 118 : }
1885 :
1886 : // Stash the preloaded tenant manifest, and upload a new manifest if changed.
1887 : //
1888 : // NB: this must happen after the tenant is fully populated above. In particular the
1889 : // offloaded timelines, which are included in the manifest.
1890 : {
1891 118 : let mut guard = self.remote_tenant_manifest.lock().await;
1892 118 : assert!(guard.is_none(), "tenant manifest set before preload"); // first populated here
1893 118 : *guard = preload.tenant_manifest;
1894 118 : }
1895 118 : self.maybe_upload_tenant_manifest().await?;
1896 :
1897 : // The local filesystem contents are a cache of what's in the remote IndexPart;
1898 : // IndexPart is the source of truth.
1899 118 : self.clean_up_timelines(&existent_timelines)?;
1900 :
1901 118 : self.gc_block.set_scanned(gc_blocks);
1902 118 :
1903 118 : fail::fail_point!("attach-before-activate", |_| {
1904 0 : anyhow::bail!("attach-before-activate");
1905 118 : });
1906 118 : failpoint_support::sleep_millis_async!("attach-before-activate-sleep", &self.cancel);
1907 :
1908 118 : info!("Done");
1909 :
1910 118 : Ok(())
1911 118 : }
1912 :
1913 : /// Check for any local timeline directories that are temporary, or do not correspond to a
1914 : /// timeline that still exists: this can happen if we crashed during a deletion/creation, or
1915 : /// if a timeline was deleted while the tenant was attached to a different pageserver.
1916 118 : fn clean_up_timelines(&self, existent_timelines: &HashSet<TimelineId>) -> anyhow::Result<()> {
1917 118 : let timelines_dir = self.conf.timelines_path(&self.tenant_shard_id);
1918 :
1919 118 : let entries = match timelines_dir.read_dir_utf8() {
1920 118 : Ok(d) => d,
1921 0 : Err(e) => {
1922 0 : if e.kind() == std::io::ErrorKind::NotFound {
1923 0 : return Ok(());
1924 : } else {
1925 0 : return Err(e).context("list timelines directory for tenant");
1926 : }
1927 : }
1928 : };
1929 :
1930 122 : for entry in entries {
1931 4 : let entry = entry.context("read timeline dir entry")?;
1932 4 : let entry_path = entry.path();
1933 :
1934 4 : let purge = if crate::is_temporary(entry_path) {
1935 0 : true
1936 : } else {
1937 4 : match TimelineId::try_from(entry_path.file_name()) {
1938 4 : Ok(i) => {
1939 4 : // Purge if the timeline ID does not exist in remote storage: remote storage is the authority.
1940 4 : !existent_timelines.contains(&i)
1941 : }
1942 0 : Err(e) => {
1943 0 : tracing::warn!(
1944 0 : "Unparseable directory in timelines directory: {entry_path}, ignoring ({e})"
1945 : );
1946 : // Do not purge junk: if we don't recognize it, be cautious and leave it for a human.
1947 0 : false
1948 : }
1949 : }
1950 : };
1951 :
1952 4 : if purge {
1953 1 : tracing::info!("Purging stale timeline dentry {entry_path}");
1954 1 : if let Err(e) = match entry.file_type() {
1955 1 : Ok(t) => if t.is_dir() {
1956 1 : std::fs::remove_dir_all(entry_path)
1957 : } else {
1958 0 : std::fs::remove_file(entry_path)
1959 : }
1960 1 : .or_else(fs_ext::ignore_not_found),
1961 0 : Err(e) => Err(e),
1962 : } {
1963 0 : tracing::warn!("Failed to purge stale timeline dentry {entry_path}: {e}");
1964 1 : }
1965 3 : }
1966 : }
1967 :
1968 118 : Ok(())
1969 118 : }
1970 :
1971 : /// Get sum of all remote timelines sizes
1972 : ///
1973 : /// This function relies on the index_part instead of listing the remote storage
1974 0 : pub fn remote_size(&self) -> u64 {
1975 0 : let mut size = 0;
1976 :
1977 0 : for timeline in self.list_timelines() {
1978 0 : size += timeline.remote_client.get_remote_physical_size();
1979 0 : }
1980 :
1981 0 : size
1982 0 : }
1983 :
1984 : #[instrument(skip_all, fields(timeline_id=%timeline_id))]
1985 : #[allow(clippy::too_many_arguments)]
1986 : async fn load_remote_timeline(
1987 : self: &Arc<Self>,
1988 : timeline_id: TimelineId,
1989 : index_part: IndexPart,
1990 : remote_metadata: TimelineMetadata,
1991 : previous_heatmap: Option<PreviousHeatmap>,
1992 : resources: TimelineResources,
1993 : cause: LoadTimelineCause,
1994 : ctx: &RequestContext,
1995 : ) -> anyhow::Result<TimelineInitAndSyncResult> {
1996 : span::debug_assert_current_span_has_tenant_id();
1997 :
1998 : info!("downloading index file for timeline {}", timeline_id);
1999 : tokio::fs::create_dir_all(self.conf.timeline_path(&self.tenant_shard_id, &timeline_id))
2000 : .await
2001 : .context("Failed to create new timeline directory")?;
2002 :
2003 : let ancestor = if let Some(ancestor_id) = remote_metadata.ancestor_timeline() {
2004 : let timelines = self.timelines.lock().unwrap();
2005 : Some(Arc::clone(timelines.get(&ancestor_id).ok_or_else(
2006 0 : || {
2007 0 : anyhow::anyhow!(
2008 0 : "cannot find ancestor timeline {ancestor_id} for timeline {timeline_id}"
2009 0 : )
2010 0 : },
2011 : )?))
2012 : } else {
2013 : None
2014 : };
2015 :
2016 : self.timeline_init_and_sync(
2017 : timeline_id,
2018 : resources,
2019 : index_part,
2020 : remote_metadata,
2021 : previous_heatmap,
2022 : ancestor,
2023 : cause,
2024 : ctx,
2025 : )
2026 : .await
2027 : }
2028 :
2029 118 : async fn load_timelines_metadata(
2030 118 : self: &Arc<TenantShard>,
2031 118 : timeline_ids: HashSet<TimelineId>,
2032 118 : remote_storage: &GenericRemoteStorage,
2033 118 : heatmap: Option<(HeatMapTenant, std::time::Instant)>,
2034 118 : cancel: CancellationToken,
2035 118 : ) -> anyhow::Result<HashMap<TimelineId, TimelinePreload>> {
2036 118 : let mut timeline_heatmaps = heatmap.map(|h| (h.0.into_timelines_index(), h.1));
2037 118 :
2038 118 : let mut part_downloads = JoinSet::new();
2039 121 : for timeline_id in timeline_ids {
2040 3 : let cancel_clone = cancel.clone();
2041 3 :
2042 3 : let previous_timeline_heatmap = timeline_heatmaps.as_mut().and_then(|hs| {
2043 0 : hs.0.remove(&timeline_id).map(|h| PreviousHeatmap::Active {
2044 0 : heatmap: h,
2045 0 : read_at: hs.1,
2046 0 : end_lsn: None,
2047 0 : })
2048 3 : });
2049 3 : part_downloads.spawn(
2050 3 : self.load_timeline_metadata(
2051 3 : timeline_id,
2052 3 : remote_storage.clone(),
2053 3 : previous_timeline_heatmap,
2054 3 : cancel_clone,
2055 3 : )
2056 3 : .instrument(info_span!("download_index_part", %timeline_id)),
2057 : );
2058 : }
2059 :
2060 118 : let mut timeline_preloads: HashMap<TimelineId, TimelinePreload> = HashMap::new();
2061 :
2062 : loop {
2063 121 : tokio::select!(
2064 121 : next = part_downloads.join_next() => {
2065 121 : match next {
2066 3 : Some(result) => {
2067 3 : let preload = result.context("join preload task")?;
2068 3 : timeline_preloads.insert(preload.timeline_id, preload);
2069 : },
2070 : None => {
2071 118 : break;
2072 : }
2073 : }
2074 : },
2075 121 : _ = cancel.cancelled() => {
2076 0 : anyhow::bail!("Cancelled while waiting for remote index download")
2077 : }
2078 : )
2079 : }
2080 :
2081 118 : Ok(timeline_preloads)
2082 118 : }
2083 :
2084 3 : fn build_timeline_client(
2085 3 : &self,
2086 3 : timeline_id: TimelineId,
2087 3 : remote_storage: GenericRemoteStorage,
2088 3 : ) -> RemoteTimelineClient {
2089 3 : RemoteTimelineClient::new(
2090 3 : remote_storage.clone(),
2091 3 : self.deletion_queue_client.clone(),
2092 3 : self.conf,
2093 3 : self.tenant_shard_id,
2094 3 : timeline_id,
2095 3 : self.generation,
2096 3 : &self.tenant_conf.load().location,
2097 3 : )
2098 3 : }
2099 :
2100 3 : fn load_timeline_metadata(
2101 3 : self: &Arc<TenantShard>,
2102 3 : timeline_id: TimelineId,
2103 3 : remote_storage: GenericRemoteStorage,
2104 3 : previous_heatmap: Option<PreviousHeatmap>,
2105 3 : cancel: CancellationToken,
2106 3 : ) -> impl Future<Output = TimelinePreload> + use<> {
2107 3 : let client = self.build_timeline_client(timeline_id, remote_storage);
2108 3 : async move {
2109 3 : debug_assert_current_span_has_tenant_and_timeline_id();
2110 3 : debug!("starting index part download");
2111 :
2112 3 : let index_part = client.download_index_file(&cancel).await;
2113 :
2114 3 : debug!("finished index part download");
2115 :
2116 3 : TimelinePreload {
2117 3 : client,
2118 3 : timeline_id,
2119 3 : index_part,
2120 3 : previous_heatmap,
2121 3 : }
2122 3 : }
2123 3 : }
2124 :
2125 0 : fn check_to_be_archived_has_no_unarchived_children(
2126 0 : timeline_id: TimelineId,
2127 0 : timelines: &std::sync::MutexGuard<'_, HashMap<TimelineId, Arc<Timeline>>>,
2128 0 : ) -> Result<(), TimelineArchivalError> {
2129 0 : let children: Vec<TimelineId> = timelines
2130 0 : .iter()
2131 0 : .filter_map(|(id, entry)| {
2132 0 : if entry.get_ancestor_timeline_id() != Some(timeline_id) {
2133 0 : return None;
2134 0 : }
2135 0 : if entry.is_archived() == Some(true) {
2136 0 : return None;
2137 0 : }
2138 0 : Some(*id)
2139 0 : })
2140 0 : .collect();
2141 0 :
2142 0 : if !children.is_empty() {
2143 0 : return Err(TimelineArchivalError::HasUnarchivedChildren(children));
2144 0 : }
2145 0 : Ok(())
2146 0 : }
2147 :
2148 0 : fn check_ancestor_of_to_be_unarchived_is_not_archived(
2149 0 : ancestor_timeline_id: TimelineId,
2150 0 : timelines: &std::sync::MutexGuard<'_, HashMap<TimelineId, Arc<Timeline>>>,
2151 0 : offloaded_timelines: &std::sync::MutexGuard<
2152 0 : '_,
2153 0 : HashMap<TimelineId, Arc<OffloadedTimeline>>,
2154 0 : >,
2155 0 : ) -> Result<(), TimelineArchivalError> {
2156 0 : let has_archived_parent =
2157 0 : if let Some(ancestor_timeline) = timelines.get(&ancestor_timeline_id) {
2158 0 : ancestor_timeline.is_archived() == Some(true)
2159 0 : } else if offloaded_timelines.contains_key(&ancestor_timeline_id) {
2160 0 : true
2161 : } else {
2162 0 : error!("ancestor timeline {ancestor_timeline_id} not found");
2163 0 : if cfg!(debug_assertions) {
2164 0 : panic!("ancestor timeline {ancestor_timeline_id} not found");
2165 0 : }
2166 0 : return Err(TimelineArchivalError::NotFound);
2167 : };
2168 0 : if has_archived_parent {
2169 0 : return Err(TimelineArchivalError::HasArchivedParent(
2170 0 : ancestor_timeline_id,
2171 0 : ));
2172 0 : }
2173 0 : Ok(())
2174 0 : }
2175 :
2176 0 : fn check_to_be_unarchived_timeline_has_no_archived_parent(
2177 0 : timeline: &Arc<Timeline>,
2178 0 : ) -> Result<(), TimelineArchivalError> {
2179 0 : if let Some(ancestor_timeline) = timeline.ancestor_timeline() {
2180 0 : if ancestor_timeline.is_archived() == Some(true) {
2181 0 : return Err(TimelineArchivalError::HasArchivedParent(
2182 0 : ancestor_timeline.timeline_id,
2183 0 : ));
2184 0 : }
2185 0 : }
2186 0 : Ok(())
2187 0 : }
2188 :
2189 : /// Loads the specified (offloaded) timeline from S3 and attaches it as a loaded timeline
2190 : ///
2191 : /// Counterpart to [`offload_timeline`].
2192 0 : async fn unoffload_timeline(
2193 0 : self: &Arc<Self>,
2194 0 : timeline_id: TimelineId,
2195 0 : broker_client: storage_broker::BrokerClientChannel,
2196 0 : ctx: RequestContext,
2197 0 : ) -> Result<Arc<Timeline>, TimelineArchivalError> {
2198 0 : info!("unoffloading timeline");
2199 :
2200 : // We activate the timeline below manually, so this must be called on an active tenant.
2201 : // We expect callers of this function to ensure this.
2202 0 : match self.current_state() {
2203 : TenantState::Activating { .. }
2204 : | TenantState::Attaching
2205 : | TenantState::Broken { .. } => {
2206 0 : panic!("Timeline expected to be active")
2207 : }
2208 0 : TenantState::Stopping { .. } => return Err(TimelineArchivalError::Cancelled),
2209 0 : TenantState::Active => {}
2210 0 : }
2211 0 : let cancel = self.cancel.clone();
2212 0 :
2213 0 : // Protect against concurrent attempts to use this TimelineId
2214 0 : // We don't care much about idempotency, as it's ensured a layer above.
2215 0 : let allow_offloaded = true;
2216 0 : let _create_guard = self
2217 0 : .create_timeline_create_guard(
2218 0 : timeline_id,
2219 0 : CreateTimelineIdempotency::FailWithConflict,
2220 0 : allow_offloaded,
2221 0 : )
2222 0 : .map_err(|err| match err {
2223 0 : TimelineExclusionError::AlreadyCreating => TimelineArchivalError::AlreadyInProgress,
2224 : TimelineExclusionError::AlreadyExists { .. } => {
2225 0 : TimelineArchivalError::Other(anyhow::anyhow!("Timeline already exists"))
2226 : }
2227 0 : TimelineExclusionError::Other(e) => TimelineArchivalError::Other(e),
2228 0 : TimelineExclusionError::ShuttingDown => TimelineArchivalError::Cancelled,
2229 0 : })?;
2230 :
2231 0 : let timeline_preload = self
2232 0 : .load_timeline_metadata(
2233 0 : timeline_id,
2234 0 : self.remote_storage.clone(),
2235 0 : None,
2236 0 : cancel.clone(),
2237 0 : )
2238 0 : .await;
2239 :
2240 0 : let index_part = match timeline_preload.index_part {
2241 0 : Ok(index_part) => {
2242 0 : debug!("remote index part exists for timeline {timeline_id}");
2243 0 : index_part
2244 : }
2245 : Err(DownloadError::NotFound) => {
2246 0 : error!(%timeline_id, "index_part not found on remote");
2247 0 : return Err(TimelineArchivalError::NotFound);
2248 : }
2249 0 : Err(DownloadError::Cancelled) => return Err(TimelineArchivalError::Cancelled),
2250 0 : Err(e) => {
2251 0 : // Some (possibly ephemeral) error happened during index_part download.
2252 0 : warn!(%timeline_id, "Failed to load index_part from remote storage, failed creation? ({e})");
2253 0 : return Err(TimelineArchivalError::Other(
2254 0 : anyhow::Error::new(e).context("downloading index_part from remote storage"),
2255 0 : ));
2256 : }
2257 : };
2258 0 : let index_part = match index_part {
2259 0 : MaybeDeletedIndexPart::IndexPart(index_part) => index_part,
2260 0 : MaybeDeletedIndexPart::Deleted(_index_part) => {
2261 0 : info!("timeline is deleted according to index_part.json");
2262 0 : return Err(TimelineArchivalError::NotFound);
2263 : }
2264 : };
2265 0 : let remote_metadata = index_part.metadata.clone();
2266 0 : let timeline_resources = self.build_timeline_resources(timeline_id);
2267 0 : self.load_remote_timeline(
2268 0 : timeline_id,
2269 0 : index_part,
2270 0 : remote_metadata,
2271 0 : None,
2272 0 : timeline_resources,
2273 0 : LoadTimelineCause::Unoffload,
2274 0 : &ctx,
2275 0 : )
2276 0 : .await
2277 0 : .with_context(|| {
2278 0 : format!(
2279 0 : "failed to load remote timeline {} for tenant {}",
2280 0 : timeline_id, self.tenant_shard_id
2281 0 : )
2282 0 : })
2283 0 : .map_err(TimelineArchivalError::Other)?;
2284 :
2285 0 : let timeline = {
2286 0 : let timelines = self.timelines.lock().unwrap();
2287 0 : let Some(timeline) = timelines.get(&timeline_id) else {
2288 0 : warn!("timeline not available directly after attach");
2289 : // This is not a panic because no locks are held between `load_remote_timeline`
2290 : // which puts the timeline into timelines, and our look into the timeline map.
2291 0 : return Err(TimelineArchivalError::Other(anyhow::anyhow!(
2292 0 : "timeline not available directly after attach"
2293 0 : )));
2294 : };
2295 0 : let mut offloaded_timelines = self.timelines_offloaded.lock().unwrap();
2296 0 : match offloaded_timelines.remove(&timeline_id) {
2297 0 : Some(offloaded) => {
2298 0 : offloaded.delete_from_ancestor_with_timelines(&timelines);
2299 0 : }
2300 0 : None => warn!("timeline already removed from offloaded timelines"),
2301 : }
2302 :
2303 0 : self.initialize_gc_info(&timelines, &offloaded_timelines, Some(timeline_id));
2304 0 :
2305 0 : Arc::clone(timeline)
2306 0 : };
2307 0 :
2308 0 : // Upload new list of offloaded timelines to S3
2309 0 : self.maybe_upload_tenant_manifest().await?;
2310 :
2311 : // Activate the timeline (if it makes sense)
2312 0 : if !(timeline.is_broken() || timeline.is_stopping()) {
2313 0 : let background_jobs_can_start = None;
2314 0 : timeline.activate(
2315 0 : self.clone(),
2316 0 : broker_client.clone(),
2317 0 : background_jobs_can_start,
2318 0 : &ctx.with_scope_timeline(&timeline),
2319 0 : );
2320 0 : }
2321 :
2322 0 : info!("timeline unoffloading complete");
2323 0 : Ok(timeline)
2324 0 : }
2325 :
2326 0 : pub(crate) async fn apply_timeline_archival_config(
2327 0 : self: &Arc<Self>,
2328 0 : timeline_id: TimelineId,
2329 0 : new_state: TimelineArchivalState,
2330 0 : broker_client: storage_broker::BrokerClientChannel,
2331 0 : ctx: RequestContext,
2332 0 : ) -> Result<(), TimelineArchivalError> {
2333 0 : info!("setting timeline archival config");
2334 : // First part: figure out what is needed to do, and do validation
2335 0 : let timeline_or_unarchive_offloaded = 'outer: {
2336 0 : let timelines = self.timelines.lock().unwrap();
2337 :
2338 0 : let Some(timeline) = timelines.get(&timeline_id) else {
2339 0 : let offloaded_timelines = self.timelines_offloaded.lock().unwrap();
2340 0 : let Some(offloaded) = offloaded_timelines.get(&timeline_id) else {
2341 0 : return Err(TimelineArchivalError::NotFound);
2342 : };
2343 0 : if new_state == TimelineArchivalState::Archived {
2344 : // It's offloaded already, so nothing to do
2345 0 : return Ok(());
2346 0 : }
2347 0 : if let Some(ancestor_timeline_id) = offloaded.ancestor_timeline_id {
2348 0 : Self::check_ancestor_of_to_be_unarchived_is_not_archived(
2349 0 : ancestor_timeline_id,
2350 0 : &timelines,
2351 0 : &offloaded_timelines,
2352 0 : )?;
2353 0 : }
2354 0 : break 'outer None;
2355 : };
2356 :
2357 : // Do some validation. We release the timelines lock below, so there is potential
2358 : // for race conditions: these checks are more present to prevent misunderstandings of
2359 : // the API's capabilities, instead of serving as the sole way to defend their invariants.
2360 0 : match new_state {
2361 : TimelineArchivalState::Unarchived => {
2362 0 : Self::check_to_be_unarchived_timeline_has_no_archived_parent(timeline)?
2363 : }
2364 : TimelineArchivalState::Archived => {
2365 0 : Self::check_to_be_archived_has_no_unarchived_children(timeline_id, &timelines)?
2366 : }
2367 : }
2368 0 : Some(Arc::clone(timeline))
2369 : };
2370 :
2371 : // Second part: unoffload timeline (if needed)
2372 0 : let timeline = if let Some(timeline) = timeline_or_unarchive_offloaded {
2373 0 : timeline
2374 : } else {
2375 : // Turn offloaded timeline into a non-offloaded one
2376 0 : self.unoffload_timeline(timeline_id, broker_client, ctx)
2377 0 : .await?
2378 : };
2379 :
2380 : // Third part: upload new timeline archival state and block until it is present in S3
2381 0 : let upload_needed = match timeline
2382 0 : .remote_client
2383 0 : .schedule_index_upload_for_timeline_archival_state(new_state)
2384 : {
2385 0 : Ok(upload_needed) => upload_needed,
2386 0 : Err(e) => {
2387 0 : if timeline.cancel.is_cancelled() {
2388 0 : return Err(TimelineArchivalError::Cancelled);
2389 : } else {
2390 0 : return Err(TimelineArchivalError::Other(e));
2391 : }
2392 : }
2393 : };
2394 :
2395 0 : if upload_needed {
2396 0 : info!("Uploading new state");
2397 : const MAX_WAIT: Duration = Duration::from_secs(10);
2398 0 : let Ok(v) =
2399 0 : tokio::time::timeout(MAX_WAIT, timeline.remote_client.wait_completion()).await
2400 : else {
2401 0 : tracing::warn!("reached timeout for waiting on upload queue");
2402 0 : return Err(TimelineArchivalError::Timeout);
2403 : };
2404 0 : v.map_err(|e| match e {
2405 0 : WaitCompletionError::NotInitialized(e) => {
2406 0 : TimelineArchivalError::Other(anyhow::anyhow!(e))
2407 : }
2408 : WaitCompletionError::UploadQueueShutDownOrStopped => {
2409 0 : TimelineArchivalError::Cancelled
2410 : }
2411 0 : })?;
2412 0 : }
2413 0 : Ok(())
2414 0 : }
2415 :
2416 1 : pub fn get_offloaded_timeline(
2417 1 : &self,
2418 1 : timeline_id: TimelineId,
2419 1 : ) -> Result<Arc<OffloadedTimeline>, GetTimelineError> {
2420 1 : self.timelines_offloaded
2421 1 : .lock()
2422 1 : .unwrap()
2423 1 : .get(&timeline_id)
2424 1 : .map(Arc::clone)
2425 1 : .ok_or(GetTimelineError::NotFound {
2426 1 : tenant_id: self.tenant_shard_id,
2427 1 : timeline_id,
2428 1 : })
2429 1 : }
2430 :
2431 2 : pub(crate) fn tenant_shard_id(&self) -> TenantShardId {
2432 2 : self.tenant_shard_id
2433 2 : }
2434 :
2435 : /// Get Timeline handle for given Neon timeline ID.
2436 : /// This function is idempotent. It doesn't change internal state in any way.
2437 111 : pub fn get_timeline(
2438 111 : &self,
2439 111 : timeline_id: TimelineId,
2440 111 : active_only: bool,
2441 111 : ) -> Result<Arc<Timeline>, GetTimelineError> {
2442 111 : let timelines_accessor = self.timelines.lock().unwrap();
2443 111 : let timeline = timelines_accessor
2444 111 : .get(&timeline_id)
2445 111 : .ok_or(GetTimelineError::NotFound {
2446 111 : tenant_id: self.tenant_shard_id,
2447 111 : timeline_id,
2448 111 : })?;
2449 :
2450 110 : if active_only && !timeline.is_active() {
2451 0 : Err(GetTimelineError::NotActive {
2452 0 : tenant_id: self.tenant_shard_id,
2453 0 : timeline_id,
2454 0 : state: timeline.current_state(),
2455 0 : })
2456 : } else {
2457 110 : Ok(Arc::clone(timeline))
2458 : }
2459 111 : }
2460 :
2461 : /// Lists timelines the tenant contains.
2462 : /// It's up to callers to omit certain timelines that are not considered ready for use.
2463 2 : pub fn list_timelines(&self) -> Vec<Arc<Timeline>> {
2464 2 : self.timelines
2465 2 : .lock()
2466 2 : .unwrap()
2467 2 : .values()
2468 2 : .map(Arc::clone)
2469 2 : .collect()
2470 2 : }
2471 :
2472 : /// Lists timelines the tenant contains.
2473 : /// It's up to callers to omit certain timelines that are not considered ready for use.
2474 0 : pub fn list_importing_timelines(&self) -> Vec<Arc<ImportingTimeline>> {
2475 0 : self.timelines_importing
2476 0 : .lock()
2477 0 : .unwrap()
2478 0 : .values()
2479 0 : .map(Arc::clone)
2480 0 : .collect()
2481 0 : }
2482 :
2483 : /// Lists timelines the tenant manages, including offloaded ones.
2484 : ///
2485 : /// It's up to callers to omit certain timelines that are not considered ready for use.
2486 0 : pub fn list_timelines_and_offloaded(
2487 0 : &self,
2488 0 : ) -> (Vec<Arc<Timeline>>, Vec<Arc<OffloadedTimeline>>) {
2489 0 : let timelines = self
2490 0 : .timelines
2491 0 : .lock()
2492 0 : .unwrap()
2493 0 : .values()
2494 0 : .map(Arc::clone)
2495 0 : .collect();
2496 0 : let offloaded = self
2497 0 : .timelines_offloaded
2498 0 : .lock()
2499 0 : .unwrap()
2500 0 : .values()
2501 0 : .map(Arc::clone)
2502 0 : .collect();
2503 0 : (timelines, offloaded)
2504 0 : }
2505 :
2506 0 : pub fn list_timeline_ids(&self) -> Vec<TimelineId> {
2507 0 : self.timelines.lock().unwrap().keys().cloned().collect()
2508 0 : }
2509 :
2510 : /// This is used by tests & import-from-basebackup.
2511 : ///
2512 : /// The returned [`UninitializedTimeline`] contains no data nor metadata and it is in
2513 : /// a state that will fail [`TenantShard::load_remote_timeline`] because `disk_consistent_lsn=Lsn(0)`.
2514 : ///
2515 : /// The caller is responsible for getting the timeline into a state that will be accepted
2516 : /// by [`TenantShard::load_remote_timeline`] / [`TenantShard::attach`].
2517 : /// Then they may call [`UninitializedTimeline::finish_creation`] to add the timeline
2518 : /// to the [`TenantShard::timelines`].
2519 : ///
2520 : /// Tests should use `TenantShard::create_test_timeline` to set up the minimum required metadata keys.
2521 114 : pub(crate) async fn create_empty_timeline(
2522 114 : self: &Arc<Self>,
2523 114 : new_timeline_id: TimelineId,
2524 114 : initdb_lsn: Lsn,
2525 114 : pg_version: u32,
2526 114 : ctx: &RequestContext,
2527 114 : ) -> anyhow::Result<(UninitializedTimeline, RequestContext)> {
2528 114 : anyhow::ensure!(
2529 114 : self.is_active(),
2530 0 : "Cannot create empty timelines on inactive tenant"
2531 : );
2532 :
2533 : // Protect against concurrent attempts to use this TimelineId
2534 114 : let create_guard = match self
2535 114 : .start_creating_timeline(new_timeline_id, CreateTimelineIdempotency::FailWithConflict)
2536 114 : .await?
2537 : {
2538 113 : StartCreatingTimelineResult::CreateGuard(guard) => guard,
2539 : StartCreatingTimelineResult::Idempotent(_) => {
2540 0 : unreachable!("FailWithConflict implies we get an error instead")
2541 : }
2542 : };
2543 :
2544 113 : let new_metadata = TimelineMetadata::new(
2545 113 : // Initialize disk_consistent LSN to 0, The caller must import some data to
2546 113 : // make it valid, before calling finish_creation()
2547 113 : Lsn(0),
2548 113 : None,
2549 113 : None,
2550 113 : Lsn(0),
2551 113 : initdb_lsn,
2552 113 : initdb_lsn,
2553 113 : pg_version,
2554 113 : );
2555 113 : self.prepare_new_timeline(
2556 113 : new_timeline_id,
2557 113 : &new_metadata,
2558 113 : create_guard,
2559 113 : initdb_lsn,
2560 113 : None,
2561 113 : None,
2562 113 : ctx,
2563 113 : )
2564 113 : .await
2565 114 : }
2566 :
2567 : /// Helper for unit tests to create an empty timeline.
2568 : ///
2569 : /// The timeline is has state value `Active` but its background loops are not running.
2570 : // This makes the various functions which anyhow::ensure! for Active state work in tests.
2571 : // Our current tests don't need the background loops.
2572 : #[cfg(test)]
2573 109 : pub async fn create_test_timeline(
2574 109 : self: &Arc<Self>,
2575 109 : new_timeline_id: TimelineId,
2576 109 : initdb_lsn: Lsn,
2577 109 : pg_version: u32,
2578 109 : ctx: &RequestContext,
2579 109 : ) -> anyhow::Result<Arc<Timeline>> {
2580 109 : let (uninit_tl, ctx) = self
2581 109 : .create_empty_timeline(new_timeline_id, initdb_lsn, pg_version, ctx)
2582 109 : .await?;
2583 109 : let tline = uninit_tl.raw_timeline().expect("we just created it");
2584 109 : assert_eq!(tline.get_last_record_lsn(), Lsn(0));
2585 :
2586 : // Setup minimum keys required for the timeline to be usable.
2587 109 : let mut modification = tline.begin_modification(initdb_lsn);
2588 109 : modification
2589 109 : .init_empty_test_timeline()
2590 109 : .context("init_empty_test_timeline")?;
2591 109 : modification
2592 109 : .commit(&ctx)
2593 109 : .await
2594 109 : .context("commit init_empty_test_timeline modification")?;
2595 :
2596 : // Flush to disk so that uninit_tl's check for valid disk_consistent_lsn passes.
2597 109 : tline.maybe_spawn_flush_loop();
2598 109 : tline.freeze_and_flush().await.context("freeze_and_flush")?;
2599 :
2600 : // Make sure the freeze_and_flush reaches remote storage.
2601 109 : tline.remote_client.wait_completion().await.unwrap();
2602 :
2603 109 : let tl = uninit_tl.finish_creation().await?;
2604 : // The non-test code would call tl.activate() here.
2605 109 : tl.set_state(TimelineState::Active);
2606 109 : Ok(tl)
2607 109 : }
2608 :
2609 : /// Helper for unit tests to create a timeline with some pre-loaded states.
2610 : #[cfg(test)]
2611 : #[allow(clippy::too_many_arguments)]
2612 24 : pub async fn create_test_timeline_with_layers(
2613 24 : self: &Arc<Self>,
2614 24 : new_timeline_id: TimelineId,
2615 24 : initdb_lsn: Lsn,
2616 24 : pg_version: u32,
2617 24 : ctx: &RequestContext,
2618 24 : in_memory_layer_desc: Vec<timeline::InMemoryLayerTestDesc>,
2619 24 : delta_layer_desc: Vec<timeline::DeltaLayerTestDesc>,
2620 24 : image_layer_desc: Vec<(Lsn, Vec<(pageserver_api::key::Key, bytes::Bytes)>)>,
2621 24 : end_lsn: Lsn,
2622 24 : ) -> anyhow::Result<Arc<Timeline>> {
2623 : use checks::check_valid_layermap;
2624 : use itertools::Itertools;
2625 :
2626 24 : let tline = self
2627 24 : .create_test_timeline(new_timeline_id, initdb_lsn, pg_version, ctx)
2628 24 : .await?;
2629 24 : tline.force_advance_lsn(end_lsn);
2630 71 : for deltas in delta_layer_desc {
2631 47 : tline
2632 47 : .force_create_delta_layer(deltas, Some(initdb_lsn), ctx)
2633 47 : .await?;
2634 : }
2635 58 : for (lsn, images) in image_layer_desc {
2636 34 : tline
2637 34 : .force_create_image_layer(lsn, images, Some(initdb_lsn), ctx)
2638 34 : .await?;
2639 : }
2640 28 : for in_memory in in_memory_layer_desc {
2641 4 : tline
2642 4 : .force_create_in_memory_layer(in_memory, Some(initdb_lsn), ctx)
2643 4 : .await?;
2644 : }
2645 24 : let layer_names = tline
2646 24 : .layers
2647 24 : .read(LayerManagerLockHolder::Testing)
2648 24 : .await
2649 24 : .layer_map()
2650 24 : .unwrap()
2651 24 : .iter_historic_layers()
2652 105 : .map(|layer| layer.layer_name())
2653 24 : .collect_vec();
2654 24 : if let Some(err) = check_valid_layermap(&layer_names) {
2655 0 : bail!("invalid layermap: {err}");
2656 24 : }
2657 24 : Ok(tline)
2658 24 : }
2659 :
2660 : /// Create a new timeline.
2661 : ///
2662 : /// Returns the new timeline ID and reference to its Timeline object.
2663 : ///
2664 : /// If the caller specified the timeline ID to use (`new_timeline_id`), and timeline with
2665 : /// the same timeline ID already exists, returns CreateTimelineError::AlreadyExists.
2666 : #[allow(clippy::too_many_arguments)]
2667 0 : pub(crate) async fn create_timeline(
2668 0 : self: &Arc<TenantShard>,
2669 0 : params: CreateTimelineParams,
2670 0 : broker_client: storage_broker::BrokerClientChannel,
2671 0 : ctx: &RequestContext,
2672 0 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
2673 0 : if !self.is_active() {
2674 0 : if matches!(self.current_state(), TenantState::Stopping { .. }) {
2675 0 : return Err(CreateTimelineError::ShuttingDown);
2676 : } else {
2677 0 : return Err(CreateTimelineError::Other(anyhow::anyhow!(
2678 0 : "Cannot create timelines on inactive tenant"
2679 0 : )));
2680 : }
2681 0 : }
2682 :
2683 0 : let _gate = self
2684 0 : .gate
2685 0 : .enter()
2686 0 : .map_err(|_| CreateTimelineError::ShuttingDown)?;
2687 :
2688 0 : let result: CreateTimelineResult = match params {
2689 : CreateTimelineParams::Bootstrap(CreateTimelineParamsBootstrap {
2690 0 : new_timeline_id,
2691 0 : existing_initdb_timeline_id,
2692 0 : pg_version,
2693 0 : }) => {
2694 0 : self.bootstrap_timeline(
2695 0 : new_timeline_id,
2696 0 : pg_version,
2697 0 : existing_initdb_timeline_id,
2698 0 : ctx,
2699 0 : )
2700 0 : .await?
2701 : }
2702 : CreateTimelineParams::Branch(CreateTimelineParamsBranch {
2703 0 : new_timeline_id,
2704 0 : ancestor_timeline_id,
2705 0 : mut ancestor_start_lsn,
2706 : }) => {
2707 0 : let ancestor_timeline = self
2708 0 : .get_timeline(ancestor_timeline_id, false)
2709 0 : .context("Cannot branch off the timeline that's not present in pageserver")?;
2710 :
2711 : // instead of waiting around, just deny the request because ancestor is not yet
2712 : // ready for other purposes either.
2713 0 : if !ancestor_timeline.is_active() {
2714 0 : return Err(CreateTimelineError::AncestorNotActive);
2715 0 : }
2716 0 :
2717 0 : if ancestor_timeline.is_archived() == Some(true) {
2718 0 : info!("tried to branch archived timeline");
2719 0 : return Err(CreateTimelineError::AncestorArchived);
2720 0 : }
2721 :
2722 0 : if let Some(lsn) = ancestor_start_lsn.as_mut() {
2723 0 : *lsn = lsn.align();
2724 0 :
2725 0 : let ancestor_ancestor_lsn = ancestor_timeline.get_ancestor_lsn();
2726 0 : if ancestor_ancestor_lsn > *lsn {
2727 : // can we safely just branch from the ancestor instead?
2728 0 : return Err(CreateTimelineError::AncestorLsn(anyhow::anyhow!(
2729 0 : "invalid start lsn {} for ancestor timeline {}: less than timeline ancestor lsn {}",
2730 0 : lsn,
2731 0 : ancestor_timeline_id,
2732 0 : ancestor_ancestor_lsn,
2733 0 : )));
2734 0 : }
2735 0 :
2736 0 : // Wait for the WAL to arrive and be processed on the parent branch up
2737 0 : // to the requested branch point. The repository code itself doesn't
2738 0 : // require it, but if we start to receive WAL on the new timeline,
2739 0 : // decoding the new WAL might need to look up previous pages, relation
2740 0 : // sizes etc. and that would get confused if the previous page versions
2741 0 : // are not in the repository yet.
2742 0 : ancestor_timeline
2743 0 : .wait_lsn(
2744 0 : *lsn,
2745 0 : timeline::WaitLsnWaiter::Tenant,
2746 0 : timeline::WaitLsnTimeout::Default,
2747 0 : ctx,
2748 0 : )
2749 0 : .await
2750 0 : .map_err(|e| match e {
2751 0 : e @ (WaitLsnError::Timeout(_) | WaitLsnError::BadState { .. }) => {
2752 0 : CreateTimelineError::AncestorLsn(anyhow::anyhow!(e))
2753 : }
2754 0 : WaitLsnError::Shutdown => CreateTimelineError::ShuttingDown,
2755 0 : })?;
2756 0 : }
2757 :
2758 0 : self.branch_timeline(&ancestor_timeline, new_timeline_id, ancestor_start_lsn, ctx)
2759 0 : .await?
2760 : }
2761 0 : CreateTimelineParams::ImportPgdata(params) => {
2762 0 : self.create_timeline_import_pgdata(params, ctx).await?
2763 : }
2764 : };
2765 :
2766 : // At this point we have dropped our guard on [`Self::timelines_creating`], and
2767 : // the timeline is visible in [`Self::timelines`], but it is _not_ durable yet. We must
2768 : // not send a success to the caller until it is. The same applies to idempotent retries.
2769 : //
2770 : // TODO: the timeline is already visible in [`Self::timelines`]; a caller could incorrectly
2771 : // assume that, because they can see the timeline via API, that the creation is done and
2772 : // that it is durable. Ideally, we would keep the timeline hidden (in [`Self::timelines_creating`])
2773 : // until it is durable, e.g., by extending the time we hold the creation guard. This also
2774 : // interacts with UninitializedTimeline and is generally a bit tricky.
2775 : //
2776 : // To re-emphasize: the only correct way to create a timeline is to repeat calling the
2777 : // creation API until it returns success. Only then is durability guaranteed.
2778 0 : info!(creation_result=%result.discriminant(), "waiting for timeline to be durable");
2779 0 : result
2780 0 : .timeline()
2781 0 : .remote_client
2782 0 : .wait_completion()
2783 0 : .await
2784 0 : .map_err(|e| match e {
2785 : WaitCompletionError::NotInitialized(
2786 0 : e, // If the queue is already stopped, it's a shutdown error.
2787 0 : ) if e.is_stopping() => CreateTimelineError::ShuttingDown,
2788 : WaitCompletionError::NotInitialized(_) => {
2789 : // This is a bug: we should never try to wait for uploads before initializing the timeline
2790 0 : debug_assert!(false);
2791 0 : CreateTimelineError::Other(anyhow::anyhow!("timeline not initialized"))
2792 : }
2793 : WaitCompletionError::UploadQueueShutDownOrStopped => {
2794 0 : CreateTimelineError::ShuttingDown
2795 : }
2796 0 : })?;
2797 :
2798 : // The creating task is responsible for activating the timeline.
2799 : // We do this after `wait_completion()` so that we don't spin up tasks that start
2800 : // doing stuff before the IndexPart is durable in S3, which is done by the previous section.
2801 0 : let activated_timeline = match result {
2802 0 : CreateTimelineResult::Created(timeline) => {
2803 0 : timeline.activate(
2804 0 : self.clone(),
2805 0 : broker_client,
2806 0 : None,
2807 0 : &ctx.with_scope_timeline(&timeline),
2808 0 : );
2809 0 : timeline
2810 : }
2811 0 : CreateTimelineResult::Idempotent(timeline) => {
2812 0 : info!(
2813 0 : "request was deemed idempotent, activation will be done by the creating task"
2814 : );
2815 0 : timeline
2816 : }
2817 0 : CreateTimelineResult::ImportSpawned(timeline) => {
2818 0 : info!(
2819 0 : "import task spawned, timeline will become visible and activated once the import is done"
2820 : );
2821 0 : timeline
2822 : }
2823 : };
2824 :
2825 0 : Ok(activated_timeline)
2826 0 : }
2827 :
2828 : /// The returned [`Arc<Timeline>`] is NOT in the [`TenantShard::timelines`] map until the import
2829 : /// completes in the background. A DIFFERENT [`Arc<Timeline>`] will be inserted into the
2830 : /// [`TenantShard::timelines`] map when the import completes.
2831 : /// We only return an [`Arc<Timeline>`] here so the API handler can create a [`pageserver_api::models::TimelineInfo`]
2832 : /// for the response.
2833 0 : async fn create_timeline_import_pgdata(
2834 0 : self: &Arc<Self>,
2835 0 : params: CreateTimelineParamsImportPgdata,
2836 0 : ctx: &RequestContext,
2837 0 : ) -> Result<CreateTimelineResult, CreateTimelineError> {
2838 0 : let CreateTimelineParamsImportPgdata {
2839 0 : new_timeline_id,
2840 0 : location,
2841 0 : idempotency_key,
2842 0 : } = params;
2843 0 :
2844 0 : let started_at = chrono::Utc::now().naive_utc();
2845 :
2846 : //
2847 : // There's probably a simpler way to upload an index part, but, remote_timeline_client
2848 : // is the canonical way we do it.
2849 : // - create an empty timeline in-memory
2850 : // - use its remote_timeline_client to do the upload
2851 : // - dispose of the uninit timeline
2852 : // - keep the creation guard alive
2853 :
2854 0 : let timeline_create_guard = match self
2855 0 : .start_creating_timeline(
2856 0 : new_timeline_id,
2857 0 : CreateTimelineIdempotency::ImportPgdata(CreatingTimelineIdempotencyImportPgdata {
2858 0 : idempotency_key: idempotency_key.clone(),
2859 0 : }),
2860 0 : )
2861 0 : .await?
2862 : {
2863 0 : StartCreatingTimelineResult::CreateGuard(guard) => guard,
2864 0 : StartCreatingTimelineResult::Idempotent(timeline) => {
2865 0 : return Ok(CreateTimelineResult::Idempotent(timeline));
2866 : }
2867 : };
2868 :
2869 0 : let (mut uninit_timeline, timeline_ctx) = {
2870 0 : let this = &self;
2871 0 : let initdb_lsn = Lsn(0);
2872 0 : async move {
2873 0 : let new_metadata = TimelineMetadata::new(
2874 0 : // Initialize disk_consistent LSN to 0, The caller must import some data to
2875 0 : // make it valid, before calling finish_creation()
2876 0 : Lsn(0),
2877 0 : None,
2878 0 : None,
2879 0 : Lsn(0),
2880 0 : initdb_lsn,
2881 0 : initdb_lsn,
2882 0 : 15,
2883 0 : );
2884 0 : this.prepare_new_timeline(
2885 0 : new_timeline_id,
2886 0 : &new_metadata,
2887 0 : timeline_create_guard,
2888 0 : initdb_lsn,
2889 0 : None,
2890 0 : None,
2891 0 : ctx,
2892 0 : )
2893 0 : .await
2894 0 : }
2895 0 : }
2896 0 : .await?;
2897 :
2898 0 : let in_progress = import_pgdata::index_part_format::InProgress {
2899 0 : idempotency_key,
2900 0 : location,
2901 0 : started_at,
2902 0 : };
2903 0 : let index_part = import_pgdata::index_part_format::Root::V1(
2904 0 : import_pgdata::index_part_format::V1::InProgress(in_progress),
2905 0 : );
2906 0 : uninit_timeline
2907 0 : .raw_timeline()
2908 0 : .unwrap()
2909 0 : .remote_client
2910 0 : .schedule_index_upload_for_import_pgdata_state_update(Some(index_part.clone()))?;
2911 :
2912 : // wait_completion happens in caller
2913 :
2914 0 : let (timeline, timeline_create_guard) = uninit_timeline.finish_creation_myself();
2915 0 :
2916 0 : let import_task_gate = Gate::default();
2917 0 : let import_task_guard = import_task_gate.enter().unwrap();
2918 0 :
2919 0 : let import_task_handle = tokio::spawn(self.clone().create_timeline_import_pgdata_task(
2920 0 : timeline.clone(),
2921 0 : index_part,
2922 0 : timeline_create_guard,
2923 0 : import_task_guard,
2924 0 : timeline_ctx.detached_child(TaskKind::ImportPgdata, DownloadBehavior::Warn),
2925 0 : ));
2926 0 :
2927 0 : let prev = self.timelines_importing.lock().unwrap().insert(
2928 0 : timeline.timeline_id,
2929 0 : Arc::new(ImportingTimeline {
2930 0 : timeline: timeline.clone(),
2931 0 : import_task_handle,
2932 0 : import_task_gate,
2933 0 : delete_progress: TimelineDeleteProgress::default(),
2934 0 : }),
2935 0 : );
2936 0 :
2937 0 : // Idempotency is enforced higher up the stack
2938 0 : assert!(prev.is_none());
2939 :
2940 : // NB: the timeline doesn't exist in self.timelines at this point
2941 0 : Ok(CreateTimelineResult::ImportSpawned(timeline))
2942 0 : }
2943 :
2944 : /// Finalize the import of a timeline on this shard by marking it complete in
2945 : /// the index part. If the import task hasn't finished yet, returns an error.
2946 : ///
2947 : /// This method is idempotent. If the import was finalized once, the next call
2948 : /// will be a no-op.
2949 0 : pub(crate) async fn finalize_importing_timeline(
2950 0 : &self,
2951 0 : timeline_id: TimelineId,
2952 0 : ) -> Result<(), FinalizeTimelineImportError> {
2953 0 : let timeline = {
2954 0 : let locked = self.timelines_importing.lock().unwrap();
2955 0 : match locked.get(&timeline_id) {
2956 0 : Some(importing_timeline) => {
2957 0 : if !importing_timeline.import_task_handle.is_finished() {
2958 0 : return Err(FinalizeTimelineImportError::ImportTaskStillRunning);
2959 0 : }
2960 0 :
2961 0 : importing_timeline.timeline.clone()
2962 : }
2963 : None => {
2964 0 : return Ok(());
2965 : }
2966 : }
2967 : };
2968 :
2969 0 : timeline
2970 0 : .remote_client
2971 0 : .schedule_index_upload_for_import_pgdata_finalize()
2972 0 : .map_err(|_err| FinalizeTimelineImportError::ShuttingDown)?;
2973 0 : timeline
2974 0 : .remote_client
2975 0 : .wait_completion()
2976 0 : .await
2977 0 : .map_err(|_err| FinalizeTimelineImportError::ShuttingDown)?;
2978 :
2979 0 : self.timelines_importing
2980 0 : .lock()
2981 0 : .unwrap()
2982 0 : .remove(&timeline_id);
2983 0 :
2984 0 : Ok(())
2985 0 : }
2986 :
2987 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%timeline.timeline_id))]
2988 : async fn create_timeline_import_pgdata_task(
2989 : self: Arc<TenantShard>,
2990 : timeline: Arc<Timeline>,
2991 : index_part: import_pgdata::index_part_format::Root,
2992 : timeline_create_guard: TimelineCreateGuard,
2993 : _import_task_guard: GateGuard,
2994 : ctx: RequestContext,
2995 : ) {
2996 : debug_assert_current_span_has_tenant_and_timeline_id();
2997 : info!("starting");
2998 : scopeguard::defer! {info!("exiting")};
2999 :
3000 : let res = self
3001 : .create_timeline_import_pgdata_task_impl(
3002 : timeline,
3003 : index_part,
3004 : timeline_create_guard,
3005 : ctx,
3006 : )
3007 : .await;
3008 : if let Err(err) = &res {
3009 : error!(?err, "task failed");
3010 : // TODO sleep & retry, sensitive to tenant shutdown
3011 : // TODO: allow timeline deletion requests => should cancel the task
3012 : }
3013 : }
3014 :
3015 0 : async fn create_timeline_import_pgdata_task_impl(
3016 0 : self: Arc<TenantShard>,
3017 0 : timeline: Arc<Timeline>,
3018 0 : index_part: import_pgdata::index_part_format::Root,
3019 0 : _timeline_create_guard: TimelineCreateGuard,
3020 0 : ctx: RequestContext,
3021 0 : ) -> Result<(), anyhow::Error> {
3022 0 : info!("importing pgdata");
3023 0 : let ctx = ctx.with_scope_timeline(&timeline);
3024 0 : import_pgdata::doit(&timeline, index_part, &ctx, self.cancel.clone())
3025 0 : .await
3026 0 : .context("import")?;
3027 0 : info!("import done - waiting for activation");
3028 :
3029 0 : anyhow::Ok(())
3030 0 : }
3031 :
3032 0 : pub(crate) async fn delete_timeline(
3033 0 : self: Arc<Self>,
3034 0 : timeline_id: TimelineId,
3035 0 : ) -> Result<(), DeleteTimelineError> {
3036 0 : DeleteTimelineFlow::run(&self, timeline_id).await?;
3037 :
3038 0 : Ok(())
3039 0 : }
3040 :
3041 : /// perform one garbage collection iteration, removing old data files from disk.
3042 : /// this function is periodically called by gc task.
3043 : /// also it can be explicitly requested through page server api 'do_gc' command.
3044 : ///
3045 : /// `target_timeline_id` specifies the timeline to GC, or None for all.
3046 : ///
3047 : /// The `horizon` an `pitr` parameters determine how much WAL history needs to be retained.
3048 : /// Also known as the retention period, or the GC cutoff point. `horizon` specifies
3049 : /// the amount of history, as LSN difference from current latest LSN on each timeline.
3050 : /// `pitr` specifies the same as a time difference from the current time. The effective
3051 : /// GC cutoff point is determined conservatively by either `horizon` and `pitr`, whichever
3052 : /// requires more history to be retained.
3053 : //
3054 377 : pub(crate) async fn gc_iteration(
3055 377 : &self,
3056 377 : target_timeline_id: Option<TimelineId>,
3057 377 : horizon: u64,
3058 377 : pitr: Duration,
3059 377 : cancel: &CancellationToken,
3060 377 : ctx: &RequestContext,
3061 377 : ) -> Result<GcResult, GcError> {
3062 377 : // Don't start doing work during shutdown
3063 377 : if let TenantState::Stopping { .. } = self.current_state() {
3064 0 : return Ok(GcResult::default());
3065 377 : }
3066 377 :
3067 377 : // there is a global allowed_error for this
3068 377 : if !self.is_active() {
3069 0 : return Err(GcError::NotActive);
3070 377 : }
3071 377 :
3072 377 : {
3073 377 : let conf = self.tenant_conf.load();
3074 377 :
3075 377 : // If we may not delete layers, then simply skip GC. Even though a tenant
3076 377 : // in AttachedMulti state could do GC and just enqueue the blocked deletions,
3077 377 : // the only advantage to doing it is to perhaps shrink the LayerMap metadata
3078 377 : // a bit sooner than we would achieve by waiting for AttachedSingle status.
3079 377 : if !conf.location.may_delete_layers_hint() {
3080 0 : info!("Skipping GC in location state {:?}", conf.location);
3081 0 : return Ok(GcResult::default());
3082 377 : }
3083 377 :
3084 377 : if conf.is_gc_blocked_by_lsn_lease_deadline() {
3085 375 : info!("Skipping GC because lsn lease deadline is not reached");
3086 375 : return Ok(GcResult::default());
3087 2 : }
3088 : }
3089 :
3090 2 : let _guard = match self.gc_block.start().await {
3091 2 : Ok(guard) => guard,
3092 0 : Err(reasons) => {
3093 0 : info!("Skipping GC: {reasons}");
3094 0 : return Ok(GcResult::default());
3095 : }
3096 : };
3097 :
3098 2 : self.gc_iteration_internal(target_timeline_id, horizon, pitr, cancel, ctx)
3099 2 : .await
3100 377 : }
3101 :
3102 : /// Performs one compaction iteration. Called periodically from the compaction loop. Returns
3103 : /// whether another compaction is needed, if we still have pending work or if we yield for
3104 : /// immediate L0 compaction.
3105 : ///
3106 : /// Compaction can also be explicitly requested for a timeline via the HTTP API.
3107 0 : async fn compaction_iteration(
3108 0 : self: &Arc<Self>,
3109 0 : cancel: &CancellationToken,
3110 0 : ctx: &RequestContext,
3111 0 : ) -> Result<CompactionOutcome, CompactionError> {
3112 0 : // Don't compact inactive tenants.
3113 0 : if !self.is_active() {
3114 0 : return Ok(CompactionOutcome::Skipped);
3115 0 : }
3116 0 :
3117 0 : // Don't compact tenants that can't upload layers. We don't check `may_delete_layers_hint`,
3118 0 : // since we need to compact L0 even in AttachedMulti to bound read amplification.
3119 0 : let location = self.tenant_conf.load().location;
3120 0 : if !location.may_upload_layers_hint() {
3121 0 : info!("skipping compaction in location state {location:?}");
3122 0 : return Ok(CompactionOutcome::Skipped);
3123 0 : }
3124 0 :
3125 0 : // Don't compact if the circuit breaker is tripped.
3126 0 : if self.compaction_circuit_breaker.lock().unwrap().is_broken() {
3127 0 : info!("skipping compaction due to previous failures");
3128 0 : return Ok(CompactionOutcome::Skipped);
3129 0 : }
3130 0 :
3131 0 : // Collect all timelines to compact, along with offload instructions and L0 counts.
3132 0 : let mut compact: Vec<Arc<Timeline>> = Vec::new();
3133 0 : let mut offload: HashSet<TimelineId> = HashSet::new();
3134 0 : let mut l0_counts: HashMap<TimelineId, usize> = HashMap::new();
3135 0 :
3136 0 : {
3137 0 : let offload_enabled = self.get_timeline_offloading_enabled();
3138 0 : let timelines = self.timelines.lock().unwrap();
3139 0 : for (&timeline_id, timeline) in timelines.iter() {
3140 : // Skip inactive timelines.
3141 0 : if !timeline.is_active() {
3142 0 : continue;
3143 0 : }
3144 0 :
3145 0 : // Schedule the timeline for compaction.
3146 0 : compact.push(timeline.clone());
3147 :
3148 : // Schedule the timeline for offloading if eligible.
3149 0 : let can_offload = offload_enabled
3150 0 : && timeline.can_offload().0
3151 0 : && !timelines
3152 0 : .iter()
3153 0 : .any(|(_, tli)| tli.get_ancestor_timeline_id() == Some(timeline_id));
3154 0 : if can_offload {
3155 0 : offload.insert(timeline_id);
3156 0 : }
3157 : }
3158 : } // release timelines lock
3159 :
3160 0 : for timeline in &compact {
3161 : // Collect L0 counts. Can't await while holding lock above.
3162 0 : if let Ok(lm) = timeline
3163 0 : .layers
3164 0 : .read(LayerManagerLockHolder::Compaction)
3165 0 : .await
3166 0 : .layer_map()
3167 0 : {
3168 0 : l0_counts.insert(timeline.timeline_id, lm.level0_deltas().len());
3169 0 : }
3170 : }
3171 :
3172 : // Pass 1: L0 compaction across all timelines, in order of L0 count. We prioritize this to
3173 : // bound read amplification.
3174 : //
3175 : // TODO: this may spin on one or more ingest-heavy timelines, starving out image/GC
3176 : // compaction and offloading. We leave that as a potential problem to solve later. Consider
3177 : // splitting L0 and image/GC compaction to separate background jobs.
3178 0 : if self.get_compaction_l0_first() {
3179 0 : let compaction_threshold = self.get_compaction_threshold();
3180 0 : let compact_l0 = compact
3181 0 : .iter()
3182 0 : .map(|tli| (tli, l0_counts.get(&tli.timeline_id).copied().unwrap_or(0)))
3183 0 : .filter(|&(_, l0)| l0 >= compaction_threshold)
3184 0 : .sorted_by_key(|&(_, l0)| l0)
3185 0 : .rev()
3186 0 : .map(|(tli, _)| tli.clone())
3187 0 : .collect_vec();
3188 0 :
3189 0 : let mut has_pending_l0 = false;
3190 0 : for timeline in compact_l0 {
3191 0 : let ctx = &ctx.with_scope_timeline(&timeline);
3192 : // NB: don't set CompactFlags::YieldForL0, since this is an L0-only compaction pass.
3193 0 : let outcome = timeline
3194 0 : .compact(cancel, CompactFlags::OnlyL0Compaction.into(), ctx)
3195 0 : .instrument(info_span!("compact_timeline", timeline_id = %timeline.timeline_id))
3196 0 : .await
3197 0 : .inspect_err(|err| self.maybe_trip_compaction_breaker(err))?;
3198 0 : match outcome {
3199 0 : CompactionOutcome::Done => {}
3200 0 : CompactionOutcome::Skipped => {}
3201 0 : CompactionOutcome::Pending => has_pending_l0 = true,
3202 0 : CompactionOutcome::YieldForL0 => has_pending_l0 = true,
3203 : }
3204 : }
3205 0 : if has_pending_l0 {
3206 0 : return Ok(CompactionOutcome::YieldForL0); // do another pass
3207 0 : }
3208 0 : }
3209 :
3210 : // Pass 2: image compaction and timeline offloading. If any timelines have accumulated more
3211 : // L0 layers, they may also be compacted here. Image compaction will yield if there is
3212 : // pending L0 compaction on any tenant timeline.
3213 : //
3214 : // TODO: consider ordering timelines by some priority, e.g. time since last full compaction,
3215 : // amount of L1 delta debt or garbage, offload-eligible timelines first, etc.
3216 0 : let mut has_pending = false;
3217 0 : for timeline in compact {
3218 0 : if !timeline.is_active() {
3219 0 : continue;
3220 0 : }
3221 0 : let ctx = &ctx.with_scope_timeline(&timeline);
3222 0 :
3223 0 : // Yield for L0 if the separate L0 pass is enabled (otherwise there's no point).
3224 0 : let mut flags = EnumSet::default();
3225 0 : if self.get_compaction_l0_first() {
3226 0 : flags |= CompactFlags::YieldForL0;
3227 0 : }
3228 :
3229 0 : let mut outcome = timeline
3230 0 : .compact(cancel, flags, ctx)
3231 0 : .instrument(info_span!("compact_timeline", timeline_id = %timeline.timeline_id))
3232 0 : .await
3233 0 : .inspect_err(|err| self.maybe_trip_compaction_breaker(err))?;
3234 :
3235 : // If we're done compacting, check the scheduled GC compaction queue for more work.
3236 0 : if outcome == CompactionOutcome::Done {
3237 0 : let queue = {
3238 0 : let mut guard = self.scheduled_compaction_tasks.lock().unwrap();
3239 0 : guard
3240 0 : .entry(timeline.timeline_id)
3241 0 : .or_insert_with(|| Arc::new(GcCompactionQueue::new()))
3242 0 : .clone()
3243 0 : };
3244 0 : let gc_compaction_strategy = self
3245 0 : .feature_resolver
3246 0 : .evaluate_multivariate("gc-comapction-strategy", self.tenant_shard_id.tenant_id)
3247 0 : .ok();
3248 0 : let span = if let Some(gc_compaction_strategy) = gc_compaction_strategy {
3249 0 : info_span!("gc_compact_timeline", timeline_id = %timeline.timeline_id, strategy = %gc_compaction_strategy)
3250 : } else {
3251 0 : info_span!("gc_compact_timeline", timeline_id = %timeline.timeline_id)
3252 : };
3253 0 : outcome = queue
3254 0 : .iteration(cancel, ctx, &self.gc_block, &timeline)
3255 0 : .instrument(span)
3256 0 : .await?;
3257 0 : }
3258 :
3259 : // If we're done compacting, offload the timeline if requested.
3260 0 : if outcome == CompactionOutcome::Done && offload.contains(&timeline.timeline_id) {
3261 0 : pausable_failpoint!("before-timeline-auto-offload");
3262 0 : offload_timeline(self, &timeline)
3263 0 : .instrument(info_span!("offload_timeline", timeline_id = %timeline.timeline_id))
3264 0 : .await
3265 0 : .or_else(|err| match err {
3266 : // Ignore this, we likely raced with unarchival.
3267 0 : OffloadError::NotArchived => Ok(()),
3268 0 : err => Err(err),
3269 0 : })?;
3270 0 : }
3271 :
3272 0 : match outcome {
3273 0 : CompactionOutcome::Done => {}
3274 0 : CompactionOutcome::Skipped => {}
3275 0 : CompactionOutcome::Pending => has_pending = true,
3276 : // This mostly makes sense when the L0-only pass above is enabled, since there's
3277 : // otherwise no guarantee that we'll start with the timeline that has high L0.
3278 0 : CompactionOutcome::YieldForL0 => return Ok(CompactionOutcome::YieldForL0),
3279 : }
3280 : }
3281 :
3282 : // Success! Untrip the breaker if necessary.
3283 0 : self.compaction_circuit_breaker
3284 0 : .lock()
3285 0 : .unwrap()
3286 0 : .success(&CIRCUIT_BREAKERS_UNBROKEN);
3287 0 :
3288 0 : match has_pending {
3289 0 : true => Ok(CompactionOutcome::Pending),
3290 0 : false => Ok(CompactionOutcome::Done),
3291 : }
3292 0 : }
3293 :
3294 : /// Trips the compaction circuit breaker if appropriate.
3295 0 : pub(crate) fn maybe_trip_compaction_breaker(&self, err: &CompactionError) {
3296 0 : match err {
3297 0 : err if err.is_cancel() => {}
3298 0 : CompactionError::ShuttingDown => (),
3299 : // Offload failures don't trip the circuit breaker, since they're cheap to retry and
3300 : // shouldn't block compaction.
3301 0 : CompactionError::Offload(_) => {}
3302 0 : CompactionError::CollectKeySpaceError(err) => {
3303 0 : // CollectKeySpaceError::Cancelled and PageRead::Cancelled are handled in `err.is_cancel` branch.
3304 0 : self.compaction_circuit_breaker
3305 0 : .lock()
3306 0 : .unwrap()
3307 0 : .fail(&CIRCUIT_BREAKERS_BROKEN, err);
3308 0 : }
3309 0 : CompactionError::Other(err) => {
3310 0 : self.compaction_circuit_breaker
3311 0 : .lock()
3312 0 : .unwrap()
3313 0 : .fail(&CIRCUIT_BREAKERS_BROKEN, err);
3314 0 : }
3315 0 : CompactionError::AlreadyRunning(_) => {}
3316 : }
3317 0 : }
3318 :
3319 : /// Cancel scheduled compaction tasks
3320 0 : pub(crate) fn cancel_scheduled_compaction(&self, timeline_id: TimelineId) {
3321 0 : let mut guard = self.scheduled_compaction_tasks.lock().unwrap();
3322 0 : if let Some(q) = guard.get_mut(&timeline_id) {
3323 0 : q.cancel_scheduled();
3324 0 : }
3325 0 : }
3326 :
3327 0 : pub(crate) fn get_scheduled_compaction_tasks(
3328 0 : &self,
3329 0 : timeline_id: TimelineId,
3330 0 : ) -> Vec<CompactInfoResponse> {
3331 0 : let res = {
3332 0 : let guard = self.scheduled_compaction_tasks.lock().unwrap();
3333 0 : guard.get(&timeline_id).map(|q| q.remaining_jobs())
3334 : };
3335 0 : let Some((running, remaining)) = res else {
3336 0 : return Vec::new();
3337 : };
3338 0 : let mut result = Vec::new();
3339 0 : if let Some((id, running)) = running {
3340 0 : result.extend(running.into_compact_info_resp(id, true));
3341 0 : }
3342 0 : for (id, job) in remaining {
3343 0 : result.extend(job.into_compact_info_resp(id, false));
3344 0 : }
3345 0 : result
3346 0 : }
3347 :
3348 : /// Schedule a compaction task for a timeline.
3349 0 : pub(crate) async fn schedule_compaction(
3350 0 : &self,
3351 0 : timeline_id: TimelineId,
3352 0 : options: CompactOptions,
3353 0 : ) -> anyhow::Result<tokio::sync::oneshot::Receiver<()>> {
3354 0 : let (tx, rx) = tokio::sync::oneshot::channel();
3355 0 : let mut guard = self.scheduled_compaction_tasks.lock().unwrap();
3356 0 : let q = guard
3357 0 : .entry(timeline_id)
3358 0 : .or_insert_with(|| Arc::new(GcCompactionQueue::new()));
3359 0 : q.schedule_manual_compaction(options, Some(tx));
3360 0 : Ok(rx)
3361 0 : }
3362 :
3363 : /// Performs periodic housekeeping, via the tenant housekeeping background task.
3364 0 : async fn housekeeping(&self) {
3365 0 : // Call through to all timelines to freeze ephemeral layers as needed. This usually happens
3366 0 : // during ingest, but we don't want idle timelines to hold open layers for too long.
3367 0 : //
3368 0 : // We don't do this if the tenant can't upload layers (i.e. it's in stale attachment mode).
3369 0 : // We don't run compaction in this case either, and don't want to keep flushing tiny L0
3370 0 : // layers that won't be compacted down.
3371 0 : if self.tenant_conf.load().location.may_upload_layers_hint() {
3372 0 : let timelines = self
3373 0 : .timelines
3374 0 : .lock()
3375 0 : .unwrap()
3376 0 : .values()
3377 0 : .filter(|tli| tli.is_active())
3378 0 : .cloned()
3379 0 : .collect_vec();
3380 :
3381 0 : for timeline in timelines {
3382 0 : timeline.maybe_freeze_ephemeral_layer().await;
3383 : }
3384 0 : }
3385 :
3386 : // Shut down walredo if idle.
3387 : const WALREDO_IDLE_TIMEOUT: Duration = Duration::from_secs(180);
3388 0 : if let Some(ref walredo_mgr) = self.walredo_mgr {
3389 0 : walredo_mgr.maybe_quiesce(WALREDO_IDLE_TIMEOUT);
3390 0 : }
3391 0 : }
3392 :
3393 0 : pub fn timeline_has_no_attached_children(&self, timeline_id: TimelineId) -> bool {
3394 0 : let timelines = self.timelines.lock().unwrap();
3395 0 : !timelines
3396 0 : .iter()
3397 0 : .any(|(_id, tl)| tl.get_ancestor_timeline_id() == Some(timeline_id))
3398 0 : }
3399 :
3400 876 : pub fn current_state(&self) -> TenantState {
3401 876 : self.state.borrow().clone()
3402 876 : }
3403 :
3404 495 : pub fn is_active(&self) -> bool {
3405 495 : self.current_state() == TenantState::Active
3406 495 : }
3407 :
3408 0 : pub fn generation(&self) -> Generation {
3409 0 : self.generation
3410 0 : }
3411 :
3412 0 : pub(crate) fn wal_redo_manager_status(&self) -> Option<WalRedoManagerStatus> {
3413 0 : self.walredo_mgr.as_ref().and_then(|mgr| mgr.status())
3414 0 : }
3415 :
3416 : /// Changes tenant status to active, unless shutdown was already requested.
3417 : ///
3418 : /// `background_jobs_can_start` is an optional barrier set to a value during pageserver startup
3419 : /// to delay background jobs. Background jobs can be started right away when None is given.
3420 0 : fn activate(
3421 0 : self: &Arc<Self>,
3422 0 : broker_client: BrokerClientChannel,
3423 0 : background_jobs_can_start: Option<&completion::Barrier>,
3424 0 : ctx: &RequestContext,
3425 0 : ) {
3426 0 : span::debug_assert_current_span_has_tenant_id();
3427 0 :
3428 0 : let mut activating = false;
3429 0 : self.state.send_modify(|current_state| {
3430 : use pageserver_api::models::ActivatingFrom;
3431 0 : match &*current_state {
3432 : TenantState::Activating(_) | TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => {
3433 0 : panic!("caller is responsible for calling activate() only on Loading / Attaching tenants, got {state:?}", state = current_state);
3434 : }
3435 0 : TenantState::Attaching => {
3436 0 : *current_state = TenantState::Activating(ActivatingFrom::Attaching);
3437 0 : }
3438 0 : }
3439 0 : debug!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), "Activating tenant");
3440 0 : activating = true;
3441 0 : // Continue outside the closure. We need to grab timelines.lock()
3442 0 : // and we plan to turn it into a tokio::sync::Mutex in a future patch.
3443 0 : });
3444 0 :
3445 0 : if activating {
3446 0 : let timelines_accessor = self.timelines.lock().unwrap();
3447 0 : let timelines_offloaded_accessor = self.timelines_offloaded.lock().unwrap();
3448 0 : let timelines_to_activate = timelines_accessor
3449 0 : .values()
3450 0 : .filter(|timeline| !(timeline.is_broken() || timeline.is_stopping()));
3451 0 :
3452 0 : // Before activation, populate each Timeline's GcInfo with information about its children
3453 0 : self.initialize_gc_info(&timelines_accessor, &timelines_offloaded_accessor, None);
3454 0 :
3455 0 : // Spawn gc and compaction loops. The loops will shut themselves
3456 0 : // down when they notice that the tenant is inactive.
3457 0 : tasks::start_background_loops(self, background_jobs_can_start);
3458 0 :
3459 0 : let mut activated_timelines = 0;
3460 :
3461 0 : for timeline in timelines_to_activate {
3462 0 : timeline.activate(
3463 0 : self.clone(),
3464 0 : broker_client.clone(),
3465 0 : background_jobs_can_start,
3466 0 : &ctx.with_scope_timeline(timeline),
3467 0 : );
3468 0 : activated_timelines += 1;
3469 0 : }
3470 :
3471 0 : let tid = self.tenant_shard_id.tenant_id.to_string();
3472 0 : let shard_id = self.tenant_shard_id.shard_slug().to_string();
3473 0 : let offloaded_timeline_count = timelines_offloaded_accessor.len();
3474 0 : TENANT_OFFLOADED_TIMELINES
3475 0 : .with_label_values(&[&tid, &shard_id])
3476 0 : .set(offloaded_timeline_count as u64);
3477 0 :
3478 0 : self.state.send_modify(move |current_state| {
3479 0 : assert!(
3480 0 : matches!(current_state, TenantState::Activating(_)),
3481 0 : "set_stopping and set_broken wait for us to leave Activating state",
3482 : );
3483 0 : *current_state = TenantState::Active;
3484 0 :
3485 0 : let elapsed = self.constructed_at.elapsed();
3486 0 : let total_timelines = timelines_accessor.len();
3487 0 :
3488 0 : // log a lot of stuff, because some tenants sometimes suffer from user-visible
3489 0 : // times to activate. see https://github.com/neondatabase/neon/issues/4025
3490 0 : info!(
3491 0 : since_creation_millis = elapsed.as_millis(),
3492 0 : tenant_id = %self.tenant_shard_id.tenant_id,
3493 0 : shard_id = %self.tenant_shard_id.shard_slug(),
3494 0 : activated_timelines,
3495 0 : total_timelines,
3496 0 : post_state = <&'static str>::from(&*current_state),
3497 0 : "activation attempt finished"
3498 : );
3499 :
3500 0 : TENANT.activation.observe(elapsed.as_secs_f64());
3501 0 : });
3502 0 : }
3503 0 : }
3504 :
3505 : /// Shutdown the tenant and join all of the spawned tasks.
3506 : ///
3507 : /// The method caters for all use-cases:
3508 : /// - pageserver shutdown (freeze_and_flush == true)
3509 : /// - detach + ignore (freeze_and_flush == false)
3510 : ///
3511 : /// This will attempt to shutdown even if tenant is broken.
3512 : ///
3513 : /// `shutdown_progress` is a [`completion::Barrier`] for the shutdown initiated by this call.
3514 : /// If the tenant is already shutting down, we return a clone of the first shutdown call's
3515 : /// `Barrier` as an `Err`. This not-first caller can use the returned barrier to join with
3516 : /// the ongoing shutdown.
3517 3 : async fn shutdown(
3518 3 : &self,
3519 3 : shutdown_progress: completion::Barrier,
3520 3 : shutdown_mode: timeline::ShutdownMode,
3521 3 : ) -> Result<(), completion::Barrier> {
3522 3 : span::debug_assert_current_span_has_tenant_id();
3523 :
3524 : // Set tenant (and its timlines) to Stoppping state.
3525 : //
3526 : // Since we can only transition into Stopping state after activation is complete,
3527 : // run it in a JoinSet so all tenants have a chance to stop before we get SIGKILLed.
3528 : //
3529 : // Transitioning tenants to Stopping state has a couple of non-obvious side effects:
3530 : // 1. Lock out any new requests to the tenants.
3531 : // 2. Signal cancellation to WAL receivers (we wait on it below).
3532 : // 3. Signal cancellation for other tenant background loops.
3533 : // 4. ???
3534 : //
3535 : // The waiting for the cancellation is not done uniformly.
3536 : // We certainly wait for WAL receivers to shut down.
3537 : // That is necessary so that no new data comes in before the freeze_and_flush.
3538 : // But the tenant background loops are joined-on in our caller.
3539 : // It's mesed up.
3540 : // we just ignore the failure to stop
3541 :
3542 : // If we're still attaching, fire the cancellation token early to drop out: this
3543 : // will prevent us flushing, but ensures timely shutdown if some I/O during attach
3544 : // is very slow.
3545 3 : let shutdown_mode = if matches!(self.current_state(), TenantState::Attaching) {
3546 0 : self.cancel.cancel();
3547 0 :
3548 0 : // Having fired our cancellation token, do not try and flush timelines: their cancellation tokens
3549 0 : // are children of ours, so their flush loops will have shut down already
3550 0 : timeline::ShutdownMode::Hard
3551 : } else {
3552 3 : shutdown_mode
3553 : };
3554 :
3555 3 : match self.set_stopping(shutdown_progress).await {
3556 3 : Ok(()) => {}
3557 0 : Err(SetStoppingError::Broken) => {
3558 0 : // assume that this is acceptable
3559 0 : }
3560 0 : Err(SetStoppingError::AlreadyStopping(other)) => {
3561 0 : // give caller the option to wait for this this shutdown
3562 0 : info!("Tenant::shutdown: AlreadyStopping");
3563 0 : return Err(other);
3564 : }
3565 : };
3566 :
3567 3 : let mut js = tokio::task::JoinSet::new();
3568 3 : {
3569 3 : let timelines = self.timelines.lock().unwrap();
3570 3 : timelines.values().for_each(|timeline| {
3571 3 : let timeline = Arc::clone(timeline);
3572 3 : let timeline_id = timeline.timeline_id;
3573 3 : let span = tracing::info_span!("timeline_shutdown", %timeline_id, ?shutdown_mode);
3574 3 : js.spawn(async move { timeline.shutdown(shutdown_mode).instrument(span).await });
3575 3 : });
3576 3 : }
3577 3 : {
3578 3 : let timelines_offloaded = self.timelines_offloaded.lock().unwrap();
3579 3 : timelines_offloaded.values().for_each(|timeline| {
3580 0 : timeline.defuse_for_tenant_drop();
3581 3 : });
3582 3 : }
3583 3 : {
3584 3 : let mut timelines_importing = self.timelines_importing.lock().unwrap();
3585 3 : timelines_importing
3586 3 : .drain()
3587 3 : .for_each(|(timeline_id, importing_timeline)| {
3588 0 : let span = tracing::info_span!("importing_timeline_shutdown", %timeline_id);
3589 0 : js.spawn(async move { importing_timeline.shutdown().instrument(span).await });
3590 3 : });
3591 3 : }
3592 3 : // test_long_timeline_create_then_tenant_delete is leaning on this message
3593 3 : tracing::info!("Waiting for timelines...");
3594 6 : while let Some(res) = js.join_next().await {
3595 0 : match res {
3596 3 : Ok(()) => {}
3597 0 : Err(je) if je.is_cancelled() => unreachable!("no cancelling used"),
3598 0 : Err(je) if je.is_panic() => { /* logged already */ }
3599 0 : Err(je) => warn!("unexpected JoinError: {je:?}"),
3600 : }
3601 : }
3602 :
3603 3 : if let ShutdownMode::Reload = shutdown_mode {
3604 0 : tracing::info!("Flushing deletion queue");
3605 0 : if let Err(e) = self.deletion_queue_client.flush().await {
3606 0 : match e {
3607 0 : DeletionQueueError::ShuttingDown => {
3608 0 : // This is the only error we expect for now. In the future, if more error
3609 0 : // variants are added, we should handle them here.
3610 0 : }
3611 : }
3612 0 : }
3613 3 : }
3614 :
3615 : // We cancel the Tenant's cancellation token _after_ the timelines have all shut down. This permits
3616 : // them to continue to do work during their shutdown methods, e.g. flushing data.
3617 3 : tracing::debug!("Cancelling CancellationToken");
3618 3 : self.cancel.cancel();
3619 3 :
3620 3 : // shutdown all tenant and timeline tasks: gc, compaction, page service
3621 3 : // No new tasks will be started for this tenant because it's in `Stopping` state.
3622 3 : //
3623 3 : // this will additionally shutdown and await all timeline tasks.
3624 3 : tracing::debug!("Waiting for tasks...");
3625 3 : task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), None).await;
3626 :
3627 3 : if let Some(walredo_mgr) = self.walredo_mgr.as_ref() {
3628 3 : walredo_mgr.shutdown().await;
3629 0 : }
3630 :
3631 : // Wait for any in-flight operations to complete
3632 3 : self.gate.close().await;
3633 :
3634 3 : remove_tenant_metrics(&self.tenant_shard_id);
3635 3 :
3636 3 : Ok(())
3637 3 : }
3638 :
3639 : /// Change tenant status to Stopping, to mark that it is being shut down.
3640 : ///
3641 : /// This function waits for the tenant to become active if it isn't already, before transitioning it into Stopping state.
3642 : ///
3643 : /// This function is not cancel-safe!
3644 3 : async fn set_stopping(&self, progress: completion::Barrier) -> Result<(), SetStoppingError> {
3645 3 : let mut rx = self.state.subscribe();
3646 3 :
3647 3 : // cannot stop before we're done activating, so wait out until we're done activating
3648 3 : rx.wait_for(|state| match state {
3649 : TenantState::Activating(_) | TenantState::Attaching => {
3650 0 : info!("waiting for {state} to turn Active|Broken|Stopping");
3651 0 : false
3652 : }
3653 3 : TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => true,
3654 3 : })
3655 3 : .await
3656 3 : .expect("cannot drop self.state while on a &self method");
3657 3 :
3658 3 : // we now know we're done activating, let's see whether this task is the winner to transition into Stopping
3659 3 : let mut err = None;
3660 3 : let stopping = self.state.send_if_modified(|current_state| match current_state {
3661 : TenantState::Activating(_) | TenantState::Attaching => {
3662 0 : unreachable!("we ensured above that we're done with activation, and, there is no re-activation")
3663 : }
3664 : TenantState::Active => {
3665 : // FIXME: due to time-of-check vs time-of-use issues, it can happen that new timelines
3666 : // are created after the transition to Stopping. That's harmless, as the Timelines
3667 : // won't be accessible to anyone afterwards, because the Tenant is in Stopping state.
3668 3 : *current_state = TenantState::Stopping { progress: Some(progress) };
3669 3 : // Continue stopping outside the closure. We need to grab timelines.lock()
3670 3 : // and we plan to turn it into a tokio::sync::Mutex in a future patch.
3671 3 : true
3672 : }
3673 : TenantState::Stopping { progress: None } => {
3674 : // An attach was cancelled, and the attach transitioned the tenant from Attaching to
3675 : // Stopping(None) to let us know it exited. Register our progress and continue.
3676 0 : *current_state = TenantState::Stopping { progress: Some(progress) };
3677 0 : true
3678 : }
3679 0 : TenantState::Broken { reason, .. } => {
3680 0 : info!(
3681 0 : "Cannot set tenant to Stopping state, it is in Broken state due to: {reason}"
3682 : );
3683 0 : err = Some(SetStoppingError::Broken);
3684 0 : false
3685 : }
3686 0 : TenantState::Stopping { progress: Some(progress) } => {
3687 0 : info!("Tenant is already in Stopping state");
3688 0 : err = Some(SetStoppingError::AlreadyStopping(progress.clone()));
3689 0 : false
3690 : }
3691 3 : });
3692 3 : match (stopping, err) {
3693 3 : (true, None) => {} // continue
3694 0 : (false, Some(err)) => return Err(err),
3695 0 : (true, Some(_)) => unreachable!(
3696 0 : "send_if_modified closure must error out if not transitioning to Stopping"
3697 0 : ),
3698 0 : (false, None) => unreachable!(
3699 0 : "send_if_modified closure must return true if transitioning to Stopping"
3700 0 : ),
3701 : }
3702 :
3703 3 : let timelines_accessor = self.timelines.lock().unwrap();
3704 3 : let not_broken_timelines = timelines_accessor
3705 3 : .values()
3706 3 : .filter(|timeline| !timeline.is_broken());
3707 6 : for timeline in not_broken_timelines {
3708 3 : timeline.set_state(TimelineState::Stopping);
3709 3 : }
3710 3 : Ok(())
3711 3 : }
3712 :
3713 : /// Method for tenant::mgr to transition us into Broken state in case of a late failure in
3714 : /// `remove_tenant_from_memory`
3715 : ///
3716 : /// This function waits for the tenant to become active if it isn't already, before transitioning it into Stopping state.
3717 : ///
3718 : /// In tests, we also use this to set tenants to Broken state on purpose.
3719 0 : pub(crate) async fn set_broken(&self, reason: String) {
3720 0 : let mut rx = self.state.subscribe();
3721 0 :
3722 0 : // The load & attach routines own the tenant state until it has reached `Active`.
3723 0 : // So, wait until it's done.
3724 0 : rx.wait_for(|state| match state {
3725 : TenantState::Activating(_) | TenantState::Attaching => {
3726 0 : info!(
3727 0 : "waiting for {} to turn Active|Broken|Stopping",
3728 0 : <&'static str>::from(state)
3729 : );
3730 0 : false
3731 : }
3732 0 : TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => true,
3733 0 : })
3734 0 : .await
3735 0 : .expect("cannot drop self.state while on a &self method");
3736 0 :
3737 0 : // we now know we're done activating, let's see whether this task is the winner to transition into Broken
3738 0 : self.set_broken_no_wait(reason)
3739 0 : }
3740 :
3741 0 : pub(crate) fn set_broken_no_wait(&self, reason: impl Display) {
3742 0 : let reason = reason.to_string();
3743 0 : self.state.send_modify(|current_state| {
3744 0 : match *current_state {
3745 : TenantState::Activating(_) | TenantState::Attaching => {
3746 0 : unreachable!("we ensured above that we're done with activation, and, there is no re-activation")
3747 : }
3748 : TenantState::Active => {
3749 0 : if cfg!(feature = "testing") {
3750 0 : warn!("Changing Active tenant to Broken state, reason: {}", reason);
3751 0 : *current_state = TenantState::broken_from_reason(reason);
3752 : } else {
3753 0 : unreachable!("not allowed to call set_broken on Active tenants in non-testing builds")
3754 : }
3755 : }
3756 : TenantState::Broken { .. } => {
3757 0 : warn!("Tenant is already in Broken state");
3758 : }
3759 : // This is the only "expected" path, any other path is a bug.
3760 : TenantState::Stopping { .. } => {
3761 0 : warn!(
3762 0 : "Marking Stopping tenant as Broken state, reason: {}",
3763 : reason
3764 : );
3765 0 : *current_state = TenantState::broken_from_reason(reason);
3766 : }
3767 : }
3768 0 : });
3769 0 : }
3770 :
3771 0 : pub fn subscribe_for_state_updates(&self) -> watch::Receiver<TenantState> {
3772 0 : self.state.subscribe()
3773 0 : }
3774 :
3775 : /// The activate_now semaphore is initialized with zero units. As soon as
3776 : /// we add a unit, waiters will be able to acquire a unit and proceed.
3777 0 : pub(crate) fn activate_now(&self) {
3778 0 : self.activate_now_sem.add_permits(1);
3779 0 : }
3780 :
3781 0 : pub(crate) async fn wait_to_become_active(
3782 0 : &self,
3783 0 : timeout: Duration,
3784 0 : ) -> Result<(), GetActiveTenantError> {
3785 0 : let mut receiver = self.state.subscribe();
3786 : loop {
3787 0 : let current_state = receiver.borrow_and_update().clone();
3788 0 : match current_state {
3789 : TenantState::Attaching | TenantState::Activating(_) => {
3790 : // in these states, there's a chance that we can reach ::Active
3791 0 : self.activate_now();
3792 0 : match timeout_cancellable(timeout, &self.cancel, receiver.changed()).await {
3793 0 : Ok(r) => {
3794 0 : r.map_err(
3795 0 : |_e: tokio::sync::watch::error::RecvError|
3796 : // Tenant existed but was dropped: report it as non-existent
3797 0 : GetActiveTenantError::NotFound(GetTenantError::ShardNotFound(self.tenant_shard_id))
3798 0 : )?
3799 : }
3800 : Err(TimeoutCancellableError::Cancelled) => {
3801 0 : return Err(GetActiveTenantError::Cancelled);
3802 : }
3803 : Err(TimeoutCancellableError::Timeout) => {
3804 0 : return Err(GetActiveTenantError::WaitForActiveTimeout {
3805 0 : latest_state: Some(self.current_state()),
3806 0 : wait_time: timeout,
3807 0 : });
3808 : }
3809 : }
3810 : }
3811 : TenantState::Active => {
3812 0 : return Ok(());
3813 : }
3814 0 : TenantState::Broken { reason, .. } => {
3815 0 : // This is fatal, and reported distinctly from the general case of "will never be active" because
3816 0 : // it's logically a 500 to external API users (broken is always a bug).
3817 0 : return Err(GetActiveTenantError::Broken(reason));
3818 : }
3819 : TenantState::Stopping { .. } => {
3820 : // There's no chance the tenant can transition back into ::Active
3821 0 : return Err(GetActiveTenantError::WillNotBecomeActive(current_state));
3822 : }
3823 : }
3824 : }
3825 0 : }
3826 :
3827 0 : pub(crate) fn get_attach_mode(&self) -> AttachmentMode {
3828 0 : self.tenant_conf.load().location.attach_mode
3829 0 : }
3830 :
3831 : /// For API access: generate a LocationConfig equivalent to the one that would be used to
3832 : /// create a Tenant in the same state. Do not use this in hot paths: it's for relatively
3833 : /// rare external API calls, like a reconciliation at startup.
3834 0 : pub(crate) fn get_location_conf(&self) -> models::LocationConfig {
3835 0 : let attached_tenant_conf = self.tenant_conf.load();
3836 :
3837 0 : let location_config_mode = match attached_tenant_conf.location.attach_mode {
3838 0 : AttachmentMode::Single => models::LocationConfigMode::AttachedSingle,
3839 0 : AttachmentMode::Multi => models::LocationConfigMode::AttachedMulti,
3840 0 : AttachmentMode::Stale => models::LocationConfigMode::AttachedStale,
3841 : };
3842 :
3843 0 : models::LocationConfig {
3844 0 : mode: location_config_mode,
3845 0 : generation: self.generation.into(),
3846 0 : secondary_conf: None,
3847 0 : shard_number: self.shard_identity.number.0,
3848 0 : shard_count: self.shard_identity.count.literal(),
3849 0 : shard_stripe_size: self.shard_identity.stripe_size.0,
3850 0 : tenant_conf: attached_tenant_conf.tenant_conf.clone(),
3851 0 : }
3852 0 : }
3853 :
3854 0 : pub(crate) fn get_tenant_shard_id(&self) -> &TenantShardId {
3855 0 : &self.tenant_shard_id
3856 0 : }
3857 :
3858 119 : pub(crate) fn get_shard_stripe_size(&self) -> ShardStripeSize {
3859 119 : self.shard_identity.stripe_size
3860 119 : }
3861 :
3862 0 : pub(crate) fn get_generation(&self) -> Generation {
3863 0 : self.generation
3864 0 : }
3865 :
3866 : /// This function partially shuts down the tenant (it shuts down the Timelines) and is fallible,
3867 : /// and can leave the tenant in a bad state if it fails. The caller is responsible for
3868 : /// resetting this tenant to a valid state if we fail.
3869 0 : pub(crate) async fn split_prepare(
3870 0 : &self,
3871 0 : child_shards: &Vec<TenantShardId>,
3872 0 : ) -> anyhow::Result<()> {
3873 0 : let (timelines, offloaded) = {
3874 0 : let timelines = self.timelines.lock().unwrap();
3875 0 : let offloaded = self.timelines_offloaded.lock().unwrap();
3876 0 : (timelines.clone(), offloaded.clone())
3877 0 : };
3878 0 : let timelines_iter = timelines
3879 0 : .values()
3880 0 : .map(TimelineOrOffloadedArcRef::<'_>::from)
3881 0 : .chain(
3882 0 : offloaded
3883 0 : .values()
3884 0 : .map(TimelineOrOffloadedArcRef::<'_>::from),
3885 0 : );
3886 0 : for timeline in timelines_iter {
3887 : // We do not block timeline creation/deletion during splits inside the pageserver: it is up to higher levels
3888 : // to ensure that they do not start a split if currently in the process of doing these.
3889 :
3890 0 : let timeline_id = timeline.timeline_id();
3891 :
3892 0 : if let TimelineOrOffloadedArcRef::Timeline(timeline) = timeline {
3893 : // Upload an index from the parent: this is partly to provide freshness for the
3894 : // child tenants that will copy it, and partly for general ease-of-debugging: there will
3895 : // always be a parent shard index in the same generation as we wrote the child shard index.
3896 0 : tracing::info!(%timeline_id, "Uploading index");
3897 0 : timeline
3898 0 : .remote_client
3899 0 : .schedule_index_upload_for_file_changes()?;
3900 0 : timeline.remote_client.wait_completion().await?;
3901 0 : }
3902 :
3903 0 : let remote_client = match timeline {
3904 0 : TimelineOrOffloadedArcRef::Timeline(timeline) => timeline.remote_client.clone(),
3905 0 : TimelineOrOffloadedArcRef::Offloaded(offloaded) => {
3906 0 : let remote_client = self
3907 0 : .build_timeline_client(offloaded.timeline_id, self.remote_storage.clone());
3908 0 : Arc::new(remote_client)
3909 : }
3910 : TimelineOrOffloadedArcRef::Importing(_) => {
3911 0 : unreachable!("Importing timelines are not included in the iterator")
3912 : }
3913 : };
3914 :
3915 : // Shut down the timeline's remote client: this means that the indices we write
3916 : // for child shards will not be invalidated by the parent shard deleting layers.
3917 0 : tracing::info!(%timeline_id, "Shutting down remote storage client");
3918 0 : remote_client.shutdown().await;
3919 :
3920 : // Download methods can still be used after shutdown, as they don't flow through the remote client's
3921 : // queue. In principal the RemoteTimelineClient could provide this without downloading it, but this
3922 : // operation is rare, so it's simpler to just download it (and robustly guarantees that the index
3923 : // we use here really is the remotely persistent one).
3924 0 : tracing::info!(%timeline_id, "Downloading index_part from parent");
3925 0 : let result = remote_client
3926 0 : .download_index_file(&self.cancel)
3927 0 : .instrument(info_span!("download_index_file", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), %timeline_id))
3928 0 : .await?;
3929 0 : let index_part = match result {
3930 : MaybeDeletedIndexPart::Deleted(_) => {
3931 0 : anyhow::bail!("Timeline deletion happened concurrently with split")
3932 : }
3933 0 : MaybeDeletedIndexPart::IndexPart(p) => p,
3934 : };
3935 :
3936 : // A shard split may not take place while a timeline import is on-going
3937 : // for the tenant. Timeline imports run as part of each tenant shard
3938 : // and rely on the sharding scheme to split the work among pageservers.
3939 : // If we were to split in the middle of this process, we would have to
3940 : // either ensure that it's driven to completion on the old shard set
3941 : // or transfer it to the new shard set. It's technically possible, but complex.
3942 0 : match index_part.import_pgdata {
3943 0 : Some(ref import) if !import.is_done() => {
3944 0 : anyhow::bail!(
3945 0 : "Cannot split due to import with idempotency key: {:?}",
3946 0 : import.idempotency_key()
3947 0 : );
3948 : }
3949 0 : Some(_) | None => {
3950 0 : // fallthrough
3951 0 : }
3952 : }
3953 :
3954 0 : for child_shard in child_shards {
3955 0 : tracing::info!(%timeline_id, "Uploading index_part for child {}", child_shard.to_index());
3956 0 : upload_index_part(
3957 0 : &self.remote_storage,
3958 0 : child_shard,
3959 0 : &timeline_id,
3960 0 : self.generation,
3961 0 : &index_part,
3962 0 : &self.cancel,
3963 0 : )
3964 0 : .await?;
3965 : }
3966 : }
3967 :
3968 0 : let tenant_manifest = self.build_tenant_manifest();
3969 0 : for child_shard in child_shards {
3970 0 : tracing::info!(
3971 0 : "Uploading tenant manifest for child {}",
3972 0 : child_shard.to_index()
3973 : );
3974 0 : upload_tenant_manifest(
3975 0 : &self.remote_storage,
3976 0 : child_shard,
3977 0 : self.generation,
3978 0 : &tenant_manifest,
3979 0 : &self.cancel,
3980 0 : )
3981 0 : .await?;
3982 : }
3983 :
3984 0 : Ok(())
3985 0 : }
3986 :
3987 0 : pub(crate) fn get_sizes(&self) -> TopTenantShardItem {
3988 0 : let mut result = TopTenantShardItem {
3989 0 : id: self.tenant_shard_id,
3990 0 : resident_size: 0,
3991 0 : physical_size: 0,
3992 0 : max_logical_size: 0,
3993 0 : max_logical_size_per_shard: 0,
3994 0 : };
3995 :
3996 0 : for timeline in self.timelines.lock().unwrap().values() {
3997 0 : result.resident_size += timeline.metrics.resident_physical_size_gauge.get();
3998 0 :
3999 0 : result.physical_size += timeline
4000 0 : .remote_client
4001 0 : .metrics
4002 0 : .remote_physical_size_gauge
4003 0 : .get();
4004 0 : result.max_logical_size = std::cmp::max(
4005 0 : result.max_logical_size,
4006 0 : timeline.metrics.current_logical_size_gauge.get(),
4007 0 : );
4008 0 : }
4009 :
4010 0 : result.max_logical_size_per_shard = result
4011 0 : .max_logical_size
4012 0 : .div_ceil(self.tenant_shard_id.shard_count.count() as u64);
4013 0 :
4014 0 : result
4015 0 : }
4016 : }
4017 :
4018 : /// Given a Vec of timelines and their ancestors (timeline_id, ancestor_id),
4019 : /// perform a topological sort, so that the parent of each timeline comes
4020 : /// before the children.
4021 : /// E extracts the ancestor from T
4022 : /// This allows for T to be different. It can be TimelineMetadata, can be Timeline itself, etc.
4023 118 : fn tree_sort_timelines<T, E>(
4024 118 : timelines: HashMap<TimelineId, T>,
4025 118 : extractor: E,
4026 118 : ) -> anyhow::Result<Vec<(TimelineId, T)>>
4027 118 : where
4028 118 : E: Fn(&T) -> Option<TimelineId>,
4029 118 : {
4030 118 : let mut result = Vec::with_capacity(timelines.len());
4031 118 :
4032 118 : let mut now = Vec::with_capacity(timelines.len());
4033 118 : // (ancestor, children)
4034 118 : let mut later: HashMap<TimelineId, Vec<(TimelineId, T)>> =
4035 118 : HashMap::with_capacity(timelines.len());
4036 :
4037 121 : for (timeline_id, value) in timelines {
4038 3 : if let Some(ancestor_id) = extractor(&value) {
4039 1 : let children = later.entry(ancestor_id).or_default();
4040 1 : children.push((timeline_id, value));
4041 2 : } else {
4042 2 : now.push((timeline_id, value));
4043 2 : }
4044 : }
4045 :
4046 121 : while let Some((timeline_id, metadata)) = now.pop() {
4047 3 : result.push((timeline_id, metadata));
4048 : // All children of this can be loaded now
4049 3 : if let Some(mut children) = later.remove(&timeline_id) {
4050 1 : now.append(&mut children);
4051 2 : }
4052 : }
4053 :
4054 : // All timelines should be visited now. Unless there were timelines with missing ancestors.
4055 118 : if !later.is_empty() {
4056 0 : for (missing_id, orphan_ids) in later {
4057 0 : for (orphan_id, _) in orphan_ids {
4058 0 : error!(
4059 0 : "could not load timeline {orphan_id} because its ancestor timeline {missing_id} could not be loaded"
4060 : );
4061 : }
4062 : }
4063 0 : bail!("could not load tenant because some timelines are missing ancestors");
4064 118 : }
4065 118 :
4066 118 : Ok(result)
4067 118 : }
4068 :
4069 : impl TenantShard {
4070 0 : pub fn tenant_specific_overrides(&self) -> pageserver_api::models::TenantConfig {
4071 0 : self.tenant_conf.load().tenant_conf.clone()
4072 0 : }
4073 :
4074 0 : pub fn effective_config(&self) -> pageserver_api::config::TenantConfigToml {
4075 0 : self.tenant_specific_overrides()
4076 0 : .merge(self.conf.default_tenant_conf.clone())
4077 0 : }
4078 :
4079 0 : pub fn get_checkpoint_distance(&self) -> u64 {
4080 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4081 0 : tenant_conf
4082 0 : .checkpoint_distance
4083 0 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
4084 0 : }
4085 :
4086 0 : pub fn get_checkpoint_timeout(&self) -> Duration {
4087 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4088 0 : tenant_conf
4089 0 : .checkpoint_timeout
4090 0 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
4091 0 : }
4092 :
4093 0 : pub fn get_compaction_target_size(&self) -> u64 {
4094 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4095 0 : tenant_conf
4096 0 : .compaction_target_size
4097 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
4098 0 : }
4099 :
4100 0 : pub fn get_compaction_period(&self) -> Duration {
4101 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4102 0 : tenant_conf
4103 0 : .compaction_period
4104 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_period)
4105 0 : }
4106 :
4107 0 : pub fn get_compaction_threshold(&self) -> usize {
4108 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4109 0 : tenant_conf
4110 0 : .compaction_threshold
4111 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
4112 0 : }
4113 :
4114 0 : pub fn get_rel_size_v2_enabled(&self) -> bool {
4115 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4116 0 : tenant_conf
4117 0 : .rel_size_v2_enabled
4118 0 : .unwrap_or(self.conf.default_tenant_conf.rel_size_v2_enabled)
4119 0 : }
4120 :
4121 0 : pub fn get_compaction_upper_limit(&self) -> usize {
4122 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4123 0 : tenant_conf
4124 0 : .compaction_upper_limit
4125 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_upper_limit)
4126 0 : }
4127 :
4128 0 : pub fn get_compaction_l0_first(&self) -> bool {
4129 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4130 0 : tenant_conf
4131 0 : .compaction_l0_first
4132 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_l0_first)
4133 0 : }
4134 :
4135 2 : pub fn get_gc_horizon(&self) -> u64 {
4136 2 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4137 2 : tenant_conf
4138 2 : .gc_horizon
4139 2 : .unwrap_or(self.conf.default_tenant_conf.gc_horizon)
4140 2 : }
4141 :
4142 0 : pub fn get_gc_period(&self) -> Duration {
4143 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4144 0 : tenant_conf
4145 0 : .gc_period
4146 0 : .unwrap_or(self.conf.default_tenant_conf.gc_period)
4147 0 : }
4148 :
4149 0 : pub fn get_image_creation_threshold(&self) -> usize {
4150 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4151 0 : tenant_conf
4152 0 : .image_creation_threshold
4153 0 : .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
4154 0 : }
4155 :
4156 2 : pub fn get_pitr_interval(&self) -> Duration {
4157 2 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4158 2 : tenant_conf
4159 2 : .pitr_interval
4160 2 : .unwrap_or(self.conf.default_tenant_conf.pitr_interval)
4161 2 : }
4162 :
4163 0 : pub fn get_min_resident_size_override(&self) -> Option<u64> {
4164 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4165 0 : tenant_conf
4166 0 : .min_resident_size_override
4167 0 : .or(self.conf.default_tenant_conf.min_resident_size_override)
4168 0 : }
4169 :
4170 0 : pub fn get_heatmap_period(&self) -> Option<Duration> {
4171 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4172 0 : let heatmap_period = tenant_conf
4173 0 : .heatmap_period
4174 0 : .unwrap_or(self.conf.default_tenant_conf.heatmap_period);
4175 0 : if heatmap_period.is_zero() {
4176 0 : None
4177 : } else {
4178 0 : Some(heatmap_period)
4179 : }
4180 0 : }
4181 :
4182 2 : pub fn get_lsn_lease_length(&self) -> Duration {
4183 2 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4184 2 : tenant_conf
4185 2 : .lsn_lease_length
4186 2 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
4187 2 : }
4188 :
4189 0 : pub fn get_timeline_offloading_enabled(&self) -> bool {
4190 0 : if self.conf.timeline_offloading {
4191 0 : return true;
4192 0 : }
4193 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4194 0 : tenant_conf
4195 0 : .timeline_offloading
4196 0 : .unwrap_or(self.conf.default_tenant_conf.timeline_offloading)
4197 0 : }
4198 :
4199 : /// Generate an up-to-date TenantManifest based on the state of this Tenant.
4200 119 : fn build_tenant_manifest(&self) -> TenantManifest {
4201 119 : // Collect the offloaded timelines, and sort them for deterministic output.
4202 119 : let offloaded_timelines = self
4203 119 : .timelines_offloaded
4204 119 : .lock()
4205 119 : .unwrap()
4206 119 : .values()
4207 119 : .map(|tli| tli.manifest())
4208 119 : .sorted_by_key(|m| m.timeline_id)
4209 119 : .collect_vec();
4210 119 :
4211 119 : TenantManifest {
4212 119 : version: LATEST_TENANT_MANIFEST_VERSION,
4213 119 : stripe_size: Some(self.get_shard_stripe_size()),
4214 119 : offloaded_timelines,
4215 119 : }
4216 119 : }
4217 :
4218 0 : pub fn update_tenant_config<
4219 0 : F: Fn(
4220 0 : pageserver_api::models::TenantConfig,
4221 0 : ) -> anyhow::Result<pageserver_api::models::TenantConfig>,
4222 0 : >(
4223 0 : &self,
4224 0 : update: F,
4225 0 : ) -> anyhow::Result<pageserver_api::models::TenantConfig> {
4226 0 : // Use read-copy-update in order to avoid overwriting the location config
4227 0 : // state if this races with [`TenantShard::set_new_location_config`]. Note that
4228 0 : // this race is not possible if both request types come from the storage
4229 0 : // controller (as they should!) because an exclusive op lock is required
4230 0 : // on the storage controller side.
4231 0 :
4232 0 : self.tenant_conf
4233 0 : .try_rcu(|attached_conf| -> Result<_, anyhow::Error> {
4234 0 : Ok(Arc::new(AttachedTenantConf {
4235 0 : tenant_conf: update(attached_conf.tenant_conf.clone())?,
4236 0 : location: attached_conf.location,
4237 0 : lsn_lease_deadline: attached_conf.lsn_lease_deadline,
4238 : }))
4239 0 : })?;
4240 :
4241 0 : let updated = self.tenant_conf.load();
4242 0 :
4243 0 : self.tenant_conf_updated(&updated.tenant_conf);
4244 0 : // Don't hold self.timelines.lock() during the notifies.
4245 0 : // There's no risk of deadlock right now, but there could be if we consolidate
4246 0 : // mutexes in struct Timeline in the future.
4247 0 : let timelines = self.list_timelines();
4248 0 : for timeline in timelines {
4249 0 : timeline.tenant_conf_updated(&updated);
4250 0 : }
4251 :
4252 0 : Ok(updated.tenant_conf.clone())
4253 0 : }
4254 :
4255 0 : pub(crate) fn set_new_location_config(&self, new_conf: AttachedTenantConf) {
4256 0 : let new_tenant_conf = new_conf.tenant_conf.clone();
4257 0 :
4258 0 : self.tenant_conf.store(Arc::new(new_conf.clone()));
4259 0 :
4260 0 : self.tenant_conf_updated(&new_tenant_conf);
4261 0 : // Don't hold self.timelines.lock() during the notifies.
4262 0 : // There's no risk of deadlock right now, but there could be if we consolidate
4263 0 : // mutexes in struct Timeline in the future.
4264 0 : let timelines = self.list_timelines();
4265 0 : for timeline in timelines {
4266 0 : timeline.tenant_conf_updated(&new_conf);
4267 0 : }
4268 0 : }
4269 :
4270 118 : fn get_pagestream_throttle_config(
4271 118 : psconf: &'static PageServerConf,
4272 118 : overrides: &pageserver_api::models::TenantConfig,
4273 118 : ) -> throttle::Config {
4274 118 : overrides
4275 118 : .timeline_get_throttle
4276 118 : .clone()
4277 118 : .unwrap_or(psconf.default_tenant_conf.timeline_get_throttle.clone())
4278 118 : }
4279 :
4280 0 : pub(crate) fn tenant_conf_updated(&self, new_conf: &pageserver_api::models::TenantConfig) {
4281 0 : let conf = Self::get_pagestream_throttle_config(self.conf, new_conf);
4282 0 : self.pagestream_throttle.reconfigure(conf)
4283 0 : }
4284 :
4285 : /// Helper function to create a new Timeline struct.
4286 : ///
4287 : /// The returned Timeline is in Loading state. The caller is responsible for
4288 : /// initializing any on-disk state, and for inserting the Timeline to the 'timelines'
4289 : /// map.
4290 : ///
4291 : /// `validate_ancestor == false` is used when a timeline is created for deletion
4292 : /// and we might not have the ancestor present anymore which is fine for to be
4293 : /// deleted timelines.
4294 : #[allow(clippy::too_many_arguments)]
4295 234 : fn create_timeline_struct(
4296 234 : &self,
4297 234 : new_timeline_id: TimelineId,
4298 234 : new_metadata: &TimelineMetadata,
4299 234 : previous_heatmap: Option<PreviousHeatmap>,
4300 234 : ancestor: Option<Arc<Timeline>>,
4301 234 : resources: TimelineResources,
4302 234 : cause: CreateTimelineCause,
4303 234 : create_idempotency: CreateTimelineIdempotency,
4304 234 : gc_compaction_state: Option<GcCompactionState>,
4305 234 : rel_size_v2_status: Option<RelSizeMigration>,
4306 234 : ctx: &RequestContext,
4307 234 : ) -> anyhow::Result<(Arc<Timeline>, RequestContext)> {
4308 234 : let state = match cause {
4309 : CreateTimelineCause::Load => {
4310 234 : let ancestor_id = new_metadata.ancestor_timeline();
4311 234 : anyhow::ensure!(
4312 234 : ancestor_id == ancestor.as_ref().map(|t| t.timeline_id),
4313 0 : "Timeline's {new_timeline_id} ancestor {ancestor_id:?} was not found"
4314 : );
4315 234 : TimelineState::Loading
4316 : }
4317 0 : CreateTimelineCause::Delete => TimelineState::Stopping,
4318 : };
4319 :
4320 234 : let pg_version = new_metadata.pg_version();
4321 234 :
4322 234 : let timeline = Timeline::new(
4323 234 : self.conf,
4324 234 : Arc::clone(&self.tenant_conf),
4325 234 : new_metadata,
4326 234 : previous_heatmap,
4327 234 : ancestor,
4328 234 : new_timeline_id,
4329 234 : self.tenant_shard_id,
4330 234 : self.generation,
4331 234 : self.shard_identity,
4332 234 : self.walredo_mgr.clone(),
4333 234 : resources,
4334 234 : pg_version,
4335 234 : state,
4336 234 : self.attach_wal_lag_cooldown.clone(),
4337 234 : create_idempotency,
4338 234 : gc_compaction_state,
4339 234 : rel_size_v2_status,
4340 234 : self.cancel.child_token(),
4341 234 : );
4342 234 :
4343 234 : let timeline_ctx = RequestContextBuilder::from(ctx)
4344 234 : .scope(context::Scope::new_timeline(&timeline))
4345 234 : .detached_child();
4346 234 :
4347 234 : Ok((timeline, timeline_ctx))
4348 234 : }
4349 :
4350 : /// [`TenantShard::shutdown`] must be called before dropping the returned [`TenantShard`] object
4351 : /// to ensure proper cleanup of background tasks and metrics.
4352 : //
4353 : // Allow too_many_arguments because a constructor's argument list naturally grows with the
4354 : // number of attributes in the struct: breaking these out into a builder wouldn't be helpful.
4355 : #[allow(clippy::too_many_arguments)]
4356 118 : fn new(
4357 118 : state: TenantState,
4358 118 : conf: &'static PageServerConf,
4359 118 : attached_conf: AttachedTenantConf,
4360 118 : shard_identity: ShardIdentity,
4361 118 : walredo_mgr: Option<Arc<WalRedoManager>>,
4362 118 : tenant_shard_id: TenantShardId,
4363 118 : remote_storage: GenericRemoteStorage,
4364 118 : deletion_queue_client: DeletionQueueClient,
4365 118 : l0_flush_global_state: L0FlushGlobalState,
4366 118 : basebackup_prepare_sender: BasebackupPrepareSender,
4367 118 : feature_resolver: FeatureResolver,
4368 118 : ) -> TenantShard {
4369 118 : assert!(!attached_conf.location.generation.is_none());
4370 :
4371 118 : let (state, mut rx) = watch::channel(state);
4372 118 :
4373 118 : tokio::spawn(async move {
4374 118 : // reflect tenant state in metrics:
4375 118 : // - global per tenant state: TENANT_STATE_METRIC
4376 118 : // - "set" of broken tenants: BROKEN_TENANTS_SET
4377 118 : //
4378 118 : // set of broken tenants should not have zero counts so that it remains accessible for
4379 118 : // alerting.
4380 118 :
4381 118 : let tid = tenant_shard_id.to_string();
4382 118 : let shard_id = tenant_shard_id.shard_slug().to_string();
4383 118 : let set_key = &[tid.as_str(), shard_id.as_str()][..];
4384 :
4385 236 : fn inspect_state(state: &TenantState) -> ([&'static str; 1], bool) {
4386 236 : ([state.into()], matches!(state, TenantState::Broken { .. }))
4387 236 : }
4388 :
4389 118 : let mut tuple = inspect_state(&rx.borrow_and_update());
4390 118 :
4391 118 : let is_broken = tuple.1;
4392 118 : let mut counted_broken = if is_broken {
4393 : // add the id to the set right away, there should not be any updates on the channel
4394 : // after before tenant is removed, if ever
4395 0 : BROKEN_TENANTS_SET.with_label_values(set_key).set(1);
4396 0 : true
4397 : } else {
4398 118 : false
4399 : };
4400 :
4401 : loop {
4402 236 : let labels = &tuple.0;
4403 236 : let current = TENANT_STATE_METRIC.with_label_values(labels);
4404 236 : current.inc();
4405 236 :
4406 236 : if rx.changed().await.is_err() {
4407 : // tenant has been dropped
4408 7 : current.dec();
4409 7 : drop(BROKEN_TENANTS_SET.remove_label_values(set_key));
4410 7 : break;
4411 118 : }
4412 118 :
4413 118 : current.dec();
4414 118 : tuple = inspect_state(&rx.borrow_and_update());
4415 118 :
4416 118 : let is_broken = tuple.1;
4417 118 : if is_broken && !counted_broken {
4418 0 : counted_broken = true;
4419 0 : // insert the tenant_id (back) into the set while avoiding needless counter
4420 0 : // access
4421 0 : BROKEN_TENANTS_SET.with_label_values(set_key).set(1);
4422 118 : }
4423 : }
4424 118 : });
4425 118 :
4426 118 : TenantShard {
4427 118 : tenant_shard_id,
4428 118 : shard_identity,
4429 118 : generation: attached_conf.location.generation,
4430 118 : conf,
4431 118 : // using now here is good enough approximation to catch tenants with really long
4432 118 : // activation times.
4433 118 : constructed_at: Instant::now(),
4434 118 : timelines: Mutex::new(HashMap::new()),
4435 118 : timelines_creating: Mutex::new(HashSet::new()),
4436 118 : timelines_offloaded: Mutex::new(HashMap::new()),
4437 118 : timelines_importing: Mutex::new(HashMap::new()),
4438 118 : remote_tenant_manifest: Default::default(),
4439 118 : gc_cs: tokio::sync::Mutex::new(()),
4440 118 : walredo_mgr,
4441 118 : remote_storage,
4442 118 : deletion_queue_client,
4443 118 : state,
4444 118 : cached_logical_sizes: tokio::sync::Mutex::new(HashMap::new()),
4445 118 : cached_synthetic_tenant_size: Arc::new(AtomicU64::new(0)),
4446 118 : eviction_task_tenant_state: tokio::sync::Mutex::new(EvictionTaskTenantState::default()),
4447 118 : compaction_circuit_breaker: std::sync::Mutex::new(CircuitBreaker::new(
4448 118 : format!("compaction-{tenant_shard_id}"),
4449 118 : 5,
4450 118 : // Compaction can be a very expensive operation, and might leak disk space. It also ought
4451 118 : // to be infallible, as long as remote storage is available. So if it repeatedly fails,
4452 118 : // use an extremely long backoff.
4453 118 : Some(Duration::from_secs(3600 * 24)),
4454 118 : )),
4455 118 : l0_compaction_trigger: Arc::new(Notify::new()),
4456 118 : scheduled_compaction_tasks: Mutex::new(Default::default()),
4457 118 : activate_now_sem: tokio::sync::Semaphore::new(0),
4458 118 : attach_wal_lag_cooldown: Arc::new(std::sync::OnceLock::new()),
4459 118 : cancel: CancellationToken::default(),
4460 118 : gate: Gate::default(),
4461 118 : pagestream_throttle: Arc::new(throttle::Throttle::new(
4462 118 : TenantShard::get_pagestream_throttle_config(conf, &attached_conf.tenant_conf),
4463 118 : )),
4464 118 : pagestream_throttle_metrics: Arc::new(
4465 118 : crate::metrics::tenant_throttling::Pagestream::new(&tenant_shard_id),
4466 118 : ),
4467 118 : tenant_conf: Arc::new(ArcSwap::from_pointee(attached_conf)),
4468 118 : ongoing_timeline_detach: std::sync::Mutex::default(),
4469 118 : gc_block: Default::default(),
4470 118 : l0_flush_global_state,
4471 118 : basebackup_prepare_sender,
4472 118 : feature_resolver,
4473 118 : }
4474 118 : }
4475 :
4476 : /// Locate and load config
4477 0 : pub(super) fn load_tenant_config(
4478 0 : conf: &'static PageServerConf,
4479 0 : tenant_shard_id: &TenantShardId,
4480 0 : ) -> Result<LocationConf, LoadConfigError> {
4481 0 : let config_path = conf.tenant_location_config_path(tenant_shard_id);
4482 0 :
4483 0 : info!("loading tenant configuration from {config_path}");
4484 :
4485 : // load and parse file
4486 0 : let config = fs::read_to_string(&config_path).map_err(|e| {
4487 0 : match e.kind() {
4488 : std::io::ErrorKind::NotFound => {
4489 : // The config should almost always exist for a tenant directory:
4490 : // - When attaching a tenant, the config is the first thing we write
4491 : // - When detaching a tenant, we atomically move the directory to a tmp location
4492 : // before deleting contents.
4493 : //
4494 : // The very rare edge case that can result in a missing config is if we crash during attach
4495 : // between creating directory and writing config. Callers should handle that as if the
4496 : // directory didn't exist.
4497 :
4498 0 : LoadConfigError::NotFound(config_path)
4499 : }
4500 : _ => {
4501 : // No IO errors except NotFound are acceptable here: other kinds of error indicate local storage or permissions issues
4502 : // that we cannot cleanly recover
4503 0 : crate::virtual_file::on_fatal_io_error(&e, "Reading tenant config file")
4504 : }
4505 : }
4506 0 : })?;
4507 :
4508 0 : Ok(toml_edit::de::from_str::<LocationConf>(&config)?)
4509 0 : }
4510 :
4511 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
4512 : pub(super) async fn persist_tenant_config(
4513 : conf: &'static PageServerConf,
4514 : tenant_shard_id: &TenantShardId,
4515 : location_conf: &LocationConf,
4516 : ) -> std::io::Result<()> {
4517 : let config_path = conf.tenant_location_config_path(tenant_shard_id);
4518 :
4519 : Self::persist_tenant_config_at(tenant_shard_id, &config_path, location_conf).await
4520 : }
4521 :
4522 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
4523 : pub(super) async fn persist_tenant_config_at(
4524 : tenant_shard_id: &TenantShardId,
4525 : config_path: &Utf8Path,
4526 : location_conf: &LocationConf,
4527 : ) -> std::io::Result<()> {
4528 : debug!("persisting tenantconf to {config_path}");
4529 :
4530 : let mut conf_content = r#"# This file contains a specific per-tenant's config.
4531 : # It is read in case of pageserver restart.
4532 : "#
4533 : .to_string();
4534 :
4535 0 : fail::fail_point!("tenant-config-before-write", |_| {
4536 0 : Err(std::io::Error::other("tenant-config-before-write"))
4537 0 : });
4538 :
4539 : // Convert the config to a toml file.
4540 : conf_content +=
4541 : &toml_edit::ser::to_string_pretty(&location_conf).expect("Config serialization failed");
4542 :
4543 : let temp_path = path_with_suffix_extension(config_path, TEMP_FILE_SUFFIX);
4544 :
4545 : let conf_content = conf_content.into_bytes();
4546 : VirtualFile::crashsafe_overwrite(config_path.to_owned(), temp_path, conf_content).await
4547 : }
4548 :
4549 : //
4550 : // How garbage collection works:
4551 : //
4552 : // +--bar------------->
4553 : // /
4554 : // +----+-----foo---------------->
4555 : // /
4556 : // ----main--+-------------------------->
4557 : // \
4558 : // +-----baz-------->
4559 : //
4560 : //
4561 : // 1. Grab 'gc_cs' mutex to prevent new timelines from being created while Timeline's
4562 : // `gc_infos` are being refreshed
4563 : // 2. Scan collected timelines, and on each timeline, make note of the
4564 : // all the points where other timelines have been branched off.
4565 : // We will refrain from removing page versions at those LSNs.
4566 : // 3. For each timeline, scan all layer files on the timeline.
4567 : // Remove all files for which a newer file exists and which
4568 : // don't cover any branch point LSNs.
4569 : //
4570 : // TODO:
4571 : // - if a relation has a non-incremental persistent layer on a child branch, then we
4572 : // don't need to keep that in the parent anymore. But currently
4573 : // we do.
4574 2 : async fn gc_iteration_internal(
4575 2 : &self,
4576 2 : target_timeline_id: Option<TimelineId>,
4577 2 : horizon: u64,
4578 2 : pitr: Duration,
4579 2 : cancel: &CancellationToken,
4580 2 : ctx: &RequestContext,
4581 2 : ) -> Result<GcResult, GcError> {
4582 2 : let mut totals: GcResult = Default::default();
4583 2 : let now = Instant::now();
4584 :
4585 2 : let gc_timelines = self
4586 2 : .refresh_gc_info_internal(target_timeline_id, horizon, pitr, cancel, ctx)
4587 2 : .await?;
4588 :
4589 2 : failpoint_support::sleep_millis_async!("gc_iteration_internal_after_getting_gc_timelines");
4590 :
4591 : // If there is nothing to GC, we don't want any messages in the INFO log.
4592 2 : if !gc_timelines.is_empty() {
4593 2 : info!("{} timelines need GC", gc_timelines.len());
4594 : } else {
4595 0 : debug!("{} timelines need GC", gc_timelines.len());
4596 : }
4597 :
4598 : // Perform GC for each timeline.
4599 : //
4600 : // Note that we don't hold the `TenantShard::gc_cs` lock here because we don't want to delay the
4601 : // branch creation task, which requires the GC lock. A GC iteration can run concurrently
4602 : // with branch creation.
4603 : //
4604 : // See comments in [`TenantShard::branch_timeline`] for more information about why branch
4605 : // creation task can run concurrently with timeline's GC iteration.
4606 4 : for timeline in gc_timelines {
4607 2 : if cancel.is_cancelled() {
4608 : // We were requested to shut down. Stop and return with the progress we
4609 : // made.
4610 0 : break;
4611 2 : }
4612 2 : let result = match timeline.gc().await {
4613 : Err(GcError::TimelineCancelled) => {
4614 0 : if target_timeline_id.is_some() {
4615 : // If we were targetting this specific timeline, surface cancellation to caller
4616 0 : return Err(GcError::TimelineCancelled);
4617 : } else {
4618 : // A timeline may be shutting down independently of the tenant's lifecycle: we should
4619 : // skip past this and proceed to try GC on other timelines.
4620 0 : continue;
4621 : }
4622 : }
4623 2 : r => r?,
4624 : };
4625 2 : totals += result;
4626 : }
4627 :
4628 2 : totals.elapsed = now.elapsed();
4629 2 : Ok(totals)
4630 2 : }
4631 :
4632 : /// Refreshes the Timeline::gc_info for all timelines, returning the
4633 : /// vector of timelines which have [`Timeline::get_last_record_lsn`] past
4634 : /// [`TenantShard::get_gc_horizon`].
4635 : ///
4636 : /// This is usually executed as part of periodic gc, but can now be triggered more often.
4637 2 : pub(crate) async fn refresh_gc_info(
4638 2 : &self,
4639 2 : cancel: &CancellationToken,
4640 2 : ctx: &RequestContext,
4641 2 : ) -> Result<Vec<Arc<Timeline>>, GcError> {
4642 2 : // since this method can now be called at different rates than the configured gc loop, it
4643 2 : // might be that these configuration values get applied faster than what it was previously,
4644 2 : // since these were only read from the gc task.
4645 2 : let horizon = self.get_gc_horizon();
4646 2 : let pitr = self.get_pitr_interval();
4647 2 :
4648 2 : // refresh all timelines
4649 2 : let target_timeline_id = None;
4650 2 :
4651 2 : self.refresh_gc_info_internal(target_timeline_id, horizon, pitr, cancel, ctx)
4652 2 : .await
4653 2 : }
4654 :
4655 : /// Populate all Timelines' `GcInfo` with information about their children. We do not set the
4656 : /// PITR cutoffs here, because that requires I/O: this is done later, before GC, by [`Self::refresh_gc_info_internal`]
4657 : ///
4658 : /// Subsequently, parent-child relationships are updated incrementally inside [`Timeline::new`] and [`Timeline::drop`].
4659 0 : fn initialize_gc_info(
4660 0 : &self,
4661 0 : timelines: &std::sync::MutexGuard<HashMap<TimelineId, Arc<Timeline>>>,
4662 0 : timelines_offloaded: &std::sync::MutexGuard<HashMap<TimelineId, Arc<OffloadedTimeline>>>,
4663 0 : restrict_to_timeline: Option<TimelineId>,
4664 0 : ) {
4665 0 : if restrict_to_timeline.is_none() {
4666 : // This function must be called before activation: after activation timeline create/delete operations
4667 : // might happen, and this function is not safe to run concurrently with those.
4668 0 : assert!(!self.is_active());
4669 0 : }
4670 :
4671 : // Scan all timelines. For each timeline, remember the timeline ID and
4672 : // the branch point where it was created.
4673 0 : let mut all_branchpoints: BTreeMap<TimelineId, Vec<(Lsn, TimelineId, MaybeOffloaded)>> =
4674 0 : BTreeMap::new();
4675 0 : timelines.iter().for_each(|(timeline_id, timeline_entry)| {
4676 0 : if let Some(ancestor_timeline_id) = &timeline_entry.get_ancestor_timeline_id() {
4677 0 : let ancestor_children = all_branchpoints.entry(*ancestor_timeline_id).or_default();
4678 0 : ancestor_children.push((
4679 0 : timeline_entry.get_ancestor_lsn(),
4680 0 : *timeline_id,
4681 0 : MaybeOffloaded::No,
4682 0 : ));
4683 0 : }
4684 0 : });
4685 0 : timelines_offloaded
4686 0 : .iter()
4687 0 : .for_each(|(timeline_id, timeline_entry)| {
4688 0 : let Some(ancestor_timeline_id) = &timeline_entry.ancestor_timeline_id else {
4689 0 : return;
4690 : };
4691 0 : let Some(retain_lsn) = timeline_entry.ancestor_retain_lsn else {
4692 0 : return;
4693 : };
4694 0 : let ancestor_children = all_branchpoints.entry(*ancestor_timeline_id).or_default();
4695 0 : ancestor_children.push((retain_lsn, *timeline_id, MaybeOffloaded::Yes));
4696 0 : });
4697 0 :
4698 0 : // The number of bytes we always keep, irrespective of PITR: this is a constant across timelines
4699 0 : let horizon = self.get_gc_horizon();
4700 :
4701 : // Populate each timeline's GcInfo with information about its child branches
4702 0 : let timelines_to_write = if let Some(timeline_id) = restrict_to_timeline {
4703 0 : itertools::Either::Left(timelines.get(&timeline_id).into_iter())
4704 : } else {
4705 0 : itertools::Either::Right(timelines.values())
4706 : };
4707 0 : for timeline in timelines_to_write {
4708 0 : let mut branchpoints: Vec<(Lsn, TimelineId, MaybeOffloaded)> = all_branchpoints
4709 0 : .remove(&timeline.timeline_id)
4710 0 : .unwrap_or_default();
4711 0 :
4712 0 : branchpoints.sort_by_key(|b| b.0);
4713 0 :
4714 0 : let mut target = timeline.gc_info.write().unwrap();
4715 0 :
4716 0 : target.retain_lsns = branchpoints;
4717 0 :
4718 0 : let space_cutoff = timeline
4719 0 : .get_last_record_lsn()
4720 0 : .checked_sub(horizon)
4721 0 : .unwrap_or(Lsn(0));
4722 0 :
4723 0 : target.cutoffs = GcCutoffs {
4724 0 : space: space_cutoff,
4725 0 : time: None,
4726 0 : };
4727 0 : }
4728 0 : }
4729 :
4730 4 : async fn refresh_gc_info_internal(
4731 4 : &self,
4732 4 : target_timeline_id: Option<TimelineId>,
4733 4 : horizon: u64,
4734 4 : pitr: Duration,
4735 4 : cancel: &CancellationToken,
4736 4 : ctx: &RequestContext,
4737 4 : ) -> Result<Vec<Arc<Timeline>>, GcError> {
4738 4 : // before taking the gc_cs lock, do the heavier weight finding of gc_cutoff points for
4739 4 : // currently visible timelines.
4740 4 : let timelines = self
4741 4 : .timelines
4742 4 : .lock()
4743 4 : .unwrap()
4744 4 : .values()
4745 10 : .filter(|tl| match target_timeline_id.as_ref() {
4746 2 : Some(target) => &tl.timeline_id == target,
4747 8 : None => true,
4748 10 : })
4749 4 : .cloned()
4750 4 : .collect::<Vec<_>>();
4751 4 :
4752 4 : if target_timeline_id.is_some() && timelines.is_empty() {
4753 : // We were to act on a particular timeline and it wasn't found
4754 0 : return Err(GcError::TimelineNotFound);
4755 4 : }
4756 4 :
4757 4 : let mut gc_cutoffs: HashMap<TimelineId, GcCutoffs> =
4758 4 : HashMap::with_capacity(timelines.len());
4759 4 :
4760 4 : // Ensures all timelines use the same start time when computing the time cutoff.
4761 4 : let now_ts_for_pitr_calc = SystemTime::now();
4762 10 : for timeline in timelines.iter() {
4763 10 : let ctx = &ctx.with_scope_timeline(timeline);
4764 10 : let cutoff = timeline
4765 10 : .get_last_record_lsn()
4766 10 : .checked_sub(horizon)
4767 10 : .unwrap_or(Lsn(0));
4768 :
4769 10 : let cutoffs = timeline
4770 10 : .find_gc_cutoffs(now_ts_for_pitr_calc, cutoff, pitr, cancel, ctx)
4771 10 : .await?;
4772 10 : let old = gc_cutoffs.insert(timeline.timeline_id, cutoffs);
4773 10 : assert!(old.is_none());
4774 : }
4775 :
4776 4 : if !self.is_active() || self.cancel.is_cancelled() {
4777 0 : return Err(GcError::TenantCancelled);
4778 4 : }
4779 :
4780 : // grab mutex to prevent new timelines from being created here; avoid doing long operations
4781 : // because that will stall branch creation.
4782 4 : let gc_cs = self.gc_cs.lock().await;
4783 :
4784 : // Ok, we now know all the branch points.
4785 : // Update the GC information for each timeline.
4786 4 : let mut gc_timelines = Vec::with_capacity(timelines.len());
4787 14 : for timeline in timelines {
4788 : // We filtered the timeline list above
4789 10 : if let Some(target_timeline_id) = target_timeline_id {
4790 2 : assert_eq!(target_timeline_id, timeline.timeline_id);
4791 8 : }
4792 :
4793 : {
4794 10 : let mut target = timeline.gc_info.write().unwrap();
4795 10 :
4796 10 : // Cull any expired leases
4797 10 : let now = SystemTime::now();
4798 10 : target.leases.retain(|_, lease| !lease.is_expired(&now));
4799 10 :
4800 10 : timeline
4801 10 : .metrics
4802 10 : .valid_lsn_lease_count_gauge
4803 10 : .set(target.leases.len() as u64);
4804 :
4805 : // Look up parent's PITR cutoff to update the child's knowledge of whether it is within parent's PITR
4806 10 : if let Some(ancestor_id) = timeline.get_ancestor_timeline_id() {
4807 6 : if let Some(ancestor_gc_cutoffs) = gc_cutoffs.get(&ancestor_id) {
4808 6 : target.within_ancestor_pitr =
4809 6 : Some(timeline.get_ancestor_lsn()) >= ancestor_gc_cutoffs.time;
4810 6 : }
4811 4 : }
4812 :
4813 : // Update metrics that depend on GC state
4814 10 : timeline
4815 10 : .metrics
4816 10 : .archival_size
4817 10 : .set(if target.within_ancestor_pitr {
4818 0 : timeline.metrics.current_logical_size_gauge.get()
4819 : } else {
4820 10 : 0
4821 : });
4822 10 : if let Some(time_cutoff) = target.cutoffs.time {
4823 4 : timeline.metrics.pitr_history_size.set(
4824 4 : timeline
4825 4 : .get_last_record_lsn()
4826 4 : .checked_sub(time_cutoff)
4827 4 : .unwrap_or_default()
4828 4 : .0,
4829 4 : );
4830 6 : }
4831 :
4832 : // Apply the cutoffs we found to the Timeline's GcInfo. Why might we _not_ have cutoffs for a timeline?
4833 : // - this timeline was created while we were finding cutoffs
4834 : // - lsn for timestamp search fails for this timeline repeatedly
4835 10 : if let Some(cutoffs) = gc_cutoffs.get(&timeline.timeline_id) {
4836 10 : let original_cutoffs = target.cutoffs.clone();
4837 10 : // GC cutoffs should never go back
4838 10 : target.cutoffs = GcCutoffs {
4839 10 : space: cutoffs.space.max(original_cutoffs.space),
4840 10 : time: cutoffs.time.max(original_cutoffs.time),
4841 10 : }
4842 0 : }
4843 : }
4844 :
4845 10 : gc_timelines.push(timeline);
4846 : }
4847 4 : drop(gc_cs);
4848 4 : Ok(gc_timelines)
4849 4 : }
4850 :
4851 : /// A substitute for `branch_timeline` for use in unit tests.
4852 : /// The returned timeline will have state value `Active` to make various `anyhow::ensure!()`
4853 : /// calls pass, but, we do not actually call `.activate()` under the hood. So, none of the
4854 : /// timeline background tasks are launched, except the flush loop.
4855 : #[cfg(test)]
4856 119 : async fn branch_timeline_test(
4857 119 : self: &Arc<Self>,
4858 119 : src_timeline: &Arc<Timeline>,
4859 119 : dst_id: TimelineId,
4860 119 : ancestor_lsn: Option<Lsn>,
4861 119 : ctx: &RequestContext,
4862 119 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
4863 119 : let tl = self
4864 119 : .branch_timeline_impl(src_timeline, dst_id, ancestor_lsn, ctx)
4865 119 : .await?
4866 117 : .into_timeline_for_test();
4867 117 : tl.set_state(TimelineState::Active);
4868 117 : Ok(tl)
4869 119 : }
4870 :
4871 : /// Helper for unit tests to branch a timeline with some pre-loaded states.
4872 : #[cfg(test)]
4873 : #[allow(clippy::too_many_arguments)]
4874 6 : pub async fn branch_timeline_test_with_layers(
4875 6 : self: &Arc<Self>,
4876 6 : src_timeline: &Arc<Timeline>,
4877 6 : dst_id: TimelineId,
4878 6 : ancestor_lsn: Option<Lsn>,
4879 6 : ctx: &RequestContext,
4880 6 : delta_layer_desc: Vec<timeline::DeltaLayerTestDesc>,
4881 6 : image_layer_desc: Vec<(Lsn, Vec<(pageserver_api::key::Key, bytes::Bytes)>)>,
4882 6 : end_lsn: Lsn,
4883 6 : ) -> anyhow::Result<Arc<Timeline>> {
4884 : use checks::check_valid_layermap;
4885 : use itertools::Itertools;
4886 :
4887 6 : let tline = self
4888 6 : .branch_timeline_test(src_timeline, dst_id, ancestor_lsn, ctx)
4889 6 : .await?;
4890 6 : let ancestor_lsn = if let Some(ancestor_lsn) = ancestor_lsn {
4891 6 : ancestor_lsn
4892 : } else {
4893 0 : tline.get_last_record_lsn()
4894 : };
4895 6 : assert!(end_lsn >= ancestor_lsn);
4896 6 : tline.force_advance_lsn(end_lsn);
4897 9 : for deltas in delta_layer_desc {
4898 3 : tline
4899 3 : .force_create_delta_layer(deltas, Some(ancestor_lsn), ctx)
4900 3 : .await?;
4901 : }
4902 8 : for (lsn, images) in image_layer_desc {
4903 2 : tline
4904 2 : .force_create_image_layer(lsn, images, Some(ancestor_lsn), ctx)
4905 2 : .await?;
4906 : }
4907 6 : let layer_names = tline
4908 6 : .layers
4909 6 : .read(LayerManagerLockHolder::Testing)
4910 6 : .await
4911 6 : .layer_map()
4912 6 : .unwrap()
4913 6 : .iter_historic_layers()
4914 6 : .map(|layer| layer.layer_name())
4915 6 : .collect_vec();
4916 6 : if let Some(err) = check_valid_layermap(&layer_names) {
4917 0 : bail!("invalid layermap: {err}");
4918 6 : }
4919 6 : Ok(tline)
4920 6 : }
4921 :
4922 : /// Branch an existing timeline.
4923 0 : async fn branch_timeline(
4924 0 : self: &Arc<Self>,
4925 0 : src_timeline: &Arc<Timeline>,
4926 0 : dst_id: TimelineId,
4927 0 : start_lsn: Option<Lsn>,
4928 0 : ctx: &RequestContext,
4929 0 : ) -> Result<CreateTimelineResult, CreateTimelineError> {
4930 0 : self.branch_timeline_impl(src_timeline, dst_id, start_lsn, ctx)
4931 0 : .await
4932 0 : }
4933 :
4934 119 : async fn branch_timeline_impl(
4935 119 : self: &Arc<Self>,
4936 119 : src_timeline: &Arc<Timeline>,
4937 119 : dst_id: TimelineId,
4938 119 : start_lsn: Option<Lsn>,
4939 119 : ctx: &RequestContext,
4940 119 : ) -> Result<CreateTimelineResult, CreateTimelineError> {
4941 119 : let src_id = src_timeline.timeline_id;
4942 :
4943 : // We will validate our ancestor LSN in this function. Acquire the GC lock so that
4944 : // this check cannot race with GC, and the ancestor LSN is guaranteed to remain
4945 : // valid while we are creating the branch.
4946 119 : let _gc_cs = self.gc_cs.lock().await;
4947 :
4948 : // If no start LSN is specified, we branch the new timeline from the source timeline's last record LSN
4949 119 : let start_lsn = start_lsn.unwrap_or_else(|| {
4950 1 : let lsn = src_timeline.get_last_record_lsn();
4951 1 : info!("branching timeline {dst_id} from timeline {src_id} at last record LSN: {lsn}");
4952 1 : lsn
4953 119 : });
4954 :
4955 : // we finally have determined the ancestor_start_lsn, so we can get claim exclusivity now
4956 119 : let timeline_create_guard = match self
4957 119 : .start_creating_timeline(
4958 119 : dst_id,
4959 119 : CreateTimelineIdempotency::Branch {
4960 119 : ancestor_timeline_id: src_timeline.timeline_id,
4961 119 : ancestor_start_lsn: start_lsn,
4962 119 : },
4963 119 : )
4964 119 : .await?
4965 : {
4966 119 : StartCreatingTimelineResult::CreateGuard(guard) => guard,
4967 0 : StartCreatingTimelineResult::Idempotent(timeline) => {
4968 0 : return Ok(CreateTimelineResult::Idempotent(timeline));
4969 : }
4970 : };
4971 :
4972 : // Ensure that `start_lsn` is valid, i.e. the LSN is within the PITR
4973 : // horizon on the source timeline
4974 : //
4975 : // We check it against both the planned GC cutoff stored in 'gc_info',
4976 : // and the 'latest_gc_cutoff' of the last GC that was performed. The
4977 : // planned GC cutoff in 'gc_info' is normally larger than
4978 : // 'applied_gc_cutoff_lsn', but beware of corner cases like if you just
4979 : // changed the GC settings for the tenant to make the PITR window
4980 : // larger, but some of the data was already removed by an earlier GC
4981 : // iteration.
4982 :
4983 : // check against last actual 'latest_gc_cutoff' first
4984 119 : let applied_gc_cutoff_lsn = src_timeline.get_applied_gc_cutoff_lsn();
4985 119 : {
4986 119 : let gc_info = src_timeline.gc_info.read().unwrap();
4987 119 : let planned_cutoff = gc_info.min_cutoff();
4988 119 : if gc_info.lsn_covered_by_lease(start_lsn) {
4989 0 : tracing::info!(
4990 0 : "skipping comparison of {start_lsn} with gc cutoff {} and planned gc cutoff {planned_cutoff} due to lsn lease",
4991 0 : *applied_gc_cutoff_lsn
4992 : );
4993 : } else {
4994 119 : src_timeline
4995 119 : .check_lsn_is_in_scope(start_lsn, &applied_gc_cutoff_lsn)
4996 119 : .context(format!(
4997 119 : "invalid branch start lsn: less than latest GC cutoff {}",
4998 119 : *applied_gc_cutoff_lsn,
4999 119 : ))
5000 119 : .map_err(CreateTimelineError::AncestorLsn)?;
5001 :
5002 : // and then the planned GC cutoff
5003 117 : if start_lsn < planned_cutoff {
5004 0 : return Err(CreateTimelineError::AncestorLsn(anyhow::anyhow!(
5005 0 : "invalid branch start lsn: less than planned GC cutoff {planned_cutoff}"
5006 0 : )));
5007 117 : }
5008 : }
5009 : }
5010 :
5011 : //
5012 : // The branch point is valid, and we are still holding the 'gc_cs' lock
5013 : // so that GC cannot advance the GC cutoff until we are finished.
5014 : // Proceed with the branch creation.
5015 : //
5016 :
5017 : // Determine prev-LSN for the new timeline. We can only determine it if
5018 : // the timeline was branched at the current end of the source timeline.
5019 : let RecordLsn {
5020 117 : last: src_last,
5021 117 : prev: src_prev,
5022 117 : } = src_timeline.get_last_record_rlsn();
5023 117 : let dst_prev = if src_last == start_lsn {
5024 108 : Some(src_prev)
5025 : } else {
5026 9 : None
5027 : };
5028 :
5029 : // Create the metadata file, noting the ancestor of the new timeline.
5030 : // There is initially no data in it, but all the read-calls know to look
5031 : // into the ancestor.
5032 117 : let metadata = TimelineMetadata::new(
5033 117 : start_lsn,
5034 117 : dst_prev,
5035 117 : Some(src_id),
5036 117 : start_lsn,
5037 117 : *src_timeline.applied_gc_cutoff_lsn.read(), // FIXME: should we hold onto this guard longer?
5038 117 : src_timeline.initdb_lsn,
5039 117 : src_timeline.pg_version,
5040 117 : );
5041 :
5042 117 : let (uninitialized_timeline, _timeline_ctx) = self
5043 117 : .prepare_new_timeline(
5044 117 : dst_id,
5045 117 : &metadata,
5046 117 : timeline_create_guard,
5047 117 : start_lsn + 1,
5048 117 : Some(Arc::clone(src_timeline)),
5049 117 : Some(src_timeline.get_rel_size_v2_status()),
5050 117 : ctx,
5051 117 : )
5052 117 : .await?;
5053 :
5054 117 : let new_timeline = uninitialized_timeline.finish_creation().await?;
5055 :
5056 : // Root timeline gets its layers during creation and uploads them along with the metadata.
5057 : // A branch timeline though, when created, can get no writes for some time, hence won't get any layers created.
5058 : // We still need to upload its metadata eagerly: if other nodes `attach` the tenant and miss this timeline, their GC
5059 : // could get incorrect information and remove more layers, than needed.
5060 : // See also https://github.com/neondatabase/neon/issues/3865
5061 117 : new_timeline
5062 117 : .remote_client
5063 117 : .schedule_index_upload_for_full_metadata_update(&metadata)
5064 117 : .context("branch initial metadata upload")?;
5065 :
5066 : // Callers are responsible to wait for uploads to complete and for activating the timeline.
5067 :
5068 117 : Ok(CreateTimelineResult::Created(new_timeline))
5069 119 : }
5070 :
5071 : /// For unit tests, make this visible so that other modules can directly create timelines
5072 : #[cfg(test)]
5073 : #[tracing::instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), %timeline_id))]
5074 : pub(crate) async fn bootstrap_timeline_test(
5075 : self: &Arc<Self>,
5076 : timeline_id: TimelineId,
5077 : pg_version: u32,
5078 : load_existing_initdb: Option<TimelineId>,
5079 : ctx: &RequestContext,
5080 : ) -> anyhow::Result<Arc<Timeline>> {
5081 : self.bootstrap_timeline(timeline_id, pg_version, load_existing_initdb, ctx)
5082 : .await
5083 : .map_err(anyhow::Error::new)
5084 1 : .map(|r| r.into_timeline_for_test())
5085 : }
5086 :
5087 : /// Get exclusive access to the timeline ID for creation.
5088 : ///
5089 : /// Timeline-creating code paths must use this function before making changes
5090 : /// to in-memory or persistent state.
5091 : ///
5092 : /// The `state` parameter is a description of the timeline creation operation
5093 : /// we intend to perform.
5094 : /// If the timeline was already created in the meantime, we check whether this
5095 : /// request conflicts or is idempotent , based on `state`.
5096 234 : async fn start_creating_timeline(
5097 234 : self: &Arc<Self>,
5098 234 : new_timeline_id: TimelineId,
5099 234 : idempotency: CreateTimelineIdempotency,
5100 234 : ) -> Result<StartCreatingTimelineResult, CreateTimelineError> {
5101 234 : let allow_offloaded = false;
5102 234 : match self.create_timeline_create_guard(new_timeline_id, idempotency, allow_offloaded) {
5103 233 : Ok(create_guard) => {
5104 233 : pausable_failpoint!("timeline-creation-after-uninit");
5105 233 : Ok(StartCreatingTimelineResult::CreateGuard(create_guard))
5106 : }
5107 0 : Err(TimelineExclusionError::ShuttingDown) => Err(CreateTimelineError::ShuttingDown),
5108 : Err(TimelineExclusionError::AlreadyCreating) => {
5109 : // Creation is in progress, we cannot create it again, and we cannot
5110 : // check if this request matches the existing one, so caller must try
5111 : // again later.
5112 0 : Err(CreateTimelineError::AlreadyCreating)
5113 : }
5114 0 : Err(TimelineExclusionError::Other(e)) => Err(CreateTimelineError::Other(e)),
5115 : Err(TimelineExclusionError::AlreadyExists {
5116 0 : existing: TimelineOrOffloaded::Offloaded(_existing),
5117 0 : ..
5118 0 : }) => {
5119 0 : info!("timeline already exists but is offloaded");
5120 0 : Err(CreateTimelineError::Conflict)
5121 : }
5122 : Err(TimelineExclusionError::AlreadyExists {
5123 0 : existing: TimelineOrOffloaded::Importing(_existing),
5124 0 : ..
5125 0 : }) => {
5126 0 : // If there's a timeline already importing, then we would hit
5127 0 : // the [`TimelineExclusionError::AlreadyCreating`] branch above.
5128 0 : unreachable!("Importing timelines hold the creation guard")
5129 : }
5130 : Err(TimelineExclusionError::AlreadyExists {
5131 1 : existing: TimelineOrOffloaded::Timeline(existing),
5132 1 : arg,
5133 1 : }) => {
5134 1 : {
5135 1 : let existing = &existing.create_idempotency;
5136 1 : let _span = info_span!("idempotency_check", ?existing, ?arg).entered();
5137 1 : debug!("timeline already exists");
5138 :
5139 1 : match (existing, &arg) {
5140 : // FailWithConflict => no idempotency check
5141 : (CreateTimelineIdempotency::FailWithConflict, _)
5142 : | (_, CreateTimelineIdempotency::FailWithConflict) => {
5143 1 : warn!("timeline already exists, failing request");
5144 1 : return Err(CreateTimelineError::Conflict);
5145 : }
5146 : // Idempotent <=> CreateTimelineIdempotency is identical
5147 0 : (x, y) if x == y => {
5148 0 : info!(
5149 0 : "timeline already exists and idempotency matches, succeeding request"
5150 : );
5151 : // fallthrough
5152 : }
5153 : (_, _) => {
5154 0 : warn!("idempotency conflict, failing request");
5155 0 : return Err(CreateTimelineError::Conflict);
5156 : }
5157 : }
5158 : }
5159 :
5160 0 : Ok(StartCreatingTimelineResult::Idempotent(existing))
5161 : }
5162 : }
5163 234 : }
5164 :
5165 0 : async fn upload_initdb(
5166 0 : &self,
5167 0 : timelines_path: &Utf8PathBuf,
5168 0 : pgdata_path: &Utf8PathBuf,
5169 0 : timeline_id: &TimelineId,
5170 0 : ) -> anyhow::Result<()> {
5171 0 : let temp_path = timelines_path.join(format!(
5172 0 : "{INITDB_PATH}.upload-{timeline_id}.{TEMP_FILE_SUFFIX}"
5173 0 : ));
5174 0 :
5175 0 : scopeguard::defer! {
5176 0 : if let Err(e) = fs::remove_file(&temp_path) {
5177 0 : error!("Failed to remove temporary initdb archive '{temp_path}': {e}");
5178 0 : }
5179 0 : }
5180 :
5181 0 : let (pgdata_zstd, tar_zst_size) = create_zst_tarball(pgdata_path, &temp_path).await?;
5182 : const INITDB_TAR_ZST_WARN_LIMIT: u64 = 2 * 1024 * 1024;
5183 0 : if tar_zst_size > INITDB_TAR_ZST_WARN_LIMIT {
5184 0 : warn!(
5185 0 : "compressed {temp_path} size of {tar_zst_size} is above limit {INITDB_TAR_ZST_WARN_LIMIT}."
5186 : );
5187 0 : }
5188 :
5189 0 : pausable_failpoint!("before-initdb-upload");
5190 :
5191 0 : backoff::retry(
5192 0 : || async {
5193 0 : self::remote_timeline_client::upload_initdb_dir(
5194 0 : &self.remote_storage,
5195 0 : &self.tenant_shard_id.tenant_id,
5196 0 : timeline_id,
5197 0 : pgdata_zstd.try_clone().await?,
5198 0 : tar_zst_size,
5199 0 : &self.cancel,
5200 0 : )
5201 0 : .await
5202 0 : },
5203 0 : |_| false,
5204 0 : 3,
5205 0 : u32::MAX,
5206 0 : "persist_initdb_tar_zst",
5207 0 : &self.cancel,
5208 0 : )
5209 0 : .await
5210 0 : .ok_or_else(|| anyhow::Error::new(TimeoutOrCancel::Cancel))
5211 0 : .and_then(|x| x)
5212 0 : }
5213 :
5214 : /// - run initdb to init temporary instance and get bootstrap data
5215 : /// - after initialization completes, tar up the temp dir and upload it to S3.
5216 1 : async fn bootstrap_timeline(
5217 1 : self: &Arc<Self>,
5218 1 : timeline_id: TimelineId,
5219 1 : pg_version: u32,
5220 1 : load_existing_initdb: Option<TimelineId>,
5221 1 : ctx: &RequestContext,
5222 1 : ) -> Result<CreateTimelineResult, CreateTimelineError> {
5223 1 : let timeline_create_guard = match self
5224 1 : .start_creating_timeline(
5225 1 : timeline_id,
5226 1 : CreateTimelineIdempotency::Bootstrap { pg_version },
5227 1 : )
5228 1 : .await?
5229 : {
5230 1 : StartCreatingTimelineResult::CreateGuard(guard) => guard,
5231 0 : StartCreatingTimelineResult::Idempotent(timeline) => {
5232 0 : return Ok(CreateTimelineResult::Idempotent(timeline));
5233 : }
5234 : };
5235 :
5236 : // create a `tenant/{tenant_id}/timelines/basebackup-{timeline_id}.{TEMP_FILE_SUFFIX}/`
5237 : // temporary directory for basebackup files for the given timeline.
5238 :
5239 1 : let timelines_path = self.conf.timelines_path(&self.tenant_shard_id);
5240 1 : let pgdata_path = path_with_suffix_extension(
5241 1 : timelines_path.join(format!("basebackup-{timeline_id}")),
5242 1 : TEMP_FILE_SUFFIX,
5243 1 : );
5244 1 :
5245 1 : // Remove whatever was left from the previous runs: safe because TimelineCreateGuard guarantees
5246 1 : // we won't race with other creations or existent timelines with the same path.
5247 1 : if pgdata_path.exists() {
5248 0 : fs::remove_dir_all(&pgdata_path).with_context(|| {
5249 0 : format!("Failed to remove already existing initdb directory: {pgdata_path}")
5250 0 : })?;
5251 0 : tracing::info!("removed previous attempt's temporary initdb directory '{pgdata_path}'");
5252 1 : }
5253 :
5254 : // this new directory is very temporary, set to remove it immediately after bootstrap, we don't need it
5255 1 : let pgdata_path_deferred = pgdata_path.clone();
5256 1 : scopeguard::defer! {
5257 1 : if let Err(e) = fs::remove_dir_all(&pgdata_path_deferred).or_else(fs_ext::ignore_not_found) {
5258 1 : // this is unlikely, but we will remove the directory on pageserver restart or another bootstrap call
5259 1 : error!("Failed to remove temporary initdb directory '{pgdata_path_deferred}': {e}");
5260 1 : } else {
5261 1 : tracing::info!("removed temporary initdb directory '{pgdata_path_deferred}'");
5262 1 : }
5263 1 : }
5264 1 : if let Some(existing_initdb_timeline_id) = load_existing_initdb {
5265 1 : if existing_initdb_timeline_id != timeline_id {
5266 0 : let source_path = &remote_initdb_archive_path(
5267 0 : &self.tenant_shard_id.tenant_id,
5268 0 : &existing_initdb_timeline_id,
5269 0 : );
5270 0 : let dest_path =
5271 0 : &remote_initdb_archive_path(&self.tenant_shard_id.tenant_id, &timeline_id);
5272 0 :
5273 0 : // if this fails, it will get retried by retried control plane requests
5274 0 : self.remote_storage
5275 0 : .copy_object(source_path, dest_path, &self.cancel)
5276 0 : .await
5277 0 : .context("copy initdb tar")?;
5278 1 : }
5279 1 : let (initdb_tar_zst_path, initdb_tar_zst) =
5280 1 : self::remote_timeline_client::download_initdb_tar_zst(
5281 1 : self.conf,
5282 1 : &self.remote_storage,
5283 1 : &self.tenant_shard_id,
5284 1 : &existing_initdb_timeline_id,
5285 1 : &self.cancel,
5286 1 : )
5287 1 : .await
5288 1 : .context("download initdb tar")?;
5289 :
5290 1 : scopeguard::defer! {
5291 1 : if let Err(e) = fs::remove_file(&initdb_tar_zst_path) {
5292 1 : error!("Failed to remove temporary initdb archive '{initdb_tar_zst_path}': {e}");
5293 1 : }
5294 1 : }
5295 1 :
5296 1 : let buf_read =
5297 1 : BufReader::with_capacity(remote_timeline_client::BUFFER_SIZE, initdb_tar_zst);
5298 1 : extract_zst_tarball(&pgdata_path, buf_read)
5299 1 : .await
5300 1 : .context("extract initdb tar")?;
5301 : } else {
5302 : // Init temporarily repo to get bootstrap data, this creates a directory in the `pgdata_path` path
5303 0 : run_initdb(self.conf, &pgdata_path, pg_version, &self.cancel)
5304 0 : .await
5305 0 : .context("run initdb")?;
5306 :
5307 : // Upload the created data dir to S3
5308 0 : if self.tenant_shard_id().is_shard_zero() {
5309 0 : self.upload_initdb(&timelines_path, &pgdata_path, &timeline_id)
5310 0 : .await?;
5311 0 : }
5312 : }
5313 1 : let pgdata_lsn = import_datadir::get_lsn_from_controlfile(&pgdata_path)?.align();
5314 1 :
5315 1 : // Import the contents of the data directory at the initial checkpoint
5316 1 : // LSN, and any WAL after that.
5317 1 : // Initdb lsn will be equal to last_record_lsn which will be set after import.
5318 1 : // Because we know it upfront avoid having an option or dummy zero value by passing it to the metadata.
5319 1 : let new_metadata = TimelineMetadata::new(
5320 1 : Lsn(0),
5321 1 : None,
5322 1 : None,
5323 1 : Lsn(0),
5324 1 : pgdata_lsn,
5325 1 : pgdata_lsn,
5326 1 : pg_version,
5327 1 : );
5328 1 : let (mut raw_timeline, timeline_ctx) = self
5329 1 : .prepare_new_timeline(
5330 1 : timeline_id,
5331 1 : &new_metadata,
5332 1 : timeline_create_guard,
5333 1 : pgdata_lsn,
5334 1 : None,
5335 1 : None,
5336 1 : ctx,
5337 1 : )
5338 1 : .await?;
5339 :
5340 1 : let tenant_shard_id = raw_timeline.owning_tenant.tenant_shard_id;
5341 1 : raw_timeline
5342 1 : .write(|unfinished_timeline| async move {
5343 1 : import_datadir::import_timeline_from_postgres_datadir(
5344 1 : &unfinished_timeline,
5345 1 : &pgdata_path,
5346 1 : pgdata_lsn,
5347 1 : &timeline_ctx,
5348 1 : )
5349 1 : .await
5350 1 : .with_context(|| {
5351 0 : format!(
5352 0 : "Failed to import pgdatadir for timeline {tenant_shard_id}/{timeline_id}"
5353 0 : )
5354 1 : })?;
5355 :
5356 1 : fail::fail_point!("before-checkpoint-new-timeline", |_| {
5357 0 : Err(CreateTimelineError::Other(anyhow::anyhow!(
5358 0 : "failpoint before-checkpoint-new-timeline"
5359 0 : )))
5360 1 : });
5361 :
5362 1 : Ok(())
5363 2 : })
5364 1 : .await?;
5365 :
5366 : // All done!
5367 1 : let timeline = raw_timeline.finish_creation().await?;
5368 :
5369 : // Callers are responsible to wait for uploads to complete and for activating the timeline.
5370 :
5371 1 : Ok(CreateTimelineResult::Created(timeline))
5372 1 : }
5373 :
5374 231 : fn build_timeline_remote_client(&self, timeline_id: TimelineId) -> RemoteTimelineClient {
5375 231 : RemoteTimelineClient::new(
5376 231 : self.remote_storage.clone(),
5377 231 : self.deletion_queue_client.clone(),
5378 231 : self.conf,
5379 231 : self.tenant_shard_id,
5380 231 : timeline_id,
5381 231 : self.generation,
5382 231 : &self.tenant_conf.load().location,
5383 231 : )
5384 231 : }
5385 :
5386 : /// Builds required resources for a new timeline.
5387 231 : fn build_timeline_resources(&self, timeline_id: TimelineId) -> TimelineResources {
5388 231 : let remote_client = self.build_timeline_remote_client(timeline_id);
5389 231 : self.get_timeline_resources_for(remote_client)
5390 231 : }
5391 :
5392 : /// Builds timeline resources for the given remote client.
5393 234 : fn get_timeline_resources_for(&self, remote_client: RemoteTimelineClient) -> TimelineResources {
5394 234 : TimelineResources {
5395 234 : remote_client,
5396 234 : pagestream_throttle: self.pagestream_throttle.clone(),
5397 234 : pagestream_throttle_metrics: self.pagestream_throttle_metrics.clone(),
5398 234 : l0_compaction_trigger: self.l0_compaction_trigger.clone(),
5399 234 : l0_flush_global_state: self.l0_flush_global_state.clone(),
5400 234 : basebackup_prepare_sender: self.basebackup_prepare_sender.clone(),
5401 234 : feature_resolver: self.feature_resolver.clone(),
5402 234 : }
5403 234 : }
5404 :
5405 : /// Creates intermediate timeline structure and its files.
5406 : ///
5407 : /// An empty layer map is initialized, and new data and WAL can be imported starting
5408 : /// at 'disk_consistent_lsn'. After any initial data has been imported, call
5409 : /// `finish_creation` to insert the Timeline into the timelines map.
5410 : #[allow(clippy::too_many_arguments)]
5411 231 : async fn prepare_new_timeline<'a>(
5412 231 : &'a self,
5413 231 : new_timeline_id: TimelineId,
5414 231 : new_metadata: &TimelineMetadata,
5415 231 : create_guard: TimelineCreateGuard,
5416 231 : start_lsn: Lsn,
5417 231 : ancestor: Option<Arc<Timeline>>,
5418 231 : rel_size_v2_status: Option<RelSizeMigration>,
5419 231 : ctx: &RequestContext,
5420 231 : ) -> anyhow::Result<(UninitializedTimeline<'a>, RequestContext)> {
5421 231 : let tenant_shard_id = self.tenant_shard_id;
5422 231 :
5423 231 : let resources = self.build_timeline_resources(new_timeline_id);
5424 231 : resources
5425 231 : .remote_client
5426 231 : .init_upload_queue_for_empty_remote(new_metadata, rel_size_v2_status.clone())?;
5427 :
5428 231 : let (timeline_struct, timeline_ctx) = self
5429 231 : .create_timeline_struct(
5430 231 : new_timeline_id,
5431 231 : new_metadata,
5432 231 : None,
5433 231 : ancestor,
5434 231 : resources,
5435 231 : CreateTimelineCause::Load,
5436 231 : create_guard.idempotency.clone(),
5437 231 : None,
5438 231 : rel_size_v2_status,
5439 231 : ctx,
5440 231 : )
5441 231 : .context("Failed to create timeline data structure")?;
5442 :
5443 231 : timeline_struct.init_empty_layer_map(start_lsn);
5444 :
5445 231 : if let Err(e) = self
5446 231 : .create_timeline_files(&create_guard.timeline_path)
5447 231 : .await
5448 : {
5449 0 : error!(
5450 0 : "Failed to create initial files for timeline {tenant_shard_id}/{new_timeline_id}, cleaning up: {e:?}"
5451 : );
5452 0 : cleanup_timeline_directory(create_guard);
5453 0 : return Err(e);
5454 231 : }
5455 231 :
5456 231 : debug!(
5457 0 : "Successfully created initial files for timeline {tenant_shard_id}/{new_timeline_id}"
5458 : );
5459 :
5460 231 : Ok((
5461 231 : UninitializedTimeline::new(
5462 231 : self,
5463 231 : new_timeline_id,
5464 231 : Some((timeline_struct, create_guard)),
5465 231 : ),
5466 231 : timeline_ctx,
5467 231 : ))
5468 231 : }
5469 :
5470 231 : async fn create_timeline_files(&self, timeline_path: &Utf8Path) -> anyhow::Result<()> {
5471 231 : crashsafe::create_dir(timeline_path).context("Failed to create timeline directory")?;
5472 :
5473 231 : fail::fail_point!("after-timeline-dir-creation", |_| {
5474 0 : anyhow::bail!("failpoint after-timeline-dir-creation");
5475 231 : });
5476 :
5477 231 : Ok(())
5478 231 : }
5479 :
5480 : /// Get a guard that provides exclusive access to the timeline directory, preventing
5481 : /// concurrent attempts to create the same timeline.
5482 : ///
5483 : /// The `allow_offloaded` parameter controls whether to tolerate the existence of
5484 : /// offloaded timelines or not.
5485 234 : fn create_timeline_create_guard(
5486 234 : self: &Arc<Self>,
5487 234 : timeline_id: TimelineId,
5488 234 : idempotency: CreateTimelineIdempotency,
5489 234 : allow_offloaded: bool,
5490 234 : ) -> Result<TimelineCreateGuard, TimelineExclusionError> {
5491 234 : let tenant_shard_id = self.tenant_shard_id;
5492 234 :
5493 234 : let timeline_path = self.conf.timeline_path(&tenant_shard_id, &timeline_id);
5494 :
5495 234 : let create_guard = TimelineCreateGuard::new(
5496 234 : self,
5497 234 : timeline_id,
5498 234 : timeline_path.clone(),
5499 234 : idempotency,
5500 234 : allow_offloaded,
5501 234 : )?;
5502 :
5503 : // At this stage, we have got exclusive access to in-memory state for this timeline ID
5504 : // for creation.
5505 : // A timeline directory should never exist on disk already:
5506 : // - a previous failed creation would have cleaned up after itself
5507 : // - a pageserver restart would clean up timeline directories that don't have valid remote state
5508 : //
5509 : // Therefore it is an unexpected internal error to encounter a timeline directory already existing here,
5510 : // this error may indicate a bug in cleanup on failed creations.
5511 233 : if timeline_path.exists() {
5512 0 : return Err(TimelineExclusionError::Other(anyhow::anyhow!(
5513 0 : "Timeline directory already exists! This is a bug."
5514 0 : )));
5515 233 : }
5516 233 :
5517 233 : Ok(create_guard)
5518 234 : }
5519 :
5520 : /// Gathers inputs from all of the timelines to produce a sizing model input.
5521 : ///
5522 : /// Future is cancellation safe. Only one calculation can be running at once per tenant.
5523 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
5524 : pub async fn gather_size_inputs(
5525 : &self,
5526 : // `max_retention_period` overrides the cutoff that is used to calculate the size
5527 : // (only if it is shorter than the real cutoff).
5528 : max_retention_period: Option<u64>,
5529 : cause: LogicalSizeCalculationCause,
5530 : cancel: &CancellationToken,
5531 : ctx: &RequestContext,
5532 : ) -> Result<size::ModelInputs, size::CalculateSyntheticSizeError> {
5533 : let logical_sizes_at_once = self
5534 : .conf
5535 : .concurrent_tenant_size_logical_size_queries
5536 : .inner();
5537 :
5538 : // TODO: Having a single mutex block concurrent reads is not great for performance.
5539 : //
5540 : // But the only case where we need to run multiple of these at once is when we
5541 : // request a size for a tenant manually via API, while another background calculation
5542 : // is in progress (which is not a common case).
5543 : //
5544 : // See more for on the issue #2748 condenced out of the initial PR review.
5545 : let mut shared_cache = tokio::select! {
5546 : locked = self.cached_logical_sizes.lock() => locked,
5547 : _ = cancel.cancelled() => return Err(size::CalculateSyntheticSizeError::Cancelled),
5548 : _ = self.cancel.cancelled() => return Err(size::CalculateSyntheticSizeError::Cancelled),
5549 : };
5550 :
5551 : size::gather_inputs(
5552 : self,
5553 : logical_sizes_at_once,
5554 : max_retention_period,
5555 : &mut shared_cache,
5556 : cause,
5557 : cancel,
5558 : ctx,
5559 : )
5560 : .await
5561 : }
5562 :
5563 : /// Calculate synthetic tenant size and cache the result.
5564 : /// This is periodically called by background worker.
5565 : /// result is cached in tenant struct
5566 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
5567 : pub async fn calculate_synthetic_size(
5568 : &self,
5569 : cause: LogicalSizeCalculationCause,
5570 : cancel: &CancellationToken,
5571 : ctx: &RequestContext,
5572 : ) -> Result<u64, size::CalculateSyntheticSizeError> {
5573 : let inputs = self.gather_size_inputs(None, cause, cancel, ctx).await?;
5574 :
5575 : let size = inputs.calculate();
5576 :
5577 : self.set_cached_synthetic_size(size);
5578 :
5579 : Ok(size)
5580 : }
5581 :
5582 : /// Cache given synthetic size and update the metric value
5583 0 : pub fn set_cached_synthetic_size(&self, size: u64) {
5584 0 : self.cached_synthetic_tenant_size
5585 0 : .store(size, Ordering::Relaxed);
5586 0 :
5587 0 : // Only shard zero should be calculating synthetic sizes
5588 0 : debug_assert!(self.shard_identity.is_shard_zero());
5589 :
5590 0 : TENANT_SYNTHETIC_SIZE_METRIC
5591 0 : .get_metric_with_label_values(&[&self.tenant_shard_id.tenant_id.to_string()])
5592 0 : .unwrap()
5593 0 : .set(size);
5594 0 : }
5595 :
5596 0 : pub fn cached_synthetic_size(&self) -> u64 {
5597 0 : self.cached_synthetic_tenant_size.load(Ordering::Relaxed)
5598 0 : }
5599 :
5600 : /// Flush any in-progress layers, schedule uploads, and wait for uploads to complete.
5601 : ///
5602 : /// This function can take a long time: callers should wrap it in a timeout if calling
5603 : /// from an external API handler.
5604 : ///
5605 : /// Cancel-safety: cancelling this function may leave I/O running, but such I/O is
5606 : /// still bounded by tenant/timeline shutdown.
5607 : #[tracing::instrument(skip_all)]
5608 : pub(crate) async fn flush_remote(&self) -> anyhow::Result<()> {
5609 : let timelines = self.timelines.lock().unwrap().clone();
5610 :
5611 0 : async fn flush_timeline(_gate: GateGuard, timeline: Arc<Timeline>) -> anyhow::Result<()> {
5612 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Flushing...");
5613 0 : timeline.freeze_and_flush().await?;
5614 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Waiting for uploads...");
5615 0 : timeline.remote_client.wait_completion().await?;
5616 :
5617 0 : Ok(())
5618 0 : }
5619 :
5620 : // We do not use a JoinSet for these tasks, because we don't want them to be
5621 : // aborted when this function's future is cancelled: they should stay alive
5622 : // holding their GateGuard until they complete, to ensure their I/Os complete
5623 : // before Timeline shutdown completes.
5624 : let mut results = FuturesUnordered::new();
5625 :
5626 : for (_timeline_id, timeline) in timelines {
5627 : // Run each timeline's flush in a task holding the timeline's gate: this
5628 : // means that if this function's future is cancelled, the Timeline shutdown
5629 : // will still wait for any I/O in here to complete.
5630 : let Ok(gate) = timeline.gate.enter() else {
5631 : continue;
5632 : };
5633 0 : let jh = tokio::task::spawn(async move { flush_timeline(gate, timeline).await });
5634 : results.push(jh);
5635 : }
5636 :
5637 : while let Some(r) = results.next().await {
5638 : if let Err(e) = r {
5639 : if !e.is_cancelled() && !e.is_panic() {
5640 : tracing::error!("unexpected join error: {e:?}");
5641 : }
5642 : }
5643 : }
5644 :
5645 : // The flushes we did above were just writes, but the TenantShard might have had
5646 : // pending deletions as well from recent compaction/gc: we want to flush those
5647 : // as well. This requires flushing the global delete queue. This is cheap
5648 : // because it's typically a no-op.
5649 : match self.deletion_queue_client.flush_execute().await {
5650 : Ok(_) => {}
5651 : Err(DeletionQueueError::ShuttingDown) => {}
5652 : }
5653 :
5654 : Ok(())
5655 : }
5656 :
5657 0 : pub(crate) fn get_tenant_conf(&self) -> pageserver_api::models::TenantConfig {
5658 0 : self.tenant_conf.load().tenant_conf.clone()
5659 0 : }
5660 :
5661 : /// How much local storage would this tenant like to have? It can cope with
5662 : /// less than this (via eviction and on-demand downloads), but this function enables
5663 : /// the TenantShard to advertise how much storage it would prefer to have to provide fast I/O
5664 : /// by keeping important things on local disk.
5665 : ///
5666 : /// This is a heuristic, not a guarantee: tenants that are long-idle will actually use less
5667 : /// than they report here, due to layer eviction. Tenants with many active branches may
5668 : /// actually use more than they report here.
5669 0 : pub(crate) fn local_storage_wanted(&self) -> u64 {
5670 0 : let timelines = self.timelines.lock().unwrap();
5671 0 :
5672 0 : // Heuristic: we use the max() of the timelines' visible sizes, rather than the sum. This
5673 0 : // reflects the observation that on tenants with multiple large branches, typically only one
5674 0 : // of them is used actively enough to occupy space on disk.
5675 0 : timelines
5676 0 : .values()
5677 0 : .map(|t| t.metrics.visible_physical_size_gauge.get())
5678 0 : .max()
5679 0 : .unwrap_or(0)
5680 0 : }
5681 :
5682 : /// Builds a new tenant manifest, and uploads it if it differs from the last-known tenant
5683 : /// manifest in `Self::remote_tenant_manifest`.
5684 : ///
5685 : /// TODO: instead of requiring callers to remember to call `maybe_upload_tenant_manifest` after
5686 : /// changing any `TenantShard` state that's included in the manifest, consider making the manifest
5687 : /// the authoritative source of data with an API that automatically uploads on changes. Revisit
5688 : /// this when the manifest is more widely used and we have a better idea of the data model.
5689 119 : pub(crate) async fn maybe_upload_tenant_manifest(&self) -> Result<(), TenantManifestError> {
5690 : // Multiple tasks may call this function concurrently after mutating the TenantShard runtime
5691 : // state, affecting the manifest generated by `build_tenant_manifest`. We use an async mutex
5692 : // to serialize these callers. `eq_ignoring_version` acts as a slightly inefficient but
5693 : // simple coalescing mechanism.
5694 119 : let mut guard = tokio::select! {
5695 119 : guard = self.remote_tenant_manifest.lock() => guard,
5696 119 : _ = self.cancel.cancelled() => return Err(TenantManifestError::Cancelled),
5697 : };
5698 :
5699 : // Build a new manifest.
5700 119 : let manifest = self.build_tenant_manifest();
5701 :
5702 : // Check if the manifest has changed. We ignore the version number here, to avoid
5703 : // uploading every manifest on version number bumps.
5704 119 : if let Some(old) = guard.as_ref() {
5705 4 : if manifest.eq_ignoring_version(old) {
5706 3 : return Ok(());
5707 1 : }
5708 115 : }
5709 :
5710 : // Update metrics
5711 116 : let tid = self.tenant_shard_id.to_string();
5712 116 : let shard_id = self.tenant_shard_id.shard_slug().to_string();
5713 116 : let set_key = &[tid.as_str(), shard_id.as_str()][..];
5714 116 : TENANT_OFFLOADED_TIMELINES
5715 116 : .with_label_values(set_key)
5716 116 : .set(manifest.offloaded_timelines.len() as u64);
5717 116 :
5718 116 : // Upload the manifest. Remote storage does no retries internally, so retry here.
5719 116 : match backoff::retry(
5720 116 : || async {
5721 116 : upload_tenant_manifest(
5722 116 : &self.remote_storage,
5723 116 : &self.tenant_shard_id,
5724 116 : self.generation,
5725 116 : &manifest,
5726 116 : &self.cancel,
5727 116 : )
5728 116 : .await
5729 232 : },
5730 116 : |_| self.cancel.is_cancelled(),
5731 116 : FAILED_UPLOAD_WARN_THRESHOLD,
5732 116 : FAILED_REMOTE_OP_RETRIES,
5733 116 : "uploading tenant manifest",
5734 116 : &self.cancel,
5735 116 : )
5736 116 : .await
5737 : {
5738 0 : None => Err(TenantManifestError::Cancelled),
5739 0 : Some(Err(_)) if self.cancel.is_cancelled() => Err(TenantManifestError::Cancelled),
5740 0 : Some(Err(e)) => Err(TenantManifestError::RemoteStorage(e)),
5741 : Some(Ok(_)) => {
5742 : // Store the successfully uploaded manifest, so that future callers can avoid
5743 : // re-uploading the same thing.
5744 116 : *guard = Some(manifest);
5745 116 :
5746 116 : Ok(())
5747 : }
5748 : }
5749 119 : }
5750 : }
5751 :
5752 : /// Create the cluster temporarily in 'initdbpath' directory inside the repository
5753 : /// to get bootstrap data for timeline initialization.
5754 0 : async fn run_initdb(
5755 0 : conf: &'static PageServerConf,
5756 0 : initdb_target_dir: &Utf8Path,
5757 0 : pg_version: u32,
5758 0 : cancel: &CancellationToken,
5759 0 : ) -> Result<(), InitdbError> {
5760 0 : let initdb_bin_path = conf
5761 0 : .pg_bin_dir(pg_version)
5762 0 : .map_err(InitdbError::Other)?
5763 0 : .join("initdb");
5764 0 : let initdb_lib_dir = conf.pg_lib_dir(pg_version).map_err(InitdbError::Other)?;
5765 0 : info!(
5766 0 : "running {} in {}, libdir: {}",
5767 : initdb_bin_path, initdb_target_dir, initdb_lib_dir,
5768 : );
5769 :
5770 0 : let _permit = {
5771 0 : let _timer = INITDB_SEMAPHORE_ACQUISITION_TIME.start_timer();
5772 0 : INIT_DB_SEMAPHORE.acquire().await
5773 : };
5774 :
5775 0 : CONCURRENT_INITDBS.inc();
5776 0 : scopeguard::defer! {
5777 0 : CONCURRENT_INITDBS.dec();
5778 0 : }
5779 0 :
5780 0 : let _timer = INITDB_RUN_TIME.start_timer();
5781 0 : let res = postgres_initdb::do_run_initdb(postgres_initdb::RunInitdbArgs {
5782 0 : superuser: &conf.superuser,
5783 0 : locale: &conf.locale,
5784 0 : initdb_bin: &initdb_bin_path,
5785 0 : pg_version,
5786 0 : library_search_path: &initdb_lib_dir,
5787 0 : pgdata: initdb_target_dir,
5788 0 : })
5789 0 : .await
5790 0 : .map_err(InitdbError::Inner);
5791 0 :
5792 0 : // This isn't true cancellation support, see above. Still return an error to
5793 0 : // excercise the cancellation code path.
5794 0 : if cancel.is_cancelled() {
5795 0 : return Err(InitdbError::Cancelled);
5796 0 : }
5797 0 :
5798 0 : res
5799 0 : }
5800 :
5801 : /// Dump contents of a layer file to stdout.
5802 0 : pub async fn dump_layerfile_from_path(
5803 0 : path: &Utf8Path,
5804 0 : verbose: bool,
5805 0 : ctx: &RequestContext,
5806 0 : ) -> anyhow::Result<()> {
5807 : use std::os::unix::fs::FileExt;
5808 :
5809 : // All layer files start with a two-byte "magic" value, to identify the kind of
5810 : // file.
5811 0 : let file = File::open(path)?;
5812 0 : let mut header_buf = [0u8; 2];
5813 0 : file.read_exact_at(&mut header_buf, 0)?;
5814 :
5815 0 : match u16::from_be_bytes(header_buf) {
5816 : crate::IMAGE_FILE_MAGIC => {
5817 0 : ImageLayer::new_for_path(path, file)?
5818 0 : .dump(verbose, ctx)
5819 0 : .await?
5820 : }
5821 : crate::DELTA_FILE_MAGIC => {
5822 0 : DeltaLayer::new_for_path(path, file)?
5823 0 : .dump(verbose, ctx)
5824 0 : .await?
5825 : }
5826 0 : magic => bail!("unrecognized magic identifier: {:?}", magic),
5827 : }
5828 :
5829 0 : Ok(())
5830 0 : }
5831 :
5832 : #[cfg(test)]
5833 : pub(crate) mod harness {
5834 : use bytes::{Bytes, BytesMut};
5835 : use hex_literal::hex;
5836 : use once_cell::sync::OnceCell;
5837 : use pageserver_api::key::Key;
5838 : use pageserver_api::models::ShardParameters;
5839 : use pageserver_api::record::NeonWalRecord;
5840 : use pageserver_api::shard::ShardIndex;
5841 : use utils::id::TenantId;
5842 : use utils::logging;
5843 :
5844 : use super::*;
5845 : use crate::deletion_queue::mock::MockDeletionQueue;
5846 : use crate::l0_flush::L0FlushConfig;
5847 : use crate::walredo::apply_neon;
5848 :
5849 : pub const TIMELINE_ID: TimelineId =
5850 : TimelineId::from_array(hex!("11223344556677881122334455667788"));
5851 : pub const NEW_TIMELINE_ID: TimelineId =
5852 : TimelineId::from_array(hex!("AA223344556677881122334455667788"));
5853 :
5854 : /// Convenience function to create a page image with given string as the only content
5855 2514376 : pub fn test_img(s: &str) -> Bytes {
5856 2514376 : let mut buf = BytesMut::new();
5857 2514376 : buf.extend_from_slice(s.as_bytes());
5858 2514376 : buf.resize(64, 0);
5859 2514376 :
5860 2514376 : buf.freeze()
5861 2514376 : }
5862 :
5863 : pub struct TenantHarness {
5864 : pub conf: &'static PageServerConf,
5865 : pub tenant_conf: pageserver_api::models::TenantConfig,
5866 : pub tenant_shard_id: TenantShardId,
5867 : pub shard_identity: ShardIdentity,
5868 : pub generation: Generation,
5869 : pub shard: ShardIndex,
5870 : pub remote_storage: GenericRemoteStorage,
5871 : pub remote_fs_dir: Utf8PathBuf,
5872 : pub deletion_queue: MockDeletionQueue,
5873 : }
5874 :
5875 : static LOG_HANDLE: OnceCell<()> = OnceCell::new();
5876 :
5877 130 : pub(crate) fn setup_logging() {
5878 130 : LOG_HANDLE.get_or_init(|| {
5879 124 : logging::init(
5880 124 : logging::LogFormat::Test,
5881 124 : // enable it in case the tests exercise code paths that use
5882 124 : // debug_assert_current_span_has_tenant_and_timeline_id
5883 124 : logging::TracingErrorLayerEnablement::EnableWithRustLogFilter,
5884 124 : logging::Output::Stdout,
5885 124 : )
5886 124 : .expect("Failed to init test logging");
5887 130 : });
5888 130 : }
5889 :
5890 : impl TenantHarness {
5891 118 : pub async fn create_custom(
5892 118 : test_name: &'static str,
5893 118 : tenant_conf: pageserver_api::models::TenantConfig,
5894 118 : tenant_id: TenantId,
5895 118 : shard_identity: ShardIdentity,
5896 118 : generation: Generation,
5897 118 : ) -> anyhow::Result<Self> {
5898 118 : setup_logging();
5899 118 :
5900 118 : let repo_dir = PageServerConf::test_repo_dir(test_name);
5901 118 : let _ = fs::remove_dir_all(&repo_dir);
5902 118 : fs::create_dir_all(&repo_dir)?;
5903 :
5904 118 : let conf = PageServerConf::dummy_conf(repo_dir);
5905 118 : // Make a static copy of the config. This can never be free'd, but that's
5906 118 : // OK in a test.
5907 118 : let conf: &'static PageServerConf = Box::leak(Box::new(conf));
5908 118 :
5909 118 : let shard = shard_identity.shard_index();
5910 118 : let tenant_shard_id = TenantShardId {
5911 118 : tenant_id,
5912 118 : shard_number: shard.shard_number,
5913 118 : shard_count: shard.shard_count,
5914 118 : };
5915 118 : fs::create_dir_all(conf.tenant_path(&tenant_shard_id))?;
5916 118 : fs::create_dir_all(conf.timelines_path(&tenant_shard_id))?;
5917 :
5918 : use remote_storage::{RemoteStorageConfig, RemoteStorageKind};
5919 118 : let remote_fs_dir = conf.workdir.join("localfs");
5920 118 : std::fs::create_dir_all(&remote_fs_dir).unwrap();
5921 118 : let config = RemoteStorageConfig {
5922 118 : storage: RemoteStorageKind::LocalFs {
5923 118 : local_path: remote_fs_dir.clone(),
5924 118 : },
5925 118 : timeout: RemoteStorageConfig::DEFAULT_TIMEOUT,
5926 118 : small_timeout: RemoteStorageConfig::DEFAULT_SMALL_TIMEOUT,
5927 118 : };
5928 118 : let remote_storage = GenericRemoteStorage::from_config(&config).await.unwrap();
5929 118 : let deletion_queue = MockDeletionQueue::new(Some(remote_storage.clone()));
5930 118 :
5931 118 : Ok(Self {
5932 118 : conf,
5933 118 : tenant_conf,
5934 118 : tenant_shard_id,
5935 118 : shard_identity,
5936 118 : generation,
5937 118 : shard,
5938 118 : remote_storage,
5939 118 : remote_fs_dir,
5940 118 : deletion_queue,
5941 118 : })
5942 118 : }
5943 :
5944 110 : pub async fn create(test_name: &'static str) -> anyhow::Result<Self> {
5945 110 : // Disable automatic GC and compaction to make the unit tests more deterministic.
5946 110 : // The tests perform them manually if needed.
5947 110 : let tenant_conf = pageserver_api::models::TenantConfig {
5948 110 : gc_period: Some(Duration::ZERO),
5949 110 : compaction_period: Some(Duration::ZERO),
5950 110 : ..Default::default()
5951 110 : };
5952 110 : let tenant_id = TenantId::generate();
5953 110 : let shard = ShardIdentity::unsharded();
5954 110 : Self::create_custom(
5955 110 : test_name,
5956 110 : tenant_conf,
5957 110 : tenant_id,
5958 110 : shard,
5959 110 : Generation::new(0xdeadbeef),
5960 110 : )
5961 110 : .await
5962 110 : }
5963 :
5964 10 : pub fn span(&self) -> tracing::Span {
5965 10 : info_span!("TenantHarness", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug())
5966 10 : }
5967 :
5968 118 : pub(crate) async fn load(&self) -> (Arc<TenantShard>, RequestContext) {
5969 118 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error)
5970 118 : .with_scope_unit_test();
5971 118 : (
5972 118 : self.do_try_load(&ctx)
5973 118 : .await
5974 118 : .expect("failed to load test tenant"),
5975 118 : ctx,
5976 118 : )
5977 118 : }
5978 :
5979 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
5980 : pub(crate) async fn do_try_load(
5981 : &self,
5982 : ctx: &RequestContext,
5983 : ) -> anyhow::Result<Arc<TenantShard>> {
5984 : let walredo_mgr = Arc::new(WalRedoManager::from(TestRedoManager));
5985 :
5986 : let (basebackup_requst_sender, _) = tokio::sync::mpsc::unbounded_channel();
5987 :
5988 : let tenant = Arc::new(TenantShard::new(
5989 : TenantState::Attaching,
5990 : self.conf,
5991 : AttachedTenantConf::try_from(LocationConf::attached_single(
5992 : self.tenant_conf.clone(),
5993 : self.generation,
5994 : &ShardParameters::default(),
5995 : ))
5996 : .unwrap(),
5997 : self.shard_identity,
5998 : Some(walredo_mgr),
5999 : self.tenant_shard_id,
6000 : self.remote_storage.clone(),
6001 : self.deletion_queue.new_client(),
6002 : // TODO: ideally we should run all unit tests with both configs
6003 : L0FlushGlobalState::new(L0FlushConfig::default()),
6004 : basebackup_requst_sender,
6005 : FeatureResolver::new_disabled(),
6006 : ));
6007 :
6008 : let preload = tenant
6009 : .preload(&self.remote_storage, CancellationToken::new())
6010 : .await?;
6011 : tenant.attach(Some(preload), ctx).await?;
6012 :
6013 : tenant.state.send_replace(TenantState::Active);
6014 : for timeline in tenant.timelines.lock().unwrap().values() {
6015 : timeline.set_state(TimelineState::Active);
6016 : }
6017 : Ok(tenant)
6018 : }
6019 :
6020 1 : pub fn timeline_path(&self, timeline_id: &TimelineId) -> Utf8PathBuf {
6021 1 : self.conf.timeline_path(&self.tenant_shard_id, timeline_id)
6022 1 : }
6023 : }
6024 :
6025 : // Mock WAL redo manager that doesn't do much
6026 : pub(crate) struct TestRedoManager;
6027 :
6028 : impl TestRedoManager {
6029 : /// # Cancel-Safety
6030 : ///
6031 : /// This method is cancellation-safe.
6032 26774 : pub async fn request_redo(
6033 26774 : &self,
6034 26774 : key: Key,
6035 26774 : lsn: Lsn,
6036 26774 : base_img: Option<(Lsn, Bytes)>,
6037 26774 : records: Vec<(Lsn, NeonWalRecord)>,
6038 26774 : _pg_version: u32,
6039 26774 : _redo_attempt_type: RedoAttemptType,
6040 26774 : ) -> Result<Bytes, walredo::Error> {
6041 1403510 : let records_neon = records.iter().all(|r| apply_neon::can_apply_in_neon(&r.1));
6042 26774 : if records_neon {
6043 : // For Neon wal records, we can decode without spawning postgres, so do so.
6044 26774 : let mut page = match (base_img, records.first()) {
6045 13029 : (Some((_lsn, img)), _) => {
6046 13029 : let mut page = BytesMut::new();
6047 13029 : page.extend_from_slice(&img);
6048 13029 : page
6049 : }
6050 13745 : (_, Some((_lsn, rec))) if rec.will_init() => BytesMut::new(),
6051 : _ => {
6052 0 : panic!("Neon WAL redo requires base image or will init record");
6053 : }
6054 : };
6055 :
6056 1430283 : for (record_lsn, record) in records {
6057 1403510 : apply_neon::apply_in_neon(&record, record_lsn, key, &mut page)?;
6058 : }
6059 26773 : Ok(page.freeze())
6060 : } else {
6061 : // We never spawn a postgres walredo process in unit tests: just log what we might have done.
6062 0 : let s = format!(
6063 0 : "redo for {} to get to {}, with {} and {} records",
6064 0 : key,
6065 0 : lsn,
6066 0 : if base_img.is_some() {
6067 0 : "base image"
6068 : } else {
6069 0 : "no base image"
6070 : },
6071 0 : records.len()
6072 0 : );
6073 0 : println!("{s}");
6074 0 :
6075 0 : Ok(test_img(&s))
6076 : }
6077 26774 : }
6078 : }
6079 : }
6080 :
6081 : #[cfg(test)]
6082 : mod tests {
6083 : use std::collections::{BTreeMap, BTreeSet};
6084 :
6085 : use bytes::{Bytes, BytesMut};
6086 : use hex_literal::hex;
6087 : use itertools::Itertools;
6088 : #[cfg(feature = "testing")]
6089 : use models::CompactLsnRange;
6090 : use pageserver_api::key::{
6091 : AUX_KEY_PREFIX, Key, NON_INHERITED_RANGE, RELATION_SIZE_PREFIX, repl_origin_key,
6092 : };
6093 : use pageserver_api::keyspace::KeySpace;
6094 : #[cfg(feature = "testing")]
6095 : use pageserver_api::keyspace::KeySpaceRandomAccum;
6096 : use pageserver_api::models::{CompactionAlgorithm, CompactionAlgorithmSettings};
6097 : #[cfg(feature = "testing")]
6098 : use pageserver_api::record::NeonWalRecord;
6099 : use pageserver_api::value::Value;
6100 : use pageserver_compaction::helpers::overlaps_with;
6101 : #[cfg(feature = "testing")]
6102 : use rand::SeedableRng;
6103 : #[cfg(feature = "testing")]
6104 : use rand::rngs::StdRng;
6105 : use rand::{Rng, thread_rng};
6106 : #[cfg(feature = "testing")]
6107 : use std::ops::Range;
6108 : use storage_layer::{IoConcurrency, PersistentLayerKey};
6109 : use tests::storage_layer::ValuesReconstructState;
6110 : use tests::timeline::{GetVectoredError, ShutdownMode};
6111 : #[cfg(feature = "testing")]
6112 : use timeline::GcInfo;
6113 : #[cfg(feature = "testing")]
6114 : use timeline::InMemoryLayerTestDesc;
6115 : #[cfg(feature = "testing")]
6116 : use timeline::compaction::{KeyHistoryRetention, KeyLogAtLsn};
6117 : use timeline::{CompactOptions, DeltaLayerTestDesc, VersionedKeySpaceQuery};
6118 : use utils::id::TenantId;
6119 : use utils::shard::{ShardCount, ShardNumber};
6120 :
6121 : use super::*;
6122 : use crate::DEFAULT_PG_VERSION;
6123 : use crate::keyspace::KeySpaceAccum;
6124 : use crate::tenant::harness::*;
6125 : use crate::tenant::timeline::CompactFlags;
6126 :
6127 : static TEST_KEY: Lazy<Key> =
6128 10 : Lazy::new(|| Key::from_slice(&hex!("010000000033333333444444445500000001")));
6129 :
6130 : #[cfg(feature = "testing")]
6131 : struct TestTimelineSpecification {
6132 : start_lsn: Lsn,
6133 : last_record_lsn: Lsn,
6134 :
6135 : in_memory_layers_shape: Vec<(Range<Key>, Range<Lsn>)>,
6136 : delta_layers_shape: Vec<(Range<Key>, Range<Lsn>)>,
6137 : image_layers_shape: Vec<(Range<Key>, Lsn)>,
6138 :
6139 : gap_chance: u8,
6140 : will_init_chance: u8,
6141 : }
6142 :
6143 : #[cfg(feature = "testing")]
6144 : struct Storage {
6145 : storage: HashMap<(Key, Lsn), Value>,
6146 : start_lsn: Lsn,
6147 : }
6148 :
6149 : #[cfg(feature = "testing")]
6150 : impl Storage {
6151 32000 : fn get(&self, key: Key, lsn: Lsn) -> Bytes {
6152 : use bytes::BufMut;
6153 :
6154 32000 : let mut crnt_lsn = lsn;
6155 32000 : let mut got_base = false;
6156 32000 :
6157 32000 : let mut acc = Vec::new();
6158 :
6159 2831871 : while crnt_lsn >= self.start_lsn {
6160 2831871 : if let Some(value) = self.storage.get(&(key, crnt_lsn)) {
6161 1421172 : acc.push(value.clone());
6162 :
6163 1402881 : match value {
6164 1402881 : Value::WalRecord(NeonWalRecord::Test { will_init, .. }) => {
6165 1402881 : if *will_init {
6166 13709 : got_base = true;
6167 13709 : break;
6168 1389172 : }
6169 : }
6170 : Value::Image(_) => {
6171 18291 : got_base = true;
6172 18291 : break;
6173 : }
6174 0 : _ => unreachable!(),
6175 : }
6176 1410699 : }
6177 :
6178 2799871 : crnt_lsn = crnt_lsn.checked_sub(1u64).unwrap();
6179 : }
6180 :
6181 32000 : assert!(
6182 32000 : got_base,
6183 0 : "Input data was incorrect. No base image for {key}@{lsn}"
6184 : );
6185 :
6186 32000 : tracing::debug!("Wal redo depth for {key}@{lsn} is {}", acc.len());
6187 :
6188 32000 : let mut blob = BytesMut::new();
6189 1421172 : for value in acc.into_iter().rev() {
6190 1402881 : match value {
6191 1402881 : Value::WalRecord(NeonWalRecord::Test { append, .. }) => {
6192 1402881 : blob.extend_from_slice(append.as_bytes());
6193 1402881 : }
6194 18291 : Value::Image(img) => {
6195 18291 : blob.put(img);
6196 18291 : }
6197 0 : _ => unreachable!(),
6198 : }
6199 : }
6200 :
6201 32000 : blob.into()
6202 32000 : }
6203 : }
6204 :
6205 : #[cfg(feature = "testing")]
6206 : #[allow(clippy::too_many_arguments)]
6207 1 : async fn randomize_timeline(
6208 1 : tenant: &Arc<TenantShard>,
6209 1 : new_timeline_id: TimelineId,
6210 1 : pg_version: u32,
6211 1 : spec: TestTimelineSpecification,
6212 1 : random: &mut rand::rngs::StdRng,
6213 1 : ctx: &RequestContext,
6214 1 : ) -> anyhow::Result<(Arc<Timeline>, Storage, Vec<Lsn>)> {
6215 1 : let mut storage: HashMap<(Key, Lsn), Value> = HashMap::default();
6216 1 : let mut interesting_lsns = vec![spec.last_record_lsn];
6217 :
6218 2 : for (key_range, lsn_range) in spec.in_memory_layers_shape.iter() {
6219 2 : let mut lsn = lsn_range.start;
6220 202 : while lsn < lsn_range.end {
6221 200 : let mut key = key_range.start;
6222 21018 : while key < key_range.end {
6223 20818 : let gap = random.gen_range(1..=100) <= spec.gap_chance;
6224 20818 : let will_init = random.gen_range(1..=100) <= spec.will_init_chance;
6225 20818 :
6226 20818 : if gap {
6227 1018 : continue;
6228 19800 : }
6229 :
6230 19800 : let record = if will_init {
6231 191 : Value::WalRecord(NeonWalRecord::wal_init(format!("[wil_init {key}@{lsn}]")))
6232 : } else {
6233 19609 : Value::WalRecord(NeonWalRecord::wal_append(format!("[delta {key}@{lsn}]")))
6234 : };
6235 :
6236 19800 : storage.insert((key, lsn), record);
6237 19800 :
6238 19800 : key = key.next();
6239 : }
6240 200 : lsn = Lsn(lsn.0 + 1);
6241 : }
6242 :
6243 : // Stash some interesting LSN for future use
6244 6 : for offset in [0, 5, 100].iter() {
6245 6 : if *offset == 0 {
6246 2 : interesting_lsns.push(lsn_range.start);
6247 2 : } else {
6248 4 : let below = lsn_range.start.checked_sub(*offset);
6249 4 : match below {
6250 4 : Some(v) if v >= spec.start_lsn => {
6251 4 : interesting_lsns.push(v);
6252 4 : }
6253 0 : _ => {}
6254 : }
6255 :
6256 4 : let above = Lsn(lsn_range.start.0 + offset);
6257 4 : interesting_lsns.push(above);
6258 : }
6259 : }
6260 : }
6261 :
6262 3 : for (key_range, lsn_range) in spec.delta_layers_shape.iter() {
6263 3 : let mut lsn = lsn_range.start;
6264 315 : while lsn < lsn_range.end {
6265 312 : let mut key = key_range.start;
6266 11112 : while key < key_range.end {
6267 10800 : let gap = random.gen_range(1..=100) <= spec.gap_chance;
6268 10800 : let will_init = random.gen_range(1..=100) <= spec.will_init_chance;
6269 10800 :
6270 10800 : if gap {
6271 504 : continue;
6272 10296 : }
6273 :
6274 10296 : let record = if will_init {
6275 103 : Value::WalRecord(NeonWalRecord::wal_init(format!("[wil_init {key}@{lsn}]")))
6276 : } else {
6277 10193 : Value::WalRecord(NeonWalRecord::wal_append(format!("[delta {key}@{lsn}]")))
6278 : };
6279 :
6280 10296 : storage.insert((key, lsn), record);
6281 10296 :
6282 10296 : key = key.next();
6283 : }
6284 312 : lsn = Lsn(lsn.0 + 1);
6285 : }
6286 :
6287 : // Stash some interesting LSN for future use
6288 9 : for offset in [0, 5, 100].iter() {
6289 9 : if *offset == 0 {
6290 3 : interesting_lsns.push(lsn_range.start);
6291 3 : } else {
6292 6 : let below = lsn_range.start.checked_sub(*offset);
6293 6 : match below {
6294 6 : Some(v) if v >= spec.start_lsn => {
6295 3 : interesting_lsns.push(v);
6296 3 : }
6297 3 : _ => {}
6298 : }
6299 :
6300 6 : let above = Lsn(lsn_range.start.0 + offset);
6301 6 : interesting_lsns.push(above);
6302 : }
6303 : }
6304 : }
6305 :
6306 3 : for (key_range, lsn) in spec.image_layers_shape.iter() {
6307 3 : let mut key = key_range.start;
6308 142 : while key < key_range.end {
6309 139 : let blob = Bytes::from(format!("[image {key}@{lsn}]"));
6310 139 : let record = Value::Image(blob.clone());
6311 139 : storage.insert((key, *lsn), record);
6312 139 :
6313 139 : key = key.next();
6314 139 : }
6315 :
6316 : // Stash some interesting LSN for future use
6317 9 : for offset in [0, 5, 100].iter() {
6318 9 : if *offset == 0 {
6319 3 : interesting_lsns.push(*lsn);
6320 3 : } else {
6321 6 : let below = lsn.checked_sub(*offset);
6322 6 : match below {
6323 6 : Some(v) if v >= spec.start_lsn => {
6324 4 : interesting_lsns.push(v);
6325 4 : }
6326 2 : _ => {}
6327 : }
6328 :
6329 6 : let above = Lsn(lsn.0 + offset);
6330 6 : interesting_lsns.push(above);
6331 : }
6332 : }
6333 : }
6334 :
6335 1 : let in_memory_test_layers = {
6336 1 : let mut acc = Vec::new();
6337 :
6338 2 : for (key_range, lsn_range) in spec.in_memory_layers_shape.iter() {
6339 2 : let mut data = Vec::new();
6340 2 :
6341 2 : let mut lsn = lsn_range.start;
6342 202 : while lsn < lsn_range.end {
6343 200 : let mut key = key_range.start;
6344 20000 : while key < key_range.end {
6345 19800 : if let Some(record) = storage.get(&(key, lsn)) {
6346 19800 : data.push((key, lsn, record.clone()));
6347 19800 : }
6348 :
6349 19800 : key = key.next();
6350 : }
6351 200 : lsn = Lsn(lsn.0 + 1);
6352 : }
6353 :
6354 2 : acc.push(InMemoryLayerTestDesc {
6355 2 : data,
6356 2 : lsn_range: lsn_range.clone(),
6357 2 : is_open: false,
6358 2 : })
6359 : }
6360 :
6361 1 : acc
6362 : };
6363 :
6364 1 : let delta_test_layers = {
6365 1 : let mut acc = Vec::new();
6366 :
6367 3 : for (key_range, lsn_range) in spec.delta_layers_shape.iter() {
6368 3 : let mut data = Vec::new();
6369 3 :
6370 3 : let mut lsn = lsn_range.start;
6371 315 : while lsn < lsn_range.end {
6372 312 : let mut key = key_range.start;
6373 10608 : while key < key_range.end {
6374 10296 : if let Some(record) = storage.get(&(key, lsn)) {
6375 10296 : data.push((key, lsn, record.clone()));
6376 10296 : }
6377 :
6378 10296 : key = key.next();
6379 : }
6380 312 : lsn = Lsn(lsn.0 + 1);
6381 : }
6382 :
6383 3 : acc.push(DeltaLayerTestDesc {
6384 3 : data,
6385 3 : lsn_range: lsn_range.clone(),
6386 3 : key_range: key_range.clone(),
6387 3 : })
6388 : }
6389 :
6390 1 : acc
6391 : };
6392 :
6393 1 : let image_test_layers = {
6394 1 : let mut acc = Vec::new();
6395 :
6396 3 : for (key_range, lsn) in spec.image_layers_shape.iter() {
6397 3 : let mut data = Vec::new();
6398 3 :
6399 3 : let mut key = key_range.start;
6400 142 : while key < key_range.end {
6401 139 : if let Some(record) = storage.get(&(key, *lsn)) {
6402 139 : let blob = match record {
6403 139 : Value::Image(blob) => blob.clone(),
6404 0 : _ => unreachable!(),
6405 : };
6406 :
6407 139 : data.push((key, blob));
6408 0 : }
6409 :
6410 139 : key = key.next();
6411 : }
6412 :
6413 3 : acc.push((*lsn, data));
6414 : }
6415 :
6416 1 : acc
6417 : };
6418 :
6419 1 : let tline = tenant
6420 1 : .create_test_timeline_with_layers(
6421 1 : new_timeline_id,
6422 1 : spec.start_lsn,
6423 1 : pg_version,
6424 1 : ctx,
6425 1 : in_memory_test_layers,
6426 1 : delta_test_layers,
6427 1 : image_test_layers,
6428 1 : spec.last_record_lsn,
6429 1 : )
6430 1 : .await?;
6431 :
6432 1 : Ok((
6433 1 : tline,
6434 1 : Storage {
6435 1 : storage,
6436 1 : start_lsn: spec.start_lsn,
6437 1 : },
6438 1 : interesting_lsns,
6439 1 : ))
6440 1 : }
6441 :
6442 : #[tokio::test]
6443 1 : async fn test_basic() -> anyhow::Result<()> {
6444 1 : let (tenant, ctx) = TenantHarness::create("test_basic").await?.load().await;
6445 1 : let tline = tenant
6446 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
6447 1 : .await?;
6448 1 :
6449 1 : let mut writer = tline.writer().await;
6450 1 : writer
6451 1 : .put(
6452 1 : *TEST_KEY,
6453 1 : Lsn(0x10),
6454 1 : &Value::Image(test_img("foo at 0x10")),
6455 1 : &ctx,
6456 1 : )
6457 1 : .await?;
6458 1 : writer.finish_write(Lsn(0x10));
6459 1 : drop(writer);
6460 1 :
6461 1 : let mut writer = tline.writer().await;
6462 1 : writer
6463 1 : .put(
6464 1 : *TEST_KEY,
6465 1 : Lsn(0x20),
6466 1 : &Value::Image(test_img("foo at 0x20")),
6467 1 : &ctx,
6468 1 : )
6469 1 : .await?;
6470 1 : writer.finish_write(Lsn(0x20));
6471 1 : drop(writer);
6472 1 :
6473 1 : assert_eq!(
6474 1 : tline.get(*TEST_KEY, Lsn(0x10), &ctx).await?,
6475 1 : test_img("foo at 0x10")
6476 1 : );
6477 1 : assert_eq!(
6478 1 : tline.get(*TEST_KEY, Lsn(0x1f), &ctx).await?,
6479 1 : test_img("foo at 0x10")
6480 1 : );
6481 1 : assert_eq!(
6482 1 : tline.get(*TEST_KEY, Lsn(0x20), &ctx).await?,
6483 1 : test_img("foo at 0x20")
6484 1 : );
6485 1 :
6486 1 : Ok(())
6487 1 : }
6488 :
6489 : #[tokio::test]
6490 1 : async fn no_duplicate_timelines() -> anyhow::Result<()> {
6491 1 : let (tenant, ctx) = TenantHarness::create("no_duplicate_timelines")
6492 1 : .await?
6493 1 : .load()
6494 1 : .await;
6495 1 : let _ = tenant
6496 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6497 1 : .await?;
6498 1 :
6499 1 : match tenant
6500 1 : .create_empty_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6501 1 : .await
6502 1 : {
6503 1 : Ok(_) => panic!("duplicate timeline creation should fail"),
6504 1 : Err(e) => assert_eq!(
6505 1 : e.to_string(),
6506 1 : "timeline already exists with different parameters".to_string()
6507 1 : ),
6508 1 : }
6509 1 :
6510 1 : Ok(())
6511 1 : }
6512 :
6513 : /// Convenience function to create a page image with given string as the only content
6514 5 : pub fn test_value(s: &str) -> Value {
6515 5 : let mut buf = BytesMut::new();
6516 5 : buf.extend_from_slice(s.as_bytes());
6517 5 : Value::Image(buf.freeze())
6518 5 : }
6519 :
6520 : ///
6521 : /// Test branch creation
6522 : ///
6523 : #[tokio::test]
6524 1 : async fn test_branch() -> anyhow::Result<()> {
6525 1 : use std::str::from_utf8;
6526 1 :
6527 1 : let (tenant, ctx) = TenantHarness::create("test_branch").await?.load().await;
6528 1 : let tline = tenant
6529 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6530 1 : .await?;
6531 1 : let mut writer = tline.writer().await;
6532 1 :
6533 1 : #[allow(non_snake_case)]
6534 1 : let TEST_KEY_A: Key = Key::from_hex("110000000033333333444444445500000001").unwrap();
6535 1 : #[allow(non_snake_case)]
6536 1 : let TEST_KEY_B: Key = Key::from_hex("110000000033333333444444445500000002").unwrap();
6537 1 :
6538 1 : // Insert a value on the timeline
6539 1 : writer
6540 1 : .put(TEST_KEY_A, Lsn(0x20), &test_value("foo at 0x20"), &ctx)
6541 1 : .await?;
6542 1 : writer
6543 1 : .put(TEST_KEY_B, Lsn(0x20), &test_value("foobar at 0x20"), &ctx)
6544 1 : .await?;
6545 1 : writer.finish_write(Lsn(0x20));
6546 1 :
6547 1 : writer
6548 1 : .put(TEST_KEY_A, Lsn(0x30), &test_value("foo at 0x30"), &ctx)
6549 1 : .await?;
6550 1 : writer.finish_write(Lsn(0x30));
6551 1 : writer
6552 1 : .put(TEST_KEY_A, Lsn(0x40), &test_value("foo at 0x40"), &ctx)
6553 1 : .await?;
6554 1 : writer.finish_write(Lsn(0x40));
6555 1 :
6556 1 : //assert_current_logical_size(&tline, Lsn(0x40));
6557 1 :
6558 1 : // Branch the history, modify relation differently on the new timeline
6559 1 : tenant
6560 1 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x30)), &ctx)
6561 1 : .await?;
6562 1 : let newtline = tenant
6563 1 : .get_timeline(NEW_TIMELINE_ID, true)
6564 1 : .expect("Should have a local timeline");
6565 1 : let mut new_writer = newtline.writer().await;
6566 1 : new_writer
6567 1 : .put(TEST_KEY_A, Lsn(0x40), &test_value("bar at 0x40"), &ctx)
6568 1 : .await?;
6569 1 : new_writer.finish_write(Lsn(0x40));
6570 1 :
6571 1 : // Check page contents on both branches
6572 1 : assert_eq!(
6573 1 : from_utf8(&tline.get(TEST_KEY_A, Lsn(0x40), &ctx).await?)?,
6574 1 : "foo at 0x40"
6575 1 : );
6576 1 : assert_eq!(
6577 1 : from_utf8(&newtline.get(TEST_KEY_A, Lsn(0x40), &ctx).await?)?,
6578 1 : "bar at 0x40"
6579 1 : );
6580 1 : assert_eq!(
6581 1 : from_utf8(&newtline.get(TEST_KEY_B, Lsn(0x40), &ctx).await?)?,
6582 1 : "foobar at 0x20"
6583 1 : );
6584 1 :
6585 1 : //assert_current_logical_size(&tline, Lsn(0x40));
6586 1 :
6587 1 : Ok(())
6588 1 : }
6589 :
6590 10 : async fn make_some_layers(
6591 10 : tline: &Timeline,
6592 10 : start_lsn: Lsn,
6593 10 : ctx: &RequestContext,
6594 10 : ) -> anyhow::Result<()> {
6595 10 : let mut lsn = start_lsn;
6596 : {
6597 10 : let mut writer = tline.writer().await;
6598 : // Create a relation on the timeline
6599 10 : writer
6600 10 : .put(
6601 10 : *TEST_KEY,
6602 10 : lsn,
6603 10 : &Value::Image(test_img(&format!("foo at {}", lsn))),
6604 10 : ctx,
6605 10 : )
6606 10 : .await?;
6607 10 : writer.finish_write(lsn);
6608 10 : lsn += 0x10;
6609 10 : writer
6610 10 : .put(
6611 10 : *TEST_KEY,
6612 10 : lsn,
6613 10 : &Value::Image(test_img(&format!("foo at {}", lsn))),
6614 10 : ctx,
6615 10 : )
6616 10 : .await?;
6617 10 : writer.finish_write(lsn);
6618 10 : lsn += 0x10;
6619 10 : }
6620 10 : tline.freeze_and_flush().await?;
6621 : {
6622 10 : let mut writer = tline.writer().await;
6623 10 : writer
6624 10 : .put(
6625 10 : *TEST_KEY,
6626 10 : lsn,
6627 10 : &Value::Image(test_img(&format!("foo at {}", lsn))),
6628 10 : ctx,
6629 10 : )
6630 10 : .await?;
6631 10 : writer.finish_write(lsn);
6632 10 : lsn += 0x10;
6633 10 : writer
6634 10 : .put(
6635 10 : *TEST_KEY,
6636 10 : lsn,
6637 10 : &Value::Image(test_img(&format!("foo at {}", lsn))),
6638 10 : ctx,
6639 10 : )
6640 10 : .await?;
6641 10 : writer.finish_write(lsn);
6642 10 : }
6643 10 : tline.freeze_and_flush().await.map_err(|e| e.into())
6644 10 : }
6645 :
6646 : #[tokio::test(start_paused = true)]
6647 1 : async fn test_prohibit_branch_creation_on_garbage_collected_data() -> anyhow::Result<()> {
6648 1 : let (tenant, ctx) =
6649 1 : TenantHarness::create("test_prohibit_branch_creation_on_garbage_collected_data")
6650 1 : .await?
6651 1 : .load()
6652 1 : .await;
6653 1 : // Advance to the lsn lease deadline so that GC is not blocked by
6654 1 : // initial transition into AttachedSingle.
6655 1 : tokio::time::advance(tenant.get_lsn_lease_length()).await;
6656 1 : tokio::time::resume();
6657 1 : let tline = tenant
6658 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6659 1 : .await?;
6660 1 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
6661 1 :
6662 1 : // this removes layers before lsn 40 (50 minus 10), so there are two remaining layers, image and delta for 31-50
6663 1 : // FIXME: this doesn't actually remove any layer currently, given how the flushing
6664 1 : // and compaction works. But it does set the 'cutoff' point so that the cross check
6665 1 : // below should fail.
6666 1 : tenant
6667 1 : .gc_iteration(
6668 1 : Some(TIMELINE_ID),
6669 1 : 0x10,
6670 1 : Duration::ZERO,
6671 1 : &CancellationToken::new(),
6672 1 : &ctx,
6673 1 : )
6674 1 : .await?;
6675 1 :
6676 1 : // try to branch at lsn 25, should fail because we already garbage collected the data
6677 1 : match tenant
6678 1 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x25)), &ctx)
6679 1 : .await
6680 1 : {
6681 1 : Ok(_) => panic!("branching should have failed"),
6682 1 : Err(err) => {
6683 1 : let CreateTimelineError::AncestorLsn(err) = err else {
6684 1 : panic!("wrong error type")
6685 1 : };
6686 1 : assert!(err.to_string().contains("invalid branch start lsn"));
6687 1 : assert!(
6688 1 : err.source()
6689 1 : .unwrap()
6690 1 : .to_string()
6691 1 : .contains("we might've already garbage collected needed data")
6692 1 : )
6693 1 : }
6694 1 : }
6695 1 :
6696 1 : Ok(())
6697 1 : }
6698 :
6699 : #[tokio::test]
6700 1 : async fn test_prohibit_branch_creation_on_pre_initdb_lsn() -> anyhow::Result<()> {
6701 1 : let (tenant, ctx) =
6702 1 : TenantHarness::create("test_prohibit_branch_creation_on_pre_initdb_lsn")
6703 1 : .await?
6704 1 : .load()
6705 1 : .await;
6706 1 :
6707 1 : let tline = tenant
6708 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x50), DEFAULT_PG_VERSION, &ctx)
6709 1 : .await?;
6710 1 : // try to branch at lsn 0x25, should fail because initdb lsn is 0x50
6711 1 : match tenant
6712 1 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x25)), &ctx)
6713 1 : .await
6714 1 : {
6715 1 : Ok(_) => panic!("branching should have failed"),
6716 1 : Err(err) => {
6717 1 : let CreateTimelineError::AncestorLsn(err) = err else {
6718 1 : panic!("wrong error type");
6719 1 : };
6720 1 : assert!(&err.to_string().contains("invalid branch start lsn"));
6721 1 : assert!(
6722 1 : &err.source()
6723 1 : .unwrap()
6724 1 : .to_string()
6725 1 : .contains("is earlier than latest GC cutoff")
6726 1 : );
6727 1 : }
6728 1 : }
6729 1 :
6730 1 : Ok(())
6731 1 : }
6732 :
6733 : /*
6734 : // FIXME: This currently fails to error out. Calling GC doesn't currently
6735 : // remove the old value, we'd need to work a little harder
6736 : #[tokio::test]
6737 : async fn test_prohibit_get_for_garbage_collected_data() -> anyhow::Result<()> {
6738 : let repo =
6739 : RepoHarness::create("test_prohibit_get_for_garbage_collected_data")?
6740 : .load();
6741 :
6742 : let tline = repo.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
6743 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
6744 :
6745 : repo.gc_iteration(Some(TIMELINE_ID), 0x10, Duration::ZERO)?;
6746 : let applied_gc_cutoff_lsn = tline.get_applied_gc_cutoff_lsn();
6747 : assert!(*applied_gc_cutoff_lsn > Lsn(0x25));
6748 : match tline.get(*TEST_KEY, Lsn(0x25)) {
6749 : Ok(_) => panic!("request for page should have failed"),
6750 : Err(err) => assert!(err.to_string().contains("not found at")),
6751 : }
6752 : Ok(())
6753 : }
6754 : */
6755 :
6756 : #[tokio::test]
6757 1 : async fn test_get_branchpoints_from_an_inactive_timeline() -> anyhow::Result<()> {
6758 1 : let (tenant, ctx) =
6759 1 : TenantHarness::create("test_get_branchpoints_from_an_inactive_timeline")
6760 1 : .await?
6761 1 : .load()
6762 1 : .await;
6763 1 : let tline = tenant
6764 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6765 1 : .await?;
6766 1 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
6767 1 :
6768 1 : tenant
6769 1 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
6770 1 : .await?;
6771 1 : let newtline = tenant
6772 1 : .get_timeline(NEW_TIMELINE_ID, true)
6773 1 : .expect("Should have a local timeline");
6774 1 :
6775 1 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
6776 1 :
6777 1 : tline.set_broken("test".to_owned());
6778 1 :
6779 1 : tenant
6780 1 : .gc_iteration(
6781 1 : Some(TIMELINE_ID),
6782 1 : 0x10,
6783 1 : Duration::ZERO,
6784 1 : &CancellationToken::new(),
6785 1 : &ctx,
6786 1 : )
6787 1 : .await?;
6788 1 :
6789 1 : // The branchpoints should contain all timelines, even ones marked
6790 1 : // as Broken.
6791 1 : {
6792 1 : let branchpoints = &tline.gc_info.read().unwrap().retain_lsns;
6793 1 : assert_eq!(branchpoints.len(), 1);
6794 1 : assert_eq!(
6795 1 : branchpoints[0],
6796 1 : (Lsn(0x40), NEW_TIMELINE_ID, MaybeOffloaded::No)
6797 1 : );
6798 1 : }
6799 1 :
6800 1 : // You can read the key from the child branch even though the parent is
6801 1 : // Broken, as long as you don't need to access data from the parent.
6802 1 : assert_eq!(
6803 1 : newtline.get(*TEST_KEY, Lsn(0x70), &ctx).await?,
6804 1 : test_img(&format!("foo at {}", Lsn(0x70)))
6805 1 : );
6806 1 :
6807 1 : // This needs to traverse to the parent, and fails.
6808 1 : let err = newtline.get(*TEST_KEY, Lsn(0x50), &ctx).await.unwrap_err();
6809 1 : assert!(
6810 1 : err.to_string().starts_with(&format!(
6811 1 : "bad state on timeline {}: Broken",
6812 1 : tline.timeline_id
6813 1 : )),
6814 1 : "{err}"
6815 1 : );
6816 1 :
6817 1 : Ok(())
6818 1 : }
6819 :
6820 : #[tokio::test]
6821 1 : async fn test_retain_data_in_parent_which_is_needed_for_child() -> anyhow::Result<()> {
6822 1 : let (tenant, ctx) =
6823 1 : TenantHarness::create("test_retain_data_in_parent_which_is_needed_for_child")
6824 1 : .await?
6825 1 : .load()
6826 1 : .await;
6827 1 : let tline = tenant
6828 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6829 1 : .await?;
6830 1 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
6831 1 :
6832 1 : tenant
6833 1 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
6834 1 : .await?;
6835 1 : let newtline = tenant
6836 1 : .get_timeline(NEW_TIMELINE_ID, true)
6837 1 : .expect("Should have a local timeline");
6838 1 : // this removes layers before lsn 40 (50 minus 10), so there are two remaining layers, image and delta for 31-50
6839 1 : tenant
6840 1 : .gc_iteration(
6841 1 : Some(TIMELINE_ID),
6842 1 : 0x10,
6843 1 : Duration::ZERO,
6844 1 : &CancellationToken::new(),
6845 1 : &ctx,
6846 1 : )
6847 1 : .await?;
6848 1 : assert!(newtline.get(*TEST_KEY, Lsn(0x25), &ctx).await.is_ok());
6849 1 :
6850 1 : Ok(())
6851 1 : }
6852 : #[tokio::test]
6853 1 : async fn test_parent_keeps_data_forever_after_branching() -> anyhow::Result<()> {
6854 1 : let (tenant, ctx) = TenantHarness::create("test_parent_keeps_data_forever_after_branching")
6855 1 : .await?
6856 1 : .load()
6857 1 : .await;
6858 1 : let tline = tenant
6859 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6860 1 : .await?;
6861 1 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
6862 1 :
6863 1 : tenant
6864 1 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
6865 1 : .await?;
6866 1 : let newtline = tenant
6867 1 : .get_timeline(NEW_TIMELINE_ID, true)
6868 1 : .expect("Should have a local timeline");
6869 1 :
6870 1 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
6871 1 :
6872 1 : // run gc on parent
6873 1 : tenant
6874 1 : .gc_iteration(
6875 1 : Some(TIMELINE_ID),
6876 1 : 0x10,
6877 1 : Duration::ZERO,
6878 1 : &CancellationToken::new(),
6879 1 : &ctx,
6880 1 : )
6881 1 : .await?;
6882 1 :
6883 1 : // Check that the data is still accessible on the branch.
6884 1 : assert_eq!(
6885 1 : newtline.get(*TEST_KEY, Lsn(0x50), &ctx).await?,
6886 1 : test_img(&format!("foo at {}", Lsn(0x40)))
6887 1 : );
6888 1 :
6889 1 : Ok(())
6890 1 : }
6891 :
6892 : #[tokio::test]
6893 1 : async fn timeline_load() -> anyhow::Result<()> {
6894 1 : const TEST_NAME: &str = "timeline_load";
6895 1 : let harness = TenantHarness::create(TEST_NAME).await?;
6896 1 : {
6897 1 : let (tenant, ctx) = harness.load().await;
6898 1 : let tline = tenant
6899 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x7000), DEFAULT_PG_VERSION, &ctx)
6900 1 : .await?;
6901 1 : make_some_layers(tline.as_ref(), Lsn(0x8000), &ctx).await?;
6902 1 : // so that all uploads finish & we can call harness.load() below again
6903 1 : tenant
6904 1 : .shutdown(Default::default(), ShutdownMode::FreezeAndFlush)
6905 1 : .instrument(harness.span())
6906 1 : .await
6907 1 : .ok()
6908 1 : .unwrap();
6909 1 : }
6910 1 :
6911 1 : let (tenant, _ctx) = harness.load().await;
6912 1 : tenant
6913 1 : .get_timeline(TIMELINE_ID, true)
6914 1 : .expect("cannot load timeline");
6915 1 :
6916 1 : Ok(())
6917 1 : }
6918 :
6919 : #[tokio::test]
6920 1 : async fn timeline_load_with_ancestor() -> anyhow::Result<()> {
6921 1 : const TEST_NAME: &str = "timeline_load_with_ancestor";
6922 1 : let harness = TenantHarness::create(TEST_NAME).await?;
6923 1 : // create two timelines
6924 1 : {
6925 1 : let (tenant, ctx) = harness.load().await;
6926 1 : let tline = tenant
6927 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6928 1 : .await?;
6929 1 :
6930 1 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
6931 1 :
6932 1 : let child_tline = tenant
6933 1 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
6934 1 : .await?;
6935 1 : child_tline.set_state(TimelineState::Active);
6936 1 :
6937 1 : let newtline = tenant
6938 1 : .get_timeline(NEW_TIMELINE_ID, true)
6939 1 : .expect("Should have a local timeline");
6940 1 :
6941 1 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
6942 1 :
6943 1 : // so that all uploads finish & we can call harness.load() below again
6944 1 : tenant
6945 1 : .shutdown(Default::default(), ShutdownMode::FreezeAndFlush)
6946 1 : .instrument(harness.span())
6947 1 : .await
6948 1 : .ok()
6949 1 : .unwrap();
6950 1 : }
6951 1 :
6952 1 : // check that both of them are initially unloaded
6953 1 : let (tenant, _ctx) = harness.load().await;
6954 1 :
6955 1 : // check that both, child and ancestor are loaded
6956 1 : let _child_tline = tenant
6957 1 : .get_timeline(NEW_TIMELINE_ID, true)
6958 1 : .expect("cannot get child timeline loaded");
6959 1 :
6960 1 : let _ancestor_tline = tenant
6961 1 : .get_timeline(TIMELINE_ID, true)
6962 1 : .expect("cannot get ancestor timeline loaded");
6963 1 :
6964 1 : Ok(())
6965 1 : }
6966 :
6967 : #[tokio::test]
6968 1 : async fn delta_layer_dumping() -> anyhow::Result<()> {
6969 1 : use storage_layer::AsLayerDesc;
6970 1 : let (tenant, ctx) = TenantHarness::create("test_layer_dumping")
6971 1 : .await?
6972 1 : .load()
6973 1 : .await;
6974 1 : let tline = tenant
6975 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6976 1 : .await?;
6977 1 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
6978 1 :
6979 1 : let layer_map = tline.layers.read(LayerManagerLockHolder::Testing).await;
6980 1 : let level0_deltas = layer_map
6981 1 : .layer_map()?
6982 1 : .level0_deltas()
6983 1 : .iter()
6984 2 : .map(|desc| layer_map.get_from_desc(desc))
6985 1 : .collect::<Vec<_>>();
6986 1 :
6987 1 : assert!(!level0_deltas.is_empty());
6988 1 :
6989 3 : for delta in level0_deltas {
6990 1 : // Ensure we are dumping a delta layer here
6991 2 : assert!(delta.layer_desc().is_delta);
6992 2 : delta.dump(true, &ctx).await.unwrap();
6993 1 : }
6994 1 :
6995 1 : Ok(())
6996 1 : }
6997 :
6998 : #[tokio::test]
6999 1 : async fn test_images() -> anyhow::Result<()> {
7000 1 : let (tenant, ctx) = TenantHarness::create("test_images").await?.load().await;
7001 1 : let tline = tenant
7002 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
7003 1 : .await?;
7004 1 :
7005 1 : let mut writer = tline.writer().await;
7006 1 : writer
7007 1 : .put(
7008 1 : *TEST_KEY,
7009 1 : Lsn(0x10),
7010 1 : &Value::Image(test_img("foo at 0x10")),
7011 1 : &ctx,
7012 1 : )
7013 1 : .await?;
7014 1 : writer.finish_write(Lsn(0x10));
7015 1 : drop(writer);
7016 1 :
7017 1 : tline.freeze_and_flush().await?;
7018 1 : tline
7019 1 : .compact(&CancellationToken::new(), EnumSet::default(), &ctx)
7020 1 : .await?;
7021 1 :
7022 1 : let mut writer = tline.writer().await;
7023 1 : writer
7024 1 : .put(
7025 1 : *TEST_KEY,
7026 1 : Lsn(0x20),
7027 1 : &Value::Image(test_img("foo at 0x20")),
7028 1 : &ctx,
7029 1 : )
7030 1 : .await?;
7031 1 : writer.finish_write(Lsn(0x20));
7032 1 : drop(writer);
7033 1 :
7034 1 : tline.freeze_and_flush().await?;
7035 1 : tline
7036 1 : .compact(&CancellationToken::new(), EnumSet::default(), &ctx)
7037 1 : .await?;
7038 1 :
7039 1 : let mut writer = tline.writer().await;
7040 1 : writer
7041 1 : .put(
7042 1 : *TEST_KEY,
7043 1 : Lsn(0x30),
7044 1 : &Value::Image(test_img("foo at 0x30")),
7045 1 : &ctx,
7046 1 : )
7047 1 : .await?;
7048 1 : writer.finish_write(Lsn(0x30));
7049 1 : drop(writer);
7050 1 :
7051 1 : tline.freeze_and_flush().await?;
7052 1 : tline
7053 1 : .compact(&CancellationToken::new(), EnumSet::default(), &ctx)
7054 1 : .await?;
7055 1 :
7056 1 : let mut writer = tline.writer().await;
7057 1 : writer
7058 1 : .put(
7059 1 : *TEST_KEY,
7060 1 : Lsn(0x40),
7061 1 : &Value::Image(test_img("foo at 0x40")),
7062 1 : &ctx,
7063 1 : )
7064 1 : .await?;
7065 1 : writer.finish_write(Lsn(0x40));
7066 1 : drop(writer);
7067 1 :
7068 1 : tline.freeze_and_flush().await?;
7069 1 : tline
7070 1 : .compact(&CancellationToken::new(), EnumSet::default(), &ctx)
7071 1 : .await?;
7072 1 :
7073 1 : assert_eq!(
7074 1 : tline.get(*TEST_KEY, Lsn(0x10), &ctx).await?,
7075 1 : test_img("foo at 0x10")
7076 1 : );
7077 1 : assert_eq!(
7078 1 : tline.get(*TEST_KEY, Lsn(0x1f), &ctx).await?,
7079 1 : test_img("foo at 0x10")
7080 1 : );
7081 1 : assert_eq!(
7082 1 : tline.get(*TEST_KEY, Lsn(0x20), &ctx).await?,
7083 1 : test_img("foo at 0x20")
7084 1 : );
7085 1 : assert_eq!(
7086 1 : tline.get(*TEST_KEY, Lsn(0x30), &ctx).await?,
7087 1 : test_img("foo at 0x30")
7088 1 : );
7089 1 : assert_eq!(
7090 1 : tline.get(*TEST_KEY, Lsn(0x40), &ctx).await?,
7091 1 : test_img("foo at 0x40")
7092 1 : );
7093 1 :
7094 1 : Ok(())
7095 1 : }
7096 :
7097 2 : async fn bulk_insert_compact_gc(
7098 2 : tenant: &TenantShard,
7099 2 : timeline: &Arc<Timeline>,
7100 2 : ctx: &RequestContext,
7101 2 : lsn: Lsn,
7102 2 : repeat: usize,
7103 2 : key_count: usize,
7104 2 : ) -> anyhow::Result<HashMap<Key, BTreeSet<Lsn>>> {
7105 2 : let compact = true;
7106 2 : bulk_insert_maybe_compact_gc(tenant, timeline, ctx, lsn, repeat, key_count, compact).await
7107 2 : }
7108 :
7109 4 : async fn bulk_insert_maybe_compact_gc(
7110 4 : tenant: &TenantShard,
7111 4 : timeline: &Arc<Timeline>,
7112 4 : ctx: &RequestContext,
7113 4 : mut lsn: Lsn,
7114 4 : repeat: usize,
7115 4 : key_count: usize,
7116 4 : compact: bool,
7117 4 : ) -> anyhow::Result<HashMap<Key, BTreeSet<Lsn>>> {
7118 4 : let mut inserted: HashMap<Key, BTreeSet<Lsn>> = Default::default();
7119 4 :
7120 4 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
7121 4 : let mut blknum = 0;
7122 4 :
7123 4 : // Enforce that key range is monotonously increasing
7124 4 : let mut keyspace = KeySpaceAccum::new();
7125 4 :
7126 4 : let cancel = CancellationToken::new();
7127 4 :
7128 4 : for _ in 0..repeat {
7129 200 : for _ in 0..key_count {
7130 2000000 : test_key.field6 = blknum;
7131 2000000 : let mut writer = timeline.writer().await;
7132 2000000 : writer
7133 2000000 : .put(
7134 2000000 : test_key,
7135 2000000 : lsn,
7136 2000000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
7137 2000000 : ctx,
7138 2000000 : )
7139 2000000 : .await?;
7140 2000000 : inserted.entry(test_key).or_default().insert(lsn);
7141 2000000 : writer.finish_write(lsn);
7142 2000000 : drop(writer);
7143 2000000 :
7144 2000000 : keyspace.add_key(test_key);
7145 2000000 :
7146 2000000 : lsn = Lsn(lsn.0 + 0x10);
7147 2000000 : blknum += 1;
7148 : }
7149 :
7150 200 : timeline.freeze_and_flush().await?;
7151 200 : if compact {
7152 : // this requires timeline to be &Arc<Timeline>
7153 100 : timeline.compact(&cancel, EnumSet::default(), ctx).await?;
7154 100 : }
7155 :
7156 : // this doesn't really need to use the timeline_id target, but it is closer to what it
7157 : // originally was.
7158 200 : let res = tenant
7159 200 : .gc_iteration(Some(timeline.timeline_id), 0, Duration::ZERO, &cancel, ctx)
7160 200 : .await?;
7161 :
7162 200 : assert_eq!(res.layers_removed, 0, "this never removes anything");
7163 : }
7164 :
7165 4 : Ok(inserted)
7166 4 : }
7167 :
7168 : //
7169 : // Insert 1000 key-value pairs with increasing keys, flush, compact, GC.
7170 : // Repeat 50 times.
7171 : //
7172 : #[tokio::test]
7173 1 : async fn test_bulk_insert() -> anyhow::Result<()> {
7174 1 : let harness = TenantHarness::create("test_bulk_insert").await?;
7175 1 : let (tenant, ctx) = harness.load().await;
7176 1 : let tline = tenant
7177 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
7178 1 : .await?;
7179 1 :
7180 1 : let lsn = Lsn(0x10);
7181 1 : bulk_insert_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000).await?;
7182 1 :
7183 1 : Ok(())
7184 1 : }
7185 :
7186 : // Test the vectored get real implementation against a simple sequential implementation.
7187 : //
7188 : // The test generates a keyspace by repeatedly flushing the in-memory layer and compacting.
7189 : // Projected to 2D the key space looks like below. Lsn grows upwards on the Y axis and keys
7190 : // grow to the right on the X axis.
7191 : // [Delta]
7192 : // [Delta]
7193 : // [Delta]
7194 : // [Delta]
7195 : // ------------ Image ---------------
7196 : //
7197 : // After layer generation we pick the ranges to query as follows:
7198 : // 1. The beginning of each delta layer
7199 : // 2. At the seam between two adjacent delta layers
7200 : //
7201 : // There's one major downside to this test: delta layers only contains images,
7202 : // so the search can stop at the first delta layer and doesn't traverse any deeper.
7203 : #[tokio::test]
7204 1 : async fn test_get_vectored() -> anyhow::Result<()> {
7205 1 : let harness = TenantHarness::create("test_get_vectored").await?;
7206 1 : let (tenant, ctx) = harness.load().await;
7207 1 : let io_concurrency = IoConcurrency::spawn_for_test();
7208 1 : let tline = tenant
7209 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
7210 1 : .await?;
7211 1 :
7212 1 : let lsn = Lsn(0x10);
7213 1 : let inserted = bulk_insert_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000).await?;
7214 1 :
7215 1 : let guard = tline.layers.read(LayerManagerLockHolder::Testing).await;
7216 1 : let lm = guard.layer_map()?;
7217 1 :
7218 1 : lm.dump(true, &ctx).await?;
7219 1 :
7220 1 : let mut reads = Vec::new();
7221 1 : let mut prev = None;
7222 6 : lm.iter_historic_layers().for_each(|desc| {
7223 6 : if !desc.is_delta() {
7224 1 : prev = Some(desc.clone());
7225 1 : return;
7226 5 : }
7227 5 :
7228 5 : let start = desc.key_range.start;
7229 5 : let end = desc
7230 5 : .key_range
7231 5 : .start
7232 5 : .add(tenant.conf.max_get_vectored_keys.get() as u32);
7233 5 : reads.push(KeySpace {
7234 5 : ranges: vec![start..end],
7235 5 : });
7236 1 :
7237 5 : if let Some(prev) = &prev {
7238 5 : if !prev.is_delta() {
7239 5 : return;
7240 1 : }
7241 0 :
7242 0 : let first_range = Key {
7243 0 : field6: prev.key_range.end.field6 - 4,
7244 0 : ..prev.key_range.end
7245 0 : }..prev.key_range.end;
7246 0 :
7247 0 : let second_range = desc.key_range.start..Key {
7248 0 : field6: desc.key_range.start.field6 + 4,
7249 0 : ..desc.key_range.start
7250 0 : };
7251 0 :
7252 0 : reads.push(KeySpace {
7253 0 : ranges: vec![first_range, second_range],
7254 0 : });
7255 1 : };
7256 1 :
7257 1 : prev = Some(desc.clone());
7258 6 : });
7259 1 :
7260 1 : drop(guard);
7261 1 :
7262 1 : // Pick a big LSN such that we query over all the changes.
7263 1 : let reads_lsn = Lsn(u64::MAX - 1);
7264 1 :
7265 6 : for read in reads {
7266 5 : info!("Doing vectored read on {:?}", read);
7267 1 :
7268 5 : let query = VersionedKeySpaceQuery::uniform(read.clone(), reads_lsn);
7269 1 :
7270 5 : let vectored_res = tline
7271 5 : .get_vectored_impl(
7272 5 : query,
7273 5 : &mut ValuesReconstructState::new(io_concurrency.clone()),
7274 5 : &ctx,
7275 5 : )
7276 5 : .await;
7277 1 :
7278 5 : let mut expected_lsns: HashMap<Key, Lsn> = Default::default();
7279 5 : let mut expect_missing = false;
7280 5 : let mut key = read.start().unwrap();
7281 165 : while key != read.end().unwrap() {
7282 160 : if let Some(lsns) = inserted.get(&key) {
7283 160 : let expected_lsn = lsns.iter().rfind(|lsn| **lsn <= reads_lsn);
7284 160 : match expected_lsn {
7285 160 : Some(lsn) => {
7286 160 : expected_lsns.insert(key, *lsn);
7287 160 : }
7288 1 : None => {
7289 1 : expect_missing = true;
7290 0 : break;
7291 1 : }
7292 1 : }
7293 1 : } else {
7294 1 : expect_missing = true;
7295 0 : break;
7296 1 : }
7297 1 :
7298 160 : key = key.next();
7299 1 : }
7300 1 :
7301 5 : if expect_missing {
7302 1 : assert!(matches!(vectored_res, Err(GetVectoredError::MissingKey(_))));
7303 1 : } else {
7304 160 : for (key, image) in vectored_res? {
7305 160 : let expected_lsn = expected_lsns.get(&key).expect("determined above");
7306 160 : let expected_image = test_img(&format!("{} at {}", key.field6, expected_lsn));
7307 160 : assert_eq!(image?, expected_image);
7308 1 : }
7309 1 : }
7310 1 : }
7311 1 :
7312 1 : Ok(())
7313 1 : }
7314 :
7315 : #[tokio::test]
7316 1 : async fn test_get_vectored_aux_files() -> anyhow::Result<()> {
7317 1 : let harness = TenantHarness::create("test_get_vectored_aux_files").await?;
7318 1 :
7319 1 : let (tenant, ctx) = harness.load().await;
7320 1 : let io_concurrency = IoConcurrency::spawn_for_test();
7321 1 : let (tline, ctx) = tenant
7322 1 : .create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION, &ctx)
7323 1 : .await?;
7324 1 : let tline = tline.raw_timeline().unwrap();
7325 1 :
7326 1 : let mut modification = tline.begin_modification(Lsn(0x1000));
7327 1 : modification.put_file("foo/bar1", b"content1", &ctx).await?;
7328 1 : modification.set_lsn(Lsn(0x1008))?;
7329 1 : modification.put_file("foo/bar2", b"content2", &ctx).await?;
7330 1 : modification.commit(&ctx).await?;
7331 1 :
7332 1 : let child_timeline_id = TimelineId::generate();
7333 1 : tenant
7334 1 : .branch_timeline_test(
7335 1 : tline,
7336 1 : child_timeline_id,
7337 1 : Some(tline.get_last_record_lsn()),
7338 1 : &ctx,
7339 1 : )
7340 1 : .await?;
7341 1 :
7342 1 : let child_timeline = tenant
7343 1 : .get_timeline(child_timeline_id, true)
7344 1 : .expect("Should have the branched timeline");
7345 1 :
7346 1 : let aux_keyspace = KeySpace {
7347 1 : ranges: vec![NON_INHERITED_RANGE],
7348 1 : };
7349 1 : let read_lsn = child_timeline.get_last_record_lsn();
7350 1 :
7351 1 : let query = VersionedKeySpaceQuery::uniform(aux_keyspace.clone(), read_lsn);
7352 1 :
7353 1 : let vectored_res = child_timeline
7354 1 : .get_vectored_impl(
7355 1 : query,
7356 1 : &mut ValuesReconstructState::new(io_concurrency.clone()),
7357 1 : &ctx,
7358 1 : )
7359 1 : .await;
7360 1 :
7361 1 : let images = vectored_res?;
7362 1 : assert!(images.is_empty());
7363 1 : Ok(())
7364 1 : }
7365 :
7366 : // Test that vectored get handles layer gaps correctly
7367 : // by advancing into the next ancestor timeline if required.
7368 : //
7369 : // The test generates timelines that look like the diagram below.
7370 : // We leave a gap in one of the L1 layers at `gap_at_key` (`/` in the diagram).
7371 : // The reconstruct data for that key lies in the ancestor timeline (`X` in the diagram).
7372 : //
7373 : // ```
7374 : //-------------------------------+
7375 : // ... |
7376 : // [ L1 ] |
7377 : // [ / L1 ] | Child Timeline
7378 : // ... |
7379 : // ------------------------------+
7380 : // [ X L1 ] | Parent Timeline
7381 : // ------------------------------+
7382 : // ```
7383 : #[tokio::test]
7384 1 : async fn test_get_vectored_key_gap() -> anyhow::Result<()> {
7385 1 : let tenant_conf = pageserver_api::models::TenantConfig {
7386 1 : // Make compaction deterministic
7387 1 : gc_period: Some(Duration::ZERO),
7388 1 : compaction_period: Some(Duration::ZERO),
7389 1 : // Encourage creation of L1 layers
7390 1 : checkpoint_distance: Some(16 * 1024),
7391 1 : compaction_target_size: Some(8 * 1024),
7392 1 : ..Default::default()
7393 1 : };
7394 1 :
7395 1 : let harness = TenantHarness::create_custom(
7396 1 : "test_get_vectored_key_gap",
7397 1 : tenant_conf,
7398 1 : TenantId::generate(),
7399 1 : ShardIdentity::unsharded(),
7400 1 : Generation::new(0xdeadbeef),
7401 1 : )
7402 1 : .await?;
7403 1 : let (tenant, ctx) = harness.load().await;
7404 1 : let io_concurrency = IoConcurrency::spawn_for_test();
7405 1 :
7406 1 : let mut current_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
7407 1 : let gap_at_key = current_key.add(100);
7408 1 : let mut current_lsn = Lsn(0x10);
7409 1 :
7410 1 : const KEY_COUNT: usize = 10_000;
7411 1 :
7412 1 : let timeline_id = TimelineId::generate();
7413 1 : let current_timeline = tenant
7414 1 : .create_test_timeline(timeline_id, current_lsn, DEFAULT_PG_VERSION, &ctx)
7415 1 : .await?;
7416 1 :
7417 1 : current_lsn += 0x100;
7418 1 :
7419 1 : let mut writer = current_timeline.writer().await;
7420 1 : writer
7421 1 : .put(
7422 1 : gap_at_key,
7423 1 : current_lsn,
7424 1 : &Value::Image(test_img(&format!("{} at {}", gap_at_key, current_lsn))),
7425 1 : &ctx,
7426 1 : )
7427 1 : .await?;
7428 1 : writer.finish_write(current_lsn);
7429 1 : drop(writer);
7430 1 :
7431 1 : let mut latest_lsns = HashMap::new();
7432 1 : latest_lsns.insert(gap_at_key, current_lsn);
7433 1 :
7434 1 : current_timeline.freeze_and_flush().await?;
7435 1 :
7436 1 : let child_timeline_id = TimelineId::generate();
7437 1 :
7438 1 : tenant
7439 1 : .branch_timeline_test(
7440 1 : ¤t_timeline,
7441 1 : child_timeline_id,
7442 1 : Some(current_lsn),
7443 1 : &ctx,
7444 1 : )
7445 1 : .await?;
7446 1 : let child_timeline = tenant
7447 1 : .get_timeline(child_timeline_id, true)
7448 1 : .expect("Should have the branched timeline");
7449 1 :
7450 10001 : for i in 0..KEY_COUNT {
7451 10000 : if current_key == gap_at_key {
7452 1 : current_key = current_key.next();
7453 1 : continue;
7454 9999 : }
7455 9999 :
7456 9999 : current_lsn += 0x10;
7457 1 :
7458 9999 : let mut writer = child_timeline.writer().await;
7459 9999 : writer
7460 9999 : .put(
7461 9999 : current_key,
7462 9999 : current_lsn,
7463 9999 : &Value::Image(test_img(&format!("{} at {}", current_key, current_lsn))),
7464 9999 : &ctx,
7465 9999 : )
7466 9999 : .await?;
7467 9999 : writer.finish_write(current_lsn);
7468 9999 : drop(writer);
7469 9999 :
7470 9999 : latest_lsns.insert(current_key, current_lsn);
7471 9999 : current_key = current_key.next();
7472 9999 :
7473 9999 : // Flush every now and then to encourage layer file creation.
7474 9999 : if i % 500 == 0 {
7475 20 : child_timeline.freeze_and_flush().await?;
7476 9979 : }
7477 1 : }
7478 1 :
7479 1 : child_timeline.freeze_and_flush().await?;
7480 1 : let mut flags = EnumSet::new();
7481 1 : flags.insert(CompactFlags::ForceRepartition);
7482 1 : child_timeline
7483 1 : .compact(&CancellationToken::new(), flags, &ctx)
7484 1 : .await?;
7485 1 :
7486 1 : let key_near_end = {
7487 1 : let mut tmp = current_key;
7488 1 : tmp.field6 -= 10;
7489 1 : tmp
7490 1 : };
7491 1 :
7492 1 : let key_near_gap = {
7493 1 : let mut tmp = gap_at_key;
7494 1 : tmp.field6 -= 10;
7495 1 : tmp
7496 1 : };
7497 1 :
7498 1 : let read = KeySpace {
7499 1 : ranges: vec![key_near_gap..gap_at_key.next(), key_near_end..current_key],
7500 1 : };
7501 1 :
7502 1 : let query = VersionedKeySpaceQuery::uniform(read.clone(), current_lsn);
7503 1 :
7504 1 : let results = child_timeline
7505 1 : .get_vectored_impl(
7506 1 : query,
7507 1 : &mut ValuesReconstructState::new(io_concurrency.clone()),
7508 1 : &ctx,
7509 1 : )
7510 1 : .await?;
7511 1 :
7512 22 : for (key, img_res) in results {
7513 21 : let expected = test_img(&format!("{} at {}", key, latest_lsns[&key]));
7514 21 : assert_eq!(img_res?, expected);
7515 1 : }
7516 1 :
7517 1 : Ok(())
7518 1 : }
7519 :
7520 : // Test that vectored get descends into ancestor timelines correctly and
7521 : // does not return an image that's newer than requested.
7522 : //
7523 : // The diagram below ilustrates an interesting case. We have a parent timeline
7524 : // (top of the Lsn range) and a child timeline. The request key cannot be reconstructed
7525 : // from the child timeline, so the parent timeline must be visited. When advacing into
7526 : // the child timeline, the read path needs to remember what the requested Lsn was in
7527 : // order to avoid returning an image that's too new. The test below constructs such
7528 : // a timeline setup and does a few queries around the Lsn of each page image.
7529 : // ```
7530 : // LSN
7531 : // ^
7532 : // |
7533 : // |
7534 : // 500 | --------------------------------------> branch point
7535 : // 400 | X
7536 : // 300 | X
7537 : // 200 | --------------------------------------> requested lsn
7538 : // 100 | X
7539 : // |---------------------------------------> Key
7540 : // |
7541 : // ------> requested key
7542 : //
7543 : // Legend:
7544 : // * X - page images
7545 : // ```
7546 : #[tokio::test]
7547 1 : async fn test_get_vectored_ancestor_descent() -> anyhow::Result<()> {
7548 1 : let harness = TenantHarness::create("test_get_vectored_on_lsn_axis").await?;
7549 1 : let (tenant, ctx) = harness.load().await;
7550 1 : let io_concurrency = IoConcurrency::spawn_for_test();
7551 1 :
7552 1 : let start_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
7553 1 : let end_key = start_key.add(1000);
7554 1 : let child_gap_at_key = start_key.add(500);
7555 1 : let mut parent_gap_lsns: BTreeMap<Lsn, String> = BTreeMap::new();
7556 1 :
7557 1 : let mut current_lsn = Lsn(0x10);
7558 1 :
7559 1 : let timeline_id = TimelineId::generate();
7560 1 : let parent_timeline = tenant
7561 1 : .create_test_timeline(timeline_id, current_lsn, DEFAULT_PG_VERSION, &ctx)
7562 1 : .await?;
7563 1 :
7564 1 : current_lsn += 0x100;
7565 1 :
7566 4 : for _ in 0..3 {
7567 3 : let mut key = start_key;
7568 3003 : while key < end_key {
7569 3000 : current_lsn += 0x10;
7570 3000 :
7571 3000 : let image_value = format!("{} at {}", child_gap_at_key, current_lsn);
7572 1 :
7573 3000 : let mut writer = parent_timeline.writer().await;
7574 3000 : writer
7575 3000 : .put(
7576 3000 : key,
7577 3000 : current_lsn,
7578 3000 : &Value::Image(test_img(&image_value)),
7579 3000 : &ctx,
7580 3000 : )
7581 3000 : .await?;
7582 3000 : writer.finish_write(current_lsn);
7583 3000 :
7584 3000 : if key == child_gap_at_key {
7585 3 : parent_gap_lsns.insert(current_lsn, image_value);
7586 2997 : }
7587 1 :
7588 3000 : key = key.next();
7589 1 : }
7590 1 :
7591 3 : parent_timeline.freeze_and_flush().await?;
7592 1 : }
7593 1 :
7594 1 : let child_timeline_id = TimelineId::generate();
7595 1 :
7596 1 : let child_timeline = tenant
7597 1 : .branch_timeline_test(&parent_timeline, child_timeline_id, Some(current_lsn), &ctx)
7598 1 : .await?;
7599 1 :
7600 1 : let mut key = start_key;
7601 1001 : while key < end_key {
7602 1000 : if key == child_gap_at_key {
7603 1 : key = key.next();
7604 1 : continue;
7605 999 : }
7606 999 :
7607 999 : current_lsn += 0x10;
7608 1 :
7609 999 : let mut writer = child_timeline.writer().await;
7610 999 : writer
7611 999 : .put(
7612 999 : key,
7613 999 : current_lsn,
7614 999 : &Value::Image(test_img(&format!("{} at {}", key, current_lsn))),
7615 999 : &ctx,
7616 999 : )
7617 999 : .await?;
7618 999 : writer.finish_write(current_lsn);
7619 999 :
7620 999 : key = key.next();
7621 1 : }
7622 1 :
7623 1 : child_timeline.freeze_and_flush().await?;
7624 1 :
7625 1 : let lsn_offsets: [i64; 5] = [-10, -1, 0, 1, 10];
7626 1 : let mut query_lsns = Vec::new();
7627 3 : for image_lsn in parent_gap_lsns.keys().rev() {
7628 18 : for offset in lsn_offsets {
7629 15 : query_lsns.push(Lsn(image_lsn
7630 15 : .0
7631 15 : .checked_add_signed(offset)
7632 15 : .expect("Shouldn't overflow")));
7633 15 : }
7634 1 : }
7635 1 :
7636 16 : for query_lsn in query_lsns {
7637 15 : let query = VersionedKeySpaceQuery::uniform(
7638 15 : KeySpace {
7639 15 : ranges: vec![child_gap_at_key..child_gap_at_key.next()],
7640 15 : },
7641 15 : query_lsn,
7642 15 : );
7643 1 :
7644 15 : let results = child_timeline
7645 15 : .get_vectored_impl(
7646 15 : query,
7647 15 : &mut ValuesReconstructState::new(io_concurrency.clone()),
7648 15 : &ctx,
7649 15 : )
7650 15 : .await;
7651 1 :
7652 15 : let expected_item = parent_gap_lsns
7653 15 : .iter()
7654 15 : .rev()
7655 34 : .find(|(lsn, _)| **lsn <= query_lsn);
7656 15 :
7657 15 : info!(
7658 1 : "Doing vectored read at LSN {}. Expecting image to be: {:?}",
7659 1 : query_lsn, expected_item
7660 1 : );
7661 1 :
7662 15 : match expected_item {
7663 13 : Some((_, img_value)) => {
7664 13 : let key_results = results.expect("No vectored get error expected");
7665 13 : let key_result = &key_results[&child_gap_at_key];
7666 13 : let returned_img = key_result
7667 13 : .as_ref()
7668 13 : .expect("No page reconstruct error expected");
7669 13 :
7670 13 : info!(
7671 1 : "Vectored read at LSN {} returned image {}",
7672 0 : query_lsn,
7673 0 : std::str::from_utf8(returned_img)?
7674 1 : );
7675 13 : assert_eq!(*returned_img, test_img(img_value));
7676 1 : }
7677 1 : None => {
7678 2 : assert!(matches!(results, Err(GetVectoredError::MissingKey(_))));
7679 1 : }
7680 1 : }
7681 1 : }
7682 1 :
7683 1 : Ok(())
7684 1 : }
7685 :
7686 : #[tokio::test]
7687 1 : async fn test_random_updates() -> anyhow::Result<()> {
7688 1 : let names_algorithms = [
7689 1 : ("test_random_updates_legacy", CompactionAlgorithm::Legacy),
7690 1 : ("test_random_updates_tiered", CompactionAlgorithm::Tiered),
7691 1 : ];
7692 3 : for (name, algorithm) in names_algorithms {
7693 2 : test_random_updates_algorithm(name, algorithm).await?;
7694 1 : }
7695 1 : Ok(())
7696 1 : }
7697 :
7698 2 : async fn test_random_updates_algorithm(
7699 2 : name: &'static str,
7700 2 : compaction_algorithm: CompactionAlgorithm,
7701 2 : ) -> anyhow::Result<()> {
7702 2 : let mut harness = TenantHarness::create(name).await?;
7703 2 : harness.tenant_conf.compaction_algorithm = Some(CompactionAlgorithmSettings {
7704 2 : kind: compaction_algorithm,
7705 2 : });
7706 2 : let (tenant, ctx) = harness.load().await;
7707 2 : let tline = tenant
7708 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
7709 2 : .await?;
7710 :
7711 : const NUM_KEYS: usize = 1000;
7712 2 : let cancel = CancellationToken::new();
7713 2 :
7714 2 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
7715 2 : let mut test_key_end = test_key;
7716 2 : test_key_end.field6 = NUM_KEYS as u32;
7717 2 : tline.add_extra_test_dense_keyspace(KeySpace::single(test_key..test_key_end));
7718 2 :
7719 2 : let mut keyspace = KeySpaceAccum::new();
7720 2 :
7721 2 : // Track when each page was last modified. Used to assert that
7722 2 : // a read sees the latest page version.
7723 2 : let mut updated = [Lsn(0); NUM_KEYS];
7724 2 :
7725 2 : let mut lsn = Lsn(0x10);
7726 : #[allow(clippy::needless_range_loop)]
7727 2002 : for blknum in 0..NUM_KEYS {
7728 2000 : lsn = Lsn(lsn.0 + 0x10);
7729 2000 : test_key.field6 = blknum as u32;
7730 2000 : let mut writer = tline.writer().await;
7731 2000 : writer
7732 2000 : .put(
7733 2000 : test_key,
7734 2000 : lsn,
7735 2000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
7736 2000 : &ctx,
7737 2000 : )
7738 2000 : .await?;
7739 2000 : writer.finish_write(lsn);
7740 2000 : updated[blknum] = lsn;
7741 2000 : drop(writer);
7742 2000 :
7743 2000 : keyspace.add_key(test_key);
7744 : }
7745 :
7746 102 : for _ in 0..50 {
7747 100100 : for _ in 0..NUM_KEYS {
7748 100000 : lsn = Lsn(lsn.0 + 0x10);
7749 100000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
7750 100000 : test_key.field6 = blknum as u32;
7751 100000 : let mut writer = tline.writer().await;
7752 100000 : writer
7753 100000 : .put(
7754 100000 : test_key,
7755 100000 : lsn,
7756 100000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
7757 100000 : &ctx,
7758 100000 : )
7759 100000 : .await?;
7760 100000 : writer.finish_write(lsn);
7761 100000 : drop(writer);
7762 100000 : updated[blknum] = lsn;
7763 : }
7764 :
7765 : // Read all the blocks
7766 100000 : for (blknum, last_lsn) in updated.iter().enumerate() {
7767 100000 : test_key.field6 = blknum as u32;
7768 100000 : assert_eq!(
7769 100000 : tline.get(test_key, lsn, &ctx).await?,
7770 100000 : test_img(&format!("{} at {}", blknum, last_lsn))
7771 : );
7772 : }
7773 :
7774 : // Perform a cycle of flush, and GC
7775 100 : tline.freeze_and_flush().await?;
7776 100 : tenant
7777 100 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
7778 100 : .await?;
7779 : }
7780 :
7781 2 : Ok(())
7782 2 : }
7783 :
7784 : #[tokio::test]
7785 1 : async fn test_traverse_branches() -> anyhow::Result<()> {
7786 1 : let (tenant, ctx) = TenantHarness::create("test_traverse_branches")
7787 1 : .await?
7788 1 : .load()
7789 1 : .await;
7790 1 : let mut tline = tenant
7791 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
7792 1 : .await?;
7793 1 :
7794 1 : const NUM_KEYS: usize = 1000;
7795 1 :
7796 1 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
7797 1 :
7798 1 : let mut keyspace = KeySpaceAccum::new();
7799 1 :
7800 1 : let cancel = CancellationToken::new();
7801 1 :
7802 1 : // Track when each page was last modified. Used to assert that
7803 1 : // a read sees the latest page version.
7804 1 : let mut updated = [Lsn(0); NUM_KEYS];
7805 1 :
7806 1 : let mut lsn = Lsn(0x10);
7807 1 : #[allow(clippy::needless_range_loop)]
7808 1001 : for blknum in 0..NUM_KEYS {
7809 1000 : lsn = Lsn(lsn.0 + 0x10);
7810 1000 : test_key.field6 = blknum as u32;
7811 1000 : let mut writer = tline.writer().await;
7812 1000 : writer
7813 1000 : .put(
7814 1000 : test_key,
7815 1000 : lsn,
7816 1000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
7817 1000 : &ctx,
7818 1000 : )
7819 1000 : .await?;
7820 1000 : writer.finish_write(lsn);
7821 1000 : updated[blknum] = lsn;
7822 1000 : drop(writer);
7823 1000 :
7824 1000 : keyspace.add_key(test_key);
7825 1 : }
7826 1 :
7827 51 : for _ in 0..50 {
7828 50 : let new_tline_id = TimelineId::generate();
7829 50 : tenant
7830 50 : .branch_timeline_test(&tline, new_tline_id, Some(lsn), &ctx)
7831 50 : .await?;
7832 50 : tline = tenant
7833 50 : .get_timeline(new_tline_id, true)
7834 50 : .expect("Should have the branched timeline");
7835 1 :
7836 50050 : for _ in 0..NUM_KEYS {
7837 50000 : lsn = Lsn(lsn.0 + 0x10);
7838 50000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
7839 50000 : test_key.field6 = blknum as u32;
7840 50000 : let mut writer = tline.writer().await;
7841 50000 : writer
7842 50000 : .put(
7843 50000 : test_key,
7844 50000 : lsn,
7845 50000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
7846 50000 : &ctx,
7847 50000 : )
7848 50000 : .await?;
7849 50000 : println!("updating {} at {}", blknum, lsn);
7850 50000 : writer.finish_write(lsn);
7851 50000 : drop(writer);
7852 50000 : updated[blknum] = lsn;
7853 1 : }
7854 1 :
7855 1 : // Read all the blocks
7856 50000 : for (blknum, last_lsn) in updated.iter().enumerate() {
7857 50000 : test_key.field6 = blknum as u32;
7858 50000 : assert_eq!(
7859 50000 : tline.get(test_key, lsn, &ctx).await?,
7860 50000 : test_img(&format!("{} at {}", blknum, last_lsn))
7861 1 : );
7862 1 : }
7863 1 :
7864 1 : // Perform a cycle of flush, compact, and GC
7865 50 : tline.freeze_and_flush().await?;
7866 50 : tline.compact(&cancel, EnumSet::default(), &ctx).await?;
7867 50 : tenant
7868 50 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
7869 50 : .await?;
7870 1 : }
7871 1 :
7872 1 : Ok(())
7873 1 : }
7874 :
7875 : #[tokio::test]
7876 1 : async fn test_traverse_ancestors() -> anyhow::Result<()> {
7877 1 : let (tenant, ctx) = TenantHarness::create("test_traverse_ancestors")
7878 1 : .await?
7879 1 : .load()
7880 1 : .await;
7881 1 : let mut tline = tenant
7882 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
7883 1 : .await?;
7884 1 :
7885 1 : const NUM_KEYS: usize = 100;
7886 1 : const NUM_TLINES: usize = 50;
7887 1 :
7888 1 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
7889 1 : // Track page mutation lsns across different timelines.
7890 1 : let mut updated = [[Lsn(0); NUM_KEYS]; NUM_TLINES];
7891 1 :
7892 1 : let mut lsn = Lsn(0x10);
7893 1 :
7894 1 : #[allow(clippy::needless_range_loop)]
7895 51 : for idx in 0..NUM_TLINES {
7896 50 : let new_tline_id = TimelineId::generate();
7897 50 : tenant
7898 50 : .branch_timeline_test(&tline, new_tline_id, Some(lsn), &ctx)
7899 50 : .await?;
7900 50 : tline = tenant
7901 50 : .get_timeline(new_tline_id, true)
7902 50 : .expect("Should have the branched timeline");
7903 1 :
7904 5050 : for _ in 0..NUM_KEYS {
7905 5000 : lsn = Lsn(lsn.0 + 0x10);
7906 5000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
7907 5000 : test_key.field6 = blknum as u32;
7908 5000 : let mut writer = tline.writer().await;
7909 5000 : writer
7910 5000 : .put(
7911 5000 : test_key,
7912 5000 : lsn,
7913 5000 : &Value::Image(test_img(&format!("{} {} at {}", idx, blknum, lsn))),
7914 5000 : &ctx,
7915 5000 : )
7916 5000 : .await?;
7917 5000 : println!("updating [{}][{}] at {}", idx, blknum, lsn);
7918 5000 : writer.finish_write(lsn);
7919 5000 : drop(writer);
7920 5000 : updated[idx][blknum] = lsn;
7921 1 : }
7922 1 : }
7923 1 :
7924 1 : // Read pages from leaf timeline across all ancestors.
7925 50 : for (idx, lsns) in updated.iter().enumerate() {
7926 5000 : for (blknum, lsn) in lsns.iter().enumerate() {
7927 1 : // Skip empty mutations.
7928 5000 : if lsn.0 == 0 {
7929 1848 : continue;
7930 3152 : }
7931 3152 : println!("checking [{idx}][{blknum}] at {lsn}");
7932 3152 : test_key.field6 = blknum as u32;
7933 3152 : assert_eq!(
7934 3152 : tline.get(test_key, *lsn, &ctx).await?,
7935 3152 : test_img(&format!("{idx} {blknum} at {lsn}"))
7936 1 : );
7937 1 : }
7938 1 : }
7939 1 : Ok(())
7940 1 : }
7941 :
7942 : #[tokio::test]
7943 1 : async fn test_write_at_initdb_lsn_takes_optimization_code_path() -> anyhow::Result<()> {
7944 1 : let (tenant, ctx) = TenantHarness::create("test_empty_test_timeline_is_usable")
7945 1 : .await?
7946 1 : .load()
7947 1 : .await;
7948 1 :
7949 1 : let initdb_lsn = Lsn(0x20);
7950 1 : let (utline, ctx) = tenant
7951 1 : .create_empty_timeline(TIMELINE_ID, initdb_lsn, DEFAULT_PG_VERSION, &ctx)
7952 1 : .await?;
7953 1 : let tline = utline.raw_timeline().unwrap();
7954 1 :
7955 1 : // Spawn flush loop now so that we can set the `expect_initdb_optimization`
7956 1 : tline.maybe_spawn_flush_loop();
7957 1 :
7958 1 : // Make sure the timeline has the minimum set of required keys for operation.
7959 1 : // The only operation you can always do on an empty timeline is to `put` new data.
7960 1 : // Except if you `put` at `initdb_lsn`.
7961 1 : // In that case, there's an optimization to directly create image layers instead of delta layers.
7962 1 : // It uses `repartition()`, which assumes some keys to be present.
7963 1 : // Let's make sure the test timeline can handle that case.
7964 1 : {
7965 1 : let mut state = tline.flush_loop_state.lock().unwrap();
7966 1 : assert_eq!(
7967 1 : timeline::FlushLoopState::Running {
7968 1 : expect_initdb_optimization: false,
7969 1 : initdb_optimization_count: 0,
7970 1 : },
7971 1 : *state
7972 1 : );
7973 1 : *state = timeline::FlushLoopState::Running {
7974 1 : expect_initdb_optimization: true,
7975 1 : initdb_optimization_count: 0,
7976 1 : };
7977 1 : }
7978 1 :
7979 1 : // Make writes at the initdb_lsn. When we flush it below, it should be handled by the optimization.
7980 1 : // As explained above, the optimization requires some keys to be present.
7981 1 : // As per `create_empty_timeline` documentation, use init_empty to set them.
7982 1 : // This is what `create_test_timeline` does, by the way.
7983 1 : let mut modification = tline.begin_modification(initdb_lsn);
7984 1 : modification
7985 1 : .init_empty_test_timeline()
7986 1 : .context("init_empty_test_timeline")?;
7987 1 : modification
7988 1 : .commit(&ctx)
7989 1 : .await
7990 1 : .context("commit init_empty_test_timeline modification")?;
7991 1 :
7992 1 : // Do the flush. The flush code will check the expectations that we set above.
7993 1 : tline.freeze_and_flush().await?;
7994 1 :
7995 1 : // assert freeze_and_flush exercised the initdb optimization
7996 1 : {
7997 1 : let state = tline.flush_loop_state.lock().unwrap();
7998 1 : let timeline::FlushLoopState::Running {
7999 1 : expect_initdb_optimization,
8000 1 : initdb_optimization_count,
8001 1 : } = *state
8002 1 : else {
8003 1 : panic!("unexpected state: {:?}", *state);
8004 1 : };
8005 1 : assert!(expect_initdb_optimization);
8006 1 : assert!(initdb_optimization_count > 0);
8007 1 : }
8008 1 : Ok(())
8009 1 : }
8010 :
8011 : #[tokio::test]
8012 1 : async fn test_create_guard_crash() -> anyhow::Result<()> {
8013 1 : let name = "test_create_guard_crash";
8014 1 : let harness = TenantHarness::create(name).await?;
8015 1 : {
8016 1 : let (tenant, ctx) = harness.load().await;
8017 1 : let (tline, _ctx) = tenant
8018 1 : .create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION, &ctx)
8019 1 : .await?;
8020 1 : // Leave the timeline ID in [`TenantShard::timelines_creating`] to exclude attempting to create it again
8021 1 : let raw_tline = tline.raw_timeline().unwrap();
8022 1 : raw_tline
8023 1 : .shutdown(super::timeline::ShutdownMode::Hard)
8024 1 : .instrument(info_span!("test_shutdown", tenant_id=%raw_tline.tenant_shard_id, shard_id=%raw_tline.tenant_shard_id.shard_slug(), timeline_id=%TIMELINE_ID))
8025 1 : .await;
8026 1 : std::mem::forget(tline);
8027 1 : }
8028 1 :
8029 1 : let (tenant, _) = harness.load().await;
8030 1 : match tenant.get_timeline(TIMELINE_ID, false) {
8031 1 : Ok(_) => panic!("timeline should've been removed during load"),
8032 1 : Err(e) => {
8033 1 : assert_eq!(
8034 1 : e,
8035 1 : GetTimelineError::NotFound {
8036 1 : tenant_id: tenant.tenant_shard_id,
8037 1 : timeline_id: TIMELINE_ID,
8038 1 : }
8039 1 : )
8040 1 : }
8041 1 : }
8042 1 :
8043 1 : assert!(
8044 1 : !harness
8045 1 : .conf
8046 1 : .timeline_path(&tenant.tenant_shard_id, &TIMELINE_ID)
8047 1 : .exists()
8048 1 : );
8049 1 :
8050 1 : Ok(())
8051 1 : }
8052 :
8053 : #[tokio::test]
8054 1 : async fn test_read_at_max_lsn() -> anyhow::Result<()> {
8055 1 : let names_algorithms = [
8056 1 : ("test_read_at_max_lsn_legacy", CompactionAlgorithm::Legacy),
8057 1 : ("test_read_at_max_lsn_tiered", CompactionAlgorithm::Tiered),
8058 1 : ];
8059 3 : for (name, algorithm) in names_algorithms {
8060 2 : test_read_at_max_lsn_algorithm(name, algorithm).await?;
8061 1 : }
8062 1 : Ok(())
8063 1 : }
8064 :
8065 2 : async fn test_read_at_max_lsn_algorithm(
8066 2 : name: &'static str,
8067 2 : compaction_algorithm: CompactionAlgorithm,
8068 2 : ) -> anyhow::Result<()> {
8069 2 : let mut harness = TenantHarness::create(name).await?;
8070 2 : harness.tenant_conf.compaction_algorithm = Some(CompactionAlgorithmSettings {
8071 2 : kind: compaction_algorithm,
8072 2 : });
8073 2 : let (tenant, ctx) = harness.load().await;
8074 2 : let tline = tenant
8075 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
8076 2 : .await?;
8077 :
8078 2 : let lsn = Lsn(0x10);
8079 2 : let compact = false;
8080 2 : bulk_insert_maybe_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000, compact).await?;
8081 :
8082 2 : let test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
8083 2 : let read_lsn = Lsn(u64::MAX - 1);
8084 :
8085 2 : let result = tline.get(test_key, read_lsn, &ctx).await;
8086 2 : assert!(result.is_ok(), "result is not Ok: {}", result.unwrap_err());
8087 :
8088 2 : Ok(())
8089 2 : }
8090 :
8091 : #[tokio::test]
8092 1 : async fn test_metadata_scan() -> anyhow::Result<()> {
8093 1 : let harness = TenantHarness::create("test_metadata_scan").await?;
8094 1 : let (tenant, ctx) = harness.load().await;
8095 1 : let io_concurrency = IoConcurrency::spawn_for_test();
8096 1 : let tline = tenant
8097 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
8098 1 : .await?;
8099 1 :
8100 1 : const NUM_KEYS: usize = 1000;
8101 1 : const STEP: usize = 10000; // random update + scan base_key + idx * STEP
8102 1 :
8103 1 : let cancel = CancellationToken::new();
8104 1 :
8105 1 : let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
8106 1 : base_key.field1 = AUX_KEY_PREFIX;
8107 1 : let mut test_key = base_key;
8108 1 :
8109 1 : // Track when each page was last modified. Used to assert that
8110 1 : // a read sees the latest page version.
8111 1 : let mut updated = [Lsn(0); NUM_KEYS];
8112 1 :
8113 1 : let mut lsn = Lsn(0x10);
8114 1 : #[allow(clippy::needless_range_loop)]
8115 1001 : for blknum in 0..NUM_KEYS {
8116 1000 : lsn = Lsn(lsn.0 + 0x10);
8117 1000 : test_key.field6 = (blknum * STEP) as u32;
8118 1000 : let mut writer = tline.writer().await;
8119 1000 : writer
8120 1000 : .put(
8121 1000 : test_key,
8122 1000 : lsn,
8123 1000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
8124 1000 : &ctx,
8125 1000 : )
8126 1000 : .await?;
8127 1000 : writer.finish_write(lsn);
8128 1000 : updated[blknum] = lsn;
8129 1000 : drop(writer);
8130 1 : }
8131 1 :
8132 1 : let keyspace = KeySpace::single(base_key..base_key.add((NUM_KEYS * STEP) as u32));
8133 1 :
8134 12 : for iter in 0..=10 {
8135 1 : // Read all the blocks
8136 11000 : for (blknum, last_lsn) in updated.iter().enumerate() {
8137 11000 : test_key.field6 = (blknum * STEP) as u32;
8138 11000 : assert_eq!(
8139 11000 : tline.get(test_key, lsn, &ctx).await?,
8140 11000 : test_img(&format!("{} at {}", blknum, last_lsn))
8141 1 : );
8142 1 : }
8143 1 :
8144 11 : let mut cnt = 0;
8145 11 : let query = VersionedKeySpaceQuery::uniform(keyspace.clone(), lsn);
8146 1 :
8147 11000 : for (key, value) in tline
8148 11 : .get_vectored_impl(
8149 11 : query,
8150 11 : &mut ValuesReconstructState::new(io_concurrency.clone()),
8151 11 : &ctx,
8152 11 : )
8153 11 : .await?
8154 1 : {
8155 11000 : let blknum = key.field6 as usize;
8156 11000 : let value = value?;
8157 11000 : assert!(blknum % STEP == 0);
8158 11000 : let blknum = blknum / STEP;
8159 11000 : assert_eq!(
8160 11000 : value,
8161 11000 : test_img(&format!("{} at {}", blknum, updated[blknum]))
8162 11000 : );
8163 11000 : cnt += 1;
8164 1 : }
8165 1 :
8166 11 : assert_eq!(cnt, NUM_KEYS);
8167 1 :
8168 11011 : for _ in 0..NUM_KEYS {
8169 11000 : lsn = Lsn(lsn.0 + 0x10);
8170 11000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
8171 11000 : test_key.field6 = (blknum * STEP) as u32;
8172 11000 : let mut writer = tline.writer().await;
8173 11000 : writer
8174 11000 : .put(
8175 11000 : test_key,
8176 11000 : lsn,
8177 11000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
8178 11000 : &ctx,
8179 11000 : )
8180 11000 : .await?;
8181 11000 : writer.finish_write(lsn);
8182 11000 : drop(writer);
8183 11000 : updated[blknum] = lsn;
8184 1 : }
8185 1 :
8186 1 : // Perform two cycles of flush, compact, and GC
8187 33 : for round in 0..2 {
8188 22 : tline.freeze_and_flush().await?;
8189 22 : tline
8190 22 : .compact(
8191 22 : &cancel,
8192 22 : if iter % 5 == 0 && round == 0 {
8193 3 : let mut flags = EnumSet::new();
8194 3 : flags.insert(CompactFlags::ForceImageLayerCreation);
8195 3 : flags.insert(CompactFlags::ForceRepartition);
8196 3 : flags
8197 1 : } else {
8198 19 : EnumSet::empty()
8199 1 : },
8200 22 : &ctx,
8201 22 : )
8202 22 : .await?;
8203 22 : tenant
8204 22 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
8205 22 : .await?;
8206 1 : }
8207 1 : }
8208 1 :
8209 1 : Ok(())
8210 1 : }
8211 :
8212 : #[tokio::test]
8213 1 : async fn test_metadata_compaction_trigger() -> anyhow::Result<()> {
8214 1 : let harness = TenantHarness::create("test_metadata_compaction_trigger").await?;
8215 1 : let (tenant, ctx) = harness.load().await;
8216 1 : let tline = tenant
8217 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
8218 1 : .await?;
8219 1 :
8220 1 : let cancel = CancellationToken::new();
8221 1 :
8222 1 : let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
8223 1 : base_key.field1 = AUX_KEY_PREFIX;
8224 1 : let test_key = base_key;
8225 1 : let mut lsn = Lsn(0x10);
8226 1 :
8227 21 : for _ in 0..20 {
8228 20 : lsn = Lsn(lsn.0 + 0x10);
8229 20 : let mut writer = tline.writer().await;
8230 20 : writer
8231 20 : .put(
8232 20 : test_key,
8233 20 : lsn,
8234 20 : &Value::Image(test_img(&format!("{} at {}", 0, lsn))),
8235 20 : &ctx,
8236 20 : )
8237 20 : .await?;
8238 20 : writer.finish_write(lsn);
8239 20 : drop(writer);
8240 20 : tline.freeze_and_flush().await?; // force create a delta layer
8241 1 : }
8242 1 :
8243 1 : let before_num_l0_delta_files = tline
8244 1 : .layers
8245 1 : .read(LayerManagerLockHolder::Testing)
8246 1 : .await
8247 1 : .layer_map()?
8248 1 : .level0_deltas()
8249 1 : .len();
8250 1 :
8251 1 : tline.compact(&cancel, EnumSet::default(), &ctx).await?;
8252 1 :
8253 1 : let after_num_l0_delta_files = tline
8254 1 : .layers
8255 1 : .read(LayerManagerLockHolder::Testing)
8256 1 : .await
8257 1 : .layer_map()?
8258 1 : .level0_deltas()
8259 1 : .len();
8260 1 :
8261 1 : assert!(
8262 1 : after_num_l0_delta_files < before_num_l0_delta_files,
8263 1 : "after_num_l0_delta_files={after_num_l0_delta_files}, before_num_l0_delta_files={before_num_l0_delta_files}"
8264 1 : );
8265 1 :
8266 1 : assert_eq!(
8267 1 : tline.get(test_key, lsn, &ctx).await?,
8268 1 : test_img(&format!("{} at {}", 0, lsn))
8269 1 : );
8270 1 :
8271 1 : Ok(())
8272 1 : }
8273 :
8274 : #[tokio::test]
8275 1 : async fn test_aux_file_e2e() {
8276 1 : let harness = TenantHarness::create("test_aux_file_e2e").await.unwrap();
8277 1 :
8278 1 : let (tenant, ctx) = harness.load().await;
8279 1 : let io_concurrency = IoConcurrency::spawn_for_test();
8280 1 :
8281 1 : let mut lsn = Lsn(0x08);
8282 1 :
8283 1 : let tline: Arc<Timeline> = tenant
8284 1 : .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx)
8285 1 : .await
8286 1 : .unwrap();
8287 1 :
8288 1 : {
8289 1 : lsn += 8;
8290 1 : let mut modification = tline.begin_modification(lsn);
8291 1 : modification
8292 1 : .put_file("pg_logical/mappings/test1", b"first", &ctx)
8293 1 : .await
8294 1 : .unwrap();
8295 1 : modification.commit(&ctx).await.unwrap();
8296 1 : }
8297 1 :
8298 1 : // we can read everything from the storage
8299 1 : let files = tline
8300 1 : .list_aux_files(lsn, &ctx, io_concurrency.clone())
8301 1 : .await
8302 1 : .unwrap();
8303 1 : assert_eq!(
8304 1 : files.get("pg_logical/mappings/test1"),
8305 1 : Some(&bytes::Bytes::from_static(b"first"))
8306 1 : );
8307 1 :
8308 1 : {
8309 1 : lsn += 8;
8310 1 : let mut modification = tline.begin_modification(lsn);
8311 1 : modification
8312 1 : .put_file("pg_logical/mappings/test2", b"second", &ctx)
8313 1 : .await
8314 1 : .unwrap();
8315 1 : modification.commit(&ctx).await.unwrap();
8316 1 : }
8317 1 :
8318 1 : let files = tline
8319 1 : .list_aux_files(lsn, &ctx, io_concurrency.clone())
8320 1 : .await
8321 1 : .unwrap();
8322 1 : assert_eq!(
8323 1 : files.get("pg_logical/mappings/test2"),
8324 1 : Some(&bytes::Bytes::from_static(b"second"))
8325 1 : );
8326 1 :
8327 1 : let child = tenant
8328 1 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(lsn), &ctx)
8329 1 : .await
8330 1 : .unwrap();
8331 1 :
8332 1 : let files = child
8333 1 : .list_aux_files(lsn, &ctx, io_concurrency.clone())
8334 1 : .await
8335 1 : .unwrap();
8336 1 : assert_eq!(files.get("pg_logical/mappings/test1"), None);
8337 1 : assert_eq!(files.get("pg_logical/mappings/test2"), None);
8338 1 : }
8339 :
8340 : #[tokio::test]
8341 1 : async fn test_repl_origin_tombstones() {
8342 1 : let harness = TenantHarness::create("test_repl_origin_tombstones")
8343 1 : .await
8344 1 : .unwrap();
8345 1 :
8346 1 : let (tenant, ctx) = harness.load().await;
8347 1 : let io_concurrency = IoConcurrency::spawn_for_test();
8348 1 :
8349 1 : let mut lsn = Lsn(0x08);
8350 1 :
8351 1 : let tline: Arc<Timeline> = tenant
8352 1 : .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx)
8353 1 : .await
8354 1 : .unwrap();
8355 1 :
8356 1 : let repl_lsn = Lsn(0x10);
8357 1 : {
8358 1 : lsn += 8;
8359 1 : let mut modification = tline.begin_modification(lsn);
8360 1 : modification.put_for_unit_test(repl_origin_key(2), Value::Image(Bytes::new()));
8361 1 : modification.set_replorigin(1, repl_lsn).await.unwrap();
8362 1 : modification.commit(&ctx).await.unwrap();
8363 1 : }
8364 1 :
8365 1 : // we can read everything from the storage
8366 1 : let repl_origins = tline
8367 1 : .get_replorigins(lsn, &ctx, io_concurrency.clone())
8368 1 : .await
8369 1 : .unwrap();
8370 1 : assert_eq!(repl_origins.len(), 1);
8371 1 : assert_eq!(repl_origins[&1], lsn);
8372 1 :
8373 1 : {
8374 1 : lsn += 8;
8375 1 : let mut modification = tline.begin_modification(lsn);
8376 1 : modification.put_for_unit_test(
8377 1 : repl_origin_key(3),
8378 1 : Value::Image(Bytes::copy_from_slice(b"cannot_decode_this")),
8379 1 : );
8380 1 : modification.commit(&ctx).await.unwrap();
8381 1 : }
8382 1 : let result = tline
8383 1 : .get_replorigins(lsn, &ctx, io_concurrency.clone())
8384 1 : .await;
8385 1 : assert!(result.is_err());
8386 1 : }
8387 :
8388 : #[tokio::test]
8389 1 : async fn test_metadata_image_creation() -> anyhow::Result<()> {
8390 1 : let harness = TenantHarness::create("test_metadata_image_creation").await?;
8391 1 : let (tenant, ctx) = harness.load().await;
8392 1 : let io_concurrency = IoConcurrency::spawn_for_test();
8393 1 : let tline = tenant
8394 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
8395 1 : .await?;
8396 1 :
8397 1 : const NUM_KEYS: usize = 1000;
8398 1 : const STEP: usize = 10000; // random update + scan base_key + idx * STEP
8399 1 :
8400 1 : let cancel = CancellationToken::new();
8401 1 :
8402 1 : let base_key = Key::from_hex("620000000033333333444444445500000000").unwrap();
8403 1 : assert_eq!(base_key.field1, AUX_KEY_PREFIX); // in case someone accidentally changed the prefix...
8404 1 : let mut test_key = base_key;
8405 1 : let mut lsn = Lsn(0x10);
8406 1 :
8407 4 : async fn scan_with_statistics(
8408 4 : tline: &Timeline,
8409 4 : keyspace: &KeySpace,
8410 4 : lsn: Lsn,
8411 4 : ctx: &RequestContext,
8412 4 : io_concurrency: IoConcurrency,
8413 4 : ) -> anyhow::Result<(BTreeMap<Key, Result<Bytes, PageReconstructError>>, usize)> {
8414 4 : let mut reconstruct_state = ValuesReconstructState::new(io_concurrency);
8415 4 : let query = VersionedKeySpaceQuery::uniform(keyspace.clone(), lsn);
8416 4 : let res = tline
8417 4 : .get_vectored_impl(query, &mut reconstruct_state, ctx)
8418 4 : .await?;
8419 4 : Ok((res, reconstruct_state.get_delta_layers_visited() as usize))
8420 4 : }
8421 1 :
8422 1001 : for blknum in 0..NUM_KEYS {
8423 1000 : lsn = Lsn(lsn.0 + 0x10);
8424 1000 : test_key.field6 = (blknum * STEP) as u32;
8425 1000 : let mut writer = tline.writer().await;
8426 1000 : writer
8427 1000 : .put(
8428 1000 : test_key,
8429 1000 : lsn,
8430 1000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
8431 1000 : &ctx,
8432 1000 : )
8433 1000 : .await?;
8434 1000 : writer.finish_write(lsn);
8435 1000 : drop(writer);
8436 1 : }
8437 1 :
8438 1 : let keyspace = KeySpace::single(base_key..base_key.add((NUM_KEYS * STEP) as u32));
8439 1 :
8440 11 : for iter in 1..=10 {
8441 10010 : for _ in 0..NUM_KEYS {
8442 10000 : lsn = Lsn(lsn.0 + 0x10);
8443 10000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
8444 10000 : test_key.field6 = (blknum * STEP) as u32;
8445 10000 : let mut writer = tline.writer().await;
8446 10000 : writer
8447 10000 : .put(
8448 10000 : test_key,
8449 10000 : lsn,
8450 10000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
8451 10000 : &ctx,
8452 10000 : )
8453 10000 : .await?;
8454 10000 : writer.finish_write(lsn);
8455 10000 : drop(writer);
8456 1 : }
8457 1 :
8458 10 : tline.freeze_and_flush().await?;
8459 1 : // Force layers to L1
8460 10 : tline
8461 10 : .compact(
8462 10 : &cancel,
8463 10 : {
8464 10 : let mut flags = EnumSet::new();
8465 10 : flags.insert(CompactFlags::ForceL0Compaction);
8466 10 : flags
8467 10 : },
8468 10 : &ctx,
8469 10 : )
8470 10 : .await?;
8471 1 :
8472 10 : if iter % 5 == 0 {
8473 2 : let scan_lsn = Lsn(lsn.0 + 1);
8474 2 : info!("scanning at {}", scan_lsn);
8475 2 : let (_, before_delta_file_accessed) =
8476 2 : scan_with_statistics(&tline, &keyspace, scan_lsn, &ctx, io_concurrency.clone())
8477 2 : .await?;
8478 2 : tline
8479 2 : .compact(
8480 2 : &cancel,
8481 2 : {
8482 2 : let mut flags = EnumSet::new();
8483 2 : flags.insert(CompactFlags::ForceImageLayerCreation);
8484 2 : flags.insert(CompactFlags::ForceRepartition);
8485 2 : flags.insert(CompactFlags::ForceL0Compaction);
8486 2 : flags
8487 2 : },
8488 2 : &ctx,
8489 2 : )
8490 2 : .await?;
8491 2 : let (_, after_delta_file_accessed) =
8492 2 : scan_with_statistics(&tline, &keyspace, scan_lsn, &ctx, io_concurrency.clone())
8493 2 : .await?;
8494 2 : assert!(
8495 2 : after_delta_file_accessed < before_delta_file_accessed,
8496 1 : "after_delta_file_accessed={after_delta_file_accessed}, before_delta_file_accessed={before_delta_file_accessed}"
8497 1 : );
8498 1 : // Given that we already produced an image layer, there should be no delta layer needed for the scan, but still setting a low threshold there for unforeseen circumstances.
8499 2 : assert!(
8500 2 : after_delta_file_accessed <= 2,
8501 1 : "after_delta_file_accessed={after_delta_file_accessed}"
8502 1 : );
8503 8 : }
8504 1 : }
8505 1 :
8506 1 : Ok(())
8507 1 : }
8508 :
8509 : #[tokio::test]
8510 1 : async fn test_vectored_missing_data_key_reads() -> anyhow::Result<()> {
8511 1 : let harness = TenantHarness::create("test_vectored_missing_data_key_reads").await?;
8512 1 : let (tenant, ctx) = harness.load().await;
8513 1 :
8514 1 : let base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
8515 1 : let base_key_child = Key::from_hex("000000000033333333444444445500000001").unwrap();
8516 1 : let base_key_nonexist = Key::from_hex("000000000033333333444444445500000002").unwrap();
8517 1 :
8518 1 : let tline = tenant
8519 1 : .create_test_timeline_with_layers(
8520 1 : TIMELINE_ID,
8521 1 : Lsn(0x10),
8522 1 : DEFAULT_PG_VERSION,
8523 1 : &ctx,
8524 1 : Vec::new(), // in-memory layers
8525 1 : Vec::new(), // delta layers
8526 1 : vec![(Lsn(0x20), vec![(base_key, test_img("data key 1"))])], // image layers
8527 1 : Lsn(0x20), // it's fine to not advance LSN to 0x30 while using 0x30 to get below because `get_vectored_impl` does not wait for LSN
8528 1 : )
8529 1 : .await?;
8530 1 : tline.add_extra_test_dense_keyspace(KeySpace::single(base_key..(base_key_nonexist.next())));
8531 1 :
8532 1 : let child = tenant
8533 1 : .branch_timeline_test_with_layers(
8534 1 : &tline,
8535 1 : NEW_TIMELINE_ID,
8536 1 : Some(Lsn(0x20)),
8537 1 : &ctx,
8538 1 : Vec::new(), // delta layers
8539 1 : vec![(Lsn(0x30), vec![(base_key_child, test_img("data key 2"))])], // image layers
8540 1 : Lsn(0x30),
8541 1 : )
8542 1 : .await
8543 1 : .unwrap();
8544 1 :
8545 1 : let lsn = Lsn(0x30);
8546 1 :
8547 1 : // test vectored get on parent timeline
8548 1 : assert_eq!(
8549 1 : get_vectored_impl_wrapper(&tline, base_key, lsn, &ctx).await?,
8550 1 : Some(test_img("data key 1"))
8551 1 : );
8552 1 : assert!(
8553 1 : get_vectored_impl_wrapper(&tline, base_key_child, lsn, &ctx)
8554 1 : .await
8555 1 : .unwrap_err()
8556 1 : .is_missing_key_error()
8557 1 : );
8558 1 : assert!(
8559 1 : get_vectored_impl_wrapper(&tline, base_key_nonexist, lsn, &ctx)
8560 1 : .await
8561 1 : .unwrap_err()
8562 1 : .is_missing_key_error()
8563 1 : );
8564 1 :
8565 1 : // test vectored get on child timeline
8566 1 : assert_eq!(
8567 1 : get_vectored_impl_wrapper(&child, base_key, lsn, &ctx).await?,
8568 1 : Some(test_img("data key 1"))
8569 1 : );
8570 1 : assert_eq!(
8571 1 : get_vectored_impl_wrapper(&child, base_key_child, lsn, &ctx).await?,
8572 1 : Some(test_img("data key 2"))
8573 1 : );
8574 1 : assert!(
8575 1 : get_vectored_impl_wrapper(&child, base_key_nonexist, lsn, &ctx)
8576 1 : .await
8577 1 : .unwrap_err()
8578 1 : .is_missing_key_error()
8579 1 : );
8580 1 :
8581 1 : Ok(())
8582 1 : }
8583 :
8584 : #[tokio::test]
8585 1 : async fn test_vectored_missing_metadata_key_reads() -> anyhow::Result<()> {
8586 1 : let harness = TenantHarness::create("test_vectored_missing_metadata_key_reads").await?;
8587 1 : let (tenant, ctx) = harness.load().await;
8588 1 : let io_concurrency = IoConcurrency::spawn_for_test();
8589 1 :
8590 1 : let base_key = Key::from_hex("620000000033333333444444445500000000").unwrap();
8591 1 : let base_key_child = Key::from_hex("620000000033333333444444445500000001").unwrap();
8592 1 : let base_key_nonexist = Key::from_hex("620000000033333333444444445500000002").unwrap();
8593 1 : let base_key_overwrite = Key::from_hex("620000000033333333444444445500000003").unwrap();
8594 1 :
8595 1 : let base_inherited_key = Key::from_hex("610000000033333333444444445500000000").unwrap();
8596 1 : let base_inherited_key_child =
8597 1 : Key::from_hex("610000000033333333444444445500000001").unwrap();
8598 1 : let base_inherited_key_nonexist =
8599 1 : Key::from_hex("610000000033333333444444445500000002").unwrap();
8600 1 : let base_inherited_key_overwrite =
8601 1 : Key::from_hex("610000000033333333444444445500000003").unwrap();
8602 1 :
8603 1 : assert_eq!(base_key.field1, AUX_KEY_PREFIX); // in case someone accidentally changed the prefix...
8604 1 : assert_eq!(base_inherited_key.field1, RELATION_SIZE_PREFIX);
8605 1 :
8606 1 : let tline = tenant
8607 1 : .create_test_timeline_with_layers(
8608 1 : TIMELINE_ID,
8609 1 : Lsn(0x10),
8610 1 : DEFAULT_PG_VERSION,
8611 1 : &ctx,
8612 1 : Vec::new(), // in-memory layers
8613 1 : Vec::new(), // delta layers
8614 1 : vec![(
8615 1 : Lsn(0x20),
8616 1 : vec![
8617 1 : (base_inherited_key, test_img("metadata inherited key 1")),
8618 1 : (
8619 1 : base_inherited_key_overwrite,
8620 1 : test_img("metadata key overwrite 1a"),
8621 1 : ),
8622 1 : (base_key, test_img("metadata key 1")),
8623 1 : (base_key_overwrite, test_img("metadata key overwrite 1b")),
8624 1 : ],
8625 1 : )], // image layers
8626 1 : Lsn(0x20), // it's fine to not advance LSN to 0x30 while using 0x30 to get below because `get_vectored_impl` does not wait for LSN
8627 1 : )
8628 1 : .await?;
8629 1 :
8630 1 : let child = tenant
8631 1 : .branch_timeline_test_with_layers(
8632 1 : &tline,
8633 1 : NEW_TIMELINE_ID,
8634 1 : Some(Lsn(0x20)),
8635 1 : &ctx,
8636 1 : Vec::new(), // delta layers
8637 1 : vec![(
8638 1 : Lsn(0x30),
8639 1 : vec![
8640 1 : (
8641 1 : base_inherited_key_child,
8642 1 : test_img("metadata inherited key 2"),
8643 1 : ),
8644 1 : (
8645 1 : base_inherited_key_overwrite,
8646 1 : test_img("metadata key overwrite 2a"),
8647 1 : ),
8648 1 : (base_key_child, test_img("metadata key 2")),
8649 1 : (base_key_overwrite, test_img("metadata key overwrite 2b")),
8650 1 : ],
8651 1 : )], // image layers
8652 1 : Lsn(0x30),
8653 1 : )
8654 1 : .await
8655 1 : .unwrap();
8656 1 :
8657 1 : let lsn = Lsn(0x30);
8658 1 :
8659 1 : // test vectored get on parent timeline
8660 1 : assert_eq!(
8661 1 : get_vectored_impl_wrapper(&tline, base_key, lsn, &ctx).await?,
8662 1 : Some(test_img("metadata key 1"))
8663 1 : );
8664 1 : assert_eq!(
8665 1 : get_vectored_impl_wrapper(&tline, base_key_child, lsn, &ctx).await?,
8666 1 : None
8667 1 : );
8668 1 : assert_eq!(
8669 1 : get_vectored_impl_wrapper(&tline, base_key_nonexist, lsn, &ctx).await?,
8670 1 : None
8671 1 : );
8672 1 : assert_eq!(
8673 1 : get_vectored_impl_wrapper(&tline, base_key_overwrite, lsn, &ctx).await?,
8674 1 : Some(test_img("metadata key overwrite 1b"))
8675 1 : );
8676 1 : assert_eq!(
8677 1 : get_vectored_impl_wrapper(&tline, base_inherited_key, lsn, &ctx).await?,
8678 1 : Some(test_img("metadata inherited key 1"))
8679 1 : );
8680 1 : assert_eq!(
8681 1 : get_vectored_impl_wrapper(&tline, base_inherited_key_child, lsn, &ctx).await?,
8682 1 : None
8683 1 : );
8684 1 : assert_eq!(
8685 1 : get_vectored_impl_wrapper(&tline, base_inherited_key_nonexist, lsn, &ctx).await?,
8686 1 : None
8687 1 : );
8688 1 : assert_eq!(
8689 1 : get_vectored_impl_wrapper(&tline, base_inherited_key_overwrite, lsn, &ctx).await?,
8690 1 : Some(test_img("metadata key overwrite 1a"))
8691 1 : );
8692 1 :
8693 1 : // test vectored get on child timeline
8694 1 : assert_eq!(
8695 1 : get_vectored_impl_wrapper(&child, base_key, lsn, &ctx).await?,
8696 1 : None
8697 1 : );
8698 1 : assert_eq!(
8699 1 : get_vectored_impl_wrapper(&child, base_key_child, lsn, &ctx).await?,
8700 1 : Some(test_img("metadata key 2"))
8701 1 : );
8702 1 : assert_eq!(
8703 1 : get_vectored_impl_wrapper(&child, base_key_nonexist, lsn, &ctx).await?,
8704 1 : None
8705 1 : );
8706 1 : assert_eq!(
8707 1 : get_vectored_impl_wrapper(&child, base_inherited_key, lsn, &ctx).await?,
8708 1 : Some(test_img("metadata inherited key 1"))
8709 1 : );
8710 1 : assert_eq!(
8711 1 : get_vectored_impl_wrapper(&child, base_inherited_key_child, lsn, &ctx).await?,
8712 1 : Some(test_img("metadata inherited key 2"))
8713 1 : );
8714 1 : assert_eq!(
8715 1 : get_vectored_impl_wrapper(&child, base_inherited_key_nonexist, lsn, &ctx).await?,
8716 1 : None
8717 1 : );
8718 1 : assert_eq!(
8719 1 : get_vectored_impl_wrapper(&child, base_key_overwrite, lsn, &ctx).await?,
8720 1 : Some(test_img("metadata key overwrite 2b"))
8721 1 : );
8722 1 : assert_eq!(
8723 1 : get_vectored_impl_wrapper(&child, base_inherited_key_overwrite, lsn, &ctx).await?,
8724 1 : Some(test_img("metadata key overwrite 2a"))
8725 1 : );
8726 1 :
8727 1 : // test vectored scan on parent timeline
8728 1 : let mut reconstruct_state = ValuesReconstructState::new(io_concurrency.clone());
8729 1 : let query =
8730 1 : VersionedKeySpaceQuery::uniform(KeySpace::single(Key::metadata_key_range()), lsn);
8731 1 : let res = tline
8732 1 : .get_vectored_impl(query, &mut reconstruct_state, &ctx)
8733 1 : .await?;
8734 1 :
8735 1 : assert_eq!(
8736 1 : res.into_iter()
8737 4 : .map(|(k, v)| (k, v.unwrap()))
8738 1 : .collect::<Vec<_>>(),
8739 1 : vec![
8740 1 : (base_inherited_key, test_img("metadata inherited key 1")),
8741 1 : (
8742 1 : base_inherited_key_overwrite,
8743 1 : test_img("metadata key overwrite 1a")
8744 1 : ),
8745 1 : (base_key, test_img("metadata key 1")),
8746 1 : (base_key_overwrite, test_img("metadata key overwrite 1b")),
8747 1 : ]
8748 1 : );
8749 1 :
8750 1 : // test vectored scan on child timeline
8751 1 : let mut reconstruct_state = ValuesReconstructState::new(io_concurrency.clone());
8752 1 : let query =
8753 1 : VersionedKeySpaceQuery::uniform(KeySpace::single(Key::metadata_key_range()), lsn);
8754 1 : let res = child
8755 1 : .get_vectored_impl(query, &mut reconstruct_state, &ctx)
8756 1 : .await?;
8757 1 :
8758 1 : assert_eq!(
8759 1 : res.into_iter()
8760 5 : .map(|(k, v)| (k, v.unwrap()))
8761 1 : .collect::<Vec<_>>(),
8762 1 : vec![
8763 1 : (base_inherited_key, test_img("metadata inherited key 1")),
8764 1 : (
8765 1 : base_inherited_key_child,
8766 1 : test_img("metadata inherited key 2")
8767 1 : ),
8768 1 : (
8769 1 : base_inherited_key_overwrite,
8770 1 : test_img("metadata key overwrite 2a")
8771 1 : ),
8772 1 : (base_key_child, test_img("metadata key 2")),
8773 1 : (base_key_overwrite, test_img("metadata key overwrite 2b")),
8774 1 : ]
8775 1 : );
8776 1 :
8777 1 : Ok(())
8778 1 : }
8779 :
8780 28 : async fn get_vectored_impl_wrapper(
8781 28 : tline: &Arc<Timeline>,
8782 28 : key: Key,
8783 28 : lsn: Lsn,
8784 28 : ctx: &RequestContext,
8785 28 : ) -> Result<Option<Bytes>, GetVectoredError> {
8786 28 : let io_concurrency = IoConcurrency::spawn_from_conf(
8787 28 : tline.conf.get_vectored_concurrent_io,
8788 28 : tline.gate.enter().unwrap(),
8789 28 : );
8790 28 : let mut reconstruct_state = ValuesReconstructState::new(io_concurrency);
8791 28 : let query = VersionedKeySpaceQuery::uniform(KeySpace::single(key..key.next()), lsn);
8792 28 : let mut res = tline
8793 28 : .get_vectored_impl(query, &mut reconstruct_state, ctx)
8794 28 : .await?;
8795 25 : Ok(res.pop_last().map(|(k, v)| {
8796 16 : assert_eq!(k, key);
8797 16 : v.unwrap()
8798 25 : }))
8799 28 : }
8800 :
8801 : #[tokio::test]
8802 1 : async fn test_metadata_tombstone_reads() -> anyhow::Result<()> {
8803 1 : let harness = TenantHarness::create("test_metadata_tombstone_reads").await?;
8804 1 : let (tenant, ctx) = harness.load().await;
8805 1 : let key0 = Key::from_hex("620000000033333333444444445500000000").unwrap();
8806 1 : let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap();
8807 1 : let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap();
8808 1 : let key3 = Key::from_hex("620000000033333333444444445500000003").unwrap();
8809 1 :
8810 1 : // We emulate the situation that the compaction algorithm creates an image layer that removes the tombstones
8811 1 : // Lsn 0x30 key0, key3, no key1+key2
8812 1 : // Lsn 0x20 key1+key2 tomestones
8813 1 : // Lsn 0x10 key1 in image, key2 in delta
8814 1 : let tline = tenant
8815 1 : .create_test_timeline_with_layers(
8816 1 : TIMELINE_ID,
8817 1 : Lsn(0x10),
8818 1 : DEFAULT_PG_VERSION,
8819 1 : &ctx,
8820 1 : Vec::new(), // in-memory layers
8821 1 : // delta layers
8822 1 : vec![
8823 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8824 1 : Lsn(0x10)..Lsn(0x20),
8825 1 : vec![(key2, Lsn(0x10), Value::Image(test_img("metadata key 2")))],
8826 1 : ),
8827 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8828 1 : Lsn(0x20)..Lsn(0x30),
8829 1 : vec![(key1, Lsn(0x20), Value::Image(Bytes::new()))],
8830 1 : ),
8831 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8832 1 : Lsn(0x20)..Lsn(0x30),
8833 1 : vec![(key2, Lsn(0x20), Value::Image(Bytes::new()))],
8834 1 : ),
8835 1 : ],
8836 1 : // image layers
8837 1 : vec![
8838 1 : (Lsn(0x10), vec![(key1, test_img("metadata key 1"))]),
8839 1 : (
8840 1 : Lsn(0x30),
8841 1 : vec![
8842 1 : (key0, test_img("metadata key 0")),
8843 1 : (key3, test_img("metadata key 3")),
8844 1 : ],
8845 1 : ),
8846 1 : ],
8847 1 : Lsn(0x30),
8848 1 : )
8849 1 : .await?;
8850 1 :
8851 1 : let lsn = Lsn(0x30);
8852 1 : let old_lsn = Lsn(0x20);
8853 1 :
8854 1 : assert_eq!(
8855 1 : get_vectored_impl_wrapper(&tline, key0, lsn, &ctx).await?,
8856 1 : Some(test_img("metadata key 0"))
8857 1 : );
8858 1 : assert_eq!(
8859 1 : get_vectored_impl_wrapper(&tline, key1, lsn, &ctx).await?,
8860 1 : None,
8861 1 : );
8862 1 : assert_eq!(
8863 1 : get_vectored_impl_wrapper(&tline, key2, lsn, &ctx).await?,
8864 1 : None,
8865 1 : );
8866 1 : assert_eq!(
8867 1 : get_vectored_impl_wrapper(&tline, key1, old_lsn, &ctx).await?,
8868 1 : Some(Bytes::new()),
8869 1 : );
8870 1 : assert_eq!(
8871 1 : get_vectored_impl_wrapper(&tline, key2, old_lsn, &ctx).await?,
8872 1 : Some(Bytes::new()),
8873 1 : );
8874 1 : assert_eq!(
8875 1 : get_vectored_impl_wrapper(&tline, key3, lsn, &ctx).await?,
8876 1 : Some(test_img("metadata key 3"))
8877 1 : );
8878 1 :
8879 1 : Ok(())
8880 1 : }
8881 :
8882 : #[tokio::test]
8883 1 : async fn test_metadata_tombstone_image_creation() {
8884 1 : let harness = TenantHarness::create("test_metadata_tombstone_image_creation")
8885 1 : .await
8886 1 : .unwrap();
8887 1 : let (tenant, ctx) = harness.load().await;
8888 1 : let io_concurrency = IoConcurrency::spawn_for_test();
8889 1 :
8890 1 : let key0 = Key::from_hex("620000000033333333444444445500000000").unwrap();
8891 1 : let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap();
8892 1 : let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap();
8893 1 : let key3 = Key::from_hex("620000000033333333444444445500000003").unwrap();
8894 1 :
8895 1 : let tline = tenant
8896 1 : .create_test_timeline_with_layers(
8897 1 : TIMELINE_ID,
8898 1 : Lsn(0x10),
8899 1 : DEFAULT_PG_VERSION,
8900 1 : &ctx,
8901 1 : Vec::new(), // in-memory layers
8902 1 : // delta layers
8903 1 : vec![
8904 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8905 1 : Lsn(0x10)..Lsn(0x20),
8906 1 : vec![(key2, Lsn(0x10), Value::Image(test_img("metadata key 2")))],
8907 1 : ),
8908 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8909 1 : Lsn(0x20)..Lsn(0x30),
8910 1 : vec![(key1, Lsn(0x20), Value::Image(Bytes::new()))],
8911 1 : ),
8912 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8913 1 : Lsn(0x20)..Lsn(0x30),
8914 1 : vec![(key2, Lsn(0x20), Value::Image(Bytes::new()))],
8915 1 : ),
8916 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8917 1 : Lsn(0x30)..Lsn(0x40),
8918 1 : vec![
8919 1 : (key0, Lsn(0x30), Value::Image(test_img("metadata key 0"))),
8920 1 : (key3, Lsn(0x30), Value::Image(test_img("metadata key 3"))),
8921 1 : ],
8922 1 : ),
8923 1 : ],
8924 1 : // image layers
8925 1 : vec![(Lsn(0x10), vec![(key1, test_img("metadata key 1"))])],
8926 1 : Lsn(0x40),
8927 1 : )
8928 1 : .await
8929 1 : .unwrap();
8930 1 :
8931 1 : let cancel = CancellationToken::new();
8932 1 :
8933 1 : // Image layer creation happens on the disk_consistent_lsn so we need to force set it now.
8934 1 : tline.force_set_disk_consistent_lsn(Lsn(0x40));
8935 1 : tline
8936 1 : .compact(
8937 1 : &cancel,
8938 1 : {
8939 1 : let mut flags = EnumSet::new();
8940 1 : flags.insert(CompactFlags::ForceImageLayerCreation);
8941 1 : flags.insert(CompactFlags::ForceRepartition);
8942 1 : flags
8943 1 : },
8944 1 : &ctx,
8945 1 : )
8946 1 : .await
8947 1 : .unwrap();
8948 1 : // Image layers are created at repartition LSN
8949 1 : let images = tline
8950 1 : .inspect_image_layers(Lsn(0x40), &ctx, io_concurrency.clone())
8951 1 : .await
8952 1 : .unwrap()
8953 1 : .into_iter()
8954 9 : .filter(|(k, _)| k.is_metadata_key())
8955 1 : .collect::<Vec<_>>();
8956 1 : assert_eq!(images.len(), 2); // the image layer should only contain two existing keys, tombstones should be removed.
8957 1 : }
8958 :
8959 : #[tokio::test]
8960 1 : async fn test_metadata_tombstone_empty_image_creation() {
8961 1 : let harness = TenantHarness::create("test_metadata_tombstone_empty_image_creation")
8962 1 : .await
8963 1 : .unwrap();
8964 1 : let (tenant, ctx) = harness.load().await;
8965 1 : let io_concurrency = IoConcurrency::spawn_for_test();
8966 1 :
8967 1 : let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap();
8968 1 : let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap();
8969 1 :
8970 1 : let tline = tenant
8971 1 : .create_test_timeline_with_layers(
8972 1 : TIMELINE_ID,
8973 1 : Lsn(0x10),
8974 1 : DEFAULT_PG_VERSION,
8975 1 : &ctx,
8976 1 : Vec::new(), // in-memory layers
8977 1 : // delta layers
8978 1 : vec![
8979 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8980 1 : Lsn(0x10)..Lsn(0x20),
8981 1 : vec![(key2, Lsn(0x10), Value::Image(test_img("metadata key 2")))],
8982 1 : ),
8983 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8984 1 : Lsn(0x20)..Lsn(0x30),
8985 1 : vec![(key1, Lsn(0x20), Value::Image(Bytes::new()))],
8986 1 : ),
8987 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8988 1 : Lsn(0x20)..Lsn(0x30),
8989 1 : vec![(key2, Lsn(0x20), Value::Image(Bytes::new()))],
8990 1 : ),
8991 1 : ],
8992 1 : // image layers
8993 1 : vec![(Lsn(0x10), vec![(key1, test_img("metadata key 1"))])],
8994 1 : Lsn(0x30),
8995 1 : )
8996 1 : .await
8997 1 : .unwrap();
8998 1 :
8999 1 : let cancel = CancellationToken::new();
9000 1 :
9001 1 : tline
9002 1 : .compact(
9003 1 : &cancel,
9004 1 : {
9005 1 : let mut flags = EnumSet::new();
9006 1 : flags.insert(CompactFlags::ForceImageLayerCreation);
9007 1 : flags.insert(CompactFlags::ForceRepartition);
9008 1 : flags
9009 1 : },
9010 1 : &ctx,
9011 1 : )
9012 1 : .await
9013 1 : .unwrap();
9014 1 :
9015 1 : // Image layers are created at last_record_lsn
9016 1 : let images = tline
9017 1 : .inspect_image_layers(Lsn(0x30), &ctx, io_concurrency.clone())
9018 1 : .await
9019 1 : .unwrap()
9020 1 : .into_iter()
9021 7 : .filter(|(k, _)| k.is_metadata_key())
9022 1 : .collect::<Vec<_>>();
9023 1 : assert_eq!(images.len(), 0); // the image layer should not contain tombstones, or it is not created
9024 1 : }
9025 :
9026 : #[tokio::test]
9027 1 : async fn test_simple_bottom_most_compaction_images() -> anyhow::Result<()> {
9028 1 : let harness = TenantHarness::create("test_simple_bottom_most_compaction_images").await?;
9029 1 : let (tenant, ctx) = harness.load().await;
9030 1 : let io_concurrency = IoConcurrency::spawn_for_test();
9031 1 :
9032 51 : fn get_key(id: u32) -> Key {
9033 51 : // using aux key here b/c they are guaranteed to be inside `collect_keyspace`.
9034 51 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
9035 51 : key.field6 = id;
9036 51 : key
9037 51 : }
9038 1 :
9039 1 : // We create
9040 1 : // - one bottom-most image layer,
9041 1 : // - a delta layer D1 crossing the GC horizon with data below and above the horizon,
9042 1 : // - a delta layer D2 crossing the GC horizon with data only below the horizon,
9043 1 : // - a delta layer D3 above the horizon.
9044 1 : //
9045 1 : // | D3 |
9046 1 : // | D1 |
9047 1 : // -| |-- gc horizon -----------------
9048 1 : // | | | D2 |
9049 1 : // --------- img layer ------------------
9050 1 : //
9051 1 : // What we should expact from this compaction is:
9052 1 : // | D3 |
9053 1 : // | Part of D1 |
9054 1 : // --------- img layer with D1+D2 at GC horizon------------------
9055 1 :
9056 1 : // img layer at 0x10
9057 1 : let img_layer = (0..10)
9058 10 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
9059 1 : .collect_vec();
9060 1 :
9061 1 : let delta1 = vec![
9062 1 : (
9063 1 : get_key(1),
9064 1 : Lsn(0x20),
9065 1 : Value::Image(Bytes::from("value 1@0x20")),
9066 1 : ),
9067 1 : (
9068 1 : get_key(2),
9069 1 : Lsn(0x30),
9070 1 : Value::Image(Bytes::from("value 2@0x30")),
9071 1 : ),
9072 1 : (
9073 1 : get_key(3),
9074 1 : Lsn(0x40),
9075 1 : Value::Image(Bytes::from("value 3@0x40")),
9076 1 : ),
9077 1 : ];
9078 1 : let delta2 = vec![
9079 1 : (
9080 1 : get_key(5),
9081 1 : Lsn(0x20),
9082 1 : Value::Image(Bytes::from("value 5@0x20")),
9083 1 : ),
9084 1 : (
9085 1 : get_key(6),
9086 1 : Lsn(0x20),
9087 1 : Value::Image(Bytes::from("value 6@0x20")),
9088 1 : ),
9089 1 : ];
9090 1 : let delta3 = vec![
9091 1 : (
9092 1 : get_key(8),
9093 1 : Lsn(0x48),
9094 1 : Value::Image(Bytes::from("value 8@0x48")),
9095 1 : ),
9096 1 : (
9097 1 : get_key(9),
9098 1 : Lsn(0x48),
9099 1 : Value::Image(Bytes::from("value 9@0x48")),
9100 1 : ),
9101 1 : ];
9102 1 :
9103 1 : let tline = tenant
9104 1 : .create_test_timeline_with_layers(
9105 1 : TIMELINE_ID,
9106 1 : Lsn(0x10),
9107 1 : DEFAULT_PG_VERSION,
9108 1 : &ctx,
9109 1 : Vec::new(), // in-memory layers
9110 1 : vec![
9111 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x20)..Lsn(0x48), delta1),
9112 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x20)..Lsn(0x48), delta2),
9113 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x48)..Lsn(0x50), delta3),
9114 1 : ], // delta layers
9115 1 : vec![(Lsn(0x10), img_layer)], // image layers
9116 1 : Lsn(0x50),
9117 1 : )
9118 1 : .await?;
9119 1 : {
9120 1 : tline
9121 1 : .applied_gc_cutoff_lsn
9122 1 : .lock_for_write()
9123 1 : .store_and_unlock(Lsn(0x30))
9124 1 : .wait()
9125 1 : .await;
9126 1 : // Update GC info
9127 1 : let mut guard = tline.gc_info.write().unwrap();
9128 1 : guard.cutoffs.time = Some(Lsn(0x30));
9129 1 : guard.cutoffs.space = Lsn(0x30);
9130 1 : }
9131 1 :
9132 1 : let expected_result = [
9133 1 : Bytes::from_static(b"value 0@0x10"),
9134 1 : Bytes::from_static(b"value 1@0x20"),
9135 1 : Bytes::from_static(b"value 2@0x30"),
9136 1 : Bytes::from_static(b"value 3@0x40"),
9137 1 : Bytes::from_static(b"value 4@0x10"),
9138 1 : Bytes::from_static(b"value 5@0x20"),
9139 1 : Bytes::from_static(b"value 6@0x20"),
9140 1 : Bytes::from_static(b"value 7@0x10"),
9141 1 : Bytes::from_static(b"value 8@0x48"),
9142 1 : Bytes::from_static(b"value 9@0x48"),
9143 1 : ];
9144 1 :
9145 10 : for (idx, expected) in expected_result.iter().enumerate() {
9146 10 : assert_eq!(
9147 10 : tline
9148 10 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
9149 10 : .await
9150 10 : .unwrap(),
9151 1 : expected
9152 1 : );
9153 1 : }
9154 1 :
9155 1 : let cancel = CancellationToken::new();
9156 1 : tline
9157 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
9158 1 : .await
9159 1 : .unwrap();
9160 1 :
9161 10 : for (idx, expected) in expected_result.iter().enumerate() {
9162 10 : assert_eq!(
9163 10 : tline
9164 10 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
9165 10 : .await
9166 10 : .unwrap(),
9167 1 : expected
9168 1 : );
9169 1 : }
9170 1 :
9171 1 : // Check if the image layer at the GC horizon contains exactly what we want
9172 1 : let image_at_gc_horizon = tline
9173 1 : .inspect_image_layers(Lsn(0x30), &ctx, io_concurrency.clone())
9174 1 : .await
9175 1 : .unwrap()
9176 1 : .into_iter()
9177 17 : .filter(|(k, _)| k.is_metadata_key())
9178 1 : .collect::<Vec<_>>();
9179 1 :
9180 1 : assert_eq!(image_at_gc_horizon.len(), 10);
9181 1 : let expected_result = [
9182 1 : Bytes::from_static(b"value 0@0x10"),
9183 1 : Bytes::from_static(b"value 1@0x20"),
9184 1 : Bytes::from_static(b"value 2@0x30"),
9185 1 : Bytes::from_static(b"value 3@0x10"),
9186 1 : Bytes::from_static(b"value 4@0x10"),
9187 1 : Bytes::from_static(b"value 5@0x20"),
9188 1 : Bytes::from_static(b"value 6@0x20"),
9189 1 : Bytes::from_static(b"value 7@0x10"),
9190 1 : Bytes::from_static(b"value 8@0x10"),
9191 1 : Bytes::from_static(b"value 9@0x10"),
9192 1 : ];
9193 11 : for idx in 0..10 {
9194 10 : assert_eq!(
9195 10 : image_at_gc_horizon[idx],
9196 10 : (get_key(idx as u32), expected_result[idx].clone())
9197 10 : );
9198 1 : }
9199 1 :
9200 1 : // Check if old layers are removed / new layers have the expected LSN
9201 1 : let all_layers = inspect_and_sort(&tline, None).await;
9202 1 : assert_eq!(
9203 1 : all_layers,
9204 1 : vec![
9205 1 : // Image layer at GC horizon
9206 1 : PersistentLayerKey {
9207 1 : key_range: Key::MIN..Key::MAX,
9208 1 : lsn_range: Lsn(0x30)..Lsn(0x31),
9209 1 : is_delta: false
9210 1 : },
9211 1 : // The delta layer below the horizon
9212 1 : PersistentLayerKey {
9213 1 : key_range: get_key(3)..get_key(4),
9214 1 : lsn_range: Lsn(0x30)..Lsn(0x48),
9215 1 : is_delta: true
9216 1 : },
9217 1 : // The delta3 layer that should not be picked for the compaction
9218 1 : PersistentLayerKey {
9219 1 : key_range: get_key(8)..get_key(10),
9220 1 : lsn_range: Lsn(0x48)..Lsn(0x50),
9221 1 : is_delta: true
9222 1 : }
9223 1 : ]
9224 1 : );
9225 1 :
9226 1 : // increase GC horizon and compact again
9227 1 : {
9228 1 : tline
9229 1 : .applied_gc_cutoff_lsn
9230 1 : .lock_for_write()
9231 1 : .store_and_unlock(Lsn(0x40))
9232 1 : .wait()
9233 1 : .await;
9234 1 : // Update GC info
9235 1 : let mut guard = tline.gc_info.write().unwrap();
9236 1 : guard.cutoffs.time = Some(Lsn(0x40));
9237 1 : guard.cutoffs.space = Lsn(0x40);
9238 1 : }
9239 1 : tline
9240 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
9241 1 : .await
9242 1 : .unwrap();
9243 1 :
9244 1 : Ok(())
9245 1 : }
9246 :
9247 : #[cfg(feature = "testing")]
9248 : #[tokio::test]
9249 1 : async fn test_neon_test_record() -> anyhow::Result<()> {
9250 1 : let harness = TenantHarness::create("test_neon_test_record").await?;
9251 1 : let (tenant, ctx) = harness.load().await;
9252 1 :
9253 17 : fn get_key(id: u32) -> Key {
9254 17 : // using aux key here b/c they are guaranteed to be inside `collect_keyspace`.
9255 17 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
9256 17 : key.field6 = id;
9257 17 : key
9258 17 : }
9259 1 :
9260 1 : let delta1 = vec![
9261 1 : (
9262 1 : get_key(1),
9263 1 : Lsn(0x20),
9264 1 : Value::WalRecord(NeonWalRecord::wal_append(",0x20")),
9265 1 : ),
9266 1 : (
9267 1 : get_key(1),
9268 1 : Lsn(0x30),
9269 1 : Value::WalRecord(NeonWalRecord::wal_append(",0x30")),
9270 1 : ),
9271 1 : (get_key(2), Lsn(0x10), Value::Image("0x10".into())),
9272 1 : (
9273 1 : get_key(2),
9274 1 : Lsn(0x20),
9275 1 : Value::WalRecord(NeonWalRecord::wal_append(",0x20")),
9276 1 : ),
9277 1 : (
9278 1 : get_key(2),
9279 1 : Lsn(0x30),
9280 1 : Value::WalRecord(NeonWalRecord::wal_append(",0x30")),
9281 1 : ),
9282 1 : (get_key(3), Lsn(0x10), Value::Image("0x10".into())),
9283 1 : (
9284 1 : get_key(3),
9285 1 : Lsn(0x20),
9286 1 : Value::WalRecord(NeonWalRecord::wal_clear("c")),
9287 1 : ),
9288 1 : (get_key(4), Lsn(0x10), Value::Image("0x10".into())),
9289 1 : (
9290 1 : get_key(4),
9291 1 : Lsn(0x20),
9292 1 : Value::WalRecord(NeonWalRecord::wal_init("i")),
9293 1 : ),
9294 1 : (
9295 1 : get_key(4),
9296 1 : Lsn(0x30),
9297 1 : Value::WalRecord(NeonWalRecord::wal_append_conditional("j", "i")),
9298 1 : ),
9299 1 : (
9300 1 : get_key(5),
9301 1 : Lsn(0x20),
9302 1 : Value::WalRecord(NeonWalRecord::wal_init("1")),
9303 1 : ),
9304 1 : (
9305 1 : get_key(5),
9306 1 : Lsn(0x30),
9307 1 : Value::WalRecord(NeonWalRecord::wal_append_conditional("j", "2")),
9308 1 : ),
9309 1 : ];
9310 1 : let image1 = vec![(get_key(1), "0x10".into())];
9311 1 :
9312 1 : let tline = tenant
9313 1 : .create_test_timeline_with_layers(
9314 1 : TIMELINE_ID,
9315 1 : Lsn(0x10),
9316 1 : DEFAULT_PG_VERSION,
9317 1 : &ctx,
9318 1 : Vec::new(), // in-memory layers
9319 1 : vec![DeltaLayerTestDesc::new_with_inferred_key_range(
9320 1 : Lsn(0x10)..Lsn(0x40),
9321 1 : delta1,
9322 1 : )], // delta layers
9323 1 : vec![(Lsn(0x10), image1)], // image layers
9324 1 : Lsn(0x50),
9325 1 : )
9326 1 : .await?;
9327 1 :
9328 1 : assert_eq!(
9329 1 : tline.get(get_key(1), Lsn(0x50), &ctx).await?,
9330 1 : Bytes::from_static(b"0x10,0x20,0x30")
9331 1 : );
9332 1 : assert_eq!(
9333 1 : tline.get(get_key(2), Lsn(0x50), &ctx).await?,
9334 1 : Bytes::from_static(b"0x10,0x20,0x30")
9335 1 : );
9336 1 :
9337 1 : // Need to remove the limit of "Neon WAL redo requires base image".
9338 1 :
9339 1 : assert_eq!(
9340 1 : tline.get(get_key(3), Lsn(0x50), &ctx).await?,
9341 1 : Bytes::from_static(b"c")
9342 1 : );
9343 1 : assert_eq!(
9344 1 : tline.get(get_key(4), Lsn(0x50), &ctx).await?,
9345 1 : Bytes::from_static(b"ij")
9346 1 : );
9347 1 :
9348 1 : // Manual testing required: currently, read errors will panic the process in debug mode. So we
9349 1 : // cannot enable this assertion in the unit test.
9350 1 : // assert!(tline.get(get_key(5), Lsn(0x50), &ctx).await.is_err());
9351 1 :
9352 1 : Ok(())
9353 1 : }
9354 :
9355 : #[tokio::test(start_paused = true)]
9356 1 : async fn test_lsn_lease() -> anyhow::Result<()> {
9357 1 : let (tenant, ctx) = TenantHarness::create("test_lsn_lease")
9358 1 : .await
9359 1 : .unwrap()
9360 1 : .load()
9361 1 : .await;
9362 1 : // Advance to the lsn lease deadline so that GC is not blocked by
9363 1 : // initial transition into AttachedSingle.
9364 1 : tokio::time::advance(tenant.get_lsn_lease_length()).await;
9365 1 : tokio::time::resume();
9366 1 : let key = Key::from_hex("010000000033333333444444445500000000").unwrap();
9367 1 :
9368 1 : let end_lsn = Lsn(0x100);
9369 1 : let image_layers = (0x20..=0x90)
9370 1 : .step_by(0x10)
9371 8 : .map(|n| {
9372 8 : (
9373 8 : Lsn(n),
9374 8 : vec![(key, test_img(&format!("data key at {:x}", n)))],
9375 8 : )
9376 8 : })
9377 1 : .collect();
9378 1 :
9379 1 : let timeline = tenant
9380 1 : .create_test_timeline_with_layers(
9381 1 : TIMELINE_ID,
9382 1 : Lsn(0x10),
9383 1 : DEFAULT_PG_VERSION,
9384 1 : &ctx,
9385 1 : Vec::new(), // in-memory layers
9386 1 : Vec::new(),
9387 1 : image_layers,
9388 1 : end_lsn,
9389 1 : )
9390 1 : .await?;
9391 1 :
9392 1 : let leased_lsns = [0x30, 0x50, 0x70];
9393 1 : let mut leases = Vec::new();
9394 3 : leased_lsns.iter().for_each(|n| {
9395 3 : leases.push(
9396 3 : timeline
9397 3 : .init_lsn_lease(Lsn(*n), timeline.get_lsn_lease_length(), &ctx)
9398 3 : .expect("lease request should succeed"),
9399 3 : );
9400 3 : });
9401 1 :
9402 1 : let updated_lease_0 = timeline
9403 1 : .renew_lsn_lease(Lsn(leased_lsns[0]), Duration::from_secs(0), &ctx)
9404 1 : .expect("lease renewal should succeed");
9405 1 : assert_eq!(
9406 1 : updated_lease_0.valid_until, leases[0].valid_until,
9407 1 : " Renewing with shorter lease should not change the lease."
9408 1 : );
9409 1 :
9410 1 : let updated_lease_1 = timeline
9411 1 : .renew_lsn_lease(
9412 1 : Lsn(leased_lsns[1]),
9413 1 : timeline.get_lsn_lease_length() * 2,
9414 1 : &ctx,
9415 1 : )
9416 1 : .expect("lease renewal should succeed");
9417 1 : assert!(
9418 1 : updated_lease_1.valid_until > leases[1].valid_until,
9419 1 : "Renewing with a long lease should renew lease with later expiration time."
9420 1 : );
9421 1 :
9422 1 : // Force set disk consistent lsn so we can get the cutoff at `end_lsn`.
9423 1 : info!(
9424 1 : "applied_gc_cutoff_lsn: {}",
9425 0 : *timeline.get_applied_gc_cutoff_lsn()
9426 1 : );
9427 1 : timeline.force_set_disk_consistent_lsn(end_lsn);
9428 1 :
9429 1 : let res = tenant
9430 1 : .gc_iteration(
9431 1 : Some(TIMELINE_ID),
9432 1 : 0,
9433 1 : Duration::ZERO,
9434 1 : &CancellationToken::new(),
9435 1 : &ctx,
9436 1 : )
9437 1 : .await
9438 1 : .unwrap();
9439 1 :
9440 1 : // Keeping everything <= Lsn(0x80) b/c leases:
9441 1 : // 0/10: initdb layer
9442 1 : // (0/20..=0/70).step_by(0x10): image layers added when creating the timeline.
9443 1 : assert_eq!(res.layers_needed_by_leases, 7);
9444 1 : // Keeping 0/90 b/c it is the latest layer.
9445 1 : assert_eq!(res.layers_not_updated, 1);
9446 1 : // Removed 0/80.
9447 1 : assert_eq!(res.layers_removed, 1);
9448 1 :
9449 1 : // Make lease on a already GC-ed LSN.
9450 1 : // 0/80 does not have a valid lease + is below latest_gc_cutoff
9451 1 : assert!(Lsn(0x80) < *timeline.get_applied_gc_cutoff_lsn());
9452 1 : timeline
9453 1 : .init_lsn_lease(Lsn(0x80), timeline.get_lsn_lease_length(), &ctx)
9454 1 : .expect_err("lease request on GC-ed LSN should fail");
9455 1 :
9456 1 : // Should still be able to renew a currently valid lease
9457 1 : // Assumption: original lease to is still valid for 0/50.
9458 1 : // (use `Timeline::init_lsn_lease` for testing so it always does validation)
9459 1 : timeline
9460 1 : .init_lsn_lease(Lsn(leased_lsns[1]), timeline.get_lsn_lease_length(), &ctx)
9461 1 : .expect("lease renewal with validation should succeed");
9462 1 :
9463 1 : Ok(())
9464 1 : }
9465 :
9466 : #[tokio::test]
9467 1 : async fn test_failed_flush_should_not_update_disk_consistent_lsn() -> anyhow::Result<()> {
9468 1 : //
9469 1 : // Setup
9470 1 : //
9471 1 : let harness = TenantHarness::create_custom(
9472 1 : "test_failed_flush_should_not_upload_disk_consistent_lsn",
9473 1 : pageserver_api::models::TenantConfig::default(),
9474 1 : TenantId::generate(),
9475 1 : ShardIdentity::new(ShardNumber(0), ShardCount(4), ShardStripeSize(128)).unwrap(),
9476 1 : Generation::new(1),
9477 1 : )
9478 1 : .await?;
9479 1 : let (tenant, ctx) = harness.load().await;
9480 1 :
9481 1 : let timeline = tenant
9482 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
9483 1 : .await?;
9484 1 : assert_eq!(timeline.get_shard_identity().count, ShardCount(4));
9485 1 : let mut writer = timeline.writer().await;
9486 1 : writer
9487 1 : .put(
9488 1 : *TEST_KEY,
9489 1 : Lsn(0x20),
9490 1 : &Value::Image(test_img("foo at 0x20")),
9491 1 : &ctx,
9492 1 : )
9493 1 : .await?;
9494 1 : writer.finish_write(Lsn(0x20));
9495 1 : drop(writer);
9496 1 : timeline.freeze_and_flush().await.unwrap();
9497 1 :
9498 1 : timeline.remote_client.wait_completion().await.unwrap();
9499 1 : let disk_consistent_lsn = timeline.get_disk_consistent_lsn();
9500 1 : let remote_consistent_lsn = timeline.get_remote_consistent_lsn_projected();
9501 1 : assert_eq!(Some(disk_consistent_lsn), remote_consistent_lsn);
9502 1 :
9503 1 : //
9504 1 : // Test
9505 1 : //
9506 1 :
9507 1 : let mut writer = timeline.writer().await;
9508 1 : writer
9509 1 : .put(
9510 1 : *TEST_KEY,
9511 1 : Lsn(0x30),
9512 1 : &Value::Image(test_img("foo at 0x30")),
9513 1 : &ctx,
9514 1 : )
9515 1 : .await?;
9516 1 : writer.finish_write(Lsn(0x30));
9517 1 : drop(writer);
9518 1 :
9519 1 : fail::cfg(
9520 1 : "flush-layer-before-update-remote-consistent-lsn",
9521 1 : "return()",
9522 1 : )
9523 1 : .unwrap();
9524 1 :
9525 1 : let flush_res = timeline.freeze_and_flush().await;
9526 1 : // if flush failed, the disk/remote consistent LSN should not be updated
9527 1 : assert!(flush_res.is_err());
9528 1 : assert_eq!(disk_consistent_lsn, timeline.get_disk_consistent_lsn());
9529 1 : assert_eq!(
9530 1 : remote_consistent_lsn,
9531 1 : timeline.get_remote_consistent_lsn_projected()
9532 1 : );
9533 1 :
9534 1 : Ok(())
9535 1 : }
9536 :
9537 : #[cfg(feature = "testing")]
9538 : #[tokio::test]
9539 1 : async fn test_simple_bottom_most_compaction_deltas_1() -> anyhow::Result<()> {
9540 1 : test_simple_bottom_most_compaction_deltas_helper(
9541 1 : "test_simple_bottom_most_compaction_deltas_1",
9542 1 : false,
9543 1 : )
9544 1 : .await
9545 1 : }
9546 :
9547 : #[cfg(feature = "testing")]
9548 : #[tokio::test]
9549 1 : async fn test_simple_bottom_most_compaction_deltas_2() -> anyhow::Result<()> {
9550 1 : test_simple_bottom_most_compaction_deltas_helper(
9551 1 : "test_simple_bottom_most_compaction_deltas_2",
9552 1 : true,
9553 1 : )
9554 1 : .await
9555 1 : }
9556 :
9557 : #[cfg(feature = "testing")]
9558 2 : async fn test_simple_bottom_most_compaction_deltas_helper(
9559 2 : test_name: &'static str,
9560 2 : use_delta_bottom_layer: bool,
9561 2 : ) -> anyhow::Result<()> {
9562 2 : let harness = TenantHarness::create(test_name).await?;
9563 2 : let (tenant, ctx) = harness.load().await;
9564 :
9565 138 : fn get_key(id: u32) -> Key {
9566 138 : // using aux key here b/c they are guaranteed to be inside `collect_keyspace`.
9567 138 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
9568 138 : key.field6 = id;
9569 138 : key
9570 138 : }
9571 :
9572 : // We create
9573 : // - one bottom-most image layer,
9574 : // - a delta layer D1 crossing the GC horizon with data below and above the horizon,
9575 : // - a delta layer D2 crossing the GC horizon with data only below the horizon,
9576 : // - a delta layer D3 above the horizon.
9577 : //
9578 : // | D3 |
9579 : // | D1 |
9580 : // -| |-- gc horizon -----------------
9581 : // | | | D2 |
9582 : // --------- img layer ------------------
9583 : //
9584 : // What we should expact from this compaction is:
9585 : // | D3 |
9586 : // | Part of D1 |
9587 : // --------- img layer with D1+D2 at GC horizon------------------
9588 :
9589 : // img layer at 0x10
9590 2 : let img_layer = (0..10)
9591 20 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
9592 2 : .collect_vec();
9593 2 : // or, delta layer at 0x10 if `use_delta_bottom_layer` is true
9594 2 : let delta4 = (0..10)
9595 20 : .map(|id| {
9596 20 : (
9597 20 : get_key(id),
9598 20 : Lsn(0x08),
9599 20 : Value::WalRecord(NeonWalRecord::wal_init(format!("value {id}@0x10"))),
9600 20 : )
9601 20 : })
9602 2 : .collect_vec();
9603 2 :
9604 2 : let delta1 = vec![
9605 2 : (
9606 2 : get_key(1),
9607 2 : Lsn(0x20),
9608 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
9609 2 : ),
9610 2 : (
9611 2 : get_key(2),
9612 2 : Lsn(0x30),
9613 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
9614 2 : ),
9615 2 : (
9616 2 : get_key(3),
9617 2 : Lsn(0x28),
9618 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x28")),
9619 2 : ),
9620 2 : (
9621 2 : get_key(3),
9622 2 : Lsn(0x30),
9623 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
9624 2 : ),
9625 2 : (
9626 2 : get_key(3),
9627 2 : Lsn(0x40),
9628 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x40")),
9629 2 : ),
9630 2 : ];
9631 2 : let delta2 = vec![
9632 2 : (
9633 2 : get_key(5),
9634 2 : Lsn(0x20),
9635 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
9636 2 : ),
9637 2 : (
9638 2 : get_key(6),
9639 2 : Lsn(0x20),
9640 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
9641 2 : ),
9642 2 : ];
9643 2 : let delta3 = vec![
9644 2 : (
9645 2 : get_key(8),
9646 2 : Lsn(0x48),
9647 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
9648 2 : ),
9649 2 : (
9650 2 : get_key(9),
9651 2 : Lsn(0x48),
9652 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
9653 2 : ),
9654 2 : ];
9655 :
9656 2 : let tline = if use_delta_bottom_layer {
9657 1 : tenant
9658 1 : .create_test_timeline_with_layers(
9659 1 : TIMELINE_ID,
9660 1 : Lsn(0x08),
9661 1 : DEFAULT_PG_VERSION,
9662 1 : &ctx,
9663 1 : Vec::new(), // in-memory layers
9664 1 : vec![
9665 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
9666 1 : Lsn(0x08)..Lsn(0x10),
9667 1 : delta4,
9668 1 : ),
9669 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
9670 1 : Lsn(0x20)..Lsn(0x48),
9671 1 : delta1,
9672 1 : ),
9673 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
9674 1 : Lsn(0x20)..Lsn(0x48),
9675 1 : delta2,
9676 1 : ),
9677 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
9678 1 : Lsn(0x48)..Lsn(0x50),
9679 1 : delta3,
9680 1 : ),
9681 1 : ], // delta layers
9682 1 : vec![], // image layers
9683 1 : Lsn(0x50),
9684 1 : )
9685 1 : .await?
9686 : } else {
9687 1 : tenant
9688 1 : .create_test_timeline_with_layers(
9689 1 : TIMELINE_ID,
9690 1 : Lsn(0x10),
9691 1 : DEFAULT_PG_VERSION,
9692 1 : &ctx,
9693 1 : Vec::new(), // in-memory layers
9694 1 : vec![
9695 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
9696 1 : Lsn(0x10)..Lsn(0x48),
9697 1 : delta1,
9698 1 : ),
9699 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
9700 1 : Lsn(0x10)..Lsn(0x48),
9701 1 : delta2,
9702 1 : ),
9703 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
9704 1 : Lsn(0x48)..Lsn(0x50),
9705 1 : delta3,
9706 1 : ),
9707 1 : ], // delta layers
9708 1 : vec![(Lsn(0x10), img_layer)], // image layers
9709 1 : Lsn(0x50),
9710 1 : )
9711 1 : .await?
9712 : };
9713 : {
9714 2 : tline
9715 2 : .applied_gc_cutoff_lsn
9716 2 : .lock_for_write()
9717 2 : .store_and_unlock(Lsn(0x30))
9718 2 : .wait()
9719 2 : .await;
9720 : // Update GC info
9721 2 : let mut guard = tline.gc_info.write().unwrap();
9722 2 : *guard = GcInfo {
9723 2 : retain_lsns: vec![],
9724 2 : cutoffs: GcCutoffs {
9725 2 : time: Some(Lsn(0x30)),
9726 2 : space: Lsn(0x30),
9727 2 : },
9728 2 : leases: Default::default(),
9729 2 : within_ancestor_pitr: false,
9730 2 : };
9731 2 : }
9732 2 :
9733 2 : let expected_result = [
9734 2 : Bytes::from_static(b"value 0@0x10"),
9735 2 : Bytes::from_static(b"value 1@0x10@0x20"),
9736 2 : Bytes::from_static(b"value 2@0x10@0x30"),
9737 2 : Bytes::from_static(b"value 3@0x10@0x28@0x30@0x40"),
9738 2 : Bytes::from_static(b"value 4@0x10"),
9739 2 : Bytes::from_static(b"value 5@0x10@0x20"),
9740 2 : Bytes::from_static(b"value 6@0x10@0x20"),
9741 2 : Bytes::from_static(b"value 7@0x10"),
9742 2 : Bytes::from_static(b"value 8@0x10@0x48"),
9743 2 : Bytes::from_static(b"value 9@0x10@0x48"),
9744 2 : ];
9745 2 :
9746 2 : let expected_result_at_gc_horizon = [
9747 2 : Bytes::from_static(b"value 0@0x10"),
9748 2 : Bytes::from_static(b"value 1@0x10@0x20"),
9749 2 : Bytes::from_static(b"value 2@0x10@0x30"),
9750 2 : Bytes::from_static(b"value 3@0x10@0x28@0x30"),
9751 2 : Bytes::from_static(b"value 4@0x10"),
9752 2 : Bytes::from_static(b"value 5@0x10@0x20"),
9753 2 : Bytes::from_static(b"value 6@0x10@0x20"),
9754 2 : Bytes::from_static(b"value 7@0x10"),
9755 2 : Bytes::from_static(b"value 8@0x10"),
9756 2 : Bytes::from_static(b"value 9@0x10"),
9757 2 : ];
9758 :
9759 22 : for idx in 0..10 {
9760 20 : assert_eq!(
9761 20 : tline
9762 20 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
9763 20 : .await
9764 20 : .unwrap(),
9765 20 : &expected_result[idx]
9766 : );
9767 20 : assert_eq!(
9768 20 : tline
9769 20 : .get(get_key(idx as u32), Lsn(0x30), &ctx)
9770 20 : .await
9771 20 : .unwrap(),
9772 20 : &expected_result_at_gc_horizon[idx]
9773 : );
9774 : }
9775 :
9776 2 : let cancel = CancellationToken::new();
9777 2 : tline
9778 2 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
9779 2 : .await
9780 2 : .unwrap();
9781 :
9782 22 : for idx in 0..10 {
9783 20 : assert_eq!(
9784 20 : tline
9785 20 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
9786 20 : .await
9787 20 : .unwrap(),
9788 20 : &expected_result[idx]
9789 : );
9790 20 : assert_eq!(
9791 20 : tline
9792 20 : .get(get_key(idx as u32), Lsn(0x30), &ctx)
9793 20 : .await
9794 20 : .unwrap(),
9795 20 : &expected_result_at_gc_horizon[idx]
9796 : );
9797 : }
9798 :
9799 : // increase GC horizon and compact again
9800 : {
9801 2 : tline
9802 2 : .applied_gc_cutoff_lsn
9803 2 : .lock_for_write()
9804 2 : .store_and_unlock(Lsn(0x40))
9805 2 : .wait()
9806 2 : .await;
9807 : // Update GC info
9808 2 : let mut guard = tline.gc_info.write().unwrap();
9809 2 : guard.cutoffs.time = Some(Lsn(0x40));
9810 2 : guard.cutoffs.space = Lsn(0x40);
9811 2 : }
9812 2 : tline
9813 2 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
9814 2 : .await
9815 2 : .unwrap();
9816 2 :
9817 2 : Ok(())
9818 2 : }
9819 :
9820 : #[cfg(feature = "testing")]
9821 : #[tokio::test]
9822 1 : async fn test_generate_key_retention() -> anyhow::Result<()> {
9823 1 : let harness = TenantHarness::create("test_generate_key_retention").await?;
9824 1 : let (tenant, ctx) = harness.load().await;
9825 1 : let tline = tenant
9826 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
9827 1 : .await?;
9828 1 : tline.force_advance_lsn(Lsn(0x70));
9829 1 : let key = Key::from_hex("010000000033333333444444445500000000").unwrap();
9830 1 : let history = vec![
9831 1 : (
9832 1 : key,
9833 1 : Lsn(0x10),
9834 1 : Value::WalRecord(NeonWalRecord::wal_init("0x10")),
9835 1 : ),
9836 1 : (
9837 1 : key,
9838 1 : Lsn(0x20),
9839 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x20")),
9840 1 : ),
9841 1 : (
9842 1 : key,
9843 1 : Lsn(0x30),
9844 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x30")),
9845 1 : ),
9846 1 : (
9847 1 : key,
9848 1 : Lsn(0x40),
9849 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x40")),
9850 1 : ),
9851 1 : (
9852 1 : key,
9853 1 : Lsn(0x50),
9854 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x50")),
9855 1 : ),
9856 1 : (
9857 1 : key,
9858 1 : Lsn(0x60),
9859 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x60")),
9860 1 : ),
9861 1 : (
9862 1 : key,
9863 1 : Lsn(0x70),
9864 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x70")),
9865 1 : ),
9866 1 : (
9867 1 : key,
9868 1 : Lsn(0x80),
9869 1 : Value::Image(Bytes::copy_from_slice(
9870 1 : b"0x10;0x20;0x30;0x40;0x50;0x60;0x70;0x80",
9871 1 : )),
9872 1 : ),
9873 1 : (
9874 1 : key,
9875 1 : Lsn(0x90),
9876 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x90")),
9877 1 : ),
9878 1 : ];
9879 1 : let res = tline
9880 1 : .generate_key_retention(
9881 1 : key,
9882 1 : &history,
9883 1 : Lsn(0x60),
9884 1 : &[Lsn(0x20), Lsn(0x40), Lsn(0x50)],
9885 1 : 3,
9886 1 : None,
9887 1 : true,
9888 1 : )
9889 1 : .await
9890 1 : .unwrap();
9891 1 : let expected_res = KeyHistoryRetention {
9892 1 : below_horizon: vec![
9893 1 : (
9894 1 : Lsn(0x20),
9895 1 : KeyLogAtLsn(vec![(
9896 1 : Lsn(0x20),
9897 1 : Value::Image(Bytes::from_static(b"0x10;0x20")),
9898 1 : )]),
9899 1 : ),
9900 1 : (
9901 1 : Lsn(0x40),
9902 1 : KeyLogAtLsn(vec![
9903 1 : (
9904 1 : Lsn(0x30),
9905 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x30")),
9906 1 : ),
9907 1 : (
9908 1 : Lsn(0x40),
9909 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x40")),
9910 1 : ),
9911 1 : ]),
9912 1 : ),
9913 1 : (
9914 1 : Lsn(0x50),
9915 1 : KeyLogAtLsn(vec![(
9916 1 : Lsn(0x50),
9917 1 : Value::Image(Bytes::copy_from_slice(b"0x10;0x20;0x30;0x40;0x50")),
9918 1 : )]),
9919 1 : ),
9920 1 : (
9921 1 : Lsn(0x60),
9922 1 : KeyLogAtLsn(vec![(
9923 1 : Lsn(0x60),
9924 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x60")),
9925 1 : )]),
9926 1 : ),
9927 1 : ],
9928 1 : above_horizon: KeyLogAtLsn(vec![
9929 1 : (
9930 1 : Lsn(0x70),
9931 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x70")),
9932 1 : ),
9933 1 : (
9934 1 : Lsn(0x80),
9935 1 : Value::Image(Bytes::copy_from_slice(
9936 1 : b"0x10;0x20;0x30;0x40;0x50;0x60;0x70;0x80",
9937 1 : )),
9938 1 : ),
9939 1 : (
9940 1 : Lsn(0x90),
9941 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x90")),
9942 1 : ),
9943 1 : ]),
9944 1 : };
9945 1 : assert_eq!(res, expected_res);
9946 1 :
9947 1 : // We expect GC-compaction to run with the original GC. This would create a situation that
9948 1 : // the original GC algorithm removes some delta layers b/c there are full image coverage,
9949 1 : // therefore causing some keys to have an incomplete history below the lowest retain LSN.
9950 1 : // For example, we have
9951 1 : // ```plain
9952 1 : // init delta @ 0x10, image @ 0x20, delta @ 0x30 (gc_horizon), image @ 0x40.
9953 1 : // ```
9954 1 : // Now the GC horizon moves up, and we have
9955 1 : // ```plain
9956 1 : // init delta @ 0x10, image @ 0x20, delta @ 0x30, image @ 0x40 (gc_horizon)
9957 1 : // ```
9958 1 : // The original GC algorithm kicks in, and removes delta @ 0x10, image @ 0x20.
9959 1 : // We will end up with
9960 1 : // ```plain
9961 1 : // delta @ 0x30, image @ 0x40 (gc_horizon)
9962 1 : // ```
9963 1 : // Now we run the GC-compaction, and this key does not have a full history.
9964 1 : // We should be able to handle this partial history and drop everything before the
9965 1 : // gc_horizon image.
9966 1 :
9967 1 : let history = vec![
9968 1 : (
9969 1 : key,
9970 1 : Lsn(0x20),
9971 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x20")),
9972 1 : ),
9973 1 : (
9974 1 : key,
9975 1 : Lsn(0x30),
9976 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x30")),
9977 1 : ),
9978 1 : (
9979 1 : key,
9980 1 : Lsn(0x40),
9981 1 : Value::Image(Bytes::copy_from_slice(b"0x10;0x20;0x30;0x40")),
9982 1 : ),
9983 1 : (
9984 1 : key,
9985 1 : Lsn(0x50),
9986 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x50")),
9987 1 : ),
9988 1 : (
9989 1 : key,
9990 1 : Lsn(0x60),
9991 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x60")),
9992 1 : ),
9993 1 : (
9994 1 : key,
9995 1 : Lsn(0x70),
9996 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x70")),
9997 1 : ),
9998 1 : (
9999 1 : key,
10000 1 : Lsn(0x80),
10001 1 : Value::Image(Bytes::copy_from_slice(
10002 1 : b"0x10;0x20;0x30;0x40;0x50;0x60;0x70;0x80",
10003 1 : )),
10004 1 : ),
10005 1 : (
10006 1 : key,
10007 1 : Lsn(0x90),
10008 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x90")),
10009 1 : ),
10010 1 : ];
10011 1 : let res = tline
10012 1 : .generate_key_retention(
10013 1 : key,
10014 1 : &history,
10015 1 : Lsn(0x60),
10016 1 : &[Lsn(0x40), Lsn(0x50)],
10017 1 : 3,
10018 1 : None,
10019 1 : true,
10020 1 : )
10021 1 : .await
10022 1 : .unwrap();
10023 1 : let expected_res = KeyHistoryRetention {
10024 1 : below_horizon: vec![
10025 1 : (
10026 1 : Lsn(0x40),
10027 1 : KeyLogAtLsn(vec![(
10028 1 : Lsn(0x40),
10029 1 : Value::Image(Bytes::copy_from_slice(b"0x10;0x20;0x30;0x40")),
10030 1 : )]),
10031 1 : ),
10032 1 : (
10033 1 : Lsn(0x50),
10034 1 : KeyLogAtLsn(vec![(
10035 1 : Lsn(0x50),
10036 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x50")),
10037 1 : )]),
10038 1 : ),
10039 1 : (
10040 1 : Lsn(0x60),
10041 1 : KeyLogAtLsn(vec![(
10042 1 : Lsn(0x60),
10043 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x60")),
10044 1 : )]),
10045 1 : ),
10046 1 : ],
10047 1 : above_horizon: KeyLogAtLsn(vec![
10048 1 : (
10049 1 : Lsn(0x70),
10050 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x70")),
10051 1 : ),
10052 1 : (
10053 1 : Lsn(0x80),
10054 1 : Value::Image(Bytes::copy_from_slice(
10055 1 : b"0x10;0x20;0x30;0x40;0x50;0x60;0x70;0x80",
10056 1 : )),
10057 1 : ),
10058 1 : (
10059 1 : Lsn(0x90),
10060 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x90")),
10061 1 : ),
10062 1 : ]),
10063 1 : };
10064 1 : assert_eq!(res, expected_res);
10065 1 :
10066 1 : // In case of branch compaction, the branch itself does not have the full history, and we need to provide
10067 1 : // the ancestor image in the test case.
10068 1 :
10069 1 : let history = vec![
10070 1 : (
10071 1 : key,
10072 1 : Lsn(0x20),
10073 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x20")),
10074 1 : ),
10075 1 : (
10076 1 : key,
10077 1 : Lsn(0x30),
10078 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x30")),
10079 1 : ),
10080 1 : (
10081 1 : key,
10082 1 : Lsn(0x40),
10083 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x40")),
10084 1 : ),
10085 1 : (
10086 1 : key,
10087 1 : Lsn(0x70),
10088 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x70")),
10089 1 : ),
10090 1 : ];
10091 1 : let res = tline
10092 1 : .generate_key_retention(
10093 1 : key,
10094 1 : &history,
10095 1 : Lsn(0x60),
10096 1 : &[],
10097 1 : 3,
10098 1 : Some((key, Lsn(0x10), Bytes::copy_from_slice(b"0x10"))),
10099 1 : true,
10100 1 : )
10101 1 : .await
10102 1 : .unwrap();
10103 1 : let expected_res = KeyHistoryRetention {
10104 1 : below_horizon: vec![(
10105 1 : Lsn(0x60),
10106 1 : KeyLogAtLsn(vec![(
10107 1 : Lsn(0x60),
10108 1 : Value::Image(Bytes::copy_from_slice(b"0x10;0x20;0x30;0x40")), // use the ancestor image to reconstruct the page
10109 1 : )]),
10110 1 : )],
10111 1 : above_horizon: KeyLogAtLsn(vec![(
10112 1 : Lsn(0x70),
10113 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x70")),
10114 1 : )]),
10115 1 : };
10116 1 : assert_eq!(res, expected_res);
10117 1 :
10118 1 : let history = vec![
10119 1 : (
10120 1 : key,
10121 1 : Lsn(0x20),
10122 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x20")),
10123 1 : ),
10124 1 : (
10125 1 : key,
10126 1 : Lsn(0x40),
10127 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x40")),
10128 1 : ),
10129 1 : (
10130 1 : key,
10131 1 : Lsn(0x60),
10132 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x60")),
10133 1 : ),
10134 1 : (
10135 1 : key,
10136 1 : Lsn(0x70),
10137 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x70")),
10138 1 : ),
10139 1 : ];
10140 1 : let res = tline
10141 1 : .generate_key_retention(
10142 1 : key,
10143 1 : &history,
10144 1 : Lsn(0x60),
10145 1 : &[Lsn(0x30)],
10146 1 : 3,
10147 1 : Some((key, Lsn(0x10), Bytes::copy_from_slice(b"0x10"))),
10148 1 : true,
10149 1 : )
10150 1 : .await
10151 1 : .unwrap();
10152 1 : let expected_res = KeyHistoryRetention {
10153 1 : below_horizon: vec![
10154 1 : (
10155 1 : Lsn(0x30),
10156 1 : KeyLogAtLsn(vec![(
10157 1 : Lsn(0x20),
10158 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x20")),
10159 1 : )]),
10160 1 : ),
10161 1 : (
10162 1 : Lsn(0x60),
10163 1 : KeyLogAtLsn(vec![(
10164 1 : Lsn(0x60),
10165 1 : Value::Image(Bytes::copy_from_slice(b"0x10;0x20;0x40;0x60")),
10166 1 : )]),
10167 1 : ),
10168 1 : ],
10169 1 : above_horizon: KeyLogAtLsn(vec![(
10170 1 : Lsn(0x70),
10171 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x70")),
10172 1 : )]),
10173 1 : };
10174 1 : assert_eq!(res, expected_res);
10175 1 :
10176 1 : Ok(())
10177 1 : }
10178 :
10179 : #[cfg(feature = "testing")]
10180 : #[tokio::test]
10181 1 : async fn test_simple_bottom_most_compaction_with_retain_lsns() -> anyhow::Result<()> {
10182 1 : let harness =
10183 1 : TenantHarness::create("test_simple_bottom_most_compaction_with_retain_lsns").await?;
10184 1 : let (tenant, ctx) = harness.load().await;
10185 1 :
10186 259 : fn get_key(id: u32) -> Key {
10187 259 : // using aux key here b/c they are guaranteed to be inside `collect_keyspace`.
10188 259 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
10189 259 : key.field6 = id;
10190 259 : key
10191 259 : }
10192 1 :
10193 1 : let img_layer = (0..10)
10194 10 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
10195 1 : .collect_vec();
10196 1 :
10197 1 : let delta1 = vec![
10198 1 : (
10199 1 : get_key(1),
10200 1 : Lsn(0x20),
10201 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
10202 1 : ),
10203 1 : (
10204 1 : get_key(2),
10205 1 : Lsn(0x30),
10206 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
10207 1 : ),
10208 1 : (
10209 1 : get_key(3),
10210 1 : Lsn(0x28),
10211 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x28")),
10212 1 : ),
10213 1 : (
10214 1 : get_key(3),
10215 1 : Lsn(0x30),
10216 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
10217 1 : ),
10218 1 : (
10219 1 : get_key(3),
10220 1 : Lsn(0x40),
10221 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x40")),
10222 1 : ),
10223 1 : ];
10224 1 : let delta2 = vec![
10225 1 : (
10226 1 : get_key(5),
10227 1 : Lsn(0x20),
10228 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
10229 1 : ),
10230 1 : (
10231 1 : get_key(6),
10232 1 : Lsn(0x20),
10233 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
10234 1 : ),
10235 1 : ];
10236 1 : let delta3 = vec![
10237 1 : (
10238 1 : get_key(8),
10239 1 : Lsn(0x48),
10240 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
10241 1 : ),
10242 1 : (
10243 1 : get_key(9),
10244 1 : Lsn(0x48),
10245 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
10246 1 : ),
10247 1 : ];
10248 1 :
10249 1 : let tline = tenant
10250 1 : .create_test_timeline_with_layers(
10251 1 : TIMELINE_ID,
10252 1 : Lsn(0x10),
10253 1 : DEFAULT_PG_VERSION,
10254 1 : &ctx,
10255 1 : Vec::new(), // in-memory layers
10256 1 : vec![
10257 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x10)..Lsn(0x48), delta1),
10258 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x10)..Lsn(0x48), delta2),
10259 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x48)..Lsn(0x50), delta3),
10260 1 : ], // delta layers
10261 1 : vec![(Lsn(0x10), img_layer)], // image layers
10262 1 : Lsn(0x50),
10263 1 : )
10264 1 : .await?;
10265 1 : {
10266 1 : tline
10267 1 : .applied_gc_cutoff_lsn
10268 1 : .lock_for_write()
10269 1 : .store_and_unlock(Lsn(0x30))
10270 1 : .wait()
10271 1 : .await;
10272 1 : // Update GC info
10273 1 : let mut guard = tline.gc_info.write().unwrap();
10274 1 : *guard = GcInfo {
10275 1 : retain_lsns: vec![
10276 1 : (Lsn(0x10), tline.timeline_id, MaybeOffloaded::No),
10277 1 : (Lsn(0x20), tline.timeline_id, MaybeOffloaded::No),
10278 1 : ],
10279 1 : cutoffs: GcCutoffs {
10280 1 : time: Some(Lsn(0x30)),
10281 1 : space: Lsn(0x30),
10282 1 : },
10283 1 : leases: Default::default(),
10284 1 : within_ancestor_pitr: false,
10285 1 : };
10286 1 : }
10287 1 :
10288 1 : let expected_result = [
10289 1 : Bytes::from_static(b"value 0@0x10"),
10290 1 : Bytes::from_static(b"value 1@0x10@0x20"),
10291 1 : Bytes::from_static(b"value 2@0x10@0x30"),
10292 1 : Bytes::from_static(b"value 3@0x10@0x28@0x30@0x40"),
10293 1 : Bytes::from_static(b"value 4@0x10"),
10294 1 : Bytes::from_static(b"value 5@0x10@0x20"),
10295 1 : Bytes::from_static(b"value 6@0x10@0x20"),
10296 1 : Bytes::from_static(b"value 7@0x10"),
10297 1 : Bytes::from_static(b"value 8@0x10@0x48"),
10298 1 : Bytes::from_static(b"value 9@0x10@0x48"),
10299 1 : ];
10300 1 :
10301 1 : let expected_result_at_gc_horizon = [
10302 1 : Bytes::from_static(b"value 0@0x10"),
10303 1 : Bytes::from_static(b"value 1@0x10@0x20"),
10304 1 : Bytes::from_static(b"value 2@0x10@0x30"),
10305 1 : Bytes::from_static(b"value 3@0x10@0x28@0x30"),
10306 1 : Bytes::from_static(b"value 4@0x10"),
10307 1 : Bytes::from_static(b"value 5@0x10@0x20"),
10308 1 : Bytes::from_static(b"value 6@0x10@0x20"),
10309 1 : Bytes::from_static(b"value 7@0x10"),
10310 1 : Bytes::from_static(b"value 8@0x10"),
10311 1 : Bytes::from_static(b"value 9@0x10"),
10312 1 : ];
10313 1 :
10314 1 : let expected_result_at_lsn_20 = [
10315 1 : Bytes::from_static(b"value 0@0x10"),
10316 1 : Bytes::from_static(b"value 1@0x10@0x20"),
10317 1 : Bytes::from_static(b"value 2@0x10"),
10318 1 : Bytes::from_static(b"value 3@0x10"),
10319 1 : Bytes::from_static(b"value 4@0x10"),
10320 1 : Bytes::from_static(b"value 5@0x10@0x20"),
10321 1 : Bytes::from_static(b"value 6@0x10@0x20"),
10322 1 : Bytes::from_static(b"value 7@0x10"),
10323 1 : Bytes::from_static(b"value 8@0x10"),
10324 1 : Bytes::from_static(b"value 9@0x10"),
10325 1 : ];
10326 1 :
10327 1 : let expected_result_at_lsn_10 = [
10328 1 : Bytes::from_static(b"value 0@0x10"),
10329 1 : Bytes::from_static(b"value 1@0x10"),
10330 1 : Bytes::from_static(b"value 2@0x10"),
10331 1 : Bytes::from_static(b"value 3@0x10"),
10332 1 : Bytes::from_static(b"value 4@0x10"),
10333 1 : Bytes::from_static(b"value 5@0x10"),
10334 1 : Bytes::from_static(b"value 6@0x10"),
10335 1 : Bytes::from_static(b"value 7@0x10"),
10336 1 : Bytes::from_static(b"value 8@0x10"),
10337 1 : Bytes::from_static(b"value 9@0x10"),
10338 1 : ];
10339 1 :
10340 6 : let verify_result = || async {
10341 6 : let gc_horizon = {
10342 6 : let gc_info = tline.gc_info.read().unwrap();
10343 6 : gc_info.cutoffs.time.unwrap_or_default()
10344 1 : };
10345 66 : for idx in 0..10 {
10346 60 : assert_eq!(
10347 60 : tline
10348 60 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
10349 60 : .await
10350 60 : .unwrap(),
10351 60 : &expected_result[idx]
10352 1 : );
10353 60 : assert_eq!(
10354 60 : tline
10355 60 : .get(get_key(idx as u32), gc_horizon, &ctx)
10356 60 : .await
10357 60 : .unwrap(),
10358 60 : &expected_result_at_gc_horizon[idx]
10359 1 : );
10360 60 : assert_eq!(
10361 60 : tline
10362 60 : .get(get_key(idx as u32), Lsn(0x20), &ctx)
10363 60 : .await
10364 60 : .unwrap(),
10365 60 : &expected_result_at_lsn_20[idx]
10366 1 : );
10367 60 : assert_eq!(
10368 60 : tline
10369 60 : .get(get_key(idx as u32), Lsn(0x10), &ctx)
10370 60 : .await
10371 60 : .unwrap(),
10372 60 : &expected_result_at_lsn_10[idx]
10373 1 : );
10374 1 : }
10375 12 : };
10376 1 :
10377 1 : verify_result().await;
10378 1 :
10379 1 : let cancel = CancellationToken::new();
10380 1 : let mut dryrun_flags = EnumSet::new();
10381 1 : dryrun_flags.insert(CompactFlags::DryRun);
10382 1 :
10383 1 : tline
10384 1 : .compact_with_gc(
10385 1 : &cancel,
10386 1 : CompactOptions {
10387 1 : flags: dryrun_flags,
10388 1 : ..Default::default()
10389 1 : },
10390 1 : &ctx,
10391 1 : )
10392 1 : .await
10393 1 : .unwrap();
10394 1 : // We expect layer map to be the same b/c the dry run flag, but we don't know whether there will be other background jobs
10395 1 : // cleaning things up, and therefore, we don't do sanity checks on the layer map during unit tests.
10396 1 : verify_result().await;
10397 1 :
10398 1 : tline
10399 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
10400 1 : .await
10401 1 : .unwrap();
10402 1 : verify_result().await;
10403 1 :
10404 1 : // compact again
10405 1 : tline
10406 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
10407 1 : .await
10408 1 : .unwrap();
10409 1 : verify_result().await;
10410 1 :
10411 1 : // increase GC horizon and compact again
10412 1 : {
10413 1 : tline
10414 1 : .applied_gc_cutoff_lsn
10415 1 : .lock_for_write()
10416 1 : .store_and_unlock(Lsn(0x38))
10417 1 : .wait()
10418 1 : .await;
10419 1 : // Update GC info
10420 1 : let mut guard = tline.gc_info.write().unwrap();
10421 1 : guard.cutoffs.time = Some(Lsn(0x38));
10422 1 : guard.cutoffs.space = Lsn(0x38);
10423 1 : }
10424 1 : tline
10425 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
10426 1 : .await
10427 1 : .unwrap();
10428 1 : verify_result().await; // no wals between 0x30 and 0x38, so we should obtain the same result
10429 1 :
10430 1 : // not increasing the GC horizon and compact again
10431 1 : tline
10432 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
10433 1 : .await
10434 1 : .unwrap();
10435 1 : verify_result().await;
10436 1 :
10437 1 : Ok(())
10438 1 : }
10439 :
10440 : #[cfg(feature = "testing")]
10441 : #[tokio::test]
10442 1 : async fn test_simple_bottom_most_compaction_with_retain_lsns_single_key() -> anyhow::Result<()>
10443 1 : {
10444 1 : let harness =
10445 1 : TenantHarness::create("test_simple_bottom_most_compaction_with_retain_lsns_single_key")
10446 1 : .await?;
10447 1 : let (tenant, ctx) = harness.load().await;
10448 1 :
10449 176 : fn get_key(id: u32) -> Key {
10450 176 : // using aux key here b/c they are guaranteed to be inside `collect_keyspace`.
10451 176 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
10452 176 : key.field6 = id;
10453 176 : key
10454 176 : }
10455 1 :
10456 1 : let img_layer = (0..10)
10457 10 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
10458 1 : .collect_vec();
10459 1 :
10460 1 : let delta1 = vec![
10461 1 : (
10462 1 : get_key(1),
10463 1 : Lsn(0x20),
10464 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
10465 1 : ),
10466 1 : (
10467 1 : get_key(1),
10468 1 : Lsn(0x28),
10469 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x28")),
10470 1 : ),
10471 1 : ];
10472 1 : let delta2 = vec![
10473 1 : (
10474 1 : get_key(1),
10475 1 : Lsn(0x30),
10476 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
10477 1 : ),
10478 1 : (
10479 1 : get_key(1),
10480 1 : Lsn(0x38),
10481 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x38")),
10482 1 : ),
10483 1 : ];
10484 1 : let delta3 = vec![
10485 1 : (
10486 1 : get_key(8),
10487 1 : Lsn(0x48),
10488 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
10489 1 : ),
10490 1 : (
10491 1 : get_key(9),
10492 1 : Lsn(0x48),
10493 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
10494 1 : ),
10495 1 : ];
10496 1 :
10497 1 : let tline = tenant
10498 1 : .create_test_timeline_with_layers(
10499 1 : TIMELINE_ID,
10500 1 : Lsn(0x10),
10501 1 : DEFAULT_PG_VERSION,
10502 1 : &ctx,
10503 1 : Vec::new(), // in-memory layers
10504 1 : vec![
10505 1 : // delta1 and delta 2 only contain a single key but multiple updates
10506 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x10)..Lsn(0x30), delta1),
10507 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x30)..Lsn(0x50), delta2),
10508 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x10)..Lsn(0x50), delta3),
10509 1 : ], // delta layers
10510 1 : vec![(Lsn(0x10), img_layer)], // image layers
10511 1 : Lsn(0x50),
10512 1 : )
10513 1 : .await?;
10514 1 : {
10515 1 : tline
10516 1 : .applied_gc_cutoff_lsn
10517 1 : .lock_for_write()
10518 1 : .store_and_unlock(Lsn(0x30))
10519 1 : .wait()
10520 1 : .await;
10521 1 : // Update GC info
10522 1 : let mut guard = tline.gc_info.write().unwrap();
10523 1 : *guard = GcInfo {
10524 1 : retain_lsns: vec![
10525 1 : (Lsn(0x10), tline.timeline_id, MaybeOffloaded::No),
10526 1 : (Lsn(0x20), tline.timeline_id, MaybeOffloaded::No),
10527 1 : ],
10528 1 : cutoffs: GcCutoffs {
10529 1 : time: Some(Lsn(0x30)),
10530 1 : space: Lsn(0x30),
10531 1 : },
10532 1 : leases: Default::default(),
10533 1 : within_ancestor_pitr: false,
10534 1 : };
10535 1 : }
10536 1 :
10537 1 : let expected_result = [
10538 1 : Bytes::from_static(b"value 0@0x10"),
10539 1 : Bytes::from_static(b"value 1@0x10@0x20@0x28@0x30@0x38"),
10540 1 : Bytes::from_static(b"value 2@0x10"),
10541 1 : Bytes::from_static(b"value 3@0x10"),
10542 1 : Bytes::from_static(b"value 4@0x10"),
10543 1 : Bytes::from_static(b"value 5@0x10"),
10544 1 : Bytes::from_static(b"value 6@0x10"),
10545 1 : Bytes::from_static(b"value 7@0x10"),
10546 1 : Bytes::from_static(b"value 8@0x10@0x48"),
10547 1 : Bytes::from_static(b"value 9@0x10@0x48"),
10548 1 : ];
10549 1 :
10550 1 : let expected_result_at_gc_horizon = [
10551 1 : Bytes::from_static(b"value 0@0x10"),
10552 1 : Bytes::from_static(b"value 1@0x10@0x20@0x28@0x30"),
10553 1 : Bytes::from_static(b"value 2@0x10"),
10554 1 : Bytes::from_static(b"value 3@0x10"),
10555 1 : Bytes::from_static(b"value 4@0x10"),
10556 1 : Bytes::from_static(b"value 5@0x10"),
10557 1 : Bytes::from_static(b"value 6@0x10"),
10558 1 : Bytes::from_static(b"value 7@0x10"),
10559 1 : Bytes::from_static(b"value 8@0x10"),
10560 1 : Bytes::from_static(b"value 9@0x10"),
10561 1 : ];
10562 1 :
10563 1 : let expected_result_at_lsn_20 = [
10564 1 : Bytes::from_static(b"value 0@0x10"),
10565 1 : Bytes::from_static(b"value 1@0x10@0x20"),
10566 1 : Bytes::from_static(b"value 2@0x10"),
10567 1 : Bytes::from_static(b"value 3@0x10"),
10568 1 : Bytes::from_static(b"value 4@0x10"),
10569 1 : Bytes::from_static(b"value 5@0x10"),
10570 1 : Bytes::from_static(b"value 6@0x10"),
10571 1 : Bytes::from_static(b"value 7@0x10"),
10572 1 : Bytes::from_static(b"value 8@0x10"),
10573 1 : Bytes::from_static(b"value 9@0x10"),
10574 1 : ];
10575 1 :
10576 1 : let expected_result_at_lsn_10 = [
10577 1 : Bytes::from_static(b"value 0@0x10"),
10578 1 : Bytes::from_static(b"value 1@0x10"),
10579 1 : Bytes::from_static(b"value 2@0x10"),
10580 1 : Bytes::from_static(b"value 3@0x10"),
10581 1 : Bytes::from_static(b"value 4@0x10"),
10582 1 : Bytes::from_static(b"value 5@0x10"),
10583 1 : Bytes::from_static(b"value 6@0x10"),
10584 1 : Bytes::from_static(b"value 7@0x10"),
10585 1 : Bytes::from_static(b"value 8@0x10"),
10586 1 : Bytes::from_static(b"value 9@0x10"),
10587 1 : ];
10588 1 :
10589 4 : let verify_result = || async {
10590 4 : let gc_horizon = {
10591 4 : let gc_info = tline.gc_info.read().unwrap();
10592 4 : gc_info.cutoffs.time.unwrap_or_default()
10593 1 : };
10594 44 : for idx in 0..10 {
10595 40 : assert_eq!(
10596 40 : tline
10597 40 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
10598 40 : .await
10599 40 : .unwrap(),
10600 40 : &expected_result[idx]
10601 1 : );
10602 40 : assert_eq!(
10603 40 : tline
10604 40 : .get(get_key(idx as u32), gc_horizon, &ctx)
10605 40 : .await
10606 40 : .unwrap(),
10607 40 : &expected_result_at_gc_horizon[idx]
10608 1 : );
10609 40 : assert_eq!(
10610 40 : tline
10611 40 : .get(get_key(idx as u32), Lsn(0x20), &ctx)
10612 40 : .await
10613 40 : .unwrap(),
10614 40 : &expected_result_at_lsn_20[idx]
10615 1 : );
10616 40 : assert_eq!(
10617 40 : tline
10618 40 : .get(get_key(idx as u32), Lsn(0x10), &ctx)
10619 40 : .await
10620 40 : .unwrap(),
10621 40 : &expected_result_at_lsn_10[idx]
10622 1 : );
10623 1 : }
10624 8 : };
10625 1 :
10626 1 : verify_result().await;
10627 1 :
10628 1 : let cancel = CancellationToken::new();
10629 1 : let mut dryrun_flags = EnumSet::new();
10630 1 : dryrun_flags.insert(CompactFlags::DryRun);
10631 1 :
10632 1 : tline
10633 1 : .compact_with_gc(
10634 1 : &cancel,
10635 1 : CompactOptions {
10636 1 : flags: dryrun_flags,
10637 1 : ..Default::default()
10638 1 : },
10639 1 : &ctx,
10640 1 : )
10641 1 : .await
10642 1 : .unwrap();
10643 1 : // We expect layer map to be the same b/c the dry run flag, but we don't know whether there will be other background jobs
10644 1 : // cleaning things up, and therefore, we don't do sanity checks on the layer map during unit tests.
10645 1 : verify_result().await;
10646 1 :
10647 1 : tline
10648 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
10649 1 : .await
10650 1 : .unwrap();
10651 1 : verify_result().await;
10652 1 :
10653 1 : // compact again
10654 1 : tline
10655 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
10656 1 : .await
10657 1 : .unwrap();
10658 1 : verify_result().await;
10659 1 :
10660 1 : Ok(())
10661 1 : }
10662 :
10663 : #[cfg(feature = "testing")]
10664 : #[tokio::test]
10665 1 : async fn test_simple_bottom_most_compaction_on_branch() -> anyhow::Result<()> {
10666 1 : use models::CompactLsnRange;
10667 1 :
10668 1 : let harness = TenantHarness::create("test_simple_bottom_most_compaction_on_branch").await?;
10669 1 : let (tenant, ctx) = harness.load().await;
10670 1 :
10671 83 : fn get_key(id: u32) -> Key {
10672 83 : let mut key = Key::from_hex("000000000033333333444444445500000000").unwrap();
10673 83 : key.field6 = id;
10674 83 : key
10675 83 : }
10676 1 :
10677 1 : let img_layer = (0..10)
10678 10 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
10679 1 : .collect_vec();
10680 1 :
10681 1 : let delta1 = vec![
10682 1 : (
10683 1 : get_key(1),
10684 1 : Lsn(0x20),
10685 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
10686 1 : ),
10687 1 : (
10688 1 : get_key(2),
10689 1 : Lsn(0x30),
10690 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
10691 1 : ),
10692 1 : (
10693 1 : get_key(3),
10694 1 : Lsn(0x28),
10695 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x28")),
10696 1 : ),
10697 1 : (
10698 1 : get_key(3),
10699 1 : Lsn(0x30),
10700 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
10701 1 : ),
10702 1 : (
10703 1 : get_key(3),
10704 1 : Lsn(0x40),
10705 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x40")),
10706 1 : ),
10707 1 : ];
10708 1 : let delta2 = vec![
10709 1 : (
10710 1 : get_key(5),
10711 1 : Lsn(0x20),
10712 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
10713 1 : ),
10714 1 : (
10715 1 : get_key(6),
10716 1 : Lsn(0x20),
10717 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
10718 1 : ),
10719 1 : ];
10720 1 : let delta3 = vec![
10721 1 : (
10722 1 : get_key(8),
10723 1 : Lsn(0x48),
10724 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
10725 1 : ),
10726 1 : (
10727 1 : get_key(9),
10728 1 : Lsn(0x48),
10729 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
10730 1 : ),
10731 1 : ];
10732 1 :
10733 1 : let parent_tline = tenant
10734 1 : .create_test_timeline_with_layers(
10735 1 : TIMELINE_ID,
10736 1 : Lsn(0x10),
10737 1 : DEFAULT_PG_VERSION,
10738 1 : &ctx,
10739 1 : vec![], // in-memory layers
10740 1 : vec![], // delta layers
10741 1 : vec![(Lsn(0x18), img_layer)], // image layers
10742 1 : Lsn(0x18),
10743 1 : )
10744 1 : .await?;
10745 1 :
10746 1 : parent_tline.add_extra_test_dense_keyspace(KeySpace::single(get_key(0)..get_key(10)));
10747 1 :
10748 1 : let branch_tline = tenant
10749 1 : .branch_timeline_test_with_layers(
10750 1 : &parent_tline,
10751 1 : NEW_TIMELINE_ID,
10752 1 : Some(Lsn(0x18)),
10753 1 : &ctx,
10754 1 : vec![
10755 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x20)..Lsn(0x48), delta1),
10756 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x20)..Lsn(0x48), delta2),
10757 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x48)..Lsn(0x50), delta3),
10758 1 : ], // delta layers
10759 1 : vec![], // image layers
10760 1 : Lsn(0x50),
10761 1 : )
10762 1 : .await?;
10763 1 :
10764 1 : branch_tline.add_extra_test_dense_keyspace(KeySpace::single(get_key(0)..get_key(10)));
10765 1 :
10766 1 : {
10767 1 : parent_tline
10768 1 : .applied_gc_cutoff_lsn
10769 1 : .lock_for_write()
10770 1 : .store_and_unlock(Lsn(0x10))
10771 1 : .wait()
10772 1 : .await;
10773 1 : // Update GC info
10774 1 : let mut guard = parent_tline.gc_info.write().unwrap();
10775 1 : *guard = GcInfo {
10776 1 : retain_lsns: vec![(Lsn(0x18), branch_tline.timeline_id, MaybeOffloaded::No)],
10777 1 : cutoffs: GcCutoffs {
10778 1 : time: Some(Lsn(0x10)),
10779 1 : space: Lsn(0x10),
10780 1 : },
10781 1 : leases: Default::default(),
10782 1 : within_ancestor_pitr: false,
10783 1 : };
10784 1 : }
10785 1 :
10786 1 : {
10787 1 : branch_tline
10788 1 : .applied_gc_cutoff_lsn
10789 1 : .lock_for_write()
10790 1 : .store_and_unlock(Lsn(0x50))
10791 1 : .wait()
10792 1 : .await;
10793 1 : // Update GC info
10794 1 : let mut guard = branch_tline.gc_info.write().unwrap();
10795 1 : *guard = GcInfo {
10796 1 : retain_lsns: vec![(Lsn(0x40), branch_tline.timeline_id, MaybeOffloaded::No)],
10797 1 : cutoffs: GcCutoffs {
10798 1 : time: Some(Lsn(0x50)),
10799 1 : space: Lsn(0x50),
10800 1 : },
10801 1 : leases: Default::default(),
10802 1 : within_ancestor_pitr: false,
10803 1 : };
10804 1 : }
10805 1 :
10806 1 : let expected_result_at_gc_horizon = [
10807 1 : Bytes::from_static(b"value 0@0x10"),
10808 1 : Bytes::from_static(b"value 1@0x10@0x20"),
10809 1 : Bytes::from_static(b"value 2@0x10@0x30"),
10810 1 : Bytes::from_static(b"value 3@0x10@0x28@0x30@0x40"),
10811 1 : Bytes::from_static(b"value 4@0x10"),
10812 1 : Bytes::from_static(b"value 5@0x10@0x20"),
10813 1 : Bytes::from_static(b"value 6@0x10@0x20"),
10814 1 : Bytes::from_static(b"value 7@0x10"),
10815 1 : Bytes::from_static(b"value 8@0x10@0x48"),
10816 1 : Bytes::from_static(b"value 9@0x10@0x48"),
10817 1 : ];
10818 1 :
10819 1 : let expected_result_at_lsn_40 = [
10820 1 : Bytes::from_static(b"value 0@0x10"),
10821 1 : Bytes::from_static(b"value 1@0x10@0x20"),
10822 1 : Bytes::from_static(b"value 2@0x10@0x30"),
10823 1 : Bytes::from_static(b"value 3@0x10@0x28@0x30@0x40"),
10824 1 : Bytes::from_static(b"value 4@0x10"),
10825 1 : Bytes::from_static(b"value 5@0x10@0x20"),
10826 1 : Bytes::from_static(b"value 6@0x10@0x20"),
10827 1 : Bytes::from_static(b"value 7@0x10"),
10828 1 : Bytes::from_static(b"value 8@0x10"),
10829 1 : Bytes::from_static(b"value 9@0x10"),
10830 1 : ];
10831 1 :
10832 3 : let verify_result = || async {
10833 33 : for idx in 0..10 {
10834 30 : assert_eq!(
10835 30 : branch_tline
10836 30 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
10837 30 : .await
10838 30 : .unwrap(),
10839 30 : &expected_result_at_gc_horizon[idx]
10840 1 : );
10841 30 : assert_eq!(
10842 30 : branch_tline
10843 30 : .get(get_key(idx as u32), Lsn(0x40), &ctx)
10844 30 : .await
10845 30 : .unwrap(),
10846 30 : &expected_result_at_lsn_40[idx]
10847 1 : );
10848 1 : }
10849 6 : };
10850 1 :
10851 1 : verify_result().await;
10852 1 :
10853 1 : let cancel = CancellationToken::new();
10854 1 : branch_tline
10855 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
10856 1 : .await
10857 1 : .unwrap();
10858 1 :
10859 1 : verify_result().await;
10860 1 :
10861 1 : // Piggyback a compaction with above_lsn. Ensure it works correctly when the specified LSN intersects with the layer files.
10862 1 : // Now we already have a single large delta layer, so the compaction min_layer_lsn should be the same as ancestor LSN (0x18).
10863 1 : branch_tline
10864 1 : .compact_with_gc(
10865 1 : &cancel,
10866 1 : CompactOptions {
10867 1 : compact_lsn_range: Some(CompactLsnRange::above(Lsn(0x40))),
10868 1 : ..Default::default()
10869 1 : },
10870 1 : &ctx,
10871 1 : )
10872 1 : .await
10873 1 : .unwrap();
10874 1 :
10875 1 : verify_result().await;
10876 1 :
10877 1 : Ok(())
10878 1 : }
10879 :
10880 : // Regression test for https://github.com/neondatabase/neon/issues/9012
10881 : // Create an image arrangement where we have to read at different LSN ranges
10882 : // from a delta layer. This is achieved by overlapping an image layer on top of
10883 : // a delta layer. Like so:
10884 : //
10885 : // A B
10886 : // +----------------+ -> delta_layer
10887 : // | | ^ lsn
10888 : // | =========|-> nested_image_layer |
10889 : // | C | |
10890 : // +----------------+ |
10891 : // ======== -> baseline_image_layer +-------> key
10892 : //
10893 : //
10894 : // When querying the key range [A, B) we need to read at different LSN ranges
10895 : // for [A, C) and [C, B). This test checks that the described edge case is handled correctly.
10896 : #[cfg(feature = "testing")]
10897 : #[tokio::test]
10898 1 : async fn test_vectored_read_with_nested_image_layer() -> anyhow::Result<()> {
10899 1 : let harness = TenantHarness::create("test_vectored_read_with_nested_image_layer").await?;
10900 1 : let (tenant, ctx) = harness.load().await;
10901 1 :
10902 1 : let will_init_keys = [2, 6];
10903 22 : fn get_key(id: u32) -> Key {
10904 22 : let mut key = Key::from_hex("110000000033333333444444445500000000").unwrap();
10905 22 : key.field6 = id;
10906 22 : key
10907 22 : }
10908 1 :
10909 1 : let mut expected_key_values = HashMap::new();
10910 1 :
10911 1 : let baseline_image_layer_lsn = Lsn(0x10);
10912 1 : let mut baseline_img_layer = Vec::new();
10913 6 : for i in 0..5 {
10914 5 : let key = get_key(i);
10915 5 : let value = format!("value {i}@{baseline_image_layer_lsn}");
10916 5 :
10917 5 : let removed = expected_key_values.insert(key, value.clone());
10918 5 : assert!(removed.is_none());
10919 1 :
10920 5 : baseline_img_layer.push((key, Bytes::from(value)));
10921 1 : }
10922 1 :
10923 1 : let nested_image_layer_lsn = Lsn(0x50);
10924 1 : let mut nested_img_layer = Vec::new();
10925 6 : for i in 5..10 {
10926 5 : let key = get_key(i);
10927 5 : let value = format!("value {i}@{nested_image_layer_lsn}");
10928 5 :
10929 5 : let removed = expected_key_values.insert(key, value.clone());
10930 5 : assert!(removed.is_none());
10931 1 :
10932 5 : nested_img_layer.push((key, Bytes::from(value)));
10933 1 : }
10934 1 :
10935 1 : let mut delta_layer_spec = Vec::default();
10936 1 : let delta_layer_start_lsn = Lsn(0x20);
10937 1 : let mut delta_layer_end_lsn = delta_layer_start_lsn;
10938 1 :
10939 11 : for i in 0..10 {
10940 10 : let key = get_key(i);
10941 10 : let key_in_nested = nested_img_layer
10942 10 : .iter()
10943 40 : .any(|(key_with_img, _)| *key_with_img == key);
10944 10 : let lsn = {
10945 10 : if key_in_nested {
10946 5 : Lsn(nested_image_layer_lsn.0 + 0x10)
10947 1 : } else {
10948 5 : delta_layer_start_lsn
10949 1 : }
10950 1 : };
10951 1 :
10952 10 : let will_init = will_init_keys.contains(&i);
10953 10 : if will_init {
10954 2 : delta_layer_spec.push((key, lsn, Value::WalRecord(NeonWalRecord::wal_init(""))));
10955 2 :
10956 2 : expected_key_values.insert(key, "".to_string());
10957 8 : } else {
10958 8 : let delta = format!("@{lsn}");
10959 8 : delta_layer_spec.push((
10960 8 : key,
10961 8 : lsn,
10962 8 : Value::WalRecord(NeonWalRecord::wal_append(&delta)),
10963 8 : ));
10964 8 :
10965 8 : expected_key_values
10966 8 : .get_mut(&key)
10967 8 : .expect("An image exists for each key")
10968 8 : .push_str(delta.as_str());
10969 8 : }
10970 10 : delta_layer_end_lsn = std::cmp::max(delta_layer_start_lsn, lsn);
10971 1 : }
10972 1 :
10973 1 : delta_layer_end_lsn = Lsn(delta_layer_end_lsn.0 + 1);
10974 1 :
10975 1 : assert!(
10976 1 : nested_image_layer_lsn > delta_layer_start_lsn
10977 1 : && nested_image_layer_lsn < delta_layer_end_lsn
10978 1 : );
10979 1 :
10980 1 : let tline = tenant
10981 1 : .create_test_timeline_with_layers(
10982 1 : TIMELINE_ID,
10983 1 : baseline_image_layer_lsn,
10984 1 : DEFAULT_PG_VERSION,
10985 1 : &ctx,
10986 1 : vec![], // in-memory layers
10987 1 : vec![DeltaLayerTestDesc::new_with_inferred_key_range(
10988 1 : delta_layer_start_lsn..delta_layer_end_lsn,
10989 1 : delta_layer_spec,
10990 1 : )], // delta layers
10991 1 : vec![
10992 1 : (baseline_image_layer_lsn, baseline_img_layer),
10993 1 : (nested_image_layer_lsn, nested_img_layer),
10994 1 : ], // image layers
10995 1 : delta_layer_end_lsn,
10996 1 : )
10997 1 : .await?;
10998 1 :
10999 1 : let query = VersionedKeySpaceQuery::uniform(
11000 1 : KeySpace::single(get_key(0)..get_key(10)),
11001 1 : delta_layer_end_lsn,
11002 1 : );
11003 1 :
11004 1 : let results = tline
11005 1 : .get_vectored(query, IoConcurrency::sequential(), &ctx)
11006 1 : .await
11007 1 : .expect("No vectored errors");
11008 11 : for (key, res) in results {
11009 10 : let value = res.expect("No key errors");
11010 10 : let expected_value = expected_key_values.remove(&key).expect("No unknown keys");
11011 10 : assert_eq!(value, Bytes::from(expected_value));
11012 1 : }
11013 1 :
11014 1 : Ok(())
11015 1 : }
11016 :
11017 : #[cfg(feature = "testing")]
11018 : #[tokio::test]
11019 1 : async fn test_vectored_read_with_image_layer_inside_inmem() -> anyhow::Result<()> {
11020 1 : let harness =
11021 1 : TenantHarness::create("test_vectored_read_with_image_layer_inside_inmem").await?;
11022 1 : let (tenant, ctx) = harness.load().await;
11023 1 :
11024 1 : let will_init_keys = [2, 6];
11025 32 : fn get_key(id: u32) -> Key {
11026 32 : let mut key = Key::from_hex("110000000033333333444444445500000000").unwrap();
11027 32 : key.field6 = id;
11028 32 : key
11029 32 : }
11030 1 :
11031 1 : let mut expected_key_values = HashMap::new();
11032 1 :
11033 1 : let baseline_image_layer_lsn = Lsn(0x10);
11034 1 : let mut baseline_img_layer = Vec::new();
11035 6 : for i in 0..5 {
11036 5 : let key = get_key(i);
11037 5 : let value = format!("value {i}@{baseline_image_layer_lsn}");
11038 5 :
11039 5 : let removed = expected_key_values.insert(key, value.clone());
11040 5 : assert!(removed.is_none());
11041 1 :
11042 5 : baseline_img_layer.push((key, Bytes::from(value)));
11043 1 : }
11044 1 :
11045 1 : let nested_image_layer_lsn = Lsn(0x50);
11046 1 : let mut nested_img_layer = Vec::new();
11047 6 : for i in 5..10 {
11048 5 : let key = get_key(i);
11049 5 : let value = format!("value {i}@{nested_image_layer_lsn}");
11050 5 :
11051 5 : let removed = expected_key_values.insert(key, value.clone());
11052 5 : assert!(removed.is_none());
11053 1 :
11054 5 : nested_img_layer.push((key, Bytes::from(value)));
11055 1 : }
11056 1 :
11057 1 : let frozen_layer = {
11058 1 : let lsn_range = Lsn(0x40)..Lsn(0x60);
11059 1 : let mut data = Vec::new();
11060 11 : for i in 0..10 {
11061 10 : let key = get_key(i);
11062 10 : let key_in_nested = nested_img_layer
11063 10 : .iter()
11064 40 : .any(|(key_with_img, _)| *key_with_img == key);
11065 10 : let lsn = {
11066 10 : if key_in_nested {
11067 5 : Lsn(nested_image_layer_lsn.0 + 5)
11068 1 : } else {
11069 5 : lsn_range.start
11070 1 : }
11071 1 : };
11072 1 :
11073 10 : let will_init = will_init_keys.contains(&i);
11074 10 : if will_init {
11075 2 : data.push((key, lsn, Value::WalRecord(NeonWalRecord::wal_init(""))));
11076 2 :
11077 2 : expected_key_values.insert(key, "".to_string());
11078 8 : } else {
11079 8 : let delta = format!("@{lsn}");
11080 8 : data.push((
11081 8 : key,
11082 8 : lsn,
11083 8 : Value::WalRecord(NeonWalRecord::wal_append(&delta)),
11084 8 : ));
11085 8 :
11086 8 : expected_key_values
11087 8 : .get_mut(&key)
11088 8 : .expect("An image exists for each key")
11089 8 : .push_str(delta.as_str());
11090 8 : }
11091 1 : }
11092 1 :
11093 1 : InMemoryLayerTestDesc {
11094 1 : lsn_range,
11095 1 : is_open: false,
11096 1 : data,
11097 1 : }
11098 1 : };
11099 1 :
11100 1 : let (open_layer, last_record_lsn) = {
11101 1 : let start_lsn = Lsn(0x70);
11102 1 : let mut data = Vec::new();
11103 1 : let mut end_lsn = Lsn(0);
11104 11 : for i in 0..10 {
11105 10 : let key = get_key(i);
11106 10 : let lsn = Lsn(start_lsn.0 + i as u64);
11107 10 : let delta = format!("@{lsn}");
11108 10 : data.push((
11109 10 : key,
11110 10 : lsn,
11111 10 : Value::WalRecord(NeonWalRecord::wal_append(&delta)),
11112 10 : ));
11113 10 :
11114 10 : expected_key_values
11115 10 : .get_mut(&key)
11116 10 : .expect("An image exists for each key")
11117 10 : .push_str(delta.as_str());
11118 10 :
11119 10 : end_lsn = std::cmp::max(end_lsn, lsn);
11120 10 : }
11121 1 :
11122 1 : (
11123 1 : InMemoryLayerTestDesc {
11124 1 : lsn_range: start_lsn..Lsn::MAX,
11125 1 : is_open: true,
11126 1 : data,
11127 1 : },
11128 1 : end_lsn,
11129 1 : )
11130 1 : };
11131 1 :
11132 1 : assert!(
11133 1 : nested_image_layer_lsn > frozen_layer.lsn_range.start
11134 1 : && nested_image_layer_lsn < frozen_layer.lsn_range.end
11135 1 : );
11136 1 :
11137 1 : let tline = tenant
11138 1 : .create_test_timeline_with_layers(
11139 1 : TIMELINE_ID,
11140 1 : baseline_image_layer_lsn,
11141 1 : DEFAULT_PG_VERSION,
11142 1 : &ctx,
11143 1 : vec![open_layer, frozen_layer], // in-memory layers
11144 1 : Vec::new(), // delta layers
11145 1 : vec![
11146 1 : (baseline_image_layer_lsn, baseline_img_layer),
11147 1 : (nested_image_layer_lsn, nested_img_layer),
11148 1 : ], // image layers
11149 1 : last_record_lsn,
11150 1 : )
11151 1 : .await?;
11152 1 :
11153 1 : let query = VersionedKeySpaceQuery::uniform(
11154 1 : KeySpace::single(get_key(0)..get_key(10)),
11155 1 : last_record_lsn,
11156 1 : );
11157 1 :
11158 1 : let results = tline
11159 1 : .get_vectored(query, IoConcurrency::sequential(), &ctx)
11160 1 : .await
11161 1 : .expect("No vectored errors");
11162 11 : for (key, res) in results {
11163 10 : let value = res.expect("No key errors");
11164 10 : let expected_value = expected_key_values.remove(&key).expect("No unknown keys");
11165 10 : assert_eq!(value, Bytes::from(expected_value.clone()));
11166 1 :
11167 10 : tracing::info!("key={key} value={expected_value}");
11168 1 : }
11169 1 :
11170 1 : Ok(())
11171 1 : }
11172 :
11173 : // A randomized read path test. Generates a layer map according to a deterministic
11174 : // specification. Fills the (key, LSN) space in random manner and then performs
11175 : // random scattered queries validating the results against in-memory storage.
11176 : //
11177 : // See this internal Notion page for a diagram of the layer map:
11178 : // https://www.notion.so/neondatabase/Read-Path-Unit-Testing-Fuzzing-1d1f189e0047806c8e5cd37781b0a350?pvs=4
11179 : //
11180 : // A fuzzing mode is also supported. In this mode, the test will use a random
11181 : // seed instead of a hardcoded one. Use it in conjunction with `cargo stress`
11182 : // to run multiple instances in parallel:
11183 : //
11184 : // $ RUST_BACKTRACE=1 RUST_LOG=INFO \
11185 : // cargo stress --package=pageserver --features=testing,fuzz-read-path --release -- test_read_path
11186 : #[cfg(feature = "testing")]
11187 : #[tokio::test]
11188 1 : async fn test_read_path() -> anyhow::Result<()> {
11189 1 : use rand::seq::SliceRandom;
11190 1 :
11191 1 : let seed = if cfg!(feature = "fuzz-read-path") {
11192 1 : let seed: u64 = thread_rng().r#gen();
11193 0 : seed
11194 1 : } else {
11195 1 : // Use a hard-coded seed when not in fuzzing mode.
11196 1 : // Note that with the current approach results are not reproducible
11197 1 : // accross platforms and Rust releases.
11198 1 : const SEED: u64 = 0;
11199 1 : SEED
11200 1 : };
11201 1 :
11202 1 : let mut random = StdRng::seed_from_u64(seed);
11203 1 :
11204 1 : let (queries, will_init_chance, gap_chance) = if cfg!(feature = "fuzz-read-path") {
11205 1 : const QUERIES: u64 = 5000;
11206 1 : let will_init_chance: u8 = random.gen_range(0..=10);
11207 0 : let gap_chance: u8 = random.gen_range(0..=50);
11208 0 :
11209 0 : (QUERIES, will_init_chance, gap_chance)
11210 1 : } else {
11211 1 : const QUERIES: u64 = 1000;
11212 1 : const WILL_INIT_CHANCE: u8 = 1;
11213 1 : const GAP_CHANCE: u8 = 5;
11214 1 :
11215 1 : (QUERIES, WILL_INIT_CHANCE, GAP_CHANCE)
11216 1 : };
11217 1 :
11218 1 : let harness = TenantHarness::create("test_read_path").await?;
11219 1 : let (tenant, ctx) = harness.load().await;
11220 1 :
11221 1 : tracing::info!("Using random seed: {seed}");
11222 1 : tracing::info!(%will_init_chance, %gap_chance, "Fill params");
11223 1 :
11224 1 : // Define the layer map shape. Note that this part is not randomized.
11225 1 :
11226 1 : const KEY_DIMENSION_SIZE: u32 = 99;
11227 1 : let start_key = Key::from_hex("110000000033333333444444445500000000").unwrap();
11228 1 : let end_key = start_key.add(KEY_DIMENSION_SIZE);
11229 1 : let total_key_range = start_key..end_key;
11230 1 : let total_key_range_size = end_key.to_i128() - start_key.to_i128();
11231 1 : let total_start_lsn = Lsn(104);
11232 1 : let last_record_lsn = Lsn(504);
11233 1 :
11234 1 : assert!(total_key_range_size % 3 == 0);
11235 1 :
11236 1 : let in_memory_layers_shape = vec![
11237 1 : (total_key_range.clone(), Lsn(304)..Lsn(400)),
11238 1 : (total_key_range.clone(), Lsn(400)..last_record_lsn),
11239 1 : ];
11240 1 :
11241 1 : let delta_layers_shape = vec![
11242 1 : (
11243 1 : start_key..(start_key.add((total_key_range_size / 3) as u32)),
11244 1 : Lsn(200)..Lsn(304),
11245 1 : ),
11246 1 : (
11247 1 : (start_key.add((total_key_range_size / 3) as u32))
11248 1 : ..(start_key.add((total_key_range_size * 2 / 3) as u32)),
11249 1 : Lsn(200)..Lsn(304),
11250 1 : ),
11251 1 : (
11252 1 : (start_key.add((total_key_range_size * 2 / 3) as u32))
11253 1 : ..(start_key.add(total_key_range_size as u32)),
11254 1 : Lsn(200)..Lsn(304),
11255 1 : ),
11256 1 : ];
11257 1 :
11258 1 : let image_layers_shape = vec![
11259 1 : (
11260 1 : start_key.add((total_key_range_size * 2 / 3 - 10) as u32)
11261 1 : ..start_key.add((total_key_range_size * 2 / 3 + 10) as u32),
11262 1 : Lsn(456),
11263 1 : ),
11264 1 : (
11265 1 : start_key.add((total_key_range_size / 3 - 10) as u32)
11266 1 : ..start_key.add((total_key_range_size / 3 + 10) as u32),
11267 1 : Lsn(256),
11268 1 : ),
11269 1 : (total_key_range.clone(), total_start_lsn),
11270 1 : ];
11271 1 :
11272 1 : let specification = TestTimelineSpecification {
11273 1 : start_lsn: total_start_lsn,
11274 1 : last_record_lsn,
11275 1 : in_memory_layers_shape,
11276 1 : delta_layers_shape,
11277 1 : image_layers_shape,
11278 1 : gap_chance,
11279 1 : will_init_chance,
11280 1 : };
11281 1 :
11282 1 : // Create and randomly fill in the layers according to the specification
11283 1 : let (tline, storage, interesting_lsns) = randomize_timeline(
11284 1 : &tenant,
11285 1 : TIMELINE_ID,
11286 1 : DEFAULT_PG_VERSION,
11287 1 : specification,
11288 1 : &mut random,
11289 1 : &ctx,
11290 1 : )
11291 1 : .await?;
11292 1 :
11293 1 : // Now generate queries based on the interesting lsns that we've collected.
11294 1 : //
11295 1 : // While there's still room in the query, pick and interesting LSN and a random
11296 1 : // key. Then roll the dice to see if the next key should also be included in
11297 1 : // the query. When the roll fails, break the "batch" and pick another point in the
11298 1 : // (key, LSN) space.
11299 1 :
11300 1 : const PICK_NEXT_CHANCE: u8 = 50;
11301 1 : for _ in 0..queries {
11302 1000 : let query = {
11303 1000 : let mut keyspaces_at_lsn: HashMap<Lsn, KeySpaceRandomAccum> = HashMap::default();
11304 1000 : let mut used_keys: HashSet<Key> = HashSet::default();
11305 1 :
11306 22536 : while used_keys.len() < tenant.conf.max_get_vectored_keys.get() {
11307 21536 : let selected_lsn = interesting_lsns.choose(&mut random).expect("not empty");
11308 21536 : let mut selected_key = start_key.add(random.gen_range(0..KEY_DIMENSION_SIZE));
11309 1 :
11310 37614 : while used_keys.len() < tenant.conf.max_get_vectored_keys.get() {
11311 37093 : if used_keys.contains(&selected_key)
11312 32154 : || selected_key >= start_key.add(KEY_DIMENSION_SIZE)
11313 1 : {
11314 5093 : break;
11315 32000 : }
11316 32000 :
11317 32000 : keyspaces_at_lsn
11318 32000 : .entry(*selected_lsn)
11319 32000 : .or_default()
11320 32000 : .add_key(selected_key);
11321 32000 : used_keys.insert(selected_key);
11322 32000 :
11323 32000 : let pick_next = random.gen_range(0..=100) <= PICK_NEXT_CHANCE;
11324 32000 : if pick_next {
11325 16078 : selected_key = selected_key.next();
11326 16078 : } else {
11327 15922 : break;
11328 1 : }
11329 1 : }
11330 1 : }
11331 1 :
11332 1000 : VersionedKeySpaceQuery::scattered(
11333 1000 : keyspaces_at_lsn
11334 1000 : .into_iter()
11335 11917 : .map(|(lsn, acc)| (lsn, acc.to_keyspace()))
11336 1000 : .collect(),
11337 1000 : )
11338 1 : };
11339 1 :
11340 1 : // Run the query and validate the results
11341 1 :
11342 1000 : let results = tline
11343 1000 : .get_vectored(query.clone(), IoConcurrency::Sequential, &ctx)
11344 1000 : .await;
11345 1 :
11346 1000 : let blobs = match results {
11347 1000 : Ok(ok) => ok,
11348 1 : Err(err) => {
11349 0 : panic!("seed={seed} Error returned for query {query}: {err}");
11350 1 : }
11351 1 : };
11352 1 :
11353 32000 : for (key, key_res) in blobs.into_iter() {
11354 32000 : match key_res {
11355 32000 : Ok(blob) => {
11356 32000 : let requested_at_lsn = query.map_key_to_lsn(&key);
11357 32000 : let expected = storage.get(key, requested_at_lsn);
11358 32000 :
11359 32000 : if blob != expected {
11360 1 : tracing::error!(
11361 1 : "seed={seed} Mismatch for {key}@{requested_at_lsn} from query: {query}"
11362 1 : );
11363 32000 : }
11364 1 :
11365 32000 : assert_eq!(blob, expected);
11366 1 : }
11367 1 : Err(err) => {
11368 0 : let requested_at_lsn = query.map_key_to_lsn(&key);
11369 0 :
11370 0 : panic!(
11371 0 : "seed={seed} Error returned for {key}@{requested_at_lsn} from query {query}: {err}"
11372 0 : );
11373 1 : }
11374 1 : }
11375 1 : }
11376 1 : }
11377 1 :
11378 1 : Ok(())
11379 1 : }
11380 :
11381 107 : fn sort_layer_key(k1: &PersistentLayerKey, k2: &PersistentLayerKey) -> std::cmp::Ordering {
11382 107 : (
11383 107 : k1.is_delta,
11384 107 : k1.key_range.start,
11385 107 : k1.key_range.end,
11386 107 : k1.lsn_range.start,
11387 107 : k1.lsn_range.end,
11388 107 : )
11389 107 : .cmp(&(
11390 107 : k2.is_delta,
11391 107 : k2.key_range.start,
11392 107 : k2.key_range.end,
11393 107 : k2.lsn_range.start,
11394 107 : k2.lsn_range.end,
11395 107 : ))
11396 107 : }
11397 :
11398 12 : async fn inspect_and_sort(
11399 12 : tline: &Arc<Timeline>,
11400 12 : filter: Option<std::ops::Range<Key>>,
11401 12 : ) -> Vec<PersistentLayerKey> {
11402 12 : let mut all_layers = tline.inspect_historic_layers().await.unwrap();
11403 12 : if let Some(filter) = filter {
11404 54 : all_layers.retain(|layer| overlaps_with(&layer.key_range, &filter));
11405 11 : }
11406 12 : all_layers.sort_by(sort_layer_key);
11407 12 : all_layers
11408 12 : }
11409 :
11410 : #[cfg(feature = "testing")]
11411 11 : fn check_layer_map_key_eq(
11412 11 : mut left: Vec<PersistentLayerKey>,
11413 11 : mut right: Vec<PersistentLayerKey>,
11414 11 : ) {
11415 11 : left.sort_by(sort_layer_key);
11416 11 : right.sort_by(sort_layer_key);
11417 11 : if left != right {
11418 0 : eprintln!("---LEFT---");
11419 0 : for left in left.iter() {
11420 0 : eprintln!("{}", left);
11421 0 : }
11422 0 : eprintln!("---RIGHT---");
11423 0 : for right in right.iter() {
11424 0 : eprintln!("{}", right);
11425 0 : }
11426 0 : assert_eq!(left, right);
11427 11 : }
11428 11 : }
11429 :
11430 : #[cfg(feature = "testing")]
11431 : #[tokio::test]
11432 1 : async fn test_simple_partial_bottom_most_compaction() -> anyhow::Result<()> {
11433 1 : let harness = TenantHarness::create("test_simple_partial_bottom_most_compaction").await?;
11434 1 : let (tenant, ctx) = harness.load().await;
11435 1 :
11436 91 : fn get_key(id: u32) -> Key {
11437 91 : // using aux key here b/c they are guaranteed to be inside `collect_keyspace`.
11438 91 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
11439 91 : key.field6 = id;
11440 91 : key
11441 91 : }
11442 1 :
11443 1 : // img layer at 0x10
11444 1 : let img_layer = (0..10)
11445 10 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
11446 1 : .collect_vec();
11447 1 :
11448 1 : let delta1 = vec![
11449 1 : (
11450 1 : get_key(1),
11451 1 : Lsn(0x20),
11452 1 : Value::Image(Bytes::from("value 1@0x20")),
11453 1 : ),
11454 1 : (
11455 1 : get_key(2),
11456 1 : Lsn(0x30),
11457 1 : Value::Image(Bytes::from("value 2@0x30")),
11458 1 : ),
11459 1 : (
11460 1 : get_key(3),
11461 1 : Lsn(0x40),
11462 1 : Value::Image(Bytes::from("value 3@0x40")),
11463 1 : ),
11464 1 : ];
11465 1 : let delta2 = vec![
11466 1 : (
11467 1 : get_key(5),
11468 1 : Lsn(0x20),
11469 1 : Value::Image(Bytes::from("value 5@0x20")),
11470 1 : ),
11471 1 : (
11472 1 : get_key(6),
11473 1 : Lsn(0x20),
11474 1 : Value::Image(Bytes::from("value 6@0x20")),
11475 1 : ),
11476 1 : ];
11477 1 : let delta3 = vec![
11478 1 : (
11479 1 : get_key(8),
11480 1 : Lsn(0x48),
11481 1 : Value::Image(Bytes::from("value 8@0x48")),
11482 1 : ),
11483 1 : (
11484 1 : get_key(9),
11485 1 : Lsn(0x48),
11486 1 : Value::Image(Bytes::from("value 9@0x48")),
11487 1 : ),
11488 1 : ];
11489 1 :
11490 1 : let tline = tenant
11491 1 : .create_test_timeline_with_layers(
11492 1 : TIMELINE_ID,
11493 1 : Lsn(0x10),
11494 1 : DEFAULT_PG_VERSION,
11495 1 : &ctx,
11496 1 : vec![], // in-memory layers
11497 1 : vec![
11498 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x20)..Lsn(0x48), delta1),
11499 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x20)..Lsn(0x48), delta2),
11500 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x48)..Lsn(0x50), delta3),
11501 1 : ], // delta layers
11502 1 : vec![(Lsn(0x10), img_layer)], // image layers
11503 1 : Lsn(0x50),
11504 1 : )
11505 1 : .await?;
11506 1 :
11507 1 : {
11508 1 : tline
11509 1 : .applied_gc_cutoff_lsn
11510 1 : .lock_for_write()
11511 1 : .store_and_unlock(Lsn(0x30))
11512 1 : .wait()
11513 1 : .await;
11514 1 : // Update GC info
11515 1 : let mut guard = tline.gc_info.write().unwrap();
11516 1 : *guard = GcInfo {
11517 1 : retain_lsns: vec![(Lsn(0x20), tline.timeline_id, MaybeOffloaded::No)],
11518 1 : cutoffs: GcCutoffs {
11519 1 : time: Some(Lsn(0x30)),
11520 1 : space: Lsn(0x30),
11521 1 : },
11522 1 : leases: Default::default(),
11523 1 : within_ancestor_pitr: false,
11524 1 : };
11525 1 : }
11526 1 :
11527 1 : let cancel = CancellationToken::new();
11528 1 :
11529 1 : // Do a partial compaction on key range 0..2
11530 1 : tline
11531 1 : .compact_with_gc(
11532 1 : &cancel,
11533 1 : CompactOptions {
11534 1 : flags: EnumSet::new(),
11535 1 : compact_key_range: Some((get_key(0)..get_key(2)).into()),
11536 1 : ..Default::default()
11537 1 : },
11538 1 : &ctx,
11539 1 : )
11540 1 : .await
11541 1 : .unwrap();
11542 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
11543 1 : check_layer_map_key_eq(
11544 1 : all_layers,
11545 1 : vec![
11546 1 : // newly-generated image layer for the partial compaction range 0-2
11547 1 : PersistentLayerKey {
11548 1 : key_range: get_key(0)..get_key(2),
11549 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11550 1 : is_delta: false,
11551 1 : },
11552 1 : PersistentLayerKey {
11553 1 : key_range: get_key(0)..get_key(10),
11554 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
11555 1 : is_delta: false,
11556 1 : },
11557 1 : // delta1 is split and the second part is rewritten
11558 1 : PersistentLayerKey {
11559 1 : key_range: get_key(2)..get_key(4),
11560 1 : lsn_range: Lsn(0x20)..Lsn(0x48),
11561 1 : is_delta: true,
11562 1 : },
11563 1 : PersistentLayerKey {
11564 1 : key_range: get_key(5)..get_key(7),
11565 1 : lsn_range: Lsn(0x20)..Lsn(0x48),
11566 1 : is_delta: true,
11567 1 : },
11568 1 : PersistentLayerKey {
11569 1 : key_range: get_key(8)..get_key(10),
11570 1 : lsn_range: Lsn(0x48)..Lsn(0x50),
11571 1 : is_delta: true,
11572 1 : },
11573 1 : ],
11574 1 : );
11575 1 :
11576 1 : // Do a partial compaction on key range 2..4
11577 1 : tline
11578 1 : .compact_with_gc(
11579 1 : &cancel,
11580 1 : CompactOptions {
11581 1 : flags: EnumSet::new(),
11582 1 : compact_key_range: Some((get_key(2)..get_key(4)).into()),
11583 1 : ..Default::default()
11584 1 : },
11585 1 : &ctx,
11586 1 : )
11587 1 : .await
11588 1 : .unwrap();
11589 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
11590 1 : check_layer_map_key_eq(
11591 1 : all_layers,
11592 1 : vec![
11593 1 : PersistentLayerKey {
11594 1 : key_range: get_key(0)..get_key(2),
11595 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11596 1 : is_delta: false,
11597 1 : },
11598 1 : PersistentLayerKey {
11599 1 : key_range: get_key(0)..get_key(10),
11600 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
11601 1 : is_delta: false,
11602 1 : },
11603 1 : // image layer generated for the compaction range 2-4
11604 1 : PersistentLayerKey {
11605 1 : key_range: get_key(2)..get_key(4),
11606 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11607 1 : is_delta: false,
11608 1 : },
11609 1 : // we have key2/key3 above the retain_lsn, so we still need this delta layer
11610 1 : PersistentLayerKey {
11611 1 : key_range: get_key(2)..get_key(4),
11612 1 : lsn_range: Lsn(0x20)..Lsn(0x48),
11613 1 : is_delta: true,
11614 1 : },
11615 1 : PersistentLayerKey {
11616 1 : key_range: get_key(5)..get_key(7),
11617 1 : lsn_range: Lsn(0x20)..Lsn(0x48),
11618 1 : is_delta: true,
11619 1 : },
11620 1 : PersistentLayerKey {
11621 1 : key_range: get_key(8)..get_key(10),
11622 1 : lsn_range: Lsn(0x48)..Lsn(0x50),
11623 1 : is_delta: true,
11624 1 : },
11625 1 : ],
11626 1 : );
11627 1 :
11628 1 : // Do a partial compaction on key range 4..9
11629 1 : tline
11630 1 : .compact_with_gc(
11631 1 : &cancel,
11632 1 : CompactOptions {
11633 1 : flags: EnumSet::new(),
11634 1 : compact_key_range: Some((get_key(4)..get_key(9)).into()),
11635 1 : ..Default::default()
11636 1 : },
11637 1 : &ctx,
11638 1 : )
11639 1 : .await
11640 1 : .unwrap();
11641 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
11642 1 : check_layer_map_key_eq(
11643 1 : all_layers,
11644 1 : vec![
11645 1 : PersistentLayerKey {
11646 1 : key_range: get_key(0)..get_key(2),
11647 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11648 1 : is_delta: false,
11649 1 : },
11650 1 : PersistentLayerKey {
11651 1 : key_range: get_key(0)..get_key(10),
11652 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
11653 1 : is_delta: false,
11654 1 : },
11655 1 : PersistentLayerKey {
11656 1 : key_range: get_key(2)..get_key(4),
11657 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11658 1 : is_delta: false,
11659 1 : },
11660 1 : PersistentLayerKey {
11661 1 : key_range: get_key(2)..get_key(4),
11662 1 : lsn_range: Lsn(0x20)..Lsn(0x48),
11663 1 : is_delta: true,
11664 1 : },
11665 1 : // image layer generated for this compaction range
11666 1 : PersistentLayerKey {
11667 1 : key_range: get_key(4)..get_key(9),
11668 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11669 1 : is_delta: false,
11670 1 : },
11671 1 : PersistentLayerKey {
11672 1 : key_range: get_key(8)..get_key(10),
11673 1 : lsn_range: Lsn(0x48)..Lsn(0x50),
11674 1 : is_delta: true,
11675 1 : },
11676 1 : ],
11677 1 : );
11678 1 :
11679 1 : // Do a partial compaction on key range 9..10
11680 1 : tline
11681 1 : .compact_with_gc(
11682 1 : &cancel,
11683 1 : CompactOptions {
11684 1 : flags: EnumSet::new(),
11685 1 : compact_key_range: Some((get_key(9)..get_key(10)).into()),
11686 1 : ..Default::default()
11687 1 : },
11688 1 : &ctx,
11689 1 : )
11690 1 : .await
11691 1 : .unwrap();
11692 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
11693 1 : check_layer_map_key_eq(
11694 1 : all_layers,
11695 1 : vec![
11696 1 : PersistentLayerKey {
11697 1 : key_range: get_key(0)..get_key(2),
11698 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11699 1 : is_delta: false,
11700 1 : },
11701 1 : PersistentLayerKey {
11702 1 : key_range: get_key(0)..get_key(10),
11703 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
11704 1 : is_delta: false,
11705 1 : },
11706 1 : PersistentLayerKey {
11707 1 : key_range: get_key(2)..get_key(4),
11708 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11709 1 : is_delta: false,
11710 1 : },
11711 1 : PersistentLayerKey {
11712 1 : key_range: get_key(2)..get_key(4),
11713 1 : lsn_range: Lsn(0x20)..Lsn(0x48),
11714 1 : is_delta: true,
11715 1 : },
11716 1 : PersistentLayerKey {
11717 1 : key_range: get_key(4)..get_key(9),
11718 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11719 1 : is_delta: false,
11720 1 : },
11721 1 : // image layer generated for the compaction range
11722 1 : PersistentLayerKey {
11723 1 : key_range: get_key(9)..get_key(10),
11724 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11725 1 : is_delta: false,
11726 1 : },
11727 1 : PersistentLayerKey {
11728 1 : key_range: get_key(8)..get_key(10),
11729 1 : lsn_range: Lsn(0x48)..Lsn(0x50),
11730 1 : is_delta: true,
11731 1 : },
11732 1 : ],
11733 1 : );
11734 1 :
11735 1 : // Do a partial compaction on key range 0..10, all image layers below LSN 20 can be replaced with new ones.
11736 1 : tline
11737 1 : .compact_with_gc(
11738 1 : &cancel,
11739 1 : CompactOptions {
11740 1 : flags: EnumSet::new(),
11741 1 : compact_key_range: Some((get_key(0)..get_key(10)).into()),
11742 1 : ..Default::default()
11743 1 : },
11744 1 : &ctx,
11745 1 : )
11746 1 : .await
11747 1 : .unwrap();
11748 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
11749 1 : check_layer_map_key_eq(
11750 1 : all_layers,
11751 1 : vec![
11752 1 : // aha, we removed all unnecessary image/delta layers and got a very clean layer map!
11753 1 : PersistentLayerKey {
11754 1 : key_range: get_key(0)..get_key(10),
11755 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11756 1 : is_delta: false,
11757 1 : },
11758 1 : PersistentLayerKey {
11759 1 : key_range: get_key(2)..get_key(4),
11760 1 : lsn_range: Lsn(0x20)..Lsn(0x48),
11761 1 : is_delta: true,
11762 1 : },
11763 1 : PersistentLayerKey {
11764 1 : key_range: get_key(8)..get_key(10),
11765 1 : lsn_range: Lsn(0x48)..Lsn(0x50),
11766 1 : is_delta: true,
11767 1 : },
11768 1 : ],
11769 1 : );
11770 1 : Ok(())
11771 1 : }
11772 :
11773 : #[cfg(feature = "testing")]
11774 : #[tokio::test]
11775 1 : async fn test_timeline_offload_retain_lsn() -> anyhow::Result<()> {
11776 1 : let harness = TenantHarness::create("test_timeline_offload_retain_lsn")
11777 1 : .await
11778 1 : .unwrap();
11779 1 : let (tenant, ctx) = harness.load().await;
11780 1 : let tline_parent = tenant
11781 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
11782 1 : .await
11783 1 : .unwrap();
11784 1 : let tline_child = tenant
11785 1 : .branch_timeline_test(&tline_parent, NEW_TIMELINE_ID, Some(Lsn(0x20)), &ctx)
11786 1 : .await
11787 1 : .unwrap();
11788 1 : {
11789 1 : let gc_info_parent = tline_parent.gc_info.read().unwrap();
11790 1 : assert_eq!(
11791 1 : gc_info_parent.retain_lsns,
11792 1 : vec![(Lsn(0x20), tline_child.timeline_id, MaybeOffloaded::No)]
11793 1 : );
11794 1 : }
11795 1 : // We have to directly call the remote_client instead of using the archive function to avoid constructing broker client...
11796 1 : tline_child
11797 1 : .remote_client
11798 1 : .schedule_index_upload_for_timeline_archival_state(TimelineArchivalState::Archived)
11799 1 : .unwrap();
11800 1 : tline_child.remote_client.wait_completion().await.unwrap();
11801 1 : offload_timeline(&tenant, &tline_child)
11802 1 : .instrument(tracing::info_span!(parent: None, "offload_test", tenant_id=%"test", shard_id=%"test", timeline_id=%"test"))
11803 1 : .await.unwrap();
11804 1 : let child_timeline_id = tline_child.timeline_id;
11805 1 : Arc::try_unwrap(tline_child).unwrap();
11806 1 :
11807 1 : {
11808 1 : let gc_info_parent = tline_parent.gc_info.read().unwrap();
11809 1 : assert_eq!(
11810 1 : gc_info_parent.retain_lsns,
11811 1 : vec![(Lsn(0x20), child_timeline_id, MaybeOffloaded::Yes)]
11812 1 : );
11813 1 : }
11814 1 :
11815 1 : tenant
11816 1 : .get_offloaded_timeline(child_timeline_id)
11817 1 : .unwrap()
11818 1 : .defuse_for_tenant_drop();
11819 1 :
11820 1 : Ok(())
11821 1 : }
11822 :
11823 : #[cfg(feature = "testing")]
11824 : #[tokio::test]
11825 1 : async fn test_simple_bottom_most_compaction_above_lsn() -> anyhow::Result<()> {
11826 1 : let harness = TenantHarness::create("test_simple_bottom_most_compaction_above_lsn").await?;
11827 1 : let (tenant, ctx) = harness.load().await;
11828 1 :
11829 148 : fn get_key(id: u32) -> Key {
11830 148 : // using aux key here b/c they are guaranteed to be inside `collect_keyspace`.
11831 148 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
11832 148 : key.field6 = id;
11833 148 : key
11834 148 : }
11835 1 :
11836 1 : let img_layer = (0..10)
11837 10 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
11838 1 : .collect_vec();
11839 1 :
11840 1 : let delta1 = vec![(
11841 1 : get_key(1),
11842 1 : Lsn(0x20),
11843 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
11844 1 : )];
11845 1 : let delta4 = vec![(
11846 1 : get_key(1),
11847 1 : Lsn(0x28),
11848 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x28")),
11849 1 : )];
11850 1 : let delta2 = vec![
11851 1 : (
11852 1 : get_key(1),
11853 1 : Lsn(0x30),
11854 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
11855 1 : ),
11856 1 : (
11857 1 : get_key(1),
11858 1 : Lsn(0x38),
11859 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x38")),
11860 1 : ),
11861 1 : ];
11862 1 : let delta3 = vec![
11863 1 : (
11864 1 : get_key(8),
11865 1 : Lsn(0x48),
11866 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
11867 1 : ),
11868 1 : (
11869 1 : get_key(9),
11870 1 : Lsn(0x48),
11871 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
11872 1 : ),
11873 1 : ];
11874 1 :
11875 1 : let tline = tenant
11876 1 : .create_test_timeline_with_layers(
11877 1 : TIMELINE_ID,
11878 1 : Lsn(0x10),
11879 1 : DEFAULT_PG_VERSION,
11880 1 : &ctx,
11881 1 : vec![], // in-memory layers
11882 1 : vec![
11883 1 : // delta1/2/4 only contain a single key but multiple updates
11884 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x20)..Lsn(0x28), delta1),
11885 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x30)..Lsn(0x50), delta2),
11886 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x28)..Lsn(0x30), delta4),
11887 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x30)..Lsn(0x50), delta3),
11888 1 : ], // delta layers
11889 1 : vec![(Lsn(0x10), img_layer)], // image layers
11890 1 : Lsn(0x50),
11891 1 : )
11892 1 : .await?;
11893 1 : {
11894 1 : tline
11895 1 : .applied_gc_cutoff_lsn
11896 1 : .lock_for_write()
11897 1 : .store_and_unlock(Lsn(0x30))
11898 1 : .wait()
11899 1 : .await;
11900 1 : // Update GC info
11901 1 : let mut guard = tline.gc_info.write().unwrap();
11902 1 : *guard = GcInfo {
11903 1 : retain_lsns: vec![
11904 1 : (Lsn(0x10), tline.timeline_id, MaybeOffloaded::No),
11905 1 : (Lsn(0x20), tline.timeline_id, MaybeOffloaded::No),
11906 1 : ],
11907 1 : cutoffs: GcCutoffs {
11908 1 : time: Some(Lsn(0x30)),
11909 1 : space: Lsn(0x30),
11910 1 : },
11911 1 : leases: Default::default(),
11912 1 : within_ancestor_pitr: false,
11913 1 : };
11914 1 : }
11915 1 :
11916 1 : let expected_result = [
11917 1 : Bytes::from_static(b"value 0@0x10"),
11918 1 : Bytes::from_static(b"value 1@0x10@0x20@0x28@0x30@0x38"),
11919 1 : Bytes::from_static(b"value 2@0x10"),
11920 1 : Bytes::from_static(b"value 3@0x10"),
11921 1 : Bytes::from_static(b"value 4@0x10"),
11922 1 : Bytes::from_static(b"value 5@0x10"),
11923 1 : Bytes::from_static(b"value 6@0x10"),
11924 1 : Bytes::from_static(b"value 7@0x10"),
11925 1 : Bytes::from_static(b"value 8@0x10@0x48"),
11926 1 : Bytes::from_static(b"value 9@0x10@0x48"),
11927 1 : ];
11928 1 :
11929 1 : let expected_result_at_gc_horizon = [
11930 1 : Bytes::from_static(b"value 0@0x10"),
11931 1 : Bytes::from_static(b"value 1@0x10@0x20@0x28@0x30"),
11932 1 : Bytes::from_static(b"value 2@0x10"),
11933 1 : Bytes::from_static(b"value 3@0x10"),
11934 1 : Bytes::from_static(b"value 4@0x10"),
11935 1 : Bytes::from_static(b"value 5@0x10"),
11936 1 : Bytes::from_static(b"value 6@0x10"),
11937 1 : Bytes::from_static(b"value 7@0x10"),
11938 1 : Bytes::from_static(b"value 8@0x10"),
11939 1 : Bytes::from_static(b"value 9@0x10"),
11940 1 : ];
11941 1 :
11942 1 : let expected_result_at_lsn_20 = [
11943 1 : Bytes::from_static(b"value 0@0x10"),
11944 1 : Bytes::from_static(b"value 1@0x10@0x20"),
11945 1 : Bytes::from_static(b"value 2@0x10"),
11946 1 : Bytes::from_static(b"value 3@0x10"),
11947 1 : Bytes::from_static(b"value 4@0x10"),
11948 1 : Bytes::from_static(b"value 5@0x10"),
11949 1 : Bytes::from_static(b"value 6@0x10"),
11950 1 : Bytes::from_static(b"value 7@0x10"),
11951 1 : Bytes::from_static(b"value 8@0x10"),
11952 1 : Bytes::from_static(b"value 9@0x10"),
11953 1 : ];
11954 1 :
11955 1 : let expected_result_at_lsn_10 = [
11956 1 : Bytes::from_static(b"value 0@0x10"),
11957 1 : Bytes::from_static(b"value 1@0x10"),
11958 1 : Bytes::from_static(b"value 2@0x10"),
11959 1 : Bytes::from_static(b"value 3@0x10"),
11960 1 : Bytes::from_static(b"value 4@0x10"),
11961 1 : Bytes::from_static(b"value 5@0x10"),
11962 1 : Bytes::from_static(b"value 6@0x10"),
11963 1 : Bytes::from_static(b"value 7@0x10"),
11964 1 : Bytes::from_static(b"value 8@0x10"),
11965 1 : Bytes::from_static(b"value 9@0x10"),
11966 1 : ];
11967 1 :
11968 3 : let verify_result = || async {
11969 3 : let gc_horizon = {
11970 3 : let gc_info = tline.gc_info.read().unwrap();
11971 3 : gc_info.cutoffs.time.unwrap_or_default()
11972 1 : };
11973 33 : for idx in 0..10 {
11974 30 : assert_eq!(
11975 30 : tline
11976 30 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
11977 30 : .await
11978 30 : .unwrap(),
11979 30 : &expected_result[idx]
11980 1 : );
11981 30 : assert_eq!(
11982 30 : tline
11983 30 : .get(get_key(idx as u32), gc_horizon, &ctx)
11984 30 : .await
11985 30 : .unwrap(),
11986 30 : &expected_result_at_gc_horizon[idx]
11987 1 : );
11988 30 : assert_eq!(
11989 30 : tline
11990 30 : .get(get_key(idx as u32), Lsn(0x20), &ctx)
11991 30 : .await
11992 30 : .unwrap(),
11993 30 : &expected_result_at_lsn_20[idx]
11994 1 : );
11995 30 : assert_eq!(
11996 30 : tline
11997 30 : .get(get_key(idx as u32), Lsn(0x10), &ctx)
11998 30 : .await
11999 30 : .unwrap(),
12000 30 : &expected_result_at_lsn_10[idx]
12001 1 : );
12002 1 : }
12003 6 : };
12004 1 :
12005 1 : verify_result().await;
12006 1 :
12007 1 : let cancel = CancellationToken::new();
12008 1 : tline
12009 1 : .compact_with_gc(
12010 1 : &cancel,
12011 1 : CompactOptions {
12012 1 : compact_lsn_range: Some(CompactLsnRange::above(Lsn(0x28))),
12013 1 : ..Default::default()
12014 1 : },
12015 1 : &ctx,
12016 1 : )
12017 1 : .await
12018 1 : .unwrap();
12019 1 : verify_result().await;
12020 1 :
12021 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
12022 1 : check_layer_map_key_eq(
12023 1 : all_layers,
12024 1 : vec![
12025 1 : // The original image layer, not compacted
12026 1 : PersistentLayerKey {
12027 1 : key_range: get_key(0)..get_key(10),
12028 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
12029 1 : is_delta: false,
12030 1 : },
12031 1 : // Delta layer below the specified above_lsn not compacted
12032 1 : PersistentLayerKey {
12033 1 : key_range: get_key(1)..get_key(2),
12034 1 : lsn_range: Lsn(0x20)..Lsn(0x28),
12035 1 : is_delta: true,
12036 1 : },
12037 1 : // Delta layer compacted above the LSN
12038 1 : PersistentLayerKey {
12039 1 : key_range: get_key(1)..get_key(10),
12040 1 : lsn_range: Lsn(0x28)..Lsn(0x50),
12041 1 : is_delta: true,
12042 1 : },
12043 1 : ],
12044 1 : );
12045 1 :
12046 1 : // compact again
12047 1 : tline
12048 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
12049 1 : .await
12050 1 : .unwrap();
12051 1 : verify_result().await;
12052 1 :
12053 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
12054 1 : check_layer_map_key_eq(
12055 1 : all_layers,
12056 1 : vec![
12057 1 : // The compacted image layer (full key range)
12058 1 : PersistentLayerKey {
12059 1 : key_range: Key::MIN..Key::MAX,
12060 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
12061 1 : is_delta: false,
12062 1 : },
12063 1 : // All other data in the delta layer
12064 1 : PersistentLayerKey {
12065 1 : key_range: get_key(1)..get_key(10),
12066 1 : lsn_range: Lsn(0x10)..Lsn(0x50),
12067 1 : is_delta: true,
12068 1 : },
12069 1 : ],
12070 1 : );
12071 1 :
12072 1 : Ok(())
12073 1 : }
12074 :
12075 : #[cfg(feature = "testing")]
12076 : #[tokio::test]
12077 1 : async fn test_simple_bottom_most_compaction_rectangle() -> anyhow::Result<()> {
12078 1 : let harness = TenantHarness::create("test_simple_bottom_most_compaction_rectangle").await?;
12079 1 : let (tenant, ctx) = harness.load().await;
12080 1 :
12081 254 : fn get_key(id: u32) -> Key {
12082 254 : // using aux key here b/c they are guaranteed to be inside `collect_keyspace`.
12083 254 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
12084 254 : key.field6 = id;
12085 254 : key
12086 254 : }
12087 1 :
12088 1 : let img_layer = (0..10)
12089 10 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
12090 1 : .collect_vec();
12091 1 :
12092 1 : let delta1 = vec![(
12093 1 : get_key(1),
12094 1 : Lsn(0x20),
12095 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
12096 1 : )];
12097 1 : let delta4 = vec![(
12098 1 : get_key(1),
12099 1 : Lsn(0x28),
12100 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x28")),
12101 1 : )];
12102 1 : let delta2 = vec![
12103 1 : (
12104 1 : get_key(1),
12105 1 : Lsn(0x30),
12106 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
12107 1 : ),
12108 1 : (
12109 1 : get_key(1),
12110 1 : Lsn(0x38),
12111 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x38")),
12112 1 : ),
12113 1 : ];
12114 1 : let delta3 = vec![
12115 1 : (
12116 1 : get_key(8),
12117 1 : Lsn(0x48),
12118 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
12119 1 : ),
12120 1 : (
12121 1 : get_key(9),
12122 1 : Lsn(0x48),
12123 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
12124 1 : ),
12125 1 : ];
12126 1 :
12127 1 : let tline = tenant
12128 1 : .create_test_timeline_with_layers(
12129 1 : TIMELINE_ID,
12130 1 : Lsn(0x10),
12131 1 : DEFAULT_PG_VERSION,
12132 1 : &ctx,
12133 1 : vec![], // in-memory layers
12134 1 : vec![
12135 1 : // delta1/2/4 only contain a single key but multiple updates
12136 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x20)..Lsn(0x28), delta1),
12137 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x30)..Lsn(0x50), delta2),
12138 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x28)..Lsn(0x30), delta4),
12139 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x30)..Lsn(0x50), delta3),
12140 1 : ], // delta layers
12141 1 : vec![(Lsn(0x10), img_layer)], // image layers
12142 1 : Lsn(0x50),
12143 1 : )
12144 1 : .await?;
12145 1 : {
12146 1 : tline
12147 1 : .applied_gc_cutoff_lsn
12148 1 : .lock_for_write()
12149 1 : .store_and_unlock(Lsn(0x30))
12150 1 : .wait()
12151 1 : .await;
12152 1 : // Update GC info
12153 1 : let mut guard = tline.gc_info.write().unwrap();
12154 1 : *guard = GcInfo {
12155 1 : retain_lsns: vec![
12156 1 : (Lsn(0x10), tline.timeline_id, MaybeOffloaded::No),
12157 1 : (Lsn(0x20), tline.timeline_id, MaybeOffloaded::No),
12158 1 : ],
12159 1 : cutoffs: GcCutoffs {
12160 1 : time: Some(Lsn(0x30)),
12161 1 : space: Lsn(0x30),
12162 1 : },
12163 1 : leases: Default::default(),
12164 1 : within_ancestor_pitr: false,
12165 1 : };
12166 1 : }
12167 1 :
12168 1 : let expected_result = [
12169 1 : Bytes::from_static(b"value 0@0x10"),
12170 1 : Bytes::from_static(b"value 1@0x10@0x20@0x28@0x30@0x38"),
12171 1 : Bytes::from_static(b"value 2@0x10"),
12172 1 : Bytes::from_static(b"value 3@0x10"),
12173 1 : Bytes::from_static(b"value 4@0x10"),
12174 1 : Bytes::from_static(b"value 5@0x10"),
12175 1 : Bytes::from_static(b"value 6@0x10"),
12176 1 : Bytes::from_static(b"value 7@0x10"),
12177 1 : Bytes::from_static(b"value 8@0x10@0x48"),
12178 1 : Bytes::from_static(b"value 9@0x10@0x48"),
12179 1 : ];
12180 1 :
12181 1 : let expected_result_at_gc_horizon = [
12182 1 : Bytes::from_static(b"value 0@0x10"),
12183 1 : Bytes::from_static(b"value 1@0x10@0x20@0x28@0x30"),
12184 1 : Bytes::from_static(b"value 2@0x10"),
12185 1 : Bytes::from_static(b"value 3@0x10"),
12186 1 : Bytes::from_static(b"value 4@0x10"),
12187 1 : Bytes::from_static(b"value 5@0x10"),
12188 1 : Bytes::from_static(b"value 6@0x10"),
12189 1 : Bytes::from_static(b"value 7@0x10"),
12190 1 : Bytes::from_static(b"value 8@0x10"),
12191 1 : Bytes::from_static(b"value 9@0x10"),
12192 1 : ];
12193 1 :
12194 1 : let expected_result_at_lsn_20 = [
12195 1 : Bytes::from_static(b"value 0@0x10"),
12196 1 : Bytes::from_static(b"value 1@0x10@0x20"),
12197 1 : Bytes::from_static(b"value 2@0x10"),
12198 1 : Bytes::from_static(b"value 3@0x10"),
12199 1 : Bytes::from_static(b"value 4@0x10"),
12200 1 : Bytes::from_static(b"value 5@0x10"),
12201 1 : Bytes::from_static(b"value 6@0x10"),
12202 1 : Bytes::from_static(b"value 7@0x10"),
12203 1 : Bytes::from_static(b"value 8@0x10"),
12204 1 : Bytes::from_static(b"value 9@0x10"),
12205 1 : ];
12206 1 :
12207 1 : let expected_result_at_lsn_10 = [
12208 1 : Bytes::from_static(b"value 0@0x10"),
12209 1 : Bytes::from_static(b"value 1@0x10"),
12210 1 : Bytes::from_static(b"value 2@0x10"),
12211 1 : Bytes::from_static(b"value 3@0x10"),
12212 1 : Bytes::from_static(b"value 4@0x10"),
12213 1 : Bytes::from_static(b"value 5@0x10"),
12214 1 : Bytes::from_static(b"value 6@0x10"),
12215 1 : Bytes::from_static(b"value 7@0x10"),
12216 1 : Bytes::from_static(b"value 8@0x10"),
12217 1 : Bytes::from_static(b"value 9@0x10"),
12218 1 : ];
12219 1 :
12220 5 : let verify_result = || async {
12221 5 : let gc_horizon = {
12222 5 : let gc_info = tline.gc_info.read().unwrap();
12223 5 : gc_info.cutoffs.time.unwrap_or_default()
12224 1 : };
12225 55 : for idx in 0..10 {
12226 50 : assert_eq!(
12227 50 : tline
12228 50 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
12229 50 : .await
12230 50 : .unwrap(),
12231 50 : &expected_result[idx]
12232 1 : );
12233 50 : assert_eq!(
12234 50 : tline
12235 50 : .get(get_key(idx as u32), gc_horizon, &ctx)
12236 50 : .await
12237 50 : .unwrap(),
12238 50 : &expected_result_at_gc_horizon[idx]
12239 1 : );
12240 50 : assert_eq!(
12241 50 : tline
12242 50 : .get(get_key(idx as u32), Lsn(0x20), &ctx)
12243 50 : .await
12244 50 : .unwrap(),
12245 50 : &expected_result_at_lsn_20[idx]
12246 1 : );
12247 50 : assert_eq!(
12248 50 : tline
12249 50 : .get(get_key(idx as u32), Lsn(0x10), &ctx)
12250 50 : .await
12251 50 : .unwrap(),
12252 50 : &expected_result_at_lsn_10[idx]
12253 1 : );
12254 1 : }
12255 10 : };
12256 1 :
12257 1 : verify_result().await;
12258 1 :
12259 1 : let cancel = CancellationToken::new();
12260 1 :
12261 1 : tline
12262 1 : .compact_with_gc(
12263 1 : &cancel,
12264 1 : CompactOptions {
12265 1 : compact_key_range: Some((get_key(0)..get_key(2)).into()),
12266 1 : compact_lsn_range: Some((Lsn(0x20)..Lsn(0x28)).into()),
12267 1 : ..Default::default()
12268 1 : },
12269 1 : &ctx,
12270 1 : )
12271 1 : .await
12272 1 : .unwrap();
12273 1 : verify_result().await;
12274 1 :
12275 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
12276 1 : check_layer_map_key_eq(
12277 1 : all_layers,
12278 1 : vec![
12279 1 : // The original image layer, not compacted
12280 1 : PersistentLayerKey {
12281 1 : key_range: get_key(0)..get_key(10),
12282 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
12283 1 : is_delta: false,
12284 1 : },
12285 1 : // According the selection logic, we select all layers with start key <= 0x28, so we would merge the layer 0x20-0x28 and
12286 1 : // the layer 0x28-0x30 into one.
12287 1 : PersistentLayerKey {
12288 1 : key_range: get_key(1)..get_key(2),
12289 1 : lsn_range: Lsn(0x20)..Lsn(0x30),
12290 1 : is_delta: true,
12291 1 : },
12292 1 : // Above the upper bound and untouched
12293 1 : PersistentLayerKey {
12294 1 : key_range: get_key(1)..get_key(2),
12295 1 : lsn_range: Lsn(0x30)..Lsn(0x50),
12296 1 : is_delta: true,
12297 1 : },
12298 1 : // This layer is untouched
12299 1 : PersistentLayerKey {
12300 1 : key_range: get_key(8)..get_key(10),
12301 1 : lsn_range: Lsn(0x30)..Lsn(0x50),
12302 1 : is_delta: true,
12303 1 : },
12304 1 : ],
12305 1 : );
12306 1 :
12307 1 : tline
12308 1 : .compact_with_gc(
12309 1 : &cancel,
12310 1 : CompactOptions {
12311 1 : compact_key_range: Some((get_key(3)..get_key(8)).into()),
12312 1 : compact_lsn_range: Some((Lsn(0x28)..Lsn(0x40)).into()),
12313 1 : ..Default::default()
12314 1 : },
12315 1 : &ctx,
12316 1 : )
12317 1 : .await
12318 1 : .unwrap();
12319 1 : verify_result().await;
12320 1 :
12321 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
12322 1 : check_layer_map_key_eq(
12323 1 : all_layers,
12324 1 : vec![
12325 1 : // The original image layer, not compacted
12326 1 : PersistentLayerKey {
12327 1 : key_range: get_key(0)..get_key(10),
12328 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
12329 1 : is_delta: false,
12330 1 : },
12331 1 : // Not in the compaction key range, uncompacted
12332 1 : PersistentLayerKey {
12333 1 : key_range: get_key(1)..get_key(2),
12334 1 : lsn_range: Lsn(0x20)..Lsn(0x30),
12335 1 : is_delta: true,
12336 1 : },
12337 1 : // Not in the compaction key range, uncompacted but need rewrite because the delta layer overlaps with the range
12338 1 : PersistentLayerKey {
12339 1 : key_range: get_key(1)..get_key(2),
12340 1 : lsn_range: Lsn(0x30)..Lsn(0x50),
12341 1 : is_delta: true,
12342 1 : },
12343 1 : // Note that when we specify the LSN upper bound to be 0x40, the compaction algorithm will not try to cut the layer
12344 1 : // horizontally in half. Instead, it will include all LSNs that overlap with 0x40. So the real max_lsn of the compaction
12345 1 : // becomes 0x50.
12346 1 : PersistentLayerKey {
12347 1 : key_range: get_key(8)..get_key(10),
12348 1 : lsn_range: Lsn(0x30)..Lsn(0x50),
12349 1 : is_delta: true,
12350 1 : },
12351 1 : ],
12352 1 : );
12353 1 :
12354 1 : // compact again
12355 1 : tline
12356 1 : .compact_with_gc(
12357 1 : &cancel,
12358 1 : CompactOptions {
12359 1 : compact_key_range: Some((get_key(0)..get_key(5)).into()),
12360 1 : compact_lsn_range: Some((Lsn(0x20)..Lsn(0x50)).into()),
12361 1 : ..Default::default()
12362 1 : },
12363 1 : &ctx,
12364 1 : )
12365 1 : .await
12366 1 : .unwrap();
12367 1 : verify_result().await;
12368 1 :
12369 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
12370 1 : check_layer_map_key_eq(
12371 1 : all_layers,
12372 1 : vec![
12373 1 : // The original image layer, not compacted
12374 1 : PersistentLayerKey {
12375 1 : key_range: get_key(0)..get_key(10),
12376 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
12377 1 : is_delta: false,
12378 1 : },
12379 1 : // The range gets compacted
12380 1 : PersistentLayerKey {
12381 1 : key_range: get_key(1)..get_key(2),
12382 1 : lsn_range: Lsn(0x20)..Lsn(0x50),
12383 1 : is_delta: true,
12384 1 : },
12385 1 : // Not touched during this iteration of compaction
12386 1 : PersistentLayerKey {
12387 1 : key_range: get_key(8)..get_key(10),
12388 1 : lsn_range: Lsn(0x30)..Lsn(0x50),
12389 1 : is_delta: true,
12390 1 : },
12391 1 : ],
12392 1 : );
12393 1 :
12394 1 : // final full compaction
12395 1 : tline
12396 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
12397 1 : .await
12398 1 : .unwrap();
12399 1 : verify_result().await;
12400 1 :
12401 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
12402 1 : check_layer_map_key_eq(
12403 1 : all_layers,
12404 1 : vec![
12405 1 : // The compacted image layer (full key range)
12406 1 : PersistentLayerKey {
12407 1 : key_range: Key::MIN..Key::MAX,
12408 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
12409 1 : is_delta: false,
12410 1 : },
12411 1 : // All other data in the delta layer
12412 1 : PersistentLayerKey {
12413 1 : key_range: get_key(1)..get_key(10),
12414 1 : lsn_range: Lsn(0x10)..Lsn(0x50),
12415 1 : is_delta: true,
12416 1 : },
12417 1 : ],
12418 1 : );
12419 1 :
12420 1 : Ok(())
12421 1 : }
12422 :
12423 : #[cfg(feature = "testing")]
12424 : #[tokio::test]
12425 1 : async fn test_bottom_most_compation_redo_failure() -> anyhow::Result<()> {
12426 1 : let harness = TenantHarness::create("test_bottom_most_compation_redo_failure").await?;
12427 1 : let (tenant, ctx) = harness.load().await;
12428 1 :
12429 13 : fn get_key(id: u32) -> Key {
12430 13 : // using aux key here b/c they are guaranteed to be inside `collect_keyspace`.
12431 13 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
12432 13 : key.field6 = id;
12433 13 : key
12434 13 : }
12435 1 :
12436 1 : let img_layer = (0..10)
12437 10 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
12438 1 : .collect_vec();
12439 1 :
12440 1 : let delta1 = vec![
12441 1 : (
12442 1 : get_key(1),
12443 1 : Lsn(0x20),
12444 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
12445 1 : ),
12446 1 : (
12447 1 : get_key(1),
12448 1 : Lsn(0x24),
12449 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x24")),
12450 1 : ),
12451 1 : (
12452 1 : get_key(1),
12453 1 : Lsn(0x28),
12454 1 : // This record will fail to redo
12455 1 : Value::WalRecord(NeonWalRecord::wal_append_conditional("@0x28", "???")),
12456 1 : ),
12457 1 : ];
12458 1 :
12459 1 : let tline = tenant
12460 1 : .create_test_timeline_with_layers(
12461 1 : TIMELINE_ID,
12462 1 : Lsn(0x10),
12463 1 : DEFAULT_PG_VERSION,
12464 1 : &ctx,
12465 1 : vec![], // in-memory layers
12466 1 : vec![DeltaLayerTestDesc::new_with_inferred_key_range(
12467 1 : Lsn(0x20)..Lsn(0x30),
12468 1 : delta1,
12469 1 : )], // delta layers
12470 1 : vec![(Lsn(0x10), img_layer)], // image layers
12471 1 : Lsn(0x50),
12472 1 : )
12473 1 : .await?;
12474 1 : {
12475 1 : tline
12476 1 : .applied_gc_cutoff_lsn
12477 1 : .lock_for_write()
12478 1 : .store_and_unlock(Lsn(0x30))
12479 1 : .wait()
12480 1 : .await;
12481 1 : // Update GC info
12482 1 : let mut guard = tline.gc_info.write().unwrap();
12483 1 : *guard = GcInfo {
12484 1 : retain_lsns: vec![],
12485 1 : cutoffs: GcCutoffs {
12486 1 : time: Some(Lsn(0x30)),
12487 1 : space: Lsn(0x30),
12488 1 : },
12489 1 : leases: Default::default(),
12490 1 : within_ancestor_pitr: false,
12491 1 : };
12492 1 : }
12493 1 :
12494 1 : let cancel = CancellationToken::new();
12495 1 :
12496 1 : // Compaction will fail, but should not fire any critical error.
12497 1 : // Gc-compaction currently cannot figure out what keys are not in the keyspace during the compaction
12498 1 : // process. It will always try to redo the logs it reads and if it doesn't work, fail the entire
12499 1 : // compaction job. Tracked in <https://github.com/neondatabase/neon/issues/10395>.
12500 1 : let res = tline
12501 1 : .compact_with_gc(
12502 1 : &cancel,
12503 1 : CompactOptions {
12504 1 : compact_key_range: None,
12505 1 : compact_lsn_range: None,
12506 1 : ..Default::default()
12507 1 : },
12508 1 : &ctx,
12509 1 : )
12510 1 : .await;
12511 1 : assert!(res.is_err());
12512 1 :
12513 1 : Ok(())
12514 1 : }
12515 :
12516 : #[cfg(feature = "testing")]
12517 : #[tokio::test]
12518 1 : async fn test_synthetic_size_calculation_with_invisible_branches() -> anyhow::Result<()> {
12519 1 : use pageserver_api::models::TimelineVisibilityState;
12520 1 :
12521 1 : use crate::tenant::size::gather_inputs;
12522 1 :
12523 1 : let tenant_conf = pageserver_api::models::TenantConfig {
12524 1 : // Ensure that we don't compute gc_cutoffs (which needs reading the layer files)
12525 1 : pitr_interval: Some(Duration::ZERO),
12526 1 : ..Default::default()
12527 1 : };
12528 1 : let harness = TenantHarness::create_custom(
12529 1 : "test_synthetic_size_calculation_with_invisible_branches",
12530 1 : tenant_conf,
12531 1 : TenantId::generate(),
12532 1 : ShardIdentity::unsharded(),
12533 1 : Generation::new(0xdeadbeef),
12534 1 : )
12535 1 : .await?;
12536 1 : let (tenant, ctx) = harness.load().await;
12537 1 : let main_tline = tenant
12538 1 : .create_test_timeline_with_layers(
12539 1 : TIMELINE_ID,
12540 1 : Lsn(0x10),
12541 1 : DEFAULT_PG_VERSION,
12542 1 : &ctx,
12543 1 : vec![],
12544 1 : vec![],
12545 1 : vec![],
12546 1 : Lsn(0x100),
12547 1 : )
12548 1 : .await?;
12549 1 :
12550 1 : let snapshot1 = TimelineId::from_array(hex!("11223344556677881122334455667790"));
12551 1 : tenant
12552 1 : .branch_timeline_test_with_layers(
12553 1 : &main_tline,
12554 1 : snapshot1,
12555 1 : Some(Lsn(0x20)),
12556 1 : &ctx,
12557 1 : vec![],
12558 1 : vec![],
12559 1 : Lsn(0x50),
12560 1 : )
12561 1 : .await?;
12562 1 : let snapshot2 = TimelineId::from_array(hex!("11223344556677881122334455667791"));
12563 1 : tenant
12564 1 : .branch_timeline_test_with_layers(
12565 1 : &main_tline,
12566 1 : snapshot2,
12567 1 : Some(Lsn(0x30)),
12568 1 : &ctx,
12569 1 : vec![],
12570 1 : vec![],
12571 1 : Lsn(0x50),
12572 1 : )
12573 1 : .await?;
12574 1 : let snapshot3 = TimelineId::from_array(hex!("11223344556677881122334455667792"));
12575 1 : tenant
12576 1 : .branch_timeline_test_with_layers(
12577 1 : &main_tline,
12578 1 : snapshot3,
12579 1 : Some(Lsn(0x40)),
12580 1 : &ctx,
12581 1 : vec![],
12582 1 : vec![],
12583 1 : Lsn(0x50),
12584 1 : )
12585 1 : .await?;
12586 1 : let limit = Arc::new(Semaphore::new(1));
12587 1 : let max_retention_period = None;
12588 1 : let mut logical_size_cache = HashMap::new();
12589 1 : let cause = LogicalSizeCalculationCause::EvictionTaskImitation;
12590 1 : let cancel = CancellationToken::new();
12591 1 :
12592 1 : let inputs = gather_inputs(
12593 1 : &tenant,
12594 1 : &limit,
12595 1 : max_retention_period,
12596 1 : &mut logical_size_cache,
12597 1 : cause,
12598 1 : &cancel,
12599 1 : &ctx,
12600 1 : )
12601 1 : .instrument(info_span!(
12602 1 : "gather_inputs",
12603 1 : tenant_id = "unknown",
12604 1 : shard_id = "unknown",
12605 1 : ))
12606 1 : .await?;
12607 1 : use crate::tenant::size::{LsnKind, ModelInputs, SegmentMeta};
12608 1 : use LsnKind::*;
12609 1 : use tenant_size_model::Segment;
12610 1 : let ModelInputs { mut segments, .. } = inputs;
12611 15 : segments.retain(|s| s.timeline_id == TIMELINE_ID);
12612 6 : for segment in segments.iter_mut() {
12613 6 : segment.segment.parent = None; // We don't care about the parent for the test
12614 6 : segment.segment.size = None; // We don't care about the size for the test
12615 6 : }
12616 1 : assert_eq!(
12617 1 : segments,
12618 1 : [
12619 1 : SegmentMeta {
12620 1 : segment: Segment {
12621 1 : parent: None,
12622 1 : lsn: 0x10,
12623 1 : size: None,
12624 1 : needed: false,
12625 1 : },
12626 1 : timeline_id: TIMELINE_ID,
12627 1 : kind: BranchStart,
12628 1 : },
12629 1 : SegmentMeta {
12630 1 : segment: Segment {
12631 1 : parent: None,
12632 1 : lsn: 0x20,
12633 1 : size: None,
12634 1 : needed: false,
12635 1 : },
12636 1 : timeline_id: TIMELINE_ID,
12637 1 : kind: BranchPoint,
12638 1 : },
12639 1 : SegmentMeta {
12640 1 : segment: Segment {
12641 1 : parent: None,
12642 1 : lsn: 0x30,
12643 1 : size: None,
12644 1 : needed: false,
12645 1 : },
12646 1 : timeline_id: TIMELINE_ID,
12647 1 : kind: BranchPoint,
12648 1 : },
12649 1 : SegmentMeta {
12650 1 : segment: Segment {
12651 1 : parent: None,
12652 1 : lsn: 0x40,
12653 1 : size: None,
12654 1 : needed: false,
12655 1 : },
12656 1 : timeline_id: TIMELINE_ID,
12657 1 : kind: BranchPoint,
12658 1 : },
12659 1 : SegmentMeta {
12660 1 : segment: Segment {
12661 1 : parent: None,
12662 1 : lsn: 0x100,
12663 1 : size: None,
12664 1 : needed: false,
12665 1 : },
12666 1 : timeline_id: TIMELINE_ID,
12667 1 : kind: GcCutOff,
12668 1 : }, // we need to retain everything above the last branch point
12669 1 : SegmentMeta {
12670 1 : segment: Segment {
12671 1 : parent: None,
12672 1 : lsn: 0x100,
12673 1 : size: None,
12674 1 : needed: true,
12675 1 : },
12676 1 : timeline_id: TIMELINE_ID,
12677 1 : kind: BranchEnd,
12678 1 : },
12679 1 : ]
12680 1 : );
12681 1 :
12682 1 : main_tline
12683 1 : .remote_client
12684 1 : .schedule_index_upload_for_timeline_invisible_state(
12685 1 : TimelineVisibilityState::Invisible,
12686 1 : )?;
12687 1 : main_tline.remote_client.wait_completion().await?;
12688 1 : let inputs = gather_inputs(
12689 1 : &tenant,
12690 1 : &limit,
12691 1 : max_retention_period,
12692 1 : &mut logical_size_cache,
12693 1 : cause,
12694 1 : &cancel,
12695 1 : &ctx,
12696 1 : )
12697 1 : .instrument(info_span!(
12698 1 : "gather_inputs",
12699 1 : tenant_id = "unknown",
12700 1 : shard_id = "unknown",
12701 1 : ))
12702 1 : .await?;
12703 1 : let ModelInputs { mut segments, .. } = inputs;
12704 14 : segments.retain(|s| s.timeline_id == TIMELINE_ID);
12705 5 : for segment in segments.iter_mut() {
12706 5 : segment.segment.parent = None; // We don't care about the parent for the test
12707 5 : segment.segment.size = None; // We don't care about the size for the test
12708 5 : }
12709 1 : assert_eq!(
12710 1 : segments,
12711 1 : [
12712 1 : SegmentMeta {
12713 1 : segment: Segment {
12714 1 : parent: None,
12715 1 : lsn: 0x10,
12716 1 : size: None,
12717 1 : needed: false,
12718 1 : },
12719 1 : timeline_id: TIMELINE_ID,
12720 1 : kind: BranchStart,
12721 1 : },
12722 1 : SegmentMeta {
12723 1 : segment: Segment {
12724 1 : parent: None,
12725 1 : lsn: 0x20,
12726 1 : size: None,
12727 1 : needed: false,
12728 1 : },
12729 1 : timeline_id: TIMELINE_ID,
12730 1 : kind: BranchPoint,
12731 1 : },
12732 1 : SegmentMeta {
12733 1 : segment: Segment {
12734 1 : parent: None,
12735 1 : lsn: 0x30,
12736 1 : size: None,
12737 1 : needed: false,
12738 1 : },
12739 1 : timeline_id: TIMELINE_ID,
12740 1 : kind: BranchPoint,
12741 1 : },
12742 1 : SegmentMeta {
12743 1 : segment: Segment {
12744 1 : parent: None,
12745 1 : lsn: 0x40,
12746 1 : size: None,
12747 1 : needed: false,
12748 1 : },
12749 1 : timeline_id: TIMELINE_ID,
12750 1 : kind: BranchPoint,
12751 1 : },
12752 1 : SegmentMeta {
12753 1 : segment: Segment {
12754 1 : parent: None,
12755 1 : lsn: 0x40, // Branch end LSN == last branch point LSN
12756 1 : size: None,
12757 1 : needed: true,
12758 1 : },
12759 1 : timeline_id: TIMELINE_ID,
12760 1 : kind: BranchEnd,
12761 1 : },
12762 1 : ]
12763 1 : );
12764 1 : Ok(())
12765 1 : }
12766 : }
|