Line data Source code
1 : //! Timeline repository implementation that keeps old data in layer files, and
2 : //! the recent changes in ephemeral files.
3 : //!
4 : //! See tenant/*_layer.rs files. The functions here are responsible for locating
5 : //! the correct layer for the get/put call, walking back the timeline branching
6 : //! history as needed.
7 : //!
8 : //! The files are stored in the .neon/tenants/<tenant_id>/timelines/<timeline_id>
9 : //! directory. See docs/pageserver-storage.md for how the files are managed.
10 : //! In addition to the layer files, there is a metadata file in the same
11 : //! directory that contains information about the timeline, in particular its
12 : //! parent timeline, and the last LSN that has been written to disk.
13 : //!
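//! For illustration only, a pageserver working directory with one tenant and one
//! timeline contains roughly the following (placeholder names; see
//! docs/pageserver-storage.md for the authoritative file naming scheme):
//!
//! ```text
//! .neon/tenants/<tenant_id>/timelines/<timeline_id>/
//!     <key_range>__<lsn_range>    -- an image or delta layer file
//!     metadata                    -- parent timeline, last LSN written to disk, ...
//! ```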
14 :
15 : use std::collections::hash_map::Entry;
16 : use std::collections::{BTreeMap, HashMap, HashSet};
17 : use std::fmt::{Debug, Display};
18 : use std::fs::File;
19 : use std::future::Future;
20 : use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
21 : use std::sync::{Arc, Mutex, Weak};
22 : use std::time::{Duration, Instant, SystemTime};
23 : use std::{fmt, fs};
24 :
25 : use anyhow::{Context, bail};
26 : use arc_swap::ArcSwap;
27 : use camino::{Utf8Path, Utf8PathBuf};
28 : use chrono::NaiveDateTime;
29 : use enumset::EnumSet;
30 : use futures::StreamExt;
31 : use futures::stream::FuturesUnordered;
32 : use itertools::Itertools as _;
33 : use once_cell::sync::Lazy;
34 : pub use pageserver_api::models::TenantState;
35 : use pageserver_api::models::{self, RelSizeMigration};
36 : use pageserver_api::models::{
37 : CompactInfoResponse, LsnLease, TimelineArchivalState, TimelineState, TopTenantShardItem,
38 : WalRedoManagerStatus,
39 : };
40 : use pageserver_api::shard::{ShardIdentity, ShardStripeSize, TenantShardId};
41 : use remote_storage::{DownloadError, GenericRemoteStorage, TimeoutOrCancel};
42 : use remote_timeline_client::index::GcCompactionState;
43 : use remote_timeline_client::manifest::{
44 : LATEST_TENANT_MANIFEST_VERSION, OffloadedTimelineManifest, TenantManifest,
45 : };
46 : use remote_timeline_client::{
47 : FAILED_REMOTE_OP_RETRIES, FAILED_UPLOAD_WARN_THRESHOLD, UploadQueueNotReadyError,
48 : download_tenant_manifest,
49 : };
50 : use secondary::heatmap::{HeatMapTenant, HeatMapTimeline};
51 : use storage_broker::BrokerClientChannel;
52 : use timeline::compaction::{CompactionOutcome, GcCompactionQueue};
53 : use timeline::import_pgdata::ImportingTimeline;
54 : use timeline::offload::{OffloadError, offload_timeline};
55 : use timeline::{
56 : CompactFlags, CompactOptions, CompactionError, PreviousHeatmap, ShutdownMode, import_pgdata,
57 : };
58 : use tokio::io::BufReader;
59 : use tokio::sync::{Notify, Semaphore, watch};
60 : use tokio::task::JoinSet;
61 : use tokio_util::sync::CancellationToken;
62 : use tracing::*;
63 : use upload_queue::NotInitialized;
64 : use utils::circuit_breaker::CircuitBreaker;
65 : use utils::crashsafe::path_with_suffix_extension;
66 : use utils::sync::gate::{Gate, GateGuard};
67 : use utils::timeout::{TimeoutCancellableError, timeout_cancellable};
68 : use utils::try_rcu::ArcSwapExt;
69 : use utils::zstd::{create_zst_tarball, extract_zst_tarball};
70 : use utils::{backoff, completion, failpoint_support, fs_ext, pausable_failpoint};
71 :
72 : use self::config::{AttachedLocationConfig, AttachmentMode, LocationConf};
73 : use self::metadata::TimelineMetadata;
74 : use self::mgr::{GetActiveTenantError, GetTenantError};
75 : use self::remote_timeline_client::upload::{upload_index_part, upload_tenant_manifest};
76 : use self::remote_timeline_client::{RemoteTimelineClient, WaitCompletionError};
77 : use self::timeline::uninit::{TimelineCreateGuard, TimelineExclusionError, UninitializedTimeline};
78 : use self::timeline::{
79 : EvictionTaskTenantState, GcCutoffs, TimelineDeleteProgress, TimelineResources, WaitLsnError,
80 : };
81 : use crate::basebackup_cache::BasebackupPrepareSender;
82 : use crate::config::PageServerConf;
83 : use crate::context;
84 : use crate::context::RequestContextBuilder;
85 : use crate::context::{DownloadBehavior, RequestContext};
86 : use crate::deletion_queue::{DeletionQueueClient, DeletionQueueError};
87 : use crate::feature_resolver::FeatureResolver;
88 : use crate::l0_flush::L0FlushGlobalState;
89 : use crate::metrics::{
90 : BROKEN_TENANTS_SET, CIRCUIT_BREAKERS_BROKEN, CIRCUIT_BREAKERS_UNBROKEN, CONCURRENT_INITDBS,
91 : INITDB_RUN_TIME, INITDB_SEMAPHORE_ACQUISITION_TIME, TENANT, TENANT_OFFLOADED_TIMELINES,
92 : TENANT_STATE_METRIC, TENANT_SYNTHETIC_SIZE_METRIC, remove_tenant_metrics,
93 : };
94 : use crate::task_mgr::TaskKind;
95 : use crate::tenant::config::LocationMode;
96 : use crate::tenant::gc_result::GcResult;
97 : pub use crate::tenant::remote_timeline_client::index::IndexPart;
98 : use crate::tenant::remote_timeline_client::{
99 : INITDB_PATH, MaybeDeletedIndexPart, remote_initdb_archive_path,
100 : };
101 : use crate::tenant::storage_layer::{DeltaLayer, ImageLayer};
102 : use crate::tenant::timeline::delete::DeleteTimelineFlow;
103 : use crate::tenant::timeline::uninit::cleanup_timeline_directory;
104 : use crate::virtual_file::VirtualFile;
105 : use crate::walingest::WalLagCooldown;
106 : use crate::walredo::{PostgresRedoManager, RedoAttemptType};
107 : use crate::{InitializationOrder, TEMP_FILE_SUFFIX, import_datadir, span, task_mgr, walredo};
108 :
109 0 : static INIT_DB_SEMAPHORE: Lazy<Semaphore> = Lazy::new(|| Semaphore::new(8));
110 : use utils::crashsafe;
111 : use utils::generation::Generation;
112 : use utils::id::TimelineId;
113 : use utils::lsn::{Lsn, RecordLsn};
114 :
115 : pub mod blob_io;
116 : pub mod block_io;
117 : pub mod vectored_blob_io;
118 :
119 : pub mod disk_btree;
120 : pub(crate) mod ephemeral_file;
121 : pub mod layer_map;
122 :
123 : pub mod metadata;
124 : pub mod remote_timeline_client;
125 : pub mod storage_layer;
126 :
127 : pub mod checks;
128 : pub mod config;
129 : pub mod mgr;
130 : pub mod secondary;
131 : pub mod tasks;
132 : pub mod upload_queue;
133 :
134 : pub(crate) mod timeline;
135 :
136 : pub mod size;
137 :
138 : mod gc_block;
139 : mod gc_result;
140 : pub(crate) mod throttle;
141 :
142 : pub(crate) use timeline::{LogicalSizeCalculationCause, PageReconstructError, Timeline};
143 :
144 : pub(crate) use crate::span::debug_assert_current_span_has_tenant_and_timeline_id;
145 : // re-export for use in walreceiver
146 : pub use crate::tenant::timeline::WalReceiverInfo;
147 :
148 : /// The "tenants" part of `tenants/<tenant>/timelines...`
149 : pub const TENANTS_SEGMENT_NAME: &str = "tenants";
150 :
151 : /// Parts of the `.neon/tenants/<tenant_id>/timelines/<timeline_id>` directory prefix.
152 : pub const TIMELINES_SEGMENT_NAME: &str = "timelines";
153 :
154 : /// References to shared objects that are passed into each tenant, such
155 : /// as the shared remote storage client and process initialization state.
156 : #[derive(Clone)]
157 : pub struct TenantSharedResources {
158 : pub broker_client: storage_broker::BrokerClientChannel,
159 : pub remote_storage: GenericRemoteStorage,
160 : pub deletion_queue_client: DeletionQueueClient,
161 : pub l0_flush_global_state: L0FlushGlobalState,
162 : pub basebackup_prepare_sender: BasebackupPrepareSender,
163 : pub feature_resolver: FeatureResolver,
164 : }
165 :
166 : /// A [`TenantShard`] is really an _attached_ tenant. The configuration
167 : /// for an attached tenant is a subset of the [`LocationConf`], represented
168 : /// in this struct.
169 : #[derive(Clone)]
170 : pub(super) struct AttachedTenantConf {
171 : tenant_conf: pageserver_api::models::TenantConfig,
172 : location: AttachedLocationConfig,
173 : /// The deadline before which we are blocked from GC so that
174 : /// leases have a chance to be renewed.
175 : lsn_lease_deadline: Option<tokio::time::Instant>,
176 : }
177 :
178 : impl AttachedTenantConf {
179 117 : fn new(
180 117 : tenant_conf: pageserver_api::models::TenantConfig,
181 117 : location: AttachedLocationConfig,
182 117 : ) -> Self {
183 : // Sets a deadline before which we cannot proceed to GC due to lsn lease.
184 : //
185 : // We do this because the lease mapping is not persisted to disk. By delaying GC by lease
186 : // length, we guarantee that all the leases we granted before will have a chance to renew
187 : // when we run GC for the first time after restart / transition from AttachedMulti to AttachedSingle.
188 117 : let lsn_lease_deadline = if location.attach_mode == AttachmentMode::Single {
189 117 : Some(
190 117 : tokio::time::Instant::now()
191 117 : + tenant_conf
192 117 : .lsn_lease_length
193 117 : .unwrap_or(LsnLease::DEFAULT_LENGTH),
194 117 : )
195 : } else {
196 : // We don't use `lsn_lease_deadline` to delay GC in AttachedMulti and AttachedStale
197 : // because we don't do GC in these modes.
198 0 : None
199 : };
200 :
201 117 : Self {
202 117 : tenant_conf,
203 117 : location,
204 117 : lsn_lease_deadline,
205 117 : }
206 117 : }
207 :
208 117 : fn try_from(location_conf: LocationConf) -> anyhow::Result<Self> {
209 117 : match &location_conf.mode {
210 117 : LocationMode::Attached(attach_conf) => {
211 117 : Ok(Self::new(location_conf.tenant_conf, *attach_conf))
212 : }
213 : LocationMode::Secondary(_) => {
214 0 : anyhow::bail!(
215 0 : "Attempted to construct AttachedTenantConf from a LocationConf in secondary mode"
216 0 : )
217 : }
218 : }
219 117 : }
220 :
221 381 : fn is_gc_blocked_by_lsn_lease_deadline(&self) -> bool {
222 381 : self.lsn_lease_deadline
223 381 : .map(|d| tokio::time::Instant::now() < d)
224 381 : .unwrap_or(false)
225 381 : }
226 : }
227 : struct TimelinePreload {
228 : timeline_id: TimelineId,
229 : client: RemoteTimelineClient,
230 : index_part: Result<MaybeDeletedIndexPart, DownloadError>,
231 : previous_heatmap: Option<PreviousHeatmap>,
232 : }
233 :
234 : pub(crate) struct TenantPreload {
235 : /// The tenant manifest from remote storage, or None if no manifest was found.
236 : tenant_manifest: Option<TenantManifest>,
237 : /// Map from timeline ID to a possible timeline preload. It is None iff the timeline is offloaded according to the manifest.
238 : timelines: HashMap<TimelineId, Option<TimelinePreload>>,
239 : }
240 :
241 : /// When we spawn a tenant, there is a special mode for tenant creation that
242 : /// avoids trying to read anything from remote storage.
243 : pub(crate) enum SpawnMode {
244 : /// Activate as soon as possible
245 : Eager,
246 : /// Lazy activation in the background, with the option to skip the queue if the need comes up
247 : Lazy,
248 : }
249 :
250 : ///
251 : /// A tenant consists of multiple timelines. Keep them in a hash table.
252 : ///
253 : pub struct TenantShard {
254 : // Global pageserver config parameters
255 : pub conf: &'static PageServerConf,
256 :
257 : /// The value creation timestamp, used to measure activation delay, see:
258 : /// <https://github.com/neondatabase/neon/issues/4025>
259 : constructed_at: Instant,
260 :
261 : state: watch::Sender<TenantState>,
262 :
263 : // Overridden tenant-specific config parameters.
264 : // We keep the pageserver_api::models::TenantConfig struct here to preserve the information
265 : // about parameters that are not set.
266 : // This is necessary to allow global config updates.
267 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
268 :
269 : tenant_shard_id: TenantShardId,
270 :
271 : // The detailed sharding information, beyond the number/count in tenant_shard_id
272 : shard_identity: ShardIdentity,
273 :
274 : /// The remote storage generation, used to protect S3 objects from split-brain.
275 : /// Does not change over the lifetime of the [`TenantShard`] object.
276 : ///
277 : /// This duplicates the generation stored in LocationConf, but that structure is mutable:
278 : /// this copy enforces the invariant that generation doesn't change during a Tenant's lifetime.
279 : generation: Generation,
280 :
281 : timelines: Mutex<HashMap<TimelineId, Arc<Timeline>>>,
282 :
283 : /// During timeline creation, we first insert the TimelineId into the
284 : /// creating map, then into `timelines`, then remove it from the creating map.
285 : /// **Lock order**: if acquiring all (or a subset), acquire them in order `timelines`, `timelines_offloaded`, `timelines_creating`
286 : timelines_creating: std::sync::Mutex<HashSet<TimelineId>>,
287 :
288 : /// Possibly offloaded and archived timelines
289 : /// **Lock order**: if acquiring all (or a subset), acquire them in order `timelines`, `timelines_offloaded`, `timelines_creating`
290 : timelines_offloaded: Mutex<HashMap<TimelineId, Arc<OffloadedTimeline>>>,
291 :
292 : /// Tracks the timelines that are currently importing into this tenant shard.
293 : ///
294 : /// Note that importing timelines are also present in [`Self::timelines_creating`].
295 : /// Keep this in mind when ordering lock acquisition.
296 : ///
297 : /// Lifetime:
298 : /// * An imported timeline is created while scanning the bucket on tenant attach
299 : /// if the index part contains an `import_pgdata` entry and said field marks the import
300 : /// as in progress.
301 : /// * Imported timelines are removed when the storage controller calls the post timeline
302 : /// import activation endpoint.
303 : timelines_importing: std::sync::Mutex<HashMap<TimelineId, ImportingTimeline>>,
304 :
305 : /// The last tenant manifest known to be in remote storage. None if the manifest has not yet
306 : /// been either downloaded or uploaded. Always Some after tenant attach.
307 : ///
308 : /// Initially populated during tenant attach, updated via `maybe_upload_tenant_manifest`.
309 : ///
310 : /// Do not modify this directly. It is used to check whether a new manifest needs to be
311 : /// uploaded. The manifest is constructed in `build_tenant_manifest`, and uploaded via
312 : /// `maybe_upload_tenant_manifest`.
313 : remote_tenant_manifest: tokio::sync::Mutex<Option<TenantManifest>>,
314 :
315 : // This mutex prevents creation of new timelines during GC.
316 : // Adding yet another mutex (in addition to `timelines`) is needed because holding
317 : // `timelines` mutex during all GC iteration
318 : // may block for a long time `get_timeline`, `get_timelines_state`,... and other operations
319 : // with timelines, which in turn may cause dropping replication connection, expiration of wait_for_lsn
320 : // timeout...
321 : gc_cs: tokio::sync::Mutex<()>,
322 : walredo_mgr: Option<Arc<WalRedoManager>>,
323 :
324 : /// Provides access to timeline data sitting in the remote storage.
325 : pub(crate) remote_storage: GenericRemoteStorage,
326 :
327 : /// Access to global deletion queue for when this tenant wants to schedule a deletion.
328 : deletion_queue_client: DeletionQueueClient,
329 :
330 : /// A channel to send async requests to prepare a basebackup for the basebackup cache.
331 : basebackup_prepare_sender: BasebackupPrepareSender,
332 :
333 : /// Cached logical sizes updated on each [`TenantShard::gather_size_inputs`].
334 : cached_logical_sizes: tokio::sync::Mutex<HashMap<(TimelineId, Lsn), u64>>,
335 : cached_synthetic_tenant_size: Arc<AtomicU64>,
336 :
337 : eviction_task_tenant_state: tokio::sync::Mutex<EvictionTaskTenantState>,
338 :
339 : /// Track repeated failures to compact, so that we can back off.
340 : /// Overhead of mutex is acceptable because compaction is done with a multi-second period.
341 : compaction_circuit_breaker: std::sync::Mutex<CircuitBreaker>,
342 :
343 : /// Signals the tenant compaction loop that there is L0 compaction work to be done.
344 : pub(crate) l0_compaction_trigger: Arc<Notify>,
345 :
346 : /// Scheduled gc-compaction tasks.
347 : scheduled_compaction_tasks: std::sync::Mutex<HashMap<TimelineId, Arc<GcCompactionQueue>>>,
348 :
349 : /// If the tenant is in Activating state, notify this to encourage it
350 : /// to proceed to Active as soon as possible, rather than waiting for lazy
351 : /// background warmup.
352 : pub(crate) activate_now_sem: tokio::sync::Semaphore,
353 :
354 : /// Time it took for the tenant to activate. Zero if not active yet.
355 : attach_wal_lag_cooldown: Arc<std::sync::OnceLock<WalLagCooldown>>,
356 :
357 : // Cancellation token fires when we have entered shutdown(). This is a parent of
358 : // Timelines' cancellation token.
359 : pub(crate) cancel: CancellationToken,
360 :
361 : // Users of the TenantShard such as the page service must take this Gate to avoid
362 : // trying to use a TenantShard which is shutting down.
363 : pub(crate) gate: Gate,
364 :
365 : /// Throttle applied at the top of [`Timeline::get`].
366 : /// All [`TenantShard::timelines`] of a given [`TenantShard`] instance share the same [`throttle::Throttle`] instance.
367 : pub(crate) pagestream_throttle: Arc<throttle::Throttle>,
368 :
369 : pub(crate) pagestream_throttle_metrics: Arc<crate::metrics::tenant_throttling::Pagestream>,
370 :
371 : /// An ongoing timeline detach concurrency limiter.
372 : ///
373 : /// As a tenant will likely be restarted as part of timeline detach ancestor it makes no sense
374 : /// to have two running at the same time. A different one can be started if an earlier one
375 : /// has failed for whatever reason.
376 : ongoing_timeline_detach: std::sync::Mutex<Option<(TimelineId, utils::completion::Barrier)>>,
377 :
378 : /// `index_part.json` based gc blocking reason tracking.
379 : ///
380 : /// New gc iterations must start a new iteration by acquiring `GcBlock::start` before
381 : /// proceeding.
382 : pub(crate) gc_block: gc_block::GcBlock,
383 :
384 : l0_flush_global_state: L0FlushGlobalState,
385 :
386 : feature_resolver: FeatureResolver,
387 : }
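// A minimal sketch of the lock-order rule documented on the `timelines`,
// `timelines_offloaded` and `timelines_creating` fields above: code that needs more
// than one of these maps takes the guards in that fixed order (illustrative only;
// `tenant` is an assumed `&TenantShard`, and no such helper exists in this module):
//
//     let timelines = tenant.timelines.lock().unwrap();
//     let timelines_offloaded = tenant.timelines_offloaded.lock().unwrap();
//     let timelines_creating = tenant.timelines_creating.lock().unwrap();
//     // ... inspect or mutate the maps while holding all three guards ...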
388 : impl std::fmt::Debug for TenantShard {
389 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
390 0 : write!(f, "{} ({})", self.tenant_shard_id, self.current_state())
391 0 : }
392 : }
393 :
394 : pub(crate) enum WalRedoManager {
395 : Prod(WalredoManagerId, PostgresRedoManager),
396 : #[cfg(test)]
397 : Test(harness::TestRedoManager),
398 : }
399 :
400 : #[derive(thiserror::Error, Debug)]
401 : #[error("pageserver is shutting down")]
402 : pub(crate) struct GlobalShutDown;
403 :
404 : impl WalRedoManager {
405 0 : pub(crate) fn new(mgr: PostgresRedoManager) -> Result<Arc<Self>, GlobalShutDown> {
406 0 : let id = WalredoManagerId::next();
407 0 : let arc = Arc::new(Self::Prod(id, mgr));
408 0 : let mut guard = WALREDO_MANAGERS.lock().unwrap();
409 0 : match &mut *guard {
410 0 : Some(map) => {
411 0 : map.insert(id, Arc::downgrade(&arc));
412 0 : Ok(arc)
413 : }
414 0 : None => Err(GlobalShutDown),
415 : }
416 0 : }
417 : }
418 :
419 : impl Drop for WalRedoManager {
420 5 : fn drop(&mut self) {
421 5 : match self {
422 0 : Self::Prod(id, _) => {
423 0 : let mut guard = WALREDO_MANAGERS.lock().unwrap();
424 0 : if let Some(map) = &mut *guard {
425 0 : map.remove(id).expect("new() registers, drop() unregisters");
426 0 : }
427 : }
428 : #[cfg(test)]
429 5 : Self::Test(_) => {
430 5 : // Not applicable to test redo manager
431 5 : }
432 : }
433 5 : }
434 : }
435 :
436 : /// Global registry of all walredo managers so that [`crate::shutdown_pageserver`] can shut down
437 : /// the walredo processes outside of the regular order.
438 : ///
439 : /// This is necessary to work around a systemd bug where it freezes if there are
440 : /// walredo processes left => <https://github.com/neondatabase/cloud/issues/11387>
441 : #[allow(clippy::type_complexity)]
442 : pub(crate) static WALREDO_MANAGERS: once_cell::sync::Lazy<
443 : Mutex<Option<HashMap<WalredoManagerId, Weak<WalRedoManager>>>>,
444 0 : > = once_cell::sync::Lazy::new(|| Mutex::new(Some(HashMap::new())));
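// A sketch (assumed shape, not the actual `shutdown_pageserver` code) of how a
// process-wide shutdown path can drain this registry: taking the map makes further
// `WalRedoManager::new()` calls fail with `GlobalShutDown`, after which whatever is
// still alive gets shut down explicitly.
//
//     async fn shutdown_all_walredo_managers() {
//         let managers: Vec<Weak<WalRedoManager>> = {
//             let mut guard = WALREDO_MANAGERS.lock().unwrap();
//             guard.take().map(|m| m.into_values().collect()).unwrap_or_default()
//         };
//         for weak in managers {
//             if let Some(mgr) = weak.upgrade() {
//                 mgr.shutdown().await;
//             }
//         }
//     }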
445 : #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
446 : pub(crate) struct WalredoManagerId(u64);
447 : impl WalredoManagerId {
448 0 : pub fn next() -> Self {
449 : static NEXT: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(1);
450 0 : let id = NEXT.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
451 0 : if id == 0 {
452 0 : panic!(
453 0 : "WalredoManagerId::new() returned 0, indicating wraparound, risking it's no longer unique"
454 0 : );
455 0 : }
456 0 : Self(id)
457 0 : }
458 : }
459 :
460 : #[cfg(test)]
461 : impl From<harness::TestRedoManager> for WalRedoManager {
462 117 : fn from(mgr: harness::TestRedoManager) -> Self {
463 117 : Self::Test(mgr)
464 117 : }
465 : }
466 :
467 : impl WalRedoManager {
468 3 : pub(crate) async fn shutdown(&self) -> bool {
469 3 : match self {
470 0 : Self::Prod(_, mgr) => mgr.shutdown().await,
471 : #[cfg(test)]
472 : Self::Test(_) => {
473 : // Not applicable to test redo manager
474 3 : true
475 : }
476 : }
477 3 : }
478 :
479 0 : pub(crate) fn maybe_quiesce(&self, idle_timeout: Duration) {
480 0 : match self {
481 0 : Self::Prod(_, mgr) => mgr.maybe_quiesce(idle_timeout),
482 0 : #[cfg(test)]
483 0 : Self::Test(_) => {
484 0 : // Not applicable to test redo manager
485 0 : }
486 0 : }
487 0 : }
488 :
489 : /// # Cancel-Safety
490 : ///
491 : /// This method is cancellation-safe.
492 26774 : pub async fn request_redo(
493 26774 : &self,
494 26774 : key: pageserver_api::key::Key,
495 26774 : lsn: Lsn,
496 26774 : base_img: Option<(Lsn, bytes::Bytes)>,
497 26774 : records: Vec<(Lsn, pageserver_api::record::NeonWalRecord)>,
498 26774 : pg_version: u32,
499 26774 : redo_attempt_type: RedoAttemptType,
500 26774 : ) -> Result<bytes::Bytes, walredo::Error> {
501 26774 : match self {
502 0 : Self::Prod(_, mgr) => {
503 0 : mgr.request_redo(key, lsn, base_img, records, pg_version, redo_attempt_type)
504 0 : .await
505 : }
506 : #[cfg(test)]
507 26774 : Self::Test(mgr) => {
508 26774 : mgr.request_redo(key, lsn, base_img, records, pg_version, redo_attempt_type)
509 26774 : .await
510 : }
511 : }
512 26774 : }
513 :
514 0 : pub(crate) fn status(&self) -> Option<WalRedoManagerStatus> {
515 0 : match self {
516 0 : WalRedoManager::Prod(_, m) => Some(m.status()),
517 0 : #[cfg(test)]
518 0 : WalRedoManager::Test(_) => None,
519 0 : }
520 0 : }
521 : }
522 :
523 : /// A very lightweight memory representation of an offloaded timeline.
524 : ///
525 : /// We need to store the list of offloaded timelines so that we can perform operations on them,
526 : /// like unoffloading them, or (at a later date), decide to perform flattening.
527 : /// This type has a much smaller memory impact than [`Timeline`], and thus we can store many
528 : /// more offloaded timelines than we can manage ones that aren't.
529 : pub struct OffloadedTimeline {
530 : pub tenant_shard_id: TenantShardId,
531 : pub timeline_id: TimelineId,
532 : pub ancestor_timeline_id: Option<TimelineId>,
533 : /// Whether to retain the branch lsn at the ancestor or not
534 : pub ancestor_retain_lsn: Option<Lsn>,
535 :
536 : /// When the timeline was archived.
537 : ///
538 : /// Present for future flattening deliberations.
539 : pub archived_at: NaiveDateTime,
540 :
541 : /// Prevent two tasks from deleting the timeline at the same time. If held, the
542 : /// timeline is being deleted. If 'true', the timeline has already been deleted.
543 : pub delete_progress: TimelineDeleteProgress,
544 :
545 : /// Part of the `OffloadedTimeline` object's lifecycle: this needs to be set before we drop it
546 : pub deleted_from_ancestor: AtomicBool,
547 : }
548 :
549 : impl OffloadedTimeline {
550 : /// Obtains an offloaded timeline from a given timeline object.
551 : ///
552 : /// Returns an error if the `archived_at` flag couldn't be obtained, i.e.
553 : /// the timeline is not in a stopped state.
554 : /// Panics if the timeline is not archived.
555 1 : fn from_timeline(timeline: &Timeline) -> Result<Self, UploadQueueNotReadyError> {
556 1 : let (ancestor_retain_lsn, ancestor_timeline_id) =
557 1 : if let Some(ancestor_timeline) = timeline.ancestor_timeline() {
558 1 : let ancestor_lsn = timeline.get_ancestor_lsn();
559 1 : let ancestor_timeline_id = ancestor_timeline.timeline_id;
560 1 : let mut gc_info = ancestor_timeline.gc_info.write().unwrap();
561 1 : gc_info.insert_child(timeline.timeline_id, ancestor_lsn, MaybeOffloaded::Yes);
562 1 : (Some(ancestor_lsn), Some(ancestor_timeline_id))
563 : } else {
564 0 : (None, None)
565 : };
566 1 : let archived_at = timeline
567 1 : .remote_client
568 1 : .archived_at_stopped_queue()?
569 1 : .expect("must be called on an archived timeline");
570 1 : Ok(Self {
571 1 : tenant_shard_id: timeline.tenant_shard_id,
572 1 : timeline_id: timeline.timeline_id,
573 1 : ancestor_timeline_id,
574 1 : ancestor_retain_lsn,
575 1 : archived_at,
576 1 :
577 1 : delete_progress: timeline.delete_progress.clone(),
578 1 : deleted_from_ancestor: AtomicBool::new(false),
579 1 : })
580 1 : }
581 0 : fn from_manifest(tenant_shard_id: TenantShardId, manifest: &OffloadedTimelineManifest) -> Self {
582 0 : // We expect to reach this case in tenant loading, where the `retain_lsn` is populated in the parent's `gc_info`
583 0 : // by the `initialize_gc_info` function.
584 0 : let OffloadedTimelineManifest {
585 0 : timeline_id,
586 0 : ancestor_timeline_id,
587 0 : ancestor_retain_lsn,
588 0 : archived_at,
589 0 : } = *manifest;
590 0 : Self {
591 0 : tenant_shard_id,
592 0 : timeline_id,
593 0 : ancestor_timeline_id,
594 0 : ancestor_retain_lsn,
595 0 : archived_at,
596 0 : delete_progress: TimelineDeleteProgress::default(),
597 0 : deleted_from_ancestor: AtomicBool::new(false),
598 0 : }
599 0 : }
600 1 : fn manifest(&self) -> OffloadedTimelineManifest {
601 1 : let Self {
602 1 : timeline_id,
603 1 : ancestor_timeline_id,
604 1 : ancestor_retain_lsn,
605 1 : archived_at,
606 1 : ..
607 1 : } = self;
608 1 : OffloadedTimelineManifest {
609 1 : timeline_id: *timeline_id,
610 1 : ancestor_timeline_id: *ancestor_timeline_id,
611 1 : ancestor_retain_lsn: *ancestor_retain_lsn,
612 1 : archived_at: *archived_at,
613 1 : }
614 1 : }
615 : /// Delete this timeline's retain_lsn from its ancestor, if present in the given tenant
616 0 : fn delete_from_ancestor_with_timelines(
617 0 : &self,
618 0 : timelines: &std::sync::MutexGuard<'_, HashMap<TimelineId, Arc<Timeline>>>,
619 0 : ) {
620 0 : if let (Some(_retain_lsn), Some(ancestor_timeline_id)) =
621 0 : (self.ancestor_retain_lsn, self.ancestor_timeline_id)
622 : {
623 0 : if let Some((_, ancestor_timeline)) = timelines
624 0 : .iter()
625 0 : .find(|(tid, _tl)| **tid == ancestor_timeline_id)
626 : {
627 0 : let removal_happened = ancestor_timeline
628 0 : .gc_info
629 0 : .write()
630 0 : .unwrap()
631 0 : .remove_child_offloaded(self.timeline_id);
632 0 : if !removal_happened {
633 0 : tracing::error!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id,
634 0 : "Couldn't remove retain_lsn entry from offloaded timeline's parent: already removed");
635 0 : }
636 0 : }
637 0 : }
638 0 : self.deleted_from_ancestor.store(true, Ordering::Release);
639 0 : }
640 : /// Call [`Self::delete_from_ancestor_with_timelines`] instead if possible.
641 : ///
642 : /// As the entire tenant is being dropped, don't bother deregistering the `retain_lsn` from the ancestor.
643 1 : fn defuse_for_tenant_drop(&self) {
644 1 : self.deleted_from_ancestor.store(true, Ordering::Release);
645 1 : }
646 : }
647 :
648 : impl fmt::Debug for OffloadedTimeline {
649 0 : fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
650 0 : write!(f, "OffloadedTimeline<{}>", self.timeline_id)
651 0 : }
652 : }
653 :
654 : impl Drop for OffloadedTimeline {
655 1 : fn drop(&mut self) {
656 1 : if !self.deleted_from_ancestor.load(Ordering::Acquire) {
657 0 : tracing::warn!(
658 0 : "offloaded timeline {} was dropped without having cleaned it up at the ancestor",
659 : self.timeline_id
660 : );
661 1 : }
662 1 : }
663 : }
664 :
665 : #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
666 : pub enum MaybeOffloaded {
667 : Yes,
668 : No,
669 : }
670 :
671 : #[derive(Clone, Debug)]
672 : pub enum TimelineOrOffloaded {
673 : Timeline(Arc<Timeline>),
674 : Offloaded(Arc<OffloadedTimeline>),
675 : }
676 :
677 : impl TimelineOrOffloaded {
678 0 : pub fn arc_ref(&self) -> TimelineOrOffloadedArcRef<'_> {
679 0 : match self {
680 0 : TimelineOrOffloaded::Timeline(timeline) => {
681 0 : TimelineOrOffloadedArcRef::Timeline(timeline)
682 : }
683 0 : TimelineOrOffloaded::Offloaded(offloaded) => {
684 0 : TimelineOrOffloadedArcRef::Offloaded(offloaded)
685 : }
686 : }
687 0 : }
688 0 : pub fn tenant_shard_id(&self) -> TenantShardId {
689 0 : self.arc_ref().tenant_shard_id()
690 0 : }
691 0 : pub fn timeline_id(&self) -> TimelineId {
692 0 : self.arc_ref().timeline_id()
693 0 : }
694 1 : pub fn delete_progress(&self) -> &Arc<tokio::sync::Mutex<DeleteTimelineFlow>> {
695 1 : match self {
696 1 : TimelineOrOffloaded::Timeline(timeline) => &timeline.delete_progress,
697 0 : TimelineOrOffloaded::Offloaded(offloaded) => &offloaded.delete_progress,
698 : }
699 1 : }
700 0 : fn maybe_remote_client(&self) -> Option<Arc<RemoteTimelineClient>> {
701 0 : match self {
702 0 : TimelineOrOffloaded::Timeline(timeline) => Some(timeline.remote_client.clone()),
703 0 : TimelineOrOffloaded::Offloaded(_offloaded) => None,
704 : }
705 0 : }
706 : }
707 :
708 : pub enum TimelineOrOffloadedArcRef<'a> {
709 : Timeline(&'a Arc<Timeline>),
710 : Offloaded(&'a Arc<OffloadedTimeline>),
711 : }
712 :
713 : impl TimelineOrOffloadedArcRef<'_> {
714 0 : pub fn tenant_shard_id(&self) -> TenantShardId {
715 0 : match self {
716 0 : TimelineOrOffloadedArcRef::Timeline(timeline) => timeline.tenant_shard_id,
717 0 : TimelineOrOffloadedArcRef::Offloaded(offloaded) => offloaded.tenant_shard_id,
718 : }
719 0 : }
720 0 : pub fn timeline_id(&self) -> TimelineId {
721 0 : match self {
722 0 : TimelineOrOffloadedArcRef::Timeline(timeline) => timeline.timeline_id,
723 0 : TimelineOrOffloadedArcRef::Offloaded(offloaded) => offloaded.timeline_id,
724 : }
725 0 : }
726 : }
727 :
728 : impl<'a> From<&'a Arc<Timeline>> for TimelineOrOffloadedArcRef<'a> {
729 0 : fn from(timeline: &'a Arc<Timeline>) -> Self {
730 0 : Self::Timeline(timeline)
731 0 : }
732 : }
733 :
734 : impl<'a> From<&'a Arc<OffloadedTimeline>> for TimelineOrOffloadedArcRef<'a> {
735 0 : fn from(timeline: &'a Arc<OffloadedTimeline>) -> Self {
736 0 : Self::Offloaded(timeline)
737 0 : }
738 : }
739 :
740 : #[derive(Debug, thiserror::Error, PartialEq, Eq)]
741 : pub enum GetTimelineError {
742 : #[error("Timeline is shutting down")]
743 : ShuttingDown,
744 : #[error("Timeline {tenant_id}/{timeline_id} is not active, state: {state:?}")]
745 : NotActive {
746 : tenant_id: TenantShardId,
747 : timeline_id: TimelineId,
748 : state: TimelineState,
749 : },
750 : #[error("Timeline {tenant_id}/{timeline_id} was not found")]
751 : NotFound {
752 : tenant_id: TenantShardId,
753 : timeline_id: TimelineId,
754 : },
755 : }
756 :
757 : #[derive(Debug, thiserror::Error)]
758 : pub enum LoadLocalTimelineError {
759 : #[error("FailedToLoad")]
760 : Load(#[source] anyhow::Error),
761 : #[error("FailedToResumeDeletion")]
762 : ResumeDeletion(#[source] anyhow::Error),
763 : }
764 :
765 : #[derive(thiserror::Error)]
766 : pub enum DeleteTimelineError {
767 : #[error("NotFound")]
768 : NotFound,
769 :
770 : #[error("HasChildren")]
771 : HasChildren(Vec<TimelineId>),
772 :
773 : #[error("Timeline deletion is already in progress")]
774 : AlreadyInProgress(Arc<tokio::sync::Mutex<DeleteTimelineFlow>>),
775 :
776 : #[error("Cancelled")]
777 : Cancelled,
778 :
779 : #[error(transparent)]
780 : Other(#[from] anyhow::Error),
781 : }
782 :
783 : impl Debug for DeleteTimelineError {
784 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
785 0 : match self {
786 0 : Self::NotFound => write!(f, "NotFound"),
787 0 : Self::HasChildren(c) => f.debug_tuple("HasChildren").field(c).finish(),
788 0 : Self::AlreadyInProgress(_) => f.debug_tuple("AlreadyInProgress").finish(),
789 0 : Self::Cancelled => f.debug_tuple("Cancelled").finish(),
790 0 : Self::Other(e) => f.debug_tuple("Other").field(e).finish(),
791 : }
792 0 : }
793 : }
794 :
795 : #[derive(thiserror::Error)]
796 : pub enum TimelineArchivalError {
797 : #[error("NotFound")]
798 : NotFound,
799 :
800 : #[error("Timeout")]
801 : Timeout,
802 :
803 : #[error("Cancelled")]
804 : Cancelled,
805 :
806 : #[error("ancestor is archived: {}", .0)]
807 : HasArchivedParent(TimelineId),
808 :
809 : #[error("HasUnarchivedChildren")]
810 : HasUnarchivedChildren(Vec<TimelineId>),
811 :
812 : #[error("Timeline archival is already in progress")]
813 : AlreadyInProgress,
814 :
815 : #[error(transparent)]
816 : Other(anyhow::Error),
817 : }
818 :
819 : #[derive(thiserror::Error, Debug)]
820 : pub(crate) enum TenantManifestError {
821 : #[error("Remote storage error: {0}")]
822 : RemoteStorage(anyhow::Error),
823 :
824 : #[error("Cancelled")]
825 : Cancelled,
826 : }
827 :
828 : impl From<TenantManifestError> for TimelineArchivalError {
829 0 : fn from(e: TenantManifestError) -> Self {
830 0 : match e {
831 0 : TenantManifestError::RemoteStorage(e) => Self::Other(e),
832 0 : TenantManifestError::Cancelled => Self::Cancelled,
833 : }
834 0 : }
835 : }
836 :
837 : impl Debug for TimelineArchivalError {
838 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
839 0 : match self {
840 0 : Self::NotFound => write!(f, "NotFound"),
841 0 : Self::Timeout => write!(f, "Timeout"),
842 0 : Self::Cancelled => write!(f, "Cancelled"),
843 0 : Self::HasArchivedParent(p) => f.debug_tuple("HasArchivedParent").field(p).finish(),
844 0 : Self::HasUnarchivedChildren(c) => {
845 0 : f.debug_tuple("HasUnarchivedChildren").field(c).finish()
846 : }
847 0 : Self::AlreadyInProgress => f.debug_tuple("AlreadyInProgress").finish(),
848 0 : Self::Other(e) => f.debug_tuple("Other").field(e).finish(),
849 : }
850 0 : }
851 : }
852 :
853 : pub enum SetStoppingError {
854 : AlreadyStopping(completion::Barrier),
855 : Broken,
856 : }
857 :
858 : impl Debug for SetStoppingError {
859 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
860 0 : match self {
861 0 : Self::AlreadyStopping(_) => f.debug_tuple("AlreadyStopping").finish(),
862 0 : Self::Broken => write!(f, "Broken"),
863 : }
864 0 : }
865 : }
866 :
867 : #[derive(thiserror::Error, Debug)]
868 : pub(crate) enum FinalizeTimelineImportError {
869 : #[error("Import task not done yet")]
870 : ImportTaskStillRunning,
871 : #[error("Shutting down")]
872 : ShuttingDown,
873 : }
874 :
875 : /// Arguments to [`TenantShard::create_timeline`].
876 : ///
877 : /// Not usable as an idempotency key for timeline creation because if [`CreateTimelineParamsBranch::ancestor_start_lsn`]
878 : /// is `None`, the result of the timeline create call is not deterministic.
879 : ///
880 : /// See [`CreateTimelineIdempotency`] for an idempotency key.
881 : #[derive(Debug)]
882 : pub(crate) enum CreateTimelineParams {
883 : Bootstrap(CreateTimelineParamsBootstrap),
884 : Branch(CreateTimelineParamsBranch),
885 : ImportPgdata(CreateTimelineParamsImportPgdata),
886 : }
887 :
888 : #[derive(Debug)]
889 : pub(crate) struct CreateTimelineParamsBootstrap {
890 : pub(crate) new_timeline_id: TimelineId,
891 : pub(crate) existing_initdb_timeline_id: Option<TimelineId>,
892 : pub(crate) pg_version: u32,
893 : }
894 :
895 : /// NB: See comment on [`CreateTimelineIdempotency::Branch`] for why there's no `pg_version` here.
896 : #[derive(Debug)]
897 : pub(crate) struct CreateTimelineParamsBranch {
898 : pub(crate) new_timeline_id: TimelineId,
899 : pub(crate) ancestor_timeline_id: TimelineId,
900 : pub(crate) ancestor_start_lsn: Option<Lsn>,
901 : }
902 :
903 : #[derive(Debug)]
904 : pub(crate) struct CreateTimelineParamsImportPgdata {
905 : pub(crate) new_timeline_id: TimelineId,
906 : pub(crate) location: import_pgdata::index_part_format::Location,
907 : pub(crate) idempotency_key: import_pgdata::index_part_format::IdempotencyKey,
908 : }
909 :
910 : /// What is used to determine idempotency of a [`TenantShard::create_timeline`] call in [`TenantShard::start_creating_timeline`].
911 : ///
912 : /// Each [`Timeline`] object holds [`Self`] as an immutable property in [`Timeline::create_idempotency`].
913 : ///
914 : /// We lower timeline creation requests to [`Self`], and then use [`PartialEq::eq`] to compare [`Timeline::create_idempotency`] with the request.
915 : /// If they are equal, we return a reference to the existing timeline, otherwise it's an idempotency conflict.
916 : ///
917 : /// There is special treatment for [`Self::FailWithConflict`] to always return an idempotency conflict.
918 : /// It would be nice to have more advanced derive macros to make that special treatment declarative.
919 : ///
920 : /// Notes:
921 : /// - Unlike [`CreateTimelineParams`], the ancestor LSN is fixed, so branching will happen at a deterministic LSN.
922 : /// - We make some trade-offs though, e.g., [`CreateTimelineParamsBootstrap::existing_initdb_timeline_id`]
923 : /// is not considered for idempotency. We can improve on this over time if we deem it necessary.
924 : ///
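/// A sketch of the comparison described above (illustrative only: the `ancestor_*`
/// variables are assumed, and the real check lives in [`TenantShard::start_creating_timeline`]):
///
/// ```ignore
/// let requested = CreateTimelineIdempotency::Branch {
///     ancestor_timeline_id,
///     ancestor_start_lsn,
/// };
/// let existing = &timeline.create_idempotency;
/// let is_idempotent_retry = !matches!(existing, CreateTimelineIdempotency::FailWithConflict)
///     && *existing == requested;
/// ```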
925 : #[derive(Debug, Clone, PartialEq, Eq)]
926 : pub(crate) enum CreateTimelineIdempotency {
927 : /// NB: special treatment, see comment in [`Self`].
928 : FailWithConflict,
929 : Bootstrap {
930 : pg_version: u32,
931 : },
932 : /// NB: branches always have the same `pg_version` as their ancestor.
933 : /// While [`pageserver_api::models::TimelineCreateRequestMode::Branch::pg_version`]
934 : /// exists as a field, and is set by cplane, it has always been ignored by pageserver when
935 : /// determining the child branch pg_version.
936 : Branch {
937 : ancestor_timeline_id: TimelineId,
938 : ancestor_start_lsn: Lsn,
939 : },
940 : ImportPgdata(CreatingTimelineIdempotencyImportPgdata),
941 : }
942 :
943 : #[derive(Debug, Clone, PartialEq, Eq)]
944 : pub(crate) struct CreatingTimelineIdempotencyImportPgdata {
945 : idempotency_key: import_pgdata::index_part_format::IdempotencyKey,
946 : }
947 :
948 : /// What is returned by [`TenantShard::start_creating_timeline`].
949 : #[must_use]
950 : enum StartCreatingTimelineResult {
951 : CreateGuard(TimelineCreateGuard),
952 : Idempotent(Arc<Timeline>),
953 : }
954 :
955 : #[allow(clippy::large_enum_variant, reason = "TODO")]
956 : enum TimelineInitAndSyncResult {
957 : ReadyToActivate,
958 : NeedsSpawnImportPgdata(TimelineInitAndSyncNeedsSpawnImportPgdata),
959 : }
960 :
961 : #[must_use]
962 : struct TimelineInitAndSyncNeedsSpawnImportPgdata {
963 : timeline: Arc<Timeline>,
964 : import_pgdata: import_pgdata::index_part_format::Root,
965 : guard: TimelineCreateGuard,
966 : }
967 :
968 : /// What is returned by [`TenantShard::create_timeline`].
969 : enum CreateTimelineResult {
970 : Created(Arc<Timeline>),
971 : Idempotent(Arc<Timeline>),
972 : /// IMPORTANT: This [`Arc<Timeline>`] object is not in [`TenantShard::timelines`] when
973 : /// we return this result, nor will this concrete object ever be added there.
974 : /// Cf method comment on [`TenantShard::create_timeline_import_pgdata`].
975 : ImportSpawned(Arc<Timeline>),
976 : }
977 :
978 : impl CreateTimelineResult {
979 0 : fn discriminant(&self) -> &'static str {
980 0 : match self {
981 0 : Self::Created(_) => "Created",
982 0 : Self::Idempotent(_) => "Idempotent",
983 0 : Self::ImportSpawned(_) => "ImportSpawned",
984 : }
985 0 : }
986 0 : fn timeline(&self) -> &Arc<Timeline> {
987 0 : match self {
988 0 : Self::Created(t) | Self::Idempotent(t) | Self::ImportSpawned(t) => t,
989 0 : }
990 0 : }
991 : /// Unit test timelines aren't activated, test has to do it if it needs to.
992 : #[cfg(test)]
993 118 : fn into_timeline_for_test(self) -> Arc<Timeline> {
994 118 : match self {
995 118 : Self::Created(t) | Self::Idempotent(t) | Self::ImportSpawned(t) => t,
996 118 : }
997 118 : }
998 : }
999 :
1000 : #[derive(thiserror::Error, Debug)]
1001 : pub enum CreateTimelineError {
1002 : #[error("creation of timeline with the given ID is in progress")]
1003 : AlreadyCreating,
1004 : #[error("timeline already exists with different parameters")]
1005 : Conflict,
1006 : #[error(transparent)]
1007 : AncestorLsn(anyhow::Error),
1008 : #[error("ancestor timeline is not active")]
1009 : AncestorNotActive,
1010 : #[error("ancestor timeline is archived")]
1011 : AncestorArchived,
1012 : #[error("tenant shutting down")]
1013 : ShuttingDown,
1014 : #[error(transparent)]
1015 : Other(#[from] anyhow::Error),
1016 : }
1017 :
1018 : #[derive(thiserror::Error, Debug)]
1019 : pub enum InitdbError {
1020 : #[error("Operation was cancelled")]
1021 : Cancelled,
1022 : #[error(transparent)]
1023 : Other(anyhow::Error),
1024 : #[error(transparent)]
1025 : Inner(postgres_initdb::Error),
1026 : }
1027 :
1028 : enum CreateTimelineCause {
1029 : Load,
1030 : Delete,
1031 : }
1032 :
1033 : #[allow(clippy::large_enum_variant, reason = "TODO")]
1034 : enum LoadTimelineCause {
1035 : Attach,
1036 : Unoffload,
1037 : }
1038 :
1039 : #[derive(thiserror::Error, Debug)]
1040 : pub(crate) enum GcError {
1041 : // The tenant is shutting down
1042 : #[error("tenant shutting down")]
1043 : TenantCancelled,
1044 :
1045 : // The timeline is shutting down
1046 : #[error("timeline shutting down")]
1047 : TimelineCancelled,
1048 :
1049 : // The tenant is in a state ineligible to run GC
1050 : #[error("not active")]
1051 : NotActive,
1052 :
1053 : // A requested GC cutoff LSN was invalid, for example it tried to move backwards
1054 : #[error("not active")]
1055 : BadLsn { why: String },
1056 :
1057 : // A remote storage error while scheduling updates after compaction
1058 : #[error(transparent)]
1059 : Remote(anyhow::Error),
1060 :
1061 : // An error reading while calculating GC cutoffs
1062 : #[error(transparent)]
1063 : GcCutoffs(PageReconstructError),
1064 :
1065 : // If GC was invoked for a particular timeline, this error means it didn't exist
1066 : #[error("timeline not found")]
1067 : TimelineNotFound,
1068 : }
1069 :
1070 : impl From<PageReconstructError> for GcError {
1071 0 : fn from(value: PageReconstructError) -> Self {
1072 0 : match value {
1073 0 : PageReconstructError::Cancelled => Self::TimelineCancelled,
1074 0 : other => Self::GcCutoffs(other),
1075 : }
1076 0 : }
1077 : }
1078 :
1079 : impl From<NotInitialized> for GcError {
1080 0 : fn from(value: NotInitialized) -> Self {
1081 0 : match value {
1082 0 : NotInitialized::Uninitialized => GcError::Remote(value.into()),
1083 0 : NotInitialized::Stopped | NotInitialized::ShuttingDown => GcError::TimelineCancelled,
1084 : }
1085 0 : }
1086 : }
1087 :
1088 : impl From<timeline::layer_manager::Shutdown> for GcError {
1089 0 : fn from(_: timeline::layer_manager::Shutdown) -> Self {
1090 0 : GcError::TimelineCancelled
1091 0 : }
1092 : }
1093 :
1094 : #[derive(thiserror::Error, Debug)]
1095 : pub(crate) enum LoadConfigError {
1096 : #[error("TOML deserialization error: '{0}'")]
1097 : DeserializeToml(#[from] toml_edit::de::Error),
1098 :
1099 : #[error("Config not found at {0}")]
1100 : NotFound(Utf8PathBuf),
1101 : }
1102 :
1103 : impl TenantShard {
1104 : /// Yet another helper for timeline initialization.
1105 : ///
1106 : /// - Initializes the Timeline struct and inserts it into the tenant's hash map
1107 : /// - Scans the local timeline directory for layer files and builds the layer map
1108 : /// - Downloads remote index file and adds remote files to the layer map
1109 : /// - Schedules remote upload tasks for any files that are present locally but missing from remote storage.
1110 : ///
1111 : /// If the operation fails, the timeline is left in the tenant's hash map in Broken state. On success,
1112 : /// it is marked as Active.
1113 : #[allow(clippy::too_many_arguments)]
1114 3 : async fn timeline_init_and_sync(
1115 3 : self: &Arc<Self>,
1116 3 : timeline_id: TimelineId,
1117 3 : resources: TimelineResources,
1118 3 : index_part: IndexPart,
1119 3 : metadata: TimelineMetadata,
1120 3 : previous_heatmap: Option<PreviousHeatmap>,
1121 3 : ancestor: Option<Arc<Timeline>>,
1122 3 : cause: LoadTimelineCause,
1123 3 : ctx: &RequestContext,
1124 3 : ) -> anyhow::Result<TimelineInitAndSyncResult> {
1125 3 : let tenant_id = self.tenant_shard_id;
1126 3 :
1127 3 : let import_pgdata = index_part.import_pgdata.clone();
1128 3 : let idempotency = match &import_pgdata {
1129 0 : Some(import_pgdata) => {
1130 0 : CreateTimelineIdempotency::ImportPgdata(CreatingTimelineIdempotencyImportPgdata {
1131 0 : idempotency_key: import_pgdata.idempotency_key().clone(),
1132 0 : })
1133 : }
1134 : None => {
1135 3 : if metadata.ancestor_timeline().is_none() {
1136 2 : CreateTimelineIdempotency::Bootstrap {
1137 2 : pg_version: metadata.pg_version(),
1138 2 : }
1139 : } else {
1140 1 : CreateTimelineIdempotency::Branch {
1141 1 : ancestor_timeline_id: metadata.ancestor_timeline().unwrap(),
1142 1 : ancestor_start_lsn: metadata.ancestor_lsn(),
1143 1 : }
1144 : }
1145 : }
1146 : };
1147 :
1148 3 : let (timeline, _timeline_ctx) = self.create_timeline_struct(
1149 3 : timeline_id,
1150 3 : &metadata,
1151 3 : previous_heatmap,
1152 3 : ancestor.clone(),
1153 3 : resources,
1154 3 : CreateTimelineCause::Load,
1155 3 : idempotency.clone(),
1156 3 : index_part.gc_compaction.clone(),
1157 3 : index_part.rel_size_migration.clone(),
1158 3 : ctx,
1159 3 : )?;
1160 3 : let disk_consistent_lsn = timeline.get_disk_consistent_lsn();
1161 3 :
1162 3 : if !disk_consistent_lsn.is_valid() {
1163 : // As opposed to normal timelines, which get initialised with a disk consistent LSN
1164 : // via initdb, imported timelines start from 0. If the import task stops before
1165 : // it advances the disk consistent LSN, allow it to resume.
1166 0 : let in_progress_import = import_pgdata
1167 0 : .as_ref()
1168 0 : .map(|import| !import.is_done())
1169 0 : .unwrap_or(false);
1170 0 : if !in_progress_import {
1171 0 : anyhow::bail!("Timeline {tenant_id}/{timeline_id} has invalid disk_consistent_lsn");
1172 0 : }
1173 3 : }
1174 :
1175 3 : assert_eq!(
1176 3 : disk_consistent_lsn,
1177 3 : metadata.disk_consistent_lsn(),
1178 0 : "these are used interchangeably"
1179 : );
1180 :
1181 3 : timeline.remote_client.init_upload_queue(&index_part)?;
1182 :
1183 3 : timeline
1184 3 : .load_layer_map(disk_consistent_lsn, index_part)
1185 3 : .await
1186 3 : .with_context(|| {
1187 0 : format!("Failed to load layermap for timeline {tenant_id}/{timeline_id}")
1188 3 : })?;
1189 :
1190 : // When unarchiving, we've most likely lost the heatmap generated prior
1191 : // to the archival operation. To allow warming this timeline up, generate
1192 : // a previous heatmap which contains all visible layers in the layer map.
1193 : // This previous heatmap will be used whenever a fresh heatmap is generated
1194 : // for the timeline.
1195 3 : if self.conf.generate_unarchival_heatmap && matches!(cause, LoadTimelineCause::Unoffload) {
1196 0 : let mut tline_ending_at = Some((&timeline, timeline.get_last_record_lsn()));
1197 0 : while let Some((tline, end_lsn)) = tline_ending_at {
1198 0 : let unarchival_heatmap = tline.generate_unarchival_heatmap(end_lsn).await;
1199 : // Another unarchived timeline might have generated a heatmap for this ancestor.
1200 : // If the current branch point is greater than the previous one, use the heatmap
1201 : // we just generated - it should include more layers.
1202 0 : if !tline.should_keep_previous_heatmap(end_lsn) {
1203 0 : tline
1204 0 : .previous_heatmap
1205 0 : .store(Some(Arc::new(unarchival_heatmap)));
1206 0 : } else {
1207 0 : tracing::info!("Previous heatmap preferred. Dropping unarchival heatmap.")
1208 : }
1209 :
1210 0 : match tline.ancestor_timeline() {
1211 0 : Some(ancestor) => {
1212 0 : if ancestor.update_layer_visibility().await.is_err() {
1213 : // Ancestor timeline is shutting down.
1214 0 : break;
1215 0 : }
1216 0 :
1217 0 : tline_ending_at = Some((ancestor, tline.get_ancestor_lsn()));
1218 : }
1219 0 : None => {
1220 0 : tline_ending_at = None;
1221 0 : }
1222 : }
1223 : }
1224 3 : }
1225 :
1226 0 : match import_pgdata {
1227 0 : Some(import_pgdata) if !import_pgdata.is_done() => {
1228 0 : let mut guard = self.timelines_creating.lock().unwrap();
1229 0 : if !guard.insert(timeline_id) {
1230 : // We should never try and load the same timeline twice during startup
1231 0 : unreachable!("Timeline {tenant_id}/{timeline_id} is already being created")
1232 0 : }
1233 0 : let timeline_create_guard = TimelineCreateGuard {
1234 0 : _tenant_gate_guard: self.gate.enter()?,
1235 0 : owning_tenant: self.clone(),
1236 0 : timeline_id,
1237 0 : idempotency,
1238 0 : // The users of this specific return value don't need the timeline_path in there.
1239 0 : timeline_path: timeline
1240 0 : .conf
1241 0 : .timeline_path(&timeline.tenant_shard_id, &timeline.timeline_id),
1242 0 : };
1243 0 : Ok(TimelineInitAndSyncResult::NeedsSpawnImportPgdata(
1244 0 : TimelineInitAndSyncNeedsSpawnImportPgdata {
1245 0 : timeline,
1246 0 : import_pgdata,
1247 0 : guard: timeline_create_guard,
1248 0 : },
1249 0 : ))
1250 : }
1251 : Some(_) | None => {
1252 : {
1253 3 : let mut timelines_accessor = self.timelines.lock().unwrap();
1254 3 : match timelines_accessor.entry(timeline_id) {
1255 : // We should never try and load the same timeline twice during startup
1256 : Entry::Occupied(_) => {
1257 0 : unreachable!(
1258 0 : "Timeline {tenant_id}/{timeline_id} already exists in the tenant map"
1259 0 : );
1260 : }
1261 3 : Entry::Vacant(v) => {
1262 3 : v.insert(Arc::clone(&timeline));
1263 3 : timeline.maybe_spawn_flush_loop();
1264 3 : }
1265 3 : }
1266 3 : }
1267 3 :
1268 3 : if disk_consistent_lsn.is_valid() {
1269 : // Sanity check: a timeline should have some content.
1270 : // Exception: importing timelines might not yet have any
1271 3 : anyhow::ensure!(
1272 3 : ancestor.is_some()
1273 2 : || timeline
1274 2 : .layers
1275 2 : .read()
1276 2 : .await
1277 2 : .layer_map()
1278 2 : .expect(
1279 2 : "currently loading, layer manager cannot be shutdown already"
1280 2 : )
1281 2 : .iter_historic_layers()
1282 2 : .next()
1283 2 : .is_some(),
1284 0 : "Timeline has no ancestor and no layer files"
1285 : );
1286 0 : }
1287 :
1288 3 : Ok(TimelineInitAndSyncResult::ReadyToActivate)
1289 : }
1290 : }
1291 3 : }
1292 :
1293 : /// Attach a tenant that's available in cloud storage.
1294 : ///
1295 : /// This returns quickly, after just creating the in-memory object
1296 : /// Tenant struct and launching a background task to download
1297 : /// the remote index files. On return, the tenant is most likely still in
1298 : /// Attaching state, and it will become Active once the background task
1299 : /// finishes. You can use wait_until_active() to wait for the task to
1300 : /// complete.
1301 : ///
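/// A sketch of the intended call pattern (illustrative only: the argument values are
/// assumptions, and the activation-wait helper is the one referenced above):
///
/// ```ignore
/// let tenant = TenantShard::spawn(
///     conf,
///     tenant_shard_id,
///     resources,
///     attached_conf,
///     shard_identity,
///     None, // no InitializationOrder outside of process startup
///     SpawnMode::Eager,
///     &ctx,
/// )?;
/// // Most likely still Attaching at this point; wait for the background attach task.
/// tenant.wait_until_active(timeout).await?;
/// ```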
1302 : #[allow(clippy::too_many_arguments)]
1303 0 : pub(crate) fn spawn(
1304 0 : conf: &'static PageServerConf,
1305 0 : tenant_shard_id: TenantShardId,
1306 0 : resources: TenantSharedResources,
1307 0 : attached_conf: AttachedTenantConf,
1308 0 : shard_identity: ShardIdentity,
1309 0 : init_order: Option<InitializationOrder>,
1310 0 : mode: SpawnMode,
1311 0 : ctx: &RequestContext,
1312 0 : ) -> Result<Arc<TenantShard>, GlobalShutDown> {
1313 0 : let wal_redo_manager =
1314 0 : WalRedoManager::new(PostgresRedoManager::new(conf, tenant_shard_id))?;
1315 :
1316 : let TenantSharedResources {
1317 0 : broker_client,
1318 0 : remote_storage,
1319 0 : deletion_queue_client,
1320 0 : l0_flush_global_state,
1321 0 : basebackup_prepare_sender,
1322 0 : feature_resolver,
1323 0 : } = resources;
1324 0 :
1325 0 : let attach_mode = attached_conf.location.attach_mode;
1326 0 : let generation = attached_conf.location.generation;
1327 0 :
1328 0 : let tenant = Arc::new(TenantShard::new(
1329 0 : TenantState::Attaching,
1330 0 : conf,
1331 0 : attached_conf,
1332 0 : shard_identity,
1333 0 : Some(wal_redo_manager),
1334 0 : tenant_shard_id,
1335 0 : remote_storage.clone(),
1336 0 : deletion_queue_client,
1337 0 : l0_flush_global_state,
1338 0 : basebackup_prepare_sender,
1339 0 : feature_resolver,
1340 0 : ));
1341 0 :
1342 0 : // The attach task will carry a GateGuard, so that shutdown() reliably waits for it to drop out if
1343 0 : // we shut down while attaching.
1344 0 : let attach_gate_guard = tenant
1345 0 : .gate
1346 0 : .enter()
1347 0 : .expect("We just created the TenantShard: nothing else can have shut it down yet");
1348 0 :
1349 0 : // Do all the hard work in the background
1350 0 : let tenant_clone = Arc::clone(&tenant);
1351 0 : let ctx = ctx.detached_child(TaskKind::Attach, DownloadBehavior::Warn);
1352 0 : task_mgr::spawn(
1353 0 : &tokio::runtime::Handle::current(),
1354 0 : TaskKind::Attach,
1355 0 : tenant_shard_id,
1356 0 : None,
1357 0 : "attach tenant",
1358 0 : async move {
1359 0 :
1360 0 : info!(
1361 : ?attach_mode,
1362 0 : "Attaching tenant"
1363 : );
1364 :
1365 0 : let _gate_guard = attach_gate_guard;
1366 0 :
1367 0 : // Is this tenant being spawned as part of process startup?
1368 0 : let starting_up = init_order.is_some();
1369 0 : scopeguard::defer! {
1370 0 : if starting_up {
1371 0 : TENANT.startup_complete.inc();
1372 0 : }
1373 0 : }
1374 :
1375 0 : fn make_broken_or_stopping(t: &TenantShard, err: anyhow::Error) {
1376 0 : t.state.send_modify(|state| match state {
1377 : // TODO: the old code alluded to DeleteTenantFlow sometimes setting
1378 : // TenantState::Stopping before we get here, but this may be outdated.
1379 : // Let's find out with a testing assertion. If this doesn't fire, and the
1380 : // logs don't show this happening in production, remove the Stopping cases.
1381 0 : TenantState::Stopping{..} if cfg!(any(test, feature = "testing")) => {
1382 0 : panic!("unexpected TenantState::Stopping during attach")
1383 : }
1384 : // If the tenant is cancelled, assume the error was caused by cancellation.
1385 0 : TenantState::Attaching if t.cancel.is_cancelled() => {
1386 0 : info!("attach cancelled, setting tenant state to Stopping: {err}");
1387 : // NB: progress None tells `set_stopping` that attach has cancelled.
1388 0 : *state = TenantState::Stopping { progress: None };
1389 : }
1390 : // According to the old code, DeleteTenantFlow may already have set this to
1391 : // Stopping. Retain its progress.
1392 : // TODO: there is no DeleteTenantFlow. Is this still needed? See above.
1393 0 : TenantState::Stopping { progress } if t.cancel.is_cancelled() => {
1394 0 : assert!(progress.is_some(), "concurrent attach cancellation");
1395 0 : info!("attach cancelled, already Stopping: {err}");
1396 : }
1397 : // Mark the tenant as broken.
1398 : TenantState::Attaching | TenantState::Stopping { .. } => {
1399 0 : error!("attach failed, setting tenant state to Broken (was {state}): {err:?}");
1400 0 : *state = TenantState::broken_from_reason(err.to_string())
1401 : }
1402 : // The attach task owns the tenant state until activated.
1403 0 : state => panic!("invalid tenant state {state} during attach: {err:?}"),
1404 0 : });
1405 0 : }
1406 :
1407 : // TODO: should also be rejecting tenant conf changes that violate this check.
1408 0 : if let Err(e) = crate::tenant::storage_layer::inmemory_layer::IndexEntry::validate_checkpoint_distance(tenant_clone.get_checkpoint_distance()) {
1409 0 : make_broken_or_stopping(&tenant_clone, anyhow::anyhow!(e));
1410 0 : return Ok(());
1411 0 : }
1412 0 :
1413 0 : let mut init_order = init_order;
1414 0 : // take the completion because initial tenant loading will complete when all of
1415 0 : // these tasks complete.
1416 0 : let _completion = init_order
1417 0 : .as_mut()
1418 0 : .and_then(|x| x.initial_tenant_load.take());
1419 0 : let remote_load_completion = init_order
1420 0 : .as_mut()
1421 0 : .and_then(|x| x.initial_tenant_load_remote.take());
1422 :
1423 : enum AttachType<'a> {
1424 : /// We are attaching this tenant lazily in the background.
1425 : Warmup {
1426 : _permit: tokio::sync::SemaphorePermit<'a>,
1427 : during_startup: bool
1428 : },
1429 : /// We are attaching this tenant as soon as we can, because for example an
1430 : /// endpoint tried to access it.
1431 : OnDemand,
1432 : /// During normal operations after startup, we are attaching a tenant, and
1433 : /// eager attach was requested.
1434 : Normal,
1435 : }
1436 :
1437 0 : let attach_type = if matches!(mode, SpawnMode::Lazy) {
1438 : // Before doing any I/O, wait for at least one of:
1439 : // - A client attempting to access to this tenant (on-demand loading)
1440 : // - A permit becoming available in the warmup semaphore (background warmup)
1441 :
1442 0 : tokio::select!(
1443 0 : permit = tenant_clone.activate_now_sem.acquire() => {
1444 0 : let _ = permit.expect("activate_now_sem is never closed");
1445 0 : tracing::info!("Activating tenant (on-demand)");
1446 0 : AttachType::OnDemand
1447 : },
1448 0 : permit = conf.concurrent_tenant_warmup.inner().acquire() => {
1449 0 : let _permit = permit.expect("concurrent_tenant_warmup semaphore is never closed");
1450 0 : tracing::info!("Activating tenant (warmup)");
1451 0 : AttachType::Warmup {
1452 0 : _permit,
1453 0 : during_startup: init_order.is_some()
1454 0 : }
1455 : }
1456 0 : _ = tenant_clone.cancel.cancelled() => {
1457 : // This is safe, but should be pretty rare: it is interesting if a tenant
1458 : // stayed in Activating for such a long time that shutdown found it in
1459 : // that state.
1460 0 : tracing::info!(state=%tenant_clone.current_state(), "Tenant shut down before activation");
1461 : // Set the tenant to Stopping to signal `set_stopping` that we're done.
1462 0 : make_broken_or_stopping(&tenant_clone, anyhow::anyhow!("Shut down while Attaching"));
1463 0 : return Ok(());
1464 : },
1465 : )
1466 : } else {
1467 : // SpawnMode::{Create,Eager} always jump ahead of the
1468 : // concurrent_tenant_warmup queue.
1469 0 : AttachType::Normal
1470 : };
1471 :
1472 0 : let preload = match &mode {
1473 : SpawnMode::Eager | SpawnMode::Lazy => {
1474 0 : let _preload_timer = TENANT.preload.start_timer();
1475 0 : let res = tenant_clone
1476 0 : .preload(&remote_storage, task_mgr::shutdown_token())
1477 0 : .await;
1478 0 : match res {
1479 0 : Ok(p) => Some(p),
1480 0 : Err(e) => {
1481 0 : make_broken_or_stopping(&tenant_clone, anyhow::anyhow!(e));
1482 0 : return Ok(());
1483 : }
1484 : }
1485 : }
1486 :
1487 : };
1488 :
1489 : // Remote preload is complete.
1490 0 : drop(remote_load_completion);
1491 0 :
1492 0 :
1493 0 : // We will time the duration of the attach phase unless this is a creation (attach will do no work)
1494 0 : let attach_start = std::time::Instant::now();
1495 0 : let attached = {
1496 0 : let _attach_timer = Some(TENANT.attach.start_timer());
1497 0 : tenant_clone.attach(preload, &ctx).await
1498 : };
1499 0 : let attach_duration = attach_start.elapsed();
1500 0 : _ = tenant_clone.attach_wal_lag_cooldown.set(WalLagCooldown::new(attach_start, attach_duration));
1501 0 :
1502 0 : match attached {
1503 : Ok(()) => {
1504 0 : info!("attach finished, activating");
1505 0 : tenant_clone.activate(broker_client, None, &ctx);
1506 : }
1507 0 : Err(e) => make_broken_or_stopping(&tenant_clone, anyhow::anyhow!(e)),
1508 : }
1509 :
1510 : // If we are doing an opportunistic warmup attachment at startup, initialize
1511 : // logical size at the same time. This is better than starting a bunch of idle tenants
1512 : // with cold caches and then coming back later to initialize their logical sizes.
1513 : //
1514 : // It also prevents the warmup process from competing with the concurrency limit on
1515 : // logical size calculations: if the logical size calculation semaphore is saturated,
1516 : // then warmup will wait for it before proceeding to the next tenant.
1517 0 : if matches!(attach_type, AttachType::Warmup { during_startup: true, .. }) {
1518 0 : let mut futs: FuturesUnordered<_> = tenant_clone.timelines.lock().unwrap().values().cloned().map(|t| t.await_initial_logical_size()).collect();
1519 0 : tracing::info!("Waiting for initial logical sizes while warming up...");
1520 0 : while futs.next().await.is_some() {}
1521 0 : tracing::info!("Warm-up complete");
1522 0 : }
1523 :
1524 0 : Ok(())
1525 0 : }
1526 0 : .instrument(tracing::info_span!(parent: None, "attach", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), gen=?generation)),
1527 : );
1528 0 : Ok(tenant)
1529 0 : }
1530 :
1531 : #[instrument(skip_all)]
1532 : pub(crate) async fn preload(
1533 : self: &Arc<Self>,
1534 : remote_storage: &GenericRemoteStorage,
1535 : cancel: CancellationToken,
1536 : ) -> anyhow::Result<TenantPreload> {
1537 : span::debug_assert_current_span_has_tenant_id();
1538 : // Get the list of remote timelines, and
1539 : // download the index file for every timeline of this tenant.
1540 : info!("listing remote timelines");
1541 : let (mut remote_timeline_ids, other_keys) = remote_timeline_client::list_remote_timelines(
1542 : remote_storage,
1543 : self.tenant_shard_id,
1544 : cancel.clone(),
1545 : )
1546 : .await?;
1547 :
1548 : let tenant_manifest = match download_tenant_manifest(
1549 : remote_storage,
1550 : &self.tenant_shard_id,
1551 : self.generation,
1552 : &cancel,
1553 : )
1554 : .await
1555 : {
1556 : Ok((tenant_manifest, _, _)) => Some(tenant_manifest),
1557 : Err(DownloadError::NotFound) => None,
1558 : Err(err) => return Err(err.into()),
1559 : };
1560 :
1561 : info!(
1562 : "found {} timelines ({} offloaded timelines)",
1563 : remote_timeline_ids.len(),
1564 : tenant_manifest
1565 : .as_ref()
1566 3 : .map(|m| m.offloaded_timelines.len())
1567 : .unwrap_or(0)
1568 : );
1569 :
1570 : for k in other_keys {
1571 : warn!("Unexpected non timeline key {k}");
1572 : }
1573 :
1574 : // Avoid downloading IndexPart of offloaded timelines.
1575 : let mut offloaded_with_prefix = HashSet::new();
1576 : if let Some(tenant_manifest) = &tenant_manifest {
1577 : for offloaded in tenant_manifest.offloaded_timelines.iter() {
1578 : if remote_timeline_ids.remove(&offloaded.timeline_id) {
1579 : offloaded_with_prefix.insert(offloaded.timeline_id);
1580 : } else {
1581 : // We'll take care of timelines that are in the manifest but have no prefix later.
1582 : }
1583 : }
1584 : }
1585 :
1586 : // TODO(vlad): Could go to S3 if the secondary is freezing cold and hasn't even
1587 : // pulled the first heatmap. Not entirely necessary since the storage controller
1588 : // will kick the secondary in any case and cause a download.
1589 : let maybe_heatmap_at = self.read_on_disk_heatmap().await;
1590 :
1591 : let timelines = self
1592 : .load_timelines_metadata(
1593 : remote_timeline_ids,
1594 : remote_storage,
1595 : maybe_heatmap_at,
1596 : cancel,
1597 : )
1598 : .await?;
1599 :
1600 : Ok(TenantPreload {
1601 : tenant_manifest,
1602 : timelines: timelines
1603 : .into_iter()
1604 3 : .map(|(id, tl)| (id, Some(tl)))
1605 0 : .chain(offloaded_with_prefix.into_iter().map(|id| (id, None)))
1606 : .collect(),
1607 : })
1608 : }
1609 :
1610 117 : async fn read_on_disk_heatmap(&self) -> Option<(HeatMapTenant, std::time::Instant)> {
1611 117 : if !self.conf.load_previous_heatmap {
1612 0 : return None;
1613 117 : }
1614 117 :
1615 117 : let on_disk_heatmap_path = self.conf.tenant_heatmap_path(&self.tenant_shard_id);
1616 117 : match tokio::fs::read_to_string(on_disk_heatmap_path).await {
1617 0 : Ok(heatmap) => match serde_json::from_str::<HeatMapTenant>(&heatmap) {
1618 0 : Ok(heatmap) => Some((heatmap, std::time::Instant::now())),
1619 0 : Err(err) => {
1620 0 : error!("Failed to deserialize old heatmap: {err}");
1621 0 : None
1622 : }
1623 : },
1624 117 : Err(err) => match err.kind() {
1625 117 : std::io::ErrorKind::NotFound => None,
1626 : _ => {
1627 0 : error!("Unexpected IO error reading old heatmap: {err}");
1628 0 : None
1629 : }
1630 : },
1631 : }
1632 117 : }
1633 :
1634 : ///
1635 : /// Background task that downloads all data for a tenant and brings it to Active state.
1636 : ///
1637 : /// No background tasks are started as part of this routine.
1638 : ///
1639 117 : async fn attach(
1640 117 : self: &Arc<TenantShard>,
1641 117 : preload: Option<TenantPreload>,
1642 117 : ctx: &RequestContext,
1643 117 : ) -> anyhow::Result<()> {
1644 117 : span::debug_assert_current_span_has_tenant_id();
1645 117 :
1646 117 : failpoint_support::sleep_millis_async!("before-attaching-tenant");
1647 :
1648 117 : let Some(preload) = preload else {
1649 0 : anyhow::bail!(
1650 0 : "local-only deployment is no longer supported, https://github.com/neondatabase/neon/issues/5624"
1651 0 : );
1652 : };
1653 :
1654 117 : let mut offloaded_timeline_ids = HashSet::new();
1655 117 : let mut offloaded_timelines_list = Vec::new();
1656 117 : if let Some(tenant_manifest) = &preload.tenant_manifest {
1657 3 : for timeline_manifest in tenant_manifest.offloaded_timelines.iter() {
1658 0 : let timeline_id = timeline_manifest.timeline_id;
1659 0 : let offloaded_timeline =
1660 0 : OffloadedTimeline::from_manifest(self.tenant_shard_id, timeline_manifest);
1661 0 : offloaded_timelines_list.push((timeline_id, Arc::new(offloaded_timeline)));
1662 0 : offloaded_timeline_ids.insert(timeline_id);
1663 0 : }
1664 114 : }
1665 : // Complete deletions for offloaded timeline IDs from the manifest.
1666 : // The manifest will be uploaded later in this function.
1667 117 : offloaded_timelines_list
1668 117 : .retain(|(offloaded_id, offloaded)| {
1669 0 : // Existence of a timeline is ultimately determined by the existence of an index-part.json in remote storage.
1670 0 : // If there are dangling references in another location, they need to be cleaned up.
1671 0 : let delete = !preload.timelines.contains_key(offloaded_id);
1672 0 : if delete {
1673 0 : tracing::info!("Removing offloaded timeline {offloaded_id} from manifest as no remote prefix was found");
1674 0 : offloaded.defuse_for_tenant_drop();
1675 0 : }
1676 0 : !delete
1677 117 : });
1678 117 :
1679 117 : let mut timelines_to_resume_deletions = vec![];
1680 117 :
1681 117 : let mut remote_index_and_client = HashMap::new();
1682 117 : let mut timeline_ancestors = HashMap::new();
1683 117 : let mut existent_timelines = HashSet::new();
1684 120 : for (timeline_id, preload) in preload.timelines {
1685 3 : let Some(preload) = preload else { continue };
1686 : // This is an invariant of the `preload` function's API
1687 3 : assert!(!offloaded_timeline_ids.contains(&timeline_id));
1688 3 : let index_part = match preload.index_part {
1689 3 : Ok(i) => {
1690 3 : debug!("remote index part exists for timeline {timeline_id}");
1691 : // We found index_part on the remote; this is the standard case.
1692 3 : existent_timelines.insert(timeline_id);
1693 3 : i
1694 : }
1695 : Err(DownloadError::NotFound) => {
1696 : // There is no index_part on the remote. We only get here
1697 : // if there is some prefix for the timeline in the remote storage.
1698 : // This can e.g. be the initdb.tar.zst archive, maybe a
1699 : // remnant from a prior incomplete creation or deletion attempt.
1700 : // Delete the local directory, as the deciding criterion for a
1701 : // timeline's existence is the presence of index_part.
1702 0 : info!(%timeline_id, "index_part not found on remote");
1703 0 : continue;
1704 : }
1705 0 : Err(DownloadError::Fatal(why)) => {
1706 0 : // If, while loading one remote timeline, we saw an indication that our generation
1707 0 : // number is likely invalid, then we should not load the whole tenant.
1708 0 : error!(%timeline_id, "Fatal error loading timeline: {why}");
1709 0 : anyhow::bail!(why.to_string());
1710 : }
1711 0 : Err(e) => {
1712 0 : // Some (possibly ephemeral) error happened during index_part download.
1713 0 : // Pretend the timeline exists to not delete the timeline directory,
1714 0 : // as it might be a temporary issue and we don't want to re-download
1715 0 : // everything after it resolves.
1716 0 : warn!(%timeline_id, "Failed to load index_part from remote storage, failed creation? ({e})");
1717 :
1718 0 : existent_timelines.insert(timeline_id);
1719 0 : continue;
1720 : }
1721 : };
1722 3 : match index_part {
1723 3 : MaybeDeletedIndexPart::IndexPart(index_part) => {
1724 3 : timeline_ancestors.insert(timeline_id, index_part.metadata.clone());
1725 3 : remote_index_and_client.insert(
1726 3 : timeline_id,
1727 3 : (index_part, preload.client, preload.previous_heatmap),
1728 3 : );
1729 3 : }
1730 0 : MaybeDeletedIndexPart::Deleted(index_part) => {
1731 0 : info!(
1732 0 : "timeline {} is deleted, picking to resume deletion",
1733 : timeline_id
1734 : );
1735 0 : timelines_to_resume_deletions.push((timeline_id, index_part, preload.client));
1736 : }
1737 : }
1738 : }
1739 :
1740 117 : let mut gc_blocks = HashMap::new();
1741 :
1742 : // For every timeline, download the metadata file, scan the local directory,
1743 : // and build a layer map that contains an entry for each remote and local
1744 : // layer file.
1745 117 : let sorted_timelines = tree_sort_timelines(timeline_ancestors, |m| m.ancestor_timeline())?;
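 : // Note: `tree_sort_timelines` orders parents before children (sorting by the
 : // ancestor relation extracted via `m.ancestor_timeline()`), so that when
 : // `load_remote_timeline` looks up an ancestor in `self.timelines` below, that
 : // ancestor has already been loaded.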
1746 120 : for (timeline_id, remote_metadata) in sorted_timelines {
1747 3 : let (index_part, remote_client, previous_heatmap) = remote_index_and_client
1748 3 : .remove(&timeline_id)
1749 3 : .expect("just put it in above");
1750 :
1751 3 : if let Some(blocking) = index_part.gc_blocking.as_ref() {
1752 : // We could just filter these out, but the check helps while testing.
1753 0 : anyhow::ensure!(
1754 0 : !blocking.reasons.is_empty(),
1755 0 : "index_part for {timeline_id} is malformed: it should not have gc blocking with zero reasons"
1756 : );
1757 0 : let prev = gc_blocks.insert(timeline_id, blocking.reasons);
1758 0 : assert!(prev.is_none());
1759 3 : }
1760 :
1761 : // TODO again handle early failure
1762 3 : let effect = self
1763 3 : .load_remote_timeline(
1764 3 : timeline_id,
1765 3 : index_part,
1766 3 : remote_metadata,
1767 3 : previous_heatmap,
1768 3 : self.get_timeline_resources_for(remote_client),
1769 3 : LoadTimelineCause::Attach,
1770 3 : ctx,
1771 3 : )
1772 3 : .await
1773 3 : .with_context(|| {
1774 0 : format!(
1775 0 : "failed to load remote timeline {} for tenant {}",
1776 0 : timeline_id, self.tenant_shard_id
1777 0 : )
1778 3 : })?;
1779 :
1780 3 : match effect {
1781 3 : TimelineInitAndSyncResult::ReadyToActivate => {
1782 3 : // activation happens later, on Tenant::activate
1783 3 : }
1784 : TimelineInitAndSyncResult::NeedsSpawnImportPgdata(
1785 : TimelineInitAndSyncNeedsSpawnImportPgdata {
1786 0 : timeline,
1787 0 : import_pgdata,
1788 0 : guard,
1789 0 : },
1790 0 : ) => {
1791 0 : let timeline_id = timeline.timeline_id;
1792 0 : let import_task_handle =
1793 0 : tokio::task::spawn(self.clone().create_timeline_import_pgdata_task(
1794 0 : timeline.clone(),
1795 0 : import_pgdata,
1796 0 : guard,
1797 0 : ctx.detached_child(TaskKind::ImportPgdata, DownloadBehavior::Warn),
1798 0 : ));
1799 0 :
1800 0 : let prev = self.timelines_importing.lock().unwrap().insert(
1801 0 : timeline_id,
1802 0 : ImportingTimeline {
1803 0 : timeline: timeline.clone(),
1804 0 : import_task_handle,
1805 0 : },
1806 0 : );
1807 0 :
1808 0 : assert!(prev.is_none());
1809 : }
1810 : }
1811 : }
1812 :
1813 : // Walk through deleted timelines, resume deletion
1814 117 : for (timeline_id, index_part, remote_timeline_client) in timelines_to_resume_deletions {
1815 0 : remote_timeline_client
1816 0 : .init_upload_queue_stopped_to_continue_deletion(&index_part)
1817 0 : .context("init queue stopped")
1818 0 : .map_err(LoadLocalTimelineError::ResumeDeletion)?;
1819 :
1820 0 : DeleteTimelineFlow::resume_deletion(
1821 0 : Arc::clone(self),
1822 0 : timeline_id,
1823 0 : &index_part.metadata,
1824 0 : remote_timeline_client,
1825 0 : ctx,
1826 0 : )
1827 0 : .instrument(tracing::info_span!("timeline_delete", %timeline_id))
1828 0 : .await
1829 0 : .context("resume_deletion")
1830 0 : .map_err(LoadLocalTimelineError::ResumeDeletion)?;
1831 : }
1832 117 : {
1833 117 : let mut offloaded_timelines_accessor = self.timelines_offloaded.lock().unwrap();
1834 117 : offloaded_timelines_accessor.extend(offloaded_timelines_list.into_iter());
1835 117 : }
1836 :
1837 : // Stash the preloaded tenant manifest, and upload a new manifest if changed.
1838 : //
1839 : // NB: this must happen after the tenant is fully populated above, in particular the
1840 : // offloaded timelines, since they are included in the manifest.
1841 : {
1842 117 : let mut guard = self.remote_tenant_manifest.lock().await;
1843 117 : assert!(guard.is_none(), "tenant manifest set before preload"); // first populated here
1844 117 : *guard = preload.tenant_manifest;
1845 117 : }
1846 117 : self.maybe_upload_tenant_manifest().await?;
1847 :
1848 : // The local filesystem contents are a cache of what's in the remote IndexPart;
1849 : // IndexPart is the source of truth.
1850 117 : self.clean_up_timelines(&existent_timelines)?;
1851 :
1852 117 : self.gc_block.set_scanned(gc_blocks);
1853 117 :
1854 117 : fail::fail_point!("attach-before-activate", |_| {
1855 0 : anyhow::bail!("attach-before-activate");
1856 117 : });
1857 117 : failpoint_support::sleep_millis_async!("attach-before-activate-sleep", &self.cancel);
1858 :
1859 117 : info!("Done");
1860 :
1861 117 : Ok(())
1862 117 : }
1863 :
1864 : /// Check for any local timeline directories that are temporary, or do not correspond to a
1865 : /// timeline that still exists: this can happen if we crashed during a deletion/creation, or
1866 : /// if a timeline was deleted while the tenant was attached to a different pageserver.
1867 117 : fn clean_up_timelines(&self, existent_timelines: &HashSet<TimelineId>) -> anyhow::Result<()> {
1868 117 : let timelines_dir = self.conf.timelines_path(&self.tenant_shard_id);
1869 :
1870 117 : let entries = match timelines_dir.read_dir_utf8() {
1871 117 : Ok(d) => d,
1872 0 : Err(e) => {
1873 0 : if e.kind() == std::io::ErrorKind::NotFound {
1874 0 : return Ok(());
1875 : } else {
1876 0 : return Err(e).context("list timelines directory for tenant");
1877 : }
1878 : }
1879 : };
1880 :
1881 121 : for entry in entries {
1882 4 : let entry = entry.context("read timeline dir entry")?;
1883 4 : let entry_path = entry.path();
1884 :
1885 4 : let purge = if crate::is_temporary(entry_path) {
1886 0 : true
1887 : } else {
1888 4 : match TimelineId::try_from(entry_path.file_name()) {
1889 4 : Ok(i) => {
1890 4 : // Purge if the timeline ID does not exist in remote storage: remote storage is the authority.
1891 4 : !existent_timelines.contains(&i)
1892 : }
1893 0 : Err(e) => {
1894 0 : tracing::warn!(
1895 0 : "Unparseable directory in timelines directory: {entry_path}, ignoring ({e})"
1896 : );
1897 : // Do not purge junk: if we don't recognize it, be cautious and leave it for a human.
1898 0 : false
1899 : }
1900 : }
1901 : };
1902 :
1903 4 : if purge {
1904 1 : tracing::info!("Purging stale timeline dentry {entry_path}");
1905 1 : if let Err(e) = match entry.file_type() {
1906 1 : Ok(t) => if t.is_dir() {
1907 1 : std::fs::remove_dir_all(entry_path)
1908 : } else {
1909 0 : std::fs::remove_file(entry_path)
1910 : }
1911 1 : .or_else(fs_ext::ignore_not_found),
1912 0 : Err(e) => Err(e),
1913 : } {
1914 0 : tracing::warn!("Failed to purge stale timeline dentry {entry_path}: {e}");
1915 1 : }
1916 3 : }
1917 : }
1918 :
1919 117 : Ok(())
1920 117 : }
1921 :
1922 : /// Get the sum of all remote timeline sizes.
1923 : ///
1924 : /// This function relies on the index_part instead of listing remote storage.
1925 0 : pub fn remote_size(&self) -> u64 {
1926 0 : let mut size = 0;
1927 :
1928 0 : for timeline in self.list_timelines() {
1929 0 : size += timeline.remote_client.get_remote_physical_size();
1930 0 : }
1931 :
1932 0 : size
1933 0 : }
1934 :
1935 : #[instrument(skip_all, fields(timeline_id=%timeline_id))]
1936 : #[allow(clippy::too_many_arguments)]
1937 : async fn load_remote_timeline(
1938 : self: &Arc<Self>,
1939 : timeline_id: TimelineId,
1940 : index_part: IndexPart,
1941 : remote_metadata: TimelineMetadata,
1942 : previous_heatmap: Option<PreviousHeatmap>,
1943 : resources: TimelineResources,
1944 : cause: LoadTimelineCause,
1945 : ctx: &RequestContext,
1946 : ) -> anyhow::Result<TimelineInitAndSyncResult> {
1947 : span::debug_assert_current_span_has_tenant_id();
1948 :
1949 : info!("downloading index file for timeline {}", timeline_id);
1950 : tokio::fs::create_dir_all(self.conf.timeline_path(&self.tenant_shard_id, &timeline_id))
1951 : .await
1952 : .context("Failed to create new timeline directory")?;
1953 :
1954 : let ancestor = if let Some(ancestor_id) = remote_metadata.ancestor_timeline() {
1955 : let timelines = self.timelines.lock().unwrap();
1956 : Some(Arc::clone(timelines.get(&ancestor_id).ok_or_else(
1957 0 : || {
1958 0 : anyhow::anyhow!(
1959 0 : "cannot find ancestor timeline {ancestor_id} for timeline {timeline_id}"
1960 0 : )
1961 0 : },
1962 : )?))
1963 : } else {
1964 : None
1965 : };
1966 :
1967 : self.timeline_init_and_sync(
1968 : timeline_id,
1969 : resources,
1970 : index_part,
1971 : remote_metadata,
1972 : previous_heatmap,
1973 : ancestor,
1974 : cause,
1975 : ctx,
1976 : )
1977 : .await
1978 : }
1979 :
1980 117 : async fn load_timelines_metadata(
1981 117 : self: &Arc<TenantShard>,
1982 117 : timeline_ids: HashSet<TimelineId>,
1983 117 : remote_storage: &GenericRemoteStorage,
1984 117 : heatmap: Option<(HeatMapTenant, std::time::Instant)>,
1985 117 : cancel: CancellationToken,
1986 117 : ) -> anyhow::Result<HashMap<TimelineId, TimelinePreload>> {
1987 117 : let mut timeline_heatmaps = heatmap.map(|h| (h.0.into_timelines_index(), h.1));
1988 117 :
1989 117 : let mut part_downloads = JoinSet::new();
1990 120 : for timeline_id in timeline_ids {
1991 3 : let cancel_clone = cancel.clone();
1992 3 :
1993 3 : let previous_timeline_heatmap = timeline_heatmaps.as_mut().and_then(|hs| {
1994 0 : hs.0.remove(&timeline_id).map(|h| PreviousHeatmap::Active {
1995 0 : heatmap: h,
1996 0 : read_at: hs.1,
1997 0 : end_lsn: None,
1998 0 : })
1999 3 : });
2000 3 : part_downloads.spawn(
2001 3 : self.load_timeline_metadata(
2002 3 : timeline_id,
2003 3 : remote_storage.clone(),
2004 3 : previous_timeline_heatmap,
2005 3 : cancel_clone,
2006 3 : )
2007 3 : .instrument(info_span!("download_index_part", %timeline_id)),
2008 : );
2009 : }
2010 :
2011 117 : let mut timeline_preloads: HashMap<TimelineId, TimelinePreload> = HashMap::new();
2012 :
2013 : loop {
2014 120 : tokio::select!(
2015 120 : next = part_downloads.join_next() => {
2016 120 : match next {
2017 3 : Some(result) => {
2018 3 : let preload = result.context("join preload task")?;
2019 3 : timeline_preloads.insert(preload.timeline_id, preload);
2020 : },
2021 : None => {
2022 117 : break;
2023 : }
2024 : }
2025 : },
2026 120 : _ = cancel.cancelled() => {
2027 0 : anyhow::bail!("Cancelled while waiting for remote index download")
2028 : }
2029 : )
2030 : }
2031 :
2032 117 : Ok(timeline_preloads)
2033 117 : }
2034 :
2035 3 : fn build_timeline_client(
2036 3 : &self,
2037 3 : timeline_id: TimelineId,
2038 3 : remote_storage: GenericRemoteStorage,
2039 3 : ) -> RemoteTimelineClient {
2040 3 : RemoteTimelineClient::new(
2041 3 : remote_storage.clone(),
2042 3 : self.deletion_queue_client.clone(),
2043 3 : self.conf,
2044 3 : self.tenant_shard_id,
2045 3 : timeline_id,
2046 3 : self.generation,
2047 3 : &self.tenant_conf.load().location,
2048 3 : )
2049 3 : }
2050 :
2051 3 : fn load_timeline_metadata(
2052 3 : self: &Arc<TenantShard>,
2053 3 : timeline_id: TimelineId,
2054 3 : remote_storage: GenericRemoteStorage,
2055 3 : previous_heatmap: Option<PreviousHeatmap>,
2056 3 : cancel: CancellationToken,
2057 3 : ) -> impl Future<Output = TimelinePreload> + use<> {
2058 3 : let client = self.build_timeline_client(timeline_id, remote_storage);
2059 3 : async move {
2060 3 : debug_assert_current_span_has_tenant_and_timeline_id();
2061 3 : debug!("starting index part download");
2062 :
2063 3 : let index_part = client.download_index_file(&cancel).await;
2064 :
2065 3 : debug!("finished index part download");
2066 :
2067 3 : TimelinePreload {
2068 3 : client,
2069 3 : timeline_id,
2070 3 : index_part,
2071 3 : previous_heatmap,
2072 3 : }
2073 3 : }
2074 3 : }
2075 :
2076 0 : fn check_to_be_archived_has_no_unarchived_children(
2077 0 : timeline_id: TimelineId,
2078 0 : timelines: &std::sync::MutexGuard<'_, HashMap<TimelineId, Arc<Timeline>>>,
2079 0 : ) -> Result<(), TimelineArchivalError> {
2080 0 : let children: Vec<TimelineId> = timelines
2081 0 : .iter()
2082 0 : .filter_map(|(id, entry)| {
2083 0 : if entry.get_ancestor_timeline_id() != Some(timeline_id) {
2084 0 : return None;
2085 0 : }
2086 0 : if entry.is_archived() == Some(true) {
2087 0 : return None;
2088 0 : }
2089 0 : Some(*id)
2090 0 : })
2091 0 : .collect();
2092 0 :
2093 0 : if !children.is_empty() {
2094 0 : return Err(TimelineArchivalError::HasUnarchivedChildren(children));
2095 0 : }
2096 0 : Ok(())
2097 0 : }
2098 :
2099 0 : fn check_ancestor_of_to_be_unarchived_is_not_archived(
2100 0 : ancestor_timeline_id: TimelineId,
2101 0 : timelines: &std::sync::MutexGuard<'_, HashMap<TimelineId, Arc<Timeline>>>,
2102 0 : offloaded_timelines: &std::sync::MutexGuard<
2103 0 : '_,
2104 0 : HashMap<TimelineId, Arc<OffloadedTimeline>>,
2105 0 : >,
2106 0 : ) -> Result<(), TimelineArchivalError> {
2107 0 : let has_archived_parent =
2108 0 : if let Some(ancestor_timeline) = timelines.get(&ancestor_timeline_id) {
2109 0 : ancestor_timeline.is_archived() == Some(true)
2110 0 : } else if offloaded_timelines.contains_key(&ancestor_timeline_id) {
2111 0 : true
2112 : } else {
2113 0 : error!("ancestor timeline {ancestor_timeline_id} not found");
2114 0 : if cfg!(debug_assertions) {
2115 0 : panic!("ancestor timeline {ancestor_timeline_id} not found");
2116 0 : }
2117 0 : return Err(TimelineArchivalError::NotFound);
2118 : };
2119 0 : if has_archived_parent {
2120 0 : return Err(TimelineArchivalError::HasArchivedParent(
2121 0 : ancestor_timeline_id,
2122 0 : ));
2123 0 : }
2124 0 : Ok(())
2125 0 : }
2126 :
2127 0 : fn check_to_be_unarchived_timeline_has_no_archived_parent(
2128 0 : timeline: &Arc<Timeline>,
2129 0 : ) -> Result<(), TimelineArchivalError> {
2130 0 : if let Some(ancestor_timeline) = timeline.ancestor_timeline() {
2131 0 : if ancestor_timeline.is_archived() == Some(true) {
2132 0 : return Err(TimelineArchivalError::HasArchivedParent(
2133 0 : ancestor_timeline.timeline_id,
2134 0 : ));
2135 0 : }
2136 0 : }
2137 0 : Ok(())
2138 0 : }
2139 :
2140 : /// Loads the specified (offloaded) timeline from S3 and attaches it as a loaded timeline
2141 : ///
2142 : /// Counterpart to [`offload_timeline`].
2143 0 : async fn unoffload_timeline(
2144 0 : self: &Arc<Self>,
2145 0 : timeline_id: TimelineId,
2146 0 : broker_client: storage_broker::BrokerClientChannel,
2147 0 : ctx: RequestContext,
2148 0 : ) -> Result<Arc<Timeline>, TimelineArchivalError> {
2149 0 : info!("unoffloading timeline");
2150 :
2151 : // We activate the timeline below manually, so this must be called on an active tenant.
2152 : // We expect callers of this function to ensure this.
2153 0 : match self.current_state() {
2154 : TenantState::Activating { .. }
2155 : | TenantState::Attaching
2156 : | TenantState::Broken { .. } => {
2157 0 : panic!("Timeline expected to be active")
2158 : }
2159 0 : TenantState::Stopping { .. } => return Err(TimelineArchivalError::Cancelled),
2160 0 : TenantState::Active => {}
2161 0 : }
2162 0 : let cancel = self.cancel.clone();
2163 0 :
2164 0 : // Protect against concurrent attempts to use this TimelineId
2165 0 : // We don't care much about idempotency, as it's ensured a layer above.
2166 0 : let allow_offloaded = true;
2167 0 : let _create_guard = self
2168 0 : .create_timeline_create_guard(
2169 0 : timeline_id,
2170 0 : CreateTimelineIdempotency::FailWithConflict,
2171 0 : allow_offloaded,
2172 0 : )
2173 0 : .map_err(|err| match err {
2174 0 : TimelineExclusionError::AlreadyCreating => TimelineArchivalError::AlreadyInProgress,
2175 : TimelineExclusionError::AlreadyExists { .. } => {
2176 0 : TimelineArchivalError::Other(anyhow::anyhow!("Timeline already exists"))
2177 : }
2178 0 : TimelineExclusionError::Other(e) => TimelineArchivalError::Other(e),
2179 0 : TimelineExclusionError::ShuttingDown => TimelineArchivalError::Cancelled,
2180 0 : })?;
2181 :
2182 0 : let timeline_preload = self
2183 0 : .load_timeline_metadata(
2184 0 : timeline_id,
2185 0 : self.remote_storage.clone(),
2186 0 : None,
2187 0 : cancel.clone(),
2188 0 : )
2189 0 : .await;
2190 :
2191 0 : let index_part = match timeline_preload.index_part {
2192 0 : Ok(index_part) => {
2193 0 : debug!("remote index part exists for timeline {timeline_id}");
2194 0 : index_part
2195 : }
2196 : Err(DownloadError::NotFound) => {
2197 0 : error!(%timeline_id, "index_part not found on remote");
2198 0 : return Err(TimelineArchivalError::NotFound);
2199 : }
2200 0 : Err(DownloadError::Cancelled) => return Err(TimelineArchivalError::Cancelled),
2201 0 : Err(e) => {
2202 0 : // Some (possibly ephemeral) error happened during index_part download.
2203 0 : warn!(%timeline_id, "Failed to load index_part from remote storage, failed creation? ({e})");
2204 0 : return Err(TimelineArchivalError::Other(
2205 0 : anyhow::Error::new(e).context("downloading index_part from remote storage"),
2206 0 : ));
2207 : }
2208 : };
2209 0 : let index_part = match index_part {
2210 0 : MaybeDeletedIndexPart::IndexPart(index_part) => index_part,
2211 0 : MaybeDeletedIndexPart::Deleted(_index_part) => {
2212 0 : info!("timeline is deleted according to index_part.json");
2213 0 : return Err(TimelineArchivalError::NotFound);
2214 : }
2215 : };
2216 0 : let remote_metadata = index_part.metadata.clone();
2217 0 : let timeline_resources = self.build_timeline_resources(timeline_id);
2218 0 : self.load_remote_timeline(
2219 0 : timeline_id,
2220 0 : index_part,
2221 0 : remote_metadata,
2222 0 : None,
2223 0 : timeline_resources,
2224 0 : LoadTimelineCause::Unoffload,
2225 0 : &ctx,
2226 0 : )
2227 0 : .await
2228 0 : .with_context(|| {
2229 0 : format!(
2230 0 : "failed to load remote timeline {} for tenant {}",
2231 0 : timeline_id, self.tenant_shard_id
2232 0 : )
2233 0 : })
2234 0 : .map_err(TimelineArchivalError::Other)?;
2235 :
2236 0 : let timeline = {
2237 0 : let timelines = self.timelines.lock().unwrap();
2238 0 : let Some(timeline) = timelines.get(&timeline_id) else {
2239 0 : warn!("timeline not available directly after attach");
2240 : // This is not a panic because no locks are held between `load_remote_timeline`,
2241 : // which puts the timeline into `timelines`, and our lookup in the timeline map here.
2242 0 : return Err(TimelineArchivalError::Other(anyhow::anyhow!(
2243 0 : "timeline not available directly after attach"
2244 0 : )));
2245 : };
2246 0 : let mut offloaded_timelines = self.timelines_offloaded.lock().unwrap();
2247 0 : match offloaded_timelines.remove(&timeline_id) {
2248 0 : Some(offloaded) => {
2249 0 : offloaded.delete_from_ancestor_with_timelines(&timelines);
2250 0 : }
2251 0 : None => warn!("timeline already removed from offloaded timelines"),
2252 : }
2253 :
2254 0 : self.initialize_gc_info(&timelines, &offloaded_timelines, Some(timeline_id));
2255 0 :
2256 0 : Arc::clone(timeline)
2257 0 : };
2258 0 :
2259 0 : // Upload new list of offloaded timelines to S3
2260 0 : self.maybe_upload_tenant_manifest().await?;
2261 :
2262 : // Activate the timeline (if it makes sense)
2263 0 : if !(timeline.is_broken() || timeline.is_stopping()) {
2264 0 : let background_jobs_can_start = None;
2265 0 : timeline.activate(
2266 0 : self.clone(),
2267 0 : broker_client.clone(),
2268 0 : background_jobs_can_start,
2269 0 : &ctx.with_scope_timeline(&timeline),
2270 0 : );
2271 0 : }
2272 :
2273 0 : info!("timeline unoffloading complete");
2274 0 : Ok(timeline)
2275 0 : }
2276 :
2277 0 : pub(crate) async fn apply_timeline_archival_config(
2278 0 : self: &Arc<Self>,
2279 0 : timeline_id: TimelineId,
2280 0 : new_state: TimelineArchivalState,
2281 0 : broker_client: storage_broker::BrokerClientChannel,
2282 0 : ctx: RequestContext,
2283 0 : ) -> Result<(), TimelineArchivalError> {
2284 0 : info!("setting timeline archival config");
2285 : // First part: figure out what needs to be done, and do validation.
2286 0 : let timeline_or_unarchive_offloaded = 'outer: {
2287 0 : let timelines = self.timelines.lock().unwrap();
2288 :
2289 0 : let Some(timeline) = timelines.get(&timeline_id) else {
2290 0 : let offloaded_timelines = self.timelines_offloaded.lock().unwrap();
2291 0 : let Some(offloaded) = offloaded_timelines.get(&timeline_id) else {
2292 0 : return Err(TimelineArchivalError::NotFound);
2293 : };
2294 0 : if new_state == TimelineArchivalState::Archived {
2295 : // It's offloaded already, so nothing to do
2296 0 : return Ok(());
2297 0 : }
2298 0 : if let Some(ancestor_timeline_id) = offloaded.ancestor_timeline_id {
2299 0 : Self::check_ancestor_of_to_be_unarchived_is_not_archived(
2300 0 : ancestor_timeline_id,
2301 0 : &timelines,
2302 0 : &offloaded_timelines,
2303 0 : )?;
2304 0 : }
2305 0 : break 'outer None;
2306 : };
2307 :
2308 : // Do some validation. We release the timelines lock below, so there is potential
2309 : // for race conditions: these checks exist more to prevent misunderstandings of
2310 : // the API's capabilities than to serve as the sole defense of its invariants.
2311 0 : match new_state {
2312 : TimelineArchivalState::Unarchived => {
2313 0 : Self::check_to_be_unarchived_timeline_has_no_archived_parent(timeline)?
2314 : }
2315 : TimelineArchivalState::Archived => {
2316 0 : Self::check_to_be_archived_has_no_unarchived_children(timeline_id, &timelines)?
2317 : }
2318 : }
2319 0 : Some(Arc::clone(timeline))
2320 : };
2321 :
2322 : // Second part: unoffload timeline (if needed)
2323 0 : let timeline = if let Some(timeline) = timeline_or_unarchive_offloaded {
2324 0 : timeline
2325 : } else {
2326 : // Turn offloaded timeline into a non-offloaded one
2327 0 : self.unoffload_timeline(timeline_id, broker_client, ctx)
2328 0 : .await?
2329 : };
2330 :
2331 : // Third part: upload new timeline archival state and block until it is present in S3
2332 0 : let upload_needed = match timeline
2333 0 : .remote_client
2334 0 : .schedule_index_upload_for_timeline_archival_state(new_state)
2335 : {
2336 0 : Ok(upload_needed) => upload_needed,
2337 0 : Err(e) => {
2338 0 : if timeline.cancel.is_cancelled() {
2339 0 : return Err(TimelineArchivalError::Cancelled);
2340 : } else {
2341 0 : return Err(TimelineArchivalError::Other(e));
2342 : }
2343 : }
2344 : };
2345 :
2346 0 : if upload_needed {
2347 0 : info!("Uploading new state");
2348 : const MAX_WAIT: Duration = Duration::from_secs(10);
2349 0 : let Ok(v) =
2350 0 : tokio::time::timeout(MAX_WAIT, timeline.remote_client.wait_completion()).await
2351 : else {
2352 0 : tracing::warn!("reached timeout for waiting on upload queue");
2353 0 : return Err(TimelineArchivalError::Timeout);
2354 : };
2355 0 : v.map_err(|e| match e {
2356 0 : WaitCompletionError::NotInitialized(e) => {
2357 0 : TimelineArchivalError::Other(anyhow::anyhow!(e))
2358 : }
2359 : WaitCompletionError::UploadQueueShutDownOrStopped => {
2360 0 : TimelineArchivalError::Cancelled
2361 : }
2362 0 : })?;
2363 0 : }
2364 0 : Ok(())
2365 0 : }
2366 :
2367 1 : pub fn get_offloaded_timeline(
2368 1 : &self,
2369 1 : timeline_id: TimelineId,
2370 1 : ) -> Result<Arc<OffloadedTimeline>, GetTimelineError> {
2371 1 : self.timelines_offloaded
2372 1 : .lock()
2373 1 : .unwrap()
2374 1 : .get(&timeline_id)
2375 1 : .map(Arc::clone)
2376 1 : .ok_or(GetTimelineError::NotFound {
2377 1 : tenant_id: self.tenant_shard_id,
2378 1 : timeline_id,
2379 1 : })
2380 1 : }
2381 :
2382 2 : pub(crate) fn tenant_shard_id(&self) -> TenantShardId {
2383 2 : self.tenant_shard_id
2384 2 : }
2385 :
2386 : /// Get Timeline handle for given Neon timeline ID.
2387 : /// This function is idempotent. It doesn't change internal state in any way.
2388 111 : pub fn get_timeline(
2389 111 : &self,
2390 111 : timeline_id: TimelineId,
2391 111 : active_only: bool,
2392 111 : ) -> Result<Arc<Timeline>, GetTimelineError> {
2393 111 : let timelines_accessor = self.timelines.lock().unwrap();
2394 111 : let timeline = timelines_accessor
2395 111 : .get(&timeline_id)
2396 111 : .ok_or(GetTimelineError::NotFound {
2397 111 : tenant_id: self.tenant_shard_id,
2398 111 : timeline_id,
2399 111 : })?;
2400 :
2401 110 : if active_only && !timeline.is_active() {
2402 0 : Err(GetTimelineError::NotActive {
2403 0 : tenant_id: self.tenant_shard_id,
2404 0 : timeline_id,
2405 0 : state: timeline.current_state(),
2406 0 : })
2407 : } else {
2408 110 : Ok(Arc::clone(timeline))
2409 : }
2410 111 : }
2411 :
2412 : /// Lists timelines the tenant contains.
2413 : /// It's up to callers to omit certain timelines that are not considered ready for use.
2414 2 : pub fn list_timelines(&self) -> Vec<Arc<Timeline>> {
2415 2 : self.timelines
2416 2 : .lock()
2417 2 : .unwrap()
2418 2 : .values()
2419 2 : .map(Arc::clone)
2420 2 : .collect()
2421 2 : }
2422 :
2423 : /// Lists timelines the tenant manages, including offloaded ones.
2424 : ///
2425 : /// It's up to callers to omit certain timelines that are not considered ready for use.
2426 0 : pub fn list_timelines_and_offloaded(
2427 0 : &self,
2428 0 : ) -> (Vec<Arc<Timeline>>, Vec<Arc<OffloadedTimeline>>) {
2429 0 : let timelines = self
2430 0 : .timelines
2431 0 : .lock()
2432 0 : .unwrap()
2433 0 : .values()
2434 0 : .map(Arc::clone)
2435 0 : .collect();
2436 0 : let offloaded = self
2437 0 : .timelines_offloaded
2438 0 : .lock()
2439 0 : .unwrap()
2440 0 : .values()
2441 0 : .map(Arc::clone)
2442 0 : .collect();
2443 0 : (timelines, offloaded)
2444 0 : }
2445 :
2446 0 : pub fn list_timeline_ids(&self) -> Vec<TimelineId> {
2447 0 : self.timelines.lock().unwrap().keys().cloned().collect()
2448 0 : }
2449 :
2450 : /// This is used by tests & import-from-basebackup.
2451 : ///
2452 : /// The returned [`UninitializedTimeline`] contains neither data nor metadata and is in
2453 : /// a state that will fail [`TenantShard::load_remote_timeline`] because `disk_consistent_lsn=Lsn(0)`.
2454 : ///
2455 : /// The caller is responsible for getting the timeline into a state that will be accepted
2456 : /// by [`TenantShard::load_remote_timeline`] / [`TenantShard::attach`].
2457 : /// Then they may call [`UninitializedTimeline::finish_creation`] to add the timeline
2458 : /// to the [`TenantShard::timelines`].
2459 : ///
2460 : /// Tests should use `TenantShard::create_test_timeline` to set up the minimum required metadata keys.
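 : ///
 : /// A minimal sketch of the expected call sequence (illustrative only; `create_test_timeline`
 : /// below shows the full test-only variant):
 : ///
 : /// ```ignore
 : /// let (uninit, ctx) = tenant.create_empty_timeline(id, initdb_lsn, pg_version, &ctx).await?;
 : /// // ... import data and flush so that disk_consistent_lsn becomes valid ...
 : /// let timeline = uninit.finish_creation().await?;
 : /// ```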
2461 113 : pub(crate) async fn create_empty_timeline(
2462 113 : self: &Arc<Self>,
2463 113 : new_timeline_id: TimelineId,
2464 113 : initdb_lsn: Lsn,
2465 113 : pg_version: u32,
2466 113 : ctx: &RequestContext,
2467 113 : ) -> anyhow::Result<(UninitializedTimeline, RequestContext)> {
2468 113 : anyhow::ensure!(
2469 113 : self.is_active(),
2470 0 : "Cannot create empty timelines on inactive tenant"
2471 : );
2472 :
2473 : // Protect against concurrent attempts to use this TimelineId
2474 113 : let create_guard = match self
2475 113 : .start_creating_timeline(new_timeline_id, CreateTimelineIdempotency::FailWithConflict)
2476 113 : .await?
2477 : {
2478 112 : StartCreatingTimelineResult::CreateGuard(guard) => guard,
2479 : StartCreatingTimelineResult::Idempotent(_) => {
2480 0 : unreachable!("FailWithConflict implies we get an error instead")
2481 : }
2482 : };
2483 :
2484 112 : let new_metadata = TimelineMetadata::new(
2485 112 : // Initialize disk_consistent_lsn to 0. The caller must import some data to
2486 112 : // make it valid before calling finish_creation().
2487 112 : Lsn(0),
2488 112 : None,
2489 112 : None,
2490 112 : Lsn(0),
2491 112 : initdb_lsn,
2492 112 : initdb_lsn,
2493 112 : pg_version,
2494 112 : );
2495 112 : self.prepare_new_timeline(
2496 112 : new_timeline_id,
2497 112 : &new_metadata,
2498 112 : create_guard,
2499 112 : initdb_lsn,
2500 112 : None,
2501 112 : None,
2502 112 : ctx,
2503 112 : )
2504 112 : .await
2505 113 : }
2506 :
2507 : /// Helper for unit tests to create an empty timeline.
2508 : ///
2509 : /// The timeline has state `Active`, but its background loops are not running.
2510 : // This makes the various functions that `anyhow::ensure!` the Active state work in tests.
2511 : // Our current tests don't need the background loops.
2512 : #[cfg(test)]
2513 108 : pub async fn create_test_timeline(
2514 108 : self: &Arc<Self>,
2515 108 : new_timeline_id: TimelineId,
2516 108 : initdb_lsn: Lsn,
2517 108 : pg_version: u32,
2518 108 : ctx: &RequestContext,
2519 108 : ) -> anyhow::Result<Arc<Timeline>> {
2520 108 : let (uninit_tl, ctx) = self
2521 108 : .create_empty_timeline(new_timeline_id, initdb_lsn, pg_version, ctx)
2522 108 : .await?;
2523 108 : let tline = uninit_tl.raw_timeline().expect("we just created it");
2524 108 : assert_eq!(tline.get_last_record_lsn(), Lsn(0));
2525 :
2526 : // Set up the minimum keys required for the timeline to be usable.
2527 108 : let mut modification = tline.begin_modification(initdb_lsn);
2528 108 : modification
2529 108 : .init_empty_test_timeline()
2530 108 : .context("init_empty_test_timeline")?;
2531 108 : modification
2532 108 : .commit(&ctx)
2533 108 : .await
2534 108 : .context("commit init_empty_test_timeline modification")?;
2535 :
2536 : // Flush to disk so that uninit_tl's check for valid disk_consistent_lsn passes.
2537 108 : tline.maybe_spawn_flush_loop();
2538 108 : tline.freeze_and_flush().await.context("freeze_and_flush")?;
2539 :
2540 : // Make sure the freeze_and_flush reaches remote storage.
2541 108 : tline.remote_client.wait_completion().await.unwrap();
2542 :
2543 108 : let tl = uninit_tl.finish_creation().await?;
2544 : // The non-test code would call tl.activate() here.
2545 108 : tl.set_state(TimelineState::Active);
2546 108 : Ok(tl)
2547 108 : }
2548 :
2549 : /// Helper for unit tests to create a timeline with some pre-loaded states.
2550 : #[cfg(test)]
2551 : #[allow(clippy::too_many_arguments)]
2552 24 : pub async fn create_test_timeline_with_layers(
2553 24 : self: &Arc<Self>,
2554 24 : new_timeline_id: TimelineId,
2555 24 : initdb_lsn: Lsn,
2556 24 : pg_version: u32,
2557 24 : ctx: &RequestContext,
2558 24 : in_memory_layer_desc: Vec<timeline::InMemoryLayerTestDesc>,
2559 24 : delta_layer_desc: Vec<timeline::DeltaLayerTestDesc>,
2560 24 : image_layer_desc: Vec<(Lsn, Vec<(pageserver_api::key::Key, bytes::Bytes)>)>,
2561 24 : end_lsn: Lsn,
2562 24 : ) -> anyhow::Result<Arc<Timeline>> {
2563 : use checks::check_valid_layermap;
2564 : use itertools::Itertools;
2565 :
2566 24 : let tline = self
2567 24 : .create_test_timeline(new_timeline_id, initdb_lsn, pg_version, ctx)
2568 24 : .await?;
2569 24 : tline.force_advance_lsn(end_lsn);
2570 71 : for deltas in delta_layer_desc {
2571 47 : tline
2572 47 : .force_create_delta_layer(deltas, Some(initdb_lsn), ctx)
2573 47 : .await?;
2574 : }
2575 58 : for (lsn, images) in image_layer_desc {
2576 34 : tline
2577 34 : .force_create_image_layer(lsn, images, Some(initdb_lsn), ctx)
2578 34 : .await?;
2579 : }
2580 28 : for in_memory in in_memory_layer_desc {
2581 4 : tline
2582 4 : .force_create_in_memory_layer(in_memory, Some(initdb_lsn), ctx)
2583 4 : .await?;
2584 : }
2585 24 : let layer_names = tline
2586 24 : .layers
2587 24 : .read()
2588 24 : .await
2589 24 : .layer_map()
2590 24 : .unwrap()
2591 24 : .iter_historic_layers()
2592 105 : .map(|layer| layer.layer_name())
2593 24 : .collect_vec();
2594 24 : if let Some(err) = check_valid_layermap(&layer_names) {
2595 0 : bail!("invalid layermap: {err}");
2596 24 : }
2597 24 : Ok(tline)
2598 24 : }
2599 :
2600 : /// Create a new timeline.
2601 : ///
2602 : /// Returns a reference to the new Timeline object.
2603 : ///
2604 : /// If the caller specified the timeline ID to use (`new_timeline_id`) and a timeline with
2605 : /// the same timeline ID already exists, returns CreateTimelineError::AlreadyExists.
2606 : #[allow(clippy::too_many_arguments)]
2607 0 : pub(crate) async fn create_timeline(
2608 0 : self: &Arc<TenantShard>,
2609 0 : params: CreateTimelineParams,
2610 0 : broker_client: storage_broker::BrokerClientChannel,
2611 0 : ctx: &RequestContext,
2612 0 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
2613 0 : if !self.is_active() {
2614 0 : if matches!(self.current_state(), TenantState::Stopping { .. }) {
2615 0 : return Err(CreateTimelineError::ShuttingDown);
2616 : } else {
2617 0 : return Err(CreateTimelineError::Other(anyhow::anyhow!(
2618 0 : "Cannot create timelines on inactive tenant"
2619 0 : )));
2620 : }
2621 0 : }
2622 :
2623 0 : let _gate = self
2624 0 : .gate
2625 0 : .enter()
2626 0 : .map_err(|_| CreateTimelineError::ShuttingDown)?;
2627 :
2628 0 : let result: CreateTimelineResult = match params {
2629 : CreateTimelineParams::Bootstrap(CreateTimelineParamsBootstrap {
2630 0 : new_timeline_id,
2631 0 : existing_initdb_timeline_id,
2632 0 : pg_version,
2633 0 : }) => {
2634 0 : self.bootstrap_timeline(
2635 0 : new_timeline_id,
2636 0 : pg_version,
2637 0 : existing_initdb_timeline_id,
2638 0 : ctx,
2639 0 : )
2640 0 : .await?
2641 : }
2642 : CreateTimelineParams::Branch(CreateTimelineParamsBranch {
2643 0 : new_timeline_id,
2644 0 : ancestor_timeline_id,
2645 0 : mut ancestor_start_lsn,
2646 : }) => {
2647 0 : let ancestor_timeline = self
2648 0 : .get_timeline(ancestor_timeline_id, false)
2649 0 : .context("Cannot branch off the timeline that's not present in pageserver")?;
2650 :
2651 : // Instead of waiting around, just deny the request because the ancestor is not yet
2652 : // ready for other purposes either.
2653 0 : if !ancestor_timeline.is_active() {
2654 0 : return Err(CreateTimelineError::AncestorNotActive);
2655 0 : }
2656 0 :
2657 0 : if ancestor_timeline.is_archived() == Some(true) {
2658 0 : info!("tried to branch archived timeline");
2659 0 : return Err(CreateTimelineError::AncestorArchived);
2660 0 : }
2661 :
2662 0 : if let Some(lsn) = ancestor_start_lsn.as_mut() {
2663 0 : *lsn = lsn.align();
2664 0 :
2665 0 : let ancestor_ancestor_lsn = ancestor_timeline.get_ancestor_lsn();
2666 0 : if ancestor_ancestor_lsn > *lsn {
2667 : // can we safely just branch from the ancestor instead?
2668 0 : return Err(CreateTimelineError::AncestorLsn(anyhow::anyhow!(
2669 0 : "invalid start lsn {} for ancestor timeline {}: less than timeline ancestor lsn {}",
2670 0 : lsn,
2671 0 : ancestor_timeline_id,
2672 0 : ancestor_ancestor_lsn,
2673 0 : )));
2674 0 : }
2675 0 :
2676 0 : // Wait for the WAL to arrive and be processed on the parent branch up
2677 0 : // to the requested branch point. The repository code itself doesn't
2678 0 : // require it, but if we start to receive WAL on the new timeline,
2679 0 : // decoding the new WAL might need to look up previous pages, relation
2680 0 : // sizes etc. and that would get confused if the previous page versions
2681 0 : // are not in the repository yet.
2682 0 : ancestor_timeline
2683 0 : .wait_lsn(
2684 0 : *lsn,
2685 0 : timeline::WaitLsnWaiter::Tenant,
2686 0 : timeline::WaitLsnTimeout::Default,
2687 0 : ctx,
2688 0 : )
2689 0 : .await
2690 0 : .map_err(|e| match e {
2691 0 : e @ (WaitLsnError::Timeout(_) | WaitLsnError::BadState { .. }) => {
2692 0 : CreateTimelineError::AncestorLsn(anyhow::anyhow!(e))
2693 : }
2694 0 : WaitLsnError::Shutdown => CreateTimelineError::ShuttingDown,
2695 0 : })?;
2696 0 : }
2697 :
2698 0 : self.branch_timeline(&ancestor_timeline, new_timeline_id, ancestor_start_lsn, ctx)
2699 0 : .await?
2700 : }
2701 0 : CreateTimelineParams::ImportPgdata(params) => {
2702 0 : self.create_timeline_import_pgdata(params, ctx).await?
2703 : }
2704 : };
2705 :
2706 : // At this point we have dropped our guard on [`Self::timelines_creating`], and
2707 : // the timeline is visible in [`Self::timelines`], but it is _not_ durable yet. We must
2708 : // not send a success to the caller until it is. The same applies to idempotent retries.
2709 : //
2710 : // TODO: the timeline is already visible in [`Self::timelines`]; a caller could incorrectly
2711 : // assume that, because they can see the timeline via API, that the creation is done and
2712 : // that it is durable. Ideally, we would keep the timeline hidden (in [`Self::timelines_creating`])
2713 : // until it is durable, e.g., by extending the time we hold the creation guard. This also
2714 : // interacts with UninitializedTimeline and is generally a bit tricky.
2715 : //
2716 : // To re-emphasize: the only correct way to create a timeline is to repeat calling the
2717 : // creation API until it returns success. Only then is durability guaranteed.
2718 0 : info!(creation_result=%result.discriminant(), "waiting for timeline to be durable");
2719 0 : result
2720 0 : .timeline()
2721 0 : .remote_client
2722 0 : .wait_completion()
2723 0 : .await
2724 0 : .map_err(|e| match e {
2725 : WaitCompletionError::NotInitialized(
2726 0 : e, // If the queue is already stopped, it's a shutdown error.
2727 0 : ) if e.is_stopping() => CreateTimelineError::ShuttingDown,
2728 : WaitCompletionError::NotInitialized(_) => {
2729 : // This is a bug: we should never try to wait for uploads before initializing the timeline
2730 0 : debug_assert!(false);
2731 0 : CreateTimelineError::Other(anyhow::anyhow!("timeline not initialized"))
2732 : }
2733 : WaitCompletionError::UploadQueueShutDownOrStopped => {
2734 0 : CreateTimelineError::ShuttingDown
2735 : }
2736 0 : })?;
2737 :
2738 : // The creating task is responsible for activating the timeline.
2739 : // We do this after `wait_completion()` so that we don't spin up tasks that start
2740 : // doing stuff before the IndexPart is durable in S3, which is done by the previous section.
2741 0 : let activated_timeline = match result {
2742 0 : CreateTimelineResult::Created(timeline) => {
2743 0 : timeline.activate(
2744 0 : self.clone(),
2745 0 : broker_client,
2746 0 : None,
2747 0 : &ctx.with_scope_timeline(&timeline),
2748 0 : );
2749 0 : timeline
2750 : }
2751 0 : CreateTimelineResult::Idempotent(timeline) => {
2752 0 : info!(
2753 0 : "request was deemed idempotent, activation will be done by the creating task"
2754 : );
2755 0 : timeline
2756 : }
2757 0 : CreateTimelineResult::ImportSpawned(timeline) => {
2758 0 : info!(
2759 0 : "import task spawned, timeline will become visible and activated once the import is done"
2760 : );
2761 0 : timeline
2762 : }
2763 : };
2764 :
2765 0 : Ok(activated_timeline)
2766 0 : }
2767 :
2768 : /// The returned [`Arc<Timeline>`] is NOT in the [`TenantShard::timelines`] map until the import
2769 : /// completes in the background. A DIFFERENT [`Arc<Timeline>`] will be inserted into the
2770 : /// [`TenantShard::timelines`] map when the import completes.
2771 : /// We only return an [`Arc<Timeline>`] here so the API handler can create a [`pageserver_api::models::TimelineInfo`]
2772 : /// for the response.
2773 0 : async fn create_timeline_import_pgdata(
2774 0 : self: &Arc<Self>,
2775 0 : params: CreateTimelineParamsImportPgdata,
2776 0 : ctx: &RequestContext,
2777 0 : ) -> Result<CreateTimelineResult, CreateTimelineError> {
2778 0 : let CreateTimelineParamsImportPgdata {
2779 0 : new_timeline_id,
2780 0 : location,
2781 0 : idempotency_key,
2782 0 : } = params;
2783 0 :
2784 0 : let started_at = chrono::Utc::now().naive_utc();
2785 :
2786 : //
2787 : // There's probably a simpler way to upload an index part, but remote_timeline_client
2788 : // is the canonical way we do it.
2789 : // - create an empty timeline in-memory
2790 : // - use its remote_timeline_client to do the upload
2791 : // - dispose of the uninit timeline
2792 : // - keep the creation guard alive
2793 :
2794 0 : let timeline_create_guard = match self
2795 0 : .start_creating_timeline(
2796 0 : new_timeline_id,
2797 0 : CreateTimelineIdempotency::ImportPgdata(CreatingTimelineIdempotencyImportPgdata {
2798 0 : idempotency_key: idempotency_key.clone(),
2799 0 : }),
2800 0 : )
2801 0 : .await?
2802 : {
2803 0 : StartCreatingTimelineResult::CreateGuard(guard) => guard,
2804 0 : StartCreatingTimelineResult::Idempotent(timeline) => {
2805 0 : return Ok(CreateTimelineResult::Idempotent(timeline));
2806 : }
2807 : };
2808 :
2809 0 : let (mut uninit_timeline, timeline_ctx) = {
2810 0 : let this = &self;
2811 0 : let initdb_lsn = Lsn(0);
2812 0 : async move {
2813 0 : let new_metadata = TimelineMetadata::new(
2814 0 : // Initialize disk_consistent_lsn to 0. The caller must import some data to
2815 0 : // make it valid before calling finish_creation().
2816 0 : Lsn(0),
2817 0 : None,
2818 0 : None,
2819 0 : Lsn(0),
2820 0 : initdb_lsn,
2821 0 : initdb_lsn,
2822 0 : 15,
2823 0 : );
2824 0 : this.prepare_new_timeline(
2825 0 : new_timeline_id,
2826 0 : &new_metadata,
2827 0 : timeline_create_guard,
2828 0 : initdb_lsn,
2829 0 : None,
2830 0 : None,
2831 0 : ctx,
2832 0 : )
2833 0 : .await
2834 0 : }
2835 0 : }
2836 0 : .await?;
2837 :
2838 0 : let in_progress = import_pgdata::index_part_format::InProgress {
2839 0 : idempotency_key,
2840 0 : location,
2841 0 : started_at,
2842 0 : };
2843 0 : let index_part = import_pgdata::index_part_format::Root::V1(
2844 0 : import_pgdata::index_part_format::V1::InProgress(in_progress),
2845 0 : );
2846 0 : uninit_timeline
2847 0 : .raw_timeline()
2848 0 : .unwrap()
2849 0 : .remote_client
2850 0 : .schedule_index_upload_for_import_pgdata_state_update(Some(index_part.clone()))?;
2851 :
2852 : // wait_completion happens in caller
2853 :
2854 0 : let (timeline, timeline_create_guard) = uninit_timeline.finish_creation_myself();
2855 0 :
2856 0 : let import_task_handle = tokio::spawn(self.clone().create_timeline_import_pgdata_task(
2857 0 : timeline.clone(),
2858 0 : index_part,
2859 0 : timeline_create_guard,
2860 0 : timeline_ctx.detached_child(TaskKind::ImportPgdata, DownloadBehavior::Warn),
2861 0 : ));
2862 0 :
2863 0 : let prev = self.timelines_importing.lock().unwrap().insert(
2864 0 : timeline.timeline_id,
2865 0 : ImportingTimeline {
2866 0 : timeline: timeline.clone(),
2867 0 : import_task_handle,
2868 0 : },
2869 0 : );
2870 0 :
2871 0 : // Idempotency is enforced higher up the stack
2872 0 : assert!(prev.is_none());
2873 :
2874 : // NB: the timeline doesn't exist in self.timelines at this point
2875 0 : Ok(CreateTimelineResult::ImportSpawned(timeline))
2876 0 : }
2877 :
2878 : /// Finalize the import of a timeline on this shard by marking it complete in
2879 : /// the index part. If the import task hasn't finished yet, returns an error.
2880 : ///
2881 : /// This method is idempotent. If the import was finalized once, the next call
2882 : /// will be a no-op.
2883 0 : pub(crate) async fn finalize_importing_timeline(
2884 0 : &self,
2885 0 : timeline_id: TimelineId,
2886 0 : ) -> Result<(), FinalizeTimelineImportError> {
2887 0 : let timeline = {
2888 0 : let locked = self.timelines_importing.lock().unwrap();
2889 0 : match locked.get(&timeline_id) {
2890 0 : Some(importing_timeline) => {
2891 0 : if !importing_timeline.import_task_handle.is_finished() {
2892 0 : return Err(FinalizeTimelineImportError::ImportTaskStillRunning);
2893 0 : }
2894 0 :
2895 0 : importing_timeline.timeline.clone()
2896 : }
2897 : None => {
2898 0 : return Ok(());
2899 : }
2900 : }
2901 : };
2902 :
2903 0 : timeline
2904 0 : .remote_client
2905 0 : .schedule_index_upload_for_import_pgdata_finalize()
2906 0 : .map_err(|_err| FinalizeTimelineImportError::ShuttingDown)?;
2907 0 : timeline
2908 0 : .remote_client
2909 0 : .wait_completion()
2910 0 : .await
2911 0 : .map_err(|_err| FinalizeTimelineImportError::ShuttingDown)?;
2912 :
2913 0 : self.timelines_importing
2914 0 : .lock()
2915 0 : .unwrap()
2916 0 : .remove(&timeline_id);
2917 0 :
2918 0 : Ok(())
2919 0 : }
2920 :
2921 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%timeline.timeline_id))]
2922 : async fn create_timeline_import_pgdata_task(
2923 : self: Arc<TenantShard>,
2924 : timeline: Arc<Timeline>,
2925 : index_part: import_pgdata::index_part_format::Root,
2926 : timeline_create_guard: TimelineCreateGuard,
2927 : ctx: RequestContext,
2928 : ) {
2929 : debug_assert_current_span_has_tenant_and_timeline_id();
2930 : info!("starting");
2931 : scopeguard::defer! {info!("exiting")};
2932 :
2933 : let res = self
2934 : .create_timeline_import_pgdata_task_impl(
2935 : timeline,
2936 : index_part,
2937 : timeline_create_guard,
2938 : ctx,
2939 : )
2940 : .await;
2941 : if let Err(err) = &res {
2942 : error!(?err, "task failed");
2943 : // TODO sleep & retry, sensitive to tenant shutdown
2944 : // TODO: allow timeline deletion requests => should cancel the task
2945 : }
2946 : }
2947 :
2948 0 : async fn create_timeline_import_pgdata_task_impl(
2949 0 : self: Arc<TenantShard>,
2950 0 : timeline: Arc<Timeline>,
2951 0 : index_part: import_pgdata::index_part_format::Root,
2952 0 : _timeline_create_guard: TimelineCreateGuard,
2953 0 : ctx: RequestContext,
2954 0 : ) -> Result<(), anyhow::Error> {
2955 0 : info!("importing pgdata");
2956 0 : let ctx = ctx.with_scope_timeline(&timeline);
2957 0 : import_pgdata::doit(&timeline, index_part, &ctx, self.cancel.clone())
2958 0 : .await
2959 0 : .context("import")?;
2960 0 : info!("import done - waiting for activation");
2961 :
2962 0 : anyhow::Ok(())
2963 0 : }
2964 :
2965 0 : pub(crate) async fn delete_timeline(
2966 0 : self: Arc<Self>,
2967 0 : timeline_id: TimelineId,
2968 0 : ) -> Result<(), DeleteTimelineError> {
2969 0 : DeleteTimelineFlow::run(&self, timeline_id).await?;
2970 :
2971 0 : Ok(())
2972 0 : }
2973 :
2974 : /// Perform one garbage collection iteration, removing old data files from disk.
2975 : /// This function is called periodically by the GC task.
2976 : /// It can also be requested explicitly through the pageserver API's 'do_gc' command.
2977 : ///
2978 : /// `target_timeline_id` specifies the timeline to GC, or None for all.
2979 : ///
2980 : /// The `horizon` and `pitr` parameters determine how much WAL history needs to be retained.
2981 : /// Also known as the retention period, or the GC cutoff point. `horizon` specifies
2982 : /// the amount of history, as LSN difference from current latest LSN on each timeline.
2983 : /// `pitr` specifies the same as a time difference from the current time. The effective
 2984 : /// GC cutoff point is determined conservatively: of `horizon` and `pitr`, whichever
2985 : /// requires more history to be retained.
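      : ///
      : /// Rough worked example (the numbers are illustrative, not defaults): with
      : /// `horizon = 64 MiB` and `pitr = 7 days`, the horizon cutoff is
      : /// `last_record_lsn - 64 MiB` and the PITR cutoff is the LSN that was current
      : /// seven days ago; GC then keeps history back to the older (smaller) of the two:
      : ///
      : /// ```text
      : /// horizon_cutoff = last_record_lsn - horizon
      : /// pitr_cutoff    = lsn_at(now - pitr)
      : /// gc_cutoff      = min(horizon_cutoff, pitr_cutoff)   // keep the longer history
      : /// ```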
2986 : //
2987 377 : pub(crate) async fn gc_iteration(
2988 377 : &self,
2989 377 : target_timeline_id: Option<TimelineId>,
2990 377 : horizon: u64,
2991 377 : pitr: Duration,
2992 377 : cancel: &CancellationToken,
2993 377 : ctx: &RequestContext,
2994 377 : ) -> Result<GcResult, GcError> {
2995 377 : // Don't start doing work during shutdown
2996 377 : if let TenantState::Stopping { .. } = self.current_state() {
2997 0 : return Ok(GcResult::default());
2998 377 : }
2999 377 :
3000 377 : // there is a global allowed_error for this
3001 377 : if !self.is_active() {
3002 0 : return Err(GcError::NotActive);
3003 377 : }
3004 377 :
3005 377 : {
3006 377 : let conf = self.tenant_conf.load();
3007 377 :
3008 377 : // If we may not delete layers, then simply skip GC. Even though a tenant
3009 377 : // in AttachedMulti state could do GC and just enqueue the blocked deletions,
3010 377 : // the only advantage to doing it is to perhaps shrink the LayerMap metadata
3011 377 : // a bit sooner than we would achieve by waiting for AttachedSingle status.
3012 377 : if !conf.location.may_delete_layers_hint() {
3013 0 : info!("Skipping GC in location state {:?}", conf.location);
3014 0 : return Ok(GcResult::default());
3015 377 : }
3016 377 :
3017 377 : if conf.is_gc_blocked_by_lsn_lease_deadline() {
3018 375 : info!("Skipping GC because lsn lease deadline is not reached");
3019 375 : return Ok(GcResult::default());
3020 2 : }
3021 : }
3022 :
3023 2 : let _guard = match self.gc_block.start().await {
3024 2 : Ok(guard) => guard,
3025 0 : Err(reasons) => {
3026 0 : info!("Skipping GC: {reasons}");
3027 0 : return Ok(GcResult::default());
3028 : }
3029 : };
3030 :
3031 2 : self.gc_iteration_internal(target_timeline_id, horizon, pitr, cancel, ctx)
3032 2 : .await
3033 377 : }
3034 :
 3035 : /// Performs one compaction iteration. Called periodically from the compaction loop. Returns
 3036 : /// whether another compaction is needed: either we still have pending work, or we yielded
 3037 : /// for immediate L0 compaction.
3038 : ///
3039 : /// Compaction can also be explicitly requested for a timeline via the HTTP API.
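      : ///
      : /// The compaction loop driving this is expected to act on the returned
      : /// [`CompactionOutcome`] roughly as in the sketch below (illustrative only,
      : /// not the actual loop implementation; error handling elided):
      : ///
      : /// ```ignore
      : /// loop {
      : ///     match tenant.compaction_iteration(&cancel, &ctx).await? {
      : ///         // More work is queued up, or we yielded for L0: go again right away.
      : ///         CompactionOutcome::Pending | CompactionOutcome::YieldForL0 => continue,
      : ///         // Nothing left to do: sleep until the next compaction period.
      : ///         CompactionOutcome::Done | CompactionOutcome::Skipped => {
      : ///             tokio::time::sleep(tenant.get_compaction_period()).await;
      : ///         }
      : ///     }
      : /// }
      : /// ```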
3040 0 : async fn compaction_iteration(
3041 0 : self: &Arc<Self>,
3042 0 : cancel: &CancellationToken,
3043 0 : ctx: &RequestContext,
3044 0 : ) -> Result<CompactionOutcome, CompactionError> {
3045 0 : // Don't compact inactive tenants.
3046 0 : if !self.is_active() {
3047 0 : return Ok(CompactionOutcome::Skipped);
3048 0 : }
3049 0 :
3050 0 : // Don't compact tenants that can't upload layers. We don't check `may_delete_layers_hint`,
3051 0 : // since we need to compact L0 even in AttachedMulti to bound read amplification.
3052 0 : let location = self.tenant_conf.load().location;
3053 0 : if !location.may_upload_layers_hint() {
3054 0 : info!("skipping compaction in location state {location:?}");
3055 0 : return Ok(CompactionOutcome::Skipped);
3056 0 : }
3057 0 :
3058 0 : // Don't compact if the circuit breaker is tripped.
3059 0 : if self.compaction_circuit_breaker.lock().unwrap().is_broken() {
3060 0 : info!("skipping compaction due to previous failures");
3061 0 : return Ok(CompactionOutcome::Skipped);
3062 0 : }
3063 0 :
3064 0 : // Collect all timelines to compact, along with offload instructions and L0 counts.
3065 0 : let mut compact: Vec<Arc<Timeline>> = Vec::new();
3066 0 : let mut offload: HashSet<TimelineId> = HashSet::new();
3067 0 : let mut l0_counts: HashMap<TimelineId, usize> = HashMap::new();
3068 0 :
3069 0 : {
3070 0 : let offload_enabled = self.get_timeline_offloading_enabled();
3071 0 : let timelines = self.timelines.lock().unwrap();
3072 0 : for (&timeline_id, timeline) in timelines.iter() {
3073 : // Skip inactive timelines.
3074 0 : if !timeline.is_active() {
3075 0 : continue;
3076 0 : }
3077 0 :
3078 0 : // Schedule the timeline for compaction.
3079 0 : compact.push(timeline.clone());
3080 :
3081 : // Schedule the timeline for offloading if eligible.
3082 0 : let can_offload = offload_enabled
3083 0 : && timeline.can_offload().0
3084 0 : && !timelines
3085 0 : .iter()
3086 0 : .any(|(_, tli)| tli.get_ancestor_timeline_id() == Some(timeline_id));
3087 0 : if can_offload {
3088 0 : offload.insert(timeline_id);
3089 0 : }
3090 : }
3091 : } // release timelines lock
3092 :
3093 0 : for timeline in &compact {
3094 : // Collect L0 counts. Can't await while holding lock above.
3095 0 : if let Ok(lm) = timeline.layers.read().await.layer_map() {
3096 0 : l0_counts.insert(timeline.timeline_id, lm.level0_deltas().len());
3097 0 : }
3098 : }
3099 :
3100 : // Pass 1: L0 compaction across all timelines, in order of L0 count. We prioritize this to
3101 : // bound read amplification.
3102 : //
3103 : // TODO: this may spin on one or more ingest-heavy timelines, starving out image/GC
3104 : // compaction and offloading. We leave that as a potential problem to solve later. Consider
3105 : // splitting L0 and image/GC compaction to separate background jobs.
3106 0 : if self.get_compaction_l0_first() {
3107 0 : let compaction_threshold = self.get_compaction_threshold();
3108 0 : let compact_l0 = compact
3109 0 : .iter()
3110 0 : .map(|tli| (tli, l0_counts.get(&tli.timeline_id).copied().unwrap_or(0)))
3111 0 : .filter(|&(_, l0)| l0 >= compaction_threshold)
3112 0 : .sorted_by_key(|&(_, l0)| l0)
3113 0 : .rev()
3114 0 : .map(|(tli, _)| tli.clone())
3115 0 : .collect_vec();
3116 0 :
3117 0 : let mut has_pending_l0 = false;
3118 0 : for timeline in compact_l0 {
3119 0 : let ctx = &ctx.with_scope_timeline(&timeline);
3120 : // NB: don't set CompactFlags::YieldForL0, since this is an L0-only compaction pass.
3121 0 : let outcome = timeline
3122 0 : .compact(cancel, CompactFlags::OnlyL0Compaction.into(), ctx)
3123 0 : .instrument(info_span!("compact_timeline", timeline_id = %timeline.timeline_id))
3124 0 : .await
3125 0 : .inspect_err(|err| self.maybe_trip_compaction_breaker(err))?;
3126 0 : match outcome {
3127 0 : CompactionOutcome::Done => {}
3128 0 : CompactionOutcome::Skipped => {}
3129 0 : CompactionOutcome::Pending => has_pending_l0 = true,
3130 0 : CompactionOutcome::YieldForL0 => has_pending_l0 = true,
3131 : }
3132 : }
3133 0 : if has_pending_l0 {
3134 0 : return Ok(CompactionOutcome::YieldForL0); // do another pass
3135 0 : }
3136 0 : }
3137 :
3138 : // Pass 2: image compaction and timeline offloading. If any timelines have accumulated more
3139 : // L0 layers, they may also be compacted here. Image compaction will yield if there is
3140 : // pending L0 compaction on any tenant timeline.
3141 : //
3142 : // TODO: consider ordering timelines by some priority, e.g. time since last full compaction,
3143 : // amount of L1 delta debt or garbage, offload-eligible timelines first, etc.
3144 0 : let mut has_pending = false;
3145 0 : for timeline in compact {
3146 0 : if !timeline.is_active() {
3147 0 : continue;
3148 0 : }
3149 0 : let ctx = &ctx.with_scope_timeline(&timeline);
3150 0 :
3151 0 : // Yield for L0 if the separate L0 pass is enabled (otherwise there's no point).
3152 0 : let mut flags = EnumSet::default();
3153 0 : if self.get_compaction_l0_first() {
3154 0 : flags |= CompactFlags::YieldForL0;
3155 0 : }
3156 :
3157 0 : let mut outcome = timeline
3158 0 : .compact(cancel, flags, ctx)
3159 0 : .instrument(info_span!("compact_timeline", timeline_id = %timeline.timeline_id))
3160 0 : .await
3161 0 : .inspect_err(|err| self.maybe_trip_compaction_breaker(err))?;
3162 :
3163 : // If we're done compacting, check the scheduled GC compaction queue for more work.
3164 0 : if outcome == CompactionOutcome::Done {
3165 0 : let queue = {
3166 0 : let mut guard = self.scheduled_compaction_tasks.lock().unwrap();
3167 0 : guard
3168 0 : .entry(timeline.timeline_id)
3169 0 : .or_insert_with(|| Arc::new(GcCompactionQueue::new()))
3170 0 : .clone()
3171 0 : };
3172 0 : let gc_compaction_strategy = self
3173 0 : .feature_resolver
3174 0 : .evaluate_multivariate("gc-comapction-strategy", self.tenant_shard_id.tenant_id)
3175 0 : .ok();
3176 0 : let span = if let Some(gc_compaction_strategy) = gc_compaction_strategy {
3177 0 : info_span!("gc_compact_timeline", timeline_id = %timeline.timeline_id, strategy = %gc_compaction_strategy)
3178 : } else {
3179 0 : info_span!("gc_compact_timeline", timeline_id = %timeline.timeline_id)
3180 : };
3181 0 : outcome = queue
3182 0 : .iteration(cancel, ctx, &self.gc_block, &timeline)
3183 0 : .instrument(span)
3184 0 : .await?;
3185 0 : }
3186 :
3187 : // If we're done compacting, offload the timeline if requested.
3188 0 : if outcome == CompactionOutcome::Done && offload.contains(&timeline.timeline_id) {
3189 0 : pausable_failpoint!("before-timeline-auto-offload");
3190 0 : offload_timeline(self, &timeline)
3191 0 : .instrument(info_span!("offload_timeline", timeline_id = %timeline.timeline_id))
3192 0 : .await
3193 0 : .or_else(|err| match err {
3194 : // Ignore this, we likely raced with unarchival.
3195 0 : OffloadError::NotArchived => Ok(()),
3196 0 : err => Err(err),
3197 0 : })?;
3198 0 : }
3199 :
3200 0 : match outcome {
3201 0 : CompactionOutcome::Done => {}
3202 0 : CompactionOutcome::Skipped => {}
3203 0 : CompactionOutcome::Pending => has_pending = true,
3204 : // This mostly makes sense when the L0-only pass above is enabled, since there's
3205 : // otherwise no guarantee that we'll start with the timeline that has high L0.
3206 0 : CompactionOutcome::YieldForL0 => return Ok(CompactionOutcome::YieldForL0),
3207 : }
3208 : }
3209 :
3210 : // Success! Untrip the breaker if necessary.
3211 0 : self.compaction_circuit_breaker
3212 0 : .lock()
3213 0 : .unwrap()
3214 0 : .success(&CIRCUIT_BREAKERS_UNBROKEN);
3215 0 :
3216 0 : match has_pending {
3217 0 : true => Ok(CompactionOutcome::Pending),
3218 0 : false => Ok(CompactionOutcome::Done),
3219 : }
3220 0 : }
3221 :
3222 : /// Trips the compaction circuit breaker if appropriate.
3223 0 : pub(crate) fn maybe_trip_compaction_breaker(&self, err: &CompactionError) {
3224 0 : match err {
3225 0 : err if err.is_cancel() => {}
3226 0 : CompactionError::ShuttingDown => (),
3227 : // Offload failures don't trip the circuit breaker, since they're cheap to retry and
3228 : // shouldn't block compaction.
3229 0 : CompactionError::Offload(_) => {}
3230 0 : CompactionError::CollectKeySpaceError(err) => {
 3231 0 : // CollectKeySpaceError::Cancelled and PageRead::Cancelled are handled in the `err.is_cancel()` branch above.
3232 0 : self.compaction_circuit_breaker
3233 0 : .lock()
3234 0 : .unwrap()
3235 0 : .fail(&CIRCUIT_BREAKERS_BROKEN, err);
3236 0 : }
3237 0 : CompactionError::Other(err) => {
3238 0 : self.compaction_circuit_breaker
3239 0 : .lock()
3240 0 : .unwrap()
3241 0 : .fail(&CIRCUIT_BREAKERS_BROKEN, err);
3242 0 : }
3243 0 : CompactionError::AlreadyRunning(_) => {}
3244 : }
3245 0 : }
3246 :
3247 : /// Cancel scheduled compaction tasks
3248 0 : pub(crate) fn cancel_scheduled_compaction(&self, timeline_id: TimelineId) {
3249 0 : let mut guard = self.scheduled_compaction_tasks.lock().unwrap();
3250 0 : if let Some(q) = guard.get_mut(&timeline_id) {
3251 0 : q.cancel_scheduled();
3252 0 : }
3253 0 : }
3254 :
3255 0 : pub(crate) fn get_scheduled_compaction_tasks(
3256 0 : &self,
3257 0 : timeline_id: TimelineId,
3258 0 : ) -> Vec<CompactInfoResponse> {
3259 0 : let res = {
3260 0 : let guard = self.scheduled_compaction_tasks.lock().unwrap();
3261 0 : guard.get(&timeline_id).map(|q| q.remaining_jobs())
3262 : };
3263 0 : let Some((running, remaining)) = res else {
3264 0 : return Vec::new();
3265 : };
3266 0 : let mut result = Vec::new();
3267 0 : if let Some((id, running)) = running {
3268 0 : result.extend(running.into_compact_info_resp(id, true));
3269 0 : }
3270 0 : for (id, job) in remaining {
3271 0 : result.extend(job.into_compact_info_resp(id, false));
3272 0 : }
3273 0 : result
3274 0 : }
3275 :
3276 : /// Schedule a compaction task for a timeline.
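      : ///
      : /// The sender side of the returned one-shot channel is handed to the
      : /// GC-compaction queue along with the job, so a caller that wants to wait for
      : /// the job can await the receiver (sketch only; `options` is whatever
      : /// [`CompactOptions`] the caller built):
      : ///
      : /// ```ignore
      : /// let done_rx = tenant.schedule_compaction(timeline_id, options).await?;
      : /// done_rx.await?; // resolves once the queued compaction job is signalled
      : /// ```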
3277 0 : pub(crate) async fn schedule_compaction(
3278 0 : &self,
3279 0 : timeline_id: TimelineId,
3280 0 : options: CompactOptions,
3281 0 : ) -> anyhow::Result<tokio::sync::oneshot::Receiver<()>> {
3282 0 : let (tx, rx) = tokio::sync::oneshot::channel();
3283 0 : let mut guard = self.scheduled_compaction_tasks.lock().unwrap();
3284 0 : let q = guard
3285 0 : .entry(timeline_id)
3286 0 : .or_insert_with(|| Arc::new(GcCompactionQueue::new()));
3287 0 : q.schedule_manual_compaction(options, Some(tx));
3288 0 : Ok(rx)
3289 0 : }
3290 :
3291 : /// Performs periodic housekeeping, via the tenant housekeeping background task.
3292 0 : async fn housekeeping(&self) {
3293 0 : // Call through to all timelines to freeze ephemeral layers as needed. This usually happens
3294 0 : // during ingest, but we don't want idle timelines to hold open layers for too long.
3295 0 : //
3296 0 : // We don't do this if the tenant can't upload layers (i.e. it's in stale attachment mode).
3297 0 : // We don't run compaction in this case either, and don't want to keep flushing tiny L0
3298 0 : // layers that won't be compacted down.
3299 0 : if self.tenant_conf.load().location.may_upload_layers_hint() {
3300 0 : let timelines = self
3301 0 : .timelines
3302 0 : .lock()
3303 0 : .unwrap()
3304 0 : .values()
3305 0 : .filter(|tli| tli.is_active())
3306 0 : .cloned()
3307 0 : .collect_vec();
3308 :
3309 0 : for timeline in timelines {
3310 0 : timeline.maybe_freeze_ephemeral_layer().await;
3311 : }
3312 0 : }
3313 :
3314 : // Shut down walredo if idle.
3315 : const WALREDO_IDLE_TIMEOUT: Duration = Duration::from_secs(180);
3316 0 : if let Some(ref walredo_mgr) = self.walredo_mgr {
3317 0 : walredo_mgr.maybe_quiesce(WALREDO_IDLE_TIMEOUT);
3318 0 : }
3319 0 : }
3320 :
3321 0 : pub fn timeline_has_no_attached_children(&self, timeline_id: TimelineId) -> bool {
3322 0 : let timelines = self.timelines.lock().unwrap();
3323 0 : !timelines
3324 0 : .iter()
3325 0 : .any(|(_id, tl)| tl.get_ancestor_timeline_id() == Some(timeline_id))
3326 0 : }
3327 :
3328 875 : pub fn current_state(&self) -> TenantState {
3329 875 : self.state.borrow().clone()
3330 875 : }
3331 :
3332 494 : pub fn is_active(&self) -> bool {
3333 494 : self.current_state() == TenantState::Active
3334 494 : }
3335 :
3336 0 : pub fn generation(&self) -> Generation {
3337 0 : self.generation
3338 0 : }
3339 :
3340 0 : pub(crate) fn wal_redo_manager_status(&self) -> Option<WalRedoManagerStatus> {
3341 0 : self.walredo_mgr.as_ref().and_then(|mgr| mgr.status())
3342 0 : }
3343 :
3344 : /// Changes tenant status to active, unless shutdown was already requested.
3345 : ///
3346 : /// `background_jobs_can_start` is an optional barrier set to a value during pageserver startup
3347 : /// to delay background jobs. Background jobs can be started right away when None is given.
3348 0 : fn activate(
3349 0 : self: &Arc<Self>,
3350 0 : broker_client: BrokerClientChannel,
3351 0 : background_jobs_can_start: Option<&completion::Barrier>,
3352 0 : ctx: &RequestContext,
3353 0 : ) {
3354 0 : span::debug_assert_current_span_has_tenant_id();
3355 0 :
3356 0 : let mut activating = false;
3357 0 : self.state.send_modify(|current_state| {
3358 : use pageserver_api::models::ActivatingFrom;
3359 0 : match &*current_state {
3360 : TenantState::Activating(_) | TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => {
3361 0 : panic!("caller is responsible for calling activate() only on Loading / Attaching tenants, got {state:?}", state = current_state);
3362 : }
3363 0 : TenantState::Attaching => {
3364 0 : *current_state = TenantState::Activating(ActivatingFrom::Attaching);
3365 0 : }
3366 0 : }
3367 0 : debug!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), "Activating tenant");
3368 0 : activating = true;
3369 0 : // Continue outside the closure. We need to grab timelines.lock()
3370 0 : // and we plan to turn it into a tokio::sync::Mutex in a future patch.
3371 0 : });
3372 0 :
3373 0 : if activating {
3374 0 : let timelines_accessor = self.timelines.lock().unwrap();
3375 0 : let timelines_offloaded_accessor = self.timelines_offloaded.lock().unwrap();
3376 0 : let timelines_to_activate = timelines_accessor
3377 0 : .values()
3378 0 : .filter(|timeline| !(timeline.is_broken() || timeline.is_stopping()));
3379 0 :
3380 0 : // Before activation, populate each Timeline's GcInfo with information about its children
3381 0 : self.initialize_gc_info(&timelines_accessor, &timelines_offloaded_accessor, None);
3382 0 :
3383 0 : // Spawn gc and compaction loops. The loops will shut themselves
3384 0 : // down when they notice that the tenant is inactive.
3385 0 : tasks::start_background_loops(self, background_jobs_can_start);
3386 0 :
3387 0 : let mut activated_timelines = 0;
3388 :
3389 0 : for timeline in timelines_to_activate {
3390 0 : timeline.activate(
3391 0 : self.clone(),
3392 0 : broker_client.clone(),
3393 0 : background_jobs_can_start,
3394 0 : &ctx.with_scope_timeline(timeline),
3395 0 : );
3396 0 : activated_timelines += 1;
3397 0 : }
3398 :
3399 0 : let tid = self.tenant_shard_id.tenant_id.to_string();
3400 0 : let shard_id = self.tenant_shard_id.shard_slug().to_string();
3401 0 : let offloaded_timeline_count = timelines_offloaded_accessor.len();
3402 0 : TENANT_OFFLOADED_TIMELINES
3403 0 : .with_label_values(&[&tid, &shard_id])
3404 0 : .set(offloaded_timeline_count as u64);
3405 0 :
3406 0 : self.state.send_modify(move |current_state| {
3407 0 : assert!(
3408 0 : matches!(current_state, TenantState::Activating(_)),
3409 0 : "set_stopping and set_broken wait for us to leave Activating state",
3410 : );
3411 0 : *current_state = TenantState::Active;
3412 0 :
3413 0 : let elapsed = self.constructed_at.elapsed();
3414 0 : let total_timelines = timelines_accessor.len();
3415 0 :
 3416 0 : // log a lot of stuff, because some tenants sometimes suffer from long, user-visible
 3417 0 : // activation times. See https://github.com/neondatabase/neon/issues/4025
3418 0 : info!(
3419 0 : since_creation_millis = elapsed.as_millis(),
3420 0 : tenant_id = %self.tenant_shard_id.tenant_id,
3421 0 : shard_id = %self.tenant_shard_id.shard_slug(),
3422 0 : activated_timelines,
3423 0 : total_timelines,
3424 0 : post_state = <&'static str>::from(&*current_state),
3425 0 : "activation attempt finished"
3426 : );
3427 :
3428 0 : TENANT.activation.observe(elapsed.as_secs_f64());
3429 0 : });
3430 0 : }
3431 0 : }
3432 :
3433 : /// Shutdown the tenant and join all of the spawned tasks.
3434 : ///
3435 : /// The method caters for all use-cases:
3436 : /// - pageserver shutdown (freeze_and_flush == true)
3437 : /// - detach + ignore (freeze_and_flush == false)
3438 : ///
3439 : /// This will attempt to shutdown even if tenant is broken.
3440 : ///
3441 : /// `shutdown_progress` is a [`completion::Barrier`] for the shutdown initiated by this call.
3442 : /// If the tenant is already shutting down, we return a clone of the first shutdown call's
 3443 : /// `Barrier` as an `Err`. Such a later caller can use the returned barrier to join with
 3444 : /// the ongoing shutdown.
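      : ///
      : /// A caller that wants to join an already-running shutdown could handle the
      : /// `Err` case along these lines (sketch only; this assumes `completion::Barrier`
      : /// exposes an async `wait()`, adjust to the actual API):
      : ///
      : /// ```ignore
      : /// match tenant.shutdown(progress, ShutdownMode::Hard).await {
      : ///     Ok(()) => { /* we drove the shutdown ourselves */ }
      : ///     Err(other) => other.wait().await, // someone else started it; join them
      : /// }
      : /// ```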
3445 3 : async fn shutdown(
3446 3 : &self,
3447 3 : shutdown_progress: completion::Barrier,
3448 3 : shutdown_mode: timeline::ShutdownMode,
3449 3 : ) -> Result<(), completion::Barrier> {
3450 3 : span::debug_assert_current_span_has_tenant_id();
3451 :
 3452 : // Set the tenant (and its timelines) to Stopping state.
3453 : //
3454 : // Since we can only transition into Stopping state after activation is complete,
3455 : // run it in a JoinSet so all tenants have a chance to stop before we get SIGKILLed.
3456 : //
3457 : // Transitioning tenants to Stopping state has a couple of non-obvious side effects:
3458 : // 1. Lock out any new requests to the tenants.
3459 : // 2. Signal cancellation to WAL receivers (we wait on it below).
3460 : // 3. Signal cancellation for other tenant background loops.
3461 : // 4. ???
3462 : //
3463 : // The waiting for the cancellation is not done uniformly.
3464 : // We certainly wait for WAL receivers to shut down.
3465 : // That is necessary so that no new data comes in before the freeze_and_flush.
3466 : // But the tenant background loops are joined-on in our caller.
 3467 : // It's messed up.
3468 : // we just ignore the failure to stop
3469 :
3470 : // If we're still attaching, fire the cancellation token early to drop out: this
3471 : // will prevent us flushing, but ensures timely shutdown if some I/O during attach
3472 : // is very slow.
3473 3 : let shutdown_mode = if matches!(self.current_state(), TenantState::Attaching) {
3474 0 : self.cancel.cancel();
3475 0 :
3476 0 : // Having fired our cancellation token, do not try and flush timelines: their cancellation tokens
3477 0 : // are children of ours, so their flush loops will have shut down already
3478 0 : timeline::ShutdownMode::Hard
3479 : } else {
3480 3 : shutdown_mode
3481 : };
3482 :
3483 3 : match self.set_stopping(shutdown_progress).await {
3484 3 : Ok(()) => {}
3485 0 : Err(SetStoppingError::Broken) => {
3486 0 : // assume that this is acceptable
3487 0 : }
3488 0 : Err(SetStoppingError::AlreadyStopping(other)) => {
 3489 0 : // give the caller the option to wait for this shutdown
3490 0 : info!("Tenant::shutdown: AlreadyStopping");
3491 0 : return Err(other);
3492 : }
3493 : };
3494 :
3495 3 : let mut js = tokio::task::JoinSet::new();
3496 3 : {
3497 3 : let timelines = self.timelines.lock().unwrap();
3498 3 : timelines.values().for_each(|timeline| {
3499 3 : let timeline = Arc::clone(timeline);
3500 3 : let timeline_id = timeline.timeline_id;
3501 3 : let span = tracing::info_span!("timeline_shutdown", %timeline_id, ?shutdown_mode);
3502 3 : js.spawn(async move { timeline.shutdown(shutdown_mode).instrument(span).await });
3503 3 : });
3504 3 : }
3505 3 : {
3506 3 : let timelines_offloaded = self.timelines_offloaded.lock().unwrap();
3507 3 : timelines_offloaded.values().for_each(|timeline| {
3508 0 : timeline.defuse_for_tenant_drop();
3509 3 : });
3510 3 : }
3511 3 : {
3512 3 : let mut timelines_importing = self.timelines_importing.lock().unwrap();
3513 3 : timelines_importing
3514 3 : .drain()
3515 3 : .for_each(|(timeline_id, importing_timeline)| {
3516 0 : let span = tracing::info_span!("importing_timeline_shutdown", %timeline_id);
3517 0 : js.spawn(async move { importing_timeline.shutdown().instrument(span).await });
3518 3 : });
3519 3 : }
3520 3 : // test_long_timeline_create_then_tenant_delete is leaning on this message
3521 3 : tracing::info!("Waiting for timelines...");
3522 6 : while let Some(res) = js.join_next().await {
3523 0 : match res {
3524 3 : Ok(()) => {}
3525 0 : Err(je) if je.is_cancelled() => unreachable!("no cancelling used"),
3526 0 : Err(je) if je.is_panic() => { /* logged already */ }
3527 0 : Err(je) => warn!("unexpected JoinError: {je:?}"),
3528 : }
3529 : }
3530 :
3531 3 : if let ShutdownMode::Reload = shutdown_mode {
3532 0 : tracing::info!("Flushing deletion queue");
3533 0 : if let Err(e) = self.deletion_queue_client.flush().await {
3534 0 : match e {
3535 0 : DeletionQueueError::ShuttingDown => {
3536 0 : // This is the only error we expect for now. In the future, if more error
3537 0 : // variants are added, we should handle them here.
3538 0 : }
3539 : }
3540 0 : }
3541 3 : }
3542 :
3543 : // We cancel the Tenant's cancellation token _after_ the timelines have all shut down. This permits
3544 : // them to continue to do work during their shutdown methods, e.g. flushing data.
3545 3 : tracing::debug!("Cancelling CancellationToken");
3546 3 : self.cancel.cancel();
3547 3 :
3548 3 : // shutdown all tenant and timeline tasks: gc, compaction, page service
3549 3 : // No new tasks will be started for this tenant because it's in `Stopping` state.
3550 3 : //
3551 3 : // this will additionally shutdown and await all timeline tasks.
3552 3 : tracing::debug!("Waiting for tasks...");
3553 3 : task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), None).await;
3554 :
3555 3 : if let Some(walredo_mgr) = self.walredo_mgr.as_ref() {
3556 3 : walredo_mgr.shutdown().await;
3557 0 : }
3558 :
3559 : // Wait for any in-flight operations to complete
3560 3 : self.gate.close().await;
3561 :
3562 3 : remove_tenant_metrics(&self.tenant_shard_id);
3563 3 :
3564 3 : Ok(())
3565 3 : }
3566 :
3567 : /// Change tenant status to Stopping, to mark that it is being shut down.
3568 : ///
3569 : /// This function waits for the tenant to become active if it isn't already, before transitioning it into Stopping state.
3570 : ///
3571 : /// This function is not cancel-safe!
3572 3 : async fn set_stopping(&self, progress: completion::Barrier) -> Result<(), SetStoppingError> {
3573 3 : let mut rx = self.state.subscribe();
3574 3 :
3575 3 : // cannot stop before we're done activating, so wait out until we're done activating
3576 3 : rx.wait_for(|state| match state {
3577 : TenantState::Activating(_) | TenantState::Attaching => {
3578 0 : info!("waiting for {state} to turn Active|Broken|Stopping");
3579 0 : false
3580 : }
3581 3 : TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => true,
3582 3 : })
3583 3 : .await
3584 3 : .expect("cannot drop self.state while on a &self method");
3585 3 :
3586 3 : // we now know we're done activating, let's see whether this task is the winner to transition into Stopping
3587 3 : let mut err = None;
3588 3 : let stopping = self.state.send_if_modified(|current_state| match current_state {
3589 : TenantState::Activating(_) | TenantState::Attaching => {
3590 0 : unreachable!("we ensured above that we're done with activation, and, there is no re-activation")
3591 : }
3592 : TenantState::Active => {
3593 : // FIXME: due to time-of-check vs time-of-use issues, it can happen that new timelines
3594 : // are created after the transition to Stopping. That's harmless, as the Timelines
3595 : // won't be accessible to anyone afterwards, because the Tenant is in Stopping state.
3596 3 : *current_state = TenantState::Stopping { progress: Some(progress) };
3597 3 : // Continue stopping outside the closure. We need to grab timelines.lock()
3598 3 : // and we plan to turn it into a tokio::sync::Mutex in a future patch.
3599 3 : true
3600 : }
3601 : TenantState::Stopping { progress: None } => {
3602 : // An attach was cancelled, and the attach transitioned the tenant from Attaching to
3603 : // Stopping(None) to let us know it exited. Register our progress and continue.
3604 0 : *current_state = TenantState::Stopping { progress: Some(progress) };
3605 0 : true
3606 : }
3607 0 : TenantState::Broken { reason, .. } => {
3608 0 : info!(
3609 0 : "Cannot set tenant to Stopping state, it is in Broken state due to: {reason}"
3610 : );
3611 0 : err = Some(SetStoppingError::Broken);
3612 0 : false
3613 : }
3614 0 : TenantState::Stopping { progress: Some(progress) } => {
3615 0 : info!("Tenant is already in Stopping state");
3616 0 : err = Some(SetStoppingError::AlreadyStopping(progress.clone()));
3617 0 : false
3618 : }
3619 3 : });
3620 3 : match (stopping, err) {
3621 3 : (true, None) => {} // continue
3622 0 : (false, Some(err)) => return Err(err),
3623 0 : (true, Some(_)) => unreachable!(
3624 0 : "send_if_modified closure must error out if not transitioning to Stopping"
3625 0 : ),
3626 0 : (false, None) => unreachable!(
3627 0 : "send_if_modified closure must return true if transitioning to Stopping"
3628 0 : ),
3629 : }
3630 :
3631 3 : let timelines_accessor = self.timelines.lock().unwrap();
3632 3 : let not_broken_timelines = timelines_accessor
3633 3 : .values()
3634 3 : .filter(|timeline| !timeline.is_broken());
3635 6 : for timeline in not_broken_timelines {
3636 3 : timeline.set_state(TimelineState::Stopping);
3637 3 : }
3638 3 : Ok(())
3639 3 : }
3640 :
3641 : /// Method for tenant::mgr to transition us into Broken state in case of a late failure in
3642 : /// `remove_tenant_from_memory`
3643 : ///
 3644 : /// This function waits for the tenant to become active if it isn't already, before transitioning it into Broken state.
3645 : ///
3646 : /// In tests, we also use this to set tenants to Broken state on purpose.
3647 0 : pub(crate) async fn set_broken(&self, reason: String) {
3648 0 : let mut rx = self.state.subscribe();
3649 0 :
3650 0 : // The load & attach routines own the tenant state until it has reached `Active`.
3651 0 : // So, wait until it's done.
3652 0 : rx.wait_for(|state| match state {
3653 : TenantState::Activating(_) | TenantState::Attaching => {
3654 0 : info!(
3655 0 : "waiting for {} to turn Active|Broken|Stopping",
3656 0 : <&'static str>::from(state)
3657 : );
3658 0 : false
3659 : }
3660 0 : TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => true,
3661 0 : })
3662 0 : .await
3663 0 : .expect("cannot drop self.state while on a &self method");
3664 0 :
3665 0 : // we now know we're done activating, let's see whether this task is the winner to transition into Broken
3666 0 : self.set_broken_no_wait(reason)
3667 0 : }
3668 :
3669 0 : pub(crate) fn set_broken_no_wait(&self, reason: impl Display) {
3670 0 : let reason = reason.to_string();
3671 0 : self.state.send_modify(|current_state| {
3672 0 : match *current_state {
3673 : TenantState::Activating(_) | TenantState::Attaching => {
3674 0 : unreachable!("we ensured above that we're done with activation, and, there is no re-activation")
3675 : }
3676 : TenantState::Active => {
3677 0 : if cfg!(feature = "testing") {
3678 0 : warn!("Changing Active tenant to Broken state, reason: {}", reason);
3679 0 : *current_state = TenantState::broken_from_reason(reason);
3680 : } else {
3681 0 : unreachable!("not allowed to call set_broken on Active tenants in non-testing builds")
3682 : }
3683 : }
3684 : TenantState::Broken { .. } => {
3685 0 : warn!("Tenant is already in Broken state");
3686 : }
3687 : // This is the only "expected" path, any other path is a bug.
3688 : TenantState::Stopping { .. } => {
3689 0 : warn!(
3690 0 : "Marking Stopping tenant as Broken state, reason: {}",
3691 : reason
3692 : );
3693 0 : *current_state = TenantState::broken_from_reason(reason);
3694 : }
3695 : }
3696 0 : });
3697 0 : }
3698 :
3699 0 : pub fn subscribe_for_state_updates(&self) -> watch::Receiver<TenantState> {
3700 0 : self.state.subscribe()
3701 0 : }
3702 :
3703 : /// The activate_now semaphore is initialized with zero units. As soon as
3704 : /// we add a unit, waiters will be able to acquire a unit and proceed.
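      : ///
      : /// This is the usual zero-permit semaphore wake-up pattern, sketched here with
      : /// a plain tokio semaphore rather than the actual field:
      : ///
      : /// ```ignore
      : /// let sem = tokio::sync::Semaphore::new(0);
      : /// // A waiter parks here until a permit becomes available...
      : /// let waiter = sem.acquire();
      : /// // ...and `activate_now` is the side that adds the permit, waking the waiter.
      : /// sem.add_permits(1);
      : /// ```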
3705 0 : pub(crate) fn activate_now(&self) {
3706 0 : self.activate_now_sem.add_permits(1);
3707 0 : }
3708 :
3709 0 : pub(crate) async fn wait_to_become_active(
3710 0 : &self,
3711 0 : timeout: Duration,
3712 0 : ) -> Result<(), GetActiveTenantError> {
3713 0 : let mut receiver = self.state.subscribe();
3714 : loop {
3715 0 : let current_state = receiver.borrow_and_update().clone();
3716 0 : match current_state {
3717 : TenantState::Attaching | TenantState::Activating(_) => {
3718 : // in these states, there's a chance that we can reach ::Active
3719 0 : self.activate_now();
3720 0 : match timeout_cancellable(timeout, &self.cancel, receiver.changed()).await {
3721 0 : Ok(r) => {
3722 0 : r.map_err(
3723 0 : |_e: tokio::sync::watch::error::RecvError|
3724 : // Tenant existed but was dropped: report it as non-existent
3725 0 : GetActiveTenantError::NotFound(GetTenantError::ShardNotFound(self.tenant_shard_id))
3726 0 : )?
3727 : }
3728 : Err(TimeoutCancellableError::Cancelled) => {
3729 0 : return Err(GetActiveTenantError::Cancelled);
3730 : }
3731 : Err(TimeoutCancellableError::Timeout) => {
3732 0 : return Err(GetActiveTenantError::WaitForActiveTimeout {
3733 0 : latest_state: Some(self.current_state()),
3734 0 : wait_time: timeout,
3735 0 : });
3736 : }
3737 : }
3738 : }
3739 : TenantState::Active => {
3740 0 : return Ok(());
3741 : }
3742 0 : TenantState::Broken { reason, .. } => {
3743 0 : // This is fatal, and reported distinctly from the general case of "will never be active" because
3744 0 : // it's logically a 500 to external API users (broken is always a bug).
3745 0 : return Err(GetActiveTenantError::Broken(reason));
3746 : }
3747 : TenantState::Stopping { .. } => {
3748 : // There's no chance the tenant can transition back into ::Active
3749 0 : return Err(GetActiveTenantError::WillNotBecomeActive(current_state));
3750 : }
3751 : }
3752 : }
3753 0 : }
3754 :
3755 0 : pub(crate) fn get_attach_mode(&self) -> AttachmentMode {
3756 0 : self.tenant_conf.load().location.attach_mode
3757 0 : }
3758 :
3759 : /// For API access: generate a LocationConfig equivalent to the one that would be used to
3760 : /// create a Tenant in the same state. Do not use this in hot paths: it's for relatively
3761 : /// rare external API calls, like a reconciliation at startup.
3762 0 : pub(crate) fn get_location_conf(&self) -> models::LocationConfig {
3763 0 : let attached_tenant_conf = self.tenant_conf.load();
3764 :
3765 0 : let location_config_mode = match attached_tenant_conf.location.attach_mode {
3766 0 : AttachmentMode::Single => models::LocationConfigMode::AttachedSingle,
3767 0 : AttachmentMode::Multi => models::LocationConfigMode::AttachedMulti,
3768 0 : AttachmentMode::Stale => models::LocationConfigMode::AttachedStale,
3769 : };
3770 :
3771 0 : models::LocationConfig {
3772 0 : mode: location_config_mode,
3773 0 : generation: self.generation.into(),
3774 0 : secondary_conf: None,
3775 0 : shard_number: self.shard_identity.number.0,
3776 0 : shard_count: self.shard_identity.count.literal(),
3777 0 : shard_stripe_size: self.shard_identity.stripe_size.0,
3778 0 : tenant_conf: attached_tenant_conf.tenant_conf.clone(),
3779 0 : }
3780 0 : }
3781 :
3782 0 : pub(crate) fn get_tenant_shard_id(&self) -> &TenantShardId {
3783 0 : &self.tenant_shard_id
3784 0 : }
3785 :
3786 118 : pub(crate) fn get_shard_stripe_size(&self) -> ShardStripeSize {
3787 118 : self.shard_identity.stripe_size
3788 118 : }
3789 :
3790 0 : pub(crate) fn get_generation(&self) -> Generation {
3791 0 : self.generation
3792 0 : }
3793 :
3794 : /// This function partially shuts down the tenant (it shuts down the Timelines) and is fallible,
3795 : /// and can leave the tenant in a bad state if it fails. The caller is responsible for
3796 : /// resetting this tenant to a valid state if we fail.
3797 0 : pub(crate) async fn split_prepare(
3798 0 : &self,
3799 0 : child_shards: &Vec<TenantShardId>,
3800 0 : ) -> anyhow::Result<()> {
3801 0 : let (timelines, offloaded) = {
3802 0 : let timelines = self.timelines.lock().unwrap();
3803 0 : let offloaded = self.timelines_offloaded.lock().unwrap();
3804 0 : (timelines.clone(), offloaded.clone())
3805 0 : };
3806 0 : let timelines_iter = timelines
3807 0 : .values()
3808 0 : .map(TimelineOrOffloadedArcRef::<'_>::from)
3809 0 : .chain(
3810 0 : offloaded
3811 0 : .values()
3812 0 : .map(TimelineOrOffloadedArcRef::<'_>::from),
3813 0 : );
3814 0 : for timeline in timelines_iter {
3815 : // We do not block timeline creation/deletion during splits inside the pageserver: it is up to higher levels
3816 : // to ensure that they do not start a split if currently in the process of doing these.
3817 :
3818 0 : let timeline_id = timeline.timeline_id();
3819 :
3820 0 : if let TimelineOrOffloadedArcRef::Timeline(timeline) = timeline {
3821 : // Upload an index from the parent: this is partly to provide freshness for the
3822 : // child tenants that will copy it, and partly for general ease-of-debugging: there will
3823 : // always be a parent shard index in the same generation as we wrote the child shard index.
3824 0 : tracing::info!(%timeline_id, "Uploading index");
3825 0 : timeline
3826 0 : .remote_client
3827 0 : .schedule_index_upload_for_file_changes()?;
3828 0 : timeline.remote_client.wait_completion().await?;
3829 0 : }
3830 :
3831 0 : let remote_client = match timeline {
3832 0 : TimelineOrOffloadedArcRef::Timeline(timeline) => timeline.remote_client.clone(),
3833 0 : TimelineOrOffloadedArcRef::Offloaded(offloaded) => {
3834 0 : let remote_client = self
3835 0 : .build_timeline_client(offloaded.timeline_id, self.remote_storage.clone());
3836 0 : Arc::new(remote_client)
3837 : }
3838 : };
3839 :
3840 : // Shut down the timeline's remote client: this means that the indices we write
3841 : // for child shards will not be invalidated by the parent shard deleting layers.
3842 0 : tracing::info!(%timeline_id, "Shutting down remote storage client");
3843 0 : remote_client.shutdown().await;
3844 :
3845 : // Download methods can still be used after shutdown, as they don't flow through the remote client's
 3846 : // queue. In principle the RemoteTimelineClient could provide this without downloading it, but this
3847 : // operation is rare, so it's simpler to just download it (and robustly guarantees that the index
3848 : // we use here really is the remotely persistent one).
3849 0 : tracing::info!(%timeline_id, "Downloading index_part from parent");
3850 0 : let result = remote_client
3851 0 : .download_index_file(&self.cancel)
3852 0 : .instrument(info_span!("download_index_file", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), %timeline_id))
3853 0 : .await?;
3854 0 : let index_part = match result {
3855 : MaybeDeletedIndexPart::Deleted(_) => {
3856 0 : anyhow::bail!("Timeline deletion happened concurrently with split")
3857 : }
3858 0 : MaybeDeletedIndexPart::IndexPart(p) => p,
3859 : };
3860 :
3861 : // A shard split may not take place while a timeline import is on-going
3862 : // for the tenant. Timeline imports run as part of each tenant shard
3863 : // and rely on the sharding scheme to split the work among pageservers.
3864 : // If we were to split in the middle of this process, we would have to
3865 : // either ensure that it's driven to completion on the old shard set
3866 : // or transfer it to the new shard set. It's technically possible, but complex.
3867 0 : match index_part.import_pgdata {
3868 0 : Some(ref import) if !import.is_done() => {
3869 0 : anyhow::bail!(
3870 0 : "Cannot split due to import with idempotency key: {:?}",
3871 0 : import.idempotency_key()
3872 0 : );
3873 : }
3874 0 : Some(_) | None => {
3875 0 : // fallthrough
3876 0 : }
3877 : }
3878 :
3879 0 : for child_shard in child_shards {
3880 0 : tracing::info!(%timeline_id, "Uploading index_part for child {}", child_shard.to_index());
3881 0 : upload_index_part(
3882 0 : &self.remote_storage,
3883 0 : child_shard,
3884 0 : &timeline_id,
3885 0 : self.generation,
3886 0 : &index_part,
3887 0 : &self.cancel,
3888 0 : )
3889 0 : .await?;
3890 : }
3891 : }
3892 :
3893 0 : let tenant_manifest = self.build_tenant_manifest();
3894 0 : for child_shard in child_shards {
3895 0 : tracing::info!(
3896 0 : "Uploading tenant manifest for child {}",
3897 0 : child_shard.to_index()
3898 : );
3899 0 : upload_tenant_manifest(
3900 0 : &self.remote_storage,
3901 0 : child_shard,
3902 0 : self.generation,
3903 0 : &tenant_manifest,
3904 0 : &self.cancel,
3905 0 : )
3906 0 : .await?;
3907 : }
3908 :
3909 0 : Ok(())
3910 0 : }
3911 :
3912 0 : pub(crate) fn get_sizes(&self) -> TopTenantShardItem {
3913 0 : let mut result = TopTenantShardItem {
3914 0 : id: self.tenant_shard_id,
3915 0 : resident_size: 0,
3916 0 : physical_size: 0,
3917 0 : max_logical_size: 0,
3918 0 : max_logical_size_per_shard: 0,
3919 0 : };
3920 :
3921 0 : for timeline in self.timelines.lock().unwrap().values() {
3922 0 : result.resident_size += timeline.metrics.resident_physical_size_gauge.get();
3923 0 :
3924 0 : result.physical_size += timeline
3925 0 : .remote_client
3926 0 : .metrics
3927 0 : .remote_physical_size_gauge
3928 0 : .get();
3929 0 : result.max_logical_size = std::cmp::max(
3930 0 : result.max_logical_size,
3931 0 : timeline.metrics.current_logical_size_gauge.get(),
3932 0 : );
3933 0 : }
3934 :
3935 0 : result.max_logical_size_per_shard = result
3936 0 : .max_logical_size
3937 0 : .div_ceil(self.tenant_shard_id.shard_count.count() as u64);
3938 0 :
3939 0 : result
3940 0 : }
3941 : }
3942 :
 3943 : /// Given a map of timelines keyed by timeline ID, perform a topological sort,
 3944 : /// so that the parent of each timeline comes before its children.
 3945 : ///
 3946 : /// `E` extracts the ancestor timeline ID from `T`, which allows `T` to be any
 3947 : /// type that knows its ancestor: `TimelineMetadata`, `Timeline` itself, etc.
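      : ///
      : /// For example (sketch; the extractor is real, the surrounding names are
      : /// hypothetical):
      : ///
      : /// ```ignore
      : /// // `metas` maps each timeline to its TimelineMetadata.
      : /// let metas: HashMap<TimelineId, TimelineMetadata> = load_all_metadata()?;
      : /// // Iterate so that every ancestor precedes its descendants.
      : /// for (timeline_id, meta) in tree_sort_timelines(metas, |m| m.ancestor_timeline())? {
      : ///     // Safe to load `timeline_id` here: its ancestor was already handled above.
      : /// }
      : /// ```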
3948 117 : fn tree_sort_timelines<T, E>(
3949 117 : timelines: HashMap<TimelineId, T>,
3950 117 : extractor: E,
3951 117 : ) -> anyhow::Result<Vec<(TimelineId, T)>>
3952 117 : where
3953 117 : E: Fn(&T) -> Option<TimelineId>,
3954 117 : {
3955 117 : let mut result = Vec::with_capacity(timelines.len());
3956 117 :
3957 117 : let mut now = Vec::with_capacity(timelines.len());
3958 117 : // (ancestor, children)
3959 117 : let mut later: HashMap<TimelineId, Vec<(TimelineId, T)>> =
3960 117 : HashMap::with_capacity(timelines.len());
3961 :
3962 120 : for (timeline_id, value) in timelines {
3963 3 : if let Some(ancestor_id) = extractor(&value) {
3964 1 : let children = later.entry(ancestor_id).or_default();
3965 1 : children.push((timeline_id, value));
3966 2 : } else {
3967 2 : now.push((timeline_id, value));
3968 2 : }
3969 : }
3970 :
3971 120 : while let Some((timeline_id, metadata)) = now.pop() {
3972 3 : result.push((timeline_id, metadata));
3973 : // All children of this can be loaded now
3974 3 : if let Some(mut children) = later.remove(&timeline_id) {
3975 1 : now.append(&mut children);
3976 2 : }
3977 : }
3978 :
3979 : // All timelines should be visited now. Unless there were timelines with missing ancestors.
3980 117 : if !later.is_empty() {
3981 0 : for (missing_id, orphan_ids) in later {
3982 0 : for (orphan_id, _) in orphan_ids {
3983 0 : error!(
3984 0 : "could not load timeline {orphan_id} because its ancestor timeline {missing_id} could not be loaded"
3985 : );
3986 : }
3987 : }
3988 0 : bail!("could not load tenant because some timelines are missing ancestors");
3989 117 : }
3990 117 :
3991 117 : Ok(result)
3992 117 : }
3993 :
3994 : impl TenantShard {
3995 0 : pub fn tenant_specific_overrides(&self) -> pageserver_api::models::TenantConfig {
3996 0 : self.tenant_conf.load().tenant_conf.clone()
3997 0 : }
3998 :
3999 0 : pub fn effective_config(&self) -> pageserver_api::config::TenantConfigToml {
4000 0 : self.tenant_specific_overrides()
4001 0 : .merge(self.conf.default_tenant_conf.clone())
4002 0 : }
4003 :
4004 0 : pub fn get_checkpoint_distance(&self) -> u64 {
4005 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4006 0 : tenant_conf
4007 0 : .checkpoint_distance
4008 0 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
4009 0 : }
4010 :
4011 0 : pub fn get_checkpoint_timeout(&self) -> Duration {
4012 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4013 0 : tenant_conf
4014 0 : .checkpoint_timeout
4015 0 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
4016 0 : }
4017 :
4018 0 : pub fn get_compaction_target_size(&self) -> u64 {
4019 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4020 0 : tenant_conf
4021 0 : .compaction_target_size
4022 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
4023 0 : }
4024 :
4025 0 : pub fn get_compaction_period(&self) -> Duration {
4026 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4027 0 : tenant_conf
4028 0 : .compaction_period
4029 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_period)
4030 0 : }
4031 :
4032 0 : pub fn get_compaction_threshold(&self) -> usize {
4033 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4034 0 : tenant_conf
4035 0 : .compaction_threshold
4036 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
4037 0 : }
4038 :
4039 0 : pub fn get_rel_size_v2_enabled(&self) -> bool {
4040 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4041 0 : tenant_conf
4042 0 : .rel_size_v2_enabled
4043 0 : .unwrap_or(self.conf.default_tenant_conf.rel_size_v2_enabled)
4044 0 : }
4045 :
4046 0 : pub fn get_compaction_upper_limit(&self) -> usize {
4047 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4048 0 : tenant_conf
4049 0 : .compaction_upper_limit
4050 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_upper_limit)
4051 0 : }
4052 :
4053 0 : pub fn get_compaction_l0_first(&self) -> bool {
4054 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4055 0 : tenant_conf
4056 0 : .compaction_l0_first
4057 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_l0_first)
4058 0 : }
4059 :
4060 2 : pub fn get_gc_horizon(&self) -> u64 {
4061 2 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4062 2 : tenant_conf
4063 2 : .gc_horizon
4064 2 : .unwrap_or(self.conf.default_tenant_conf.gc_horizon)
4065 2 : }
4066 :
4067 0 : pub fn get_gc_period(&self) -> Duration {
4068 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4069 0 : tenant_conf
4070 0 : .gc_period
4071 0 : .unwrap_or(self.conf.default_tenant_conf.gc_period)
4072 0 : }
4073 :
4074 0 : pub fn get_image_creation_threshold(&self) -> usize {
4075 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4076 0 : tenant_conf
4077 0 : .image_creation_threshold
4078 0 : .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
4079 0 : }
4080 :
4081 2 : pub fn get_pitr_interval(&self) -> Duration {
4082 2 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4083 2 : tenant_conf
4084 2 : .pitr_interval
4085 2 : .unwrap_or(self.conf.default_tenant_conf.pitr_interval)
4086 2 : }
4087 :
4088 0 : pub fn get_min_resident_size_override(&self) -> Option<u64> {
4089 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4090 0 : tenant_conf
4091 0 : .min_resident_size_override
4092 0 : .or(self.conf.default_tenant_conf.min_resident_size_override)
4093 0 : }
4094 :
4095 0 : pub fn get_heatmap_period(&self) -> Option<Duration> {
4096 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4097 0 : let heatmap_period = tenant_conf
4098 0 : .heatmap_period
4099 0 : .unwrap_or(self.conf.default_tenant_conf.heatmap_period);
4100 0 : if heatmap_period.is_zero() {
4101 0 : None
4102 : } else {
4103 0 : Some(heatmap_period)
4104 : }
4105 0 : }
4106 :
4107 2 : pub fn get_lsn_lease_length(&self) -> Duration {
4108 2 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4109 2 : tenant_conf
4110 2 : .lsn_lease_length
4111 2 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
4112 2 : }
4113 :
4114 0 : pub fn get_timeline_offloading_enabled(&self) -> bool {
4115 0 : if self.conf.timeline_offloading {
4116 0 : return true;
4117 0 : }
4118 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
4119 0 : tenant_conf
4120 0 : .timeline_offloading
4121 0 : .unwrap_or(self.conf.default_tenant_conf.timeline_offloading)
4122 0 : }
4123 :
4124 : /// Generate an up-to-date TenantManifest based on the state of this Tenant.
4125 118 : fn build_tenant_manifest(&self) -> TenantManifest {
4126 118 : // Collect the offloaded timelines, and sort them for deterministic output.
4127 118 : let offloaded_timelines = self
4128 118 : .timelines_offloaded
4129 118 : .lock()
4130 118 : .unwrap()
4131 118 : .values()
4132 118 : .map(|tli| tli.manifest())
4133 118 : .sorted_by_key(|m| m.timeline_id)
4134 118 : .collect_vec();
4135 118 :
4136 118 : TenantManifest {
4137 118 : version: LATEST_TENANT_MANIFEST_VERSION,
4138 118 : stripe_size: Some(self.get_shard_stripe_size()),
4139 118 : offloaded_timelines,
4140 118 : }
4141 118 : }
4142 :
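      : /// Update the tenant-specific config overrides by applying `update` to the
      : /// current value in a read-copy-update loop (see the comment inside), then
      : /// notify all timelines of the new config. A caller might use it along these
      : /// lines (sketch only; the field chosen is arbitrary):
      : ///
      : /// ```ignore
      : /// let new_overrides = tenant.update_tenant_config(|mut overrides| {
      : ///     overrides.gc_horizon = Some(64 * 1024 * 1024);
      : ///     Ok(overrides)
      : /// })?;
      : /// ```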
4143 0 : pub fn update_tenant_config<
4144 0 : F: Fn(
4145 0 : pageserver_api::models::TenantConfig,
4146 0 : ) -> anyhow::Result<pageserver_api::models::TenantConfig>,
4147 0 : >(
4148 0 : &self,
4149 0 : update: F,
4150 0 : ) -> anyhow::Result<pageserver_api::models::TenantConfig> {
4151 0 : // Use read-copy-update in order to avoid overwriting the location config
4152 0 : // state if this races with [`TenantShard::set_new_location_config`]. Note that
4153 0 : // this race is not possible if both request types come from the storage
4154 0 : // controller (as they should!) because an exclusive op lock is required
4155 0 : // on the storage controller side.
4156 0 :
4157 0 : self.tenant_conf
4158 0 : .try_rcu(|attached_conf| -> Result<_, anyhow::Error> {
4159 0 : Ok(Arc::new(AttachedTenantConf {
4160 0 : tenant_conf: update(attached_conf.tenant_conf.clone())?,
4161 0 : location: attached_conf.location,
4162 0 : lsn_lease_deadline: attached_conf.lsn_lease_deadline,
4163 : }))
4164 0 : })?;
4165 :
4166 0 : let updated = self.tenant_conf.load();
4167 0 :
4168 0 : self.tenant_conf_updated(&updated.tenant_conf);
4169 0 : // Don't hold self.timelines.lock() during the notifies.
4170 0 : // There's no risk of deadlock right now, but there could be if we consolidate
4171 0 : // mutexes in struct Timeline in the future.
4172 0 : let timelines = self.list_timelines();
4173 0 : for timeline in timelines {
4174 0 : timeline.tenant_conf_updated(&updated);
4175 0 : }
4176 :
4177 0 : Ok(updated.tenant_conf.clone())
4178 0 : }
4179 :
4180 0 : pub(crate) fn set_new_location_config(&self, new_conf: AttachedTenantConf) {
4181 0 : let new_tenant_conf = new_conf.tenant_conf.clone();
4182 0 :
4183 0 : self.tenant_conf.store(Arc::new(new_conf.clone()));
4184 0 :
4185 0 : self.tenant_conf_updated(&new_tenant_conf);
4186 0 : // Don't hold self.timelines.lock() during the notifies.
4187 0 : // There's no risk of deadlock right now, but there could be if we consolidate
4188 0 : // mutexes in struct Timeline in the future.
4189 0 : let timelines = self.list_timelines();
4190 0 : for timeline in timelines {
4191 0 : timeline.tenant_conf_updated(&new_conf);
4192 0 : }
4193 0 : }
4194 :
4195 117 : fn get_pagestream_throttle_config(
4196 117 : psconf: &'static PageServerConf,
4197 117 : overrides: &pageserver_api::models::TenantConfig,
4198 117 : ) -> throttle::Config {
4199 117 : overrides
4200 117 : .timeline_get_throttle
4201 117 : .clone()
4202 117 : .unwrap_or(psconf.default_tenant_conf.timeline_get_throttle.clone())
4203 117 : }
4204 :
4205 0 : pub(crate) fn tenant_conf_updated(&self, new_conf: &pageserver_api::models::TenantConfig) {
4206 0 : let conf = Self::get_pagestream_throttle_config(self.conf, new_conf);
4207 0 : self.pagestream_throttle.reconfigure(conf)
4208 0 : }
4209 :
4210 : /// Helper function to create a new Timeline struct.
4211 : ///
4212 : /// The returned Timeline is in Loading state. The caller is responsible for
4213 : /// initializing any on-disk state, and for inserting the Timeline to the 'timelines'
4214 : /// map.
4215 : ///
4216 : /// `validate_ancestor == false` is used when a timeline is created for deletion
 4217 : /// and we might not have the ancestor present anymore, which is fine for
 4218 : /// timelines that are about to be deleted.
4219 : #[allow(clippy::too_many_arguments)]
4220 233 : fn create_timeline_struct(
4221 233 : &self,
4222 233 : new_timeline_id: TimelineId,
4223 233 : new_metadata: &TimelineMetadata,
4224 233 : previous_heatmap: Option<PreviousHeatmap>,
4225 233 : ancestor: Option<Arc<Timeline>>,
4226 233 : resources: TimelineResources,
4227 233 : cause: CreateTimelineCause,
4228 233 : create_idempotency: CreateTimelineIdempotency,
4229 233 : gc_compaction_state: Option<GcCompactionState>,
4230 233 : rel_size_v2_status: Option<RelSizeMigration>,
4231 233 : ctx: &RequestContext,
4232 233 : ) -> anyhow::Result<(Arc<Timeline>, RequestContext)> {
4233 233 : let state = match cause {
4234 : CreateTimelineCause::Load => {
4235 233 : let ancestor_id = new_metadata.ancestor_timeline();
4236 233 : anyhow::ensure!(
4237 233 : ancestor_id == ancestor.as_ref().map(|t| t.timeline_id),
4238 0 : "Timeline's {new_timeline_id} ancestor {ancestor_id:?} was not found"
4239 : );
4240 233 : TimelineState::Loading
4241 : }
4242 0 : CreateTimelineCause::Delete => TimelineState::Stopping,
4243 : };
4244 :
4245 233 : let pg_version = new_metadata.pg_version();
4246 233 :
4247 233 : let timeline = Timeline::new(
4248 233 : self.conf,
4249 233 : Arc::clone(&self.tenant_conf),
4250 233 : new_metadata,
4251 233 : previous_heatmap,
4252 233 : ancestor,
4253 233 : new_timeline_id,
4254 233 : self.tenant_shard_id,
4255 233 : self.generation,
4256 233 : self.shard_identity,
4257 233 : self.walredo_mgr.clone(),
4258 233 : resources,
4259 233 : pg_version,
4260 233 : state,
4261 233 : self.attach_wal_lag_cooldown.clone(),
4262 233 : create_idempotency,
4263 233 : gc_compaction_state,
4264 233 : rel_size_v2_status,
4265 233 : self.cancel.child_token(),
4266 233 : );
4267 233 :
4268 233 : let timeline_ctx = RequestContextBuilder::from(ctx)
4269 233 : .scope(context::Scope::new_timeline(&timeline))
4270 233 : .detached_child();
4271 233 :
4272 233 : Ok((timeline, timeline_ctx))
4273 233 : }
4274 :
4275 : /// [`TenantShard::shutdown`] must be called before dropping the returned [`TenantShard`] object
4276 : /// to ensure proper cleanup of background tasks and metrics.
4277 : //
4278 : // Allow too_many_arguments because a constructor's argument list naturally grows with the
4279 : // number of attributes in the struct: breaking these out into a builder wouldn't be helpful.
4280 : #[allow(clippy::too_many_arguments)]
4281 117 : fn new(
4282 117 : state: TenantState,
4283 117 : conf: &'static PageServerConf,
4284 117 : attached_conf: AttachedTenantConf,
4285 117 : shard_identity: ShardIdentity,
4286 117 : walredo_mgr: Option<Arc<WalRedoManager>>,
4287 117 : tenant_shard_id: TenantShardId,
4288 117 : remote_storage: GenericRemoteStorage,
4289 117 : deletion_queue_client: DeletionQueueClient,
4290 117 : l0_flush_global_state: L0FlushGlobalState,
4291 117 : basebackup_prepare_sender: BasebackupPrepareSender,
4292 117 : feature_resolver: FeatureResolver,
4293 117 : ) -> TenantShard {
4294 117 : assert!(!attached_conf.location.generation.is_none());
4295 :
4296 117 : let (state, mut rx) = watch::channel(state);
4297 117 :
4298 117 : tokio::spawn(async move {
4299 117 : // reflect tenant state in metrics:
4300 117 : // - global per tenant state: TENANT_STATE_METRIC
4301 117 : // - "set" of broken tenants: BROKEN_TENANTS_SET
4302 117 : //
4303 117 : // set of broken tenants should not have zero counts so that it remains accessible for
4304 117 : // alerting.
4305 117 :
4306 117 : let tid = tenant_shard_id.to_string();
4307 117 : let shard_id = tenant_shard_id.shard_slug().to_string();
4308 117 : let set_key = &[tid.as_str(), shard_id.as_str()][..];
4309 :
4310 234 : fn inspect_state(state: &TenantState) -> ([&'static str; 1], bool) {
4311 234 : ([state.into()], matches!(state, TenantState::Broken { .. }))
4312 234 : }
4313 :
4314 117 : let mut tuple = inspect_state(&rx.borrow_and_update());
4315 117 :
4316 117 : let is_broken = tuple.1;
4317 117 : let mut counted_broken = if is_broken {
4318 : // add the id to the set right away, there should not be any updates on the channel
4319 : // after before tenant is removed, if ever
4320 0 : BROKEN_TENANTS_SET.with_label_values(set_key).set(1);
4321 0 : true
4322 : } else {
4323 117 : false
4324 : };
4325 :
4326 : loop {
4327 234 : let labels = &tuple.0;
4328 234 : let current = TENANT_STATE_METRIC.with_label_values(labels);
4329 234 : current.inc();
4330 234 :
4331 234 : if rx.changed().await.is_err() {
4332 : // tenant has been dropped
4333 7 : current.dec();
4334 7 : drop(BROKEN_TENANTS_SET.remove_label_values(set_key));
4335 7 : break;
4336 117 : }
4337 117 :
4338 117 : current.dec();
4339 117 : tuple = inspect_state(&rx.borrow_and_update());
4340 117 :
4341 117 : let is_broken = tuple.1;
4342 117 : if is_broken && !counted_broken {
4343 0 : counted_broken = true;
4344 0 : // insert the tenant_id (back) into the set while avoiding needless counter
4345 0 : // access
4346 0 : BROKEN_TENANTS_SET.with_label_values(set_key).set(1);
4347 117 : }
4348 : }
4349 117 : });
4350 117 :
4351 117 : TenantShard {
4352 117 : tenant_shard_id,
4353 117 : shard_identity,
4354 117 : generation: attached_conf.location.generation,
4355 117 : conf,
 4356 117 : // using now here is a good enough approximation to catch tenants with really long
4357 117 : // activation times.
4358 117 : constructed_at: Instant::now(),
4359 117 : timelines: Mutex::new(HashMap::new()),
4360 117 : timelines_creating: Mutex::new(HashSet::new()),
4361 117 : timelines_offloaded: Mutex::new(HashMap::new()),
4362 117 : timelines_importing: Mutex::new(HashMap::new()),
4363 117 : remote_tenant_manifest: Default::default(),
4364 117 : gc_cs: tokio::sync::Mutex::new(()),
4365 117 : walredo_mgr,
4366 117 : remote_storage,
4367 117 : deletion_queue_client,
4368 117 : state,
4369 117 : cached_logical_sizes: tokio::sync::Mutex::new(HashMap::new()),
4370 117 : cached_synthetic_tenant_size: Arc::new(AtomicU64::new(0)),
4371 117 : eviction_task_tenant_state: tokio::sync::Mutex::new(EvictionTaskTenantState::default()),
4372 117 : compaction_circuit_breaker: std::sync::Mutex::new(CircuitBreaker::new(
4373 117 : format!("compaction-{tenant_shard_id}"),
4374 117 : 5,
4375 117 : // Compaction can be a very expensive operation, and might leak disk space. It also ought
4376 117 : // to be infallible, as long as remote storage is available. So if it repeatedly fails,
4377 117 : // use an extremely long backoff.
4378 117 : Some(Duration::from_secs(3600 * 24)),
4379 117 : )),
4380 117 : l0_compaction_trigger: Arc::new(Notify::new()),
4381 117 : scheduled_compaction_tasks: Mutex::new(Default::default()),
4382 117 : activate_now_sem: tokio::sync::Semaphore::new(0),
4383 117 : attach_wal_lag_cooldown: Arc::new(std::sync::OnceLock::new()),
4384 117 : cancel: CancellationToken::default(),
4385 117 : gate: Gate::default(),
4386 117 : pagestream_throttle: Arc::new(throttle::Throttle::new(
4387 117 : TenantShard::get_pagestream_throttle_config(conf, &attached_conf.tenant_conf),
4388 117 : )),
4389 117 : pagestream_throttle_metrics: Arc::new(
4390 117 : crate::metrics::tenant_throttling::Pagestream::new(&tenant_shard_id),
4391 117 : ),
4392 117 : tenant_conf: Arc::new(ArcSwap::from_pointee(attached_conf)),
4393 117 : ongoing_timeline_detach: std::sync::Mutex::default(),
4394 117 : gc_block: Default::default(),
4395 117 : l0_flush_global_state,
4396 117 : basebackup_prepare_sender,
4397 117 : feature_resolver,
4398 117 : }
4399 117 : }
4400 :
4401 : /// Locate and load config
4402 0 : pub(super) fn load_tenant_config(
4403 0 : conf: &'static PageServerConf,
4404 0 : tenant_shard_id: &TenantShardId,
4405 0 : ) -> Result<LocationConf, LoadConfigError> {
4406 0 : let config_path = conf.tenant_location_config_path(tenant_shard_id);
4407 0 :
4408 0 : info!("loading tenant configuration from {config_path}");
4409 :
4410 : // load and parse file
4411 0 : let config = fs::read_to_string(&config_path).map_err(|e| {
4412 0 : match e.kind() {
4413 : std::io::ErrorKind::NotFound => {
4414 : // The config should almost always exist for a tenant directory:
4415 : // - When attaching a tenant, the config is the first thing we write
4416 : // - When detaching a tenant, we atomically move the directory to a tmp location
4417 : // before deleting contents.
4418 : //
4419 : // The very rare edge case that can result in a missing config is if we crash during attach
4420 : // between creating the directory and writing the config. Callers should handle that as if the
4421 : // directory didn't exist.
4422 :
4423 0 : LoadConfigError::NotFound(config_path)
4424 : }
4425 : _ => {
4426 : // No IO errors except NotFound are acceptable here: other kinds of error indicate local storage or permissions issues
4427 : // from which we cannot cleanly recover
4428 0 : crate::virtual_file::on_fatal_io_error(&e, "Reading tenant config file")
4429 : }
4430 : }
4431 0 : })?;
4432 :
4433 0 : Ok(toml_edit::de::from_str::<LocationConf>(&config)?)
4434 0 : }
4435 :
4436 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
4437 : pub(super) async fn persist_tenant_config(
4438 : conf: &'static PageServerConf,
4439 : tenant_shard_id: &TenantShardId,
4440 : location_conf: &LocationConf,
4441 : ) -> std::io::Result<()> {
4442 : let config_path = conf.tenant_location_config_path(tenant_shard_id);
4443 :
4444 : Self::persist_tenant_config_at(tenant_shard_id, &config_path, location_conf).await
4445 : }
4446 :
4447 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
4448 : pub(super) async fn persist_tenant_config_at(
4449 : tenant_shard_id: &TenantShardId,
4450 : config_path: &Utf8Path,
4451 : location_conf: &LocationConf,
4452 : ) -> std::io::Result<()> {
4453 : debug!("persisting tenantconf to {config_path}");
4454 :
4455 : let mut conf_content = r#"# This file contains a specific per-tenant config.
4456 : # It is read in case of pageserver restart.
4457 : "#
4458 : .to_string();
4459 :
4460 0 : fail::fail_point!("tenant-config-before-write", |_| {
4461 0 : Err(std::io::Error::other("tenant-config-before-write"))
4462 0 : });
4463 :
4464 : // Convert the config to a toml file.
4465 : conf_content +=
4466 : &toml_edit::ser::to_string_pretty(&location_conf).expect("Config serialization failed");
4467 :
4468 : let temp_path = path_with_suffix_extension(config_path, TEMP_FILE_SUFFIX);
4469 :
4470 : let conf_content = conf_content.into_bytes();
4471 : VirtualFile::crashsafe_overwrite(config_path.to_owned(), temp_path, conf_content).await
4472 : }
4473 :
4474 : //
4475 : // How garbage collection works:
4476 : //
4477 : // +--bar------------->
4478 : // /
4479 : // +----+-----foo---------------->
4480 : // /
4481 : // ----main--+-------------------------->
4482 : // \
4483 : // +-----baz-------->
4484 : //
4485 : //
4486 : // 1. Grab 'gc_cs' mutex to prevent new timelines from being created while Timeline's
4487 : // `gc_infos` are being refreshed
4488 : // 2. Scan collected timelines, and on each timeline, make note of
4489 : // all the points where other timelines have been branched off.
4490 : // We will refrain from removing page versions at those LSNs.
4491 : // 3. For each timeline, scan all layer files on the timeline.
4492 : // Remove all files for which a newer file exists and which
4493 : // don't cover any branch point LSNs.
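 : //
 : // Illustrative example (hypothetical LSNs): if branch 'foo' was created off 'main'
 : // at LSN 0/4000000 while main's GC horizon would otherwise allow removal up to
 : // 0/7000000, step 2 records 0/4000000 as a branch point of 'main', and step 3
 : // keeps the latest layer covering 0/4000000 even though it lies below the horizon.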
4494 : //
4495 : // TODO:
4496 : // - if a relation has a non-incremental persistent layer on a child branch, then we
4497 : // don't need to keep that in the parent anymore. But currently
4498 : // we do.
4499 2 : async fn gc_iteration_internal(
4500 2 : &self,
4501 2 : target_timeline_id: Option<TimelineId>,
4502 2 : horizon: u64,
4503 2 : pitr: Duration,
4504 2 : cancel: &CancellationToken,
4505 2 : ctx: &RequestContext,
4506 2 : ) -> Result<GcResult, GcError> {
4507 2 : let mut totals: GcResult = Default::default();
4508 2 : let now = Instant::now();
4509 :
4510 2 : let gc_timelines = self
4511 2 : .refresh_gc_info_internal(target_timeline_id, horizon, pitr, cancel, ctx)
4512 2 : .await?;
4513 :
4514 2 : failpoint_support::sleep_millis_async!("gc_iteration_internal_after_getting_gc_timelines");
4515 :
4516 : // If there is nothing to GC, we don't want any messages in the INFO log.
4517 2 : if !gc_timelines.is_empty() {
4518 2 : info!("{} timelines need GC", gc_timelines.len());
4519 : } else {
4520 0 : debug!("{} timelines need GC", gc_timelines.len());
4521 : }
4522 :
4523 : // Perform GC for each timeline.
4524 : //
4525 : // Note that we don't hold the `TenantShard::gc_cs` lock here because we don't want to delay the
4526 : // branch creation task, which requires the GC lock. A GC iteration can run concurrently
4527 : // with branch creation.
4528 : //
4529 : // See comments in [`TenantShard::branch_timeline`] for more information about why the branch
4530 : // creation task can run concurrently with a timeline's GC iteration.
4531 4 : for timeline in gc_timelines {
4532 2 : if cancel.is_cancelled() {
4533 : // We were requested to shut down. Stop and return with the progress we
4534 : // made.
4535 0 : break;
4536 2 : }
4537 2 : let result = match timeline.gc().await {
4538 : Err(GcError::TimelineCancelled) => {
4539 0 : if target_timeline_id.is_some() {
4540 : // If we were targeting this specific timeline, surface cancellation to the caller
4541 0 : return Err(GcError::TimelineCancelled);
4542 : } else {
4543 : // A timeline may be shutting down independently of the tenant's lifecycle: we should
4544 : // skip past this and proceed to try GC on other timelines.
4545 0 : continue;
4546 : }
4547 : }
4548 2 : r => r?,
4549 : };
4550 2 : totals += result;
4551 : }
4552 :
4553 2 : totals.elapsed = now.elapsed();
4554 2 : Ok(totals)
4555 2 : }
4556 :
4557 : /// Refreshes the Timeline::gc_info for all timelines, returning the
4558 : /// vector of timelines which have [`Timeline::get_last_record_lsn`] past
4559 : /// [`TenantShard::get_gc_horizon`].
4560 : ///
4561 : /// This is usually executed as part of periodic gc, but can now be triggered more often.
4562 2 : pub(crate) async fn refresh_gc_info(
4563 2 : &self,
4564 2 : cancel: &CancellationToken,
4565 2 : ctx: &RequestContext,
4566 2 : ) -> Result<Vec<Arc<Timeline>>, GcError> {
4567 2 : // since this method can now be called at different rates than the configured gc loop, these
4568 2 : // configuration values might be applied sooner than they were previously, when they were
4569 2 : // only read from the gc task.
4570 2 : let horizon = self.get_gc_horizon();
4571 2 : let pitr = self.get_pitr_interval();
4572 2 :
4573 2 : // refresh all timelines
4574 2 : let target_timeline_id = None;
4575 2 :
4576 2 : self.refresh_gc_info_internal(target_timeline_id, horizon, pitr, cancel, ctx)
4577 2 : .await
4578 2 : }
4579 :
4580 : /// Populate all Timelines' `GcInfo` with information about their children. We do not set the
4581 : /// PITR cutoffs here, because that requires I/O: this is done later, before GC, by [`Self::refresh_gc_info_internal`]
4582 : ///
4583 : /// Subsequently, parent-child relationships are updated incrementally inside [`Timeline::new`] and [`Timeline::drop`].
4584 0 : fn initialize_gc_info(
4585 0 : &self,
4586 0 : timelines: &std::sync::MutexGuard<HashMap<TimelineId, Arc<Timeline>>>,
4587 0 : timelines_offloaded: &std::sync::MutexGuard<HashMap<TimelineId, Arc<OffloadedTimeline>>>,
4588 0 : restrict_to_timeline: Option<TimelineId>,
4589 0 : ) {
4590 0 : if restrict_to_timeline.is_none() {
4591 : // This function must be called before activation: after activation, timeline create/delete operations
4592 : // might happen, and this function is not safe to run concurrently with those.
4593 0 : assert!(!self.is_active());
4594 0 : }
4595 :
4596 : // Scan all timelines. For each timeline, remember the timeline ID and
4597 : // the branch point where it was created.
4598 0 : let mut all_branchpoints: BTreeMap<TimelineId, Vec<(Lsn, TimelineId, MaybeOffloaded)>> =
4599 0 : BTreeMap::new();
4600 0 : timelines.iter().for_each(|(timeline_id, timeline_entry)| {
4601 0 : if let Some(ancestor_timeline_id) = &timeline_entry.get_ancestor_timeline_id() {
4602 0 : let ancestor_children = all_branchpoints.entry(*ancestor_timeline_id).or_default();
4603 0 : ancestor_children.push((
4604 0 : timeline_entry.get_ancestor_lsn(),
4605 0 : *timeline_id,
4606 0 : MaybeOffloaded::No,
4607 0 : ));
4608 0 : }
4609 0 : });
4610 0 : timelines_offloaded
4611 0 : .iter()
4612 0 : .for_each(|(timeline_id, timeline_entry)| {
4613 0 : let Some(ancestor_timeline_id) = &timeline_entry.ancestor_timeline_id else {
4614 0 : return;
4615 : };
4616 0 : let Some(retain_lsn) = timeline_entry.ancestor_retain_lsn else {
4617 0 : return;
4618 : };
4619 0 : let ancestor_children = all_branchpoints.entry(*ancestor_timeline_id).or_default();
4620 0 : ancestor_children.push((retain_lsn, *timeline_id, MaybeOffloaded::Yes));
4621 0 : });
4622 0 :
4623 0 : // The number of bytes we always keep, irrespective of PITR: this is a constant across timelines
4624 0 : let horizon = self.get_gc_horizon();
4625 :
4626 : // Populate each timeline's GcInfo with information about its child branches
4627 0 : let timelines_to_write = if let Some(timeline_id) = restrict_to_timeline {
4628 0 : itertools::Either::Left(timelines.get(&timeline_id).into_iter())
4629 : } else {
4630 0 : itertools::Either::Right(timelines.values())
4631 : };
4632 0 : for timeline in timelines_to_write {
4633 0 : let mut branchpoints: Vec<(Lsn, TimelineId, MaybeOffloaded)> = all_branchpoints
4634 0 : .remove(&timeline.timeline_id)
4635 0 : .unwrap_or_default();
4636 0 :
4637 0 : branchpoints.sort_by_key(|b| b.0);
4638 0 :
4639 0 : let mut target = timeline.gc_info.write().unwrap();
4640 0 :
4641 0 : target.retain_lsns = branchpoints;
4642 0 :
4643 0 : let space_cutoff = timeline
4644 0 : .get_last_record_lsn()
4645 0 : .checked_sub(horizon)
4646 0 : .unwrap_or(Lsn(0));
4647 0 :
4648 0 : target.cutoffs = GcCutoffs {
4649 0 : space: space_cutoff,
4650 0 : time: None,
4651 0 : };
4652 0 : }
4653 0 : }
4654 :
4655 4 : async fn refresh_gc_info_internal(
4656 4 : &self,
4657 4 : target_timeline_id: Option<TimelineId>,
4658 4 : horizon: u64,
4659 4 : pitr: Duration,
4660 4 : cancel: &CancellationToken,
4661 4 : ctx: &RequestContext,
4662 4 : ) -> Result<Vec<Arc<Timeline>>, GcError> {
4663 4 : // before taking the gc_cs lock, do the heavier-weight work of finding gc_cutoff points for
4664 4 : // currently visible timelines.
4665 4 : let timelines = self
4666 4 : .timelines
4667 4 : .lock()
4668 4 : .unwrap()
4669 4 : .values()
4670 10 : .filter(|tl| match target_timeline_id.as_ref() {
4671 2 : Some(target) => &tl.timeline_id == target,
4672 8 : None => true,
4673 10 : })
4674 4 : .cloned()
4675 4 : .collect::<Vec<_>>();
4676 4 :
4677 4 : if target_timeline_id.is_some() && timelines.is_empty() {
4678 : // We were asked to act on a particular timeline and it wasn't found
4679 0 : return Err(GcError::TimelineNotFound);
4680 4 : }
4681 4 :
4682 4 : let mut gc_cutoffs: HashMap<TimelineId, GcCutoffs> =
4683 4 : HashMap::with_capacity(timelines.len());
4684 4 :
4685 4 : // Ensures all timelines use the same start time when computing the time cutoff.
4686 4 : let now_ts_for_pitr_calc = SystemTime::now();
4687 10 : for timeline in timelines.iter() {
4688 10 : let ctx = &ctx.with_scope_timeline(timeline);
4689 10 : let cutoff = timeline
4690 10 : .get_last_record_lsn()
4691 10 : .checked_sub(horizon)
4692 10 : .unwrap_or(Lsn(0));
4693 :
4694 10 : let cutoffs = timeline
4695 10 : .find_gc_cutoffs(now_ts_for_pitr_calc, cutoff, pitr, cancel, ctx)
4696 10 : .await?;
4697 10 : let old = gc_cutoffs.insert(timeline.timeline_id, cutoffs);
4698 10 : assert!(old.is_none());
4699 : }
4700 :
4701 4 : if !self.is_active() || self.cancel.is_cancelled() {
4702 0 : return Err(GcError::TenantCancelled);
4703 4 : }
4704 :
4705 : // grab mutex to prevent new timelines from being created here; avoid doing long operations
4706 : // because that will stall branch creation.
4707 4 : let gc_cs = self.gc_cs.lock().await;
4708 :
4709 : // Ok, we now know all the branch points.
4710 : // Update the GC information for each timeline.
4711 4 : let mut gc_timelines = Vec::with_capacity(timelines.len());
4712 14 : for timeline in timelines {
4713 : // We filtered the timeline list above
4714 10 : if let Some(target_timeline_id) = target_timeline_id {
4715 2 : assert_eq!(target_timeline_id, timeline.timeline_id);
4716 8 : }
4717 :
4718 : {
4719 10 : let mut target = timeline.gc_info.write().unwrap();
4720 10 :
4721 10 : // Cull any expired leases
4722 10 : let now = SystemTime::now();
4723 10 : target.leases.retain(|_, lease| !lease.is_expired(&now));
4724 10 :
4725 10 : timeline
4726 10 : .metrics
4727 10 : .valid_lsn_lease_count_gauge
4728 10 : .set(target.leases.len() as u64);
4729 :
4730 : // Look up parent's PITR cutoff to update the child's knowledge of whether it is within parent's PITR
4731 10 : if let Some(ancestor_id) = timeline.get_ancestor_timeline_id() {
4732 6 : if let Some(ancestor_gc_cutoffs) = gc_cutoffs.get(&ancestor_id) {
4733 6 : target.within_ancestor_pitr =
4734 6 : Some(timeline.get_ancestor_lsn()) >= ancestor_gc_cutoffs.time;
4735 6 : }
4736 4 : }
4737 :
4738 : // Update metrics that depend on GC state
4739 10 : timeline
4740 10 : .metrics
4741 10 : .archival_size
4742 10 : .set(if target.within_ancestor_pitr {
4743 0 : timeline.metrics.current_logical_size_gauge.get()
4744 : } else {
4745 10 : 0
4746 : });
4747 10 : if let Some(time_cutoff) = target.cutoffs.time {
4748 4 : timeline.metrics.pitr_history_size.set(
4749 4 : timeline
4750 4 : .get_last_record_lsn()
4751 4 : .checked_sub(time_cutoff)
4752 4 : .unwrap_or_default()
4753 4 : .0,
4754 4 : );
4755 6 : }
4756 :
4757 : // Apply the cutoffs we found to the Timeline's GcInfo. Why might we _not_ have cutoffs for a timeline?
4758 : // - this timeline was created while we were finding cutoffs
4759 : // - lsn for timestamp search fails for this timeline repeatedly
4760 10 : if let Some(cutoffs) = gc_cutoffs.get(&timeline.timeline_id) {
4761 10 : let original_cutoffs = target.cutoffs.clone();
4762 10 : // GC cutoffs should never go back
4763 10 : target.cutoffs = GcCutoffs {
4764 10 : space: cutoffs.space.max(original_cutoffs.space),
4765 10 : time: cutoffs.time.max(original_cutoffs.time),
4766 10 : }
4767 0 : }
4768 : }
4769 :
4770 10 : gc_timelines.push(timeline);
4771 : }
4772 4 : drop(gc_cs);
4773 4 : Ok(gc_timelines)
4774 4 : }
4775 :
4776 : /// A substitute for `branch_timeline` for use in unit tests.
4777 : /// The returned timeline will have state value `Active` to make various `anyhow::ensure!()`
4778 : /// calls pass, but we do not actually call `.activate()` under the hood. So, none of the
4779 : /// timeline background tasks are launched, except the flush loop.
4780 : #[cfg(test)]
4781 119 : async fn branch_timeline_test(
4782 119 : self: &Arc<Self>,
4783 119 : src_timeline: &Arc<Timeline>,
4784 119 : dst_id: TimelineId,
4785 119 : ancestor_lsn: Option<Lsn>,
4786 119 : ctx: &RequestContext,
4787 119 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
4788 119 : let tl = self
4789 119 : .branch_timeline_impl(src_timeline, dst_id, ancestor_lsn, ctx)
4790 119 : .await?
4791 117 : .into_timeline_for_test();
4792 117 : tl.set_state(TimelineState::Active);
4793 117 : Ok(tl)
4794 119 : }
4795 :
4796 : /// Helper for unit tests to branch a timeline with some pre-loaded states.
4797 : #[cfg(test)]
4798 : #[allow(clippy::too_many_arguments)]
4799 6 : pub async fn branch_timeline_test_with_layers(
4800 6 : self: &Arc<Self>,
4801 6 : src_timeline: &Arc<Timeline>,
4802 6 : dst_id: TimelineId,
4803 6 : ancestor_lsn: Option<Lsn>,
4804 6 : ctx: &RequestContext,
4805 6 : delta_layer_desc: Vec<timeline::DeltaLayerTestDesc>,
4806 6 : image_layer_desc: Vec<(Lsn, Vec<(pageserver_api::key::Key, bytes::Bytes)>)>,
4807 6 : end_lsn: Lsn,
4808 6 : ) -> anyhow::Result<Arc<Timeline>> {
4809 : use checks::check_valid_layermap;
4810 : use itertools::Itertools;
4811 :
4812 6 : let tline = self
4813 6 : .branch_timeline_test(src_timeline, dst_id, ancestor_lsn, ctx)
4814 6 : .await?;
4815 6 : let ancestor_lsn = if let Some(ancestor_lsn) = ancestor_lsn {
4816 6 : ancestor_lsn
4817 : } else {
4818 0 : tline.get_last_record_lsn()
4819 : };
4820 6 : assert!(end_lsn >= ancestor_lsn);
4821 6 : tline.force_advance_lsn(end_lsn);
4822 9 : for deltas in delta_layer_desc {
4823 3 : tline
4824 3 : .force_create_delta_layer(deltas, Some(ancestor_lsn), ctx)
4825 3 : .await?;
4826 : }
4827 8 : for (lsn, images) in image_layer_desc {
4828 2 : tline
4829 2 : .force_create_image_layer(lsn, images, Some(ancestor_lsn), ctx)
4830 2 : .await?;
4831 : }
4832 6 : let layer_names = tline
4833 6 : .layers
4834 6 : .read()
4835 6 : .await
4836 6 : .layer_map()
4837 6 : .unwrap()
4838 6 : .iter_historic_layers()
4839 6 : .map(|layer| layer.layer_name())
4840 6 : .collect_vec();
4841 6 : if let Some(err) = check_valid_layermap(&layer_names) {
4842 0 : bail!("invalid layermap: {err}");
4843 6 : }
4844 6 : Ok(tline)
4845 6 : }
4846 :
4847 : /// Branch an existing timeline.
4848 0 : async fn branch_timeline(
4849 0 : self: &Arc<Self>,
4850 0 : src_timeline: &Arc<Timeline>,
4851 0 : dst_id: TimelineId,
4852 0 : start_lsn: Option<Lsn>,
4853 0 : ctx: &RequestContext,
4854 0 : ) -> Result<CreateTimelineResult, CreateTimelineError> {
4855 0 : self.branch_timeline_impl(src_timeline, dst_id, start_lsn, ctx)
4856 0 : .await
4857 0 : }
4858 :
4859 119 : async fn branch_timeline_impl(
4860 119 : self: &Arc<Self>,
4861 119 : src_timeline: &Arc<Timeline>,
4862 119 : dst_id: TimelineId,
4863 119 : start_lsn: Option<Lsn>,
4864 119 : ctx: &RequestContext,
4865 119 : ) -> Result<CreateTimelineResult, CreateTimelineError> {
4866 119 : let src_id = src_timeline.timeline_id;
4867 :
4868 : // We will validate our ancestor LSN in this function. Acquire the GC lock so that
4869 : // this check cannot race with GC, and the ancestor LSN is guaranteed to remain
4870 : // valid while we are creating the branch.
4871 119 : let _gc_cs = self.gc_cs.lock().await;
4872 :
4873 : // If no start LSN is specified, we branch the new timeline from the source timeline's last record LSN
4874 119 : let start_lsn = start_lsn.unwrap_or_else(|| {
4875 1 : let lsn = src_timeline.get_last_record_lsn();
4876 1 : info!("branching timeline {dst_id} from timeline {src_id} at last record LSN: {lsn}");
4877 1 : lsn
4878 119 : });
4879 :
4880 : // we have finally determined the ancestor_start_lsn, so we can claim exclusivity now
4881 119 : let timeline_create_guard = match self
4882 119 : .start_creating_timeline(
4883 119 : dst_id,
4884 119 : CreateTimelineIdempotency::Branch {
4885 119 : ancestor_timeline_id: src_timeline.timeline_id,
4886 119 : ancestor_start_lsn: start_lsn,
4887 119 : },
4888 119 : )
4889 119 : .await?
4890 : {
4891 119 : StartCreatingTimelineResult::CreateGuard(guard) => guard,
4892 0 : StartCreatingTimelineResult::Idempotent(timeline) => {
4893 0 : return Ok(CreateTimelineResult::Idempotent(timeline));
4894 : }
4895 : };
4896 :
4897 : // Ensure that `start_lsn` is valid, i.e. the LSN is within the PITR
4898 : // horizon on the source timeline
4899 : //
4900 : // We check it against both the planned GC cutoff stored in 'gc_info',
4901 : // and the 'latest_gc_cutoff' of the last GC that was performed. The
4902 : // planned GC cutoff in 'gc_info' is normally larger than
4903 : // 'applied_gc_cutoff_lsn', but beware of corner cases like if you just
4904 : // changed the GC settings for the tenant to make the PITR window
4905 : // larger, but some of the data was already removed by an earlier GC
4906 : // iteration.
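 : //
 : // Illustrative example (hypothetical LSNs): if an earlier GC already advanced
 : // 'applied_gc_cutoff_lsn' to 0/8000000 and the PITR window was then enlarged so the
 : // planned cutoff in 'gc_info' dropped to 0/5000000, a branch request at 0/6000000
 : // would pass the planned-cutoff check even though its data is already gone; the
 : // check against 'applied_gc_cutoff_lsn' below catches that case.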
4907 :
4908 : // check against last actual 'latest_gc_cutoff' first
4909 119 : let applied_gc_cutoff_lsn = src_timeline.get_applied_gc_cutoff_lsn();
4910 119 : {
4911 119 : let gc_info = src_timeline.gc_info.read().unwrap();
4912 119 : let planned_cutoff = gc_info.min_cutoff();
4913 119 : if gc_info.lsn_covered_by_lease(start_lsn) {
4914 0 : tracing::info!(
4915 0 : "skipping comparison of {start_lsn} with gc cutoff {} and planned gc cutoff {planned_cutoff} due to lsn lease",
4916 0 : *applied_gc_cutoff_lsn
4917 : );
4918 : } else {
4919 119 : src_timeline
4920 119 : .check_lsn_is_in_scope(start_lsn, &applied_gc_cutoff_lsn)
4921 119 : .context(format!(
4922 119 : "invalid branch start lsn: less than latest GC cutoff {}",
4923 119 : *applied_gc_cutoff_lsn,
4924 119 : ))
4925 119 : .map_err(CreateTimelineError::AncestorLsn)?;
4926 :
4927 : // and then the planned GC cutoff
4928 117 : if start_lsn < planned_cutoff {
4929 0 : return Err(CreateTimelineError::AncestorLsn(anyhow::anyhow!(
4930 0 : "invalid branch start lsn: less than planned GC cutoff {planned_cutoff}"
4931 0 : )));
4932 117 : }
4933 : }
4934 : }
4935 :
4936 : //
4937 : // The branch point is valid, and we are still holding the 'gc_cs' lock
4938 : // so that GC cannot advance the GC cutoff until we are finished.
4939 : // Proceed with the branch creation.
4940 : //
4941 :
4942 : // Determine prev-LSN for the new timeline. We can only determine it if
4943 : // the timeline was branched at the current end of the source timeline.
4944 : let RecordLsn {
4945 117 : last: src_last,
4946 117 : prev: src_prev,
4947 117 : } = src_timeline.get_last_record_rlsn();
4948 117 : let dst_prev = if src_last == start_lsn {
4949 108 : Some(src_prev)
4950 : } else {
4951 9 : None
4952 : };
4953 :
4954 : // Create the metadata file, noting the ancestor of the new timeline.
4955 : // There is initially no data in it, but all the read-calls know to look
4956 : // into the ancestor.
4957 117 : let metadata = TimelineMetadata::new(
4958 117 : start_lsn,
4959 117 : dst_prev,
4960 117 : Some(src_id),
4961 117 : start_lsn,
4962 117 : *src_timeline.applied_gc_cutoff_lsn.read(), // FIXME: should we hold onto this guard longer?
4963 117 : src_timeline.initdb_lsn,
4964 117 : src_timeline.pg_version,
4965 117 : );
4966 :
4967 117 : let (uninitialized_timeline, _timeline_ctx) = self
4968 117 : .prepare_new_timeline(
4969 117 : dst_id,
4970 117 : &metadata,
4971 117 : timeline_create_guard,
4972 117 : start_lsn + 1,
4973 117 : Some(Arc::clone(src_timeline)),
4974 117 : Some(src_timeline.get_rel_size_v2_status()),
4975 117 : ctx,
4976 117 : )
4977 117 : .await?;
4978 :
4979 117 : let new_timeline = uninitialized_timeline.finish_creation().await?;
4980 :
4981 : // Root timeline gets its layers during creation and uploads them along with the metadata.
4982 : // A branch timeline, though, may get no writes for some time after creation, and hence won't get any layers created.
4983 : // We still need to upload its metadata eagerly: if other nodes `attach` the tenant and miss this timeline, their GC
4984 : // could get incorrect information and remove more layers than needed.
4985 : // See also https://github.com/neondatabase/neon/issues/3865
4986 117 : new_timeline
4987 117 : .remote_client
4988 117 : .schedule_index_upload_for_full_metadata_update(&metadata)
4989 117 : .context("branch initial metadata upload")?;
4990 :
4991 : // Callers are responsible to wait for uploads to complete and for activating the timeline.
4992 :
4993 117 : Ok(CreateTimelineResult::Created(new_timeline))
4994 119 : }
4995 :
4996 : /// For unit tests, make this visible so that other modules can directly create timelines
4997 : #[cfg(test)]
4998 : #[tracing::instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), %timeline_id))]
4999 : pub(crate) async fn bootstrap_timeline_test(
5000 : self: &Arc<Self>,
5001 : timeline_id: TimelineId,
5002 : pg_version: u32,
5003 : load_existing_initdb: Option<TimelineId>,
5004 : ctx: &RequestContext,
5005 : ) -> anyhow::Result<Arc<Timeline>> {
5006 : self.bootstrap_timeline(timeline_id, pg_version, load_existing_initdb, ctx)
5007 : .await
5008 : .map_err(anyhow::Error::new)
5009 1 : .map(|r| r.into_timeline_for_test())
5010 : }
5011 :
5012 : /// Get exclusive access to the timeline ID for creation.
5013 : ///
5014 : /// Timeline-creating code paths must use this function before making changes
5015 : /// to in-memory or persistent state.
5016 : ///
5017 : /// The `idempotency` parameter is a description of the timeline creation operation
5018 : /// we intend to perform.
5019 : /// If the timeline was already created in the meantime, we check whether this
5020 : /// request conflicts or is idempotent, based on `idempotency`.
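 : ///
 : /// For example (illustrative): a retried `CreateTimelineIdempotency::Branch` request whose
 : /// ancestor and start LSN match the already-created timeline is treated as idempotent and
 : /// returns that timeline, while a request whose idempotency value differs (or that uses
 : /// `FailWithConflict`) fails with `CreateTimelineError::Conflict`.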
5021 233 : async fn start_creating_timeline(
5022 233 : self: &Arc<Self>,
5023 233 : new_timeline_id: TimelineId,
5024 233 : idempotency: CreateTimelineIdempotency,
5025 233 : ) -> Result<StartCreatingTimelineResult, CreateTimelineError> {
5026 233 : let allow_offloaded = false;
5027 233 : match self.create_timeline_create_guard(new_timeline_id, idempotency, allow_offloaded) {
5028 232 : Ok(create_guard) => {
5029 232 : pausable_failpoint!("timeline-creation-after-uninit");
5030 232 : Ok(StartCreatingTimelineResult::CreateGuard(create_guard))
5031 : }
5032 0 : Err(TimelineExclusionError::ShuttingDown) => Err(CreateTimelineError::ShuttingDown),
5033 : Err(TimelineExclusionError::AlreadyCreating) => {
5034 : // Creation is in progress, we cannot create it again, and we cannot
5035 : // check if this request matches the existing one, so caller must try
5036 : // again later.
5037 0 : Err(CreateTimelineError::AlreadyCreating)
5038 : }
5039 0 : Err(TimelineExclusionError::Other(e)) => Err(CreateTimelineError::Other(e)),
5040 : Err(TimelineExclusionError::AlreadyExists {
5041 0 : existing: TimelineOrOffloaded::Offloaded(_existing),
5042 0 : ..
5043 0 : }) => {
5044 0 : info!("timeline already exists but is offloaded");
5045 0 : Err(CreateTimelineError::Conflict)
5046 : }
5047 : Err(TimelineExclusionError::AlreadyExists {
5048 1 : existing: TimelineOrOffloaded::Timeline(existing),
5049 1 : arg,
5050 1 : }) => {
5051 1 : {
5052 1 : let existing = &existing.create_idempotency;
5053 1 : let _span = info_span!("idempotency_check", ?existing, ?arg).entered();
5054 1 : debug!("timeline already exists");
5055 :
5056 1 : match (existing, &arg) {
5057 : // FailWithConflict => no idempotency check
5058 : (CreateTimelineIdempotency::FailWithConflict, _)
5059 : | (_, CreateTimelineIdempotency::FailWithConflict) => {
5060 1 : warn!("timeline already exists, failing request");
5061 1 : return Err(CreateTimelineError::Conflict);
5062 : }
5063 : // Idempotent <=> CreateTimelineIdempotency is identical
5064 0 : (x, y) if x == y => {
5065 0 : info!(
5066 0 : "timeline already exists and idempotency matches, succeeding request"
5067 : );
5068 : // fallthrough
5069 : }
5070 : (_, _) => {
5071 0 : warn!("idempotency conflict, failing request");
5072 0 : return Err(CreateTimelineError::Conflict);
5073 : }
5074 : }
5075 : }
5076 :
5077 0 : Ok(StartCreatingTimelineResult::Idempotent(existing))
5078 : }
5079 : }
5080 233 : }
5081 :
5082 0 : async fn upload_initdb(
5083 0 : &self,
5084 0 : timelines_path: &Utf8PathBuf,
5085 0 : pgdata_path: &Utf8PathBuf,
5086 0 : timeline_id: &TimelineId,
5087 0 : ) -> anyhow::Result<()> {
5088 0 : let temp_path = timelines_path.join(format!(
5089 0 : "{INITDB_PATH}.upload-{timeline_id}.{TEMP_FILE_SUFFIX}"
5090 0 : ));
5091 0 :
5092 0 : scopeguard::defer! {
5093 0 : if let Err(e) = fs::remove_file(&temp_path) {
5094 0 : error!("Failed to remove temporary initdb archive '{temp_path}': {e}");
5095 0 : }
5096 0 : }
5097 :
5098 0 : let (pgdata_zstd, tar_zst_size) = create_zst_tarball(pgdata_path, &temp_path).await?;
5099 : const INITDB_TAR_ZST_WARN_LIMIT: u64 = 2 * 1024 * 1024;
5100 0 : if tar_zst_size > INITDB_TAR_ZST_WARN_LIMIT {
5101 0 : warn!(
5102 0 : "compressed {temp_path} size of {tar_zst_size} is above limit {INITDB_TAR_ZST_WARN_LIMIT}."
5103 : );
5104 0 : }
5105 :
5106 0 : pausable_failpoint!("before-initdb-upload");
5107 :
5108 0 : backoff::retry(
5109 0 : || async {
5110 0 : self::remote_timeline_client::upload_initdb_dir(
5111 0 : &self.remote_storage,
5112 0 : &self.tenant_shard_id.tenant_id,
5113 0 : timeline_id,
5114 0 : pgdata_zstd.try_clone().await?,
5115 0 : tar_zst_size,
5116 0 : &self.cancel,
5117 0 : )
5118 0 : .await
5119 0 : },
5120 0 : |_| false,
5121 0 : 3,
5122 0 : u32::MAX,
5123 0 : "persist_initdb_tar_zst",
5124 0 : &self.cancel,
5125 0 : )
5126 0 : .await
5127 0 : .ok_or_else(|| anyhow::Error::new(TimeoutOrCancel::Cancel))
5128 0 : .and_then(|x| x)
5129 0 : }
5130 :
5131 : /// - run initdb to init a temporary instance and get bootstrap data
5132 : /// - after initialization completes, tar up the temp dir and upload it to S3.
5133 1 : async fn bootstrap_timeline(
5134 1 : self: &Arc<Self>,
5135 1 : timeline_id: TimelineId,
5136 1 : pg_version: u32,
5137 1 : load_existing_initdb: Option<TimelineId>,
5138 1 : ctx: &RequestContext,
5139 1 : ) -> Result<CreateTimelineResult, CreateTimelineError> {
5140 1 : let timeline_create_guard = match self
5141 1 : .start_creating_timeline(
5142 1 : timeline_id,
5143 1 : CreateTimelineIdempotency::Bootstrap { pg_version },
5144 1 : )
5145 1 : .await?
5146 : {
5147 1 : StartCreatingTimelineResult::CreateGuard(guard) => guard,
5148 0 : StartCreatingTimelineResult::Idempotent(timeline) => {
5149 0 : return Ok(CreateTimelineResult::Idempotent(timeline));
5150 : }
5151 : };
5152 :
5153 : // create a `tenant/{tenant_id}/timelines/basebackup-{timeline_id}.{TEMP_FILE_SUFFIX}/`
5154 : // temporary directory for basebackup files for the given timeline.
5155 :
5156 1 : let timelines_path = self.conf.timelines_path(&self.tenant_shard_id);
5157 1 : let pgdata_path = path_with_suffix_extension(
5158 1 : timelines_path.join(format!("basebackup-{timeline_id}")),
5159 1 : TEMP_FILE_SUFFIX,
5160 1 : );
5161 1 :
5162 1 : // Remove whatever was left from the previous runs: safe because TimelineCreateGuard guarantees
5163 1 : // we won't race with other creations or existing timelines with the same path.
5164 1 : if pgdata_path.exists() {
5165 0 : fs::remove_dir_all(&pgdata_path).with_context(|| {
5166 0 : format!("Failed to remove already existing initdb directory: {pgdata_path}")
5167 0 : })?;
5168 0 : tracing::info!("removed previous attempt's temporary initdb directory '{pgdata_path}'");
5169 1 : }
5170 :
5171 1 : // this new directory is very temporary: arrange to remove it immediately after bootstrap, since we don't need it afterwards
5172 1 : let pgdata_path_deferred = pgdata_path.clone();
5173 1 : scopeguard::defer! {
5174 1 : if let Err(e) = fs::remove_dir_all(&pgdata_path_deferred).or_else(fs_ext::ignore_not_found) {
5175 1 : // this is unlikely, but we will remove the directory on pageserver restart or another bootstrap call
5176 1 : error!("Failed to remove temporary initdb directory '{pgdata_path_deferred}': {e}");
5177 1 : } else {
5178 1 : tracing::info!("removed temporary initdb directory '{pgdata_path_deferred}'");
5179 1 : }
5180 1 : }
5181 1 : if let Some(existing_initdb_timeline_id) = load_existing_initdb {
5182 1 : if existing_initdb_timeline_id != timeline_id {
5183 0 : let source_path = &remote_initdb_archive_path(
5184 0 : &self.tenant_shard_id.tenant_id,
5185 0 : &existing_initdb_timeline_id,
5186 0 : );
5187 0 : let dest_path =
5188 0 : &remote_initdb_archive_path(&self.tenant_shard_id.tenant_id, &timeline_id);
5189 0 :
5190 0 : // if this fails, it will get retried by retried control plane requests
5191 0 : self.remote_storage
5192 0 : .copy_object(source_path, dest_path, &self.cancel)
5193 0 : .await
5194 0 : .context("copy initdb tar")?;
5195 1 : }
5196 1 : let (initdb_tar_zst_path, initdb_tar_zst) =
5197 1 : self::remote_timeline_client::download_initdb_tar_zst(
5198 1 : self.conf,
5199 1 : &self.remote_storage,
5200 1 : &self.tenant_shard_id,
5201 1 : &existing_initdb_timeline_id,
5202 1 : &self.cancel,
5203 1 : )
5204 1 : .await
5205 1 : .context("download initdb tar")?;
5206 :
5207 1 : scopeguard::defer! {
5208 1 : if let Err(e) = fs::remove_file(&initdb_tar_zst_path) {
5209 1 : error!("Failed to remove temporary initdb archive '{initdb_tar_zst_path}': {e}");
5210 1 : }
5211 1 : }
5212 1 :
5213 1 : let buf_read =
5214 1 : BufReader::with_capacity(remote_timeline_client::BUFFER_SIZE, initdb_tar_zst);
5215 1 : extract_zst_tarball(&pgdata_path, buf_read)
5216 1 : .await
5217 1 : .context("extract initdb tar")?;
5218 : } else {
5219 : // Init a temporary repo to get bootstrap data; this creates a directory at the `pgdata_path` path
5220 0 : run_initdb(self.conf, &pgdata_path, pg_version, &self.cancel)
5221 0 : .await
5222 0 : .context("run initdb")?;
5223 :
5224 : // Upload the created data dir to S3
5225 0 : if self.tenant_shard_id().is_shard_zero() {
5226 0 : self.upload_initdb(&timelines_path, &pgdata_path, &timeline_id)
5227 0 : .await?;
5228 0 : }
5229 : }
5230 1 : let pgdata_lsn = import_datadir::get_lsn_from_controlfile(&pgdata_path)?.align();
5231 1 :
5232 1 : // Import the contents of the data directory at the initial checkpoint
5233 1 : // LSN, and any WAL after that.
5234 1 : // Initdb lsn will be equal to last_record_lsn which will be set after import.
5235 1 : // Because we know it upfront, avoid having an option or a dummy zero value by passing it to the metadata.
5236 1 : let new_metadata = TimelineMetadata::new(
5237 1 : Lsn(0),
5238 1 : None,
5239 1 : None,
5240 1 : Lsn(0),
5241 1 : pgdata_lsn,
5242 1 : pgdata_lsn,
5243 1 : pg_version,
5244 1 : );
5245 1 : let (mut raw_timeline, timeline_ctx) = self
5246 1 : .prepare_new_timeline(
5247 1 : timeline_id,
5248 1 : &new_metadata,
5249 1 : timeline_create_guard,
5250 1 : pgdata_lsn,
5251 1 : None,
5252 1 : None,
5253 1 : ctx,
5254 1 : )
5255 1 : .await?;
5256 :
5257 1 : let tenant_shard_id = raw_timeline.owning_tenant.tenant_shard_id;
5258 1 : raw_timeline
5259 1 : .write(|unfinished_timeline| async move {
5260 1 : import_datadir::import_timeline_from_postgres_datadir(
5261 1 : &unfinished_timeline,
5262 1 : &pgdata_path,
5263 1 : pgdata_lsn,
5264 1 : &timeline_ctx,
5265 1 : )
5266 1 : .await
5267 1 : .with_context(|| {
5268 0 : format!(
5269 0 : "Failed to import pgdatadir for timeline {tenant_shard_id}/{timeline_id}"
5270 0 : )
5271 1 : })?;
5272 :
5273 1 : fail::fail_point!("before-checkpoint-new-timeline", |_| {
5274 0 : Err(CreateTimelineError::Other(anyhow::anyhow!(
5275 0 : "failpoint before-checkpoint-new-timeline"
5276 0 : )))
5277 1 : });
5278 :
5279 1 : Ok(())
5280 2 : })
5281 1 : .await?;
5282 :
5283 : // All done!
5284 1 : let timeline = raw_timeline.finish_creation().await?;
5285 :
5286 : // Callers are responsible to wait for uploads to complete and for activating the timeline.
5287 :
5288 1 : Ok(CreateTimelineResult::Created(timeline))
5289 1 : }
5290 :
5291 230 : fn build_timeline_remote_client(&self, timeline_id: TimelineId) -> RemoteTimelineClient {
5292 230 : RemoteTimelineClient::new(
5293 230 : self.remote_storage.clone(),
5294 230 : self.deletion_queue_client.clone(),
5295 230 : self.conf,
5296 230 : self.tenant_shard_id,
5297 230 : timeline_id,
5298 230 : self.generation,
5299 230 : &self.tenant_conf.load().location,
5300 230 : )
5301 230 : }
5302 :
5303 : /// Builds required resources for a new timeline.
5304 230 : fn build_timeline_resources(&self, timeline_id: TimelineId) -> TimelineResources {
5305 230 : let remote_client = self.build_timeline_remote_client(timeline_id);
5306 230 : self.get_timeline_resources_for(remote_client)
5307 230 : }
5308 :
5309 : /// Builds timeline resources for the given remote client.
5310 233 : fn get_timeline_resources_for(&self, remote_client: RemoteTimelineClient) -> TimelineResources {
5311 233 : TimelineResources {
5312 233 : remote_client,
5313 233 : pagestream_throttle: self.pagestream_throttle.clone(),
5314 233 : pagestream_throttle_metrics: self.pagestream_throttle_metrics.clone(),
5315 233 : l0_compaction_trigger: self.l0_compaction_trigger.clone(),
5316 233 : l0_flush_global_state: self.l0_flush_global_state.clone(),
5317 233 : basebackup_prepare_sender: self.basebackup_prepare_sender.clone(),
5318 233 : }
5319 233 : }
5320 :
5321 : /// Creates intermediate timeline structure and its files.
5322 : ///
5323 : /// An empty layer map is initialized, and new data and WAL can be imported starting
5324 : /// at 'disk_consistent_lsn'. After any initial data has been imported, call
5325 : /// `finish_creation` to insert the Timeline into the timelines map.
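 : ///
 : /// A rough sketch of the flow as used elsewhere in this file (`bootstrap_timeline`,
 : /// `branch_timeline_impl`): call `prepare_new_timeline`, optionally import initial data
 : /// through the returned `UninitializedTimeline`, then call `finish_creation` to insert
 : /// the timeline into the timelines map.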
5326 : #[allow(clippy::too_many_arguments)]
5327 230 : async fn prepare_new_timeline<'a>(
5328 230 : &'a self,
5329 230 : new_timeline_id: TimelineId,
5330 230 : new_metadata: &TimelineMetadata,
5331 230 : create_guard: TimelineCreateGuard,
5332 230 : start_lsn: Lsn,
5333 230 : ancestor: Option<Arc<Timeline>>,
5334 230 : rel_size_v2_status: Option<RelSizeMigration>,
5335 230 : ctx: &RequestContext,
5336 230 : ) -> anyhow::Result<(UninitializedTimeline<'a>, RequestContext)> {
5337 230 : let tenant_shard_id = self.tenant_shard_id;
5338 230 :
5339 230 : let resources = self.build_timeline_resources(new_timeline_id);
5340 230 : resources
5341 230 : .remote_client
5342 230 : .init_upload_queue_for_empty_remote(new_metadata, rel_size_v2_status.clone())?;
5343 :
5344 230 : let (timeline_struct, timeline_ctx) = self
5345 230 : .create_timeline_struct(
5346 230 : new_timeline_id,
5347 230 : new_metadata,
5348 230 : None,
5349 230 : ancestor,
5350 230 : resources,
5351 230 : CreateTimelineCause::Load,
5352 230 : create_guard.idempotency.clone(),
5353 230 : None,
5354 230 : rel_size_v2_status,
5355 230 : ctx,
5356 230 : )
5357 230 : .context("Failed to create timeline data structure")?;
5358 :
5359 230 : timeline_struct.init_empty_layer_map(start_lsn);
5360 :
5361 230 : if let Err(e) = self
5362 230 : .create_timeline_files(&create_guard.timeline_path)
5363 230 : .await
5364 : {
5365 0 : error!(
5366 0 : "Failed to create initial files for timeline {tenant_shard_id}/{new_timeline_id}, cleaning up: {e:?}"
5367 : );
5368 0 : cleanup_timeline_directory(create_guard);
5369 0 : return Err(e);
5370 230 : }
5371 230 :
5372 230 : debug!(
5373 0 : "Successfully created initial files for timeline {tenant_shard_id}/{new_timeline_id}"
5374 : );
5375 :
5376 230 : Ok((
5377 230 : UninitializedTimeline::new(
5378 230 : self,
5379 230 : new_timeline_id,
5380 230 : Some((timeline_struct, create_guard)),
5381 230 : ),
5382 230 : timeline_ctx,
5383 230 : ))
5384 230 : }
5385 :
5386 230 : async fn create_timeline_files(&self, timeline_path: &Utf8Path) -> anyhow::Result<()> {
5387 230 : crashsafe::create_dir(timeline_path).context("Failed to create timeline directory")?;
5388 :
5389 230 : fail::fail_point!("after-timeline-dir-creation", |_| {
5390 0 : anyhow::bail!("failpoint after-timeline-dir-creation");
5391 230 : });
5392 :
5393 230 : Ok(())
5394 230 : }
5395 :
5396 : /// Get a guard that provides exclusive access to the timeline directory, preventing
5397 : /// concurrent attempts to create the same timeline.
5398 : ///
5399 : /// The `allow_offloaded` parameter controls whether to tolerate the existence of
5400 : /// offloaded timelines or not.
5401 233 : fn create_timeline_create_guard(
5402 233 : self: &Arc<Self>,
5403 233 : timeline_id: TimelineId,
5404 233 : idempotency: CreateTimelineIdempotency,
5405 233 : allow_offloaded: bool,
5406 233 : ) -> Result<TimelineCreateGuard, TimelineExclusionError> {
5407 233 : let tenant_shard_id = self.tenant_shard_id;
5408 233 :
5409 233 : let timeline_path = self.conf.timeline_path(&tenant_shard_id, &timeline_id);
5410 :
5411 233 : let create_guard = TimelineCreateGuard::new(
5412 233 : self,
5413 233 : timeline_id,
5414 233 : timeline_path.clone(),
5415 233 : idempotency,
5416 233 : allow_offloaded,
5417 233 : )?;
5418 :
5419 : // At this stage, we have got exclusive access to in-memory state for this timeline ID
5420 : // for creation.
5421 : // A timeline directory should never exist on disk already:
5422 : // - a previous failed creation would have cleaned up after itself
5423 : // - a pageserver restart would clean up timeline directories that don't have valid remote state
5424 : //
5425 : // Therefore it is an unexpected internal error to encounter a timeline directory already existing here;
5426 : // this error may indicate a bug in cleanup on failed creations.
5427 232 : if timeline_path.exists() {
5428 0 : return Err(TimelineExclusionError::Other(anyhow::anyhow!(
5429 0 : "Timeline directory already exists! This is a bug."
5430 0 : )));
5431 232 : }
5432 232 :
5433 232 : Ok(create_guard)
5434 233 : }
5435 :
5436 : /// Gathers inputs from all of the timelines to produce a sizing model input.
5437 : ///
5438 : /// Future is cancellation safe. Only one calculation can be running at once per tenant.
5439 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
5440 : pub async fn gather_size_inputs(
5441 : &self,
5442 : // `max_retention_period` overrides the cutoff that is used to calculate the size
5443 : // (only if it is shorter than the real cutoff).
5444 : max_retention_period: Option<u64>,
5445 : cause: LogicalSizeCalculationCause,
5446 : cancel: &CancellationToken,
5447 : ctx: &RequestContext,
5448 : ) -> Result<size::ModelInputs, size::CalculateSyntheticSizeError> {
5449 : let logical_sizes_at_once = self
5450 : .conf
5451 : .concurrent_tenant_size_logical_size_queries
5452 : .inner();
5453 :
5454 : // TODO: Having a single mutex block concurrent reads is not great for performance.
5455 : //
5456 : // But the only case where we need to run multiple of these at once is when we
5457 : // request a size for a tenant manually via API, while another background calculation
5458 : // is in progress (which is not a common case).
5459 : //
5460 : // See more for on the issue #2748 condenced out of the initial PR review.
5461 : let mut shared_cache = tokio::select! {
5462 : locked = self.cached_logical_sizes.lock() => locked,
5463 : _ = cancel.cancelled() => return Err(size::CalculateSyntheticSizeError::Cancelled),
5464 : _ = self.cancel.cancelled() => return Err(size::CalculateSyntheticSizeError::Cancelled),
5465 : };
5466 :
5467 : size::gather_inputs(
5468 : self,
5469 : logical_sizes_at_once,
5470 : max_retention_period,
5471 : &mut shared_cache,
5472 : cause,
5473 : cancel,
5474 : ctx,
5475 : )
5476 : .await
5477 : }
5478 :
5479 : /// Calculate synthetic tenant size and cache the result.
5480 : /// This is called periodically by the background worker.
5481 : /// The result is cached in the tenant struct.
5482 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
5483 : pub async fn calculate_synthetic_size(
5484 : &self,
5485 : cause: LogicalSizeCalculationCause,
5486 : cancel: &CancellationToken,
5487 : ctx: &RequestContext,
5488 : ) -> Result<u64, size::CalculateSyntheticSizeError> {
5489 : let inputs = self.gather_size_inputs(None, cause, cancel, ctx).await?;
5490 :
5491 : let size = inputs.calculate();
5492 :
5493 : self.set_cached_synthetic_size(size);
5494 :
5495 : Ok(size)
5496 : }
5497 :
5498 : /// Cache the given synthetic size and update the metric value
5499 0 : pub fn set_cached_synthetic_size(&self, size: u64) {
5500 0 : self.cached_synthetic_tenant_size
5501 0 : .store(size, Ordering::Relaxed);
5502 0 :
5503 0 : // Only shard zero should be calculating synthetic sizes
5504 0 : debug_assert!(self.shard_identity.is_shard_zero());
5505 :
5506 0 : TENANT_SYNTHETIC_SIZE_METRIC
5507 0 : .get_metric_with_label_values(&[&self.tenant_shard_id.tenant_id.to_string()])
5508 0 : .unwrap()
5509 0 : .set(size);
5510 0 : }
5511 :
5512 0 : pub fn cached_synthetic_size(&self) -> u64 {
5513 0 : self.cached_synthetic_tenant_size.load(Ordering::Relaxed)
5514 0 : }
5515 :
5516 : /// Flush any in-progress layers, schedule uploads, and wait for uploads to complete.
5517 : ///
5518 : /// This function can take a long time: callers should wrap it in a timeout if calling
5519 : /// from an external API handler.
5520 : ///
5521 : /// Cancel-safety: cancelling this function may leave I/O running, but such I/O is
5522 : /// still bounded by tenant/timeline shutdown.
5523 : #[tracing::instrument(skip_all)]
5524 : pub(crate) async fn flush_remote(&self) -> anyhow::Result<()> {
5525 : let timelines = self.timelines.lock().unwrap().clone();
5526 :
5527 0 : async fn flush_timeline(_gate: GateGuard, timeline: Arc<Timeline>) -> anyhow::Result<()> {
5528 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Flushing...");
5529 0 : timeline.freeze_and_flush().await?;
5530 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Waiting for uploads...");
5531 0 : timeline.remote_client.wait_completion().await?;
5532 :
5533 0 : Ok(())
5534 0 : }
5535 :
5536 : // We do not use a JoinSet for these tasks, because we don't want them to be
5537 : // aborted when this function's future is cancelled: they should stay alive
5538 : // holding their GateGuard until they complete, to ensure their I/Os complete
5539 : // before Timeline shutdown completes.
5540 : let mut results = FuturesUnordered::new();
5541 :
5542 : for (_timeline_id, timeline) in timelines {
5543 : // Run each timeline's flush in a task holding the timeline's gate: this
5544 : // means that if this function's future is cancelled, the Timeline shutdown
5545 : // will still wait for any I/O in here to complete.
5546 : let Ok(gate) = timeline.gate.enter() else {
5547 : continue;
5548 : };
5549 0 : let jh = tokio::task::spawn(async move { flush_timeline(gate, timeline).await });
5550 : results.push(jh);
5551 : }
5552 :
5553 : while let Some(r) = results.next().await {
5554 : if let Err(e) = r {
5555 : if !e.is_cancelled() && !e.is_panic() {
5556 : tracing::error!("unexpected join error: {e:?}");
5557 : }
5558 : }
5559 : }
5560 :
5561 : // The flushes we did above were just writes, but the TenantShard might have had
5562 : // pending deletions as well from recent compaction/gc: we want to flush those
5563 : // as well. This requires flushing the global delete queue. This is cheap
5564 : // because it's typically a no-op.
5565 : match self.deletion_queue_client.flush_execute().await {
5566 : Ok(_) => {}
5567 : Err(DeletionQueueError::ShuttingDown) => {}
5568 : }
5569 :
5570 : Ok(())
5571 : }
5572 :
5573 0 : pub(crate) fn get_tenant_conf(&self) -> pageserver_api::models::TenantConfig {
5574 0 : self.tenant_conf.load().tenant_conf.clone()
5575 0 : }
5576 :
5577 : /// How much local storage would this tenant like to have? It can cope with
5578 : /// less than this (via eviction and on-demand downloads), but this function enables
5579 : /// the TenantShard to advertise how much storage it would prefer to have in order to provide fast I/O
5580 : /// by keeping important things on local disk.
5581 : ///
5582 : /// This is a heuristic, not a guarantee: tenants that are long-idle will actually use less
5583 : /// than they report here, due to layer eviction. Tenants with many active branches may
5584 : /// actually use more than they report here.
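 : ///
 : /// For example (illustrative numbers): a tenant whose timelines have visible physical
 : /// sizes of 10 GiB, 2 GiB and 500 MiB reports 10 GiB here (the max), not the 12.5 GiB sum.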
5585 0 : pub(crate) fn local_storage_wanted(&self) -> u64 {
5586 0 : let timelines = self.timelines.lock().unwrap();
5587 0 :
5588 0 : // Heuristic: we use the max() of the timelines' visible sizes, rather than the sum. This
5589 0 : // reflects the observation that on tenants with multiple large branches, typically only one
5590 0 : // of them is used actively enough to occupy space on disk.
5591 0 : timelines
5592 0 : .values()
5593 0 : .map(|t| t.metrics.visible_physical_size_gauge.get())
5594 0 : .max()
5595 0 : .unwrap_or(0)
5596 0 : }
5597 :
5598 : /// Builds a new tenant manifest, and uploads it if it differs from the last-known tenant
5599 : /// manifest in `Self::remote_tenant_manifest`.
5600 : ///
5601 : /// TODO: instead of requiring callers to remember to call `maybe_upload_tenant_manifest` after
5602 : /// changing any `TenantShard` state that's included in the manifest, consider making the manifest
5603 : /// the authoritative source of data with an API that automatically uploads on changes. Revisit
5604 : /// this when the manifest is more widely used and we have a better idea of the data model.
5605 118 : pub(crate) async fn maybe_upload_tenant_manifest(&self) -> Result<(), TenantManifestError> {
5606 : // Multiple tasks may call this function concurrently after mutating the TenantShard runtime
5607 : // state, affecting the manifest generated by `build_tenant_manifest`. We use an async mutex
5608 : // to serialize these callers. `eq_ignoring_version` acts as a slightly inefficient but
5609 : // simple coalescing mechanism.
5610 118 : let mut guard = tokio::select! {
5611 118 : guard = self.remote_tenant_manifest.lock() => guard,
5612 118 : _ = self.cancel.cancelled() => return Err(TenantManifestError::Cancelled),
5613 : };
5614 :
5615 : // Build a new manifest.
5616 118 : let manifest = self.build_tenant_manifest();
5617 :
5618 : // Check if the manifest has changed. We ignore the version number here, to avoid
5619 : // uploading every manifest on version number bumps.
5620 118 : if let Some(old) = guard.as_ref() {
5621 4 : if manifest.eq_ignoring_version(old) {
5622 3 : return Ok(());
5623 1 : }
5624 114 : }
5625 :
5626 : // Update metrics
5627 115 : let tid = self.tenant_shard_id.to_string();
5628 115 : let shard_id = self.tenant_shard_id.shard_slug().to_string();
5629 115 : let set_key = &[tid.as_str(), shard_id.as_str()][..];
5630 115 : TENANT_OFFLOADED_TIMELINES
5631 115 : .with_label_values(set_key)
5632 115 : .set(manifest.offloaded_timelines.len() as u64);
5633 115 :
5634 115 : // Upload the manifest. Remote storage does no retries internally, so retry here.
5635 115 : match backoff::retry(
5636 115 : || async {
5637 115 : upload_tenant_manifest(
5638 115 : &self.remote_storage,
5639 115 : &self.tenant_shard_id,
5640 115 : self.generation,
5641 115 : &manifest,
5642 115 : &self.cancel,
5643 115 : )
5644 115 : .await
5645 230 : },
5646 115 : |_| self.cancel.is_cancelled(),
5647 115 : FAILED_UPLOAD_WARN_THRESHOLD,
5648 115 : FAILED_REMOTE_OP_RETRIES,
5649 115 : "uploading tenant manifest",
5650 115 : &self.cancel,
5651 115 : )
5652 115 : .await
5653 : {
5654 0 : None => Err(TenantManifestError::Cancelled),
5655 0 : Some(Err(_)) if self.cancel.is_cancelled() => Err(TenantManifestError::Cancelled),
5656 0 : Some(Err(e)) => Err(TenantManifestError::RemoteStorage(e)),
5657 : Some(Ok(_)) => {
5658 : // Store the successfully uploaded manifest, so that future callers can avoid
5659 : // re-uploading the same thing.
5660 115 : *guard = Some(manifest);
5661 115 :
5662 115 : Ok(())
5663 : }
5664 : }
5665 118 : }
5666 : }
5667 :
5668 : /// Create the cluster temporarily in the 'initdbpath' directory inside the repository
5669 : /// to get bootstrap data for timeline initialization.
5670 0 : async fn run_initdb(
5671 0 : conf: &'static PageServerConf,
5672 0 : initdb_target_dir: &Utf8Path,
5673 0 : pg_version: u32,
5674 0 : cancel: &CancellationToken,
5675 0 : ) -> Result<(), InitdbError> {
5676 0 : let initdb_bin_path = conf
5677 0 : .pg_bin_dir(pg_version)
5678 0 : .map_err(InitdbError::Other)?
5679 0 : .join("initdb");
5680 0 : let initdb_lib_dir = conf.pg_lib_dir(pg_version).map_err(InitdbError::Other)?;
5681 0 : info!(
5682 0 : "running {} in {}, libdir: {}",
5683 : initdb_bin_path, initdb_target_dir, initdb_lib_dir,
5684 : );
5685 :
5686 0 : let _permit = {
5687 0 : let _timer = INITDB_SEMAPHORE_ACQUISITION_TIME.start_timer();
5688 0 : INIT_DB_SEMAPHORE.acquire().await
5689 : };
5690 :
5691 0 : CONCURRENT_INITDBS.inc();
5692 0 : scopeguard::defer! {
5693 0 : CONCURRENT_INITDBS.dec();
5694 0 : }
5695 0 :
5696 0 : let _timer = INITDB_RUN_TIME.start_timer();
5697 0 : let res = postgres_initdb::do_run_initdb(postgres_initdb::RunInitdbArgs {
5698 0 : superuser: &conf.superuser,
5699 0 : locale: &conf.locale,
5700 0 : initdb_bin: &initdb_bin_path,
5701 0 : pg_version,
5702 0 : library_search_path: &initdb_lib_dir,
5703 0 : pgdata: initdb_target_dir,
5704 0 : })
5705 0 : .await
5706 0 : .map_err(InitdbError::Inner);
5707 0 :
5708 0 : // This isn't true cancellation support, see above. Still return an error to
5709 0 : // exercise the cancellation code path.
5710 0 : if cancel.is_cancelled() {
5711 0 : return Err(InitdbError::Cancelled);
5712 0 : }
5713 0 :
5714 0 : res
5715 0 : }
5716 :
5717 : /// Dump contents of a layer file to stdout.
5718 0 : pub async fn dump_layerfile_from_path(
5719 0 : path: &Utf8Path,
5720 0 : verbose: bool,
5721 0 : ctx: &RequestContext,
5722 0 : ) -> anyhow::Result<()> {
5723 : use std::os::unix::fs::FileExt;
5724 :
5725 : // All layer files start with a two-byte "magic" value, to identify the kind of
5726 : // file.
5727 0 : let file = File::open(path)?;
5728 0 : let mut header_buf = [0u8; 2];
5729 0 : file.read_exact_at(&mut header_buf, 0)?;
5730 :
5731 0 : match u16::from_be_bytes(header_buf) {
5732 : crate::IMAGE_FILE_MAGIC => {
5733 0 : ImageLayer::new_for_path(path, file)?
5734 0 : .dump(verbose, ctx)
5735 0 : .await?
5736 : }
5737 : crate::DELTA_FILE_MAGIC => {
5738 0 : DeltaLayer::new_for_path(path, file)?
5739 0 : .dump(verbose, ctx)
5740 0 : .await?
5741 : }
5742 0 : magic => bail!("unrecognized magic identifier: {:?}", magic),
5743 : }
5744 :
5745 0 : Ok(())
5746 0 : }
5747 :
5748 : #[cfg(test)]
5749 : pub(crate) mod harness {
5750 : use bytes::{Bytes, BytesMut};
5751 : use hex_literal::hex;
5752 : use once_cell::sync::OnceCell;
5753 : use pageserver_api::key::Key;
5754 : use pageserver_api::models::ShardParameters;
5755 : use pageserver_api::record::NeonWalRecord;
5756 : use pageserver_api::shard::ShardIndex;
5757 : use utils::id::TenantId;
5758 : use utils::logging;
5759 :
5760 : use super::*;
5761 : use crate::deletion_queue::mock::MockDeletionQueue;
5762 : use crate::l0_flush::L0FlushConfig;
5763 : use crate::walredo::apply_neon;
5764 :
5765 : pub const TIMELINE_ID: TimelineId =
5766 : TimelineId::from_array(hex!("11223344556677881122334455667788"));
5767 : pub const NEW_TIMELINE_ID: TimelineId =
5768 : TimelineId::from_array(hex!("AA223344556677881122334455667788"));
5769 :
5770 : /// Convenience function to create a page image with the given string as the only content
5771 2514395 : pub fn test_img(s: &str) -> Bytes {
5772 2514395 : let mut buf = BytesMut::new();
5773 2514395 : buf.extend_from_slice(s.as_bytes());
5774 2514395 : buf.resize(64, 0);
5775 2514395 :
5776 2514395 : buf.freeze()
5777 2514395 : }
5778 :
5779 : pub struct TenantHarness {
5780 : pub conf: &'static PageServerConf,
5781 : pub tenant_conf: pageserver_api::models::TenantConfig,
5782 : pub tenant_shard_id: TenantShardId,
5783 : pub generation: Generation,
5784 : pub shard: ShardIndex,
5785 : pub remote_storage: GenericRemoteStorage,
5786 : pub remote_fs_dir: Utf8PathBuf,
5787 : pub deletion_queue: MockDeletionQueue,
5788 : }
5789 :
5790 : static LOG_HANDLE: OnceCell<()> = OnceCell::new();
5791 :
5792 129 : pub(crate) fn setup_logging() {
5793 129 : LOG_HANDLE.get_or_init(|| {
5794 123 : logging::init(
5795 123 : logging::LogFormat::Test,
5796 123 : // enable it in case the tests exercise code paths that use
5797 123 : // debug_assert_current_span_has_tenant_and_timeline_id
5798 123 : logging::TracingErrorLayerEnablement::EnableWithRustLogFilter,
5799 123 : logging::Output::Stdout,
5800 123 : )
5801 123 : .expect("Failed to init test logging");
5802 129 : });
5803 129 : }
5804 :
5805 : impl TenantHarness {
5806 117 : pub async fn create_custom(
5807 117 : test_name: &'static str,
5808 117 : tenant_conf: pageserver_api::models::TenantConfig,
5809 117 : tenant_id: TenantId,
5810 117 : shard_identity: ShardIdentity,
5811 117 : generation: Generation,
5812 117 : ) -> anyhow::Result<Self> {
5813 117 : setup_logging();
5814 117 :
5815 117 : let repo_dir = PageServerConf::test_repo_dir(test_name);
5816 117 : let _ = fs::remove_dir_all(&repo_dir);
5817 117 : fs::create_dir_all(&repo_dir)?;
5818 :
5819 117 : let conf = PageServerConf::dummy_conf(repo_dir);
5820 117 : // Make a static copy of the config. This can never be free'd, but that's
5821 117 : // OK in a test.
5822 117 : let conf: &'static PageServerConf = Box::leak(Box::new(conf));
5823 117 :
5824 117 : let shard = shard_identity.shard_index();
5825 117 : let tenant_shard_id = TenantShardId {
5826 117 : tenant_id,
5827 117 : shard_number: shard.shard_number,
5828 117 : shard_count: shard.shard_count,
5829 117 : };
5830 117 : fs::create_dir_all(conf.tenant_path(&tenant_shard_id))?;
5831 117 : fs::create_dir_all(conf.timelines_path(&tenant_shard_id))?;
5832 :
5833 : use remote_storage::{RemoteStorageConfig, RemoteStorageKind};
5834 117 : let remote_fs_dir = conf.workdir.join("localfs");
5835 117 : std::fs::create_dir_all(&remote_fs_dir).unwrap();
5836 117 : let config = RemoteStorageConfig {
5837 117 : storage: RemoteStorageKind::LocalFs {
5838 117 : local_path: remote_fs_dir.clone(),
5839 117 : },
5840 117 : timeout: RemoteStorageConfig::DEFAULT_TIMEOUT,
5841 117 : small_timeout: RemoteStorageConfig::DEFAULT_SMALL_TIMEOUT,
5842 117 : };
5843 117 : let remote_storage = GenericRemoteStorage::from_config(&config).await.unwrap();
5844 117 : let deletion_queue = MockDeletionQueue::new(Some(remote_storage.clone()));
5845 117 :
5846 117 : Ok(Self {
5847 117 : conf,
5848 117 : tenant_conf,
5849 117 : tenant_shard_id,
5850 117 : generation,
5851 117 : shard,
5852 117 : remote_storage,
5853 117 : remote_fs_dir,
5854 117 : deletion_queue,
5855 117 : })
5856 117 : }
5857 :
5858 110 : pub async fn create(test_name: &'static str) -> anyhow::Result<Self> {
5859 110 : // Disable automatic GC and compaction to make the unit tests more deterministic.
5860 110 : // The tests perform them manually if needed.
5861 110 : let tenant_conf = pageserver_api::models::TenantConfig {
5862 110 : gc_period: Some(Duration::ZERO),
5863 110 : compaction_period: Some(Duration::ZERO),
5864 110 : ..Default::default()
5865 110 : };
5866 110 : let tenant_id = TenantId::generate();
5867 110 : let shard = ShardIdentity::unsharded();
5868 110 : Self::create_custom(
5869 110 : test_name,
5870 110 : tenant_conf,
5871 110 : tenant_id,
5872 110 : shard,
5873 110 : Generation::new(0xdeadbeef),
5874 110 : )
5875 110 : .await
5876 110 : }
5877 :
5878 10 : pub fn span(&self) -> tracing::Span {
5879 10 : info_span!("TenantHarness", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug())
5880 10 : }
5881 :
5882 117 : pub(crate) async fn load(&self) -> (Arc<TenantShard>, RequestContext) {
5883 117 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error)
5884 117 : .with_scope_unit_test();
5885 117 : (
5886 117 : self.do_try_load(&ctx)
5887 117 : .await
5888 117 : .expect("failed to load test tenant"),
5889 117 : ctx,
5890 117 : )
5891 117 : }
5892 :
5893 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
5894 : pub(crate) async fn do_try_load(
5895 : &self,
5896 : ctx: &RequestContext,
5897 : ) -> anyhow::Result<Arc<TenantShard>> {
5898 : let walredo_mgr = Arc::new(WalRedoManager::from(TestRedoManager));
5899 :
5900 : let (basebackup_request_sender, _) = tokio::sync::mpsc::unbounded_channel();
5901 :
5902 : let tenant = Arc::new(TenantShard::new(
5903 : TenantState::Attaching,
5904 : self.conf,
5905 : AttachedTenantConf::try_from(LocationConf::attached_single(
5906 : self.tenant_conf.clone(),
5907 : self.generation,
5908 : &ShardParameters::default(),
5909 : ))
5910 : .unwrap(),
5911 : // This is a legacy/test code path: sharding isn't supported here.
5912 : ShardIdentity::unsharded(),
5913 : Some(walredo_mgr),
5914 : self.tenant_shard_id,
5915 : self.remote_storage.clone(),
5916 : self.deletion_queue.new_client(),
5917 : // TODO: ideally we should run all unit tests with both configs
5918 : L0FlushGlobalState::new(L0FlushConfig::default()),
5919 : basebackup_request_sender,
5920 : FeatureResolver::new_disabled(),
5921 : ));
5922 :
5923 : let preload = tenant
5924 : .preload(&self.remote_storage, CancellationToken::new())
5925 : .await?;
5926 : tenant.attach(Some(preload), ctx).await?;
5927 :
5928 : tenant.state.send_replace(TenantState::Active);
5929 : for timeline in tenant.timelines.lock().unwrap().values() {
5930 : timeline.set_state(TimelineState::Active);
5931 : }
5932 : Ok(tenant)
5933 : }
5934 :
5935 1 : pub fn timeline_path(&self, timeline_id: &TimelineId) -> Utf8PathBuf {
5936 1 : self.conf.timeline_path(&self.tenant_shard_id, timeline_id)
5937 1 : }
5938 : }
5939 :
5940 : // Mock WAL redo manager that doesn't do much
5941 : pub(crate) struct TestRedoManager;
5942 :
5943 : impl TestRedoManager {
5944 : /// # Cancel-Safety
5945 : ///
5946 : /// This method is cancellation-safe.
5947 26774 : pub async fn request_redo(
5948 26774 : &self,
5949 26774 : key: Key,
5950 26774 : lsn: Lsn,
5951 26774 : base_img: Option<(Lsn, Bytes)>,
5952 26774 : records: Vec<(Lsn, NeonWalRecord)>,
5953 26774 : _pg_version: u32,
5954 26774 : _redo_attempt_type: RedoAttemptType,
5955 26774 : ) -> Result<Bytes, walredo::Error> {
5956 1403510 : let records_neon = records.iter().all(|r| apply_neon::can_apply_in_neon(&r.1));
5957 26774 : if records_neon {
5958 : // For Neon wal records, we can decode without spawning postgres, so do so.
5959 26774 : let mut page = match (base_img, records.first()) {
5960 13029 : (Some((_lsn, img)), _) => {
5961 13029 : let mut page = BytesMut::new();
5962 13029 : page.extend_from_slice(&img);
5963 13029 : page
5964 : }
5965 13745 : (_, Some((_lsn, rec))) if rec.will_init() => BytesMut::new(),
5966 : _ => {
5967 0 : panic!("Neon WAL redo requires base image or will init record");
5968 : }
5969 : };
5970 :
5971 1430283 : for (record_lsn, record) in records {
5972 1403510 : apply_neon::apply_in_neon(&record, record_lsn, key, &mut page)?;
5973 : }
5974 26773 : Ok(page.freeze())
5975 : } else {
5976 : // We never spawn a postgres walredo process in unit tests: just log what we might have done.
5977 0 : let s = format!(
5978 0 : "redo for {} to get to {}, with {} and {} records",
5979 0 : key,
5980 0 : lsn,
5981 0 : if base_img.is_some() {
5982 0 : "base image"
5983 : } else {
5984 0 : "no base image"
5985 : },
5986 0 : records.len()
5987 0 : );
5988 0 : println!("{s}");
5989 0 :
5990 0 : Ok(test_img(&s))
5991 : }
5992 26774 : }
5993 : }
5994 : }
5995 :
5996 : #[cfg(test)]
5997 : mod tests {
5998 : use std::collections::{BTreeMap, BTreeSet};
5999 :
6000 : use bytes::{Bytes, BytesMut};
6001 : use hex_literal::hex;
6002 : use itertools::Itertools;
6003 : #[cfg(feature = "testing")]
6004 : use models::CompactLsnRange;
6005 : use pageserver_api::key::{
6006 : AUX_KEY_PREFIX, Key, NON_INHERITED_RANGE, RELATION_SIZE_PREFIX, repl_origin_key,
6007 : };
6008 : use pageserver_api::keyspace::KeySpace;
6009 : #[cfg(feature = "testing")]
6010 : use pageserver_api::keyspace::KeySpaceRandomAccum;
6011 : use pageserver_api::models::{CompactionAlgorithm, CompactionAlgorithmSettings};
6012 : #[cfg(feature = "testing")]
6013 : use pageserver_api::record::NeonWalRecord;
6014 : use pageserver_api::value::Value;
6015 : use pageserver_compaction::helpers::overlaps_with;
6016 : #[cfg(feature = "testing")]
6017 : use rand::SeedableRng;
6018 : #[cfg(feature = "testing")]
6019 : use rand::rngs::StdRng;
6020 : use rand::{Rng, thread_rng};
6021 : #[cfg(feature = "testing")]
6022 : use std::ops::Range;
6023 : use storage_layer::{IoConcurrency, PersistentLayerKey};
6024 : use tests::storage_layer::ValuesReconstructState;
6025 : use tests::timeline::{GetVectoredError, ShutdownMode};
6026 : #[cfg(feature = "testing")]
6027 : use timeline::GcInfo;
6028 : #[cfg(feature = "testing")]
6029 : use timeline::InMemoryLayerTestDesc;
6030 : #[cfg(feature = "testing")]
6031 : use timeline::compaction::{KeyHistoryRetention, KeyLogAtLsn};
6032 : use timeline::{CompactOptions, DeltaLayerTestDesc, VersionedKeySpaceQuery};
6033 : use utils::id::TenantId;
6034 :
6035 : use super::*;
6036 : use crate::DEFAULT_PG_VERSION;
6037 : use crate::keyspace::KeySpaceAccum;
6038 : use crate::tenant::harness::*;
6039 : use crate::tenant::timeline::CompactFlags;
6040 :
6041 : static TEST_KEY: Lazy<Key> =
6042 9 : Lazy::new(|| Key::from_slice(&hex!("010000000033333333444444445500000001")));
6043 :
6044 : #[cfg(feature = "testing")]
6045 : struct TestTimelineSpecification {
6046 : start_lsn: Lsn,
6047 : last_record_lsn: Lsn,
6048 :
6049 : in_memory_layers_shape: Vec<(Range<Key>, Range<Lsn>)>,
6050 : delta_layers_shape: Vec<(Range<Key>, Range<Lsn>)>,
6051 : image_layers_shape: Vec<(Range<Key>, Lsn)>,
6052 :
6053 : gap_chance: u8,
6054 : will_init_chance: u8,
6055 : }
6056 :
6057 : #[cfg(feature = "testing")]
6058 : struct Storage {
6059 : storage: HashMap<(Key, Lsn), Value>,
6060 : start_lsn: Lsn,
6061 : }
6062 :
6063 : #[cfg(feature = "testing")]
6064 : impl Storage {
6065 32000 : fn get(&self, key: Key, lsn: Lsn) -> Bytes {
6066 : use bytes::BufMut;
6067 :
6068 32000 : let mut crnt_lsn = lsn;
6069 32000 : let mut got_base = false;
6070 32000 :
6071 32000 : let mut acc = Vec::new();
6072 :
6073 2831871 : while crnt_lsn >= self.start_lsn {
6074 2831871 : if let Some(value) = self.storage.get(&(key, crnt_lsn)) {
6075 1421172 : acc.push(value.clone());
6076 :
6077 1402881 : match value {
6078 1402881 : Value::WalRecord(NeonWalRecord::Test { will_init, .. }) => {
6079 1402881 : if *will_init {
6080 13709 : got_base = true;
6081 13709 : break;
6082 1389172 : }
6083 : }
6084 : Value::Image(_) => {
6085 18291 : got_base = true;
6086 18291 : break;
6087 : }
6088 0 : _ => unreachable!(),
6089 : }
6090 1410699 : }
6091 :
6092 2799871 : crnt_lsn = crnt_lsn.checked_sub(1u64).unwrap();
6093 : }
6094 :
6095 32000 : assert!(
6096 32000 : got_base,
6097 0 : "Input data was incorrect. No base image for {key}@{lsn}"
6098 : );
6099 :
6100 32000 : tracing::debug!("Wal redo depth for {key}@{lsn} is {}", acc.len());
6101 :
6102 32000 : let mut blob = BytesMut::new();
6103 1421172 : for value in acc.into_iter().rev() {
6104 1402881 : match value {
6105 1402881 : Value::WalRecord(NeonWalRecord::Test { append, .. }) => {
6106 1402881 : blob.extend_from_slice(append.as_bytes());
6107 1402881 : }
6108 18291 : Value::Image(img) => {
6109 18291 : blob.put(img);
6110 18291 : }
6111 0 : _ => unreachable!(),
6112 : }
6113 : }
6114 :
6115 32000 : blob.into()
6116 32000 : }
6117 : }
6118 :
6119 : #[cfg(feature = "testing")]
6120 : #[allow(clippy::too_many_arguments)]
6121 1 : async fn randomize_timeline(
6122 1 : tenant: &Arc<TenantShard>,
6123 1 : new_timeline_id: TimelineId,
6124 1 : pg_version: u32,
6125 1 : spec: TestTimelineSpecification,
6126 1 : random: &mut rand::rngs::StdRng,
6127 1 : ctx: &RequestContext,
6128 1 : ) -> anyhow::Result<(Arc<Timeline>, Storage, Vec<Lsn>)> {
6129 1 : let mut storage: HashMap<(Key, Lsn), Value> = HashMap::default();
6130 1 : let mut interesting_lsns = vec![spec.last_record_lsn];
6131 :
6132 2 : for (key_range, lsn_range) in spec.in_memory_layers_shape.iter() {
6133 2 : let mut lsn = lsn_range.start;
6134 202 : while lsn < lsn_range.end {
6135 200 : let mut key = key_range.start;
6136 21018 : while key < key_range.end {
6137 20818 : let gap = random.gen_range(1..=100) <= spec.gap_chance;
6138 20818 : let will_init = random.gen_range(1..=100) <= spec.will_init_chance;
6139 20818 :
6140 20818 : if gap {
6141 1018 : continue;
6142 19800 : }
6143 :
6144 19800 : let record = if will_init {
6145 191 : Value::WalRecord(NeonWalRecord::wal_init(format!("[wil_init {key}@{lsn}]")))
6146 : } else {
6147 19609 : Value::WalRecord(NeonWalRecord::wal_append(format!("[delta {key}@{lsn}]")))
6148 : };
6149 :
6150 19800 : storage.insert((key, lsn), record);
6151 19800 :
6152 19800 : key = key.next();
6153 : }
6154 200 : lsn = Lsn(lsn.0 + 1);
6155 : }
6156 :
6157 : // Stash some interesting LSN for future use
6158 6 : for offset in [0, 5, 100].iter() {
6159 6 : if *offset == 0 {
6160 2 : interesting_lsns.push(lsn_range.start);
6161 2 : } else {
6162 4 : let below = lsn_range.start.checked_sub(*offset);
6163 4 : match below {
6164 4 : Some(v) if v >= spec.start_lsn => {
6165 4 : interesting_lsns.push(v);
6166 4 : }
6167 0 : _ => {}
6168 : }
6169 :
6170 4 : let above = Lsn(lsn_range.start.0 + offset);
6171 4 : interesting_lsns.push(above);
6172 : }
6173 : }
6174 : }
6175 :
6176 3 : for (key_range, lsn_range) in spec.delta_layers_shape.iter() {
6177 3 : let mut lsn = lsn_range.start;
6178 315 : while lsn < lsn_range.end {
6179 312 : let mut key = key_range.start;
6180 11112 : while key < key_range.end {
6181 10800 : let gap = random.gen_range(1..=100) <= spec.gap_chance;
6182 10800 : let will_init = random.gen_range(1..=100) <= spec.will_init_chance;
6183 10800 :
6184 10800 : if gap {
6185 504 : continue;
6186 10296 : }
6187 :
6188 10296 : let record = if will_init {
6189 103 : Value::WalRecord(NeonWalRecord::wal_init(format!("[wil_init {key}@{lsn}]")))
6190 : } else {
6191 10193 : Value::WalRecord(NeonWalRecord::wal_append(format!("[delta {key}@{lsn}]")))
6192 : };
6193 :
6194 10296 : storage.insert((key, lsn), record);
6195 10296 :
6196 10296 : key = key.next();
6197 : }
6198 312 : lsn = Lsn(lsn.0 + 1);
6199 : }
6200 :
6201 : // Stash some interesting LSN for future use
6202 9 : for offset in [0, 5, 100].iter() {
6203 9 : if *offset == 0 {
6204 3 : interesting_lsns.push(lsn_range.start);
6205 3 : } else {
6206 6 : let below = lsn_range.start.checked_sub(*offset);
6207 6 : match below {
6208 6 : Some(v) if v >= spec.start_lsn => {
6209 3 : interesting_lsns.push(v);
6210 3 : }
6211 3 : _ => {}
6212 : }
6213 :
6214 6 : let above = Lsn(lsn_range.start.0 + offset);
6215 6 : interesting_lsns.push(above);
6216 : }
6217 : }
6218 : }
6219 :
6220 3 : for (key_range, lsn) in spec.image_layers_shape.iter() {
6221 3 : let mut key = key_range.start;
6222 142 : while key < key_range.end {
6223 139 : let blob = Bytes::from(format!("[image {key}@{lsn}]"));
6224 139 : let record = Value::Image(blob.clone());
6225 139 : storage.insert((key, *lsn), record);
6226 139 :
6227 139 : key = key.next();
6228 139 : }
6229 :
6230 : // Stash some interesting LSN for future use
6231 9 : for offset in [0, 5, 100].iter() {
6232 9 : if *offset == 0 {
6233 3 : interesting_lsns.push(*lsn);
6234 3 : } else {
6235 6 : let below = lsn.checked_sub(*offset);
6236 6 : match below {
6237 6 : Some(v) if v >= spec.start_lsn => {
6238 4 : interesting_lsns.push(v);
6239 4 : }
6240 2 : _ => {}
6241 : }
6242 :
6243 6 : let above = Lsn(lsn.0 + offset);
6244 6 : interesting_lsns.push(above);
6245 : }
6246 : }
6247 : }
6248 :
6249 1 : let in_memory_test_layers = {
6250 1 : let mut acc = Vec::new();
6251 :
6252 2 : for (key_range, lsn_range) in spec.in_memory_layers_shape.iter() {
6253 2 : let mut data = Vec::new();
6254 2 :
6255 2 : let mut lsn = lsn_range.start;
6256 202 : while lsn < lsn_range.end {
6257 200 : let mut key = key_range.start;
6258 20000 : while key < key_range.end {
6259 19800 : if let Some(record) = storage.get(&(key, lsn)) {
6260 19800 : data.push((key, lsn, record.clone()));
6261 19800 : }
6262 :
6263 19800 : key = key.next();
6264 : }
6265 200 : lsn = Lsn(lsn.0 + 1);
6266 : }
6267 :
6268 2 : acc.push(InMemoryLayerTestDesc {
6269 2 : data,
6270 2 : lsn_range: lsn_range.clone(),
6271 2 : is_open: false,
6272 2 : })
6273 : }
6274 :
6275 1 : acc
6276 : };
6277 :
6278 1 : let delta_test_layers = {
6279 1 : let mut acc = Vec::new();
6280 :
6281 3 : for (key_range, lsn_range) in spec.delta_layers_shape.iter() {
6282 3 : let mut data = Vec::new();
6283 3 :
6284 3 : let mut lsn = lsn_range.start;
6285 315 : while lsn < lsn_range.end {
6286 312 : let mut key = key_range.start;
6287 10608 : while key < key_range.end {
6288 10296 : if let Some(record) = storage.get(&(key, lsn)) {
6289 10296 : data.push((key, lsn, record.clone()));
6290 10296 : }
6291 :
6292 10296 : key = key.next();
6293 : }
6294 312 : lsn = Lsn(lsn.0 + 1);
6295 : }
6296 :
6297 3 : acc.push(DeltaLayerTestDesc {
6298 3 : data,
6299 3 : lsn_range: lsn_range.clone(),
6300 3 : key_range: key_range.clone(),
6301 3 : })
6302 : }
6303 :
6304 1 : acc
6305 : };
6306 :
6307 1 : let image_test_layers = {
6308 1 : let mut acc = Vec::new();
6309 :
6310 3 : for (key_range, lsn) in spec.image_layers_shape.iter() {
6311 3 : let mut data = Vec::new();
6312 3 :
6313 3 : let mut key = key_range.start;
6314 142 : while key < key_range.end {
6315 139 : if let Some(record) = storage.get(&(key, *lsn)) {
6316 139 : let blob = match record {
6317 139 : Value::Image(blob) => blob.clone(),
6318 0 : _ => unreachable!(),
6319 : };
6320 :
6321 139 : data.push((key, blob));
6322 0 : }
6323 :
6324 139 : key = key.next();
6325 : }
6326 :
6327 3 : acc.push((*lsn, data));
6328 : }
6329 :
6330 1 : acc
6331 : };
6332 :
6333 1 : let tline = tenant
6334 1 : .create_test_timeline_with_layers(
6335 1 : new_timeline_id,
6336 1 : spec.start_lsn,
6337 1 : pg_version,
6338 1 : ctx,
6339 1 : in_memory_test_layers,
6340 1 : delta_test_layers,
6341 1 : image_test_layers,
6342 1 : spec.last_record_lsn,
6343 1 : )
6344 1 : .await?;
6345 :
6346 1 : Ok((
6347 1 : tline,
6348 1 : Storage {
6349 1 : storage,
6350 1 : start_lsn: spec.start_lsn,
6351 1 : },
6352 1 : interesting_lsns,
6353 1 : ))
6354 1 : }
6355 :
6356 : #[tokio::test]
6357 1 : async fn test_basic() -> anyhow::Result<()> {
6358 1 : let (tenant, ctx) = TenantHarness::create("test_basic").await?.load().await;
6359 1 : let tline = tenant
6360 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
6361 1 : .await?;
6362 1 :
6363 1 : let mut writer = tline.writer().await;
6364 1 : writer
6365 1 : .put(
6366 1 : *TEST_KEY,
6367 1 : Lsn(0x10),
6368 1 : &Value::Image(test_img("foo at 0x10")),
6369 1 : &ctx,
6370 1 : )
6371 1 : .await?;
6372 1 : writer.finish_write(Lsn(0x10));
6373 1 : drop(writer);
6374 1 :
6375 1 : let mut writer = tline.writer().await;
6376 1 : writer
6377 1 : .put(
6378 1 : *TEST_KEY,
6379 1 : Lsn(0x20),
6380 1 : &Value::Image(test_img("foo at 0x20")),
6381 1 : &ctx,
6382 1 : )
6383 1 : .await?;
6384 1 : writer.finish_write(Lsn(0x20));
6385 1 : drop(writer);
6386 1 :
6387 1 : assert_eq!(
6388 1 : tline.get(*TEST_KEY, Lsn(0x10), &ctx).await?,
6389 1 : test_img("foo at 0x10")
6390 1 : );
6391 1 : assert_eq!(
6392 1 : tline.get(*TEST_KEY, Lsn(0x1f), &ctx).await?,
6393 1 : test_img("foo at 0x10")
6394 1 : );
6395 1 : assert_eq!(
6396 1 : tline.get(*TEST_KEY, Lsn(0x20), &ctx).await?,
6397 1 : test_img("foo at 0x20")
6398 1 : );
6399 1 :
6400 1 : Ok(())
6401 1 : }
6402 :
6403 : #[tokio::test]
6404 1 : async fn no_duplicate_timelines() -> anyhow::Result<()> {
6405 1 : let (tenant, ctx) = TenantHarness::create("no_duplicate_timelines")
6406 1 : .await?
6407 1 : .load()
6408 1 : .await;
6409 1 : let _ = tenant
6410 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6411 1 : .await?;
6412 1 :
6413 1 : match tenant
6414 1 : .create_empty_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6415 1 : .await
6416 1 : {
6417 1 : Ok(_) => panic!("duplicate timeline creation should fail"),
6418 1 : Err(e) => assert_eq!(
6419 1 : e.to_string(),
6420 1 : "timeline already exists with different parameters".to_string()
6421 1 : ),
6422 1 : }
6423 1 :
6424 1 : Ok(())
6425 1 : }
6426 :
6427 : /// Convenience function to create a page image with given string as the only content
6428 5 : pub fn test_value(s: &str) -> Value {
6429 5 : let mut buf = BytesMut::new();
6430 5 : buf.extend_from_slice(s.as_bytes());
6431 5 : Value::Image(buf.freeze())
6432 5 : }
6433 :
6434 : ///
6435 : /// Test branch creation
6436 : ///
6437 : #[tokio::test]
6438 1 : async fn test_branch() -> anyhow::Result<()> {
6439 1 : use std::str::from_utf8;
6440 1 :
6441 1 : let (tenant, ctx) = TenantHarness::create("test_branch").await?.load().await;
6442 1 : let tline = tenant
6443 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6444 1 : .await?;
6445 1 : let mut writer = tline.writer().await;
6446 1 :
6447 1 : #[allow(non_snake_case)]
6448 1 : let TEST_KEY_A: Key = Key::from_hex("110000000033333333444444445500000001").unwrap();
6449 1 : #[allow(non_snake_case)]
6450 1 : let TEST_KEY_B: Key = Key::from_hex("110000000033333333444444445500000002").unwrap();
6451 1 :
6452 1 : // Insert a value on the timeline
6453 1 : writer
6454 1 : .put(TEST_KEY_A, Lsn(0x20), &test_value("foo at 0x20"), &ctx)
6455 1 : .await?;
6456 1 : writer
6457 1 : .put(TEST_KEY_B, Lsn(0x20), &test_value("foobar at 0x20"), &ctx)
6458 1 : .await?;
6459 1 : writer.finish_write(Lsn(0x20));
6460 1 :
6461 1 : writer
6462 1 : .put(TEST_KEY_A, Lsn(0x30), &test_value("foo at 0x30"), &ctx)
6463 1 : .await?;
6464 1 : writer.finish_write(Lsn(0x30));
6465 1 : writer
6466 1 : .put(TEST_KEY_A, Lsn(0x40), &test_value("foo at 0x40"), &ctx)
6467 1 : .await?;
6468 1 : writer.finish_write(Lsn(0x40));
6469 1 :
6470 1 : //assert_current_logical_size(&tline, Lsn(0x40));
6471 1 :
6472 1 : // Branch the history, modify relation differently on the new timeline
6473 1 : tenant
6474 1 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x30)), &ctx)
6475 1 : .await?;
6476 1 : let newtline = tenant
6477 1 : .get_timeline(NEW_TIMELINE_ID, true)
6478 1 : .expect("Should have a local timeline");
6479 1 : let mut new_writer = newtline.writer().await;
6480 1 : new_writer
6481 1 : .put(TEST_KEY_A, Lsn(0x40), &test_value("bar at 0x40"), &ctx)
6482 1 : .await?;
6483 1 : new_writer.finish_write(Lsn(0x40));
6484 1 :
6485 1 : // Check page contents on both branches
6486 1 : assert_eq!(
6487 1 : from_utf8(&tline.get(TEST_KEY_A, Lsn(0x40), &ctx).await?)?,
6488 1 : "foo at 0x40"
6489 1 : );
6490 1 : assert_eq!(
6491 1 : from_utf8(&newtline.get(TEST_KEY_A, Lsn(0x40), &ctx).await?)?,
6492 1 : "bar at 0x40"
6493 1 : );
6494 1 : assert_eq!(
6495 1 : from_utf8(&newtline.get(TEST_KEY_B, Lsn(0x40), &ctx).await?)?,
6496 1 : "foobar at 0x20"
6497 1 : );
6498 1 :
6499 1 : //assert_current_logical_size(&tline, Lsn(0x40));
6500 1 :
6501 1 : Ok(())
6502 1 : }
6503 :
6504 10 : async fn make_some_layers(
6505 10 : tline: &Timeline,
6506 10 : start_lsn: Lsn,
6507 10 : ctx: &RequestContext,
6508 10 : ) -> anyhow::Result<()> {
6509 10 : let mut lsn = start_lsn;
6510 : {
6511 10 : let mut writer = tline.writer().await;
6512 : // Create a relation on the timeline
6513 10 : writer
6514 10 : .put(
6515 10 : *TEST_KEY,
6516 10 : lsn,
6517 10 : &Value::Image(test_img(&format!("foo at {}", lsn))),
6518 10 : ctx,
6519 10 : )
6520 10 : .await?;
6521 10 : writer.finish_write(lsn);
6522 10 : lsn += 0x10;
6523 10 : writer
6524 10 : .put(
6525 10 : *TEST_KEY,
6526 10 : lsn,
6527 10 : &Value::Image(test_img(&format!("foo at {}", lsn))),
6528 10 : ctx,
6529 10 : )
6530 10 : .await?;
6531 10 : writer.finish_write(lsn);
6532 10 : lsn += 0x10;
6533 10 : }
6534 10 : tline.freeze_and_flush().await?;
6535 : {
6536 10 : let mut writer = tline.writer().await;
6537 10 : writer
6538 10 : .put(
6539 10 : *TEST_KEY,
6540 10 : lsn,
6541 10 : &Value::Image(test_img(&format!("foo at {}", lsn))),
6542 10 : ctx,
6543 10 : )
6544 10 : .await?;
6545 10 : writer.finish_write(lsn);
6546 10 : lsn += 0x10;
6547 10 : writer
6548 10 : .put(
6549 10 : *TEST_KEY,
6550 10 : lsn,
6551 10 : &Value::Image(test_img(&format!("foo at {}", lsn))),
6552 10 : ctx,
6553 10 : )
6554 10 : .await?;
6555 10 : writer.finish_write(lsn);
6556 10 : }
6557 10 : tline.freeze_and_flush().await.map_err(|e| e.into())
6558 10 : }
6559 :
6560 : #[tokio::test(start_paused = true)]
6561 1 : async fn test_prohibit_branch_creation_on_garbage_collected_data() -> anyhow::Result<()> {
6562 1 : let (tenant, ctx) =
6563 1 : TenantHarness::create("test_prohibit_branch_creation_on_garbage_collected_data")
6564 1 : .await?
6565 1 : .load()
6566 1 : .await;
6567 1 : // Advance to the lsn lease deadline so that GC is not blocked by
6568 1 : // initial transition into AttachedSingle.
6569 1 : tokio::time::advance(tenant.get_lsn_lease_length()).await;
6570 1 : tokio::time::resume();
6571 1 : let tline = tenant
6572 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6573 1 : .await?;
6574 1 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
6575 1 :
6576 1 : // this removes layers before lsn 40 (50 minus 10), so there are two remaining layers, image and delta for 31-50
6577 1 : // FIXME: this doesn't actually remove any layer currently, given how the flushing
6578 1 : // and compaction works. But it does set the 'cutoff' point so that the cross check
6579 1 : // below should fail.
6580 1 : tenant
6581 1 : .gc_iteration(
6582 1 : Some(TIMELINE_ID),
6583 1 : 0x10,
6584 1 : Duration::ZERO,
6585 1 : &CancellationToken::new(),
6586 1 : &ctx,
6587 1 : )
6588 1 : .await?;
6589 1 :
6590 1 : // try to branch at lsn 25, should fail because we already garbage collected the data
6591 1 : match tenant
6592 1 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x25)), &ctx)
6593 1 : .await
6594 1 : {
6595 1 : Ok(_) => panic!("branching should have failed"),
6596 1 : Err(err) => {
6597 1 : let CreateTimelineError::AncestorLsn(err) = err else {
6598 1 : panic!("wrong error type")
6599 1 : };
6600 1 : assert!(err.to_string().contains("invalid branch start lsn"));
6601 1 : assert!(
6602 1 : err.source()
6603 1 : .unwrap()
6604 1 : .to_string()
6605 1 : .contains("we might've already garbage collected needed data")
6606 1 : )
6607 1 : }
6608 1 : }
6609 1 :
6610 1 : Ok(())
6611 1 : }
6612 :
6613 : #[tokio::test]
6614 1 : async fn test_prohibit_branch_creation_on_pre_initdb_lsn() -> anyhow::Result<()> {
6615 1 : let (tenant, ctx) =
6616 1 : TenantHarness::create("test_prohibit_branch_creation_on_pre_initdb_lsn")
6617 1 : .await?
6618 1 : .load()
6619 1 : .await;
6620 1 :
6621 1 : let tline = tenant
6622 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x50), DEFAULT_PG_VERSION, &ctx)
6623 1 : .await?;
6624 1 : // try to branch at lsn 0x25, should fail because initdb lsn is 0x50
6625 1 : match tenant
6626 1 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x25)), &ctx)
6627 1 : .await
6628 1 : {
6629 1 : Ok(_) => panic!("branching should have failed"),
6630 1 : Err(err) => {
6631 1 : let CreateTimelineError::AncestorLsn(err) = err else {
6632 1 : panic!("wrong error type");
6633 1 : };
6634 1 : assert!(&err.to_string().contains("invalid branch start lsn"));
6635 1 : assert!(
6636 1 : &err.source()
6637 1 : .unwrap()
6638 1 : .to_string()
6639 1 : .contains("is earlier than latest GC cutoff")
6640 1 : );
6641 1 : }
6642 1 : }
6643 1 :
6644 1 : Ok(())
6645 1 : }
6646 :
6647 : /*
6648 : // FIXME: This currently fails to error out. Calling GC doesn't currently
6649 : // remove the old value, we'd need to work a little harder
6650 : #[tokio::test]
6651 : async fn test_prohibit_get_for_garbage_collected_data() -> anyhow::Result<()> {
6652 : let repo =
6653 : RepoHarness::create("test_prohibit_get_for_garbage_collected_data")?
6654 : .load();
6655 :
6656 : let tline = repo.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
6657 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
6658 :
6659 : repo.gc_iteration(Some(TIMELINE_ID), 0x10, Duration::ZERO)?;
6660 : let applied_gc_cutoff_lsn = tline.get_applied_gc_cutoff_lsn();
6661 : assert!(*applied_gc_cutoff_lsn > Lsn(0x25));
6662 : match tline.get(*TEST_KEY, Lsn(0x25)) {
6663 : Ok(_) => panic!("request for page should have failed"),
6664 : Err(err) => assert!(err.to_string().contains("not found at")),
6665 : }
6666 : Ok(())
6667 : }
6668 : */
6669 :
6670 : #[tokio::test]
6671 1 : async fn test_get_branchpoints_from_an_inactive_timeline() -> anyhow::Result<()> {
6672 1 : let (tenant, ctx) =
6673 1 : TenantHarness::create("test_get_branchpoints_from_an_inactive_timeline")
6674 1 : .await?
6675 1 : .load()
6676 1 : .await;
6677 1 : let tline = tenant
6678 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6679 1 : .await?;
6680 1 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
6681 1 :
6682 1 : tenant
6683 1 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
6684 1 : .await?;
6685 1 : let newtline = tenant
6686 1 : .get_timeline(NEW_TIMELINE_ID, true)
6687 1 : .expect("Should have a local timeline");
6688 1 :
6689 1 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
6690 1 :
6691 1 : tline.set_broken("test".to_owned());
6692 1 :
6693 1 : tenant
6694 1 : .gc_iteration(
6695 1 : Some(TIMELINE_ID),
6696 1 : 0x10,
6697 1 : Duration::ZERO,
6698 1 : &CancellationToken::new(),
6699 1 : &ctx,
6700 1 : )
6701 1 : .await?;
6702 1 :
6703 1 : // The branchpoints should contain all timelines, even ones marked
6704 1 : // as Broken.
6705 1 : {
6706 1 : let branchpoints = &tline.gc_info.read().unwrap().retain_lsns;
6707 1 : assert_eq!(branchpoints.len(), 1);
6708 1 : assert_eq!(
6709 1 : branchpoints[0],
6710 1 : (Lsn(0x40), NEW_TIMELINE_ID, MaybeOffloaded::No)
6711 1 : );
6712 1 : }
6713 1 :
6714 1 : // You can read the key from the child branch even though the parent is
6715 1 : // Broken, as long as you don't need to access data from the parent.
6716 1 : assert_eq!(
6717 1 : newtline.get(*TEST_KEY, Lsn(0x70), &ctx).await?,
6718 1 : test_img(&format!("foo at {}", Lsn(0x70)))
6719 1 : );
6720 1 :
6721 1 : // This needs to traverse to the parent, and fails.
6722 1 : let err = newtline.get(*TEST_KEY, Lsn(0x50), &ctx).await.unwrap_err();
6723 1 : assert!(
6724 1 : err.to_string().starts_with(&format!(
6725 1 : "bad state on timeline {}: Broken",
6726 1 : tline.timeline_id
6727 1 : )),
6728 1 : "{err}"
6729 1 : );
6730 1 :
6731 1 : Ok(())
6732 1 : }
6733 :
6734 : #[tokio::test]
6735 1 : async fn test_retain_data_in_parent_which_is_needed_for_child() -> anyhow::Result<()> {
6736 1 : let (tenant, ctx) =
6737 1 : TenantHarness::create("test_retain_data_in_parent_which_is_needed_for_child")
6738 1 : .await?
6739 1 : .load()
6740 1 : .await;
6741 1 : let tline = tenant
6742 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6743 1 : .await?;
6744 1 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
6745 1 :
6746 1 : tenant
6747 1 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
6748 1 : .await?;
6749 1 : let newtline = tenant
6750 1 : .get_timeline(NEW_TIMELINE_ID, true)
6751 1 : .expect("Should have a local timeline");
6752 1 : // this removes layers before lsn 40 (50 minus 10), so there are two remaining layers, image and delta for 31-50
6753 1 : tenant
6754 1 : .gc_iteration(
6755 1 : Some(TIMELINE_ID),
6756 1 : 0x10,
6757 1 : Duration::ZERO,
6758 1 : &CancellationToken::new(),
6759 1 : &ctx,
6760 1 : )
6761 1 : .await?;
6762 1 : assert!(newtline.get(*TEST_KEY, Lsn(0x25), &ctx).await.is_ok());
6763 1 :
6764 1 : Ok(())
6765 1 : }
6766 : #[tokio::test]
6767 1 : async fn test_parent_keeps_data_forever_after_branching() -> anyhow::Result<()> {
6768 1 : let (tenant, ctx) = TenantHarness::create("test_parent_keeps_data_forever_after_branching")
6769 1 : .await?
6770 1 : .load()
6771 1 : .await;
6772 1 : let tline = tenant
6773 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6774 1 : .await?;
6775 1 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
6776 1 :
6777 1 : tenant
6778 1 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
6779 1 : .await?;
6780 1 : let newtline = tenant
6781 1 : .get_timeline(NEW_TIMELINE_ID, true)
6782 1 : .expect("Should have a local timeline");
6783 1 :
6784 1 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
6785 1 :
6786 1 : // run gc on parent
6787 1 : tenant
6788 1 : .gc_iteration(
6789 1 : Some(TIMELINE_ID),
6790 1 : 0x10,
6791 1 : Duration::ZERO,
6792 1 : &CancellationToken::new(),
6793 1 : &ctx,
6794 1 : )
6795 1 : .await?;
6796 1 :
6797 1 : // Check that the data is still accessible on the branch.
6798 1 : assert_eq!(
6799 1 : newtline.get(*TEST_KEY, Lsn(0x50), &ctx).await?,
6800 1 : test_img(&format!("foo at {}", Lsn(0x40)))
6801 1 : );
6802 1 :
6803 1 : Ok(())
6804 1 : }
6805 :
6806 : #[tokio::test]
6807 1 : async fn timeline_load() -> anyhow::Result<()> {
6808 1 : const TEST_NAME: &str = "timeline_load";
6809 1 : let harness = TenantHarness::create(TEST_NAME).await?;
6810 1 : {
6811 1 : let (tenant, ctx) = harness.load().await;
6812 1 : let tline = tenant
6813 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x7000), DEFAULT_PG_VERSION, &ctx)
6814 1 : .await?;
6815 1 : make_some_layers(tline.as_ref(), Lsn(0x8000), &ctx).await?;
6816 1 : // so that all uploads finish & we can call harness.load() below again
6817 1 : tenant
6818 1 : .shutdown(Default::default(), ShutdownMode::FreezeAndFlush)
6819 1 : .instrument(harness.span())
6820 1 : .await
6821 1 : .ok()
6822 1 : .unwrap();
6823 1 : }
6824 1 :
6825 1 : let (tenant, _ctx) = harness.load().await;
6826 1 : tenant
6827 1 : .get_timeline(TIMELINE_ID, true)
6828 1 : .expect("cannot load timeline");
6829 1 :
6830 1 : Ok(())
6831 1 : }
6832 :
6833 : #[tokio::test]
6834 1 : async fn timeline_load_with_ancestor() -> anyhow::Result<()> {
6835 1 : const TEST_NAME: &str = "timeline_load_with_ancestor";
6836 1 : let harness = TenantHarness::create(TEST_NAME).await?;
6837 1 : // create two timelines
6838 1 : {
6839 1 : let (tenant, ctx) = harness.load().await;
6840 1 : let tline = tenant
6841 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6842 1 : .await?;
6843 1 :
6844 1 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
6845 1 :
6846 1 : let child_tline = tenant
6847 1 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
6848 1 : .await?;
6849 1 : child_tline.set_state(TimelineState::Active);
6850 1 :
6851 1 : let newtline = tenant
6852 1 : .get_timeline(NEW_TIMELINE_ID, true)
6853 1 : .expect("Should have a local timeline");
6854 1 :
6855 1 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
6856 1 :
6857 1 : // so that all uploads finish & we can call harness.load() below again
6858 1 : tenant
6859 1 : .shutdown(Default::default(), ShutdownMode::FreezeAndFlush)
6860 1 : .instrument(harness.span())
6861 1 : .await
6862 1 : .ok()
6863 1 : .unwrap();
6864 1 : }
6865 1 :
6866 1 : // check that both of them are initially unloaded
6867 1 : let (tenant, _ctx) = harness.load().await;
6868 1 :
6869 1 : // check that both the child and the ancestor are loaded
6870 1 : let _child_tline = tenant
6871 1 : .get_timeline(NEW_TIMELINE_ID, true)
6872 1 : .expect("cannot get child timeline loaded");
6873 1 :
6874 1 : let _ancestor_tline = tenant
6875 1 : .get_timeline(TIMELINE_ID, true)
6876 1 : .expect("cannot get ancestor timeline loaded");
6877 1 :
6878 1 : Ok(())
6879 1 : }
6880 :
6881 : #[tokio::test]
6882 1 : async fn delta_layer_dumping() -> anyhow::Result<()> {
6883 1 : use storage_layer::AsLayerDesc;
6884 1 : let (tenant, ctx) = TenantHarness::create("test_layer_dumping")
6885 1 : .await?
6886 1 : .load()
6887 1 : .await;
6888 1 : let tline = tenant
6889 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6890 1 : .await?;
6891 1 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
6892 1 :
6893 1 : let layer_map = tline.layers.read().await;
6894 1 : let level0_deltas = layer_map
6895 1 : .layer_map()?
6896 1 : .level0_deltas()
6897 1 : .iter()
6898 2 : .map(|desc| layer_map.get_from_desc(desc))
6899 1 : .collect::<Vec<_>>();
6900 1 :
6901 1 : assert!(!level0_deltas.is_empty());
6902 1 :
6903 3 : for delta in level0_deltas {
6904 1 : // Ensure we are dumping a delta layer here
6905 2 : assert!(delta.layer_desc().is_delta);
6906 2 : delta.dump(true, &ctx).await.unwrap();
6907 1 : }
6908 1 :
6909 1 : Ok(())
6910 1 : }
6911 :
6912 : #[tokio::test]
6913 1 : async fn test_images() -> anyhow::Result<()> {
6914 1 : let (tenant, ctx) = TenantHarness::create("test_images").await?.load().await;
6915 1 : let tline = tenant
6916 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
6917 1 : .await?;
6918 1 :
6919 1 : let mut writer = tline.writer().await;
6920 1 : writer
6921 1 : .put(
6922 1 : *TEST_KEY,
6923 1 : Lsn(0x10),
6924 1 : &Value::Image(test_img("foo at 0x10")),
6925 1 : &ctx,
6926 1 : )
6927 1 : .await?;
6928 1 : writer.finish_write(Lsn(0x10));
6929 1 : drop(writer);
6930 1 :
6931 1 : tline.freeze_and_flush().await?;
6932 1 : tline
6933 1 : .compact(&CancellationToken::new(), EnumSet::default(), &ctx)
6934 1 : .await?;
6935 1 :
6936 1 : let mut writer = tline.writer().await;
6937 1 : writer
6938 1 : .put(
6939 1 : *TEST_KEY,
6940 1 : Lsn(0x20),
6941 1 : &Value::Image(test_img("foo at 0x20")),
6942 1 : &ctx,
6943 1 : )
6944 1 : .await?;
6945 1 : writer.finish_write(Lsn(0x20));
6946 1 : drop(writer);
6947 1 :
6948 1 : tline.freeze_and_flush().await?;
6949 1 : tline
6950 1 : .compact(&CancellationToken::new(), EnumSet::default(), &ctx)
6951 1 : .await?;
6952 1 :
6953 1 : let mut writer = tline.writer().await;
6954 1 : writer
6955 1 : .put(
6956 1 : *TEST_KEY,
6957 1 : Lsn(0x30),
6958 1 : &Value::Image(test_img("foo at 0x30")),
6959 1 : &ctx,
6960 1 : )
6961 1 : .await?;
6962 1 : writer.finish_write(Lsn(0x30));
6963 1 : drop(writer);
6964 1 :
6965 1 : tline.freeze_and_flush().await?;
6966 1 : tline
6967 1 : .compact(&CancellationToken::new(), EnumSet::default(), &ctx)
6968 1 : .await?;
6969 1 :
6970 1 : let mut writer = tline.writer().await;
6971 1 : writer
6972 1 : .put(
6973 1 : *TEST_KEY,
6974 1 : Lsn(0x40),
6975 1 : &Value::Image(test_img("foo at 0x40")),
6976 1 : &ctx,
6977 1 : )
6978 1 : .await?;
6979 1 : writer.finish_write(Lsn(0x40));
6980 1 : drop(writer);
6981 1 :
6982 1 : tline.freeze_and_flush().await?;
6983 1 : tline
6984 1 : .compact(&CancellationToken::new(), EnumSet::default(), &ctx)
6985 1 : .await?;
6986 1 :
6987 1 : assert_eq!(
6988 1 : tline.get(*TEST_KEY, Lsn(0x10), &ctx).await?,
6989 1 : test_img("foo at 0x10")
6990 1 : );
6991 1 : assert_eq!(
6992 1 : tline.get(*TEST_KEY, Lsn(0x1f), &ctx).await?,
6993 1 : test_img("foo at 0x10")
6994 1 : );
6995 1 : assert_eq!(
6996 1 : tline.get(*TEST_KEY, Lsn(0x20), &ctx).await?,
6997 1 : test_img("foo at 0x20")
6998 1 : );
6999 1 : assert_eq!(
7000 1 : tline.get(*TEST_KEY, Lsn(0x30), &ctx).await?,
7001 1 : test_img("foo at 0x30")
7002 1 : );
7003 1 : assert_eq!(
7004 1 : tline.get(*TEST_KEY, Lsn(0x40), &ctx).await?,
7005 1 : test_img("foo at 0x40")
7006 1 : );
7007 1 :
7008 1 : Ok(())
7009 1 : }
7010 :
7011 2 : async fn bulk_insert_compact_gc(
7012 2 : tenant: &TenantShard,
7013 2 : timeline: &Arc<Timeline>,
7014 2 : ctx: &RequestContext,
7015 2 : lsn: Lsn,
7016 2 : repeat: usize,
7017 2 : key_count: usize,
7018 2 : ) -> anyhow::Result<HashMap<Key, BTreeSet<Lsn>>> {
7019 2 : let compact = true;
7020 2 : bulk_insert_maybe_compact_gc(tenant, timeline, ctx, lsn, repeat, key_count, compact).await
7021 2 : }
7022 :
7023 4 : async fn bulk_insert_maybe_compact_gc(
7024 4 : tenant: &TenantShard,
7025 4 : timeline: &Arc<Timeline>,
7026 4 : ctx: &RequestContext,
7027 4 : mut lsn: Lsn,
7028 4 : repeat: usize,
7029 4 : key_count: usize,
7030 4 : compact: bool,
7031 4 : ) -> anyhow::Result<HashMap<Key, BTreeSet<Lsn>>> {
7032 4 : let mut inserted: HashMap<Key, BTreeSet<Lsn>> = Default::default();
7033 4 :
7034 4 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
7035 4 : let mut blknum = 0;
7036 4 :
7037 4 : // Enforce that the key range is monotonically increasing
7038 4 : let mut keyspace = KeySpaceAccum::new();
7039 4 :
7040 4 : let cancel = CancellationToken::new();
7041 4 :
7042 4 : for _ in 0..repeat {
7043 200 : for _ in 0..key_count {
7044 2000000 : test_key.field6 = blknum;
7045 2000000 : let mut writer = timeline.writer().await;
7046 2000000 : writer
7047 2000000 : .put(
7048 2000000 : test_key,
7049 2000000 : lsn,
7050 2000000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
7051 2000000 : ctx,
7052 2000000 : )
7053 2000000 : .await?;
7054 2000000 : inserted.entry(test_key).or_default().insert(lsn);
7055 2000000 : writer.finish_write(lsn);
7056 2000000 : drop(writer);
7057 2000000 :
7058 2000000 : keyspace.add_key(test_key);
7059 2000000 :
7060 2000000 : lsn = Lsn(lsn.0 + 0x10);
7061 2000000 : blknum += 1;
7062 : }
7063 :
7064 200 : timeline.freeze_and_flush().await?;
7065 200 : if compact {
7066 : // this requires timeline to be &Arc<Timeline>
7067 100 : timeline.compact(&cancel, EnumSet::default(), ctx).await?;
7068 100 : }
7069 :
7070 : // this doesn't really need to use the timeline_id target, but it is closer to what it
7071 : // originally was.
7072 200 : let res = tenant
7073 200 : .gc_iteration(Some(timeline.timeline_id), 0, Duration::ZERO, &cancel, ctx)
7074 200 : .await?;
7075 :
7076 200 : assert_eq!(res.layers_removed, 0, "this never removes anything");
7077 : }
7078 :
7079 4 : Ok(inserted)
7080 4 : }
7081 :
7082 : //
7083 : // Insert 1000 key-value pairs with increasing keys, flush, compact, GC.
7084 : // Repeat 50 times.
7085 : //
7086 : #[tokio::test]
7087 1 : async fn test_bulk_insert() -> anyhow::Result<()> {
7088 1 : let harness = TenantHarness::create("test_bulk_insert").await?;
7089 1 : let (tenant, ctx) = harness.load().await;
7090 1 : let tline = tenant
7091 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
7092 1 : .await?;
7093 1 :
7094 1 : let lsn = Lsn(0x10);
7095 1 : bulk_insert_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000).await?;
7096 1 :
7097 1 : Ok(())
7098 1 : }
7099 :
7100 : // Test the vectored get real implementation against a simple sequential implementation.
7101 : //
7102 : // The test generates a keyspace by repeatedly flushing the in-memory layer and compacting.
7103 : // Projected to 2D the key space looks like below. Lsn grows upwards on the Y axis and keys
7104 : // grow to the right on the X axis.
7105 : // [Delta]
7106 : // [Delta]
7107 : // [Delta]
7108 : // [Delta]
7109 : // ------------ Image ---------------
7110 : //
7111 : // After layer generation we pick the ranges to query as follows:
7112 : // 1. The beginning of each delta layer
7113 : // 2. At the seam between two adjacent delta layers
7114 : //
7115 : // There's one major downside to this test: delta layers only contain images,
7116 : // so the search can stop at the first delta layer and doesn't traverse any deeper.
7117 : #[tokio::test]
7118 1 : async fn test_get_vectored() -> anyhow::Result<()> {
7119 1 : let harness = TenantHarness::create("test_get_vectored").await?;
7120 1 : let (tenant, ctx) = harness.load().await;
7121 1 : let io_concurrency = IoConcurrency::spawn_for_test();
7122 1 : let tline = tenant
7123 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
7124 1 : .await?;
7125 1 :
7126 1 : let lsn = Lsn(0x10);
7127 1 : let inserted = bulk_insert_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000).await?;
7128 1 :
7129 1 : let guard = tline.layers.read().await;
7130 1 : let lm = guard.layer_map()?;
7131 1 :
7132 1 : lm.dump(true, &ctx).await?;
7133 1 :
7134 1 : let mut reads = Vec::new();
7135 1 : let mut prev = None;
7136 6 : lm.iter_historic_layers().for_each(|desc| {
7137 6 : if !desc.is_delta() {
7138 1 : prev = Some(desc.clone());
7139 1 : return;
7140 5 : }
7141 5 :
7142 5 : let start = desc.key_range.start;
7143 5 : let end = desc
7144 5 : .key_range
7145 5 : .start
7146 5 : .add(Timeline::MAX_GET_VECTORED_KEYS.try_into().unwrap());
7147 5 : reads.push(KeySpace {
7148 5 : ranges: vec![start..end],
7149 5 : });
7150 1 :
7151 5 : if let Some(prev) = &prev {
7152 5 : if !prev.is_delta() {
7153 5 : return;
7154 1 : }
7155 0 :
7156 0 : let first_range = Key {
7157 0 : field6: prev.key_range.end.field6 - 4,
7158 0 : ..prev.key_range.end
7159 0 : }..prev.key_range.end;
7160 0 :
7161 0 : let second_range = desc.key_range.start..Key {
7162 0 : field6: desc.key_range.start.field6 + 4,
7163 0 : ..desc.key_range.start
7164 0 : };
7165 0 :
7166 0 : reads.push(KeySpace {
7167 0 : ranges: vec![first_range, second_range],
7168 0 : });
7169 1 : };
7170 1 :
7171 1 : prev = Some(desc.clone());
7172 6 : });
7173 1 :
7174 1 : drop(guard);
7175 1 :
7176 1 : // Pick a big LSN such that we query over all the changes.
7177 1 : let reads_lsn = Lsn(u64::MAX - 1);
7178 1 :
7179 6 : for read in reads {
7180 5 : info!("Doing vectored read on {:?}", read);
7181 1 :
7182 5 : let query = VersionedKeySpaceQuery::uniform(read.clone(), reads_lsn);
7183 1 :
7184 5 : let vectored_res = tline
7185 5 : .get_vectored_impl(
7186 5 : query,
7187 5 : &mut ValuesReconstructState::new(io_concurrency.clone()),
7188 5 : &ctx,
7189 5 : )
7190 5 : .await;
7191 1 :
7192 5 : let mut expected_lsns: HashMap<Key, Lsn> = Default::default();
7193 5 : let mut expect_missing = false;
7194 5 : let mut key = read.start().unwrap();
7195 165 : while key != read.end().unwrap() {
7196 160 : if let Some(lsns) = inserted.get(&key) {
7197 160 : let expected_lsn = lsns.iter().rfind(|lsn| **lsn <= reads_lsn);
7198 160 : match expected_lsn {
7199 160 : Some(lsn) => {
7200 160 : expected_lsns.insert(key, *lsn);
7201 160 : }
7202 1 : None => {
7203 1 : expect_missing = true;
7204 0 : break;
7205 1 : }
7206 1 : }
7207 1 : } else {
7208 1 : expect_missing = true;
7209 0 : break;
7210 1 : }
7211 1 :
7212 160 : key = key.next();
7213 1 : }
7214 1 :
7215 5 : if expect_missing {
7216 1 : assert!(matches!(vectored_res, Err(GetVectoredError::MissingKey(_))));
7217 1 : } else {
7218 160 : for (key, image) in vectored_res? {
7219 160 : let expected_lsn = expected_lsns.get(&key).expect("determined above");
7220 160 : let expected_image = test_img(&format!("{} at {}", key.field6, expected_lsn));
7221 160 : assert_eq!(image?, expected_image);
7222 1 : }
7223 1 : }
7224 1 : }
7225 1 :
7226 1 : Ok(())
7227 1 : }
7228 :
7229 : #[tokio::test]
7230 1 : async fn test_get_vectored_aux_files() -> anyhow::Result<()> {
7231 1 : let harness = TenantHarness::create("test_get_vectored_aux_files").await?;
7232 1 :
7233 1 : let (tenant, ctx) = harness.load().await;
7234 1 : let io_concurrency = IoConcurrency::spawn_for_test();
7235 1 : let (tline, ctx) = tenant
7236 1 : .create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION, &ctx)
7237 1 : .await?;
7238 1 : let tline = tline.raw_timeline().unwrap();
7239 1 :
7240 1 : let mut modification = tline.begin_modification(Lsn(0x1000));
7241 1 : modification.put_file("foo/bar1", b"content1", &ctx).await?;
7242 1 : modification.set_lsn(Lsn(0x1008))?;
7243 1 : modification.put_file("foo/bar2", b"content2", &ctx).await?;
7244 1 : modification.commit(&ctx).await?;
7245 1 :
7246 1 : let child_timeline_id = TimelineId::generate();
7247 1 : tenant
7248 1 : .branch_timeline_test(
7249 1 : tline,
7250 1 : child_timeline_id,
7251 1 : Some(tline.get_last_record_lsn()),
7252 1 : &ctx,
7253 1 : )
7254 1 : .await?;
7255 1 :
7256 1 : let child_timeline = tenant
7257 1 : .get_timeline(child_timeline_id, true)
7258 1 : .expect("Should have the branched timeline");
7259 1 :
7260 1 : let aux_keyspace = KeySpace {
7261 1 : ranges: vec![NON_INHERITED_RANGE],
7262 1 : };
7263 1 : let read_lsn = child_timeline.get_last_record_lsn();
7264 1 :
7265 1 : let query = VersionedKeySpaceQuery::uniform(aux_keyspace.clone(), read_lsn);
7266 1 :
7267 1 : let vectored_res = child_timeline
7268 1 : .get_vectored_impl(
7269 1 : query,
7270 1 : &mut ValuesReconstructState::new(io_concurrency.clone()),
7271 1 : &ctx,
7272 1 : )
7273 1 : .await;
7274 1 :
7275 1 : let images = vectored_res?;
7276 1 : assert!(images.is_empty());
7277 1 : Ok(())
7278 1 : }
7279 :
7280 : // Test that vectored get handles layer gaps correctly
7281 : // by advancing into the next ancestor timeline if required.
7282 : //
7283 : // The test generates timelines that look like the diagram below.
7284 : // We leave a gap in one of the L1 layers at `gap_at_key` (`/` in the diagram).
7285 : // The reconstruct data for that key lies in the ancestor timeline (`X` in the diagram).
7286 : //
7287 : // ```
7288 : //-------------------------------+
7289 : // ... |
7290 : // [ L1 ] |
7291 : // [ / L1 ] | Child Timeline
7292 : // ... |
7293 : // ------------------------------+
7294 : // [ X L1 ] | Parent Timeline
7295 : // ------------------------------+
7296 : // ```
7297 : #[tokio::test]
7298 1 : async fn test_get_vectored_key_gap() -> anyhow::Result<()> {
7299 1 : let tenant_conf = pageserver_api::models::TenantConfig {
7300 1 : // Make compaction deterministic
7301 1 : gc_period: Some(Duration::ZERO),
7302 1 : compaction_period: Some(Duration::ZERO),
7303 1 : // Encourage creation of L1 layers
7304 1 : checkpoint_distance: Some(16 * 1024),
7305 1 : compaction_target_size: Some(8 * 1024),
7306 1 : ..Default::default()
7307 1 : };
7308 1 :
7309 1 : let harness = TenantHarness::create_custom(
7310 1 : "test_get_vectored_key_gap",
7311 1 : tenant_conf,
7312 1 : TenantId::generate(),
7313 1 : ShardIdentity::unsharded(),
7314 1 : Generation::new(0xdeadbeef),
7315 1 : )
7316 1 : .await?;
7317 1 : let (tenant, ctx) = harness.load().await;
7318 1 : let io_concurrency = IoConcurrency::spawn_for_test();
7319 1 :
7320 1 : let mut current_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
7321 1 : let gap_at_key = current_key.add(100);
7322 1 : let mut current_lsn = Lsn(0x10);
7323 1 :
7324 1 : const KEY_COUNT: usize = 10_000;
7325 1 :
7326 1 : let timeline_id = TimelineId::generate();
7327 1 : let current_timeline = tenant
7328 1 : .create_test_timeline(timeline_id, current_lsn, DEFAULT_PG_VERSION, &ctx)
7329 1 : .await?;
7330 1 :
7331 1 : current_lsn += 0x100;
7332 1 :
7333 1 : let mut writer = current_timeline.writer().await;
7334 1 : writer
7335 1 : .put(
7336 1 : gap_at_key,
7337 1 : current_lsn,
7338 1 : &Value::Image(test_img(&format!("{} at {}", gap_at_key, current_lsn))),
7339 1 : &ctx,
7340 1 : )
7341 1 : .await?;
7342 1 : writer.finish_write(current_lsn);
7343 1 : drop(writer);
7344 1 :
7345 1 : let mut latest_lsns = HashMap::new();
7346 1 : latest_lsns.insert(gap_at_key, current_lsn);
7347 1 :
7348 1 : current_timeline.freeze_and_flush().await?;
7349 1 :
7350 1 : let child_timeline_id = TimelineId::generate();
7351 1 :
7352 1 : tenant
7353 1 : .branch_timeline_test(
7354 1 : ¤t_timeline,
7355 1 : child_timeline_id,
7356 1 : Some(current_lsn),
7357 1 : &ctx,
7358 1 : )
7359 1 : .await?;
7360 1 : let child_timeline = tenant
7361 1 : .get_timeline(child_timeline_id, true)
7362 1 : .expect("Should have the branched timeline");
7363 1 :
7364 10001 : for i in 0..KEY_COUNT {
7365 10000 : if current_key == gap_at_key {
7366 1 : current_key = current_key.next();
7367 1 : continue;
7368 9999 : }
7369 9999 :
7370 9999 : current_lsn += 0x10;
7371 1 :
7372 9999 : let mut writer = child_timeline.writer().await;
7373 9999 : writer
7374 9999 : .put(
7375 9999 : current_key,
7376 9999 : current_lsn,
7377 9999 : &Value::Image(test_img(&format!("{} at {}", current_key, current_lsn))),
7378 9999 : &ctx,
7379 9999 : )
7380 9999 : .await?;
7381 9999 : writer.finish_write(current_lsn);
7382 9999 : drop(writer);
7383 9999 :
7384 9999 : latest_lsns.insert(current_key, current_lsn);
7385 9999 : current_key = current_key.next();
7386 9999 :
7387 9999 : // Flush every now and then to encourage layer file creation.
7388 9999 : if i % 500 == 0 {
7389 20 : child_timeline.freeze_and_flush().await?;
7390 9979 : }
7391 1 : }
7392 1 :
7393 1 : child_timeline.freeze_and_flush().await?;
7394 1 : let mut flags = EnumSet::new();
7395 1 : flags.insert(CompactFlags::ForceRepartition);
7396 1 : child_timeline
7397 1 : .compact(&CancellationToken::new(), flags, &ctx)
7398 1 : .await?;
7399 1 :
7400 1 : let key_near_end = {
7401 1 : let mut tmp = current_key;
7402 1 : tmp.field6 -= 10;
7403 1 : tmp
7404 1 : };
7405 1 :
7406 1 : let key_near_gap = {
7407 1 : let mut tmp = gap_at_key;
7408 1 : tmp.field6 -= 10;
7409 1 : tmp
7410 1 : };
7411 1 :
7412 1 : let read = KeySpace {
7413 1 : ranges: vec![key_near_gap..gap_at_key.next(), key_near_end..current_key],
7414 1 : };
7415 1 :
7416 1 : let query = VersionedKeySpaceQuery::uniform(read.clone(), current_lsn);
7417 1 :
7418 1 : let results = child_timeline
7419 1 : .get_vectored_impl(
7420 1 : query,
7421 1 : &mut ValuesReconstructState::new(io_concurrency.clone()),
7422 1 : &ctx,
7423 1 : )
7424 1 : .await?;
7425 1 :
7426 22 : for (key, img_res) in results {
7427 21 : let expected = test_img(&format!("{} at {}", key, latest_lsns[&key]));
7428 21 : assert_eq!(img_res?, expected);
7429 1 : }
7430 1 :
7431 1 : Ok(())
7432 1 : }
7433 :
7434 : // Test that vectored get descends into ancestor timelines correctly and
7435 : // does not return an image that's newer than requested.
7436 : //
7437 : // The diagram below illustrates an interesting case. We have a parent timeline
7438 : // (top of the Lsn range) and a child timeline. The request key cannot be reconstructed
7439 : // from the child timeline, so the parent timeline must be visited. When advancing into
7440 : // the child timeline, the read path needs to remember what the requested Lsn was in
7441 : // order to avoid returning an image that's too new. The test below constructs such
7442 : // a timeline setup and does a few queries around the Lsn of each page image.
7443 : // ```
7444 : // LSN
7445 : // ^
7446 : // |
7447 : // |
7448 : // 500 | --------------------------------------> branch point
7449 : // 400 | X
7450 : // 300 | X
7451 : // 200 | --------------------------------------> requested lsn
7452 : // 100 | X
7453 : // |---------------------------------------> Key
7454 : // |
7455 : // ------> requested key
7456 : //
7457 : // Legend:
7458 : // * X - page images
7459 : // ```
7460 : #[tokio::test]
7461 1 : async fn test_get_vectored_ancestor_descent() -> anyhow::Result<()> {
7462 1 : let harness = TenantHarness::create("test_get_vectored_on_lsn_axis").await?;
7463 1 : let (tenant, ctx) = harness.load().await;
7464 1 : let io_concurrency = IoConcurrency::spawn_for_test();
7465 1 :
7466 1 : let start_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
7467 1 : let end_key = start_key.add(1000);
7468 1 : let child_gap_at_key = start_key.add(500);
7469 1 : let mut parent_gap_lsns: BTreeMap<Lsn, String> = BTreeMap::new();
7470 1 :
7471 1 : let mut current_lsn = Lsn(0x10);
7472 1 :
7473 1 : let timeline_id = TimelineId::generate();
7474 1 : let parent_timeline = tenant
7475 1 : .create_test_timeline(timeline_id, current_lsn, DEFAULT_PG_VERSION, &ctx)
7476 1 : .await?;
7477 1 :
7478 1 : current_lsn += 0x100;
7479 1 :
7480 4 : for _ in 0..3 {
7481 3 : let mut key = start_key;
7482 3003 : while key < end_key {
7483 3000 : current_lsn += 0x10;
7484 3000 :
7485 3000 : let image_value = format!("{} at {}", child_gap_at_key, current_lsn);
7486 1 :
7487 3000 : let mut writer = parent_timeline.writer().await;
7488 3000 : writer
7489 3000 : .put(
7490 3000 : key,
7491 3000 : current_lsn,
7492 3000 : &Value::Image(test_img(&image_value)),
7493 3000 : &ctx,
7494 3000 : )
7495 3000 : .await?;
7496 3000 : writer.finish_write(current_lsn);
7497 3000 :
7498 3000 : if key == child_gap_at_key {
7499 3 : parent_gap_lsns.insert(current_lsn, image_value);
7500 2997 : }
7501 1 :
7502 3000 : key = key.next();
7503 1 : }
7504 1 :
7505 3 : parent_timeline.freeze_and_flush().await?;
7506 1 : }
7507 1 :
7508 1 : let child_timeline_id = TimelineId::generate();
7509 1 :
7510 1 : let child_timeline = tenant
7511 1 : .branch_timeline_test(&parent_timeline, child_timeline_id, Some(current_lsn), &ctx)
7512 1 : .await?;
7513 1 :
7514 1 : let mut key = start_key;
7515 1001 : while key < end_key {
7516 1000 : if key == child_gap_at_key {
7517 1 : key = key.next();
7518 1 : continue;
7519 999 : }
7520 999 :
7521 999 : current_lsn += 0x10;
7522 1 :
7523 999 : let mut writer = child_timeline.writer().await;
7524 999 : writer
7525 999 : .put(
7526 999 : key,
7527 999 : current_lsn,
7528 999 : &Value::Image(test_img(&format!("{} at {}", key, current_lsn))),
7529 999 : &ctx,
7530 999 : )
7531 999 : .await?;
7532 999 : writer.finish_write(current_lsn);
7533 999 :
7534 999 : key = key.next();
7535 1 : }
7536 1 :
7537 1 : child_timeline.freeze_and_flush().await?;
7538 1 :
7539 1 : let lsn_offsets: [i64; 5] = [-10, -1, 0, 1, 10];
7540 1 : let mut query_lsns = Vec::new();
7541 3 : for image_lsn in parent_gap_lsns.keys().rev() {
7542 18 : for offset in lsn_offsets {
7543 15 : query_lsns.push(Lsn(image_lsn
7544 15 : .0
7545 15 : .checked_add_signed(offset)
7546 15 : .expect("Shouldn't overflow")));
7547 15 : }
7548 1 : }
7549 1 :
7550 16 : for query_lsn in query_lsns {
7551 15 : let query = VersionedKeySpaceQuery::uniform(
7552 15 : KeySpace {
7553 15 : ranges: vec![child_gap_at_key..child_gap_at_key.next()],
7554 15 : },
7555 15 : query_lsn,
7556 15 : );
7557 1 :
7558 15 : let results = child_timeline
7559 15 : .get_vectored_impl(
7560 15 : query,
7561 15 : &mut ValuesReconstructState::new(io_concurrency.clone()),
7562 15 : &ctx,
7563 15 : )
7564 15 : .await;
7565 1 :
7566 15 : let expected_item = parent_gap_lsns
7567 15 : .iter()
7568 15 : .rev()
7569 34 : .find(|(lsn, _)| **lsn <= query_lsn);
7570 15 :
7571 15 : info!(
7572 1 : "Doing vectored read at LSN {}. Expecting image to be: {:?}",
7573 1 : query_lsn, expected_item
7574 1 : );
7575 1 :
7576 15 : match expected_item {
7577 13 : Some((_, img_value)) => {
7578 13 : let key_results = results.expect("No vectored get error expected");
7579 13 : let key_result = &key_results[&child_gap_at_key];
7580 13 : let returned_img = key_result
7581 13 : .as_ref()
7582 13 : .expect("No page reconstruct error expected");
7583 13 :
7584 13 : info!(
7585 1 : "Vectored read at LSN {} returned image {}",
7586 0 : query_lsn,
7587 0 : std::str::from_utf8(returned_img)?
7588 1 : );
7589 13 : assert_eq!(*returned_img, test_img(img_value));
7590 1 : }
7591 1 : None => {
7592 2 : assert!(matches!(results, Err(GetVectoredError::MissingKey(_))));
7593 1 : }
7594 1 : }
7595 1 : }
7596 1 :
7597 1 : Ok(())
7598 1 : }
7599 :
7600 : #[tokio::test]
7601 1 : async fn test_random_updates() -> anyhow::Result<()> {
7602 1 : let names_algorithms = [
7603 1 : ("test_random_updates_legacy", CompactionAlgorithm::Legacy),
7604 1 : ("test_random_updates_tiered", CompactionAlgorithm::Tiered),
7605 1 : ];
7606 3 : for (name, algorithm) in names_algorithms {
7607 2 : test_random_updates_algorithm(name, algorithm).await?;
7608 1 : }
7609 1 : Ok(())
7610 1 : }
7611 :
7612 2 : async fn test_random_updates_algorithm(
7613 2 : name: &'static str,
7614 2 : compaction_algorithm: CompactionAlgorithm,
7615 2 : ) -> anyhow::Result<()> {
7616 2 : let mut harness = TenantHarness::create(name).await?;
7617 2 : harness.tenant_conf.compaction_algorithm = Some(CompactionAlgorithmSettings {
7618 2 : kind: compaction_algorithm,
7619 2 : });
7620 2 : let (tenant, ctx) = harness.load().await;
7621 2 : let tline = tenant
7622 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
7623 2 : .await?;
7624 :
7625 : const NUM_KEYS: usize = 1000;
7626 2 : let cancel = CancellationToken::new();
7627 2 :
7628 2 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
7629 2 : let mut test_key_end = test_key;
7630 2 : test_key_end.field6 = NUM_KEYS as u32;
7631 2 : tline.add_extra_test_dense_keyspace(KeySpace::single(test_key..test_key_end));
7632 2 :
7633 2 : let mut keyspace = KeySpaceAccum::new();
7634 2 :
7635 2 : // Track when each page was last modified. Used to assert that
7636 2 : // a read sees the latest page version.
7637 2 : let mut updated = [Lsn(0); NUM_KEYS];
7638 2 :
7639 2 : let mut lsn = Lsn(0x10);
7640 : #[allow(clippy::needless_range_loop)]
7641 2002 : for blknum in 0..NUM_KEYS {
7642 2000 : lsn = Lsn(lsn.0 + 0x10);
7643 2000 : test_key.field6 = blknum as u32;
7644 2000 : let mut writer = tline.writer().await;
7645 2000 : writer
7646 2000 : .put(
7647 2000 : test_key,
7648 2000 : lsn,
7649 2000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
7650 2000 : &ctx,
7651 2000 : )
7652 2000 : .await?;
7653 2000 : writer.finish_write(lsn);
7654 2000 : updated[blknum] = lsn;
7655 2000 : drop(writer);
7656 2000 :
7657 2000 : keyspace.add_key(test_key);
7658 : }
7659 :
7660 102 : for _ in 0..50 {
7661 100100 : for _ in 0..NUM_KEYS {
7662 100000 : lsn = Lsn(lsn.0 + 0x10);
7663 100000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
7664 100000 : test_key.field6 = blknum as u32;
7665 100000 : let mut writer = tline.writer().await;
7666 100000 : writer
7667 100000 : .put(
7668 100000 : test_key,
7669 100000 : lsn,
7670 100000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
7671 100000 : &ctx,
7672 100000 : )
7673 100000 : .await?;
7674 100000 : writer.finish_write(lsn);
7675 100000 : drop(writer);
7676 100000 : updated[blknum] = lsn;
7677 : }
7678 :
7679 : // Read all the blocks
7680 100000 : for (blknum, last_lsn) in updated.iter().enumerate() {
7681 100000 : test_key.field6 = blknum as u32;
7682 100000 : assert_eq!(
7683 100000 : tline.get(test_key, lsn, &ctx).await?,
7684 100000 : test_img(&format!("{} at {}", blknum, last_lsn))
7685 : );
7686 : }
7687 :
7688 : // Perform a cycle of flush, and GC
7689 100 : tline.freeze_and_flush().await?;
7690 100 : tenant
7691 100 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
7692 100 : .await?;
7693 : }
7694 :
7695 2 : Ok(())
7696 2 : }
7697 :
7698 : #[tokio::test]
7699 1 : async fn test_traverse_branches() -> anyhow::Result<()> {
7700 1 : let (tenant, ctx) = TenantHarness::create("test_traverse_branches")
7701 1 : .await?
7702 1 : .load()
7703 1 : .await;
7704 1 : let mut tline = tenant
7705 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
7706 1 : .await?;
7707 1 :
7708 1 : const NUM_KEYS: usize = 1000;
7709 1 :
7710 1 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
7711 1 :
7712 1 : let mut keyspace = KeySpaceAccum::new();
7713 1 :
7714 1 : let cancel = CancellationToken::new();
7715 1 :
7716 1 : // Track when each page was last modified. Used to assert that
7717 1 : // a read sees the latest page version.
7718 1 : let mut updated = [Lsn(0); NUM_KEYS];
7719 1 :
7720 1 : let mut lsn = Lsn(0x10);
7721 1 : #[allow(clippy::needless_range_loop)]
7722 1001 : for blknum in 0..NUM_KEYS {
7723 1000 : lsn = Lsn(lsn.0 + 0x10);
7724 1000 : test_key.field6 = blknum as u32;
7725 1000 : let mut writer = tline.writer().await;
7726 1000 : writer
7727 1000 : .put(
7728 1000 : test_key,
7729 1000 : lsn,
7730 1000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
7731 1000 : &ctx,
7732 1000 : )
7733 1000 : .await?;
7734 1000 : writer.finish_write(lsn);
7735 1000 : updated[blknum] = lsn;
7736 1000 : drop(writer);
7737 1000 :
7738 1000 : keyspace.add_key(test_key);
7739 1 : }
7740 1 :
7741 51 : for _ in 0..50 {
7742 50 : let new_tline_id = TimelineId::generate();
7743 50 : tenant
7744 50 : .branch_timeline_test(&tline, new_tline_id, Some(lsn), &ctx)
7745 50 : .await?;
7746 50 : tline = tenant
7747 50 : .get_timeline(new_tline_id, true)
7748 50 : .expect("Should have the branched timeline");
7749 1 :
7750 50050 : for _ in 0..NUM_KEYS {
7751 50000 : lsn = Lsn(lsn.0 + 0x10);
7752 50000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
7753 50000 : test_key.field6 = blknum as u32;
7754 50000 : let mut writer = tline.writer().await;
7755 50000 : writer
7756 50000 : .put(
7757 50000 : test_key,
7758 50000 : lsn,
7759 50000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
7760 50000 : &ctx,
7761 50000 : )
7762 50000 : .await?;
7763 50000 : println!("updating {} at {}", blknum, lsn);
7764 50000 : writer.finish_write(lsn);
7765 50000 : drop(writer);
7766 50000 : updated[blknum] = lsn;
7767 1 : }
7768 1 :
7769 1 : // Read all the blocks
7770 50000 : for (blknum, last_lsn) in updated.iter().enumerate() {
7771 50000 : test_key.field6 = blknum as u32;
7772 50000 : assert_eq!(
7773 50000 : tline.get(test_key, lsn, &ctx).await?,
7774 50000 : test_img(&format!("{} at {}", blknum, last_lsn))
7775 1 : );
7776 1 : }
7777 1 :
7778 1 : // Perform a cycle of flush, compact, and GC
7779 50 : tline.freeze_and_flush().await?;
7780 50 : tline.compact(&cancel, EnumSet::default(), &ctx).await?;
7781 50 : tenant
7782 50 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
7783 50 : .await?;
7784 1 : }
7785 1 :
7786 1 : Ok(())
7787 1 : }
7788 :
7789 : #[tokio::test]
7790 1 : async fn test_traverse_ancestors() -> anyhow::Result<()> {
7791 1 : let (tenant, ctx) = TenantHarness::create("test_traverse_ancestors")
7792 1 : .await?
7793 1 : .load()
7794 1 : .await;
7795 1 : let mut tline = tenant
7796 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
7797 1 : .await?;
7798 1 :
7799 1 : const NUM_KEYS: usize = 100;
7800 1 : const NUM_TLINES: usize = 50;
7801 1 :
7802 1 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
7803 1 : // Track page mutation lsns across different timelines.
7804 1 : let mut updated = [[Lsn(0); NUM_KEYS]; NUM_TLINES];
7805 1 :
7806 1 : let mut lsn = Lsn(0x10);
7807 1 :
7808 1 : #[allow(clippy::needless_range_loop)]
7809 51 : for idx in 0..NUM_TLINES {
7810 50 : let new_tline_id = TimelineId::generate();
7811 50 : tenant
7812 50 : .branch_timeline_test(&tline, new_tline_id, Some(lsn), &ctx)
7813 50 : .await?;
7814 50 : tline = tenant
7815 50 : .get_timeline(new_tline_id, true)
7816 50 : .expect("Should have the branched timeline");
7817 1 :
7818 5050 : for _ in 0..NUM_KEYS {
7819 5000 : lsn = Lsn(lsn.0 + 0x10);
7820 5000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
7821 5000 : test_key.field6 = blknum as u32;
7822 5000 : let mut writer = tline.writer().await;
7823 5000 : writer
7824 5000 : .put(
7825 5000 : test_key,
7826 5000 : lsn,
7827 5000 : &Value::Image(test_img(&format!("{} {} at {}", idx, blknum, lsn))),
7828 5000 : &ctx,
7829 5000 : )
7830 5000 : .await?;
7831 5000 : println!("updating [{}][{}] at {}", idx, blknum, lsn);
7832 5000 : writer.finish_write(lsn);
7833 5000 : drop(writer);
7834 5000 : updated[idx][blknum] = lsn;
7835 1 : }
7836 1 : }
7837 1 :
7838 1 : // Read pages from leaf timeline across all ancestors.
7839 50 : for (idx, lsns) in updated.iter().enumerate() {
7840 5000 : for (blknum, lsn) in lsns.iter().enumerate() {
7841 1 : // Skip empty mutations.
7842 5000 : if lsn.0 == 0 {
7843 1827 : continue;
7844 3173 : }
7845 3173 : println!("checking [{idx}][{blknum}] at {lsn}");
7846 3173 : test_key.field6 = blknum as u32;
7847 3173 : assert_eq!(
7848 3173 : tline.get(test_key, *lsn, &ctx).await?,
7849 3173 : test_img(&format!("{idx} {blknum} at {lsn}"))
7850 1 : );
7851 1 : }
7852 1 : }
7853 1 : Ok(())
7854 1 : }
7855 :
7856 : #[tokio::test]
7857 1 : async fn test_write_at_initdb_lsn_takes_optimization_code_path() -> anyhow::Result<()> {
7858 1 : let (tenant, ctx) = TenantHarness::create("test_empty_test_timeline_is_usable")
7859 1 : .await?
7860 1 : .load()
7861 1 : .await;
7862 1 :
7863 1 : let initdb_lsn = Lsn(0x20);
7864 1 : let (utline, ctx) = tenant
7865 1 : .create_empty_timeline(TIMELINE_ID, initdb_lsn, DEFAULT_PG_VERSION, &ctx)
7866 1 : .await?;
7867 1 : let tline = utline.raw_timeline().unwrap();
7868 1 :
7869 1 : // Spawn flush loop now so that we can set the `expect_initdb_optimization`
7870 1 : tline.maybe_spawn_flush_loop();
7871 1 :
7872 1 : // Make sure the timeline has the minimum set of required keys for operation.
7873 1 : // The only operation you can always do on an empty timeline is to `put` new data.
7874 1 : // Except if you `put` at `initdb_lsn`.
7875 1 : // In that case, there's an optimization to directly create image layers instead of delta layers.
7876 1 : // It uses `repartition()`, which assumes some keys to be present.
7877 1 : // Let's make sure the test timeline can handle that case.
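         :         //
         :         // The optimization is only observable indirectly: the test flips
         :         // `expect_initdb_optimization` to true before flushing, then asserts that
         :         // `initdb_optimization_count` is positive, i.e. that the flush really took
         :         // the direct image-layer code path.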
7878 1 : {
7879 1 : let mut state = tline.flush_loop_state.lock().unwrap();
7880 1 : assert_eq!(
7881 1 : timeline::FlushLoopState::Running {
7882 1 : expect_initdb_optimization: false,
7883 1 : initdb_optimization_count: 0,
7884 1 : },
7885 1 : *state
7886 1 : );
7887 1 : *state = timeline::FlushLoopState::Running {
7888 1 : expect_initdb_optimization: true,
7889 1 : initdb_optimization_count: 0,
7890 1 : };
7891 1 : }
7892 1 :
7893 1 : // Make writes at the initdb_lsn. When we flush it below, it should be handled by the optimization.
7894 1 : // As explained above, the optimization requires some keys to be present.
7895 1 : // As per `create_empty_timeline` documentation, use init_empty to set them.
7896 1 : // This is what `create_test_timeline` does, by the way.
7897 1 : let mut modification = tline.begin_modification(initdb_lsn);
7898 1 : modification
7899 1 : .init_empty_test_timeline()
7900 1 : .context("init_empty_test_timeline")?;
7901 1 : modification
7902 1 : .commit(&ctx)
7903 1 : .await
7904 1 : .context("commit init_empty_test_timeline modification")?;
7905 1 :
7906 1 : // Do the flush. The flush code will check the expectations that we set above.
7907 1 : tline.freeze_and_flush().await?;
7908 1 :
7909 1 : // assert freeze_and_flush exercised the initdb optimization
7910 1 : {
7911 1 : let state = tline.flush_loop_state.lock().unwrap();
7912 1 : let timeline::FlushLoopState::Running {
7913 1 : expect_initdb_optimization,
7914 1 : initdb_optimization_count,
7915 1 : } = *state
7916 1 : else {
7917 1 : panic!("unexpected state: {:?}", *state);
7918 1 : };
7919 1 : assert!(expect_initdb_optimization);
7920 1 : assert!(initdb_optimization_count > 0);
7921 1 : }
7922 1 : Ok(())
7923 1 : }
7924 :
7925 : #[tokio::test]
7926 1 : async fn test_create_guard_crash() -> anyhow::Result<()> {
7927 1 : let name = "test_create_guard_crash";
7928 1 : let harness = TenantHarness::create(name).await?;
7929 1 : {
7930 1 : let (tenant, ctx) = harness.load().await;
7931 1 : let (tline, _ctx) = tenant
7932 1 : .create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION, &ctx)
7933 1 : .await?;
7934 1 : // Leave the timeline ID in [`TenantShard::timelines_creating`] to exclude attempting to create it again
7935 1 :             // Leave the timeline ID in [`TenantShard::timelines_creating`] so that we don't attempt to create it again
7936 1 : raw_tline
7937 1 : .shutdown(super::timeline::ShutdownMode::Hard)
7938 1 : .instrument(info_span!("test_shutdown", tenant_id=%raw_tline.tenant_shard_id, shard_id=%raw_tline.tenant_shard_id.shard_slug(), timeline_id=%TIMELINE_ID))
7939 1 : .await;
7940 1 : std::mem::forget(tline);
7941 1 : }
7942 1 :
7943 1 : let (tenant, _) = harness.load().await;
7944 1 : match tenant.get_timeline(TIMELINE_ID, false) {
7945 1 : Ok(_) => panic!("timeline should've been removed during load"),
7946 1 : Err(e) => {
7947 1 : assert_eq!(
7948 1 : e,
7949 1 : GetTimelineError::NotFound {
7950 1 : tenant_id: tenant.tenant_shard_id,
7951 1 : timeline_id: TIMELINE_ID,
7952 1 : }
7953 1 : )
7954 1 : }
7955 1 : }
7956 1 :
7957 1 : assert!(
7958 1 : !harness
7959 1 : .conf
7960 1 : .timeline_path(&tenant.tenant_shard_id, &TIMELINE_ID)
7961 1 : .exists()
7962 1 : );
7963 1 :
7964 1 : Ok(())
7965 1 : }
7966 :
7967 : #[tokio::test]
7968 1 : async fn test_read_at_max_lsn() -> anyhow::Result<()> {
7969 1 : let names_algorithms = [
7970 1 : ("test_read_at_max_lsn_legacy", CompactionAlgorithm::Legacy),
7971 1 : ("test_read_at_max_lsn_tiered", CompactionAlgorithm::Tiered),
7972 1 : ];
7973 3 : for (name, algorithm) in names_algorithms {
7974 2 : test_read_at_max_lsn_algorithm(name, algorithm).await?;
7975 1 : }
7976 1 : Ok(())
7977 1 : }
7978 :
7979 2 : async fn test_read_at_max_lsn_algorithm(
7980 2 : name: &'static str,
7981 2 : compaction_algorithm: CompactionAlgorithm,
7982 2 : ) -> anyhow::Result<()> {
7983 2 : let mut harness = TenantHarness::create(name).await?;
7984 2 : harness.tenant_conf.compaction_algorithm = Some(CompactionAlgorithmSettings {
7985 2 : kind: compaction_algorithm,
7986 2 : });
7987 2 : let (tenant, ctx) = harness.load().await;
7988 2 : let tline = tenant
7989 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
7990 2 : .await?;
7991 :
7992 2 : let lsn = Lsn(0x10);
7993 2 : let compact = false;
7994 2 : bulk_insert_maybe_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000, compact).await?;
7995 :
7996 2 : let test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
7997 2 : let read_lsn = Lsn(u64::MAX - 1);
7998 :
7999 2 : let result = tline.get(test_key, read_lsn, &ctx).await;
8000 2 : assert!(result.is_ok(), "result is not Ok: {}", result.unwrap_err());
8001 :
8002 2 : Ok(())
8003 2 : }
8004 :
8005 : #[tokio::test]
8006 1 : async fn test_metadata_scan() -> anyhow::Result<()> {
8007 1 : let harness = TenantHarness::create("test_metadata_scan").await?;
8008 1 : let (tenant, ctx) = harness.load().await;
8009 1 : let io_concurrency = IoConcurrency::spawn_for_test();
8010 1 : let tline = tenant
8011 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
8012 1 : .await?;
8013 1 :
8014 1 : const NUM_KEYS: usize = 1000;
8015 1 : const STEP: usize = 10000; // random update + scan base_key + idx * STEP
8016 1 :
8017 1 : let cancel = CancellationToken::new();
8018 1 :
8019 1 : let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
8020 1 : base_key.field1 = AUX_KEY_PREFIX;
8021 1 : let mut test_key = base_key;
8022 1 :
8023 1 : // Track when each page was last modified. Used to assert that
8024 1 : // a read sees the latest page version.
8025 1 : let mut updated = [Lsn(0); NUM_KEYS];
8026 1 :
8027 1 : let mut lsn = Lsn(0x10);
8028 1 : #[allow(clippy::needless_range_loop)]
8029 1001 : for blknum in 0..NUM_KEYS {
8030 1000 : lsn = Lsn(lsn.0 + 0x10);
8031 1000 : test_key.field6 = (blknum * STEP) as u32;
8032 1000 : let mut writer = tline.writer().await;
8033 1000 : writer
8034 1000 : .put(
8035 1000 : test_key,
8036 1000 : lsn,
8037 1000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
8038 1000 : &ctx,
8039 1000 : )
8040 1000 : .await?;
8041 1000 : writer.finish_write(lsn);
8042 1000 : updated[blknum] = lsn;
8043 1000 : drop(writer);
8044 1 : }
8045 1 :
8046 1 : let keyspace = KeySpace::single(base_key..base_key.add((NUM_KEYS * STEP) as u32));
8047 1 :
8048 12 : for iter in 0..=10 {
8049 1 : // Read all the blocks
8050 11000 : for (blknum, last_lsn) in updated.iter().enumerate() {
8051 11000 : test_key.field6 = (blknum * STEP) as u32;
8052 11000 : assert_eq!(
8053 11000 : tline.get(test_key, lsn, &ctx).await?,
8054 11000 : test_img(&format!("{} at {}", blknum, last_lsn))
8055 1 : );
8056 1 : }
8057 1 :
8058 11 : let mut cnt = 0;
8059 11 : let query = VersionedKeySpaceQuery::uniform(keyspace.clone(), lsn);
8060 1 :
8061 11000 : for (key, value) in tline
8062 11 : .get_vectored_impl(
8063 11 : query,
8064 11 : &mut ValuesReconstructState::new(io_concurrency.clone()),
8065 11 : &ctx,
8066 11 : )
8067 11 : .await?
8068 1 : {
8069 11000 : let blknum = key.field6 as usize;
8070 11000 : let value = value?;
8071 11000 : assert!(blknum % STEP == 0);
8072 11000 : let blknum = blknum / STEP;
8073 11000 : assert_eq!(
8074 11000 : value,
8075 11000 : test_img(&format!("{} at {}", blknum, updated[blknum]))
8076 11000 : );
8077 11000 : cnt += 1;
8078 1 : }
8079 1 :
8080 11 : assert_eq!(cnt, NUM_KEYS);
8081 1 :
8082 11011 : for _ in 0..NUM_KEYS {
8083 11000 : lsn = Lsn(lsn.0 + 0x10);
8084 11000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
8085 11000 : test_key.field6 = (blknum * STEP) as u32;
8086 11000 : let mut writer = tline.writer().await;
8087 11000 : writer
8088 11000 : .put(
8089 11000 : test_key,
8090 11000 : lsn,
8091 11000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
8092 11000 : &ctx,
8093 11000 : )
8094 11000 : .await?;
8095 11000 : writer.finish_write(lsn);
8096 11000 : drop(writer);
8097 11000 : updated[blknum] = lsn;
8098 1 : }
8099 1 :
8100 1 : // Perform two cycles of flush, compact, and GC
8101 33 : for round in 0..2 {
8102 22 : tline.freeze_and_flush().await?;
8103 22 : tline
8104 22 : .compact(
8105 22 : &cancel,
8106 22 : if iter % 5 == 0 && round == 0 {
8107 3 : let mut flags = EnumSet::new();
8108 3 : flags.insert(CompactFlags::ForceImageLayerCreation);
8109 3 : flags.insert(CompactFlags::ForceRepartition);
8110 3 : flags
8111 1 : } else {
8112 19 : EnumSet::empty()
8113 1 : },
8114 22 : &ctx,
8115 22 : )
8116 22 : .await?;
8117 22 : tenant
8118 22 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
8119 22 : .await?;
8120 1 : }
8121 1 : }
8122 1 :
8123 1 : Ok(())
8124 1 : }
8125 :
8126 : #[tokio::test]
8127 1 : async fn test_metadata_compaction_trigger() -> anyhow::Result<()> {
8128 1 : let harness = TenantHarness::create("test_metadata_compaction_trigger").await?;
8129 1 : let (tenant, ctx) = harness.load().await;
8130 1 : let tline = tenant
8131 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
8132 1 : .await?;
8133 1 :
8134 1 : let cancel = CancellationToken::new();
8135 1 :
8136 1 : let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
8137 1 : base_key.field1 = AUX_KEY_PREFIX;
8138 1 : let test_key = base_key;
8139 1 : let mut lsn = Lsn(0x10);
8140 1 :
8141 21 : for _ in 0..20 {
8142 20 : lsn = Lsn(lsn.0 + 0x10);
8143 20 : let mut writer = tline.writer().await;
8144 20 : writer
8145 20 : .put(
8146 20 : test_key,
8147 20 : lsn,
8148 20 : &Value::Image(test_img(&format!("{} at {}", 0, lsn))),
8149 20 : &ctx,
8150 20 : )
8151 20 : .await?;
8152 20 : writer.finish_write(lsn);
8153 20 : drop(writer);
8154 20 : tline.freeze_and_flush().await?; // force create a delta layer
8155 1 : }
8156 1 :
8157 1 : let before_num_l0_delta_files =
8158 1 : tline.layers.read().await.layer_map()?.level0_deltas().len();
8159 1 :
8160 1 : tline.compact(&cancel, EnumSet::default(), &ctx).await?;
8161 1 :
8162 1 : let after_num_l0_delta_files = tline.layers.read().await.layer_map()?.level0_deltas().len();
8163 1 :
8164 1 : assert!(
8165 1 : after_num_l0_delta_files < before_num_l0_delta_files,
8166 1 : "after_num_l0_delta_files={after_num_l0_delta_files}, before_num_l0_delta_files={before_num_l0_delta_files}"
8167 1 : );
8168 1 :
8169 1 : assert_eq!(
8170 1 : tline.get(test_key, lsn, &ctx).await?,
8171 1 : test_img(&format!("{} at {}", 0, lsn))
8172 1 : );
8173 1 :
8174 1 : Ok(())
8175 1 : }
8176 :
8177 : #[tokio::test]
8178 1 : async fn test_aux_file_e2e() {
8179 1 : let harness = TenantHarness::create("test_aux_file_e2e").await.unwrap();
8180 1 :
8181 1 : let (tenant, ctx) = harness.load().await;
8182 1 : let io_concurrency = IoConcurrency::spawn_for_test();
8183 1 :
8184 1 : let mut lsn = Lsn(0x08);
8185 1 :
8186 1 : let tline: Arc<Timeline> = tenant
8187 1 : .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx)
8188 1 : .await
8189 1 : .unwrap();
8190 1 :
8191 1 : {
8192 1 : lsn += 8;
8193 1 : let mut modification = tline.begin_modification(lsn);
8194 1 : modification
8195 1 : .put_file("pg_logical/mappings/test1", b"first", &ctx)
8196 1 : .await
8197 1 : .unwrap();
8198 1 : modification.commit(&ctx).await.unwrap();
8199 1 : }
8200 1 :
8201 1 : // we can read everything from the storage
8202 1 : let files = tline
8203 1 : .list_aux_files(lsn, &ctx, io_concurrency.clone())
8204 1 : .await
8205 1 : .unwrap();
8206 1 : assert_eq!(
8207 1 : files.get("pg_logical/mappings/test1"),
8208 1 : Some(&bytes::Bytes::from_static(b"first"))
8209 1 : );
8210 1 :
8211 1 : {
8212 1 : lsn += 8;
8213 1 : let mut modification = tline.begin_modification(lsn);
8214 1 : modification
8215 1 : .put_file("pg_logical/mappings/test2", b"second", &ctx)
8216 1 : .await
8217 1 : .unwrap();
8218 1 : modification.commit(&ctx).await.unwrap();
8219 1 : }
8220 1 :
8221 1 : let files = tline
8222 1 : .list_aux_files(lsn, &ctx, io_concurrency.clone())
8223 1 : .await
8224 1 : .unwrap();
8225 1 : assert_eq!(
8226 1 : files.get("pg_logical/mappings/test2"),
8227 1 : Some(&bytes::Bytes::from_static(b"second"))
8228 1 : );
8229 1 :
8230 1 : let child = tenant
8231 1 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(lsn), &ctx)
8232 1 : .await
8233 1 : .unwrap();
8234 1 :
8235 1 : let files = child
8236 1 : .list_aux_files(lsn, &ctx, io_concurrency.clone())
8237 1 : .await
8238 1 : .unwrap();
8239 1 : assert_eq!(files.get("pg_logical/mappings/test1"), None);
8240 1 : assert_eq!(files.get("pg_logical/mappings/test2"), None);
8241 1 : }
8242 :
8243 : #[tokio::test]
8244 1 : async fn test_repl_origin_tombstones() {
8245 1 : let harness = TenantHarness::create("test_repl_origin_tombstones")
8246 1 : .await
8247 1 : .unwrap();
8248 1 :
8249 1 : let (tenant, ctx) = harness.load().await;
8250 1 : let io_concurrency = IoConcurrency::spawn_for_test();
8251 1 :
8252 1 : let mut lsn = Lsn(0x08);
8253 1 :
8254 1 : let tline: Arc<Timeline> = tenant
8255 1 : .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx)
8256 1 : .await
8257 1 : .unwrap();
8258 1 :
8259 1 : let repl_lsn = Lsn(0x10);
8260 1 : {
8261 1 : lsn += 8;
8262 1 : let mut modification = tline.begin_modification(lsn);
8263 1 : modification.put_for_unit_test(repl_origin_key(2), Value::Image(Bytes::new()));
8264 1 : modification.set_replorigin(1, repl_lsn).await.unwrap();
8265 1 : modification.commit(&ctx).await.unwrap();
8266 1 : }
8267 1 :
8268 1 : // we can read everything from the storage
8269 1 : let repl_origins = tline
8270 1 : .get_replorigins(lsn, &ctx, io_concurrency.clone())
8271 1 : .await
8272 1 : .unwrap();
8273 1 : assert_eq!(repl_origins.len(), 1);
8274 1 : assert_eq!(repl_origins[&1], lsn);
8275 1 :
8276 1 : {
8277 1 : lsn += 8;
8278 1 : let mut modification = tline.begin_modification(lsn);
8279 1 : modification.put_for_unit_test(
8280 1 : repl_origin_key(3),
8281 1 : Value::Image(Bytes::copy_from_slice(b"cannot_decode_this")),
8282 1 : );
8283 1 : modification.commit(&ctx).await.unwrap();
8284 1 : }
8285 1 : let result = tline
8286 1 : .get_replorigins(lsn, &ctx, io_concurrency.clone())
8287 1 : .await;
8288 1 : assert!(result.is_err());
8289 1 : }
8290 :
8291 : #[tokio::test]
8292 1 : async fn test_metadata_image_creation() -> anyhow::Result<()> {
8293 1 : let harness = TenantHarness::create("test_metadata_image_creation").await?;
8294 1 : let (tenant, ctx) = harness.load().await;
8295 1 : let io_concurrency = IoConcurrency::spawn_for_test();
8296 1 : let tline = tenant
8297 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
8298 1 : .await?;
8299 1 :
8300 1 : const NUM_KEYS: usize = 1000;
8301 1 : const STEP: usize = 10000; // random update + scan base_key + idx * STEP
8302 1 :
8303 1 : let cancel = CancellationToken::new();
8304 1 :
8305 1 : let base_key = Key::from_hex("620000000033333333444444445500000000").unwrap();
8306 1 : assert_eq!(base_key.field1, AUX_KEY_PREFIX); // in case someone accidentally changed the prefix...
8307 1 : let mut test_key = base_key;
8308 1 : let mut lsn = Lsn(0x10);
8309 1 :
8310 4 : async fn scan_with_statistics(
8311 4 : tline: &Timeline,
8312 4 : keyspace: &KeySpace,
8313 4 : lsn: Lsn,
8314 4 : ctx: &RequestContext,
8315 4 : io_concurrency: IoConcurrency,
8316 4 : ) -> anyhow::Result<(BTreeMap<Key, Result<Bytes, PageReconstructError>>, usize)> {
8317 4 : let mut reconstruct_state = ValuesReconstructState::new(io_concurrency);
8318 4 : let query = VersionedKeySpaceQuery::uniform(keyspace.clone(), lsn);
8319 4 : let res = tline
8320 4 : .get_vectored_impl(query, &mut reconstruct_state, ctx)
8321 4 : .await?;
8322 4 : Ok((res, reconstruct_state.get_delta_layers_visited() as usize))
8323 4 : }
8324 1 :
8325 1001 : for blknum in 0..NUM_KEYS {
8326 1000 : lsn = Lsn(lsn.0 + 0x10);
8327 1000 : test_key.field6 = (blknum * STEP) as u32;
8328 1000 : let mut writer = tline.writer().await;
8329 1000 : writer
8330 1000 : .put(
8331 1000 : test_key,
8332 1000 : lsn,
8333 1000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
8334 1000 : &ctx,
8335 1000 : )
8336 1000 : .await?;
8337 1000 : writer.finish_write(lsn);
8338 1000 : drop(writer);
8339 1 : }
8340 1 :
8341 1 : let keyspace = KeySpace::single(base_key..base_key.add((NUM_KEYS * STEP) as u32));
8342 1 :
8343 11 : for iter in 1..=10 {
8344 10010 : for _ in 0..NUM_KEYS {
8345 10000 : lsn = Lsn(lsn.0 + 0x10);
8346 10000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
8347 10000 : test_key.field6 = (blknum * STEP) as u32;
8348 10000 : let mut writer = tline.writer().await;
8349 10000 : writer
8350 10000 : .put(
8351 10000 : test_key,
8352 10000 : lsn,
8353 10000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
8354 10000 : &ctx,
8355 10000 : )
8356 10000 : .await?;
8357 10000 : writer.finish_write(lsn);
8358 10000 : drop(writer);
8359 1 : }
8360 1 :
8361 10 : tline.freeze_and_flush().await?;
8362 1 :
8363 10 : if iter % 5 == 0 {
8364 2 : let (_, before_delta_file_accessed) =
8365 2 : scan_with_statistics(&tline, &keyspace, lsn, &ctx, io_concurrency.clone())
8366 2 : .await?;
8367 2 : tline
8368 2 : .compact(
8369 2 : &cancel,
8370 2 : {
8371 2 : let mut flags = EnumSet::new();
8372 2 : flags.insert(CompactFlags::ForceImageLayerCreation);
8373 2 : flags.insert(CompactFlags::ForceRepartition);
8374 2 : flags
8375 2 : },
8376 2 : &ctx,
8377 2 : )
8378 2 : .await?;
8379 2 : let (_, after_delta_file_accessed) =
8380 2 : scan_with_statistics(&tline, &keyspace, lsn, &ctx, io_concurrency.clone())
8381 2 : .await?;
8382 2 : assert!(
8383 2 : after_delta_file_accessed < before_delta_file_accessed,
8384 1 : "after_delta_file_accessed={after_delta_file_accessed}, before_delta_file_accessed={before_delta_file_accessed}"
8385 1 : );
8386 1 :                 // Given that we already produced an image layer, there should be no delta layer needed for the scan, but we still set a low threshold to allow for unforeseen circumstances.
8387 2 : assert!(
8388 2 : after_delta_file_accessed <= 2,
8389 1 : "after_delta_file_accessed={after_delta_file_accessed}"
8390 1 : );
8391 8 : }
8392 1 : }
8393 1 :
8394 1 : Ok(())
8395 1 : }
8396 :
8397 : #[tokio::test]
8398 1 : async fn test_vectored_missing_data_key_reads() -> anyhow::Result<()> {
8399 1 : let harness = TenantHarness::create("test_vectored_missing_data_key_reads").await?;
8400 1 : let (tenant, ctx) = harness.load().await;
8401 1 :
8402 1 : let base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
8403 1 : let base_key_child = Key::from_hex("000000000033333333444444445500000001").unwrap();
8404 1 : let base_key_nonexist = Key::from_hex("000000000033333333444444445500000002").unwrap();
8405 1 :
8406 1 : let tline = tenant
8407 1 : .create_test_timeline_with_layers(
8408 1 : TIMELINE_ID,
8409 1 : Lsn(0x10),
8410 1 : DEFAULT_PG_VERSION,
8411 1 : &ctx,
8412 1 : Vec::new(), // in-memory layers
8413 1 : Vec::new(), // delta layers
8414 1 : vec![(Lsn(0x20), vec![(base_key, test_img("data key 1"))])], // image layers
8415 1 : Lsn(0x20), // it's fine to not advance LSN to 0x30 while using 0x30 to get below because `get_vectored_impl` does not wait for LSN
8416 1 : )
8417 1 : .await?;
8418 1 : tline.add_extra_test_dense_keyspace(KeySpace::single(base_key..(base_key_nonexist.next())));
8419 1 :
8420 1 : let child = tenant
8421 1 : .branch_timeline_test_with_layers(
8422 1 : &tline,
8423 1 : NEW_TIMELINE_ID,
8424 1 : Some(Lsn(0x20)),
8425 1 : &ctx,
8426 1 : Vec::new(), // delta layers
8427 1 : vec![(Lsn(0x30), vec![(base_key_child, test_img("data key 2"))])], // image layers
8428 1 : Lsn(0x30),
8429 1 : )
8430 1 : .await
8431 1 : .unwrap();
8432 1 :
8433 1 : let lsn = Lsn(0x30);
8434 1 :
8435 1 : // test vectored get on parent timeline
8436 1 : assert_eq!(
8437 1 : get_vectored_impl_wrapper(&tline, base_key, lsn, &ctx).await?,
8438 1 : Some(test_img("data key 1"))
8439 1 : );
8440 1 : assert!(
8441 1 : get_vectored_impl_wrapper(&tline, base_key_child, lsn, &ctx)
8442 1 : .await
8443 1 : .unwrap_err()
8444 1 : .is_missing_key_error()
8445 1 : );
8446 1 : assert!(
8447 1 : get_vectored_impl_wrapper(&tline, base_key_nonexist, lsn, &ctx)
8448 1 : .await
8449 1 : .unwrap_err()
8450 1 : .is_missing_key_error()
8451 1 : );
8452 1 :
8453 1 : // test vectored get on child timeline
8454 1 : assert_eq!(
8455 1 : get_vectored_impl_wrapper(&child, base_key, lsn, &ctx).await?,
8456 1 : Some(test_img("data key 1"))
8457 1 : );
8458 1 : assert_eq!(
8459 1 : get_vectored_impl_wrapper(&child, base_key_child, lsn, &ctx).await?,
8460 1 : Some(test_img("data key 2"))
8461 1 : );
8462 1 : assert!(
8463 1 : get_vectored_impl_wrapper(&child, base_key_nonexist, lsn, &ctx)
8464 1 : .await
8465 1 : .unwrap_err()
8466 1 : .is_missing_key_error()
8467 1 : );
8468 1 :
8469 1 : Ok(())
8470 1 : }
8471 :
8472 : #[tokio::test]
8473 1 : async fn test_vectored_missing_metadata_key_reads() -> anyhow::Result<()> {
8474 1 : let harness = TenantHarness::create("test_vectored_missing_metadata_key_reads").await?;
8475 1 : let (tenant, ctx) = harness.load().await;
8476 1 : let io_concurrency = IoConcurrency::spawn_for_test();
8477 1 :
8478 1 : let base_key = Key::from_hex("620000000033333333444444445500000000").unwrap();
8479 1 : let base_key_child = Key::from_hex("620000000033333333444444445500000001").unwrap();
8480 1 : let base_key_nonexist = Key::from_hex("620000000033333333444444445500000002").unwrap();
8481 1 : let base_key_overwrite = Key::from_hex("620000000033333333444444445500000003").unwrap();
8482 1 :
8483 1 : let base_inherited_key = Key::from_hex("610000000033333333444444445500000000").unwrap();
8484 1 : let base_inherited_key_child =
8485 1 : Key::from_hex("610000000033333333444444445500000001").unwrap();
8486 1 : let base_inherited_key_nonexist =
8487 1 : Key::from_hex("610000000033333333444444445500000002").unwrap();
8488 1 : let base_inherited_key_overwrite =
8489 1 : Key::from_hex("610000000033333333444444445500000003").unwrap();
8490 1 :
8491 1 : assert_eq!(base_key.field1, AUX_KEY_PREFIX); // in case someone accidentally changed the prefix...
8492 1 : assert_eq!(base_inherited_key.field1, RELATION_SIZE_PREFIX);
8493 1 :
8494 1 : let tline = tenant
8495 1 : .create_test_timeline_with_layers(
8496 1 : TIMELINE_ID,
8497 1 : Lsn(0x10),
8498 1 : DEFAULT_PG_VERSION,
8499 1 : &ctx,
8500 1 : Vec::new(), // in-memory layers
8501 1 : Vec::new(), // delta layers
8502 1 : vec![(
8503 1 : Lsn(0x20),
8504 1 : vec![
8505 1 : (base_inherited_key, test_img("metadata inherited key 1")),
8506 1 : (
8507 1 : base_inherited_key_overwrite,
8508 1 : test_img("metadata key overwrite 1a"),
8509 1 : ),
8510 1 : (base_key, test_img("metadata key 1")),
8511 1 : (base_key_overwrite, test_img("metadata key overwrite 1b")),
8512 1 : ],
8513 1 : )], // image layers
8514 1 : Lsn(0x20), // it's fine to not advance LSN to 0x30 while using 0x30 to get below because `get_vectored_impl` does not wait for LSN
8515 1 : )
8516 1 : .await?;
8517 1 :
8518 1 : let child = tenant
8519 1 : .branch_timeline_test_with_layers(
8520 1 : &tline,
8521 1 : NEW_TIMELINE_ID,
8522 1 : Some(Lsn(0x20)),
8523 1 : &ctx,
8524 1 : Vec::new(), // delta layers
8525 1 : vec![(
8526 1 : Lsn(0x30),
8527 1 : vec![
8528 1 : (
8529 1 : base_inherited_key_child,
8530 1 : test_img("metadata inherited key 2"),
8531 1 : ),
8532 1 : (
8533 1 : base_inherited_key_overwrite,
8534 1 : test_img("metadata key overwrite 2a"),
8535 1 : ),
8536 1 : (base_key_child, test_img("metadata key 2")),
8537 1 : (base_key_overwrite, test_img("metadata key overwrite 2b")),
8538 1 : ],
8539 1 : )], // image layers
8540 1 : Lsn(0x30),
8541 1 : )
8542 1 : .await
8543 1 : .unwrap();
8544 1 :
8545 1 : let lsn = Lsn(0x30);
8546 1 :
8547 1 : // test vectored get on parent timeline
8548 1 : assert_eq!(
8549 1 : get_vectored_impl_wrapper(&tline, base_key, lsn, &ctx).await?,
8550 1 : Some(test_img("metadata key 1"))
8551 1 : );
8552 1 : assert_eq!(
8553 1 : get_vectored_impl_wrapper(&tline, base_key_child, lsn, &ctx).await?,
8554 1 : None
8555 1 : );
8556 1 : assert_eq!(
8557 1 : get_vectored_impl_wrapper(&tline, base_key_nonexist, lsn, &ctx).await?,
8558 1 : None
8559 1 : );
8560 1 : assert_eq!(
8561 1 : get_vectored_impl_wrapper(&tline, base_key_overwrite, lsn, &ctx).await?,
8562 1 : Some(test_img("metadata key overwrite 1b"))
8563 1 : );
8564 1 : assert_eq!(
8565 1 : get_vectored_impl_wrapper(&tline, base_inherited_key, lsn, &ctx).await?,
8566 1 : Some(test_img("metadata inherited key 1"))
8567 1 : );
8568 1 : assert_eq!(
8569 1 : get_vectored_impl_wrapper(&tline, base_inherited_key_child, lsn, &ctx).await?,
8570 1 : None
8571 1 : );
8572 1 : assert_eq!(
8573 1 : get_vectored_impl_wrapper(&tline, base_inherited_key_nonexist, lsn, &ctx).await?,
8574 1 : None
8575 1 : );
8576 1 : assert_eq!(
8577 1 : get_vectored_impl_wrapper(&tline, base_inherited_key_overwrite, lsn, &ctx).await?,
8578 1 : Some(test_img("metadata key overwrite 1a"))
8579 1 : );
8580 1 :
8581 1 : // test vectored get on child timeline
8582 1 : assert_eq!(
8583 1 : get_vectored_impl_wrapper(&child, base_key, lsn, &ctx).await?,
8584 1 : None
8585 1 : );
8586 1 : assert_eq!(
8587 1 : get_vectored_impl_wrapper(&child, base_key_child, lsn, &ctx).await?,
8588 1 : Some(test_img("metadata key 2"))
8589 1 : );
8590 1 : assert_eq!(
8591 1 : get_vectored_impl_wrapper(&child, base_key_nonexist, lsn, &ctx).await?,
8592 1 : None
8593 1 : );
8594 1 : assert_eq!(
8595 1 : get_vectored_impl_wrapper(&child, base_inherited_key, lsn, &ctx).await?,
8596 1 : Some(test_img("metadata inherited key 1"))
8597 1 : );
8598 1 : assert_eq!(
8599 1 : get_vectored_impl_wrapper(&child, base_inherited_key_child, lsn, &ctx).await?,
8600 1 : Some(test_img("metadata inherited key 2"))
8601 1 : );
8602 1 : assert_eq!(
8603 1 : get_vectored_impl_wrapper(&child, base_inherited_key_nonexist, lsn, &ctx).await?,
8604 1 : None
8605 1 : );
8606 1 : assert_eq!(
8607 1 : get_vectored_impl_wrapper(&child, base_key_overwrite, lsn, &ctx).await?,
8608 1 : Some(test_img("metadata key overwrite 2b"))
8609 1 : );
8610 1 : assert_eq!(
8611 1 : get_vectored_impl_wrapper(&child, base_inherited_key_overwrite, lsn, &ctx).await?,
8612 1 : Some(test_img("metadata key overwrite 2a"))
8613 1 : );
8614 1 :
8615 1 : // test vectored scan on parent timeline
8616 1 : let mut reconstruct_state = ValuesReconstructState::new(io_concurrency.clone());
8617 1 : let query =
8618 1 : VersionedKeySpaceQuery::uniform(KeySpace::single(Key::metadata_key_range()), lsn);
8619 1 : let res = tline
8620 1 : .get_vectored_impl(query, &mut reconstruct_state, &ctx)
8621 1 : .await?;
8622 1 :
8623 1 : assert_eq!(
8624 1 : res.into_iter()
8625 4 : .map(|(k, v)| (k, v.unwrap()))
8626 1 : .collect::<Vec<_>>(),
8627 1 : vec![
8628 1 : (base_inherited_key, test_img("metadata inherited key 1")),
8629 1 : (
8630 1 : base_inherited_key_overwrite,
8631 1 : test_img("metadata key overwrite 1a")
8632 1 : ),
8633 1 : (base_key, test_img("metadata key 1")),
8634 1 : (base_key_overwrite, test_img("metadata key overwrite 1b")),
8635 1 : ]
8636 1 : );
8637 1 :
8638 1 : // test vectored scan on child timeline
8639 1 : let mut reconstruct_state = ValuesReconstructState::new(io_concurrency.clone());
8640 1 : let query =
8641 1 : VersionedKeySpaceQuery::uniform(KeySpace::single(Key::metadata_key_range()), lsn);
8642 1 : let res = child
8643 1 : .get_vectored_impl(query, &mut reconstruct_state, &ctx)
8644 1 : .await?;
8645 1 :
8646 1 : assert_eq!(
8647 1 : res.into_iter()
8648 5 : .map(|(k, v)| (k, v.unwrap()))
8649 1 : .collect::<Vec<_>>(),
8650 1 : vec![
8651 1 : (base_inherited_key, test_img("metadata inherited key 1")),
8652 1 : (
8653 1 : base_inherited_key_child,
8654 1 : test_img("metadata inherited key 2")
8655 1 : ),
8656 1 : (
8657 1 : base_inherited_key_overwrite,
8658 1 : test_img("metadata key overwrite 2a")
8659 1 : ),
8660 1 : (base_key_child, test_img("metadata key 2")),
8661 1 : (base_key_overwrite, test_img("metadata key overwrite 2b")),
8662 1 : ]
8663 1 : );
8664 1 :
8665 1 : Ok(())
8666 1 : }
8667 :
8668 28 : async fn get_vectored_impl_wrapper(
8669 28 : tline: &Arc<Timeline>,
8670 28 : key: Key,
8671 28 : lsn: Lsn,
8672 28 : ctx: &RequestContext,
8673 28 : ) -> Result<Option<Bytes>, GetVectoredError> {
8674 28 : let io_concurrency = IoConcurrency::spawn_from_conf(
8675 28 : tline.conf.get_vectored_concurrent_io,
8676 28 : tline.gate.enter().unwrap(),
8677 28 : );
8678 28 : let mut reconstruct_state = ValuesReconstructState::new(io_concurrency);
8679 28 : let query = VersionedKeySpaceQuery::uniform(KeySpace::single(key..key.next()), lsn);
8680 28 : let mut res = tline
8681 28 : .get_vectored_impl(query, &mut reconstruct_state, ctx)
8682 28 : .await?;
8683 25 : Ok(res.pop_last().map(|(k, v)| {
8684 16 : assert_eq!(k, key);
8685 16 : v.unwrap()
8686 25 : }))
8687 28 : }
8688 :
8689 : #[tokio::test]
8690 1 : async fn test_metadata_tombstone_reads() -> anyhow::Result<()> {
8691 1 : let harness = TenantHarness::create("test_metadata_tombstone_reads").await?;
8692 1 : let (tenant, ctx) = harness.load().await;
8693 1 : let key0 = Key::from_hex("620000000033333333444444445500000000").unwrap();
8694 1 : let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap();
8695 1 : let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap();
8696 1 : let key3 = Key::from_hex("620000000033333333444444445500000003").unwrap();
8697 1 :
8698 1 :         // We emulate the situation where the compaction algorithm creates an image layer that removes the tombstones
8699 1 : // Lsn 0x30 key0, key3, no key1+key2
8700 1 : // Lsn 0x20 key1+key2 tomestones
8701 1 :         // Lsn 0x20 key1+key2 tombstones
8702 1 : let tline = tenant
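         :         //
         :         // With that layout, the assertions at the end of this test expect:
         :         //   * at Lsn 0x30, key1 and key2 resolve to nothing (the image layer at 0x30
         :         //     dropped the tombstoned keys), while key0 and key3 return their images;
         :         //   * at Lsn 0x20, key1 and key2 still resolve to the empty-image tombstone
         :         //     (`Bytes::new()`), because the delta layers at 0x20 are visible there.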
8703 1 : .create_test_timeline_with_layers(
8704 1 : TIMELINE_ID,
8705 1 : Lsn(0x10),
8706 1 : DEFAULT_PG_VERSION,
8707 1 : &ctx,
8708 1 : Vec::new(), // in-memory layers
8709 1 : // delta layers
8710 1 : vec![
8711 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8712 1 : Lsn(0x10)..Lsn(0x20),
8713 1 : vec![(key2, Lsn(0x10), Value::Image(test_img("metadata key 2")))],
8714 1 : ),
8715 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8716 1 : Lsn(0x20)..Lsn(0x30),
8717 1 : vec![(key1, Lsn(0x20), Value::Image(Bytes::new()))],
8718 1 : ),
8719 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8720 1 : Lsn(0x20)..Lsn(0x30),
8721 1 : vec![(key2, Lsn(0x20), Value::Image(Bytes::new()))],
8722 1 : ),
8723 1 : ],
8724 1 : // image layers
8725 1 : vec![
8726 1 : (Lsn(0x10), vec![(key1, test_img("metadata key 1"))]),
8727 1 : (
8728 1 : Lsn(0x30),
8729 1 : vec![
8730 1 : (key0, test_img("metadata key 0")),
8731 1 : (key3, test_img("metadata key 3")),
8732 1 : ],
8733 1 : ),
8734 1 : ],
8735 1 : Lsn(0x30),
8736 1 : )
8737 1 : .await?;
8738 1 :
8739 1 : let lsn = Lsn(0x30);
8740 1 : let old_lsn = Lsn(0x20);
8741 1 :
8742 1 : assert_eq!(
8743 1 : get_vectored_impl_wrapper(&tline, key0, lsn, &ctx).await?,
8744 1 : Some(test_img("metadata key 0"))
8745 1 : );
8746 1 : assert_eq!(
8747 1 : get_vectored_impl_wrapper(&tline, key1, lsn, &ctx).await?,
8748 1 : None,
8749 1 : );
8750 1 : assert_eq!(
8751 1 : get_vectored_impl_wrapper(&tline, key2, lsn, &ctx).await?,
8752 1 : None,
8753 1 : );
8754 1 : assert_eq!(
8755 1 : get_vectored_impl_wrapper(&tline, key1, old_lsn, &ctx).await?,
8756 1 : Some(Bytes::new()),
8757 1 : );
8758 1 : assert_eq!(
8759 1 : get_vectored_impl_wrapper(&tline, key2, old_lsn, &ctx).await?,
8760 1 : Some(Bytes::new()),
8761 1 : );
8762 1 : assert_eq!(
8763 1 : get_vectored_impl_wrapper(&tline, key3, lsn, &ctx).await?,
8764 1 : Some(test_img("metadata key 3"))
8765 1 : );
8766 1 :
8767 1 : Ok(())
8768 1 : }
8769 :
8770 : #[tokio::test]
8771 1 : async fn test_metadata_tombstone_image_creation() {
8772 1 : let harness = TenantHarness::create("test_metadata_tombstone_image_creation")
8773 1 : .await
8774 1 : .unwrap();
8775 1 : let (tenant, ctx) = harness.load().await;
8776 1 : let io_concurrency = IoConcurrency::spawn_for_test();
8777 1 :
8778 1 : let key0 = Key::from_hex("620000000033333333444444445500000000").unwrap();
8779 1 : let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap();
8780 1 : let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap();
8781 1 : let key3 = Key::from_hex("620000000033333333444444445500000003").unwrap();
8782 1 :
8783 1 : let tline = tenant
8784 1 : .create_test_timeline_with_layers(
8785 1 : TIMELINE_ID,
8786 1 : Lsn(0x10),
8787 1 : DEFAULT_PG_VERSION,
8788 1 : &ctx,
8789 1 : Vec::new(), // in-memory layers
8790 1 : // delta layers
8791 1 : vec![
8792 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8793 1 : Lsn(0x10)..Lsn(0x20),
8794 1 : vec![(key2, Lsn(0x10), Value::Image(test_img("metadata key 2")))],
8795 1 : ),
8796 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8797 1 : Lsn(0x20)..Lsn(0x30),
8798 1 : vec![(key1, Lsn(0x20), Value::Image(Bytes::new()))],
8799 1 : ),
8800 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8801 1 : Lsn(0x20)..Lsn(0x30),
8802 1 : vec![(key2, Lsn(0x20), Value::Image(Bytes::new()))],
8803 1 : ),
8804 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8805 1 : Lsn(0x30)..Lsn(0x40),
8806 1 : vec![
8807 1 : (key0, Lsn(0x30), Value::Image(test_img("metadata key 0"))),
8808 1 : (key3, Lsn(0x30), Value::Image(test_img("metadata key 3"))),
8809 1 : ],
8810 1 : ),
8811 1 : ],
8812 1 : // image layers
8813 1 : vec![(Lsn(0x10), vec![(key1, test_img("metadata key 1"))])],
8814 1 : Lsn(0x40),
8815 1 : )
8816 1 : .await
8817 1 : .unwrap();
8818 1 :
8819 1 : let cancel = CancellationToken::new();
8820 1 :
8821 1 : tline
8822 1 : .compact(
8823 1 : &cancel,
8824 1 : {
8825 1 : let mut flags = EnumSet::new();
8826 1 : flags.insert(CompactFlags::ForceImageLayerCreation);
8827 1 : flags.insert(CompactFlags::ForceRepartition);
8828 1 : flags
8829 1 : },
8830 1 : &ctx,
8831 1 : )
8832 1 : .await
8833 1 : .unwrap();
8834 1 :
8835 1 : // Image layers are created at last_record_lsn
8836 1 : let images = tline
8837 1 : .inspect_image_layers(Lsn(0x40), &ctx, io_concurrency.clone())
8838 1 : .await
8839 1 : .unwrap()
8840 1 : .into_iter()
8841 9 : .filter(|(k, _)| k.is_metadata_key())
8842 1 : .collect::<Vec<_>>();
8843 1 :         assert_eq!(images.len(), 2); // the image layer should contain only the two existing keys; tombstones should be removed.
8844 1 : }
8845 :
8846 : #[tokio::test]
8847 1 : async fn test_metadata_tombstone_empty_image_creation() {
8848 1 : let harness = TenantHarness::create("test_metadata_tombstone_empty_image_creation")
8849 1 : .await
8850 1 : .unwrap();
8851 1 : let (tenant, ctx) = harness.load().await;
8852 1 : let io_concurrency = IoConcurrency::spawn_for_test();
8853 1 :
8854 1 : let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap();
8855 1 : let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap();
8856 1 :
8857 1 : let tline = tenant
8858 1 : .create_test_timeline_with_layers(
8859 1 : TIMELINE_ID,
8860 1 : Lsn(0x10),
8861 1 : DEFAULT_PG_VERSION,
8862 1 : &ctx,
8863 1 : Vec::new(), // in-memory layers
8864 1 : // delta layers
8865 1 : vec![
8866 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8867 1 : Lsn(0x10)..Lsn(0x20),
8868 1 : vec![(key2, Lsn(0x10), Value::Image(test_img("metadata key 2")))],
8869 1 : ),
8870 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8871 1 : Lsn(0x20)..Lsn(0x30),
8872 1 : vec![(key1, Lsn(0x20), Value::Image(Bytes::new()))],
8873 1 : ),
8874 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
8875 1 : Lsn(0x20)..Lsn(0x30),
8876 1 : vec![(key2, Lsn(0x20), Value::Image(Bytes::new()))],
8877 1 : ),
8878 1 : ],
8879 1 : // image layers
8880 1 : vec![(Lsn(0x10), vec![(key1, test_img("metadata key 1"))])],
8881 1 : Lsn(0x30),
8882 1 : )
8883 1 : .await
8884 1 : .unwrap();
8885 1 :
8886 1 : let cancel = CancellationToken::new();
8887 1 :
8888 1 : tline
8889 1 : .compact(
8890 1 : &cancel,
8891 1 : {
8892 1 : let mut flags = EnumSet::new();
8893 1 : flags.insert(CompactFlags::ForceImageLayerCreation);
8894 1 : flags.insert(CompactFlags::ForceRepartition);
8895 1 : flags
8896 1 : },
8897 1 : &ctx,
8898 1 : )
8899 1 : .await
8900 1 : .unwrap();
8901 1 :
8902 1 : // Image layers are created at last_record_lsn
8903 1 : let images = tline
8904 1 : .inspect_image_layers(Lsn(0x30), &ctx, io_concurrency.clone())
8905 1 : .await
8906 1 : .unwrap()
8907 1 : .into_iter()
8908 7 : .filter(|(k, _)| k.is_metadata_key())
8909 1 : .collect::<Vec<_>>();
8910 1 : assert_eq!(images.len(), 0); // the image layer should not contain tombstones, or it is not created
8911 1 :         assert_eq!(images.len(), 0); // the image layer should contain no tombstones; if nothing remains, it is not created at all
8912 :
8913 : #[tokio::test]
8914 1 : async fn test_simple_bottom_most_compaction_images() -> anyhow::Result<()> {
8915 1 : let harness = TenantHarness::create("test_simple_bottom_most_compaction_images").await?;
8916 1 : let (tenant, ctx) = harness.load().await;
8917 1 : let io_concurrency = IoConcurrency::spawn_for_test();
8918 1 :
8919 51 : fn get_key(id: u32) -> Key {
8920 51 : // using aux key here b/c they are guaranteed to be inside `collect_keyspace`.
8921 51 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
8922 51 : key.field6 = id;
8923 51 : key
8924 51 : }
8925 1 :
8926 1 : // We create
8927 1 : // - one bottom-most image layer,
8928 1 : // - a delta layer D1 crossing the GC horizon with data below and above the horizon,
8929 1 : // - a delta layer D2 crossing the GC horizon with data only below the horizon,
8930 1 : // - a delta layer D3 above the horizon.
8931 1 : //
8932 1 : // | D3 |
8933 1 : // | D1 |
8934 1 : // -| |-- gc horizon -----------------
8935 1 : // | | | D2 |
8936 1 : // --------- img layer ------------------
8937 1 : //
8938 1 : // What we should expact from this compaction is:
8939 1 :         // What we should expect from this compaction is:
8940 1 : // | Part of D1 |
8941 1 : // --------- img layer with D1+D2 at GC horizon------------------
8942 1 :
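         :         //
         :         // Concretely, the layer-map assertion near the end of this test expects exactly
         :         // three persistent layers after the first gc-compaction pass:
         :         //   * an image layer covering Key::MIN..Key::MAX at Lsn 0x30..0x31 (the GC horizon),
         :         //   * a delta layer for key 3 at Lsn 0x30..0x48 (the surviving part of D1),
         :         //   * the untouched D3 delta layer for keys 8..10 at Lsn 0x48..0x50.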
8943 1 : // img layer at 0x10
8944 1 : let img_layer = (0..10)
8945 10 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
8946 1 : .collect_vec();
8947 1 :
8948 1 : let delta1 = vec![
8949 1 : (
8950 1 : get_key(1),
8951 1 : Lsn(0x20),
8952 1 : Value::Image(Bytes::from("value 1@0x20")),
8953 1 : ),
8954 1 : (
8955 1 : get_key(2),
8956 1 : Lsn(0x30),
8957 1 : Value::Image(Bytes::from("value 2@0x30")),
8958 1 : ),
8959 1 : (
8960 1 : get_key(3),
8961 1 : Lsn(0x40),
8962 1 : Value::Image(Bytes::from("value 3@0x40")),
8963 1 : ),
8964 1 : ];
8965 1 : let delta2 = vec![
8966 1 : (
8967 1 : get_key(5),
8968 1 : Lsn(0x20),
8969 1 : Value::Image(Bytes::from("value 5@0x20")),
8970 1 : ),
8971 1 : (
8972 1 : get_key(6),
8973 1 : Lsn(0x20),
8974 1 : Value::Image(Bytes::from("value 6@0x20")),
8975 1 : ),
8976 1 : ];
8977 1 : let delta3 = vec![
8978 1 : (
8979 1 : get_key(8),
8980 1 : Lsn(0x48),
8981 1 : Value::Image(Bytes::from("value 8@0x48")),
8982 1 : ),
8983 1 : (
8984 1 : get_key(9),
8985 1 : Lsn(0x48),
8986 1 : Value::Image(Bytes::from("value 9@0x48")),
8987 1 : ),
8988 1 : ];
8989 1 :
8990 1 : let tline = tenant
8991 1 : .create_test_timeline_with_layers(
8992 1 : TIMELINE_ID,
8993 1 : Lsn(0x10),
8994 1 : DEFAULT_PG_VERSION,
8995 1 : &ctx,
8996 1 : Vec::new(), // in-memory layers
8997 1 : vec![
8998 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x20)..Lsn(0x48), delta1),
8999 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x20)..Lsn(0x48), delta2),
9000 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x48)..Lsn(0x50), delta3),
9001 1 : ], // delta layers
9002 1 : vec![(Lsn(0x10), img_layer)], // image layers
9003 1 : Lsn(0x50),
9004 1 : )
9005 1 : .await?;
9006 1 : {
9007 1 : tline
9008 1 : .applied_gc_cutoff_lsn
9009 1 : .lock_for_write()
9010 1 : .store_and_unlock(Lsn(0x30))
9011 1 : .wait()
9012 1 : .await;
9013 1 : // Update GC info
9014 1 : let mut guard = tline.gc_info.write().unwrap();
9015 1 : guard.cutoffs.time = Some(Lsn(0x30));
9016 1 : guard.cutoffs.space = Lsn(0x30);
9017 1 : }
9018 1 :
9019 1 : let expected_result = [
9020 1 : Bytes::from_static(b"value 0@0x10"),
9021 1 : Bytes::from_static(b"value 1@0x20"),
9022 1 : Bytes::from_static(b"value 2@0x30"),
9023 1 : Bytes::from_static(b"value 3@0x40"),
9024 1 : Bytes::from_static(b"value 4@0x10"),
9025 1 : Bytes::from_static(b"value 5@0x20"),
9026 1 : Bytes::from_static(b"value 6@0x20"),
9027 1 : Bytes::from_static(b"value 7@0x10"),
9028 1 : Bytes::from_static(b"value 8@0x48"),
9029 1 : Bytes::from_static(b"value 9@0x48"),
9030 1 : ];
9031 1 :
9032 10 : for (idx, expected) in expected_result.iter().enumerate() {
9033 10 : assert_eq!(
9034 10 : tline
9035 10 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
9036 10 : .await
9037 10 : .unwrap(),
9038 1 : expected
9039 1 : );
9040 1 : }
9041 1 :
9042 1 : let cancel = CancellationToken::new();
9043 1 : tline
9044 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
9045 1 : .await
9046 1 : .unwrap();
9047 1 :
9048 10 : for (idx, expected) in expected_result.iter().enumerate() {
9049 10 : assert_eq!(
9050 10 : tline
9051 10 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
9052 10 : .await
9053 10 : .unwrap(),
9054 1 : expected
9055 1 : );
9056 1 : }
9057 1 :
9058 1 : // Check if the image layer at the GC horizon contains exactly what we want
9059 1 : let image_at_gc_horizon = tline
9060 1 : .inspect_image_layers(Lsn(0x30), &ctx, io_concurrency.clone())
9061 1 : .await
9062 1 : .unwrap()
9063 1 : .into_iter()
9064 17 : .filter(|(k, _)| k.is_metadata_key())
9065 1 : .collect::<Vec<_>>();
9066 1 :
9067 1 : assert_eq!(image_at_gc_horizon.len(), 10);
9068 1 : let expected_result = [
9069 1 : Bytes::from_static(b"value 0@0x10"),
9070 1 : Bytes::from_static(b"value 1@0x20"),
9071 1 : Bytes::from_static(b"value 2@0x30"),
9072 1 : Bytes::from_static(b"value 3@0x10"),
9073 1 : Bytes::from_static(b"value 4@0x10"),
9074 1 : Bytes::from_static(b"value 5@0x20"),
9075 1 : Bytes::from_static(b"value 6@0x20"),
9076 1 : Bytes::from_static(b"value 7@0x10"),
9077 1 : Bytes::from_static(b"value 8@0x10"),
9078 1 : Bytes::from_static(b"value 9@0x10"),
9079 1 : ];
9080 11 : for idx in 0..10 {
9081 10 : assert_eq!(
9082 10 : image_at_gc_horizon[idx],
9083 10 : (get_key(idx as u32), expected_result[idx].clone())
9084 10 : );
9085 1 : }
9086 1 :
9087 1 : // Check if old layers are removed / new layers have the expected LSN
9088 1 : let all_layers = inspect_and_sort(&tline, None).await;
9089 1 : assert_eq!(
9090 1 : all_layers,
9091 1 : vec![
9092 1 : // Image layer at GC horizon
9093 1 : PersistentLayerKey {
9094 1 : key_range: Key::MIN..Key::MAX,
9095 1 : lsn_range: Lsn(0x30)..Lsn(0x31),
9096 1 : is_delta: false
9097 1 : },
9098 1 : // The delta layer below the horizon
9099 1 : PersistentLayerKey {
9100 1 : key_range: get_key(3)..get_key(4),
9101 1 : lsn_range: Lsn(0x30)..Lsn(0x48),
9102 1 : is_delta: true
9103 1 : },
9104 1 : // The delta3 layer that should not be picked for the compaction
9105 1 : PersistentLayerKey {
9106 1 : key_range: get_key(8)..get_key(10),
9107 1 : lsn_range: Lsn(0x48)..Lsn(0x50),
9108 1 : is_delta: true
9109 1 : }
9110 1 : ]
9111 1 : );
9112 1 :
9113 1 : // increase GC horizon and compact again
9114 1 : {
9115 1 : tline
9116 1 : .applied_gc_cutoff_lsn
9117 1 : .lock_for_write()
9118 1 : .store_and_unlock(Lsn(0x40))
9119 1 : .wait()
9120 1 : .await;
9121 1 : // Update GC info
9122 1 : let mut guard = tline.gc_info.write().unwrap();
9123 1 : guard.cutoffs.time = Some(Lsn(0x40));
9124 1 : guard.cutoffs.space = Lsn(0x40);
9125 1 : }
9126 1 : tline
9127 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
9128 1 : .await
9129 1 : .unwrap();
9130 1 :
9131 1 : Ok(())
9132 1 : }
9133 :
9134 : #[cfg(feature = "testing")]
9135 : #[tokio::test]
9136 1 : async fn test_neon_test_record() -> anyhow::Result<()> {
9137 1 : let harness = TenantHarness::create("test_neon_test_record").await?;
9138 1 : let (tenant, ctx) = harness.load().await;
9139 1 :
9140 17 : fn get_key(id: u32) -> Key {
9141 17 : // using aux key here b/c they are guaranteed to be inside `collect_keyspace`.
9142 17 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
9143 17 : key.field6 = id;
9144 17 : key
9145 17 : }
9146 1 :
9147 1 : let delta1 = vec![
9148 1 : (
9149 1 : get_key(1),
9150 1 : Lsn(0x20),
9151 1 : Value::WalRecord(NeonWalRecord::wal_append(",0x20")),
9152 1 : ),
9153 1 : (
9154 1 : get_key(1),
9155 1 : Lsn(0x30),
9156 1 : Value::WalRecord(NeonWalRecord::wal_append(",0x30")),
9157 1 : ),
9158 1 : (get_key(2), Lsn(0x10), Value::Image("0x10".into())),
9159 1 : (
9160 1 : get_key(2),
9161 1 : Lsn(0x20),
9162 1 : Value::WalRecord(NeonWalRecord::wal_append(",0x20")),
9163 1 : ),
9164 1 : (
9165 1 : get_key(2),
9166 1 : Lsn(0x30),
9167 1 : Value::WalRecord(NeonWalRecord::wal_append(",0x30")),
9168 1 : ),
9169 1 : (get_key(3), Lsn(0x10), Value::Image("0x10".into())),
9170 1 : (
9171 1 : get_key(3),
9172 1 : Lsn(0x20),
9173 1 : Value::WalRecord(NeonWalRecord::wal_clear("c")),
9174 1 : ),
9175 1 : (get_key(4), Lsn(0x10), Value::Image("0x10".into())),
9176 1 : (
9177 1 : get_key(4),
9178 1 : Lsn(0x20),
9179 1 : Value::WalRecord(NeonWalRecord::wal_init("i")),
9180 1 : ),
9181 1 : (
9182 1 : get_key(4),
9183 1 : Lsn(0x30),
9184 1 : Value::WalRecord(NeonWalRecord::wal_append_conditional("j", "i")),
9185 1 : ),
9186 1 : (
9187 1 : get_key(5),
9188 1 : Lsn(0x20),
9189 1 : Value::WalRecord(NeonWalRecord::wal_init("1")),
9190 1 : ),
9191 1 : (
9192 1 : get_key(5),
9193 1 : Lsn(0x30),
9194 1 : Value::WalRecord(NeonWalRecord::wal_append_conditional("j", "2")),
9195 1 : ),
9196 1 : ];
9197 1 : let image1 = vec![(get_key(1), "0x10".into())];
9198 1 :
9199 1 : let tline = tenant
9200 1 : .create_test_timeline_with_layers(
9201 1 : TIMELINE_ID,
9202 1 : Lsn(0x10),
9203 1 : DEFAULT_PG_VERSION,
9204 1 : &ctx,
9205 1 : Vec::new(), // in-memory layers
9206 1 : vec![DeltaLayerTestDesc::new_with_inferred_key_range(
9207 1 : Lsn(0x10)..Lsn(0x40),
9208 1 : delta1,
9209 1 : )], // delta layers
9210 1 : vec![(Lsn(0x10), image1)], // image layers
9211 1 : Lsn(0x50),
9212 1 : )
9213 1 : .await?;
9214 1 :
9215 1 : assert_eq!(
9216 1 : tline.get(get_key(1), Lsn(0x50), &ctx).await?,
9217 1 : Bytes::from_static(b"0x10,0x20,0x30")
9218 1 : );
9219 1 : assert_eq!(
9220 1 : tline.get(get_key(2), Lsn(0x50), &ctx).await?,
9221 1 : Bytes::from_static(b"0x10,0x20,0x30")
9222 1 : );
9223 1 :
9224 1 : // Need to remove the "Neon WAL redo requires base image" restriction.
9225 1 :
9226 1 : assert_eq!(
9227 1 : tline.get(get_key(3), Lsn(0x50), &ctx).await?,
9228 1 : Bytes::from_static(b"c")
9229 1 : );
9230 1 : assert_eq!(
9231 1 : tline.get(get_key(4), Lsn(0x50), &ctx).await?,
9232 1 : Bytes::from_static(b"ij")
9233 1 : );
9234 1 :
9235 1 : // Manual testing required: currently, read errors will panic the process in debug mode. So we
9236 1 : // cannot enable this assertion in the unit test.
9237 1 : // assert!(tline.get(get_key(5), Lsn(0x50), &ctx).await.is_err());
9238 1 :
9239 1 : Ok(())
9240 1 : }
9241 :
9242 : #[tokio::test(start_paused = true)]
9243 1 : async fn test_lsn_lease() -> anyhow::Result<()> {
9244 1 : let (tenant, ctx) = TenantHarness::create("test_lsn_lease")
9245 1 : .await
9246 1 : .unwrap()
9247 1 : .load()
9248 1 : .await;
9249 1 : // Advance to the lsn lease deadline so that GC is not blocked by
9250 1 : // the initial transition into AttachedSingle.
9251 1 : tokio::time::advance(tenant.get_lsn_lease_length()).await;
9252 1 : tokio::time::resume();
9253 1 : let key = Key::from_hex("010000000033333333444444445500000000").unwrap();
9254 1 :
9255 1 : let end_lsn = Lsn(0x100);
9256 1 : let image_layers = (0x20..=0x90)
9257 1 : .step_by(0x10)
9258 8 : .map(|n| {
9259 8 : (
9260 8 : Lsn(n),
9261 8 : vec![(key, test_img(&format!("data key at {:x}", n)))],
9262 8 : )
9263 8 : })
9264 1 : .collect();
9265 1 :
9266 1 : let timeline = tenant
9267 1 : .create_test_timeline_with_layers(
9268 1 : TIMELINE_ID,
9269 1 : Lsn(0x10),
9270 1 : DEFAULT_PG_VERSION,
9271 1 : &ctx,
9272 1 : Vec::new(), // in-memory layers
9273 1 : Vec::new(),
9274 1 : image_layers,
9275 1 : end_lsn,
9276 1 : )
9277 1 : .await?;
9278 1 :
9279 1 : let leased_lsns = [0x30, 0x50, 0x70];
9280 1 : let mut leases = Vec::new();
9281 3 : leased_lsns.iter().for_each(|n| {
9282 3 : leases.push(
9283 3 : timeline
9284 3 : .init_lsn_lease(Lsn(*n), timeline.get_lsn_lease_length(), &ctx)
9285 3 : .expect("lease request should succeed"),
9286 3 : );
9287 3 : });
9288 1 :
9289 1 : let updated_lease_0 = timeline
9290 1 : .renew_lsn_lease(Lsn(leased_lsns[0]), Duration::from_secs(0), &ctx)
9291 1 : .expect("lease renewal should succeed");
9292 1 : assert_eq!(
9293 1 : updated_lease_0.valid_until, leases[0].valid_until,
9294 1 : " Renewing with shorter lease should not change the lease."
9295 1 : );
9296 1 :
9297 1 : let updated_lease_1 = timeline
9298 1 : .renew_lsn_lease(
9299 1 : Lsn(leased_lsns[1]),
9300 1 : timeline.get_lsn_lease_length() * 2,
9301 1 : &ctx,
9302 1 : )
9303 1 : .expect("lease renewal should succeed");
9304 1 : assert!(
9305 1 : updated_lease_1.valid_until > leases[1].valid_until,
9306 1 : "Renewing with a long lease should renew lease with later expiration time."
9307 1 : );
9308 1 :
9309 1 : // Force set disk consistent lsn so we can get the cutoff at `end_lsn`.
9310 1 : info!(
9311 1 : "applied_gc_cutoff_lsn: {}",
9312 0 : *timeline.get_applied_gc_cutoff_lsn()
9313 1 : );
9314 1 : timeline.force_set_disk_consistent_lsn(end_lsn);
9315 1 :
9316 1 : let res = tenant
9317 1 : .gc_iteration(
9318 1 : Some(TIMELINE_ID),
9319 1 : 0,
9320 1 : Duration::ZERO,
9321 1 : &CancellationToken::new(),
9322 1 : &ctx,
9323 1 : )
9324 1 : .await
9325 1 : .unwrap();
9326 1 :
9327 1 : // Keeping everything below Lsn(0x80) b/c of the leases: 1 + 6 = 7 layers.
9328 1 : // 0/10: the initdb layer
9329 1 : // (0/20..=0/70).step_by(0x10): 6 of the image layers added when creating the timeline.
9330 1 : assert_eq!(res.layers_needed_by_leases, 7);
9331 1 : // Keeping 0/90 b/c it is the latest layer.
9332 1 : assert_eq!(res.layers_not_updated, 1);
9333 1 : // Removed 0/80.
9334 1 : assert_eq!(res.layers_removed, 1);
9335 1 :
9336 1 : // Make a lease on an already GC-ed LSN.
9337 1 : // 0/80 does not have a valid lease + is below latest_gc_cutoff
9338 1 : assert!(Lsn(0x80) < *timeline.get_applied_gc_cutoff_lsn());
9339 1 : timeline
9340 1 : .init_lsn_lease(Lsn(0x80), timeline.get_lsn_lease_length(), &ctx)
9341 1 : .expect_err("lease request on GC-ed LSN should fail");
9342 1 :
9343 1 : // Should still be able to renew a currently valid lease
9344 1 : // Assumption: the original lease for 0/50 is still valid.
9345 1 : // (use `Timeline::init_lsn_lease` for testing so it always does validation)
9346 1 : timeline
9347 1 : .init_lsn_lease(Lsn(leased_lsns[1]), timeline.get_lsn_lease_length(), &ctx)
9348 1 : .expect("lease renewal with validation should succeed");
9349 1 :
9350 1 : Ok(())
9351 1 : }
9352 :
9353 : #[cfg(feature = "testing")]
9354 : #[tokio::test]
9355 1 : async fn test_simple_bottom_most_compaction_deltas_1() -> anyhow::Result<()> {
9356 1 : test_simple_bottom_most_compaction_deltas_helper(
9357 1 : "test_simple_bottom_most_compaction_deltas_1",
9358 1 : false,
9359 1 : )
9360 1 : .await
9361 1 : }
9362 :
9363 : #[cfg(feature = "testing")]
9364 : #[tokio::test]
9365 1 : async fn test_simple_bottom_most_compaction_deltas_2() -> anyhow::Result<()> {
9366 1 : test_simple_bottom_most_compaction_deltas_helper(
9367 1 : "test_simple_bottom_most_compaction_deltas_2",
9368 1 : true,
9369 1 : )
9370 1 : .await
9371 1 : }
9372 :
9373 : #[cfg(feature = "testing")]
9374 2 : async fn test_simple_bottom_most_compaction_deltas_helper(
9375 2 : test_name: &'static str,
9376 2 : use_delta_bottom_layer: bool,
9377 2 : ) -> anyhow::Result<()> {
9378 2 : let harness = TenantHarness::create(test_name).await?;
9379 2 : let (tenant, ctx) = harness.load().await;
9380 :
9381 138 : fn get_key(id: u32) -> Key {
9382 138 : // using aux keys here b/c they are guaranteed to be inside `collect_keyspace`.
9383 138 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
9384 138 : key.field6 = id;
9385 138 : key
9386 138 : }
9387 :
9388 : // We create
9389 : // - one bottom-most image layer,
9390 : // - a delta layer D1 crossing the GC horizon with data below and above the horizon,
9391 : // - a delta layer D2 crossing the GC horizon with data only below the horizon,
9392 : // - a delta layer D3 above the horizon.
9393 : //
9394 : // | D3 |
9395 : // | D1 |
9396 : // -| |-- gc horizon -----------------
9397 : // | | | D2 |
9398 : // --------- img layer ------------------
9399 : //
9400 : // What we should expect from this compaction is:
9401 : // | D3 |
9402 : // | Part of D1 |
9403 : // --------- img layer with D1+D2 at GC horizon------------------
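// (Naming note: the "bottom-most compaction" exercised here is the gc-compaction run via
// `compact_with_gc`. Per the expectation above, it squashes everything at or below the GC
// horizon into an image layer at the horizon and keeps the deltas above it.)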
9404 :
9405 : // img layer at 0x10
9406 2 : let img_layer = (0..10)
9407 20 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
9408 2 : .collect_vec();
9409 2 : // or, delta layer at 0x10 if `use_delta_bottom_layer` is true
9410 2 : let delta4 = (0..10)
9411 20 : .map(|id| {
9412 20 : (
9413 20 : get_key(id),
9414 20 : Lsn(0x08),
9415 20 : Value::WalRecord(NeonWalRecord::wal_init(format!("value {id}@0x10"))),
9416 20 : )
9417 20 : })
9418 2 : .collect_vec();
9419 2 :
9420 2 : let delta1 = vec![
9421 2 : (
9422 2 : get_key(1),
9423 2 : Lsn(0x20),
9424 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
9425 2 : ),
9426 2 : (
9427 2 : get_key(2),
9428 2 : Lsn(0x30),
9429 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
9430 2 : ),
9431 2 : (
9432 2 : get_key(3),
9433 2 : Lsn(0x28),
9434 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x28")),
9435 2 : ),
9436 2 : (
9437 2 : get_key(3),
9438 2 : Lsn(0x30),
9439 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
9440 2 : ),
9441 2 : (
9442 2 : get_key(3),
9443 2 : Lsn(0x40),
9444 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x40")),
9445 2 : ),
9446 2 : ];
9447 2 : let delta2 = vec![
9448 2 : (
9449 2 : get_key(5),
9450 2 : Lsn(0x20),
9451 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
9452 2 : ),
9453 2 : (
9454 2 : get_key(6),
9455 2 : Lsn(0x20),
9456 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
9457 2 : ),
9458 2 : ];
9459 2 : let delta3 = vec![
9460 2 : (
9461 2 : get_key(8),
9462 2 : Lsn(0x48),
9463 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
9464 2 : ),
9465 2 : (
9466 2 : get_key(9),
9467 2 : Lsn(0x48),
9468 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
9469 2 : ),
9470 2 : ];
9471 :
9472 2 : let tline = if use_delta_bottom_layer {
9473 1 : tenant
9474 1 : .create_test_timeline_with_layers(
9475 1 : TIMELINE_ID,
9476 1 : Lsn(0x08),
9477 1 : DEFAULT_PG_VERSION,
9478 1 : &ctx,
9479 1 : Vec::new(), // in-memory layers
9480 1 : vec![
9481 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
9482 1 : Lsn(0x08)..Lsn(0x10),
9483 1 : delta4,
9484 1 : ),
9485 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
9486 1 : Lsn(0x20)..Lsn(0x48),
9487 1 : delta1,
9488 1 : ),
9489 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
9490 1 : Lsn(0x20)..Lsn(0x48),
9491 1 : delta2,
9492 1 : ),
9493 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
9494 1 : Lsn(0x48)..Lsn(0x50),
9495 1 : delta3,
9496 1 : ),
9497 1 : ], // delta layers
9498 1 : vec![], // image layers
9499 1 : Lsn(0x50),
9500 1 : )
9501 1 : .await?
9502 : } else {
9503 1 : tenant
9504 1 : .create_test_timeline_with_layers(
9505 1 : TIMELINE_ID,
9506 1 : Lsn(0x10),
9507 1 : DEFAULT_PG_VERSION,
9508 1 : &ctx,
9509 1 : Vec::new(), // in-memory layers
9510 1 : vec![
9511 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
9512 1 : Lsn(0x10)..Lsn(0x48),
9513 1 : delta1,
9514 1 : ),
9515 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
9516 1 : Lsn(0x10)..Lsn(0x48),
9517 1 : delta2,
9518 1 : ),
9519 1 : DeltaLayerTestDesc::new_with_inferred_key_range(
9520 1 : Lsn(0x48)..Lsn(0x50),
9521 1 : delta3,
9522 1 : ),
9523 1 : ], // delta layers
9524 1 : vec![(Lsn(0x10), img_layer)], // image layers
9525 1 : Lsn(0x50),
9526 1 : )
9527 1 : .await?
9528 : };
9529 : {
9530 2 : tline
9531 2 : .applied_gc_cutoff_lsn
9532 2 : .lock_for_write()
9533 2 : .store_and_unlock(Lsn(0x30))
9534 2 : .wait()
9535 2 : .await;
9536 : // Update GC info
9537 2 : let mut guard = tline.gc_info.write().unwrap();
9538 2 : *guard = GcInfo {
9539 2 : retain_lsns: vec![],
9540 2 : cutoffs: GcCutoffs {
9541 2 : time: Some(Lsn(0x30)),
9542 2 : space: Lsn(0x30),
9543 2 : },
9544 2 : leases: Default::default(),
9545 2 : within_ancestor_pitr: false,
9546 2 : };
9547 2 : }
9548 2 :
9549 2 : let expected_result = [
9550 2 : Bytes::from_static(b"value 0@0x10"),
9551 2 : Bytes::from_static(b"value 1@0x10@0x20"),
9552 2 : Bytes::from_static(b"value 2@0x10@0x30"),
9553 2 : Bytes::from_static(b"value 3@0x10@0x28@0x30@0x40"),
9554 2 : Bytes::from_static(b"value 4@0x10"),
9555 2 : Bytes::from_static(b"value 5@0x10@0x20"),
9556 2 : Bytes::from_static(b"value 6@0x10@0x20"),
9557 2 : Bytes::from_static(b"value 7@0x10"),
9558 2 : Bytes::from_static(b"value 8@0x10@0x48"),
9559 2 : Bytes::from_static(b"value 9@0x10@0x48"),
9560 2 : ];
9561 2 :
9562 2 : let expected_result_at_gc_horizon = [
9563 2 : Bytes::from_static(b"value 0@0x10"),
9564 2 : Bytes::from_static(b"value 1@0x10@0x20"),
9565 2 : Bytes::from_static(b"value 2@0x10@0x30"),
9566 2 : Bytes::from_static(b"value 3@0x10@0x28@0x30"),
9567 2 : Bytes::from_static(b"value 4@0x10"),
9568 2 : Bytes::from_static(b"value 5@0x10@0x20"),
9569 2 : Bytes::from_static(b"value 6@0x10@0x20"),
9570 2 : Bytes::from_static(b"value 7@0x10"),
9571 2 : Bytes::from_static(b"value 8@0x10"),
9572 2 : Bytes::from_static(b"value 9@0x10"),
9573 2 : ];
9574 :
9575 22 : for idx in 0..10 {
9576 20 : assert_eq!(
9577 20 : tline
9578 20 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
9579 20 : .await
9580 20 : .unwrap(),
9581 20 : &expected_result[idx]
9582 : );
9583 20 : assert_eq!(
9584 20 : tline
9585 20 : .get(get_key(idx as u32), Lsn(0x30), &ctx)
9586 20 : .await
9587 20 : .unwrap(),
9588 20 : &expected_result_at_gc_horizon[idx]
9589 : );
9590 : }
9591 :
9592 2 : let cancel = CancellationToken::new();
9593 2 : tline
9594 2 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
9595 2 : .await
9596 2 : .unwrap();
9597 :
9598 22 : for idx in 0..10 {
9599 20 : assert_eq!(
9600 20 : tline
9601 20 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
9602 20 : .await
9603 20 : .unwrap(),
9604 20 : &expected_result[idx]
9605 : );
9606 20 : assert_eq!(
9607 20 : tline
9608 20 : .get(get_key(idx as u32), Lsn(0x30), &ctx)
9609 20 : .await
9610 20 : .unwrap(),
9611 20 : &expected_result_at_gc_horizon[idx]
9612 : );
9613 : }
9614 :
9615 : // increase GC horizon and compact again
9616 : {
9617 2 : tline
9618 2 : .applied_gc_cutoff_lsn
9619 2 : .lock_for_write()
9620 2 : .store_and_unlock(Lsn(0x40))
9621 2 : .wait()
9622 2 : .await;
9623 : // Update GC info
9624 2 : let mut guard = tline.gc_info.write().unwrap();
9625 2 : guard.cutoffs.time = Some(Lsn(0x40));
9626 2 : guard.cutoffs.space = Lsn(0x40);
9627 2 : }
9628 2 : tline
9629 2 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
9630 2 : .await
9631 2 : .unwrap();
9632 2 :
9633 2 : Ok(())
9634 2 : }
9635 :
9636 : #[cfg(feature = "testing")]
9637 : #[tokio::test]
9638 1 : async fn test_generate_key_retention() -> anyhow::Result<()> {
9639 1 : let harness = TenantHarness::create("test_generate_key_retention").await?;
9640 1 : let (tenant, ctx) = harness.load().await;
9641 1 : let tline = tenant
9642 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
9643 1 : .await?;
9644 1 : tline.force_advance_lsn(Lsn(0x70));
9645 1 : let key = Key::from_hex("010000000033333333444444445500000000").unwrap();
9646 1 : let history = vec![
9647 1 : (
9648 1 : key,
9649 1 : Lsn(0x10),
9650 1 : Value::WalRecord(NeonWalRecord::wal_init("0x10")),
9651 1 : ),
9652 1 : (
9653 1 : key,
9654 1 : Lsn(0x20),
9655 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x20")),
9656 1 : ),
9657 1 : (
9658 1 : key,
9659 1 : Lsn(0x30),
9660 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x30")),
9661 1 : ),
9662 1 : (
9663 1 : key,
9664 1 : Lsn(0x40),
9665 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x40")),
9666 1 : ),
9667 1 : (
9668 1 : key,
9669 1 : Lsn(0x50),
9670 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x50")),
9671 1 : ),
9672 1 : (
9673 1 : key,
9674 1 : Lsn(0x60),
9675 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x60")),
9676 1 : ),
9677 1 : (
9678 1 : key,
9679 1 : Lsn(0x70),
9680 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x70")),
9681 1 : ),
9682 1 : (
9683 1 : key,
9684 1 : Lsn(0x80),
9685 1 : Value::Image(Bytes::copy_from_slice(
9686 1 : b"0x10;0x20;0x30;0x40;0x50;0x60;0x70;0x80",
9687 1 : )),
9688 1 : ),
9689 1 : (
9690 1 : key,
9691 1 : Lsn(0x90),
9692 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x90")),
9693 1 : ),
9694 1 : ];
9695 1 : let res = tline
9696 1 : .generate_key_retention(
9697 1 : key,
9698 1 : &history,
9699 1 : Lsn(0x60),
9700 1 : &[Lsn(0x20), Lsn(0x40), Lsn(0x50)],
9701 1 : 3,
9702 1 : None,
9703 1 : true,
9704 1 : )
9705 1 : .await
9706 1 : .unwrap();
9707 1 : let expected_res = KeyHistoryRetention {
9708 1 : below_horizon: vec![
9709 1 : (
9710 1 : Lsn(0x20),
9711 1 : KeyLogAtLsn(vec![(
9712 1 : Lsn(0x20),
9713 1 : Value::Image(Bytes::from_static(b"0x10;0x20")),
9714 1 : )]),
9715 1 : ),
9716 1 : (
9717 1 : Lsn(0x40),
9718 1 : KeyLogAtLsn(vec![
9719 1 : (
9720 1 : Lsn(0x30),
9721 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x30")),
9722 1 : ),
9723 1 : (
9724 1 : Lsn(0x40),
9725 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x40")),
9726 1 : ),
9727 1 : ]),
9728 1 : ),
9729 1 : (
9730 1 : Lsn(0x50),
9731 1 : KeyLogAtLsn(vec![(
9732 1 : Lsn(0x50),
9733 1 : Value::Image(Bytes::copy_from_slice(b"0x10;0x20;0x30;0x40;0x50")),
9734 1 : )]),
9735 1 : ),
9736 1 : (
9737 1 : Lsn(0x60),
9738 1 : KeyLogAtLsn(vec![(
9739 1 : Lsn(0x60),
9740 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x60")),
9741 1 : )]),
9742 1 : ),
9743 1 : ],
9744 1 : above_horizon: KeyLogAtLsn(vec![
9745 1 : (
9746 1 : Lsn(0x70),
9747 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x70")),
9748 1 : ),
9749 1 : (
9750 1 : Lsn(0x80),
9751 1 : Value::Image(Bytes::copy_from_slice(
9752 1 : b"0x10;0x20;0x30;0x40;0x50;0x60;0x70;0x80",
9753 1 : )),
9754 1 : ),
9755 1 : (
9756 1 : Lsn(0x90),
9757 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x90")),
9758 1 : ),
9759 1 : ]),
9760 1 : };
9761 1 : assert_eq!(res, expected_res);
9762 1 :
9763 1 : // We expect GC-compaction to run with the original GC. This can create a situation where
9764 1 : // the original GC algorithm removes some delta layers b/c there is full image coverage,
9765 1 : // causing some keys to have an incomplete history below the lowest retain LSN.
9766 1 : // For example, we have
9767 1 : // ```plain
9768 1 : // init delta @ 0x10, image @ 0x20, delta @ 0x30 (gc_horizon), image @ 0x40.
9769 1 : // ```
9770 1 : // Now the GC horizon moves up, and we have
9771 1 : // ```plain
9772 1 : // init delta @ 0x10, image @ 0x20, delta @ 0x30, image @ 0x40 (gc_horizon)
9773 1 : // ```
9774 1 : // The original GC algorithm kicks in, and removes delta @ 0x10, image @ 0x20.
9775 1 : // We will end up with
9776 1 : // ```plain
9777 1 : // delta @ 0x30, image @ 0x40 (gc_horizon)
9778 1 : // ```
9779 1 : // Now we run the GC-compaction, and this key does not have a full history.
9780 1 : // We should be able to handle this partial history and drop everything before the
9781 1 : // gc_horizon image.
9782 1 :
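// The history below deliberately has no init record and starts at 0x20, mirroring the
// partial-history situation described above.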
9783 1 : let history = vec![
9784 1 : (
9785 1 : key,
9786 1 : Lsn(0x20),
9787 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x20")),
9788 1 : ),
9789 1 : (
9790 1 : key,
9791 1 : Lsn(0x30),
9792 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x30")),
9793 1 : ),
9794 1 : (
9795 1 : key,
9796 1 : Lsn(0x40),
9797 1 : Value::Image(Bytes::copy_from_slice(b"0x10;0x20;0x30;0x40")),
9798 1 : ),
9799 1 : (
9800 1 : key,
9801 1 : Lsn(0x50),
9802 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x50")),
9803 1 : ),
9804 1 : (
9805 1 : key,
9806 1 : Lsn(0x60),
9807 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x60")),
9808 1 : ),
9809 1 : (
9810 1 : key,
9811 1 : Lsn(0x70),
9812 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x70")),
9813 1 : ),
9814 1 : (
9815 1 : key,
9816 1 : Lsn(0x80),
9817 1 : Value::Image(Bytes::copy_from_slice(
9818 1 : b"0x10;0x20;0x30;0x40;0x50;0x60;0x70;0x80",
9819 1 : )),
9820 1 : ),
9821 1 : (
9822 1 : key,
9823 1 : Lsn(0x90),
9824 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x90")),
9825 1 : ),
9826 1 : ];
9827 1 : let res = tline
9828 1 : .generate_key_retention(
9829 1 : key,
9830 1 : &history,
9831 1 : Lsn(0x60),
9832 1 : &[Lsn(0x40), Lsn(0x50)],
9833 1 : 3,
9834 1 : None,
9835 1 : true,
9836 1 : )
9837 1 : .await
9838 1 : .unwrap();
9839 1 : let expected_res = KeyHistoryRetention {
9840 1 : below_horizon: vec![
9841 1 : (
9842 1 : Lsn(0x40),
9843 1 : KeyLogAtLsn(vec![(
9844 1 : Lsn(0x40),
9845 1 : Value::Image(Bytes::copy_from_slice(b"0x10;0x20;0x30;0x40")),
9846 1 : )]),
9847 1 : ),
9848 1 : (
9849 1 : Lsn(0x50),
9850 1 : KeyLogAtLsn(vec![(
9851 1 : Lsn(0x50),
9852 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x50")),
9853 1 : )]),
9854 1 : ),
9855 1 : (
9856 1 : Lsn(0x60),
9857 1 : KeyLogAtLsn(vec![(
9858 1 : Lsn(0x60),
9859 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x60")),
9860 1 : )]),
9861 1 : ),
9862 1 : ],
9863 1 : above_horizon: KeyLogAtLsn(vec![
9864 1 : (
9865 1 : Lsn(0x70),
9866 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x70")),
9867 1 : ),
9868 1 : (
9869 1 : Lsn(0x80),
9870 1 : Value::Image(Bytes::copy_from_slice(
9871 1 : b"0x10;0x20;0x30;0x40;0x50;0x60;0x70;0x80",
9872 1 : )),
9873 1 : ),
9874 1 : (
9875 1 : Lsn(0x90),
9876 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x90")),
9877 1 : ),
9878 1 : ]),
9879 1 : };
9880 1 : assert_eq!(res, expected_res);
9881 1 :
9882 1 : // In case of branch compaction, the branch itself does not have the full history, and we need to provide
9883 1 : // the ancestor image in the test case.
9884 1 :
9885 1 : let history = vec![
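// The `Some((key, Lsn(0x10), ...))` argument passed to `generate_key_retention` below plays
// the role of that ancestor image.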
9886 1 : (
9887 1 : key,
9888 1 : Lsn(0x20),
9889 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x20")),
9890 1 : ),
9891 1 : (
9892 1 : key,
9893 1 : Lsn(0x30),
9894 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x30")),
9895 1 : ),
9896 1 : (
9897 1 : key,
9898 1 : Lsn(0x40),
9899 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x40")),
9900 1 : ),
9901 1 : (
9902 1 : key,
9903 1 : Lsn(0x70),
9904 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x70")),
9905 1 : ),
9906 1 : ];
9907 1 : let res = tline
9908 1 : .generate_key_retention(
9909 1 : key,
9910 1 : &history,
9911 1 : Lsn(0x60),
9912 1 : &[],
9913 1 : 3,
9914 1 : Some((key, Lsn(0x10), Bytes::copy_from_slice(b"0x10"))),
9915 1 : true,
9916 1 : )
9917 1 : .await
9918 1 : .unwrap();
9919 1 : let expected_res = KeyHistoryRetention {
9920 1 : below_horizon: vec![(
9921 1 : Lsn(0x60),
9922 1 : KeyLogAtLsn(vec![(
9923 1 : Lsn(0x60),
9924 1 : Value::Image(Bytes::copy_from_slice(b"0x10;0x20;0x30;0x40")), // use the ancestor image to reconstruct the page
9925 1 : )]),
9926 1 : )],
9927 1 : above_horizon: KeyLogAtLsn(vec![(
9928 1 : Lsn(0x70),
9929 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x70")),
9930 1 : )]),
9931 1 : };
9932 1 : assert_eq!(res, expected_res);
9933 1 :
9934 1 : let history = vec![
9935 1 : (
9936 1 : key,
9937 1 : Lsn(0x20),
9938 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x20")),
9939 1 : ),
9940 1 : (
9941 1 : key,
9942 1 : Lsn(0x40),
9943 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x40")),
9944 1 : ),
9945 1 : (
9946 1 : key,
9947 1 : Lsn(0x60),
9948 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x60")),
9949 1 : ),
9950 1 : (
9951 1 : key,
9952 1 : Lsn(0x70),
9953 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x70")),
9954 1 : ),
9955 1 : ];
9956 1 : let res = tline
9957 1 : .generate_key_retention(
9958 1 : key,
9959 1 : &history,
9960 1 : Lsn(0x60),
9961 1 : &[Lsn(0x30)],
9962 1 : 3,
9963 1 : Some((key, Lsn(0x10), Bytes::copy_from_slice(b"0x10"))),
9964 1 : true,
9965 1 : )
9966 1 : .await
9967 1 : .unwrap();
9968 1 : let expected_res = KeyHistoryRetention {
9969 1 : below_horizon: vec![
9970 1 : (
9971 1 : Lsn(0x30),
9972 1 : KeyLogAtLsn(vec![(
9973 1 : Lsn(0x20),
9974 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x20")),
9975 1 : )]),
9976 1 : ),
9977 1 : (
9978 1 : Lsn(0x60),
9979 1 : KeyLogAtLsn(vec![(
9980 1 : Lsn(0x60),
9981 1 : Value::Image(Bytes::copy_from_slice(b"0x10;0x20;0x40;0x60")),
9982 1 : )]),
9983 1 : ),
9984 1 : ],
9985 1 : above_horizon: KeyLogAtLsn(vec![(
9986 1 : Lsn(0x70),
9987 1 : Value::WalRecord(NeonWalRecord::wal_append(";0x70")),
9988 1 : )]),
9989 1 : };
9990 1 : assert_eq!(res, expected_res);
9991 1 :
9992 1 : Ok(())
9993 1 : }
9994 :
9995 : #[cfg(feature = "testing")]
9996 : #[tokio::test]
9997 1 : async fn test_simple_bottom_most_compaction_with_retain_lsns() -> anyhow::Result<()> {
9998 1 : let harness =
9999 1 : TenantHarness::create("test_simple_bottom_most_compaction_with_retain_lsns").await?;
10000 1 : let (tenant, ctx) = harness.load().await;
10001 1 :
10002 259 : fn get_key(id: u32) -> Key {
10003 259 : // using aux keys here b/c they are guaranteed to be inside `collect_keyspace`.
10004 259 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
10005 259 : key.field6 = id;
10006 259 : key
10007 259 : }
10008 1 :
10009 1 : let img_layer = (0..10)
10010 10 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
10011 1 : .collect_vec();
10012 1 :
10013 1 : let delta1 = vec![
10014 1 : (
10015 1 : get_key(1),
10016 1 : Lsn(0x20),
10017 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
10018 1 : ),
10019 1 : (
10020 1 : get_key(2),
10021 1 : Lsn(0x30),
10022 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
10023 1 : ),
10024 1 : (
10025 1 : get_key(3),
10026 1 : Lsn(0x28),
10027 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x28")),
10028 1 : ),
10029 1 : (
10030 1 : get_key(3),
10031 1 : Lsn(0x30),
10032 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
10033 1 : ),
10034 1 : (
10035 1 : get_key(3),
10036 1 : Lsn(0x40),
10037 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x40")),
10038 1 : ),
10039 1 : ];
10040 1 : let delta2 = vec![
10041 1 : (
10042 1 : get_key(5),
10043 1 : Lsn(0x20),
10044 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
10045 1 : ),
10046 1 : (
10047 1 : get_key(6),
10048 1 : Lsn(0x20),
10049 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
10050 1 : ),
10051 1 : ];
10052 1 : let delta3 = vec![
10053 1 : (
10054 1 : get_key(8),
10055 1 : Lsn(0x48),
10056 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
10057 1 : ),
10058 1 : (
10059 1 : get_key(9),
10060 1 : Lsn(0x48),
10061 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
10062 1 : ),
10063 1 : ];
10064 1 :
10065 1 : let tline = tenant
10066 1 : .create_test_timeline_with_layers(
10067 1 : TIMELINE_ID,
10068 1 : Lsn(0x10),
10069 1 : DEFAULT_PG_VERSION,
10070 1 : &ctx,
10071 1 : Vec::new(), // in-memory layers
10072 1 : vec![
10073 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x10)..Lsn(0x48), delta1),
10074 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x10)..Lsn(0x48), delta2),
10075 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x48)..Lsn(0x50), delta3),
10076 1 : ], // delta layers
10077 1 : vec![(Lsn(0x10), img_layer)], // image layers
10078 1 : Lsn(0x50),
10079 1 : )
10080 1 : .await?;
10081 1 : {
10082 1 : tline
10083 1 : .applied_gc_cutoff_lsn
10084 1 : .lock_for_write()
10085 1 : .store_and_unlock(Lsn(0x30))
10086 1 : .wait()
10087 1 : .await;
10088 1 : // Update GC info
10089 1 : let mut guard = tline.gc_info.write().unwrap();
10090 1 : *guard = GcInfo {
10091 1 : retain_lsns: vec![
10092 1 : (Lsn(0x10), tline.timeline_id, MaybeOffloaded::No),
10093 1 : (Lsn(0x20), tline.timeline_id, MaybeOffloaded::No),
10094 1 : ],
10095 1 : cutoffs: GcCutoffs {
10096 1 : time: Some(Lsn(0x30)),
10097 1 : space: Lsn(0x30),
10098 1 : },
10099 1 : leases: Default::default(),
10100 1 : within_ancestor_pitr: false,
10101 1 : };
10102 1 : }
10103 1 :
10104 1 : let expected_result = [
10105 1 : Bytes::from_static(b"value 0@0x10"),
10106 1 : Bytes::from_static(b"value 1@0x10@0x20"),
10107 1 : Bytes::from_static(b"value 2@0x10@0x30"),
10108 1 : Bytes::from_static(b"value 3@0x10@0x28@0x30@0x40"),
10109 1 : Bytes::from_static(b"value 4@0x10"),
10110 1 : Bytes::from_static(b"value 5@0x10@0x20"),
10111 1 : Bytes::from_static(b"value 6@0x10@0x20"),
10112 1 : Bytes::from_static(b"value 7@0x10"),
10113 1 : Bytes::from_static(b"value 8@0x10@0x48"),
10114 1 : Bytes::from_static(b"value 9@0x10@0x48"),
10115 1 : ];
10116 1 :
10117 1 : let expected_result_at_gc_horizon = [
10118 1 : Bytes::from_static(b"value 0@0x10"),
10119 1 : Bytes::from_static(b"value 1@0x10@0x20"),
10120 1 : Bytes::from_static(b"value 2@0x10@0x30"),
10121 1 : Bytes::from_static(b"value 3@0x10@0x28@0x30"),
10122 1 : Bytes::from_static(b"value 4@0x10"),
10123 1 : Bytes::from_static(b"value 5@0x10@0x20"),
10124 1 : Bytes::from_static(b"value 6@0x10@0x20"),
10125 1 : Bytes::from_static(b"value 7@0x10"),
10126 1 : Bytes::from_static(b"value 8@0x10"),
10127 1 : Bytes::from_static(b"value 9@0x10"),
10128 1 : ];
10129 1 :
10130 1 : let expected_result_at_lsn_20 = [
10131 1 : Bytes::from_static(b"value 0@0x10"),
10132 1 : Bytes::from_static(b"value 1@0x10@0x20"),
10133 1 : Bytes::from_static(b"value 2@0x10"),
10134 1 : Bytes::from_static(b"value 3@0x10"),
10135 1 : Bytes::from_static(b"value 4@0x10"),
10136 1 : Bytes::from_static(b"value 5@0x10@0x20"),
10137 1 : Bytes::from_static(b"value 6@0x10@0x20"),
10138 1 : Bytes::from_static(b"value 7@0x10"),
10139 1 : Bytes::from_static(b"value 8@0x10"),
10140 1 : Bytes::from_static(b"value 9@0x10"),
10141 1 : ];
10142 1 :
10143 1 : let expected_result_at_lsn_10 = [
10144 1 : Bytes::from_static(b"value 0@0x10"),
10145 1 : Bytes::from_static(b"value 1@0x10"),
10146 1 : Bytes::from_static(b"value 2@0x10"),
10147 1 : Bytes::from_static(b"value 3@0x10"),
10148 1 : Bytes::from_static(b"value 4@0x10"),
10149 1 : Bytes::from_static(b"value 5@0x10"),
10150 1 : Bytes::from_static(b"value 6@0x10"),
10151 1 : Bytes::from_static(b"value 7@0x10"),
10152 1 : Bytes::from_static(b"value 8@0x10"),
10153 1 : Bytes::from_static(b"value 9@0x10"),
10154 1 : ];
10155 1 :
10156 6 : let verify_result = || async {
10157 6 : let gc_horizon = {
10158 6 : let gc_info = tline.gc_info.read().unwrap();
10159 6 : gc_info.cutoffs.time.unwrap_or_default()
10160 1 : };
10161 66 : for idx in 0..10 {
10162 60 : assert_eq!(
10163 60 : tline
10164 60 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
10165 60 : .await
10166 60 : .unwrap(),
10167 60 : &expected_result[idx]
10168 1 : );
10169 60 : assert_eq!(
10170 60 : tline
10171 60 : .get(get_key(idx as u32), gc_horizon, &ctx)
10172 60 : .await
10173 60 : .unwrap(),
10174 60 : &expected_result_at_gc_horizon[idx]
10175 1 : );
10176 60 : assert_eq!(
10177 60 : tline
10178 60 : .get(get_key(idx as u32), Lsn(0x20), &ctx)
10179 60 : .await
10180 60 : .unwrap(),
10181 60 : &expected_result_at_lsn_20[idx]
10182 1 : );
10183 60 : assert_eq!(
10184 60 : tline
10185 60 : .get(get_key(idx as u32), Lsn(0x10), &ctx)
10186 60 : .await
10187 60 : .unwrap(),
10188 60 : &expected_result_at_lsn_10[idx]
10189 1 : );
10190 1 : }
10191 12 : };
10192 1 :
10193 1 : verify_result().await;
10194 1 :
10195 1 : let cancel = CancellationToken::new();
10196 1 : let mut dryrun_flags = EnumSet::new();
10197 1 : dryrun_flags.insert(CompactFlags::DryRun);
10198 1 :
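// First run gc-compaction in dry-run mode: the compaction logic is exercised end to end,
// but it is not expected to actually replace any layers (see the note below the call).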
10199 1 : tline
10200 1 : .compact_with_gc(
10201 1 : &cancel,
10202 1 : CompactOptions {
10203 1 : flags: dryrun_flags,
10204 1 : ..Default::default()
10205 1 : },
10206 1 : &ctx,
10207 1 : )
10208 1 : .await
10209 1 : .unwrap();
10210 1 : // We expect the layer map to stay the same b/c of the dry run flag, but other background jobs
10211 1 : // may be cleaning things up, so we don't do sanity checks on the layer map in unit tests.
10212 1 : verify_result().await;
10213 1 :
10214 1 : tline
10215 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
10216 1 : .await
10217 1 : .unwrap();
10218 1 : verify_result().await;
10219 1 :
10220 1 : // compact again
10221 1 : tline
10222 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
10223 1 : .await
10224 1 : .unwrap();
10225 1 : verify_result().await;
10226 1 :
10227 1 : // increase GC horizon and compact again
10228 1 : {
10229 1 : tline
10230 1 : .applied_gc_cutoff_lsn
10231 1 : .lock_for_write()
10232 1 : .store_and_unlock(Lsn(0x38))
10233 1 : .wait()
10234 1 : .await;
10235 1 : // Update GC info
10236 1 : let mut guard = tline.gc_info.write().unwrap();
10237 1 : guard.cutoffs.time = Some(Lsn(0x38));
10238 1 : guard.cutoffs.space = Lsn(0x38);
10239 1 : }
10240 1 : tline
10241 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
10242 1 : .await
10243 1 : .unwrap();
10244 1 : verify_result().await; // no wals between 0x30 and 0x38, so we should obtain the same result
10245 1 :
10246 1 : // not increasing the GC horizon and compact again
10247 1 : tline
10248 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
10249 1 : .await
10250 1 : .unwrap();
10251 1 : verify_result().await;
10252 1 :
10253 1 : Ok(())
10254 1 : }
10255 :
10256 : #[cfg(feature = "testing")]
10257 : #[tokio::test]
10258 1 : async fn test_simple_bottom_most_compaction_with_retain_lsns_single_key() -> anyhow::Result<()>
10259 1 : {
10260 1 : let harness =
10261 1 : TenantHarness::create("test_simple_bottom_most_compaction_with_retain_lsns_single_key")
10262 1 : .await?;
10263 1 : let (tenant, ctx) = harness.load().await;
10264 1 :
10265 176 : fn get_key(id: u32) -> Key {
10266 176 : // using aux keys here b/c they are guaranteed to be inside `collect_keyspace`.
10267 176 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
10268 176 : key.field6 = id;
10269 176 : key
10270 176 : }
10271 1 :
10272 1 : let img_layer = (0..10)
10273 10 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
10274 1 : .collect_vec();
10275 1 :
10276 1 : let delta1 = vec![
10277 1 : (
10278 1 : get_key(1),
10279 1 : Lsn(0x20),
10280 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
10281 1 : ),
10282 1 : (
10283 1 : get_key(1),
10284 1 : Lsn(0x28),
10285 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x28")),
10286 1 : ),
10287 1 : ];
10288 1 : let delta2 = vec![
10289 1 : (
10290 1 : get_key(1),
10291 1 : Lsn(0x30),
10292 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
10293 1 : ),
10294 1 : (
10295 1 : get_key(1),
10296 1 : Lsn(0x38),
10297 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x38")),
10298 1 : ),
10299 1 : ];
10300 1 : let delta3 = vec![
10301 1 : (
10302 1 : get_key(8),
10303 1 : Lsn(0x48),
10304 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
10305 1 : ),
10306 1 : (
10307 1 : get_key(9),
10308 1 : Lsn(0x48),
10309 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
10310 1 : ),
10311 1 : ];
10312 1 :
10313 1 : let tline = tenant
10314 1 : .create_test_timeline_with_layers(
10315 1 : TIMELINE_ID,
10316 1 : Lsn(0x10),
10317 1 : DEFAULT_PG_VERSION,
10318 1 : &ctx,
10319 1 : Vec::new(), // in-memory layers
10320 1 : vec![
10321 1 : // delta1 and delta2 only contain a single key but multiple updates
10322 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x10)..Lsn(0x30), delta1),
10323 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x30)..Lsn(0x50), delta2),
10324 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x10)..Lsn(0x50), delta3),
10325 1 : ], // delta layers
10326 1 : vec![(Lsn(0x10), img_layer)], // image layers
10327 1 : Lsn(0x50),
10328 1 : )
10329 1 : .await?;
10330 1 : {
10331 1 : tline
10332 1 : .applied_gc_cutoff_lsn
10333 1 : .lock_for_write()
10334 1 : .store_and_unlock(Lsn(0x30))
10335 1 : .wait()
10336 1 : .await;
10337 1 : // Update GC info
10338 1 : let mut guard = tline.gc_info.write().unwrap();
10339 1 : *guard = GcInfo {
10340 1 : retain_lsns: vec![
10341 1 : (Lsn(0x10), tline.timeline_id, MaybeOffloaded::No),
10342 1 : (Lsn(0x20), tline.timeline_id, MaybeOffloaded::No),
10343 1 : ],
10344 1 : cutoffs: GcCutoffs {
10345 1 : time: Some(Lsn(0x30)),
10346 1 : space: Lsn(0x30),
10347 1 : },
10348 1 : leases: Default::default(),
10349 1 : within_ancestor_pitr: false,
10350 1 : };
10351 1 : }
10352 1 :
10353 1 : let expected_result = [
10354 1 : Bytes::from_static(b"value 0@0x10"),
10355 1 : Bytes::from_static(b"value 1@0x10@0x20@0x28@0x30@0x38"),
10356 1 : Bytes::from_static(b"value 2@0x10"),
10357 1 : Bytes::from_static(b"value 3@0x10"),
10358 1 : Bytes::from_static(b"value 4@0x10"),
10359 1 : Bytes::from_static(b"value 5@0x10"),
10360 1 : Bytes::from_static(b"value 6@0x10"),
10361 1 : Bytes::from_static(b"value 7@0x10"),
10362 1 : Bytes::from_static(b"value 8@0x10@0x48"),
10363 1 : Bytes::from_static(b"value 9@0x10@0x48"),
10364 1 : ];
10365 1 :
10366 1 : let expected_result_at_gc_horizon = [
10367 1 : Bytes::from_static(b"value 0@0x10"),
10368 1 : Bytes::from_static(b"value 1@0x10@0x20@0x28@0x30"),
10369 1 : Bytes::from_static(b"value 2@0x10"),
10370 1 : Bytes::from_static(b"value 3@0x10"),
10371 1 : Bytes::from_static(b"value 4@0x10"),
10372 1 : Bytes::from_static(b"value 5@0x10"),
10373 1 : Bytes::from_static(b"value 6@0x10"),
10374 1 : Bytes::from_static(b"value 7@0x10"),
10375 1 : Bytes::from_static(b"value 8@0x10"),
10376 1 : Bytes::from_static(b"value 9@0x10"),
10377 1 : ];
10378 1 :
10379 1 : let expected_result_at_lsn_20 = [
10380 1 : Bytes::from_static(b"value 0@0x10"),
10381 1 : Bytes::from_static(b"value 1@0x10@0x20"),
10382 1 : Bytes::from_static(b"value 2@0x10"),
10383 1 : Bytes::from_static(b"value 3@0x10"),
10384 1 : Bytes::from_static(b"value 4@0x10"),
10385 1 : Bytes::from_static(b"value 5@0x10"),
10386 1 : Bytes::from_static(b"value 6@0x10"),
10387 1 : Bytes::from_static(b"value 7@0x10"),
10388 1 : Bytes::from_static(b"value 8@0x10"),
10389 1 : Bytes::from_static(b"value 9@0x10"),
10390 1 : ];
10391 1 :
10392 1 : let expected_result_at_lsn_10 = [
10393 1 : Bytes::from_static(b"value 0@0x10"),
10394 1 : Bytes::from_static(b"value 1@0x10"),
10395 1 : Bytes::from_static(b"value 2@0x10"),
10396 1 : Bytes::from_static(b"value 3@0x10"),
10397 1 : Bytes::from_static(b"value 4@0x10"),
10398 1 : Bytes::from_static(b"value 5@0x10"),
10399 1 : Bytes::from_static(b"value 6@0x10"),
10400 1 : Bytes::from_static(b"value 7@0x10"),
10401 1 : Bytes::from_static(b"value 8@0x10"),
10402 1 : Bytes::from_static(b"value 9@0x10"),
10403 1 : ];
10404 1 :
10405 4 : let verify_result = || async {
10406 4 : let gc_horizon = {
10407 4 : let gc_info = tline.gc_info.read().unwrap();
10408 4 : gc_info.cutoffs.time.unwrap_or_default()
10409 1 : };
10410 44 : for idx in 0..10 {
10411 40 : assert_eq!(
10412 40 : tline
10413 40 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
10414 40 : .await
10415 40 : .unwrap(),
10416 40 : &expected_result[idx]
10417 1 : );
10418 40 : assert_eq!(
10419 40 : tline
10420 40 : .get(get_key(idx as u32), gc_horizon, &ctx)
10421 40 : .await
10422 40 : .unwrap(),
10423 40 : &expected_result_at_gc_horizon[idx]
10424 1 : );
10425 40 : assert_eq!(
10426 40 : tline
10427 40 : .get(get_key(idx as u32), Lsn(0x20), &ctx)
10428 40 : .await
10429 40 : .unwrap(),
10430 40 : &expected_result_at_lsn_20[idx]
10431 1 : );
10432 40 : assert_eq!(
10433 40 : tline
10434 40 : .get(get_key(idx as u32), Lsn(0x10), &ctx)
10435 40 : .await
10436 40 : .unwrap(),
10437 40 : &expected_result_at_lsn_10[idx]
10438 1 : );
10439 1 : }
10440 8 : };
10441 1 :
10442 1 : verify_result().await;
10443 1 :
10444 1 : let cancel = CancellationToken::new();
10445 1 : let mut dryrun_flags = EnumSet::new();
10446 1 : dryrun_flags.insert(CompactFlags::DryRun);
10447 1 :
10448 1 : tline
10449 1 : .compact_with_gc(
10450 1 : &cancel,
10451 1 : CompactOptions {
10452 1 : flags: dryrun_flags,
10453 1 : ..Default::default()
10454 1 : },
10455 1 : &ctx,
10456 1 : )
10457 1 : .await
10458 1 : .unwrap();
10459 1 : // We expect the layer map to stay the same b/c of the dry run flag, but other background jobs
10460 1 : // may be cleaning things up, so we don't do sanity checks on the layer map in unit tests.
10461 1 : verify_result().await;
10462 1 :
10463 1 : tline
10464 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
10465 1 : .await
10466 1 : .unwrap();
10467 1 : verify_result().await;
10468 1 :
10469 1 : // compact again
10470 1 : tline
10471 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
10472 1 : .await
10473 1 : .unwrap();
10474 1 : verify_result().await;
10475 1 :
10476 1 : Ok(())
10477 1 : }
10478 :
10479 : #[cfg(feature = "testing")]
10480 : #[tokio::test]
10481 1 : async fn test_simple_bottom_most_compaction_on_branch() -> anyhow::Result<()> {
10482 1 : use models::CompactLsnRange;
10483 1 :
10484 1 : let harness = TenantHarness::create("test_simple_bottom_most_compaction_on_branch").await?;
10485 1 : let (tenant, ctx) = harness.load().await;
10486 1 :
10487 83 : fn get_key(id: u32) -> Key {
10488 83 : let mut key = Key::from_hex("000000000033333333444444445500000000").unwrap();
10489 83 : key.field6 = id;
10490 83 : key
10491 83 : }
10492 1 :
10493 1 : let img_layer = (0..10)
10494 10 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
10495 1 : .collect_vec();
10496 1 :
10497 1 : let delta1 = vec![
10498 1 : (
10499 1 : get_key(1),
10500 1 : Lsn(0x20),
10501 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
10502 1 : ),
10503 1 : (
10504 1 : get_key(2),
10505 1 : Lsn(0x30),
10506 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
10507 1 : ),
10508 1 : (
10509 1 : get_key(3),
10510 1 : Lsn(0x28),
10511 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x28")),
10512 1 : ),
10513 1 : (
10514 1 : get_key(3),
10515 1 : Lsn(0x30),
10516 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
10517 1 : ),
10518 1 : (
10519 1 : get_key(3),
10520 1 : Lsn(0x40),
10521 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x40")),
10522 1 : ),
10523 1 : ];
10524 1 : let delta2 = vec![
10525 1 : (
10526 1 : get_key(5),
10527 1 : Lsn(0x20),
10528 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
10529 1 : ),
10530 1 : (
10531 1 : get_key(6),
10532 1 : Lsn(0x20),
10533 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
10534 1 : ),
10535 1 : ];
10536 1 : let delta3 = vec![
10537 1 : (
10538 1 : get_key(8),
10539 1 : Lsn(0x48),
10540 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
10541 1 : ),
10542 1 : (
10543 1 : get_key(9),
10544 1 : Lsn(0x48),
10545 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
10546 1 : ),
10547 1 : ];
10548 1 :
10549 1 : let parent_tline = tenant
10550 1 : .create_test_timeline_with_layers(
10551 1 : TIMELINE_ID,
10552 1 : Lsn(0x10),
10553 1 : DEFAULT_PG_VERSION,
10554 1 : &ctx,
10555 1 : vec![], // in-memory layers
10556 1 : vec![], // delta layers
10557 1 : vec![(Lsn(0x18), img_layer)], // image layers
10558 1 : Lsn(0x18),
10559 1 : )
10560 1 : .await?;
10561 1 :
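// These keys use the main key prefix rather than the aux prefix, so the test registers them
// as an extra dense keyspace on both timelines; presumably this keeps them visible to
// keyspace collection during compaction.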
10562 1 : parent_tline.add_extra_test_dense_keyspace(KeySpace::single(get_key(0)..get_key(10)));
10563 1 :
10564 1 : let branch_tline = tenant
10565 1 : .branch_timeline_test_with_layers(
10566 1 : &parent_tline,
10567 1 : NEW_TIMELINE_ID,
10568 1 : Some(Lsn(0x18)),
10569 1 : &ctx,
10570 1 : vec![
10571 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x20)..Lsn(0x48), delta1),
10572 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x20)..Lsn(0x48), delta2),
10573 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x48)..Lsn(0x50), delta3),
10574 1 : ], // delta layers
10575 1 : vec![], // image layers
10576 1 : Lsn(0x50),
10577 1 : )
10578 1 : .await?;
10579 1 :
10580 1 : branch_tline.add_extra_test_dense_keyspace(KeySpace::single(get_key(0)..get_key(10)));
10581 1 :
10582 1 : {
10583 1 : parent_tline
10584 1 : .applied_gc_cutoff_lsn
10585 1 : .lock_for_write()
10586 1 : .store_and_unlock(Lsn(0x10))
10587 1 : .wait()
10588 1 : .await;
10589 1 : // Update GC info
10590 1 : let mut guard = parent_tline.gc_info.write().unwrap();
10591 1 : *guard = GcInfo {
10592 1 : retain_lsns: vec![(Lsn(0x18), branch_tline.timeline_id, MaybeOffloaded::No)],
10593 1 : cutoffs: GcCutoffs {
10594 1 : time: Some(Lsn(0x10)),
10595 1 : space: Lsn(0x10),
10596 1 : },
10597 1 : leases: Default::default(),
10598 1 : within_ancestor_pitr: false,
10599 1 : };
10600 1 : }
10601 1 :
10602 1 : {
10603 1 : branch_tline
10604 1 : .applied_gc_cutoff_lsn
10605 1 : .lock_for_write()
10606 1 : .store_and_unlock(Lsn(0x50))
10607 1 : .wait()
10608 1 : .await;
10609 1 : // Update GC info
10610 1 : let mut guard = branch_tline.gc_info.write().unwrap();
10611 1 : *guard = GcInfo {
10612 1 : retain_lsns: vec![(Lsn(0x40), branch_tline.timeline_id, MaybeOffloaded::No)],
10613 1 : cutoffs: GcCutoffs {
10614 1 : time: Some(Lsn(0x50)),
10615 1 : space: Lsn(0x50),
10616 1 : },
10617 1 : leases: Default::default(),
10618 1 : within_ancestor_pitr: false,
10619 1 : };
10620 1 : }
10621 1 :
10622 1 : let expected_result_at_gc_horizon = [
10623 1 : Bytes::from_static(b"value 0@0x10"),
10624 1 : Bytes::from_static(b"value 1@0x10@0x20"),
10625 1 : Bytes::from_static(b"value 2@0x10@0x30"),
10626 1 : Bytes::from_static(b"value 3@0x10@0x28@0x30@0x40"),
10627 1 : Bytes::from_static(b"value 4@0x10"),
10628 1 : Bytes::from_static(b"value 5@0x10@0x20"),
10629 1 : Bytes::from_static(b"value 6@0x10@0x20"),
10630 1 : Bytes::from_static(b"value 7@0x10"),
10631 1 : Bytes::from_static(b"value 8@0x10@0x48"),
10632 1 : Bytes::from_static(b"value 9@0x10@0x48"),
10633 1 : ];
10634 1 :
10635 1 : let expected_result_at_lsn_40 = [
10636 1 : Bytes::from_static(b"value 0@0x10"),
10637 1 : Bytes::from_static(b"value 1@0x10@0x20"),
10638 1 : Bytes::from_static(b"value 2@0x10@0x30"),
10639 1 : Bytes::from_static(b"value 3@0x10@0x28@0x30@0x40"),
10640 1 : Bytes::from_static(b"value 4@0x10"),
10641 1 : Bytes::from_static(b"value 5@0x10@0x20"),
10642 1 : Bytes::from_static(b"value 6@0x10@0x20"),
10643 1 : Bytes::from_static(b"value 7@0x10"),
10644 1 : Bytes::from_static(b"value 8@0x10"),
10645 1 : Bytes::from_static(b"value 9@0x10"),
10646 1 : ];
10647 1 :
10648 3 : let verify_result = || async {
10649 33 : for idx in 0..10 {
10650 30 : assert_eq!(
10651 30 : branch_tline
10652 30 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
10653 30 : .await
10654 30 : .unwrap(),
10655 30 : &expected_result_at_gc_horizon[idx]
10656 1 : );
10657 30 : assert_eq!(
10658 30 : branch_tline
10659 30 : .get(get_key(idx as u32), Lsn(0x40), &ctx)
10660 30 : .await
10661 30 : .unwrap(),
10662 30 : &expected_result_at_lsn_40[idx]
10663 1 : );
10664 1 : }
10665 6 : };
10666 1 :
10667 1 : verify_result().await;
10668 1 :
10669 1 : let cancel = CancellationToken::new();
10670 1 : branch_tline
10671 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
10672 1 : .await
10673 1 : .unwrap();
10674 1 :
10675 1 : verify_result().await;
10676 1 :
10677 1 : // Piggyback a compaction with above_lsn. Ensure it works correctly when the specified LSN intersects with the layer files.
10678 1 : // Now we already have a single large delta layer, so the compaction min_layer_lsn should be the same as the ancestor LSN (0x18).
10679 1 : branch_tline
10680 1 : .compact_with_gc(
10681 1 : &cancel,
10682 1 : CompactOptions {
10683 1 : compact_lsn_range: Some(CompactLsnRange::above(Lsn(0x40))),
10684 1 : ..Default::default()
10685 1 : },
10686 1 : &ctx,
10687 1 : )
10688 1 : .await
10689 1 : .unwrap();
10690 1 :
10691 1 : verify_result().await;
10692 1 :
10693 1 : Ok(())
10694 1 : }
10695 :
10696 : // Regression test for https://github.com/neondatabase/neon/issues/9012
10697 : // Create an image arrangement where we have to read at different LSN ranges
10698 : // from a delta layer. This is achieved by overlapping an image layer on top of
10699 : // a delta layer. Like so:
10700 : //
10701 : // A B
10702 : // +----------------+ -> delta_layer
10703 : // | | ^ lsn
10704 : // | =========|-> nested_image_layer |
10705 : // | C | |
10706 : // +----------------+ |
10707 : // ======== -> baseline_image_layer +-------> key
10708 : //
10709 : //
10710 : // When querying the key range [A, B) we need to read at different LSN ranges
10711 : // for [A, C) and [C, B). This test checks that the described edge case is handled correctly.
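// A variant of the same scenario with in-memory layers instead of a delta layer is
// exercised by `test_vectored_read_with_image_layer_inside_inmem` below.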
10712 : #[cfg(feature = "testing")]
10713 : #[tokio::test]
10714 1 : async fn test_vectored_read_with_nested_image_layer() -> anyhow::Result<()> {
10715 1 : let harness = TenantHarness::create("test_vectored_read_with_nested_image_layer").await?;
10716 1 : let (tenant, ctx) = harness.load().await;
10717 1 :
10718 1 : let will_init_keys = [2, 6];
10719 22 : fn get_key(id: u32) -> Key {
10720 22 : let mut key = Key::from_hex("110000000033333333444444445500000000").unwrap();
10721 22 : key.field6 = id;
10722 22 : key
10723 22 : }
10724 1 :
10725 1 : let mut expected_key_values = HashMap::new();
10726 1 :
10727 1 : let baseline_image_layer_lsn = Lsn(0x10);
10728 1 : let mut baseline_img_layer = Vec::new();
10729 6 : for i in 0..5 {
10730 5 : let key = get_key(i);
10731 5 : let value = format!("value {i}@{baseline_image_layer_lsn}");
10732 5 :
10733 5 : let removed = expected_key_values.insert(key, value.clone());
10734 5 : assert!(removed.is_none());
10735 1 :
10736 5 : baseline_img_layer.push((key, Bytes::from(value)));
10737 1 : }
10738 1 :
10739 1 : let nested_image_layer_lsn = Lsn(0x50);
10740 1 : let mut nested_img_layer = Vec::new();
10741 6 : for i in 5..10 {
10742 5 : let key = get_key(i);
10743 5 : let value = format!("value {i}@{nested_image_layer_lsn}");
10744 5 :
10745 5 : let removed = expected_key_values.insert(key, value.clone());
10746 5 : assert!(removed.is_none());
10747 1 :
10748 5 : nested_img_layer.push((key, Bytes::from(value)));
10749 1 : }
10750 1 :
10751 1 : let mut delta_layer_spec = Vec::default();
10752 1 : let delta_layer_start_lsn = Lsn(0x20);
10753 1 : let mut delta_layer_end_lsn = delta_layer_start_lsn;
10754 1 :
10755 11 : for i in 0..10 {
10756 10 : let key = get_key(i);
10757 10 : let key_in_nested = nested_img_layer
10758 10 : .iter()
10759 40 : .any(|(key_with_img, _)| *key_with_img == key);
10760 10 : let lsn = {
10761 10 : if key_in_nested {
10762 5 : Lsn(nested_image_layer_lsn.0 + 0x10)
10763 1 : } else {
10764 5 : delta_layer_start_lsn
10765 1 : }
10766 1 : };
10767 1 :
10768 10 : let will_init = will_init_keys.contains(&i);
10769 10 : if will_init {
10770 2 : delta_layer_spec.push((key, lsn, Value::WalRecord(NeonWalRecord::wal_init(""))));
10771 2 :
10772 2 : expected_key_values.insert(key, "".to_string());
10773 8 : } else {
10774 8 : let delta = format!("@{lsn}");
10775 8 : delta_layer_spec.push((
10776 8 : key,
10777 8 : lsn,
10778 8 : Value::WalRecord(NeonWalRecord::wal_append(&delta)),
10779 8 : ));
10780 8 :
10781 8 : expected_key_values
10782 8 : .get_mut(&key)
10783 8 : .expect("An image exists for each key")
10784 8 : .push_str(delta.as_str());
10785 8 : }
10786 10 : delta_layer_end_lsn = std::cmp::max(delta_layer_start_lsn, lsn);
10787 1 : }
10788 1 :
10789 1 : delta_layer_end_lsn = Lsn(delta_layer_end_lsn.0 + 1);
10790 1 :
10791 1 : assert!(
10792 1 : nested_image_layer_lsn > delta_layer_start_lsn
10793 1 : && nested_image_layer_lsn < delta_layer_end_lsn
10794 1 : );
10795 1 :
10796 1 : let tline = tenant
10797 1 : .create_test_timeline_with_layers(
10798 1 : TIMELINE_ID,
10799 1 : baseline_image_layer_lsn,
10800 1 : DEFAULT_PG_VERSION,
10801 1 : &ctx,
10802 1 : vec![], // in-memory layers
10803 1 : vec![DeltaLayerTestDesc::new_with_inferred_key_range(
10804 1 : delta_layer_start_lsn..delta_layer_end_lsn,
10805 1 : delta_layer_spec,
10806 1 : )], // delta layers
10807 1 : vec![
10808 1 : (baseline_image_layer_lsn, baseline_img_layer),
10809 1 : (nested_image_layer_lsn, nested_img_layer),
10810 1 : ], // image layers
10811 1 : delta_layer_end_lsn,
10812 1 : )
10813 1 : .await?;
10814 1 :
10815 1 : let query = VersionedKeySpaceQuery::uniform(
10816 1 : KeySpace::single(get_key(0)..get_key(10)),
10817 1 : delta_layer_end_lsn,
10818 1 : );
10819 1 :
10820 1 : let results = tline
10821 1 : .get_vectored(query, IoConcurrency::sequential(), &ctx)
10822 1 : .await
10823 1 : .expect("No vectored errors");
10824 11 : for (key, res) in results {
10825 10 : let value = res.expect("No key errors");
10826 10 : let expected_value = expected_key_values.remove(&key).expect("No unknown keys");
10827 10 : assert_eq!(value, Bytes::from(expected_value));
10828 1 : }
10829 1 :
10830 1 : Ok(())
10831 1 : }
10832 :
10833 : #[cfg(feature = "testing")]
10834 : #[tokio::test]
10835 1 : async fn test_vectored_read_with_image_layer_inside_inmem() -> anyhow::Result<()> {
10836 1 : let harness =
10837 1 : TenantHarness::create("test_vectored_read_with_image_layer_inside_inmem").await?;
10838 1 : let (tenant, ctx) = harness.load().await;
10839 1 :
10840 1 : let will_init_keys = [2, 6];
10841 32 : fn get_key(id: u32) -> Key {
10842 32 : let mut key = Key::from_hex("110000000033333333444444445500000000").unwrap();
10843 32 : key.field6 = id;
10844 32 : key
10845 32 : }
10846 1 :
10847 1 : let mut expected_key_values = HashMap::new();
10848 1 :
10849 1 : let baseline_image_layer_lsn = Lsn(0x10);
10850 1 : let mut baseline_img_layer = Vec::new();
10851 6 : for i in 0..5 {
10852 5 : let key = get_key(i);
10853 5 : let value = format!("value {i}@{baseline_image_layer_lsn}");
10854 5 :
10855 5 : let removed = expected_key_values.insert(key, value.clone());
10856 5 : assert!(removed.is_none());
10857 1 :
10858 5 : baseline_img_layer.push((key, Bytes::from(value)));
10859 1 : }
10860 1 :
10861 1 : let nested_image_layer_lsn = Lsn(0x50);
10862 1 : let mut nested_img_layer = Vec::new();
10863 6 : for i in 5..10 {
10864 5 : let key = get_key(i);
10865 5 : let value = format!("value {i}@{nested_image_layer_lsn}");
10866 5 :
10867 5 : let removed = expected_key_values.insert(key, value.clone());
10868 5 : assert!(removed.is_none());
10869 1 :
10870 5 : nested_img_layer.push((key, Bytes::from(value)));
10871 1 : }
10872 1 :
10873 1 : let frozen_layer = {
10874 1 : let lsn_range = Lsn(0x40)..Lsn(0x60);
10875 1 : let mut data = Vec::new();
10876 11 : for i in 0..10 {
10877 10 : let key = get_key(i);
10878 10 : let key_in_nested = nested_img_layer
10879 10 : .iter()
10880 40 : .any(|(key_with_img, _)| *key_with_img == key);
10881 10 : let lsn = {
10882 10 : if key_in_nested {
10883 5 : Lsn(nested_image_layer_lsn.0 + 5)
10884 1 : } else {
10885 5 : lsn_range.start
10886 1 : }
10887 1 : };
10888 1 :
10889 10 : let will_init = will_init_keys.contains(&i);
10890 10 : if will_init {
10891 2 : data.push((key, lsn, Value::WalRecord(NeonWalRecord::wal_init(""))));
10892 2 :
10893 2 : expected_key_values.insert(key, "".to_string());
10894 8 : } else {
10895 8 : let delta = format!("@{lsn}");
10896 8 : data.push((
10897 8 : key,
10898 8 : lsn,
10899 8 : Value::WalRecord(NeonWalRecord::wal_append(&delta)),
10900 8 : ));
10901 8 :
10902 8 : expected_key_values
10903 8 : .get_mut(&key)
10904 8 : .expect("An image exists for each key")
10905 8 : .push_str(delta.as_str());
10906 8 : }
10907 1 : }
10908 1 :
10909 1 : InMemoryLayerTestDesc {
10910 1 : lsn_range,
10911 1 : is_open: false,
10912 1 : data,
10913 1 : }
10914 1 : };
10915 1 :
10916 1 : let (open_layer, last_record_lsn) = {
10917 1 : let start_lsn = Lsn(0x70);
10918 1 : let mut data = Vec::new();
10919 1 : let mut end_lsn = Lsn(0);
10920 11 : for i in 0..10 {
10921 10 : let key = get_key(i);
10922 10 : let lsn = Lsn(start_lsn.0 + i as u64);
10923 10 : let delta = format!("@{lsn}");
10924 10 : data.push((
10925 10 : key,
10926 10 : lsn,
10927 10 : Value::WalRecord(NeonWalRecord::wal_append(&delta)),
10928 10 : ));
10929 10 :
10930 10 : expected_key_values
10931 10 : .get_mut(&key)
10932 10 : .expect("An image exists for each key")
10933 10 : .push_str(delta.as_str());
10934 10 :
10935 10 : end_lsn = std::cmp::max(end_lsn, lsn);
10936 10 : }
10937 1 :
10938 1 : (
10939 1 : InMemoryLayerTestDesc {
10940 1 : lsn_range: start_lsn..Lsn::MAX,
10941 1 : is_open: true,
10942 1 : data,
10943 1 : },
10944 1 : end_lsn,
10945 1 : )
10946 1 : };
10947 1 :
10948 1 : assert!(
10949 1 : nested_image_layer_lsn > frozen_layer.lsn_range.start
10950 1 : && nested_image_layer_lsn < frozen_layer.lsn_range.end
10951 1 : );
10952 1 :
10953 1 : let tline = tenant
10954 1 : .create_test_timeline_with_layers(
10955 1 : TIMELINE_ID,
10956 1 : baseline_image_layer_lsn,
10957 1 : DEFAULT_PG_VERSION,
10958 1 : &ctx,
10959 1 : vec![open_layer, frozen_layer], // in-memory layers
10960 1 : Vec::new(), // delta layers
10961 1 : vec![
10962 1 : (baseline_image_layer_lsn, baseline_img_layer),
10963 1 : (nested_image_layer_lsn, nested_img_layer),
10964 1 : ], // image layers
10965 1 : last_record_lsn,
10966 1 : )
10967 1 : .await?;
10968 1 :
10969 1 : let query = VersionedKeySpaceQuery::uniform(
10970 1 : KeySpace::single(get_key(0)..get_key(10)),
10971 1 : last_record_lsn,
10972 1 : );
10973 1 :
10974 1 : let results = tline
10975 1 : .get_vectored(query, IoConcurrency::sequential(), &ctx)
10976 1 : .await
10977 1 : .expect("No vectored errors");
10978 11 : for (key, res) in results {
10979 10 : let value = res.expect("No key errors");
10980 10 : let expected_value = expected_key_values.remove(&key).expect("No unknown keys");
10981 10 : assert_eq!(value, Bytes::from(expected_value.clone()));
10982 1 :
10983 10 : tracing::info!("key={key} value={expected_value}");
10984 1 : }
10985 1 :
10986 1 : Ok(())
10987 1 : }
10988 :
10989 : // A randomized read path test. Generates a layer map according to a deterministic
10990 : // specification. Fills the (key, LSN) space in a random manner and then performs
10991 : // random scattered queries, validating the results against in-memory storage.
10992 : //
10993 : // See this internal Notion page for a diagram of the layer map:
10994 : // https://www.notion.so/neondatabase/Read-Path-Unit-Testing-Fuzzing-1d1f189e0047806c8e5cd37781b0a350?pvs=4
10995 : //
10996 : // A fuzzing mode is also supported. In this mode, the test will use a random
10997 : // seed instead of a hardcoded one. Use it in conjunction with `cargo stress`
10998 : // to run multiple instances in parallel:
10999 : //
11000 : // $ RUST_BACKTRACE=1 RUST_LOG=INFO \
11001 : // cargo stress --package=pageserver --features=testing,fuzz-read-path --release -- test_read_path
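 : //
 : // (Editorial sketch, not part of the original source: an approximate picture of the
 : // fixed layer map defined below, for readers without access to the Notion page.
 : //   LSN 400..504 : in-memory layer over the whole key range
 : //   LSN 304..400 : in-memory layer over the whole key range
 : //   LSN 200..304 : three delta layers, one per third of the key range
 : //   LSN 456, 256 : two narrow image layers straddling the delta layers' key boundaries
 : //   LSN 104      : one image layer over the whole key range)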
11002 : #[cfg(feature = "testing")]
11003 : #[tokio::test]
11004 1 : async fn test_read_path() -> anyhow::Result<()> {
11005 1 : use rand::seq::SliceRandom;
11006 1 :
11007 1 : let seed = if cfg!(feature = "fuzz-read-path") {
11008 1 : let seed: u64 = thread_rng().r#gen();
11009 0 : seed
11010 1 : } else {
11011 1 : // Use a hard-coded seed when not in fuzzing mode.
11012 1 : // Note that with the current approach results are not reproducible
11013 1 : // across platforms and Rust releases.
11014 1 : const SEED: u64 = 0;
11015 1 : SEED
11016 1 : };
11017 1 :
11018 1 : let mut random = StdRng::seed_from_u64(seed);
11019 1 :
11020 1 : let (queries, will_init_chance, gap_chance) = if cfg!(feature = "fuzz-read-path") {
11021 1 : const QUERIES: u64 = 5000;
11022 1 : let will_init_chance: u8 = random.gen_range(0..=10);
11023 0 : let gap_chance: u8 = random.gen_range(0..=50);
11024 0 :
11025 0 : (QUERIES, will_init_chance, gap_chance)
11026 1 : } else {
11027 1 : const QUERIES: u64 = 1000;
11028 1 : const WILL_INIT_CHANCE: u8 = 1;
11029 1 : const GAP_CHANCE: u8 = 5;
11030 1 :
11031 1 : (QUERIES, WILL_INIT_CHANCE, GAP_CHANCE)
11032 1 : };
11033 1 :
11034 1 : let harness = TenantHarness::create("test_read_path").await?;
11035 1 : let (tenant, ctx) = harness.load().await;
11036 1 :
11037 1 : tracing::info!("Using random seed: {seed}");
11038 1 : tracing::info!(%will_init_chance, %gap_chance, "Fill params");
11039 1 :
11040 1 : // Define the layer map shape. Note that this part is not randomized.
11041 1 :
11042 1 : const KEY_DIMENSION_SIZE: u32 = 99;
11043 1 : let start_key = Key::from_hex("110000000033333333444444445500000000").unwrap();
11044 1 : let end_key = start_key.add(KEY_DIMENSION_SIZE);
11045 1 : let total_key_range = start_key..end_key;
11046 1 : let total_key_range_size = end_key.to_i128() - start_key.to_i128();
11047 1 : let total_start_lsn = Lsn(104);
11048 1 : let last_record_lsn = Lsn(504);
11049 1 :
11050 1 : assert!(total_key_range_size % 3 == 0);
11051 1 :
11052 1 : let in_memory_layers_shape = vec![
11053 1 : (total_key_range.clone(), Lsn(304)..Lsn(400)),
11054 1 : (total_key_range.clone(), Lsn(400)..last_record_lsn),
11055 1 : ];
11056 1 :
11057 1 : let delta_layers_shape = vec![
11058 1 : (
11059 1 : start_key..(start_key.add((total_key_range_size / 3) as u32)),
11060 1 : Lsn(200)..Lsn(304),
11061 1 : ),
11062 1 : (
11063 1 : (start_key.add((total_key_range_size / 3) as u32))
11064 1 : ..(start_key.add((total_key_range_size * 2 / 3) as u32)),
11065 1 : Lsn(200)..Lsn(304),
11066 1 : ),
11067 1 : (
11068 1 : (start_key.add((total_key_range_size * 2 / 3) as u32))
11069 1 : ..(start_key.add(total_key_range_size as u32)),
11070 1 : Lsn(200)..Lsn(304),
11071 1 : ),
11072 1 : ];
11073 1 :
11074 1 : let image_layers_shape = vec![
11075 1 : (
11076 1 : start_key.add((total_key_range_size * 2 / 3 - 10) as u32)
11077 1 : ..start_key.add((total_key_range_size * 2 / 3 + 10) as u32),
11078 1 : Lsn(456),
11079 1 : ),
11080 1 : (
11081 1 : start_key.add((total_key_range_size / 3 - 10) as u32)
11082 1 : ..start_key.add((total_key_range_size / 3 + 10) as u32),
11083 1 : Lsn(256),
11084 1 : ),
11085 1 : (total_key_range.clone(), total_start_lsn),
11086 1 : ];
11087 1 :
11088 1 : let specification = TestTimelineSpecification {
11089 1 : start_lsn: total_start_lsn,
11090 1 : last_record_lsn,
11091 1 : in_memory_layers_shape,
11092 1 : delta_layers_shape,
11093 1 : image_layers_shape,
11094 1 : gap_chance,
11095 1 : will_init_chance,
11096 1 : };
11097 1 :
11098 1 : // Create and randomly fill in the layers according to the specification
11099 1 : let (tline, storage, interesting_lsns) = randomize_timeline(
11100 1 : &tenant,
11101 1 : TIMELINE_ID,
11102 1 : DEFAULT_PG_VERSION,
11103 1 : specification,
11104 1 : &mut random,
11105 1 : &ctx,
11106 1 : )
11107 1 : .await?;
11108 1 :
11109 1 : // Now generate queries based on the interesting lsns that we've collected.
11110 1 : //
11111 1 : // While there's still room in the query, pick and interesting LSN and a random
11112 1 : // key. Then roll the dice to see if the next key should also be included in
11113 1 : // the query. When the roll fails, break the "batch" and pick another point in the
11114 1 : // (key, LSN) space.
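 : //
 : // (Illustrative example only: a single query might end up as, say, three
 : // consecutive keys batched at one interesting LSN followed by a lone key at
 : // another LSN, and so on until MAX_GET_VECTORED_KEYS keys have been picked.)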
11115 1 :
11116 1 : const PICK_NEXT_CHANCE: u8 = 50;
11117 1 : for _ in 0..queries {
11118 1000 : let query = {
11119 1000 : let mut keyspaces_at_lsn: HashMap<Lsn, KeySpaceRandomAccum> = HashMap::default();
11120 1000 : let mut used_keys: HashSet<Key> = HashSet::default();
11121 1 :
11122 22536 : while used_keys.len() < Timeline::MAX_GET_VECTORED_KEYS as usize {
11123 21536 : let selected_lsn = interesting_lsns.choose(&mut random).expect("not empty");
11124 21536 : let mut selected_key = start_key.add(random.gen_range(0..KEY_DIMENSION_SIZE));
11125 1 :
11126 37614 : while used_keys.len() < Timeline::MAX_GET_VECTORED_KEYS as usize {
11127 37093 : if used_keys.contains(&selected_key)
11128 32154 : || selected_key >= start_key.add(KEY_DIMENSION_SIZE)
11129 1 : {
11130 5093 : break;
11131 32000 : }
11132 32000 :
11133 32000 : keyspaces_at_lsn
11134 32000 : .entry(*selected_lsn)
11135 32000 : .or_default()
11136 32000 : .add_key(selected_key);
11137 32000 : used_keys.insert(selected_key);
11138 32000 :
11139 32000 : let pick_next = random.gen_range(0..=100) <= PICK_NEXT_CHANCE;
11140 32000 : if pick_next {
11141 16078 : selected_key = selected_key.next();
11142 16078 : } else {
11143 15922 : break;
11144 1 : }
11145 1 : }
11146 1 : }
11147 1 :
11148 1000 : VersionedKeySpaceQuery::scattered(
11149 1000 : keyspaces_at_lsn
11150 1000 : .into_iter()
11151 11917 : .map(|(lsn, acc)| (lsn, acc.to_keyspace()))
11152 1000 : .collect(),
11153 1000 : )
11154 1 : };
11155 1 :
11156 1 : // Run the query and validate the results
11157 1 :
11158 1000 : let results = tline
11159 1000 : .get_vectored(query.clone(), IoConcurrency::Sequential, &ctx)
11160 1000 : .await;
11161 1 :
11162 1000 : let blobs = match results {
11163 1000 : Ok(ok) => ok,
11164 1 : Err(err) => {
11165 0 : panic!("seed={seed} Error returned for query {query}: {err}");
11166 1 : }
11167 1 : };
11168 1 :
11169 32000 : for (key, key_res) in blobs.into_iter() {
11170 32000 : match key_res {
11171 32000 : Ok(blob) => {
11172 32000 : let requested_at_lsn = query.map_key_to_lsn(&key);
11173 32000 : let expected = storage.get(key, requested_at_lsn);
11174 32000 :
11175 32000 : if blob != expected {
11176 1 : tracing::error!(
11177 1 : "seed={seed} Mismatch for {key}@{requested_at_lsn} from query: {query}"
11178 1 : );
11179 32000 : }
11180 1 :
11181 32000 : assert_eq!(blob, expected);
11182 1 : }
11183 1 : Err(err) => {
11184 0 : let requested_at_lsn = query.map_key_to_lsn(&key);
11185 0 :
11186 0 : panic!(
11187 0 : "seed={seed} Error returned for {key}@{requested_at_lsn} from query {query}: {err}"
11188 0 : );
11189 1 : }
11190 1 : }
11191 1 : }
11192 1 : }
11193 1 :
11194 1 : Ok(())
11195 1 : }
11196 :
11197 107 : fn sort_layer_key(k1: &PersistentLayerKey, k2: &PersistentLayerKey) -> std::cmp::Ordering {
11198 107 : (
11199 107 : k1.is_delta,
11200 107 : k1.key_range.start,
11201 107 : k1.key_range.end,
11202 107 : k1.lsn_range.start,
11203 107 : k1.lsn_range.end,
11204 107 : )
11205 107 : .cmp(&(
11206 107 : k2.is_delta,
11207 107 : k2.key_range.start,
11208 107 : k2.key_range.end,
11209 107 : k2.lsn_range.start,
11210 107 : k2.lsn_range.end,
11211 107 : ))
11212 107 : }
11213 :
11214 12 : async fn inspect_and_sort(
11215 12 : tline: &Arc<Timeline>,
11216 12 : filter: Option<std::ops::Range<Key>>,
11217 12 : ) -> Vec<PersistentLayerKey> {
11218 12 : let mut all_layers = tline.inspect_historic_layers().await.unwrap();
11219 12 : if let Some(filter) = filter {
11220 54 : all_layers.retain(|layer| overlaps_with(&layer.key_range, &filter));
11221 11 : }
11222 12 : all_layers.sort_by(sort_layer_key);
11223 12 : all_layers
11224 12 : }
11225 :
11226 : #[cfg(feature = "testing")]
11227 11 : fn check_layer_map_key_eq(
11228 11 : mut left: Vec<PersistentLayerKey>,
11229 11 : mut right: Vec<PersistentLayerKey>,
11230 11 : ) {
11231 11 : left.sort_by(sort_layer_key);
11232 11 : right.sort_by(sort_layer_key);
11233 11 : if left != right {
11234 0 : eprintln!("---LEFT---");
11235 0 : for left in left.iter() {
11236 0 : eprintln!("{}", left);
11237 0 : }
11238 0 : eprintln!("---RIGHT---");
11239 0 : for right in right.iter() {
11240 0 : eprintln!("{}", right);
11241 0 : }
11242 0 : assert_eq!(left, right);
11243 11 : }
11244 11 : }
11245 :
11246 : #[cfg(feature = "testing")]
11247 : #[tokio::test]
11248 1 : async fn test_simple_partial_bottom_most_compaction() -> anyhow::Result<()> {
11249 1 : let harness = TenantHarness::create("test_simple_partial_bottom_most_compaction").await?;
11250 1 : let (tenant, ctx) = harness.load().await;
11251 1 :
11252 91 : fn get_key(id: u32) -> Key {
11253 91 : // using aux keys here b/c they are guaranteed to be inside `collect_keyspace`.
11254 91 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
11255 91 : key.field6 = id;
11256 91 : key
11257 91 : }
11258 1 :
11259 1 : // img layer at 0x10
11260 1 : let img_layer = (0..10)
11261 10 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
11262 1 : .collect_vec();
11263 1 :
11264 1 : let delta1 = vec![
11265 1 : (
11266 1 : get_key(1),
11267 1 : Lsn(0x20),
11268 1 : Value::Image(Bytes::from("value 1@0x20")),
11269 1 : ),
11270 1 : (
11271 1 : get_key(2),
11272 1 : Lsn(0x30),
11273 1 : Value::Image(Bytes::from("value 2@0x30")),
11274 1 : ),
11275 1 : (
11276 1 : get_key(3),
11277 1 : Lsn(0x40),
11278 1 : Value::Image(Bytes::from("value 3@0x40")),
11279 1 : ),
11280 1 : ];
11281 1 : let delta2 = vec![
11282 1 : (
11283 1 : get_key(5),
11284 1 : Lsn(0x20),
11285 1 : Value::Image(Bytes::from("value 5@0x20")),
11286 1 : ),
11287 1 : (
11288 1 : get_key(6),
11289 1 : Lsn(0x20),
11290 1 : Value::Image(Bytes::from("value 6@0x20")),
11291 1 : ),
11292 1 : ];
11293 1 : let delta3 = vec![
11294 1 : (
11295 1 : get_key(8),
11296 1 : Lsn(0x48),
11297 1 : Value::Image(Bytes::from("value 8@0x48")),
11298 1 : ),
11299 1 : (
11300 1 : get_key(9),
11301 1 : Lsn(0x48),
11302 1 : Value::Image(Bytes::from("value 9@0x48")),
11303 1 : ),
11304 1 : ];
11305 1 :
11306 1 : let tline = tenant
11307 1 : .create_test_timeline_with_layers(
11308 1 : TIMELINE_ID,
11309 1 : Lsn(0x10),
11310 1 : DEFAULT_PG_VERSION,
11311 1 : &ctx,
11312 1 : vec![], // in-memory layers
11313 1 : vec![
11314 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x20)..Lsn(0x48), delta1),
11315 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x20)..Lsn(0x48), delta2),
11316 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x48)..Lsn(0x50), delta3),
11317 1 : ], // delta layers
11318 1 : vec![(Lsn(0x10), img_layer)], // image layers
11319 1 : Lsn(0x50),
11320 1 : )
11321 1 : .await?;
11322 1 :
11323 1 : {
11324 1 : tline
11325 1 : .applied_gc_cutoff_lsn
11326 1 : .lock_for_write()
11327 1 : .store_and_unlock(Lsn(0x30))
11328 1 : .wait()
11329 1 : .await;
11330 1 : // Update GC info
11331 1 : let mut guard = tline.gc_info.write().unwrap();
11332 1 : *guard = GcInfo {
11333 1 : retain_lsns: vec![(Lsn(0x20), tline.timeline_id, MaybeOffloaded::No)],
11334 1 : cutoffs: GcCutoffs {
11335 1 : time: Some(Lsn(0x30)),
11336 1 : space: Lsn(0x30),
11337 1 : },
11338 1 : leases: Default::default(),
11339 1 : within_ancestor_pitr: false,
11340 1 : };
11341 1 : }
11342 1 :
11343 1 : let cancel = CancellationToken::new();
11344 1 :
11345 1 : // Do a partial compaction on key range 0..2
11346 1 : tline
11347 1 : .compact_with_gc(
11348 1 : &cancel,
11349 1 : CompactOptions {
11350 1 : flags: EnumSet::new(),
11351 1 : compact_key_range: Some((get_key(0)..get_key(2)).into()),
11352 1 : ..Default::default()
11353 1 : },
11354 1 : &ctx,
11355 1 : )
11356 1 : .await
11357 1 : .unwrap();
11358 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
11359 1 : check_layer_map_key_eq(
11360 1 : all_layers,
11361 1 : vec![
11362 1 : // newly-generated image layer for the partial compaction range 0-2
11363 1 : PersistentLayerKey {
11364 1 : key_range: get_key(0)..get_key(2),
11365 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11366 1 : is_delta: false,
11367 1 : },
11368 1 : PersistentLayerKey {
11369 1 : key_range: get_key(0)..get_key(10),
11370 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
11371 1 : is_delta: false,
11372 1 : },
11373 1 : // delta1 is split and the second part is rewritten
11374 1 : PersistentLayerKey {
11375 1 : key_range: get_key(2)..get_key(4),
11376 1 : lsn_range: Lsn(0x20)..Lsn(0x48),
11377 1 : is_delta: true,
11378 1 : },
11379 1 : PersistentLayerKey {
11380 1 : key_range: get_key(5)..get_key(7),
11381 1 : lsn_range: Lsn(0x20)..Lsn(0x48),
11382 1 : is_delta: true,
11383 1 : },
11384 1 : PersistentLayerKey {
11385 1 : key_range: get_key(8)..get_key(10),
11386 1 : lsn_range: Lsn(0x48)..Lsn(0x50),
11387 1 : is_delta: true,
11388 1 : },
11389 1 : ],
11390 1 : );
11391 1 :
11392 1 : // Do a partial compaction on key range 2..4
11393 1 : tline
11394 1 : .compact_with_gc(
11395 1 : &cancel,
11396 1 : CompactOptions {
11397 1 : flags: EnumSet::new(),
11398 1 : compact_key_range: Some((get_key(2)..get_key(4)).into()),
11399 1 : ..Default::default()
11400 1 : },
11401 1 : &ctx,
11402 1 : )
11403 1 : .await
11404 1 : .unwrap();
11405 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
11406 1 : check_layer_map_key_eq(
11407 1 : all_layers,
11408 1 : vec![
11409 1 : PersistentLayerKey {
11410 1 : key_range: get_key(0)..get_key(2),
11411 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11412 1 : is_delta: false,
11413 1 : },
11414 1 : PersistentLayerKey {
11415 1 : key_range: get_key(0)..get_key(10),
11416 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
11417 1 : is_delta: false,
11418 1 : },
11419 1 : // image layer generated for the compaction range 2-4
11420 1 : PersistentLayerKey {
11421 1 : key_range: get_key(2)..get_key(4),
11422 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11423 1 : is_delta: false,
11424 1 : },
11425 1 : // we have key2/key3 above the retain_lsn, so we still need this delta layer
11426 1 : PersistentLayerKey {
11427 1 : key_range: get_key(2)..get_key(4),
11428 1 : lsn_range: Lsn(0x20)..Lsn(0x48),
11429 1 : is_delta: true,
11430 1 : },
11431 1 : PersistentLayerKey {
11432 1 : key_range: get_key(5)..get_key(7),
11433 1 : lsn_range: Lsn(0x20)..Lsn(0x48),
11434 1 : is_delta: true,
11435 1 : },
11436 1 : PersistentLayerKey {
11437 1 : key_range: get_key(8)..get_key(10),
11438 1 : lsn_range: Lsn(0x48)..Lsn(0x50),
11439 1 : is_delta: true,
11440 1 : },
11441 1 : ],
11442 1 : );
11443 1 :
11444 1 : // Do a partial compaction on key range 4..9
11445 1 : tline
11446 1 : .compact_with_gc(
11447 1 : &cancel,
11448 1 : CompactOptions {
11449 1 : flags: EnumSet::new(),
11450 1 : compact_key_range: Some((get_key(4)..get_key(9)).into()),
11451 1 : ..Default::default()
11452 1 : },
11453 1 : &ctx,
11454 1 : )
11455 1 : .await
11456 1 : .unwrap();
11457 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
11458 1 : check_layer_map_key_eq(
11459 1 : all_layers,
11460 1 : vec![
11461 1 : PersistentLayerKey {
11462 1 : key_range: get_key(0)..get_key(2),
11463 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11464 1 : is_delta: false,
11465 1 : },
11466 1 : PersistentLayerKey {
11467 1 : key_range: get_key(0)..get_key(10),
11468 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
11469 1 : is_delta: false,
11470 1 : },
11471 1 : PersistentLayerKey {
11472 1 : key_range: get_key(2)..get_key(4),
11473 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11474 1 : is_delta: false,
11475 1 : },
11476 1 : PersistentLayerKey {
11477 1 : key_range: get_key(2)..get_key(4),
11478 1 : lsn_range: Lsn(0x20)..Lsn(0x48),
11479 1 : is_delta: true,
11480 1 : },
11481 1 : // image layer generated for this compaction range
11482 1 : PersistentLayerKey {
11483 1 : key_range: get_key(4)..get_key(9),
11484 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11485 1 : is_delta: false,
11486 1 : },
11487 1 : PersistentLayerKey {
11488 1 : key_range: get_key(8)..get_key(10),
11489 1 : lsn_range: Lsn(0x48)..Lsn(0x50),
11490 1 : is_delta: true,
11491 1 : },
11492 1 : ],
11493 1 : );
11494 1 :
11495 1 : // Do a partial compaction on key range 9..10
11496 1 : tline
11497 1 : .compact_with_gc(
11498 1 : &cancel,
11499 1 : CompactOptions {
11500 1 : flags: EnumSet::new(),
11501 1 : compact_key_range: Some((get_key(9)..get_key(10)).into()),
11502 1 : ..Default::default()
11503 1 : },
11504 1 : &ctx,
11505 1 : )
11506 1 : .await
11507 1 : .unwrap();
11508 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
11509 1 : check_layer_map_key_eq(
11510 1 : all_layers,
11511 1 : vec![
11512 1 : PersistentLayerKey {
11513 1 : key_range: get_key(0)..get_key(2),
11514 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11515 1 : is_delta: false,
11516 1 : },
11517 1 : PersistentLayerKey {
11518 1 : key_range: get_key(0)..get_key(10),
11519 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
11520 1 : is_delta: false,
11521 1 : },
11522 1 : PersistentLayerKey {
11523 1 : key_range: get_key(2)..get_key(4),
11524 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11525 1 : is_delta: false,
11526 1 : },
11527 1 : PersistentLayerKey {
11528 1 : key_range: get_key(2)..get_key(4),
11529 1 : lsn_range: Lsn(0x20)..Lsn(0x48),
11530 1 : is_delta: true,
11531 1 : },
11532 1 : PersistentLayerKey {
11533 1 : key_range: get_key(4)..get_key(9),
11534 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11535 1 : is_delta: false,
11536 1 : },
11537 1 : // image layer generated for the compaction range
11538 1 : PersistentLayerKey {
11539 1 : key_range: get_key(9)..get_key(10),
11540 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11541 1 : is_delta: false,
11542 1 : },
11543 1 : PersistentLayerKey {
11544 1 : key_range: get_key(8)..get_key(10),
11545 1 : lsn_range: Lsn(0x48)..Lsn(0x50),
11546 1 : is_delta: true,
11547 1 : },
11548 1 : ],
11549 1 : );
11550 1 :
11551 1 : // Do a partial compaction on key range 0..10, all image layers below LSN 20 can be replaced with new ones.
11552 1 : tline
11553 1 : .compact_with_gc(
11554 1 : &cancel,
11555 1 : CompactOptions {
11556 1 : flags: EnumSet::new(),
11557 1 : compact_key_range: Some((get_key(0)..get_key(10)).into()),
11558 1 : ..Default::default()
11559 1 : },
11560 1 : &ctx,
11561 1 : )
11562 1 : .await
11563 1 : .unwrap();
11564 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
11565 1 : check_layer_map_key_eq(
11566 1 : all_layers,
11567 1 : vec![
11568 1 : // aha, we removed all unnecessary image/delta layers and got a very clean layer map!
11569 1 : PersistentLayerKey {
11570 1 : key_range: get_key(0)..get_key(10),
11571 1 : lsn_range: Lsn(0x20)..Lsn(0x21),
11572 1 : is_delta: false,
11573 1 : },
11574 1 : PersistentLayerKey {
11575 1 : key_range: get_key(2)..get_key(4),
11576 1 : lsn_range: Lsn(0x20)..Lsn(0x48),
11577 1 : is_delta: true,
11578 1 : },
11579 1 : PersistentLayerKey {
11580 1 : key_range: get_key(8)..get_key(10),
11581 1 : lsn_range: Lsn(0x48)..Lsn(0x50),
11582 1 : is_delta: true,
11583 1 : },
11584 1 : ],
11585 1 : );
11586 1 : Ok(())
11587 1 : }
11588 :
11589 : #[cfg(feature = "testing")]
11590 : #[tokio::test]
11591 1 : async fn test_timeline_offload_retain_lsn() -> anyhow::Result<()> {
11592 1 : let harness = TenantHarness::create("test_timeline_offload_retain_lsn")
11593 1 : .await
11594 1 : .unwrap();
11595 1 : let (tenant, ctx) = harness.load().await;
11596 1 : let tline_parent = tenant
11597 1 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
11598 1 : .await
11599 1 : .unwrap();
11600 1 : let tline_child = tenant
11601 1 : .branch_timeline_test(&tline_parent, NEW_TIMELINE_ID, Some(Lsn(0x20)), &ctx)
11602 1 : .await
11603 1 : .unwrap();
11604 1 : {
11605 1 : let gc_info_parent = tline_parent.gc_info.read().unwrap();
11606 1 : assert_eq!(
11607 1 : gc_info_parent.retain_lsns,
11608 1 : vec![(Lsn(0x20), tline_child.timeline_id, MaybeOffloaded::No)]
11609 1 : );
11610 1 : }
11611 1 : // We have to directly call the remote_client instead of using the archive function to avoid constructing broker client...
11612 1 : tline_child
11613 1 : .remote_client
11614 1 : .schedule_index_upload_for_timeline_archival_state(TimelineArchivalState::Archived)
11615 1 : .unwrap();
11616 1 : tline_child.remote_client.wait_completion().await.unwrap();
11617 1 : offload_timeline(&tenant, &tline_child)
11618 1 : .instrument(tracing::info_span!(parent: None, "offload_test", tenant_id=%"test", shard_id=%"test", timeline_id=%"test"))
11619 1 : .await.unwrap();
11620 1 : let child_timeline_id = tline_child.timeline_id;
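 : // Note: `Arc::try_unwrap` succeeds only when this is the last strong reference, so the
 : // `unwrap()` below asserts that no other strong references to the child timeline remain
 : // after offloading.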
11621 1 : Arc::try_unwrap(tline_child).unwrap();
11622 1 :
11623 1 : {
11624 1 : let gc_info_parent = tline_parent.gc_info.read().unwrap();
11625 1 : assert_eq!(
11626 1 : gc_info_parent.retain_lsns,
11627 1 : vec![(Lsn(0x20), child_timeline_id, MaybeOffloaded::Yes)]
11628 1 : );
11629 1 : }
11630 1 :
11631 1 : tenant
11632 1 : .get_offloaded_timeline(child_timeline_id)
11633 1 : .unwrap()
11634 1 : .defuse_for_tenant_drop();
11635 1 :
11636 1 : Ok(())
11637 1 : }
11638 :
11639 : #[cfg(feature = "testing")]
11640 : #[tokio::test]
11641 1 : async fn test_simple_bottom_most_compaction_above_lsn() -> anyhow::Result<()> {
11642 1 : let harness = TenantHarness::create("test_simple_bottom_most_compaction_above_lsn").await?;
11643 1 : let (tenant, ctx) = harness.load().await;
11644 1 :
11645 148 : fn get_key(id: u32) -> Key {
11646 148 : // using aux keys here b/c they are guaranteed to be inside `collect_keyspace`.
11647 148 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
11648 148 : key.field6 = id;
11649 148 : key
11650 148 : }
11651 1 :
11652 1 : let img_layer = (0..10)
11653 10 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
11654 1 : .collect_vec();
11655 1 :
11656 1 : let delta1 = vec![(
11657 1 : get_key(1),
11658 1 : Lsn(0x20),
11659 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
11660 1 : )];
11661 1 : let delta4 = vec![(
11662 1 : get_key(1),
11663 1 : Lsn(0x28),
11664 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x28")),
11665 1 : )];
11666 1 : let delta2 = vec![
11667 1 : (
11668 1 : get_key(1),
11669 1 : Lsn(0x30),
11670 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
11671 1 : ),
11672 1 : (
11673 1 : get_key(1),
11674 1 : Lsn(0x38),
11675 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x38")),
11676 1 : ),
11677 1 : ];
11678 1 : let delta3 = vec![
11679 1 : (
11680 1 : get_key(8),
11681 1 : Lsn(0x48),
11682 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
11683 1 : ),
11684 1 : (
11685 1 : get_key(9),
11686 1 : Lsn(0x48),
11687 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
11688 1 : ),
11689 1 : ];
11690 1 :
11691 1 : let tline = tenant
11692 1 : .create_test_timeline_with_layers(
11693 1 : TIMELINE_ID,
11694 1 : Lsn(0x10),
11695 1 : DEFAULT_PG_VERSION,
11696 1 : &ctx,
11697 1 : vec![], // in-memory layers
11698 1 : vec![
11699 1 : // delta1/2/4 only contain a single key but multiple updates
11700 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x20)..Lsn(0x28), delta1),
11701 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x30)..Lsn(0x50), delta2),
11702 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x28)..Lsn(0x30), delta4),
11703 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x30)..Lsn(0x50), delta3),
11704 1 : ], // delta layers
11705 1 : vec![(Lsn(0x10), img_layer)], // image layers
11706 1 : Lsn(0x50),
11707 1 : )
11708 1 : .await?;
11709 1 : {
11710 1 : tline
11711 1 : .applied_gc_cutoff_lsn
11712 1 : .lock_for_write()
11713 1 : .store_and_unlock(Lsn(0x30))
11714 1 : .wait()
11715 1 : .await;
11716 1 : // Update GC info
11717 1 : let mut guard = tline.gc_info.write().unwrap();
11718 1 : *guard = GcInfo {
11719 1 : retain_lsns: vec![
11720 1 : (Lsn(0x10), tline.timeline_id, MaybeOffloaded::No),
11721 1 : (Lsn(0x20), tline.timeline_id, MaybeOffloaded::No),
11722 1 : ],
11723 1 : cutoffs: GcCutoffs {
11724 1 : time: Some(Lsn(0x30)),
11725 1 : space: Lsn(0x30),
11726 1 : },
11727 1 : leases: Default::default(),
11728 1 : within_ancestor_pitr: false,
11729 1 : };
11730 1 : }
11731 1 :
11732 1 : let expected_result = [
11733 1 : Bytes::from_static(b"value 0@0x10"),
11734 1 : Bytes::from_static(b"value 1@0x10@0x20@0x28@0x30@0x38"),
11735 1 : Bytes::from_static(b"value 2@0x10"),
11736 1 : Bytes::from_static(b"value 3@0x10"),
11737 1 : Bytes::from_static(b"value 4@0x10"),
11738 1 : Bytes::from_static(b"value 5@0x10"),
11739 1 : Bytes::from_static(b"value 6@0x10"),
11740 1 : Bytes::from_static(b"value 7@0x10"),
11741 1 : Bytes::from_static(b"value 8@0x10@0x48"),
11742 1 : Bytes::from_static(b"value 9@0x10@0x48"),
11743 1 : ];
11744 1 :
11745 1 : let expected_result_at_gc_horizon = [
11746 1 : Bytes::from_static(b"value 0@0x10"),
11747 1 : Bytes::from_static(b"value 1@0x10@0x20@0x28@0x30"),
11748 1 : Bytes::from_static(b"value 2@0x10"),
11749 1 : Bytes::from_static(b"value 3@0x10"),
11750 1 : Bytes::from_static(b"value 4@0x10"),
11751 1 : Bytes::from_static(b"value 5@0x10"),
11752 1 : Bytes::from_static(b"value 6@0x10"),
11753 1 : Bytes::from_static(b"value 7@0x10"),
11754 1 : Bytes::from_static(b"value 8@0x10"),
11755 1 : Bytes::from_static(b"value 9@0x10"),
11756 1 : ];
11757 1 :
11758 1 : let expected_result_at_lsn_20 = [
11759 1 : Bytes::from_static(b"value 0@0x10"),
11760 1 : Bytes::from_static(b"value 1@0x10@0x20"),
11761 1 : Bytes::from_static(b"value 2@0x10"),
11762 1 : Bytes::from_static(b"value 3@0x10"),
11763 1 : Bytes::from_static(b"value 4@0x10"),
11764 1 : Bytes::from_static(b"value 5@0x10"),
11765 1 : Bytes::from_static(b"value 6@0x10"),
11766 1 : Bytes::from_static(b"value 7@0x10"),
11767 1 : Bytes::from_static(b"value 8@0x10"),
11768 1 : Bytes::from_static(b"value 9@0x10"),
11769 1 : ];
11770 1 :
11771 1 : let expected_result_at_lsn_10 = [
11772 1 : Bytes::from_static(b"value 0@0x10"),
11773 1 : Bytes::from_static(b"value 1@0x10"),
11774 1 : Bytes::from_static(b"value 2@0x10"),
11775 1 : Bytes::from_static(b"value 3@0x10"),
11776 1 : Bytes::from_static(b"value 4@0x10"),
11777 1 : Bytes::from_static(b"value 5@0x10"),
11778 1 : Bytes::from_static(b"value 6@0x10"),
11779 1 : Bytes::from_static(b"value 7@0x10"),
11780 1 : Bytes::from_static(b"value 8@0x10"),
11781 1 : Bytes::from_static(b"value 9@0x10"),
11782 1 : ];
11783 1 :
11784 3 : let verify_result = || async {
11785 3 : let gc_horizon = {
11786 3 : let gc_info = tline.gc_info.read().unwrap();
11787 3 : gc_info.cutoffs.time.unwrap_or_default()
11788 1 : };
11789 33 : for idx in 0..10 {
11790 30 : assert_eq!(
11791 30 : tline
11792 30 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
11793 30 : .await
11794 30 : .unwrap(),
11795 30 : &expected_result[idx]
11796 1 : );
11797 30 : assert_eq!(
11798 30 : tline
11799 30 : .get(get_key(idx as u32), gc_horizon, &ctx)
11800 30 : .await
11801 30 : .unwrap(),
11802 30 : &expected_result_at_gc_horizon[idx]
11803 1 : );
11804 30 : assert_eq!(
11805 30 : tline
11806 30 : .get(get_key(idx as u32), Lsn(0x20), &ctx)
11807 30 : .await
11808 30 : .unwrap(),
11809 30 : &expected_result_at_lsn_20[idx]
11810 1 : );
11811 30 : assert_eq!(
11812 30 : tline
11813 30 : .get(get_key(idx as u32), Lsn(0x10), &ctx)
11814 30 : .await
11815 30 : .unwrap(),
11816 30 : &expected_result_at_lsn_10[idx]
11817 1 : );
11818 1 : }
11819 6 : };
11820 1 :
11821 1 : verify_result().await;
11822 1 :
11823 1 : let cancel = CancellationToken::new();
11824 1 : tline
11825 1 : .compact_with_gc(
11826 1 : &cancel,
11827 1 : CompactOptions {
11828 1 : compact_lsn_range: Some(CompactLsnRange::above(Lsn(0x28))),
11829 1 : ..Default::default()
11830 1 : },
11831 1 : &ctx,
11832 1 : )
11833 1 : .await
11834 1 : .unwrap();
11835 1 : verify_result().await;
11836 1 :
11837 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
11838 1 : check_layer_map_key_eq(
11839 1 : all_layers,
11840 1 : vec![
11841 1 : // The original image layer, not compacted
11842 1 : PersistentLayerKey {
11843 1 : key_range: get_key(0)..get_key(10),
11844 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
11845 1 : is_delta: false,
11846 1 : },
11847 1 : // Delta layer below the specified above_lsn, not compacted
11848 1 : PersistentLayerKey {
11849 1 : key_range: get_key(1)..get_key(2),
11850 1 : lsn_range: Lsn(0x20)..Lsn(0x28),
11851 1 : is_delta: true,
11852 1 : },
11853 1 : // Delta layer compacted above the LSN
11854 1 : PersistentLayerKey {
11855 1 : key_range: get_key(1)..get_key(10),
11856 1 : lsn_range: Lsn(0x28)..Lsn(0x50),
11857 1 : is_delta: true,
11858 1 : },
11859 1 : ],
11860 1 : );
11861 1 :
11862 1 : // compact again
11863 1 : tline
11864 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
11865 1 : .await
11866 1 : .unwrap();
11867 1 : verify_result().await;
11868 1 :
11869 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
11870 1 : check_layer_map_key_eq(
11871 1 : all_layers,
11872 1 : vec![
11873 1 : // The compacted image layer (full key range)
11874 1 : PersistentLayerKey {
11875 1 : key_range: Key::MIN..Key::MAX,
11876 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
11877 1 : is_delta: false,
11878 1 : },
11879 1 : // All other data in the delta layer
11880 1 : PersistentLayerKey {
11881 1 : key_range: get_key(1)..get_key(10),
11882 1 : lsn_range: Lsn(0x10)..Lsn(0x50),
11883 1 : is_delta: true,
11884 1 : },
11885 1 : ],
11886 1 : );
11887 1 :
11888 1 : Ok(())
11889 1 : }
11890 :
11891 : #[cfg(feature = "testing")]
11892 : #[tokio::test]
11893 1 : async fn test_simple_bottom_most_compaction_rectangle() -> anyhow::Result<()> {
11894 1 : let harness = TenantHarness::create("test_simple_bottom_most_compaction_rectangle").await?;
11895 1 : let (tenant, ctx) = harness.load().await;
11896 1 :
11897 254 : fn get_key(id: u32) -> Key {
11898 254 : // using aux keys here b/c they are guaranteed to be inside `collect_keyspace`.
11899 254 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
11900 254 : key.field6 = id;
11901 254 : key
11902 254 : }
11903 1 :
11904 1 : let img_layer = (0..10)
11905 10 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
11906 1 : .collect_vec();
11907 1 :
11908 1 : let delta1 = vec![(
11909 1 : get_key(1),
11910 1 : Lsn(0x20),
11911 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
11912 1 : )];
11913 1 : let delta4 = vec![(
11914 1 : get_key(1),
11915 1 : Lsn(0x28),
11916 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x28")),
11917 1 : )];
11918 1 : let delta2 = vec![
11919 1 : (
11920 1 : get_key(1),
11921 1 : Lsn(0x30),
11922 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
11923 1 : ),
11924 1 : (
11925 1 : get_key(1),
11926 1 : Lsn(0x38),
11927 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x38")),
11928 1 : ),
11929 1 : ];
11930 1 : let delta3 = vec![
11931 1 : (
11932 1 : get_key(8),
11933 1 : Lsn(0x48),
11934 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
11935 1 : ),
11936 1 : (
11937 1 : get_key(9),
11938 1 : Lsn(0x48),
11939 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x48")),
11940 1 : ),
11941 1 : ];
11942 1 :
11943 1 : let tline = tenant
11944 1 : .create_test_timeline_with_layers(
11945 1 : TIMELINE_ID,
11946 1 : Lsn(0x10),
11947 1 : DEFAULT_PG_VERSION,
11948 1 : &ctx,
11949 1 : vec![], // in-memory layers
11950 1 : vec![
11951 1 : // delta1/2/4 only contain a single key but multiple updates
11952 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x20)..Lsn(0x28), delta1),
11953 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x30)..Lsn(0x50), delta2),
11954 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x28)..Lsn(0x30), delta4),
11955 1 : DeltaLayerTestDesc::new_with_inferred_key_range(Lsn(0x30)..Lsn(0x50), delta3),
11956 1 : ], // delta layers
11957 1 : vec![(Lsn(0x10), img_layer)], // image layers
11958 1 : Lsn(0x50),
11959 1 : )
11960 1 : .await?;
11961 1 : {
11962 1 : tline
11963 1 : .applied_gc_cutoff_lsn
11964 1 : .lock_for_write()
11965 1 : .store_and_unlock(Lsn(0x30))
11966 1 : .wait()
11967 1 : .await;
11968 1 : // Update GC info
11969 1 : let mut guard = tline.gc_info.write().unwrap();
11970 1 : *guard = GcInfo {
11971 1 : retain_lsns: vec![
11972 1 : (Lsn(0x10), tline.timeline_id, MaybeOffloaded::No),
11973 1 : (Lsn(0x20), tline.timeline_id, MaybeOffloaded::No),
11974 1 : ],
11975 1 : cutoffs: GcCutoffs {
11976 1 : time: Some(Lsn(0x30)),
11977 1 : space: Lsn(0x30),
11978 1 : },
11979 1 : leases: Default::default(),
11980 1 : within_ancestor_pitr: false,
11981 1 : };
11982 1 : }
11983 1 :
11984 1 : let expected_result = [
11985 1 : Bytes::from_static(b"value 0@0x10"),
11986 1 : Bytes::from_static(b"value 1@0x10@0x20@0x28@0x30@0x38"),
11987 1 : Bytes::from_static(b"value 2@0x10"),
11988 1 : Bytes::from_static(b"value 3@0x10"),
11989 1 : Bytes::from_static(b"value 4@0x10"),
11990 1 : Bytes::from_static(b"value 5@0x10"),
11991 1 : Bytes::from_static(b"value 6@0x10"),
11992 1 : Bytes::from_static(b"value 7@0x10"),
11993 1 : Bytes::from_static(b"value 8@0x10@0x48"),
11994 1 : Bytes::from_static(b"value 9@0x10@0x48"),
11995 1 : ];
11996 1 :
11997 1 : let expected_result_at_gc_horizon = [
11998 1 : Bytes::from_static(b"value 0@0x10"),
11999 1 : Bytes::from_static(b"value 1@0x10@0x20@0x28@0x30"),
12000 1 : Bytes::from_static(b"value 2@0x10"),
12001 1 : Bytes::from_static(b"value 3@0x10"),
12002 1 : Bytes::from_static(b"value 4@0x10"),
12003 1 : Bytes::from_static(b"value 5@0x10"),
12004 1 : Bytes::from_static(b"value 6@0x10"),
12005 1 : Bytes::from_static(b"value 7@0x10"),
12006 1 : Bytes::from_static(b"value 8@0x10"),
12007 1 : Bytes::from_static(b"value 9@0x10"),
12008 1 : ];
12009 1 :
12010 1 : let expected_result_at_lsn_20 = [
12011 1 : Bytes::from_static(b"value 0@0x10"),
12012 1 : Bytes::from_static(b"value 1@0x10@0x20"),
12013 1 : Bytes::from_static(b"value 2@0x10"),
12014 1 : Bytes::from_static(b"value 3@0x10"),
12015 1 : Bytes::from_static(b"value 4@0x10"),
12016 1 : Bytes::from_static(b"value 5@0x10"),
12017 1 : Bytes::from_static(b"value 6@0x10"),
12018 1 : Bytes::from_static(b"value 7@0x10"),
12019 1 : Bytes::from_static(b"value 8@0x10"),
12020 1 : Bytes::from_static(b"value 9@0x10"),
12021 1 : ];
12022 1 :
12023 1 : let expected_result_at_lsn_10 = [
12024 1 : Bytes::from_static(b"value 0@0x10"),
12025 1 : Bytes::from_static(b"value 1@0x10"),
12026 1 : Bytes::from_static(b"value 2@0x10"),
12027 1 : Bytes::from_static(b"value 3@0x10"),
12028 1 : Bytes::from_static(b"value 4@0x10"),
12029 1 : Bytes::from_static(b"value 5@0x10"),
12030 1 : Bytes::from_static(b"value 6@0x10"),
12031 1 : Bytes::from_static(b"value 7@0x10"),
12032 1 : Bytes::from_static(b"value 8@0x10"),
12033 1 : Bytes::from_static(b"value 9@0x10"),
12034 1 : ];
12035 1 :
12036 5 : let verify_result = || async {
12037 5 : let gc_horizon = {
12038 5 : let gc_info = tline.gc_info.read().unwrap();
12039 5 : gc_info.cutoffs.time.unwrap_or_default()
12040 1 : };
12041 55 : for idx in 0..10 {
12042 50 : assert_eq!(
12043 50 : tline
12044 50 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
12045 50 : .await
12046 50 : .unwrap(),
12047 50 : &expected_result[idx]
12048 1 : );
12049 50 : assert_eq!(
12050 50 : tline
12051 50 : .get(get_key(idx as u32), gc_horizon, &ctx)
12052 50 : .await
12053 50 : .unwrap(),
12054 50 : &expected_result_at_gc_horizon[idx]
12055 1 : );
12056 50 : assert_eq!(
12057 50 : tline
12058 50 : .get(get_key(idx as u32), Lsn(0x20), &ctx)
12059 50 : .await
12060 50 : .unwrap(),
12061 50 : &expected_result_at_lsn_20[idx]
12062 1 : );
12063 50 : assert_eq!(
12064 50 : tline
12065 50 : .get(get_key(idx as u32), Lsn(0x10), &ctx)
12066 50 : .await
12067 50 : .unwrap(),
12068 50 : &expected_result_at_lsn_10[idx]
12069 1 : );
12070 1 : }
12071 10 : };
12072 1 :
12073 1 : verify_result().await;
12074 1 :
12075 1 : let cancel = CancellationToken::new();
12076 1 :
12077 1 : tline
12078 1 : .compact_with_gc(
12079 1 : &cancel,
12080 1 : CompactOptions {
12081 1 : compact_key_range: Some((get_key(0)..get_key(2)).into()),
12082 1 : compact_lsn_range: Some((Lsn(0x20)..Lsn(0x28)).into()),
12083 1 : ..Default::default()
12084 1 : },
12085 1 : &ctx,
12086 1 : )
12087 1 : .await
12088 1 : .unwrap();
12089 1 : verify_result().await;
12090 1 :
12091 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
12092 1 : check_layer_map_key_eq(
12093 1 : all_layers,
12094 1 : vec![
12095 1 : // The original image layer, not compacted
12096 1 : PersistentLayerKey {
12097 1 : key_range: get_key(0)..get_key(10),
12098 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
12099 1 : is_delta: false,
12100 1 : },
12101 1 : // According to the selection logic, we select all layers with start key <= 0x28, so we would merge the layer 0x20-0x28 and
12102 1 : // the layer 0x28-0x30 into one.
12103 1 : PersistentLayerKey {
12104 1 : key_range: get_key(1)..get_key(2),
12105 1 : lsn_range: Lsn(0x20)..Lsn(0x30),
12106 1 : is_delta: true,
12107 1 : },
12108 1 : // Above the upper bound and untouched
12109 1 : PersistentLayerKey {
12110 1 : key_range: get_key(1)..get_key(2),
12111 1 : lsn_range: Lsn(0x30)..Lsn(0x50),
12112 1 : is_delta: true,
12113 1 : },
12114 1 : // This layer is untouched
12115 1 : PersistentLayerKey {
12116 1 : key_range: get_key(8)..get_key(10),
12117 1 : lsn_range: Lsn(0x30)..Lsn(0x50),
12118 1 : is_delta: true,
12119 1 : },
12120 1 : ],
12121 1 : );
12122 1 :
12123 1 : tline
12124 1 : .compact_with_gc(
12125 1 : &cancel,
12126 1 : CompactOptions {
12127 1 : compact_key_range: Some((get_key(3)..get_key(8)).into()),
12128 1 : compact_lsn_range: Some((Lsn(0x28)..Lsn(0x40)).into()),
12129 1 : ..Default::default()
12130 1 : },
12131 1 : &ctx,
12132 1 : )
12133 1 : .await
12134 1 : .unwrap();
12135 1 : verify_result().await;
12136 1 :
12137 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
12138 1 : check_layer_map_key_eq(
12139 1 : all_layers,
12140 1 : vec![
12141 1 : // The original image layer, not compacted
12142 1 : PersistentLayerKey {
12143 1 : key_range: get_key(0)..get_key(10),
12144 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
12145 1 : is_delta: false,
12146 1 : },
12147 1 : // Not in the compaction key range, uncompacted
12148 1 : PersistentLayerKey {
12149 1 : key_range: get_key(1)..get_key(2),
12150 1 : lsn_range: Lsn(0x20)..Lsn(0x30),
12151 1 : is_delta: true,
12152 1 : },
12153 1 : // Not in the compaction key range, uncompacted but need rewrite because the delta layer overlaps with the range
12154 1 : PersistentLayerKey {
12155 1 : key_range: get_key(1)..get_key(2),
12156 1 : lsn_range: Lsn(0x30)..Lsn(0x50),
12157 1 : is_delta: true,
12158 1 : },
12159 1 : // Note that when we specify the LSN upper bound to be 0x40, the compaction algorithm will not try to cut the layer
12160 1 : // horizontally in half. Instead, it will include all LSNs that overlap with 0x40. So the real max_lsn of the compaction
12161 1 : // becomes 0x50.
12162 1 : PersistentLayerKey {
12163 1 : key_range: get_key(8)..get_key(10),
12164 1 : lsn_range: Lsn(0x30)..Lsn(0x50),
12165 1 : is_delta: true,
12166 1 : },
12167 1 : ],
12168 1 : );
12169 1 :
12170 1 : // compact again
12171 1 : tline
12172 1 : .compact_with_gc(
12173 1 : &cancel,
12174 1 : CompactOptions {
12175 1 : compact_key_range: Some((get_key(0)..get_key(5)).into()),
12176 1 : compact_lsn_range: Some((Lsn(0x20)..Lsn(0x50)).into()),
12177 1 : ..Default::default()
12178 1 : },
12179 1 : &ctx,
12180 1 : )
12181 1 : .await
12182 1 : .unwrap();
12183 1 : verify_result().await;
12184 1 :
12185 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
12186 1 : check_layer_map_key_eq(
12187 1 : all_layers,
12188 1 : vec![
12189 1 : // The original image layer, not compacted
12190 1 : PersistentLayerKey {
12191 1 : key_range: get_key(0)..get_key(10),
12192 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
12193 1 : is_delta: false,
12194 1 : },
12195 1 : // The range gets compacted
12196 1 : PersistentLayerKey {
12197 1 : key_range: get_key(1)..get_key(2),
12198 1 : lsn_range: Lsn(0x20)..Lsn(0x50),
12199 1 : is_delta: true,
12200 1 : },
12201 1 : // Not touched during this iteration of compaction
12202 1 : PersistentLayerKey {
12203 1 : key_range: get_key(8)..get_key(10),
12204 1 : lsn_range: Lsn(0x30)..Lsn(0x50),
12205 1 : is_delta: true,
12206 1 : },
12207 1 : ],
12208 1 : );
12209 1 :
12210 1 : // final full compaction
12211 1 : tline
12212 1 : .compact_with_gc(&cancel, CompactOptions::default(), &ctx)
12213 1 : .await
12214 1 : .unwrap();
12215 1 : verify_result().await;
12216 1 :
12217 1 : let all_layers = inspect_and_sort(&tline, Some(get_key(0)..get_key(10))).await;
12218 1 : check_layer_map_key_eq(
12219 1 : all_layers,
12220 1 : vec![
12221 1 : // The compacted image layer (full key range)
12222 1 : PersistentLayerKey {
12223 1 : key_range: Key::MIN..Key::MAX,
12224 1 : lsn_range: Lsn(0x10)..Lsn(0x11),
12225 1 : is_delta: false,
12226 1 : },
12227 1 : // All other data in the delta layer
12228 1 : PersistentLayerKey {
12229 1 : key_range: get_key(1)..get_key(10),
12230 1 : lsn_range: Lsn(0x10)..Lsn(0x50),
12231 1 : is_delta: true,
12232 1 : },
12233 1 : ],
12234 1 : );
12235 1 :
12236 1 : Ok(())
12237 1 : }
12238 :
12239 : #[cfg(feature = "testing")]
12240 : #[tokio::test]
12241 1 : async fn test_bottom_most_compaction_redo_failure() -> anyhow::Result<()> {
12242 1 : let harness = TenantHarness::create("test_bottom_most_compaction_redo_failure").await?;
12243 1 : let (tenant, ctx) = harness.load().await;
12244 1 :
12245 13 : fn get_key(id: u32) -> Key {
12246 13 : // using aux keys here b/c they are guaranteed to be inside `collect_keyspace`.
12247 13 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
12248 13 : key.field6 = id;
12249 13 : key
12250 13 : }
12251 1 :
12252 1 : let img_layer = (0..10)
12253 10 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
12254 1 : .collect_vec();
12255 1 :
12256 1 : let delta1 = vec![
12257 1 : (
12258 1 : get_key(1),
12259 1 : Lsn(0x20),
12260 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
12261 1 : ),
12262 1 : (
12263 1 : get_key(1),
12264 1 : Lsn(0x24),
12265 1 : Value::WalRecord(NeonWalRecord::wal_append("@0x24")),
12266 1 : ),
12267 1 : (
12268 1 : get_key(1),
12269 1 : Lsn(0x28),
12270 1 : // This record will fail to redo
12271 1 : Value::WalRecord(NeonWalRecord::wal_append_conditional("@0x28", "???")),
12272 1 : ),
12273 1 : ];
12274 1 :
12275 1 : let tline = tenant
12276 1 : .create_test_timeline_with_layers(
12277 1 : TIMELINE_ID,
12278 1 : Lsn(0x10),
12279 1 : DEFAULT_PG_VERSION,
12280 1 : &ctx,
12281 1 : vec![], // in-memory layers
12282 1 : vec![DeltaLayerTestDesc::new_with_inferred_key_range(
12283 1 : Lsn(0x20)..Lsn(0x30),
12284 1 : delta1,
12285 1 : )], // delta layers
12286 1 : vec![(Lsn(0x10), img_layer)], // image layers
12287 1 : Lsn(0x50),
12288 1 : )
12289 1 : .await?;
12290 1 : {
12291 1 : tline
12292 1 : .applied_gc_cutoff_lsn
12293 1 : .lock_for_write()
12294 1 : .store_and_unlock(Lsn(0x30))
12295 1 : .wait()
12296 1 : .await;
12297 1 : // Update GC info
12298 1 : let mut guard = tline.gc_info.write().unwrap();
12299 1 : *guard = GcInfo {
12300 1 : retain_lsns: vec![],
12301 1 : cutoffs: GcCutoffs {
12302 1 : time: Some(Lsn(0x30)),
12303 1 : space: Lsn(0x30),
12304 1 : },
12305 1 : leases: Default::default(),
12306 1 : within_ancestor_pitr: false,
12307 1 : };
12308 1 : }
12309 1 :
12310 1 : let cancel = CancellationToken::new();
12311 1 :
12312 1 : // Compaction will fail, but should not fire any critical error.
12313 1 : // Gc-compaction currently cannot figure out what keys are not in the keyspace during the compaction
12314 1 : // process. It will always try to redo the logs it reads and if it doesn't work, fail the entire
12315 1 : // compaction job. Tracked in <https://github.com/neondatabase/neon/issues/10395>.
12316 1 : let res = tline
12317 1 : .compact_with_gc(
12318 1 : &cancel,
12319 1 : CompactOptions {
12320 1 : compact_key_range: None,
12321 1 : compact_lsn_range: None,
12322 1 : ..Default::default()
12323 1 : },
12324 1 : &ctx,
12325 1 : )
12326 1 : .await;
12327 1 : assert!(res.is_err());
12328 1 :
12329 1 : Ok(())
12330 1 : }
12331 :
12332 : #[cfg(feature = "testing")]
12333 : #[tokio::test]
12334 1 : async fn test_synthetic_size_calculation_with_invisible_branches() -> anyhow::Result<()> {
12335 1 : use pageserver_api::models::TimelineVisibilityState;
12336 1 :
12337 1 : use crate::tenant::size::gather_inputs;
12338 1 :
12339 1 : let tenant_conf = pageserver_api::models::TenantConfig {
12340 1 : // Ensure that we don't compute gc_cutoffs (which needs reading the layer files)
12341 1 : pitr_interval: Some(Duration::ZERO),
12342 1 : ..Default::default()
12343 1 : };
12344 1 : let harness = TenantHarness::create_custom(
12345 1 : "test_synthetic_size_calculation_with_invisible_branches",
12346 1 : tenant_conf,
12347 1 : TenantId::generate(),
12348 1 : ShardIdentity::unsharded(),
12349 1 : Generation::new(0xdeadbeef),
12350 1 : )
12351 1 : .await?;
12352 1 : let (tenant, ctx) = harness.load().await;
12353 1 : let main_tline = tenant
12354 1 : .create_test_timeline_with_layers(
12355 1 : TIMELINE_ID,
12356 1 : Lsn(0x10),
12357 1 : DEFAULT_PG_VERSION,
12358 1 : &ctx,
12359 1 : vec![],
12360 1 : vec![],
12361 1 : vec![],
12362 1 : Lsn(0x100),
12363 1 : )
12364 1 : .await?;
12365 1 :
12366 1 : let snapshot1 = TimelineId::from_array(hex!("11223344556677881122334455667790"));
12367 1 : tenant
12368 1 : .branch_timeline_test_with_layers(
12369 1 : &main_tline,
12370 1 : snapshot1,
12371 1 : Some(Lsn(0x20)),
12372 1 : &ctx,
12373 1 : vec![],
12374 1 : vec![],
12375 1 : Lsn(0x50),
12376 1 : )
12377 1 : .await?;
12378 1 : let snapshot2 = TimelineId::from_array(hex!("11223344556677881122334455667791"));
12379 1 : tenant
12380 1 : .branch_timeline_test_with_layers(
12381 1 : &main_tline,
12382 1 : snapshot2,
12383 1 : Some(Lsn(0x30)),
12384 1 : &ctx,
12385 1 : vec![],
12386 1 : vec![],
12387 1 : Lsn(0x50),
12388 1 : )
12389 1 : .await?;
12390 1 : let snapshot3 = TimelineId::from_array(hex!("11223344556677881122334455667792"));
12391 1 : tenant
12392 1 : .branch_timeline_test_with_layers(
12393 1 : &main_tline,
12394 1 : snapshot3,
12395 1 : Some(Lsn(0x40)),
12396 1 : &ctx,
12397 1 : vec![],
12398 1 : vec![],
12399 1 : Lsn(0x50),
12400 1 : )
12401 1 : .await?;
12402 1 : let limit = Arc::new(Semaphore::new(1));
12403 1 : let max_retention_period = None;
12404 1 : let mut logical_size_cache = HashMap::new();
12405 1 : let cause = LogicalSizeCalculationCause::EvictionTaskImitation;
12406 1 : let cancel = CancellationToken::new();
12407 1 :
12408 1 : let inputs = gather_inputs(
12409 1 : &tenant,
12410 1 : &limit,
12411 1 : max_retention_period,
12412 1 : &mut logical_size_cache,
12413 1 : cause,
12414 1 : &cancel,
12415 1 : &ctx,
12416 1 : )
12417 1 : .instrument(info_span!(
12418 1 : "gather_inputs",
12419 1 : tenant_id = "unknown",
12420 1 : shard_id = "unknown",
12421 1 : ))
12422 1 : .await?;
12423 1 : use crate::tenant::size::{LsnKind, ModelInputs, SegmentMeta};
12424 1 : use LsnKind::*;
12425 1 : use tenant_size_model::Segment;
12426 1 : let ModelInputs { mut segments, .. } = inputs;
12427 15 : segments.retain(|s| s.timeline_id == TIMELINE_ID);
12428 6 : for segment in segments.iter_mut() {
12429 6 : segment.segment.parent = None; // We don't care about the parent for the test
12430 6 : segment.segment.size = None; // We don't care about the size for the test
12431 6 : }
12432 1 : assert_eq!(
12433 1 : segments,
12434 1 : [
12435 1 : SegmentMeta {
12436 1 : segment: Segment {
12437 1 : parent: None,
12438 1 : lsn: 0x10,
12439 1 : size: None,
12440 1 : needed: false,
12441 1 : },
12442 1 : timeline_id: TIMELINE_ID,
12443 1 : kind: BranchStart,
12444 1 : },
12445 1 : SegmentMeta {
12446 1 : segment: Segment {
12447 1 : parent: None,
12448 1 : lsn: 0x20,
12449 1 : size: None,
12450 1 : needed: false,
12451 1 : },
12452 1 : timeline_id: TIMELINE_ID,
12453 1 : kind: BranchPoint,
12454 1 : },
12455 1 : SegmentMeta {
12456 1 : segment: Segment {
12457 1 : parent: None,
12458 1 : lsn: 0x30,
12459 1 : size: None,
12460 1 : needed: false,
12461 1 : },
12462 1 : timeline_id: TIMELINE_ID,
12463 1 : kind: BranchPoint,
12464 1 : },
12465 1 : SegmentMeta {
12466 1 : segment: Segment {
12467 1 : parent: None,
12468 1 : lsn: 0x40,
12469 1 : size: None,
12470 1 : needed: false,
12471 1 : },
12472 1 : timeline_id: TIMELINE_ID,
12473 1 : kind: BranchPoint,
12474 1 : },
12475 1 : SegmentMeta {
12476 1 : segment: Segment {
12477 1 : parent: None,
12478 1 : lsn: 0x100,
12479 1 : size: None,
12480 1 : needed: false,
12481 1 : },
12482 1 : timeline_id: TIMELINE_ID,
12483 1 : kind: GcCutOff,
12484 1 : }, // we need to retain everything above the last branch point
12485 1 : SegmentMeta {
12486 1 : segment: Segment {
12487 1 : parent: None,
12488 1 : lsn: 0x100,
12489 1 : size: None,
12490 1 : needed: true,
12491 1 : },
12492 1 : timeline_id: TIMELINE_ID,
12493 1 : kind: BranchEnd,
12494 1 : },
12495 1 : ]
12496 1 : );
12497 1 :
12498 1 : main_tline
12499 1 : .remote_client
12500 1 : .schedule_index_upload_for_timeline_invisible_state(
12501 1 : TimelineVisibilityState::Invisible,
12502 1 : )?;
12503 1 : main_tline.remote_client.wait_completion().await?;
12504 1 : let inputs = gather_inputs(
12505 1 : &tenant,
12506 1 : &limit,
12507 1 : max_retention_period,
12508 1 : &mut logical_size_cache,
12509 1 : cause,
12510 1 : &cancel,
12511 1 : &ctx,
12512 1 : )
12513 1 : .instrument(info_span!(
12514 1 : "gather_inputs",
12515 1 : tenant_id = "unknown",
12516 1 : shard_id = "unknown",
12517 1 : ))
12518 1 : .await?;
12519 1 : let ModelInputs { mut segments, .. } = inputs;
12520 14 : segments.retain(|s| s.timeline_id == TIMELINE_ID);
12521 5 : for segment in segments.iter_mut() {
12522 5 : segment.segment.parent = None; // We don't care about the parent for the test
12523 5 : segment.segment.size = None; // We don't care about the size for the test
12524 5 : }
12525 1 : assert_eq!(
12526 1 : segments,
12527 1 : [
12528 1 : SegmentMeta {
12529 1 : segment: Segment {
12530 1 : parent: None,
12531 1 : lsn: 0x10,
12532 1 : size: None,
12533 1 : needed: false,
12534 1 : },
12535 1 : timeline_id: TIMELINE_ID,
12536 1 : kind: BranchStart,
12537 1 : },
12538 1 : SegmentMeta {
12539 1 : segment: Segment {
12540 1 : parent: None,
12541 1 : lsn: 0x20,
12542 1 : size: None,
12543 1 : needed: false,
12544 1 : },
12545 1 : timeline_id: TIMELINE_ID,
12546 1 : kind: BranchPoint,
12547 1 : },
12548 1 : SegmentMeta {
12549 1 : segment: Segment {
12550 1 : parent: None,
12551 1 : lsn: 0x30,
12552 1 : size: None,
12553 1 : needed: false,
12554 1 : },
12555 1 : timeline_id: TIMELINE_ID,
12556 1 : kind: BranchPoint,
12557 1 : },
12558 1 : SegmentMeta {
12559 1 : segment: Segment {
12560 1 : parent: None,
12561 1 : lsn: 0x40,
12562 1 : size: None,
12563 1 : needed: false,
12564 1 : },
12565 1 : timeline_id: TIMELINE_ID,
12566 1 : kind: BranchPoint,
12567 1 : },
12568 1 : SegmentMeta {
12569 1 : segment: Segment {
12570 1 : parent: None,
12571 1 : lsn: 0x40, // Branch end LSN == last branch point LSN
12572 1 : size: None,
12573 1 : needed: true,
12574 1 : },
12575 1 : timeline_id: TIMELINE_ID,
12576 1 : kind: BranchEnd,
12577 1 : },
12578 1 : ]
12579 1 : );
12580 1 : Ok(())
12581 1 : }
12582 : }
|