Line data Source code
1 : //!
2 : //! Timeline repository implementation that keeps old data in files on disk, and
3 : //! the recent changes in memory. See tenant/*_layer.rs files.
4 : //! The functions here are responsible for locating the correct layer for the
5 : //! get/put call, walking back the timeline branching history as needed.
6 : //!
7 : //! The files are stored in the .neon/tenants/<tenant_id>/timelines/<timeline_id>
8 : //! directory. See docs/pageserver-storage.md for how the files are managed.
9 : //! In addition to the layer files, there is a metadata file in the same
10 : //! directory that contains information about the timeline, in particular its
11 : //! parent timeline, and the last LSN that has been written to disk.
12 : //!
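//!
//! As a rough mental model of the get-path described above, here is a
//! minimal, self-contained sketch. All names below are simplified stand-ins,
//! not this module's actual types or API.
//!
//! ```
//! use std::collections::BTreeMap;
//!
//! struct SketchTimeline {
//!     /// Parent timeline and the LSN at which this timeline branched off it.
//!     ancestor: Option<(Box<SketchTimeline>, u64)>,
//!     /// (key, lsn) -> value; stands in for the open in-memory layer.
//!     in_memory: BTreeMap<(u64, u64), Vec<u8>>,
//!     /// (key, lsn) -> value; stands in for the layer files on disk.
//!     on_disk: BTreeMap<(u64, u64), Vec<u8>>,
//! }
//!
//! /// Most recent version of `key` at or below `lsn` in one "layer".
//! fn latest_at(m: &BTreeMap<(u64, u64), Vec<u8>>, key: u64, lsn: u64) -> Option<&Vec<u8>> {
//!     m.range((key, 0)..=(key, lsn)).next_back().map(|(_, v)| v)
//! }
//!
//! impl SketchTimeline {
//!     fn get(&self, key: u64, lsn: u64) -> Option<&Vec<u8>> {
//!         // Recent changes in memory shadow the on-disk layers; if neither has
//!         // the key, walk back to the ancestor, capped at the branch point LSN.
//!         latest_at(&self.in_memory, key, lsn)
//!             .or_else(|| latest_at(&self.on_disk, key, lsn))
//!             .or_else(|| {
//!                 self.ancestor
//!                     .as_ref()
//!                     .and_then(|(parent, branch_lsn)| parent.get(key, lsn.min(*branch_lsn)))
//!             })
//!     }
//! }
//!
//! let mut tl = SketchTimeline {
//!     ancestor: None,
//!     in_memory: BTreeMap::new(),
//!     on_disk: BTreeMap::new(),
//! };
//! tl.on_disk.insert((1, 10), b"v1".to_vec());
//! assert_eq!(tl.get(1, 15), Some(&b"v1".to_vec()));
//! ```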
13 :
14 : use anyhow::{bail, Context};
15 : use arc_swap::ArcSwap;
16 : use camino::Utf8Path;
17 : use camino::Utf8PathBuf;
18 : use enumset::EnumSet;
19 : use futures::stream::FuturesUnordered;
20 : use futures::FutureExt;
21 : use futures::StreamExt;
22 : use pageserver_api::models;
23 : use pageserver_api::models::AuxFilePolicy;
24 : use pageserver_api::models::TimelineState;
25 : use pageserver_api::models::TopTenantShardItem;
26 : use pageserver_api::models::WalRedoManagerStatus;
27 : use pageserver_api::shard::ShardIdentity;
28 : use pageserver_api::shard::ShardStripeSize;
29 : use pageserver_api::shard::TenantShardId;
30 : use remote_storage::DownloadError;
31 : use remote_storage::GenericRemoteStorage;
32 : use remote_storage::TimeoutOrCancel;
33 : use std::fmt;
34 : use std::time::SystemTime;
35 : use storage_broker::BrokerClientChannel;
36 : use tokio::io::BufReader;
37 : use tokio::sync::watch;
38 : use tokio::task::JoinSet;
39 : use tokio_util::sync::CancellationToken;
40 : use tracing::*;
41 : use utils::backoff;
42 : use utils::completion;
43 : use utils::crashsafe::path_with_suffix_extension;
44 : use utils::failpoint_support;
45 : use utils::fs_ext;
46 : use utils::pausable_failpoint;
47 : use utils::sync::gate::Gate;
48 : use utils::sync::gate::GateGuard;
49 : use utils::timeout::timeout_cancellable;
50 : use utils::timeout::TimeoutCancellableError;
51 : use utils::zstd::create_zst_tarball;
52 : use utils::zstd::extract_zst_tarball;
53 :
54 : use self::config::AttachedLocationConfig;
55 : use self::config::AttachmentMode;
56 : use self::config::LocationConf;
57 : use self::config::TenantConf;
58 : use self::metadata::TimelineMetadata;
59 : use self::mgr::GetActiveTenantError;
60 : use self::mgr::GetTenantError;
61 : use self::remote_timeline_client::upload::upload_index_part;
62 : use self::remote_timeline_client::RemoteTimelineClient;
63 : use self::timeline::uninit::TimelineCreateGuard;
64 : use self::timeline::uninit::TimelineExclusionError;
65 : use self::timeline::uninit::UninitializedTimeline;
66 : use self::timeline::EvictionTaskTenantState;
67 : use self::timeline::GcCutoffs;
68 : use self::timeline::TimelineResources;
69 : use self::timeline::WaitLsnError;
70 : use crate::config::PageServerConf;
71 : use crate::context::{DownloadBehavior, RequestContext};
72 : use crate::deletion_queue::DeletionQueueClient;
73 : use crate::deletion_queue::DeletionQueueError;
74 : use crate::import_datadir;
75 : use crate::is_uninit_mark;
76 : use crate::l0_flush::L0FlushGlobalState;
77 : use crate::metrics::TENANT;
78 : use crate::metrics::{
79 : remove_tenant_metrics, BROKEN_TENANTS_SET, TENANT_STATE_METRIC, TENANT_SYNTHETIC_SIZE_METRIC,
80 : };
81 : use crate::repository::GcResult;
82 : use crate::task_mgr;
83 : use crate::task_mgr::TaskKind;
84 : use crate::tenant::config::LocationMode;
85 : use crate::tenant::config::TenantConfOpt;
86 : pub use crate::tenant::remote_timeline_client::index::IndexPart;
87 : use crate::tenant::remote_timeline_client::remote_initdb_archive_path;
88 : use crate::tenant::remote_timeline_client::MaybeDeletedIndexPart;
89 : use crate::tenant::remote_timeline_client::INITDB_PATH;
90 : use crate::tenant::storage_layer::DeltaLayer;
91 : use crate::tenant::storage_layer::ImageLayer;
92 : use crate::walredo;
93 : use crate::InitializationOrder;
94 : use std::collections::hash_map::Entry;
95 : use std::collections::BTreeSet;
96 : use std::collections::HashMap;
97 : use std::collections::HashSet;
98 : use std::fmt::Debug;
99 : use std::fmt::Display;
100 : use std::fs;
101 : use std::fs::File;
102 : use std::ops::Bound::Included;
103 : use std::sync::atomic::AtomicU64;
104 : use std::sync::atomic::Ordering;
105 : use std::sync::Arc;
106 : use std::sync::Mutex;
107 : use std::time::{Duration, Instant};
108 :
109 : use crate::span;
110 : use crate::tenant::timeline::delete::DeleteTimelineFlow;
111 : use crate::tenant::timeline::uninit::cleanup_timeline_directory;
112 : use crate::virtual_file::VirtualFile;
113 : use crate::walredo::PostgresRedoManager;
114 : use crate::TEMP_FILE_SUFFIX;
115 : use once_cell::sync::Lazy;
116 : pub use pageserver_api::models::TenantState;
117 : use tokio::sync::Semaphore;
118 :
119 0 : static INIT_DB_SEMAPHORE: Lazy<Semaphore> = Lazy::new(|| Semaphore::new(8));
120 : use utils::{
121 : crashsafe,
122 : generation::Generation,
123 : id::TimelineId,
124 : lsn::{Lsn, RecordLsn},
125 : };
126 :
127 : pub mod blob_io;
128 : pub mod block_io;
129 : pub mod vectored_blob_io;
130 :
131 : pub mod disk_btree;
132 : pub(crate) mod ephemeral_file;
133 : pub mod layer_map;
134 :
135 : pub mod metadata;
136 : pub mod remote_timeline_client;
137 : pub mod storage_layer;
138 :
139 : pub mod config;
140 : pub mod mgr;
141 : pub mod secondary;
142 : pub mod tasks;
143 : pub mod upload_queue;
144 :
145 : pub(crate) mod timeline;
146 :
147 : pub mod size;
148 :
149 : pub(crate) mod throttle;
150 :
151 : pub(crate) use crate::span::debug_assert_current_span_has_tenant_and_timeline_id;
152 : pub(crate) use timeline::{LogicalSizeCalculationCause, PageReconstructError, Timeline};
153 :
154 : // re-export for use in walreceiver
155 : pub use crate::tenant::timeline::WalReceiverInfo;
156 :
157 : /// The "tenants" part of `tenants/<tenant>/timelines...`
158 : pub const TENANTS_SEGMENT_NAME: &str = "tenants";
159 :
160 : /// Parts of the `.neon/tenants/<tenant_id>/timelines/<timeline_id>` directory prefix.
161 : pub const TIMELINES_SEGMENT_NAME: &str = "timelines";
162 :
163 : /// References to shared objects that are passed into each tenant, such
164 : /// as the shared remote storage client and process initialization state.
165 : #[derive(Clone)]
166 : pub struct TenantSharedResources {
167 : pub broker_client: storage_broker::BrokerClientChannel,
168 : pub remote_storage: GenericRemoteStorage,
169 : pub deletion_queue_client: DeletionQueueClient,
170 : pub l0_flush_global_state: L0FlushGlobalState,
171 : }
172 :
173 : /// A [`Tenant`] is really an _attached_ tenant. The configuration
174 : /// for an attached tenant is a subset of the [`LocationConf`], represented
175 : /// in this struct.
176 : pub(super) struct AttachedTenantConf {
177 : tenant_conf: TenantConfOpt,
178 : location: AttachedLocationConfig,
179 : }
180 :
181 : impl AttachedTenantConf {
182 0 : fn new(tenant_conf: TenantConfOpt, location: AttachedLocationConfig) -> Self {
183 0 : Self {
184 0 : tenant_conf,
185 0 : location,
186 0 : }
187 0 : }
188 :
189 170 : fn try_from(location_conf: LocationConf) -> anyhow::Result<Self> {
190 170 : match &location_conf.mode {
191 170 : LocationMode::Attached(attach_conf) => Ok(Self {
192 170 : tenant_conf: location_conf.tenant_conf,
193 170 : location: *attach_conf,
194 170 : }),
195 : LocationMode::Secondary(_) => {
196 0 : anyhow::bail!("Attempted to construct AttachedTenantConf from a LocationConf in secondary mode")
197 : }
198 : }
199 170 : }
200 : }
201 : struct TimelinePreload {
202 : timeline_id: TimelineId,
203 : client: RemoteTimelineClient,
204 : index_part: Result<MaybeDeletedIndexPart, DownloadError>,
205 : }
206 :
207 : pub(crate) struct TenantPreload {
208 : timelines: HashMap<TimelineId, TimelinePreload>,
209 : }
210 :
211 : /// When we spawn a tenant, we choose how eagerly to activate it: right
212 : /// away, or lazily from a background warmup queue.
213 : pub(crate) enum SpawnMode {
214 : /// Activate as soon as possible
215 : Eager,
216 : /// Lazy activation in the background, with the option to skip the queue if the need comes up
217 : Lazy,
218 : }
219 :
220 : ///
221 : /// A Tenant consists of multiple timelines. Keep them in a hash table.
222 : ///
223 : pub struct Tenant {
224 : // Global pageserver config parameters
225 : pub conf: &'static PageServerConf,
226 :
227 : /// The value creation timestamp, used to measure activation delay, see:
228 : /// <https://github.com/neondatabase/neon/issues/4025>
229 : constructed_at: Instant,
230 :
231 : state: watch::Sender<TenantState>,
232 :
233 : // Overridden tenant-specific config parameters.
234 : // We keep the TenantConfOpt struct here to preserve the information
235 : // about parameters that are not set.
236 : // This is necessary to allow global config updates.
237 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
238 :
239 : tenant_shard_id: TenantShardId,
240 :
241 : // The detailed sharding information, beyond the number/count in tenant_shard_id
242 : shard_identity: ShardIdentity,
243 :
244 : /// The remote storage generation, used to protect S3 objects from split-brain.
245 : /// Does not change over the lifetime of the [`Tenant`] object.
246 : ///
247 : /// This duplicates the generation stored in LocationConf, but that structure is mutable:
248 : /// this copy enforces the invariant that the generation doesn't change during a Tenant's lifetime.
249 : generation: Generation,
250 :
251 : timelines: Mutex<HashMap<TimelineId, Arc<Timeline>>>,
252 :
253 : /// During timeline creation, we first insert the TimelineId into the
254 : /// creating map, then into `timelines`, then remove it from the creating map.
255 : /// **Lock order**: if acquiring both, acquire `timelines` before `timelines_creating`.
256 : timelines_creating: std::sync::Mutex<HashSet<TimelineId>>,
257 :
258 : // This mutex prevents creation of new timelines during GC.
259 : // Adding yet another mutex (in addition to `timelines`) is needed because holding
260 : // `timelines` mutex for the whole GC iteration
261 : // may block `get_timeline`, `get_timelines_state`, and other timeline operations
262 : // for a long time, which in turn may cause replication connections to be dropped,
263 : // wait_for_lsn timeouts to expire, and so on.
264 : gc_cs: tokio::sync::Mutex<()>,
265 : walredo_mgr: Option<Arc<WalRedoManager>>,
266 :
267 : // provides access to timeline data sitting in the remote storage
268 : pub(crate) remote_storage: GenericRemoteStorage,
269 :
270 : // Access to global deletion queue for when this tenant wants to schedule a deletion
271 : deletion_queue_client: DeletionQueueClient,
272 :
273 : /// Cached logical sizes, updated on each [`Tenant::gather_size_inputs`].
274 : cached_logical_sizes: tokio::sync::Mutex<HashMap<(TimelineId, Lsn), u64>>,
275 : cached_synthetic_tenant_size: Arc<AtomicU64>,
276 :
277 : eviction_task_tenant_state: tokio::sync::Mutex<EvictionTaskTenantState>,
278 :
279 : /// If the tenant is in Activating state, notify this to encourage it
280 : /// to proceed to Active as soon as possible, rather than waiting for lazy
281 : /// background warmup.
282 : pub(crate) activate_now_sem: tokio::sync::Semaphore,
283 :
284 : // Cancellation token fires when we have entered shutdown(). This is a parent of
285 : // Timelines' cancellation token.
286 : pub(crate) cancel: CancellationToken,
287 :
288 : // Users of the Tenant such as the page service must take this Gate to avoid
289 : // trying to use a Tenant which is shutting down.
290 : pub(crate) gate: Gate,
291 :
292 : /// Throttle applied at the top of [`Timeline::get`].
293 : /// All [`Tenant::timelines`] of a given [`Tenant`] instance share the same [`throttle::Throttle`] instance.
294 : pub(crate) timeline_get_throttle:
295 : Arc<throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>>,
296 :
297 : /// An ongoing timeline detach must be checked during attempts to GC or compact a timeline.
298 : ongoing_timeline_detach: std::sync::Mutex<Option<(TimelineId, utils::completion::Barrier)>>,
299 :
300 : l0_flush_global_state: L0FlushGlobalState,
301 : }
302 :
303 : impl std::fmt::Debug for Tenant {
304 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
305 0 : write!(f, "{} ({})", self.tenant_shard_id, self.current_state())
306 0 : }
307 : }
308 :
309 : pub(crate) enum WalRedoManager {
310 : Prod(PostgresRedoManager),
311 : #[cfg(test)]
312 : Test(harness::TestRedoManager),
313 : }
314 :
315 : impl From<PostgresRedoManager> for WalRedoManager {
316 0 : fn from(mgr: PostgresRedoManager) -> Self {
317 0 : Self::Prod(mgr)
318 0 : }
319 : }
320 :
321 : #[cfg(test)]
322 : impl From<harness::TestRedoManager> for WalRedoManager {
323 162 : fn from(mgr: harness::TestRedoManager) -> Self {
324 162 : Self::Test(mgr)
325 162 : }
326 : }
327 :
328 : impl WalRedoManager {
329 6 : pub(crate) async fn shutdown(&self) {
330 6 : match self {
331 0 : Self::Prod(mgr) => mgr.shutdown().await,
332 : #[cfg(test)]
333 6 : Self::Test(_) => {
334 6 : // Not applicable to test redo manager
335 6 : }
336 : }
337 6 : }
338 :
339 0 : pub(crate) fn maybe_quiesce(&self, idle_timeout: Duration) {
340 0 : match self {
341 0 : Self::Prod(mgr) => mgr.maybe_quiesce(idle_timeout),
342 0 : #[cfg(test)]
343 0 : Self::Test(_) => {
344 0 : // Not applicable to test redo manager
345 0 : }
346 0 : }
347 0 : }
348 :
349 : /// # Cancel-Safety
350 : ///
351 : /// This method is cancellation-safe.
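///
/// For example, callers may race it against a shutdown signal without
/// leaving partial state behind (a sketch; `cancel` is assumed to be a
/// `CancellationToken` available at the call site):
///
/// ```ignore
/// tokio::select! {
///     res = walredo_mgr.request_redo(key, lsn, base_img, records, pg_version) => {
///         let _page = res?;
///         // use the reconstructed page image
///     }
///     _ = cancel.cancelled() => {
///         // safe to drop the future here, per the guarantee above
///     }
/// }
/// ```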
352 58 : pub async fn request_redo(
353 58 : &self,
354 58 : key: crate::repository::Key,
355 58 : lsn: Lsn,
356 58 : base_img: Option<(Lsn, bytes::Bytes)>,
357 58 : records: Vec<(Lsn, crate::walrecord::NeonWalRecord)>,
358 58 : pg_version: u32,
359 58 : ) -> Result<bytes::Bytes, walredo::Error> {
360 58 : match self {
361 0 : Self::Prod(mgr) => {
362 0 : mgr.request_redo(key, lsn, base_img, records, pg_version)
363 0 : .await
364 : }
365 : #[cfg(test)]
366 58 : Self::Test(mgr) => {
367 58 : mgr.request_redo(key, lsn, base_img, records, pg_version)
368 0 : .await
369 : }
370 : }
371 58 : }
372 :
373 0 : pub(crate) fn status(&self) -> Option<WalRedoManagerStatus> {
374 0 : match self {
375 0 : WalRedoManager::Prod(m) => Some(m.status()),
376 0 : #[cfg(test)]
377 0 : WalRedoManager::Test(_) => None,
378 0 : }
379 0 : }
380 : }
381 :
382 0 : #[derive(Debug, thiserror::Error, PartialEq, Eq)]
383 : pub enum GetTimelineError {
384 : #[error("Timeline {tenant_id}/{timeline_id} is not active, state: {state:?}")]
385 : NotActive {
386 : tenant_id: TenantShardId,
387 : timeline_id: TimelineId,
388 : state: TimelineState,
389 : },
390 : #[error("Timeline {tenant_id}/{timeline_id} was not found")]
391 : NotFound {
392 : tenant_id: TenantShardId,
393 : timeline_id: TimelineId,
394 : },
395 : }
396 :
397 0 : #[derive(Debug, thiserror::Error)]
398 : pub enum LoadLocalTimelineError {
399 : #[error("FailedToLoad")]
400 : Load(#[source] anyhow::Error),
401 : #[error("FailedToResumeDeletion")]
402 : ResumeDeletion(#[source] anyhow::Error),
403 : }
404 :
405 0 : #[derive(thiserror::Error)]
406 : pub enum DeleteTimelineError {
407 : #[error("NotFound")]
408 : NotFound,
409 :
410 : #[error("HasChildren")]
411 : HasChildren(Vec<TimelineId>),
412 :
413 : #[error("Timeline deletion is already in progress")]
414 : AlreadyInProgress(Arc<tokio::sync::Mutex<DeleteTimelineFlow>>),
415 :
416 : #[error(transparent)]
417 : Other(#[from] anyhow::Error),
418 : }
419 :
420 : impl Debug for DeleteTimelineError {
421 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
422 0 : match self {
423 0 : Self::NotFound => write!(f, "NotFound"),
424 0 : Self::HasChildren(c) => f.debug_tuple("HasChildren").field(c).finish(),
425 0 : Self::AlreadyInProgress(_) => f.debug_tuple("AlreadyInProgress").finish(),
426 0 : Self::Other(e) => f.debug_tuple("Other").field(e).finish(),
427 : }
428 0 : }
429 : }
430 :
431 : pub enum SetStoppingError {
432 : AlreadyStopping(completion::Barrier),
433 : Broken,
434 : }
435 :
436 : impl Debug for SetStoppingError {
437 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
438 0 : match self {
439 0 : Self::AlreadyStopping(_) => f.debug_tuple("AlreadyStopping").finish(),
440 0 : Self::Broken => write!(f, "Broken"),
441 : }
442 0 : }
443 : }
444 :
445 0 : #[derive(thiserror::Error, Debug)]
446 : pub enum CreateTimelineError {
447 : #[error("creation of timeline with the given ID is in progress")]
448 : AlreadyCreating,
449 : #[error("timeline already exists with different parameters")]
450 : Conflict,
451 : #[error(transparent)]
452 : AncestorLsn(anyhow::Error),
453 : #[error("ancestor timeline is not active")]
454 : AncestorNotActive,
455 : #[error("tenant shutting down")]
456 : ShuttingDown,
457 : #[error(transparent)]
458 : Other(#[from] anyhow::Error),
459 : }
460 :
461 : #[derive(thiserror::Error, Debug)]
462 : enum InitdbError {
463 : Other(anyhow::Error),
464 : Cancelled,
465 : Spawn(std::io::Result<()>),
466 : Failed(std::process::ExitStatus, Vec<u8>),
467 : }
468 :
469 : impl fmt::Display for InitdbError {
470 0 : fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
471 0 : match self {
472 0 : InitdbError::Cancelled => write!(f, "Operation was cancelled"),
473 0 : InitdbError::Spawn(e) => write!(f, "Spawn error: {:?}", e),
474 0 : InitdbError::Failed(status, stderr) => write!(
475 0 : f,
476 0 : "Command failed with status {:?}: {}",
477 0 : status,
478 0 : String::from_utf8_lossy(stderr)
479 0 : ),
480 0 : InitdbError::Other(e) => write!(f, "Error: {:?}", e),
481 : }
482 0 : }
483 : }
484 :
485 : impl From<std::io::Error> for InitdbError {
486 0 : fn from(error: std::io::Error) -> Self {
487 0 : InitdbError::Spawn(Err(error))
488 0 : }
489 : }
490 :
491 : enum CreateTimelineCause {
492 : Load,
493 : Delete,
494 : }
495 :
496 0 : #[derive(thiserror::Error, Debug)]
497 : pub(crate) enum GcError {
498 : // The tenant is shutting down
499 : #[error("tenant shutting down")]
500 : TenantCancelled,
501 :
502 : // The timeline is shutting down
503 : #[error("timeline shutting down")]
504 : TimelineCancelled,
505 :
506 : // The tenant is in a state ineligible to run GC
507 : #[error("not active")]
508 : NotActive,
509 :
510 : // A requested GC cutoff LSN was invalid, for example it tried to move backwards
511 : #[error("not active")]
512 : BadLsn { why: String },
513 :
514 : // A remote storage error while scheduling updates after compaction
515 : #[error(transparent)]
516 : Remote(anyhow::Error),
517 :
518 : // An error reading while calculating GC cutoffs
519 : #[error(transparent)]
520 : GcCutoffs(PageReconstructError),
521 :
522 : // If GC was invoked for a particular timeline, this error means it didn't exist
523 : #[error("timeline not found")]
524 : TimelineNotFound,
525 : }
526 :
527 : impl From<PageReconstructError> for GcError {
528 0 : fn from(value: PageReconstructError) -> Self {
529 0 : match value {
530 0 : PageReconstructError::Cancelled => Self::TimelineCancelled,
531 0 : other => Self::GcCutoffs(other),
532 : }
533 0 : }
534 : }
535 :
536 0 : #[derive(thiserror::Error, Debug)]
537 : pub(crate) enum LoadConfigError {
538 : #[error("TOML deserialization error: '{0}'")]
539 : DeserializeToml(#[from] toml_edit::de::Error),
540 :
541 : #[error("Config not found at {0}")]
542 : NotFound(Utf8PathBuf),
543 : }
544 :
545 : impl Tenant {
546 : /// Yet another helper for timeline initialization.
547 : ///
548 : /// - Initializes the Timeline struct and inserts it into the tenant's hash map
549 : /// - Scans the local timeline directory for layer files and builds the layer map
550 : /// - Downloads remote index file and adds remote files to the layer map
551 : /// - Schedules remote upload tasks for any files that are present locally but missing from remote storage.
552 : ///
553 : /// If the operation fails, the timeline is left in the tenant's hash map in Broken state. On success,
554 : /// it is marked as Active.
555 : #[allow(clippy::too_many_arguments)]
556 6 : async fn timeline_init_and_sync(
557 6 : &self,
558 6 : timeline_id: TimelineId,
559 6 : resources: TimelineResources,
560 6 : index_part: Option<IndexPart>,
561 6 : metadata: TimelineMetadata,
562 6 : ancestor: Option<Arc<Timeline>>,
563 6 : last_aux_file_policy: Option<AuxFilePolicy>,
564 6 : _ctx: &RequestContext,
565 6 : ) -> anyhow::Result<()> {
566 6 : let tenant_id = self.tenant_shard_id;
567 :
568 6 : let timeline = self.create_timeline_struct(
569 6 : timeline_id,
570 6 : &metadata,
571 6 : ancestor.clone(),
572 6 : resources,
573 6 : CreateTimelineCause::Load,
574 6 : // This could be derived from ancestor branch + index part. Though the only caller of `timeline_init_and_sync` is `load_remote_timeline`,
575 6 : // there will potentially be other caller of this function in the future, and we don't know whether `index_part` or `ancestor` takes precedence.
576 6 : // Therefore, we pass this field explicitly for now, and remove it once we fully migrate to aux file v2.
577 6 : last_aux_file_policy,
578 6 : )?;
579 6 : let disk_consistent_lsn = timeline.get_disk_consistent_lsn();
580 6 : anyhow::ensure!(
581 6 : disk_consistent_lsn.is_valid(),
582 0 : "Timeline {tenant_id}/{timeline_id} has invalid disk_consistent_lsn"
583 : );
584 6 : assert_eq!(
585 6 : disk_consistent_lsn,
586 6 : metadata.disk_consistent_lsn(),
587 0 : "these are used interchangeably"
588 : );
589 :
590 6 : if let Some(index_part) = index_part.as_ref() {
591 6 : timeline.remote_client.init_upload_queue(index_part)?;
592 :
593 6 : timeline
594 6 : .last_aux_file_policy
595 6 : .store(index_part.last_aux_file_policy());
596 : } else {
597 : // No data on the remote storage, but we have local metadata file. We can end up
598 : // here with timeline_create being interrupted before finishing index part upload.
599 : // By doing what we do here, the index part upload is retried.
600 : // If control plane retries timeline creation in the meantime, the mgmt API handler
601 : // for timeline creation will coalesce on the upload we queue here.
602 :
603 : // FIXME: this branch should be dead code as we no longer write local metadata.
604 :
605 0 : timeline
606 0 : .remote_client
607 0 : .init_upload_queue_for_empty_remote(&metadata)?;
608 0 : timeline
609 0 : .remote_client
610 0 : .schedule_index_upload_for_full_metadata_update(&metadata)?;
611 : }
612 :
613 6 : timeline
614 6 : .load_layer_map(disk_consistent_lsn, index_part)
615 6 : .await
616 6 : .with_context(|| {
617 0 : format!("Failed to load layermap for timeline {tenant_id}/{timeline_id}")
618 6 : })?;
619 :
620 : {
621 : // avoid holding it across awaits
622 6 : let mut timelines_accessor = self.timelines.lock().unwrap();
623 6 : match timelines_accessor.entry(timeline_id) {
624 : // We should never try and load the same timeline twice during startup
625 : Entry::Occupied(_) => {
626 0 : unreachable!(
627 0 : "Timeline {tenant_id}/{timeline_id} already exists in the tenant map"
628 0 : );
629 : }
630 6 : Entry::Vacant(v) => {
631 6 : v.insert(Arc::clone(&timeline));
632 6 : timeline.maybe_spawn_flush_loop();
633 6 : }
634 6 : }
635 6 : };
636 6 :
637 6 : // Sanity check: a timeline should have some content.
638 6 : anyhow::ensure!(
639 6 : ancestor.is_some()
640 4 : || timeline
641 4 : .layers
642 4 : .read()
643 0 : .await
644 4 : .layer_map()
645 4 : .iter_historic_layers()
646 4 : .next()
647 4 : .is_some(),
648 0 : "Timeline has no ancestor and no layer files"
649 : );
650 :
651 6 : Ok(())
652 6 : }
653 :
654 : /// Attach a tenant that's available in cloud storage.
655 : ///
656 : /// This returns quickly, after just creating the in-memory Tenant
657 : /// object and launching a background task to download
658 : /// the remote index files. On return, the tenant is most likely still in
659 : /// Attaching state, and it will become Active once the background task
660 : /// finishes. You can use wait_until_active() to wait for the task to
661 : /// complete.
662 : ///
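/// A sketch of the intended call pattern; `wait_until_active` is the helper
/// referred to above, whose exact signature is not shown in this file:
///
/// ```ignore
/// let tenant = Tenant::spawn(
///     conf,
///     tenant_shard_id,
///     resources,
///     attached_conf,
///     shard_identity,
///     None, // init_order: not part of process startup
///     SpawnMode::Lazy,
///     &ctx,
/// );
/// // `spawn` has already returned; attach continues in the background.
/// tenant.wait_until_active(timeout).await?;
/// ```
///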
663 : #[allow(clippy::too_many_arguments)]
664 0 : pub(crate) fn spawn(
665 0 : conf: &'static PageServerConf,
666 0 : tenant_shard_id: TenantShardId,
667 0 : resources: TenantSharedResources,
668 0 : attached_conf: AttachedTenantConf,
669 0 : shard_identity: ShardIdentity,
670 0 : init_order: Option<InitializationOrder>,
671 0 : mode: SpawnMode,
672 0 : ctx: &RequestContext,
673 0 : ) -> Arc<Tenant> {
674 0 : let wal_redo_manager = Arc::new(WalRedoManager::from(PostgresRedoManager::new(
675 0 : conf,
676 0 : tenant_shard_id,
677 0 : )));
678 0 :
679 0 : let TenantSharedResources {
680 0 : broker_client,
681 0 : remote_storage,
682 0 : deletion_queue_client,
683 0 : l0_flush_global_state,
684 0 : } = resources;
685 0 :
686 0 : let attach_mode = attached_conf.location.attach_mode;
687 0 : let generation = attached_conf.location.generation;
688 0 :
689 0 : let tenant = Arc::new(Tenant::new(
690 0 : TenantState::Attaching,
691 0 : conf,
692 0 : attached_conf,
693 0 : shard_identity,
694 0 : Some(wal_redo_manager),
695 0 : tenant_shard_id,
696 0 : remote_storage.clone(),
697 0 : deletion_queue_client,
698 0 : l0_flush_global_state,
699 0 : ));
700 0 :
701 0 : // The attach task will carry a GateGuard, so that shutdown() reliably waits for it to drop out if
702 0 : // we shut down while attaching.
703 0 : let attach_gate_guard = tenant
704 0 : .gate
705 0 : .enter()
706 0 : .expect("We just created the Tenant: nothing else can have shut it down yet");
707 0 :
708 0 : // Do all the hard work in the background
709 0 : let tenant_clone = Arc::clone(&tenant);
710 0 : let ctx = ctx.detached_child(TaskKind::Attach, DownloadBehavior::Warn);
711 0 : task_mgr::spawn(
712 0 : &tokio::runtime::Handle::current(),
713 0 : TaskKind::Attach,
714 0 : Some(tenant_shard_id),
715 0 : None,
716 0 : "attach tenant",
717 : false,
718 0 : async move {
719 0 :
720 0 : info!(
721 : ?attach_mode,
722 0 : "Attaching tenant"
723 : );
724 :
725 0 : let _gate_guard = attach_gate_guard;
726 0 :
727 0 : // Is this tenant being spawned as part of process startup?
728 0 : let starting_up = init_order.is_some();
729 : scopeguard::defer! {
730 : if starting_up {
731 : TENANT.startup_complete.inc();
732 : }
733 : }
734 :
735 : // Ideally we should use Tenant::set_broken_no_wait, but it is not supposed to be used when tenant is in loading state.
736 : enum BrokenVerbosity {
737 : Error,
738 : Info
739 : }
740 0 : let make_broken =
741 0 : |t: &Tenant, err: anyhow::Error, verbosity: BrokenVerbosity| {
742 0 : match verbosity {
743 : BrokenVerbosity::Info => {
744 0 : info!("attach cancelled, setting tenant state to Broken: {err}");
745 : },
746 : BrokenVerbosity::Error => {
747 0 : error!("attach failed, setting tenant state to Broken: {err:?}");
748 : }
749 : }
750 0 : t.state.send_modify(|state| {
751 0 : // The Stopping case is for when we have passed control on to DeleteTenantFlow:
752 0 : // if it errors, we will call make_broken when tenant is already in Stopping.
753 0 : assert!(
754 0 : matches!(*state, TenantState::Attaching | TenantState::Stopping { .. }),
755 0 : "the attach task owns the tenant state until activation is complete"
756 : );
757 :
758 0 : *state = TenantState::broken_from_reason(err.to_string());
759 0 : });
760 0 : };
761 :
762 0 : let mut init_order = init_order;
763 0 : // take the completion because initial tenant loading will complete when all of
764 0 : // these tasks complete.
765 0 : let _completion = init_order
766 0 : .as_mut()
767 0 : .and_then(|x| x.initial_tenant_load.take());
768 0 : let remote_load_completion = init_order
769 0 : .as_mut()
770 0 : .and_then(|x| x.initial_tenant_load_remote.take());
771 :
772 : enum AttachType<'a> {
773 : /// We are attaching this tenant lazily in the background.
774 : Warmup {
775 : _permit: tokio::sync::SemaphorePermit<'a>,
776 : during_startup: bool
777 : },
778 : /// We are attaching this tenant as soon as we can, because for example an
779 : /// endpoint tried to access it.
780 : OnDemand,
781 : /// During normal operations after startup, we are attaching a tenant, and
782 : /// eager attach was requested.
783 : Normal,
784 : }
785 :
786 0 : let attach_type = if matches!(mode, SpawnMode::Lazy) {
787 : // Before doing any I/O, wait for at least one of:
788 : // - A client attempting to access to this tenant (on-demand loading)
789 : // - A permit becoming available in the warmup semaphore (background warmup)
790 :
791 : tokio::select!(
792 : permit = tenant_clone.activate_now_sem.acquire() => {
793 : let _ = permit.expect("activate_now_sem is never closed");
794 : tracing::info!("Activating tenant (on-demand)");
795 : AttachType::OnDemand
796 : },
797 : permit = conf.concurrent_tenant_warmup.inner().acquire() => {
798 : let _permit = permit.expect("concurrent_tenant_warmup semaphore is never closed");
799 : tracing::info!("Activating tenant (warmup)");
800 : AttachType::Warmup {
801 : _permit,
802 : during_startup: init_order.is_some()
803 : }
804 : }
805 : _ = tenant_clone.cancel.cancelled() => {
806 : // This is safe, but should be pretty rare: it is interesting if a tenant
807 : // stayed in Activating for such a long time that shutdown found it in
808 : // that state.
809 : tracing::info!(state=%tenant_clone.current_state(), "Tenant shut down before activation");
810 : // Make the tenant broken so that set_stopping will not hang waiting for it to leave
811 : // the Attaching state. This is an over-reaction (nothing really broke, the tenant is
812 : // just shutting down), but ensures progress.
813 : make_broken(&tenant_clone, anyhow::anyhow!("Shut down while Attaching"), BrokenVerbosity::Info);
814 : return Ok(());
815 : },
816 : )
817 : } else {
818 : // SpawnMode::Eager always causes jumping ahead of the
819 : // concurrent_tenant_warmup queue
820 0 : AttachType::Normal
821 : };
822 :
823 0 : let preload = match &mode {
824 : SpawnMode::Eager | SpawnMode::Lazy => {
825 0 : let _preload_timer = TENANT.preload.start_timer();
826 0 : let res = tenant_clone
827 0 : .preload(&remote_storage, task_mgr::shutdown_token())
828 0 : .await;
829 0 : match res {
830 0 : Ok(p) => Some(p),
831 0 : Err(e) => {
832 0 : make_broken(&tenant_clone, anyhow::anyhow!(e), BrokenVerbosity::Error);
833 0 : return Ok(());
834 : }
835 : }
836 : }
837 :
838 : };
839 :
840 : // Remote preload is complete.
841 0 : drop(remote_load_completion);
842 :
843 : // We will time the duration of the attach phase unless this is a creation (attach will do no work)
844 0 : let attached = {
845 0 : let _attach_timer = Some(TENANT.attach.start_timer());
846 0 : tenant_clone.attach(preload, &ctx).await
847 : };
848 :
849 0 : match attached {
850 : Ok(()) => {
851 0 : info!("attach finished, activating");
852 0 : tenant_clone.activate(broker_client, None, &ctx);
853 : }
854 0 : Err(e) => {
855 0 : make_broken(&tenant_clone, anyhow::anyhow!(e), BrokenVerbosity::Error);
856 0 : }
857 : }
858 :
859 : // If we are doing an opportunistic warmup attachment at startup, initialize
860 : // logical size at the same time. This is better than starting a bunch of idle tenants
861 : // with cold caches and then coming back later to initialize their logical sizes.
862 : //
863 : // It also prevents the warmup process from competing with the concurrency limit on
864 : // logical size calculations: if the logical size calculation semaphore is saturated,
865 : // then warmup will wait for that before proceeding to the next tenant.
866 0 : if matches!(attach_type, AttachType::Warmup { during_startup: true, .. }) {
867 0 : let mut futs: FuturesUnordered<_> = tenant_clone.timelines.lock().unwrap().values().cloned().map(|t| t.await_initial_logical_size()).collect();
868 0 : tracing::info!("Waiting for initial logical sizes while warming up...");
869 0 : while futs.next().await.is_some() {}
870 0 : tracing::info!("Warm-up complete");
871 0 : }
872 :
873 0 : Ok(())
874 0 : }
875 0 : .instrument(tracing::info_span!(parent: None, "attach", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), gen=?generation)),
876 : );
877 0 : tenant
878 0 : }
879 :
880 324 : #[instrument(skip_all)]
881 : pub(crate) async fn preload(
882 : self: &Arc<Self>,
883 : remote_storage: &GenericRemoteStorage,
884 : cancel: CancellationToken,
885 : ) -> anyhow::Result<TenantPreload> {
886 : span::debug_assert_current_span_has_tenant_id();
887 : // Get list of remote timelines
888 : // download index files for every tenant timeline
889 : info!("listing remote timelines");
890 : let (remote_timeline_ids, other_keys) = remote_timeline_client::list_remote_timelines(
891 : remote_storage,
892 : self.tenant_shard_id,
893 : cancel.clone(),
894 : )
895 : .await?;
896 :
897 : info!("found {} timelines", remote_timeline_ids.len(),);
898 :
899 : for k in other_keys {
900 : warn!("Unexpected non timeline key {k}");
901 : }
902 :
903 : Ok(TenantPreload {
904 : timelines: Self::load_timeline_metadata(
905 : self,
906 : remote_timeline_ids,
907 : remote_storage,
908 : cancel,
909 : )
910 : .await?,
911 : })
912 : }
913 :
914 : ///
915 : /// Background task that downloads all data for a tenant and brings it to Active state.
916 : ///
917 : /// No background tasks are started as part of this routine.
918 : ///
919 162 : async fn attach(
920 162 : self: &Arc<Tenant>,
921 162 : preload: Option<TenantPreload>,
922 162 : ctx: &RequestContext,
923 162 : ) -> anyhow::Result<()> {
924 162 : span::debug_assert_current_span_has_tenant_id();
925 162 :
926 162 : failpoint_support::sleep_millis_async!("before-attaching-tenant");
927 :
928 162 : let Some(preload) = preload else {
929 0 : anyhow::bail!("local-only deployment is no longer supported, https://github.com/neondatabase/neon/issues/5624");
930 : };
931 :
932 162 : let mut timelines_to_resume_deletions = vec![];
933 162 :
934 162 : let mut remote_index_and_client = HashMap::new();
935 162 : let mut timeline_ancestors = HashMap::new();
936 162 : let mut existent_timelines = HashSet::new();
937 168 : for (timeline_id, preload) in preload.timelines {
938 6 : let index_part = match preload.index_part {
939 6 : Ok(i) => {
940 6 : debug!("remote index part exists for timeline {timeline_id}");
941 : // We found index_part on the remote, this is the standard case.
942 6 : existent_timelines.insert(timeline_id);
943 6 : i
944 : }
945 : Err(DownloadError::NotFound) => {
946 : // There is no index_part on the remote. We only get here
947 : // if there is some prefix for the timeline in the remote storage.
948 : // This can e.g. be the initdb.tar.zst archive, maybe a
949 : // remnant from a prior incomplete creation or deletion attempt.
950 : // Delete the local directory, as the deciding criterion for a
951 : // timeline's existence is the presence of index_part.
952 0 : info!(%timeline_id, "index_part not found on remote");
953 0 : continue;
954 : }
955 0 : Err(e) => {
956 0 : // Some (possibly ephemeral) error happened during index_part download.
957 0 : // Pretend the timeline exists to not delete the timeline directory,
958 0 : // as it might be a temporary issue and we don't want to re-download
959 0 : // everything after it resolves.
960 0 : warn!(%timeline_id, "Failed to load index_part from remote storage, failed creation? ({e})");
961 :
962 0 : existent_timelines.insert(timeline_id);
963 0 : continue;
964 : }
965 : };
966 6 : match index_part {
967 6 : MaybeDeletedIndexPart::IndexPart(index_part) => {
968 6 : timeline_ancestors.insert(timeline_id, index_part.metadata.clone());
969 6 : remote_index_and_client.insert(timeline_id, (index_part, preload.client));
970 6 : }
971 0 : MaybeDeletedIndexPart::Deleted(index_part) => {
972 0 : info!(
973 0 : "timeline {} is deleted, picking to resume deletion",
974 : timeline_id
975 : );
976 0 : timelines_to_resume_deletions.push((timeline_id, index_part, preload.client));
977 : }
978 : }
979 : }
980 :
981 : // For every timeline, download the metadata file, scan the local directory,
982 : // and build a layer map that contains an entry for each remote and local
983 : // layer file.
984 162 : let sorted_timelines = tree_sort_timelines(timeline_ancestors, |m| m.ancestor_timeline())?;
985 168 : for (timeline_id, remote_metadata) in sorted_timelines {
986 6 : let (index_part, remote_client) = remote_index_and_client
987 6 : .remove(&timeline_id)
988 6 : .expect("just put it in above");
989 6 :
990 6 : // TODO again handle early failure
991 6 : self.load_remote_timeline(
992 6 : timeline_id,
993 6 : index_part,
994 6 : remote_metadata,
995 6 : TimelineResources {
996 6 : remote_client,
997 6 : timeline_get_throttle: self.timeline_get_throttle.clone(),
998 6 : l0_flush_global_state: self.l0_flush_global_state.clone(),
999 6 : },
1000 6 : ctx,
1001 6 : )
1002 12 : .await
1003 6 : .with_context(|| {
1004 0 : format!(
1005 0 : "failed to load remote timeline {} for tenant {}",
1006 0 : timeline_id, self.tenant_shard_id
1007 0 : )
1008 6 : })?;
1009 : }
1010 :
1011 : // Walk through deleted timelines, resume deletion
1012 162 : for (timeline_id, index_part, remote_timeline_client) in timelines_to_resume_deletions {
1013 0 : remote_timeline_client
1014 0 : .init_upload_queue_stopped_to_continue_deletion(&index_part)
1015 0 : .context("init queue stopped")
1016 0 : .map_err(LoadLocalTimelineError::ResumeDeletion)?;
1017 :
1018 0 : DeleteTimelineFlow::resume_deletion(
1019 0 : Arc::clone(self),
1020 0 : timeline_id,
1021 0 : &index_part.metadata,
1022 0 : remote_timeline_client,
1023 0 : )
1024 0 : .instrument(tracing::info_span!("timeline_delete", %timeline_id))
1025 0 : .await
1026 0 : .context("resume_deletion")
1027 0 : .map_err(LoadLocalTimelineError::ResumeDeletion)?;
1028 : }
1029 :
1030 : // The local filesystem contents are a cache of what's in the remote IndexPart;
1031 : // IndexPart is the source of truth.
1032 162 : self.clean_up_timelines(&existent_timelines)?;
1033 :
1034 162 : fail::fail_point!("attach-before-activate", |_| {
1035 0 : anyhow::bail!("attach-before-activate");
1036 162 : });
1037 162 : failpoint_support::sleep_millis_async!("attach-before-activate-sleep", &self.cancel);
1038 :
1039 162 : info!("Done");
1040 :
1041 162 : Ok(())
1042 162 : }
1043 :
1044 : /// Check for any local timeline directories that are temporary, or do not correspond to a
1045 : /// timeline that still exists: this can happen if we crashed during a deletion/creation, or
1046 : /// if a timeline was deleted while the tenant was attached to a different pageserver.
1047 162 : fn clean_up_timelines(&self, existent_timelines: &HashSet<TimelineId>) -> anyhow::Result<()> {
1048 162 : let timelines_dir = self.conf.timelines_path(&self.tenant_shard_id);
1049 :
1050 162 : let entries = match timelines_dir.read_dir_utf8() {
1051 162 : Ok(d) => d,
1052 0 : Err(e) => {
1053 0 : if e.kind() == std::io::ErrorKind::NotFound {
1054 0 : return Ok(());
1055 : } else {
1056 0 : return Err(e).context("list timelines directory for tenant");
1057 : }
1058 : }
1059 : };
1060 :
1061 170 : for entry in entries {
1062 8 : let entry = entry.context("read timeline dir entry")?;
1063 8 : let entry_path = entry.path();
1064 :
1065 8 : let purge = if crate::is_temporary(entry_path)
1066 : // TODO: remove uninit mark code (https://github.com/neondatabase/neon/issues/5718)
1067 8 : || is_uninit_mark(entry_path)
1068 8 : || crate::is_delete_mark(entry_path)
1069 : {
1070 0 : true
1071 : } else {
1072 8 : match TimelineId::try_from(entry_path.file_name()) {
1073 8 : Ok(i) => {
1074 8 : // Purge if the timeline ID does not exist in remote storage: remote storage is the authority.
1075 8 : !existent_timelines.contains(&i)
1076 : }
1077 0 : Err(e) => {
1078 0 : tracing::warn!(
1079 0 : "Unparseable directory in timelines directory: {entry_path}, ignoring ({e})"
1080 : );
1081 : // Do not purge junk: if we don't recognize it, be cautious and leave it for a human.
1082 0 : false
1083 : }
1084 : }
1085 : };
1086 :
1087 8 : if purge {
1088 2 : tracing::info!("Purging stale timeline dentry {entry_path}");
1089 2 : if let Err(e) = match entry.file_type() {
1090 2 : Ok(t) => if t.is_dir() {
1091 2 : std::fs::remove_dir_all(entry_path)
1092 : } else {
1093 0 : std::fs::remove_file(entry_path)
1094 : }
1095 2 : .or_else(fs_ext::ignore_not_found),
1096 0 : Err(e) => Err(e),
1097 : } {
1098 0 : tracing::warn!("Failed to purge stale timeline dentry {entry_path}: {e}");
1099 2 : }
1100 6 : }
1101 : }
1102 :
1103 162 : Ok(())
1104 162 : }
1105 :
1106 : /// Get the sum of all remote timeline sizes.
1107 : ///
1108 : /// This function relies on the index_part instead of listing the remote storage
1109 0 : pub fn remote_size(&self) -> u64 {
1110 0 : let mut size = 0;
1111 :
1112 0 : for timeline in self.list_timelines() {
1113 0 : size += timeline.remote_client.get_remote_physical_size();
1114 0 : }
1115 :
1116 0 : size
1117 0 : }
1118 :
1119 12 : #[instrument(skip_all, fields(timeline_id=%timeline_id))]
1120 : async fn load_remote_timeline(
1121 : &self,
1122 : timeline_id: TimelineId,
1123 : index_part: IndexPart,
1124 : remote_metadata: TimelineMetadata,
1125 : resources: TimelineResources,
1126 : ctx: &RequestContext,
1127 : ) -> anyhow::Result<()> {
1128 : span::debug_assert_current_span_has_tenant_id();
1129 :
1130 : info!("downloading index file for timeline {}", timeline_id);
1131 : tokio::fs::create_dir_all(self.conf.timeline_path(&self.tenant_shard_id, &timeline_id))
1132 : .await
1133 : .context("Failed to create new timeline directory")?;
1134 :
1135 : let ancestor = if let Some(ancestor_id) = remote_metadata.ancestor_timeline() {
1136 : let timelines = self.timelines.lock().unwrap();
1137 : Some(Arc::clone(timelines.get(&ancestor_id).ok_or_else(
1138 0 : || {
1139 0 : anyhow::anyhow!(
1140 0 : "cannot find ancestor timeline {ancestor_id} for timeline {timeline_id}"
1141 0 : )
1142 0 : },
1143 : )?))
1144 : } else {
1145 : None
1146 : };
1147 :
1148 : let last_aux_file_policy = index_part.last_aux_file_policy();
1149 :
1150 : self.timeline_init_and_sync(
1151 : timeline_id,
1152 : resources,
1153 : Some(index_part),
1154 : remote_metadata,
1155 : ancestor,
1156 : last_aux_file_policy,
1157 : ctx,
1158 : )
1159 : .await
1160 : }
1161 :
1162 162 : async fn load_timeline_metadata(
1163 162 : self: &Arc<Tenant>,
1164 162 : timeline_ids: HashSet<TimelineId>,
1165 162 : remote_storage: &GenericRemoteStorage,
1166 162 : cancel: CancellationToken,
1167 162 : ) -> anyhow::Result<HashMap<TimelineId, TimelinePreload>> {
1168 162 : let mut part_downloads = JoinSet::new();
1169 168 : for timeline_id in timeline_ids {
1170 6 : let client = RemoteTimelineClient::new(
1171 6 : remote_storage.clone(),
1172 6 : self.deletion_queue_client.clone(),
1173 6 : self.conf,
1174 6 : self.tenant_shard_id,
1175 6 : timeline_id,
1176 6 : self.generation,
1177 6 : );
1178 6 : let cancel_clone = cancel.clone();
1179 6 : part_downloads.spawn(
1180 6 : async move {
1181 6 : debug!("starting index part download");
1182 :
1183 23 : let index_part = client.download_index_file(&cancel_clone).await;
1184 :
1185 6 : debug!("finished index part download");
1186 :
1187 6 : Result::<_, anyhow::Error>::Ok(TimelinePreload {
1188 6 : client,
1189 6 : timeline_id,
1190 6 : index_part,
1191 6 : })
1192 6 : }
1193 6 : .map(move |res| {
1194 6 : res.with_context(|| format!("download index part for timeline {timeline_id}"))
1195 6 : })
1196 6 : .instrument(info_span!("download_index_part", %timeline_id)),
1197 : );
1198 : }
1199 :
1200 162 : let mut timeline_preloads: HashMap<TimelineId, TimelinePreload> = HashMap::new();
1201 :
1202 168 : loop {
1203 168 : tokio::select!(
1204 : next = part_downloads.join_next() => {
1205 : match next {
1206 : Some(result) => {
1207 : let preload_result = result.context("join preload task")?;
1208 : let preload = preload_result?;
1209 : timeline_preloads.insert(preload.timeline_id, preload);
1210 : },
1211 : None => {
1212 : break;
1213 : }
1214 : }
1215 : },
1216 : _ = cancel.cancelled() => {
1217 : anyhow::bail!("Cancelled while waiting for remote index download")
1218 : }
1219 168 : )
1220 168 : }
1221 :
1222 162 : Ok(timeline_preloads)
1223 162 : }
1224 :
1225 4 : pub(crate) fn tenant_shard_id(&self) -> TenantShardId {
1226 4 : self.tenant_shard_id
1227 4 : }
1228 :
1229 : /// Get a Timeline handle for the given Neon timeline ID.
1230 : /// This function is idempotent. It doesn't change internal state in any way.
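///
/// For example (a sketch):
///
/// ```ignore
/// // With `active_only == true`, this fails with GetTimelineError::NotActive
/// // instead of handing out a timeline that is still attaching or stopping.
/// let timeline = tenant.get_timeline(timeline_id, true)?;
/// let last = timeline.get_last_record_lsn();
/// ```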
1231 222 : pub fn get_timeline(
1232 222 : &self,
1233 222 : timeline_id: TimelineId,
1234 222 : active_only: bool,
1235 222 : ) -> Result<Arc<Timeline>, GetTimelineError> {
1236 222 : let timelines_accessor = self.timelines.lock().unwrap();
1237 222 : let timeline = timelines_accessor
1238 222 : .get(&timeline_id)
1239 222 : .ok_or(GetTimelineError::NotFound {
1240 222 : tenant_id: self.tenant_shard_id,
1241 222 : timeline_id,
1242 222 : })?;
1243 :
1244 220 : if active_only && !timeline.is_active() {
1245 0 : Err(GetTimelineError::NotActive {
1246 0 : tenant_id: self.tenant_shard_id,
1247 0 : timeline_id,
1248 0 : state: timeline.current_state(),
1249 0 : })
1250 : } else {
1251 220 : Ok(Arc::clone(timeline))
1252 : }
1253 222 : }
1254 :
1255 : /// Lists timelines the tenant contains.
1256 : /// It is up to the tenant's implementation to omit timelines that are not considered ready for use.
1257 8 : pub fn list_timelines(&self) -> Vec<Arc<Timeline>> {
1258 8 : self.timelines
1259 8 : .lock()
1260 8 : .unwrap()
1261 8 : .values()
1262 8 : .map(Arc::clone)
1263 8 : .collect()
1264 8 : }
1265 :
1266 0 : pub fn list_timeline_ids(&self) -> Vec<TimelineId> {
1267 0 : self.timelines.lock().unwrap().keys().cloned().collect()
1268 0 : }
1269 :
1270 : /// This is used to create the initial 'main' timeline during bootstrapping,
1271 : /// or when importing a new base backup. The caller is expected to load an
1272 : /// initial image of the datadir to the new timeline after this.
1273 : ///
1274 : /// Until that happens, the on-disk state is invalid (disk_consistent_lsn=Lsn(0))
1275 : /// and the timeline will fail to load at a restart.
1276 : ///
1277 : /// For tests, use `DatadirModification::init_empty_test_timeline` + `commit` to setup the
1278 : /// minimum amount of keys required to get a writable timeline.
1279 : /// (Without it, `put` might fail due to `repartition` failing.)
1280 154 : pub(crate) async fn create_empty_timeline(
1281 154 : &self,
1282 154 : new_timeline_id: TimelineId,
1283 154 : initdb_lsn: Lsn,
1284 154 : pg_version: u32,
1285 154 : _ctx: &RequestContext,
1286 154 : ) -> anyhow::Result<UninitializedTimeline> {
1287 154 : anyhow::ensure!(
1288 154 : self.is_active(),
1289 0 : "Cannot create empty timelines on inactive tenant"
1290 : );
1291 :
1292 : // Protect against concurrent attempts to use this TimelineId
1293 154 : let create_guard = self.create_timeline_create_guard(new_timeline_id)?;
1294 :
1295 152 : let new_metadata = TimelineMetadata::new(
1296 152 : // Initialize disk_consistent LSN to 0, The caller must import some data to
1297 152 : // make it valid, before calling finish_creation()
1298 152 : Lsn(0),
1299 152 : None,
1300 152 : None,
1301 152 : Lsn(0),
1302 152 : initdb_lsn,
1303 152 : initdb_lsn,
1304 152 : pg_version,
1305 152 : );
1306 152 : self.prepare_new_timeline(
1307 152 : new_timeline_id,
1308 152 : &new_metadata,
1309 152 : create_guard,
1310 152 : initdb_lsn,
1311 152 : None,
1312 152 : None,
1313 152 : )
1314 0 : .await
1315 154 : }
1316 :
1317 : /// Helper for unit tests to create an empty timeline.
1318 : ///
1319 : /// The timeline has state value `Active`, but its background loops are not running.
1320 : // This makes the various functions which anyhow::ensure! for Active state work in tests.
1321 : // Our current tests don't need the background loops.
1322 : #[cfg(test)]
1323 144 : pub async fn create_test_timeline(
1324 144 : &self,
1325 144 : new_timeline_id: TimelineId,
1326 144 : initdb_lsn: Lsn,
1327 144 : pg_version: u32,
1328 144 : ctx: &RequestContext,
1329 144 : ) -> anyhow::Result<Arc<Timeline>> {
1330 144 : let uninit_tl = self
1331 144 : .create_empty_timeline(new_timeline_id, initdb_lsn, pg_version, ctx)
1332 0 : .await?;
1333 144 : let tline = uninit_tl.raw_timeline().expect("we just created it");
1334 144 : assert_eq!(tline.get_last_record_lsn(), Lsn(0));
1335 :
1336 : // Setup minimum keys required for the timeline to be usable.
1337 144 : let mut modification = tline.begin_modification(initdb_lsn);
1338 144 : modification
1339 144 : .init_empty_test_timeline()
1340 144 : .context("init_empty_test_timeline")?;
1341 144 : modification
1342 144 : .commit(ctx)
1343 138 : .await
1344 144 : .context("commit init_empty_test_timeline modification")?;
1345 :
1346 : // Flush to disk so that uninit_tl's check for valid disk_consistent_lsn passes.
1347 144 : tline.maybe_spawn_flush_loop();
1348 144 : tline.freeze_and_flush().await.context("freeze_and_flush")?;
1349 :
1350 : // Make sure the freeze_and_flush reaches remote storage.
1351 144 : tline.remote_client.wait_completion().await.unwrap();
1352 :
1353 144 : let tl = uninit_tl.finish_creation()?;
1354 : // The non-test code would call tl.activate() here.
1355 144 : tl.set_state(TimelineState::Active);
1356 144 : Ok(tl)
1357 144 : }
1358 :
1359 : /// Helper for unit tests to create a timeline with some pre-loaded states.
1360 : #[cfg(test)]
1361 : #[allow(clippy::too_many_arguments)]
1362 18 : pub async fn create_test_timeline_with_layers(
1363 18 : &self,
1364 18 : new_timeline_id: TimelineId,
1365 18 : initdb_lsn: Lsn,
1366 18 : pg_version: u32,
1367 18 : ctx: &RequestContext,
1368 18 : delta_layer_desc: Vec<Vec<(pageserver_api::key::Key, Lsn, crate::repository::Value)>>,
1369 18 : image_layer_desc: Vec<(Lsn, Vec<(pageserver_api::key::Key, bytes::Bytes)>)>,
1370 18 : end_lsn: Lsn,
1371 18 : ) -> anyhow::Result<Arc<Timeline>> {
1372 18 : let tline = self
1373 18 : .create_test_timeline(new_timeline_id, initdb_lsn, pg_version, ctx)
1374 53 : .await?;
1375 18 : tline.force_advance_lsn(end_lsn);
1376 52 : for deltas in delta_layer_desc {
1377 34 : tline
1378 34 : .force_create_delta_layer(deltas, Some(initdb_lsn), ctx)
1379 102 : .await?;
1380 : }
1381 52 : for (lsn, images) in image_layer_desc {
1382 34 : tline
1383 34 : .force_create_image_layer(lsn, images, Some(initdb_lsn), ctx)
1384 157 : .await?;
1385 : }
1386 18 : Ok(tline)
1387 18 : }
1388 :
1389 : /// Create a new timeline.
1390 : ///
1391 : /// Returns the new timeline ID and reference to its Timeline object.
1392 : ///
1393 : /// If the caller specified the timeline ID to use (`new_timeline_id`), and timeline with
1394 : /// the same timeline ID already exists, returns CreateTimelineError::AlreadyExists.
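///
/// A branching call might look like this sketch (argument values are
/// placeholders, not recommendations):
///
/// ```ignore
/// let child = tenant
///     .create_timeline(
///         new_timeline_id,
///         Some(parent_timeline_id), // branch off an ancestor...
///         Some(branch_lsn),         // ...at this LSN (it will be aligned)
///         pg_version,
///         None, // load_existing_initdb
///         broker_client,
///         &ctx,
///     )
///     .await?;
/// ```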
1395 : #[allow(clippy::too_many_arguments)]
1396 0 : pub(crate) async fn create_timeline(
1397 0 : self: &Arc<Tenant>,
1398 0 : new_timeline_id: TimelineId,
1399 0 : ancestor_timeline_id: Option<TimelineId>,
1400 0 : mut ancestor_start_lsn: Option<Lsn>,
1401 0 : pg_version: u32,
1402 0 : load_existing_initdb: Option<TimelineId>,
1403 0 : broker_client: storage_broker::BrokerClientChannel,
1404 0 : ctx: &RequestContext,
1405 0 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
1406 0 : if !self.is_active() {
1407 0 : if matches!(self.current_state(), TenantState::Stopping { .. }) {
1408 0 : return Err(CreateTimelineError::ShuttingDown);
1409 : } else {
1410 0 : return Err(CreateTimelineError::Other(anyhow::anyhow!(
1411 0 : "Cannot create timelines on inactive tenant"
1412 0 : )));
1413 : }
1414 0 : }
1415 :
1416 0 : let _gate = self
1417 0 : .gate
1418 0 : .enter()
1419 0 : .map_err(|_| CreateTimelineError::ShuttingDown)?;
1420 :
1421 : // Get exclusive access to the timeline ID: this ensures that it does not already exist,
1422 : // and that no other creation attempts will be allowed in while we are working.
1423 0 : let create_guard = match self.create_timeline_create_guard(new_timeline_id) {
1424 0 : Ok(m) => m,
1425 : Err(TimelineExclusionError::AlreadyCreating) => {
1426 : // Creation is in progress, we cannot create it again, and we cannot
1427 : // check if this request matches the existing one, so caller must try
1428 : // again later.
1429 0 : return Err(CreateTimelineError::AlreadyCreating);
1430 : }
1431 0 : Err(TimelineExclusionError::Other(e)) => {
1432 0 : return Err(CreateTimelineError::Other(e));
1433 : }
1434 0 : Err(TimelineExclusionError::AlreadyExists(existing)) => {
1435 0 : debug!("timeline {new_timeline_id} already exists");
1436 :
1437 : // Idempotency: creating the same timeline twice is not an error, unless
1438 : // the second creation has different parameters.
1439 0 : if existing.get_ancestor_timeline_id() != ancestor_timeline_id
1440 0 : || existing.pg_version != pg_version
1441 0 : || (ancestor_start_lsn.is_some()
1442 0 : && ancestor_start_lsn != Some(existing.get_ancestor_lsn()))
1443 : {
1444 0 : return Err(CreateTimelineError::Conflict);
1445 0 : }
1446 0 :
1447 0 : // Wait for uploads to complete, so that when we return Ok, the timeline
1448 0 : // is known to be durable on remote storage. Just like we do at the end of
1449 0 : // this function, after we have created the timeline ourselves.
1450 0 : //
1451 0 : // We only really care that the initial version of `index_part.json` has
1452 0 : // been uploaded. That's enough to remember that the timeline
1453 0 : // exists. However, there is no function to wait specifically for that so
1454 0 : // we just wait for all in-progress uploads to finish.
1455 0 : existing
1456 0 : .remote_client
1457 0 : .wait_completion()
1458 0 : .await
1459 0 : .context("wait for timeline uploads to complete")?;
1460 :
1461 0 : return Ok(existing);
1462 : }
1463 : };
1464 :
1465 : pausable_failpoint!("timeline-creation-after-uninit");
1466 :
1467 0 : let loaded_timeline = match ancestor_timeline_id {
1468 0 : Some(ancestor_timeline_id) => {
1469 0 : let ancestor_timeline = self
1470 0 : .get_timeline(ancestor_timeline_id, false)
1471 0 : .context("Cannot branch off the timeline that's not present in pageserver")?;
1472 :
1473 : // instead of waiting around, just deny the request because ancestor is not yet
1474 : // ready for other purposes either.
1475 0 : if !ancestor_timeline.is_active() {
1476 0 : return Err(CreateTimelineError::AncestorNotActive);
1477 0 : }
1478 :
1479 0 : if let Some(lsn) = ancestor_start_lsn.as_mut() {
1480 0 : *lsn = lsn.align();
1481 0 :
1482 0 : let ancestor_ancestor_lsn = ancestor_timeline.get_ancestor_lsn();
1483 0 : if ancestor_ancestor_lsn > *lsn {
1484 : // can we safely just branch from the ancestor instead?
1485 0 : return Err(CreateTimelineError::AncestorLsn(anyhow::anyhow!(
1486 0 : "invalid start lsn {} for ancestor timeline {}: less than timeline ancestor lsn {}",
1487 0 : lsn,
1488 0 : ancestor_timeline_id,
1489 0 : ancestor_ancestor_lsn,
1490 0 : )));
1491 0 : }
1492 0 :
1493 0 : // Wait for the WAL to arrive and be processed on the parent branch up
1494 0 : // to the requested branch point. The repository code itself doesn't
1495 0 : // require it, but if we start to receive WAL on the new timeline,
1496 0 : // decoding the new WAL might need to look up previous pages, relation
1497 0 : // sizes etc. and that would get confused if the previous page versions
1498 0 : // are not in the repository yet.
1499 0 : ancestor_timeline
1500 0 : .wait_lsn(*lsn, timeline::WaitLsnWaiter::Tenant, ctx)
1501 0 : .await
1502 0 : .map_err(|e| match e {
1503 0 : e @ (WaitLsnError::Timeout(_) | WaitLsnError::BadState { .. }) => {
1504 0 : CreateTimelineError::AncestorLsn(anyhow::anyhow!(e))
1505 : }
1506 0 : WaitLsnError::Shutdown => CreateTimelineError::ShuttingDown,
1507 0 : })?;
1508 0 : }
1509 :
1510 0 : self.branch_timeline(
1511 0 : &ancestor_timeline,
1512 0 : new_timeline_id,
1513 0 : ancestor_start_lsn,
1514 0 : create_guard,
1515 0 : ctx,
1516 0 : )
1517 0 : .await?
1518 : }
1519 : None => {
1520 0 : self.bootstrap_timeline(
1521 0 : new_timeline_id,
1522 0 : pg_version,
1523 0 : load_existing_initdb,
1524 0 : create_guard,
1525 0 : ctx,
1526 0 : )
1527 0 : .await?
1528 : }
1529 : };
1530 :
1531 : // At this point we have dropped our guard on [`Self::timelines_creating`], and
1532 : // the timeline is visible in [`Self::timelines`], but it is _not_ durable yet. We must
1533 : // not send a success to the caller until it is. The same applies to handling retries,
1534 : // see the handling of [`TimelineExclusionError::AlreadyExists`] above.
1535 0 : let kind = ancestor_timeline_id
1536 0 : .map(|_| "branched")
1537 0 : .unwrap_or("bootstrapped");
1538 0 : loaded_timeline
1539 0 : .remote_client
1540 0 : .wait_completion()
1541 0 : .await
1542 0 : .with_context(|| format!("wait for {} timeline initial uploads to complete", kind))?;
1543 :
1544 0 : loaded_timeline.activate(self.clone(), broker_client, None, ctx);
1545 0 :
1546 0 : Ok(loaded_timeline)
1547 0 : }
1548 :
1549 0 : pub(crate) async fn delete_timeline(
1550 0 : self: Arc<Self>,
1551 0 : timeline_id: TimelineId,
1552 0 : ) -> Result<(), DeleteTimelineError> {
1553 0 : DeleteTimelineFlow::run(&self, timeline_id, false).await?;
1554 :
1555 0 : Ok(())
1556 0 : }
1557 :
1558 : /// Perform one garbage collection iteration, removing old data files from disk.
1559 : /// This function is periodically called by the gc task.
1560 : /// It can also be explicitly requested through the pageserver API's 'do_gc' command.
1561 : ///
1562 : /// `target_timeline_id` specifies the timeline to GC, or None for all.
1563 : ///
1564 : /// The `horizon` and `pitr` parameters determine how much WAL history needs to be retained.
1565 : /// Also known as the retention period, or the GC cutoff point. `horizon` specifies
1566 : /// the amount of history as an LSN difference from the current latest LSN on each timeline.
1567 : /// `pitr` specifies the same as a time difference from the current time. The effective
1568 : /// GC cutoff point is determined conservatively as whichever of `horizon` and `pitr`
1569 : /// requires more history to be retained.
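     : ///
     : /// For example (illustrative numbers, not defaults): with `horizon` = 64 MiB and
     : /// `pitr` = 7 days, if the LSN that was current 7 days ago is older than
     : /// `last_record_lsn - 64 MiB`, the 7-day LSN becomes the effective cutoff,
     : /// because it retains more history.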
1570 : //
1571 754 : pub(crate) async fn gc_iteration(
1572 754 : &self,
1573 754 : target_timeline_id: Option<TimelineId>,
1574 754 : horizon: u64,
1575 754 : pitr: Duration,
1576 754 : cancel: &CancellationToken,
1577 754 : ctx: &RequestContext,
1578 754 : ) -> Result<GcResult, GcError> {
1579 754 : // Don't start doing work during shutdown
1580 754 : if let TenantState::Stopping { .. } = self.current_state() {
1581 0 : return Ok(GcResult::default());
1582 754 : }
1583 754 :
1584 754 : // there is a global allowed_error for this
1585 754 : if !self.is_active() {
1586 0 : return Err(GcError::NotActive);
1587 754 : }
1588 754 :
1589 754 : {
1590 754 : let conf = self.tenant_conf.load();
1591 754 :
1592 754 : if !conf.location.may_delete_layers_hint() {
1593 0 : info!("Skipping GC in location state {:?}", conf.location);
1594 0 : return Ok(GcResult::default());
1595 754 : }
1596 754 : }
1597 754 :
1598 754 : self.gc_iteration_internal(target_timeline_id, horizon, pitr, cancel, ctx)
1599 746 : .await
1600 754 : }
1601 :
1602 : /// Perform one compaction iteration.
1603 : /// This function is periodically called by the compactor task.
1604 : /// It can also be explicitly requested per timeline through the pageserver
1605 : /// API's 'compact' command.
1606 0 : async fn compaction_iteration(
1607 0 : &self,
1608 0 : cancel: &CancellationToken,
1609 0 : ctx: &RequestContext,
1610 0 : ) -> anyhow::Result<(), timeline::CompactionError> {
1611 0 : // Don't start doing work during shutdown, or when broken, we do not need those in the logs
1612 0 : if !self.is_active() {
1613 0 : return Ok(());
1614 0 : }
1615 0 :
1616 0 : {
1617 0 : let conf = self.tenant_conf.load();
1618 0 : if !conf.location.may_delete_layers_hint() || !conf.location.may_upload_layers_hint() {
1619 0 : info!("Skipping compaction in location state {:?}", conf.location);
1620 0 : return Ok(());
1621 0 : }
1622 0 : }
1623 0 :
1624 0 : // Scan through the hashmap and collect a list of all the timelines,
1625 0 : // while holding the lock. Then drop the lock and actually perform the
1626 0 : // compactions. We don't want to block everything else while the
1627 0 : // compaction runs.
1628 0 : let timelines_to_compact = {
1629 0 : let timelines = self.timelines.lock().unwrap();
1630 0 : let timelines_to_compact = timelines
1631 0 : .iter()
1632 0 : .filter_map(|(timeline_id, timeline)| {
1633 0 : if timeline.is_active() {
1634 0 : Some((*timeline_id, timeline.clone()))
1635 : } else {
1636 0 : None
1637 : }
1638 0 : })
1639 0 : .collect::<Vec<_>>();
1640 0 : drop(timelines);
1641 0 : timelines_to_compact
1642 : };
1643 :
1644 0 : for (timeline_id, timeline) in &timelines_to_compact {
1645 0 : timeline
1646 0 : .compact(cancel, EnumSet::empty(), ctx)
1647 0 : .instrument(info_span!("compact_timeline", %timeline_id))
1648 0 : .await?;
1649 : }
1650 :
1651 0 : Ok(())
1652 0 : }
1653 :
1654 : // Call through to all timelines to freeze ephemeral layers if needed. Usually
1655 : // this happens during ingest: this background housekeeping is for freezing layers
1656 : // that are open but haven't been written to for some time.
1657 0 : async fn ingest_housekeeping(&self) {
1658 0 : // Scan through the hashmap and collect a list of all the timelines,
1659 0 : // while holding the lock. Then drop the lock and actually do the
1660 0 : // freezing. We don't want to block everything else while the
1661 0 : // freezes run.
1662 0 : let timelines = {
1663 0 : self.timelines
1664 0 : .lock()
1665 0 : .unwrap()
1666 0 : .values()
1667 0 : .filter_map(|timeline| {
1668 0 : if timeline.is_active() {
1669 0 : Some(timeline.clone())
1670 : } else {
1671 0 : None
1672 : }
1673 0 : })
1674 0 : .collect::<Vec<_>>()
1675 : };
1676 :
1677 0 : for timeline in &timelines {
1678 0 : timeline.maybe_freeze_ephemeral_layer().await;
1679 : }
1680 0 : }
1681 :
1682 2422 : pub fn current_state(&self) -> TenantState {
1683 2422 : self.state.borrow().clone()
1684 2422 : }
1685 :
1686 1662 : pub fn is_active(&self) -> bool {
1687 1662 : self.current_state() == TenantState::Active
1688 1662 : }
1689 :
1690 0 : pub fn generation(&self) -> Generation {
1691 0 : self.generation
1692 0 : }
1693 :
1694 0 : pub(crate) fn wal_redo_manager_status(&self) -> Option<WalRedoManagerStatus> {
1695 0 : self.walredo_mgr.as_ref().and_then(|mgr| mgr.status())
1696 0 : }
1697 :
1698 : /// Changes tenant status to active, unless shutdown was already requested.
1699 : ///
1700 : /// `background_jobs_can_start` is an optional barrier used during pageserver startup
1701 : /// to delay background jobs; when `None` is given, background jobs can start right away.
1702 0 : fn activate(
1703 0 : self: &Arc<Self>,
1704 0 : broker_client: BrokerClientChannel,
1705 0 : background_jobs_can_start: Option<&completion::Barrier>,
1706 0 : ctx: &RequestContext,
1707 0 : ) {
1708 0 : span::debug_assert_current_span_has_tenant_id();
1709 0 :
1710 0 : let mut activating = false;
1711 0 : self.state.send_modify(|current_state| {
1712 0 : use pageserver_api::models::ActivatingFrom;
1713 0 : match &*current_state {
1714 : TenantState::Activating(_) | TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => {
1715 0 : panic!("caller is responsible for calling activate() only on Loading / Attaching tenants, got {state:?}", state = current_state);
1716 : }
1717 0 : TenantState::Loading => {
1718 0 : *current_state = TenantState::Activating(ActivatingFrom::Loading);
1719 0 : }
1720 0 : TenantState::Attaching => {
1721 0 : *current_state = TenantState::Activating(ActivatingFrom::Attaching);
1722 0 : }
1723 : }
1724 0 : debug!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), "Activating tenant");
1725 0 : activating = true;
1726 0 : // Continue outside the closure. We need to grab timelines.lock()
1727 0 : // and we plan to turn it into a tokio::sync::Mutex in a future patch.
1728 0 : });
1729 0 :
1730 0 : if activating {
1731 0 : let timelines_accessor = self.timelines.lock().unwrap();
1732 0 : let timelines_to_activate = timelines_accessor
1733 0 : .values()
1734 0 : .filter(|timeline| !(timeline.is_broken() || timeline.is_stopping()));
1735 0 :
1736 0 : // Spawn gc and compaction loops. The loops will shut themselves
1737 0 : // down when they notice that the tenant is inactive.
1738 0 : tasks::start_background_loops(self, background_jobs_can_start);
1739 0 :
1740 0 : let mut activated_timelines = 0;
1741 :
1742 0 : for timeline in timelines_to_activate {
1743 0 : timeline.activate(
1744 0 : self.clone(),
1745 0 : broker_client.clone(),
1746 0 : background_jobs_can_start,
1747 0 : ctx,
1748 0 : );
1749 0 : activated_timelines += 1;
1750 0 : }
1751 :
1752 0 : self.state.send_modify(move |current_state| {
1753 0 : assert!(
1754 0 : matches!(current_state, TenantState::Activating(_)),
1755 0 : "set_stopping and set_broken wait for us to leave Activating state",
1756 : );
1757 0 : *current_state = TenantState::Active;
1758 0 :
1759 0 : let elapsed = self.constructed_at.elapsed();
1760 0 : let total_timelines = timelines_accessor.len();
1761 0 :
1762 0 : // log a lot of stuff, because some tenants sometimes suffer from long, user-visible
1763 0 : // activation times. see https://github.com/neondatabase/neon/issues/4025
1764 0 : info!(
1765 0 : since_creation_millis = elapsed.as_millis(),
1766 0 : tenant_id = %self.tenant_shard_id.tenant_id,
1767 0 : shard_id = %self.tenant_shard_id.shard_slug(),
1768 0 : activated_timelines,
1769 0 : total_timelines,
1770 0 : post_state = <&'static str>::from(&*current_state),
1771 0 : "activation attempt finished"
1772 : );
1773 :
1774 0 : TENANT.activation.observe(elapsed.as_secs_f64());
1775 0 : });
1776 0 : }
1777 0 : }
1778 :
1779 : /// Shutdown the tenant and join all of the spawned tasks.
1780 : ///
1781 : /// The method caters for all use-cases:
1782 : /// - pageserver shutdown (a `shutdown_mode` that freezes and flushes)
1783 : /// - detach + ignore (a `shutdown_mode` that does not flush)
1784 : ///
1785 : /// This will attempt to shutdown even if tenant is broken.
1786 : ///
1787 : /// `shutdown_progress` is a [`completion::Barrier`] for the shutdown initiated by this call.
1788 : /// If the tenant is already shutting down, we return a clone of the first shutdown call's
1789 : /// `Barrier` as an `Err`. This not-first caller can use the returned barrier to join with
1790 : /// the ongoing shutdown.
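     : ///
     : /// A minimal sketch of how a not-first caller might join the ongoing shutdown
     : /// (assuming the usual `completion::Barrier::wait` API):
     : ///
     : /// ```ignore
     : /// if let Err(other) = tenant.shutdown(progress, shutdown_mode).await {
     : ///     // someone else started the shutdown first; wait for it to finish
     : ///     other.wait().await;
     : /// }
     : /// ```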
1791 6 : async fn shutdown(
1792 6 : &self,
1793 6 : shutdown_progress: completion::Barrier,
1794 6 : shutdown_mode: timeline::ShutdownMode,
1795 6 : ) -> Result<(), completion::Barrier> {
1796 6 : span::debug_assert_current_span_has_tenant_id();
1797 :
1798 : // Set the tenant (and its timelines) to Stopping state.
1799 : //
1800 : // Since we can only transition into Stopping state after activation is complete,
1801 : // run it in a JoinSet so all tenants have a chance to stop before we get SIGKILLed.
1802 : //
1803 : // Transitioning tenants to Stopping state has a couple of non-obvious side effects:
1804 : // 1. Lock out any new requests to the tenants.
1805 : // 2. Signal cancellation to WAL receivers (we wait on it below).
1806 : // 3. Signal cancellation for other tenant background loops.
1807 : // 4. ???
1808 : //
1809 : // The waiting for the cancellation is not done uniformly.
1810 : // We certainly wait for WAL receivers to shut down.
1811 : // That is necessary so that no new data comes in before the freeze_and_flush.
1812 : // But the tenant background loops are joined-on in our caller.
1813 : // It's messed up.
1814 : // We just ignore the failure to stop.
1815 :
1816 : // If we're still attaching, fire the cancellation token early to drop out: this
1817 : // will prevent us flushing, but ensures timely shutdown if some I/O during attach
1818 : // is very slow.
1819 6 : let shutdown_mode = if matches!(self.current_state(), TenantState::Attaching) {
1820 0 : self.cancel.cancel();
1821 0 :
1822 0 : // Having fired our cancellation token, do not try and flush timelines: their cancellation tokens
1823 0 : // are children of ours, so their flush loops will have shut down already
1824 0 : timeline::ShutdownMode::Hard
1825 : } else {
1826 6 : shutdown_mode
1827 : };
1828 :
1829 6 : match self.set_stopping(shutdown_progress, false, false).await {
1830 6 : Ok(()) => {}
1831 0 : Err(SetStoppingError::Broken) => {
1832 0 : // assume that this is acceptable
1833 0 : }
1834 0 : Err(SetStoppingError::AlreadyStopping(other)) => {
1835 0 : // give the caller the option to wait for this shutdown
1836 0 : info!("Tenant::shutdown: AlreadyStopping");
1837 0 : return Err(other);
1838 : }
1839 : };
1840 :
1841 6 : let mut js = tokio::task::JoinSet::new();
1842 6 : {
1843 6 : let timelines = self.timelines.lock().unwrap();
1844 6 : timelines.values().for_each(|timeline| {
1845 6 : let timeline = Arc::clone(timeline);
1846 6 : let timeline_id = timeline.timeline_id;
1847 6 : let span = tracing::info_span!("timeline_shutdown", %timeline_id, ?shutdown_mode);
1848 14 : js.spawn(async move { timeline.shutdown(shutdown_mode).instrument(span).await });
1849 6 : })
1850 6 : };
1851 6 : // test_long_timeline_create_then_tenant_delete is leaning on this message
1852 6 : tracing::info!("Waiting for timelines...");
1853 12 : while let Some(res) = js.join_next().await {
1854 0 : match res {
1855 6 : Ok(()) => {}
1856 0 : Err(je) if je.is_cancelled() => unreachable!("no cancelling used"),
1857 0 : Err(je) if je.is_panic() => { /* logged already */ }
1858 0 : Err(je) => warn!("unexpected JoinError: {je:?}"),
1859 : }
1860 : }
1861 :
1862 : // We cancel the Tenant's cancellation token _after_ the timelines have all shut down. This permits
1863 : // them to continue to do work during their shutdown methods, e.g. flushing data.
1864 6 : tracing::debug!("Cancelling CancellationToken");
1865 6 : self.cancel.cancel();
1866 6 :
1867 6 : // shutdown all tenant and timeline tasks: gc, compaction, page service
1868 6 : // No new tasks will be started for this tenant because it's in `Stopping` state.
1869 6 : //
1870 6 : // this will additionally shutdown and await all timeline tasks.
1871 6 : tracing::debug!("Waiting for tasks...");
1872 6 : task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), None).await;
1873 :
1874 6 : if let Some(walredo_mgr) = self.walredo_mgr.as_ref() {
1875 6 : walredo_mgr.shutdown().await;
1876 0 : }
1877 :
1878 : // Wait for any in-flight operations to complete
1879 6 : self.gate.close().await;
1880 :
1881 6 : remove_tenant_metrics(&self.tenant_shard_id);
1882 6 :
1883 6 : Ok(())
1884 6 : }
1885 :
1886 : /// Change tenant status to Stopping, to mark that it is being shut down.
1887 : ///
1888 : /// This function waits for the tenant to become active if it isn't already, before transitioning it into Stopping state.
1889 : ///
1890 : /// This function is not cancel-safe!
1891 : ///
1892 : /// `allow_transition_from_loading` is needed for the special case of the loading task deleting the tenant.
1893 : /// `allow_transition_from_attaching` is needed for the special case of attaching a deleted tenant.
1894 6 : async fn set_stopping(
1895 6 : &self,
1896 6 : progress: completion::Barrier,
1897 6 : allow_transition_from_loading: bool,
1898 6 : allow_transition_from_attaching: bool,
1899 6 : ) -> Result<(), SetStoppingError> {
1900 6 : let mut rx = self.state.subscribe();
1901 6 :
1902 6 : // cannot stop before we're done activating, so wait out until we're done activating
1903 6 : rx.wait_for(|state| match state {
1904 0 : TenantState::Attaching if allow_transition_from_attaching => true,
1905 : TenantState::Activating(_) | TenantState::Attaching => {
1906 0 : info!(
1907 0 : "waiting for {} to turn Active|Broken|Stopping",
1908 0 : <&'static str>::from(state)
1909 : );
1910 0 : false
1911 : }
1912 0 : TenantState::Loading => allow_transition_from_loading,
1913 6 : TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => true,
1914 6 : })
1915 0 : .await
1916 6 : .expect("cannot drop self.state while on a &self method");
1917 6 :
1918 6 : // we now know we're done activating, let's see whether this task is the winner to transition into Stopping
1919 6 : let mut err = None;
1920 6 : let stopping = self.state.send_if_modified(|current_state| match current_state {
1921 : TenantState::Activating(_) => {
1922 0 : unreachable!("we ensured above that we're done with activation, and there is no re-activation (Activating)")
1923 : }
1924 : TenantState::Attaching => {
1925 0 : if !allow_transition_from_attaching {
1926 0 : unreachable!("2we ensured above that we're done with activation, and, there is no re-activation")
1927 0 : };
1928 0 : *current_state = TenantState::Stopping { progress };
1929 0 : true
1930 : }
1931 : TenantState::Loading => {
1932 0 : if !allow_transition_from_loading {
1933 0 : unreachable!("we ensured above that we're done with activation, and there is no re-activation (Loading)")
1934 0 : };
1935 0 : *current_state = TenantState::Stopping { progress };
1936 0 : true
1937 : }
1938 : TenantState::Active => {
1939 : // FIXME: due to time-of-check vs time-of-use issues, it can happen that new timelines
1940 : // are created after the transition to Stopping. That's harmless, as the Timelines
1941 : // won't be accessible to anyone afterwards, because the Tenant is in Stopping state.
1942 6 : *current_state = TenantState::Stopping { progress };
1943 6 : // Continue stopping outside the closure. We need to grab timelines.lock()
1944 6 : // and we plan to turn it into a tokio::sync::Mutex in a future patch.
1945 6 : true
1946 : }
1947 0 : TenantState::Broken { reason, .. } => {
1948 0 : info!(
1949 0 : "Cannot set tenant to Stopping state, it is in Broken state due to: {reason}"
1950 : );
1951 0 : err = Some(SetStoppingError::Broken);
1952 0 : false
1953 : }
1954 0 : TenantState::Stopping { progress } => {
1955 0 : info!("Tenant is already in Stopping state");
1956 0 : err = Some(SetStoppingError::AlreadyStopping(progress.clone()));
1957 0 : false
1958 : }
1959 6 : });
1960 6 : match (stopping, err) {
1961 6 : (true, None) => {} // continue
1962 0 : (false, Some(err)) => return Err(err),
1963 0 : (true, Some(_)) => unreachable!(
1964 0 : "send_if_modified closure must error out if not transitioning to Stopping"
1965 0 : ),
1966 0 : (false, None) => unreachable!(
1967 0 : "send_if_modified closure must return true if transitioning to Stopping"
1968 0 : ),
1969 : }
1970 :
1971 6 : let timelines_accessor = self.timelines.lock().unwrap();
1972 6 : let not_broken_timelines = timelines_accessor
1973 6 : .values()
1974 6 : .filter(|timeline| !timeline.is_broken());
1975 12 : for timeline in not_broken_timelines {
1976 6 : timeline.set_state(TimelineState::Stopping);
1977 6 : }
1978 6 : Ok(())
1979 6 : }
1980 :
1981 : /// Method for tenant::mgr to transition us into Broken state in case of a late failure in
1982 : /// `remove_tenant_from_memory`
1983 : ///
1984 : /// This function waits for the tenant to become active if it isn't already, before transitioning it into Broken state.
1985 : ///
1986 : /// In tests, we also use this to set tenants to Broken state on purpose.
1987 0 : pub(crate) async fn set_broken(&self, reason: String) {
1988 0 : let mut rx = self.state.subscribe();
1989 0 :
1990 0 : // The load & attach routines own the tenant state until it has reached `Active`.
1991 0 : // So, wait until it's done.
1992 0 : rx.wait_for(|state| match state {
1993 : TenantState::Activating(_) | TenantState::Loading | TenantState::Attaching => {
1994 0 : info!(
1995 0 : "waiting for {} to turn Active|Broken|Stopping",
1996 0 : <&'static str>::from(state)
1997 : );
1998 0 : false
1999 : }
2000 0 : TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => true,
2001 0 : })
2002 0 : .await
2003 0 : .expect("cannot drop self.state while on a &self method");
2004 0 :
2005 0 : // we now know we're done activating, let's see whether this task is the winner to transition into Broken
2006 0 : self.set_broken_no_wait(reason)
2007 0 : }
2008 :
2009 0 : pub(crate) fn set_broken_no_wait(&self, reason: impl Display) {
2010 0 : let reason = reason.to_string();
2011 0 : self.state.send_modify(|current_state| {
2012 0 : match *current_state {
2013 : TenantState::Activating(_) | TenantState::Loading | TenantState::Attaching => {
2014 0 : unreachable!("we ensured above that we're done with activation, and there is no re-activation")
2015 : }
2016 : TenantState::Active => {
2017 0 : if cfg!(feature = "testing") {
2018 0 : warn!("Changing Active tenant to Broken state, reason: {}", reason);
2019 0 : *current_state = TenantState::broken_from_reason(reason);
2020 : } else {
2021 0 : unreachable!("not allowed to call set_broken on Active tenants in non-testing builds")
2022 : }
2023 : }
2024 : TenantState::Broken { .. } => {
2025 0 : warn!("Tenant is already in Broken state");
2026 : }
2027 : // This is the only "expected" path, any other path is a bug.
2028 : TenantState::Stopping { .. } => {
2029 0 : warn!(
2030 0 : "Marking Stopping tenant as Broken state, reason: {}",
2031 : reason
2032 : );
2033 0 : *current_state = TenantState::broken_from_reason(reason);
2034 : }
2035 : }
2036 0 : });
2037 0 : }
2038 :
2039 0 : pub fn subscribe_for_state_updates(&self) -> watch::Receiver<TenantState> {
2040 0 : self.state.subscribe()
2041 0 : }
2042 :
2043 : /// The activate_now semaphore is initialized with zero units. As soon as
2044 : /// we add a unit, waiters will be able to acquire a unit and proceed.
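     : ///
     : /// (Waiters elsewhere are assumed to block on `activate_now_sem.acquire()`;
     : /// adding a permit here wakes one of them.)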
2045 0 : pub(crate) fn activate_now(&self) {
2046 0 : self.activate_now_sem.add_permits(1);
2047 0 : }
2048 :
2049 0 : pub(crate) async fn wait_to_become_active(
2050 0 : &self,
2051 0 : timeout: Duration,
2052 0 : ) -> Result<(), GetActiveTenantError> {
2053 0 : let mut receiver = self.state.subscribe();
2054 0 : loop {
2055 0 : let current_state = receiver.borrow_and_update().clone();
2056 0 : match current_state {
2057 : TenantState::Loading | TenantState::Attaching | TenantState::Activating(_) => {
2058 : // in these states, there's a chance that we can reach ::Active
2059 0 : self.activate_now();
2060 0 : match timeout_cancellable(timeout, &self.cancel, receiver.changed()).await {
2061 0 : Ok(r) => {
2062 0 : r.map_err(
2063 0 : |_e: tokio::sync::watch::error::RecvError|
2064 : // Tenant existed but was dropped: report it as non-existent
2065 0 : GetActiveTenantError::NotFound(GetTenantError::NotFound(self.tenant_shard_id.tenant_id))
2066 0 : )?
2067 : }
2068 : Err(TimeoutCancellableError::Cancelled) => {
2069 0 : return Err(GetActiveTenantError::Cancelled);
2070 : }
2071 : Err(TimeoutCancellableError::Timeout) => {
2072 0 : return Err(GetActiveTenantError::WaitForActiveTimeout {
2073 0 : latest_state: Some(self.current_state()),
2074 0 : wait_time: timeout,
2075 0 : });
2076 : }
2077 : }
2078 : }
2079 : TenantState::Active { .. } => {
2080 0 : return Ok(());
2081 : }
2082 0 : TenantState::Broken { reason, .. } => {
2083 0 : // This is fatal, and reported distinctly from the general case of "will never be active" because
2084 0 : // it's logically a 500 to external API users (broken is always a bug).
2085 0 : return Err(GetActiveTenantError::Broken(reason));
2086 : }
2087 : TenantState::Stopping { .. } => {
2088 : // There's no chance the tenant can transition back into ::Active
2089 0 : return Err(GetActiveTenantError::WillNotBecomeActive(current_state));
2090 : }
2091 : }
2092 : }
2093 0 : }
2094 :
2095 0 : pub(crate) fn get_attach_mode(&self) -> AttachmentMode {
2096 0 : self.tenant_conf.load().location.attach_mode
2097 0 : }
2098 :
2099 : /// For API access: generate a LocationConfig equivalent to the one that would be used to
2100 : /// create a Tenant in the same state. Do not use this in hot paths: it's for relatively
2101 : /// rare external API calls, like a reconciliation at startup.
2102 0 : pub(crate) fn get_location_conf(&self) -> models::LocationConfig {
2103 0 : let conf = self.tenant_conf.load();
2104 :
2105 0 : let location_config_mode = match conf.location.attach_mode {
2106 0 : AttachmentMode::Single => models::LocationConfigMode::AttachedSingle,
2107 0 : AttachmentMode::Multi => models::LocationConfigMode::AttachedMulti,
2108 0 : AttachmentMode::Stale => models::LocationConfigMode::AttachedStale,
2109 : };
2110 :
2111 : // We have a pageserver TenantConf, we need the API-facing TenantConfig.
2112 0 : let tenant_config: models::TenantConfig = conf.tenant_conf.clone().into();
2113 0 :
2114 0 : models::LocationConfig {
2115 0 : mode: location_config_mode,
2116 0 : generation: self.generation.into(),
2117 0 : secondary_conf: None,
2118 0 : shard_number: self.shard_identity.number.0,
2119 0 : shard_count: self.shard_identity.count.literal(),
2120 0 : shard_stripe_size: self.shard_identity.stripe_size.0,
2121 0 : tenant_conf: tenant_config,
2122 0 : }
2123 0 : }
2124 :
2125 0 : pub(crate) fn get_tenant_shard_id(&self) -> &TenantShardId {
2126 0 : &self.tenant_shard_id
2127 0 : }
2128 :
2129 0 : pub(crate) fn get_shard_stripe_size(&self) -> ShardStripeSize {
2130 0 : self.shard_identity.stripe_size
2131 0 : }
2132 :
2133 0 : pub(crate) fn get_generation(&self) -> Generation {
2134 0 : self.generation
2135 0 : }
2136 :
2137 : /// This function partially shuts down the tenant (it shuts down the Timelines) and is fallible,
2138 : /// and can leave the tenant in a bad state if it fails. The caller is responsible for
2139 : /// resetting this tenant to a valid state if we fail.
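     : ///
     : /// At a high level, for each timeline this: flushes and uploads the parent's
     : /// index, shuts down the timeline's remote client, re-downloads the persisted
     : /// index_part, and re-uploads it under each child shard's key.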
2140 0 : pub(crate) async fn split_prepare(
2141 0 : &self,
2142 0 : child_shards: &Vec<TenantShardId>,
2143 0 : ) -> anyhow::Result<()> {
2144 0 : let timelines = self.timelines.lock().unwrap().clone();
2145 0 : for timeline in timelines.values() {
2146 : // We do not block timeline creation/deletion during splits inside the pageserver: it is up to higher levels
2147 : // to ensure that a split is not started while timeline creation or deletion is in progress.
2148 :
2149 : // Upload an index from the parent: this is partly to provide freshness for the
2150 : // child tenants that will copy it, and partly for general ease-of-debugging: there will
2151 : // always be a parent shard index in the same generation as we wrote the child shard index.
2152 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Uploading index");
2153 0 : timeline
2154 0 : .remote_client
2155 0 : .schedule_index_upload_for_file_changes()?;
2156 0 : timeline.remote_client.wait_completion().await?;
2157 :
2158 : // Shut down the timeline's remote client: this means that the indices we write
2159 : // for child shards will not be invalidated by the parent shard deleting layers.
2160 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Shutting down remote storage client");
2161 0 : timeline.remote_client.shutdown().await;
2162 :
2163 : // Download methods can still be used after shutdown, as they don't flow through the remote client's
2164 : // queue. In principle the RemoteTimelineClient could provide this without downloading it, but this
2165 : // operation is rare, so it's simpler to just download it (which also robustly guarantees that the index
2166 : // we use here really is the remotely persistent one).
2167 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Downloading index_part from parent");
2168 0 : let result = timeline.remote_client
2169 0 : .download_index_file(&self.cancel)
2170 0 : .instrument(info_span!("download_index_file", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%timeline.timeline_id))
2171 0 : .await?;
2172 0 : let index_part = match result {
2173 : MaybeDeletedIndexPart::Deleted(_) => {
2174 0 : anyhow::bail!("Timeline deletion happened concurrently with split")
2175 : }
2176 0 : MaybeDeletedIndexPart::IndexPart(p) => p,
2177 : };
2178 :
2179 0 : for child_shard in child_shards {
2180 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Uploading index_part for child {}", child_shard.to_index());
2181 0 : upload_index_part(
2182 0 : &self.remote_storage,
2183 0 : child_shard,
2184 0 : &timeline.timeline_id,
2185 0 : self.generation,
2186 0 : &index_part,
2187 0 : &self.cancel,
2188 0 : )
2189 0 : .await?;
2190 : }
2191 : }
2192 :
2193 0 : Ok(())
2194 0 : }
2195 :
2196 0 : pub(crate) fn get_sizes(&self) -> TopTenantShardItem {
2197 0 : let mut result = TopTenantShardItem {
2198 0 : id: self.tenant_shard_id,
2199 0 : resident_size: 0,
2200 0 : physical_size: 0,
2201 0 : max_logical_size: 0,
2202 0 : };
2203 :
2204 0 : for timeline in self.timelines.lock().unwrap().values() {
2205 0 : result.resident_size += timeline.metrics.resident_physical_size_gauge.get();
2206 0 :
2207 0 : result.physical_size += timeline
2208 0 : .remote_client
2209 0 : .metrics
2210 0 : .remote_physical_size_gauge
2211 0 : .get();
2212 0 : result.max_logical_size = std::cmp::max(
2213 0 : result.max_logical_size,
2214 0 : timeline.metrics.current_logical_size_gauge.get(),
2215 0 : );
2216 0 : }
2217 :
2218 0 : result
2219 0 : }
2220 : }
2221 :
2222 : /// Given a Vec of timelines and their ancestors (timeline_id, ancestor_id),
2223 : /// perform a topological sort, so that the parent of each timeline comes
2224 : /// before the children.
2225 : /// `E` extracts the ancestor from `T`.
2226 : /// This allows `T` to vary: it can be `TimelineMetadata`, `Timeline` itself, etc.
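     : ///
     : /// For example (hypothetical IDs): given `main` with no ancestor, `foo` branched
     : /// from `main`, and `bar` branched from `foo`, the output order is `[main, foo, bar]`.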
2227 162 : fn tree_sort_timelines<T, E>(
2228 162 : timelines: HashMap<TimelineId, T>,
2229 162 : extractor: E,
2230 162 : ) -> anyhow::Result<Vec<(TimelineId, T)>>
2231 162 : where
2232 162 : E: Fn(&T) -> Option<TimelineId>,
2233 162 : {
2234 162 : let mut result = Vec::with_capacity(timelines.len());
2235 162 :
2236 162 : let mut now = Vec::with_capacity(timelines.len());
2237 162 : // (ancestor, children)
2238 162 : let mut later: HashMap<TimelineId, Vec<(TimelineId, T)>> =
2239 162 : HashMap::with_capacity(timelines.len());
2240 :
2241 168 : for (timeline_id, value) in timelines {
2242 6 : if let Some(ancestor_id) = extractor(&value) {
2243 2 : let children = later.entry(ancestor_id).or_default();
2244 2 : children.push((timeline_id, value));
2245 4 : } else {
2246 4 : now.push((timeline_id, value));
2247 4 : }
2248 : }
2249 :
2250 168 : while let Some((timeline_id, metadata)) = now.pop() {
2251 6 : result.push((timeline_id, metadata));
2252 : // All children of this can be loaded now
2253 6 : if let Some(mut children) = later.remove(&timeline_id) {
2254 2 : now.append(&mut children);
2255 4 : }
2256 : }
2257 :
2258 : // All timelines should have been visited by now, unless some timelines had missing ancestors.
2259 162 : if !later.is_empty() {
2260 0 : for (missing_id, orphan_ids) in later {
2261 0 : for (orphan_id, _) in orphan_ids {
2262 0 : error!("could not load timeline {orphan_id} because its ancestor timeline {missing_id} could not be loaded");
2263 : }
2264 : }
2265 0 : bail!("could not load tenant because some timelines are missing ancestors");
2266 162 : }
2267 162 :
2268 162 : Ok(result)
2269 162 : }
2270 :
2271 : impl Tenant {
2272 0 : pub fn tenant_specific_overrides(&self) -> TenantConfOpt {
2273 0 : self.tenant_conf.load().tenant_conf.clone()
2274 0 : }
2275 :
2276 0 : pub fn effective_config(&self) -> TenantConf {
2277 0 : self.tenant_specific_overrides()
2278 0 : .merge(self.conf.default_tenant_conf.clone())
2279 0 : }
2280 :
2281 0 : pub fn get_checkpoint_distance(&self) -> u64 {
2282 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2283 0 : tenant_conf
2284 0 : .checkpoint_distance
2285 0 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
2286 0 : }
2287 :
2288 0 : pub fn get_checkpoint_timeout(&self) -> Duration {
2289 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2290 0 : tenant_conf
2291 0 : .checkpoint_timeout
2292 0 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
2293 0 : }
2294 :
2295 0 : pub fn get_compaction_target_size(&self) -> u64 {
2296 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2297 0 : tenant_conf
2298 0 : .compaction_target_size
2299 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
2300 0 : }
2301 :
2302 0 : pub fn get_compaction_period(&self) -> Duration {
2303 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2304 0 : tenant_conf
2305 0 : .compaction_period
2306 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_period)
2307 0 : }
2308 :
2309 0 : pub fn get_compaction_threshold(&self) -> usize {
2310 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2311 0 : tenant_conf
2312 0 : .compaction_threshold
2313 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
2314 0 : }
2315 :
2316 0 : pub fn get_gc_horizon(&self) -> u64 {
2317 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2318 0 : tenant_conf
2319 0 : .gc_horizon
2320 0 : .unwrap_or(self.conf.default_tenant_conf.gc_horizon)
2321 0 : }
2322 :
2323 0 : pub fn get_gc_period(&self) -> Duration {
2324 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2325 0 : tenant_conf
2326 0 : .gc_period
2327 0 : .unwrap_or(self.conf.default_tenant_conf.gc_period)
2328 0 : }
2329 :
2330 0 : pub fn get_image_creation_threshold(&self) -> usize {
2331 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2332 0 : tenant_conf
2333 0 : .image_creation_threshold
2334 0 : .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
2335 0 : }
2336 :
2337 0 : pub fn get_pitr_interval(&self) -> Duration {
2338 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2339 0 : tenant_conf
2340 0 : .pitr_interval
2341 0 : .unwrap_or(self.conf.default_tenant_conf.pitr_interval)
2342 0 : }
2343 :
2344 0 : pub fn get_trace_read_requests(&self) -> bool {
2345 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2346 0 : tenant_conf
2347 0 : .trace_read_requests
2348 0 : .unwrap_or(self.conf.default_tenant_conf.trace_read_requests)
2349 0 : }
2350 :
2351 0 : pub fn get_min_resident_size_override(&self) -> Option<u64> {
2352 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2353 0 : tenant_conf
2354 0 : .min_resident_size_override
2355 0 : .or(self.conf.default_tenant_conf.min_resident_size_override)
2356 0 : }
2357 :
2358 0 : pub fn get_heatmap_period(&self) -> Option<Duration> {
2359 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2360 0 : let heatmap_period = tenant_conf
2361 0 : .heatmap_period
2362 0 : .unwrap_or(self.conf.default_tenant_conf.heatmap_period);
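     : // A zero period is treated as "disabled" and surfaced as None.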
2363 0 : if heatmap_period.is_zero() {
2364 0 : None
2365 : } else {
2366 0 : Some(heatmap_period)
2367 : }
2368 0 : }
2369 :
2370 0 : pub fn get_lsn_lease_length(&self) -> Duration {
2371 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2372 0 : tenant_conf
2373 0 : .lsn_lease_length
2374 0 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
2375 0 : }
2376 :
2377 0 : pub fn set_new_tenant_config(&self, new_tenant_conf: TenantConfOpt) {
2378 0 : // Use read-copy-update in order to avoid overwriting the location config
2379 0 : // state if this races with [`Tenant::set_new_location_config`]. Note that
2380 0 : // this race is not possible if both request types come from the storage
2381 0 : // controller (as they should!) because an exclusive op lock is required
2382 0 : // on the storage controller side.
2383 0 : self.tenant_conf.rcu(|inner| {
2384 0 : Arc::new(AttachedTenantConf {
2385 0 : tenant_conf: new_tenant_conf.clone(),
2386 0 : location: inner.location,
2387 0 : })
2388 0 : });
2389 0 :
2390 0 : self.tenant_conf_updated(&new_tenant_conf);
2391 0 : // Don't hold self.timelines.lock() during the notifies.
2392 0 : // There's no risk of deadlock right now, but there could be if we consolidate
2393 0 : // mutexes in struct Timeline in the future.
2394 0 : let timelines = self.list_timelines();
2395 0 : for timeline in timelines {
2396 0 : timeline.tenant_conf_updated(&new_tenant_conf);
2397 0 : }
2398 0 : }
2399 :
2400 8 : pub(crate) fn set_new_location_config(&self, new_conf: AttachedTenantConf) {
2401 8 : let new_tenant_conf = new_conf.tenant_conf.clone();
2402 8 :
2403 8 : self.tenant_conf.store(Arc::new(new_conf));
2404 8 :
2405 8 : self.tenant_conf_updated(&new_tenant_conf);
2406 8 : // Don't hold self.timelines.lock() during the notifies.
2407 8 : // There's no risk of deadlock right now, but there could be if we consolidate
2408 8 : // mutexes in struct Timeline in the future.
2409 8 : let timelines = self.list_timelines();
2410 16 : for timeline in timelines {
2411 8 : timeline.tenant_conf_updated(&new_tenant_conf);
2412 8 : }
2413 8 : }
2414 :
2415 170 : fn get_timeline_get_throttle_config(
2416 170 : psconf: &'static PageServerConf,
2417 170 : overrides: &TenantConfOpt,
2418 170 : ) -> throttle::Config {
2419 170 : overrides
2420 170 : .timeline_get_throttle
2421 170 : .clone()
2422 170 : .unwrap_or(psconf.default_tenant_conf.timeline_get_throttle.clone())
2423 170 : }
2424 :
2425 8 : pub(crate) fn tenant_conf_updated(&self, new_conf: &TenantConfOpt) {
2426 8 : let conf = Self::get_timeline_get_throttle_config(self.conf, new_conf);
2427 8 : self.timeline_get_throttle.reconfigure(conf)
2428 8 : }
2429 :
2430 : /// Helper function to create a new Timeline struct.
2431 : ///
2432 : /// The returned Timeline is in Loading state. The caller is responsible for
2433 : /// initializing any on-disk state, and for inserting the Timeline to the 'timelines'
2434 : /// map.
2435 : ///
2436 : /// `validate_ancestor == false` is used when a timeline is created for deletion
2437 : /// and we might not have the ancestor present anymore, which is fine for
2438 : /// timelines that are about to be deleted.
2439 384 : fn create_timeline_struct(
2440 384 : &self,
2441 384 : new_timeline_id: TimelineId,
2442 384 : new_metadata: &TimelineMetadata,
2443 384 : ancestor: Option<Arc<Timeline>>,
2444 384 : resources: TimelineResources,
2445 384 : cause: CreateTimelineCause,
2446 384 : last_aux_file_policy: Option<AuxFilePolicy>,
2447 384 : ) -> anyhow::Result<Arc<Timeline>> {
2448 384 : let state = match cause {
2449 : CreateTimelineCause::Load => {
2450 384 : let ancestor_id = new_metadata.ancestor_timeline();
2451 384 : anyhow::ensure!(
2452 384 : ancestor_id == ancestor.as_ref().map(|t| t.timeline_id),
2453 0 : "Timeline's {new_timeline_id} ancestor {ancestor_id:?} was not found"
2454 : );
2455 384 : TimelineState::Loading
2456 : }
2457 0 : CreateTimelineCause::Delete => TimelineState::Stopping,
2458 : };
2459 :
2460 384 : let pg_version = new_metadata.pg_version();
2461 384 :
2462 384 : let timeline = Timeline::new(
2463 384 : self.conf,
2464 384 : Arc::clone(&self.tenant_conf),
2465 384 : new_metadata,
2466 384 : ancestor,
2467 384 : new_timeline_id,
2468 384 : self.tenant_shard_id,
2469 384 : self.generation,
2470 384 : self.shard_identity,
2471 384 : self.walredo_mgr.clone(),
2472 384 : resources,
2473 384 : pg_version,
2474 384 : state,
2475 384 : last_aux_file_policy,
2476 384 : self.cancel.child_token(),
2477 384 : );
2478 384 :
2479 384 : Ok(timeline)
2480 384 : }
2481 :
2482 : // Allow too_many_arguments because a constructor's argument list naturally grows with the
2483 : // number of attributes in the struct: breaking these out into a builder wouldn't be helpful.
2484 : #[allow(clippy::too_many_arguments)]
2485 162 : fn new(
2486 162 : state: TenantState,
2487 162 : conf: &'static PageServerConf,
2488 162 : attached_conf: AttachedTenantConf,
2489 162 : shard_identity: ShardIdentity,
2490 162 : walredo_mgr: Option<Arc<WalRedoManager>>,
2491 162 : tenant_shard_id: TenantShardId,
2492 162 : remote_storage: GenericRemoteStorage,
2493 162 : deletion_queue_client: DeletionQueueClient,
2494 162 : l0_flush_global_state: L0FlushGlobalState,
2495 162 : ) -> Tenant {
2496 162 : debug_assert!(
2497 162 : !attached_conf.location.generation.is_none() || conf.control_plane_api.is_none()
2498 : );
2499 :
2500 162 : let (state, mut rx) = watch::channel(state);
2501 162 :
2502 162 : tokio::spawn(async move {
2503 162 : // reflect tenant state in metrics:
2504 162 : // - global per tenant state: TENANT_STATE_METRIC
2505 162 : // - "set" of broken tenants: BROKEN_TENANTS_SET
2506 162 : //
2507 162 : // set of broken tenants should not have zero counts so that it remains accessible for
2508 162 : // alerting.
2509 162 :
2510 162 : let tid = tenant_shard_id.to_string();
2511 162 : let shard_id = tenant_shard_id.shard_slug().to_string();
2512 162 : let set_key = &[tid.as_str(), shard_id.as_str()][..];
2513 162 :
2514 322 : fn inspect_state(state: &TenantState) -> ([&'static str; 1], bool) {
2515 322 : ([state.into()], matches!(state, TenantState::Broken { .. }))
2516 322 : }
2517 162 :
2518 162 : let mut tuple = inspect_state(&rx.borrow_and_update());
2519 162 :
2520 162 : let is_broken = tuple.1;
2521 162 : let mut counted_broken = if is_broken {
2522 : // add the id to the set right away; there should not be any updates on the channel
2523 : // before the tenant is removed, if ever
2524 0 : BROKEN_TENANTS_SET.with_label_values(set_key).set(1);
2525 0 : true
2526 : } else {
2527 162 : false
2528 : };
2529 :
2530 322 : loop {
2531 322 : let labels = &tuple.0;
2532 322 : let current = TENANT_STATE_METRIC.with_label_values(labels);
2533 322 : current.inc();
2534 322 :
2535 322 : if rx.changed().await.is_err() {
2536 : // tenant has been dropped
2537 16 : current.dec();
2538 16 : drop(BROKEN_TENANTS_SET.remove_label_values(set_key));
2539 16 : break;
2540 160 : }
2541 160 :
2542 160 : current.dec();
2543 160 : tuple = inspect_state(&rx.borrow_and_update());
2544 160 :
2545 160 : let is_broken = tuple.1;
2546 160 : if is_broken && !counted_broken {
2547 0 : counted_broken = true;
2548 0 : // insert the tenant_id (back) into the set while avoiding needless counter
2549 0 : // access
2550 0 : BROKEN_TENANTS_SET.with_label_values(set_key).set(1);
2551 160 : }
2552 : }
2553 162 : });
2554 162 :
2555 162 : Tenant {
2556 162 : tenant_shard_id,
2557 162 : shard_identity,
2558 162 : generation: attached_conf.location.generation,
2559 162 : conf,
2560 162 : // using now here is a good enough approximation to catch tenants with really long
2561 162 : // activation times.
2562 162 : constructed_at: Instant::now(),
2563 162 : timelines: Mutex::new(HashMap::new()),
2564 162 : timelines_creating: Mutex::new(HashSet::new()),
2565 162 : gc_cs: tokio::sync::Mutex::new(()),
2566 162 : walredo_mgr,
2567 162 : remote_storage,
2568 162 : deletion_queue_client,
2569 162 : state,
2570 162 : cached_logical_sizes: tokio::sync::Mutex::new(HashMap::new()),
2571 162 : cached_synthetic_tenant_size: Arc::new(AtomicU64::new(0)),
2572 162 : eviction_task_tenant_state: tokio::sync::Mutex::new(EvictionTaskTenantState::default()),
2573 162 : activate_now_sem: tokio::sync::Semaphore::new(0),
2574 162 : cancel: CancellationToken::default(),
2575 162 : gate: Gate::default(),
2576 162 : timeline_get_throttle: Arc::new(throttle::Throttle::new(
2577 162 : Tenant::get_timeline_get_throttle_config(conf, &attached_conf.tenant_conf),
2578 162 : &crate::metrics::tenant_throttling::TIMELINE_GET,
2579 162 : )),
2580 162 : tenant_conf: Arc::new(ArcSwap::from_pointee(attached_conf)),
2581 162 : ongoing_timeline_detach: std::sync::Mutex::default(),
2582 162 : l0_flush_global_state,
2583 162 : }
2584 162 : }
2585 :
2586 : /// Locate and load config
2587 0 : pub(super) fn load_tenant_config(
2588 0 : conf: &'static PageServerConf,
2589 0 : tenant_shard_id: &TenantShardId,
2590 0 : ) -> Result<LocationConf, LoadConfigError> {
2591 0 : let config_path = conf.tenant_location_config_path(tenant_shard_id);
2592 0 :
2593 0 : info!("loading tenant configuration from {config_path}");
2594 :
2595 : // load and parse file
2596 0 : let config = fs::read_to_string(&config_path).map_err(|e| {
2597 0 : match e.kind() {
2598 : std::io::ErrorKind::NotFound => {
2599 : // The config should almost always exist for a tenant directory:
2600 : // - When attaching a tenant, the config is the first thing we write
2601 : // - When detaching a tenant, we atomically move the directory to a tmp location
2602 : // before deleting contents.
2603 : //
2604 : // The very rare edge case that can result in a missing config is if we crash during attach
2605 : // between creating directory and writing config. Callers should handle that as if the
2606 : // directory didn't exist.
2607 :
2608 0 : LoadConfigError::NotFound(config_path)
2609 : }
2610 : _ => {
2611 : // No IO errors except NotFound are acceptable here: other kinds of error indicate local storage or permissions issues
2612 : // that we cannot cleanly recover
2613 0 : crate::virtual_file::on_fatal_io_error(&e, "Reading tenant config file")
2614 : }
2615 : }
2616 0 : })?;
2617 :
2618 0 : Ok(toml_edit::de::from_str::<LocationConf>(&config)?)
2619 0 : }
2620 :
2621 0 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
2622 : pub(super) async fn persist_tenant_config(
2623 : conf: &'static PageServerConf,
2624 : tenant_shard_id: &TenantShardId,
2625 : location_conf: &LocationConf,
2626 : ) -> std::io::Result<()> {
2627 : let config_path = conf.tenant_location_config_path(tenant_shard_id);
2628 :
2629 : Self::persist_tenant_config_at(tenant_shard_id, &config_path, location_conf).await
2630 : }
2631 :
2632 0 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
2633 : pub(super) async fn persist_tenant_config_at(
2634 : tenant_shard_id: &TenantShardId,
2635 : config_path: &Utf8Path,
2636 : location_conf: &LocationConf,
2637 : ) -> std::io::Result<()> {
2638 : debug!("persisting tenantconf to {config_path}");
2639 :
2640 : let mut conf_content = r#"# This file contains a specific per-tenant's config.
2641 : # It is read in case of pageserver restart.
2642 : "#
2643 : .to_string();
2644 :
2645 0 : fail::fail_point!("tenant-config-before-write", |_| {
2646 0 : Err(std::io::Error::new(
2647 0 : std::io::ErrorKind::Other,
2648 0 : "tenant-config-before-write",
2649 0 : ))
2650 0 : });
2651 :
2652 : // Convert the config to a toml file.
2653 : conf_content +=
2654 : &toml_edit::ser::to_string_pretty(&location_conf).expect("Config serialization failed");
2655 :
2656 : let temp_path = path_with_suffix_extension(config_path, TEMP_FILE_SUFFIX);
2657 :
2658 : let conf_content = conf_content.into_bytes();
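     : // Assumed semantics of crashsafe_overwrite: write to `temp_path` first, then
     : // atomically rename over `config_path`, so a crash mid-write cannot leave a
     : // torn config file.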
2659 : VirtualFile::crashsafe_overwrite(config_path.to_owned(), temp_path, conf_content).await
2660 : }
2661 :
2662 : //
2663 : // How garbage collection works:
2664 : //
2665 : // +--bar------------->
2666 : // /
2667 : // +----+-----foo---------------->
2668 : // /
2669 : // ----main--+-------------------------->
2670 : // \
2671 : // +-----baz-------->
2672 : //
2673 : //
2674 : // 1. Grab 'gc_cs' mutex to prevent new timelines from being created while Timeline's
2675 : // `gc_infos` are being refreshed
2676 : // 2. Scan collected timelines, and on each timeline, make note of
2677 : // all the points where other timelines have been branched off.
2678 : // We will refrain from removing page versions at those LSNs.
2679 : // 3. For each timeline, scan all layer files on the timeline.
2680 : // Remove all files for which a newer file exists and which
2681 : // don't cover any branch point LSNs.
2682 : //
2683 : // TODO:
2684 : // - if a relation has a non-incremental persistent layer on a child branch, then we
2685 : // don't need to keep that in the parent anymore. But currently
2686 : // we do.
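     : //
     : // Roughly, steps 1-2 above happen in `refresh_gc_info_internal` below, and
     : // step 3 happens inside each `Timeline::gc` call.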
2687 754 : async fn gc_iteration_internal(
2688 754 : &self,
2689 754 : target_timeline_id: Option<TimelineId>,
2690 754 : horizon: u64,
2691 754 : pitr: Duration,
2692 754 : cancel: &CancellationToken,
2693 754 : ctx: &RequestContext,
2694 754 : ) -> Result<GcResult, GcError> {
2695 754 : let mut totals: GcResult = Default::default();
2696 754 : let now = Instant::now();
2697 :
2698 754 : let gc_timelines = self
2699 754 : .refresh_gc_info_internal(target_timeline_id, horizon, pitr, cancel, ctx)
2700 746 : .await?;
2701 :
2702 754 : failpoint_support::sleep_millis_async!("gc_iteration_internal_after_getting_gc_timelines");
2703 :
2704 : // If there is nothing to GC, we don't want any messages in the INFO log.
2705 754 : if !gc_timelines.is_empty() {
2706 754 : info!("{} timelines need GC", gc_timelines.len());
2707 : } else {
2708 0 : debug!("{} timelines need GC", gc_timelines.len());
2709 : }
2710 :
2711 : // Perform GC for each timeline.
2712 : //
2713 : // Note that we don't hold the `Tenant::gc_cs` lock here because we don't want to delay the
2714 : // branch creation task, which requires the GC lock. A GC iteration can run concurrently
2715 : // with branch creation.
2716 : //
2717 : // See comments in [`Tenant::branch_timeline`] for more information about why branch
2718 : // creation task can run concurrently with timeline's GC iteration.
2719 1508 : for timeline in gc_timelines {
2720 754 : if cancel.is_cancelled() {
2721 : // We were requested to shut down. Stop and return with the progress we
2722 : // made.
2723 0 : break;
2724 754 : }
2725 754 : let result = match timeline.gc().await {
2726 : Err(GcError::TimelineCancelled) => {
2727 0 : if target_timeline_id.is_some() {
2728 : // If we were targeting this specific timeline, surface cancellation to the caller
2729 0 : return Err(GcError::TimelineCancelled);
2730 : } else {
2731 : // A timeline may be shutting down independently of the tenant's lifecycle: we should
2732 : // skip past this and proceed to try GC on other timelines.
2733 0 : continue;
2734 : }
2735 : }
2736 754 : r => r?,
2737 : };
2738 754 : totals += result;
2739 : }
2740 :
2741 754 : totals.elapsed = now.elapsed();
2742 754 : Ok(totals)
2743 754 : }
2744 :
2745 : /// Refreshes the Timeline::gc_info for all timelines, returning the
2746 : /// vector of timelines which have [`Timeline::get_last_record_lsn`] past
2747 : /// [`Tenant::get_gc_horizon`].
2748 : ///
2749 : /// This is usually executed as part of periodic gc, but can now be triggered more often.
2750 0 : pub(crate) async fn refresh_gc_info(
2751 0 : &self,
2752 0 : cancel: &CancellationToken,
2753 0 : ctx: &RequestContext,
2754 0 : ) -> Result<Vec<Arc<Timeline>>, GcError> {
2755 0 : // since this method can now be called at different rates than the configured gc loop,
2756 0 : // these configuration values may get applied faster than before, when they were
2757 0 : // only read by the gc task.
2758 0 : let horizon = self.get_gc_horizon();
2759 0 : let pitr = self.get_pitr_interval();
2760 0 :
2761 0 : // refresh all timelines
2762 0 : let target_timeline_id = None;
2763 0 :
2764 0 : self.refresh_gc_info_internal(target_timeline_id, horizon, pitr, cancel, ctx)
2765 0 : .await
2766 0 : }
2767 :
2768 754 : async fn refresh_gc_info_internal(
2769 754 : &self,
2770 754 : target_timeline_id: Option<TimelineId>,
2771 754 : horizon: u64,
2772 754 : pitr: Duration,
2773 754 : cancel: &CancellationToken,
2774 754 : ctx: &RequestContext,
2775 754 : ) -> Result<Vec<Arc<Timeline>>, GcError> {
2776 754 : // before taking the gc_cs lock, do the heavier-weight work of finding gc_cutoff points
2777 754 : // for the currently visible timelines.
2778 754 : let timelines = self
2779 754 : .timelines
2780 754 : .lock()
2781 754 : .unwrap()
2782 754 : .values()
2783 3310 : .filter(|tl| match target_timeline_id.as_ref() {
2784 3310 : Some(target) => &tl.timeline_id == target,
2785 0 : None => true,
2786 3310 : })
2787 754 : .cloned()
2788 754 : .collect::<Vec<_>>();
2789 754 :
2790 754 : let mut gc_cutoffs: HashMap<TimelineId, GcCutoffs> =
2791 754 : HashMap::with_capacity(timelines.len());
2792 :
2793 754 : for timeline in timelines.iter() {
2794 754 : let cutoff = timeline
2795 754 : .get_last_record_lsn()
2796 754 : .checked_sub(horizon)
2797 754 : .unwrap_or(Lsn(0));
2798 :
2799 754 : let cutoffs = timeline.find_gc_cutoffs(cutoff, pitr, cancel, ctx).await?;
2800 754 : let old = gc_cutoffs.insert(timeline.timeline_id, cutoffs);
2801 754 : assert!(old.is_none());
2802 : }
2803 :
2804 754 : if !self.is_active() || self.cancel.is_cancelled() {
2805 0 : return Err(GcError::TenantCancelled);
2806 754 : }
2807 :
2808 : // grab mutex to prevent new timelines from being created here; avoid doing long operations
2809 : // because that will stall branch creation.
2810 754 : let gc_cs = self.gc_cs.lock().await;
2811 :
2812 : // Scan all timelines. For each timeline, remember the timeline ID and
2813 : // the branch point where it was created.
2814 754 : let (all_branchpoints, timelines): (BTreeSet<(TimelineId, Lsn)>, _) = {
2815 754 : let timelines = self.timelines.lock().unwrap();
2816 754 : let mut all_branchpoints = BTreeSet::new();
2817 754 : let timelines = {
2818 754 : if let Some(target_timeline_id) = target_timeline_id.as_ref() {
2819 754 : if timelines.get(target_timeline_id).is_none() {
2820 0 : return Err(GcError::TimelineNotFound);
2821 754 : }
2822 0 : };
2823 :
2824 754 : timelines
2825 754 : .iter()
2826 3310 : .map(|(_timeline_id, timeline_entry)| {
2827 2556 : if let Some(ancestor_timeline_id) =
2828 3310 : &timeline_entry.get_ancestor_timeline_id()
2829 : {
2830 : // If target_timeline is specified, we only need to know branchpoints of its children
2831 2556 : if let Some(timeline_id) = target_timeline_id {
2832 2556 : if ancestor_timeline_id == &timeline_id {
2833 6 : all_branchpoints.insert((
2834 6 : *ancestor_timeline_id,
2835 6 : timeline_entry.get_ancestor_lsn(),
2836 6 : ));
2837 2550 : }
2838 : }
2839 : // Collect branchpoints for all timelines
2840 0 : else {
2841 0 : all_branchpoints.insert((
2842 0 : *ancestor_timeline_id,
2843 0 : timeline_entry.get_ancestor_lsn(),
2844 0 : ));
2845 0 : }
2846 754 : }
2847 :
2848 3310 : timeline_entry.clone()
2849 3310 : })
2850 754 : .collect::<Vec<_>>()
2851 754 : };
2852 754 : (all_branchpoints, timelines)
2853 754 : };
2854 754 :
2855 754 : // Ok, we now know all the branch points.
2856 754 : // Update the GC information for each timeline.
2857 754 : let mut gc_timelines = Vec::with_capacity(timelines.len());
2858 4064 : for timeline in timelines {
2859 : // If target_timeline is specified, ignore all other timelines
2860 3310 : if let Some(target_timeline_id) = target_timeline_id {
2861 3310 : if timeline.timeline_id != target_timeline_id {
2862 2556 : continue;
2863 754 : }
2864 0 : }
2865 :
2866 754 : let branchpoints: Vec<Lsn> = all_branchpoints
2867 754 : .range((
2868 754 : Included((timeline.timeline_id, Lsn(0))),
2869 754 : Included((timeline.timeline_id, Lsn(u64::MAX))),
2870 754 : ))
2871 754 : .map(|&x| x.1)
2872 754 : .collect();
2873 754 :
2874 754 : {
2875 754 : let mut target = timeline.gc_info.write().unwrap();
2876 754 :
2877 754 : let now = SystemTime::now();
2878 754 : target.leases.retain(|_, lease| !lease.is_expired(&now));
2879 754 :
2880 754 : timeline
2881 754 : .metrics
2882 754 : .valid_lsn_lease_count_gauge
2883 754 : .set(target.leases.len() as u64);
2884 754 :
2885 754 : match gc_cutoffs.remove(&timeline.timeline_id) {
2886 754 : Some(cutoffs) => {
2887 754 : target.retain_lsns = branchpoints;
2888 754 : target.cutoffs = cutoffs;
2889 754 : }
2890 0 : None => {
2891 0 : // reasons for this being unavailable:
2892 0 : // - this timeline was created while we were finding cutoffs
2893 0 : // - lsn for timestamp search fails for this timeline repeatedly
2894 0 : //
2895 0 : // in both cases, refreshing the branchpoints is correct.
2896 0 : target.retain_lsns = branchpoints;
2897 0 : }
2898 : };
2899 : }
2900 :
2901 754 : gc_timelines.push(timeline);
2902 : }
2903 754 : drop(gc_cs);
2904 754 : Ok(gc_timelines)
2905 754 : }
2906 :
2907 : /// A substitute for `branch_timeline` for use in unit tests.
2908 : /// The returned timeline will have state value `Active` to make various `anyhow::ensure!()`
2909 : /// calls pass, but, we do not actually call `.activate()` under the hood. So, none of the
2910 : /// timeline background tasks are launched, except the flush loop.
2911 : #[cfg(test)]
2912 228 : async fn branch_timeline_test(
2913 228 : &self,
2914 228 : src_timeline: &Arc<Timeline>,
2915 228 : dst_id: TimelineId,
2916 228 : ancestor_lsn: Option<Lsn>,
2917 228 : ctx: &RequestContext,
2918 228 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
2919 228 : let create_guard = self.create_timeline_create_guard(dst_id).unwrap();
2920 228 : let tl = self
2921 228 : .branch_timeline_impl(src_timeline, dst_id, ancestor_lsn, create_guard, ctx)
2922 4 : .await?;
2923 224 : tl.set_state(TimelineState::Active);
2924 224 : Ok(tl)
2925 228 : }
2926 :
2927 : /// Helper for unit tests to branch a timeline with some pre-loaded states.
2928 : #[cfg(test)]
2929 : #[allow(clippy::too_many_arguments)]
2930 4 : pub async fn branch_timeline_test_with_layers(
2931 4 : &self,
2932 4 : src_timeline: &Arc<Timeline>,
2933 4 : dst_id: TimelineId,
2934 4 : ancestor_lsn: Option<Lsn>,
2935 4 : ctx: &RequestContext,
2936 4 : delta_layer_desc: Vec<Vec<(pageserver_api::key::Key, Lsn, crate::repository::Value)>>,
2937 4 : image_layer_desc: Vec<(Lsn, Vec<(pageserver_api::key::Key, bytes::Bytes)>)>,
2938 4 : end_lsn: Lsn,
2939 4 : ) -> anyhow::Result<Arc<Timeline>> {
2940 4 : let tline = self
2941 4 : .branch_timeline_test(src_timeline, dst_id, ancestor_lsn, ctx)
2942 0 : .await?;
2943 4 : let ancestor_lsn = if let Some(ancestor_lsn) = ancestor_lsn {
2944 4 : ancestor_lsn
2945 : } else {
2946 0 : tline.get_last_record_lsn()
2947 : };
2948 4 : assert!(end_lsn >= ancestor_lsn);
2949 4 : tline.force_advance_lsn(end_lsn);
2950 4 : for deltas in delta_layer_desc {
2951 0 : tline
2952 0 : .force_create_delta_layer(deltas, Some(ancestor_lsn), ctx)
2953 0 : .await?;
2954 : }
2955 8 : for (lsn, images) in image_layer_desc {
2956 4 : tline
2957 4 : .force_create_image_layer(lsn, images, Some(ancestor_lsn), ctx)
2958 14 : .await?;
2959 : }
2960 4 : Ok(tline)
2961 4 : }
2962 :
2963 : /// Branch an existing timeline.
2964 : ///
2965 : /// The caller is responsible for activating the returned timeline.
2966 0 : async fn branch_timeline(
2967 0 : &self,
2968 0 : src_timeline: &Arc<Timeline>,
2969 0 : dst_id: TimelineId,
2970 0 : start_lsn: Option<Lsn>,
2971 0 : timeline_create_guard: TimelineCreateGuard<'_>,
2972 0 : ctx: &RequestContext,
2973 0 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
2974 0 : self.branch_timeline_impl(src_timeline, dst_id, start_lsn, timeline_create_guard, ctx)
2975 0 : .await
2976 0 : }
2977 :
2978 228 : async fn branch_timeline_impl(
2979 228 : &self,
2980 228 : src_timeline: &Arc<Timeline>,
2981 228 : dst_id: TimelineId,
2982 228 : start_lsn: Option<Lsn>,
2983 228 : timeline_create_guard: TimelineCreateGuard<'_>,
2984 228 : _ctx: &RequestContext,
2985 228 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
2986 228 : let src_id = src_timeline.timeline_id;
2987 :
2988 : // We will validate our ancestor LSN in this function. Acquire the GC lock so that
2989 : // this check cannot race with GC, and the ancestor LSN is guaranteed to remain
2990 : // valid while we are creating the branch.
2991 228 : let _gc_cs = self.gc_cs.lock().await;
2992 :
2993 : // If no start LSN is specified, we branch the new timeline from the source timeline's last record LSN
2994 228 : let start_lsn = start_lsn.unwrap_or_else(|| {
2995 2 : let lsn = src_timeline.get_last_record_lsn();
2996 2 : info!("branching timeline {dst_id} from timeline {src_id} at last record LSN: {lsn}");
2997 2 : lsn
2998 228 : });
2999 228 :
3000 228 : // Ensure that `start_lsn` is valid, i.e. the LSN is within the PITR
3001 228 : // horizon on the source timeline
3002 228 : //
3003 228 : // We check it against both the planned GC cutoff stored in 'gc_info',
3004 228 : // and the 'latest_gc_cutoff' of the last GC that was performed. The
3005 228 : // planned GC cutoff in 'gc_info' is normally larger than
3006 228 : // 'latest_gc_cutoff_lsn', but beware of corner cases: for example, the
3007 228 : // tenant's GC settings may have just been changed to widen the PITR
3008 228 : // window while some of the data had already been removed by an earlier
3009 228 : // GC iteration.
3010 228 :
3011 228 : // check against last actual 'latest_gc_cutoff' first
3012 228 : let latest_gc_cutoff_lsn = src_timeline.get_latest_gc_cutoff_lsn();
3013 228 : src_timeline
3014 228 : .check_lsn_is_in_scope(start_lsn, &latest_gc_cutoff_lsn)
3015 228 : .context(format!(
3016 228 : "invalid branch start lsn: less than latest GC cutoff {}",
3017 228 : *latest_gc_cutoff_lsn,
3018 228 : ))
3019 228 : .map_err(CreateTimelineError::AncestorLsn)?;
3020 :
3021 : // and then the planned GC cutoff
3022 : {
3023 224 : let gc_info = src_timeline.gc_info.read().unwrap();
3024 224 : let cutoff = gc_info.min_cutoff();
3025 224 : if start_lsn < cutoff {
3026 0 : return Err(CreateTimelineError::AncestorLsn(anyhow::anyhow!(
3027 0 : "invalid branch start lsn: less than planned GC cutoff {cutoff}"
3028 0 : )));
3029 224 : }
3030 224 : }
3031 224 :
3032 224 : //
3033 224 : // The branch point is valid, and we are still holding the 'gc_cs' lock
3034 224 : // so that GC cannot advance the GC cutoff until we are finished.
3035 224 : // Proceed with the branch creation.
3036 224 : //
3037 224 :
3038 224 : // Determine prev-LSN for the new timeline. We can only determine it if
3039 224 : // the timeline was branched at the current end of the source timeline.
3040 224 : let RecordLsn {
3041 224 : last: src_last,
3042 224 : prev: src_prev,
3043 224 : } = src_timeline.get_last_record_rlsn();
3044 224 : let dst_prev = if src_last == start_lsn {
3045 214 : Some(src_prev)
3046 : } else {
3047 10 : None
3048 : };
3049 :
3050 : // Create the metadata file, noting the ancestor of the new timeline.
3051 : // There is initially no data in it, but all the read-calls know to look
3052 : // into the ancestor.
3053 224 : let metadata = TimelineMetadata::new(
3054 224 : start_lsn,
3055 224 : dst_prev,
3056 224 : Some(src_id),
3057 224 : start_lsn,
3058 224 : *src_timeline.latest_gc_cutoff_lsn.read(), // FIXME: should we hold onto this guard longer?
3059 224 : src_timeline.initdb_lsn,
3060 224 : src_timeline.pg_version,
3061 224 : );
3062 :
3063 224 : let uninitialized_timeline = self
3064 224 : .prepare_new_timeline(
3065 224 : dst_id,
3066 224 : &metadata,
3067 224 : timeline_create_guard,
3068 224 : start_lsn + 1,
3069 224 : Some(Arc::clone(src_timeline)),
3070 224 : src_timeline.last_aux_file_policy.load(),
3071 224 : )
3072 0 : .await?;
3073 :
3074 224 : let new_timeline = uninitialized_timeline.finish_creation()?;
3075 :
3076 : // Root timeline gets its layers during creation and uploads them along with the metadata.
3077 : // A branch timeline, though, may receive no writes for some time after creation, and hence has no layers yet.
3078 : // We still need to upload its metadata eagerly: if other nodes `attach` the tenant and miss this timeline, their GC
3079 : // could get incorrect information and remove more layers than needed.
3080 : // See also https://github.com/neondatabase/neon/issues/3865
3081 224 : new_timeline
3082 224 : .remote_client
3083 224 : .schedule_index_upload_for_full_metadata_update(&metadata)
3084 224 : .context("branch initial metadata upload")?;
3085 :
3086 224 : Ok(new_timeline)
3087 228 : }
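 :
 : // Condensed, the branch-point rule enforced above is: `start_lsn` must be at
 : // or above both the last applied GC cutoff and the planned one. An
 : // illustrative sketch, not the literal implementation:
 : //
 : //     let ok = start_lsn >= *src_timeline.get_latest_gc_cutoff_lsn()
 : //         && start_lsn >= src_timeline.gc_info.read().unwrap().min_cutoff();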
3088 :
3089 : /// For unit tests, make this visible so that other modules can directly create timelines
3090 : #[cfg(test)]
3091 4 : #[tracing::instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), %timeline_id))]
3092 : pub(crate) async fn bootstrap_timeline_test(
3093 : &self,
3094 : timeline_id: TimelineId,
3095 : pg_version: u32,
3096 : load_existing_initdb: Option<TimelineId>,
3097 : ctx: &RequestContext,
3098 : ) -> anyhow::Result<Arc<Timeline>> {
3099 : let create_guard = self.create_timeline_create_guard(timeline_id).unwrap();
3100 : self.bootstrap_timeline(
3101 : timeline_id,
3102 : pg_version,
3103 : load_existing_initdb,
3104 : create_guard,
3105 : ctx,
3106 : )
3107 : .await
3108 : }
3109 :
3110 0 : async fn upload_initdb(
3111 0 : &self,
3112 0 : timelines_path: &Utf8PathBuf,
3113 0 : pgdata_path: &Utf8PathBuf,
3114 0 : timeline_id: &TimelineId,
3115 0 : ) -> anyhow::Result<()> {
3116 0 : let temp_path = timelines_path.join(format!(
3117 0 : "{INITDB_PATH}.upload-{timeline_id}.{TEMP_FILE_SUFFIX}"
3118 0 : ));
3119 :
3120 : scopeguard::defer! {
3121 : if let Err(e) = fs::remove_file(&temp_path) {
3122 : error!("Failed to remove temporary initdb archive '{temp_path}': {e}");
3123 : }
3124 : }
3125 :
3126 0 : let (pgdata_zstd, tar_zst_size) = create_zst_tarball(pgdata_path, &temp_path).await?;
3127 : const INITDB_TAR_ZST_WARN_LIMIT: u64 = 2 * 1024 * 1024;
3128 0 : if tar_zst_size > INITDB_TAR_ZST_WARN_LIMIT {
3129 0 : warn!(
3130 0 : "compressed {temp_path} size of {tar_zst_size} is above limit {INITDB_TAR_ZST_WARN_LIMIT}."
3131 : );
3132 0 : }
3133 :
3134 : pausable_failpoint!("before-initdb-upload");
3135 :
3136 0 : backoff::retry(
3137 0 : || async {
3138 0 : self::remote_timeline_client::upload_initdb_dir(
3139 0 : &self.remote_storage,
3140 0 : &self.tenant_shard_id.tenant_id,
3141 0 : timeline_id,
3142 0 : pgdata_zstd.try_clone().await?,
3143 0 : tar_zst_size,
3144 0 : &self.cancel,
3145 0 : )
3146 0 : .await
3147 0 : },
3148 0 : |_| false,
3149 0 : 3,
3150 0 : u32::MAX,
3151 0 : "persist_initdb_tar_zst",
3152 0 : &self.cancel,
3153 0 : )
3154 0 : .await
3155 0 : .ok_or_else(|| anyhow::Error::new(TimeoutOrCancel::Cancel))
3156 0 : .and_then(|x| x)
3157 0 : }
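 :
 : // The `backoff::retry` idiom above: the async closure is retried with backoff
 : // until it succeeds, the predicate deems an error permanent, or the
 : // cancellation token fires; a `None` result (cancelled) is mapped back into an
 : // error. A hedged sketch, where `do_upload` is a hypothetical operation and
 : // the meaning of the numeric arguments is assumed from the call above:
 : //
 : //     backoff::retry(
 : //         || async { do_upload().await },
 : //         |_err| false,   // treat no error as permanent
 : //         3,              // warn threshold (assumed)
 : //         u32::MAX,       // retry cap (assumed)
 : //         "my_operation",
 : //         &cancel,
 : //     )
 : //     .await
 : //     .ok_or_else(|| anyhow::Error::new(TimeoutOrCancel::Cancel))
 : //     .and_then(|x| x)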
3158 :
3159 : /// - run initdb to initialize a temporary instance and get bootstrap data
3160 : /// - after initialization completes, tar up the temp dir and upload it to S3.
3161 : ///
3162 : /// The caller is responsible for activating the returned timeline.
3163 2 : async fn bootstrap_timeline(
3164 2 : &self,
3165 2 : timeline_id: TimelineId,
3166 2 : pg_version: u32,
3167 2 : load_existing_initdb: Option<TimelineId>,
3168 2 : timeline_create_guard: TimelineCreateGuard<'_>,
3169 2 : ctx: &RequestContext,
3170 2 : ) -> anyhow::Result<Arc<Timeline>> {
3171 2 : // create a `tenant/{tenant_id}/timelines/basebackup-{timeline_id}.{TEMP_FILE_SUFFIX}/`
3172 2 : // temporary directory for basebackup files for the given timeline.
3173 2 :
3174 2 : let timelines_path = self.conf.timelines_path(&self.tenant_shard_id);
3175 2 : let pgdata_path = path_with_suffix_extension(
3176 2 : timelines_path.join(format!("basebackup-{timeline_id}")),
3177 2 : TEMP_FILE_SUFFIX,
3178 2 : );
3179 2 :
3180 2 : // Remove whatever was left from the previous runs: safe because TimelineCreateGuard guarantees
3181 2 : // we won't race with other creations or existing timelines with the same path.
3182 2 : if pgdata_path.exists() {
3183 0 : fs::remove_dir_all(&pgdata_path).with_context(|| {
3184 0 : format!("Failed to remove already existing initdb directory: {pgdata_path}")
3185 0 : })?;
3186 2 : }
3187 :
3188 : // This new directory is only needed during bootstrap: the scopeguard below removes it as soon as we are done with it.
3189 : scopeguard::defer! {
3190 : if let Err(e) = fs::remove_dir_all(&pgdata_path) {
3191 : // this is unlikely, but we will remove the directory on pageserver restart or another bootstrap call
3192 : error!("Failed to remove temporary initdb directory '{pgdata_path}': {e}");
3193 : }
3194 : }
3195 2 : if let Some(existing_initdb_timeline_id) = load_existing_initdb {
3196 2 : if existing_initdb_timeline_id != timeline_id {
3197 0 : let source_path = &remote_initdb_archive_path(
3198 0 : &self.tenant_shard_id.tenant_id,
3199 0 : &existing_initdb_timeline_id,
3200 0 : );
3201 0 : let dest_path =
3202 0 : &remote_initdb_archive_path(&self.tenant_shard_id.tenant_id, &timeline_id);
3203 0 :
3204 0 : // if this fails, it will be retried when the control plane retries the request
3205 0 : self.remote_storage
3206 0 : .copy_object(source_path, dest_path, &self.cancel)
3207 0 : .await
3208 0 : .context("copy initdb tar")?;
3209 2 : }
3210 2 : let (initdb_tar_zst_path, initdb_tar_zst) =
3211 2 : self::remote_timeline_client::download_initdb_tar_zst(
3212 2 : self.conf,
3213 2 : &self.remote_storage,
3214 2 : &self.tenant_shard_id,
3215 2 : &existing_initdb_timeline_id,
3216 2 : &self.cancel,
3217 2 : )
3218 719 : .await
3219 2 : .context("download initdb tar")?;
3220 :
3221 : scopeguard::defer! {
3222 : if let Err(e) = fs::remove_file(&initdb_tar_zst_path) {
3223 : error!("Failed to remove temporary initdb archive '{initdb_tar_zst_path}': {e}");
3224 : }
3225 : }
3226 :
3227 2 : let buf_read =
3228 2 : BufReader::with_capacity(remote_timeline_client::BUFFER_SIZE, initdb_tar_zst);
3229 2 : extract_zst_tarball(&pgdata_path, buf_read)
3230 11508 : .await
3231 2 : .context("extract initdb tar")?;
3232 : } else {
3233 : // Initialize a temporary repo to get bootstrap data; this creates a directory at the `pgdata_path` path
3234 0 : run_initdb(self.conf, &pgdata_path, pg_version, &self.cancel).await?;
3235 :
3236 : // Upload the created data dir to S3
3237 0 : if self.tenant_shard_id().is_shard_zero() {
3238 0 : self.upload_initdb(&timelines_path, &pgdata_path, &timeline_id)
3239 0 : .await?;
3240 0 : }
3241 : }
3242 2 : let pgdata_lsn = import_datadir::get_lsn_from_controlfile(&pgdata_path)?.align();
3243 2 :
3244 2 : // Import the contents of the data directory at the initial checkpoint
3245 2 : // LSN, and any WAL after that.
3246 2 : // The initdb LSN will be equal to last_record_lsn, which is set after the import.
3247 2 : // Because we know it upfront, we pass it to the metadata directly rather than using an Option or a dummy zero value.
3248 2 : let new_metadata = TimelineMetadata::new(
3249 2 : Lsn(0),
3250 2 : None,
3251 2 : None,
3252 2 : Lsn(0),
3253 2 : pgdata_lsn,
3254 2 : pgdata_lsn,
3255 2 : pg_version,
3256 2 : );
3257 2 : let raw_timeline = self
3258 2 : .prepare_new_timeline(
3259 2 : timeline_id,
3260 2 : &new_metadata,
3261 2 : timeline_create_guard,
3262 2 : pgdata_lsn,
3263 2 : None,
3264 2 : None,
3265 2 : )
3266 0 : .await?;
3267 :
3268 2 : let tenant_shard_id = raw_timeline.owning_tenant.tenant_shard_id;
3269 2 : let unfinished_timeline = raw_timeline.raw_timeline()?;
3270 :
3271 : // Flush the new layer files to disk, before we make the timeline available to
3272 : // the outside world.
3273 : //
3274 : // Flush loop needs to be spawned in order to be able to flush.
3275 2 : unfinished_timeline.maybe_spawn_flush_loop();
3276 2 :
3277 2 : import_datadir::import_timeline_from_postgres_datadir(
3278 2 : unfinished_timeline,
3279 2 : &pgdata_path,
3280 2 : pgdata_lsn,
3281 2 : ctx,
3282 2 : )
3283 9666 : .await
3284 2 : .with_context(|| {
3285 0 : format!("Failed to import pgdatadir for timeline {tenant_shard_id}/{timeline_id}")
3286 2 : })?;
3287 :
3288 2 : fail::fail_point!("before-checkpoint-new-timeline", |_| {
3289 0 : anyhow::bail!("failpoint before-checkpoint-new-timeline");
3290 2 : });
3291 :
3292 2 : unfinished_timeline
3293 2 : .freeze_and_flush()
3294 2 : .await
3295 2 : .with_context(|| {
3296 0 : format!(
3297 0 : "Failed to flush after pgdatadir import for timeline {tenant_shard_id}/{timeline_id}"
3298 0 : )
3299 2 : })?;
3300 :
3301 : // All done!
3302 2 : let timeline = raw_timeline.finish_creation()?;
3303 :
3304 2 : Ok(timeline)
3305 2 : }
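 :
 : // Both temporary paths above lean on `scopeguard::defer!`, which runs its body
 : // when the enclosing scope exits, on success and on error alike. A minimal
 : // sketch with a hypothetical path:
 : //
 : //     let tmp = Utf8PathBuf::from("/tmp/example.___temp");
 : //     scopeguard::defer! {
 : //         if let Err(e) = fs::remove_dir_all(&tmp) {
 : //             error!("Failed to remove temporary directory '{tmp}': {e}");
 : //         }
 : //     }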
3306 :
3307 : /// Call this before constructing a timeline, to build its required structures
3308 378 : fn build_timeline_resources(&self, timeline_id: TimelineId) -> TimelineResources {
3309 378 : let remote_client = RemoteTimelineClient::new(
3310 378 : self.remote_storage.clone(),
3311 378 : self.deletion_queue_client.clone(),
3312 378 : self.conf,
3313 378 : self.tenant_shard_id,
3314 378 : timeline_id,
3315 378 : self.generation,
3316 378 : );
3317 378 : TimelineResources {
3318 378 : remote_client,
3319 378 : timeline_get_throttle: self.timeline_get_throttle.clone(),
3320 378 : l0_flush_global_state: self.l0_flush_global_state.clone(),
3321 378 : }
3322 378 : }
3323 :
3324 : /// Creates intermediate timeline structure and its files.
3325 : ///
3326 : /// An empty layer map is initialized, and new data and WAL can be imported starting
3327 : /// at 'disk_consistent_lsn'. After any initial data has been imported, call
3328 : /// `finish_creation` to insert the Timeline into the timelines map.
3329 378 : async fn prepare_new_timeline<'a>(
3330 378 : &'a self,
3331 378 : new_timeline_id: TimelineId,
3332 378 : new_metadata: &TimelineMetadata,
3333 378 : create_guard: TimelineCreateGuard<'a>,
3334 378 : start_lsn: Lsn,
3335 378 : ancestor: Option<Arc<Timeline>>,
3336 378 : last_aux_file_policy: Option<AuxFilePolicy>,
3337 378 : ) -> anyhow::Result<UninitializedTimeline> {
3338 378 : let tenant_shard_id = self.tenant_shard_id;
3339 378 :
3340 378 : let resources = self.build_timeline_resources(new_timeline_id);
3341 378 : resources
3342 378 : .remote_client
3343 378 : .init_upload_queue_for_empty_remote(new_metadata)?;
3344 :
3345 378 : let timeline_struct = self
3346 378 : .create_timeline_struct(
3347 378 : new_timeline_id,
3348 378 : new_metadata,
3349 378 : ancestor,
3350 378 : resources,
3351 378 : CreateTimelineCause::Load,
3352 378 : last_aux_file_policy,
3353 378 : )
3354 378 : .context("Failed to create timeline data structure")?;
3355 :
3356 378 : timeline_struct.init_empty_layer_map(start_lsn);
3357 :
3358 378 : if let Err(e) = self
3359 378 : .create_timeline_files(&create_guard.timeline_path)
3360 0 : .await
3361 : {
3362 0 : error!("Failed to create initial files for timeline {tenant_shard_id}/{new_timeline_id}, cleaning up: {e:?}");
3363 0 : cleanup_timeline_directory(create_guard);
3364 0 : return Err(e);
3365 378 : }
3366 378 :
3367 378 : debug!(
3368 0 : "Successfully created initial files for timeline {tenant_shard_id}/{new_timeline_id}"
3369 : );
3370 :
3371 378 : Ok(UninitializedTimeline::new(
3372 378 : self,
3373 378 : new_timeline_id,
3374 378 : Some((timeline_struct, create_guard)),
3375 378 : ))
3376 378 : }
3377 :
3378 378 : async fn create_timeline_files(&self, timeline_path: &Utf8Path) -> anyhow::Result<()> {
3379 378 : crashsafe::create_dir(timeline_path).context("Failed to create timeline directory")?;
3380 :
3381 378 : fail::fail_point!("after-timeline-dir-creation", |_| {
3382 0 : anyhow::bail!("failpoint after-timeline-dir-creation");
3383 378 : });
3384 :
3385 378 : Ok(())
3386 378 : }
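 :
 : // `fail::fail_point!` compiles to a no-op unless the named failpoint is
 : // activated at runtime (as the tests do); a sketch with a hypothetical name:
 : //
 : //     fail::fail_point!("my-failpoint", |_| {
 : //         anyhow::bail!("failpoint my-failpoint");
 : //     });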
3387 :
3388 : /// Get a guard that provides exclusive access to the timeline directory, preventing
3389 : /// concurrent attempts to create the same timeline.
3390 384 : fn create_timeline_create_guard(
3391 384 : &self,
3392 384 : timeline_id: TimelineId,
3393 384 : ) -> Result<TimelineCreateGuard, TimelineExclusionError> {
3394 384 : let tenant_shard_id = self.tenant_shard_id;
3395 384 :
3396 384 : let timeline_path = self.conf.timeline_path(&tenant_shard_id, &timeline_id);
3397 :
3398 384 : let create_guard = TimelineCreateGuard::new(self, timeline_id, timeline_path.clone())?;
3399 :
3400 : // At this stage, we have got exclusive access to in-memory state for this timeline ID
3401 : // for creation.
3402 : // A timeline directory should never exist on disk already:
3403 : // - a previous failed creation would have cleaned up after itself
3404 : // - a pageserver restart would clean up timeline directories that don't have valid remote state
3405 : //
3406 : // Therefore it is an unexpected internal error to encounter a timeline directory already existing here;
3407 : // such an error may indicate a bug in the cleanup of failed creations.
3408 382 : if timeline_path.exists() {
3409 0 : return Err(TimelineExclusionError::Other(anyhow::anyhow!(
3410 0 : "Timeline directory already exists! This is a bug."
3411 0 : )));
3412 382 : }
3413 382 :
3414 382 : Ok(create_guard)
3415 384 : }
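 :
 : // Callers acquire the guard first and thread it through the creation path,
 : // e.g. (as `branch_timeline_test` above does):
 : //
 : //     let create_guard = self.create_timeline_create_guard(dst_id)?;
 : //     let tl = self
 : //         .branch_timeline_impl(src_timeline, dst_id, ancestor_lsn, create_guard, ctx)
 : //         .await?;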
3416 :
3417 : /// Gathers inputs from all of the timelines to produce a sizing model input.
3418 : ///
3419 : /// Future is cancellation safe. Only one calculation can be running at once per tenant.
3420 0 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
3421 : pub async fn gather_size_inputs(
3422 : &self,
3423 : // `max_retention_period` overrides the cutoff that is used to calculate the size
3424 : // (only if it is shorter than the real cutoff).
3425 : max_retention_period: Option<u64>,
3426 : cause: LogicalSizeCalculationCause,
3427 : cancel: &CancellationToken,
3428 : ctx: &RequestContext,
3429 : ) -> Result<size::ModelInputs, size::CalculateSyntheticSizeError> {
3430 : let logical_sizes_at_once = self
3431 : .conf
3432 : .concurrent_tenant_size_logical_size_queries
3433 : .inner();
3434 :
3435 : // TODO: Having a single mutex block concurrent reads is not great for performance.
3436 : //
3437 : // But the only case where we need to run multiple of these at once is when we
3438 : // request a size for a tenant manually via API, while another background calculation
3439 : // is in progress (which is not a common case).
3440 : //
3441 : // See more on issue #2748, condensed out of the initial PR review.
3442 : let mut shared_cache = tokio::select! {
3443 : locked = self.cached_logical_sizes.lock() => locked,
3444 : _ = cancel.cancelled() => return Err(size::CalculateSyntheticSizeError::Cancelled),
3445 : _ = self.cancel.cancelled() => return Err(size::CalculateSyntheticSizeError::Cancelled),
3446 : };
3447 :
3448 : size::gather_inputs(
3449 : self,
3450 : logical_sizes_at_once,
3451 : max_retention_period,
3452 : &mut shared_cache,
3453 : cause,
3454 : cancel,
3455 : ctx,
3456 : )
3457 : .await
3458 : }
3459 :
3460 : /// Calculate synthetic tenant size and cache the result.
3461 : /// This is called periodically by a background worker.
3462 : /// The result is cached in the tenant struct.
3463 0 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
3464 : pub async fn calculate_synthetic_size(
3465 : &self,
3466 : cause: LogicalSizeCalculationCause,
3467 : cancel: &CancellationToken,
3468 : ctx: &RequestContext,
3469 : ) -> Result<u64, size::CalculateSyntheticSizeError> {
3470 : let inputs = self.gather_size_inputs(None, cause, cancel, ctx).await?;
3471 :
3472 : let size = inputs.calculate();
3473 :
3474 : self.set_cached_synthetic_size(size);
3475 :
3476 : Ok(size)
3477 : }
3478 :
3479 : /// Cache given synthetic size and update the metric value
3480 0 : pub fn set_cached_synthetic_size(&self, size: u64) {
3481 0 : self.cached_synthetic_tenant_size
3482 0 : .store(size, Ordering::Relaxed);
3483 0 :
3484 0 : // Only shard zero should be calculating synthetic sizes
3485 0 : debug_assert!(self.shard_identity.is_shard_zero());
3486 :
3487 0 : TENANT_SYNTHETIC_SIZE_METRIC
3488 0 : .get_metric_with_label_values(&[&self.tenant_shard_id.tenant_id.to_string()])
3489 0 : .unwrap()
3490 0 : .set(size);
3491 0 : }
3492 :
3493 0 : pub fn cached_synthetic_size(&self) -> u64 {
3494 0 : self.cached_synthetic_tenant_size.load(Ordering::Relaxed)
3495 0 : }
3496 :
3497 : /// Flush any in-progress layers, schedule uploads, and wait for uploads to complete.
3498 : ///
3499 : /// This function can take a long time: callers should wrap it in a timeout if calling
3500 : /// from an external API handler.
3501 : ///
3502 : /// Cancel-safety: cancelling this function may leave I/O running, but such I/O is
3503 : /// still bounded by tenant/timeline shutdown.
3504 0 : #[tracing::instrument(skip_all)]
3505 : pub(crate) async fn flush_remote(&self) -> anyhow::Result<()> {
3506 : let timelines = self.timelines.lock().unwrap().clone();
3507 :
3508 0 : async fn flush_timeline(_gate: GateGuard, timeline: Arc<Timeline>) -> anyhow::Result<()> {
3509 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Flushing...");
3510 0 : timeline.freeze_and_flush().await?;
3511 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Waiting for uploads...");
3512 0 : timeline.remote_client.wait_completion().await?;
3513 :
3514 0 : Ok(())
3515 0 : }
3516 :
3517 : // We do not use a JoinSet for these tasks, because we don't want them to be
3518 : // aborted when this function's future is cancelled: they should stay alive
3519 : // holding their GateGuard until they complete, to ensure their I/Os complete
3520 : // before Timeline shutdown completes.
3521 : let mut results = FuturesUnordered::new();
3522 :
3523 : for (_timeline_id, timeline) in timelines {
3524 : // Run each timeline's flush in a task holding the timeline's gate: this
3525 : // means that if this function's future is cancelled, the Timeline shutdown
3526 : // will still wait for any I/O in here to complete.
3527 : let Ok(gate) = timeline.gate.enter() else {
3528 : continue;
3529 : };
3530 0 : let jh = tokio::task::spawn(async move { flush_timeline(gate, timeline).await });
3531 : results.push(jh);
3532 : }
3533 :
3534 : while let Some(r) = results.next().await {
3535 : if let Err(e) = r {
3536 : if !e.is_cancelled() && !e.is_panic() {
3537 : tracing::error!("unexpected join error: {e:?}");
3538 : }
3539 : }
3540 : }
3541 :
3542 : // The flushes we did above were just writes, but the Tenant might have had
3543 : // pending deletions from recent compaction/gc as well: we want to flush those
3544 : // too. This requires flushing the global delete queue, which is cheap
3545 : // because it's typically a no-op.
3546 : match self.deletion_queue_client.flush_execute().await {
3547 : Ok(_) => {}
3548 : Err(DeletionQueueError::ShuttingDown) => {}
3549 : }
3550 :
3551 : Ok(())
3552 : }
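 :
 : // The gate pattern used above, in miniature: entering the gate yields a guard,
 : // and timeline shutdown cannot complete while any guard is alive. Sketch, with
 : // `do_io` a hypothetical async operation:
 : //
 : //     let Ok(gate) = timeline.gate.enter() else {
 : //         return; // already shutting down
 : //     };
 : //     tokio::task::spawn(async move {
 : //         let _gate = gate; // held until the I/O completes
 : //         do_io().await;
 : //     });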
3553 :
3554 0 : pub(crate) fn get_tenant_conf(&self) -> TenantConfOpt {
3555 0 : self.tenant_conf.load().tenant_conf.clone()
3556 0 : }
3557 : }
3558 :
3559 : /// Create the cluster temporarily in the 'initdb_target_dir' directory inside the repository
3560 : /// to get bootstrap data for timeline initialization.
3561 0 : async fn run_initdb(
3562 0 : conf: &'static PageServerConf,
3563 0 : initdb_target_dir: &Utf8Path,
3564 0 : pg_version: u32,
3565 0 : cancel: &CancellationToken,
3566 0 : ) -> Result<(), InitdbError> {
3567 0 : let initdb_bin_path = conf
3568 0 : .pg_bin_dir(pg_version)
3569 0 : .map_err(InitdbError::Other)?
3570 0 : .join("initdb");
3571 0 : let initdb_lib_dir = conf.pg_lib_dir(pg_version).map_err(InitdbError::Other)?;
3572 0 : info!(
3573 0 : "running {} in {}, libdir: {}",
3574 : initdb_bin_path, initdb_target_dir, initdb_lib_dir,
3575 : );
3576 :
3577 0 : let _permit = INIT_DB_SEMAPHORE.acquire().await;
3578 :
3579 0 : let initdb_command = tokio::process::Command::new(&initdb_bin_path)
3580 0 : .args(["-D", initdb_target_dir.as_ref()])
3581 0 : .args(["-U", &conf.superuser])
3582 0 : .args(["-E", "utf8"])
3583 0 : .arg("--no-instructions")
3584 0 : .arg("--no-sync")
3585 0 : .env_clear()
3586 0 : .env("LD_LIBRARY_PATH", &initdb_lib_dir)
3587 0 : .env("DYLD_LIBRARY_PATH", &initdb_lib_dir)
3588 0 : .stdin(std::process::Stdio::null())
3589 0 : // stdout invocation produces the same output every time, we don't need it
3590 0 : .stdout(std::process::Stdio::null())
3591 0 : // we would be interested in the stderr output, if there was any
3592 0 : .stderr(std::process::Stdio::piped())
3593 0 : .spawn()?;
3594 :
3595 : // Ideally we'd select here with the cancellation token, but the problem is that
3596 : // we can't safely terminate initdb: it launches processes of its own, and killing
3597 : // initdb doesn't kill them. After we return from this function, we want to be
3598 : // able to clean up the target directory.
3599 : // See https://github.com/neondatabase/neon/issues/6385
3600 0 : let initdb_output = initdb_command.wait_with_output().await?;
3601 0 : if !initdb_output.status.success() {
3602 0 : return Err(InitdbError::Failed(
3603 0 : initdb_output.status,
3604 0 : initdb_output.stderr,
3605 0 : ));
3606 0 : }
3607 0 :
3608 0 : // This isn't true cancellation support; see above. Still return an error to
3609 0 : // exercise the cancellation code path.
3610 0 : if cancel.is_cancelled() {
3611 0 : return Err(InitdbError::Cancelled);
3612 0 : }
3613 0 :
3614 0 : Ok(())
3615 0 : }
3616 :
3617 : /// Dump contents of a layer file to stdout.
3618 0 : pub async fn dump_layerfile_from_path(
3619 0 : path: &Utf8Path,
3620 0 : verbose: bool,
3621 0 : ctx: &RequestContext,
3622 0 : ) -> anyhow::Result<()> {
3623 : use std::os::unix::fs::FileExt;
3624 :
3625 : // All layer files start with a two-byte "magic" value, to identify the kind of
3626 : // file.
3627 0 : let file = File::open(path)?;
3628 0 : let mut header_buf = [0u8; 2];
3629 0 : file.read_exact_at(&mut header_buf, 0)?;
3630 :
3631 0 : match u16::from_be_bytes(header_buf) {
3632 : crate::IMAGE_FILE_MAGIC => {
3633 0 : ImageLayer::new_for_path(path, file)?
3634 0 : .dump(verbose, ctx)
3635 0 : .await?
3636 : }
3637 : crate::DELTA_FILE_MAGIC => {
3638 0 : DeltaLayer::new_for_path(path, file)?
3639 0 : .dump(verbose, ctx)
3640 0 : .await?
3641 : }
3642 0 : magic => bail!("unrecognized magic identifier: {:?}", magic),
3643 : }
3644 :
3645 0 : Ok(())
3646 0 : }
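 :
 : // The magic-byte dispatch above relies on positioned reads; a std-only sketch
 : // with a hypothetical path:
 : //
 : //     use std::os::unix::fs::FileExt;
 : //     let file = std::fs::File::open("some-layer-file")?;
 : //     let mut magic = [0u8; 2];
 : //     file.read_exact_at(&mut magic, 0)?;
 : //     match u16::from_be_bytes(magic) {
 : //         // ...dispatch on the file kind...
 : //         other => anyhow::bail!("unrecognized magic identifier: {:?}", other),
 : //     }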
3647 :
3648 : #[cfg(test)]
3649 : pub(crate) mod harness {
3650 : use bytes::{Bytes, BytesMut};
3651 : use once_cell::sync::OnceCell;
3652 : use pageserver_api::models::ShardParameters;
3653 : use pageserver_api::shard::ShardIndex;
3654 : use utils::logging;
3655 :
3656 : use crate::deletion_queue::mock::MockDeletionQueue;
3657 : use crate::l0_flush::L0FlushConfig;
3658 : use crate::walredo::apply_neon;
3659 : use crate::{repository::Key, walrecord::NeonWalRecord};
3660 :
3661 : use super::*;
3662 : use hex_literal::hex;
3663 : use utils::id::TenantId;
3664 :
3665 : pub const TIMELINE_ID: TimelineId =
3666 : TimelineId::from_array(hex!("11223344556677881122334455667788"));
3667 : pub const NEW_TIMELINE_ID: TimelineId =
3668 : TimelineId::from_array(hex!("AA223344556677881122334455667788"));
3669 :
3670 : /// Convenience function to create a page image with the given string as the only content
3671 5028414 : pub fn test_img(s: &str) -> Bytes {
3672 5028414 : let mut buf = BytesMut::new();
3673 5028414 : buf.extend_from_slice(s.as_bytes());
3674 5028414 : buf.resize(64, 0);
3675 5028414 :
3676 5028414 : buf.freeze()
3677 5028414 : }
3678 :
3679 : impl From<TenantConf> for TenantConfOpt {
3680 162 : fn from(tenant_conf: TenantConf) -> Self {
3681 162 : Self {
3682 162 : checkpoint_distance: Some(tenant_conf.checkpoint_distance),
3683 162 : checkpoint_timeout: Some(tenant_conf.checkpoint_timeout),
3684 162 : compaction_target_size: Some(tenant_conf.compaction_target_size),
3685 162 : compaction_period: Some(tenant_conf.compaction_period),
3686 162 : compaction_threshold: Some(tenant_conf.compaction_threshold),
3687 162 : compaction_algorithm: Some(tenant_conf.compaction_algorithm),
3688 162 : gc_horizon: Some(tenant_conf.gc_horizon),
3689 162 : gc_period: Some(tenant_conf.gc_period),
3690 162 : image_creation_threshold: Some(tenant_conf.image_creation_threshold),
3691 162 : pitr_interval: Some(tenant_conf.pitr_interval),
3692 162 : walreceiver_connect_timeout: Some(tenant_conf.walreceiver_connect_timeout),
3693 162 : lagging_wal_timeout: Some(tenant_conf.lagging_wal_timeout),
3694 162 : max_lsn_wal_lag: Some(tenant_conf.max_lsn_wal_lag),
3695 162 : trace_read_requests: Some(tenant_conf.trace_read_requests),
3696 162 : eviction_policy: Some(tenant_conf.eviction_policy),
3697 162 : min_resident_size_override: tenant_conf.min_resident_size_override,
3698 162 : evictions_low_residence_duration_metric_threshold: Some(
3699 162 : tenant_conf.evictions_low_residence_duration_metric_threshold,
3700 162 : ),
3701 162 : heatmap_period: Some(tenant_conf.heatmap_period),
3702 162 : lazy_slru_download: Some(tenant_conf.lazy_slru_download),
3703 162 : timeline_get_throttle: Some(tenant_conf.timeline_get_throttle),
3704 162 : image_layer_creation_check_threshold: Some(
3705 162 : tenant_conf.image_layer_creation_check_threshold,
3706 162 : ),
3707 162 : switch_aux_file_policy: Some(tenant_conf.switch_aux_file_policy),
3708 162 : lsn_lease_length: Some(tenant_conf.lsn_lease_length),
3709 162 : lsn_lease_length_for_ts: Some(tenant_conf.lsn_lease_length_for_ts),
3710 162 : }
3711 162 : }
3712 : }
3713 :
3714 : pub struct TenantHarness {
3715 : pub conf: &'static PageServerConf,
3716 : pub tenant_conf: TenantConf,
3717 : pub tenant_shard_id: TenantShardId,
3718 : pub generation: Generation,
3719 : pub shard: ShardIndex,
3720 : pub remote_storage: GenericRemoteStorage,
3721 : pub remote_fs_dir: Utf8PathBuf,
3722 : pub deletion_queue: MockDeletionQueue,
3723 : }
3724 :
3725 : static LOG_HANDLE: OnceCell<()> = OnceCell::new();
3726 :
3727 168 : pub(crate) fn setup_logging() {
3728 168 : LOG_HANDLE.get_or_init(|| {
3729 156 : logging::init(
3730 156 : logging::LogFormat::Test,
3731 156 : // enable it in case the tests exercise code paths that use
3732 156 : // debug_assert_current_span_has_tenant_and_timeline_id
3733 156 : logging::TracingErrorLayerEnablement::EnableWithRustLogFilter,
3734 156 : logging::Output::Stdout,
3735 156 : )
3736 156 : .expect("Failed to init test logging")
3737 168 : });
3738 168 : }
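 :
 : // `OnceCell::get_or_init` runs the closure at most once, even when tests
 : // execute in parallel; the equivalent std pattern is:
 : //
 : //     static INIT: std::sync::Once = std::sync::Once::new();
 : //     INIT.call_once(|| {
 : //         // one-time setup
 : //     });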
3739 :
3740 : impl TenantHarness {
3741 162 : pub fn create_custom(
3742 162 : test_name: &'static str,
3743 162 : tenant_conf: TenantConf,
3744 162 : tenant_id: TenantId,
3745 162 : shard_identity: ShardIdentity,
3746 162 : generation: Generation,
3747 162 : ) -> anyhow::Result<Self> {
3748 162 : setup_logging();
3749 162 :
3750 162 : let repo_dir = PageServerConf::test_repo_dir(test_name);
3751 162 : let _ = fs::remove_dir_all(&repo_dir);
3752 162 : fs::create_dir_all(&repo_dir)?;
3753 :
3754 162 : let conf = PageServerConf::dummy_conf(repo_dir);
3755 162 : // Make a static copy of the config. This can never be freed, but that's
3756 162 : // OK in a test.
3757 162 : let conf: &'static PageServerConf = Box::leak(Box::new(conf));
3758 162 :
3759 162 : let shard = shard_identity.shard_index();
3760 162 : let tenant_shard_id = TenantShardId {
3761 162 : tenant_id,
3762 162 : shard_number: shard.shard_number,
3763 162 : shard_count: shard.shard_count,
3764 162 : };
3765 162 : fs::create_dir_all(conf.tenant_path(&tenant_shard_id))?;
3766 162 : fs::create_dir_all(conf.timelines_path(&tenant_shard_id))?;
3767 :
3768 : use remote_storage::{RemoteStorageConfig, RemoteStorageKind};
3769 162 : let remote_fs_dir = conf.workdir.join("localfs");
3770 162 : std::fs::create_dir_all(&remote_fs_dir).unwrap();
3771 162 : let config = RemoteStorageConfig {
3772 162 : storage: RemoteStorageKind::LocalFs {
3773 162 : local_path: remote_fs_dir.clone(),
3774 162 : },
3775 162 : timeout: RemoteStorageConfig::DEFAULT_TIMEOUT,
3776 162 : };
3777 162 : let remote_storage = GenericRemoteStorage::from_config(&config).unwrap();
3778 162 : let deletion_queue = MockDeletionQueue::new(Some(remote_storage.clone()));
3779 162 :
3780 162 : Ok(Self {
3781 162 : conf,
3782 162 : tenant_conf,
3783 162 : tenant_shard_id,
3784 162 : generation,
3785 162 : shard,
3786 162 : remote_storage,
3787 162 : remote_fs_dir,
3788 162 : deletion_queue,
3789 162 : })
3790 162 : }
3791 :
3792 150 : pub fn create(test_name: &'static str) -> anyhow::Result<Self> {
3793 150 : // Disable automatic GC and compaction to make the unit tests more deterministic.
3794 150 : // The tests perform them manually if needed.
3795 150 : let tenant_conf = TenantConf {
3796 150 : gc_period: Duration::ZERO,
3797 150 : compaction_period: Duration::ZERO,
3798 150 : ..TenantConf::default()
3799 150 : };
3800 150 : let tenant_id = TenantId::generate();
3801 150 : let shard = ShardIdentity::unsharded();
3802 150 : Self::create_custom(
3803 150 : test_name,
3804 150 : tenant_conf,
3805 150 : tenant_id,
3806 150 : shard,
3807 150 : Generation::new(0xdeadbeef),
3808 150 : )
3809 150 : }
3810 :
3811 18 : pub fn span(&self) -> tracing::Span {
3812 18 : info_span!("TenantHarness", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug())
3813 18 : }
3814 :
3815 162 : pub(crate) async fn load(&self) -> (Arc<Tenant>, RequestContext) {
3816 162 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
3817 162 : (
3818 162 : self.do_try_load(&ctx)
3819 652 : .await
3820 162 : .expect("failed to load test tenant"),
3821 162 : ctx,
3822 162 : )
3823 162 : }
3824 :
3825 324 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
3826 : pub(crate) async fn do_try_load(
3827 : &self,
3828 : ctx: &RequestContext,
3829 : ) -> anyhow::Result<Arc<Tenant>> {
3830 : let walredo_mgr = Arc::new(WalRedoManager::from(TestRedoManager));
3831 :
3832 : let tenant = Arc::new(Tenant::new(
3833 : TenantState::Loading,
3834 : self.conf,
3835 : AttachedTenantConf::try_from(LocationConf::attached_single(
3836 : TenantConfOpt::from(self.tenant_conf.clone()),
3837 : self.generation,
3838 : &ShardParameters::default(),
3839 : ))
3840 : .unwrap(),
3841 : // This is a legacy/test code path: sharding isn't supported here.
3842 : ShardIdentity::unsharded(),
3843 : Some(walredo_mgr),
3844 : self.tenant_shard_id,
3845 : self.remote_storage.clone(),
3846 : self.deletion_queue.new_client(),
3847 : // TODO: ideally we should run all unit tests with both configs
3848 : L0FlushGlobalState::new(L0FlushConfig::default()),
3849 : ));
3850 :
3851 : let preload = tenant
3852 : .preload(&self.remote_storage, CancellationToken::new())
3853 : .await?;
3854 : tenant.attach(Some(preload), ctx).await?;
3855 :
3856 : tenant.state.send_replace(TenantState::Active);
3857 : for timeline in tenant.timelines.lock().unwrap().values() {
3858 : timeline.set_state(TimelineState::Active);
3859 : }
3860 : Ok(tenant)
3861 : }
3862 :
3863 2 : pub fn timeline_path(&self, timeline_id: &TimelineId) -> Utf8PathBuf {
3864 2 : self.conf.timeline_path(&self.tenant_shard_id, timeline_id)
3865 2 : }
3866 : }
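 :
 : // Typical harness usage in a test, as exercised throughout the `tests` module
 : // below:
 : //
 : //     let harness = TenantHarness::create("my_test")?;
 : //     let (tenant, ctx) = harness.load().await;
 : //     let tline = tenant
 : //         .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
 : //         .await?;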
3867 :
3868 : // Mock WAL redo manager that doesn't do much
3869 : pub(crate) struct TestRedoManager;
3870 :
3871 : impl TestRedoManager {
3872 : /// # Cancel-Safety
3873 : ///
3874 : /// This method is cancellation-safe.
3875 58 : pub async fn request_redo(
3876 58 : &self,
3877 58 : key: Key,
3878 58 : lsn: Lsn,
3879 58 : base_img: Option<(Lsn, Bytes)>,
3880 58 : records: Vec<(Lsn, NeonWalRecord)>,
3881 58 : _pg_version: u32,
3882 58 : ) -> Result<Bytes, walredo::Error> {
3883 76 : let records_neon = records.iter().all(|r| apply_neon::can_apply_in_neon(&r.1));
3884 58 : if records_neon {
3885 : // For Neon WAL records, we can decode without spawning postgres, so do so.
3886 58 : let base_img = base_img.expect("Neon WAL redo requires base image").1;
3887 58 : let mut page = BytesMut::new();
3888 58 : page.extend_from_slice(&base_img);
3889 134 : for (record_lsn, record) in records {
3890 76 : apply_neon::apply_in_neon(&record, record_lsn, key, &mut page)?;
3891 : }
3892 58 : Ok(page.freeze())
3893 : } else {
3894 : // We never spawn a postgres walredo process in unit tests: just log what we might have done.
3895 0 : let s = format!(
3896 0 : "redo for {} to get to {}, with {} and {} records",
3897 0 : key,
3898 0 : lsn,
3899 0 : if base_img.is_some() {
3900 0 : "base image"
3901 : } else {
3902 0 : "no base image"
3903 : },
3904 0 : records.len()
3905 0 : );
3906 0 : println!("{s}");
3907 0 :
3908 0 : Ok(test_img(&s))
3909 : }
3910 58 : }
3911 : }
3912 : }
3913 :
3914 : #[cfg(test)]
3915 : mod tests {
3916 : use std::collections::BTreeMap;
3917 :
3918 : use super::*;
3919 : use crate::keyspace::KeySpaceAccum;
3920 : use crate::pgdatadir_mapping::AuxFilesDirectory;
3921 : use crate::repository::{Key, Value};
3922 : use crate::tenant::harness::*;
3923 : use crate::tenant::timeline::CompactFlags;
3924 : use crate::walrecord::NeonWalRecord;
3925 : use crate::DEFAULT_PG_VERSION;
3926 : use bytes::{Bytes, BytesMut};
3927 : use hex_literal::hex;
3928 : use itertools::Itertools;
3929 : use pageserver_api::key::{AUX_FILES_KEY, AUX_KEY_PREFIX, NON_INHERITED_RANGE};
3930 : use pageserver_api::keyspace::KeySpace;
3931 : use pageserver_api::models::{CompactionAlgorithm, CompactionAlgorithmSettings};
3932 : use rand::{thread_rng, Rng};
3933 : use storage_layer::PersistentLayerKey;
3934 : use tests::storage_layer::ValuesReconstructState;
3935 : use tests::timeline::{GetVectoredError, ShutdownMode};
3936 : use timeline::GcInfo;
3937 : use utils::bin_ser::BeSer;
3938 : use utils::id::TenantId;
3939 :
3940 : static TEST_KEY: Lazy<Key> =
3941 18 : Lazy::new(|| Key::from_slice(&hex!("010000000033333333444444445500000001")));
3942 :
3943 : #[tokio::test]
3944 2 : async fn test_basic() -> anyhow::Result<()> {
3945 8 : let (tenant, ctx) = TenantHarness::create("test_basic")?.load().await;
3946 2 : let tline = tenant
3947 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
3948 6 : .await?;
3949 2 :
3950 2 : let mut writer = tline.writer().await;
3951 2 : writer
3952 2 : .put(
3953 2 : *TEST_KEY,
3954 2 : Lsn(0x10),
3955 2 : &Value::Image(test_img("foo at 0x10")),
3956 2 : &ctx,
3957 2 : )
3958 2 : .await?;
3959 2 : writer.finish_write(Lsn(0x10));
3960 2 : drop(writer);
3961 2 :
3962 2 : let mut writer = tline.writer().await;
3963 2 : writer
3964 2 : .put(
3965 2 : *TEST_KEY,
3966 2 : Lsn(0x20),
3967 2 : &Value::Image(test_img("foo at 0x20")),
3968 2 : &ctx,
3969 2 : )
3970 2 : .await?;
3971 2 : writer.finish_write(Lsn(0x20));
3972 2 : drop(writer);
3973 2 :
3974 2 : assert_eq!(
3975 2 : tline.get(*TEST_KEY, Lsn(0x10), &ctx).await?,
3976 2 : test_img("foo at 0x10")
3977 2 : );
3978 2 : assert_eq!(
3979 2 : tline.get(*TEST_KEY, Lsn(0x1f), &ctx).await?,
3980 2 : test_img("foo at 0x10")
3981 2 : );
3982 2 : assert_eq!(
3983 2 : tline.get(*TEST_KEY, Lsn(0x20), &ctx).await?,
3984 2 : test_img("foo at 0x20")
3985 2 : );
3986 2 :
3987 2 : Ok(())
3988 2 : }
3989 :
3990 : #[tokio::test]
3991 2 : async fn no_duplicate_timelines() -> anyhow::Result<()> {
3992 2 : let (tenant, ctx) = TenantHarness::create("no_duplicate_timelines")?
3993 2 : .load()
3994 8 : .await;
3995 2 : let _ = tenant
3996 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
3997 6 : .await?;
3998 2 :
3999 2 : match tenant
4000 2 : .create_empty_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4001 2 : .await
4002 2 : {
4003 2 : Ok(_) => panic!("duplicate timeline creation should fail"),
4004 2 : Err(e) => assert_eq!(e.to_string(), "Already exists".to_string()),
4005 2 : }
4006 2 :
4007 2 : Ok(())
4008 2 : }
4009 :
4010 : /// Convenience function to create a `Value::Image` with the given string as the only content
4011 10 : pub fn test_value(s: &str) -> Value {
4012 10 : let mut buf = BytesMut::new();
4013 10 : buf.extend_from_slice(s.as_bytes());
4014 10 : Value::Image(buf.freeze())
4015 10 : }
4016 :
4017 : ///
4018 : /// Test branch creation
4019 : ///
4020 : #[tokio::test]
4021 2 : async fn test_branch() -> anyhow::Result<()> {
4022 2 : use std::str::from_utf8;
4023 2 :
4024 8 : let (tenant, ctx) = TenantHarness::create("test_branch")?.load().await;
4025 2 : let tline = tenant
4026 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4027 6 : .await?;
4028 2 : let mut writer = tline.writer().await;
4029 2 :
4030 2 : #[allow(non_snake_case)]
4031 2 : let TEST_KEY_A: Key = Key::from_hex("110000000033333333444444445500000001").unwrap();
4032 2 : #[allow(non_snake_case)]
4033 2 : let TEST_KEY_B: Key = Key::from_hex("110000000033333333444444445500000002").unwrap();
4034 2 :
4035 2 : // Insert a value on the timeline
4036 2 : writer
4037 2 : .put(TEST_KEY_A, Lsn(0x20), &test_value("foo at 0x20"), &ctx)
4038 2 : .await?;
4039 2 : writer
4040 2 : .put(TEST_KEY_B, Lsn(0x20), &test_value("foobar at 0x20"), &ctx)
4041 2 : .await?;
4042 2 : writer.finish_write(Lsn(0x20));
4043 2 :
4044 2 : writer
4045 2 : .put(TEST_KEY_A, Lsn(0x30), &test_value("foo at 0x30"), &ctx)
4046 2 : .await?;
4047 2 : writer.finish_write(Lsn(0x30));
4048 2 : writer
4049 2 : .put(TEST_KEY_A, Lsn(0x40), &test_value("foo at 0x40"), &ctx)
4050 2 : .await?;
4051 2 : writer.finish_write(Lsn(0x40));
4052 2 :
4053 2 : //assert_current_logical_size(&tline, Lsn(0x40));
4054 2 :
4055 2 : // Branch the history, modify relation differently on the new timeline
4056 2 : tenant
4057 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x30)), &ctx)
4058 2 : .await?;
4059 2 : let newtline = tenant
4060 2 : .get_timeline(NEW_TIMELINE_ID, true)
4061 2 : .expect("Should have a local timeline");
4062 2 : let mut new_writer = newtline.writer().await;
4063 2 : new_writer
4064 2 : .put(TEST_KEY_A, Lsn(0x40), &test_value("bar at 0x40"), &ctx)
4065 2 : .await?;
4066 2 : new_writer.finish_write(Lsn(0x40));
4067 2 :
4068 2 : // Check page contents on both branches
4069 2 : assert_eq!(
4070 2 : from_utf8(&tline.get(TEST_KEY_A, Lsn(0x40), &ctx).await?)?,
4071 2 : "foo at 0x40"
4072 2 : );
4073 2 : assert_eq!(
4074 2 : from_utf8(&newtline.get(TEST_KEY_A, Lsn(0x40), &ctx).await?)?,
4075 2 : "bar at 0x40"
4076 2 : );
4077 2 : assert_eq!(
4078 2 : from_utf8(&newtline.get(TEST_KEY_B, Lsn(0x40), &ctx).await?)?,
4079 2 : "foobar at 0x20"
4080 2 : );
4081 2 :
4082 2 : //assert_current_logical_size(&tline, Lsn(0x40));
4083 2 :
4084 2 : Ok(())
4085 2 : }
4086 :
4087 20 : async fn make_some_layers(
4088 20 : tline: &Timeline,
4089 20 : start_lsn: Lsn,
4090 20 : ctx: &RequestContext,
4091 20 : ) -> anyhow::Result<()> {
4092 20 : let mut lsn = start_lsn;
4093 : {
4094 20 : let mut writer = tline.writer().await;
4095 : // Create a relation on the timeline
4096 20 : writer
4097 20 : .put(
4098 20 : *TEST_KEY,
4099 20 : lsn,
4100 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4101 20 : ctx,
4102 20 : )
4103 10 : .await?;
4104 20 : writer.finish_write(lsn);
4105 20 : lsn += 0x10;
4106 20 : writer
4107 20 : .put(
4108 20 : *TEST_KEY,
4109 20 : lsn,
4110 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4111 20 : ctx,
4112 20 : )
4113 0 : .await?;
4114 20 : writer.finish_write(lsn);
4115 20 : lsn += 0x10;
4116 20 : }
4117 20 : tline.freeze_and_flush().await?;
4118 : {
4119 20 : let mut writer = tline.writer().await;
4120 20 : writer
4121 20 : .put(
4122 20 : *TEST_KEY,
4123 20 : lsn,
4124 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4125 20 : ctx,
4126 20 : )
4127 10 : .await?;
4128 20 : writer.finish_write(lsn);
4129 20 : lsn += 0x10;
4130 20 : writer
4131 20 : .put(
4132 20 : *TEST_KEY,
4133 20 : lsn,
4134 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4135 20 : ctx,
4136 20 : )
4137 0 : .await?;
4138 20 : writer.finish_write(lsn);
4139 20 : }
4140 20 : tline.freeze_and_flush().await.map_err(|e| e.into())
4141 20 : }
4142 :
4143 : #[tokio::test]
4144 2 : async fn test_prohibit_branch_creation_on_garbage_collected_data() -> anyhow::Result<()> {
4145 2 : let (tenant, ctx) =
4146 2 : TenantHarness::create("test_prohibit_branch_creation_on_garbage_collected_data")?
4147 2 : .load()
4148 8 : .await;
4149 2 : let tline = tenant
4150 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4151 6 : .await?;
4152 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4153 2 :
4154 2 : // this removes layers before lsn 40 (50 minus 10), so there are two remaining layers, image and delta for 31-50
4155 2 : // FIXME: this doesn't actually remove any layer currently, given how flushing
4156 2 : // and compaction work. But it does set the 'cutoff' point so that the cross check
4157 2 : // below should fail.
4158 2 : tenant
4159 2 : .gc_iteration(
4160 2 : Some(TIMELINE_ID),
4161 2 : 0x10,
4162 2 : Duration::ZERO,
4163 2 : &CancellationToken::new(),
4164 2 : &ctx,
4165 2 : )
4166 2 : .await?;
4167 2 :
4168 2 : // try to branch at lsn 25, should fail because we already garbage collected the data
4169 2 : match tenant
4170 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x25)), &ctx)
4171 2 : .await
4172 2 : {
4173 2 : Ok(_) => panic!("branching should have failed"),
4174 2 : Err(err) => {
4175 2 : let CreateTimelineError::AncestorLsn(err) = err else {
4176 2 : panic!("wrong error type")
4177 2 : };
4178 2 : assert!(err.to_string().contains("invalid branch start lsn"));
4179 2 : assert!(err
4180 2 : .source()
4181 2 : .unwrap()
4182 2 : .to_string()
4183 2 : .contains("we might've already garbage collected needed data"))
4184 2 : }
4185 2 : }
4186 2 :
4187 2 : Ok(())
4188 2 : }
4189 :
4190 : #[tokio::test]
4191 2 : async fn test_prohibit_branch_creation_on_pre_initdb_lsn() -> anyhow::Result<()> {
4192 2 : let (tenant, ctx) =
4193 2 : TenantHarness::create("test_prohibit_branch_creation_on_pre_initdb_lsn")?
4194 2 : .load()
4195 8 : .await;
4196 2 :
4197 2 : let tline = tenant
4198 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x50), DEFAULT_PG_VERSION, &ctx)
4199 6 : .await?;
4200 2 : // try to branch at lsn 0x25, should fail because initdb lsn is 0x50
4201 2 : match tenant
4202 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x25)), &ctx)
4203 2 : .await
4204 2 : {
4205 2 : Ok(_) => panic!("branching should have failed"),
4206 2 : Err(err) => {
4207 2 : let CreateTimelineError::AncestorLsn(err) = err else {
4208 2 : panic!("wrong error type");
4209 2 : };
4210 2 : assert!(&err.to_string().contains("invalid branch start lsn"));
4211 2 : assert!(&err
4212 2 : .source()
4213 2 : .unwrap()
4214 2 : .to_string()
4215 2 : .contains("is earlier than latest GC horizon"));
4216 2 : }
4217 2 : }
4218 2 :
4219 2 : Ok(())
4220 2 : }
4221 :
4222 : /*
4223 : // FIXME: This currently fails to error out. Calling GC doesn't currently
4224 : // remove the old value, we'd need to work a little harder
4225 : #[tokio::test]
4226 : async fn test_prohibit_get_for_garbage_collected_data() -> anyhow::Result<()> {
4227 : let repo =
4228 : RepoHarness::create("test_prohibit_get_for_garbage_collected_data")?
4229 : .load();
4230 :
4231 : let tline = repo.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
4232 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4233 :
4234 : repo.gc_iteration(Some(TIMELINE_ID), 0x10, Duration::ZERO)?;
4235 : let latest_gc_cutoff_lsn = tline.get_latest_gc_cutoff_lsn();
4236 : assert!(*latest_gc_cutoff_lsn > Lsn(0x25));
4237 : match tline.get(*TEST_KEY, Lsn(0x25)) {
4238 : Ok(_) => panic!("request for page should have failed"),
4239 : Err(err) => assert!(err.to_string().contains("not found at")),
4240 : }
4241 : Ok(())
4242 : }
4243 : */
4244 :
4245 : #[tokio::test]
4246 2 : async fn test_get_branchpoints_from_an_inactive_timeline() -> anyhow::Result<()> {
4247 2 : let (tenant, ctx) =
4248 2 : TenantHarness::create("test_get_branchpoints_from_an_inactive_timeline")?
4249 2 : .load()
4250 8 : .await;
4251 2 : let tline = tenant
4252 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4253 6 : .await?;
4254 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4255 2 :
4256 2 : tenant
4257 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4258 2 : .await?;
4259 2 : let newtline = tenant
4260 2 : .get_timeline(NEW_TIMELINE_ID, true)
4261 2 : .expect("Should have a local timeline");
4262 2 :
4263 6 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
4264 2 :
4265 2 : tline.set_broken("test".to_owned());
4266 2 :
4267 2 : tenant
4268 2 : .gc_iteration(
4269 2 : Some(TIMELINE_ID),
4270 2 : 0x10,
4271 2 : Duration::ZERO,
4272 2 : &CancellationToken::new(),
4273 2 : &ctx,
4274 2 : )
4275 2 : .await?;
4276 2 :
4277 2 : // The branchpoints should contain all timelines, even ones marked
4278 2 : // as Broken.
4279 2 : {
4280 2 : let branchpoints = &tline.gc_info.read().unwrap().retain_lsns;
4281 2 : assert_eq!(branchpoints.len(), 1);
4282 2 : assert_eq!(branchpoints[0], Lsn(0x40));
4283 2 : }
4284 2 :
4285 2 : // You can read the key from the child branch even though the parent is
4286 2 : // Broken, as long as you don't need to access data from the parent.
4287 2 : assert_eq!(
4288 4 : newtline.get(*TEST_KEY, Lsn(0x70), &ctx).await?,
4289 2 : test_img(&format!("foo at {}", Lsn(0x70)))
4290 2 : );
4291 2 :
4292 2 : // This needs to traverse to the parent, and fails.
4293 2 : let err = newtline.get(*TEST_KEY, Lsn(0x50), &ctx).await.unwrap_err();
4294 2 : assert!(err.to_string().starts_with(&format!(
4295 2 : "Bad state on timeline {}: Broken",
4296 2 : tline.timeline_id
4297 2 : )));
4298 2 :
4299 2 : Ok(())
4300 2 : }
4301 :
4302 : #[tokio::test]
4303 2 : async fn test_retain_data_in_parent_which_is_needed_for_child() -> anyhow::Result<()> {
4304 2 : let (tenant, ctx) =
4305 2 : TenantHarness::create("test_retain_data_in_parent_which_is_needed_for_child")?
4306 2 : .load()
4307 8 : .await;
4308 2 : let tline = tenant
4309 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4310 6 : .await?;
4311 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4312 2 :
4313 2 : tenant
4314 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4315 2 : .await?;
4316 2 : let newtline = tenant
4317 2 : .get_timeline(NEW_TIMELINE_ID, true)
4318 2 : .expect("Should have a local timeline");
4319 2 : // this removes layers before lsn 40 (50 minus 10), so there are two remaining layers, image and delta for 31-50
4320 2 : tenant
4321 2 : .gc_iteration(
4322 2 : Some(TIMELINE_ID),
4323 2 : 0x10,
4324 2 : Duration::ZERO,
4325 2 : &CancellationToken::new(),
4326 2 : &ctx,
4327 2 : )
4328 2 : .await?;
4329 4 : assert!(newtline.get(*TEST_KEY, Lsn(0x25), &ctx).await.is_ok());
4330 2 :
4331 2 : Ok(())
4332 2 : }
4333 : #[tokio::test]
4334 2 : async fn test_parent_keeps_data_forever_after_branching() -> anyhow::Result<()> {
4335 2 : let (tenant, ctx) =
4336 2 : TenantHarness::create("test_parent_keeps_data_forever_after_branching")?
4337 2 : .load()
4338 8 : .await;
4339 2 : let tline = tenant
4340 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4341 6 : .await?;
4342 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4343 2 :
4344 2 : tenant
4345 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4346 2 : .await?;
4347 2 : let newtline = tenant
4348 2 : .get_timeline(NEW_TIMELINE_ID, true)
4349 2 : .expect("Should have a local timeline");
4350 2 :
4351 6 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
4352 2 :
4353 2 : // run gc on parent
4354 2 : tenant
4355 2 : .gc_iteration(
4356 2 : Some(TIMELINE_ID),
4357 2 : 0x10,
4358 2 : Duration::ZERO,
4359 2 : &CancellationToken::new(),
4360 2 : &ctx,
4361 2 : )
4362 2 : .await?;
4363 2 :
4364 2 : // Check that the data is still accessible on the branch.
4365 2 : assert_eq!(
4366 7 : newtline.get(*TEST_KEY, Lsn(0x50), &ctx).await?,
4367 2 : test_img(&format!("foo at {}", Lsn(0x40)))
4368 2 : );
4369 2 :
4370 2 : Ok(())
4371 2 : }
4372 :
4373 : #[tokio::test]
4374 2 : async fn timeline_load() -> anyhow::Result<()> {
4375 2 : const TEST_NAME: &str = "timeline_load";
4376 2 : let harness = TenantHarness::create(TEST_NAME)?;
4377 2 : {
4378 8 : let (tenant, ctx) = harness.load().await;
4379 2 : let tline = tenant
4380 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x7000), DEFAULT_PG_VERSION, &ctx)
4381 5 : .await?;
4382 6 : make_some_layers(tline.as_ref(), Lsn(0x8000), &ctx).await?;
4383 2 : // so that all uploads finish & we can call harness.load() below again
4384 2 : tenant
4385 2 : .shutdown(Default::default(), ShutdownMode::FreezeAndFlush)
4386 2 : .instrument(harness.span())
4387 2 : .await
4388 2 : .ok()
4389 2 : .unwrap();
4390 2 : }
4391 2 :
4392 8 : let (tenant, _ctx) = harness.load().await;
4393 2 : tenant
4394 2 : .get_timeline(TIMELINE_ID, true)
4395 2 : .expect("cannot load timeline");
4396 2 :
4397 2 : Ok(())
4398 2 : }
4399 :
4400 : #[tokio::test]
4401 2 : async fn timeline_load_with_ancestor() -> anyhow::Result<()> {
4402 2 : const TEST_NAME: &str = "timeline_load_with_ancestor";
4403 2 : let harness = TenantHarness::create(TEST_NAME)?;
4404 2 : // create two timelines
4405 2 : {
4406 8 : let (tenant, ctx) = harness.load().await;
4407 2 : let tline = tenant
4408 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4409 6 : .await?;
4410 2 :
4411 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4412 2 :
4413 2 : let child_tline = tenant
4414 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4415 2 : .await?;
4416 2 : child_tline.set_state(TimelineState::Active);
4417 2 :
4418 2 : let newtline = tenant
4419 2 : .get_timeline(NEW_TIMELINE_ID, true)
4420 2 : .expect("Should have a local timeline");
4421 2 :
4422 6 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
4423 2 :
4424 2 : // so that all uploads finish & we can call harness.load() below again
4425 2 : tenant
4426 2 : .shutdown(Default::default(), ShutdownMode::FreezeAndFlush)
4427 2 : .instrument(harness.span())
4428 4 : .await
4429 2 : .ok()
4430 2 : .unwrap();
4431 2 : }
4432 2 :
4433 2 : // check that both of them are initially unloaded
4434 12 : let (tenant, _ctx) = harness.load().await;
4435 2 :
4436 2 : // check that both, child and ancestor are loaded
4437 2 : let _child_tline = tenant
4438 2 : .get_timeline(NEW_TIMELINE_ID, true)
4439 2 : .expect("cannot get child timeline loaded");
4440 2 :
4441 2 : let _ancestor_tline = tenant
4442 2 : .get_timeline(TIMELINE_ID, true)
4443 2 : .expect("cannot get ancestor timeline loaded");
4444 2 :
4445 2 : Ok(())
4446 2 : }
4447 :
4448 : #[tokio::test]
4449 2 : async fn delta_layer_dumping() -> anyhow::Result<()> {
4450 2 : use storage_layer::AsLayerDesc;
4451 8 : let (tenant, ctx) = TenantHarness::create("test_layer_dumping")?.load().await;
4452 2 : let tline = tenant
4453 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4454 6 : .await?;
4455 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4456 2 :
4457 2 : let layer_map = tline.layers.read().await;
4458 2 : let level0_deltas = layer_map
4459 2 : .layer_map()
4460 2 : .get_level0_deltas()?
4461 2 : .into_iter()
4462 4 : .map(|desc| layer_map.get_from_desc(&desc))
4463 2 : .collect::<Vec<_>>();
4464 2 :
4465 2 : assert!(!level0_deltas.is_empty());
4466 2 :
4467 6 : for delta in level0_deltas {
4468 2 : // Ensure we are dumping a delta layer here
4469 4 : assert!(delta.layer_desc().is_delta);
4470 8 : delta.dump(true, &ctx).await.unwrap();
4471 2 : }
4472 2 :
4473 2 : Ok(())
4474 2 : }
4475 :
4476 : #[tokio::test]
4477 2 : async fn test_images() -> anyhow::Result<()> {
4478 8 : let (tenant, ctx) = TenantHarness::create("test_images")?.load().await;
4479 2 : let tline = tenant
4480 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
4481 6 : .await?;
4482 2 :
4483 2 : let mut writer = tline.writer().await;
4484 2 : writer
4485 2 : .put(
4486 2 : *TEST_KEY,
4487 2 : Lsn(0x10),
4488 2 : &Value::Image(test_img("foo at 0x10")),
4489 2 : &ctx,
4490 2 : )
4491 2 : .await?;
4492 2 : writer.finish_write(Lsn(0x10));
4493 2 : drop(writer);
4494 2 :
4495 2 : tline.freeze_and_flush().await?;
4496 2 : tline
4497 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4498 2 : .await?;
4499 2 :
4500 2 : let mut writer = tline.writer().await;
4501 2 : writer
4502 2 : .put(
4503 2 : *TEST_KEY,
4504 2 : Lsn(0x20),
4505 2 : &Value::Image(test_img("foo at 0x20")),
4506 2 : &ctx,
4507 2 : )
4508 2 : .await?;
4509 2 : writer.finish_write(Lsn(0x20));
4510 2 : drop(writer);
4511 2 :
4512 2 : tline.freeze_and_flush().await?;
4513 2 : tline
4514 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4515 2 : .await?;
4516 2 :
4517 2 : let mut writer = tline.writer().await;
4518 2 : writer
4519 2 : .put(
4520 2 : *TEST_KEY,
4521 2 : Lsn(0x30),
4522 2 : &Value::Image(test_img("foo at 0x30")),
4523 2 : &ctx,
4524 2 : )
4525 2 : .await?;
4526 2 : writer.finish_write(Lsn(0x30));
4527 2 : drop(writer);
4528 2 :
4529 2 : tline.freeze_and_flush().await?;
4530 2 : tline
4531 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4532 2 : .await?;
4533 2 :
4534 2 : let mut writer = tline.writer().await;
4535 2 : writer
4536 2 : .put(
4537 2 : *TEST_KEY,
4538 2 : Lsn(0x40),
4539 2 : &Value::Image(test_img("foo at 0x40")),
4540 2 : &ctx,
4541 2 : )
4542 2 : .await?;
4543 2 : writer.finish_write(Lsn(0x40));
4544 2 : drop(writer);
4545 2 :
4546 2 : tline.freeze_and_flush().await?;
4547 2 : tline
4548 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4549 2 : .await?;
4550 2 :
4551 2 : assert_eq!(
4552 4 : tline.get(*TEST_KEY, Lsn(0x10), &ctx).await?,
4553 2 : test_img("foo at 0x10")
4554 2 : );
4555 2 : assert_eq!(
4556 3 : tline.get(*TEST_KEY, Lsn(0x1f), &ctx).await?,
4557 2 : test_img("foo at 0x10")
4558 2 : );
4559 2 : assert_eq!(
4560 2 : tline.get(*TEST_KEY, Lsn(0x20), &ctx).await?,
4561 2 : test_img("foo at 0x20")
4562 2 : );
4563 2 : assert_eq!(
4564 4 : tline.get(*TEST_KEY, Lsn(0x30), &ctx).await?,
4565 2 : test_img("foo at 0x30")
4566 2 : );
4567 2 : assert_eq!(
4568 4 : tline.get(*TEST_KEY, Lsn(0x40), &ctx).await?,
4569 2 : test_img("foo at 0x40")
4570 2 : );
4571 2 :
4572 2 : Ok(())
4573 2 : }
4574 :
4575 4 : async fn bulk_insert_compact_gc(
4576 4 : tenant: &Tenant,
4577 4 : timeline: &Arc<Timeline>,
4578 4 : ctx: &RequestContext,
4579 4 : lsn: Lsn,
4580 4 : repeat: usize,
4581 4 : key_count: usize,
4582 4 : ) -> anyhow::Result<()> {
4583 4 : let compact = true;
4584 72774 : bulk_insert_maybe_compact_gc(tenant, timeline, ctx, lsn, repeat, key_count, compact).await
4585 4 : }
4586 :
4587 8 : async fn bulk_insert_maybe_compact_gc(
4588 8 : tenant: &Tenant,
4589 8 : timeline: &Arc<Timeline>,
4590 8 : ctx: &RequestContext,
4591 8 : mut lsn: Lsn,
4592 8 : repeat: usize,
4593 8 : key_count: usize,
4594 8 : compact: bool,
4595 8 : ) -> anyhow::Result<()> {
4596 8 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
4597 8 : let mut blknum = 0;
4598 8 :
4599 8 : // Enforce that key range is monotonously increasing
4600 8 : let mut keyspace = KeySpaceAccum::new();
4601 8 :
4602 8 : let cancel = CancellationToken::new();
4603 8 :
4604 8 : for _ in 0..repeat {
4605 400 : for _ in 0..key_count {
4606 4000000 : test_key.field6 = blknum;
4607 4000000 : let mut writer = timeline.writer().await;
4608 4000000 : writer
4609 4000000 : .put(
4610 4000000 : test_key,
4611 4000000 : lsn,
4612 4000000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
4613 4000000 : ctx,
4614 4000000 : )
4615 2600 : .await?;
4616 4000000 : writer.finish_write(lsn);
4617 4000000 : drop(writer);
4618 4000000 :
4619 4000000 : keyspace.add_key(test_key);
4620 4000000 :
4621 4000000 : lsn = Lsn(lsn.0 + 0x10);
4622 4000000 : blknum += 1;
4623 : }
4624 :
4625 400 : timeline.freeze_and_flush().await?;
4626 400 : if compact {
4627 : // this requires timeline to be &Arc<Timeline>
4628 40174 : timeline.compact(&cancel, EnumSet::empty(), ctx).await?;
4629 200 : }
4630 :
4631 : // This doesn't really need to target a specific timeline_id, but it is
4632 : // closer to what this code originally did.
4633 400 : let res = tenant
4634 400 : .gc_iteration(Some(timeline.timeline_id), 0, Duration::ZERO, &cancel, ctx)
4635 400 : .await?;
4636 :
4637 400 : assert_eq!(res.layers_removed, 0, "this never removes anything");
4638 : }
4639 :
4640 8 : Ok(())
4641 8 : }
4642 :
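// A minimal, std-only sketch of the keyspace accumulation pattern used in the
// helper above: keys are added in strictly increasing order and coalesced into
// contiguous ranges. `Accum` is a simplified stand-in for KeySpaceAccum, with
// plain u32 keys instead of the real Key type.
struct Accum {
    ranges: Vec<std::ops::Range<u32>>,
}

impl Accum {
    fn new() -> Self {
        Accum { ranges: Vec::new() }
    }

    /// Panics if `key` does not extend the keyspace to the right, mirroring
    /// the monotonicity requirement noted in the helper above.
    fn add_key(&mut self, key: u32) {
        match self.ranges.last_mut() {
            Some(last) if last.end == key => last.end = key + 1, // extend the current run
            Some(last) => {
                assert!(key > last.end, "keys must be added in increasing order");
                self.ranges.push(key..key + 1);
            }
            None => self.ranges.push(key..key + 1),
        }
    }
}

fn main() {
    let mut accum = Accum::new();
    for key in [0u32, 1, 2, 10] {
        accum.add_key(key);
    }
    // Contiguous keys collapse into one range; the gap starts a new one.
    assert_eq!(accum.ranges, vec![0..3, 10..11]);
}
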
4643 : //
4644 : // Insert 1000 key-value pairs with increasing keys, flush, compact, GC.
4645 : // Repeat 50 times.
4646 : //
4647 : #[tokio::test]
4648 2 : async fn test_bulk_insert() -> anyhow::Result<()> {
4649 2 : let harness = TenantHarness::create("test_bulk_insert")?;
4650 8 : let (tenant, ctx) = harness.load().await;
4651 2 : let tline = tenant
4652 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
4653 6 : .await?;
4654 2 :
4655 2 : let lsn = Lsn(0x10);
4656 36387 : bulk_insert_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000).await?;
4657 2 :
4658 2 : Ok(())
4659 2 : }
4660 :
4661 : // Test the vectored get real implementation against a simple sequential implementation.
4662 : //
4663 : // The test generates a keyspace by repeatedly flushing the in-memory layer and compacting.
4664 : // Projected to 2D the key space looks like below. Lsn grows upwards on the Y axis and keys
4665 : // grow to the right on the X axis.
4666 : // [Delta]
4667 : // [Delta]
4668 : // [Delta]
4669 : // [Delta]
4670 : // ------------ Image ---------------
4671 : //
4672 : // After layer generation we pick the ranges to query as follows:
4673 : // 1. The beginning of each delta layer
4674 : // 2. At the seam between two adjacent delta layers
4675 : //
4676 : // There's one major downside to this test: delta layers only contain images,
4677 : // so the search can stop at the first delta layer and doesn't traverse any deeper.
4678 : #[tokio::test]
4679 2 : async fn test_get_vectored() -> anyhow::Result<()> {
4680 2 : let harness = TenantHarness::create("test_get_vectored")?;
4681 8 : let (tenant, ctx) = harness.load().await;
4682 2 : let tline = tenant
4683 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
4684 6 : .await?;
4685 2 :
4686 2 : let lsn = Lsn(0x10);
4687 36387 : bulk_insert_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000).await?;
4688 2 :
4689 2 : let guard = tline.layers.read().await;
4690 2 : guard.layer_map().dump(true, &ctx).await?;
4691 2 :
4692 2 : let mut reads = Vec::new();
4693 2 : let mut prev = None;
4694 12 : guard.layer_map().iter_historic_layers().for_each(|desc| {
4695 12 : if !desc.is_delta() {
4696 2 : prev = Some(desc.clone());
4697 2 : return;
4698 10 : }
4699 10 :
4700 10 : let start = desc.key_range.start;
4701 10 : let end = desc
4702 10 : .key_range
4703 10 : .start
4704 10 : .add(Timeline::MAX_GET_VECTORED_KEYS.try_into().unwrap());
4705 10 : reads.push(KeySpace {
4706 10 : ranges: vec![start..end],
4707 10 : });
4708 2 :
4709 10 : if let Some(prev) = &prev {
4710 10 : if !prev.is_delta() {
4711 10 : return;
4712 2 : }
4713 0 :
4714 0 : let first_range = Key {
4715 0 : field6: prev.key_range.end.field6 - 4,
4716 0 : ..prev.key_range.end
4717 0 : }..prev.key_range.end;
4718 0 :
4719 0 : let second_range = desc.key_range.start..Key {
4720 0 : field6: desc.key_range.start.field6 + 4,
4721 0 : ..desc.key_range.start
4722 0 : };
4723 0 :
4724 0 : reads.push(KeySpace {
4725 0 : ranges: vec![first_range, second_range],
4726 0 : });
4727 2 : };
4728 2 :
4729 2 : prev = Some(desc.clone());
4730 12 : });
4731 2 :
4732 2 : drop(guard);
4733 2 :
4734 2 : // Pick a big LSN such that we query over all the changes.
4735 2 : let reads_lsn = Lsn(u64::MAX - 1);
4736 2 :
4737 12 : for read in reads {
4738 10 : info!("Doing vectored read on {:?}", read);
4739 2 :
4740 10 : let vectored_res = tline
4741 10 : .get_vectored_impl(
4742 10 : read.clone(),
4743 10 : reads_lsn,
4744 10 : &mut ValuesReconstructState::new(),
4745 10 : &ctx,
4746 10 : )
4747 25 : .await;
4748 10 : tline
4749 10 : .validate_get_vectored_impl(&vectored_res, read, reads_lsn, &ctx)
4750 19 : .await;
4751 2 : }
4752 2 :
4753 2 : Ok(())
4754 2 : }
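
// A self-contained, std-only sketch of the seam-range construction used by
// test_get_vectored above: for two adjacent delta layers, read a small window
// of keys on each side of the boundary. `LayerDesc` is a simplified stand-in
// for the pageserver type, and plain u32 keys replace the real Key type.
struct LayerDesc {
    key_range: std::ops::Range<u32>,
    is_delta: bool,
}

/// Pick query ranges straddling the seam between `prev` and `next`.
fn seam_ranges(prev: &LayerDesc, next: &LayerDesc) -> Option<[std::ops::Range<u32>; 2]> {
    if !prev.is_delta || !next.is_delta {
        return None;
    }
    let first = prev.key_range.end.saturating_sub(4)..prev.key_range.end;
    let second = next.key_range.start..next.key_range.start.saturating_add(4);
    Some([first, second])
}

fn main() {
    let a = LayerDesc { key_range: 0..100, is_delta: true };
    let b = LayerDesc { key_range: 100..200, is_delta: true };
    // The two ranges sit on either side of the boundary at key 100.
    assert_eq!(seam_ranges(&a, &b), Some([96..100, 100..104]));
}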
4755 :
4756 : #[tokio::test]
4757 2 : async fn test_get_vectored_aux_files() -> anyhow::Result<()> {
4758 2 : let harness = TenantHarness::create("test_get_vectored_aux_files")?;
4759 2 :
4760 8 : let (tenant, ctx) = harness.load().await;
4761 2 : let tline = tenant
4762 2 : .create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION, &ctx)
4763 2 : .await?;
4764 2 : let tline = tline.raw_timeline().unwrap();
4765 2 :
4766 2 : let mut modification = tline.begin_modification(Lsn(0x1000));
4767 2 : modification.put_file("foo/bar1", b"content1", &ctx).await?;
4768 2 : modification.set_lsn(Lsn(0x1008))?;
4769 2 : modification.put_file("foo/bar2", b"content2", &ctx).await?;
4770 2 : modification.commit(&ctx).await?;
4771 2 :
4772 2 : let child_timeline_id = TimelineId::generate();
4773 2 : tenant
4774 2 : .branch_timeline_test(
4775 2 : tline,
4776 2 : child_timeline_id,
4777 2 : Some(tline.get_last_record_lsn()),
4778 2 : &ctx,
4779 2 : )
4780 2 : .await?;
4781 2 :
4782 2 : let child_timeline = tenant
4783 2 : .get_timeline(child_timeline_id, true)
4784 2 : .expect("Should have the branched timeline");
4785 2 :
4786 2 : let aux_keyspace = KeySpace {
4787 2 : ranges: vec![NON_INHERITED_RANGE],
4788 2 : };
4789 2 : let read_lsn = child_timeline.get_last_record_lsn();
4790 2 :
4791 2 : let vectored_res = child_timeline
4792 2 : .get_vectored_impl(
4793 2 : aux_keyspace.clone(),
4794 2 : read_lsn,
4795 2 : &mut ValuesReconstructState::new(),
4796 2 : &ctx,
4797 2 : )
4798 2 : .await;
4799 2 :
4800 2 : child_timeline
4801 2 : .validate_get_vectored_impl(&vectored_res, aux_keyspace, read_lsn, &ctx)
4802 2 : .await;
4803 2 :
4804 2 : let images = vectored_res?;
4805 2 : assert!(images.is_empty());
4806 2 : Ok(())
4807 2 : }
4808 :
4809 : // Test that vectored get handles layer gaps correctly
4810 : // by advancing into the next ancestor timeline if required.
4811 : //
4812 : // The test generates timelines that look like the diagram below.
4813 : // We leave a gap in one of the L1 layers at `gap_at_key` (`/` in the diagram).
4814 : // The reconstruct data for that key lies in the ancestor timeline (`X` in the diagram).
4815 : //
4816 : // ```
4817 : //-------------------------------+
4818 : // ... |
4819 : // [ L1 ] |
4820 : // [ / L1 ] | Child Timeline
4821 : // ... |
4822 : // ------------------------------+
4823 : // [ X L1 ] | Parent Timeline
4824 : // ------------------------------+
4825 : // ```
4826 : #[tokio::test]
4827 2 : async fn test_get_vectored_key_gap() -> anyhow::Result<()> {
4828 2 : let tenant_conf = TenantConf {
4829 2 : // Make compaction deterministic
4830 2 : gc_period: Duration::ZERO,
4831 2 : compaction_period: Duration::ZERO,
4832 2 : // Encourage creation of L1 layers
4833 2 : checkpoint_distance: 16 * 1024,
4834 2 : compaction_target_size: 8 * 1024,
4835 2 : ..TenantConf::default()
4836 2 : };
4837 2 :
4838 2 : let harness = TenantHarness::create_custom(
4839 2 : "test_get_vectored_key_gap",
4840 2 : tenant_conf,
4841 2 : TenantId::generate(),
4842 2 : ShardIdentity::unsharded(),
4843 2 : Generation::new(0xdeadbeef),
4844 2 : )?;
4845 8 : let (tenant, ctx) = harness.load().await;
4846 2 :
4847 2 : let mut current_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
4848 2 : let gap_at_key = current_key.add(100);
4849 2 : let mut current_lsn = Lsn(0x10);
4850 2 :
4851 2 : const KEY_COUNT: usize = 10_000;
4852 2 :
4853 2 : let timeline_id = TimelineId::generate();
4854 2 : let current_timeline = tenant
4855 2 : .create_test_timeline(timeline_id, current_lsn, DEFAULT_PG_VERSION, &ctx)
4856 6 : .await?;
4857 2 :
4858 2 : current_lsn += 0x100;
4859 2 :
4860 2 : let mut writer = current_timeline.writer().await;
4861 2 : writer
4862 2 : .put(
4863 2 : gap_at_key,
4864 2 : current_lsn,
4865 2 : &Value::Image(test_img(&format!("{} at {}", gap_at_key, current_lsn))),
4866 2 : &ctx,
4867 2 : )
4868 2 : .await?;
4869 2 : writer.finish_write(current_lsn);
4870 2 : drop(writer);
4871 2 :
4872 2 : let mut latest_lsns = HashMap::new();
4873 2 : latest_lsns.insert(gap_at_key, current_lsn);
4874 2 :
4875 2 : current_timeline.freeze_and_flush().await?;
4876 2 :
4877 2 : let child_timeline_id = TimelineId::generate();
4878 2 :
4879 2 : tenant
4880 2 : .branch_timeline_test(
4881 2 : ¤t_timeline,
4882 2 : child_timeline_id,
4883 2 : Some(current_lsn),
4884 2 : &ctx,
4885 2 : )
4886 2 : .await?;
4887 2 : let child_timeline = tenant
4888 2 : .get_timeline(child_timeline_id, true)
4889 2 : .expect("Should have the branched timeline");
4890 2 :
4891 20002 : for i in 0..KEY_COUNT {
4892 20000 : if current_key == gap_at_key {
4893 2 : current_key = current_key.next();
4894 2 : continue;
4895 19998 : }
4896 19998 :
4897 19998 : current_lsn += 0x10;
4898 2 :
4899 19998 : let mut writer = child_timeline.writer().await;
4900 19998 : writer
4901 19998 : .put(
4902 19998 : current_key,
4903 19998 : current_lsn,
4904 19998 : &Value::Image(test_img(&format!("{} at {}", current_key, current_lsn))),
4905 19998 : &ctx,
4906 19998 : )
4907 76 : .await?;
4908 19998 : writer.finish_write(current_lsn);
4909 19998 : drop(writer);
4910 19998 :
4911 19998 : latest_lsns.insert(current_key, current_lsn);
4912 19998 : current_key = current_key.next();
4913 19998 :
4914 19998 : // Flush every now and then to encourage layer file creation.
4915 19998 : if i % 500 == 0 {
4916 44 : child_timeline.freeze_and_flush().await?;
4917 19958 : }
4918 2 : }
4919 2 :
4920 3 : child_timeline.freeze_and_flush().await?;
4921 2 : let mut flags = EnumSet::new();
4922 2 : flags.insert(CompactFlags::ForceRepartition);
4923 2 : child_timeline
4924 2 : .compact(&CancellationToken::new(), flags, &ctx)
4925 2184 : .await?;
4926 2 :
4927 2 : let key_near_end = {
4928 2 : let mut tmp = current_key;
4929 2 : tmp.field6 -= 10;
4930 2 : tmp
4931 2 : };
4932 2 :
4933 2 : let key_near_gap = {
4934 2 : let mut tmp = gap_at_key;
4935 2 : tmp.field6 -= 10;
4936 2 : tmp
4937 2 : };
4938 2 :
4939 2 : let read = KeySpace {
4940 2 : ranges: vec![key_near_gap..gap_at_key.next(), key_near_end..current_key],
4941 2 : };
4942 2 : let results = child_timeline
4943 2 : .get_vectored_impl(
4944 2 : read.clone(),
4945 2 : current_lsn,
4946 2 : &mut ValuesReconstructState::new(),
4947 2 : &ctx,
4948 2 : )
4949 15 : .await?;
4950 2 :
4951 44 : for (key, img_res) in results {
4952 42 : let expected = test_img(&format!("{} at {}", key, latest_lsns[&key]));
4953 42 : assert_eq!(img_res?, expected);
4954 2 : }
4955 2 :
4956 2 : Ok(())
4957 2 : }
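
// A hedged, std-only model of the ancestor descent the gap test above relies
// on: when a key has no reconstruct data in the child timeline's layers (the
// "/" gap in the diagram), the read continues into the parent at the branch
// point (the "X"). The types below are illustrative stand-ins, not the
// pageserver read path.
use std::collections::HashMap;

struct TimelineModel<'a> {
    images: HashMap<u32, &'a str>,           // key -> latest image in this timeline
    ancestor: Option<&'a TimelineModel<'a>>, // parent timeline, if branched
}

fn get<'a>(tline: &TimelineModel<'a>, key: u32) -> Option<&'a str> {
    tline.images.get(&key).copied().or_else(|| {
        // Gap in this timeline's layers: descend into the ancestor.
        tline.ancestor.and_then(|a| get(a, key))
    })
}

fn main() {
    let parent = TimelineModel {
        images: HashMap::from([(100, "X: reconstruct data on the parent")]),
        ancestor: None,
    };
    let child = TimelineModel {
        images: HashMap::from([(1, "L1 data")]), // key 100 is the gap ("/")
        ancestor: Some(&parent),
    };
    assert_eq!(get(&child, 100), Some("X: reconstruct data on the parent"));
}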
4958 :
4959 : // Test that vectored get descends into ancestor timelines correctly and
4960 : // does not return an image that's newer than requested.
4961 : //
4962 : // The diagram below illustrates an interesting case. We have a parent timeline
4963 : // (top of the Lsn range) and a child timeline. The request key cannot be reconstructed
4964 : // from the child timeline, so the parent timeline must be visited. When advancing into
4965 : // the ancestor timeline, the read path needs to remember what the requested Lsn was in
4966 : // order to avoid returning an image that's too new. The test below constructs such
4967 : // a timeline setup and does a few queries around the Lsn of each page image.
4968 : // ```
4969 : // LSN
4970 : // ^
4971 : // |
4972 : // |
4973 : // 500 | --------------------------------------> branch point
4974 : // 400 | X
4975 : // 300 | X
4976 : // 200 | --------------------------------------> requested lsn
4977 : // 100 | X
4978 : // |---------------------------------------> Key
4979 : // |
4980 : // ------> requested key
4981 : //
4982 : // Legend:
4983 : // * X - page images
4984 : // ```
4985 : #[tokio::test]
4986 2 : async fn test_get_vectored_ancestor_descent() -> anyhow::Result<()> {
4987 2 : let harness = TenantHarness::create("test_get_vectored_on_lsn_axis")?;
4988 8 : let (tenant, ctx) = harness.load().await;
4989 2 :
4990 2 : let start_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
4991 2 : let end_key = start_key.add(1000);
4992 2 : let child_gap_at_key = start_key.add(500);
4993 2 : let mut parent_gap_lsns: BTreeMap<Lsn, String> = BTreeMap::new();
4994 2 :
4995 2 : let mut current_lsn = Lsn(0x10);
4996 2 :
4997 2 : let timeline_id = TimelineId::generate();
4998 2 : let parent_timeline = tenant
4999 2 : .create_test_timeline(timeline_id, current_lsn, DEFAULT_PG_VERSION, &ctx)
5000 6 : .await?;
5001 2 :
5002 2 : current_lsn += 0x100;
5003 2 :
5004 8 : for _ in 0..3 {
5005 6 : let mut key = start_key;
5006 6006 : while key < end_key {
5007 6000 : current_lsn += 0x10;
5008 6000 :
5009 6000 : let image_value = format!("{} at {}", child_gap_at_key, current_lsn);
5010 2 :
5011 6000 : let mut writer = parent_timeline.writer().await;
5012 6000 : writer
5013 6000 : .put(
5014 6000 : key,
5015 6000 : current_lsn,
5016 6000 : &Value::Image(test_img(&image_value)),
5017 6000 : &ctx,
5018 6000 : )
5019 6 : .await?;
5020 6000 : writer.finish_write(current_lsn);
5021 6000 :
5022 6000 : if key == child_gap_at_key {
5023 6 : parent_gap_lsns.insert(current_lsn, image_value);
5024 5994 : }
5025 2 :
5026 6000 : key = key.next();
5027 2 : }
5028 2 :
5029 6 : parent_timeline.freeze_and_flush().await?;
5030 2 : }
5031 2 :
5032 2 : let child_timeline_id = TimelineId::generate();
5033 2 :
5034 2 : let child_timeline = tenant
5035 2 : .branch_timeline_test(&parent_timeline, child_timeline_id, Some(current_lsn), &ctx)
5036 2 : .await?;
5037 2 :
5038 2 : let mut key = start_key;
5039 2002 : while key < end_key {
5040 2000 : if key == child_gap_at_key {
5041 2 : key = key.next();
5042 2 : continue;
5043 1998 : }
5044 1998 :
5045 1998 : current_lsn += 0x10;
5046 2 :
5047 1998 : let mut writer = child_timeline.writer().await;
5048 1998 : writer
5049 1998 : .put(
5050 1998 : key,
5051 1998 : current_lsn,
5052 1998 : &Value::Image(test_img(&format!("{} at {}", key, current_lsn))),
5053 1998 : &ctx,
5054 1998 : )
5055 17 : .await?;
5056 1998 : writer.finish_write(current_lsn);
5057 1998 :
5058 1998 : key = key.next();
5059 2 : }
5060 2 :
5061 2 : child_timeline.freeze_and_flush().await?;
5062 2 :
5063 2 : let lsn_offsets: [i64; 5] = [-10, -1, 0, 1, 10];
5064 2 : let mut query_lsns = Vec::new();
5065 6 : for image_lsn in parent_gap_lsns.keys().rev() {
5066 36 : for offset in lsn_offsets {
5067 30 : query_lsns.push(Lsn(image_lsn
5068 30 : .0
5069 30 : .checked_add_signed(offset)
5070 30 : .expect("Shouldn't overflow")));
5071 30 : }
5072 2 : }
5073 2 :
5074 32 : for query_lsn in query_lsns {
5075 30 : let results = child_timeline
5076 30 : .get_vectored_impl(
5077 30 : KeySpace {
5078 30 : ranges: vec![child_gap_at_key..child_gap_at_key.next()],
5079 30 : },
5080 30 : query_lsn,
5081 30 : &mut ValuesReconstructState::new(),
5082 30 : &ctx,
5083 30 : )
5084 29 : .await;
5085 2 :
5086 30 : let expected_item = parent_gap_lsns
5087 30 : .iter()
5088 30 : .rev()
5089 68 : .find(|(lsn, _)| **lsn <= query_lsn);
5090 30 :
5091 30 : info!(
5092 2 : "Doing vectored read at LSN {}. Expecting image to be: {:?}",
5093 2 : query_lsn, expected_item
5094 2 : );
5095 2 :
5096 30 : match expected_item {
5097 26 : Some((_, img_value)) => {
5098 26 : let key_results = results.expect("No vectored get error expected");
5099 26 : let key_result = &key_results[&child_gap_at_key];
5100 26 : let returned_img = key_result
5101 26 : .as_ref()
5102 26 : .expect("No page reconstruct error expected");
5103 26 :
5104 26 : info!(
5105 2 : "Vectored read at LSN {} returned image {}",
5106 0 : query_lsn,
5107 0 : std::str::from_utf8(returned_img)?
5108 2 : );
5109 26 : assert_eq!(*returned_img, test_img(img_value));
5110 2 : }
5111 2 : None => {
5112 4 : assert!(matches!(results, Err(GetVectoredError::MissingKey(_))));
5113 2 : }
5114 2 : }
5115 2 : }
5116 2 :
5117 2 : Ok(())
5118 2 : }
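
// The expected-image selection above walks parent_gap_lsns in reverse looking
// for the newest entry at or below the query LSN. A minimal sketch of the
// same rule using std's BTreeMap (u64 stands in for Lsn):
use std::collections::BTreeMap;

fn newest_at_or_below(images: &BTreeMap<u64, String>, query_lsn: u64) -> Option<(&u64, &String)> {
    // Range query over everything at or below query_lsn; the last entry wins.
    images.range(..=query_lsn).next_back()
}

fn main() {
    let mut images = BTreeMap::new();
    images.insert(100, "img@100".to_string());
    images.insert(300, "img@300".to_string());
    images.insert(400, "img@400".to_string());
    // Requested LSN 200: the image at 300 is too new, so 100 is returned.
    assert_eq!(newest_at_or_below(&images, 200).map(|(lsn, _)| *lsn), Some(100));
    // Requested LSN below the oldest image: nothing to return (the test
    // expects GetVectoredError::MissingKey in that case).
    assert_eq!(newest_at_or_below(&images, 90).map(|(lsn, _)| *lsn), None);
}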
5119 :
5120 : #[tokio::test]
5121 2 : async fn test_random_updates() -> anyhow::Result<()> {
5122 2 : let names_algorithms = [
5123 2 : ("test_random_updates_legacy", CompactionAlgorithm::Legacy),
5124 2 : ("test_random_updates_tiered", CompactionAlgorithm::Tiered),
5125 2 : ];
5126 6 : for (name, algorithm) in names_algorithms {
5127 17988 : test_random_updates_algorithm(name, algorithm).await?;
5128 2 : }
5129 2 : Ok(())
5130 2 : }
5131 :
5132 4 : async fn test_random_updates_algorithm(
5133 4 : name: &'static str,
5134 4 : compaction_algorithm: CompactionAlgorithm,
5135 4 : ) -> anyhow::Result<()> {
5136 4 : let mut harness = TenantHarness::create(name)?;
5137 4 : harness.tenant_conf.compaction_algorithm = CompactionAlgorithmSettings {
5138 4 : kind: compaction_algorithm,
5139 4 : };
5140 16 : let (tenant, ctx) = harness.load().await;
5141 4 : let tline = tenant
5142 4 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5143 11 : .await?;
5144 :
5145 : const NUM_KEYS: usize = 1000;
5146 4 : let cancel = CancellationToken::new();
5147 4 :
5148 4 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5149 4 : let mut test_key_end = test_key;
5150 4 : test_key_end.field6 = NUM_KEYS as u32;
5151 4 : tline.add_extra_test_dense_keyspace(KeySpace::single(test_key..test_key_end));
5152 4 :
5153 4 : let mut keyspace = KeySpaceAccum::new();
5154 4 :
5155 4 : // Track when each page was last modified. Used to assert that
5156 4 : // a read sees the latest page version.
5157 4 : let mut updated = [Lsn(0); NUM_KEYS];
5158 4 :
5159 4 : let mut lsn = Lsn(0x10);
5160 : #[allow(clippy::needless_range_loop)]
5161 4004 : for blknum in 0..NUM_KEYS {
5162 4000 : lsn = Lsn(lsn.0 + 0x10);
5163 4000 : test_key.field6 = blknum as u32;
5164 4000 : let mut writer = tline.writer().await;
5165 4000 : writer
5166 4000 : .put(
5167 4000 : test_key,
5168 4000 : lsn,
5169 4000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5170 4000 : &ctx,
5171 4000 : )
5172 4 : .await?;
5173 4000 : writer.finish_write(lsn);
5174 4000 : updated[blknum] = lsn;
5175 4000 : drop(writer);
5176 4000 :
5177 4000 : keyspace.add_key(test_key);
5178 : }
5179 :
5180 204 : for _ in 0..50 {
5181 200200 : for _ in 0..NUM_KEYS {
5182 200000 : lsn = Lsn(lsn.0 + 0x10);
5183 200000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5184 200000 : test_key.field6 = blknum as u32;
5185 200000 : let mut writer = tline.writer().await;
5186 200000 : writer
5187 200000 : .put(
5188 200000 : test_key,
5189 200000 : lsn,
5190 200000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5191 200000 : &ctx,
5192 200000 : )
5193 198 : .await?;
5194 200000 : writer.finish_write(lsn);
5195 200000 : drop(writer);
5196 200000 : updated[blknum] = lsn;
5197 : }
5198 :
5199 : // Read all the blocks
5200 200000 : for (blknum, last_lsn) in updated.iter().enumerate() {
5201 200000 : test_key.field6 = blknum as u32;
5202 200000 : assert_eq!(
5203 200000 : tline.get(test_key, lsn, &ctx).await?,
5204 200000 : test_img(&format!("{} at {}", blknum, last_lsn))
5205 : );
5206 : }
5207 :
5208 : // Perform a cycle of flush and GC
5209 208 : tline.freeze_and_flush().await?;
5210 200 : tenant
5211 200 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
5212 196 : .await?;
5213 : }
5214 :
5215 4 : Ok(())
5216 4 : }
5217 :
5218 : #[tokio::test]
5219 2 : async fn test_traverse_branches() -> anyhow::Result<()> {
5220 2 : let (tenant, ctx) = TenantHarness::create("test_traverse_branches")?
5221 2 : .load()
5222 8 : .await;
5223 2 : let mut tline = tenant
5224 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5225 6 : .await?;
5226 2 :
5227 2 : const NUM_KEYS: usize = 1000;
5228 2 :
5229 2 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5230 2 :
5231 2 : let mut keyspace = KeySpaceAccum::new();
5232 2 :
5233 2 : let cancel = CancellationToken::new();
5234 2 :
5235 2 : // Track when each page was last modified. Used to assert that
5236 2 : // a read sees the latest page version.
5237 2 : let mut updated = [Lsn(0); NUM_KEYS];
5238 2 :
5239 2 : let mut lsn = Lsn(0x10);
5240 2 : #[allow(clippy::needless_range_loop)]
5241 2002 : for blknum in 0..NUM_KEYS {
5242 2000 : lsn = Lsn(lsn.0 + 0x10);
5243 2000 : test_key.field6 = blknum as u32;
5244 2000 : let mut writer = tline.writer().await;
5245 2000 : writer
5246 2000 : .put(
5247 2000 : test_key,
5248 2000 : lsn,
5249 2000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5250 2000 : &ctx,
5251 2000 : )
5252 2 : .await?;
5253 2000 : writer.finish_write(lsn);
5254 2000 : updated[blknum] = lsn;
5255 2000 : drop(writer);
5256 2000 :
5257 2000 : keyspace.add_key(test_key);
5258 2 : }
5259 2 :
5260 102 : for _ in 0..50 {
5261 100 : let new_tline_id = TimelineId::generate();
5262 100 : tenant
5263 100 : .branch_timeline_test(&tline, new_tline_id, Some(lsn), &ctx)
5264 2 : .await?;
5265 100 : tline = tenant
5266 100 : .get_timeline(new_tline_id, true)
5267 100 : .expect("Should have the branched timeline");
5268 2 :
5269 100100 : for _ in 0..NUM_KEYS {
5270 100000 : lsn = Lsn(lsn.0 + 0x10);
5271 100000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5272 100000 : test_key.field6 = blknum as u32;
5273 100000 : let mut writer = tline.writer().await;
5274 100000 : writer
5275 100000 : .put(
5276 100000 : test_key,
5277 100000 : lsn,
5278 100000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5279 100000 : &ctx,
5280 100000 : )
5281 851 : .await?;
5282 100000 : println!("updating {} at {}", blknum, lsn);
5283 100000 : writer.finish_write(lsn);
5284 100000 : drop(writer);
5285 100000 : updated[blknum] = lsn;
5286 2 : }
5287 2 :
5288 2 : // Read all the blocks
5289 100000 : for (blknum, last_lsn) in updated.iter().enumerate() {
5290 100000 : test_key.field6 = blknum as u32;
5291 100000 : assert_eq!(
5292 100000 : tline.get(test_key, lsn, &ctx).await?,
5293 100000 : test_img(&format!("{} at {}", blknum, last_lsn))
5294 2 : );
5295 2 : }
5296 2 :
5297 2 : // Perform a cycle of flush, compact, and GC
5298 101 : tline.freeze_and_flush().await?;
5299 12975 : tline.compact(&cancel, EnumSet::empty(), &ctx).await?;
5300 100 : tenant
5301 100 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
5302 96 : .await?;
5303 2 : }
5304 2 :
5305 2 : Ok(())
5306 2 : }
5307 :
5308 : #[tokio::test]
5309 2 : async fn test_traverse_ancestors() -> anyhow::Result<()> {
5310 2 : let (tenant, ctx) = TenantHarness::create("test_traverse_ancestors")?
5311 2 : .load()
5312 8 : .await;
5313 2 : let mut tline = tenant
5314 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5315 6 : .await?;
5316 2 :
5317 2 : const NUM_KEYS: usize = 100;
5318 2 : const NUM_TLINES: usize = 50;
5319 2 :
5320 2 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5321 2 : // Track page mutation lsns across different timelines.
5322 2 : let mut updated = [[Lsn(0); NUM_KEYS]; NUM_TLINES];
5323 2 :
5324 2 : let mut lsn = Lsn(0x10);
5325 2 :
5326 2 : #[allow(clippy::needless_range_loop)]
5327 102 : for idx in 0..NUM_TLINES {
5328 100 : let new_tline_id = TimelineId::generate();
5329 100 : tenant
5330 100 : .branch_timeline_test(&tline, new_tline_id, Some(lsn), &ctx)
5331 2 : .await?;
5332 100 : tline = tenant
5333 100 : .get_timeline(new_tline_id, true)
5334 100 : .expect("Should have the branched timeline");
5335 2 :
5336 10100 : for _ in 0..NUM_KEYS {
5337 10000 : lsn = Lsn(lsn.0 + 0x10);
5338 10000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5339 10000 : test_key.field6 = blknum as u32;
5340 10000 : let mut writer = tline.writer().await;
5341 10000 : writer
5342 10000 : .put(
5343 10000 : test_key,
5344 10000 : lsn,
5345 10000 : &Value::Image(test_img(&format!("{} {} at {}", idx, blknum, lsn))),
5346 10000 : &ctx,
5347 10000 : )
5348 88 : .await?;
5349 10000 : println!("updating [{}][{}] at {}", idx, blknum, lsn);
5350 10000 : writer.finish_write(lsn);
5351 10000 : drop(writer);
5352 10000 : updated[idx][blknum] = lsn;
5353 2 : }
5354 2 : }
5355 2 :
5356 2 : // Read pages from leaf timeline across all ancestors.
5357 100 : for (idx, lsns) in updated.iter().enumerate() {
5358 10000 : for (blknum, lsn) in lsns.iter().enumerate() {
5359 2 : // Skip empty mutations.
5360 10000 : if lsn.0 == 0 {
5361 3652 : continue;
5362 6348 : }
5363 6348 : println!("checking [{idx}][{blknum}] at {lsn}");
5364 6348 : test_key.field6 = blknum as u32;
5365 6348 : assert_eq!(
5366 6348 : tline.get(test_key, *lsn, &ctx).await?,
5367 6348 : test_img(&format!("{idx} {blknum} at {lsn}"))
5368 2 : );
5369 2 : }
5370 2 : }
5371 2 : Ok(())
5372 2 : }
5373 :
5374 : #[tokio::test]
5375 2 : async fn test_write_at_initdb_lsn_takes_optimization_code_path() -> anyhow::Result<()> {
5376 2 : let (tenant, ctx) = TenantHarness::create("test_empty_test_timeline_is_usable")?
5377 2 : .load()
5378 8 : .await;
5379 2 :
5380 2 : let initdb_lsn = Lsn(0x20);
5381 2 : let utline = tenant
5382 2 : .create_empty_timeline(TIMELINE_ID, initdb_lsn, DEFAULT_PG_VERSION, &ctx)
5383 2 : .await?;
5384 2 : let tline = utline.raw_timeline().unwrap();
5385 2 :
5386 2 : // Spawn flush loop now so that we can set the `expect_initdb_optimization`
5387 2 : tline.maybe_spawn_flush_loop();
5388 2 :
5389 2 : // Make sure the timeline has the minimum set of required keys for operation.
5390 2 : // The only operation you can always do on an empty timeline is to `put` new data.
5391 2 : // Except if you `put` at `initdb_lsn`.
5392 2 : // In that case, there's an optimization to directly create image layers instead of delta layers.
5393 2 : // It uses `repartition()`, which assumes some keys to be present.
5394 2 : // Let's make sure the test timeline can handle that case.
5395 2 : {
5396 2 : let mut state = tline.flush_loop_state.lock().unwrap();
5397 2 : assert_eq!(
5398 2 : timeline::FlushLoopState::Running {
5399 2 : expect_initdb_optimization: false,
5400 2 : initdb_optimization_count: 0,
5401 2 : },
5402 2 : *state
5403 2 : );
5404 2 : *state = timeline::FlushLoopState::Running {
5405 2 : expect_initdb_optimization: true,
5406 2 : initdb_optimization_count: 0,
5407 2 : };
5408 2 : }
5409 2 :
5410 2 : // Make writes at the initdb_lsn. When we flush it below, it should be handled by the optimization.
5411 2 : // As explained above, the optimization requires some keys to be present.
5412 2 : // As per `create_empty_timeline` documentation, use init_empty to set them.
5413 2 : // This is what `create_test_timeline` does, by the way.
5414 2 : let mut modification = tline.begin_modification(initdb_lsn);
5415 2 : modification
5416 2 : .init_empty_test_timeline()
5417 2 : .context("init_empty_test_timeline")?;
5418 2 : modification
5419 2 : .commit(&ctx)
5420 2 : .await
5421 2 : .context("commit init_empty_test_timeline modification")?;
5422 2 :
5423 2 : // Do the flush. The flush code will check the expectations that we set above.
5424 2 : tline.freeze_and_flush().await?;
5425 2 :
5426 2 : // assert freeze_and_flush exercised the initdb optimization
5427 2 : {
5428 2 : let state = tline.flush_loop_state.lock().unwrap();
5429 2 : let timeline::FlushLoopState::Running {
5430 2 : expect_initdb_optimization,
5431 2 : initdb_optimization_count,
5432 2 : } = *state
5433 2 : else {
5434 2 : panic!("unexpected state: {:?}", *state);
5435 2 : };
5436 2 : assert!(expect_initdb_optimization);
5437 2 : assert!(initdb_optimization_count > 0);
5438 2 : }
5439 2 : Ok(())
5440 2 : }
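
// A simplified, illustrative model of the decision the test above checks; it
// is not the actual flush-loop code. Assumption in this sketch: the initdb
// optimization applies only when the frozen layer covers exactly the LSN
// range [initdb_lsn, initdb_lsn + 1), in which case image layers are created
// directly instead of a delta layer.
#[derive(Debug, PartialEq)]
enum FlushOutput {
    ImageLayers, // the optimization path counted by initdb_optimization_count
    DeltaLayer,  // the normal path
}

fn flush_kind(frozen: std::ops::Range<u64>, initdb_lsn: u64) -> FlushOutput {
    if frozen.start == initdb_lsn && frozen.end == initdb_lsn + 1 {
        FlushOutput::ImageLayers
    } else {
        FlushOutput::DeltaLayer
    }
}

fn main() {
    let initdb_lsn = 0x20;
    assert_eq!(flush_kind(0x20..0x21, initdb_lsn), FlushOutput::ImageLayers);
    assert_eq!(flush_kind(0x20..0x40, initdb_lsn), FlushOutput::DeltaLayer);
}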
5441 :
5442 : #[tokio::test]
5443 2 : async fn test_create_guard_crash() -> anyhow::Result<()> {
5444 2 : let name = "test_create_guard_crash";
5445 2 : let harness = TenantHarness::create(name)?;
5446 2 : {
5447 8 : let (tenant, ctx) = harness.load().await;
5448 2 : let tline = tenant
5449 2 : .create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION, &ctx)
5450 2 : .await?;
5451 2 : // Leave the timeline ID in [`Tenant::timelines_creating`] to prevent it from being created again
5452 2 : let raw_tline = tline.raw_timeline().unwrap();
5453 2 : raw_tline
5454 2 : .shutdown(super::timeline::ShutdownMode::Hard)
5455 2 : .instrument(info_span!("test_shutdown", tenant_id=%raw_tline.tenant_shard_id, shard_id=%raw_tline.tenant_shard_id.shard_slug(), timeline_id=%TIMELINE_ID))
5456 2 : .await;
5457 2 : std::mem::forget(tline);
5458 2 : }
5459 2 :
5460 8 : let (tenant, _) = harness.load().await;
5461 2 : match tenant.get_timeline(TIMELINE_ID, false) {
5462 2 : Ok(_) => panic!("timeline should've been removed during load"),
5463 2 : Err(e) => {
5464 2 : assert_eq!(
5465 2 : e,
5466 2 : GetTimelineError::NotFound {
5467 2 : tenant_id: tenant.tenant_shard_id,
5468 2 : timeline_id: TIMELINE_ID,
5469 2 : }
5470 2 : )
5471 2 : }
5472 2 : }
5473 2 :
5474 2 : assert!(!harness
5475 2 : .conf
5476 2 : .timeline_path(&tenant.tenant_shard_id, &TIMELINE_ID)
5477 2 : .exists());
5478 2 :
5479 2 : Ok(())
5480 2 : }
5481 :
5482 : #[tokio::test]
5483 2 : async fn test_read_at_max_lsn() -> anyhow::Result<()> {
5484 2 : let names_algorithms = [
5485 2 : ("test_read_at_max_lsn_legacy", CompactionAlgorithm::Legacy),
5486 2 : ("test_read_at_max_lsn_tiered", CompactionAlgorithm::Tiered),
5487 2 : ];
5488 6 : for (name, algorithm) in names_algorithms {
5489 32937 : test_read_at_max_lsn_algorithm(name, algorithm).await?;
5490 2 : }
5491 2 : Ok(())
5492 2 : }
5493 :
5494 4 : async fn test_read_at_max_lsn_algorithm(
5495 4 : name: &'static str,
5496 4 : compaction_algorithm: CompactionAlgorithm,
5497 4 : ) -> anyhow::Result<()> {
5498 4 : let mut harness = TenantHarness::create(name)?;
5499 4 : harness.tenant_conf.compaction_algorithm = CompactionAlgorithmSettings {
5500 4 : kind: compaction_algorithm,
5501 4 : };
5502 16 : let (tenant, ctx) = harness.load().await;
5503 4 : let tline = tenant
5504 4 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
5505 11 : .await?;
5506 :
5507 4 : let lsn = Lsn(0x10);
5508 4 : let compact = false;
5509 32600 : bulk_insert_maybe_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000, compact).await?;
5510 :
5511 4 : let test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5512 4 : let read_lsn = Lsn(u64::MAX - 1);
5513 :
5514 310 : let result = tline.get(test_key, read_lsn, &ctx).await;
5515 4 : assert!(result.is_ok(), "result is not Ok: {}", result.unwrap_err());
5516 :
5517 4 : Ok(())
5518 4 : }
5519 :
5520 : #[tokio::test]
5521 2 : async fn test_metadata_scan() -> anyhow::Result<()> {
5522 2 : let harness = TenantHarness::create("test_metadata_scan")?;
5523 8 : let (tenant, ctx) = harness.load().await;
5524 2 : let tline = tenant
5525 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5526 6 : .await?;
5527 2 :
5528 2 : const NUM_KEYS: usize = 1000;
5529 2 : const STEP: usize = 10000; // random update + scan base_key + idx * STEP
5530 2 :
5531 2 : let cancel = CancellationToken::new();
5532 2 :
5533 2 : let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
5534 2 : base_key.field1 = AUX_KEY_PREFIX;
5535 2 : let mut test_key = base_key;
5536 2 :
5537 2 : // Track when each page was last modified. Used to assert that
5538 2 : // a read sees the latest page version.
5539 2 : let mut updated = [Lsn(0); NUM_KEYS];
5540 2 :
5541 2 : let mut lsn = Lsn(0x10);
5542 2 : #[allow(clippy::needless_range_loop)]
5543 2002 : for blknum in 0..NUM_KEYS {
5544 2000 : lsn = Lsn(lsn.0 + 0x10);
5545 2000 : test_key.field6 = (blknum * STEP) as u32;
5546 2000 : let mut writer = tline.writer().await;
5547 2000 : writer
5548 2000 : .put(
5549 2000 : test_key,
5550 2000 : lsn,
5551 2000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5552 2000 : &ctx,
5553 2000 : )
5554 2 : .await?;
5555 2000 : writer.finish_write(lsn);
5556 2000 : updated[blknum] = lsn;
5557 2000 : drop(writer);
5558 2 : }
5559 2 :
5560 2 : let keyspace = KeySpace::single(base_key..base_key.add((NUM_KEYS * STEP) as u32));
5561 2 :
5562 24 : for iter in 0..=10 {
5563 2 : // Read all the blocks
5564 22000 : for (blknum, last_lsn) in updated.iter().enumerate() {
5565 22000 : test_key.field6 = (blknum * STEP) as u32;
5566 22000 : assert_eq!(
5567 22000 : tline.get(test_key, lsn, &ctx).await?,
5568 22000 : test_img(&format!("{} at {}", blknum, last_lsn))
5569 2 : );
5570 2 : }
5571 2 :
5572 22 : let mut cnt = 0;
5573 22000 : for (key, value) in tline
5574 22 : .get_vectored_impl(
5575 22 : keyspace.clone(),
5576 22 : lsn,
5577 22 : &mut ValuesReconstructState::default(),
5578 22 : &ctx,
5579 22 : )
5580 5617 : .await?
5581 2 : {
5582 22000 : let blknum = key.field6 as usize;
5583 22000 : let value = value?;
5584 22000 : assert!(blknum % STEP == 0);
5585 22000 : let blknum = blknum / STEP;
5586 22000 : assert_eq!(
5587 22000 : value,
5588 22000 : test_img(&format!("{} at {}", blknum, updated[blknum]))
5589 22000 : );
5590 22000 : cnt += 1;
5591 2 : }
5592 2 :
5593 22 : assert_eq!(cnt, NUM_KEYS);
5594 2 :
5595 22022 : for _ in 0..NUM_KEYS {
5596 22000 : lsn = Lsn(lsn.0 + 0x10);
5597 22000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5598 22000 : test_key.field6 = (blknum * STEP) as u32;
5599 22000 : let mut writer = tline.writer().await;
5600 22000 : writer
5601 22000 : .put(
5602 22000 : test_key,
5603 22000 : lsn,
5604 22000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5605 22000 : &ctx,
5606 22000 : )
5607 142 : .await?;
5608 22000 : writer.finish_write(lsn);
5609 22000 : drop(writer);
5610 22000 : updated[blknum] = lsn;
5611 2 : }
5612 2 :
5613 2 : // Perform two cycles of flush, compact, and GC
5614 66 : for round in 0..2 {
5615 44 : tline.freeze_and_flush().await?;
5616 44 : tline
5617 44 : .compact(
5618 44 : &cancel,
5619 44 : if iter % 5 == 0 && round == 0 {
5620 6 : let mut flags = EnumSet::new();
5621 6 : flags.insert(CompactFlags::ForceImageLayerCreation);
5622 6 : flags.insert(CompactFlags::ForceRepartition);
5623 6 : flags
5624 2 : } else {
5625 38 : EnumSet::empty()
5626 2 : },
5627 44 : &ctx,
5628 2 : )
5629 8951 : .await?;
5630 44 : tenant
5631 44 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
5632 44 : .await?;
5633 2 : }
5634 2 : }
5635 2 :
5636 2 : Ok(())
5637 2 : }
5638 :
5639 : #[tokio::test]
5640 2 : async fn test_metadata_compaction_trigger() -> anyhow::Result<()> {
5641 2 : let harness = TenantHarness::create("test_metadata_compaction_trigger")?;
5642 8 : let (tenant, ctx) = harness.load().await;
5643 2 : let tline = tenant
5644 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5645 6 : .await?;
5646 2 :
5647 2 : let cancel = CancellationToken::new();
5648 2 :
5649 2 : let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
5650 2 : base_key.field1 = AUX_KEY_PREFIX;
5651 2 : let test_key = base_key;
5652 2 : let mut lsn = Lsn(0x10);
5653 2 :
5654 42 : for _ in 0..20 {
5655 40 : lsn = Lsn(lsn.0 + 0x10);
5656 40 : let mut writer = tline.writer().await;
5657 40 : writer
5658 40 : .put(
5659 40 : test_key,
5660 40 : lsn,
5661 40 : &Value::Image(test_img(&format!("{} at {}", 0, lsn))),
5662 40 : &ctx,
5663 40 : )
5664 20 : .await?;
5665 40 : writer.finish_write(lsn);
5666 40 : drop(writer);
5667 40 : tline.freeze_and_flush().await?; // force create a delta layer
5668 2 : }
5669 2 :
5670 2 : let before_num_l0_delta_files = tline
5671 2 : .layers
5672 2 : .read()
5673 2 : .await
5674 2 : .layer_map()
5675 2 : .get_level0_deltas()?
5676 2 : .len();
5677 2 :
5678 110 : tline.compact(&cancel, EnumSet::empty(), &ctx).await?;
5679 2 :
5680 2 : let after_num_l0_delta_files = tline
5681 2 : .layers
5682 2 : .read()
5683 2 : .await
5684 2 : .layer_map()
5685 2 : .get_level0_deltas()?
5686 2 : .len();
5687 2 :
5688 2 : assert!(after_num_l0_delta_files < before_num_l0_delta_files, "after_num_l0_delta_files={after_num_l0_delta_files}, before_num_l0_delta_files={before_num_l0_delta_files}");
5689 2 :
5690 2 : assert_eq!(
5691 4 : tline.get(test_key, lsn, &ctx).await?,
5692 2 : test_img(&format!("{} at {}", 0, lsn))
5693 2 : );
5694 2 :
5695 2 : Ok(())
5696 2 : }
5697 :
5698 : #[tokio::test]
5699 2 : async fn test_branch_copies_dirty_aux_file_flag() {
5700 2 : let harness = TenantHarness::create("test_branch_copies_dirty_aux_file_flag").unwrap();
5701 2 :
5702 2 : // the default aux file switch policy is v1 if not set by the admins
5703 2 : assert_eq!(
5704 2 : harness.tenant_conf.switch_aux_file_policy,
5705 2 : AuxFilePolicy::V1
5706 2 : );
5707 8 : let (tenant, ctx) = harness.load().await;
5708 2 :
5709 2 : let mut lsn = Lsn(0x08);
5710 2 :
5711 2 : let tline: Arc<Timeline> = tenant
5712 2 : .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx)
5713 6 : .await
5714 2 : .unwrap();
5715 2 :
5716 2 : // no aux file is written at this point, so the persistent flag should be unset
5717 2 : assert_eq!(tline.last_aux_file_policy.load(), None);
5718 2 :
5719 2 : {
5720 2 : lsn += 8;
5721 2 : let mut modification = tline.begin_modification(lsn);
5722 2 : modification
5723 2 : .put_file("pg_logical/mappings/test1", b"first", &ctx)
5724 4 : .await
5725 2 : .unwrap();
5726 2 : modification.commit(&ctx).await.unwrap();
5727 2 : }
5728 2 :
5729 2 : // there is no tenant manager to pass the configuration through, so lets mimic it
5730 2 : tenant.set_new_location_config(
5731 2 : AttachedTenantConf::try_from(LocationConf::attached_single(
5732 2 : TenantConfOpt {
5733 2 : switch_aux_file_policy: Some(AuxFilePolicy::V2),
5734 2 : ..Default::default()
5735 2 : },
5736 2 : tenant.generation,
5737 2 : &pageserver_api::models::ShardParameters::default(),
5738 2 : ))
5739 2 : .unwrap(),
5740 2 : );
5741 2 :
5742 2 : assert_eq!(
5743 2 : tline.get_switch_aux_file_policy(),
5744 2 : AuxFilePolicy::V2,
5745 2 : "wanted state has been updated"
5746 2 : );
5747 2 : assert_eq!(
5748 2 : tline.last_aux_file_policy.load(),
5749 2 : Some(AuxFilePolicy::V1),
5750 2 : "aux file is written with switch_aux_file_policy unset (which is v1), so we should keep v1"
5751 2 : );
5752 2 :
5753 2 : // we can read everything from the storage
5754 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5755 2 : assert_eq!(
5756 2 : files.get("pg_logical/mappings/test1"),
5757 2 : Some(&bytes::Bytes::from_static(b"first"))
5758 2 : );
5759 2 :
5760 2 : {
5761 2 : lsn += 8;
5762 2 : let mut modification = tline.begin_modification(lsn);
5763 2 : modification
5764 2 : .put_file("pg_logical/mappings/test2", b"second", &ctx)
5765 2 : .await
5766 2 : .unwrap();
5767 2 : modification.commit(&ctx).await.unwrap();
5768 2 : }
5769 2 :
5770 2 : assert_eq!(
5771 2 : tline.last_aux_file_policy.load(),
5772 2 : Some(AuxFilePolicy::V1),
5773 2 : "keep v1 storage format when new files are written"
5774 2 : );
5775 2 :
5776 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5777 2 : assert_eq!(
5778 2 : files.get("pg_logical/mappings/test2"),
5779 2 : Some(&bytes::Bytes::from_static(b"second"))
5780 2 : );
5781 2 :
5782 2 : let child = tenant
5783 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(lsn), &ctx)
5784 2 : .await
5785 2 : .unwrap();
5786 2 :
5787 2 : // child copies the last flag even if that is not on remote storage yet
5788 2 : assert_eq!(child.get_switch_aux_file_policy(), AuxFilePolicy::V2);
5789 2 : assert_eq!(child.last_aux_file_policy.load(), Some(AuxFilePolicy::V1));
5790 2 :
5791 2 : let files = child.list_aux_files(lsn, &ctx).await.unwrap();
5792 2 : assert_eq!(files.get("pg_logical/mappings/test1"), None);
5793 2 : assert_eq!(files.get("pg_logical/mappings/test2"), None);
5794 2 :
5795 2 : // Even if we crash here without flushing the parent timeline with its new
5796 2 : // last_aux_file_policy, we are safe, because the child was never meant to access the
5797 2 : // ancestor's files. The ancestor can even safely switch back to V1 as part of a migration.
5798 2 : }
5799 :
5800 : #[tokio::test]
5801 2 : async fn aux_file_policy_switch() {
5802 2 : let mut harness = TenantHarness::create("aux_file_policy_switch").unwrap();
5803 2 : harness.tenant_conf.switch_aux_file_policy = AuxFilePolicy::CrossValidation; // set to cross-validation mode
5804 8 : let (tenant, ctx) = harness.load().await;
5805 2 :
5806 2 : let mut lsn = Lsn(0x08);
5807 2 :
5808 2 : let tline: Arc<Timeline> = tenant
5809 2 : .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx)
5810 6 : .await
5811 2 : .unwrap();
5812 2 :
5813 2 : assert_eq!(
5814 2 : tline.last_aux_file_policy.load(),
5815 2 : None,
5816 2 : "no aux file is written so it should be unset"
5817 2 : );
5818 2 :
5819 2 : {
5820 2 : lsn += 8;
5821 2 : let mut modification = tline.begin_modification(lsn);
5822 2 : modification
5823 2 : .put_file("pg_logical/mappings/test1", b"first", &ctx)
5824 4 : .await
5825 2 : .unwrap();
5826 2 : modification.commit(&ctx).await.unwrap();
5827 2 : }
5828 2 :
5829 2 : // there is no tenant manager to pass the configuration through, so lets mimic it
5830 2 : tenant.set_new_location_config(
5831 2 : AttachedTenantConf::try_from(LocationConf::attached_single(
5832 2 : TenantConfOpt {
5833 2 : switch_aux_file_policy: Some(AuxFilePolicy::V2),
5834 2 : ..Default::default()
5835 2 : },
5836 2 : tenant.generation,
5837 2 : &pageserver_api::models::ShardParameters::default(),
5838 2 : ))
5839 2 : .unwrap(),
5840 2 : );
5841 2 :
5842 2 : assert_eq!(
5843 2 : tline.get_switch_aux_file_policy(),
5844 2 : AuxFilePolicy::V2,
5845 2 : "wanted state has been updated"
5846 2 : );
5847 2 : assert_eq!(
5848 2 : tline.last_aux_file_policy.load(),
5849 2 : Some(AuxFilePolicy::CrossValidation),
5850 2 : "dirty index_part.json reflected state is yet to be updated"
5851 2 : );
5852 2 :
5853 2 : // we can still read the v1 aux files before we ingest anything new
5854 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5855 2 : assert_eq!(
5856 2 : files.get("pg_logical/mappings/test1"),
5857 2 : Some(&bytes::Bytes::from_static(b"first"))
5858 2 : );
5859 2 :
5860 2 : {
5861 2 : lsn += 8;
5862 2 : let mut modification = tline.begin_modification(lsn);
5863 2 : modification
5864 2 : .put_file("pg_logical/mappings/test2", b"second", &ctx)
5865 2 : .await
5866 2 : .unwrap();
5867 2 : modification.commit(&ctx).await.unwrap();
5868 2 : }
5869 2 :
5870 2 : assert_eq!(
5871 2 : tline.last_aux_file_policy.load(),
5872 2 : Some(AuxFilePolicy::V2),
5873 2 : "ingesting a file should apply the wanted switch state when applicable"
5874 2 : );
5875 2 :
5876 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5877 2 : assert_eq!(
5878 2 : files.get("pg_logical/mappings/test1"),
5879 2 : Some(&bytes::Bytes::from_static(b"first")),
5880 2 : "cross validation writes to both v1 and v2 so this should be available in v2"
5881 2 : );
5882 2 : assert_eq!(
5883 2 : files.get("pg_logical/mappings/test2"),
5884 2 : Some(&bytes::Bytes::from_static(b"second"))
5885 2 : );
5886 2 :
5887 2 : // mimic again by trying to flip it from V2 to V1 (the switch is not applied while ingesting a file)
5888 2 : tenant.set_new_location_config(
5889 2 : AttachedTenantConf::try_from(LocationConf::attached_single(
5890 2 : TenantConfOpt {
5891 2 : switch_aux_file_policy: Some(AuxFilePolicy::V1),
5892 2 : ..Default::default()
5893 2 : },
5894 2 : tenant.generation,
5895 2 : &pageserver_api::models::ShardParameters::default(),
5896 2 : ))
5897 2 : .unwrap(),
5898 2 : );
5899 2 :
5900 2 : {
5901 2 : lsn += 8;
5902 2 : let mut modification = tline.begin_modification(lsn);
5903 2 : modification
5904 2 : .put_file("pg_logical/mappings/test2", b"third", &ctx)
5905 2 : .await
5906 2 : .unwrap();
5907 2 : modification.commit(&ctx).await.unwrap();
5908 2 : }
5909 2 :
5910 2 : assert_eq!(
5911 2 : tline.get_switch_aux_file_policy(),
5912 2 : AuxFilePolicy::V1,
5913 2 : "wanted state has been updated again, even if invalid request"
5914 2 : );
5915 2 :
5916 2 : assert_eq!(
5917 2 : tline.last_aux_file_policy.load(),
5918 2 : Some(AuxFilePolicy::V2),
5919 2 : "ingesting a file should apply the wanted switch state when applicable"
5920 2 : );
5921 2 :
5922 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5923 2 : assert_eq!(
5924 2 : files.get("pg_logical/mappings/test1"),
5925 2 : Some(&bytes::Bytes::from_static(b"first"))
5926 2 : );
5927 2 : assert_eq!(
5928 2 : files.get("pg_logical/mappings/test2"),
5929 2 : Some(&bytes::Bytes::from_static(b"third"))
5930 2 : );
5931 2 :
5932 2 : // mimic again by trying to flip it from V1 to V2 (the switch is not applied while ingesting a file)
5933 2 : tenant.set_new_location_config(
5934 2 : AttachedTenantConf::try_from(LocationConf::attached_single(
5935 2 : TenantConfOpt {
5936 2 : switch_aux_file_policy: Some(AuxFilePolicy::V2),
5937 2 : ..Default::default()
5938 2 : },
5939 2 : tenant.generation,
5940 2 : &pageserver_api::models::ShardParameters::default(),
5941 2 : ))
5942 2 : .unwrap(),
5943 2 : );
5944 2 :
5945 2 : {
5946 2 : lsn += 8;
5947 2 : let mut modification = tline.begin_modification(lsn);
5948 2 : modification
5949 2 : .put_file("pg_logical/mappings/test3", b"last", &ctx)
5950 2 : .await
5951 2 : .unwrap();
5952 2 : modification.commit(&ctx).await.unwrap();
5953 2 : }
5954 2 :
5955 2 : assert_eq!(tline.get_switch_aux_file_policy(), AuxFilePolicy::V2);
5956 2 :
5957 2 : assert_eq!(tline.last_aux_file_policy.load(), Some(AuxFilePolicy::V2));
5958 2 :
5959 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5960 2 : assert_eq!(
5961 2 : files.get("pg_logical/mappings/test1"),
5962 2 : Some(&bytes::Bytes::from_static(b"first"))
5963 2 : );
5964 2 : assert_eq!(
5965 2 : files.get("pg_logical/mappings/test2"),
5966 2 : Some(&bytes::Bytes::from_static(b"third"))
5967 2 : );
5968 2 : assert_eq!(
5969 2 : files.get("pg_logical/mappings/test3"),
5970 2 : Some(&bytes::Bytes::from_static(b"last"))
5971 2 : );
5972 2 : }
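
// A compact model of the migration rule that these aux-file tests assert (an
// illustrative sketch, not the pageserver implementation): the persisted
// policy only changes when an aux file is ingested, and only along valid
// paths -- from unset to anything, or from CrossValidation to V2. Everything
// else keeps the current format.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Policy {
    V1,
    V2,
    CrossValidation,
}

fn policy_after_ingest(persisted: Option<Policy>, wanted: Policy) -> Policy {
    use Policy::*;
    match (persisted, wanted) {
        // First aux file ever written: adopt the wanted policy.
        (None, w) => w,
        // Cross-validation writes both formats, so moving to V2 is safe.
        (Some(CrossValidation), V2) => V2,
        // All other switches are ignored; the persisted format sticks.
        (Some(p), _) => p,
    }
}

fn main() {
    // Matches test_branch_copies_dirty_aux_file_flag: V1 data stays V1.
    assert_eq!(policy_after_ingest(Some(Policy::V1), Policy::V2), Policy::V1);
    // Matches aux_file_policy_switch: CrossValidation upgrades to V2 ...
    assert_eq!(policy_after_ingest(Some(Policy::CrossValidation), Policy::V2), Policy::V2);
    // ... and V2 never downgrades to V1.
    assert_eq!(policy_after_ingest(Some(Policy::V2), Policy::V1), Policy::V2);
}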
5973 :
5974 : #[tokio::test]
5975 2 : async fn aux_file_policy_force_switch() {
5976 2 : let mut harness = TenantHarness::create("aux_file_policy_force_switch").unwrap();
5977 2 : harness.tenant_conf.switch_aux_file_policy = AuxFilePolicy::V1;
5978 8 : let (tenant, ctx) = harness.load().await;
5979 2 :
5980 2 : let mut lsn = Lsn(0x08);
5981 2 :
5982 2 : let tline: Arc<Timeline> = tenant
5983 2 : .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx)
5984 6 : .await
5985 2 : .unwrap();
5986 2 :
5987 2 : assert_eq!(
5988 2 : tline.last_aux_file_policy.load(),
5989 2 : None,
5990 2 : "no aux file is written so it should be unset"
5991 2 : );
5992 2 :
5993 2 : {
5994 2 : lsn += 8;
5995 2 : let mut modification = tline.begin_modification(lsn);
5996 2 : modification
5997 2 : .put_file("pg_logical/mappings/test1", b"first", &ctx)
5998 4 : .await
5999 2 : .unwrap();
6000 2 : modification.commit(&ctx).await.unwrap();
6001 2 : }
6002 2 :
6003 2 : tline.do_switch_aux_policy(AuxFilePolicy::V2).unwrap();
6004 2 :
6005 2 : assert_eq!(
6006 2 : tline.last_aux_file_policy.load(),
6007 2 : Some(AuxFilePolicy::V2),
6008 2 : "dirty index_part.json reflected state is yet to be updated"
6009 2 : );
6010 2 :
6011 2 : // lose all data from v1
6012 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
6013 2 : assert_eq!(files.get("pg_logical/mappings/test1"), None);
6014 2 :
6015 2 : {
6016 2 : lsn += 8;
6017 2 : let mut modification = tline.begin_modification(lsn);
6018 2 : modification
6019 2 : .put_file("pg_logical/mappings/test2", b"second", &ctx)
6020 2 : .await
6021 2 : .unwrap();
6022 2 : modification.commit(&ctx).await.unwrap();
6023 2 : }
6024 2 :
6025 2 : // read data ingested in v2
6026 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
6027 2 : assert_eq!(
6028 2 : files.get("pg_logical/mappings/test2"),
6029 2 : Some(&bytes::Bytes::from_static(b"second"))
6030 2 : );
6031 2 : // lose all data from v1
6032 2 : assert_eq!(files.get("pg_logical/mappings/test1"), None);
6033 2 : }
6034 :
6035 : #[tokio::test]
6036 2 : async fn aux_file_policy_auto_detect() {
6037 2 : let mut harness = TenantHarness::create("aux_file_policy_auto_detect").unwrap();
6038 2 : harness.tenant_conf.switch_aux_file_policy = AuxFilePolicy::V2; // set the wanted policy to v2; auto-detection of existing v1 aux files should override it
6039 8 : let (tenant, ctx) = harness.load().await;
6040 2 :
6041 2 : let mut lsn = Lsn(0x08);
6042 2 :
6043 2 : let tline: Arc<Timeline> = tenant
6044 2 : .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx)
6045 6 : .await
6046 2 : .unwrap();
6047 2 :
6048 2 : assert_eq!(
6049 2 : tline.last_aux_file_policy.load(),
6050 2 : None,
6051 2 : "no aux file is written so it should be unset"
6052 2 : );
6053 2 :
6054 2 : {
6055 2 : lsn += 8;
6056 2 : let mut modification = tline.begin_modification(lsn);
6057 2 : let buf = AuxFilesDirectory::ser(&AuxFilesDirectory {
6058 2 : files: vec![(
6059 2 : "test_file".to_string(),
6060 2 : Bytes::copy_from_slice(b"test_file"),
6061 2 : )]
6062 2 : .into_iter()
6063 2 : .collect(),
6064 2 : })
6065 2 : .unwrap();
6066 2 : modification.put_for_test(AUX_FILES_KEY, Value::Image(Bytes::from(buf)));
6067 2 : modification.commit(&ctx).await.unwrap();
6068 2 : }
6069 2 :
6070 2 : {
6071 2 : lsn += 8;
6072 2 : let mut modification = tline.begin_modification(lsn);
6073 2 : modification
6074 2 : .put_file("pg_logical/mappings/test1", b"first", &ctx)
6075 2 : .await
6076 2 : .unwrap();
6077 2 : modification.commit(&ctx).await.unwrap();
6078 2 : }
6079 2 :
6080 2 : assert_eq!(
6081 2 : tline.last_aux_file_policy.load(),
6082 2 : Some(AuxFilePolicy::V1),
6083 2 : "keep using v1 because there are aux files writting with v1"
6084 2 : );
6085 2 :
6086 2 : // we can still read the auxfile v1
6087 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
6088 2 : assert_eq!(
6089 2 : files.get("pg_logical/mappings/test1"),
6090 2 : Some(&bytes::Bytes::from_static(b"first"))
6091 2 : );
6092 2 : assert_eq!(
6093 2 : files.get("test_file"),
6094 2 : Some(&bytes::Bytes::from_static(b"test_file"))
6095 2 : );
6096 2 : }
6097 :
6098 : #[tokio::test]
6099 2 : async fn test_metadata_image_creation() -> anyhow::Result<()> {
6100 2 : let harness = TenantHarness::create("test_metadata_image_creation")?;
6101 8 : let (tenant, ctx) = harness.load().await;
6102 2 : let tline = tenant
6103 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6104 6 : .await?;
6105 2 :
6106 2 : const NUM_KEYS: usize = 1000;
6107 2 : const STEP: usize = 10000; // random update + scan base_key + idx * STEP
6108 2 :
6109 2 : let cancel = CancellationToken::new();
6110 2 :
6111 2 : let base_key = Key::from_hex("620000000033333333444444445500000000").unwrap();
6112 2 : assert_eq!(base_key.field1, AUX_KEY_PREFIX); // in case someone accidentally changed the prefix...
6113 2 : let mut test_key = base_key;
6114 2 : let mut lsn = Lsn(0x10);
6115 2 :
6116 8 : async fn scan_with_statistics(
6117 8 : tline: &Timeline,
6118 8 : keyspace: &KeySpace,
6119 8 : lsn: Lsn,
6120 8 : ctx: &RequestContext,
6121 8 : ) -> anyhow::Result<(BTreeMap<Key, Result<Bytes, PageReconstructError>>, usize)> {
6122 8 : let mut reconstruct_state = ValuesReconstructState::default();
6123 8 : let res = tline
6124 8 : .get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx)
6125 1689 : .await?;
6126 8 : Ok((res, reconstruct_state.get_delta_layers_visited() as usize))
6127 8 : }
6128 2 :
6129 2 : #[allow(clippy::needless_range_loop)]
6130 2002 : for blknum in 0..NUM_KEYS {
6131 2000 : lsn = Lsn(lsn.0 + 0x10);
6132 2000 : test_key.field6 = (blknum * STEP) as u32;
6133 2000 : let mut writer = tline.writer().await;
6134 2000 : writer
6135 2000 : .put(
6136 2000 : test_key,
6137 2000 : lsn,
6138 2000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
6139 2000 : &ctx,
6140 2000 : )
6141 2 : .await?;
6142 2000 : writer.finish_write(lsn);
6143 2000 : drop(writer);
6144 2 : }
6145 2 :
6146 2 : let keyspace = KeySpace::single(base_key..base_key.add((NUM_KEYS * STEP) as u32));
6147 2 :
6148 22 : for iter in 1..=10 {
6149 20020 : for _ in 0..NUM_KEYS {
6150 20000 : lsn = Lsn(lsn.0 + 0x10);
6151 20000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
6152 20000 : test_key.field6 = (blknum * STEP) as u32;
6153 20000 : let mut writer = tline.writer().await;
6154 20000 : writer
6155 20000 : .put(
6156 20000 : test_key,
6157 20000 : lsn,
6158 20000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
6159 20000 : &ctx,
6160 20000 : )
6161 19 : .await?;
6162 20000 : writer.finish_write(lsn);
6163 20000 : drop(writer);
6164 2 : }
6165 2 :
6166 20 : tline.freeze_and_flush().await?;
6167 2 :
6168 20 : if iter % 5 == 0 {
6169 4 : let (_, before_delta_file_accessed) =
6170 1681 : scan_with_statistics(&tline, &keyspace, lsn, &ctx).await?;
6171 4 : tline
6172 4 : .compact(
6173 4 : &cancel,
6174 4 : {
6175 4 : let mut flags = EnumSet::new();
6176 4 : flags.insert(CompactFlags::ForceImageLayerCreation);
6177 4 : flags.insert(CompactFlags::ForceRepartition);
6178 4 : flags
6179 4 : },
6180 4 : &ctx,
6181 4 : )
6182 6522 : .await?;
6183 4 : let (_, after_delta_file_accessed) =
6184 8 : scan_with_statistics(&tline, &keyspace, lsn, &ctx).await?;
6185 4 : assert!(after_delta_file_accessed < before_delta_file_accessed, "after_delta_file_accessed={after_delta_file_accessed}, before_delta_file_accessed={before_delta_file_accessed}");
6186 2 : // Given that we already produced an image layer, there should be no delta layer needed for the scan, but still setting a low threshold there for unforeseen circumstances.
6187 4 : assert!(
6188 4 : after_delta_file_accessed <= 2,
6189 2 : "after_delta_file_accessed={after_delta_file_accessed}"
6190 2 : );
6191 16 : }
6192 2 : }
6193 2 :
6194 2 : Ok(())
6195 2 : }
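
// Restating the invariant checked above as a predicate, under the assumption
// that `get_delta_layers_visited` counts the delta layers touched by a single
// scan: forcing image-layer creation must strictly reduce the deltas visited,
// and leave at most a small constant number.
#[allow(dead_code)]
fn image_creation_reduced_deltas_sketch(before: usize, after: usize) -> bool {
    // Mirrors the two assertions in `test_metadata_image_creation`.
    after < before && after <= 2
}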
6196 :
6197 : #[tokio::test]
6198 2 : async fn test_vectored_missing_data_key_reads() -> anyhow::Result<()> {
6199 2 : let harness = TenantHarness::create("test_vectored_missing_data_key_reads")?;
6200 8 : let (tenant, ctx) = harness.load().await;
6201 2 :
6202 2 : let base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
6203 2 : let base_key_child = Key::from_hex("000000000033333333444444445500000001").unwrap();
6204 2 : let base_key_nonexist = Key::from_hex("000000000033333333444444445500000002").unwrap();
6205 2 :
6206 2 : let tline = tenant
6207 2 : .create_test_timeline_with_layers(
6208 2 : TIMELINE_ID,
6209 2 : Lsn(0x10),
6210 2 : DEFAULT_PG_VERSION,
6211 2 : &ctx,
6212 2 : Vec::new(), // delta layers
6213 2 : vec![(Lsn(0x20), vec![(base_key, test_img("data key 1"))])], // image layers
6214 2 : Lsn(0x20), // it's fine not to advance the LSN to 0x30 while reading at 0x30 below, because `get_vectored_impl` does not wait for the LSN
6215 2 : )
6216 13 : .await?;
6217 2 : tline.add_extra_test_dense_keyspace(KeySpace::single(base_key..(base_key_nonexist.next())));
6218 2 :
6219 2 : let child = tenant
6220 2 : .branch_timeline_test_with_layers(
6221 2 : &tline,
6222 2 : NEW_TIMELINE_ID,
6223 2 : Some(Lsn(0x20)),
6224 2 : &ctx,
6225 2 : Vec::new(), // delta layers
6226 2 : vec![(Lsn(0x30), vec![(base_key_child, test_img("data key 2"))])], // image layers
6227 2 : Lsn(0x30),
6228 2 : )
6229 7 : .await
6230 2 : .unwrap();
6231 2 :
6232 12 : async fn get_vectored_impl_wrapper(
6233 12 : tline: &Arc<Timeline>,
6234 12 : key: Key,
6235 12 : lsn: Lsn,
6236 12 : ctx: &RequestContext,
6237 12 : ) -> Result<Option<Bytes>, GetVectoredError> {
6238 12 : let mut reconstruct_state = ValuesReconstructState::new();
6239 12 : let mut res = tline
6240 12 : .get_vectored_impl(
6241 12 : KeySpace::single(key..key.next()),
6242 12 : lsn,
6243 12 : &mut reconstruct_state,
6244 12 : ctx,
6245 12 : )
6246 12 : .await?;
6247 6 : Ok(res.pop_last().map(|(k, v)| {
6248 6 : assert_eq!(k, key);
6249 6 : v.unwrap()
6250 6 : }))
6251 12 : }
6252 2 :
6253 2 : let lsn = Lsn(0x30);
6254 2 :
6255 2 : // test vectored get on parent timeline
6256 2 : assert_eq!(
6257 4 : get_vectored_impl_wrapper(&tline, base_key, lsn, &ctx).await?,
6258 2 : Some(test_img("data key 1"))
6259 2 : );
6260 2 : assert!(get_vectored_impl_wrapper(&tline, base_key_child, lsn, &ctx)
6261 3 : .await
6262 2 : .unwrap_err()
6263 2 : .is_missing_key_error());
6264 2 : assert!(
6265 2 : get_vectored_impl_wrapper(&tline, base_key_nonexist, lsn, &ctx)
6266 2 : .await
6267 2 : .unwrap_err()
6268 2 : .is_missing_key_error()
6269 2 : );
6270 2 :
6271 2 : // test vectored get on child timeline
6272 2 : assert_eq!(
6273 2 : get_vectored_impl_wrapper(&child, base_key, lsn, &ctx).await?,
6274 2 : Some(test_img("data key 1"))
6275 2 : );
6276 2 : assert_eq!(
6277 4 : get_vectored_impl_wrapper(&child, base_key_child, lsn, &ctx).await?,
6278 2 : Some(test_img("data key 2"))
6279 2 : );
6280 2 : assert!(
6281 2 : get_vectored_impl_wrapper(&child, base_key_nonexist, lsn, &ctx)
6282 2 : .await
6283 2 : .unwrap_err()
6284 2 : .is_missing_key_error()
6285 2 : );
6286 2 :
6287 2 : Ok(())
6288 2 : }
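
// Hedged sketch of how a caller could fold the dense-keyspace contract tested
// above into an Option-returning read: a *data* key inside the keyspace with
// no value surfaces as a missing-key error, which the caller downgrades
// explicitly when absence is expected. `get_data_key_opt_sketch` is
// illustrative only; it reuses the module-level wrapper defined below.
#[allow(dead_code)]
async fn get_data_key_opt_sketch(
    tline: &Arc<Timeline>,
    key: Key,
    lsn: Lsn,
    ctx: &RequestContext,
) -> Result<Option<Bytes>, GetVectoredError> {
    match get_vectored_impl_wrapper(tline, key, lsn, ctx).await {
        Ok(v) => Ok(v),
        Err(e) if e.is_missing_key_error() => Ok(None), // absence is expected here
        Err(e) => Err(e),
    }
}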
6289 :
6290 : #[tokio::test]
6291 2 : async fn test_vectored_missing_metadata_key_reads() -> anyhow::Result<()> {
6292 2 : let harness = TenantHarness::create("test_vectored_missing_metadata_key_reads")?;
6293 8 : let (tenant, ctx) = harness.load().await;
6294 2 :
6295 2 : let base_key = Key::from_hex("620000000033333333444444445500000000").unwrap();
6296 2 : let base_key_child = Key::from_hex("620000000033333333444444445500000001").unwrap();
6297 2 : let base_key_nonexist = Key::from_hex("620000000033333333444444445500000002").unwrap();
6298 2 : assert_eq!(base_key.field1, AUX_KEY_PREFIX); // in case someone accidentally changed the prefix...
6299 2 :
6300 2 : let tline = tenant
6301 2 : .create_test_timeline_with_layers(
6302 2 : TIMELINE_ID,
6303 2 : Lsn(0x10),
6304 2 : DEFAULT_PG_VERSION,
6305 2 : &ctx,
6306 2 : Vec::new(), // delta layers
6307 2 : vec![(Lsn(0x20), vec![(base_key, test_img("metadata key 1"))])], // image layers
6308 2 : Lsn(0x20), // it's fine not to advance the LSN to 0x30 while reading at 0x30 below, because `get_vectored_impl` does not wait for the LSN
6309 2 : )
6310 13 : .await?;
6311 2 :
6312 2 : let child = tenant
6313 2 : .branch_timeline_test_with_layers(
6314 2 : &tline,
6315 2 : NEW_TIMELINE_ID,
6316 2 : Some(Lsn(0x20)),
6317 2 : &ctx,
6318 2 : Vec::new(), // delta layers
6319 2 : vec![(
6320 2 : Lsn(0x30),
6321 2 : vec![(base_key_child, test_img("metadata key 2"))],
6322 2 : )], // image layers
6323 2 : Lsn(0x30),
6324 2 : )
6325 7 : .await
6326 2 : .unwrap();
6327 2 :
6328 12 : async fn get_vectored_impl_wrapper(
6329 12 : tline: &Arc<Timeline>,
6330 12 : key: Key,
6331 12 : lsn: Lsn,
6332 12 : ctx: &RequestContext,
6333 12 : ) -> Result<Option<Bytes>, GetVectoredError> {
6334 12 : let mut reconstruct_state = ValuesReconstructState::new();
6335 12 : let mut res = tline
6336 12 : .get_vectored_impl(
6337 12 : KeySpace::single(key..key.next()),
6338 12 : lsn,
6339 12 : &mut reconstruct_state,
6340 12 : ctx,
6341 12 : )
6342 8 : .await?;
6343 12 : Ok(res.pop_last().map(|(k, v)| {
6344 4 : assert_eq!(k, key);
6345 4 : v.unwrap()
6346 12 : }))
6347 12 : }
6348 2 :
6349 2 : let lsn = Lsn(0x30);
6350 2 :
6351 2 : // test vectored get on parent timeline
6352 2 : assert_eq!(
6353 4 : get_vectored_impl_wrapper(&tline, base_key, lsn, &ctx).await?,
6354 2 : Some(test_img("metadata key 1"))
6355 2 : );
6356 2 : assert_eq!(
6357 2 : get_vectored_impl_wrapper(&tline, base_key_child, lsn, &ctx).await?,
6358 2 : None
6359 2 : );
6360 2 : assert_eq!(
6361 2 : get_vectored_impl_wrapper(&tline, base_key_nonexist, lsn, &ctx).await?,
6362 2 : None
6363 2 : );
6364 2 :
6365 2 : // test vectored get on child timeline
6366 2 : assert_eq!(
6367 2 : get_vectored_impl_wrapper(&child, base_key, lsn, &ctx).await?,
6368 2 : None
6369 2 : );
6370 2 : assert_eq!(
6371 4 : get_vectored_impl_wrapper(&child, base_key_child, lsn, &ctx).await?,
6372 2 : Some(test_img("metadata key 2"))
6373 2 : );
6374 2 : assert_eq!(
6375 2 : get_vectored_impl_wrapper(&child, base_key_nonexist, lsn, &ctx).await?,
6376 2 : None
6377 2 : );
6378 2 :
6379 2 : Ok(())
6380 2 : }
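
// Contrast with the previous test: the metadata (aux) keyspace is sparse, so
// the absent-key reads above come back as `Ok(None)` rather than a
// missing-key error. A sketch of the dispatch rule, assuming
// `is_metadata_key` is what marks the sparse keyspace:
#[allow(dead_code)]
fn absent_key_read_sketch(key: Key) -> &'static str {
    if key.is_metadata_key() {
        "sparse keyspace: absent key reads as Ok(None)"
    } else {
        "dense keyspace: absent key is a missing-key error"
    }
}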
6381 :
6382 12 : async fn get_vectored_impl_wrapper(
6383 12 : tline: &Arc<Timeline>,
6384 12 : key: Key,
6385 12 : lsn: Lsn,
6386 12 : ctx: &RequestContext,
6387 12 : ) -> Result<Option<Bytes>, GetVectoredError> {
6388 12 : let mut reconstruct_state = ValuesReconstructState::new();
6389 12 : let mut res = tline
6390 12 : .get_vectored_impl(
6391 12 : KeySpace::single(key..key.next()),
6392 12 : lsn,
6393 12 : &mut reconstruct_state,
6394 12 : ctx,
6395 12 : )
6396 13 : .await?;
6397 12 : Ok(res.pop_last().map(|(k, v)| {
6398 8 : assert_eq!(k, key);
6399 8 : v.unwrap()
6400 12 : }))
6401 12 : }
6402 :
6403 : #[tokio::test]
6404 2 : async fn test_metadata_tombstone_reads() -> anyhow::Result<()> {
6405 2 : let harness = TenantHarness::create("test_metadata_tombstone_reads")?;
6406 8 : let (tenant, ctx) = harness.load().await;
6407 2 : let key0 = Key::from_hex("620000000033333333444444445500000000").unwrap();
6408 2 : let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap();
6409 2 : let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap();
6410 2 : let key3 = Key::from_hex("620000000033333333444444445500000003").unwrap();
6411 2 :
6412 2 : // We emulate a situation where the compaction algorithm has created an image layer that removes the tombstones:
6413 2 : // Lsn 0x30 key0, key3, no key1+key2
6414 2 : // Lsn 0x20 key1+key2 tombstones
6415 2 : // Lsn 0x10 key1 in image, key2 in delta
6416 2 : let tline = tenant
6417 2 : .create_test_timeline_with_layers(
6418 2 : TIMELINE_ID,
6419 2 : Lsn(0x10),
6420 2 : DEFAULT_PG_VERSION,
6421 2 : &ctx,
6422 2 : // delta layers
6423 2 : vec![
6424 2 : vec![(key2, Lsn(0x10), Value::Image(test_img("metadata key 2")))],
6425 2 : vec![(key1, Lsn(0x20), Value::Image(Bytes::new()))],
6426 2 : vec![(key2, Lsn(0x20), Value::Image(Bytes::new()))],
6427 2 : ],
6428 2 : // image layers
6429 2 : vec![
6430 2 : (Lsn(0x10), vec![(key1, test_img("metadata key 1"))]),
6431 2 : (
6432 2 : Lsn(0x30),
6433 2 : vec![
6434 2 : (key0, test_img("metadata key 0")),
6435 2 : (key3, test_img("metadata key 3")),
6436 2 : ],
6437 2 : ),
6438 2 : ],
6439 2 : Lsn(0x30),
6440 2 : )
6441 40 : .await?;
6442 2 :
6443 2 : let lsn = Lsn(0x30);
6444 2 : let old_lsn = Lsn(0x20);
6445 2 :
6446 2 : assert_eq!(
6447 4 : get_vectored_impl_wrapper(&tline, key0, lsn, &ctx).await?,
6448 2 : Some(test_img("metadata key 0"))
6449 2 : );
6450 2 : assert_eq!(
6451 2 : get_vectored_impl_wrapper(&tline, key1, lsn, &ctx).await?,
6452 2 : None,
6453 2 : );
6454 2 : assert_eq!(
6455 2 : get_vectored_impl_wrapper(&tline, key2, lsn, &ctx).await?,
6456 2 : None,
6457 2 : );
6458 2 : assert_eq!(
6459 4 : get_vectored_impl_wrapper(&tline, key1, old_lsn, &ctx).await?,
6460 2 : Some(Bytes::new()),
6461 2 : );
6462 2 : assert_eq!(
6463 4 : get_vectored_impl_wrapper(&tline, key2, old_lsn, &ctx).await?,
6464 2 : Some(Bytes::new()),
6465 2 : );
6466 2 : assert_eq!(
6467 2 : get_vectored_impl_wrapper(&tline, key3, lsn, &ctx).await?,
6468 2 : Some(test_img("metadata key 3"))
6469 2 : );
6470 2 :
6471 2 : Ok(())
6472 2 : }
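
// Hedged sketch of the tombstone convention encoded by the assertions above:
// a metadata key is deleted by writing an empty image. The tombstone itself
// is still readable as empty bytes at its own LSN (the `old_lsn` reads),
// while an image layer created above it drops the key entirely.
#[allow(dead_code)]
fn is_metadata_tombstone_sketch(value: &Bytes) -> bool {
    value.is_empty() // empty image == deletion marker for metadata keys
}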
6473 :
6474 : #[tokio::test]
6475 2 : async fn test_metadata_tombstone_image_creation() {
6476 2 : let harness = TenantHarness::create("test_metadata_tombstone_image_creation").unwrap();
6477 8 : let (tenant, ctx) = harness.load().await;
6478 2 :
6479 2 : let key0 = Key::from_hex("620000000033333333444444445500000000").unwrap();
6480 2 : let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap();
6481 2 : let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap();
6482 2 : let key3 = Key::from_hex("620000000033333333444444445500000003").unwrap();
6483 2 :
6484 2 : let tline = tenant
6485 2 : .create_test_timeline_with_layers(
6486 2 : TIMELINE_ID,
6487 2 : Lsn(0x10),
6488 2 : DEFAULT_PG_VERSION,
6489 2 : &ctx,
6490 2 : // delta layers
6491 2 : vec![
6492 2 : vec![(key2, Lsn(0x10), Value::Image(test_img("metadata key 2")))],
6493 2 : vec![(key1, Lsn(0x20), Value::Image(Bytes::new()))],
6494 2 : vec![(key2, Lsn(0x20), Value::Image(Bytes::new()))],
6495 2 : vec![
6496 2 : (key0, Lsn(0x30), Value::Image(test_img("metadata key 0"))),
6497 2 : (key3, Lsn(0x30), Value::Image(test_img("metadata key 3"))),
6498 2 : ],
6499 2 : ],
6500 2 : // image layers
6501 2 : vec![(Lsn(0x10), vec![(key1, test_img("metadata key 1"))])],
6502 2 : Lsn(0x30),
6503 2 : )
6504 37 : .await
6505 2 : .unwrap();
6506 2 :
6507 2 : let cancel = CancellationToken::new();
6508 2 :
6509 2 : tline
6510 2 : .compact(
6511 2 : &cancel,
6512 2 : {
6513 2 : let mut flags = EnumSet::new();
6514 2 : flags.insert(CompactFlags::ForceImageLayerCreation);
6515 2 : flags.insert(CompactFlags::ForceRepartition);
6516 2 : flags
6517 2 : },
6518 2 : &ctx,
6519 2 : )
6520 49 : .await
6521 2 : .unwrap();
6522 2 :
6523 2 : // Image layers are created at last_record_lsn
6524 2 : let images = tline
6525 2 : .inspect_image_layers(Lsn(0x30), &ctx)
6526 8 : .await
6527 2 : .unwrap()
6528 2 : .into_iter()
6529 20 : .filter(|(k, _)| k.is_metadata_key())
6530 2 : .collect::<Vec<_>>();
6531 2 : assert_eq!(images.len(), 2); // the image layer should only contain two existing keys, tombstones should be removed.
6532 2 : }
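
// The forced-compaction flag set used repeatedly in these tests, factored out
// for illustration (the same `EnumSet` construction as inline above):
#[allow(dead_code)]
fn force_image_creation_flags_sketch() -> EnumSet<CompactFlags> {
    let mut flags = EnumSet::new();
    flags.insert(CompactFlags::ForceImageLayerCreation);
    flags.insert(CompactFlags::ForceRepartition);
    flags
}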
6533 :
6534 : #[tokio::test]
6535 2 : async fn test_metadata_tombstone_empty_image_creation() {
6536 2 : let harness =
6537 2 : TenantHarness::create("test_metadata_tombstone_empty_image_creation").unwrap();
6538 8 : let (tenant, ctx) = harness.load().await;
6539 2 :
6540 2 : let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap();
6541 2 : let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap();
6542 2 :
6543 2 : let tline = tenant
6544 2 : .create_test_timeline_with_layers(
6545 2 : TIMELINE_ID,
6546 2 : Lsn(0x10),
6547 2 : DEFAULT_PG_VERSION,
6548 2 : &ctx,
6549 2 : // delta layers
6550 2 : vec![
6551 2 : vec![(key2, Lsn(0x10), Value::Image(test_img("metadata key 2")))],
6552 2 : vec![(key1, Lsn(0x20), Value::Image(Bytes::new()))],
6553 2 : vec![(key2, Lsn(0x20), Value::Image(Bytes::new()))],
6554 2 : ],
6555 2 : // image layers
6556 2 : vec![(Lsn(0x10), vec![(key1, test_img("metadata key 1"))])],
6557 2 : Lsn(0x30),
6558 2 : )
6559 31 : .await
6560 2 : .unwrap();
6561 2 :
6562 2 : let cancel = CancellationToken::new();
6563 2 :
6564 2 : tline
6565 2 : .compact(
6566 2 : &cancel,
6567 2 : {
6568 2 : let mut flags = EnumSet::new();
6569 2 : flags.insert(CompactFlags::ForceImageLayerCreation);
6570 2 : flags.insert(CompactFlags::ForceRepartition);
6571 2 : flags
6572 2 : },
6573 2 : &ctx,
6574 2 : )
6575 37 : .await
6576 2 : .unwrap();
6577 2 :
6578 2 : // Image layers are created at last_record_lsn
6579 2 : let images = tline
6580 2 : .inspect_image_layers(Lsn(0x30), &ctx)
6581 4 : .await
6582 2 : .unwrap()
6583 2 : .into_iter()
6584 16 : .filter(|(k, _)| k.is_metadata_key())
6585 2 : .collect::<Vec<_>>();
6586 2 : assert_eq!(images.len(), 0); // the image layer should not contain tombstones, or it is not created
6587 2 : }
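
// A minimal sketch (simplified types, not the real compaction code) of the
// filtering rule the two tombstone tests assert: image creation drops
// tombstoned keys, and an image layer that would end up empty is not written
// at all.
#[allow(dead_code)]
fn build_image_entries_sketch(latest: Vec<(Key, Bytes)>) -> Option<Vec<(Key, Bytes)>> {
    let entries: Vec<_> = latest.into_iter().filter(|(_, v)| !v.is_empty()).collect();
    // No image layer if nothing survives the tombstone filter.
    (!entries.is_empty()).then_some(entries)
}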
6588 :
6589 : #[tokio::test]
6590 2 : async fn test_simple_bottom_most_compaction_images() -> anyhow::Result<()> {
6591 2 : let harness = TenantHarness::create("test_simple_bottom_most_compaction_images")?;
6592 8 : let (tenant, ctx) = harness.load().await;
6593 2 :
6594 104 : fn get_key(id: u32) -> Key {
6595 104 : // using aux keys here b/c they are guaranteed to be inside `collect_keyspace`.
6596 104 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
6597 104 : key.field6 = id;
6598 104 : key
6599 104 : }
6600 2 :
6601 2 : // We create one bottom-most image layer, a delta layer D1 crossing the GC horizon, D2 below the horizon, and D3 above the horizon.
6602 2 : //
6603 2 : // | D1 | | D3 |
6604 2 : // -| |-- gc horizon -----------------
6605 2 : // | | | D2 |
6606 2 : // --------- img layer ------------------
6607 2 : //
6608 2 : // What we should expect from this compaction is:
6609 2 : // | Part of D1 | | D3 |
6610 2 : // --------- img layer with D1+D2 at GC horizon------------------
6611 2 :
6612 2 : // img layer at 0x10
6613 2 : let img_layer = (0..10)
6614 20 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
6615 2 : .collect_vec();
6616 2 :
6617 2 : let delta1 = vec![
6618 2 : (
6619 2 : get_key(1),
6620 2 : Lsn(0x20),
6621 2 : Value::Image(Bytes::from("value 1@0x20")),
6622 2 : ),
6623 2 : (
6624 2 : get_key(2),
6625 2 : Lsn(0x30),
6626 2 : Value::Image(Bytes::from("value 2@0x30")),
6627 2 : ),
6628 2 : (
6629 2 : get_key(3),
6630 2 : Lsn(0x40),
6631 2 : Value::Image(Bytes::from("value 3@0x40")),
6632 2 : ),
6633 2 : ];
6634 2 : let delta2 = vec![
6635 2 : (
6636 2 : get_key(5),
6637 2 : Lsn(0x20),
6638 2 : Value::Image(Bytes::from("value 5@0x20")),
6639 2 : ),
6640 2 : (
6641 2 : get_key(6),
6642 2 : Lsn(0x20),
6643 2 : Value::Image(Bytes::from("value 6@0x20")),
6644 2 : ),
6645 2 : ];
6646 2 : let delta3 = vec![
6647 2 : (
6648 2 : get_key(8),
6649 2 : Lsn(0x40),
6650 2 : Value::Image(Bytes::from("value 8@0x40")),
6651 2 : ),
6652 2 : (
6653 2 : get_key(9),
6654 2 : Lsn(0x40),
6655 2 : Value::Image(Bytes::from("value 9@0x40")),
6656 2 : ),
6657 2 : ];
6658 2 :
6659 2 : let tline = tenant
6660 2 : .create_test_timeline_with_layers(
6661 2 : TIMELINE_ID,
6662 2 : Lsn(0x10),
6663 2 : DEFAULT_PG_VERSION,
6664 2 : &ctx,
6665 2 : vec![delta1, delta2, delta3], // delta layers
6666 2 : vec![(Lsn(0x10), img_layer)], // image layers
6667 2 : Lsn(0x50),
6668 2 : )
6669 48 : .await?;
6670 2 : {
6671 2 : // Update GC info
6672 2 : let mut guard = tline.gc_info.write().unwrap();
6673 2 : guard.cutoffs.pitr = Lsn(0x30);
6674 2 : guard.cutoffs.horizon = Lsn(0x30);
6675 2 : }
6676 2 :
6677 2 : let expected_result = [
6678 2 : Bytes::from_static(b"value 0@0x10"),
6679 2 : Bytes::from_static(b"value 1@0x20"),
6680 2 : Bytes::from_static(b"value 2@0x30"),
6681 2 : Bytes::from_static(b"value 3@0x40"),
6682 2 : Bytes::from_static(b"value 4@0x10"),
6683 2 : Bytes::from_static(b"value 5@0x20"),
6684 2 : Bytes::from_static(b"value 6@0x20"),
6685 2 : Bytes::from_static(b"value 7@0x10"),
6686 2 : Bytes::from_static(b"value 8@0x40"),
6687 2 : Bytes::from_static(b"value 9@0x40"),
6688 2 : ];
6689 2 :
6690 20 : for (idx, expected) in expected_result.iter().enumerate() {
6691 20 : assert_eq!(
6692 20 : tline
6693 20 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
6694 17 : .await
6695 20 : .unwrap(),
6696 2 : expected
6697 2 : );
6698 2 : }
6699 2 :
6700 2 : let cancel = CancellationToken::new();
6701 52 : tline.compact_with_gc(&cancel, &ctx).await.unwrap();
6702 2 :
6703 20 : for (idx, expected) in expected_result.iter().enumerate() {
6704 20 : assert_eq!(
6705 20 : tline
6706 20 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
6707 8 : .await
6708 20 : .unwrap(),
6709 2 : expected
6710 2 : );
6711 2 : }
6712 2 :
6713 2 : // Check if the image layer at the GC horizon contains exactly what we want
6714 2 : let image_at_gc_horizon = tline
6715 2 : .inspect_image_layers(Lsn(0x30), &ctx)
6716 2 : .await
6717 2 : .unwrap()
6718 2 : .into_iter()
6719 36 : .filter(|(k, _)| k.is_metadata_key())
6720 2 : .collect::<Vec<_>>();
6721 2 :
6722 2 : assert_eq!(image_at_gc_horizon.len(), 10);
6723 2 : let expected_result = [
6724 2 : Bytes::from_static(b"value 0@0x10"),
6725 2 : Bytes::from_static(b"value 1@0x20"),
6726 2 : Bytes::from_static(b"value 2@0x30"),
6727 2 : Bytes::from_static(b"value 3@0x10"),
6728 2 : Bytes::from_static(b"value 4@0x10"),
6729 2 : Bytes::from_static(b"value 5@0x20"),
6730 2 : Bytes::from_static(b"value 6@0x20"),
6731 2 : Bytes::from_static(b"value 7@0x10"),
6732 2 : Bytes::from_static(b"value 8@0x10"),
6733 2 : Bytes::from_static(b"value 9@0x10"),
6734 2 : ];
6735 22 : for idx in 0..10 {
6736 20 : assert_eq!(
6737 20 : image_at_gc_horizon[idx],
6738 20 : (get_key(idx as u32), expected_result[idx].clone())
6739 20 : );
6740 2 : }
6741 2 :
6742 2 : // Check if old layers are removed / new layers have the expected LSN
6743 2 : let mut all_layers = tline.inspect_historic_layers().await.unwrap();
6744 4 : all_layers.sort_by(|k1, k2| {
6745 4 : (
6746 4 : k1.is_delta,
6747 4 : k1.key_range.start,
6748 4 : k1.key_range.end,
6749 4 : k1.lsn_range.start,
6750 4 : k1.lsn_range.end,
6751 4 : )
6752 4 : .cmp(&(
6753 4 : k2.is_delta,
6754 4 : k2.key_range.start,
6755 4 : k2.key_range.end,
6756 4 : k2.lsn_range.start,
6757 4 : k2.lsn_range.end,
6758 4 : ))
6759 4 : });
6760 2 : assert_eq!(
6761 2 : all_layers,
6762 2 : vec![
6763 2 : // Image layer at GC horizon
6764 2 : PersistentLayerKey {
6765 2 : key_range: Key::MIN..get_key(10),
6766 2 : lsn_range: Lsn(0x30)..Lsn(0x31),
6767 2 : is_delta: false
6768 2 : },
6769 2 : // The delta layer that is cut in the middle
6770 2 : PersistentLayerKey {
6771 2 : key_range: get_key(3)..get_key(4),
6772 2 : lsn_range: Lsn(0x30)..Lsn(0x41),
6773 2 : is_delta: true
6774 2 : },
6775 2 : // The delta layer we created and should not be picked for the compaction
6776 2 : PersistentLayerKey {
6777 2 : key_range: get_key(8)..get_key(10),
6778 2 : lsn_range: Lsn(0x40)..Lsn(0x41),
6779 2 : is_delta: true
6780 2 : }
6781 2 : ]
6782 2 : );
6783 2 :
6784 2 : Ok(())
6785 2 : }
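
// Illustrating the layer-key conventions in the expectation above (assuming
// half-open LSN ranges): an image layer at LSN L occupies the degenerate
// range L..L+1 (hence Lsn(0x30)..Lsn(0x31)), while a delta layer spans the
// LSNs of the records it holds.
#[allow(dead_code)]
fn image_layer_lsn_range_sketch(lsn: Lsn) -> std::ops::Range<Lsn> {
    lsn..Lsn(lsn.0 + 1)
}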
6786 :
6787 : #[tokio::test]
6788 2 : async fn test_neon_test_record() -> anyhow::Result<()> {
6789 2 : let harness = TenantHarness::create("test_neon_test_record")?;
6790 8 : let (tenant, ctx) = harness.load().await;
6791 2 :
6792 24 : fn get_key(id: u32) -> Key {
6793 24 : // using aux keys here b/c they are guaranteed to be inside `collect_keyspace`.
6794 24 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
6795 24 : key.field6 = id;
6796 24 : key
6797 24 : }
6798 2 :
6799 2 : let delta1 = vec![
6800 2 : (
6801 2 : get_key(1),
6802 2 : Lsn(0x20),
6803 2 : Value::WalRecord(NeonWalRecord::wal_append(",0x20")),
6804 2 : ),
6805 2 : (
6806 2 : get_key(1),
6807 2 : Lsn(0x30),
6808 2 : Value::WalRecord(NeonWalRecord::wal_append(",0x30")),
6809 2 : ),
6810 2 : (get_key(2), Lsn(0x10), Value::Image("0x10".into())),
6811 2 : (
6812 2 : get_key(2),
6813 2 : Lsn(0x20),
6814 2 : Value::WalRecord(NeonWalRecord::wal_append(",0x20")),
6815 2 : ),
6816 2 : (
6817 2 : get_key(2),
6818 2 : Lsn(0x30),
6819 2 : Value::WalRecord(NeonWalRecord::wal_append(",0x30")),
6820 2 : ),
6821 2 : (get_key(3), Lsn(0x10), Value::Image("0x10".into())),
6822 2 : (
6823 2 : get_key(3),
6824 2 : Lsn(0x20),
6825 2 : Value::WalRecord(NeonWalRecord::wal_clear()),
6826 2 : ),
6827 2 : (get_key(4), Lsn(0x10), Value::Image("0x10".into())),
6828 2 : (
6829 2 : get_key(4),
6830 2 : Lsn(0x20),
6831 2 : Value::WalRecord(NeonWalRecord::wal_init()),
6832 2 : ),
6833 2 : ];
6834 2 : let image1 = vec![(get_key(1), "0x10".into())];
6835 2 :
6836 2 : let tline = tenant
6837 2 : .create_test_timeline_with_layers(
6838 2 : TIMELINE_ID,
6839 2 : Lsn(0x10),
6840 2 : DEFAULT_PG_VERSION,
6841 2 : &ctx,
6842 2 : vec![delta1], // delta layers
6843 2 : vec![(Lsn(0x10), image1)], // image layers
6844 2 : Lsn(0x50),
6845 2 : )
6846 19 : .await?;
6847 2 :
6848 2 : assert_eq!(
6849 8 : tline.get(get_key(1), Lsn(0x50), &ctx).await?,
6850 2 : Bytes::from_static(b"0x10,0x20,0x30")
6851 2 : );
6852 2 : assert_eq!(
6853 2 : tline.get(get_key(2), Lsn(0x50), &ctx).await?,
6854 2 : Bytes::from_static(b"0x10,0x20,0x30")
6855 2 : );
6856 2 :
6857 2 : // Need to lift the "Neon WAL redo requires base image" restriction before these can be enabled.
6858 2 :
6859 2 : // assert_eq!(tline.get(get_key(3), Lsn(0x50), &ctx).await?, Bytes::new());
6860 2 : // assert_eq!(tline.get(get_key(4), Lsn(0x50), &ctx).await?, Bytes::new());
6861 2 :
6862 2 : Ok(())
6863 2 : }
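
// Hedged model of the test-only records used above (an assumption about their
// semantics, inferred from the assertions): `wal_append` concatenates its
// payload onto the current page image during redo, so replaying the deltas
// over the base image "0x10" yields "0x10,0x20,0x30".
#[allow(dead_code)]
fn apply_wal_appends_sketch(base: &str, appends: &[&str]) -> String {
    let mut page = base.to_string();
    for payload in appends {
        page.push_str(payload); // appends are applied in LSN order
    }
    page
}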
6864 :
6865 : #[tokio::test]
6866 2 : async fn test_lsn_lease() -> anyhow::Result<()> {
6867 8 : let (tenant, ctx) = TenantHarness::create("test_lsn_lease")?.load().await;
6868 2 : let key = Key::from_hex("010000000033333333444444445500000000").unwrap();
6869 2 :
6870 2 : let end_lsn = Lsn(0x100);
6871 2 : let image_layers = (0x20..=0x90)
6872 2 : .step_by(0x10)
6873 16 : .map(|n| {
6874 16 : (
6875 16 : Lsn(n),
6876 16 : vec![(key, test_img(&format!("data key at {:x}", n)))],
6877 16 : )
6878 16 : })
6879 2 : .collect();
6880 2 :
6881 2 : let timeline = tenant
6882 2 : .create_test_timeline_with_layers(
6883 2 : TIMELINE_ID,
6884 2 : Lsn(0x10),
6885 2 : DEFAULT_PG_VERSION,
6886 2 : &ctx,
6887 2 : Vec::new(),
6888 2 : image_layers,
6889 2 : end_lsn,
6890 2 : )
6891 62 : .await?;
6892 2 :
6893 2 : let leased_lsns = [0x30, 0x50, 0x70];
6894 2 : let mut leases = Vec::new();
6895 6 : let _: anyhow::Result<_> = leased_lsns.iter().try_for_each(|n| {
6896 6 : leases.push(timeline.make_lsn_lease(Lsn(*n), timeline.get_lsn_lease_length(), &ctx)?);
6897 6 : Ok(())
6898 6 : });
6899 2 :
6900 2 : // Renewing with a shorter lease should not change the lease.
6901 2 : let updated_lease_0 =
6902 2 : timeline.make_lsn_lease(Lsn(leased_lsns[0]), Duration::from_secs(0), &ctx)?;
6903 2 : assert_eq!(updated_lease_0.valid_until, leases[0].valid_until);
6904 2 :
6905 2 : // Renewing with a longer lease should extend the lease to the later expiration time.
6906 2 : let updated_lease_1 = timeline.make_lsn_lease(
6907 2 : Lsn(leased_lsns[1]),
6908 2 : timeline.get_lsn_lease_length() * 2,
6909 2 : &ctx,
6910 2 : )?;
6911 2 :
6912 2 : assert!(updated_lease_1.valid_until > leases[1].valid_until);
6913 2 :
6914 2 : // Force set disk consistent lsn so we can get the cutoff at `end_lsn`.
6915 2 : info!(
6916 2 : "latest_gc_cutoff_lsn: {}",
6917 0 : *timeline.get_latest_gc_cutoff_lsn()
6918 2 : );
6919 2 : timeline.force_set_disk_consistent_lsn(end_lsn);
6920 2 :
6921 2 : let res = tenant
6922 2 : .gc_iteration(
6923 2 : Some(TIMELINE_ID),
6924 2 : 0,
6925 2 : Duration::ZERO,
6926 2 : &CancellationToken::new(),
6927 2 : &ctx,
6928 2 : )
6929 2 : .await?;
6930 2 :
6931 2 : // Keeping every layer below Lsn(0x80) b/c of the leases:
6932 2 : // 0/10: initdb layer
6933 2 : // (0/20..=0/70).step_by(0x10): image layers added when creating the timeline.
6934 2 : assert_eq!(res.layers_needed_by_leases, 7);
6935 2 : // Keeping 0/90 b/c it is the latest layer.
6936 2 : assert_eq!(res.layers_not_updated, 1);
6937 2 : // Removed 0/80.
6938 2 : assert_eq!(res.layers_removed, 1);
6939 2 :
6940 2 : // Make lease on a already GC-ed LSN.
6941 2 : // 0/80 does not have a valid lease + is below latest_gc_cutoff
6942 2 : assert!(Lsn(0x80) < *timeline.get_latest_gc_cutoff_lsn());
6943 2 : let res = timeline.make_lsn_lease(Lsn(0x80), timeline.get_lsn_lease_length(), &ctx);
6944 2 : assert!(res.is_err());
6945 2 :
6946 2 : // Should still be able to renew a currently valid lease
6947 2 : // Assumption: original lease to is still valid for 0/50.
6948 2 : let _ =
6949 2 : timeline.make_lsn_lease(Lsn(leased_lsns[1]), timeline.get_lsn_lease_length(), &ctx)?;
6950 2 :
6951 2 : Ok(())
6952 2 : }
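
// Sketch of the renewal rule asserted above (simplified to raw timestamps):
// a renewal may move the expiration forward but never backward, so the
// effective deadline is the later of the existing and requested ones.
#[allow(dead_code)]
fn renewed_valid_until_sketch(
    existing: std::time::SystemTime,
    requested: std::time::SystemTime,
) -> std::time::SystemTime {
    existing.max(requested)
}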
6953 :
6954 : #[tokio::test]
6955 2 : async fn test_simple_bottom_most_compaction_deltas() -> anyhow::Result<()> {
6956 2 : let harness = TenantHarness::create("test_simple_bottom_most_compaction_deltas")?;
6957 8 : let (tenant, ctx) = harness.load().await;
6958 2 :
6959 118 : fn get_key(id: u32) -> Key {
6960 118 : // using aux keys here b/c they are guaranteed to be inside `collect_keyspace`.
6961 118 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
6962 118 : key.field6 = id;
6963 118 : key
6964 118 : }
6965 2 :
6966 2 : // We create one bottom-most image layer, a delta layer D1 crossing the GC horizon, D2 below the horizon, and D3 above the horizon.
6967 2 : //
6968 2 : // | D1 | | D3 |
6969 2 : // -| |-- gc horizon -----------------
6970 2 : // | | | D2 |
6971 2 : // --------- img layer ------------------
6972 2 : //
6973 2 : // What we should expect from this compaction is:
6974 2 : // | Part of D1 | | D3 |
6975 2 : // --------- img layer with D1+D2 at GC horizon------------------
6976 2 :
6977 2 : // img layer at 0x10
6978 2 : let img_layer = (0..10)
6979 20 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
6980 2 : .collect_vec();
6981 2 :
6982 2 : let delta1 = vec![
6983 2 : (
6984 2 : get_key(1),
6985 2 : Lsn(0x20),
6986 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
6987 2 : ),
6988 2 : (
6989 2 : get_key(2),
6990 2 : Lsn(0x30),
6991 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
6992 2 : ),
6993 2 : (
6994 2 : get_key(3),
6995 2 : Lsn(0x28),
6996 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x28")),
6997 2 : ),
6998 2 : (
6999 2 : get_key(3),
7000 2 : Lsn(0x30),
7001 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
7002 2 : ),
7003 2 : (
7004 2 : get_key(3),
7005 2 : Lsn(0x40),
7006 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x40")),
7007 2 : ),
7008 2 : ];
7009 2 : let delta2 = vec![
7010 2 : (
7011 2 : get_key(5),
7012 2 : Lsn(0x20),
7013 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
7014 2 : ),
7015 2 : (
7016 2 : get_key(6),
7017 2 : Lsn(0x20),
7018 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
7019 2 : ),
7020 2 : ];
7021 2 : let delta3 = vec![
7022 2 : (
7023 2 : get_key(8),
7024 2 : Lsn(0x40),
7025 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x40")),
7026 2 : ),
7027 2 : (
7028 2 : get_key(9),
7029 2 : Lsn(0x40),
7030 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x40")),
7031 2 : ),
7032 2 : ];
7033 2 :
7034 2 : let tline = tenant
7035 2 : .create_test_timeline_with_layers(
7036 2 : TIMELINE_ID,
7037 2 : Lsn(0x10),
7038 2 : DEFAULT_PG_VERSION,
7039 2 : &ctx,
7040 2 : vec![delta1, delta2, delta3], // delta layers
7041 2 : vec![(Lsn(0x10), img_layer)], // image layers
7042 2 : Lsn(0x50),
7043 2 : )
7044 49 : .await?;
7045 2 : {
7046 2 : // Update GC info
7047 2 : let mut guard = tline.gc_info.write().unwrap();
7048 2 : *guard = GcInfo {
7049 2 : retain_lsns: vec![],
7050 2 : cutoffs: GcCutoffs {
7051 2 : pitr: Lsn(0x30),
7052 2 : horizon: Lsn(0x30),
7053 2 : },
7054 2 : leases: Default::default(),
7055 2 : };
7056 2 : }
7057 2 :
7058 2 : let expected_result = [
7059 2 : Bytes::from_static(b"value 0@0x10"),
7060 2 : Bytes::from_static(b"value 1@0x10@0x20"),
7061 2 : Bytes::from_static(b"value 2@0x10@0x30"),
7062 2 : Bytes::from_static(b"value 3@0x10@0x28@0x30@0x40"),
7063 2 : Bytes::from_static(b"value 4@0x10"),
7064 2 : Bytes::from_static(b"value 5@0x10@0x20"),
7065 2 : Bytes::from_static(b"value 6@0x10@0x20"),
7066 2 : Bytes::from_static(b"value 7@0x10"),
7067 2 : Bytes::from_static(b"value 8@0x10@0x40"),
7068 2 : Bytes::from_static(b"value 9@0x10@0x40"),
7069 2 : ];
7070 2 :
7071 2 : let expected_result_at_gc_horizon = [
7072 2 : Bytes::from_static(b"value 0@0x10"),
7073 2 : Bytes::from_static(b"value 1@0x10@0x20"),
7074 2 : Bytes::from_static(b"value 2@0x10@0x30"),
7075 2 : Bytes::from_static(b"value 3@0x10@0x28@0x30"),
7076 2 : Bytes::from_static(b"value 4@0x10"),
7077 2 : Bytes::from_static(b"value 5@0x10@0x20"),
7078 2 : Bytes::from_static(b"value 6@0x10@0x20"),
7079 2 : Bytes::from_static(b"value 7@0x10"),
7080 2 : Bytes::from_static(b"value 8@0x10"),
7081 2 : Bytes::from_static(b"value 9@0x10"),
7082 2 : ];
7083 2 :
7084 22 : for idx in 0..10 {
7085 20 : assert_eq!(
7086 20 : tline
7087 20 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
7088 17 : .await
7089 20 : .unwrap(),
7090 20 : &expected_result[idx]
7091 2 : );
7092 20 : assert_eq!(
7093 20 : tline
7094 20 : .get(get_key(idx as u32), Lsn(0x30), &ctx)
7095 2 : .await
7096 20 : .unwrap(),
7097 20 : &expected_result_at_gc_horizon[idx]
7098 2 : );
7099 2 : }
7100 2 :
7101 2 : let cancel = CancellationToken::new();
7102 52 : tline.compact_with_gc(&cancel, &ctx).await.unwrap();
7103 2 :
7104 22 : for idx in 0..10 {
7105 20 : assert_eq!(
7106 20 : tline
7107 20 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
7108 8 : .await
7109 20 : .unwrap(),
7110 20 : &expected_result[idx]
7111 2 : );
7112 20 : assert_eq!(
7113 20 : tline
7114 20 : .get(get_key(idx as u32), Lsn(0x30), &ctx)
7115 2 : .await
7116 20 : .unwrap(),
7117 20 : &expected_result_at_gc_horizon[idx]
7118 2 : );
7119 2 : }
7120 2 :
7121 2 : Ok(())
7122 2 : }
7123 : }