Line data Source code
1 : //!
2 : //! Timeline repository implementation that keeps old data in files on disk, and
3 : //! the recent changes in memory. See tenant/*_layer.rs files.
4 : //! The functions here are responsible for locating the correct layer for the
5 : //! get/put call, walking back the timeline branching history as needed.
6 : //!
7 : //! The files are stored in the .neon/tenants/<tenant_id>/timelines/<timeline_id>
8 : //! directory. See docs/pageserver-storage.md for how the files are managed.
9 : //! In addition to the layer files, there is a metadata file in the same
10 : //! directory that contains information about the timeline, in particular its
11 : //! parent timeline, and the last LSN that has been written to disk.
12 : //!
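 : //! A hedged sketch of the on-disk layout described above (entries illustrative):
 : //!
 : //! ```text
 : //! .neon/tenants/<tenant_id>/timelines/<timeline_id>/
 : //!   <layer files>   -- image/delta layer files, see tenant/*_layer.rs
 : //!   metadata        -- parent timeline and last LSN written to disk
 : //! ```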
13 :
14 : use anyhow::{bail, Context};
15 : use arc_swap::ArcSwap;
16 : use camino::Utf8Path;
17 : use camino::Utf8PathBuf;
18 : use enumset::EnumSet;
19 : use futures::stream::FuturesUnordered;
20 : use futures::FutureExt;
21 : use futures::StreamExt;
22 : use pageserver_api::models;
23 : use pageserver_api::models::AuxFilePolicy;
24 : use pageserver_api::models::TimelineState;
25 : use pageserver_api::models::TopTenantShardItem;
26 : use pageserver_api::models::WalRedoManagerStatus;
27 : use pageserver_api::shard::ShardIdentity;
28 : use pageserver_api::shard::ShardStripeSize;
29 : use pageserver_api::shard::TenantShardId;
30 : use remote_storage::DownloadError;
31 : use remote_storage::GenericRemoteStorage;
32 : use remote_storage::TimeoutOrCancel;
33 : use std::fmt;
34 : use std::time::SystemTime;
35 : use storage_broker::BrokerClientChannel;
36 : use tokio::io::BufReader;
37 : use tokio::sync::watch;
38 : use tokio::task::JoinSet;
39 : use tokio_util::sync::CancellationToken;
40 : use tracing::*;
41 : use utils::backoff;
42 : use utils::completion;
43 : use utils::crashsafe::path_with_suffix_extension;
44 : use utils::failpoint_support;
45 : use utils::fs_ext;
46 : use utils::pausable_failpoint;
47 : use utils::sync::gate::Gate;
48 : use utils::sync::gate::GateGuard;
49 : use utils::timeout::timeout_cancellable;
50 : use utils::timeout::TimeoutCancellableError;
51 : use utils::zstd::create_zst_tarball;
52 : use utils::zstd::extract_zst_tarball;
53 :
54 : use self::config::AttachedLocationConfig;
55 : use self::config::AttachmentMode;
56 : use self::config::LocationConf;
57 : use self::config::TenantConf;
58 : use self::metadata::TimelineMetadata;
59 : use self::mgr::GetActiveTenantError;
60 : use self::mgr::GetTenantError;
61 : use self::remote_timeline_client::upload::upload_index_part;
62 : use self::remote_timeline_client::RemoteTimelineClient;
63 : use self::timeline::uninit::TimelineCreateGuard;
64 : use self::timeline::uninit::TimelineExclusionError;
65 : use self::timeline::uninit::UninitializedTimeline;
66 : use self::timeline::EvictionTaskTenantState;
67 : use self::timeline::GcCutoffs;
68 : use self::timeline::TimelineResources;
69 : use self::timeline::WaitLsnError;
70 : use crate::config::PageServerConf;
71 : use crate::context::{DownloadBehavior, RequestContext};
72 : use crate::deletion_queue::DeletionQueueClient;
73 : use crate::deletion_queue::DeletionQueueError;
74 : use crate::import_datadir;
75 : use crate::is_uninit_mark;
76 : use crate::metrics::TENANT;
77 : use crate::metrics::{
78 : remove_tenant_metrics, BROKEN_TENANTS_SET, TENANT_STATE_METRIC, TENANT_SYNTHETIC_SIZE_METRIC,
79 : };
80 : use crate::repository::GcResult;
81 : use crate::task_mgr;
82 : use crate::task_mgr::TaskKind;
83 : use crate::tenant::config::LocationMode;
84 : use crate::tenant::config::TenantConfOpt;
85 : pub use crate::tenant::remote_timeline_client::index::IndexPart;
86 : use crate::tenant::remote_timeline_client::remote_initdb_archive_path;
87 : use crate::tenant::remote_timeline_client::MaybeDeletedIndexPart;
88 : use crate::tenant::remote_timeline_client::INITDB_PATH;
89 : use crate::tenant::storage_layer::DeltaLayer;
90 : use crate::tenant::storage_layer::ImageLayer;
91 : use crate::InitializationOrder;
92 : use std::collections::hash_map::Entry;
93 : use std::collections::BTreeSet;
94 : use std::collections::HashMap;
95 : use std::collections::HashSet;
96 : use std::fmt::Debug;
97 : use std::fmt::Display;
98 : use std::fs;
99 : use std::fs::File;
100 : use std::ops::Bound::Included;
101 : use std::sync::atomic::AtomicU64;
102 : use std::sync::atomic::Ordering;
103 : use std::sync::Arc;
104 : use std::sync::Mutex;
105 : use std::time::{Duration, Instant};
106 :
107 : use crate::span;
108 : use crate::tenant::timeline::delete::DeleteTimelineFlow;
109 : use crate::tenant::timeline::uninit::cleanup_timeline_directory;
110 : use crate::virtual_file::VirtualFile;
111 : use crate::walredo::PostgresRedoManager;
112 : use crate::TEMP_FILE_SUFFIX;
113 : use once_cell::sync::Lazy;
114 : pub use pageserver_api::models::TenantState;
115 : use tokio::sync::Semaphore;
116 :
117 0 : static INIT_DB_SEMAPHORE: Lazy<Semaphore> = Lazy::new(|| Semaphore::new(8));
118 : use utils::{
119 : crashsafe,
120 : generation::Generation,
121 : id::TimelineId,
122 : lsn::{Lsn, RecordLsn},
123 : };
124 :
125 : pub mod blob_io;
126 : pub mod block_io;
127 : pub mod vectored_blob_io;
128 :
129 : pub mod disk_btree;
130 : pub(crate) mod ephemeral_file;
131 : pub mod layer_map;
132 :
133 : pub mod metadata;
134 : pub mod remote_timeline_client;
135 : pub mod storage_layer;
136 :
137 : pub mod config;
138 : pub mod mgr;
139 : pub mod secondary;
140 : pub mod tasks;
141 : pub mod upload_queue;
142 :
143 : pub(crate) mod timeline;
144 :
145 : pub mod size;
146 :
147 : pub(crate) mod throttle;
148 :
149 : pub(crate) use crate::span::debug_assert_current_span_has_tenant_and_timeline_id;
150 : pub(crate) use timeline::{LogicalSizeCalculationCause, PageReconstructError, Timeline};
151 :
152 : // re-export for use in walreceiver
153 : pub use crate::tenant::timeline::WalReceiverInfo;
154 :
155 : /// The "tenants" part of `tenants/<tenant>/timelines...`
156 : pub const TENANTS_SEGMENT_NAME: &str = "tenants";
157 :
158 : /// The "timelines" part of the `.neon/tenants/<tenant_id>/timelines/<timeline_id>` directory prefix.
159 : pub const TIMELINES_SEGMENT_NAME: &str = "timelines";
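 : // A hedged sketch (hypothetical helper) of how these segments compose; the real
 : // code builds such paths via `PageServerConf::timelines_path` and friends:
 : //
 : //     fn timelines_dir(root: &Utf8Path, tenant: &TenantShardId) -> Utf8PathBuf {
 : //         root.join(TENANTS_SEGMENT_NAME)
 : //             .join(tenant.to_string())
 : //             .join(TIMELINES_SEGMENT_NAME)
 : //     }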
160 :
161 : /// References to shared objects that are passed into each tenant, such
162 : /// as the shared remote storage client and process initialization state.
163 : #[derive(Clone)]
164 : pub struct TenantSharedResources {
165 : pub broker_client: storage_broker::BrokerClientChannel,
166 : pub remote_storage: GenericRemoteStorage,
167 : pub deletion_queue_client: DeletionQueueClient,
168 : }
169 :
170 : /// A [`Tenant`] is really an _attached_ tenant. The configuration
171 : /// for an attached tenant is a subset of the [`LocationConf`], represented
172 : /// in this struct.
173 : pub(super) struct AttachedTenantConf {
174 : tenant_conf: TenantConfOpt,
175 : location: AttachedLocationConfig,
176 : }
177 :
178 : impl AttachedTenantConf {
179 0 : fn new(tenant_conf: TenantConfOpt, location: AttachedLocationConfig) -> Self {
180 0 : Self {
181 0 : tenant_conf,
182 0 : location,
183 0 : }
184 0 : }
185 :
186 169 : fn try_from(location_conf: LocationConf) -> anyhow::Result<Self> {
187 169 : match &location_conf.mode {
188 169 : LocationMode::Attached(attach_conf) => Ok(Self {
189 169 : tenant_conf: location_conf.tenant_conf,
190 169 : location: *attach_conf,
191 169 : }),
192 : LocationMode::Secondary(_) => {
193 0 : anyhow::bail!("Attempted to construct AttachedTenantConf from a LocationConf in secondary mode")
194 : }
195 : }
196 169 : }
197 : }
198 : struct TimelinePreload {
199 : timeline_id: TimelineId,
200 : client: RemoteTimelineClient,
201 : index_part: Result<MaybeDeletedIndexPart, DownloadError>,
202 : }
203 :
204 : pub(crate) struct TenantPreload {
205 : timelines: HashMap<TimelineId, TimelinePreload>,
206 : }
207 :
208 : /// When we spawn a tenant, there is a special mode for tenant creation that
209 : /// avoids trying to read anything from remote storage.
210 : pub(crate) enum SpawnMode {
211 : /// Activate as soon as possible
212 : Eager,
213 : /// Lazy activation in the background, with the option to skip the queue if the need comes up
214 : Lazy,
215 : /// Tenant has been created during the lifetime of this process
216 : Create,
217 : }
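 : // A hedged note on how `Tenant::spawn` (below) treats these modes:
 : //
 : //     // Create skips the remote preload entirely; Eager/Lazy download index parts.
 : //     let preload_needed = !matches!(mode, SpawnMode::Create);
 : //     // Only Lazy waits for a warmup permit (or an on-demand access) before attaching.
 : //     let jumps_warmup_queue = !matches!(mode, SpawnMode::Lazy);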
218 :
219 : ///
220 : /// A `Tenant` consists of multiple timelines, kept in a hash table.
221 : ///
222 : pub struct Tenant {
223 : // Global pageserver config parameters
224 : pub conf: &'static PageServerConf,
225 :
226 : /// The value creation timestamp, used to measure activation delay, see:
227 : /// <https://github.com/neondatabase/neon/issues/4025>
228 : constructed_at: Instant,
229 :
230 : state: watch::Sender<TenantState>,
231 :
232 : // Overridden tenant-specific config parameters.
233 : // We keep the TenantConfOpt struct here to preserve the information
234 : // about parameters that are not set.
235 : // This is necessary to allow global config updates.
236 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
237 :
238 : tenant_shard_id: TenantShardId,
239 :
240 : // The detailed sharding information, beyond the number/count in tenant_shard_id
241 : shard_identity: ShardIdentity,
242 :
243 : /// The remote storage generation, used to protect S3 objects from split-brain.
244 : /// Does not change over the lifetime of the [`Tenant`] object.
245 : ///
246 : /// This duplicates the generation stored in LocationConf, but that structure is mutable:
247 : /// this copy enforces the invariant that the generation doesn't change during a Tenant's lifetime.
248 : generation: Generation,
249 :
250 : timelines: Mutex<HashMap<TimelineId, Arc<Timeline>>>,
251 :
252 : /// During timeline creation, we first insert the TimelineId into the
253 : /// creating map, then `timelines`, then remove it from the creating map.
254 : /// **Lock order**: if acquiring both, acquire `timelines` before `timelines_creating`.
255 : timelines_creating: std::sync::Mutex<HashSet<TimelineId>>,
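 : // A hedged sketch of the documented lock order (illustrative only):
 : //
 : //     let timelines = self.timelines.lock().unwrap(); // acquire first
 : //     let mut creating = self.timelines_creating.lock().unwrap(); // then this
 : //     // ...but never acquire `timelines` while already holding `timelines_creating`.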
256 :
257 : // This mutex prevents creation of new timelines during GC.
258 : // Adding yet another mutex (in addition to `timelines`) is needed because holding the
259 : // `timelines` mutex for the whole GC iteration
260 : // could block `get_timeline`, `get_timelines_state`, and other timeline operations
261 : // for a long time, which in turn may cause replication connections to drop and
262 : // wait_for_lsn timeouts to expire.
263 : gc_cs: tokio::sync::Mutex<()>,
264 : walredo_mgr: Option<Arc<WalRedoManager>>,
265 :
266 : // provides access to timeline data sitting in the remote storage
267 : pub(crate) remote_storage: GenericRemoteStorage,
268 :
269 : // Access to global deletion queue for when this tenant wants to schedule a deletion
270 : deletion_queue_client: DeletionQueueClient,
271 :
272 : /// Cached logical sizes, updated on each [`Tenant::gather_size_inputs`].
273 : cached_logical_sizes: tokio::sync::Mutex<HashMap<(TimelineId, Lsn), u64>>,
274 : cached_synthetic_tenant_size: Arc<AtomicU64>,
275 :
276 : eviction_task_tenant_state: tokio::sync::Mutex<EvictionTaskTenantState>,
277 :
278 : /// If the tenant is in Activating state, notify this to encourage it
279 : /// to proceed to Active as soon as possible, rather than waiting for lazy
280 : /// background warmup.
281 : pub(crate) activate_now_sem: tokio::sync::Semaphore,
282 :
283 : // Cancellation token fires when we have entered shutdown(). This is a parent of
284 : // Timelines' cancellation token.
285 : pub(crate) cancel: CancellationToken,
286 :
287 : // Users of the Tenant such as the page service must take this Gate to avoid
288 : // trying to use a Tenant which is shutting down.
289 : pub(crate) gate: Gate,
290 :
291 : /// Throttle applied at the top of [`Timeline::get`].
292 : /// All [`Tenant::timelines`] of a given [`Tenant`] instance share the same [`throttle::Throttle`] instance.
293 : pub(crate) timeline_get_throttle:
294 : Arc<throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>>,
295 :
296 : /// An ongoing timeline detach must be checked during attempts to GC or compact a timeline.
297 : ongoing_timeline_detach: std::sync::Mutex<Option<(TimelineId, utils::completion::Barrier)>>,
298 : }
299 :
300 : impl std::fmt::Debug for Tenant {
301 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
302 0 : write!(f, "{} ({})", self.tenant_shard_id, self.current_state())
303 0 : }
304 : }
305 :
306 : pub(crate) enum WalRedoManager {
307 : Prod(PostgresRedoManager),
308 : #[cfg(test)]
309 : Test(harness::TestRedoManager),
310 : }
311 :
312 : impl From<PostgresRedoManager> for WalRedoManager {
313 0 : fn from(mgr: PostgresRedoManager) -> Self {
314 0 : Self::Prod(mgr)
315 0 : }
316 : }
317 :
318 : #[cfg(test)]
319 : impl From<harness::TestRedoManager> for WalRedoManager {
320 161 : fn from(mgr: harness::TestRedoManager) -> Self {
321 161 : Self::Test(mgr)
322 161 : }
323 : }
324 :
325 : impl WalRedoManager {
326 0 : pub(crate) fn maybe_quiesce(&self, idle_timeout: Duration) {
327 0 : match self {
328 0 : Self::Prod(mgr) => mgr.maybe_quiesce(idle_timeout),
329 0 : #[cfg(test)]
330 0 : Self::Test(_) => {
331 0 : // Not applicable to test redo manager
332 0 : }
333 0 : }
334 0 : }
335 :
336 : /// # Cancel-Safety
337 : ///
338 : /// This method is cancellation-safe.
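 : ///
 : /// A hedged usage sketch: cancel-safety means callers may race this future
 : /// against a cancellation token and drop it without corrupting state:
 : ///
 : /// ```ignore
 : /// tokio::select! {
 : ///     res = mgr.request_redo(key, lsn, base_img, records, pg_version) => { /* use res */ }
 : ///     _ = cancel.cancelled() => { /* safe: the redo future may be dropped */ }
 : /// }
 : /// ```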
339 54 : pub async fn request_redo(
340 54 : &self,
341 54 : key: crate::repository::Key,
342 54 : lsn: Lsn,
343 54 : base_img: Option<(Lsn, bytes::Bytes)>,
344 54 : records: Vec<(Lsn, crate::walrecord::NeonWalRecord)>,
345 54 : pg_version: u32,
346 54 : ) -> anyhow::Result<bytes::Bytes> {
347 54 : match self {
348 0 : Self::Prod(mgr) => {
349 0 : mgr.request_redo(key, lsn, base_img, records, pg_version)
350 0 : .await
351 : }
352 : #[cfg(test)]
353 54 : Self::Test(mgr) => {
354 54 : mgr.request_redo(key, lsn, base_img, records, pg_version)
355 0 : .await
356 : }
357 : }
358 54 : }
359 :
360 0 : pub(crate) fn status(&self) -> Option<WalRedoManagerStatus> {
361 0 : match self {
362 0 : WalRedoManager::Prod(m) => Some(m.status()),
363 0 : #[cfg(test)]
364 0 : WalRedoManager::Test(_) => None,
365 0 : }
366 0 : }
367 : }
368 :
369 0 : #[derive(Debug, thiserror::Error, PartialEq, Eq)]
370 : pub enum GetTimelineError {
371 : #[error("Timeline {tenant_id}/{timeline_id} is not active, state: {state:?}")]
372 : NotActive {
373 : tenant_id: TenantShardId,
374 : timeline_id: TimelineId,
375 : state: TimelineState,
376 : },
377 : #[error("Timeline {tenant_id}/{timeline_id} was not found")]
378 : NotFound {
379 : tenant_id: TenantShardId,
380 : timeline_id: TimelineId,
381 : },
382 : }
383 :
384 0 : #[derive(Debug, thiserror::Error)]
385 : pub enum LoadLocalTimelineError {
386 : #[error("FailedToLoad")]
387 : Load(#[source] anyhow::Error),
388 : #[error("FailedToResumeDeletion")]
389 : ResumeDeletion(#[source] anyhow::Error),
390 : }
391 :
392 0 : #[derive(thiserror::Error)]
393 : pub enum DeleteTimelineError {
394 : #[error("NotFound")]
395 : NotFound,
396 :
397 : #[error("HasChildren")]
398 : HasChildren(Vec<TimelineId>),
399 :
400 : #[error("Timeline deletion is already in progress")]
401 : AlreadyInProgress(Arc<tokio::sync::Mutex<DeleteTimelineFlow>>),
402 :
403 : #[error(transparent)]
404 : Other(#[from] anyhow::Error),
405 : }
406 :
407 : impl Debug for DeleteTimelineError {
408 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
409 0 : match self {
410 0 : Self::NotFound => write!(f, "NotFound"),
411 0 : Self::HasChildren(c) => f.debug_tuple("HasChildren").field(c).finish(),
412 0 : Self::AlreadyInProgress(_) => f.debug_tuple("AlreadyInProgress").finish(),
413 0 : Self::Other(e) => f.debug_tuple("Other").field(e).finish(),
414 : }
415 0 : }
416 : }
417 :
418 : pub enum SetStoppingError {
419 : AlreadyStopping(completion::Barrier),
420 : Broken,
421 : }
422 :
423 : impl Debug for SetStoppingError {
424 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
425 0 : match self {
426 0 : Self::AlreadyStopping(_) => f.debug_tuple("AlreadyStopping").finish(),
427 0 : Self::Broken => write!(f, "Broken"),
428 : }
429 0 : }
430 : }
431 :
432 0 : #[derive(thiserror::Error, Debug)]
433 : pub enum CreateTimelineError {
434 : #[error("creation of timeline with the given ID is in progress")]
435 : AlreadyCreating,
436 : #[error("timeline already exists with different parameters")]
437 : Conflict,
438 : #[error(transparent)]
439 : AncestorLsn(anyhow::Error),
440 : #[error("ancestor timeline is not active")]
441 : AncestorNotActive,
442 : #[error("tenant shutting down")]
443 : ShuttingDown,
444 : #[error(transparent)]
445 : Other(#[from] anyhow::Error),
446 : }
447 :
448 : #[derive(thiserror::Error, Debug)]
449 : enum InitdbError {
450 : Other(anyhow::Error),
451 : Cancelled,
452 : Spawn(std::io::Result<()>),
453 : Failed(std::process::ExitStatus, Vec<u8>),
454 : }
455 :
456 : impl fmt::Display for InitdbError {
457 0 : fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
458 0 : match self {
459 0 : InitdbError::Cancelled => write!(f, "Operation was cancelled"),
460 0 : InitdbError::Spawn(e) => write!(f, "Spawn error: {:?}", e),
461 0 : InitdbError::Failed(status, stderr) => write!(
462 0 : f,
463 0 : "Command failed with status {:?}: {}",
464 0 : status,
465 0 : String::from_utf8_lossy(stderr)
466 0 : ),
467 0 : InitdbError::Other(e) => write!(f, "Error: {:?}", e),
468 : }
469 0 : }
470 : }
471 :
472 : impl From<std::io::Error> for InitdbError {
473 0 : fn from(error: std::io::Error) -> Self {
474 0 : InitdbError::Spawn(Err(error))
475 0 : }
476 : }
477 :
478 : enum CreateTimelineCause {
479 : Load,
480 : Delete,
481 : }
482 :
483 0 : #[derive(thiserror::Error, Debug)]
484 : pub(crate) enum GcError {
485 : // The tenant is shutting down
486 : #[error("tenant shutting down")]
487 : TenantCancelled,
488 :
489 : // The timeline is shutting down
490 : #[error("timeline shutting down")]
491 : TimelineCancelled,
492 :
493 : // The tenant is in a state ineligible to run GC
494 : #[error("not active")]
495 : NotActive,
496 :
497 : // A requested GC cutoff LSN was invalid, for example it tried to move backwards
498 : #[error("not active")]
499 : BadLsn { why: String },
500 :
501 : // A remote storage error while scheduling updates after GC
502 : #[error(transparent)]
503 : Remote(anyhow::Error),
504 :
505 : // An error reading while calculating GC cutoffs
506 : #[error(transparent)]
507 : GcCutoffs(PageReconstructError),
508 :
509 : // If GC was invoked for a particular timeline, this error means it didn't exist
510 : #[error("timeline not found")]
511 : TimelineNotFound,
512 : }
513 :
514 : impl From<PageReconstructError> for GcError {
515 0 : fn from(value: PageReconstructError) -> Self {
516 0 : match value {
517 0 : PageReconstructError::Cancelled => Self::TimelineCancelled,
518 0 : other => Self::GcCutoffs(other),
519 : }
520 0 : }
521 : }
522 :
523 : impl Tenant {
524 : /// Yet another helper for timeline initialization.
525 : ///
526 : /// - Initializes the Timeline struct and inserts it into the tenant's hash map
527 : /// - Scans the local timeline directory for layer files and builds the layer map
528 : /// - Downloads remote index file and adds remote files to the layer map
529 : /// - Schedules remote upload tasks for any files that are present locally but missing from remote storage.
530 : ///
531 : /// If the operation fails, the timeline is left in the tenant's hash map in Broken state. On success,
532 : /// it is marked as Active.
533 : #[allow(clippy::too_many_arguments)]
534 6 : async fn timeline_init_and_sync(
535 6 : &self,
536 6 : timeline_id: TimelineId,
537 6 : resources: TimelineResources,
538 6 : index_part: Option<IndexPart>,
539 6 : metadata: TimelineMetadata,
540 6 : ancestor: Option<Arc<Timeline>>,
541 6 : last_aux_file_policy: Option<AuxFilePolicy>,
542 6 : _ctx: &RequestContext,
543 6 : ) -> anyhow::Result<()> {
544 6 : let tenant_id = self.tenant_shard_id;
545 :
546 6 : let timeline = self.create_timeline_struct(
547 6 : timeline_id,
548 6 : &metadata,
549 6 : ancestor.clone(),
550 6 : resources,
551 6 : CreateTimelineCause::Load,
552 6 : // This could be derived from ancestor branch + index part. Though the only caller of `timeline_init_and_sync` is `load_remote_timeline`,
553 6 : // there will potentially be other callers of this function in the future, and we don't know whether `index_part` or `ancestor` takes precedence.
554 6 : // Therefore, we pass this field explicitly for now, and remove it once we fully migrate to aux file v2.
555 6 : last_aux_file_policy,
556 6 : )?;
557 6 : let disk_consistent_lsn = timeline.get_disk_consistent_lsn();
558 6 : anyhow::ensure!(
559 6 : disk_consistent_lsn.is_valid(),
560 0 : "Timeline {tenant_id}/{timeline_id} has invalid disk_consistent_lsn"
561 : );
562 6 : assert_eq!(
563 6 : disk_consistent_lsn,
564 6 : metadata.disk_consistent_lsn(),
565 0 : "these are used interchangeably"
566 : );
567 :
568 6 : if let Some(index_part) = index_part.as_ref() {
569 6 : timeline.remote_client.init_upload_queue(index_part)?;
570 :
571 6 : timeline
572 6 : .last_aux_file_policy
573 6 : .store(index_part.last_aux_file_policy());
574 : } else {
575 : // No data on the remote storage, but we have local metadata file. We can end up
576 : // here with timeline_create being interrupted before finishing index part upload.
577 : // By doing what we do here, the index part upload is retried.
578 : // If control plane retries timeline creation in the meantime, the mgmt API handler
579 : // for timeline creation will coalesce on the upload we queue here.
580 :
581 : // FIXME: this branch should be dead code as we no longer write local metadata.
582 :
583 0 : timeline
584 0 : .remote_client
585 0 : .init_upload_queue_for_empty_remote(&metadata)?;
586 0 : timeline
587 0 : .remote_client
588 0 : .schedule_index_upload_for_full_metadata_update(&metadata)?;
589 : }
590 :
591 6 : timeline
592 6 : .load_layer_map(disk_consistent_lsn, index_part)
593 6 : .await
594 6 : .with_context(|| {
595 0 : format!("Failed to load layermap for timeline {tenant_id}/{timeline_id}")
596 6 : })?;
597 :
598 : {
599 : // avoiding holding it across awaits
600 6 : let mut timelines_accessor = self.timelines.lock().unwrap();
601 6 : match timelines_accessor.entry(timeline_id) {
602 : // We should never try and load the same timeline twice during startup
603 : Entry::Occupied(_) => {
604 0 : unreachable!(
605 0 : "Timeline {tenant_id}/{timeline_id} already exists in the tenant map"
606 0 : );
607 : }
608 6 : Entry::Vacant(v) => {
609 6 : v.insert(Arc::clone(&timeline));
610 6 : timeline.maybe_spawn_flush_loop();
611 6 : }
612 6 : }
613 6 : };
614 6 :
615 6 : // Sanity check: a timeline should have some content.
616 6 : anyhow::ensure!(
617 6 : ancestor.is_some()
618 4 : || timeline
619 4 : .layers
620 4 : .read()
621 0 : .await
622 4 : .layer_map()
623 4 : .iter_historic_layers()
624 4 : .next()
625 4 : .is_some(),
626 0 : "Timeline has no ancestor and no layer files"
627 : );
628 :
629 6 : Ok(())
630 6 : }
631 :
632 : /// Attach a tenant that's available in cloud storage.
633 : ///
634 : /// This returns quickly, after just creating the in-memory
635 : /// `Tenant` object and launching a background task to download
636 : /// the remote index files. On return, the tenant is most likely still in
637 : /// Attaching state, and it will become Active once the background task
638 : /// finishes. You can use wait_until_active() to wait for the task to
639 : /// complete.
640 : ///
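 : /// A hedged usage sketch (argument values illustrative):
 : ///
 : /// ```ignore
 : /// let tenant = Tenant::spawn(conf, tenant_shard_id, resources, attached_conf,
 : ///                            shard_identity, None, SpawnMode::Lazy, &ctx)?;
 : /// // Most likely still TenantState::Attaching here; if an Active tenant is
 : /// // required, wait for activation, e.g. via wait_until_active().
 : /// ```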
641 : #[allow(clippy::too_many_arguments)]
642 0 : pub(crate) fn spawn(
643 0 : conf: &'static PageServerConf,
644 0 : tenant_shard_id: TenantShardId,
645 0 : resources: TenantSharedResources,
646 0 : attached_conf: AttachedTenantConf,
647 0 : shard_identity: ShardIdentity,
648 0 : init_order: Option<InitializationOrder>,
649 0 : mode: SpawnMode,
650 0 : ctx: &RequestContext,
651 0 : ) -> anyhow::Result<Arc<Tenant>> {
652 0 : let wal_redo_manager = Arc::new(WalRedoManager::from(PostgresRedoManager::new(
653 0 : conf,
654 0 : tenant_shard_id,
655 0 : )));
656 0 :
657 0 : let TenantSharedResources {
658 0 : broker_client,
659 0 : remote_storage,
660 0 : deletion_queue_client,
661 0 : } = resources;
662 0 :
663 0 : let attach_mode = attached_conf.location.attach_mode;
664 0 : let generation = attached_conf.location.generation;
665 0 :
666 0 : let tenant = Arc::new(Tenant::new(
667 0 : TenantState::Attaching,
668 0 : conf,
669 0 : attached_conf,
670 0 : shard_identity,
671 0 : Some(wal_redo_manager),
672 0 : tenant_shard_id,
673 0 : remote_storage.clone(),
674 0 : deletion_queue_client,
675 0 : ));
676 0 :
677 0 : // The attach task will carry a GateGuard, so that shutdown() reliably waits for it to drop out if
678 0 : // we shut down while attaching.
679 0 : let attach_gate_guard = tenant
680 0 : .gate
681 0 : .enter()
682 0 : .expect("We just created the Tenant: nothing else can have shut it down yet");
683 0 :
684 0 : // Do all the hard work in the background
685 0 : let tenant_clone = Arc::clone(&tenant);
686 0 : let ctx = ctx.detached_child(TaskKind::Attach, DownloadBehavior::Warn);
687 0 : task_mgr::spawn(
688 0 : &tokio::runtime::Handle::current(),
689 0 : TaskKind::Attach,
690 0 : Some(tenant_shard_id),
691 0 : None,
692 0 : "attach tenant",
693 : false,
694 0 : async move {
695 0 :
696 0 : info!(
697 : ?attach_mode,
698 0 : "Attaching tenant"
699 : );
700 :
701 0 : let _gate_guard = attach_gate_guard;
702 0 :
703 0 : // Is this tenant being spawned as part of process startup?
704 0 : let starting_up = init_order.is_some();
705 : scopeguard::defer! {
706 : if starting_up {
707 : TENANT.startup_complete.inc();
708 : }
709 : }
710 :
711 : // Ideally we should use Tenant::set_broken_no_wait, but it is not supposed to be used when the tenant is in a loading state.
712 : enum BrokenVerbosity {
713 : Error,
714 : Info
715 : }
716 0 : let make_broken =
717 0 : |t: &Tenant, err: anyhow::Error, verbosity: BrokenVerbosity| {
718 0 : match verbosity {
719 : BrokenVerbosity::Info => {
720 0 : info!("attach cancelled, setting tenant state to Broken: {err}");
721 : },
722 : BrokenVerbosity::Error => {
723 0 : error!("attach failed, setting tenant state to Broken: {err:?}");
724 : }
725 : }
726 0 : t.state.send_modify(|state| {
727 0 : // The Stopping case is for when we have passed control on to DeleteTenantFlow:
728 0 : // if it errors, we will call make_broken when tenant is already in Stopping.
729 0 : assert!(
730 0 : matches!(*state, TenantState::Attaching | TenantState::Stopping { .. }),
731 0 : "the attach task owns the tenant state until activation is complete"
732 : );
733 :
734 0 : *state = TenantState::broken_from_reason(err.to_string());
735 0 : });
736 0 : };
737 :
738 0 : let mut init_order = init_order;
739 0 : // take the completion because initial tenant loading will complete when all of
740 0 : // these tasks complete.
741 0 : let _completion = init_order
742 0 : .as_mut()
743 0 : .and_then(|x| x.initial_tenant_load.take());
744 0 : let remote_load_completion = init_order
745 0 : .as_mut()
746 0 : .and_then(|x| x.initial_tenant_load_remote.take());
747 :
748 : enum AttachType<'a> {
749 : /// We are attaching this tenant lazily in the background.
750 : Warmup {
751 : _permit: tokio::sync::SemaphorePermit<'a>,
752 : during_startup: bool
753 : },
754 : /// We are attaching this tenant as soon as we can, because for example an
755 : /// endpoint tried to access it.
756 : OnDemand,
757 : /// During normal operations after startup, we are attaching a tenant, and
758 : /// eager attach was requested.
759 : Normal,
760 : }
761 :
762 0 : let attach_type = if matches!(mode, SpawnMode::Lazy) {
763 : // Before doing any I/O, wait for at least one of:
764 : // - A client attempting to access this tenant (on-demand loading)
765 : // - A permit becoming available in the warmup semaphore (background warmup)
766 :
767 : tokio::select!(
768 : permit = tenant_clone.activate_now_sem.acquire() => {
769 : let _ = permit.expect("activate_now_sem is never closed");
770 : tracing::info!("Activating tenant (on-demand)");
771 : AttachType::OnDemand
772 : },
773 : permit = conf.concurrent_tenant_warmup.inner().acquire() => {
774 : let _permit = permit.expect("concurrent_tenant_warmup semaphore is never closed");
775 : tracing::info!("Activating tenant (warmup)");
776 : AttachType::Warmup {
777 : _permit,
778 : during_startup: init_order.is_some()
779 : }
780 : }
781 : _ = tenant_clone.cancel.cancelled() => {
782 : // This is safe, but should be pretty rare: it is interesting if a tenant
783 : // stayed in Activating for such a long time that shutdown found it in
784 : // that state.
785 : tracing::info!(state=%tenant_clone.current_state(), "Tenant shut down before activation");
786 : // Make the tenant broken so that set_stopping will not hang waiting for it to leave
787 : // the Attaching state. This is an over-reaction (nothing really broke, the tenant is
788 : // just shutting down), but ensures progress.
789 : make_broken(&tenant_clone, anyhow::anyhow!("Shut down while Attaching"), BrokenVerbosity::Info);
790 : return Ok(());
791 : },
792 : )
793 : } else {
794 : // SpawnMode::{Create,Eager} always cause jumping ahead of the
795 : // concurrent_tenant_warmup queue
796 0 : AttachType::Normal
797 : };
798 :
799 0 : let preload = match &mode {
800 : SpawnMode::Create => {
801 0 : None
802 : },
803 : SpawnMode::Eager | SpawnMode::Lazy => {
804 0 : let _preload_timer = TENANT.preload.start_timer();
805 0 : let res = tenant_clone
806 0 : .preload(&remote_storage, task_mgr::shutdown_token())
807 0 : .await;
808 0 : match res {
809 0 : Ok(p) => Some(p),
810 0 : Err(e) => {
811 0 : make_broken(&tenant_clone, anyhow::anyhow!(e), BrokenVerbosity::Error);
812 0 : return Ok(());
813 : }
814 : }
815 : }
816 :
817 : };
818 :
819 : // Remote preload is complete.
820 0 : drop(remote_load_completion);
821 :
822 : // We will time the duration of the attach phase unless this is a creation (attach will do no work)
823 0 : let attached = {
824 0 : let _attach_timer = match mode {
825 0 : SpawnMode::Create => None,
826 0 : SpawnMode::Eager | SpawnMode::Lazy => Some(TENANT.attach.start_timer()),
827 : };
828 0 : tenant_clone.attach(preload, mode, &ctx).await
829 : };
830 :
831 0 : match attached {
832 : Ok(()) => {
833 0 : info!("attach finished, activating");
834 0 : tenant_clone.activate(broker_client, None, &ctx);
835 : }
836 0 : Err(e) => {
837 0 : make_broken(&tenant_clone, anyhow::anyhow!(e), BrokenVerbosity::Error);
838 0 : }
839 : }
840 :
841 : // If we are doing an opportunistic warmup attachment at startup, initialize
842 : // logical size at the same time. This is better than starting a bunch of idle tenants
843 : // with cold caches and then coming back later to initialize their logical sizes.
844 : //
845 : // It also prevents the warmup process from competing with the concurrency limit on
846 : // logical size calculations: if the logical size calculation semaphore is saturated,
847 : // then warmup will wait for that before proceeding to the next tenant.
848 0 : if matches!(attach_type, AttachType::Warmup { during_startup: true, .. }) {
849 0 : let mut futs: FuturesUnordered<_> = tenant_clone.timelines.lock().unwrap().values().cloned().map(|t| t.await_initial_logical_size()).collect();
850 0 : tracing::info!("Waiting for initial logical sizes while warming up...");
851 0 : while futs.next().await.is_some() {}
852 0 : tracing::info!("Warm-up complete");
853 0 : }
854 :
855 0 : Ok(())
856 0 : }
857 0 : .instrument(tracing::info_span!(parent: None, "attach", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), gen=?generation)),
858 : );
859 0 : Ok(tenant)
860 0 : }
861 :
862 322 : #[instrument(skip_all)]
863 : pub(crate) async fn preload(
864 : self: &Arc<Self>,
865 : remote_storage: &GenericRemoteStorage,
866 : cancel: CancellationToken,
867 : ) -> anyhow::Result<TenantPreload> {
868 : span::debug_assert_current_span_has_tenant_id();
869 : // Get list of remote timelines
870 : // download index files for every tenant timeline
871 : info!("listing remote timelines");
872 : let (remote_timeline_ids, other_keys) = remote_timeline_client::list_remote_timelines(
873 : remote_storage,
874 : self.tenant_shard_id,
875 : cancel.clone(),
876 : )
877 : .await?;
878 :
879 : info!("found {} timelines", remote_timeline_ids.len(),);
880 :
881 : for k in other_keys {
882 : warn!("Unexpected non timeline key {k}");
883 : }
884 :
885 : Ok(TenantPreload {
886 : timelines: Self::load_timeline_metadata(
887 : self,
888 : remote_timeline_ids,
889 : remote_storage,
890 : cancel,
891 : )
892 : .await?,
893 : })
894 : }
895 :
896 : ///
897 : /// Body of the background task that downloads all data for a tenant so that it can be brought to Active state.
898 : ///
899 : /// No background tasks are started as part of this routine.
900 : ///
901 161 : async fn attach(
902 161 : self: &Arc<Tenant>,
903 161 : preload: Option<TenantPreload>,
904 161 : mode: SpawnMode,
905 161 : ctx: &RequestContext,
906 161 : ) -> anyhow::Result<()> {
907 161 : span::debug_assert_current_span_has_tenant_id();
908 161 :
909 161 : failpoint_support::sleep_millis_async!("before-attaching-tenant");
910 :
911 161 : let preload = match (preload, mode) {
912 161 : (Some(p), _) => p,
913 0 : (None, SpawnMode::Create) => TenantPreload {
914 0 : timelines: HashMap::new(),
915 0 : },
916 : (None, _) => {
917 0 : anyhow::bail!("local-only deployment is no longer supported, https://github.com/neondatabase/neon/issues/5624");
918 : }
919 : };
920 :
921 161 : let mut timelines_to_resume_deletions = vec![];
922 161 :
923 161 : let mut remote_index_and_client = HashMap::new();
924 161 : let mut timeline_ancestors = HashMap::new();
925 161 : let mut existent_timelines = HashSet::new();
926 167 : for (timeline_id, preload) in preload.timelines {
927 6 : let index_part = match preload.index_part {
928 6 : Ok(i) => {
929 6 : debug!("remote index part exists for timeline {timeline_id}");
930 : // We found index_part on the remote, this is the standard case.
931 6 : existent_timelines.insert(timeline_id);
932 6 : i
933 : }
934 : Err(DownloadError::NotFound) => {
935 : // There is no index_part on the remote. We only get here
936 : // if there is some prefix for the timeline in the remote storage.
937 : // This can e.g. be the initdb.tar.zst archive, maybe a
938 : // remnant from a prior incomplete creation or deletion attempt.
939 : // Delete the local directory as the deciding criterion for a
940 : // timeline's existence is presence of index_part.
941 0 : info!(%timeline_id, "index_part not found on remote");
942 0 : continue;
943 : }
944 0 : Err(e) => {
945 0 : // Some (possibly ephemeral) error happened during index_part download.
946 0 : // Pretend the timeline exists to not delete the timeline directory,
947 0 : // as it might be a temporary issue and we don't want to re-download
948 0 : // everything after it resolves.
949 0 : warn!(%timeline_id, "Failed to load index_part from remote storage, failed creation? ({e})");
950 :
951 0 : existent_timelines.insert(timeline_id);
952 0 : continue;
953 : }
954 : };
955 6 : match index_part {
956 6 : MaybeDeletedIndexPart::IndexPart(index_part) => {
957 6 : timeline_ancestors.insert(timeline_id, index_part.metadata.clone());
958 6 : remote_index_and_client.insert(timeline_id, (index_part, preload.client));
959 6 : }
960 0 : MaybeDeletedIndexPart::Deleted(index_part) => {
961 0 : info!(
962 0 : "timeline {} is deleted, picking to resume deletion",
963 : timeline_id
964 : );
965 0 : timelines_to_resume_deletions.push((timeline_id, index_part, preload.client));
966 : }
967 : }
968 : }
969 :
970 : // For every timeline, download the metadata file, scan the local directory,
971 : // and build a layer map that contains an entry for each remote and local
972 : // layer file.
973 161 : let sorted_timelines = tree_sort_timelines(timeline_ancestors, |m| m.ancestor_timeline())?;
974 167 : for (timeline_id, remote_metadata) in sorted_timelines {
975 6 : let (index_part, remote_client) = remote_index_and_client
976 6 : .remove(&timeline_id)
977 6 : .expect("just put it in above");
978 6 :
979 6 : // TODO again handle early failure
980 6 : self.load_remote_timeline(
981 6 : timeline_id,
982 6 : index_part,
983 6 : remote_metadata,
984 6 : TimelineResources {
985 6 : remote_client,
986 6 : timeline_get_throttle: self.timeline_get_throttle.clone(),
987 6 : },
988 6 : ctx,
989 6 : )
990 11 : .await
991 6 : .with_context(|| {
992 0 : format!(
993 0 : "failed to load remote timeline {} for tenant {}",
994 0 : timeline_id, self.tenant_shard_id
995 0 : )
996 6 : })?;
997 : }
998 :
999 : // Walk through deleted timelines, resume deletion
1000 161 : for (timeline_id, index_part, remote_timeline_client) in timelines_to_resume_deletions {
1001 0 : remote_timeline_client
1002 0 : .init_upload_queue_stopped_to_continue_deletion(&index_part)
1003 0 : .context("init queue stopped")
1004 0 : .map_err(LoadLocalTimelineError::ResumeDeletion)?;
1005 :
1006 0 : DeleteTimelineFlow::resume_deletion(
1007 0 : Arc::clone(self),
1008 0 : timeline_id,
1009 0 : &index_part.metadata,
1010 0 : remote_timeline_client,
1011 0 : )
1012 0 : .instrument(tracing::info_span!("timeline_delete", %timeline_id))
1013 0 : .await
1014 0 : .context("resume_deletion")
1015 0 : .map_err(LoadLocalTimelineError::ResumeDeletion)?;
1016 : }
1017 :
1018 : // The local filesystem contents are a cache of what's in the remote IndexPart;
1019 : // IndexPart is the source of truth.
1020 161 : self.clean_up_timelines(&existent_timelines)?;
1021 :
1022 161 : fail::fail_point!("attach-before-activate", |_| {
1023 0 : anyhow::bail!("attach-before-activate");
1024 161 : });
1025 161 : failpoint_support::sleep_millis_async!("attach-before-activate-sleep", &self.cancel);
1026 :
1027 161 : info!("Done");
1028 :
1029 161 : Ok(())
1030 161 : }
1031 :
1032 : /// Check for any local timeline directories that are temporary, or do not correspond to a
1033 : /// timeline that still exists: this can happen if we crashed during a deletion/creation, or
1034 : /// if a timeline was deleted while the tenant was attached to a different pageserver.
1035 161 : fn clean_up_timelines(&self, existent_timelines: &HashSet<TimelineId>) -> anyhow::Result<()> {
1036 161 : let timelines_dir = self.conf.timelines_path(&self.tenant_shard_id);
1037 :
1038 161 : let entries = match timelines_dir.read_dir_utf8() {
1039 161 : Ok(d) => d,
1040 0 : Err(e) => {
1041 0 : if e.kind() == std::io::ErrorKind::NotFound {
1042 0 : return Ok(());
1043 : } else {
1044 0 : return Err(e).context("list timelines directory for tenant");
1045 : }
1046 : }
1047 : };
1048 :
1049 169 : for entry in entries {
1050 8 : let entry = entry.context("read timeline dir entry")?;
1051 8 : let entry_path = entry.path();
1052 :
1053 8 : let purge = if crate::is_temporary(entry_path)
1054 : // TODO: remove uninit mark code (https://github.com/neondatabase/neon/issues/5718)
1055 8 : || is_uninit_mark(entry_path)
1056 8 : || crate::is_delete_mark(entry_path)
1057 : {
1058 0 : true
1059 : } else {
1060 8 : match TimelineId::try_from(entry_path.file_name()) {
1061 8 : Ok(i) => {
1062 8 : // Purge if the timeline ID does not exist in remote storage: remote storage is the authority.
1063 8 : !existent_timelines.contains(&i)
1064 : }
1065 0 : Err(e) => {
1066 0 : tracing::warn!(
1067 0 : "Unparseable directory in timelines directory: {entry_path}, ignoring ({e})"
1068 : );
1069 : // Do not purge junk: if we don't recognize it, be cautious and leave it for a human.
1070 0 : false
1071 : }
1072 : }
1073 : };
1074 :
1075 8 : if purge {
1076 2 : tracing::info!("Purging stale timeline dentry {entry_path}");
1077 2 : if let Err(e) = match entry.file_type() {
1078 2 : Ok(t) => if t.is_dir() {
1079 2 : std::fs::remove_dir_all(entry_path)
1080 : } else {
1081 0 : std::fs::remove_file(entry_path)
1082 : }
1083 2 : .or_else(fs_ext::ignore_not_found),
1084 0 : Err(e) => Err(e),
1085 : } {
1086 0 : tracing::warn!("Failed to purge stale timeline dentry {entry_path}: {e}");
1087 2 : }
1088 6 : }
1089 : }
1090 :
1091 161 : Ok(())
1092 161 : }
1093 :
1094 : /// Get the sum of all remote timeline sizes.
1095 : ///
1096 : /// This function relies on the index_part instead of listing the remote storage
1097 0 : pub fn remote_size(&self) -> u64 {
1098 0 : let mut size = 0;
1099 :
1100 0 : for timeline in self.list_timelines() {
1101 0 : size += timeline.remote_client.get_remote_physical_size();
1102 0 : }
1103 :
1104 0 : size
1105 0 : }
1106 :
1107 12 : #[instrument(skip_all, fields(timeline_id=%timeline_id))]
1108 : async fn load_remote_timeline(
1109 : &self,
1110 : timeline_id: TimelineId,
1111 : index_part: IndexPart,
1112 : remote_metadata: TimelineMetadata,
1113 : resources: TimelineResources,
1114 : ctx: &RequestContext,
1115 : ) -> anyhow::Result<()> {
1116 : span::debug_assert_current_span_has_tenant_id();
1117 :
1118 : info!("downloading index file for timeline {}", timeline_id);
1119 : tokio::fs::create_dir_all(self.conf.timeline_path(&self.tenant_shard_id, &timeline_id))
1120 : .await
1121 : .context("Failed to create new timeline directory")?;
1122 :
1123 : let ancestor = if let Some(ancestor_id) = remote_metadata.ancestor_timeline() {
1124 : let timelines = self.timelines.lock().unwrap();
1125 : Some(Arc::clone(timelines.get(&ancestor_id).ok_or_else(
1126 0 : || {
1127 0 : anyhow::anyhow!(
1128 0 : "cannot find ancestor timeline {ancestor_id} for timeline {timeline_id}"
1129 0 : )
1130 0 : },
1131 : )?))
1132 : } else {
1133 : None
1134 : };
1135 :
1136 : let last_aux_file_policy = index_part.last_aux_file_policy();
1137 :
1138 : self.timeline_init_and_sync(
1139 : timeline_id,
1140 : resources,
1141 : Some(index_part),
1142 : remote_metadata,
1143 : ancestor,
1144 : last_aux_file_policy,
1145 : ctx,
1146 : )
1147 : .await
1148 : }
1149 :
1150 : /// Create a placeholder Tenant object for a broken tenant
1151 0 : pub fn create_broken_tenant(
1152 0 : conf: &'static PageServerConf,
1153 0 : tenant_shard_id: TenantShardId,
1154 0 : remote_storage: GenericRemoteStorage,
1155 0 : reason: String,
1156 0 : ) -> Arc<Tenant> {
1157 0 : Arc::new(Tenant::new(
1158 0 : TenantState::Broken {
1159 0 : reason,
1160 0 : backtrace: String::new(),
1161 0 : },
1162 0 : conf,
1163 0 : AttachedTenantConf::try_from(LocationConf::default()).unwrap(),
1164 0 : // Shard identity isn't meaningful for a broken tenant: it's just a placeholder
1165 0 : // to occupy the slot for this TenantShardId.
1166 0 : ShardIdentity::broken(tenant_shard_id.shard_number, tenant_shard_id.shard_count),
1167 0 : None,
1168 0 : tenant_shard_id,
1169 0 : remote_storage,
1170 0 : DeletionQueueClient::broken(),
1171 0 : ))
1172 0 : }
1173 :
1174 161 : async fn load_timeline_metadata(
1175 161 : self: &Arc<Tenant>,
1176 161 : timeline_ids: HashSet<TimelineId>,
1177 161 : remote_storage: &GenericRemoteStorage,
1178 161 : cancel: CancellationToken,
1179 161 : ) -> anyhow::Result<HashMap<TimelineId, TimelinePreload>> {
1180 161 : let mut part_downloads = JoinSet::new();
1181 167 : for timeline_id in timeline_ids {
1182 6 : let client = RemoteTimelineClient::new(
1183 6 : remote_storage.clone(),
1184 6 : self.deletion_queue_client.clone(),
1185 6 : self.conf,
1186 6 : self.tenant_shard_id,
1187 6 : timeline_id,
1188 6 : self.generation,
1189 6 : );
1190 6 : let cancel_clone = cancel.clone();
1191 6 : part_downloads.spawn(
1192 6 : async move {
1193 6 : debug!("starting index part download");
1194 :
1195 24 : let index_part = client.download_index_file(&cancel_clone).await;
1196 :
1197 6 : debug!("finished index part download");
1198 :
1199 6 : Result::<_, anyhow::Error>::Ok(TimelinePreload {
1200 6 : client,
1201 6 : timeline_id,
1202 6 : index_part,
1203 6 : })
1204 6 : }
1205 6 : .map(move |res| {
1206 6 : res.with_context(|| format!("download index part for timeline {timeline_id}"))
1207 6 : })
1208 6 : .instrument(info_span!("download_index_part", %timeline_id)),
1209 : );
1210 : }
1211 :
1212 161 : let mut timeline_preloads: HashMap<TimelineId, TimelinePreload> = HashMap::new();
1213 :
1214 167 : loop {
1215 167 : tokio::select!(
1216 : next = part_downloads.join_next() => {
1217 : match next {
1218 : Some(result) => {
1219 : let preload_result = result.context("join preload task")?;
1220 : let preload = preload_result?;
1221 : timeline_preloads.insert(preload.timeline_id, preload);
1222 : },
1223 : None => {
1224 : break;
1225 : }
1226 : }
1227 : },
1228 : _ = cancel.cancelled() => {
1229 : anyhow::bail!("Cancelled while waiting for remote index download")
1230 : }
1231 167 : )
1232 167 : }
1233 :
1234 161 : Ok(timeline_preloads)
1235 161 : }
1236 :
1237 4 : pub(crate) fn tenant_shard_id(&self) -> TenantShardId {
1238 4 : self.tenant_shard_id
1239 4 : }
1240 :
1241 : /// Get Timeline handle for given Neon timeline ID.
1242 : /// This function is idempotent. It doesn't change internal state in any way.
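 : ///
 : /// A hedged usage sketch:
 : ///
 : /// ```ignore
 : /// // `active_only: true` rejects timelines that are not in Active state.
 : /// let timeline = tenant.get_timeline(timeline_id, true)?;
 : /// ```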
1243 222 : pub fn get_timeline(
1244 222 : &self,
1245 222 : timeline_id: TimelineId,
1246 222 : active_only: bool,
1247 222 : ) -> Result<Arc<Timeline>, GetTimelineError> {
1248 222 : let timelines_accessor = self.timelines.lock().unwrap();
1249 222 : let timeline = timelines_accessor
1250 222 : .get(&timeline_id)
1251 222 : .ok_or(GetTimelineError::NotFound {
1252 222 : tenant_id: self.tenant_shard_id,
1253 222 : timeline_id,
1254 222 : })?;
1255 :
1256 220 : if active_only && !timeline.is_active() {
1257 0 : Err(GetTimelineError::NotActive {
1258 0 : tenant_id: self.tenant_shard_id,
1259 0 : timeline_id,
1260 0 : state: timeline.current_state(),
1261 0 : })
1262 : } else {
1263 220 : Ok(Arc::clone(timeline))
1264 : }
1265 222 : }
1266 :
1267 : /// Lists timelines the tenant contains.
1268 : /// It is up to the tenant's implementation to omit timelines that are not considered ready for use.
1269 8 : pub fn list_timelines(&self) -> Vec<Arc<Timeline>> {
1270 8 : self.timelines
1271 8 : .lock()
1272 8 : .unwrap()
1273 8 : .values()
1274 8 : .map(Arc::clone)
1275 8 : .collect()
1276 8 : }
1277 :
1278 0 : pub fn list_timeline_ids(&self) -> Vec<TimelineId> {
1279 0 : self.timelines.lock().unwrap().keys().cloned().collect()
1280 0 : }
1281 :
1282 : /// This is used to create the initial 'main' timeline during bootstrapping,
1283 : /// or when importing a new base backup. The caller is expected to load an
1284 : /// initial image of the datadir to the new timeline after this.
1285 : ///
1286 : /// Until that happens, the on-disk state is invalid (disk_consistent_lsn=Lsn(0))
1287 : /// and the timeline will fail to load at a restart.
1288 : ///
1289 : /// For tests, use `DatadirModification::init_empty_test_timeline` + `commit` to setup the
1290 : /// minimum amount of keys required to get a writable timeline.
1291 : /// (Without it, `put` might fail due to `repartition` failing.)
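 : ///
 : /// A hedged sketch of that test setup (mirroring `create_test_timeline` below):
 : ///
 : /// ```ignore
 : /// let uninit = tenant.create_empty_timeline(id, initdb_lsn, pg_version, ctx).await?;
 : /// let tline = uninit.raw_timeline().expect("just created");
 : /// let mut m = tline.begin_modification(initdb_lsn);
 : /// m.init_empty_test_timeline()?;
 : /// m.commit(ctx).await?;
 : /// ```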
1292 153 : pub(crate) async fn create_empty_timeline(
1293 153 : &self,
1294 153 : new_timeline_id: TimelineId,
1295 153 : initdb_lsn: Lsn,
1296 153 : pg_version: u32,
1297 153 : _ctx: &RequestContext,
1298 153 : ) -> anyhow::Result<UninitializedTimeline> {
1299 153 : anyhow::ensure!(
1300 153 : self.is_active(),
1301 0 : "Cannot create empty timelines on inactive tenant"
1302 : );
1303 :
1304 : // Protect against concurrent attempts to use this TimelineId
1305 153 : let create_guard = self.create_timeline_create_guard(new_timeline_id)?;
1306 :
1307 151 : let new_metadata = TimelineMetadata::new(
1308 151 : // Initialize disk_consistent LSN to 0, The caller must import some data to
1309 151 : // make it valid, before calling finish_creation()
1310 151 : Lsn(0),
1311 151 : None,
1312 151 : None,
1313 151 : Lsn(0),
1314 151 : initdb_lsn,
1315 151 : initdb_lsn,
1316 151 : pg_version,
1317 151 : );
1318 151 : self.prepare_new_timeline(
1319 151 : new_timeline_id,
1320 151 : &new_metadata,
1321 151 : create_guard,
1322 151 : initdb_lsn,
1323 151 : None,
1324 151 : None,
1325 151 : )
1326 0 : .await
1327 153 : }
1328 :
1329 : /// Helper for unit tests to create an empty timeline.
1330 : ///
1331 : /// The timeline has state value `Active`, but its background loops are not running.
1332 : // This makes the various functions that `anyhow::ensure!` the Active state work in tests.
1333 : // Our current tests don't need the background loops.
1334 : #[cfg(test)]
1335 143 : pub async fn create_test_timeline(
1336 143 : &self,
1337 143 : new_timeline_id: TimelineId,
1338 143 : initdb_lsn: Lsn,
1339 143 : pg_version: u32,
1340 143 : ctx: &RequestContext,
1341 143 : ) -> anyhow::Result<Arc<Timeline>> {
1342 143 : let uninit_tl = self
1343 143 : .create_empty_timeline(new_timeline_id, initdb_lsn, pg_version, ctx)
1344 0 : .await?;
1345 143 : let tline = uninit_tl.raw_timeline().expect("we just created it");
1346 143 : assert_eq!(tline.get_last_record_lsn(), Lsn(0));
1347 :
1348 : // Setup minimum keys required for the timeline to be usable.
1349 143 : let mut modification = tline.begin_modification(initdb_lsn);
1350 143 : modification
1351 143 : .init_empty_test_timeline()
1352 143 : .context("init_empty_test_timeline")?;
1353 143 : modification
1354 143 : .commit(ctx)
1355 138 : .await
1356 143 : .context("commit init_empty_test_timeline modification")?;
1357 :
1358 : // Flush to disk so that uninit_tl's check for valid disk_consistent_lsn passes.
1359 143 : tline.maybe_spawn_flush_loop();
1360 143 : tline.freeze_and_flush().await.context("freeze_and_flush")?;
1361 :
1362 : // Make sure the freeze_and_flush reaches remote storage.
1363 143 : tline.remote_client.wait_completion().await.unwrap();
1364 :
1365 143 : let tl = uninit_tl.finish_creation()?;
1366 : // The non-test code would call tl.activate() here.
1367 143 : tl.set_state(TimelineState::Active);
1368 143 : Ok(tl)
1369 143 : }
1370 :
1371 : /// Helper for unit tests to create a timeline with some pre-loaded states.
1372 : #[cfg(test)]
1373 : #[allow(clippy::too_many_arguments)]
1374 19 : pub async fn create_test_timeline_with_layers(
1375 19 : &self,
1376 19 : new_timeline_id: TimelineId,
1377 19 : initdb_lsn: Lsn,
1378 19 : pg_version: u32,
1379 19 : ctx: &RequestContext,
1380 19 : delta_layer_desc: Vec<Vec<(pageserver_api::key::Key, Lsn, crate::repository::Value)>>,
1381 19 : image_layer_desc: Vec<(Lsn, Vec<(pageserver_api::key::Key, bytes::Bytes)>)>,
1382 19 : end_lsn: Lsn,
1383 19 : ) -> anyhow::Result<Arc<Timeline>> {
1384 19 : let tline = self
1385 19 : .create_test_timeline(new_timeline_id, initdb_lsn, pg_version, ctx)
1386 58 : .await?;
1387 19 : tline.force_advance_lsn(end_lsn);
1388 53 : for deltas in delta_layer_desc {
1389 34 : tline
1390 34 : .force_create_delta_layer(deltas, Some(initdb_lsn), ctx)
1391 102 : .await?;
1392 : }
1393 53 : for (lsn, images) in image_layer_desc {
1394 35 : tline
1395 35 : .force_create_image_layer(lsn, images, Some(initdb_lsn), ctx)
1396 164 : .await?;
1397 : }
1398 18 : Ok(tline)
1399 19 : }
1400 :
1401 : /// Create a new timeline.
1402 : ///
1403 : /// Returns the new timeline ID and reference to its Timeline object.
1404 : ///
1405 : /// If the caller specified the timeline ID to use (`new_timeline_id`), and timeline with
1406 : /// the same timeline ID already exists, returns CreateTimelineError::AlreadyExists.
1407 : #[allow(clippy::too_many_arguments)]
1408 0 : pub(crate) async fn create_timeline(
1409 0 : self: &Arc<Tenant>,
1410 0 : new_timeline_id: TimelineId,
1411 0 : ancestor_timeline_id: Option<TimelineId>,
1412 0 : mut ancestor_start_lsn: Option<Lsn>,
1413 0 : pg_version: u32,
1414 0 : load_existing_initdb: Option<TimelineId>,
1415 0 : broker_client: storage_broker::BrokerClientChannel,
1416 0 : ctx: &RequestContext,
1417 0 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
1418 0 : if !self.is_active() {
1419 0 : if matches!(self.current_state(), TenantState::Stopping { .. }) {
1420 0 : return Err(CreateTimelineError::ShuttingDown);
1421 : } else {
1422 0 : return Err(CreateTimelineError::Other(anyhow::anyhow!(
1423 0 : "Cannot create timelines on inactive tenant"
1424 0 : )));
1425 : }
1426 0 : }
1427 :
1428 0 : let _gate = self
1429 0 : .gate
1430 0 : .enter()
1431 0 : .map_err(|_| CreateTimelineError::ShuttingDown)?;
1432 :
1433 : // Get exclusive access to the timeline ID: this ensures that it does not already exist,
1434 : // and that no other creation attempts will be allowed in while we are working.
1435 0 : let create_guard = match self.create_timeline_create_guard(new_timeline_id) {
1436 0 : Ok(m) => m,
1437 : Err(TimelineExclusionError::AlreadyCreating) => {
1438 : // Creation is in progress, we cannot create it again, and we cannot
1439 : // check if this request matches the existing one, so caller must try
1440 : // again later.
1441 0 : return Err(CreateTimelineError::AlreadyCreating);
1442 : }
1443 0 : Err(TimelineExclusionError::Other(e)) => {
1444 0 : return Err(CreateTimelineError::Other(e));
1445 : }
1446 0 : Err(TimelineExclusionError::AlreadyExists(existing)) => {
1447 0 : debug!("timeline {new_timeline_id} already exists");
1448 :
1449 : // Idempotency: creating the same timeline twice is not an error, unless
1450 : // the second creation has different parameters.
1451 0 : if existing.get_ancestor_timeline_id() != ancestor_timeline_id
1452 0 : || existing.pg_version != pg_version
1453 0 : || (ancestor_start_lsn.is_some()
1454 0 : && ancestor_start_lsn != Some(existing.get_ancestor_lsn()))
1455 : {
1456 0 : return Err(CreateTimelineError::Conflict);
1457 0 : }
1458 0 :
1459 0 : // Wait for uploads to complete, so that when we return Ok, the timeline
1460 0 : // is known to be durable on remote storage. Just like we do at the end of
1461 0 : // this function, after we have created the timeline ourselves.
1462 0 : //
1463 0 : // We only really care that the initial version of `index_part.json` has
1464 0 : // been uploaded. That's enough to remember that the timeline
1465 0 : // exists. However, there is no function to wait specifically for that so
1466 0 : // we just wait for all in-progress uploads to finish.
1467 0 : existing
1468 0 : .remote_client
1469 0 : .wait_completion()
1470 0 : .await
1471 0 : .context("wait for timeline uploads to complete")?;
1472 :
1473 0 : return Ok(existing);
1474 : }
1475 : };
1476 :
1477 : pausable_failpoint!("timeline-creation-after-uninit");
1478 :
1479 0 : let loaded_timeline = match ancestor_timeline_id {
1480 0 : Some(ancestor_timeline_id) => {
1481 0 : let ancestor_timeline = self
1482 0 : .get_timeline(ancestor_timeline_id, false)
1483 0 : .context("Cannot branch off the timeline that's not present in pageserver")?;
1484 :
1485 : // instead of waiting around, just deny the request because ancestor is not yet
1486 : // ready for other purposes either.
1487 0 : if !ancestor_timeline.is_active() {
1488 0 : return Err(CreateTimelineError::AncestorNotActive);
1489 0 : }
1490 :
1491 0 : if let Some(lsn) = ancestor_start_lsn.as_mut() {
1492 0 : *lsn = lsn.align();
1493 0 :
1494 0 : let ancestor_ancestor_lsn = ancestor_timeline.get_ancestor_lsn();
1495 0 : if ancestor_ancestor_lsn > *lsn {
1496 : // can we safely just branch from the ancestor instead?
1497 0 : return Err(CreateTimelineError::AncestorLsn(anyhow::anyhow!(
1498 0 : "invalid start lsn {} for ancestor timeline {}: less than timeline ancestor lsn {}",
1499 0 : lsn,
1500 0 : ancestor_timeline_id,
1501 0 : ancestor_ancestor_lsn,
1502 0 : )));
1503 0 : }
1504 0 :
1505 0 : // Wait for the WAL to arrive and be processed on the parent branch up
1506 0 : // to the requested branch point. The repository code itself doesn't
1507 0 : // require it, but if we start to receive WAL on the new timeline,
1508 0 : // decoding the new WAL might need to look up previous pages, relation
1509 0 : // sizes etc. and that would get confused if the previous page versions
1510 0 : // are not in the repository yet.
1511 0 : ancestor_timeline
1512 0 : .wait_lsn(*lsn, timeline::WaitLsnWaiter::Tenant, ctx)
1513 0 : .await
1514 0 : .map_err(|e| match e {
1515 0 : e @ (WaitLsnError::Timeout(_) | WaitLsnError::BadState { .. }) => {
1516 0 : CreateTimelineError::AncestorLsn(anyhow::anyhow!(e))
1517 : }
1518 0 : WaitLsnError::Shutdown => CreateTimelineError::ShuttingDown,
1519 0 : })?;
1520 0 : }
1521 :
1522 0 : self.branch_timeline(
1523 0 : &ancestor_timeline,
1524 0 : new_timeline_id,
1525 0 : ancestor_start_lsn,
1526 0 : create_guard,
1527 0 : ctx,
1528 0 : )
1529 0 : .await?
1530 : }
1531 : None => {
1532 0 : self.bootstrap_timeline(
1533 0 : new_timeline_id,
1534 0 : pg_version,
1535 0 : load_existing_initdb,
1536 0 : create_guard,
1537 0 : ctx,
1538 0 : )
1539 0 : .await?
1540 : }
1541 : };
1542 :
1543 : // At this point we have dropped our guard on [`Self::timelines_creating`], and
1544 : // the timeline is visible in [`Self::timelines`], but it is _not_ durable yet. We must
1545 : // not send a success to the caller until it is. The same applies to handling retries,
1546 : // see the handling of [`TimelineExclusionError::AlreadyExists`] above.
1547 0 : let kind = ancestor_timeline_id
1548 0 : .map(|_| "branched")
1549 0 : .unwrap_or("bootstrapped");
1550 0 : loaded_timeline
1551 0 : .remote_client
1552 0 : .wait_completion()
1553 0 : .await
1554 0 : .with_context(|| format!("wait for {} timeline initial uploads to complete", kind))?;
1555 :
1556 0 : loaded_timeline.activate(self.clone(), broker_client, None, ctx);
1557 0 :
1558 0 : Ok(loaded_timeline)
1559 0 : }
1560 :
1561 0 : pub(crate) async fn delete_timeline(
1562 0 : self: Arc<Self>,
1563 0 : timeline_id: TimelineId,
1564 0 : ) -> Result<(), DeleteTimelineError> {
1565 0 : DeleteTimelineFlow::run(&self, timeline_id, false).await?;
1566 :
1567 0 : Ok(())
1568 0 : }
1569 :
1570 : /// Perform one garbage collection iteration, removing old data files from disk.
1571 : /// This function is periodically called by the gc task.
1572 : /// It can also be explicitly requested through the page server api's 'do_gc' command.
1573 : ///
1574 : /// `target_timeline_id` specifies the timeline to GC, or None for all.
1575 : ///
1576 : /// The `horizon` and `pitr` parameters determine how much WAL history needs to be retained.
1577 : /// Also known as the retention period, or the GC cutoff point. `horizon` specifies
1578 : /// the amount of history, as LSN difference from current latest LSN on each timeline.
1579 : /// `pitr` specifies the same as a time difference from the current time. The effective
1580 : /// GC cutoff point is determined conservatively by either `horizon` or `pitr`, whichever
1581 : /// requires more history to be retained.
1582 : //
1583 754 : pub(crate) async fn gc_iteration(
1584 754 : &self,
1585 754 : target_timeline_id: Option<TimelineId>,
1586 754 : horizon: u64,
1587 754 : pitr: Duration,
1588 754 : cancel: &CancellationToken,
1589 754 : ctx: &RequestContext,
1590 754 : ) -> Result<GcResult, GcError> {
1591 754 : // Don't start doing work during shutdown
1592 754 : if let TenantState::Stopping { .. } = self.current_state() {
1593 0 : return Ok(GcResult::default());
1594 754 : }
1595 754 :
1596 754 : // there is a global allowed_error for this
1597 754 : if !self.is_active() {
1598 0 : return Err(GcError::NotActive);
1599 754 : }
1600 754 :
1601 754 : {
1602 754 : let conf = self.tenant_conf.load();
1603 754 :
1604 754 : if !conf.location.may_delete_layers_hint() {
1605 0 : info!("Skipping GC in location state {:?}", conf.location);
1606 0 : return Ok(GcResult::default());
1607 754 : }
1608 754 : }
1609 754 :
1610 754 : self.gc_iteration_internal(target_timeline_id, horizon, pitr, cancel, ctx)
1611 718 : .await
1612 754 : }
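// A minimal sketch (not from this file) of the cutoff rule described in the doc
// comment above: `horizon` and `pitr` each yield a candidate cutoff LSN, and the
// conservative choice is whichever retains more history, i.e. the lower LSN.
// The names and pre-computed cutoffs here are hypothetical.
fn effective_gc_cutoff(horizon_cutoff: Lsn, pitr_cutoff: Lsn) -> Lsn {
    // Retaining more history means a lower cutoff, so take the minimum.
    std::cmp::min(horizon_cutoff, pitr_cutoff)
}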
1613 :
1614 : /// Perform one compaction iteration.
1615 : /// This function is periodically called by compactor task.
1616 : /// Also it can be explicitly requested per timeline through page server
1617 : /// api's 'compact' command.
1618 0 : async fn compaction_iteration(
1619 0 : &self,
1620 0 : cancel: &CancellationToken,
1621 0 : ctx: &RequestContext,
1622 0 : ) -> anyhow::Result<(), timeline::CompactionError> {
1623 0 : // Don't start doing work during shutdown, or when broken, we do not need those in the logs
1624 0 : if !self.is_active() {
1625 0 : return Ok(());
1626 0 : }
1627 0 :
1628 0 : {
1629 0 : let conf = self.tenant_conf.load();
1630 0 : if !conf.location.may_delete_layers_hint() || !conf.location.may_upload_layers_hint() {
1631 0 : info!("Skipping compaction in location state {:?}", conf.location);
1632 0 : return Ok(());
1633 0 : }
1634 0 : }
1635 0 :
1636 0 : // Scan through the hashmap and collect a list of all the timelines,
1637 0 : // while holding the lock. Then drop the lock and actually perform the
1638 0 : // compactions. We don't want to block everything else while the
1639 0 : // compaction runs.
1640 0 : let timelines_to_compact = {
1641 0 : let timelines = self.timelines.lock().unwrap();
1642 0 : let timelines_to_compact = timelines
1643 0 : .iter()
1644 0 : .filter_map(|(timeline_id, timeline)| {
1645 0 : if timeline.is_active() {
1646 0 : Some((*timeline_id, timeline.clone()))
1647 : } else {
1648 0 : None
1649 : }
1650 0 : })
1651 0 : .collect::<Vec<_>>();
1652 0 : drop(timelines);
1653 0 : timelines_to_compact
1654 : };
1655 :
1656 0 : for (timeline_id, timeline) in &timelines_to_compact {
1657 0 : timeline
1658 0 : .compact(cancel, EnumSet::empty(), ctx)
1659 0 : .instrument(info_span!("compact_timeline", %timeline_id))
1660 0 : .await?;
1661 : }
1662 :
1663 0 : Ok(())
1664 0 : }
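// A self-contained sketch of the clone-under-lock pattern used above: snapshot
// the Arcs while holding the std mutex, drop the guard, then await. `Item` and
// `work` are hypothetical stand-ins for Timeline and compact().
use std::sync::{Arc, Mutex};

struct Item(u32);

async fn work(item: &Item) {
    println!("processing {}", item.0);
}

async fn for_each_snapshot(map: &Mutex<Vec<Arc<Item>>>) {
    // Collect clones under the lock; the guard must not live across an .await.
    let snapshot: Vec<Arc<Item>> = map.lock().unwrap().iter().cloned().collect();
    for item in snapshot {
        work(&item).await;
    }
}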
1665 :
1666 : // Call through to all timelines to freeze ephemeral layers if needed. Usually
1667 : // this happens during ingest: this background housekeeping is for freezing layers
1668 : // that are open but haven't been written to for some time.
1669 0 : async fn ingest_housekeeping(&self) {
1670 0 : // Scan through the hashmap and collect a list of all the timelines,
1671 0 : // while holding the lock. Then drop the lock and actually perform the
1672 0 : // housekeeping. We don't want to block everything else while it
1673 0 : // runs.
1674 0 : let timelines = {
1675 0 : self.timelines
1676 0 : .lock()
1677 0 : .unwrap()
1678 0 : .values()
1679 0 : .filter_map(|timeline| {
1680 0 : if timeline.is_active() {
1681 0 : Some(timeline.clone())
1682 : } else {
1683 0 : None
1684 : }
1685 0 : })
1686 0 : .collect::<Vec<_>>()
1687 : };
1688 :
1689 0 : for timeline in &timelines {
1690 0 : timeline.maybe_freeze_ephemeral_layer().await;
1691 : }
1692 0 : }
1693 :
1694 2421 : pub fn current_state(&self) -> TenantState {
1695 2421 : self.state.borrow().clone()
1696 2421 : }
1697 :
1698 1661 : pub fn is_active(&self) -> bool {
1699 1661 : self.current_state() == TenantState::Active
1700 1661 : }
1701 :
1702 0 : pub fn generation(&self) -> Generation {
1703 0 : self.generation
1704 0 : }
1705 :
1706 0 : pub(crate) fn wal_redo_manager_status(&self) -> Option<WalRedoManagerStatus> {
1707 0 : self.walredo_mgr.as_ref().and_then(|mgr| mgr.status())
1708 0 : }
1709 :
1710 : /// Changes tenant status to active, unless shutdown was already requested.
1711 : ///
1712 : /// `background_jobs_can_start` is an optional barrier set to a value during pageserver startup
1713 : /// to delay background jobs. Background jobs can be started right away when None is given.
1714 0 : fn activate(
1715 0 : self: &Arc<Self>,
1716 0 : broker_client: BrokerClientChannel,
1717 0 : background_jobs_can_start: Option<&completion::Barrier>,
1718 0 : ctx: &RequestContext,
1719 0 : ) {
1720 0 : span::debug_assert_current_span_has_tenant_id();
1721 0 :
1722 0 : let mut activating = false;
1723 0 : self.state.send_modify(|current_state| {
1724 0 : use pageserver_api::models::ActivatingFrom;
1725 0 : match &*current_state {
1726 : TenantState::Activating(_) | TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => {
1727 0 : panic!("caller is responsible for calling activate() only on Loading / Attaching tenants, got {state:?}", state = current_state);
1728 : }
1729 0 : TenantState::Loading => {
1730 0 : *current_state = TenantState::Activating(ActivatingFrom::Loading);
1731 0 : }
1732 0 : TenantState::Attaching => {
1733 0 : *current_state = TenantState::Activating(ActivatingFrom::Attaching);
1734 0 : }
1735 : }
1736 0 : debug!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), "Activating tenant");
1737 0 : activating = true;
1738 0 : // Continue outside the closure. We need to grab timelines.lock()
1739 0 : // and we plan to turn it into a tokio::sync::Mutex in a future patch.
1740 0 : });
1741 0 :
1742 0 : if activating {
1743 0 : let timelines_accessor = self.timelines.lock().unwrap();
1744 0 : let timelines_to_activate = timelines_accessor
1745 0 : .values()
1746 0 : .filter(|timeline| !(timeline.is_broken() || timeline.is_stopping()));
1747 0 :
1748 0 : // Spawn gc and compaction loops. The loops will shut themselves
1749 0 : // down when they notice that the tenant is inactive.
1750 0 : tasks::start_background_loops(self, background_jobs_can_start);
1751 0 :
1752 0 : let mut activated_timelines = 0;
1753 :
1754 0 : for timeline in timelines_to_activate {
1755 0 : timeline.activate(
1756 0 : self.clone(),
1757 0 : broker_client.clone(),
1758 0 : background_jobs_can_start,
1759 0 : ctx,
1760 0 : );
1761 0 : activated_timelines += 1;
1762 0 : }
1763 :
1764 0 : self.state.send_modify(move |current_state| {
1765 0 : assert!(
1766 0 : matches!(current_state, TenantState::Activating(_)),
1767 0 : "set_stopping and set_broken wait for us to leave Activating state",
1768 : );
1769 0 : *current_state = TenantState::Active;
1770 0 :
1771 0 : let elapsed = self.constructed_at.elapsed();
1772 0 : let total_timelines = timelines_accessor.len();
1773 0 :
1774 0 : // log a lot of stuff, because some tenants sometimes suffer from user-visible
1775 0 : // times to activate. see https://github.com/neondatabase/neon/issues/4025
1776 0 : info!(
1777 0 : since_creation_millis = elapsed.as_millis(),
1778 0 : tenant_id = %self.tenant_shard_id.tenant_id,
1779 0 : shard_id = %self.tenant_shard_id.shard_slug(),
1780 0 : activated_timelines,
1781 0 : total_timelines,
1782 0 : post_state = <&'static str>::from(&*current_state),
1783 0 : "activation attempt finished"
1784 : );
1785 :
1786 0 : TENANT.activation.observe(elapsed.as_secs_f64());
1787 0 : });
1788 0 : }
1789 0 : }
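// A minimal sketch of the watch-channel transition used in activate():
// send_modify() mutates the state in place and notifies all receivers.
// The two-state enum here is a hypothetical stand-in for TenantState.
use tokio::sync::watch;

#[derive(Clone, Debug, PartialEq)]
enum DemoState { Activating, Active }

fn transition_demo() {
    let (tx, rx) = watch::channel(DemoState::Activating);
    tx.send_modify(|state| {
        assert!(matches!(*state, DemoState::Activating));
        *state = DemoState::Active;
    });
    assert_eq!(*rx.borrow(), DemoState::Active);
}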
1790 :
1791 : /// Shutdown the tenant and join all of the spawned tasks.
1792 : ///
1793 : /// The method caters for all use-cases:
1794 : /// - pageserver shutdown (freeze_and_flush == true)
1795 : /// - detach + ignore (freeze_and_flush == false)
1796 : ///
1797 : /// This will attempt to shutdown even if tenant is broken.
1798 : ///
1799 : /// `shutdown_progress` is a [`completion::Barrier`] for the shutdown initiated by this call.
1800 : /// If the tenant is already shutting down, we return a clone of the first shutdown call's
1801 : /// `Barrier` as an `Err`. This not-first caller can use the returned barrier to join with
1802 : /// the ongoing shutdown.
1803 6 : async fn shutdown(
1804 6 : &self,
1805 6 : shutdown_progress: completion::Barrier,
1806 6 : shutdown_mode: timeline::ShutdownMode,
1807 6 : ) -> Result<(), completion::Barrier> {
1808 6 : span::debug_assert_current_span_has_tenant_id();
1809 :
1810 : // Set the tenant (and its timelines) to Stopping state.
1811 : //
1812 : // Since we can only transition into Stopping state after activation is complete,
1813 : // run it in a JoinSet so all tenants have a chance to stop before we get SIGKILLed.
1814 : //
1815 : // Transitioning tenants to Stopping state has a couple of non-obvious side effects:
1816 : // 1. Lock out any new requests to the tenants.
1817 : // 2. Signal cancellation to WAL receivers (we wait on it below).
1818 : // 3. Signal cancellation for other tenant background loops.
1819 : // 4. ???
1820 : //
1821 : // The waiting for the cancellation is not done uniformly.
1822 : // We certainly wait for WAL receivers to shut down.
1823 : // That is necessary so that no new data comes in before the freeze_and_flush.
1824 : // But the tenant background loops are joined-on in our caller.
1825 : // It's messed up.
1826 : // We just ignore the failure to stop.
1827 :
1828 : // If we're still attaching, fire the cancellation token early to drop out: this
1829 : // will prevent us flushing, but ensures timely shutdown if some I/O during attach
1830 : // is very slow.
1831 6 : if matches!(self.current_state(), TenantState::Attaching) {
1832 0 : self.cancel.cancel();
1833 6 : }
1834 :
1835 6 : match self.set_stopping(shutdown_progress, false, false).await {
1836 6 : Ok(()) => {}
1837 0 : Err(SetStoppingError::Broken) => {
1838 0 : // assume that this is acceptable
1839 0 : }
1840 0 : Err(SetStoppingError::AlreadyStopping(other)) => {
1841 0 : // give the caller the option to wait for this shutdown
1842 0 : info!("Tenant::shutdown: AlreadyStopping");
1843 0 : return Err(other);
1844 : }
1845 : };
1846 :
1847 6 : let mut js = tokio::task::JoinSet::new();
1848 6 : {
1849 6 : let timelines = self.timelines.lock().unwrap();
1850 6 : timelines.values().for_each(|timeline| {
1851 6 : let timeline = Arc::clone(timeline);
1852 6 : let timeline_id = timeline.timeline_id;
1853 6 : let span = tracing::info_span!("timeline_shutdown", %timeline_id, ?shutdown_mode);
1854 16 : js.spawn(async move { timeline.shutdown(shutdown_mode).instrument(span).await });
1855 6 : })
1856 6 : };
1857 6 : // test_long_timeline_create_then_tenant_delete is leaning on this message
1858 6 : tracing::info!("Waiting for timelines...");
1859 12 : while let Some(res) = js.join_next().await {
1860 0 : match res {
1861 6 : Ok(()) => {}
1862 0 : Err(je) if je.is_cancelled() => unreachable!("no cancelling used"),
1863 0 : Err(je) if je.is_panic() => { /* logged already */ }
1864 0 : Err(je) => warn!("unexpected JoinError: {je:?}"),
1865 : }
1866 : }
1867 :
1868 : // We cancel the Tenant's cancellation token _after_ the timelines have all shut down. This permits
1869 : // them to continue to do work during their shutdown methods, e.g. flushing data.
1870 6 : tracing::debug!("Cancelling CancellationToken");
1871 6 : self.cancel.cancel();
1872 6 :
1873 6 : // shutdown all tenant and timeline tasks: gc, compaction, page service
1874 6 : // No new tasks will be started for this tenant because it's in `Stopping` state.
1875 6 : //
1876 6 : // this will additionally shutdown and await all timeline tasks.
1877 6 : tracing::debug!("Waiting for tasks...");
1878 6 : task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), None).await;
1879 :
1880 : // Wait for any in-flight operations to complete
1881 6 : self.gate.close().await;
1882 :
1883 6 : remove_tenant_metrics(&self.tenant_shard_id);
1884 6 :
1885 6 : Ok(())
1886 6 : }
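// A sketch of the JoinSet fan-out/join used by shutdown(): spawn one task per
// timeline, then drain join_next(), treating panics as already logged. The
// task body is a hypothetical placeholder.
use tokio::task::JoinSet;

async fn shutdown_all(task_count: usize) {
    let mut js = JoinSet::new();
    for i in 0..task_count {
        js.spawn(async move {
            // per-timeline shutdown work would happen here
            let _ = i;
        });
    }
    while let Some(res) = js.join_next().await {
        match res {
            Ok(()) => {}
            Err(je) if je.is_panic() => { /* logged by the panic hook */ }
            Err(je) => eprintln!("unexpected JoinError: {je:?}"),
        }
    }
}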
1887 :
1888 : /// Change tenant status to Stopping, to mark that it is being shut down.
1889 : ///
1890 : /// This function waits for the tenant to become active if it isn't already, before transitioning it into Stopping state.
1891 : ///
1892 : /// This function is not cancel-safe!
1893 : ///
1894 : /// `allow_transition_from_loading` is needed for the special case of loading task deleting the tenant.
1895 : /// `allow_transition_from_attaching` is needed for the special case of attaching deleted tenant.
1896 6 : async fn set_stopping(
1897 6 : &self,
1898 6 : progress: completion::Barrier,
1899 6 : allow_transition_from_loading: bool,
1900 6 : allow_transition_from_attaching: bool,
1901 6 : ) -> Result<(), SetStoppingError> {
1902 6 : let mut rx = self.state.subscribe();
1903 6 :
1904 6 : // cannot stop before we're done activating, so wait out until we're done activating
1905 6 : rx.wait_for(|state| match state {
1906 0 : TenantState::Attaching if allow_transition_from_attaching => true,
1907 : TenantState::Activating(_) | TenantState::Attaching => {
1908 0 : info!(
1909 0 : "waiting for {} to turn Active|Broken|Stopping",
1910 0 : <&'static str>::from(state)
1911 : );
1912 0 : false
1913 : }
1914 0 : TenantState::Loading => allow_transition_from_loading,
1915 6 : TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => true,
1916 6 : })
1917 0 : .await
1918 6 : .expect("cannot drop self.state while on a &self method");
1919 6 :
1920 6 : // we now know we're done activating, let's see whether this task is the winner to transition into Stopping
1921 6 : let mut err = None;
1922 6 : let stopping = self.state.send_if_modified(|current_state| match current_state {
1923 : TenantState::Activating(_) => {
1924 0 : unreachable!("1we ensured above that we're done with activation, and, there is no re-activation")
1925 : }
1926 : TenantState::Attaching => {
1927 0 : if !allow_transition_from_attaching {
1928 0 : unreachable!("2we ensured above that we're done with activation, and, there is no re-activation")
1929 0 : };
1930 0 : *current_state = TenantState::Stopping { progress };
1931 0 : true
1932 : }
1933 : TenantState::Loading => {
1934 0 : if !allow_transition_from_loading {
1935 0 : unreachable!("3we ensured above that we're done with activation, and, there is no re-activation")
1936 0 : };
1937 0 : *current_state = TenantState::Stopping { progress };
1938 0 : true
1939 : }
1940 : TenantState::Active => {
1941 : // FIXME: due to time-of-check vs time-of-use issues, it can happen that new timelines
1942 : // are created after the transition to Stopping. That's harmless, as the Timelines
1943 : // won't be accessible to anyone afterwards, because the Tenant is in Stopping state.
1944 6 : *current_state = TenantState::Stopping { progress };
1945 6 : // Continue stopping outside the closure. We need to grab timelines.lock()
1946 6 : // and we plan to turn it into a tokio::sync::Mutex in a future patch.
1947 6 : true
1948 : }
1949 0 : TenantState::Broken { reason, .. } => {
1950 0 : info!(
1951 0 : "Cannot set tenant to Stopping state, it is in Broken state due to: {reason}"
1952 : );
1953 0 : err = Some(SetStoppingError::Broken);
1954 0 : false
1955 : }
1956 0 : TenantState::Stopping { progress } => {
1957 0 : info!("Tenant is already in Stopping state");
1958 0 : err = Some(SetStoppingError::AlreadyStopping(progress.clone()));
1959 0 : false
1960 : }
1961 6 : });
1962 6 : match (stopping, err) {
1963 6 : (true, None) => {} // continue
1964 0 : (false, Some(err)) => return Err(err),
1965 0 : (true, Some(_)) => unreachable!(
1966 0 : "send_if_modified closure must error out if not transitioning to Stopping"
1967 0 : ),
1968 0 : (false, None) => unreachable!(
1969 0 : "send_if_modified closure must return true if transitioning to Stopping"
1970 0 : ),
1971 : }
1972 :
1973 6 : let timelines_accessor = self.timelines.lock().unwrap();
1974 6 : let not_broken_timelines = timelines_accessor
1975 6 : .values()
1976 6 : .filter(|timeline| !timeline.is_broken());
1977 12 : for timeline in not_broken_timelines {
1978 6 : timeline.set_state(TimelineState::Stopping);
1979 6 : }
1980 6 : Ok(())
1981 6 : }
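// A sketch of the two-phase pattern in set_stopping(): first wait_for() blocks
// until the transient phase ends, then send_if_modified() lets exactly one
// caller win the transition. The states are hypothetical stand-ins.
use tokio::sync::watch;

#[derive(Clone, Debug, PartialEq)]
enum Phase { Starting, Ready, Stopping }

async fn try_stop(tx: &watch::Sender<Phase>) -> bool {
    let mut rx = tx.subscribe();
    // Phase 1: wait until we are past Starting.
    rx.wait_for(|p| *p != Phase::Starting)
        .await
        .expect("sender is kept alive by the caller");
    // Phase 2: race to be the task that flips Ready -> Stopping.
    tx.send_if_modified(|p| {
        if *p == Phase::Ready {
            *p = Phase::Stopping;
            true // we won; receivers are notified
        } else {
            false // someone else already moved the state; no notification
        }
    })
}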
1982 :
1983 : /// Method for tenant::mgr to transition us into Broken state in case of a late failure in
1984 : /// `remove_tenant_from_memory`
1985 : ///
1986 : /// This function waits for the tenant to become active if it isn't already, before transitioning it into Stopping state.
1987 : ///
1988 : /// In tests, we also use this to set tenants to Broken state on purpose.
1989 0 : pub(crate) async fn set_broken(&self, reason: String) {
1990 0 : let mut rx = self.state.subscribe();
1991 0 :
1992 0 : // The load & attach routines own the tenant state until it has reached `Active`.
1993 0 : // So, wait until it's done.
1994 0 : rx.wait_for(|state| match state {
1995 : TenantState::Activating(_) | TenantState::Loading | TenantState::Attaching => {
1996 0 : info!(
1997 0 : "waiting for {} to turn Active|Broken|Stopping",
1998 0 : <&'static str>::from(state)
1999 : );
2000 0 : false
2001 : }
2002 0 : TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => true,
2003 0 : })
2004 0 : .await
2005 0 : .expect("cannot drop self.state while on a &self method");
2006 0 :
2007 0 : // we now know we're done activating, let's see whether this task is the winner to transition into Broken
2008 0 : self.set_broken_no_wait(reason)
2009 0 : }
2010 :
2011 0 : pub(crate) fn set_broken_no_wait(&self, reason: impl Display) {
2012 0 : let reason = reason.to_string();
2013 0 : self.state.send_modify(|current_state| {
2014 0 : match *current_state {
2015 : TenantState::Activating(_) | TenantState::Loading | TenantState::Attaching => {
2016 0 : unreachable!("we ensured above that we're done with activation, and, there is no re-activation")
2017 : }
2018 : TenantState::Active => {
2019 0 : if cfg!(feature = "testing") {
2020 0 : warn!("Changing Active tenant to Broken state, reason: {}", reason);
2021 0 : *current_state = TenantState::broken_from_reason(reason);
2022 : } else {
2023 0 : unreachable!("not allowed to call set_broken on Active tenants in non-testing builds")
2024 : }
2025 : }
2026 : TenantState::Broken { .. } => {
2027 0 : warn!("Tenant is already in Broken state");
2028 : }
2029 : // This is the only "expected" path, any other path is a bug.
2030 : TenantState::Stopping { .. } => {
2031 0 : warn!(
2032 0 : "Marking Stopping tenant as Broken state, reason: {}",
2033 : reason
2034 : );
2035 0 : *current_state = TenantState::broken_from_reason(reason);
2036 : }
2037 : }
2038 0 : });
2039 0 : }
2040 :
2041 0 : pub fn subscribe_for_state_updates(&self) -> watch::Receiver<TenantState> {
2042 0 : self.state.subscribe()
2043 0 : }
2044 :
2045 : /// The activate_now semaphore is initialized with zero units. As soon as
2046 : /// we add a unit, waiters will be able to acquire a unit and proceed.
2047 0 : pub(crate) fn activate_now(&self) {
2048 0 : self.activate_now_sem.add_permits(1);
2049 0 : }
2050 :
2051 0 : pub(crate) async fn wait_to_become_active(
2052 0 : &self,
2053 0 : timeout: Duration,
2054 0 : ) -> Result<(), GetActiveTenantError> {
2055 0 : let mut receiver = self.state.subscribe();
2056 0 : loop {
2057 0 : let current_state = receiver.borrow_and_update().clone();
2058 0 : match current_state {
2059 : TenantState::Loading | TenantState::Attaching | TenantState::Activating(_) => {
2060 : // in these states, there's a chance that we can reach ::Active
2061 0 : self.activate_now();
2062 0 : match timeout_cancellable(timeout, &self.cancel, receiver.changed()).await {
2063 0 : Ok(r) => {
2064 0 : r.map_err(
2065 0 : |_e: tokio::sync::watch::error::RecvError|
2066 : // Tenant existed but was dropped: report it as non-existent
2067 0 : GetActiveTenantError::NotFound(GetTenantError::NotFound(self.tenant_shard_id.tenant_id))
2068 0 : )?
2069 : }
2070 : Err(TimeoutCancellableError::Cancelled) => {
2071 0 : return Err(GetActiveTenantError::Cancelled);
2072 : }
2073 : Err(TimeoutCancellableError::Timeout) => {
2074 0 : return Err(GetActiveTenantError::WaitForActiveTimeout {
2075 0 : latest_state: Some(self.current_state()),
2076 0 : wait_time: timeout,
2077 0 : });
2078 : }
2079 : }
2080 : }
2081 : TenantState::Active { .. } => {
2082 0 : return Ok(());
2083 : }
2084 0 : TenantState::Broken { reason, .. } => {
2085 0 : // This is fatal, and reported distinctly from the general case of "will never be active" because
2086 0 : // it's logically a 500 to external API users (broken is always a bug).
2087 0 : return Err(GetActiveTenantError::Broken(reason));
2088 : }
2089 : TenantState::Stopping { .. } => {
2090 : // There's no chance the tenant can transition back into ::Active
2091 0 : return Err(GetActiveTenantError::WillNotBecomeActive(current_state));
2092 : }
2093 : }
2094 : }
2095 0 : }
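// A sketch of the timeout-or-cancel wait in wait_to_become_active(), expressed
// with plain tokio primitives instead of utils::timeout::timeout_cancellable.
// The outcome enum and channel payload are hypothetical.
use std::time::Duration;
use tokio_util::sync::CancellationToken;

enum WaitOutcome {
    Changed,
    Cancelled,
    TimedOut,
}

async fn wait_changed(
    rx: &mut tokio::sync::watch::Receiver<u32>,
    cancel: &CancellationToken,
    timeout: Duration,
) -> WaitOutcome {
    tokio::select! {
        _ = cancel.cancelled() => WaitOutcome::Cancelled,
        res = tokio::time::timeout(timeout, rx.changed()) => match res {
            Ok(_) => WaitOutcome::Changed,
            Err(_elapsed) => WaitOutcome::TimedOut,
        },
    }
}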
2096 :
2097 0 : pub(crate) fn get_attach_mode(&self) -> AttachmentMode {
2098 0 : self.tenant_conf.load().location.attach_mode
2099 0 : }
2100 :
2101 : /// For API access: generate a LocationConfig equivalent to the one that would be used to
2102 : /// create a Tenant in the same state. Do not use this in hot paths: it's for relatively
2103 : /// rare external API calls, like a reconciliation at startup.
2104 0 : pub(crate) fn get_location_conf(&self) -> models::LocationConfig {
2105 0 : let conf = self.tenant_conf.load();
2106 :
2107 0 : let location_config_mode = match conf.location.attach_mode {
2108 0 : AttachmentMode::Single => models::LocationConfigMode::AttachedSingle,
2109 0 : AttachmentMode::Multi => models::LocationConfigMode::AttachedMulti,
2110 0 : AttachmentMode::Stale => models::LocationConfigMode::AttachedStale,
2111 : };
2112 :
2113 : // We have a pageserver TenantConf, we need the API-facing TenantConfig.
2114 0 : let tenant_config: models::TenantConfig = conf.tenant_conf.clone().into();
2115 0 :
2116 0 : models::LocationConfig {
2117 0 : mode: location_config_mode,
2118 0 : generation: self.generation.into(),
2119 0 : secondary_conf: None,
2120 0 : shard_number: self.shard_identity.number.0,
2121 0 : shard_count: self.shard_identity.count.literal(),
2122 0 : shard_stripe_size: self.shard_identity.stripe_size.0,
2123 0 : tenant_conf: tenant_config,
2124 0 : }
2125 0 : }
2126 :
2127 0 : pub(crate) fn get_tenant_shard_id(&self) -> &TenantShardId {
2128 0 : &self.tenant_shard_id
2129 0 : }
2130 :
2131 0 : pub(crate) fn get_shard_stripe_size(&self) -> ShardStripeSize {
2132 0 : self.shard_identity.stripe_size
2133 0 : }
2134 :
2135 0 : pub(crate) fn get_generation(&self) -> Generation {
2136 0 : self.generation
2137 0 : }
2138 :
2139 : /// This function partially shuts down the tenant (it shuts down the Timelines) and is fallible,
2140 : /// and can leave the tenant in a bad state if it fails. The caller is responsible for
2141 : /// resetting this tenant to a valid state if we fail.
2142 0 : pub(crate) async fn split_prepare(
2143 0 : &self,
2144 0 : child_shards: &Vec<TenantShardId>,
2145 0 : ) -> anyhow::Result<()> {
2146 0 : let timelines = self.timelines.lock().unwrap().clone();
2147 0 : for timeline in timelines.values() {
2148 : // We do not block timeline creation/deletion during splits inside the pageserver: it is up to higher levels
2149 : // to ensure that a split is not started while these are in progress.
2150 :
2151 : // Upload an index from the parent: this is partly to provide freshness for the
2152 : // child tenants that will copy it, and partly for general ease-of-debugging: there will
2153 : // always be a parent shard index in the same generation as we wrote the child shard index.
2154 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Uploading index");
2155 0 : timeline
2156 0 : .remote_client
2157 0 : .schedule_index_upload_for_file_changes()?;
2158 0 : timeline.remote_client.wait_completion().await?;
2159 :
2160 : // Shut down the timeline's remote client: this means that the indices we write
2161 : // for child shards will not be invalidated by the parent shard deleting layers.
2162 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Shutting down remote storage client");
2163 0 : timeline.remote_client.shutdown().await;
2164 :
2165 : // Download methods can still be used after shutdown, as they don't flow through the remote client's
2166 : // queue. In principle the RemoteTimelineClient could provide this without downloading it, but this
2167 : // operation is rare, so it's simpler to just download it (which also robustly guarantees that the index
2168 : // we use here really is the remotely persistent one).
2169 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Downloading index_part from parent");
2170 0 : let result = timeline.remote_client
2171 0 : .download_index_file(&self.cancel)
2172 0 : .instrument(info_span!("download_index_file", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%timeline.timeline_id))
2173 0 : .await?;
2174 0 : let index_part = match result {
2175 : MaybeDeletedIndexPart::Deleted(_) => {
2176 0 : anyhow::bail!("Timeline deletion happened concurrently with split")
2177 : }
2178 0 : MaybeDeletedIndexPart::IndexPart(p) => p,
2179 : };
2180 :
2181 0 : for child_shard in child_shards {
2182 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Uploading index_part for child {}", child_shard.to_index());
2183 0 : upload_index_part(
2184 0 : &self.remote_storage,
2185 0 : child_shard,
2186 0 : &timeline.timeline_id,
2187 0 : self.generation,
2188 0 : &index_part,
2189 0 : &self.cancel,
2190 0 : )
2191 0 : .await?;
2192 : }
2193 : }
2194 :
2195 0 : Ok(())
2196 0 : }
2197 :
2198 0 : pub(crate) fn get_sizes(&self) -> TopTenantShardItem {
2199 0 : let mut result = TopTenantShardItem {
2200 0 : id: self.tenant_shard_id,
2201 0 : resident_size: 0,
2202 0 : physical_size: 0,
2203 0 : max_logical_size: 0,
2204 0 : };
2205 :
2206 0 : for timeline in self.timelines.lock().unwrap().values() {
2207 0 : result.resident_size += timeline.metrics.resident_physical_size_gauge.get();
2208 0 :
2209 0 : result.physical_size += timeline
2210 0 : .remote_client
2211 0 : .metrics
2212 0 : .remote_physical_size_gauge
2213 0 : .get();
2214 0 : result.max_logical_size = std::cmp::max(
2215 0 : result.max_logical_size,
2216 0 : timeline.metrics.current_logical_size_gauge.get(),
2217 0 : );
2218 0 : }
2219 :
2220 0 : result
2221 0 : }
2222 : }
2223 :
2224 : /// Given a map of timelines and their ancestors (timeline_id, ancestor_id),
2225 : /// perform a topological sort, so that the parent of each timeline comes
2226 : /// before its children.
2227 : /// `E` extracts the ancestor from `T`.
2228 : /// This allows `T` to vary: it can be TimelineMetadata, Timeline itself, etc.
2229 161 : fn tree_sort_timelines<T, E>(
2230 161 : timelines: HashMap<TimelineId, T>,
2231 161 : extractor: E,
2232 161 : ) -> anyhow::Result<Vec<(TimelineId, T)>>
2233 161 : where
2234 161 : E: Fn(&T) -> Option<TimelineId>,
2235 161 : {
2236 161 : let mut result = Vec::with_capacity(timelines.len());
2237 161 :
2238 161 : let mut now = Vec::with_capacity(timelines.len());
2239 161 : // (ancestor, children)
2240 161 : let mut later: HashMap<TimelineId, Vec<(TimelineId, T)>> =
2241 161 : HashMap::with_capacity(timelines.len());
2242 :
2243 167 : for (timeline_id, value) in timelines {
2244 6 : if let Some(ancestor_id) = extractor(&value) {
2245 2 : let children = later.entry(ancestor_id).or_default();
2246 2 : children.push((timeline_id, value));
2247 4 : } else {
2248 4 : now.push((timeline_id, value));
2249 4 : }
2250 : }
2251 :
2252 167 : while let Some((timeline_id, metadata)) = now.pop() {
2253 6 : result.push((timeline_id, metadata));
2254 : // All children of this can be loaded now
2255 6 : if let Some(mut children) = later.remove(&timeline_id) {
2256 2 : now.append(&mut children);
2257 4 : }
2258 : }
2259 :
2260 : // All timelines should be visited now. Unless there were timelines with missing ancestors.
2261 161 : if !later.is_empty() {
2262 0 : for (missing_id, orphan_ids) in later {
2263 0 : for (orphan_id, _) in orphan_ids {
2264 0 : error!("could not load timeline {orphan_id} because its ancestor timeline {missing_id} could not be loaded");
2265 : }
2266 : }
2267 0 : bail!("could not load tenant because some timelines are missing ancestors");
2268 161 : }
2269 161 :
2270 161 : Ok(result)
2271 161 : }
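// A hypothetical usage sketch for tree_sort_timelines: with T = Option<TimelineId>,
// the extractor is just the identity on the stored ancestor, and parents come out
// before children. Assumes TimelineId::generate() is available for test ids.
fn tree_sort_demo() -> anyhow::Result<()> {
    let parent = TimelineId::generate();
    let child = TimelineId::generate();
    let mut timelines = HashMap::new();
    timelines.insert(parent, None);
    timelines.insert(child, Some(parent));
    let sorted = tree_sort_timelines(timelines, |ancestor| *ancestor)?;
    assert_eq!(sorted[0].0, parent); // parent is sorted before its child
    Ok(())
}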
2272 :
2273 : impl Tenant {
2274 0 : pub fn tenant_specific_overrides(&self) -> TenantConfOpt {
2275 0 : self.tenant_conf.load().tenant_conf.clone()
2276 0 : }
2277 :
2278 0 : pub fn effective_config(&self) -> TenantConf {
2279 0 : self.tenant_specific_overrides()
2280 0 : .merge(self.conf.default_tenant_conf.clone())
2281 0 : }
2282 :
2283 0 : pub fn get_checkpoint_distance(&self) -> u64 {
2284 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2285 0 : tenant_conf
2286 0 : .checkpoint_distance
2287 0 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
2288 0 : }
2289 :
2290 0 : pub fn get_checkpoint_timeout(&self) -> Duration {
2291 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2292 0 : tenant_conf
2293 0 : .checkpoint_timeout
2294 0 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
2295 0 : }
2296 :
2297 0 : pub fn get_compaction_target_size(&self) -> u64 {
2298 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2299 0 : tenant_conf
2300 0 : .compaction_target_size
2301 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
2302 0 : }
2303 :
2304 0 : pub fn get_compaction_period(&self) -> Duration {
2305 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2306 0 : tenant_conf
2307 0 : .compaction_period
2308 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_period)
2309 0 : }
2310 :
2311 0 : pub fn get_compaction_threshold(&self) -> usize {
2312 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2313 0 : tenant_conf
2314 0 : .compaction_threshold
2315 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
2316 0 : }
2317 :
2318 0 : pub fn get_gc_horizon(&self) -> u64 {
2319 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2320 0 : tenant_conf
2321 0 : .gc_horizon
2322 0 : .unwrap_or(self.conf.default_tenant_conf.gc_horizon)
2323 0 : }
2324 :
2325 0 : pub fn get_gc_period(&self) -> Duration {
2326 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2327 0 : tenant_conf
2328 0 : .gc_period
2329 0 : .unwrap_or(self.conf.default_tenant_conf.gc_period)
2330 0 : }
2331 :
2332 0 : pub fn get_image_creation_threshold(&self) -> usize {
2333 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2334 0 : tenant_conf
2335 0 : .image_creation_threshold
2336 0 : .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
2337 0 : }
2338 :
2339 0 : pub fn get_pitr_interval(&self) -> Duration {
2340 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2341 0 : tenant_conf
2342 0 : .pitr_interval
2343 0 : .unwrap_or(self.conf.default_tenant_conf.pitr_interval)
2344 0 : }
2345 :
2346 0 : pub fn get_trace_read_requests(&self) -> bool {
2347 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2348 0 : tenant_conf
2349 0 : .trace_read_requests
2350 0 : .unwrap_or(self.conf.default_tenant_conf.trace_read_requests)
2351 0 : }
2352 :
2353 0 : pub fn get_min_resident_size_override(&self) -> Option<u64> {
2354 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2355 0 : tenant_conf
2356 0 : .min_resident_size_override
2357 0 : .or(self.conf.default_tenant_conf.min_resident_size_override)
2358 0 : }
2359 :
2360 0 : pub fn get_heatmap_period(&self) -> Option<Duration> {
2361 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2362 0 : let heatmap_period = tenant_conf
2363 0 : .heatmap_period
2364 0 : .unwrap_or(self.conf.default_tenant_conf.heatmap_period);
2365 0 : if heatmap_period.is_zero() {
2366 0 : None
2367 : } else {
2368 0 : Some(heatmap_period)
2369 : }
2370 0 : }
2371 :
2372 0 : pub fn get_lsn_lease_length(&self) -> Duration {
2373 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2374 0 : tenant_conf
2375 0 : .lsn_lease_length
2376 0 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
2377 0 : }
2378 :
2379 0 : pub fn set_new_tenant_config(&self, new_tenant_conf: TenantConfOpt) {
2380 0 : // Use read-copy-update in order to avoid overwriting the location config
2381 0 : // state if this races with [`Tenant::set_new_location_config`]. Note that
2382 0 : // this race is not possible if both request types come from the storage
2383 0 : // controller (as they should!) because an exclusive op lock is required
2384 0 : // on the storage controller side.
2385 0 : self.tenant_conf.rcu(|inner| {
2386 0 : Arc::new(AttachedTenantConf {
2387 0 : tenant_conf: new_tenant_conf.clone(),
2388 0 : location: inner.location,
2389 0 : })
2390 0 : });
2391 0 :
2392 0 : self.tenant_conf_updated(&new_tenant_conf);
2393 0 : // Don't hold self.timelines.lock() during the notifies.
2394 0 : // There's no risk of deadlock right now, but there could be if we consolidate
2395 0 : // mutexes in struct Timeline in the future.
2396 0 : let timelines = self.list_timelines();
2397 0 : for timeline in timelines {
2398 0 : timeline.tenant_conf_updated(&new_tenant_conf);
2399 0 : }
2400 0 : }
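// A minimal sketch of the arc_swap read-copy-update used above: rcu() re-runs
// the closure until its compare-and-swap succeeds, so a racing writer cannot
// be silently overwritten. A plain u64 stands in for the config struct.
use arc_swap::ArcSwap;

fn bump(counter: &ArcSwap<u64>) {
    // The closure sees the current value and returns the replacement.
    counter.rcu(|current| **current + 1);
}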
2401 :
2402 8 : pub(crate) fn set_new_location_config(&self, new_conf: AttachedTenantConf) {
2403 8 : let new_tenant_conf = new_conf.tenant_conf.clone();
2404 8 :
2405 8 : self.tenant_conf.store(Arc::new(new_conf));
2406 8 :
2407 8 : self.tenant_conf_updated(&new_tenant_conf);
2408 8 : // Don't hold self.timelines.lock() during the notifies.
2409 8 : // There's no risk of deadlock right now, but there could be if we consolidate
2410 8 : // mutexes in struct Timeline in the future.
2411 8 : let timelines = self.list_timelines();
2412 16 : for timeline in timelines {
2413 8 : timeline.tenant_conf_updated(&new_tenant_conf);
2414 8 : }
2415 8 : }
2416 :
2417 169 : fn get_timeline_get_throttle_config(
2418 169 : psconf: &'static PageServerConf,
2419 169 : overrides: &TenantConfOpt,
2420 169 : ) -> throttle::Config {
2421 169 : overrides
2422 169 : .timeline_get_throttle
2423 169 : .clone()
2424 169 : .unwrap_or(psconf.default_tenant_conf.timeline_get_throttle.clone())
2425 169 : }
2426 :
2427 8 : pub(crate) fn tenant_conf_updated(&self, new_conf: &TenantConfOpt) {
2428 8 : let conf = Self::get_timeline_get_throttle_config(self.conf, new_conf);
2429 8 : self.timeline_get_throttle.reconfigure(conf)
2430 8 : }
2431 :
2432 : /// Helper function to create a new Timeline struct.
2433 : ///
2434 : /// The returned Timeline is in Loading state. The caller is responsible for
2435 : /// initializing any on-disk state, and for inserting the Timeline to the 'timelines'
2436 : /// map.
2437 : ///
2438 : /// `validate_ancestor == false` is used when a timeline is created for deletion
2439 : /// and we might not have the ancestor present anymore, which is fine for
2440 : /// to-be-deleted timelines.
2441 383 : fn create_timeline_struct(
2442 383 : &self,
2443 383 : new_timeline_id: TimelineId,
2444 383 : new_metadata: &TimelineMetadata,
2445 383 : ancestor: Option<Arc<Timeline>>,
2446 383 : resources: TimelineResources,
2447 383 : cause: CreateTimelineCause,
2448 383 : last_aux_file_policy: Option<AuxFilePolicy>,
2449 383 : ) -> anyhow::Result<Arc<Timeline>> {
2450 383 : let state = match cause {
2451 : CreateTimelineCause::Load => {
2452 383 : let ancestor_id = new_metadata.ancestor_timeline();
2453 383 : anyhow::ensure!(
2454 383 : ancestor_id == ancestor.as_ref().map(|t| t.timeline_id),
2455 0 : "Timeline's {new_timeline_id} ancestor {ancestor_id:?} was not found"
2456 : );
2457 383 : TimelineState::Loading
2458 : }
2459 0 : CreateTimelineCause::Delete => TimelineState::Stopping,
2460 : };
2461 :
2462 383 : let pg_version = new_metadata.pg_version();
2463 383 :
2464 383 : let timeline = Timeline::new(
2465 383 : self.conf,
2466 383 : Arc::clone(&self.tenant_conf),
2467 383 : new_metadata,
2468 383 : ancestor,
2469 383 : new_timeline_id,
2470 383 : self.tenant_shard_id,
2471 383 : self.generation,
2472 383 : self.shard_identity,
2473 383 : self.walredo_mgr.clone(),
2474 383 : resources,
2475 383 : pg_version,
2476 383 : state,
2477 383 : last_aux_file_policy,
2478 383 : self.cancel.child_token(),
2479 383 : );
2480 383 :
2481 383 : Ok(timeline)
2482 383 : }
2483 :
2484 : // Allow too_many_arguments because a constructor's argument list naturally grows with the
2485 : // number of attributes in the struct: breaking these out into a builder wouldn't be helpful.
2486 : #[allow(clippy::too_many_arguments)]
2487 161 : fn new(
2488 161 : state: TenantState,
2489 161 : conf: &'static PageServerConf,
2490 161 : attached_conf: AttachedTenantConf,
2491 161 : shard_identity: ShardIdentity,
2492 161 : walredo_mgr: Option<Arc<WalRedoManager>>,
2493 161 : tenant_shard_id: TenantShardId,
2494 161 : remote_storage: GenericRemoteStorage,
2495 161 : deletion_queue_client: DeletionQueueClient,
2496 161 : ) -> Tenant {
2497 161 : let (state, mut rx) = watch::channel(state);
2498 161 :
2499 161 : tokio::spawn(async move {
2500 161 : // reflect tenant state in metrics:
2501 161 : // - global per tenant state: TENANT_STATE_METRIC
2502 161 : // - "set" of broken tenants: BROKEN_TENANTS_SET
2503 161 : //
2504 161 : // the set of broken tenants should not have zero counts, so that it remains accessible for
2505 161 : // alerting.
2506 161 :
2507 161 : let tid = tenant_shard_id.to_string();
2508 161 : let shard_id = tenant_shard_id.shard_slug().to_string();
2509 161 : let set_key = &[tid.as_str(), shard_id.as_str()][..];
2510 161 :
2511 319 : fn inspect_state(state: &TenantState) -> ([&'static str; 1], bool) {
2512 319 : ([state.into()], matches!(state, TenantState::Broken { .. }))
2513 319 : }
2514 161 :
2515 161 : let mut tuple = inspect_state(&rx.borrow_and_update());
2516 161 :
2517 161 : let is_broken = tuple.1;
2518 161 : let mut counted_broken = if is_broken {
2519 : // add the id to the set right away, there should not be any updates on the channel
2520 : // after before tenant is removed, if ever
2521 0 : BROKEN_TENANTS_SET.with_label_values(set_key).set(1);
2522 0 : true
2523 : } else {
2524 161 : false
2525 : };
2526 :
2527 319 : loop {
2528 319 : let labels = &tuple.0;
2529 319 : let current = TENANT_STATE_METRIC.with_label_values(labels);
2530 319 : current.inc();
2531 319 :
2532 319 : if rx.changed().await.is_err() {
2533 : // tenant has been dropped
2534 16 : current.dec();
2535 16 : drop(BROKEN_TENANTS_SET.remove_label_values(set_key));
2536 16 : break;
2537 158 : }
2538 158 :
2539 158 : current.dec();
2540 158 : tuple = inspect_state(&rx.borrow_and_update());
2541 158 :
2542 158 : let is_broken = tuple.1;
2543 158 : if is_broken && !counted_broken {
2544 0 : counted_broken = true;
2545 0 : // insert the tenant_id (back) into the set while avoiding needless counter
2546 0 : // access
2547 0 : BROKEN_TENANTS_SET.with_label_values(set_key).set(1);
2548 158 : }
2549 : }
2550 161 : });
2551 161 :
2552 161 : Tenant {
2553 161 : tenant_shard_id,
2554 161 : shard_identity,
2555 161 : generation: attached_conf.location.generation,
2556 161 : conf,
2557 161 : // using now here is a good enough approximation to catch tenants with really long
2558 161 : // activation times.
2559 161 : constructed_at: Instant::now(),
2560 161 : timelines: Mutex::new(HashMap::new()),
2561 161 : timelines_creating: Mutex::new(HashSet::new()),
2562 161 : gc_cs: tokio::sync::Mutex::new(()),
2563 161 : walredo_mgr,
2564 161 : remote_storage,
2565 161 : deletion_queue_client,
2566 161 : state,
2567 161 : cached_logical_sizes: tokio::sync::Mutex::new(HashMap::new()),
2568 161 : cached_synthetic_tenant_size: Arc::new(AtomicU64::new(0)),
2569 161 : eviction_task_tenant_state: tokio::sync::Mutex::new(EvictionTaskTenantState::default()),
2570 161 : activate_now_sem: tokio::sync::Semaphore::new(0),
2571 161 : cancel: CancellationToken::default(),
2572 161 : gate: Gate::default(),
2573 161 : timeline_get_throttle: Arc::new(throttle::Throttle::new(
2574 161 : Tenant::get_timeline_get_throttle_config(conf, &attached_conf.tenant_conf),
2575 161 : &crate::metrics::tenant_throttling::TIMELINE_GET,
2576 161 : )),
2577 161 : tenant_conf: Arc::new(ArcSwap::from_pointee(attached_conf)),
2578 161 : ongoing_timeline_detach: std::sync::Mutex::default(),
2579 161 : }
2580 161 : }
2581 :
2582 : /// Locate and load config
2583 0 : pub(super) fn load_tenant_config(
2584 0 : conf: &'static PageServerConf,
2585 0 : tenant_shard_id: &TenantShardId,
2586 0 : ) -> anyhow::Result<LocationConf> {
2587 0 : let legacy_config_path = conf.tenant_config_path(tenant_shard_id);
2588 0 : let config_path = conf.tenant_location_config_path(tenant_shard_id);
2589 0 :
2590 0 : if config_path.exists() {
2591 : // New-style config takes precedence
2592 0 : let deserialized = Self::read_config(&config_path)?;
2593 0 : Ok(toml_edit::de::from_document::<LocationConf>(deserialized)?)
2594 0 : } else if legacy_config_path.exists() {
2595 : // Upgrade path: found an old-style configuration only
2596 0 : let deserialized = Self::read_config(&legacy_config_path)?;
2597 :
2598 0 : let mut tenant_conf = TenantConfOpt::default();
2599 0 : for (key, item) in deserialized.iter() {
2600 0 : match key {
2601 0 : "tenant_config" => {
2602 0 : tenant_conf = TenantConfOpt::try_from(item.to_owned()).context(format!("Failed to parse config from file '{legacy_config_path}' as pageserver config"))?;
2603 : }
2604 0 : _ => bail!(
2605 0 : "config file {legacy_config_path} has unrecognized pageserver option '{key}'"
2606 0 : ),
2607 : }
2608 : }
2609 :
2610 : // Legacy configs are implicitly in attached state, and do not support sharding
2611 0 : Ok(LocationConf::attached_single(
2612 0 : tenant_conf,
2613 0 : Generation::none(),
2614 0 : &models::ShardParameters::default(),
2615 0 : ))
2616 : } else {
2617 : // FIXME If the config file is not found, assume that we're attaching
2618 : // a detached tenant and config is passed via attach command.
2619 : // https://github.com/neondatabase/neon/issues/1555
2620 : // OR: we're loading after incomplete deletion that managed to remove config.
2621 0 : info!(
2622 0 : "tenant config not found in {} or {}",
2623 : config_path, legacy_config_path
2624 : );
2625 0 : Ok(LocationConf::default())
2626 : }
2627 0 : }
2628 :
2629 0 : fn read_config(path: &Utf8Path) -> anyhow::Result<toml_edit::Document> {
2630 0 : info!("loading tenant configuration from {path}");
2631 :
2632 : // load and parse file
2633 0 : let config = fs::read_to_string(path)
2634 0 : .with_context(|| format!("Failed to load config from path '{path}'"))?;
2635 :
2636 0 : config
2637 0 : .parse::<toml_edit::Document>()
2638 0 : .with_context(|| format!("Failed to parse config from file '{path}' as toml file"))
2639 0 : }
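// A sketch of parsing a legacy-style tenant config the way load_tenant_config
// does: parse the file contents into a toml_edit::Document and look for the
// "tenant_config" table. The inline text is illustrative only.
fn parse_legacy_demo() -> anyhow::Result<()> {
    let text = "[tenant_config]\ngc_horizon = 67108864\n";
    let doc = text.parse::<toml_edit::Document>()?;
    anyhow::ensure!(doc.iter().any(|(key, _)| key == "tenant_config"));
    Ok(())
}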
2640 :
2641 0 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
2642 : pub(super) async fn persist_tenant_config(
2643 : conf: &'static PageServerConf,
2644 : tenant_shard_id: &TenantShardId,
2645 : location_conf: &LocationConf,
2646 : ) -> anyhow::Result<()> {
2647 : let legacy_config_path = conf.tenant_config_path(tenant_shard_id);
2648 : let config_path = conf.tenant_location_config_path(tenant_shard_id);
2649 :
2650 : Self::persist_tenant_config_at(
2651 : tenant_shard_id,
2652 : &config_path,
2653 : &legacy_config_path,
2654 : location_conf,
2655 : )
2656 : .await
2657 : }
2658 :
2659 0 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
2660 : pub(super) async fn persist_tenant_config_at(
2661 : tenant_shard_id: &TenantShardId,
2662 : config_path: &Utf8Path,
2663 : legacy_config_path: &Utf8Path,
2664 : location_conf: &LocationConf,
2665 : ) -> anyhow::Result<()> {
2666 : if let LocationMode::Attached(attach_conf) = &location_conf.mode {
2667 : // The modern-style LocationConf config file requires a generation to be set. In case someone
2668 : // is running a pageserver without the infrastructure to set generations, write out the legacy-style
2669 : // config file that only contains TenantConf.
2670 : //
2671 : // This will eventually be removed in https://github.com/neondatabase/neon/issues/5388
2672 :
2673 : if attach_conf.generation.is_none() {
2674 : tracing::info!(
2675 : "Running without generations, writing legacy-style tenant config file"
2676 : );
2677 : Self::persist_tenant_config_legacy(
2678 : tenant_shard_id,
2679 : legacy_config_path,
2680 : &location_conf.tenant_conf,
2681 : )
2682 : .await?;
2683 :
2684 : return Ok(());
2685 : }
2686 : }
2687 :
2688 : debug!("persisting tenantconf to {config_path}");
2689 :
2690 : let mut conf_content = r#"# This file contains a specific per-tenant's config.
2691 : # It is read in case of pageserver restart.
2692 : "#
2693 : .to_string();
2694 :
2695 0 : fail::fail_point!("tenant-config-before-write", |_| {
2696 0 : anyhow::bail!("tenant-config-before-write");
2697 0 : });
2698 :
2699 : // Convert the config to a toml file.
2700 : conf_content += &toml_edit::ser::to_string_pretty(&location_conf)?;
2701 :
2702 : let temp_path = path_with_suffix_extension(config_path, TEMP_FILE_SUFFIX);
2703 :
2704 : let tenant_shard_id = *tenant_shard_id;
2705 : let config_path = config_path.to_owned();
2706 : let conf_content = conf_content.into_bytes();
2707 : VirtualFile::crashsafe_overwrite(config_path.clone(), temp_path, conf_content)
2708 : .await
2709 0 : .with_context(|| format!("write tenant {tenant_shard_id} config to {config_path}"))?;
2710 :
2711 : Ok(())
2712 : }
2713 :
2714 0 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
2715 : async fn persist_tenant_config_legacy(
2716 : tenant_shard_id: &TenantShardId,
2717 : target_config_path: &Utf8Path,
2718 : tenant_conf: &TenantConfOpt,
2719 : ) -> anyhow::Result<()> {
2720 : debug!("persisting tenantconf to {target_config_path}");
2721 :
2722 : let mut conf_content = r#"# This file contains a specific per-tenant's config.
2723 : # It is read in case of pageserver restart.
2724 :
2725 : [tenant_config]
2726 : "#
2727 : .to_string();
2728 :
2729 : // Convert the config to a toml file.
2730 : conf_content += &toml_edit::ser::to_string(&tenant_conf)?;
2731 :
2732 : let temp_path = path_with_suffix_extension(target_config_path, TEMP_FILE_SUFFIX);
2733 :
2734 : let tenant_shard_id = *tenant_shard_id;
2735 : let target_config_path = target_config_path.to_owned();
2736 : let conf_content = conf_content.into_bytes();
2737 : VirtualFile::crashsafe_overwrite(target_config_path.clone(), temp_path, conf_content)
2738 : .await
2739 0 : .with_context(|| {
2740 0 : format!("write tenant {tenant_shard_id} config to {target_config_path}")
2741 0 : })?;
2742 : Ok(())
2743 : }
2744 :
2745 : //
2746 : // How garbage collection works:
2747 : //
2748 : // +--bar------------->
2749 : // /
2750 : // +----+-----foo---------------->
2751 : // /
2752 : // ----main--+-------------------------->
2753 : // \
2754 : // +-----baz-------->
2755 : //
2756 : //
2757 : // 1. Grab 'gc_cs' mutex to prevent new timelines from being created while Timeline's
2758 : // `gc_infos` are being refreshed
2759 : // 2. Scan collected timelines, and on each timeline, make note of
2760 : // all the points where other timelines have been branched off.
2761 : // We will refrain from removing page versions at those LSNs.
2762 : // 3. For each timeline, scan all layer files on the timeline.
2763 : // Remove all files for which a newer file exists and which
2764 : // don't cover any branch point LSNs.
2765 : //
2766 : // TODO:
2767 : // - if a relation has a non-incremental persistent layer on a child branch, then we
2768 : // don't need to keep that in the parent anymore. But currently
2769 : // we do.
2770 754 : async fn gc_iteration_internal(
2771 754 : &self,
2772 754 : target_timeline_id: Option<TimelineId>,
2773 754 : horizon: u64,
2774 754 : pitr: Duration,
2775 754 : cancel: &CancellationToken,
2776 754 : ctx: &RequestContext,
2777 754 : ) -> Result<GcResult, GcError> {
2778 754 : let mut totals: GcResult = Default::default();
2779 754 : let now = Instant::now();
2780 :
2781 754 : let gc_timelines = self
2782 754 : .refresh_gc_info_internal(target_timeline_id, horizon, pitr, cancel, ctx)
2783 718 : .await?;
2784 :
2785 754 : failpoint_support::sleep_millis_async!("gc_iteration_internal_after_getting_gc_timelines");
2786 :
2787 : // If there is nothing to GC, we don't want any messages in the INFO log.
2788 754 : if !gc_timelines.is_empty() {
2789 754 : info!("{} timelines need GC", gc_timelines.len());
2790 : } else {
2791 0 : debug!("{} timelines need GC", gc_timelines.len());
2792 : }
2793 :
2794 : // Perform GC for each timeline.
2795 : //
2796 : // Note that we don't hold the `Tenant::gc_cs` lock here because we don't want to delay the
2797 : // branch creation task, which requires the GC lock. A GC iteration can run concurrently
2798 : // with branch creation.
2799 : //
2800 : // See comments in [`Tenant::branch_timeline`] for more information about why branch
2801 : // creation task can run concurrently with timeline's GC iteration.
2802 1508 : for timeline in gc_timelines {
2803 754 : if cancel.is_cancelled() {
2804 : // We were requested to shut down. Stop and return with the progress we
2805 : // made.
2806 0 : break;
2807 754 : }
2808 754 : let result = match timeline.gc().await {
2809 : Err(GcError::TimelineCancelled) => {
2810 0 : if target_timeline_id.is_some() {
2811 : // If we were targeting this specific timeline, surface cancellation to the caller
2812 0 : return Err(GcError::TimelineCancelled);
2813 : } else {
2814 : // A timeline may be shutting down independently of the tenant's lifecycle: we should
2815 : // skip past this and proceed to try GC on other timelines.
2816 0 : continue;
2817 : }
2818 : }
2819 754 : r => r?,
2820 : };
2821 754 : totals += result;
2822 : }
2823 :
2824 754 : totals.elapsed = now.elapsed();
2825 754 : Ok(totals)
2826 754 : }
2827 :
2828 : /// Refreshes the Timeline::gc_info for all timelines, returning the
2829 : /// vector of timelines which have [`Timeline::get_last_record_lsn`] past
2830 : /// [`Tenant::get_gc_horizon`].
2831 : ///
2832 : /// This is usually executed as part of periodic gc, but can now be triggered more often.
2833 0 : pub(crate) async fn refresh_gc_info(
2834 0 : &self,
2835 0 : cancel: &CancellationToken,
2836 0 : ctx: &RequestContext,
2837 0 : ) -> Result<Vec<Arc<Timeline>>, GcError> {
2838 0 : // since this method can now be called at different rates than the configured gc loop,
2839 0 : // these configuration values may take effect sooner than they previously did, when
2840 0 : // they were only read from the gc task.
2841 0 : let horizon = self.get_gc_horizon();
2842 0 : let pitr = self.get_pitr_interval();
2843 0 :
2844 0 : // refresh all timelines
2845 0 : let target_timeline_id = None;
2846 0 :
2847 0 : self.refresh_gc_info_internal(target_timeline_id, horizon, pitr, cancel, ctx)
2848 0 : .await
2849 0 : }
2850 :
2851 754 : async fn refresh_gc_info_internal(
2852 754 : &self,
2853 754 : target_timeline_id: Option<TimelineId>,
2854 754 : horizon: u64,
2855 754 : pitr: Duration,
2856 754 : cancel: &CancellationToken,
2857 754 : ctx: &RequestContext,
2858 754 : ) -> Result<Vec<Arc<Timeline>>, GcError> {
2859 754 : // before taking the gc_cs lock, do the heavier-weight work of finding gc_cutoff points for
2860 754 : // currently visible timelines.
2861 754 : let timelines = self
2862 754 : .timelines
2863 754 : .lock()
2864 754 : .unwrap()
2865 754 : .values()
2866 3310 : .filter(|tl| match target_timeline_id.as_ref() {
2867 3310 : Some(target) => &tl.timeline_id == target,
2868 0 : None => true,
2869 3310 : })
2870 754 : .cloned()
2871 754 : .collect::<Vec<_>>();
2872 754 :
2873 754 : let mut gc_cutoffs: HashMap<TimelineId, GcCutoffs> =
2874 754 : HashMap::with_capacity(timelines.len());
2875 :
2876 754 : for timeline in timelines.iter() {
2877 754 : let cutoff = timeline
2878 754 : .get_last_record_lsn()
2879 754 : .checked_sub(horizon)
2880 754 : .unwrap_or(Lsn(0));
2881 :
2882 754 : let cutoffs = timeline.find_gc_cutoffs(cutoff, pitr, cancel, ctx).await?;
2883 754 : let old = gc_cutoffs.insert(timeline.timeline_id, cutoffs);
2884 754 : assert!(old.is_none());
2885 : }
2886 :
2887 754 : if !self.is_active() || self.cancel.is_cancelled() {
2888 0 : return Err(GcError::TenantCancelled);
2889 754 : }
2890 :
2891 : // grab mutex to prevent new timelines from being created here; avoid doing long operations
2892 : // because that will stall branch creation.
2893 754 : let gc_cs = self.gc_cs.lock().await;
2894 :
2895 : // Scan all timelines. For each timeline, remember the timeline ID and
2896 : // the branch point where it was created.
2897 754 : let (all_branchpoints, timelines): (BTreeSet<(TimelineId, Lsn)>, _) = {
2898 754 : let timelines = self.timelines.lock().unwrap();
2899 754 : let mut all_branchpoints = BTreeSet::new();
2900 754 : let timelines = {
2901 754 : if let Some(target_timeline_id) = target_timeline_id.as_ref() {
2902 754 : if timelines.get(target_timeline_id).is_none() {
2903 0 : return Err(GcError::TimelineNotFound);
2904 754 : }
2905 0 : };
2906 :
2907 754 : timelines
2908 754 : .iter()
2909 3310 : .map(|(_timeline_id, timeline_entry)| {
2910 2556 : if let Some(ancestor_timeline_id) =
2911 3310 : &timeline_entry.get_ancestor_timeline_id()
2912 : {
2913 : // If target_timeline is specified, we only need to know branchpoints of its children
2914 2556 : if let Some(timeline_id) = target_timeline_id {
2915 2556 : if ancestor_timeline_id == &timeline_id {
2916 6 : all_branchpoints.insert((
2917 6 : *ancestor_timeline_id,
2918 6 : timeline_entry.get_ancestor_lsn(),
2919 6 : ));
2920 2550 : }
2921 : }
2922 : // Collect branchpoints for all timelines
2923 0 : else {
2924 0 : all_branchpoints.insert((
2925 0 : *ancestor_timeline_id,
2926 0 : timeline_entry.get_ancestor_lsn(),
2927 0 : ));
2928 0 : }
2929 754 : }
2930 :
2931 3310 : timeline_entry.clone()
2932 3310 : })
2933 754 : .collect::<Vec<_>>()
2934 754 : };
2935 754 : (all_branchpoints, timelines)
2936 754 : };
2937 754 :
2938 754 : // Ok, we now know all the branch points.
2939 754 : // Update the GC information for each timeline.
2940 754 : let mut gc_timelines = Vec::with_capacity(timelines.len());
2941 4064 : for timeline in timelines {
2942 : // If target_timeline is specified, ignore all other timelines
2943 3310 : if let Some(target_timeline_id) = target_timeline_id {
2944 3310 : if timeline.timeline_id != target_timeline_id {
2945 2556 : continue;
2946 754 : }
2947 0 : }
2948 :
2949 754 : let branchpoints: Vec<Lsn> = all_branchpoints
2950 754 : .range((
2951 754 : Included((timeline.timeline_id, Lsn(0))),
2952 754 : Included((timeline.timeline_id, Lsn(u64::MAX))),
2953 754 : ))
2954 754 : .map(|&x| x.1)
2955 754 : .collect();
2956 754 :
2957 754 : {
2958 754 : let mut target = timeline.gc_info.write().unwrap();
2959 754 :
2960 754 : let now = SystemTime::now();
2961 754 : target.leases.retain(|_, lease| !lease.is_expired(&now));
2962 754 :
2963 754 : timeline
2964 754 : .metrics
2965 754 : .valid_lsn_lease_count_gauge
2966 754 : .set(target.leases.len() as u64);
2967 754 :
2968 754 : match gc_cutoffs.remove(&timeline.timeline_id) {
2969 754 : Some(cutoffs) => {
2970 754 : target.retain_lsns = branchpoints;
2971 754 : target.cutoffs = cutoffs;
2972 754 : }
2973 0 : None => {
2974 0 : // reasons for this being unavailable:
2975 0 : // - this timeline was created while we were finding cutoffs
2976 0 : // - lsn for timestamp search fails for this timeline repeatedly
2977 0 : //
2978 0 : // in both cases, refreshing the branchpoints is correct.
2979 0 : target.retain_lsns = branchpoints;
2980 0 : }
2981 : };
2982 : }
2983 :
2984 754 : gc_timelines.push(timeline);
2985 : }
2986 754 : drop(gc_cs);
2987 754 : Ok(gc_timelines)
2988 754 : }
2989 :
2990 : /// A substitute for `branch_timeline` for use in unit tests.
2991 : /// The returned timeline will have state value `Active` to make various `anyhow::ensure!()`
2992 : /// calls pass, but we do not actually call `.activate()` under the hood. So, none of the
2993 : /// timeline background tasks are launched, except the flush loop.
2994 : #[cfg(test)]
2995 228 : async fn branch_timeline_test(
2996 228 : &self,
2997 228 : src_timeline: &Arc<Timeline>,
2998 228 : dst_id: TimelineId,
2999 228 : ancestor_lsn: Option<Lsn>,
3000 228 : ctx: &RequestContext,
3001 228 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
3002 228 : let create_guard = self.create_timeline_create_guard(dst_id).unwrap();
3003 228 : let tl = self
3004 228 : .branch_timeline_impl(src_timeline, dst_id, ancestor_lsn, create_guard, ctx)
3005 4 : .await?;
3006 224 : tl.set_state(TimelineState::Active);
3007 224 : Ok(tl)
3008 228 : }
3009 :
3010 : /// Helper for unit tests to branch a timeline with some pre-loaded states.
3011 : #[cfg(test)]
3012 : #[allow(clippy::too_many_arguments)]
3013 4 : pub async fn branch_timeline_test_with_layers(
3014 4 : &self,
3015 4 : src_timeline: &Arc<Timeline>,
3016 4 : dst_id: TimelineId,
3017 4 : ancestor_lsn: Option<Lsn>,
3018 4 : ctx: &RequestContext,
3019 4 : delta_layer_desc: Vec<Vec<(pageserver_api::key::Key, Lsn, crate::repository::Value)>>,
3020 4 : image_layer_desc: Vec<(Lsn, Vec<(pageserver_api::key::Key, bytes::Bytes)>)>,
3021 4 : end_lsn: Lsn,
3022 4 : ) -> anyhow::Result<Arc<Timeline>> {
3023 4 : let tline = self
3024 4 : .branch_timeline_test(src_timeline, dst_id, ancestor_lsn, ctx)
3025 0 : .await?;
3026 4 : let ancestor_lsn = if let Some(ancestor_lsn) = ancestor_lsn {
3027 4 : ancestor_lsn
3028 : } else {
3029 0 : tline.get_last_record_lsn()
3030 : };
3031 4 : assert!(end_lsn >= ancestor_lsn);
3032 4 : tline.force_advance_lsn(end_lsn);
3033 4 : for deltas in delta_layer_desc {
3034 0 : tline
3035 0 : .force_create_delta_layer(deltas, Some(ancestor_lsn), ctx)
3036 0 : .await?;
3037 : }
3038 8 : for (lsn, images) in image_layer_desc {
3039 4 : tline
3040 4 : .force_create_image_layer(lsn, images, Some(ancestor_lsn), ctx)
3041 14 : .await?;
3042 : }
3043 4 : Ok(tline)
3044 4 : }
3045 :
3046 : /// Branch an existing timeline.
3047 : ///
3048 : /// The caller is responsible for activating the returned timeline.
3049 0 : async fn branch_timeline(
3050 0 : &self,
3051 0 : src_timeline: &Arc<Timeline>,
3052 0 : dst_id: TimelineId,
3053 0 : start_lsn: Option<Lsn>,
3054 0 : timeline_create_guard: TimelineCreateGuard<'_>,
3055 0 : ctx: &RequestContext,
3056 0 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
3057 0 : self.branch_timeline_impl(src_timeline, dst_id, start_lsn, timeline_create_guard, ctx)
3058 0 : .await
3059 0 : }
3060 :
3061 228 : async fn branch_timeline_impl(
3062 228 : &self,
3063 228 : src_timeline: &Arc<Timeline>,
3064 228 : dst_id: TimelineId,
3065 228 : start_lsn: Option<Lsn>,
3066 228 : timeline_create_guard: TimelineCreateGuard<'_>,
3067 228 : _ctx: &RequestContext,
3068 228 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
3069 228 : let src_id = src_timeline.timeline_id;
3070 :
3071 : // We will validate our ancestor LSN in this function. Acquire the GC lock so that
3072 : // this check cannot race with GC, and the ancestor LSN is guaranteed to remain
3073 : // valid while we are creating the branch.
3074 228 : let _gc_cs = self.gc_cs.lock().await;
3075 :
3076 : // If no start LSN is specified, we branch the new timeline from the source timeline's last record LSN
3077 228 : let start_lsn = start_lsn.unwrap_or_else(|| {
3078 2 : let lsn = src_timeline.get_last_record_lsn();
3079 2 : info!("branching timeline {dst_id} from timeline {src_id} at last record LSN: {lsn}");
3080 2 : lsn
3081 228 : });
3082 228 :
3083 228 : // Ensure that `start_lsn` is valid, i.e. the LSN is within the PITR
3084 228 : // horizon on the source timeline
3085 228 : //
3086 228 : // We check it against both the planned GC cutoff stored in 'gc_info',
3087 228 : // and the 'latest_gc_cutoff' of the last GC that was performed. The
3088 228 : // planned GC cutoff in 'gc_info' is normally larger than
3089 228 : // 'latest_gc_cutoff_lsn', but beware of corner cases like if you just
3090 228 : // changed the GC settings for the tenant to make the PITR window
3091 228 : // larger, but some of the data was already removed by an earlier GC
3092 228 : // iteration.
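     : //
     : // Illustrative example (hypothetical LSNs): after widening the PITR window,
     : // the planned cutoff in 'gc_info' might move back to 0/5000 while the last
     : // completed GC already removed data up to latest_gc_cutoff = 0/7000. A branch
     : // at 0/6000 would pass the 'gc_info' check alone, so we must check both.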
3093 228 :
3094 228 : // check against last actual 'latest_gc_cutoff' first
3095 228 : let latest_gc_cutoff_lsn = src_timeline.get_latest_gc_cutoff_lsn();
3096 228 : src_timeline
3097 228 : .check_lsn_is_in_scope(start_lsn, &latest_gc_cutoff_lsn)
3098 228 : .context(format!(
3099 228 : "invalid branch start lsn: less than latest GC cutoff {}",
3100 228 : *latest_gc_cutoff_lsn,
3101 228 : ))
3102 228 : .map_err(CreateTimelineError::AncestorLsn)?;
3103 :
3104 : // and then the planned GC cutoff
3105 : {
3106 224 : let gc_info = src_timeline.gc_info.read().unwrap();
3107 224 : let cutoff = gc_info.min_cutoff();
3108 224 : if start_lsn < cutoff {
3109 0 : return Err(CreateTimelineError::AncestorLsn(anyhow::anyhow!(
3110 0 : "invalid branch start lsn: less than planned GC cutoff {cutoff}"
3111 0 : )));
3112 224 : }
3113 224 : }
3114 224 :
3115 224 : //
3116 224 : // The branch point is valid, and we are still holding the 'gc_cs' lock
3117 224 : // so that GC cannot advance the GC cutoff until we are finished.
3118 224 : // Proceed with the branch creation.
3119 224 : //
3120 224 :
3121 224 : // Determine prev-LSN for the new timeline. We can only determine it if
3122 224 : // the timeline was branched at the current end of the source timeline.
3123 224 : let RecordLsn {
3124 224 : last: src_last,
3125 224 : prev: src_prev,
3126 224 : } = src_timeline.get_last_record_rlsn();
3127 224 : let dst_prev = if src_last == start_lsn {
3128 214 : Some(src_prev)
3129 : } else {
3130 10 : None
3131 : };
3132 :
3133 : // Create the metadata file, noting the ancestor of the new timeline.
3134 : // There is initially no data in it, but all the read-calls know to look
3135 : // into the ancestor.
3136 224 : let metadata = TimelineMetadata::new(
3137 224 : start_lsn,
3138 224 : dst_prev,
3139 224 : Some(src_id),
3140 224 : start_lsn,
3141 224 : *src_timeline.latest_gc_cutoff_lsn.read(), // FIXME: should we hold onto this guard longer?
3142 224 : src_timeline.initdb_lsn,
3143 224 : src_timeline.pg_version,
3144 224 : );
3145 :
3146 224 : let uninitialized_timeline = self
3147 224 : .prepare_new_timeline(
3148 224 : dst_id,
3149 224 : &metadata,
3150 224 : timeline_create_guard,
3151 224 : start_lsn + 1,
3152 224 : Some(Arc::clone(src_timeline)),
3153 224 : src_timeline.last_aux_file_policy.load(),
3154 224 : )
3155 0 : .await?;
3156 :
3157 224 : let new_timeline = uninitialized_timeline.finish_creation()?;
3158 :
3159 : // Root timeline gets its layers during creation and uploads them along with the metadata.
3160 : // A branch timeline, though, may get no writes for some time after creation, and hence won't have any layers created yet.
3161 : // We still need to upload its metadata eagerly: if other nodes `attach` the tenant and miss this timeline, their GC
3162 : // could get incorrect information and remove more layers than needed.
3163 : // See also https://github.com/neondatabase/neon/issues/3865
3164 224 : new_timeline
3165 224 : .remote_client
3166 224 : .schedule_index_upload_for_full_metadata_update(&metadata)
3167 224 : .context("branch initial metadata upload")?;
3168 :
3169 224 : Ok(new_timeline)
3170 228 : }
3171 :
3172 : /// For unit tests, make this visible so that other modules can directly create timelines
3173 : #[cfg(test)]
3174 4 : #[tracing::instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), %timeline_id))]
3175 : pub(crate) async fn bootstrap_timeline_test(
3176 : &self,
3177 : timeline_id: TimelineId,
3178 : pg_version: u32,
3179 : load_existing_initdb: Option<TimelineId>,
3180 : ctx: &RequestContext,
3181 : ) -> anyhow::Result<Arc<Timeline>> {
3182 : let create_guard = self.create_timeline_create_guard(timeline_id).unwrap();
3183 : self.bootstrap_timeline(
3184 : timeline_id,
3185 : pg_version,
3186 : load_existing_initdb,
3187 : create_guard,
3188 : ctx,
3189 : )
3190 : .await
3191 : }
3192 :
3193 0 : async fn upload_initdb(
3194 0 : &self,
3195 0 : timelines_path: &Utf8PathBuf,
3196 0 : pgdata_path: &Utf8PathBuf,
3197 0 : timeline_id: &TimelineId,
3198 0 : ) -> anyhow::Result<()> {
3199 0 : let temp_path = timelines_path.join(format!(
3200 0 : "{INITDB_PATH}.upload-{timeline_id}.{TEMP_FILE_SUFFIX}"
3201 0 : ));
3202 :
3203 : scopeguard::defer! {
3204 : if let Err(e) = fs::remove_file(&temp_path) {
3205 : error!("Failed to remove temporary initdb archive '{temp_path}': {e}");
3206 : }
3207 : }
3208 :
3209 0 : let (pgdata_zstd, tar_zst_size) = create_zst_tarball(pgdata_path, &temp_path).await?;
3210 : const INITDB_TAR_ZST_WARN_LIMIT: u64 = 2 * 1024 * 1024;
3211 0 : if tar_zst_size > INITDB_TAR_ZST_WARN_LIMIT {
3212 0 : warn!(
3213 0 : "compressed {temp_path} size of {tar_zst_size} is above limit {INITDB_TAR_ZST_WARN_LIMIT}."
3214 : );
3215 0 : }
3216 :
3217 : pausable_failpoint!("before-initdb-upload");
3218 :
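     : // `backoff::retry` yields `None` when it gives up due to cancellation; the
     : // `ok_or_else`/`and_then` chain below flattens that Option<Result<_>> into a
     : // single anyhow::Result.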
3219 0 : backoff::retry(
3220 0 : || async {
3221 0 : self::remote_timeline_client::upload_initdb_dir(
3222 0 : &self.remote_storage,
3223 0 : &self.tenant_shard_id.tenant_id,
3224 0 : timeline_id,
3225 0 : pgdata_zstd.try_clone().await?,
3226 0 : tar_zst_size,
3227 0 : &self.cancel,
3228 0 : )
3229 0 : .await
3230 0 : },
3231 0 : |_| false,
3232 0 : 3,
3233 0 : u32::MAX,
3234 0 : "persist_initdb_tar_zst",
3235 0 : &self.cancel,
3236 0 : )
3237 0 : .await
3238 0 : .ok_or_else(|| anyhow::Error::new(TimeoutOrCancel::Cancel))
3239 0 : .and_then(|x| x)
3240 0 : }
3241 :
3242 : /// - run initdb to initialize a temporary instance and get bootstrap data
3243 : /// - after initialization completes, tar up the temp dir and upload it to S3.
3244 : ///
3245 : /// The caller is responsible for activating the returned timeline.
3246 2 : async fn bootstrap_timeline(
3247 2 : &self,
3248 2 : timeline_id: TimelineId,
3249 2 : pg_version: u32,
3250 2 : load_existing_initdb: Option<TimelineId>,
3251 2 : timeline_create_guard: TimelineCreateGuard<'_>,
3252 2 : ctx: &RequestContext,
3253 2 : ) -> anyhow::Result<Arc<Timeline>> {
3254 2 : // create a `tenant/{tenant_id}/timelines/basebackup-{timeline_id}.{TEMP_FILE_SUFFIX}/`
3255 2 : // temporary directory for basebackup files for the given timeline.
3256 2 :
3257 2 : let timelines_path = self.conf.timelines_path(&self.tenant_shard_id);
3258 2 : let pgdata_path = path_with_suffix_extension(
3259 2 : timelines_path.join(format!("basebackup-{timeline_id}")),
3260 2 : TEMP_FILE_SUFFIX,
3261 2 : );
3262 2 :
3263 2 : // Remove whatever was left from the previous runs: safe because TimelineCreateGuard guarantees
3264 2 : // we won't race with other creations or existing timelines with the same path.
3265 2 : if pgdata_path.exists() {
3266 0 : fs::remove_dir_all(&pgdata_path).with_context(|| {
3267 0 : format!("Failed to remove already existing initdb directory: {pgdata_path}")
3268 0 : })?;
3269 2 : }
3270 :
3271 : // This new directory is only needed during bootstrap; arrange to remove it immediately afterwards.
3272 : scopeguard::defer! {
3273 : if let Err(e) = fs::remove_dir_all(&pgdata_path) {
3274 : // this is unlikely, but we will remove the directory on pageserver restart or another bootstrap call
3275 : error!("Failed to remove temporary initdb directory '{pgdata_path}': {e}");
3276 : }
3277 : }
3278 2 : if let Some(existing_initdb_timeline_id) = load_existing_initdb {
3279 2 : if existing_initdb_timeline_id != timeline_id {
3280 0 : let source_path = &remote_initdb_archive_path(
3281 0 : &self.tenant_shard_id.tenant_id,
3282 0 : &existing_initdb_timeline_id,
3283 0 : );
3284 0 : let dest_path =
3285 0 : &remote_initdb_archive_path(&self.tenant_shard_id.tenant_id, &timeline_id);
3286 0 :
3287 0 : // if this fails, it will get retried by retried control plane requests
3288 0 : self.remote_storage
3289 0 : .copy_object(source_path, dest_path, &self.cancel)
3290 0 : .await
3291 0 : .context("copy initdb tar")?;
3292 2 : }
3293 2 : let (initdb_tar_zst_path, initdb_tar_zst) =
3294 2 : self::remote_timeline_client::download_initdb_tar_zst(
3295 2 : self.conf,
3296 2 : &self.remote_storage,
3297 2 : &self.tenant_shard_id,
3298 2 : &existing_initdb_timeline_id,
3299 2 : &self.cancel,
3300 2 : )
3301 692 : .await
3302 2 : .context("download initdb tar")?;
3303 :
3304 : scopeguard::defer! {
3305 : if let Err(e) = fs::remove_file(&initdb_tar_zst_path) {
3306 : error!("Failed to remove temporary initdb archive '{initdb_tar_zst_path}': {e}");
3307 : }
3308 : }
3309 :
3310 2 : let buf_read =
3311 2 : BufReader::with_capacity(remote_timeline_client::BUFFER_SIZE, initdb_tar_zst);
3312 2 : extract_zst_tarball(&pgdata_path, buf_read)
3313 10149 : .await
3314 2 : .context("extract initdb tar")?;
3315 : } else {
3316 : // Init a temporary instance to get bootstrap data; this creates a directory at `pgdata_path`.
3317 0 : run_initdb(self.conf, &pgdata_path, pg_version, &self.cancel).await?;
3318 :
3319 : // Upload the created data dir to S3
3320 0 : if self.tenant_shard_id().is_shard_zero() {
3321 0 : self.upload_initdb(&timelines_path, &pgdata_path, &timeline_id)
3322 0 : .await?;
3323 0 : }
3324 : }
3325 2 : let pgdata_lsn = import_datadir::get_lsn_from_controlfile(&pgdata_path)?.align();
3326 2 :
3327 2 : // Import the contents of the data directory at the initial checkpoint
3328 2 : // LSN, and any WAL after that.
3329 2 : // The initdb LSN will be equal to last_record_lsn, which is set after the import.
3330 2 : // Because we know it upfront, we pass it to the metadata instead of using an Option or a dummy zero value.
3331 2 : let new_metadata = TimelineMetadata::new(
3332 2 : Lsn(0),
3333 2 : None,
3334 2 : None,
3335 2 : Lsn(0),
3336 2 : pgdata_lsn,
3337 2 : pgdata_lsn,
3338 2 : pg_version,
3339 2 : );
3340 2 : let raw_timeline = self
3341 2 : .prepare_new_timeline(
3342 2 : timeline_id,
3343 2 : &new_metadata,
3344 2 : timeline_create_guard,
3345 2 : pgdata_lsn,
3346 2 : None,
3347 2 : None,
3348 2 : )
3349 0 : .await?;
3350 :
3351 2 : let tenant_shard_id = raw_timeline.owning_tenant.tenant_shard_id;
3352 2 : let unfinished_timeline = raw_timeline.raw_timeline()?;
3353 :
3354 : // Flush the new layer files to disk before we make the timeline available to
3355 : // the outside world.
3356 : //
3357 : // The flush loop needs to be spawned in order to be able to flush.
3358 2 : unfinished_timeline.maybe_spawn_flush_loop();
3359 2 :
3360 2 : import_datadir::import_timeline_from_postgres_datadir(
3361 2 : unfinished_timeline,
3362 2 : &pgdata_path,
3363 2 : pgdata_lsn,
3364 2 : ctx,
3365 2 : )
3366 9487 : .await
3367 2 : .with_context(|| {
3368 0 : format!("Failed to import pgdatadir for timeline {tenant_shard_id}/{timeline_id}")
3369 2 : })?;
3370 :
3371 2 : fail::fail_point!("before-checkpoint-new-timeline", |_| {
3372 0 : anyhow::bail!("failpoint before-checkpoint-new-timeline");
3373 2 : });
3374 :
3375 2 : unfinished_timeline
3376 2 : .freeze_and_flush()
3377 2 : .await
3378 2 : .with_context(|| {
3379 0 : format!(
3380 0 : "Failed to flush after pgdatadir import for timeline {tenant_shard_id}/{timeline_id}"
3381 0 : )
3382 2 : })?;
3383 :
3384 : // All done!
3385 2 : let timeline = raw_timeline.finish_creation()?;
3386 :
3387 2 : Ok(timeline)
3388 2 : }
3389 :
3390 : /// Call this before constructing a timeline, to build its required structures
3391 377 : fn build_timeline_resources(&self, timeline_id: TimelineId) -> TimelineResources {
3392 377 : let remote_client = RemoteTimelineClient::new(
3393 377 : self.remote_storage.clone(),
3394 377 : self.deletion_queue_client.clone(),
3395 377 : self.conf,
3396 377 : self.tenant_shard_id,
3397 377 : timeline_id,
3398 377 : self.generation,
3399 377 : );
3400 377 : TimelineResources {
3401 377 : remote_client,
3402 377 : timeline_get_throttle: self.timeline_get_throttle.clone(),
3403 377 : }
3404 377 : }
3405 :
3406 : /// Creates intermediate timeline structure and its files.
3407 : ///
3408 : /// An empty layer map is initialized, and new data and WAL can be imported starting
3409 : /// at 'disk_consistent_lsn'. After any initial data has been imported, call
3410 : /// `finish_creation` to insert the Timeline into the timelines map.
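     : ///
     : /// A minimal sketch of the intended call pattern (cf. `bootstrap_timeline` and
     : /// `branch_timeline_impl` above; names abbreviated):
     : ///
     : /// ```ignore
     : /// let uninit = self
     : ///     .prepare_new_timeline(id, &metadata, guard, start_lsn, ancestor, aux_policy)
     : ///     .await?;
     : /// // ... optionally import initial data via uninit.raw_timeline()? ...
     : /// let timeline = uninit.finish_creation()?;
     : /// ```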
3411 377 : async fn prepare_new_timeline<'a>(
3412 377 : &'a self,
3413 377 : new_timeline_id: TimelineId,
3414 377 : new_metadata: &TimelineMetadata,
3415 377 : create_guard: TimelineCreateGuard<'a>,
3416 377 : start_lsn: Lsn,
3417 377 : ancestor: Option<Arc<Timeline>>,
3418 377 : last_aux_file_policy: Option<AuxFilePolicy>,
3419 377 : ) -> anyhow::Result<UninitializedTimeline> {
3420 377 : let tenant_shard_id = self.tenant_shard_id;
3421 377 :
3422 377 : let resources = self.build_timeline_resources(new_timeline_id);
3423 377 : resources
3424 377 : .remote_client
3425 377 : .init_upload_queue_for_empty_remote(new_metadata)?;
3426 :
3427 377 : let timeline_struct = self
3428 377 : .create_timeline_struct(
3429 377 : new_timeline_id,
3430 377 : new_metadata,
3431 377 : ancestor,
3432 377 : resources,
3433 377 : CreateTimelineCause::Load,
3434 377 : last_aux_file_policy,
3435 377 : )
3436 377 : .context("Failed to create timeline data structure")?;
3437 :
3438 377 : timeline_struct.init_empty_layer_map(start_lsn);
3439 :
3440 377 : if let Err(e) = self
3441 377 : .create_timeline_files(&create_guard.timeline_path)
3442 0 : .await
3443 : {
3444 0 : error!("Failed to create initial files for timeline {tenant_shard_id}/{new_timeline_id}, cleaning up: {e:?}");
3445 0 : cleanup_timeline_directory(create_guard);
3446 0 : return Err(e);
3447 377 : }
3448 377 :
3449 377 : debug!(
3450 0 : "Successfully created initial files for timeline {tenant_shard_id}/{new_timeline_id}"
3451 : );
3452 :
3453 377 : Ok(UninitializedTimeline::new(
3454 377 : self,
3455 377 : new_timeline_id,
3456 377 : Some((timeline_struct, create_guard)),
3457 377 : ))
3458 377 : }
3459 :
3460 377 : async fn create_timeline_files(&self, timeline_path: &Utf8Path) -> anyhow::Result<()> {
3461 377 : crashsafe::create_dir(timeline_path).context("Failed to create timeline directory")?;
3462 :
3463 377 : fail::fail_point!("after-timeline-dir-creation", |_| {
3464 0 : anyhow::bail!("failpoint after-timeline-dir-creation");
3465 377 : });
3466 :
3467 377 : Ok(())
3468 377 : }
3469 :
3470 : /// Get a guard that provides exclusive access to the timeline directory, preventing
3471 : /// concurrent attempts to create the same timeline.
3472 383 : fn create_timeline_create_guard(
3473 383 : &self,
3474 383 : timeline_id: TimelineId,
3475 383 : ) -> Result<TimelineCreateGuard, TimelineExclusionError> {
3476 383 : let tenant_shard_id = self.tenant_shard_id;
3477 383 :
3478 383 : let timeline_path = self.conf.timeline_path(&tenant_shard_id, &timeline_id);
3479 :
3480 383 : let create_guard = TimelineCreateGuard::new(self, timeline_id, timeline_path.clone())?;
3481 :
3482 : // At this stage, we have got exclusive access to in-memory state for this timeline ID
3483 : // for creation.
3484 : // A timeline directory should never exist on disk already:
3485 : // - a previous failed creation would have cleaned up after itself
3486 : // - a pageserver restart would clean up timeline directories that don't have valid remote state
3487 : //
3488 : // Therefore it is an unexpected internal error to encounter a timeline directory already existing here,
3489 : // this error may indicate a bug in cleanup on failed creations.
3490 381 : if timeline_path.exists() {
3491 0 : return Err(TimelineExclusionError::Other(anyhow::anyhow!(
3492 0 : "Timeline directory already exists! This is a bug."
3493 0 : )));
3494 381 : }
3495 381 :
3496 381 : Ok(create_guard)
3497 383 : }
3498 :
3499 : /// Gathers inputs from all of the timelines to produce a sizing model input.
3500 : ///
3501 : /// Future is cancellation safe. Only one calculation can be running at once per tenant.
3502 0 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
3503 : pub async fn gather_size_inputs(
3504 : &self,
3505 : // `max_retention_period` overrides the cutoff that is used to calculate the size
3506 : // (only if it is shorter than the real cutoff).
3507 : max_retention_period: Option<u64>,
3508 : cause: LogicalSizeCalculationCause,
3509 : cancel: &CancellationToken,
3510 : ctx: &RequestContext,
3511 : ) -> Result<size::ModelInputs, size::CalculateSyntheticSizeError> {
3512 : let logical_sizes_at_once = self
3513 : .conf
3514 : .concurrent_tenant_size_logical_size_queries
3515 : .inner();
3516 :
3517 : // TODO: Having a single mutex block concurrent reads is not great for performance.
3518 : //
3519 : // But the only case where we need to run multiple of these at once is when we
3520 : // request a size for a tenant manually via API, while another background calculation
3521 : // is in progress (which is not a common case).
3522 : //
3523 : // See issue #2748, condensed out of the initial PR review, for more.
3524 : let mut shared_cache = tokio::select! {
3525 : locked = self.cached_logical_sizes.lock() => locked,
3526 : _ = cancel.cancelled() => return Err(size::CalculateSyntheticSizeError::Cancelled),
3527 : _ = self.cancel.cancelled() => return Err(size::CalculateSyntheticSizeError::Cancelled),
3528 : };
3529 :
3530 : size::gather_inputs(
3531 : self,
3532 : logical_sizes_at_once,
3533 : max_retention_period,
3534 : &mut shared_cache,
3535 : cause,
3536 : cancel,
3537 : ctx,
3538 : )
3539 : .await
3540 : }
3541 :
3542 : /// Calculate synthetic tenant size and cache the result.
3543 : /// This is called periodically by a background worker.
3544 : /// The result is cached in the tenant struct.
3545 0 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
3546 : pub async fn calculate_synthetic_size(
3547 : &self,
3548 : cause: LogicalSizeCalculationCause,
3549 : cancel: &CancellationToken,
3550 : ctx: &RequestContext,
3551 : ) -> Result<u64, size::CalculateSyntheticSizeError> {
3552 : let inputs = self.gather_size_inputs(None, cause, cancel, ctx).await?;
3553 :
3554 : let size = inputs.calculate();
3555 :
3556 : self.set_cached_synthetic_size(size);
3557 :
3558 : Ok(size)
3559 : }
3560 :
3561 : /// Cache given synthetic size and update the metric value
3562 0 : pub fn set_cached_synthetic_size(&self, size: u64) {
3563 0 : self.cached_synthetic_tenant_size
3564 0 : .store(size, Ordering::Relaxed);
3565 0 :
3566 0 : // Only shard zero should be calculating synthetic sizes
3567 0 : debug_assert!(self.shard_identity.is_shard_zero());
3568 :
3569 0 : TENANT_SYNTHETIC_SIZE_METRIC
3570 0 : .get_metric_with_label_values(&[&self.tenant_shard_id.tenant_id.to_string()])
3571 0 : .unwrap()
3572 0 : .set(size);
3573 0 : }
3574 :
3575 0 : pub fn cached_synthetic_size(&self) -> u64 {
3576 0 : self.cached_synthetic_tenant_size.load(Ordering::Relaxed)
3577 0 : }
3578 :
3579 : /// Flush any in-progress layers, schedule uploads, and wait for uploads to complete.
3580 : ///
3581 : /// This function can take a long time: callers should wrap it in a timeout if calling
3582 : /// from an external API handler.
3583 : ///
3584 : /// Cancel-safety: cancelling this function may leave I/O running, but such I/O is
3585 : /// still bounded by tenant/timeline shutdown.
3586 0 : #[tracing::instrument(skip_all)]
3587 : pub(crate) async fn flush_remote(&self) -> anyhow::Result<()> {
3588 : let timelines = self.timelines.lock().unwrap().clone();
3589 :
3590 0 : async fn flush_timeline(_gate: GateGuard, timeline: Arc<Timeline>) -> anyhow::Result<()> {
3591 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Flushing...");
3592 0 : timeline.freeze_and_flush().await?;
3593 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Waiting for uploads...");
3594 0 : timeline.remote_client.wait_completion().await?;
3595 :
3596 0 : Ok(())
3597 0 : }
3598 :
3599 : // We do not use a JoinSet for these tasks, because we don't want them to be
3600 : // aborted when this function's future is cancelled: they should stay alive
3601 : // holding their GateGuard until they complete, to ensure their I/Os complete
3602 : // before Timeline shutdown completes.
3603 : let mut results = FuturesUnordered::new();
3604 :
3605 : for (_timeline_id, timeline) in timelines {
3606 : // Run each timeline's flush in a task holding the timeline's gate: this
3607 : // means that if this function's future is cancelled, the Timeline shutdown
3608 : // will still wait for any I/O in here to complete.
3609 : let Ok(gate) = timeline.gate.enter() else {
3610 : continue;
3611 : };
3612 0 : let jh = tokio::task::spawn(async move { flush_timeline(gate, timeline).await });
3613 : results.push(jh);
3614 : }
3615 :
3616 : while let Some(r) = results.next().await {
3617 : if let Err(e) = r {
3618 : if !e.is_cancelled() && !e.is_panic() {
3619 : tracing::error!("unexpected join error: {e:?}");
3620 : }
3621 : }
3622 : }
3623 :
3624 : // The flushes we did above were just writes, but the Tenant might have had
3625 : // pending deletions as well from recent compaction/gc: we want to flush those
3626 : // as well. This requires flushing the global delete queue. This is cheap
3627 : // because it's typically a no-op.
3628 : match self.deletion_queue_client.flush_execute().await {
3629 : Ok(_) => {}
3630 : Err(DeletionQueueError::ShuttingDown) => {}
3631 : }
3632 :
3633 : Ok(())
3634 : }
3635 :
3636 0 : pub(crate) fn get_tenant_conf(&self) -> TenantConfOpt {
3637 0 : self.tenant_conf.load().tenant_conf.clone()
3638 0 : }
3639 : }
3640 :
3641 : /// Create the cluster temporarily in the 'initdb_target_dir' directory inside the repository
3642 : /// to get bootstrap data for timeline initialization.
3643 0 : async fn run_initdb(
3644 0 : conf: &'static PageServerConf,
3645 0 : initdb_target_dir: &Utf8Path,
3646 0 : pg_version: u32,
3647 0 : cancel: &CancellationToken,
3648 0 : ) -> Result<(), InitdbError> {
3649 0 : let initdb_bin_path = conf
3650 0 : .pg_bin_dir(pg_version)
3651 0 : .map_err(InitdbError::Other)?
3652 0 : .join("initdb");
3653 0 : let initdb_lib_dir = conf.pg_lib_dir(pg_version).map_err(InitdbError::Other)?;
3654 0 : info!(
3655 0 : "running {} in {}, libdir: {}",
3656 : initdb_bin_path, initdb_target_dir, initdb_lib_dir,
3657 : );
3658 :
3659 0 : let _permit = INIT_DB_SEMAPHORE.acquire().await;
3660 :
3661 0 : let initdb_command = tokio::process::Command::new(&initdb_bin_path)
3662 0 : .args(["-D", initdb_target_dir.as_ref()])
3663 0 : .args(["-U", &conf.superuser])
3664 0 : .args(["-E", "utf8"])
3665 0 : .arg("--no-instructions")
3666 0 : .arg("--no-sync")
3667 0 : .env_clear()
3668 0 : .env("LD_LIBRARY_PATH", &initdb_lib_dir)
3669 0 : .env("DYLD_LIBRARY_PATH", &initdb_lib_dir)
3670 0 : .stdin(std::process::Stdio::null())
3671 0 : // initdb produces the same stdout output on every invocation; we don't need it
3672 0 : .stdout(std::process::Stdio::null())
3673 0 : // we would be interested in the stderr output, if there was any
3674 0 : .stderr(std::process::Stdio::piped())
3675 0 : .spawn()?;
3676 :
3677 : // Ideally we'd select here with the cancellation token, but the problem is that
3678 : // we can't safely terminate initdb: it launches processes of its own, and killing
3679 : // initdb doesn't kill them. After we return from this function, we want the target
3680 : // directory to be able to be cleaned up.
3681 : // See https://github.com/neondatabase/neon/issues/6385
3682 0 : let initdb_output = initdb_command.wait_with_output().await?;
3683 0 : if !initdb_output.status.success() {
3684 0 : return Err(InitdbError::Failed(
3685 0 : initdb_output.status,
3686 0 : initdb_output.stderr,
3687 0 : ));
3688 0 : }
3689 0 :
3690 0 : // This isn't true cancellation support, see above. Still return an error to
3691 0 : // exercise the cancellation code path.
3692 0 : if cancel.is_cancelled() {
3693 0 : return Err(InitdbError::Cancelled);
3694 0 : }
3695 0 :
3696 0 : Ok(())
3697 0 : }
3698 :
3699 : /// Dump contents of a layer file to stdout.
3700 0 : pub async fn dump_layerfile_from_path(
3701 0 : path: &Utf8Path,
3702 0 : verbose: bool,
3703 0 : ctx: &RequestContext,
3704 0 : ) -> anyhow::Result<()> {
3705 : use std::os::unix::fs::FileExt;
3706 :
3707 : // All layer files start with a two-byte "magic" value, to identify the kind of
3708 : // file.
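     : // (IMAGE_FILE_MAGIC or DELTA_FILE_MAGIC, read big-endian below.)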
3709 0 : let file = File::open(path)?;
3710 0 : let mut header_buf = [0u8; 2];
3711 0 : file.read_exact_at(&mut header_buf, 0)?;
3712 :
3713 0 : match u16::from_be_bytes(header_buf) {
3714 : crate::IMAGE_FILE_MAGIC => {
3715 0 : ImageLayer::new_for_path(path, file)?
3716 0 : .dump(verbose, ctx)
3717 0 : .await?
3718 : }
3719 : crate::DELTA_FILE_MAGIC => {
3720 0 : DeltaLayer::new_for_path(path, file)?
3721 0 : .dump(verbose, ctx)
3722 0 : .await?
3723 : }
3724 0 : magic => bail!("unrecognized magic identifier: {:?}", magic),
3725 : }
3726 :
3727 0 : Ok(())
3728 0 : }
3729 :
3730 : #[cfg(test)]
3731 : pub(crate) mod harness {
3732 : use bytes::{Bytes, BytesMut};
3733 : use once_cell::sync::OnceCell;
3734 : use pageserver_api::models::ShardParameters;
3735 : use pageserver_api::shard::ShardIndex;
3736 : use utils::logging;
3737 :
3738 : use crate::deletion_queue::mock::MockDeletionQueue;
3739 : use crate::walredo::apply_neon;
3740 : use crate::{repository::Key, walrecord::NeonWalRecord};
3741 :
3742 : use super::*;
3743 : use hex_literal::hex;
3744 : use utils::id::TenantId;
3745 :
3746 : pub const TIMELINE_ID: TimelineId =
3747 : TimelineId::from_array(hex!("11223344556677881122334455667788"));
3748 : pub const NEW_TIMELINE_ID: TimelineId =
3749 : TimelineId::from_array(hex!("AA223344556677881122334455667788"));
3750 :
3751 : /// Convenience function to create a page image with the given string as the only content
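     : /// (zero-padded to a fixed 64-byte image, so e.g. `test_img("foo")` yields
     : /// b"foo" followed by 61 zero bytes)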
3752 5028417 : pub fn test_img(s: &str) -> Bytes {
3753 5028417 : let mut buf = BytesMut::new();
3754 5028417 : buf.extend_from_slice(s.as_bytes());
3755 5028417 : buf.resize(64, 0);
3756 5028417 :
3757 5028417 : buf.freeze()
3758 5028417 : }
3759 :
3760 : impl From<TenantConf> for TenantConfOpt {
3761 161 : fn from(tenant_conf: TenantConf) -> Self {
3762 161 : Self {
3763 161 : checkpoint_distance: Some(tenant_conf.checkpoint_distance),
3764 161 : checkpoint_timeout: Some(tenant_conf.checkpoint_timeout),
3765 161 : compaction_target_size: Some(tenant_conf.compaction_target_size),
3766 161 : compaction_period: Some(tenant_conf.compaction_period),
3767 161 : compaction_threshold: Some(tenant_conf.compaction_threshold),
3768 161 : compaction_algorithm: Some(tenant_conf.compaction_algorithm),
3769 161 : gc_horizon: Some(tenant_conf.gc_horizon),
3770 161 : gc_period: Some(tenant_conf.gc_period),
3771 161 : image_creation_threshold: Some(tenant_conf.image_creation_threshold),
3772 161 : pitr_interval: Some(tenant_conf.pitr_interval),
3773 161 : walreceiver_connect_timeout: Some(tenant_conf.walreceiver_connect_timeout),
3774 161 : lagging_wal_timeout: Some(tenant_conf.lagging_wal_timeout),
3775 161 : max_lsn_wal_lag: Some(tenant_conf.max_lsn_wal_lag),
3776 161 : trace_read_requests: Some(tenant_conf.trace_read_requests),
3777 161 : eviction_policy: Some(tenant_conf.eviction_policy),
3778 161 : min_resident_size_override: tenant_conf.min_resident_size_override,
3779 161 : evictions_low_residence_duration_metric_threshold: Some(
3780 161 : tenant_conf.evictions_low_residence_duration_metric_threshold,
3781 161 : ),
3782 161 : heatmap_period: Some(tenant_conf.heatmap_period),
3783 161 : lazy_slru_download: Some(tenant_conf.lazy_slru_download),
3784 161 : timeline_get_throttle: Some(tenant_conf.timeline_get_throttle),
3785 161 : image_layer_creation_check_threshold: Some(
3786 161 : tenant_conf.image_layer_creation_check_threshold,
3787 161 : ),
3788 161 : switch_aux_file_policy: Some(tenant_conf.switch_aux_file_policy),
3789 161 : lsn_lease_length: Some(tenant_conf.lsn_lease_length),
3790 161 : lsn_lease_length_for_ts: Some(tenant_conf.lsn_lease_length_for_ts),
3791 161 : }
3792 161 : }
3793 : }
3794 :
3795 : pub struct TenantHarness {
3796 : pub conf: &'static PageServerConf,
3797 : pub tenant_conf: TenantConf,
3798 : pub tenant_shard_id: TenantShardId,
3799 : pub generation: Generation,
3800 : pub shard: ShardIndex,
3801 : pub remote_storage: GenericRemoteStorage,
3802 : pub remote_fs_dir: Utf8PathBuf,
3803 : pub deletion_queue: MockDeletionQueue,
3804 : }
3805 :
3806 : static LOG_HANDLE: OnceCell<()> = OnceCell::new();
3807 :
3808 167 : pub(crate) fn setup_logging() {
3809 167 : LOG_HANDLE.get_or_init(|| {
3810 155 : logging::init(
3811 155 : logging::LogFormat::Test,
3812 155 : // enable it in case the tests exercise code paths that use
3813 155 : // debug_assert_current_span_has_tenant_and_timeline_id
3814 155 : logging::TracingErrorLayerEnablement::EnableWithRustLogFilter,
3815 155 : logging::Output::Stdout,
3816 155 : )
3817 155 : .expect("Failed to init test logging")
3818 167 : });
3819 167 : }
3820 :
3821 : impl TenantHarness {
3822 161 : pub fn create_custom(
3823 161 : test_name: &'static str,
3824 161 : tenant_conf: TenantConf,
3825 161 : tenant_id: TenantId,
3826 161 : shard_identity: ShardIdentity,
3827 161 : generation: Generation,
3828 161 : ) -> anyhow::Result<Self> {
3829 161 : setup_logging();
3830 161 :
3831 161 : let repo_dir = PageServerConf::test_repo_dir(test_name);
3832 161 : let _ = fs::remove_dir_all(&repo_dir);
3833 161 : fs::create_dir_all(&repo_dir)?;
3834 :
3835 161 : let conf = PageServerConf::dummy_conf(repo_dir);
3836 161 : // Make a static copy of the config. This can never be freed, but that's
3837 161 : // OK in a test.
3838 161 : let conf: &'static PageServerConf = Box::leak(Box::new(conf));
3839 161 :
3840 161 : let shard = shard_identity.shard_index();
3841 161 : let tenant_shard_id = TenantShardId {
3842 161 : tenant_id,
3843 161 : shard_number: shard.shard_number,
3844 161 : shard_count: shard.shard_count,
3845 161 : };
3846 161 : fs::create_dir_all(conf.tenant_path(&tenant_shard_id))?;
3847 161 : fs::create_dir_all(conf.timelines_path(&tenant_shard_id))?;
3848 :
3849 : use remote_storage::{RemoteStorageConfig, RemoteStorageKind};
3850 161 : let remote_fs_dir = conf.workdir.join("localfs");
3851 161 : std::fs::create_dir_all(&remote_fs_dir).unwrap();
3852 161 : let config = RemoteStorageConfig {
3853 161 : storage: RemoteStorageKind::LocalFs {
3854 161 : local_path: remote_fs_dir.clone(),
3855 161 : },
3856 161 : timeout: RemoteStorageConfig::DEFAULT_TIMEOUT,
3857 161 : };
3858 161 : let remote_storage = GenericRemoteStorage::from_config(&config).unwrap();
3859 161 : let deletion_queue = MockDeletionQueue::new(Some(remote_storage.clone()));
3860 161 :
3861 161 : Ok(Self {
3862 161 : conf,
3863 161 : tenant_conf,
3864 161 : tenant_shard_id,
3865 161 : generation,
3866 161 : shard,
3867 161 : remote_storage,
3868 161 : remote_fs_dir,
3869 161 : deletion_queue,
3870 161 : })
3871 161 : }
3872 :
3873 149 : pub fn create(test_name: &'static str) -> anyhow::Result<Self> {
3874 149 : // Disable automatic GC and compaction to make the unit tests more deterministic.
3875 149 : // The tests perform them manually if needed.
3876 149 : let tenant_conf = TenantConf {
3877 149 : gc_period: Duration::ZERO,
3878 149 : compaction_period: Duration::ZERO,
3879 149 : ..TenantConf::default()
3880 149 : };
3881 149 : let tenant_id = TenantId::generate();
3882 149 : let shard = ShardIdentity::unsharded();
3883 149 : Self::create_custom(
3884 149 : test_name,
3885 149 : tenant_conf,
3886 149 : tenant_id,
3887 149 : shard,
3888 149 : Generation::new(0xdeadbeef),
3889 149 : )
3890 149 : }
3891 :
3892 18 : pub fn span(&self) -> tracing::Span {
3893 18 : info_span!("TenantHarness", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug())
3894 18 : }
3895 :
3896 161 : pub(crate) async fn load(&self) -> (Arc<Tenant>, RequestContext) {
3897 161 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
3898 161 : (
3899 161 : self.do_try_load(&ctx)
3900 634 : .await
3901 161 : .expect("failed to load test tenant"),
3902 161 : ctx,
3903 161 : )
3904 161 : }
3905 :
3906 322 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
3907 : pub(crate) async fn do_try_load(
3908 : &self,
3909 : ctx: &RequestContext,
3910 : ) -> anyhow::Result<Arc<Tenant>> {
3911 : let walredo_mgr = Arc::new(WalRedoManager::from(TestRedoManager));
3912 :
3913 : let tenant = Arc::new(Tenant::new(
3914 : TenantState::Loading,
3915 : self.conf,
3916 : AttachedTenantConf::try_from(LocationConf::attached_single(
3917 : TenantConfOpt::from(self.tenant_conf.clone()),
3918 : self.generation,
3919 : &ShardParameters::default(),
3920 : ))
3921 : .unwrap(),
3922 : // This is a legacy/test code path: sharding isn't supported here.
3923 : ShardIdentity::unsharded(),
3924 : Some(walredo_mgr),
3925 : self.tenant_shard_id,
3926 : self.remote_storage.clone(),
3927 : self.deletion_queue.new_client(),
3928 : ));
3929 :
3930 : let preload = tenant
3931 : .preload(&self.remote_storage, CancellationToken::new())
3932 : .await?;
3933 : tenant.attach(Some(preload), SpawnMode::Eager, ctx).await?;
3934 :
3935 : tenant.state.send_replace(TenantState::Active);
3936 : for timeline in tenant.timelines.lock().unwrap().values() {
3937 : timeline.set_state(TimelineState::Active);
3938 : }
3939 : Ok(tenant)
3940 : }
3941 :
3942 2 : pub fn timeline_path(&self, timeline_id: &TimelineId) -> Utf8PathBuf {
3943 2 : self.conf.timeline_path(&self.tenant_shard_id, timeline_id)
3944 2 : }
3945 : }
3946 :
3947 : // Mock WAL redo manager that doesn't do much
3948 : pub(crate) struct TestRedoManager;
3949 :
3950 : impl TestRedoManager {
3951 : /// # Cancel-Safety
3952 : ///
3953 : /// This method is cancellation-safe.
3954 54 : pub async fn request_redo(
3955 54 : &self,
3956 54 : key: Key,
3957 54 : lsn: Lsn,
3958 54 : base_img: Option<(Lsn, Bytes)>,
3959 54 : records: Vec<(Lsn, NeonWalRecord)>,
3960 54 : _pg_version: u32,
3961 54 : ) -> anyhow::Result<Bytes> {
3962 64 : let records_neon = records.iter().all(|r| apply_neon::can_apply_in_neon(&r.1));
3963 54 : if records_neon {
3964 : // For Neon wal records, we can decode without spawning postgres, so do so.
3965 54 : let base_img = base_img.expect("Neon WAL redo requires base image").1;
3966 54 : let mut page = BytesMut::new();
3967 54 : page.extend_from_slice(&base_img);
3968 118 : for (record_lsn, record) in records {
3969 64 : apply_neon::apply_in_neon(&record, record_lsn, key, &mut page)?;
3970 : }
3971 54 : Ok(page.freeze())
3972 : } else {
3973 : // We never spawn a postgres walredo process in unit tests: just log what we might have done.
3974 0 : let s = format!(
3975 0 : "redo for {} to get to {}, with {} and {} records",
3976 0 : key,
3977 0 : lsn,
3978 0 : if base_img.is_some() {
3979 0 : "base image"
3980 : } else {
3981 0 : "no base image"
3982 : },
3983 0 : records.len()
3984 0 : );
3985 0 : println!("{s}");
3986 0 :
3987 0 : Ok(test_img(&s))
3988 : }
3989 54 : }
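     :
     : // A minimal usage sketch (hypothetical key, LSNs, and records): redo a page
     : // by applying Neon WAL records on top of a base image.
     : //
     : // let mgr = TestRedoManager;
     : // let page = mgr
     : //     .request_redo(key, Lsn(0x20), Some((Lsn(0x10), base_img)), records, 14)
     : //     .await?;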
3990 : }
3991 : }
3992 :
3993 : #[cfg(test)]
3994 : mod tests {
3995 : use std::collections::BTreeMap;
3996 :
3997 : use super::*;
3998 : use crate::keyspace::KeySpaceAccum;
3999 : use crate::pgdatadir_mapping::AuxFilesDirectory;
4000 : use crate::repository::{Key, Value};
4001 : use crate::tenant::harness::*;
4002 : use crate::tenant::timeline::CompactFlags;
4003 : use crate::walrecord::NeonWalRecord;
4004 : use crate::DEFAULT_PG_VERSION;
4005 : use bytes::{Bytes, BytesMut};
4006 : use hex_literal::hex;
4007 : use itertools::Itertools;
4008 : use pageserver_api::key::{AUX_FILES_KEY, AUX_KEY_PREFIX, NON_INHERITED_RANGE};
4009 : use pageserver_api::keyspace::KeySpace;
4010 : use pageserver_api::models::{CompactionAlgorithm, CompactionAlgorithmSettings};
4011 : use rand::{thread_rng, Rng};
4012 : use storage_layer::PersistentLayerKey;
4013 : use tests::storage_layer::ValuesReconstructState;
4014 : use tests::timeline::{GetVectoredError, ShutdownMode};
4015 : use timeline::GcInfo;
4016 : use utils::bin_ser::BeSer;
4017 : use utils::id::TenantId;
4018 :
4019 : static TEST_KEY: Lazy<Key> =
4020 18 : Lazy::new(|| Key::from_slice(&hex!("010000000033333333444444445500000001")));
4021 :
4022 : #[tokio::test]
4023 2 : async fn test_basic() -> anyhow::Result<()> {
4024 8 : let (tenant, ctx) = TenantHarness::create("test_basic")?.load().await;
4025 2 : let tline = tenant
4026 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
4027 6 : .await?;
4028 2 :
4029 2 : let mut writer = tline.writer().await;
4030 2 : writer
4031 2 : .put(
4032 2 : *TEST_KEY,
4033 2 : Lsn(0x10),
4034 2 : &Value::Image(test_img("foo at 0x10")),
4035 2 : &ctx,
4036 2 : )
4037 2 : .await?;
4038 2 : writer.finish_write(Lsn(0x10));
4039 2 : drop(writer);
4040 2 :
4041 2 : let mut writer = tline.writer().await;
4042 2 : writer
4043 2 : .put(
4044 2 : *TEST_KEY,
4045 2 : Lsn(0x20),
4046 2 : &Value::Image(test_img("foo at 0x20")),
4047 2 : &ctx,
4048 2 : )
4049 2 : .await?;
4050 2 : writer.finish_write(Lsn(0x20));
4051 2 : drop(writer);
4052 2 :
4053 2 : assert_eq!(
4054 2 : tline.get(*TEST_KEY, Lsn(0x10), &ctx).await?,
4055 2 : test_img("foo at 0x10")
4056 2 : );
4057 2 : assert_eq!(
4058 2 : tline.get(*TEST_KEY, Lsn(0x1f), &ctx).await?,
4059 2 : test_img("foo at 0x10")
4060 2 : );
4061 2 : assert_eq!(
4062 2 : tline.get(*TEST_KEY, Lsn(0x20), &ctx).await?,
4063 2 : test_img("foo at 0x20")
4064 2 : );
4065 2 :
4066 2 : Ok(())
4067 2 : }
4068 :
4069 : #[tokio::test]
4070 2 : async fn no_duplicate_timelines() -> anyhow::Result<()> {
4071 2 : let (tenant, ctx) = TenantHarness::create("no_duplicate_timelines")?
4072 2 : .load()
4073 8 : .await;
4074 2 : let _ = tenant
4075 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4076 6 : .await?;
4077 2 :
4078 2 : match tenant
4079 2 : .create_empty_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4080 2 : .await
4081 2 : {
4082 2 : Ok(_) => panic!("duplicate timeline creation should fail"),
4083 2 : Err(e) => assert_eq!(e.to_string(), "Already exists".to_string()),
4084 2 : }
4085 2 :
4086 2 : Ok(())
4087 2 : }
4088 :
4089 : /// Convenience function to create a `Value::Image` with the given string as the only content
4090 10 : pub fn test_value(s: &str) -> Value {
4091 10 : let mut buf = BytesMut::new();
4092 10 : buf.extend_from_slice(s.as_bytes());
4093 10 : Value::Image(buf.freeze())
4094 10 : }
4095 :
4096 : ///
4097 : /// Test branch creation
4098 : ///
4099 : #[tokio::test]
4100 2 : async fn test_branch() -> anyhow::Result<()> {
4101 2 : use std::str::from_utf8;
4102 2 :
4103 8 : let (tenant, ctx) = TenantHarness::create("test_branch")?.load().await;
4104 2 : let tline = tenant
4105 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4106 5 : .await?;
4107 2 : let mut writer = tline.writer().await;
4108 2 :
4109 2 : #[allow(non_snake_case)]
4110 2 : let TEST_KEY_A: Key = Key::from_hex("110000000033333333444444445500000001").unwrap();
4111 2 : #[allow(non_snake_case)]
4112 2 : let TEST_KEY_B: Key = Key::from_hex("110000000033333333444444445500000002").unwrap();
4113 2 :
4114 2 : // Insert a value on the timeline
4115 2 : writer
4116 2 : .put(TEST_KEY_A, Lsn(0x20), &test_value("foo at 0x20"), &ctx)
4117 2 : .await?;
4118 2 : writer
4119 2 : .put(TEST_KEY_B, Lsn(0x20), &test_value("foobar at 0x20"), &ctx)
4120 2 : .await?;
4121 2 : writer.finish_write(Lsn(0x20));
4122 2 :
4123 2 : writer
4124 2 : .put(TEST_KEY_A, Lsn(0x30), &test_value("foo at 0x30"), &ctx)
4125 2 : .await?;
4126 2 : writer.finish_write(Lsn(0x30));
4127 2 : writer
4128 2 : .put(TEST_KEY_A, Lsn(0x40), &test_value("foo at 0x40"), &ctx)
4129 2 : .await?;
4130 2 : writer.finish_write(Lsn(0x40));
4131 2 :
4132 2 : //assert_current_logical_size(&tline, Lsn(0x40));
4133 2 :
4134 2 : // Branch the history, modify relation differently on the new timeline
4135 2 : tenant
4136 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x30)), &ctx)
4137 2 : .await?;
4138 2 : let newtline = tenant
4139 2 : .get_timeline(NEW_TIMELINE_ID, true)
4140 2 : .expect("Should have a local timeline");
4141 2 : let mut new_writer = newtline.writer().await;
4142 2 : new_writer
4143 2 : .put(TEST_KEY_A, Lsn(0x40), &test_value("bar at 0x40"), &ctx)
4144 2 : .await?;
4145 2 : new_writer.finish_write(Lsn(0x40));
4146 2 :
4147 2 : // Check page contents on both branches
4148 2 : assert_eq!(
4149 2 : from_utf8(&tline.get(TEST_KEY_A, Lsn(0x40), &ctx).await?)?,
4150 2 : "foo at 0x40"
4151 2 : );
4152 2 : assert_eq!(
4153 2 : from_utf8(&newtline.get(TEST_KEY_A, Lsn(0x40), &ctx).await?)?,
4154 2 : "bar at 0x40"
4155 2 : );
4156 2 : assert_eq!(
4157 2 : from_utf8(&newtline.get(TEST_KEY_B, Lsn(0x40), &ctx).await?)?,
4158 2 : "foobar at 0x20"
4159 2 : );
4160 2 :
4161 2 : //assert_current_logical_size(&tline, Lsn(0x40));
4162 2 :
4163 2 : Ok(())
4164 2 : }
4165 :
4166 20 : async fn make_some_layers(
4167 20 : tline: &Timeline,
4168 20 : start_lsn: Lsn,
4169 20 : ctx: &RequestContext,
4170 20 : ) -> anyhow::Result<()> {
4171 20 : let mut lsn = start_lsn;
4172 : {
4173 20 : let mut writer = tline.writer().await;
4174 : // Create a relation on the timeline
4175 20 : writer
4176 20 : .put(
4177 20 : *TEST_KEY,
4178 20 : lsn,
4179 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4180 20 : ctx,
4181 20 : )
4182 10 : .await?;
4183 20 : writer.finish_write(lsn);
4184 20 : lsn += 0x10;
4185 20 : writer
4186 20 : .put(
4187 20 : *TEST_KEY,
4188 20 : lsn,
4189 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4190 20 : ctx,
4191 20 : )
4192 0 : .await?;
4193 20 : writer.finish_write(lsn);
4194 20 : lsn += 0x10;
4195 20 : }
4196 20 : tline.freeze_and_flush().await?;
4197 : {
4198 20 : let mut writer = tline.writer().await;
4199 20 : writer
4200 20 : .put(
4201 20 : *TEST_KEY,
4202 20 : lsn,
4203 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4204 20 : ctx,
4205 20 : )
4206 10 : .await?;
4207 20 : writer.finish_write(lsn);
4208 20 : lsn += 0x10;
4209 20 : writer
4210 20 : .put(
4211 20 : *TEST_KEY,
4212 20 : lsn,
4213 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4214 20 : ctx,
4215 20 : )
4216 0 : .await?;
4217 20 : writer.finish_write(lsn);
4218 20 : }
4219 20 : tline.freeze_and_flush().await.map_err(|e| e.into())
4220 20 : }
4221 :
4222 : #[tokio::test]
4223 2 : async fn test_prohibit_branch_creation_on_garbage_collected_data() -> anyhow::Result<()> {
4224 2 : let (tenant, ctx) =
4225 2 : TenantHarness::create("test_prohibit_branch_creation_on_garbage_collected_data")?
4226 2 : .load()
4227 8 : .await;
4228 2 : let tline = tenant
4229 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4230 6 : .await?;
4231 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4232 2 :
4233 2 : // this removes layers before lsn 40 (50 minus 10), so there are two remaining layers, image and delta for 31-50
4234 2 : // FIXME: this doesn't actually remove any layer currently, given how the flushing
4235 2 : // and compaction works. But it does set the 'cutoff' point so that the cross check
4236 2 : // below should fail.
4237 2 : tenant
4238 2 : .gc_iteration(
4239 2 : Some(TIMELINE_ID),
4240 2 : 0x10,
4241 2 : Duration::ZERO,
4242 2 : &CancellationToken::new(),
4243 2 : &ctx,
4244 2 : )
4245 2 : .await?;
4246 2 :
4247 2 : // try to branch at lsn 25, should fail because we already garbage collected the data
4248 2 : match tenant
4249 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x25)), &ctx)
4250 2 : .await
4251 2 : {
4252 2 : Ok(_) => panic!("branching should have failed"),
4253 2 : Err(err) => {
4254 2 : let CreateTimelineError::AncestorLsn(err) = err else {
4255 2 : panic!("wrong error type")
4256 2 : };
4257 2 : assert!(err.to_string().contains("invalid branch start lsn"));
4258 2 : assert!(err
4259 2 : .source()
4260 2 : .unwrap()
4261 2 : .to_string()
4262 2 : .contains("we might've already garbage collected needed data"))
4263 2 : }
4264 2 : }
4265 2 :
4266 2 : Ok(())
4267 2 : }
4268 :
4269 : #[tokio::test]
4270 2 : async fn test_prohibit_branch_creation_on_pre_initdb_lsn() -> anyhow::Result<()> {
4271 2 : let (tenant, ctx) =
4272 2 : TenantHarness::create("test_prohibit_branch_creation_on_pre_initdb_lsn")?
4273 2 : .load()
4274 8 : .await;
4275 2 :
4276 2 : let tline = tenant
4277 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x50), DEFAULT_PG_VERSION, &ctx)
4278 6 : .await?;
4279 2 : // try to branch at lsn 0x25, should fail because initdb lsn is 0x50
4280 2 : match tenant
4281 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x25)), &ctx)
4282 2 : .await
4283 2 : {
4284 2 : Ok(_) => panic!("branching should have failed"),
4285 2 : Err(err) => {
4286 2 : let CreateTimelineError::AncestorLsn(err) = err else {
4287 2 : panic!("wrong error type");
4288 2 : };
4289 2 : assert!(&err.to_string().contains("invalid branch start lsn"));
4290 2 : assert!(&err
4291 2 : .source()
4292 2 : .unwrap()
4293 2 : .to_string()
4294 2 : .contains("is earlier than latest GC horizon"));
4295 2 : }
4296 2 : }
4297 2 :
4298 2 : Ok(())
4299 2 : }
4300 :
4301 : /*
4302 : // FIXME: This currently fails to error out. Calling GC doesn't currently
4303 : // remove the old value, we'd need to work a little harder
4304 : #[tokio::test]
4305 : async fn test_prohibit_get_for_garbage_collected_data() -> anyhow::Result<()> {
4306 : let repo =
4307 : RepoHarness::create("test_prohibit_get_for_garbage_collected_data")?
4308 : .load();
4309 :
4310 : let tline = repo.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
4311 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4312 :
4313 : repo.gc_iteration(Some(TIMELINE_ID), 0x10, Duration::ZERO)?;
4314 : let latest_gc_cutoff_lsn = tline.get_latest_gc_cutoff_lsn();
4315 : assert!(*latest_gc_cutoff_lsn > Lsn(0x25));
4316 : match tline.get(*TEST_KEY, Lsn(0x25)) {
4317 : Ok(_) => panic!("request for page should have failed"),
4318 : Err(err) => assert!(err.to_string().contains("not found at")),
4319 : }
4320 : Ok(())
4321 : }
4322 : */
4323 :
4324 : #[tokio::test]
4325 2 : async fn test_get_branchpoints_from_an_inactive_timeline() -> anyhow::Result<()> {
4326 2 : let (tenant, ctx) =
4327 2 : TenantHarness::create("test_get_branchpoints_from_an_inactive_timeline")?
4328 2 : .load()
4329 8 : .await;
4330 2 : let tline = tenant
4331 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4332 6 : .await?;
4333 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4334 2 :
4335 2 : tenant
4336 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4337 2 : .await?;
4338 2 : let newtline = tenant
4339 2 : .get_timeline(NEW_TIMELINE_ID, true)
4340 2 : .expect("Should have a local timeline");
4341 2 :
4342 6 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
4343 2 :
4344 2 : tline.set_broken("test".to_owned());
4345 2 :
4346 2 : tenant
4347 2 : .gc_iteration(
4348 2 : Some(TIMELINE_ID),
4349 2 : 0x10,
4350 2 : Duration::ZERO,
4351 2 : &CancellationToken::new(),
4352 2 : &ctx,
4353 2 : )
4354 2 : .await?;
4355 2 :
4356 2 : // The branchpoints should contain all timelines, even ones marked
4357 2 : // as Broken.
4358 2 : {
4359 2 : let branchpoints = &tline.gc_info.read().unwrap().retain_lsns;
4360 2 : assert_eq!(branchpoints.len(), 1);
4361 2 : assert_eq!(branchpoints[0], Lsn(0x40));
4362 2 : }
4363 2 :
4364 2 : // You can read the key from the child branch even though the parent is
4365 2 : // Broken, as long as you don't need to access data from the parent.
4366 2 : assert_eq!(
4367 4 : newtline.get(*TEST_KEY, Lsn(0x70), &ctx).await?,
4368 2 : test_img(&format!("foo at {}", Lsn(0x70)))
4369 2 : );
4370 2 :
4371 2 : // This needs to traverse to the parent, and fails.
4372 2 : let err = newtline.get(*TEST_KEY, Lsn(0x50), &ctx).await.unwrap_err();
4373 2 : assert!(err.to_string().starts_with(&format!(
4374 2 : "Bad state on timeline {}: Broken",
4375 2 : tline.timeline_id
4376 2 : )));
4377 2 :
4378 2 : Ok(())
4379 2 : }
4380 :
4381 : #[tokio::test]
4382 2 : async fn test_retain_data_in_parent_which_is_needed_for_child() -> anyhow::Result<()> {
4383 2 : let (tenant, ctx) =
4384 2 : TenantHarness::create("test_retain_data_in_parent_which_is_needed_for_child")?
4385 2 : .load()
4386 7 : .await;
4387 2 : let tline = tenant
4388 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4389 6 : .await?;
4390 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4391 2 :
4392 2 : tenant
4393 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4394 2 : .await?;
4395 2 : let newtline = tenant
4396 2 : .get_timeline(NEW_TIMELINE_ID, true)
4397 2 : .expect("Should have a local timeline");
4398 2 : // this removes layers before lsn 40 (50 minus 10), so there are two remaining layers, image and delta for 31-50
4399 2 : tenant
4400 2 : .gc_iteration(
4401 2 : Some(TIMELINE_ID),
4402 2 : 0x10,
4403 2 : Duration::ZERO,
4404 2 : &CancellationToken::new(),
4405 2 : &ctx,
4406 2 : )
4407 2 : .await?;
4408 4 : assert!(newtline.get(*TEST_KEY, Lsn(0x25), &ctx).await.is_ok());
4409 2 :
4410 2 : Ok(())
4411 2 : }
4412 : #[tokio::test]
4413 2 : async fn test_parent_keeps_data_forever_after_branching() -> anyhow::Result<()> {
4414 2 : let (tenant, ctx) =
4415 2 : TenantHarness::create("test_parent_keeps_data_forever_after_branching")?
4416 2 : .load()
4417 8 : .await;
4418 2 : let tline = tenant
4419 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4420 6 : .await?;
4421 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4422 2 :
4423 2 : tenant
4424 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4425 2 : .await?;
4426 2 : let newtline = tenant
4427 2 : .get_timeline(NEW_TIMELINE_ID, true)
4428 2 : .expect("Should have a local timeline");
4429 2 :
4430 6 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
4431 2 :
4432 2 : // run gc on parent
4433 2 : tenant
4434 2 : .gc_iteration(
4435 2 : Some(TIMELINE_ID),
4436 2 : 0x10,
4437 2 : Duration::ZERO,
4438 2 : &CancellationToken::new(),
4439 2 : &ctx,
4440 2 : )
4441 2 : .await?;
4442 2 :
4443 2 : // Check that the data is still accessible on the branch.
4444 2 : assert_eq!(
4445 7 : newtline.get(*TEST_KEY, Lsn(0x50), &ctx).await?,
4446 2 : test_img(&format!("foo at {}", Lsn(0x40)))
4447 2 : );
4448 2 :
4449 2 : Ok(())
4450 2 : }
4451 :
4452 : #[tokio::test]
4453 2 : async fn timeline_load() -> anyhow::Result<()> {
4454 2 : const TEST_NAME: &str = "timeline_load";
4455 2 : let harness = TenantHarness::create(TEST_NAME)?;
4456 2 : {
4457 8 : let (tenant, ctx) = harness.load().await;
4458 2 : let tline = tenant
4459 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x7000), DEFAULT_PG_VERSION, &ctx)
4460 6 : .await?;
4461 6 : make_some_layers(tline.as_ref(), Lsn(0x8000), &ctx).await?;
4462 2 : // so that all uploads finish & we can call harness.load() below again
4463 2 : tenant
4464 2 : .shutdown(Default::default(), ShutdownMode::FreezeAndFlush)
4465 2 : .instrument(harness.span())
4466 2 : .await
4467 2 : .ok()
4468 2 : .unwrap();
4469 2 : }
4470 2 :
4471 7 : let (tenant, _ctx) = harness.load().await;
4472 2 : tenant
4473 2 : .get_timeline(TIMELINE_ID, true)
4474 2 : .expect("cannot load timeline");
4475 2 :
4476 2 : Ok(())
4477 2 : }
4478 :
4479 : #[tokio::test]
4480 2 : async fn timeline_load_with_ancestor() -> anyhow::Result<()> {
4481 2 : const TEST_NAME: &str = "timeline_load_with_ancestor";
4482 2 : let harness = TenantHarness::create(TEST_NAME)?;
4483 2 : // create two timelines
4484 2 : {
4485 8 : let (tenant, ctx) = harness.load().await;
4486 2 : let tline = tenant
4487 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4488 6 : .await?;
4489 2 :
4490 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4491 2 :
4492 2 : let child_tline = tenant
4493 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4494 2 : .await?;
4495 2 : child_tline.set_state(TimelineState::Active);
4496 2 :
4497 2 : let newtline = tenant
4498 2 : .get_timeline(NEW_TIMELINE_ID, true)
4499 2 : .expect("Should have a local timeline");
4500 2 :
4501 6 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
4502 2 :
4503 2 : // so that all uploads finish & we can call harness.load() below again
4504 2 : tenant
4505 2 : .shutdown(Default::default(), ShutdownMode::FreezeAndFlush)
4506 2 : .instrument(harness.span())
4507 4 : .await
4508 2 : .ok()
4509 2 : .unwrap();
4510 2 : }
4511 2 :
4512 2 : // check that both of them are initially unloaded
4513 14 : let (tenant, _ctx) = harness.load().await;
4514 2 :
4515 2 : // check that both the child and the ancestor are loaded
4516 2 : let _child_tline = tenant
4517 2 : .get_timeline(NEW_TIMELINE_ID, true)
4518 2 : .expect("cannot get child timeline loaded");
4519 2 :
4520 2 : let _ancestor_tline = tenant
4521 2 : .get_timeline(TIMELINE_ID, true)
4522 2 : .expect("cannot get ancestor timeline loaded");
4523 2 :
4524 2 : Ok(())
4525 2 : }
4526 :
4527 : #[tokio::test]
4528 2 : async fn delta_layer_dumping() -> anyhow::Result<()> {
4529 2 : use storage_layer::AsLayerDesc;
4530 8 : let (tenant, ctx) = TenantHarness::create("test_layer_dumping")?.load().await;
4531 2 : let tline = tenant
4532 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4533 6 : .await?;
4534 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4535 2 :
4536 2 : let layer_map = tline.layers.read().await;
4537 2 : let level0_deltas = layer_map
4538 2 : .layer_map()
4539 2 : .get_level0_deltas()?
4540 2 : .into_iter()
4541 4 : .map(|desc| layer_map.get_from_desc(&desc))
4542 2 : .collect::<Vec<_>>();
4543 2 :
4544 2 : assert!(!level0_deltas.is_empty());
4545 2 :
4546 6 : for delta in level0_deltas {
4547 2 : // Ensure we are dumping a delta layer here
4548 4 : assert!(delta.layer_desc().is_delta);
4549 8 : delta.dump(true, &ctx).await.unwrap();
4550 2 : }
4551 2 :
4552 2 : Ok(())
4553 2 : }
4554 :
4555 : #[tokio::test]
4556 2 : async fn test_images() -> anyhow::Result<()> {
4557 8 : let (tenant, ctx) = TenantHarness::create("test_images")?.load().await;
4558 2 : let tline = tenant
4559 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
4560 6 : .await?;
4561 2 :
4562 2 : let mut writer = tline.writer().await;
4563 2 : writer
4564 2 : .put(
4565 2 : *TEST_KEY,
4566 2 : Lsn(0x10),
4567 2 : &Value::Image(test_img("foo at 0x10")),
4568 2 : &ctx,
4569 2 : )
4570 2 : .await?;
4571 2 : writer.finish_write(Lsn(0x10));
4572 2 : drop(writer);
4573 2 :
4574 2 : tline.freeze_and_flush().await?;
4575 2 : tline
4576 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4577 2 : .await?;
4578 2 :
4579 2 : let mut writer = tline.writer().await;
4580 2 : writer
4581 2 : .put(
4582 2 : *TEST_KEY,
4583 2 : Lsn(0x20),
4584 2 : &Value::Image(test_img("foo at 0x20")),
4585 2 : &ctx,
4586 2 : )
4587 2 : .await?;
4588 2 : writer.finish_write(Lsn(0x20));
4589 2 : drop(writer);
4590 2 :
4591 2 : tline.freeze_and_flush().await?;
4592 2 : tline
4593 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4594 2 : .await?;
4595 2 :
4596 2 : let mut writer = tline.writer().await;
4597 2 : writer
4598 2 : .put(
4599 2 : *TEST_KEY,
4600 2 : Lsn(0x30),
4601 2 : &Value::Image(test_img("foo at 0x30")),
4602 2 : &ctx,
4603 2 : )
4604 2 : .await?;
4605 2 : writer.finish_write(Lsn(0x30));
4606 2 : drop(writer);
4607 2 :
4608 2 : tline.freeze_and_flush().await?;
4609 2 : tline
4610 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4611 2 : .await?;
4612 2 :
4613 2 : let mut writer = tline.writer().await;
4614 2 : writer
4615 2 : .put(
4616 2 : *TEST_KEY,
4617 2 : Lsn(0x40),
4618 2 : &Value::Image(test_img("foo at 0x40")),
4619 2 : &ctx,
4620 2 : )
4621 2 : .await?;
4622 2 : writer.finish_write(Lsn(0x40));
4623 2 : drop(writer);
4624 2 :
4625 2 : tline.freeze_and_flush().await?;
4626 2 : tline
4627 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4628 2 : .await?;
4629 2 :
4630 2 : assert_eq!(
4631 4 : tline.get(*TEST_KEY, Lsn(0x10), &ctx).await?,
4632 2 : test_img("foo at 0x10")
4633 2 : );
4634 2 : assert_eq!(
4635 3 : tline.get(*TEST_KEY, Lsn(0x1f), &ctx).await?,
4636 2 : test_img("foo at 0x10")
4637 2 : );
4638 2 : assert_eq!(
4639 2 : tline.get(*TEST_KEY, Lsn(0x20), &ctx).await?,
4640 2 : test_img("foo at 0x20")
4641 2 : );
4642 2 : assert_eq!(
4643 4 : tline.get(*TEST_KEY, Lsn(0x30), &ctx).await?,
4644 2 : test_img("foo at 0x30")
4645 2 : );
4646 2 : assert_eq!(
4647 4 : tline.get(*TEST_KEY, Lsn(0x40), &ctx).await?,
4648 2 : test_img("foo at 0x40")
4649 2 : );
4650 2 :
4651 2 : Ok(())
4652 2 : }
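
Each round in `test_images` follows the same write, `freeze_and_flush`, `compact` rhythm: flushing freezes the open in-memory layer into a delta layer on disk, and compaction can then build image layers from it. A helper capturing one round might look like this (a sketch, assuming this module's `TEST_KEY`, `test_img`, and context types):

```rust
// One write-and-checkpoint round (sketch): put an image at `lsn`, flush the
// in-memory layer to disk, then run a plain compaction pass.
async fn write_and_checkpoint(
    tline: &Arc<Timeline>,
    lsn: Lsn,
    ctx: &RequestContext,
) -> anyhow::Result<()> {
    let mut writer = tline.writer().await;
    writer
        .put(*TEST_KEY, lsn, &Value::Image(test_img(&format!("foo at {}", lsn))), ctx)
        .await?;
    writer.finish_write(lsn);
    drop(writer); // release the writer before flushing

    tline.freeze_and_flush().await?;
    tline.compact(&CancellationToken::new(), EnumSet::empty(), ctx).await?;
    Ok(())
}
```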
4653 :
4654 4 : async fn bulk_insert_compact_gc(
4655 4 : tenant: &Tenant,
4656 4 : timeline: &Arc<Timeline>,
4657 4 : ctx: &RequestContext,
4658 4 : lsn: Lsn,
4659 4 : repeat: usize,
4660 4 : key_count: usize,
4661 4 : ) -> anyhow::Result<()> {
4662 4 : let compact = true;
4663 72774 : bulk_insert_maybe_compact_gc(tenant, timeline, ctx, lsn, repeat, key_count, compact).await
4664 4 : }
4665 :
4666 8 : async fn bulk_insert_maybe_compact_gc(
4667 8 : tenant: &Tenant,
4668 8 : timeline: &Arc<Timeline>,
4669 8 : ctx: &RequestContext,
4670 8 : mut lsn: Lsn,
4671 8 : repeat: usize,
4672 8 : key_count: usize,
4673 8 : compact: bool,
4674 8 : ) -> anyhow::Result<()> {
4675 8 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
4676 8 : let mut blknum = 0;
4677 8 :
4678 8 : // Enforce that the key range is monotonically increasing
4679 8 : let mut keyspace = KeySpaceAccum::new();
4680 8 :
4681 8 : let cancel = CancellationToken::new();
4682 8 :
4683 8 : for _ in 0..repeat {
4684 400 : for _ in 0..key_count {
4685 4000000 : test_key.field6 = blknum;
4686 4000000 : let mut writer = timeline.writer().await;
4687 4000000 : writer
4688 4000000 : .put(
4689 4000000 : test_key,
4690 4000000 : lsn,
4691 4000000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
4692 4000000 : ctx,
4693 4000000 : )
4694 2600 : .await?;
4695 4000000 : writer.finish_write(lsn);
4696 4000000 : drop(writer);
4697 4000000 :
4698 4000000 : keyspace.add_key(test_key);
4699 4000000 :
4700 4000000 : lsn = Lsn(lsn.0 + 0x10);
4701 4000000 : blknum += 1;
4702 : }
4703 :
4704 400 : timeline.freeze_and_flush().await?;
4705 400 : if compact {
4706 : // this requires timeline to be &Arc<Timeline>
4707 40174 : timeline.compact(&cancel, EnumSet::empty(), ctx).await?;
4708 200 : }
4709 :
4710 : // this doesn't really need to use the timeline_id target, but it is closer to what it
4711 : // originally was.
4712 400 : let res = tenant
4713 400 : .gc_iteration(Some(timeline.timeline_id), 0, Duration::ZERO, &cancel, ctx)
4714 400 : .await?;
4715 :
4716 400 : assert_eq!(res.layers_removed, 0, "this never removes anything");
4717 : }
4718 :
4719 8 : Ok(())
4720 8 : }
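
For callers, the knobs are the starting LSN, the number of batches (`repeat`), and the keys per batch; passing `compact: false` exercises the GC path without compaction, as `test_read_at_max_lsn` does below. A usage sketch with illustrative sizes:

```rust
// Illustrative sizes only: 5 batches of 100 keys starting at LSN 0x10,
// flushing, compacting, and GC-ing after each batch.
bulk_insert_compact_gc(&tenant, &tline, &ctx, Lsn(0x10), 5, 100).await?;
```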
4721 :
4722 : //
4723 : // Insert 10000 key-value pairs with increasing keys, then flush, compact, and GC.
4724 : // Repeat 50 times.
4725 : //
4726 : #[tokio::test]
4727 2 : async fn test_bulk_insert() -> anyhow::Result<()> {
4728 2 : let harness = TenantHarness::create("test_bulk_insert")?;
4729 8 : let (tenant, ctx) = harness.load().await;
4730 2 : let tline = tenant
4731 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
4732 5 : .await?;
4733 2 :
4734 2 : let lsn = Lsn(0x10);
4735 36387 : bulk_insert_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000).await?;
4736 2 :
4737 2 : Ok(())
4738 2 : }
4739 :
4740 : // Test the vectored get real implementation against a simple sequential implementation.
4741 : //
4742 : // The test generates a keyspace by repeatedly flushing the in-memory layer and compacting.
4743 : // Projected to 2D the key space looks like below. Lsn grows upwards on the Y axis and keys
4744 : // grow to the right on the X axis.
4745 : // [Delta]
4746 : // [Delta]
4747 : // [Delta]
4748 : // [Delta]
4749 : // ------------ Image ---------------
4750 : //
4751 : // After layer generation we pick the ranges to query as follows:
4752 : // 1. The beginning of each delta layer
4753 : // 2. At the seam between two adjacent delta layers
4754 : //
4755 : // There's one major downside to this test: delta layers only contain images,
4756 : // so the search can stop at the first delta layer and doesn't traverse any deeper.
4757 : #[tokio::test]
4758 2 : async fn test_get_vectored() -> anyhow::Result<()> {
4759 2 : let harness = TenantHarness::create("test_get_vectored")?;
4760 8 : let (tenant, ctx) = harness.load().await;
4761 2 : let tline = tenant
4762 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
4763 6 : .await?;
4764 2 :
4765 2 : let lsn = Lsn(0x10);
4766 36387 : bulk_insert_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000).await?;
4767 2 :
4768 2 : let guard = tline.layers.read().await;
4769 2 : guard.layer_map().dump(true, &ctx).await?;
4770 2 :
4771 2 : let mut reads = Vec::new();
4772 2 : let mut prev = None;
4773 12 : guard.layer_map().iter_historic_layers().for_each(|desc| {
4774 12 : if !desc.is_delta() {
4775 2 : prev = Some(desc.clone());
4776 2 : return;
4777 10 : }
4778 10 :
4779 10 : let start = desc.key_range.start;
4780 10 : let end = desc
4781 10 : .key_range
4782 10 : .start
4783 10 : .add(Timeline::MAX_GET_VECTORED_KEYS.try_into().unwrap());
4784 10 : reads.push(KeySpace {
4785 10 : ranges: vec![start..end],
4786 10 : });
4787 2 :
4788 10 : if let Some(prev) = &prev {
4789 10 : if !prev.is_delta() {
4790 10 : return;
4791 2 : }
4792 0 :
4793 0 : let first_range = Key {
4794 0 : field6: prev.key_range.end.field6 - 4,
4795 0 : ..prev.key_range.end
4796 0 : }..prev.key_range.end;
4797 0 :
4798 0 : let second_range = desc.key_range.start..Key {
4799 0 : field6: desc.key_range.start.field6 + 4,
4800 0 : ..desc.key_range.start
4801 0 : };
4802 0 :
4803 0 : reads.push(KeySpace {
4804 0 : ranges: vec![first_range, second_range],
4805 0 : });
4806 2 : };
4807 2 :
4808 2 : prev = Some(desc.clone());
4809 12 : });
4810 2 :
4811 2 : drop(guard);
4812 2 :
4813 2 : // Pick a big LSN such that we query over all the changes.
4814 2 : let reads_lsn = Lsn(u64::MAX - 1);
4815 2 :
4816 12 : for read in reads {
4817 10 : info!("Doing vectored read on {:?}", read);
4818 2 :
4819 10 : let vectored_res = tline
4820 10 : .get_vectored_impl(
4821 10 : read.clone(),
4822 10 : reads_lsn,
4823 10 : &mut ValuesReconstructState::new(),
4824 10 : &ctx,
4825 10 : )
4826 25 : .await;
4827 10 : tline
4828 10 : .validate_get_vectored_impl(&vectored_res, read, reads_lsn, &ctx)
4829 19 : .await;
4830 2 : }
4831 2 :
4832 2 : Ok(())
4833 2 : }
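
`validate_get_vectored_impl` performs the sequential cross-check internally; a hand-rolled version of the same comparison, sketched over one small contiguous range (assuming the keys in the range were written at or before the read LSN):

```rust
// Sketch: vectored read over one small range, cross-checked key by key
// against the sequential read path at the same LSN.
let start = Key::from_hex("010000000033333333444444445500000000").unwrap();
let read = KeySpace { ranges: vec![start..start.add(8)] };
let lsn = Lsn(0x10);
let vectored = tline
    .get_vectored_impl(read, lsn, &mut ValuesReconstructState::new(), &ctx)
    .await?;
for (key, value) in vectored {
    // Each key must reconstruct to the same image either way.
    assert_eq!(value?, tline.get(key, lsn, &ctx).await?);
}
```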
4834 :
4835 : #[tokio::test]
4836 2 : async fn test_get_vectored_aux_files() -> anyhow::Result<()> {
4837 2 : let harness = TenantHarness::create("test_get_vectored_aux_files")?;
4838 2 :
4839 8 : let (tenant, ctx) = harness.load().await;
4840 2 : let tline = tenant
4841 2 : .create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION, &ctx)
4842 2 : .await?;
4843 2 : let tline = tline.raw_timeline().unwrap();
4844 2 :
4845 2 : let mut modification = tline.begin_modification(Lsn(0x1000));
4846 2 : modification.put_file("foo/bar1", b"content1", &ctx).await?;
4847 2 : modification.set_lsn(Lsn(0x1008))?;
4848 2 : modification.put_file("foo/bar2", b"content2", &ctx).await?;
4849 2 : modification.commit(&ctx).await?;
4850 2 :
4851 2 : let child_timeline_id = TimelineId::generate();
4852 2 : tenant
4853 2 : .branch_timeline_test(
4854 2 : tline,
4855 2 : child_timeline_id,
4856 2 : Some(tline.get_last_record_lsn()),
4857 2 : &ctx,
4858 2 : )
4859 2 : .await?;
4860 2 :
4861 2 : let child_timeline = tenant
4862 2 : .get_timeline(child_timeline_id, true)
4863 2 : .expect("Should have the branched timeline");
4864 2 :
4865 2 : let aux_keyspace = KeySpace {
4866 2 : ranges: vec![NON_INHERITED_RANGE],
4867 2 : };
4868 2 : let read_lsn = child_timeline.get_last_record_lsn();
4869 2 :
4870 2 : let vectored_res = child_timeline
4871 2 : .get_vectored_impl(
4872 2 : aux_keyspace.clone(),
4873 2 : read_lsn,
4874 2 : &mut ValuesReconstructState::new(),
4875 2 : &ctx,
4876 2 : )
4877 2 : .await;
4878 2 :
4879 2 : child_timeline
4880 2 : .validate_get_vectored_impl(&vectored_res, aux_keyspace, read_lsn, &ctx)
4881 2 : .await;
4882 2 :
4883 2 : let images = vectored_res?;
4884 2 : assert!(images.is_empty());
4885 2 : Ok(())
4886 2 : }
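
Aux files enter the timeline through the modification API used above: open a modification at an LSN, stage one or more files, then commit. A minimal sketch; the path, payload, and LSN here are illustrative, not taken from the test:

```rust
// Sketch: stage and commit one aux file, then read it back via the listing.
let mut modification = tline.begin_modification(Lsn(0x1010)); // illustrative LSN
modification.put_file("foo/bar3", b"content3", &ctx).await?;  // illustrative path
modification.commit(&ctx).await?;

let files = tline.list_aux_files(Lsn(0x1010), &ctx).await.unwrap();
assert_eq!(
    files.get("foo/bar3"),
    Some(&bytes::Bytes::from_static(b"content3"))
);
```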
4887 :
4888 : // Test that vectored get handles layer gaps correctly
4889 : // by advancing into the next ancestor timeline if required.
4890 : //
4891 : // The test generates timelines that look like the diagram below.
4892 : // We leave a gap in one of the L1 layers at `gap_at_key` (`/` in the diagram).
4893 : // The reconstruct data for that key lies in the ancestor timeline (`X` in the diagram).
4894 : //
4895 : // ```
4896 : //-------------------------------+
4897 : // ... |
4898 : // [ L1 ] |
4899 : // [ / L1 ] | Child Timeline
4900 : // ... |
4901 : // ------------------------------+
4902 : // [ X L1 ] | Parent Timeline
4903 : // ------------------------------+
4904 : // ```
4905 : #[tokio::test]
4906 2 : async fn test_get_vectored_key_gap() -> anyhow::Result<()> {
4907 2 : let tenant_conf = TenantConf {
4908 2 : // Make compaction deterministic
4909 2 : gc_period: Duration::ZERO,
4910 2 : compaction_period: Duration::ZERO,
4911 2 : // Encourage creation of L1 layers
4912 2 : checkpoint_distance: 16 * 1024,
4913 2 : compaction_target_size: 8 * 1024,
4914 2 : ..TenantConf::default()
4915 2 : };
4916 2 :
4917 2 : let harness = TenantHarness::create_custom(
4918 2 : "test_get_vectored_key_gap",
4919 2 : tenant_conf,
4920 2 : TenantId::generate(),
4921 2 : ShardIdentity::unsharded(),
4922 2 : Generation::new(0xdeadbeef),
4923 2 : )?;
4924 8 : let (tenant, ctx) = harness.load().await;
4925 2 :
4926 2 : let mut current_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
4927 2 : let gap_at_key = current_key.add(100);
4928 2 : let mut current_lsn = Lsn(0x10);
4929 2 :
4930 2 : const KEY_COUNT: usize = 10_000;
4931 2 :
4932 2 : let timeline_id = TimelineId::generate();
4933 2 : let current_timeline = tenant
4934 2 : .create_test_timeline(timeline_id, current_lsn, DEFAULT_PG_VERSION, &ctx)
4935 6 : .await?;
4936 2 :
4937 2 : current_lsn += 0x100;
4938 2 :
4939 2 : let mut writer = current_timeline.writer().await;
4940 2 : writer
4941 2 : .put(
4942 2 : gap_at_key,
4943 2 : current_lsn,
4944 2 : &Value::Image(test_img(&format!("{} at {}", gap_at_key, current_lsn))),
4945 2 : &ctx,
4946 2 : )
4947 2 : .await?;
4948 2 : writer.finish_write(current_lsn);
4949 2 : drop(writer);
4950 2 :
4951 2 : let mut latest_lsns = HashMap::new();
4952 2 : latest_lsns.insert(gap_at_key, current_lsn);
4953 2 :
4954 2 : current_timeline.freeze_and_flush().await?;
4955 2 :
4956 2 : let child_timeline_id = TimelineId::generate();
4957 2 :
4958 2 : tenant
4959 2 : .branch_timeline_test(
4960 2 : ¤t_timeline,
4961 2 : child_timeline_id,
4962 2 : Some(current_lsn),
4963 2 : &ctx,
4964 2 : )
4965 2 : .await?;
4966 2 : let child_timeline = tenant
4967 2 : .get_timeline(child_timeline_id, true)
4968 2 : .expect("Should have the branched timeline");
4969 2 :
4970 20002 : for i in 0..KEY_COUNT {
4971 20000 : if current_key == gap_at_key {
4972 2 : current_key = current_key.next();
4973 2 : continue;
4974 19998 : }
4975 19998 :
4976 19998 : current_lsn += 0x10;
4977 2 :
4978 19998 : let mut writer = child_timeline.writer().await;
4979 19998 : writer
4980 19998 : .put(
4981 19998 : current_key,
4982 19998 : current_lsn,
4983 19998 : &Value::Image(test_img(&format!("{} at {}", current_key, current_lsn))),
4984 19998 : &ctx,
4985 19998 : )
4986 68 : .await?;
4987 19998 : writer.finish_write(current_lsn);
4988 19998 : drop(writer);
4989 19998 :
4990 19998 : latest_lsns.insert(current_key, current_lsn);
4991 19998 : current_key = current_key.next();
4992 19998 :
4993 19998 : // Flush every now and then to encourage layer file creation.
4994 19998 : if i % 500 == 0 {
4995 45 : child_timeline.freeze_and_flush().await?;
4996 19958 : }
4997 2 : }
4998 2 :
4999 2 : child_timeline.freeze_and_flush().await?;
5000 2 : let mut flags = EnumSet::new();
5001 2 : flags.insert(CompactFlags::ForceRepartition);
5002 2 : child_timeline
5003 2 : .compact(&CancellationToken::new(), flags, &ctx)
5004 2184 : .await?;
5005 2 :
5006 2 : let key_near_end = {
5007 2 : let mut tmp = current_key;
5008 2 : tmp.field6 -= 10;
5009 2 : tmp
5010 2 : };
5011 2 :
5012 2 : let key_near_gap = {
5013 2 : let mut tmp = gap_at_key;
5014 2 : tmp.field6 -= 10;
5015 2 : tmp
5016 2 : };
5017 2 :
5018 2 : let read = KeySpace {
5019 2 : ranges: vec![key_near_gap..gap_at_key.next(), key_near_end..current_key],
5020 2 : };
5021 2 : let results = child_timeline
5022 2 : .get_vectored_impl(
5023 2 : read.clone(),
5024 2 : current_lsn,
5025 2 : &mut ValuesReconstructState::new(),
5026 2 : &ctx,
5027 2 : )
5028 15 : .await?;
5029 2 :
5030 44 : for (key, img_res) in results {
5031 42 : let expected = test_img(&format!("{} at {}", key, latest_lsns[&key]));
5032 42 : assert_eq!(img_res?, expected);
5033 2 : }
5034 2 :
5035 2 : Ok(())
5036 2 : }
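
Deterministic layer shapes in this test come from forcing a repartition at compaction time; `test_metadata_scan` below combines this with forced image-layer creation. The recurring flag pattern, going by its use in these tests, sketched for any `Arc<Timeline>` with a `ctx` in scope:

```rust
// Sketch: the flag combination these tests use to force a repartition and
// image-layer creation during a compaction pass.
let mut flags = EnumSet::new();
flags.insert(CompactFlags::ForceRepartition);
flags.insert(CompactFlags::ForceImageLayerCreation);
timeline.compact(&CancellationToken::new(), flags, &ctx).await?;
```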
5037 :
5038 : // Test that vectored get descends into ancestor timelines correctly and
5039 : // does not return an image that's newer than requested.
5040 : //
5041 : // The diagram below illustrates an interesting case. We have a parent timeline
5042 : // (top of the Lsn range) and a child timeline. The request key cannot be reconstructed
5043 : // from the child timeline, so the parent timeline must be visited. When advancing into
5044 : // the child timeline, the read path needs to remember what the requested Lsn was in
5045 : // order to avoid returning an image that's too new. The test below constructs such
5046 : // a timeline setup and does a few queries around the Lsn of each page image.
5047 : // ```
5048 : // LSN
5049 : // ^
5050 : // |
5051 : // |
5052 : // 500 | --------------------------------------> branch point
5053 : // 400 | X
5054 : // 300 | X
5055 : // 200 | --------------------------------------> requested lsn
5056 : // 100 | X
5057 : // |---------------------------------------> Key
5058 : // |
5059 : // ------> requested key
5060 : //
5061 : // Legend:
5062 : // * X - page images
5063 : // ```
5064 : #[tokio::test]
5065 2 : async fn test_get_vectored_ancestor_descent() -> anyhow::Result<()> {
5066 2 : let harness = TenantHarness::create("test_get_vectored_on_lsn_axis")?;
5067 8 : let (tenant, ctx) = harness.load().await;
5068 2 :
5069 2 : let start_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5070 2 : let end_key = start_key.add(1000);
5071 2 : let child_gap_at_key = start_key.add(500);
5072 2 : let mut parent_gap_lsns: BTreeMap<Lsn, String> = BTreeMap::new();
5073 2 :
5074 2 : let mut current_lsn = Lsn(0x10);
5075 2 :
5076 2 : let timeline_id = TimelineId::generate();
5077 2 : let parent_timeline = tenant
5078 2 : .create_test_timeline(timeline_id, current_lsn, DEFAULT_PG_VERSION, &ctx)
5079 6 : .await?;
5080 2 :
5081 2 : current_lsn += 0x100;
5082 2 :
5083 8 : for _ in 0..3 {
5084 6 : let mut key = start_key;
5085 6006 : while key < end_key {
5086 6000 : current_lsn += 0x10;
5087 6000 :
5088 6000 : let image_value = format!("{} at {}", child_gap_at_key, current_lsn);
5089 2 :
5090 6000 : let mut writer = parent_timeline.writer().await;
5091 6000 : writer
5092 6000 : .put(
5093 6000 : key,
5094 6000 : current_lsn,
5095 6000 : &Value::Image(test_img(&image_value)),
5096 6000 : &ctx,
5097 6000 : )
5098 6 : .await?;
5099 6000 : writer.finish_write(current_lsn);
5100 6000 :
5101 6000 : if key == child_gap_at_key {
5102 6 : parent_gap_lsns.insert(current_lsn, image_value);
5103 5994 : }
5104 2 :
5105 6000 : key = key.next();
5106 2 : }
5107 2 :
5108 6 : parent_timeline.freeze_and_flush().await?;
5109 2 : }
5110 2 :
5111 2 : let child_timeline_id = TimelineId::generate();
5112 2 :
5113 2 : let child_timeline = tenant
5114 2 : .branch_timeline_test(&parent_timeline, child_timeline_id, Some(current_lsn), &ctx)
5115 2 : .await?;
5116 2 :
5117 2 : let mut key = start_key;
5118 2002 : while key < end_key {
5119 2000 : if key == child_gap_at_key {
5120 2 : key = key.next();
5121 2 : continue;
5122 1998 : }
5123 1998 :
5124 1998 : current_lsn += 0x10;
5125 2 :
5126 1998 : let mut writer = child_timeline.writer().await;
5127 1998 : writer
5128 1998 : .put(
5129 1998 : key,
5130 1998 : current_lsn,
5131 1998 : &Value::Image(test_img(&format!("{} at {}", key, current_lsn))),
5132 1998 : &ctx,
5133 1998 : )
5134 17 : .await?;
5135 1998 : writer.finish_write(current_lsn);
5136 1998 :
5137 1998 : key = key.next();
5138 2 : }
5139 2 :
5140 2 : child_timeline.freeze_and_flush().await?;
5141 2 :
5142 2 : let lsn_offsets: [i64; 5] = [-10, -1, 0, 1, 10];
5143 2 : let mut query_lsns = Vec::new();
5144 6 : for image_lsn in parent_gap_lsns.keys().rev() {
5145 36 : for offset in lsn_offsets {
5146 30 : query_lsns.push(Lsn(image_lsn
5147 30 : .0
5148 30 : .checked_add_signed(offset)
5149 30 : .expect("Shouldn't overflow")));
5150 30 : }
5151 2 : }
5152 2 :
5153 32 : for query_lsn in query_lsns {
5154 30 : let results = child_timeline
5155 30 : .get_vectored_impl(
5156 30 : KeySpace {
5157 30 : ranges: vec![child_gap_at_key..child_gap_at_key.next()],
5158 30 : },
5159 30 : query_lsn,
5160 30 : &mut ValuesReconstructState::new(),
5161 30 : &ctx,
5162 30 : )
5163 29 : .await;
5164 2 :
5165 30 : let expected_item = parent_gap_lsns
5166 30 : .iter()
5167 30 : .rev()
5168 68 : .find(|(lsn, _)| **lsn <= query_lsn);
5169 30 :
5170 30 : info!(
5171 2 : "Doing vectored read at LSN {}. Expecting image to be: {:?}",
5172 2 : query_lsn, expected_item
5173 2 : );
5174 2 :
5175 30 : match expected_item {
5176 26 : Some((_, img_value)) => {
5177 26 : let key_results = results.expect("No vectored get error expected");
5178 26 : let key_result = &key_results[&child_gap_at_key];
5179 26 : let returned_img = key_result
5180 26 : .as_ref()
5181 26 : .expect("No page reconstruct error expected");
5182 26 :
5183 26 : info!(
5184 2 : "Vectored read at LSN {} returned image {}",
5185 0 : query_lsn,
5186 0 : std::str::from_utf8(returned_img)?
5187 2 : );
5188 26 : assert_eq!(*returned_img, test_img(img_value));
5189 2 : }
5190 2 : None => {
5191 4 : assert!(matches!(results, Err(GetVectoredError::MissingKey(_))));
5192 2 : }
5193 2 : }
5194 2 : }
5195 2 :
5196 2 : Ok(())
5197 2 : }
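
The expected-value oracle in the loop above is worth spelling out: for a query LSN, the correct answer is the newest parent image at or below that LSN, and any query older than the first image must surface as a missing key. In isolation (variables as in the loop above):

```rust
// Sketch of the oracle: the newest recorded image at or below the query LSN.
let expected = parent_gap_lsns
    .iter()
    .rev()
    .find(|(lsn, _)| **lsn <= query_lsn);
match expected {
    // A hit: the vectored read must return exactly this image.
    Some((_, img_value)) => assert_eq!(*returned_img, test_img(img_value)),
    // A miss below the oldest image: the read must report a missing key.
    None => assert!(matches!(results, Err(GetVectoredError::MissingKey(_)))),
}
```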
5198 :
5199 : #[tokio::test]
5200 2 : async fn test_random_updates() -> anyhow::Result<()> {
5201 2 : let names_algorithms = [
5202 2 : ("test_random_updates_legacy", CompactionAlgorithm::Legacy),
5203 2 : ("test_random_updates_tiered", CompactionAlgorithm::Tiered),
5204 2 : ];
5205 6 : for (name, algorithm) in names_algorithms {
5206 17948 : test_random_updates_algorithm(name, algorithm).await?;
5207 2 : }
5208 2 : Ok(())
5209 2 : }
5210 :
5211 4 : async fn test_random_updates_algorithm(
5212 4 : name: &'static str,
5213 4 : compaction_algorithm: CompactionAlgorithm,
5214 4 : ) -> anyhow::Result<()> {
5215 4 : let mut harness = TenantHarness::create(name)?;
5216 4 : harness.tenant_conf.compaction_algorithm = CompactionAlgorithmSettings {
5217 4 : kind: compaction_algorithm,
5218 4 : };
5219 15 : let (tenant, ctx) = harness.load().await;
5220 4 : let tline = tenant
5221 4 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5222 11 : .await?;
5223 :
5224 : const NUM_KEYS: usize = 1000;
5225 4 : let cancel = CancellationToken::new();
5226 4 :
5227 4 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5228 4 : let mut test_key_end = test_key;
5229 4 : test_key_end.field6 = NUM_KEYS as u32;
5230 4 : tline.add_extra_test_dense_keyspace(KeySpace::single(test_key..test_key_end));
5231 4 :
5232 4 : let mut keyspace = KeySpaceAccum::new();
5233 4 :
5234 4 : // Track when each page was last modified. Used to assert that
5235 4 : // a read sees the latest page version.
5236 4 : let mut updated = [Lsn(0); NUM_KEYS];
5237 4 :
5238 4 : let mut lsn = Lsn(0x10);
5239 : #[allow(clippy::needless_range_loop)]
5240 4004 : for blknum in 0..NUM_KEYS {
5241 4000 : lsn = Lsn(lsn.0 + 0x10);
5242 4000 : test_key.field6 = blknum as u32;
5243 4000 : let mut writer = tline.writer().await;
5244 4000 : writer
5245 4000 : .put(
5246 4000 : test_key,
5247 4000 : lsn,
5248 4000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5249 4000 : &ctx,
5250 4000 : )
5251 4 : .await?;
5252 4000 : writer.finish_write(lsn);
5253 4000 : updated[blknum] = lsn;
5254 4000 : drop(writer);
5255 4000 :
5256 4000 : keyspace.add_key(test_key);
5257 : }
5258 :
5259 204 : for _ in 0..50 {
5260 200200 : for _ in 0..NUM_KEYS {
5261 200000 : lsn = Lsn(lsn.0 + 0x10);
5262 200000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5263 200000 : test_key.field6 = blknum as u32;
5264 200000 : let mut writer = tline.writer().await;
5265 200000 : writer
5266 200000 : .put(
5267 200000 : test_key,
5268 200000 : lsn,
5269 200000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5270 200000 : &ctx,
5271 200000 : )
5272 198 : .await?;
5273 200000 : writer.finish_write(lsn);
5274 200000 : drop(writer);
5275 200000 : updated[blknum] = lsn;
5276 : }
5277 :
5278 : // Read all the blocks
5279 200000 : for (blknum, last_lsn) in updated.iter().enumerate() {
5280 200000 : test_key.field6 = blknum as u32;
5281 200000 : assert_eq!(
5282 200000 : tline.get(test_key, lsn, &ctx).await?,
5283 200000 : test_img(&format!("{} at {}", blknum, last_lsn))
5284 : );
5285 : }
5286 :
5287 : // Perform a cycle of flush, and GC
5288 204 : tline.freeze_and_flush().await?;
5289 200 : tenant
5290 200 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
5291 185 : .await?;
5292 : }
5293 :
5294 4 : Ok(())
5295 4 : }
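
The randomized tests all verify against a software model: the `updated` array records the last write LSN per block, and every read is checked against it. One update step extracted as a sketch, using the same module imports and key layout as the test above:

```rust
// Sketch: one model-based randomized update. Write a fresh image for a random
// block and record its LSN so a later read can be checked against the model.
async fn random_update(
    tline: &Arc<Timeline>,
    updated: &mut [Lsn],
    lsn: Lsn,
    ctx: &RequestContext,
) -> anyhow::Result<()> {
    let blknum = thread_rng().gen_range(0..updated.len());
    let mut key = Key::from_hex("010000000033333333444444445500000000").unwrap();
    key.field6 = blknum as u32;

    let mut writer = tline.writer().await;
    writer
        .put(key, lsn, &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), ctx)
        .await?;
    writer.finish_write(lsn);
    updated[blknum] = lsn; // the model: newest LSN per block
    Ok(())
}
```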
5296 :
5297 : #[tokio::test]
5298 2 : async fn test_traverse_branches() -> anyhow::Result<()> {
5299 2 : let (tenant, ctx) = TenantHarness::create("test_traverse_branches")?
5300 2 : .load()
5301 8 : .await;
5302 2 : let mut tline = tenant
5303 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5304 6 : .await?;
5305 2 :
5306 2 : const NUM_KEYS: usize = 1000;
5307 2 :
5308 2 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5309 2 :
5310 2 : let mut keyspace = KeySpaceAccum::new();
5311 2 :
5312 2 : let cancel = CancellationToken::new();
5313 2 :
5314 2 : // Track when each page was last modified. Used to assert that
5315 2 : // a read sees the latest page version.
5316 2 : let mut updated = [Lsn(0); NUM_KEYS];
5317 2 :
5318 2 : let mut lsn = Lsn(0x10);
5319 2 : #[allow(clippy::needless_range_loop)]
5320 2002 : for blknum in 0..NUM_KEYS {
5321 2000 : lsn = Lsn(lsn.0 + 0x10);
5322 2000 : test_key.field6 = blknum as u32;
5323 2000 : let mut writer = tline.writer().await;
5324 2000 : writer
5325 2000 : .put(
5326 2000 : test_key,
5327 2000 : lsn,
5328 2000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5329 2000 : &ctx,
5330 2000 : )
5331 2 : .await?;
5332 2000 : writer.finish_write(lsn);
5333 2000 : updated[blknum] = lsn;
5334 2000 : drop(writer);
5335 2000 :
5336 2000 : keyspace.add_key(test_key);
5337 2 : }
5338 2 :
5339 102 : for _ in 0..50 {
5340 100 : let new_tline_id = TimelineId::generate();
5341 100 : tenant
5342 100 : .branch_timeline_test(&tline, new_tline_id, Some(lsn), &ctx)
5343 2 : .await?;
5344 100 : tline = tenant
5345 100 : .get_timeline(new_tline_id, true)
5346 100 : .expect("Should have the branched timeline");
5347 2 :
5348 100100 : for _ in 0..NUM_KEYS {
5349 100000 : lsn = Lsn(lsn.0 + 0x10);
5350 100000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5351 100000 : test_key.field6 = blknum as u32;
5352 100000 : let mut writer = tline.writer().await;
5353 100000 : writer
5354 100000 : .put(
5355 100000 : test_key,
5356 100000 : lsn,
5357 100000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5358 100000 : &ctx,
5359 100000 : )
5360 839 : .await?;
5361 100000 : println!("updating {} at {}", blknum, lsn);
5362 100000 : writer.finish_write(lsn);
5363 100000 : drop(writer);
5364 100000 : updated[blknum] = lsn;
5365 2 : }
5366 2 :
5367 2 : // Read all the blocks
5368 100000 : for (blknum, last_lsn) in updated.iter().enumerate() {
5369 100000 : test_key.field6 = blknum as u32;
5370 100000 : assert_eq!(
5371 100000 : tline.get(test_key, lsn, &ctx).await?,
5372 100000 : test_img(&format!("{} at {}", blknum, last_lsn))
5373 2 : );
5374 2 : }
5375 2 :
5376 2 : // Perform a cycle of flush, compact, and GC
5377 103 : tline.freeze_and_flush().await?;
5378 13193 : tline.compact(&cancel, EnumSet::empty(), &ctx).await?;
5379 100 : tenant
5380 100 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
5381 87 : .await?;
5382 2 : }
5383 2 :
5384 2 : Ok(())
5385 2 : }
5386 :
5387 : #[tokio::test]
5388 2 : async fn test_traverse_ancestors() -> anyhow::Result<()> {
5389 2 : let (tenant, ctx) = TenantHarness::create("test_traverse_ancestors")?
5390 2 : .load()
5391 8 : .await;
5392 2 : let mut tline = tenant
5393 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5394 6 : .await?;
5395 2 :
5396 2 : const NUM_KEYS: usize = 100;
5397 2 : const NUM_TLINES: usize = 50;
5398 2 :
5399 2 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5400 2 : // Track page mutation lsns across different timelines.
5401 2 : let mut updated = [[Lsn(0); NUM_KEYS]; NUM_TLINES];
5402 2 :
5403 2 : let mut lsn = Lsn(0x10);
5404 2 :
5405 2 : #[allow(clippy::needless_range_loop)]
5406 102 : for idx in 0..NUM_TLINES {
5407 100 : let new_tline_id = TimelineId::generate();
5408 100 : tenant
5409 100 : .branch_timeline_test(&tline, new_tline_id, Some(lsn), &ctx)
5410 2 : .await?;
5411 100 : tline = tenant
5412 100 : .get_timeline(new_tline_id, true)
5413 100 : .expect("Should have the branched timeline");
5414 2 :
5415 10100 : for _ in 0..NUM_KEYS {
5416 10000 : lsn = Lsn(lsn.0 + 0x10);
5417 10000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5418 10000 : test_key.field6 = blknum as u32;
5419 10000 : let mut writer = tline.writer().await;
5420 10000 : writer
5421 10000 : .put(
5422 10000 : test_key,
5423 10000 : lsn,
5424 10000 : &Value::Image(test_img(&format!("{} {} at {}", idx, blknum, lsn))),
5425 10000 : &ctx,
5426 10000 : )
5427 88 : .await?;
5428 10000 : println!("updating [{}][{}] at {}", idx, blknum, lsn);
5429 10000 : writer.finish_write(lsn);
5430 10000 : drop(writer);
5431 10000 : updated[idx][blknum] = lsn;
5432 2 : }
5433 2 : }
5434 2 :
5435 2 : // Read pages from leaf timeline across all ancestors.
5436 100 : for (idx, lsns) in updated.iter().enumerate() {
5437 10000 : for (blknum, lsn) in lsns.iter().enumerate() {
5438 2 : // Skip empty mutations.
5439 10000 : if lsn.0 == 0 {
5440 3650 : continue;
5441 6350 : }
5442 6350 : println!("checking [{idx}][{blknum}] at {lsn}");
5443 6350 : test_key.field6 = blknum as u32;
5444 6350 : assert_eq!(
5445 6350 : tline.get(test_key, *lsn, &ctx).await?,
5446 6350 : test_img(&format!("{idx} {blknum} at {lsn}"))
5447 2 : );
5448 2 : }
5449 2 : }
5450 2 : Ok(())
5451 2 : }
5452 :
5453 : #[tokio::test]
5454 2 : async fn test_write_at_initdb_lsn_takes_optimization_code_path() -> anyhow::Result<()> {
5455 2 : let (tenant, ctx) = TenantHarness::create("test_empty_test_timeline_is_usable")?
5456 2 : .load()
5457 8 : .await;
5458 2 :
5459 2 : let initdb_lsn = Lsn(0x20);
5460 2 : let utline = tenant
5461 2 : .create_empty_timeline(TIMELINE_ID, initdb_lsn, DEFAULT_PG_VERSION, &ctx)
5462 2 : .await?;
5463 2 : let tline = utline.raw_timeline().unwrap();
5464 2 :
5465 2 : // Spawn the flush loop now so that we can set `expect_initdb_optimization`
5466 2 : tline.maybe_spawn_flush_loop();
5467 2 :
5468 2 : // Make sure the timeline has the minimum set of required keys for operation.
5469 2 : // The only operation you can always do on an empty timeline is to `put` new data.
5470 2 : // Except if you `put` at `initdb_lsn`.
5471 2 : // In that case, there's an optimization to directly create image layers instead of delta layers.
5472 2 : // It uses `repartition()`, which assumes some keys to be present.
5473 2 : // Let's make sure the test timeline can handle that case.
5474 2 : {
5475 2 : let mut state = tline.flush_loop_state.lock().unwrap();
5476 2 : assert_eq!(
5477 2 : timeline::FlushLoopState::Running {
5478 2 : expect_initdb_optimization: false,
5479 2 : initdb_optimization_count: 0,
5480 2 : },
5481 2 : *state
5482 2 : );
5483 2 : *state = timeline::FlushLoopState::Running {
5484 2 : expect_initdb_optimization: true,
5485 2 : initdb_optimization_count: 0,
5486 2 : };
5487 2 : }
5488 2 :
5489 2 : // Make writes at the initdb_lsn. When we flush it below, it should be handled by the optimization.
5490 2 : // As explained above, the optimization requires some keys to be present.
5491 2 : // As per `create_empty_timeline` documentation, use init_empty to set them.
5492 2 : // This is what `create_test_timeline` does, by the way.
5493 2 : let mut modification = tline.begin_modification(initdb_lsn);
5494 2 : modification
5495 2 : .init_empty_test_timeline()
5496 2 : .context("init_empty_test_timeline")?;
5497 2 : modification
5498 2 : .commit(&ctx)
5499 2 : .await
5500 2 : .context("commit init_empty_test_timeline modification")?;
5501 2 :
5502 2 : // Do the flush. The flush code will check the expectations that we set above.
5503 2 : tline.freeze_and_flush().await?;
5504 2 :
5505 2 : // assert freeze_and_flush exercised the initdb optimization
5506 2 : {
5507 2 : let state = tline.flush_loop_state.lock().unwrap();
5508 2 : let timeline::FlushLoopState::Running {
5509 2 : expect_initdb_optimization,
5510 2 : initdb_optimization_count,
5511 2 : } = *state
5512 2 : else {
5513 2 : panic!("unexpected state: {:?}", *state);
5514 2 : };
5515 2 : assert!(expect_initdb_optimization);
5516 2 : assert!(initdb_optimization_count > 0);
5517 2 : }
5518 2 : Ok(())
5519 2 : }
5520 :
5521 : #[tokio::test]
5522 2 : async fn test_create_guard_crash() -> anyhow::Result<()> {
5523 2 : let name = "test_create_guard_crash";
5524 2 : let harness = TenantHarness::create(name)?;
5525 2 : {
5526 8 : let (tenant, ctx) = harness.load().await;
5527 2 : let tline = tenant
5528 2 : .create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION, &ctx)
5529 2 : .await?;
5530 2 : // Leave the timeline ID in [`Tenant::timelines_creating`] so that any further attempt to create it is rejected
5531 2 : let raw_tline = tline.raw_timeline().unwrap();
5532 2 : raw_tline
5533 2 : .shutdown(super::timeline::ShutdownMode::Hard)
5534 2 : .instrument(info_span!("test_shutdown", tenant_id=%raw_tline.tenant_shard_id, shard_id=%raw_tline.tenant_shard_id.shard_slug(), timeline_id=%TIMELINE_ID))
5535 2 : .await;
5536 2 : std::mem::forget(tline);
5537 2 : }
5538 2 :
5539 8 : let (tenant, _) = harness.load().await;
5540 2 : match tenant.get_timeline(TIMELINE_ID, false) {
5541 2 : Ok(_) => panic!("timeline should've been removed during load"),
5542 2 : Err(e) => {
5543 2 : assert_eq!(
5544 2 : e,
5545 2 : GetTimelineError::NotFound {
5546 2 : tenant_id: tenant.tenant_shard_id,
5547 2 : timeline_id: TIMELINE_ID,
5548 2 : }
5549 2 : )
5550 2 : }
5551 2 : }
5552 2 :
5553 2 : assert!(!harness
5554 2 : .conf
5555 2 : .timeline_path(&tenant.tenant_shard_id, &TIMELINE_ID)
5556 2 : .exists());
5557 2 :
5558 2 : Ok(())
5559 2 : }
5560 :
5561 : #[tokio::test]
5562 2 : async fn test_read_at_max_lsn() -> anyhow::Result<()> {
5563 2 : let names_algorithms = [
5564 2 : ("test_read_at_max_lsn_legacy", CompactionAlgorithm::Legacy),
5565 2 : ("test_read_at_max_lsn_tiered", CompactionAlgorithm::Tiered),
5566 2 : ];
5567 6 : for (name, algorithm) in names_algorithms {
5568 32937 : test_read_at_max_lsn_algorithm(name, algorithm).await?;
5569 2 : }
5570 2 : Ok(())
5571 2 : }
5572 :
5573 4 : async fn test_read_at_max_lsn_algorithm(
5574 4 : name: &'static str,
5575 4 : compaction_algorithm: CompactionAlgorithm,
5576 4 : ) -> anyhow::Result<()> {
5577 4 : let mut harness = TenantHarness::create(name)?;
5578 4 : harness.tenant_conf.compaction_algorithm = CompactionAlgorithmSettings {
5579 4 : kind: compaction_algorithm,
5580 4 : };
5581 16 : let (tenant, ctx) = harness.load().await;
5582 4 : let tline = tenant
5583 4 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
5584 11 : .await?;
5585 :
5586 4 : let lsn = Lsn(0x10);
5587 4 : let compact = false;
5588 32600 : bulk_insert_maybe_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000, compact).await?;
5589 :
5590 4 : let test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5591 4 : let read_lsn = Lsn(u64::MAX - 1);
5592 :
5593 310 : let result = tline.get(test_key, read_lsn, &ctx).await;
5594 4 : assert!(result.is_ok(), "result is not Ok: {}", result.unwrap_err());
5595 :
5596 4 : Ok(())
5597 4 : }
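
Both `test_random_updates` and `test_read_at_max_lsn` follow the same shape: a thin `#[tokio::test]` driver runs one body per compaction algorithm, each with a distinct harness name so every run gets its own tenant directory. The driver pattern, sketched with hypothetical names:

```rust
// Sketch of the per-algorithm driver (the harness names are hypothetical).
for (name, algorithm) in [
    ("demo_legacy", CompactionAlgorithm::Legacy),
    ("demo_tiered", CompactionAlgorithm::Tiered),
] {
    let mut harness = TenantHarness::create(name)?;
    harness.tenant_conf.compaction_algorithm = CompactionAlgorithmSettings { kind: algorithm };
    let (_tenant, _ctx) = harness.load().await;
    // ... run the shared test body against this tenant ...
}
```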
5598 :
5599 : #[tokio::test]
5600 2 : async fn test_metadata_scan() -> anyhow::Result<()> {
5601 2 : let harness = TenantHarness::create("test_metadata_scan")?;
5602 8 : let (tenant, ctx) = harness.load().await;
5603 2 : let tline = tenant
5604 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5605 6 : .await?;
5606 2 :
5607 2 : const NUM_KEYS: usize = 1000;
5608 2 : const STEP: usize = 10000; // random update + scan base_key + idx * STEP
5609 2 :
5610 2 : let cancel = CancellationToken::new();
5611 2 :
5612 2 : let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
5613 2 : base_key.field1 = AUX_KEY_PREFIX;
5614 2 : let mut test_key = base_key;
5615 2 :
5616 2 : // Track when each page was last modified. Used to assert that
5617 2 : // a read sees the latest page version.
5618 2 : let mut updated = [Lsn(0); NUM_KEYS];
5619 2 :
5620 2 : let mut lsn = Lsn(0x10);
5621 2 : #[allow(clippy::needless_range_loop)]
5622 2002 : for blknum in 0..NUM_KEYS {
5623 2000 : lsn = Lsn(lsn.0 + 0x10);
5624 2000 : test_key.field6 = (blknum * STEP) as u32;
5625 2000 : let mut writer = tline.writer().await;
5626 2000 : writer
5627 2000 : .put(
5628 2000 : test_key,
5629 2000 : lsn,
5630 2000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5631 2000 : &ctx,
5632 2000 : )
5633 2 : .await?;
5634 2000 : writer.finish_write(lsn);
5635 2000 : updated[blknum] = lsn;
5636 2000 : drop(writer);
5637 2 : }
5638 2 :
5639 2 : let keyspace = KeySpace::single(base_key..base_key.add((NUM_KEYS * STEP) as u32));
5640 2 :
5641 24 : for iter in 0..=10 {
5642 2 : // Read all the blocks
5643 22000 : for (blknum, last_lsn) in updated.iter().enumerate() {
5644 22000 : test_key.field6 = (blknum * STEP) as u32;
5645 22000 : assert_eq!(
5646 22000 : tline.get(test_key, lsn, &ctx).await?,
5647 22000 : test_img(&format!("{} at {}", blknum, last_lsn))
5648 2 : );
5649 2 : }
5650 2 :
5651 22 : let mut cnt = 0;
5652 22000 : for (key, value) in tline
5653 22 : .get_vectored_impl(
5654 22 : keyspace.clone(),
5655 22 : lsn,
5656 22 : &mut ValuesReconstructState::default(),
5657 22 : &ctx,
5658 22 : )
5659 5617 : .await?
5660 2 : {
5661 22000 : let blknum = key.field6 as usize;
5662 22000 : let value = value?;
5663 22000 : assert!(blknum % STEP == 0);
5664 22000 : let blknum = blknum / STEP;
5665 22000 : assert_eq!(
5666 22000 : value,
5667 22000 : test_img(&format!("{} at {}", blknum, updated[blknum]))
5668 22000 : );
5669 22000 : cnt += 1;
5670 2 : }
5671 2 :
5672 22 : assert_eq!(cnt, NUM_KEYS);
5673 2 :
5674 22022 : for _ in 0..NUM_KEYS {
5675 22000 : lsn = Lsn(lsn.0 + 0x10);
5676 22000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5677 22000 : test_key.field6 = (blknum * STEP) as u32;
5678 22000 : let mut writer = tline.writer().await;
5679 22000 : writer
5680 22000 : .put(
5681 22000 : test_key,
5682 22000 : lsn,
5683 22000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5684 22000 : &ctx,
5685 22000 : )
5686 142 : .await?;
5687 22000 : writer.finish_write(lsn);
5688 22000 : drop(writer);
5689 22000 : updated[blknum] = lsn;
5690 2 : }
5691 2 :
5692 2 : // Perform two cycles of flush, compact, and GC
5693 66 : for round in 0..2 {
5694 44 : tline.freeze_and_flush().await?;
5695 44 : tline
5696 44 : .compact(
5697 44 : &cancel,
5698 44 : if iter % 5 == 0 && round == 0 {
5699 6 : let mut flags = EnumSet::new();
5700 6 : flags.insert(CompactFlags::ForceImageLayerCreation);
5701 6 : flags.insert(CompactFlags::ForceRepartition);
5702 6 : flags
5703 2 : } else {
5704 38 : EnumSet::empty()
5705 2 : },
5706 44 : &ctx,
5707 2 : )
5708 8890 : .await?;
5709 44 : tenant
5710 44 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
5711 36 : .await?;
5712 2 : }
5713 2 : }
5714 2 :
5715 2 : Ok(())
5716 2 : }
5717 :
5718 : #[tokio::test]
5719 2 : async fn test_metadata_compaction_trigger() -> anyhow::Result<()> {
5720 2 : let harness = TenantHarness::create("test_metadata_compaction_trigger")?;
5721 8 : let (tenant, ctx) = harness.load().await;
5722 2 : let tline = tenant
5723 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5724 6 : .await?;
5725 2 :
5726 2 : let cancel = CancellationToken::new();
5727 2 :
5728 2 : let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
5729 2 : base_key.field1 = AUX_KEY_PREFIX;
5730 2 : let test_key = base_key;
5731 2 : let mut lsn = Lsn(0x10);
5732 2 :
5733 42 : for _ in 0..20 {
5734 40 : lsn = Lsn(lsn.0 + 0x10);
5735 40 : let mut writer = tline.writer().await;
5736 40 : writer
5737 40 : .put(
5738 40 : test_key,
5739 40 : lsn,
5740 40 : &Value::Image(test_img(&format!("{} at {}", 0, lsn))),
5741 40 : &ctx,
5742 40 : )
5743 20 : .await?;
5744 40 : writer.finish_write(lsn);
5745 40 : drop(writer);
5746 41 : tline.freeze_and_flush().await?; // force create a delta layer
5747 2 : }
5748 2 :
5749 2 : let before_num_l0_delta_files = tline
5750 2 : .layers
5751 2 : .read()
5752 2 : .await
5753 2 : .layer_map()
5754 2 : .get_level0_deltas()?
5755 2 : .len();
5756 2 :
5757 110 : tline.compact(&cancel, EnumSet::empty(), &ctx).await?;
5758 2 :
5759 2 : let after_num_l0_delta_files = tline
5760 2 : .layers
5761 2 : .read()
5762 2 : .await
5763 2 : .layer_map()
5764 2 : .get_level0_deltas()?
5765 2 : .len();
5766 2 :
5767 2 : assert!(after_num_l0_delta_files < before_num_l0_delta_files, "after_num_l0_delta_files={after_num_l0_delta_files}, before_num_l0_delta_files={before_num_l0_delta_files}");
5768 2 :
5769 2 : assert_eq!(
5770 4 : tline.get(test_key, lsn, &ctx).await?,
5771 2 : test_img(&format!("{} at {}", 0, lsn))
5772 2 : );
5773 2 :
5774 2 : Ok(())
5775 2 : }
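
The assertion above encodes the trigger's contract: a compaction pass over many small L0 delta layers must strictly reduce the L0 count. The before/after counting idiom, as a sketch (assuming `tline`, `cancel`, and `ctx` as in the test):

```rust
// Sketch: count L0 delta layers around a compaction pass and require progress.
let before = tline.layers.read().await.layer_map().get_level0_deltas()?.len();
tline.compact(&cancel, EnumSet::empty(), &ctx).await?;
let after = tline.layers.read().await.layer_map().get_level0_deltas()?.len();
assert!(after < before, "compaction did not reduce L0 deltas: {after} >= {before}");
```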
5776 :
5777 : #[tokio::test]
5778 2 : async fn test_branch_copies_dirty_aux_file_flag() {
5779 2 : let harness = TenantHarness::create("test_branch_copies_dirty_aux_file_flag").unwrap();
5780 2 :
5781 2 : // the default aux file switch policy is v1 unless the admins set it otherwise
5782 2 : assert_eq!(
5783 2 : harness.tenant_conf.switch_aux_file_policy,
5784 2 : AuxFilePolicy::V1
5785 2 : );
5786 8 : let (tenant, ctx) = harness.load().await;
5787 2 :
5788 2 : let mut lsn = Lsn(0x08);
5789 2 :
5790 2 : let tline: Arc<Timeline> = tenant
5791 2 : .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx)
5792 6 : .await
5793 2 : .unwrap();
5794 2 :
5795 2 : // no aux file is written at this point, so the persistent flag should be unset
5796 2 : assert_eq!(tline.last_aux_file_policy.load(), None);
5797 2 :
5798 2 : {
5799 2 : lsn += 8;
5800 2 : let mut modification = tline.begin_modification(lsn);
5801 2 : modification
5802 2 : .put_file("pg_logical/mappings/test1", b"first", &ctx)
5803 4 : .await
5804 2 : .unwrap();
5805 2 : modification.commit(&ctx).await.unwrap();
5806 2 : }
5807 2 :
5808 2 : // there is no tenant manager to pass the configuration through, so let's mimic it
5809 2 : tenant.set_new_location_config(
5810 2 : AttachedTenantConf::try_from(LocationConf::attached_single(
5811 2 : TenantConfOpt {
5812 2 : switch_aux_file_policy: Some(AuxFilePolicy::V2),
5813 2 : ..Default::default()
5814 2 : },
5815 2 : tenant.generation,
5816 2 : &pageserver_api::models::ShardParameters::default(),
5817 2 : ))
5818 2 : .unwrap(),
5819 2 : );
5820 2 :
5821 2 : assert_eq!(
5822 2 : tline.get_switch_aux_file_policy(),
5823 2 : AuxFilePolicy::V2,
5824 2 : "wanted state has been updated"
5825 2 : );
5826 2 : assert_eq!(
5827 2 : tline.last_aux_file_policy.load(),
5828 2 : Some(AuxFilePolicy::V1),
5829 2 : "aux file is written with switch_aux_file_policy unset (which is v1), so we should keep v1"
5830 2 : );
5831 2 :
5832 2 : // we can read everything from the storage
5833 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5834 2 : assert_eq!(
5835 2 : files.get("pg_logical/mappings/test1"),
5836 2 : Some(&bytes::Bytes::from_static(b"first"))
5837 2 : );
5838 2 :
5839 2 : {
5840 2 : lsn += 8;
5841 2 : let mut modification = tline.begin_modification(lsn);
5842 2 : modification
5843 2 : .put_file("pg_logical/mappings/test2", b"second", &ctx)
5844 2 : .await
5845 2 : .unwrap();
5846 2 : modification.commit(&ctx).await.unwrap();
5847 2 : }
5848 2 :
5849 2 : assert_eq!(
5850 2 : tline.last_aux_file_policy.load(),
5851 2 : Some(AuxFilePolicy::V1),
5852 2 : "keep v1 storage format when new files are written"
5853 2 : );
5854 2 :
5855 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5856 2 : assert_eq!(
5857 2 : files.get("pg_logical/mappings/test2"),
5858 2 : Some(&bytes::Bytes::from_static(b"second"))
5859 2 : );
5860 2 :
5861 2 : let child = tenant
5862 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(lsn), &ctx)
5863 2 : .await
5864 2 : .unwrap();
5865 2 :
5866 2 : // the child copies the last flag even if it is not on remote storage yet
5867 2 : assert_eq!(child.get_switch_aux_file_policy(), AuxFilePolicy::V2);
5868 2 : assert_eq!(child.last_aux_file_policy.load(), Some(AuxFilePolicy::V1));
5869 2 :
5870 2 : let files = child.list_aux_files(lsn, &ctx).await.unwrap();
5871 2 : assert_eq!(files.get("pg_logical/mappings/test1"), None);
5872 2 : assert_eq!(files.get("pg_logical/mappings/test2"), None);
5873 2 :
5874 2 : // Even if we crash here without flushing the parent timeline with its new
5875 2 : // last_aux_file_policy, we are safe: the child was never meant to access the
5876 2 : // ancestor's files, and the ancestor can even safely switch back to V1 as part of a migration.
5877 2 : }
5878 :
5879 : #[tokio::test]
5880 2 : async fn aux_file_policy_switch() {
5881 2 : let mut harness = TenantHarness::create("aux_file_policy_switch").unwrap();
5882 2 : harness.tenant_conf.switch_aux_file_policy = AuxFilePolicy::CrossValidation; // set to cross-validation mode
5883 8 : let (tenant, ctx) = harness.load().await;
5884 2 :
5885 2 : let mut lsn = Lsn(0x08);
5886 2 :
5887 2 : let tline: Arc<Timeline> = tenant
5888 2 : .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx)
5889 5 : .await
5890 2 : .unwrap();
5891 2 :
5892 2 : assert_eq!(
5893 2 : tline.last_aux_file_policy.load(),
5894 2 : None,
5895 2 : "no aux file is written so it should be unset"
5896 2 : );
5897 2 :
5898 2 : {
5899 2 : lsn += 8;
5900 2 : let mut modification = tline.begin_modification(lsn);
5901 2 : modification
5902 2 : .put_file("pg_logical/mappings/test1", b"first", &ctx)
5903 4 : .await
5904 2 : .unwrap();
5905 2 : modification.commit(&ctx).await.unwrap();
5906 2 : }
5907 2 :
5908 2 : // there is no tenant manager to pass the configuration through, so let's mimic it
5909 2 : tenant.set_new_location_config(
5910 2 : AttachedTenantConf::try_from(LocationConf::attached_single(
5911 2 : TenantConfOpt {
5912 2 : switch_aux_file_policy: Some(AuxFilePolicy::V2),
5913 2 : ..Default::default()
5914 2 : },
5915 2 : tenant.generation,
5916 2 : &pageserver_api::models::ShardParameters::default(),
5917 2 : ))
5918 2 : .unwrap(),
5919 2 : );
5920 2 :
5921 2 : assert_eq!(
5922 2 : tline.get_switch_aux_file_policy(),
5923 2 : AuxFilePolicy::V2,
5924 2 : "wanted state has been updated"
5925 2 : );
5926 2 : assert_eq!(
5927 2 : tline.last_aux_file_policy.load(),
5928 2 : Some(AuxFilePolicy::CrossValidation),
5929 2 : "dirty index_part.json reflected state is yet to be updated"
5930 2 : );
5931 2 :
5932 2 : // we can still read the auxfile v1 before we ingest anything new
5933 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5934 2 : assert_eq!(
5935 2 : files.get("pg_logical/mappings/test1"),
5936 2 : Some(&bytes::Bytes::from_static(b"first"))
5937 2 : );
5938 2 :
5939 2 : {
5940 2 : lsn += 8;
5941 2 : let mut modification = tline.begin_modification(lsn);
5942 2 : modification
5943 2 : .put_file("pg_logical/mappings/test2", b"second", &ctx)
5944 2 : .await
5945 2 : .unwrap();
5946 2 : modification.commit(&ctx).await.unwrap();
5947 2 : }
5948 2 :
5949 2 : assert_eq!(
5950 2 : tline.last_aux_file_policy.load(),
5951 2 : Some(AuxFilePolicy::V2),
5952 2 : "ingesting a file should apply the wanted switch state when applicable"
5953 2 : );
5954 2 :
5955 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5956 2 : assert_eq!(
5957 2 : files.get("pg_logical/mappings/test1"),
5958 2 : Some(&bytes::Bytes::from_static(b"first")),
5959 2 : "cross validation writes to both v1 and v2 so this should be available in v2"
5960 2 : );
5961 2 : assert_eq!(
5962 2 : files.get("pg_logical/mappings/test2"),
5963 2 : Some(&bytes::Bytes::from_static(b"second"))
5964 2 : );
5965 2 :
5966 2 : // mimic again by trying to flip it from V2 to V1 (not switched to while ingesting a file)
5967 2 : tenant.set_new_location_config(
5968 2 : AttachedTenantConf::try_from(LocationConf::attached_single(
5969 2 : TenantConfOpt {
5970 2 : switch_aux_file_policy: Some(AuxFilePolicy::V1),
5971 2 : ..Default::default()
5972 2 : },
5973 2 : tenant.generation,
5974 2 : &pageserver_api::models::ShardParameters::default(),
5975 2 : ))
5976 2 : .unwrap(),
5977 2 : );
5978 2 :
5979 2 : {
5980 2 : lsn += 8;
5981 2 : let mut modification = tline.begin_modification(lsn);
5982 2 : modification
5983 2 : .put_file("pg_logical/mappings/test2", b"third", &ctx)
5984 2 : .await
5985 2 : .unwrap();
5986 2 : modification.commit(&ctx).await.unwrap();
5987 2 : }
5988 2 :
5989 2 : assert_eq!(
5990 2 : tline.get_switch_aux_file_policy(),
5991 2 : AuxFilePolicy::V1,
5992 2 : "wanted state has been updated again, even if invalid request"
5993 2 : );
5994 2 :
5995 2 : assert_eq!(
5996 2 : tline.last_aux_file_policy.load(),
5997 2 : Some(AuxFilePolicy::V2),
5998 2 : "ingesting a file should apply the wanted switch state when applicable"
5999 2 : );
6000 2 :
6001 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
6002 2 : assert_eq!(
6003 2 : files.get("pg_logical/mappings/test1"),
6004 2 : Some(&bytes::Bytes::from_static(b"first"))
6005 2 : );
6006 2 : assert_eq!(
6007 2 : files.get("pg_logical/mappings/test2"),
6008 2 : Some(&bytes::Bytes::from_static(b"third"))
6009 2 : );
6010 2 :
6011 2 : // mimic again by trying to flip it from V1 to V2 (not switched to while ingesting a file)
6012 2 : tenant.set_new_location_config(
6013 2 : AttachedTenantConf::try_from(LocationConf::attached_single(
6014 2 : TenantConfOpt {
6015 2 : switch_aux_file_policy: Some(AuxFilePolicy::V2),
6016 2 : ..Default::default()
6017 2 : },
6018 2 : tenant.generation,
6019 2 : &pageserver_api::models::ShardParameters::default(),
6020 2 : ))
6021 2 : .unwrap(),
6022 2 : );
6023 2 :
6024 2 : {
6025 2 : lsn += 8;
6026 2 : let mut modification = tline.begin_modification(lsn);
6027 2 : modification
6028 2 : .put_file("pg_logical/mappings/test3", b"last", &ctx)
6029 2 : .await
6030 2 : .unwrap();
6031 2 : modification.commit(&ctx).await.unwrap();
6032 2 : }
6033 2 :
6034 2 : assert_eq!(tline.get_switch_aux_file_policy(), AuxFilePolicy::V2);
6035 2 :
6036 2 : assert_eq!(tline.last_aux_file_policy.load(), Some(AuxFilePolicy::V2));
6037 2 :
6038 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
6039 2 : assert_eq!(
6040 2 : files.get("pg_logical/mappings/test1"),
6041 2 : Some(&bytes::Bytes::from_static(b"first"))
6042 2 : );
6043 2 : assert_eq!(
6044 2 : files.get("pg_logical/mappings/test2"),
6045 2 : Some(&bytes::Bytes::from_static(b"third"))
6046 2 : );
6047 2 : assert_eq!(
6048 2 : files.get("pg_logical/mappings/test3"),
6049 2 : Some(&bytes::Bytes::from_static(b"last"))
6050 2 : );
6051 2 : }
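
Because the harness has no tenant manager, these tests push location configs directly, and the construction is repeated several times above; a small helper is a natural refactor. A sketch using the exact same construction as the tests:

```rust
// Sketch: mimic the tenant manager by pushing a new location config that only
// changes the aux-file switch policy.
fn set_aux_policy(tenant: &Tenant, policy: AuxFilePolicy) {
    tenant.set_new_location_config(
        AttachedTenantConf::try_from(LocationConf::attached_single(
            TenantConfOpt {
                switch_aux_file_policy: Some(policy),
                ..Default::default()
            },
            tenant.generation,
            &pageserver_api::models::ShardParameters::default(),
        ))
        .unwrap(),
    );
}
```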
6052 :
6053 : #[tokio::test]
6054 2 : async fn aux_file_policy_force_switch() {
6055 2 : let mut harness = TenantHarness::create("aux_file_policy_force_switch").unwrap();
6056 2 : harness.tenant_conf.switch_aux_file_policy = AuxFilePolicy::V1;
6057 8 : let (tenant, ctx) = harness.load().await;
6058 2 :
6059 2 : let mut lsn = Lsn(0x08);
6060 2 :
6061 2 : let tline: Arc<Timeline> = tenant
6062 2 : .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx)
6063 6 : .await
6064 2 : .unwrap();
6065 2 :
6066 2 : assert_eq!(
6067 2 : tline.last_aux_file_policy.load(),
6068 2 : None,
6069 2 : "no aux file is written so it should be unset"
6070 2 : );
6071 2 :
6072 2 : {
6073 2 : lsn += 8;
6074 2 : let mut modification = tline.begin_modification(lsn);
6075 2 : modification
6076 2 : .put_file("pg_logical/mappings/test1", b"first", &ctx)
6077 4 : .await
6078 2 : .unwrap();
6079 2 : modification.commit(&ctx).await.unwrap();
6080 2 : }
6081 2 :
6082 2 : tline.do_switch_aux_policy(AuxFilePolicy::V2).unwrap();
6083 2 :
6084 2 : assert_eq!(
6085 2 : tline.last_aux_file_policy.load(),
6086 2 : Some(AuxFilePolicy::V2),
6087 2 : "dirty index_part.json reflected state is yet to be updated"
6088 2 : );
6089 2 :
6090 2 : // lose all data from v1
6091 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
6092 2 : assert_eq!(files.get("pg_logical/mappings/test1"), None);
6093 2 :
6094 2 : {
6095 2 : lsn += 8;
6096 2 : let mut modification = tline.begin_modification(lsn);
6097 2 : modification
6098 2 : .put_file("pg_logical/mappings/test2", b"second", &ctx)
6099 2 : .await
6100 2 : .unwrap();
6101 2 : modification.commit(&ctx).await.unwrap();
6102 2 : }
6103 2 :
6104 2 : // read data ingested in v2
6105 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
6106 2 : assert_eq!(
6107 2 : files.get("pg_logical/mappings/test2"),
6108 2 : Some(&bytes::Bytes::from_static(b"second"))
6109 2 : );
6110 2 : // lose all data from v1
6111 2 : assert_eq!(files.get("pg_logical/mappings/test1"), None);
6112 2 : }
6113 :
6114 : #[tokio::test]
6115 2 : async fn aux_file_policy_auto_detect() {
6116 2 : let mut harness = TenantHarness::create("aux_file_policy_auto_detect").unwrap();
6117 2 : harness.tenant_conf.switch_aux_file_policy = AuxFilePolicy::V2; // wanted policy is v2, but existing v1 writes below should be auto-detected
6118 8 : let (tenant, ctx) = harness.load().await;
6119 2 :
6120 2 : let mut lsn = Lsn(0x08);
6121 2 :
6122 2 : let tline: Arc<Timeline> = tenant
6123 2 : .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx)
6124 6 : .await
6125 2 : .unwrap();
6126 2 :
6127 2 : assert_eq!(
6128 2 : tline.last_aux_file_policy.load(),
6129 2 : None,
6130 2 : "no aux file is written so it should be unset"
6131 2 : );
6132 2 :
6133 2 : {
6134 2 : lsn += 8;
6135 2 : let mut modification = tline.begin_modification(lsn);
6136 2 : let buf = AuxFilesDirectory::ser(&AuxFilesDirectory {
6137 2 : files: vec![(
6138 2 : "test_file".to_string(),
6139 2 : Bytes::copy_from_slice(b"test_file"),
6140 2 : )]
6141 2 : .into_iter()
6142 2 : .collect(),
6143 2 : })
6144 2 : .unwrap();
6145 2 : modification.put_for_test(AUX_FILES_KEY, Value::Image(Bytes::from(buf)));
6146 2 : modification.commit(&ctx).await.unwrap();
6147 2 : }
6148 2 :
6149 2 : {
6150 2 : lsn += 8;
6151 2 : let mut modification = tline.begin_modification(lsn);
6152 2 : modification
6153 2 : .put_file("pg_logical/mappings/test1", b"first", &ctx)
6154 2 : .await
6155 2 : .unwrap();
6156 2 : modification.commit(&ctx).await.unwrap();
6157 2 : }
6158 2 :
6159 2 : assert_eq!(
6160 2 : tline.last_aux_file_policy.load(),
6161 2 : Some(AuxFilePolicy::V1),
6162 2 : "keep using v1 because there are aux files writting with v1"
6163 2 : );
6164 2 :
6165 2 : // we can still read the auxfile v1
6166 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
6167 2 : assert_eq!(
6168 2 : files.get("pg_logical/mappings/test1"),
6169 2 : Some(&bytes::Bytes::from_static(b"first"))
6170 2 : );
6171 2 : assert_eq!(
6172 2 : files.get("test_file"),
6173 2 : Some(&bytes::Bytes::from_static(b"test_file"))
6174 2 : );
6175 2 : }
6176 :
6177 : #[tokio::test]
6178 2 : async fn test_metadata_image_creation() -> anyhow::Result<()> {
6179 2 : let harness = TenantHarness::create("test_metadata_image_creation")?;
6180 8 : let (tenant, ctx) = harness.load().await;
6181 2 : let tline = tenant
6182 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6183 6 : .await?;
6184 2 :
6185 2 : const NUM_KEYS: usize = 1000;
6186 2 : const STEP: usize = 10000; // keys are spaced at base_key + idx * STEP; randomly updated, then scanned
6187 2 :
6188 2 : let cancel = CancellationToken::new();
6189 2 :
6190 2 : let base_key = Key::from_hex("620000000033333333444444445500000000").unwrap();
6191 2 : assert_eq!(base_key.field1, AUX_KEY_PREFIX); // in case someone accidentally changed the prefix...
6192 2 : let mut test_key = base_key;
6193 2 : let mut lsn = Lsn(0x10);
6194 2 :
6195 8 : async fn scan_with_statistics(
6196 8 : tline: &Timeline,
6197 8 : keyspace: &KeySpace,
6198 8 : lsn: Lsn,
6199 8 : ctx: &RequestContext,
6200 8 : ) -> anyhow::Result<(BTreeMap<Key, Result<Bytes, PageReconstructError>>, usize)> {
6201 8 : let mut reconstruct_state = ValuesReconstructState::default();
6202 8 : let res = tline
6203 8 : .get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx)
6204 1674 : .await?;
6205 8 : Ok((res, reconstruct_state.get_delta_layers_visited() as usize))
6206 8 : }
6207 2 :
6208 2 : #[allow(clippy::needless_range_loop)]
6209 2002 : for blknum in 0..NUM_KEYS {
6210 2000 : lsn = Lsn(lsn.0 + 0x10);
6211 2000 : test_key.field6 = (blknum * STEP) as u32;
6212 2000 : let mut writer = tline.writer().await;
6213 2000 : writer
6214 2000 : .put(
6215 2000 : test_key,
6216 2000 : lsn,
6217 2000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
6218 2000 : &ctx,
6219 2000 : )
6220 2 : .await?;
6221 2000 : writer.finish_write(lsn);
6222 2000 : drop(writer);
6223 2 : }
6224 2 :
6225 2 : let keyspace = KeySpace::single(base_key..base_key.add((NUM_KEYS * STEP) as u32));
6226 2 :
6227 22 : for iter in 1..=10 {
6228 20020 : for _ in 0..NUM_KEYS {
6229 20000 : lsn = Lsn(lsn.0 + 0x10);
6230 20000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
6231 20000 : test_key.field6 = (blknum * STEP) as u32;
6232 20000 : let mut writer = tline.writer().await;
6233 20000 : writer
6234 20000 : .put(
6235 20000 : test_key,
6236 20000 : lsn,
6237 20000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
6238 20000 : &ctx,
6239 20000 : )
6240 19 : .await?;
6241 20000 : writer.finish_write(lsn);
6242 20000 : drop(writer);
6243 2 : }
6244 2 :
6245 20 : tline.freeze_and_flush().await?;
6246 2 :
6247 20 : if iter % 5 == 0 {
6248 4 : let (_, before_delta_file_accessed) =
6249 1666 : scan_with_statistics(&tline, &keyspace, lsn, &ctx).await?;
6250 4 : tline
6251 4 : .compact(
6252 4 : &cancel,
6253 4 : {
6254 4 : let mut flags = EnumSet::new();
6255 4 : flags.insert(CompactFlags::ForceImageLayerCreation);
6256 4 : flags.insert(CompactFlags::ForceRepartition);
6257 4 : flags
6258 4 : },
6259 4 : &ctx,
6260 4 : )
6261 6512 : .await?;
6262 4 : let (_, after_delta_file_accessed) =
6263 8 : scan_with_statistics(&tline, &keyspace, lsn, &ctx).await?;
6264 4 : assert!(after_delta_file_accessed < before_delta_file_accessed, "after_delta_file_accessed={after_delta_file_accessed}, before_delta_file_accessed={before_delta_file_accessed}");
6265 2 : // Given that we already produced an image layer, no delta layers should be needed for the scan; we still allow a small threshold for unforeseen circumstances.
6266 4 : assert!(
6267 4 : after_delta_file_accessed <= 2,
6268 2 : "after_delta_file_accessed={after_delta_file_accessed}"
6269 2 : );
6270 16 : }
6271 2 : }
6272 2 :
6273 2 : Ok(())
6274 2 : }
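// A toy model of why `after_delta_file_accessed` drops once image layers are
// forced (illustrative only): a read walks layers newest-to-oldest and stops
// at the first image, so an image materialized near the read LSN
// short-circuits the walk through delta layers.
mod layer_walk_sketch {
    pub enum Layer {
        Delta,
        Image,
    }

    /// Count how many delta layers a read visits before hitting an image.
    pub fn deltas_visited(stack_newest_first: &[Layer]) -> usize {
        stack_newest_first
            .iter()
            .take_while(|l| matches!(l, Layer::Delta))
            .count()
    }

    #[test]
    fn image_layer_short_circuits() {
        use Layer::{Delta, Image};
        assert_eq!(deltas_visited(&[Delta, Delta, Image]), 2);
        // After compaction put an image on top: no delta layers visited.
        assert_eq!(deltas_visited(&[Image, Delta, Delta]), 0);
    }
}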
6275 :
6276 : #[tokio::test]
6277 3 : async fn test_vectored_missing_data_key_reads() -> anyhow::Result<()> {
6278 3 : let harness = TenantHarness::create("test_vectored_missing_data_key_reads")?;
6279 12 : let (tenant, ctx) = harness.load().await;
6280 3 :
6281 3 : let base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
6282 3 : let base_key_child = Key::from_hex("000000000033333333444444445500000001").unwrap();
6283 3 : let base_key_nonexist = Key::from_hex("000000000033333333444444445500000002").unwrap();
6284 3 :
6285 3 : let tline = tenant
6286 3 : .create_test_timeline_with_layers(
6287 3 : TIMELINE_ID,
6288 3 : Lsn(0x10),
6289 3 : DEFAULT_PG_VERSION,
6290 3 : &ctx,
6291 3 : Vec::new(), // delta layers
6292 3 : vec![(Lsn(0x20), vec![(base_key, test_img("data key 1"))])], // image layers
6293 3 : Lsn(0x20), // it's fine not to advance the LSN to 0x30 while reading at 0x30 below, because `get_vectored_impl` does not wait for the LSN
6294 3 : )
6295 24 : .await?;
6296 3 : tline.add_extra_test_dense_keyspace(KeySpace::single(base_key..(base_key_nonexist.next())));
6297 3 :
6298 3 : let child = tenant
6299 2 : .branch_timeline_test_with_layers(
6300 2 : &tline,
6301 2 : NEW_TIMELINE_ID,
6302 2 : Some(Lsn(0x20)),
6303 2 : &ctx,
6304 2 : Vec::new(), // delta layers
6305 2 : vec![(Lsn(0x30), vec![(base_key_child, test_img("data key 2"))])], // image layers
6306 2 : Lsn(0x30),
6307 2 : )
6308 7 : .await
6309 3 : .unwrap();
6310 2 :
6311 12 : async fn get_vectored_impl_wrapper(
6312 12 : tline: &Arc<Timeline>,
6313 12 : key: Key,
6314 12 : lsn: Lsn,
6315 12 : ctx: &RequestContext,
6316 12 : ) -> Result<Option<Bytes>, GetVectoredError> {
6317 12 : let mut reconstruct_state = ValuesReconstructState::new();
6318 12 : let mut res = tline
6319 12 : .get_vectored_impl(
6320 12 : KeySpace::single(key..key.next()),
6321 12 : lsn,
6322 12 : &mut reconstruct_state,
6323 12 : ctx,
6324 12 : )
6325 12 : .await?;
6326 6 : Ok(res.pop_last().map(|(k, v)| {
6327 6 : assert_eq!(k, key);
6328 6 : v.unwrap()
6329 6 : }))
6330 12 : }
6331 2 :
6332 2 : let lsn = Lsn(0x30);
6333 3 :
6334 3 : // test vectored get on parent timeline
6335 3 : assert_eq!(
6336 4 : get_vectored_impl_wrapper(&tline, base_key, lsn, &ctx).await?,
6337 3 : Some(test_img("data key 1"))
6338 3 : );
6339 3 : assert!(get_vectored_impl_wrapper(&tline, base_key_child, lsn, &ctx)
6340 3 : .await
6341 3 : .unwrap_err()
6342 2 : .is_missing_key_error());
6343 3 : assert!(
6344 2 : get_vectored_impl_wrapper(&tline, base_key_nonexist, lsn, &ctx)
6345 3 : .await
6346 3 : .unwrap_err()
6347 2 : .is_missing_key_error()
6348 3 : );
6349 3 :
6350 3 : // test vectored get on child timeline
6351 3 : assert_eq!(
6352 3 : get_vectored_impl_wrapper(&child, base_key, lsn, &ctx).await?,
6353 3 : Some(test_img("data key 1"))
6354 3 : );
6355 3 : assert_eq!(
6356 4 : get_vectored_impl_wrapper(&child, base_key_child, lsn, &ctx).await?,
6357 3 : Some(test_img("data key 2"))
6358 3 : );
6359 3 : assert!(
6360 2 : get_vectored_impl_wrapper(&child, base_key_nonexist, lsn, &ctx)
6361 3 : .await
6362 3 : .unwrap_err()
6363 2 : .is_missing_key_error()
6364 3 : );
6365 3 :
6366 3 : Ok(())
6367 3 : }
6368 :
6369 : #[tokio::test]
6370 2 : async fn test_vectored_missing_metadata_key_reads() -> anyhow::Result<()> {
6371 2 : let harness = TenantHarness::create("test_vectored_missing_metadata_key_reads")?;
6372 8 : let (tenant, ctx) = harness.load().await;
6373 2 :
6374 2 : let base_key = Key::from_hex("620000000033333333444444445500000000").unwrap();
6375 2 : let base_key_child = Key::from_hex("620000000033333333444444445500000001").unwrap();
6376 2 : let base_key_nonexist = Key::from_hex("620000000033333333444444445500000002").unwrap();
6377 2 : assert_eq!(base_key.field1, AUX_KEY_PREFIX); // in case someone accidentally changed the prefix...
6378 2 :
6379 2 : let tline = tenant
6380 2 : .create_test_timeline_with_layers(
6381 2 : TIMELINE_ID,
6382 2 : Lsn(0x10),
6383 2 : DEFAULT_PG_VERSION,
6384 2 : &ctx,
6385 2 : Vec::new(), // delta layers
6386 2 : vec![(Lsn(0x20), vec![(base_key, test_img("metadata key 1"))])], // image layers
6387 2 : Lsn(0x20), // it's fine not to advance the LSN to 0x30 while reading at 0x30 below, because `get_vectored_impl` does not wait for the LSN
6388 2 : )
6389 13 : .await?;
6390 2 :
6391 2 : let child = tenant
6392 2 : .branch_timeline_test_with_layers(
6393 2 : &tline,
6394 2 : NEW_TIMELINE_ID,
6395 2 : Some(Lsn(0x20)),
6396 2 : &ctx,
6397 2 : Vec::new(), // delta layers
6398 2 : vec![(
6399 2 : Lsn(0x30),
6400 2 : vec![(base_key_child, test_img("metadata key 2"))],
6401 2 : )], // image layers
6402 2 : Lsn(0x30),
6403 2 : )
6404 7 : .await
6405 2 : .unwrap();
6406 2 :
6407 12 : async fn get_vectored_impl_wrapper(
6408 12 : tline: &Arc<Timeline>,
6409 12 : key: Key,
6410 12 : lsn: Lsn,
6411 12 : ctx: &RequestContext,
6412 12 : ) -> Result<Option<Bytes>, GetVectoredError> {
6413 12 : let mut reconstruct_state = ValuesReconstructState::new();
6414 12 : let mut res = tline
6415 12 : .get_vectored_impl(
6416 12 : KeySpace::single(key..key.next()),
6417 12 : lsn,
6418 12 : &mut reconstruct_state,
6419 12 : ctx,
6420 12 : )
6421 8 : .await?;
6422 12 : Ok(res.pop_last().map(|(k, v)| {
6423 4 : assert_eq!(k, key);
6424 4 : v.unwrap()
6425 12 : }))
6426 12 : }
6427 2 :
6428 2 : let lsn = Lsn(0x30);
6429 2 :
6430 2 : // test vectored get on parent timeline
6431 2 : assert_eq!(
6432 4 : get_vectored_impl_wrapper(&tline, base_key, lsn, &ctx).await?,
6433 2 : Some(test_img("metadata key 1"))
6434 2 : );
6435 2 : assert_eq!(
6436 2 : get_vectored_impl_wrapper(&tline, base_key_child, lsn, &ctx).await?,
6437 2 : None
6438 2 : );
6439 2 : assert_eq!(
6440 2 : get_vectored_impl_wrapper(&tline, base_key_nonexist, lsn, &ctx).await?,
6441 2 : None
6442 2 : );
6443 2 :
6444 2 : // test vectored get on child timeline
6445 2 : assert_eq!(
6446 2 : get_vectored_impl_wrapper(&child, base_key, lsn, &ctx).await?,
6447 2 : None
6448 2 : );
6449 2 : assert_eq!(
6450 4 : get_vectored_impl_wrapper(&child, base_key_child, lsn, &ctx).await?,
6451 2 : Some(test_img("metadata key 2"))
6452 2 : );
6453 2 : assert_eq!(
6454 2 : get_vectored_impl_wrapper(&child, base_key_nonexist, lsn, &ctx).await?,
6455 2 : None
6456 2 : );
6457 2 :
6458 2 : Ok(())
6459 2 : }
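// The two tests above differ only in the key range, and that difference is
// the point: a sketch of the assumed contract, where a vectored read of a
// missing *data* key is an error while a missing *metadata* (aux) key is
// simply absent from the result. Types below are illustrative stand-ins.
mod missing_key_sketch {
    use std::collections::BTreeMap;

    #[derive(Debug, PartialEq)]
    pub enum ReadError {
        MissingKey,
    }

    pub fn read(
        store: &BTreeMap<u32, &'static str>,
        key: u32,
        is_metadata: bool,
    ) -> Result<Option<&'static str>, ReadError> {
        match store.get(&key).copied() {
            Some(v) => Ok(Some(v)),
            None if is_metadata => Ok(None), // metadata keys tolerate absence
            None => Err(ReadError::MissingKey), // data keys must exist
        }
    }

    #[test]
    fn data_key_errors_metadata_key_is_none() {
        let store = BTreeMap::from([(1, "img")]);
        assert_eq!(read(&store, 2, true), Ok(None));
        assert_eq!(read(&store, 2, false), Err(ReadError::MissingKey));
    }
}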
6460 :
6461 12 : async fn get_vectored_impl_wrapper(
6462 12 : tline: &Arc<Timeline>,
6463 12 : key: Key,
6464 12 : lsn: Lsn,
6465 12 : ctx: &RequestContext,
6466 12 : ) -> Result<Option<Bytes>, GetVectoredError> {
6467 12 : let mut reconstruct_state = ValuesReconstructState::new();
6468 12 : let mut res = tline
6469 12 : .get_vectored_impl(
6470 12 : KeySpace::single(key..key.next()),
6471 12 : lsn,
6472 12 : &mut reconstruct_state,
6473 12 : ctx,
6474 12 : )
6475 13 : .await?;
6476 12 : Ok(res.pop_last().map(|(k, v)| {
6477 8 : assert_eq!(k, key);
6478 8 : v.unwrap()
6479 12 : }))
6480 12 : }
6481 :
6482 : #[tokio::test]
6483 2 : async fn test_metadata_tombstone_reads() -> anyhow::Result<()> {
6484 2 : let harness = TenantHarness::create("test_metadata_tombstone_reads")?;
6485 5 : let (tenant, ctx) = harness.load().await;
6486 2 : let key0 = Key::from_hex("620000000033333333444444445500000000").unwrap();
6487 2 : let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap();
6488 2 : let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap();
6489 2 : let key3 = Key::from_hex("620000000033333333444444445500000003").unwrap();
6490 2 :
6491 2 : // We emulate the situation where the compaction algorithm has created an image layer that removes the tombstones:
6492 2 : // Lsn 0x30 key0, key3, no key1+key2
6493 2 : // Lsn 0x20 key1+key2 tombstones
6494 2 : // Lsn 0x10 key1 in image, key2 in delta
6495 2 : let tline = tenant
6496 2 : .create_test_timeline_with_layers(
6497 2 : TIMELINE_ID,
6498 2 : Lsn(0x10),
6499 2 : DEFAULT_PG_VERSION,
6500 2 : &ctx,
6501 2 : // delta layers
6502 2 : vec![
6503 2 : vec![(key2, Lsn(0x10), Value::Image(test_img("metadata key 2")))],
6504 2 : vec![(key1, Lsn(0x20), Value::Image(Bytes::new()))],
6505 2 : vec![(key2, Lsn(0x20), Value::Image(Bytes::new()))],
6506 2 : ],
6507 2 : // image layers
6508 2 : vec![
6509 2 : (Lsn(0x10), vec![(key1, test_img("metadata key 1"))]),
6510 2 : (
6511 2 : Lsn(0x30),
6512 2 : vec![
6513 2 : (key0, test_img("metadata key 0")),
6514 2 : (key3, test_img("metadata key 3")),
6515 2 : ],
6516 2 : ),
6517 2 : ],
6518 2 : Lsn(0x30),
6519 2 : )
6520 40 : .await?;
6521 2 :
6522 2 : let lsn = Lsn(0x30);
6523 2 : let old_lsn = Lsn(0x20);
6524 2 :
6525 2 : assert_eq!(
6526 4 : get_vectored_impl_wrapper(&tline, key0, lsn, &ctx).await?,
6527 2 : Some(test_img("metadata key 0"))
6528 2 : );
6529 2 : assert_eq!(
6530 2 : get_vectored_impl_wrapper(&tline, key1, lsn, &ctx).await?,
6531 2 : None,
6532 2 : );
6533 2 : assert_eq!(
6534 2 : get_vectored_impl_wrapper(&tline, key2, lsn, &ctx).await?,
6535 2 : None,
6536 2 : );
6537 2 : assert_eq!(
6538 4 : get_vectored_impl_wrapper(&tline, key1, old_lsn, &ctx).await?,
6539 2 : Some(Bytes::new()),
6540 2 : );
6541 2 : assert_eq!(
6542 4 : get_vectored_impl_wrapper(&tline, key2, old_lsn, &ctx).await?,
6543 2 : Some(Bytes::new()),
6544 2 : );
6545 2 : assert_eq!(
6546 2 : get_vectored_impl_wrapper(&tline, key3, lsn, &ctx).await?,
6547 2 : Some(test_img("metadata key 3"))
6548 2 : );
6549 2 :
6550 2 : Ok(())
6551 2 : }
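// A small model of the tombstone reads asserted above (illustrative, not the
// real reconstruct path): values are versioned by LSN, an empty image is a
// tombstone, and a later image layer that omits the key makes it read as
// absent. Reading *at* the tombstone LSN still returns the empty image.
mod tombstone_read_sketch {
    use std::collections::BTreeMap;

    /// lsn -> value; `Some("")` stands in for a `Bytes::new()` tombstone,
    /// `None` marks the key being dropped by a later image layer.
    type Versions = BTreeMap<u64, Option<&'static str>>;

    pub fn read_at(v: &Versions, lsn: u64) -> Option<&'static str> {
        v.range(..=lsn).next_back().and_then(|(_, val)| *val)
    }

    #[test]
    fn tombstone_then_drop() {
        let mut key1 = Versions::new();
        key1.insert(0x10, Some("metadata key 1"));
        key1.insert(0x20, Some("")); // tombstone written at 0x20
        key1.insert(0x30, None); // image layer at 0x30 dropped the key
        assert_eq!(read_at(&key1, 0x20), Some("")); // the tombstone itself
        assert_eq!(read_at(&key1, 0x30), None); // absent after the drop
    }
}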
6552 :
6553 : #[tokio::test]
6554 2 : async fn test_metadata_tombstone_image_creation() {
6555 2 : let harness = TenantHarness::create("test_metadata_tombstone_image_creation").unwrap();
6556 8 : let (tenant, ctx) = harness.load().await;
6557 2 :
6558 2 : let key0 = Key::from_hex("620000000033333333444444445500000000").unwrap();
6559 2 : let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap();
6560 2 : let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap();
6561 2 : let key3 = Key::from_hex("620000000033333333444444445500000003").unwrap();
6562 2 :
6563 2 : let tline = tenant
6564 2 : .create_test_timeline_with_layers(
6565 2 : TIMELINE_ID,
6566 2 : Lsn(0x10),
6567 2 : DEFAULT_PG_VERSION,
6568 2 : &ctx,
6569 2 : // delta layers
6570 2 : vec![
6571 2 : vec![(key2, Lsn(0x10), Value::Image(test_img("metadata key 2")))],
6572 2 : vec![(key1, Lsn(0x20), Value::Image(Bytes::new()))],
6573 2 : vec![(key2, Lsn(0x20), Value::Image(Bytes::new()))],
6574 2 : vec![
6575 2 : (key0, Lsn(0x30), Value::Image(test_img("metadata key 0"))),
6576 2 : (key3, Lsn(0x30), Value::Image(test_img("metadata key 3"))),
6577 2 : ],
6578 2 : ],
6579 2 : // image layers
6580 2 : vec![(Lsn(0x10), vec![(key1, test_img("metadata key 1"))])],
6581 2 : Lsn(0x30),
6582 2 : )
6583 37 : .await
6584 2 : .unwrap();
6585 2 :
6586 2 : let cancel = CancellationToken::new();
6587 2 :
6588 2 : tline
6589 2 : .compact(
6590 2 : &cancel,
6591 2 : {
6592 2 : let mut flags = EnumSet::new();
6593 2 : flags.insert(CompactFlags::ForceImageLayerCreation);
6594 2 : flags.insert(CompactFlags::ForceRepartition);
6595 2 : flags
6596 2 : },
6597 2 : &ctx,
6598 2 : )
6599 49 : .await
6600 2 : .unwrap();
6601 2 :
6602 2 : // Image layers are created at last_record_lsn
6603 2 : let images = tline
6604 2 : .inspect_image_layers(Lsn(0x30), &ctx)
6605 8 : .await
6606 2 : .unwrap()
6607 2 : .into_iter()
6608 20 : .filter(|(k, _)| k.is_metadata_key())
6609 2 : .collect::<Vec<_>>();
6610 2 : assert_eq!(images.len(), 2); // the image layer should contain only the two live keys; tombstones are removed.
6611 2 : }
6612 :
6613 : #[tokio::test]
6614 2 : async fn test_metadata_tombstone_empty_image_creation() {
6615 2 : let harness =
6616 2 : TenantHarness::create("test_metadata_tombstone_empty_image_creation").unwrap();
6617 8 : let (tenant, ctx) = harness.load().await;
6618 2 :
6619 2 : let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap();
6620 2 : let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap();
6621 2 :
6622 2 : let tline = tenant
6623 2 : .create_test_timeline_with_layers(
6624 2 : TIMELINE_ID,
6625 2 : Lsn(0x10),
6626 2 : DEFAULT_PG_VERSION,
6627 2 : &ctx,
6628 2 : // delta layers
6629 2 : vec![
6630 2 : vec![(key2, Lsn(0x10), Value::Image(test_img("metadata key 2")))],
6631 2 : vec![(key1, Lsn(0x20), Value::Image(Bytes::new()))],
6632 2 : vec![(key2, Lsn(0x20), Value::Image(Bytes::new()))],
6633 2 : ],
6634 2 : // image layers
6635 2 : vec![(Lsn(0x10), vec![(key1, test_img("metadata key 1"))])],
6636 2 : Lsn(0x30),
6637 2 : )
6638 31 : .await
6639 2 : .unwrap();
6640 2 :
6641 2 : let cancel = CancellationToken::new();
6642 2 :
6643 2 : tline
6644 2 : .compact(
6645 2 : &cancel,
6646 2 : {
6647 2 : let mut flags = EnumSet::new();
6648 2 : flags.insert(CompactFlags::ForceImageLayerCreation);
6649 2 : flags.insert(CompactFlags::ForceRepartition);
6650 2 : flags
6651 2 : },
6652 2 : &ctx,
6653 2 : )
6654 37 : .await
6655 2 : .unwrap();
6656 2 :
6657 2 : // Image layers are created at last_record_lsn
6658 2 : let images = tline
6659 2 : .inspect_image_layers(Lsn(0x30), &ctx)
6660 4 : .await
6661 2 : .unwrap()
6662 2 : .into_iter()
6663 16 : .filter(|(k, _)| k.is_metadata_key())
6664 2 : .collect::<Vec<_>>();
6665 2 : assert_eq!(images.len(), 0); // the image layer must not contain tombstones; with no live keys left, it is not created at all
6666 2 : }
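// Both image-creation tests above hinge on the same filtering step; here is
// a hedged sketch of its shape (the real logic lives in compaction): when
// building a metadata image layer, keys whose latest value is empty are
// dropped, and if nothing survives, no image entries are emitted at all.
mod image_filter_sketch {
    use std::collections::BTreeMap;

    pub fn build_image(
        latest: &BTreeMap<&'static str, Vec<u8>>,
    ) -> BTreeMap<&'static str, Vec<u8>> {
        latest
            .iter()
            .filter(|(_, v)| !v.is_empty()) // empty value == tombstone
            .map(|(k, v)| (*k, v.clone()))
            .collect()
    }

    #[test]
    fn tombstones_are_dropped() {
        let latest = BTreeMap::from([
            ("key0", b"metadata key 0".to_vec()),
            ("key1", Vec::new()), // tombstoned
            ("key2", Vec::new()), // tombstoned
            ("key3", b"metadata key 3".to_vec()),
        ]);
        // Two live keys survive, mirroring the `images.len() == 2` assert;
        // with only tombstones in `latest`, the result would be empty.
        assert_eq!(build_image(&latest).len(), 2);
    }
}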
6667 :
6668 : #[tokio::test]
6669 2 : async fn test_simple_bottom_most_compaction_images() -> anyhow::Result<()> {
6670 2 : let harness = TenantHarness::create("test_simple_bottom_most_compaction_images")?;
6671 8 : let (tenant, ctx) = harness.load().await;
6672 2 :
6673 104 : fn get_key(id: u32) -> Key {
6674 104 : // using aux key here b/c they are guaranteed to be inside `collect_keyspace`.
6675 104 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
6676 104 : key.field6 = id;
6677 104 : key
6678 104 : }
6679 2 :
6680 2 : // We create one bottom-most image layer, a delta layer D1 crossing the GC horizon, D2 below the horizon, and D3 above the horizon.
6681 2 : //
6682 2 : // | D1 | | D3 |
6683 2 : // -| |-- gc horizon -----------------
6684 2 : // | | | D2 |
6685 2 : // --------- img layer ------------------
6686 2 : //
6687 2 : // What we should expect from this compaction is:
6688 2 : // | Part of D1 | | D3 |
6689 2 : // --------- img layer with D1+D2 at GC horizon------------------
6690 2 :
6691 2 : // img layer at 0x10
6692 2 : let img_layer = (0..10)
6693 20 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
6694 2 : .collect_vec();
6695 2 :
6696 2 : let delta1 = vec![
6697 2 : (
6698 2 : get_key(1),
6699 2 : Lsn(0x20),
6700 2 : Value::Image(Bytes::from("value 1@0x20")),
6701 2 : ),
6702 2 : (
6703 2 : get_key(2),
6704 2 : Lsn(0x30),
6705 2 : Value::Image(Bytes::from("value 2@0x30")),
6706 2 : ),
6707 2 : (
6708 2 : get_key(3),
6709 2 : Lsn(0x40),
6710 2 : Value::Image(Bytes::from("value 3@0x40")),
6711 2 : ),
6712 2 : ];
6713 2 : let delta2 = vec![
6714 2 : (
6715 2 : get_key(5),
6716 2 : Lsn(0x20),
6717 2 : Value::Image(Bytes::from("value 5@0x20")),
6718 2 : ),
6719 2 : (
6720 2 : get_key(6),
6721 2 : Lsn(0x20),
6722 2 : Value::Image(Bytes::from("value 6@0x20")),
6723 2 : ),
6724 2 : ];
6725 2 : let delta3 = vec![
6726 2 : (
6727 2 : get_key(8),
6728 2 : Lsn(0x40),
6729 2 : Value::Image(Bytes::from("value 8@0x40")),
6730 2 : ),
6731 2 : (
6732 2 : get_key(9),
6733 2 : Lsn(0x40),
6734 2 : Value::Image(Bytes::from("value 9@0x40")),
6735 2 : ),
6736 2 : ];
6737 2 :
6738 2 : let tline = tenant
6739 2 : .create_test_timeline_with_layers(
6740 2 : TIMELINE_ID,
6741 2 : Lsn(0x10),
6742 2 : DEFAULT_PG_VERSION,
6743 2 : &ctx,
6744 2 : vec![delta1, delta2, delta3], // delta layers
6745 2 : vec![(Lsn(0x10), img_layer)], // image layers
6746 2 : Lsn(0x50),
6747 2 : )
6748 49 : .await?;
6749 2 : {
6750 2 : // Update GC info
6751 2 : let mut guard = tline.gc_info.write().unwrap();
6752 2 : guard.cutoffs.pitr = Lsn(0x30);
6753 2 : guard.cutoffs.horizon = Lsn(0x30);
6754 2 : }
6755 2 :
6756 2 : let expected_result = [
6757 2 : Bytes::from_static(b"value 0@0x10"),
6758 2 : Bytes::from_static(b"value 1@0x20"),
6759 2 : Bytes::from_static(b"value 2@0x30"),
6760 2 : Bytes::from_static(b"value 3@0x40"),
6761 2 : Bytes::from_static(b"value 4@0x10"),
6762 2 : Bytes::from_static(b"value 5@0x20"),
6763 2 : Bytes::from_static(b"value 6@0x20"),
6764 2 : Bytes::from_static(b"value 7@0x10"),
6765 2 : Bytes::from_static(b"value 8@0x40"),
6766 2 : Bytes::from_static(b"value 9@0x40"),
6767 2 : ];
6768 2 :
6769 20 : for (idx, expected) in expected_result.iter().enumerate() {
6770 20 : assert_eq!(
6771 20 : tline
6772 20 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
6773 17 : .await
6774 20 : .unwrap(),
6775 2 : expected
6776 2 : );
6777 2 : }
6778 2 :
6779 2 : let cancel = CancellationToken::new();
6780 52 : tline.compact_with_gc(&cancel, &ctx).await.unwrap();
6781 2 :
6782 20 : for (idx, expected) in expected_result.iter().enumerate() {
6783 20 : assert_eq!(
6784 20 : tline
6785 20 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
6786 8 : .await
6787 20 : .unwrap(),
6788 2 : expected
6789 2 : );
6790 2 : }
6791 2 :
6792 2 : // Check if the image layer at the GC horizon contains exactly what we want
6793 2 : let image_at_gc_horizon = tline
6794 2 : .inspect_image_layers(Lsn(0x30), &ctx)
6795 2 : .await
6796 2 : .unwrap()
6797 2 : .into_iter()
6798 36 : .filter(|(k, _)| k.is_metadata_key())
6799 2 : .collect::<Vec<_>>();
6800 2 :
6801 2 : assert_eq!(image_at_gc_horizon.len(), 10);
6802 2 : let expected_result = [
6803 2 : Bytes::from_static(b"value 0@0x10"),
6804 2 : Bytes::from_static(b"value 1@0x20"),
6805 2 : Bytes::from_static(b"value 2@0x30"),
6806 2 : Bytes::from_static(b"value 3@0x10"),
6807 2 : Bytes::from_static(b"value 4@0x10"),
6808 2 : Bytes::from_static(b"value 5@0x20"),
6809 2 : Bytes::from_static(b"value 6@0x20"),
6810 2 : Bytes::from_static(b"value 7@0x10"),
6811 2 : Bytes::from_static(b"value 8@0x10"),
6812 2 : Bytes::from_static(b"value 9@0x10"),
6813 2 : ];
6814 22 : for idx in 0..10 {
6815 20 : assert_eq!(
6816 20 : image_at_gc_horizon[idx],
6817 20 : (get_key(idx as u32), expected_result[idx].clone())
6818 20 : );
6819 2 : }
6820 2 :
6821 2 : // Check if old layers are removed / new layers have the expected LSN
6822 2 : let mut all_layers = tline.inspect_historic_layers().await.unwrap();
6823 4 : all_layers.sort_by(|k1, k2| {
6824 4 : (
6825 4 : k1.is_delta,
6826 4 : k1.key_range.start,
6827 4 : k1.key_range.end,
6828 4 : k1.lsn_range.start,
6829 4 : k1.lsn_range.end,
6830 4 : )
6831 4 : .cmp(&(
6832 4 : k2.is_delta,
6833 4 : k2.key_range.start,
6834 4 : k2.key_range.end,
6835 4 : k2.lsn_range.start,
6836 4 : k2.lsn_range.end,
6837 4 : ))
6838 4 : });
6839 2 : assert_eq!(
6840 2 : all_layers,
6841 2 : vec![
6842 2 : // Image layer at GC horizon
6843 2 : PersistentLayerKey {
6844 2 : key_range: Key::MIN..get_key(10),
6845 2 : lsn_range: Lsn(0x30)..Lsn(0x31),
6846 2 : is_delta: false
6847 2 : },
6848 2 : // The delta layer that is cut in the middle
6849 2 : PersistentLayerKey {
6850 2 : key_range: get_key(3)..get_key(4),
6851 2 : lsn_range: Lsn(0x30)..Lsn(0x41),
6852 2 : is_delta: true
6853 2 : },
6854 2 : // The delta layer we created and should not be picked for the compaction
6855 2 : PersistentLayerKey {
6856 2 : key_range: get_key(8)..get_key(10),
6857 2 : lsn_range: Lsn(0x40)..Lsn(0x41),
6858 2 : is_delta: true
6859 2 : }
6860 2 : ]
6861 2 : );
6862 2 :
6863 2 : Ok(())
6864 2 : }
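// A worked sketch of how the image at the GC horizon above is derived
// (illustrative; in this test every delta carries a full `Value::Image`):
// per key, take the newest image at or below the horizon; anything strictly
// above the horizon survives as delta layers, like the 0x40 entries.
mod gc_horizon_image_sketch {
    pub fn image_at_horizon(versions: &[(u64, &'static str)], horizon: u64) -> &'static str {
        versions
            .iter()
            .filter(|(lsn, _)| *lsn <= horizon)
            .max_by_key(|(lsn, _)| *lsn)
            .map(|(_, v)| *v)
            .expect("every key has a base image at 0x10")
    }

    #[test]
    fn horizon_image_matches_the_asserts_above() {
        // key 3: base image at 0x10, newer image at 0x40 (above horizon 0x30)
        let key3 = [(0x10, "value 3@0x10"), (0x40, "value 3@0x40")];
        assert_eq!(image_at_horizon(&key3, 0x30), "value 3@0x10");
        // key 2: the 0x30 image is exactly at the horizon, so it is folded in
        let key2 = [(0x10, "value 2@0x10"), (0x30, "value 2@0x30")];
        assert_eq!(image_at_horizon(&key2, 0x30), "value 2@0x30");
    }
}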
6865 :
6866 : #[tokio::test]
6867 2 : async fn test_neon_test_record() -> anyhow::Result<()> {
6868 2 : let harness = TenantHarness::create("test_neon_test_record")?;
6869 8 : let (tenant, ctx) = harness.load().await;
6870 2 :
6871 24 : fn get_key(id: u32) -> Key {
6872 24 : // using aux keys here b/c they are guaranteed to be inside `collect_keyspace`.
6873 24 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
6874 24 : key.field6 = id;
6875 24 : key
6876 24 : }
6877 2 :
6878 2 : let delta1 = vec![
6879 2 : (
6880 2 : get_key(1),
6881 2 : Lsn(0x20),
6882 2 : Value::WalRecord(NeonWalRecord::wal_append(",0x20")),
6883 2 : ),
6884 2 : (
6885 2 : get_key(1),
6886 2 : Lsn(0x30),
6887 2 : Value::WalRecord(NeonWalRecord::wal_append(",0x30")),
6888 2 : ),
6889 2 : (get_key(2), Lsn(0x10), Value::Image("0x10".into())),
6890 2 : (
6891 2 : get_key(2),
6892 2 : Lsn(0x20),
6893 2 : Value::WalRecord(NeonWalRecord::wal_append(",0x20")),
6894 2 : ),
6895 2 : (
6896 2 : get_key(2),
6897 2 : Lsn(0x30),
6898 2 : Value::WalRecord(NeonWalRecord::wal_append(",0x30")),
6899 2 : ),
6900 2 : (get_key(3), Lsn(0x10), Value::Image("0x10".into())),
6901 2 : (
6902 2 : get_key(3),
6903 2 : Lsn(0x20),
6904 2 : Value::WalRecord(NeonWalRecord::wal_clear()),
6905 2 : ),
6906 2 : (get_key(4), Lsn(0x10), Value::Image("0x10".into())),
6907 2 : (
6908 2 : get_key(4),
6909 2 : Lsn(0x20),
6910 2 : Value::WalRecord(NeonWalRecord::wal_init()),
6911 2 : ),
6912 2 : ];
6913 2 : let image1 = vec![(get_key(1), "0x10".into())];
6914 2 :
6915 2 : let tline = tenant
6916 2 : .create_test_timeline_with_layers(
6917 2 : TIMELINE_ID,
6918 2 : Lsn(0x10),
6919 2 : DEFAULT_PG_VERSION,
6920 2 : &ctx,
6921 2 : vec![delta1], // delta layers
6922 2 : vec![(Lsn(0x10), image1)], // image layers
6923 2 : Lsn(0x50),
6924 2 : )
6925 19 : .await?;
6926 2 :
6927 2 : assert_eq!(
6928 8 : tline.get(get_key(1), Lsn(0x50), &ctx).await?,
6929 2 : Bytes::from_static(b"0x10,0x20,0x30")
6930 2 : );
6931 2 : assert_eq!(
6932 2 : tline.get(get_key(2), Lsn(0x50), &ctx).await?,
6933 2 : Bytes::from_static(b"0x10,0x20,0x30")
6934 2 : );
6935 2 :
6936 2 : // TODO: enable the checks below once the "Neon WAL redo requires base image" restriction is lifted.
6937 2 :
6938 2 : // assert_eq!(tline.get(get_key(3), Lsn(0x50), &ctx).await?, Bytes::new());
6939 2 : // assert_eq!(tline.get(get_key(4), Lsn(0x50), &ctx).await?, Bytes::new());
6940 2 :
6941 2 : Ok(())
6942 2 : }
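// A string-level model of the three test-record kinds used above (assumed
// semantics mirroring the asserts, not the real WAL-redo path): `wal_append`
// concatenates onto the materialized value, `wal_clear` empties it, and
// `wal_init` starts from a fresh empty value without needing a base image.
mod test_record_sketch {
    pub enum Rec {
        Append(&'static str),
        Clear,
        Init,
    }

    pub fn replay(base: Option<&str>, recs: &[Rec]) -> String {
        let mut page = base.unwrap_or("").to_string();
        for rec in recs {
            match rec {
                Rec::Append(s) => page.push_str(s),
                Rec::Clear => page.clear(),
                Rec::Init => page = String::new(),
            }
        }
        page
    }

    #[test]
    fn appends_concatenate() {
        let page = replay(Some("0x10"), &[Rec::Append(",0x20"), Rec::Append(",0x30")]);
        assert_eq!(page, "0x10,0x20,0x30"); // matches the asserts above
        assert_eq!(replay(Some("0x10"), &[Rec::Clear]), "");
        assert_eq!(replay(None, &[Rec::Init, Rec::Append("x")]), "x");
    }
}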
6943 :
6944 : #[tokio::test]
6945 2 : async fn test_lsn_lease() -> anyhow::Result<()> {
6946 8 : let (tenant, ctx) = TenantHarness::create("test_lsn_lease")?.load().await;
6947 2 : let key = Key::from_hex("010000000033333333444444445500000000").unwrap();
6948 2 :
6949 2 : let end_lsn = Lsn(0x100);
6950 2 : let image_layers = (0x20..=0x90)
6951 2 : .step_by(0x10)
6952 16 : .map(|n| {
6953 16 : (
6954 16 : Lsn(n),
6955 16 : vec![(key, test_img(&format!("data key at {:x}", n)))],
6956 16 : )
6957 16 : })
6958 2 : .collect();
6959 2 :
6960 2 : let timeline = tenant
6961 2 : .create_test_timeline_with_layers(
6962 2 : TIMELINE_ID,
6963 2 : Lsn(0x10),
6964 2 : DEFAULT_PG_VERSION,
6965 2 : &ctx,
6966 2 : Vec::new(),
6967 2 : image_layers,
6968 2 : end_lsn,
6969 2 : )
6970 62 : .await?;
6971 2 :
6972 2 : let leased_lsns = [0x30, 0x50, 0x70];
6973 2 : let mut leases = Vec::new();
6974 6 : let _: anyhow::Result<_> = leased_lsns.iter().try_for_each(|n| {
6975 6 : leases.push(timeline.make_lsn_lease(Lsn(*n), timeline.get_lsn_lease_length(), &ctx)?);
6976 6 : Ok(())
6977 6 : });
6978 2 :
6979 2 : // Renewing with shorter lease should not change the lease.
6980 2 : let updated_lease_0 =
6981 2 : timeline.make_lsn_lease(Lsn(leased_lsns[0]), Duration::from_secs(0), &ctx)?;
6982 2 : assert_eq!(updated_lease_0.valid_until, leases[0].valid_until);
6983 2 :
6984 2 : // Renewing with a long lease should renew lease with later expiration time.
6985 2 : let updated_lease_1 = timeline.make_lsn_lease(
6986 2 : Lsn(leased_lsns[1]),
6987 2 : timeline.get_lsn_lease_length() * 2,
6988 2 : &ctx,
6989 2 : )?;
6990 2 :
6991 2 : assert!(updated_lease_1.valid_until > leases[1].valid_until);
6992 2 :
6993 2 : // Force set disk consistent lsn so we can get the cutoff at `end_lsn`.
6994 2 : info!(
6995 2 : "latest_gc_cutoff_lsn: {}",
6996 0 : *timeline.get_latest_gc_cutoff_lsn()
6997 2 : );
6998 2 : timeline.force_set_disk_consistent_lsn(end_lsn);
6999 2 :
7000 2 : let res = tenant
7001 2 : .gc_iteration(
7002 2 : Some(TIMELINE_ID),
7003 2 : 0,
7004 2 : Duration::ZERO,
7005 2 : &CancellationToken::new(),
7006 2 : &ctx,
7007 2 : )
7008 2 : .await?;
7009 2 :
7010 2 : // Leases keep everything <= Lsn(0x70):
7011 2 : // 0/10: initdb layer
7012 2 : // (0/20..=0/70).step_by(0x10): image layers added when creating the timeline.
7013 2 : assert_eq!(res.layers_needed_by_leases, 7);
7014 2 : // Keeping 0/90 b/c it is the latest layer.
7015 2 : assert_eq!(res.layers_not_updated, 1);
7016 2 : // Removed 0/80.
7017 2 : assert_eq!(res.layers_removed, 1);
7018 2 :
7019 2 : // Make a lease on an already GC-ed LSN.
7020 2 : // 0/80 has no valid lease and is below latest_gc_cutoff
7021 2 : assert!(Lsn(0x80) < *timeline.get_latest_gc_cutoff_lsn());
7022 2 : let res = timeline.make_lsn_lease(Lsn(0x80), timeline.get_lsn_lease_length(), &ctx);
7023 2 : assert!(res.is_err());
7024 2 :
7025 2 : // Should still be able to renew a currently valid lease
7026 2 : // Assumption: the original lease for 0/50 is still valid.
7027 2 : let _ =
7028 2 : timeline.make_lsn_lease(Lsn(leased_lsns[1]), timeline.get_lsn_lease_length(), &ctx)?;
7029 2 :
7030 2 : Ok(())
7031 2 : }
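// A sketch of the renewal rule the lease test checks (illustrative types,
// not the pageserver's lease machinery): a renewal may only move
// `valid_until` forward, so renewing with a shorter duration is a no-op.
mod lsn_lease_sketch {
    use std::time::{Duration, SystemTime};

    pub struct Lease {
        pub valid_until: SystemTime,
    }

    pub fn renew(existing: &mut Lease, now: SystemTime, length: Duration) {
        let candidate = now + length;
        if candidate > existing.valid_until {
            existing.valid_until = candidate;
        }
    }

    #[test]
    fn shorter_renewal_is_a_noop() {
        let now = SystemTime::now();
        let mut lease = Lease { valid_until: now + Duration::from_secs(600) };
        renew(&mut lease, now, Duration::ZERO); // shorter: unchanged
        assert_eq!(lease.valid_until, now + Duration::from_secs(600));
        renew(&mut lease, now, Duration::from_secs(1200)); // longer: extends
        assert_eq!(lease.valid_until, now + Duration::from_secs(1200));
    }
}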
7032 :
7033 : #[tokio::test]
7034 2 : async fn test_simple_bottom_most_compaction_deltas() -> anyhow::Result<()> {
7035 2 : let harness = TenantHarness::create("test_simple_bottom_most_compaction_deltas")?;
7036 8 : let (tenant, ctx) = harness.load().await;
7037 2 :
7038 114 : fn get_key(id: u32) -> Key {
7039 114 : // using aux key here b/c they are guaranteed to be inside `collect_keyspace`.
7040 114 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
7041 114 : key.field6 = id;
7042 114 : key
7043 114 : }
7044 2 :
7045 2 : // We create one bottom-most image layer, a delta layer D1 crossing the GC horizon, D2 below the horizon, and D3 above the horizon.
7046 2 : //
7047 2 : // | D1 | | D3 |
7048 2 : // -| |-- gc horizon -----------------
7049 2 : // | | | D2 |
7050 2 : // --------- img layer ------------------
7051 2 : //
7052 2 : // What we should expect from this compaction is:
7053 2 : // | Part of D1 | | D3 |
7054 2 : // --------- img layer with D1+D2 at GC horizon------------------
7055 2 :
7056 2 : // img layer at 0x10
7057 2 : let img_layer = (0..10)
7058 20 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
7059 2 : .collect_vec();
7060 2 :
7061 2 : let delta1 = vec![
7062 2 : (
7063 2 : get_key(1),
7064 2 : Lsn(0x20),
7065 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
7066 2 : ),
7067 2 : (
7068 2 : get_key(2),
7069 2 : Lsn(0x30),
7070 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
7071 2 : ),
7072 2 : (
7073 2 : get_key(3),
7074 2 : Lsn(0x40),
7075 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x40")),
7076 2 : ),
7077 2 : ];
7078 2 : let delta2 = vec![
7079 2 : (
7080 2 : get_key(5),
7081 2 : Lsn(0x20),
7082 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
7083 2 : ),
7084 2 : (
7085 2 : get_key(6),
7086 2 : Lsn(0x20),
7087 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
7088 2 : ),
7089 2 : ];
7090 2 : let delta3 = vec![
7091 2 : (
7092 2 : get_key(8),
7093 2 : Lsn(0x40),
7094 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x40")),
7095 2 : ),
7096 2 : (
7097 2 : get_key(9),
7098 2 : Lsn(0x40),
7099 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x40")),
7100 2 : ),
7101 2 : ];
7102 2 :
7103 2 : let tline = tenant
7104 2 : .create_test_timeline_with_layers(
7105 2 : TIMELINE_ID,
7106 2 : Lsn(0x10),
7107 2 : DEFAULT_PG_VERSION,
7108 2 : &ctx,
7109 2 : vec![delta1, delta2, delta3], // delta layers
7110 2 : vec![(Lsn(0x10), img_layer)], // image layers
7111 2 : Lsn(0x50),
7112 2 : )
7113 49 : .await?;
7114 2 : {
7115 2 : // Update GC info
7116 2 : let mut guard = tline.gc_info.write().unwrap();
7117 2 : *guard = GcInfo {
7118 2 : retain_lsns: vec![],
7119 2 : cutoffs: GcCutoffs {
7120 2 : pitr: Lsn(0x30),
7121 2 : horizon: Lsn(0x30),
7122 2 : },
7123 2 : leases: Default::default(),
7124 2 : };
7125 2 : }
7126 2 :
7127 2 : let expected_result = [
7128 2 : Bytes::from_static(b"value 0@0x10"),
7129 2 : Bytes::from_static(b"value 1@0x10@0x20"),
7130 2 : Bytes::from_static(b"value 2@0x10@0x30"),
7131 2 : Bytes::from_static(b"value 3@0x10@0x40"),
7132 2 : Bytes::from_static(b"value 4@0x10"),
7133 2 : Bytes::from_static(b"value 5@0x10@0x20"),
7134 2 : Bytes::from_static(b"value 6@0x10@0x20"),
7135 2 : Bytes::from_static(b"value 7@0x10"),
7136 2 : Bytes::from_static(b"value 8@0x10@0x40"),
7137 2 : Bytes::from_static(b"value 9@0x10@0x40"),
7138 2 : ];
7139 2 :
7140 2 : let expected_result_at_gc_horizon = [
7141 2 : Bytes::from_static(b"value 0@0x10"),
7142 2 : Bytes::from_static(b"value 1@0x10@0x20"),
7143 2 : Bytes::from_static(b"value 2@0x10@0x30"),
7144 2 : Bytes::from_static(b"value 3@0x10"),
7145 2 : Bytes::from_static(b"value 4@0x10"),
7146 2 : Bytes::from_static(b"value 5@0x10@0x20"),
7147 2 : Bytes::from_static(b"value 6@0x10@0x20"),
7148 2 : Bytes::from_static(b"value 7@0x10"),
7149 2 : Bytes::from_static(b"value 8@0x10"),
7150 2 : Bytes::from_static(b"value 9@0x10"),
7151 2 : ];
7152 2 :
7153 22 : for idx in 0..10 {
7154 20 : assert_eq!(
7155 20 : tline
7156 20 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
7157 18 : .await
7158 20 : .unwrap(),
7159 20 : &expected_result[idx]
7160 2 : );
7161 20 : assert_eq!(
7162 20 : tline
7163 20 : .get(get_key(idx as u32), Lsn(0x30), &ctx)
7164 2 : .await
7165 20 : .unwrap(),
7166 20 : &expected_result_at_gc_horizon[idx]
7167 2 : );
7168 2 : }
7169 2 :
7170 2 : let cancel = CancellationToken::new();
7171 52 : tline.compact_with_gc(&cancel, &ctx).await.unwrap();
7172 2 :
7173 22 : for idx in 0..10 {
7174 20 : assert_eq!(
7175 20 : tline
7176 20 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
7177 8 : .await
7178 20 : .unwrap(),
7179 20 : &expected_result[idx]
7180 2 : );
7181 20 : assert_eq!(
7182 20 : tline
7183 20 : .get(get_key(idx as u32), Lsn(0x30), &ctx)
7184 2 : .await
7185 20 : .unwrap(),
7186 20 : &expected_result_at_gc_horizon[idx]
7187 2 : );
7188 2 : }
7189 2 :
7190 2 : Ok(())
7191 2 : }
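// A worked sketch of the delta-replay reads this test asserts (illustrative):
// unlike the images variant above, reconstruction here applies every
// wal_append record at or below the read LSN on top of the base image, which
// is why key 3 reads differently at Lsn(0x50) and at the horizon Lsn(0x30).
mod delta_replay_sketch {
    pub fn reconstruct(base: &str, appends: &[(u64, &'static str)], read_lsn: u64) -> String {
        let mut value = base.to_string();
        for (lsn, s) in appends {
            if *lsn <= read_lsn {
                value.push_str(s);
            }
        }
        value
    }

    #[test]
    fn key3_depends_on_read_lsn() {
        let appends = [(0x40, "@0x40")];
        assert_eq!(reconstruct("value 3@0x10", &appends, 0x50), "value 3@0x10@0x40");
        assert_eq!(reconstruct("value 3@0x10", &appends, 0x30), "value 3@0x10");
    }
}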
7192 : }