Line data Source code
1 : //!
2 : //! Timeline repository implementation that keeps old data in files on disk, and
3 : //! the recent changes in memory. See tenant/*_layer.rs files.
4 : //! The functions here are responsible for locating the correct layer for the
5 : //! get/put call, walking back the timeline branching history as needed.
6 : //!
7 : //! The files are stored in the .neon/tenants/<tenant_id>/timelines/<timeline_id>
8 : //! directory. See docs/pageserver-storage.md for how the files are managed.
9 : //! In addition to the layer files, there is a metadata file in the same
10 : //! directory that contains information about the timeline, in particular its
11 : //! parent timeline, and the last LSN that has been written to disk.
12 : //!
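//! For orientation, a minimal sketch of how such a path is assembled (the
//! workdir root and the ID strings here are illustrative, not taken from a
//! real deployment):
//!
//! ```
//! use camino::{Utf8Path, Utf8PathBuf};
//!
//! fn timeline_dir(workdir: &Utf8Path, tenant_id: &str, timeline_id: &str) -> Utf8PathBuf {
//!     // Mirrors the `.neon/tenants/<tenant_id>/timelines/<timeline_id>` layout.
//!     workdir
//!         .join("tenants")
//!         .join(tenant_id)
//!         .join("timelines")
//!         .join(timeline_id)
//! }
//! ```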
13 :
14 : use anyhow::{bail, Context};
15 : use arc_swap::ArcSwap;
16 : use camino::Utf8Path;
17 : use camino::Utf8PathBuf;
18 : use enumset::EnumSet;
19 : use futures::stream::FuturesUnordered;
20 : use futures::FutureExt;
21 : use futures::StreamExt;
22 : use pageserver_api::models;
23 : use pageserver_api::models::AuxFilePolicy;
24 : use pageserver_api::models::TimelineState;
25 : use pageserver_api::models::TopTenantShardItem;
26 : use pageserver_api::models::WalRedoManagerStatus;
27 : use pageserver_api::shard::ShardIdentity;
28 : use pageserver_api::shard::ShardStripeSize;
29 : use pageserver_api::shard::TenantShardId;
30 : use remote_storage::DownloadError;
31 : use remote_storage::GenericRemoteStorage;
32 : use remote_storage::TimeoutOrCancel;
33 : use std::fmt;
34 : use std::time::SystemTime;
35 : use storage_broker::BrokerClientChannel;
36 : use tokio::io::BufReader;
37 : use tokio::sync::watch;
38 : use tokio::task::JoinSet;
39 : use tokio_util::sync::CancellationToken;
40 : use tracing::*;
41 : use utils::backoff;
42 : use utils::completion;
43 : use utils::crashsafe::path_with_suffix_extension;
44 : use utils::failpoint_support;
45 : use utils::fs_ext;
46 : use utils::pausable_failpoint;
47 : use utils::sync::gate::Gate;
48 : use utils::sync::gate::GateGuard;
49 : use utils::timeout::timeout_cancellable;
50 : use utils::timeout::TimeoutCancellableError;
51 : use utils::zstd::create_zst_tarball;
52 : use utils::zstd::extract_zst_tarball;
53 :
54 : use self::config::AttachedLocationConfig;
55 : use self::config::AttachmentMode;
56 : use self::config::LocationConf;
57 : use self::config::TenantConf;
58 : use self::metadata::TimelineMetadata;
59 : use self::mgr::GetActiveTenantError;
60 : use self::mgr::GetTenantError;
61 : use self::remote_timeline_client::upload::upload_index_part;
62 : use self::remote_timeline_client::RemoteTimelineClient;
63 : use self::timeline::uninit::TimelineCreateGuard;
64 : use self::timeline::uninit::TimelineExclusionError;
65 : use self::timeline::uninit::UninitializedTimeline;
66 : use self::timeline::EvictionTaskTenantState;
67 : use self::timeline::GcCutoffs;
68 : use self::timeline::TimelineResources;
69 : use self::timeline::WaitLsnError;
70 : use crate::config::PageServerConf;
71 : use crate::context::{DownloadBehavior, RequestContext};
72 : use crate::deletion_queue::DeletionQueueClient;
73 : use crate::deletion_queue::DeletionQueueError;
74 : use crate::import_datadir;
75 : use crate::is_uninit_mark;
76 : use crate::metrics::TENANT;
77 : use crate::metrics::{
78 : remove_tenant_metrics, BROKEN_TENANTS_SET, TENANT_STATE_METRIC, TENANT_SYNTHETIC_SIZE_METRIC,
79 : };
80 : use crate::repository::GcResult;
81 : use crate::task_mgr;
82 : use crate::task_mgr::TaskKind;
83 : use crate::tenant::config::LocationMode;
84 : use crate::tenant::config::TenantConfOpt;
85 : pub use crate::tenant::remote_timeline_client::index::IndexPart;
86 : use crate::tenant::remote_timeline_client::remote_initdb_archive_path;
87 : use crate::tenant::remote_timeline_client::MaybeDeletedIndexPart;
88 : use crate::tenant::remote_timeline_client::INITDB_PATH;
89 : use crate::tenant::storage_layer::DeltaLayer;
90 : use crate::tenant::storage_layer::ImageLayer;
91 : use crate::InitializationOrder;
92 : use std::collections::hash_map::Entry;
93 : use std::collections::BTreeSet;
94 : use std::collections::HashMap;
95 : use std::collections::HashSet;
96 : use std::fmt::Debug;
97 : use std::fmt::Display;
98 : use std::fs;
99 : use std::fs::File;
100 : use std::ops::Bound::Included;
101 : use std::sync::atomic::AtomicU64;
102 : use std::sync::atomic::Ordering;
103 : use std::sync::Arc;
104 : use std::sync::Mutex;
105 : use std::time::{Duration, Instant};
106 :
107 : use crate::span;
108 : use crate::tenant::timeline::delete::DeleteTimelineFlow;
109 : use crate::tenant::timeline::uninit::cleanup_timeline_directory;
110 : use crate::virtual_file::VirtualFile;
111 : use crate::walredo::PostgresRedoManager;
112 : use crate::TEMP_FILE_SUFFIX;
113 : use once_cell::sync::Lazy;
114 : pub use pageserver_api::models::TenantState;
115 : use tokio::sync::Semaphore;
116 :
117 0 : static INIT_DB_SEMAPHORE: Lazy<Semaphore> = Lazy::new(|| Semaphore::new(8));
118 : use utils::{
119 : crashsafe,
120 : generation::Generation,
121 : id::TimelineId,
122 : lsn::{Lsn, RecordLsn},
123 : };
124 :
125 : pub mod blob_io;
126 : pub mod block_io;
127 : pub mod vectored_blob_io;
128 :
129 : pub mod disk_btree;
130 : pub(crate) mod ephemeral_file;
131 : pub mod layer_map;
132 :
133 : pub mod metadata;
134 : pub mod remote_timeline_client;
135 : pub mod storage_layer;
136 :
137 : pub mod config;
138 : pub mod mgr;
139 : pub mod secondary;
140 : pub mod tasks;
141 : pub mod upload_queue;
142 :
143 : pub(crate) mod timeline;
144 :
145 : pub mod size;
146 :
147 : pub(crate) mod throttle;
148 :
149 : pub(crate) use crate::span::debug_assert_current_span_has_tenant_and_timeline_id;
150 : pub(crate) use timeline::{LogicalSizeCalculationCause, PageReconstructError, Timeline};
151 :
152 : // re-export for use in walreceiver
153 : pub use crate::tenant::timeline::WalReceiverInfo;
154 :
155 : /// The "tenants" part of `tenants/<tenant>/timelines...`
156 : pub const TENANTS_SEGMENT_NAME: &str = "tenants";
157 :
158 : /// Parts of the `.neon/tenants/<tenant_id>/timelines/<timeline_id>` directory prefix.
159 : pub const TIMELINES_SEGMENT_NAME: &str = "timelines";
160 :
161 : /// References to shared objects that are passed into each tenant, such
162 : /// as the shared remote storage client and process initialization state.
163 : #[derive(Clone)]
164 : pub struct TenantSharedResources {
165 : pub broker_client: storage_broker::BrokerClientChannel,
166 : pub remote_storage: GenericRemoteStorage,
167 : pub deletion_queue_client: DeletionQueueClient,
168 : }
169 :
170 : /// A [`Tenant`] is really an _attached_ tenant. The configuration
171 : /// for an attached tenant is a subset of the [`LocationConf`], represented
172 : /// in this struct.
173 : pub(super) struct AttachedTenantConf {
174 : tenant_conf: TenantConfOpt,
175 : location: AttachedLocationConfig,
176 : }
177 :
178 : impl AttachedTenantConf {
179 0 : fn new(tenant_conf: TenantConfOpt, location: AttachedLocationConfig) -> Self {
180 0 : Self {
181 0 : tenant_conf,
182 0 : location,
183 0 : }
184 0 : }
185 :
186 169 : fn try_from(location_conf: LocationConf) -> anyhow::Result<Self> {
187 169 : match &location_conf.mode {
188 169 : LocationMode::Attached(attach_conf) => Ok(Self {
189 169 : tenant_conf: location_conf.tenant_conf,
190 169 : location: *attach_conf,
191 169 : }),
192 : LocationMode::Secondary(_) => {
193 0 : anyhow::bail!("Attempted to construct AttachedTenantConf from a LocationConf in secondary mode")
194 : }
195 : }
196 169 : }
197 : }
198 : struct TimelinePreload {
199 : timeline_id: TimelineId,
200 : client: RemoteTimelineClient,
201 : index_part: Result<MaybeDeletedIndexPart, DownloadError>,
202 : }
203 :
204 : pub(crate) struct TenantPreload {
205 : timelines: HashMap<TimelineId, TimelinePreload>,
206 : }
207 :
208 : /// Controls how an attached tenant is activated after [`Tenant::spawn`]:
209 : /// eagerly, or lazily in the background once a warmup slot becomes available.
210 : pub(crate) enum SpawnMode {
211 : /// Activate as soon as possible
212 : Eager,
213 : /// Lazy activation in the background, with the option to skip the queue if the need comes up
214 : Lazy,
215 : }
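// Illustrative choice of mode (the `during_startup` flag is hypothetical; see
// how `Tenant::spawn` below treats the two variants):
//
//     let mode = if during_startup { SpawnMode::Lazy } else { SpawnMode::Eager };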
216 :
217 : ///
218 : /// A `Tenant` consists of multiple timelines, kept here in a hash table.
219 : ///
220 : pub struct Tenant {
221 : // Global pageserver config parameters
222 : pub conf: &'static PageServerConf,
223 :
224 : /// The value creation timestamp, used to measure activation delay, see:
225 : /// <https://github.com/neondatabase/neon/issues/4025>
226 : constructed_at: Instant,
227 :
228 : state: watch::Sender<TenantState>,
229 :
230 : // Overridden tenant-specific config parameters.
231 : // We keep the TenantConfOpt struct here to preserve the information
232 : // about parameters that are not set.
233 : // This is necessary to allow global config updates.
234 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
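    // Readers take a cheap snapshot with `self.tenant_conf.load()` (as the GC and
    // compaction paths below do); updates swap in a whole new value, e.g. (sketch):
    //
    //     self.tenant_conf.store(Arc::new(new_attached_conf));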
235 :
236 : tenant_shard_id: TenantShardId,
237 :
238 : // The detailed sharding information, beyond the number/count in tenant_shard_id
239 : shard_identity: ShardIdentity,
240 :
241 : /// The remote storage generation, used to protect S3 objects from split-brain.
242 : /// Does not change over the lifetime of the [`Tenant`] object.
243 : ///
244 : /// This duplicates the generation stored in LocationConf, but that structure is mutable:
245 : /// this copy enforces the invariant that the generation doesn't change during a Tenant's lifetime.
246 : generation: Generation,
247 :
248 : timelines: Mutex<HashMap<TimelineId, Arc<Timeline>>>,
249 :
250 : /// During timeline creation, we first insert the TimelineId to the
251 : /// creating map, then `timelines`, then remove it from the creating map.
252 : /// **Lock order**: if acquiring both, acquire `timelines` before `timelines_creating`
253 : timelines_creating: std::sync::Mutex<HashSet<TimelineId>>,
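    // A sketch of the documented lock order:
    //
    //     let timelines = self.timelines.lock().unwrap();           // acquire first
    //     let creating = self.timelines_creating.lock().unwrap();   // then this one
    //     // ...and never in the opposite order, to avoid deadlocks.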
254 :
255 : // This mutex prevents creation of new timelines during GC.
256 : // Adding yet another mutex (in addition to `timelines`) is needed because holding
257 : // the `timelines` mutex for the whole GC iteration
258 : // may block `get_timeline`, `get_timelines_state`, and other timeline
259 : // operations for a long time, which in turn may cause replication connections
260 : // to drop and wait_for_lsn waits to time out.
261 : gc_cs: tokio::sync::Mutex<()>,
262 : walredo_mgr: Option<Arc<WalRedoManager>>,
263 :
264 : // provides access to timeline data sitting in the remote storage
265 : pub(crate) remote_storage: GenericRemoteStorage,
266 :
267 : // Access to global deletion queue for when this tenant wants to schedule a deletion
268 : deletion_queue_client: DeletionQueueClient,
269 :
270 : /// Cached logical sizes, updated on each [`Tenant::gather_size_inputs`].
271 : cached_logical_sizes: tokio::sync::Mutex<HashMap<(TimelineId, Lsn), u64>>,
272 : cached_synthetic_tenant_size: Arc<AtomicU64>,
273 :
274 : eviction_task_tenant_state: tokio::sync::Mutex<EvictionTaskTenantState>,
275 :
276 : /// If the tenant is in Activating state, notify this to encourage it
277 : /// to proceed to Active as soon as possible, rather than waiting for lazy
278 : /// background warmup.
279 : pub(crate) activate_now_sem: tokio::sync::Semaphore,
280 :
281 : // Cancellation token fires when we have entered shutdown(). This is a parent of
282 : // Timelines' cancellation token.
283 : pub(crate) cancel: CancellationToken,
284 :
285 : // Users of the Tenant such as the page service must take this Gate to avoid
286 : // trying to use a Tenant which is shutting down.
287 : pub(crate) gate: Gate,
288 :
289 : /// Throttle applied at the top of [`Timeline::get`].
290 : /// All [`Tenant::timelines`] of a given [`Tenant`] instance share the same [`throttle::Throttle`] instance.
291 : pub(crate) timeline_get_throttle:
292 : Arc<throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>>,
293 :
294 : /// An ongoing timeline detach must be checked during attempts to GC or compact a timeline.
295 : ongoing_timeline_detach: std::sync::Mutex<Option<(TimelineId, utils::completion::Barrier)>>,
296 : }
297 :
298 : impl std::fmt::Debug for Tenant {
299 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
300 0 : write!(f, "{} ({})", self.tenant_shard_id, self.current_state())
301 0 : }
302 : }
303 :
304 : pub(crate) enum WalRedoManager {
305 : Prod(PostgresRedoManager),
306 : #[cfg(test)]
307 : Test(harness::TestRedoManager),
308 : }
309 :
310 : impl From<PostgresRedoManager> for WalRedoManager {
311 0 : fn from(mgr: PostgresRedoManager) -> Self {
312 0 : Self::Prod(mgr)
313 0 : }
314 : }
315 :
316 : #[cfg(test)]
317 : impl From<harness::TestRedoManager> for WalRedoManager {
318 161 : fn from(mgr: harness::TestRedoManager) -> Self {
319 161 : Self::Test(mgr)
320 161 : }
321 : }
322 :
323 : impl WalRedoManager {
324 0 : pub(crate) fn maybe_quiesce(&self, idle_timeout: Duration) {
325 0 : match self {
326 0 : Self::Prod(mgr) => mgr.maybe_quiesce(idle_timeout),
327 0 : #[cfg(test)]
328 0 : Self::Test(_) => {
329 0 : // Not applicable to test redo manager
330 0 : }
331 0 : }
332 0 : }
333 :
334 : /// # Cancel-Safety
335 : ///
336 : /// This method is cancellation-safe.
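    ///
    /// That means callers may race it against a cancellation token and drop the
    /// future without corrupting the manager's state; an illustrative sketch
    /// (`cancel` is assumed to be a `CancellationToken` in scope):
    ///
    /// ```ignore
    /// tokio::select! {
    ///     res = mgr.request_redo(key, lsn, base_img, records, pg_version) => { /* use res */ }
    ///     _ = cancel.cancelled() => { /* safe to drop the redo future here */ }
    /// }
    /// ```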
337 58 : pub async fn request_redo(
338 58 : &self,
339 58 : key: crate::repository::Key,
340 58 : lsn: Lsn,
341 58 : base_img: Option<(Lsn, bytes::Bytes)>,
342 58 : records: Vec<(Lsn, crate::walrecord::NeonWalRecord)>,
343 58 : pg_version: u32,
344 58 : ) -> anyhow::Result<bytes::Bytes> {
345 58 : match self {
346 0 : Self::Prod(mgr) => {
347 0 : mgr.request_redo(key, lsn, base_img, records, pg_version)
348 0 : .await
349 : }
350 : #[cfg(test)]
351 58 : Self::Test(mgr) => {
352 58 : mgr.request_redo(key, lsn, base_img, records, pg_version)
353 0 : .await
354 : }
355 : }
356 58 : }
357 :
358 0 : pub(crate) fn status(&self) -> Option<WalRedoManagerStatus> {
359 0 : match self {
360 0 : WalRedoManager::Prod(m) => Some(m.status()),
361 0 : #[cfg(test)]
362 0 : WalRedoManager::Test(_) => None,
363 0 : }
364 0 : }
365 : }
366 :
367 0 : #[derive(Debug, thiserror::Error, PartialEq, Eq)]
368 : pub enum GetTimelineError {
369 : #[error("Timeline {tenant_id}/{timeline_id} is not active, state: {state:?}")]
370 : NotActive {
371 : tenant_id: TenantShardId,
372 : timeline_id: TimelineId,
373 : state: TimelineState,
374 : },
375 : #[error("Timeline {tenant_id}/{timeline_id} was not found")]
376 : NotFound {
377 : tenant_id: TenantShardId,
378 : timeline_id: TimelineId,
379 : },
380 : }
381 :
382 0 : #[derive(Debug, thiserror::Error)]
383 : pub enum LoadLocalTimelineError {
384 : #[error("FailedToLoad")]
385 : Load(#[source] anyhow::Error),
386 : #[error("FailedToResumeDeletion")]
387 : ResumeDeletion(#[source] anyhow::Error),
388 : }
389 :
390 0 : #[derive(thiserror::Error)]
391 : pub enum DeleteTimelineError {
392 : #[error("NotFound")]
393 : NotFound,
394 :
395 : #[error("HasChildren")]
396 : HasChildren(Vec<TimelineId>),
397 :
398 : #[error("Timeline deletion is already in progress")]
399 : AlreadyInProgress(Arc<tokio::sync::Mutex<DeleteTimelineFlow>>),
400 :
401 : #[error(transparent)]
402 : Other(#[from] anyhow::Error),
403 : }
404 :
405 : impl Debug for DeleteTimelineError {
406 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
407 0 : match self {
408 0 : Self::NotFound => write!(f, "NotFound"),
409 0 : Self::HasChildren(c) => f.debug_tuple("HasChildren").field(c).finish(),
410 0 : Self::AlreadyInProgress(_) => f.debug_tuple("AlreadyInProgress").finish(),
411 0 : Self::Other(e) => f.debug_tuple("Other").field(e).finish(),
412 : }
413 0 : }
414 : }
415 :
416 : pub enum SetStoppingError {
417 : AlreadyStopping(completion::Barrier),
418 : Broken,
419 : }
420 :
421 : impl Debug for SetStoppingError {
422 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
423 0 : match self {
424 0 : Self::AlreadyStopping(_) => f.debug_tuple("AlreadyStopping").finish(),
425 0 : Self::Broken => write!(f, "Broken"),
426 : }
427 0 : }
428 : }
429 :
430 0 : #[derive(thiserror::Error, Debug)]
431 : pub enum CreateTimelineError {
432 : #[error("creation of timeline with the given ID is in progress")]
433 : AlreadyCreating,
434 : #[error("timeline already exists with different parameters")]
435 : Conflict,
436 : #[error(transparent)]
437 : AncestorLsn(anyhow::Error),
438 : #[error("ancestor timeline is not active")]
439 : AncestorNotActive,
440 : #[error("tenant shutting down")]
441 : ShuttingDown,
442 : #[error(transparent)]
443 : Other(#[from] anyhow::Error),
444 : }
445 :
446 : #[derive(thiserror::Error, Debug)]
447 : enum InitdbError {
448 : Other(anyhow::Error),
449 : Cancelled,
450 : Spawn(std::io::Result<()>),
451 : Failed(std::process::ExitStatus, Vec<u8>),
452 : }
453 :
454 : impl fmt::Display for InitdbError {
455 0 : fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
456 0 : match self {
457 0 : InitdbError::Cancelled => write!(f, "Operation was cancelled"),
458 0 : InitdbError::Spawn(e) => write!(f, "Spawn error: {:?}", e),
459 0 : InitdbError::Failed(status, stderr) => write!(
460 0 : f,
461 0 : "Command failed with status {:?}: {}",
462 0 : status,
463 0 : String::from_utf8_lossy(stderr)
464 0 : ),
465 0 : InitdbError::Other(e) => write!(f, "Error: {:?}", e),
466 : }
467 0 : }
468 : }
469 :
470 : impl From<std::io::Error> for InitdbError {
471 0 : fn from(error: std::io::Error) -> Self {
472 0 : InitdbError::Spawn(Err(error))
473 0 : }
474 : }
475 :
476 : enum CreateTimelineCause {
477 : Load,
478 : Delete,
479 : }
480 :
481 0 : #[derive(thiserror::Error, Debug)]
482 : pub(crate) enum GcError {
483 : // The tenant is shutting down
484 : #[error("tenant shutting down")]
485 : TenantCancelled,
486 :
487 : // The timeline is shutting down
488 : #[error("timeline shutting down")]
489 : TimelineCancelled,
490 :
491 : // The tenant is in a state ineligible to run GC
492 : #[error("not active")]
493 : NotActive,
494 :
495 : // A requested GC cutoff LSN was invalid, for example it tried to move backwards
496 : #[error("invalid GC cutoff LSN: {why}")]
497 : BadLsn { why: String },
498 :
499 : // A remote storage error while scheduling updates after compaction
500 : #[error(transparent)]
501 : Remote(anyhow::Error),
502 :
503 : // An error reading while calculating GC cutoffs
504 : #[error(transparent)]
505 : GcCutoffs(PageReconstructError),
506 :
507 : // If GC was invoked for a particular timeline, this error means it didn't exist
508 : #[error("timeline not found")]
509 : TimelineNotFound,
510 : }
511 :
512 : impl From<PageReconstructError> for GcError {
513 0 : fn from(value: PageReconstructError) -> Self {
514 0 : match value {
515 0 : PageReconstructError::Cancelled => Self::TimelineCancelled,
516 0 : other => Self::GcCutoffs(other),
517 : }
518 0 : }
519 : }
520 :
521 : impl Tenant {
522 : /// Yet another helper for timeline initialization.
523 : ///
524 : /// - Initializes the Timeline struct and inserts it into the tenant's hash map
525 : /// - Scans the local timeline directory for layer files and builds the layer map
526 : /// - Downloads remote index file and adds remote files to the layer map
527 : /// - Schedules remote upload tasks for any files that are present locally but missing from remote storage.
528 : ///
529 : /// If the operation fails, the timeline is left in the tenant's hash map in Broken state. On success,
530 : /// it is marked as Active.
531 : #[allow(clippy::too_many_arguments)]
532 6 : async fn timeline_init_and_sync(
533 6 : &self,
534 6 : timeline_id: TimelineId,
535 6 : resources: TimelineResources,
536 6 : index_part: Option<IndexPart>,
537 6 : metadata: TimelineMetadata,
538 6 : ancestor: Option<Arc<Timeline>>,
539 6 : last_aux_file_policy: Option<AuxFilePolicy>,
540 6 : _ctx: &RequestContext,
541 6 : ) -> anyhow::Result<()> {
542 6 : let tenant_id = self.tenant_shard_id;
543 :
544 6 : let timeline = self.create_timeline_struct(
545 6 : timeline_id,
546 6 : &metadata,
547 6 : ancestor.clone(),
548 6 : resources,
549 6 : CreateTimelineCause::Load,
550 6 : // This could be derived from ancestor branch + index part. Though the only caller of `timeline_init_and_sync` is `load_remote_timeline`,
551 6 : // there will potentially be other callers of this function in the future, and we don't know whether `index_part` or `ancestor` takes precedence.
552 6 : // Therefore, we pass this field explicitly for now, and remove it once we fully migrate to aux file v2.
553 6 : last_aux_file_policy,
554 6 : )?;
555 6 : let disk_consistent_lsn = timeline.get_disk_consistent_lsn();
556 6 : anyhow::ensure!(
557 6 : disk_consistent_lsn.is_valid(),
558 0 : "Timeline {tenant_id}/{timeline_id} has invalid disk_consistent_lsn"
559 : );
560 6 : assert_eq!(
561 6 : disk_consistent_lsn,
562 6 : metadata.disk_consistent_lsn(),
563 0 : "these are used interchangeably"
564 : );
565 :
566 6 : if let Some(index_part) = index_part.as_ref() {
567 6 : timeline.remote_client.init_upload_queue(index_part)?;
568 :
569 6 : timeline
570 6 : .last_aux_file_policy
571 6 : .store(index_part.last_aux_file_policy());
572 : } else {
573 : // No data on the remote storage, but we have local metadata file. We can end up
574 : // here with timeline_create being interrupted before finishing index part upload.
575 : // By doing what we do here, the index part upload is retried.
576 : // If control plane retries timeline creation in the meantime, the mgmt API handler
577 : // for timeline creation will coalesce on the upload we queue here.
578 :
579 : // FIXME: this branch should be dead code as we no longer write local metadata.
580 :
581 0 : timeline
582 0 : .remote_client
583 0 : .init_upload_queue_for_empty_remote(&metadata)?;
584 0 : timeline
585 0 : .remote_client
586 0 : .schedule_index_upload_for_full_metadata_update(&metadata)?;
587 : }
588 :
589 6 : timeline
590 6 : .load_layer_map(disk_consistent_lsn, index_part)
591 5 : .await
592 6 : .with_context(|| {
593 0 : format!("Failed to load layermap for timeline {tenant_id}/{timeline_id}")
594 6 : })?;
595 :
596 : {
597 : // avoiding holding it across awaits
598 6 : let mut timelines_accessor = self.timelines.lock().unwrap();
599 6 : match timelines_accessor.entry(timeline_id) {
600 : // We should never try and load the same timeline twice during startup
601 : Entry::Occupied(_) => {
602 0 : unreachable!(
603 0 : "Timeline {tenant_id}/{timeline_id} already exists in the tenant map"
604 0 : );
605 : }
606 6 : Entry::Vacant(v) => {
607 6 : v.insert(Arc::clone(&timeline));
608 6 : timeline.maybe_spawn_flush_loop();
609 6 : }
610 6 : }
611 6 : };
612 6 :
613 6 : // Sanity check: a timeline should have some content.
614 6 : anyhow::ensure!(
615 6 : ancestor.is_some()
616 4 : || timeline
617 4 : .layers
618 4 : .read()
619 0 : .await
620 4 : .layer_map()
621 4 : .iter_historic_layers()
622 4 : .next()
623 4 : .is_some(),
624 0 : "Timeline has no ancestor and no layer files"
625 : );
626 :
627 6 : Ok(())
628 6 : }
629 :
630 : /// Attach a tenant that's available in cloud storage.
631 : ///
632 : /// This returns quickly, after just creating the in-memory `Tenant`
633 : /// object and launching a background task to download
634 : /// the remote index files. On return, the tenant is most likely still in
635 : /// Attaching state, and it will become Active once the background task
636 : /// finishes. You can use wait_until_active() to wait for the task to
637 : /// complete.
638 : ///
639 : #[allow(clippy::too_many_arguments)]
640 0 : pub(crate) fn spawn(
641 0 : conf: &'static PageServerConf,
642 0 : tenant_shard_id: TenantShardId,
643 0 : resources: TenantSharedResources,
644 0 : attached_conf: AttachedTenantConf,
645 0 : shard_identity: ShardIdentity,
646 0 : init_order: Option<InitializationOrder>,
647 0 : mode: SpawnMode,
648 0 : ctx: &RequestContext,
649 0 : ) -> anyhow::Result<Arc<Tenant>> {
650 0 : let wal_redo_manager = Arc::new(WalRedoManager::from(PostgresRedoManager::new(
651 0 : conf,
652 0 : tenant_shard_id,
653 0 : )));
654 0 :
655 0 : let TenantSharedResources {
656 0 : broker_client,
657 0 : remote_storage,
658 0 : deletion_queue_client,
659 0 : } = resources;
660 0 :
661 0 : let attach_mode = attached_conf.location.attach_mode;
662 0 : let generation = attached_conf.location.generation;
663 0 :
664 0 : let tenant = Arc::new(Tenant::new(
665 0 : TenantState::Attaching,
666 0 : conf,
667 0 : attached_conf,
668 0 : shard_identity,
669 0 : Some(wal_redo_manager),
670 0 : tenant_shard_id,
671 0 : remote_storage.clone(),
672 0 : deletion_queue_client,
673 0 : ));
674 0 :
675 0 : // The attach task will carry a GateGuard, so that shutdown() reliably waits for it to drop out if
676 0 : // we shut down while attaching.
677 0 : let attach_gate_guard = tenant
678 0 : .gate
679 0 : .enter()
680 0 : .expect("We just created the Tenant: nothing else can have shut it down yet");
681 0 :
682 0 : // Do all the hard work in the background
683 0 : let tenant_clone = Arc::clone(&tenant);
684 0 : let ctx = ctx.detached_child(TaskKind::Attach, DownloadBehavior::Warn);
685 0 : task_mgr::spawn(
686 0 : &tokio::runtime::Handle::current(),
687 0 : TaskKind::Attach,
688 0 : Some(tenant_shard_id),
689 0 : None,
690 0 : "attach tenant",
691 : false,
692 0 : async move {
693 0 :
694 0 : info!(
695 : ?attach_mode,
696 0 : "Attaching tenant"
697 : );
698 :
699 0 : let _gate_guard = attach_gate_guard;
700 0 :
701 0 : // Is this tenant being spawned as part of process startup?
702 0 : let starting_up = init_order.is_some();
703 : scopeguard::defer! {
704 : if starting_up {
705 : TENANT.startup_complete.inc();
706 : }
707 : }
708 :
709 : // Ideally we should use Tenant::set_broken_no_wait, but it is not supposed to be used when tenant is in loading state.
710 : enum BrokenVerbosity {
711 : Error,
712 : Info
713 : }
714 0 : let make_broken =
715 0 : |t: &Tenant, err: anyhow::Error, verbosity: BrokenVerbosity| {
716 0 : match verbosity {
717 : BrokenVerbosity::Info => {
718 0 : info!("attach cancelled, setting tenant state to Broken: {err}");
719 : },
720 : BrokenVerbosity::Error => {
721 0 : error!("attach failed, setting tenant state to Broken: {err:?}");
722 : }
723 : }
724 0 : t.state.send_modify(|state| {
725 0 : // The Stopping case is for when we have passed control on to DeleteTenantFlow:
726 0 : // if it errors, we will call make_broken when tenant is already in Stopping.
727 0 : assert!(
728 0 : matches!(*state, TenantState::Attaching | TenantState::Stopping { .. }),
729 0 : "the attach task owns the tenant state until activation is complete"
730 : );
731 :
732 0 : *state = TenantState::broken_from_reason(err.to_string());
733 0 : });
734 0 : };
735 :
736 0 : let mut init_order = init_order;
737 0 : // take the completion because initial tenant loading will complete when all of
738 0 : // these tasks complete.
739 0 : let _completion = init_order
740 0 : .as_mut()
741 0 : .and_then(|x| x.initial_tenant_load.take());
742 0 : let remote_load_completion = init_order
743 0 : .as_mut()
744 0 : .and_then(|x| x.initial_tenant_load_remote.take());
745 :
746 : enum AttachType<'a> {
747 : /// We are attaching this tenant lazily in the background.
748 : Warmup {
749 : _permit: tokio::sync::SemaphorePermit<'a>,
750 : during_startup: bool
751 : },
752 : /// We are attaching this tenant as soon as we can, because for example an
753 : /// endpoint tried to access it.
754 : OnDemand,
755 : /// During normal operations after startup, we are attaching a tenant, and
756 : /// eager attach was requested.
757 : Normal,
758 : }
759 :
760 0 : let attach_type = if matches!(mode, SpawnMode::Lazy) {
761 : // Before doing any I/O, wait for at least one of:
762 : // - A client attempting to access to this tenant (on-demand loading)
763 : // - A permit becoming available in the warmup semaphore (background warmup)
764 :
765 : tokio::select!(
766 : permit = tenant_clone.activate_now_sem.acquire() => {
767 : let _ = permit.expect("activate_now_sem is never closed");
768 : tracing::info!("Activating tenant (on-demand)");
769 : AttachType::OnDemand
770 : },
771 : permit = conf.concurrent_tenant_warmup.inner().acquire() => {
772 : let _permit = permit.expect("concurrent_tenant_warmup semaphore is never closed");
773 : tracing::info!("Activating tenant (warmup)");
774 : AttachType::Warmup {
775 : _permit,
776 : during_startup: init_order.is_some()
777 : }
778 : }
779 : _ = tenant_clone.cancel.cancelled() => {
780 : // This is safe, but should be pretty rare: it is interesting if a tenant
781 : // stayed in Activating for such a long time that shutdown found it in
782 : // that state.
783 : tracing::info!(state=%tenant_clone.current_state(), "Tenant shut down before activation");
784 : // Make the tenant broken so that set_stopping will not hang waiting for it to leave
785 : // the Attaching state. This is an over-reaction (nothing really broke, the tenant is
786 : // just shutting down), but ensures progress.
787 : make_broken(&tenant_clone, anyhow::anyhow!("Shut down while Attaching"), BrokenVerbosity::Info);
788 : return Ok(());
789 : },
790 : )
791 : } else {
792 : // SpawnMode::Eager always causes jumping ahead of the
793 : // concurrent_tenant_warmup queue
794 0 : AttachType::Normal
795 : };
796 :
797 0 : let preload = match &mode {
798 : SpawnMode::Eager | SpawnMode::Lazy => {
799 0 : let _preload_timer = TENANT.preload.start_timer();
800 0 : let res = tenant_clone
801 0 : .preload(&remote_storage, task_mgr::shutdown_token())
802 0 : .await;
803 0 : match res {
804 0 : Ok(p) => Some(p),
805 0 : Err(e) => {
806 0 : make_broken(&tenant_clone, anyhow::anyhow!(e), BrokenVerbosity::Error);
807 0 : return Ok(());
808 : }
809 : }
810 : }
811 :
812 : };
813 :
814 : // Remote preload is complete.
815 0 : drop(remote_load_completion);
816 :
817 : // Time the duration of the attach phase.
818 0 : let attached = {
819 0 : let _attach_timer = Some(TENANT.attach.start_timer());
820 0 : tenant_clone.attach(preload, &ctx).await
821 : };
822 :
823 0 : match attached {
824 : Ok(()) => {
825 0 : info!("attach finished, activating");
826 0 : tenant_clone.activate(broker_client, None, &ctx);
827 : }
828 0 : Err(e) => {
829 0 : make_broken(&tenant_clone, anyhow::anyhow!(e), BrokenVerbosity::Error);
830 0 : }
831 : }
832 :
833 : // If we are doing an opportunistic warmup attachment at startup, initialize
834 : // logical size at the same time. This is better than starting a bunch of idle tenants
835 : // with cold caches and then coming back later to initialize their logical sizes.
836 : //
837 : // It also prevents the warmup process from competing with the concurrency limit on
838 : // logical size calculations: if the logical size calculation semaphore is saturated,
839 : // then warmup will wait for that before proceeding to the next tenant.
840 0 : if matches!(attach_type, AttachType::Warmup { during_startup: true, .. }) {
841 0 : let mut futs: FuturesUnordered<_> = tenant_clone.timelines.lock().unwrap().values().cloned().map(|t| t.await_initial_logical_size()).collect();
842 0 : tracing::info!("Waiting for initial logical sizes while warming up...");
843 0 : while futs.next().await.is_some() {}
844 0 : tracing::info!("Warm-up complete");
845 0 : }
846 :
847 0 : Ok(())
848 0 : }
849 0 : .instrument(tracing::info_span!(parent: None, "attach", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), gen=?generation)),
850 : );
851 0 : Ok(tenant)
852 0 : }
853 :
854 322 : #[instrument(skip_all)]
855 : pub(crate) async fn preload(
856 : self: &Arc<Self>,
857 : remote_storage: &GenericRemoteStorage,
858 : cancel: CancellationToken,
859 : ) -> anyhow::Result<TenantPreload> {
860 : span::debug_assert_current_span_has_tenant_id();
861 : // Get list of remote timelines
862 : // download index files for every tenant timeline
863 : info!("listing remote timelines");
864 : let (remote_timeline_ids, other_keys) = remote_timeline_client::list_remote_timelines(
865 : remote_storage,
866 : self.tenant_shard_id,
867 : cancel.clone(),
868 : )
869 : .await?;
870 :
871 : info!("found {} timelines", remote_timeline_ids.len(),);
872 :
873 : for k in other_keys {
874 : warn!("Unexpected non timeline key {k}");
875 : }
876 :
877 : Ok(TenantPreload {
878 : timelines: Self::load_timeline_metadata(
879 : self,
880 : remote_timeline_ids,
881 : remote_storage,
882 : cancel,
883 : )
884 : .await?,
885 : })
886 : }
887 :
888 : ///
889 : /// Background task that downloads all data for a tenant and brings it to Active state.
890 : ///
891 : /// No background tasks are started as part of this routine.
892 : ///
893 161 : async fn attach(
894 161 : self: &Arc<Tenant>,
895 161 : preload: Option<TenantPreload>,
896 161 : ctx: &RequestContext,
897 161 : ) -> anyhow::Result<()> {
898 161 : span::debug_assert_current_span_has_tenant_id();
899 161 :
900 161 : failpoint_support::sleep_millis_async!("before-attaching-tenant");
901 :
902 161 : let Some(preload) = preload else {
903 0 : anyhow::bail!("local-only deployment is no longer supported, https://github.com/neondatabase/neon/issues/5624");
904 : };
905 :
906 161 : let mut timelines_to_resume_deletions = vec![];
907 161 :
908 161 : let mut remote_index_and_client = HashMap::new();
909 161 : let mut timeline_ancestors = HashMap::new();
910 161 : let mut existent_timelines = HashSet::new();
911 167 : for (timeline_id, preload) in preload.timelines {
912 6 : let index_part = match preload.index_part {
913 6 : Ok(i) => {
914 6 : debug!("remote index part exists for timeline {timeline_id}");
915 : // We found index_part on the remote, this is the standard case.
916 6 : existent_timelines.insert(timeline_id);
917 6 : i
918 : }
919 : Err(DownloadError::NotFound) => {
920 : // There is no index_part on the remote. We only get here
921 : // if there is some prefix for the timeline in the remote storage.
922 : // This can e.g. be the initdb.tar.zst archive, maybe a
923 : // remnant from a prior incomplete creation or deletion attempt.
924 : // Delete the local directory as the deciding criterion for a
925 : // timeline's existence is presence of index_part.
926 0 : info!(%timeline_id, "index_part not found on remote");
927 0 : continue;
928 : }
929 0 : Err(e) => {
930 0 : // Some (possibly ephemeral) error happened during index_part download.
931 0 : // Pretend the timeline exists to not delete the timeline directory,
932 0 : // as it might be a temporary issue and we don't want to re-download
933 0 : // everything after it resolves.
934 0 : warn!(%timeline_id, "Failed to load index_part from remote storage, failed creation? ({e})");
935 :
936 0 : existent_timelines.insert(timeline_id);
937 0 : continue;
938 : }
939 : };
940 6 : match index_part {
941 6 : MaybeDeletedIndexPart::IndexPart(index_part) => {
942 6 : timeline_ancestors.insert(timeline_id, index_part.metadata.clone());
943 6 : remote_index_and_client.insert(timeline_id, (index_part, preload.client));
944 6 : }
945 0 : MaybeDeletedIndexPart::Deleted(index_part) => {
946 0 : info!(
947 0 : "timeline {} is deleted, picking to resume deletion",
948 : timeline_id
949 : );
950 0 : timelines_to_resume_deletions.push((timeline_id, index_part, preload.client));
951 : }
952 : }
953 : }
954 :
955 : // For every timeline, download the metadata file, scan the local directory,
956 : // and build a layer map that contains an entry for each remote and local
957 : // layer file.
958 161 : let sorted_timelines = tree_sort_timelines(timeline_ancestors, |m| m.ancestor_timeline())?;
959 167 : for (timeline_id, remote_metadata) in sorted_timelines {
960 6 : let (index_part, remote_client) = remote_index_and_client
961 6 : .remove(&timeline_id)
962 6 : .expect("just put it in above");
963 6 :
964 6 : // TODO again handle early failure
965 6 : self.load_remote_timeline(
966 6 : timeline_id,
967 6 : index_part,
968 6 : remote_metadata,
969 6 : TimelineResources {
970 6 : remote_client,
971 6 : timeline_get_throttle: self.timeline_get_throttle.clone(),
972 6 : },
973 6 : ctx,
974 6 : )
975 10 : .await
976 6 : .with_context(|| {
977 0 : format!(
978 0 : "failed to load remote timeline {} for tenant {}",
979 0 : timeline_id, self.tenant_shard_id
980 0 : )
981 6 : })?;
982 : }
983 :
984 : // Walk through deleted timelines, resume deletion
985 161 : for (timeline_id, index_part, remote_timeline_client) in timelines_to_resume_deletions {
986 0 : remote_timeline_client
987 0 : .init_upload_queue_stopped_to_continue_deletion(&index_part)
988 0 : .context("init queue stopped")
989 0 : .map_err(LoadLocalTimelineError::ResumeDeletion)?;
990 :
991 0 : DeleteTimelineFlow::resume_deletion(
992 0 : Arc::clone(self),
993 0 : timeline_id,
994 0 : &index_part.metadata,
995 0 : remote_timeline_client,
996 0 : )
997 0 : .instrument(tracing::info_span!("timeline_delete", %timeline_id))
998 0 : .await
999 0 : .context("resume_deletion")
1000 0 : .map_err(LoadLocalTimelineError::ResumeDeletion)?;
1001 : }
1002 :
1003 : // The local filesystem contents are a cache of what's in the remote IndexPart;
1004 : // IndexPart is the source of truth.
1005 161 : self.clean_up_timelines(&existent_timelines)?;
1006 :
1007 161 : fail::fail_point!("attach-before-activate", |_| {
1008 0 : anyhow::bail!("attach-before-activate");
1009 161 : });
1010 161 : failpoint_support::sleep_millis_async!("attach-before-activate-sleep", &self.cancel);
1011 :
1012 161 : info!("Done");
1013 :
1014 161 : Ok(())
1015 161 : }
1016 :
1017 : /// Check for any local timeline directories that are temporary, or do not correspond to a
1018 : /// timeline that still exists: this can happen if we crashed during a deletion/creation, or
1019 : /// if a timeline was deleted while the tenant was attached to a different pageserver.
1020 161 : fn clean_up_timelines(&self, existent_timelines: &HashSet<TimelineId>) -> anyhow::Result<()> {
1021 161 : let timelines_dir = self.conf.timelines_path(&self.tenant_shard_id);
1022 :
1023 161 : let entries = match timelines_dir.read_dir_utf8() {
1024 161 : Ok(d) => d,
1025 0 : Err(e) => {
1026 0 : if e.kind() == std::io::ErrorKind::NotFound {
1027 0 : return Ok(());
1028 : } else {
1029 0 : return Err(e).context("list timelines directory for tenant");
1030 : }
1031 : }
1032 : };
1033 :
1034 169 : for entry in entries {
1035 8 : let entry = entry.context("read timeline dir entry")?;
1036 8 : let entry_path = entry.path();
1037 :
1038 8 : let purge = if crate::is_temporary(entry_path)
1039 : // TODO: remove uninit mark code (https://github.com/neondatabase/neon/issues/5718)
1040 8 : || is_uninit_mark(entry_path)
1041 8 : || crate::is_delete_mark(entry_path)
1042 : {
1043 0 : true
1044 : } else {
1045 8 : match TimelineId::try_from(entry_path.file_name()) {
1046 8 : Ok(i) => {
1047 8 : // Purge if the timeline ID does not exist in remote storage: remote storage is the authority.
1048 8 : !existent_timelines.contains(&i)
1049 : }
1050 0 : Err(e) => {
1051 0 : tracing::warn!(
1052 0 : "Unparseable directory in timelines directory: {entry_path}, ignoring ({e})"
1053 : );
1054 : // Do not purge junk: if we don't recognize it, be cautious and leave it for a human.
1055 0 : false
1056 : }
1057 : }
1058 : };
1059 :
1060 8 : if purge {
1061 2 : tracing::info!("Purging stale timeline dentry {entry_path}");
1062 2 : if let Err(e) = match entry.file_type() {
1063 2 : Ok(t) => if t.is_dir() {
1064 2 : std::fs::remove_dir_all(entry_path)
1065 : } else {
1066 0 : std::fs::remove_file(entry_path)
1067 : }
1068 2 : .or_else(fs_ext::ignore_not_found),
1069 0 : Err(e) => Err(e),
1070 : } {
1071 0 : tracing::warn!("Failed to purge stale timeline dentry {entry_path}: {e}");
1072 2 : }
1073 6 : }
1074 : }
1075 :
1076 161 : Ok(())
1077 161 : }
1078 :
1079 : /// Get sum of all remote timelines sizes
1080 : ///
1081 : /// This function relies on the index_part instead of listing the remote storage
1082 0 : pub fn remote_size(&self) -> u64 {
1083 0 : let mut size = 0;
1084 :
1085 0 : for timeline in self.list_timelines() {
1086 0 : size += timeline.remote_client.get_remote_physical_size();
1087 0 : }
1088 :
1089 0 : size
1090 0 : }
1091 :
1092 12 : #[instrument(skip_all, fields(timeline_id=%timeline_id))]
1093 : async fn load_remote_timeline(
1094 : &self,
1095 : timeline_id: TimelineId,
1096 : index_part: IndexPart,
1097 : remote_metadata: TimelineMetadata,
1098 : resources: TimelineResources,
1099 : ctx: &RequestContext,
1100 : ) -> anyhow::Result<()> {
1101 : span::debug_assert_current_span_has_tenant_id();
1102 :
1103 : info!("downloading index file for timeline {}", timeline_id);
1104 : tokio::fs::create_dir_all(self.conf.timeline_path(&self.tenant_shard_id, &timeline_id))
1105 : .await
1106 : .context("Failed to create new timeline directory")?;
1107 :
1108 : let ancestor = if let Some(ancestor_id) = remote_metadata.ancestor_timeline() {
1109 : let timelines = self.timelines.lock().unwrap();
1110 : Some(Arc::clone(timelines.get(&ancestor_id).ok_or_else(
1111 0 : || {
1112 0 : anyhow::anyhow!(
1113 0 : "cannot find ancestor timeline {ancestor_id} for timeline {timeline_id}"
1114 0 : )
1115 0 : },
1116 : )?))
1117 : } else {
1118 : None
1119 : };
1120 :
1121 : let last_aux_file_policy = index_part.last_aux_file_policy();
1122 :
1123 : self.timeline_init_and_sync(
1124 : timeline_id,
1125 : resources,
1126 : Some(index_part),
1127 : remote_metadata,
1128 : ancestor,
1129 : last_aux_file_policy,
1130 : ctx,
1131 : )
1132 : .await
1133 : }
1134 :
1135 : /// Create a placeholder Tenant object for a broken tenant
1136 0 : pub fn create_broken_tenant(
1137 0 : conf: &'static PageServerConf,
1138 0 : tenant_shard_id: TenantShardId,
1139 0 : remote_storage: GenericRemoteStorage,
1140 0 : reason: String,
1141 0 : ) -> Arc<Tenant> {
1142 0 : Arc::new(Tenant::new(
1143 0 : TenantState::Broken {
1144 0 : reason,
1145 0 : backtrace: String::new(),
1146 0 : },
1147 0 : conf,
1148 0 : AttachedTenantConf::try_from(LocationConf::default()).unwrap(),
1149 0 : // Shard identity isn't meaningful for a broken tenant: it's just a placeholder
1150 0 : // to occupy the slot for this TenantShardId.
1151 0 : ShardIdentity::broken(tenant_shard_id.shard_number, tenant_shard_id.shard_count),
1152 0 : None,
1153 0 : tenant_shard_id,
1154 0 : remote_storage,
1155 0 : DeletionQueueClient::broken(),
1156 0 : ))
1157 0 : }
1158 :
1159 161 : async fn load_timeline_metadata(
1160 161 : self: &Arc<Tenant>,
1161 161 : timeline_ids: HashSet<TimelineId>,
1162 161 : remote_storage: &GenericRemoteStorage,
1163 161 : cancel: CancellationToken,
1164 161 : ) -> anyhow::Result<HashMap<TimelineId, TimelinePreload>> {
1165 161 : let mut part_downloads = JoinSet::new();
1166 167 : for timeline_id in timeline_ids {
1167 6 : let client = RemoteTimelineClient::new(
1168 6 : remote_storage.clone(),
1169 6 : self.deletion_queue_client.clone(),
1170 6 : self.conf,
1171 6 : self.tenant_shard_id,
1172 6 : timeline_id,
1173 6 : self.generation,
1174 6 : );
1175 6 : let cancel_clone = cancel.clone();
1176 6 : part_downloads.spawn(
1177 6 : async move {
1178 6 : debug!("starting index part download");
1179 :
1180 22 : let index_part = client.download_index_file(&cancel_clone).await;
1181 :
1182 6 : debug!("finished index part download");
1183 :
1184 6 : Result::<_, anyhow::Error>::Ok(TimelinePreload {
1185 6 : client,
1186 6 : timeline_id,
1187 6 : index_part,
1188 6 : })
1189 6 : }
1190 6 : .map(move |res| {
1191 6 : res.with_context(|| format!("download index part for timeline {timeline_id}"))
1192 6 : })
1193 6 : .instrument(info_span!("download_index_part", %timeline_id)),
1194 : );
1195 : }
1196 :
1197 161 : let mut timeline_preloads: HashMap<TimelineId, TimelinePreload> = HashMap::new();
1198 :
1199 167 : loop {
1200 167 : tokio::select!(
1201 : next = part_downloads.join_next() => {
1202 : match next {
1203 : Some(result) => {
1204 : let preload_result = result.context("join preload task")?;
1205 : let preload = preload_result?;
1206 : timeline_preloads.insert(preload.timeline_id, preload);
1207 : },
1208 : None => {
1209 : break;
1210 : }
1211 : }
1212 : },
1213 : _ = cancel.cancelled() => {
1214 : anyhow::bail!("Cancelled while waiting for remote index download")
1215 : }
1216 167 : )
1217 167 : }
1218 :
1219 161 : Ok(timeline_preloads)
1220 161 : }
1221 :
1222 4 : pub(crate) fn tenant_shard_id(&self) -> TenantShardId {
1223 4 : self.tenant_shard_id
1224 4 : }
1225 :
1226 : /// Get Timeline handle for given Neon timeline ID.
1227 : /// This function is idempotent. It doesn't change internal state in any way.
1228 222 : pub fn get_timeline(
1229 222 : &self,
1230 222 : timeline_id: TimelineId,
1231 222 : active_only: bool,
1232 222 : ) -> Result<Arc<Timeline>, GetTimelineError> {
1233 222 : let timelines_accessor = self.timelines.lock().unwrap();
1234 222 : let timeline = timelines_accessor
1235 222 : .get(&timeline_id)
1236 222 : .ok_or(GetTimelineError::NotFound {
1237 222 : tenant_id: self.tenant_shard_id,
1238 222 : timeline_id,
1239 222 : })?;
1240 :
1241 220 : if active_only && !timeline.is_active() {
1242 0 : Err(GetTimelineError::NotActive {
1243 0 : tenant_id: self.tenant_shard_id,
1244 0 : timeline_id,
1245 0 : state: timeline.current_state(),
1246 0 : })
1247 : } else {
1248 220 : Ok(Arc::clone(timeline))
1249 : }
1250 222 : }
1251 :
1252 : /// Lists timelines the tenant contains.
1253 : /// It is up to the tenant's implementation to omit certain timelines that are not considered ready for use.
1254 8 : pub fn list_timelines(&self) -> Vec<Arc<Timeline>> {
1255 8 : self.timelines
1256 8 : .lock()
1257 8 : .unwrap()
1258 8 : .values()
1259 8 : .map(Arc::clone)
1260 8 : .collect()
1261 8 : }
1262 :
1263 0 : pub fn list_timeline_ids(&self) -> Vec<TimelineId> {
1264 0 : self.timelines.lock().unwrap().keys().cloned().collect()
1265 0 : }
1266 :
1267 : /// This is used to create the initial 'main' timeline during bootstrapping,
1268 : /// or when importing a new base backup. The caller is expected to load an
1269 : /// initial image of the datadir to the new timeline after this.
1270 : ///
1271 : /// Until that happens, the on-disk state is invalid (disk_consistent_lsn=Lsn(0))
1272 : /// and the timeline will fail to load at a restart.
1273 : ///
1274 : /// For tests, use `DatadirModification::init_empty_test_timeline` + `commit` to setup the
1275 : /// minimum amount of keys required to get a writable timeline.
1276 : /// (Without it, `put` might fail due to `repartition` failing.)
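    ///
    /// Illustrative test-only flow, mirroring what `create_test_timeline` below does:
    ///
    /// ```ignore
    /// let uninit = tenant.create_empty_timeline(new_id, initdb_lsn, pg_version, &ctx).await?;
    /// let tline = uninit.raw_timeline().expect("just created");
    /// let mut modification = tline.begin_modification(initdb_lsn);
    /// modification.init_empty_test_timeline()?;
    /// modification.commit(&ctx).await?;
    /// ```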
1277 153 : pub(crate) async fn create_empty_timeline(
1278 153 : &self,
1279 153 : new_timeline_id: TimelineId,
1280 153 : initdb_lsn: Lsn,
1281 153 : pg_version: u32,
1282 153 : _ctx: &RequestContext,
1283 153 : ) -> anyhow::Result<UninitializedTimeline> {
1284 153 : anyhow::ensure!(
1285 153 : self.is_active(),
1286 0 : "Cannot create empty timelines on inactive tenant"
1287 : );
1288 :
1289 : // Protect against concurrent attempts to use this TimelineId
1290 153 : let create_guard = self.create_timeline_create_guard(new_timeline_id)?;
1291 :
1292 151 : let new_metadata = TimelineMetadata::new(
1293 151 : // Initialize disk_consistent LSN to 0, The caller must import some data to
1294 151 : // make it valid, before calling finish_creation()
1295 151 : Lsn(0),
1296 151 : None,
1297 151 : None,
1298 151 : Lsn(0),
1299 151 : initdb_lsn,
1300 151 : initdb_lsn,
1301 151 : pg_version,
1302 151 : );
1303 151 : self.prepare_new_timeline(
1304 151 : new_timeline_id,
1305 151 : &new_metadata,
1306 151 : create_guard,
1307 151 : initdb_lsn,
1308 151 : None,
1309 151 : None,
1310 151 : )
1311 0 : .await
1312 153 : }
1313 :
1314 : /// Helper for unit tests to create an empty timeline.
1315 : ///
1316 : /// The timeline has state value `Active` but its background loops are not running.
1317 : // This makes the various functions which anyhow::ensure! for Active state work in tests.
1318 : // Our current tests don't need the background loops.
1319 : #[cfg(test)]
1320 143 : pub async fn create_test_timeline(
1321 143 : &self,
1322 143 : new_timeline_id: TimelineId,
1323 143 : initdb_lsn: Lsn,
1324 143 : pg_version: u32,
1325 143 : ctx: &RequestContext,
1326 143 : ) -> anyhow::Result<Arc<Timeline>> {
1327 143 : let uninit_tl = self
1328 143 : .create_empty_timeline(new_timeline_id, initdb_lsn, pg_version, ctx)
1329 1 : .await?;
1330 142 : let tline = uninit_tl.raw_timeline().expect("we just created it");
1331 142 : assert_eq!(tline.get_last_record_lsn(), Lsn(0));
1332 :
1333 : // Setup minimum keys required for the timeline to be usable.
1334 142 : let mut modification = tline.begin_modification(initdb_lsn);
1335 142 : modification
1336 142 : .init_empty_test_timeline()
1337 142 : .context("init_empty_test_timeline")?;
1338 142 : modification
1339 142 : .commit(ctx)
1340 136 : .await
1341 142 : .context("commit init_empty_test_timeline modification")?;
1342 :
1343 : // Flush to disk so that uninit_tl's check for valid disk_consistent_lsn passes.
1344 142 : tline.maybe_spawn_flush_loop();
1345 142 : tline.freeze_and_flush().await.context("freeze_and_flush")?;
1346 :
1347 : // Make sure the freeze_and_flush reaches remote storage.
1348 142 : tline.remote_client.wait_completion().await.unwrap();
1349 :
1350 142 : let tl = uninit_tl.finish_creation()?;
1351 : // The non-test code would call tl.activate() here.
1352 142 : tl.set_state(TimelineState::Active);
1353 142 : Ok(tl)
1354 143 : }
1355 :
1356 : /// Helper for unit tests to create a timeline with some pre-loaded states.
1357 : #[cfg(test)]
1358 : #[allow(clippy::too_many_arguments)]
1359 19 : pub async fn create_test_timeline_with_layers(
1360 19 : &self,
1361 19 : new_timeline_id: TimelineId,
1362 19 : initdb_lsn: Lsn,
1363 19 : pg_version: u32,
1364 19 : ctx: &RequestContext,
1365 19 : delta_layer_desc: Vec<Vec<(pageserver_api::key::Key, Lsn, crate::repository::Value)>>,
1366 19 : image_layer_desc: Vec<(Lsn, Vec<(pageserver_api::key::Key, bytes::Bytes)>)>,
1367 19 : end_lsn: Lsn,
1368 19 : ) -> anyhow::Result<Arc<Timeline>> {
1369 19 : let tline = self
1370 19 : .create_test_timeline(new_timeline_id, initdb_lsn, pg_version, ctx)
1371 52 : .await?;
1372 18 : tline.force_advance_lsn(end_lsn);
1373 52 : for deltas in delta_layer_desc {
1374 34 : tline
1375 34 : .force_create_delta_layer(deltas, Some(initdb_lsn), ctx)
1376 102 : .await?;
1377 : }
1378 52 : for (lsn, images) in image_layer_desc {
1379 34 : tline
1380 34 : .force_create_image_layer(lsn, images, Some(initdb_lsn), ctx)
1381 157 : .await?;
1382 : }
1383 18 : Ok(tline)
1384 19 : }
1385 :
1386 : /// Create a new timeline.
1387 : ///
1388 : /// Returns the new timeline ID and reference to its Timeline object.
1389 : ///
1390 : /// If the caller specified the timeline ID to use (`new_timeline_id`), and timeline with
1391 : /// the same timeline ID already exists, returns CreateTimelineError::AlreadyExists.
1392 : #[allow(clippy::too_many_arguments)]
1393 0 : pub(crate) async fn create_timeline(
1394 0 : self: &Arc<Tenant>,
1395 0 : new_timeline_id: TimelineId,
1396 0 : ancestor_timeline_id: Option<TimelineId>,
1397 0 : mut ancestor_start_lsn: Option<Lsn>,
1398 0 : pg_version: u32,
1399 0 : load_existing_initdb: Option<TimelineId>,
1400 0 : broker_client: storage_broker::BrokerClientChannel,
1401 0 : ctx: &RequestContext,
1402 0 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
1403 0 : if !self.is_active() {
1404 0 : if matches!(self.current_state(), TenantState::Stopping { .. }) {
1405 0 : return Err(CreateTimelineError::ShuttingDown);
1406 : } else {
1407 0 : return Err(CreateTimelineError::Other(anyhow::anyhow!(
1408 0 : "Cannot create timelines on inactive tenant"
1409 0 : )));
1410 : }
1411 0 : }
1412 :
1413 0 : let _gate = self
1414 0 : .gate
1415 0 : .enter()
1416 0 : .map_err(|_| CreateTimelineError::ShuttingDown)?;
1417 :
1418 : // Get exclusive access to the timeline ID: this ensures that it does not already exist,
1419 : // and that no other creation attempts will be allowed in while we are working.
1420 0 : let create_guard = match self.create_timeline_create_guard(new_timeline_id) {
1421 0 : Ok(m) => m,
1422 : Err(TimelineExclusionError::AlreadyCreating) => {
1423 : // Creation is in progress, we cannot create it again, and we cannot
1424 : // check if this request matches the existing one, so caller must try
1425 : // again later.
1426 0 : return Err(CreateTimelineError::AlreadyCreating);
1427 : }
1428 0 : Err(TimelineExclusionError::Other(e)) => {
1429 0 : return Err(CreateTimelineError::Other(e));
1430 : }
1431 0 : Err(TimelineExclusionError::AlreadyExists(existing)) => {
1432 0 : debug!("timeline {new_timeline_id} already exists");
1433 :
1434 : // Idempotency: creating the same timeline twice is not an error, unless
1435 : // the second creation has different parameters.
1436 0 : if existing.get_ancestor_timeline_id() != ancestor_timeline_id
1437 0 : || existing.pg_version != pg_version
1438 0 : || (ancestor_start_lsn.is_some()
1439 0 : && ancestor_start_lsn != Some(existing.get_ancestor_lsn()))
1440 : {
1441 0 : return Err(CreateTimelineError::Conflict);
1442 0 : }
1443 0 :
1444 0 : // Wait for uploads to complete, so that when we return Ok, the timeline
1445 0 : // is known to be durable on remote storage. Just like we do at the end of
1446 0 : // this function, after we have created the timeline ourselves.
1447 0 : //
1448 0 : // We only really care that the initial version of `index_part.json` has
1449 0 : // been uploaded. That's enough to remember that the timeline
1450 0 : // exists. However, there is no function to wait specifically for that so
1451 0 : // we just wait for all in-progress uploads to finish.
1452 0 : existing
1453 0 : .remote_client
1454 0 : .wait_completion()
1455 0 : .await
1456 0 : .context("wait for timeline uploads to complete")?;
1457 :
1458 0 : return Ok(existing);
1459 : }
1460 : };
1461 :
1462 : pausable_failpoint!("timeline-creation-after-uninit");
1463 :
1464 0 : let loaded_timeline = match ancestor_timeline_id {
1465 0 : Some(ancestor_timeline_id) => {
1466 0 : let ancestor_timeline = self
1467 0 : .get_timeline(ancestor_timeline_id, false)
1468 0 : .context("Cannot branch off the timeline that's not present in pageserver")?;
1469 :
1470 : // instead of waiting around, just deny the request because ancestor is not yet
1471 : // ready for other purposes either.
1472 0 : if !ancestor_timeline.is_active() {
1473 0 : return Err(CreateTimelineError::AncestorNotActive);
1474 0 : }
1475 :
1476 0 : if let Some(lsn) = ancestor_start_lsn.as_mut() {
1477 0 : *lsn = lsn.align();
1478 0 :
1479 0 : let ancestor_ancestor_lsn = ancestor_timeline.get_ancestor_lsn();
1480 0 : if ancestor_ancestor_lsn > *lsn {
1481 : // can we safely just branch from the ancestor instead?
1482 0 : return Err(CreateTimelineError::AncestorLsn(anyhow::anyhow!(
1483 0 : "invalid start lsn {} for ancestor timeline {}: less than timeline ancestor lsn {}",
1484 0 : lsn,
1485 0 : ancestor_timeline_id,
1486 0 : ancestor_ancestor_lsn,
1487 0 : )));
1488 0 : }
1489 0 :
1490 0 : // Wait for the WAL to arrive and be processed on the parent branch up
1491 0 : // to the requested branch point. The repository code itself doesn't
1492 0 : // require it, but if we start to receive WAL on the new timeline,
1493 0 : // decoding the new WAL might need to look up previous pages, relation
1494 0 : // sizes etc. and that would get confused if the previous page versions
1495 0 : // are not in the repository yet.
1496 0 : ancestor_timeline
1497 0 : .wait_lsn(*lsn, timeline::WaitLsnWaiter::Tenant, ctx)
1498 0 : .await
1499 0 : .map_err(|e| match e {
1500 0 : e @ (WaitLsnError::Timeout(_) | WaitLsnError::BadState { .. }) => {
1501 0 : CreateTimelineError::AncestorLsn(anyhow::anyhow!(e))
1502 : }
1503 0 : WaitLsnError::Shutdown => CreateTimelineError::ShuttingDown,
1504 0 : })?;
1505 0 : }
1506 :
1507 0 : self.branch_timeline(
1508 0 : &ancestor_timeline,
1509 0 : new_timeline_id,
1510 0 : ancestor_start_lsn,
1511 0 : create_guard,
1512 0 : ctx,
1513 0 : )
1514 0 : .await?
1515 : }
1516 : None => {
1517 0 : self.bootstrap_timeline(
1518 0 : new_timeline_id,
1519 0 : pg_version,
1520 0 : load_existing_initdb,
1521 0 : create_guard,
1522 0 : ctx,
1523 0 : )
1524 0 : .await?
1525 : }
1526 : };
1527 :
1528 : // At this point we have dropped our guard on [`Self::timelines_creating`], and
1529 : // the timeline is visible in [`Self::timelines`], but it is _not_ durable yet. We must
1530 : // not send a success to the caller until it is. The same applies to handling retries,
1531 : // see the handling of [`TimelineExclusionError::AlreadyExists`] above.
1532 0 : let kind = ancestor_timeline_id
1533 0 : .map(|_| "branched")
1534 0 : .unwrap_or("bootstrapped");
1535 0 : loaded_timeline
1536 0 : .remote_client
1537 0 : .wait_completion()
1538 0 : .await
1539 0 : .with_context(|| format!("wait for {} timeline initial uploads to complete", kind))?;
1540 :
1541 0 : loaded_timeline.activate(self.clone(), broker_client, None, ctx);
1542 0 :
1543 0 : Ok(loaded_timeline)
1544 0 : }
1545 :
1546 0 : pub(crate) async fn delete_timeline(
1547 0 : self: Arc<Self>,
1548 0 : timeline_id: TimelineId,
1549 0 : ) -> Result<(), DeleteTimelineError> {
1550 0 : DeleteTimelineFlow::run(&self, timeline_id, false).await?;
1551 :
1552 0 : Ok(())
1553 0 : }
1554 :
1555 : /// Perform one garbage collection iteration, removing old data files from disk.
1556 : /// This function is periodically called by the gc task.
1557 : /// It can also be explicitly requested through the page server api's 'do_gc' command.
1558 : ///
1559 : /// `target_timeline_id` specifies the timeline to GC, or None for all.
1560 : ///
1561 : /// The `horizon` and `pitr` parameters determine how much WAL history needs to be retained.
1562 : /// Also known as the retention period, or the GC cutoff point. `horizon` specifies
1563 : /// the amount of history, as an LSN difference from the current latest LSN on each timeline.
1564 : /// `pitr` specifies the same as a time difference from the current time. The effective
1565 : /// GC cutoff point is determined conservatively: of `horizon` and `pitr`, whichever
1566 : /// requires more history to be retained wins.
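: ///
: /// A minimal sketch of the cutoff selection (illustrative only; `lsn_for_timestamp`
: /// is a hypothetical helper standing in for the real timestamp-to-LSN lookup):
: ///
: /// ```ignore
: /// let horizon_cutoff = last_record_lsn.checked_sub(horizon).unwrap_or(Lsn(0));
: /// let pitr_cutoff = lsn_for_timestamp(SystemTime::now() - pitr);
: /// // Keep whichever retains more history, i.e. the smaller LSN.
: /// let effective_cutoff = std::cmp::min(horizon_cutoff, pitr_cutoff);
: /// ```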
1567 : //
1568 754 : pub(crate) async fn gc_iteration(
1569 754 : &self,
1570 754 : target_timeline_id: Option<TimelineId>,
1571 754 : horizon: u64,
1572 754 : pitr: Duration,
1573 754 : cancel: &CancellationToken,
1574 754 : ctx: &RequestContext,
1575 754 : ) -> Result<GcResult, GcError> {
1576 754 : // Don't start doing work during shutdown
1577 754 : if let TenantState::Stopping { .. } = self.current_state() {
1578 0 : return Ok(GcResult::default());
1579 754 : }
1580 754 :
1581 754 : // there is a global allowed_error for this
1582 754 : if !self.is_active() {
1583 0 : return Err(GcError::NotActive);
1584 754 : }
1585 754 :
1586 754 : {
1587 754 : let conf = self.tenant_conf.load();
1588 754 :
1589 754 : if !conf.location.may_delete_layers_hint() {
1590 0 : info!("Skipping GC in location state {:?}", conf.location);
1591 0 : return Ok(GcResult::default());
1592 754 : }
1593 754 : }
1594 754 :
1595 754 : self.gc_iteration_internal(target_timeline_id, horizon, pitr, cancel, ctx)
1596 696 : .await
1597 754 : }
1598 :
1599 : /// Perform one compaction iteration.
1600 : /// This function is periodically called by compactor task.
1601 : /// Also it can be explicitly requested per timeline through page server
1602 : /// api's 'compact' command.
1603 0 : async fn compaction_iteration(
1604 0 : &self,
1605 0 : cancel: &CancellationToken,
1606 0 : ctx: &RequestContext,
1607 0 : ) -> anyhow::Result<(), timeline::CompactionError> {
1608 0 : // Don't start doing work during shutdown, or when broken, we do not need those in the logs
1609 0 : if !self.is_active() {
1610 0 : return Ok(());
1611 0 : }
1612 0 :
1613 0 : {
1614 0 : let conf = self.tenant_conf.load();
1615 0 : if !conf.location.may_delete_layers_hint() || !conf.location.may_upload_layers_hint() {
1616 0 : info!("Skipping compaction in location state {:?}", conf.location);
1617 0 : return Ok(());
1618 0 : }
1619 0 : }
1620 0 :
1621 0 : // Scan through the hashmap and collect a list of all the timelines,
1622 0 : // while holding the lock. Then drop the lock and actually perform the
1623 0 : // compactions. We don't want to block everything else while the
1624 0 : // compaction runs.
1625 0 : let timelines_to_compact = {
1626 0 : let timelines = self.timelines.lock().unwrap();
1627 0 : let timelines_to_compact = timelines
1628 0 : .iter()
1629 0 : .filter_map(|(timeline_id, timeline)| {
1630 0 : if timeline.is_active() {
1631 0 : Some((*timeline_id, timeline.clone()))
1632 : } else {
1633 0 : None
1634 : }
1635 0 : })
1636 0 : .collect::<Vec<_>>();
1637 0 : drop(timelines);
1638 0 : timelines_to_compact
1639 : };
1640 :
1641 0 : for (timeline_id, timeline) in &timelines_to_compact {
1642 0 : timeline
1643 0 : .compact(cancel, EnumSet::empty(), ctx)
1644 0 : .instrument(info_span!("compact_timeline", %timeline_id))
1645 0 : .await?;
1646 : }
1647 :
1648 0 : Ok(())
1649 0 : }
1650 :
1651 : // Call through to all timelines to freeze ephemeral layers if needed. Usually
1652 : // this happens during ingest: this background housekeeping is for freezing layers
1653 : // that are open but haven't been written to for some time.
1654 0 : async fn ingest_housekeeping(&self) {
1655 0 : // Scan through the hashmap and collect a list of all the timelines,
1656 0 : // while holding the lock. Then drop the lock and do the actual freezing,
1657 0 : // so that we don't block everything else while it runs.
1659 0 : let timelines = {
1660 0 : self.timelines
1661 0 : .lock()
1662 0 : .unwrap()
1663 0 : .values()
1664 0 : .filter_map(|timeline| {
1665 0 : if timeline.is_active() {
1666 0 : Some(timeline.clone())
1667 : } else {
1668 0 : None
1669 : }
1670 0 : })
1671 0 : .collect::<Vec<_>>()
1672 : };
1673 :
1674 0 : for timeline in &timelines {
1675 0 : timeline.maybe_freeze_ephemeral_layer().await;
1676 : }
1677 0 : }
1678 :
1679 2421 : pub fn current_state(&self) -> TenantState {
1680 2421 : self.state.borrow().clone()
1681 2421 : }
1682 :
1683 1661 : pub fn is_active(&self) -> bool {
1684 1661 : self.current_state() == TenantState::Active
1685 1661 : }
1686 :
1687 0 : pub fn generation(&self) -> Generation {
1688 0 : self.generation
1689 0 : }
1690 :
1691 0 : pub(crate) fn wal_redo_manager_status(&self) -> Option<WalRedoManagerStatus> {
1692 0 : self.walredo_mgr.as_ref().and_then(|mgr| mgr.status())
1693 0 : }
1694 :
1695 : /// Changes tenant status to active, unless shutdown was already requested.
1696 : ///
1697 : /// `background_jobs_can_start` is an optional barrier set to a value during pageserver startup
1698 : /// to delay background jobs. Background jobs can be started right away when None is given.
1699 0 : fn activate(
1700 0 : self: &Arc<Self>,
1701 0 : broker_client: BrokerClientChannel,
1702 0 : background_jobs_can_start: Option<&completion::Barrier>,
1703 0 : ctx: &RequestContext,
1704 0 : ) {
1705 0 : span::debug_assert_current_span_has_tenant_id();
1706 0 :
1707 0 : let mut activating = false;
1708 0 : self.state.send_modify(|current_state| {
1709 0 : use pageserver_api::models::ActivatingFrom;
1710 0 : match &*current_state {
1711 : TenantState::Activating(_) | TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => {
1712 0 : panic!("caller is responsible for calling activate() only on Loading / Attaching tenants, got {state:?}", state = current_state);
1713 : }
1714 0 : TenantState::Loading => {
1715 0 : *current_state = TenantState::Activating(ActivatingFrom::Loading);
1716 0 : }
1717 0 : TenantState::Attaching => {
1718 0 : *current_state = TenantState::Activating(ActivatingFrom::Attaching);
1719 0 : }
1720 : }
1721 0 : debug!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), "Activating tenant");
1722 0 : activating = true;
1723 0 : // Continue outside the closure. We need to grab timelines.lock()
1724 0 : // and we plan to turn it into a tokio::sync::Mutex in a future patch.
1725 0 : });
1726 0 :
1727 0 : if activating {
1728 0 : let timelines_accessor = self.timelines.lock().unwrap();
1729 0 : let timelines_to_activate = timelines_accessor
1730 0 : .values()
1731 0 : .filter(|timeline| !(timeline.is_broken() || timeline.is_stopping()));
1732 0 :
1733 0 : // Spawn gc and compaction loops. The loops will shut themselves
1734 0 : // down when they notice that the tenant is inactive.
1735 0 : tasks::start_background_loops(self, background_jobs_can_start);
1736 0 :
1737 0 : let mut activated_timelines = 0;
1738 :
1739 0 : for timeline in timelines_to_activate {
1740 0 : timeline.activate(
1741 0 : self.clone(),
1742 0 : broker_client.clone(),
1743 0 : background_jobs_can_start,
1744 0 : ctx,
1745 0 : );
1746 0 : activated_timelines += 1;
1747 0 : }
1748 :
1749 0 : self.state.send_modify(move |current_state| {
1750 0 : assert!(
1751 0 : matches!(current_state, TenantState::Activating(_)),
1752 0 : "set_stopping and set_broken wait for us to leave Activating state",
1753 : );
1754 0 : *current_state = TenantState::Active;
1755 0 :
1756 0 : let elapsed = self.constructed_at.elapsed();
1757 0 : let total_timelines = timelines_accessor.len();
1758 0 :
1759 0 : // log a lot of stuff, because some tenants sometimes suffer from user-visible
1760 0 : // times to activate. see https://github.com/neondatabase/neon/issues/4025
1761 0 : info!(
1762 0 : since_creation_millis = elapsed.as_millis(),
1763 0 : tenant_id = %self.tenant_shard_id.tenant_id,
1764 0 : shard_id = %self.tenant_shard_id.shard_slug(),
1765 0 : activated_timelines,
1766 0 : total_timelines,
1767 0 : post_state = <&'static str>::from(&*current_state),
1768 0 : "activation attempt finished"
1769 : );
1770 :
1771 0 : TENANT.activation.observe(elapsed.as_secs_f64());
1772 0 : });
1773 0 : }
1774 0 : }
1775 :
1776 : /// Shutdown the tenant and join all of the spawned tasks.
1777 : ///
1778 : /// The method caters for all use-cases (selected via `shutdown_mode`):
1779 : /// - pageserver shutdown (freeze and flush before stopping)
1780 : /// - detach + ignore (stop without flushing)
1781 : ///
1782 : /// This will attempt to shut down even if the tenant is broken.
1783 : ///
1784 : /// `shutdown_progress` is a [`completion::Barrier`] for the shutdown initiated by this call.
1785 : /// If the tenant is already shutting down, we return a clone of the first shutdown call's
1786 : /// `Barrier` as an `Err`. A later caller can use the returned barrier to join with
1787 : /// the ongoing shutdown.
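: ///
: /// A sketch of a caller joining an in-progress shutdown (illustrative only):
: ///
: /// ```ignore
: /// let (_completion, barrier) = utils::completion::channel();
: /// if let Err(first_barrier) = tenant.shutdown(barrier, shutdown_mode).await {
: ///     // Someone else initiated the shutdown first: wait for it to finish.
: ///     first_barrier.wait().await;
: /// }
: /// ```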
1788 6 : async fn shutdown(
1789 6 : &self,
1790 6 : shutdown_progress: completion::Barrier,
1791 6 : shutdown_mode: timeline::ShutdownMode,
1792 6 : ) -> Result<(), completion::Barrier> {
1793 6 : span::debug_assert_current_span_has_tenant_id();
1794 :
1795 : // Set tenant (and its timelines) to Stopping state.
1796 : //
1797 : // Since we can only transition into Stopping state after activation is complete,
1798 : // run it in a JoinSet so all tenants have a chance to stop before we get SIGKILLed.
1799 : //
1800 : // Transitioning tenants to Stopping state has a couple of non-obvious side effects:
1801 : // 1. Lock out any new requests to the tenants.
1802 : // 2. Signal cancellation to WAL receivers (we wait on it below).
1803 : // 3. Signal cancellation for other tenant background loops.
1804 : // 4. ???
1805 : //
1806 : // The waiting for the cancellation is not done uniformly.
1807 : // We certainly wait for WAL receivers to shut down.
1808 : // That is necessary so that no new data comes in before the freeze_and_flush.
1809 : // But the tenant background loops are joined-on in our caller.
1810 : // It's messed up.
1811 : // We just ignore the failure to stop.
1812 :
1813 : // If we're still attaching, fire the cancellation token early to drop out: this
1814 : // will prevent us flushing, but ensures timely shutdown if some I/O during attach
1815 : // is very slow.
1816 6 : if matches!(self.current_state(), TenantState::Attaching) {
1817 0 : self.cancel.cancel();
1818 6 : }
1819 :
1820 6 : match self.set_stopping(shutdown_progress, false, false).await {
1821 6 : Ok(()) => {}
1822 0 : Err(SetStoppingError::Broken) => {
1823 0 : // assume that this is acceptable
1824 0 : }
1825 0 : Err(SetStoppingError::AlreadyStopping(other)) => {
1826 0 : // give the caller the option to wait for this shutdown
1827 0 : info!("Tenant::shutdown: AlreadyStopping");
1828 0 : return Err(other);
1829 : }
1830 : };
1831 :
1832 6 : let mut js = tokio::task::JoinSet::new();
1833 6 : {
1834 6 : let timelines = self.timelines.lock().unwrap();
1835 6 : timelines.values().for_each(|timeline| {
1836 6 : let timeline = Arc::clone(timeline);
1837 6 : let timeline_id = timeline.timeline_id;
1838 6 : let span = tracing::info_span!("timeline_shutdown", %timeline_id, ?shutdown_mode);
1839 16 : js.spawn(async move { timeline.shutdown(shutdown_mode).instrument(span).await });
1840 6 : })
1841 6 : };
1842 6 : // test_long_timeline_create_then_tenant_delete is leaning on this message
1843 6 : tracing::info!("Waiting for timelines...");
1844 12 : while let Some(res) = js.join_next().await {
1845 0 : match res {
1846 6 : Ok(()) => {}
1847 0 : Err(je) if je.is_cancelled() => unreachable!("no cancelling used"),
1848 0 : Err(je) if je.is_panic() => { /* logged already */ }
1849 0 : Err(je) => warn!("unexpected JoinError: {je:?}"),
1850 : }
1851 : }
1852 :
1853 : // We cancel the Tenant's cancellation token _after_ the timelines have all shut down. This permits
1854 : // them to continue to do work during their shutdown methods, e.g. flushing data.
1855 6 : tracing::debug!("Cancelling CancellationToken");
1856 6 : self.cancel.cancel();
1857 6 :
1858 6 : // shutdown all tenant and timeline tasks: gc, compaction, page service
1859 6 : // No new tasks will be started for this tenant because it's in `Stopping` state.
1860 6 : //
1861 6 : // this will additionally shutdown and await all timeline tasks.
1862 6 : tracing::debug!("Waiting for tasks...");
1863 6 : task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), None).await;
1864 :
1865 : // Wait for any in-flight operations to complete
1866 6 : self.gate.close().await;
1867 :
1868 6 : remove_tenant_metrics(&self.tenant_shard_id);
1869 6 :
1870 6 : Ok(())
1871 6 : }
1872 :
1873 : /// Change tenant status to Stopping, to mark that it is being shut down.
1874 : ///
1875 : /// This function waits for the tenant to become active if it isn't already, before transitioning it into Stopping state.
1876 : ///
1877 : /// This function is not cancel-safe!
1878 : ///
1879 : /// `allow_transition_from_loading` is needed for the special case of loading task deleting the tenant.
1880 : /// `allow_transition_from_attaching` is needed for the special case of attaching deleted tenant.
1881 6 : async fn set_stopping(
1882 6 : &self,
1883 6 : progress: completion::Barrier,
1884 6 : allow_transition_from_loading: bool,
1885 6 : allow_transition_from_attaching: bool,
1886 6 : ) -> Result<(), SetStoppingError> {
1887 6 : let mut rx = self.state.subscribe();
1888 6 :
1889 6 : // cannot stop before we're done activating, so wait until activation is complete
1890 6 : rx.wait_for(|state| match state {
1891 0 : TenantState::Attaching if allow_transition_from_attaching => true,
1892 : TenantState::Activating(_) | TenantState::Attaching => {
1893 0 : info!(
1894 0 : "waiting for {} to turn Active|Broken|Stopping",
1895 0 : <&'static str>::from(state)
1896 : );
1897 0 : false
1898 : }
1899 0 : TenantState::Loading => allow_transition_from_loading,
1900 6 : TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => true,
1901 6 : })
1902 0 : .await
1903 6 : .expect("cannot drop self.state while on a &self method");
1904 6 :
1905 6 : // We now know we're done activating; let's see whether this task is the winner to transition into Stopping.
1906 6 : let mut err = None;
1907 6 : let stopping = self.state.send_if_modified(|current_state| match current_state {
1908 : TenantState::Activating(_) => {
1909 0 : unreachable!("1we ensured above that we're done with activation, and, there is no re-activation")
1910 : }
1911 : TenantState::Attaching => {
1912 0 : if !allow_transition_from_attaching {
1913 0 : unreachable!("2we ensured above that we're done with activation, and, there is no re-activation")
1914 0 : };
1915 0 : *current_state = TenantState::Stopping { progress };
1916 0 : true
1917 : }
1918 : TenantState::Loading => {
1919 0 : if !allow_transition_from_loading {
1920 0 : unreachable!("3we ensured above that we're done with activation, and, there is no re-activation")
1921 0 : };
1922 0 : *current_state = TenantState::Stopping { progress };
1923 0 : true
1924 : }
1925 : TenantState::Active => {
1926 : // FIXME: due to time-of-check vs time-of-use issues, it can happen that new timelines
1927 : // are created after the transition to Stopping. That's harmless, as the Timelines
1928 : // won't be accessible to anyone afterwards, because the Tenant is in Stopping state.
1929 6 : *current_state = TenantState::Stopping { progress };
1930 6 : // Continue stopping outside the closure. We need to grab timelines.lock()
1931 6 : // and we plan to turn it into a tokio::sync::Mutex in a future patch.
1932 6 : true
1933 : }
1934 0 : TenantState::Broken { reason, .. } => {
1935 0 : info!(
1936 0 : "Cannot set tenant to Stopping state, it is in Broken state due to: {reason}"
1937 : );
1938 0 : err = Some(SetStoppingError::Broken);
1939 0 : false
1940 : }
1941 0 : TenantState::Stopping { progress } => {
1942 0 : info!("Tenant is already in Stopping state");
1943 0 : err = Some(SetStoppingError::AlreadyStopping(progress.clone()));
1944 0 : false
1945 : }
1946 6 : });
1947 6 : match (stopping, err) {
1948 6 : (true, None) => {} // continue
1949 0 : (false, Some(err)) => return Err(err),
1950 0 : (true, Some(_)) => unreachable!(
1951 0 : "send_if_modified closure must error out if not transitioning to Stopping"
1952 0 : ),
1953 0 : (false, None) => unreachable!(
1954 0 : "send_if_modified closure must return true if transitioning to Stopping"
1955 0 : ),
1956 : }
1957 :
1958 6 : let timelines_accessor = self.timelines.lock().unwrap();
1959 6 : let not_broken_timelines = timelines_accessor
1960 6 : .values()
1961 6 : .filter(|timeline| !timeline.is_broken());
1962 12 : for timeline in not_broken_timelines {
1963 6 : timeline.set_state(TimelineState::Stopping);
1964 6 : }
1965 6 : Ok(())
1966 6 : }
1967 :
1968 : /// Method for tenant::mgr to transition us into Broken state in case of a late failure in
1969 : /// `remove_tenant_from_memory`
1970 : ///
1971 : /// This function waits for the tenant to become active if it isn't already, before transitioning it into Stopping state.
1972 : ///
1973 : /// In tests, we also use this to set tenants to Broken state on purpose.
1974 0 : pub(crate) async fn set_broken(&self, reason: String) {
1975 0 : let mut rx = self.state.subscribe();
1976 0 :
1977 0 : // The load & attach routines own the tenant state until it has reached `Active`.
1978 0 : // So, wait until it's done.
1979 0 : rx.wait_for(|state| match state {
1980 : TenantState::Activating(_) | TenantState::Loading | TenantState::Attaching => {
1981 0 : info!(
1982 0 : "waiting for {} to turn Active|Broken|Stopping",
1983 0 : <&'static str>::from(state)
1984 : );
1985 0 : false
1986 : }
1987 0 : TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => true,
1988 0 : })
1989 0 : .await
1990 0 : .expect("cannot drop self.state while on a &self method");
1991 0 :
1992 0 : // We now know we're done activating; let's see whether this task is the winner to transition into Broken.
1993 0 : self.set_broken_no_wait(reason)
1994 0 : }
1995 :
1996 0 : pub(crate) fn set_broken_no_wait(&self, reason: impl Display) {
1997 0 : let reason = reason.to_string();
1998 0 : self.state.send_modify(|current_state| {
1999 0 : match *current_state {
2000 : TenantState::Activating(_) | TenantState::Loading | TenantState::Attaching => {
2001 0 : unreachable!("we ensured above that we're done with activation, and, there is no re-activation")
2002 : }
2003 : TenantState::Active => {
2004 0 : if cfg!(feature = "testing") {
2005 0 : warn!("Changing Active tenant to Broken state, reason: {}", reason);
2006 0 : *current_state = TenantState::broken_from_reason(reason);
2007 : } else {
2008 0 : unreachable!("not allowed to call set_broken on Active tenants in non-testing builds")
2009 : }
2010 : }
2011 : TenantState::Broken { .. } => {
2012 0 : warn!("Tenant is already in Broken state");
2013 : }
2014 : // This is the only "expected" path, any other path is a bug.
2015 : TenantState::Stopping { .. } => {
2016 0 : warn!(
2017 0 : "Marking Stopping tenant as Broken state, reason: {}",
2018 : reason
2019 : );
2020 0 : *current_state = TenantState::broken_from_reason(reason);
2021 : }
2022 : }
2023 0 : });
2024 0 : }
2025 :
2026 0 : pub fn subscribe_for_state_updates(&self) -> watch::Receiver<TenantState> {
2027 0 : self.state.subscribe()
2028 0 : }
2029 :
2030 : /// The activate_now semaphore is initialized with zero units. As soon as
2031 : /// we add a unit, waiters will be able to acquire a unit and proceed.
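: ///
: /// A sketch of the waiter side (illustrative; the real waiter lives in the
: /// background task that delays activation):
: ///
: /// ```ignore
: /// // Blocks until activate_now() adds a permit.
: /// let _permit = self.activate_now_sem.acquire().await;
: /// ```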
2032 0 : pub(crate) fn activate_now(&self) {
2033 0 : self.activate_now_sem.add_permits(1);
2034 0 : }
2035 :
2036 0 : pub(crate) async fn wait_to_become_active(
2037 0 : &self,
2038 0 : timeout: Duration,
2039 0 : ) -> Result<(), GetActiveTenantError> {
2040 0 : let mut receiver = self.state.subscribe();
2041 0 : loop {
2042 0 : let current_state = receiver.borrow_and_update().clone();
2043 0 : match current_state {
2044 : TenantState::Loading | TenantState::Attaching | TenantState::Activating(_) => {
2045 : // in these states, there's a chance that we can reach ::Active
2046 0 : self.activate_now();
2047 0 : match timeout_cancellable(timeout, &self.cancel, receiver.changed()).await {
2048 0 : Ok(r) => {
2049 0 : r.map_err(
2050 0 : |_e: tokio::sync::watch::error::RecvError|
2051 : // Tenant existed but was dropped: report it as non-existent
2052 0 : GetActiveTenantError::NotFound(GetTenantError::NotFound(self.tenant_shard_id.tenant_id))
2053 0 : )?
2054 : }
2055 : Err(TimeoutCancellableError::Cancelled) => {
2056 0 : return Err(GetActiveTenantError::Cancelled);
2057 : }
2058 : Err(TimeoutCancellableError::Timeout) => {
2059 0 : return Err(GetActiveTenantError::WaitForActiveTimeout {
2060 0 : latest_state: Some(self.current_state()),
2061 0 : wait_time: timeout,
2062 0 : });
2063 : }
2064 : }
2065 : }
2066 : TenantState::Active { .. } => {
2067 0 : return Ok(());
2068 : }
2069 0 : TenantState::Broken { reason, .. } => {
2070 0 : // This is fatal, and reported distinctly from the general case of "will never be active" because
2071 0 : // it's logically a 500 to external API users (broken is always a bug).
2072 0 : return Err(GetActiveTenantError::Broken(reason));
2073 : }
2074 : TenantState::Stopping { .. } => {
2075 : // There's no chance the tenant can transition back into ::Active
2076 0 : return Err(GetActiveTenantError::WillNotBecomeActive(current_state));
2077 : }
2078 : }
2079 : }
2080 0 : }
2081 :
2082 0 : pub(crate) fn get_attach_mode(&self) -> AttachmentMode {
2083 0 : self.tenant_conf.load().location.attach_mode
2084 0 : }
2085 :
2086 : /// For API access: generate a LocationConfig equivalent to the one that would be used to
2087 : /// create a Tenant in the same state. Do not use this in hot paths: it's for relatively
2088 : /// rare external API calls, like a reconciliation at startup.
2089 0 : pub(crate) fn get_location_conf(&self) -> models::LocationConfig {
2090 0 : let conf = self.tenant_conf.load();
2091 :
2092 0 : let location_config_mode = match conf.location.attach_mode {
2093 0 : AttachmentMode::Single => models::LocationConfigMode::AttachedSingle,
2094 0 : AttachmentMode::Multi => models::LocationConfigMode::AttachedMulti,
2095 0 : AttachmentMode::Stale => models::LocationConfigMode::AttachedStale,
2096 : };
2097 :
2098 : // We have a pageserver TenantConf, we need the API-facing TenantConfig.
2099 0 : let tenant_config: models::TenantConfig = conf.tenant_conf.clone().into();
2100 0 :
2101 0 : models::LocationConfig {
2102 0 : mode: location_config_mode,
2103 0 : generation: self.generation.into(),
2104 0 : secondary_conf: None,
2105 0 : shard_number: self.shard_identity.number.0,
2106 0 : shard_count: self.shard_identity.count.literal(),
2107 0 : shard_stripe_size: self.shard_identity.stripe_size.0,
2108 0 : tenant_conf: tenant_config,
2109 0 : }
2110 0 : }
2111 :
2112 0 : pub(crate) fn get_tenant_shard_id(&self) -> &TenantShardId {
2113 0 : &self.tenant_shard_id
2114 0 : }
2115 :
2116 0 : pub(crate) fn get_shard_stripe_size(&self) -> ShardStripeSize {
2117 0 : self.shard_identity.stripe_size
2118 0 : }
2119 :
2120 0 : pub(crate) fn get_generation(&self) -> Generation {
2121 0 : self.generation
2122 0 : }
2123 :
2124 : /// This function partially shuts down the tenant (it shuts down the Timelines) and is fallible,
2125 : /// and can leave the tenant in a bad state if it fails. The caller is responsible for
2126 : /// resetting this tenant to a valid state if we fail.
2127 0 : pub(crate) async fn split_prepare(
2128 0 : &self,
2129 0 : child_shards: &Vec<TenantShardId>,
2130 0 : ) -> anyhow::Result<()> {
2131 0 : let timelines = self.timelines.lock().unwrap().clone();
2132 0 : for timeline in timelines.values() {
2133 : // We do not block timeline creation/deletion during splits inside the pageserver: it is up to higher
2134 : // levels to ensure that a split is not started while timeline creation/deletion are in progress.
2135 :
2136 : // Upload an index from the parent: this is partly to provide freshness for the
2137 : // child tenants that will copy it, and partly for general ease-of-debugging: there will
2138 : // always be a parent shard index in the same generation as we wrote the child shard index.
2139 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Uploading index");
2140 0 : timeline
2141 0 : .remote_client
2142 0 : .schedule_index_upload_for_file_changes()?;
2143 0 : timeline.remote_client.wait_completion().await?;
2144 :
2145 : // Shut down the timeline's remote client: this means that the indices we write
2146 : // for child shards will not be invalidated by the parent shard deleting layers.
2147 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Shutting down remote storage client");
2148 0 : timeline.remote_client.shutdown().await;
2149 :
2150 : // Download methods can still be used after shutdown, as they don't flow through the remote client's
2151 : // queue. In principle the RemoteTimelineClient could provide this without downloading it, but this
2152 : // operation is rare, so it's simpler to just download it (and doing so robustly guarantees that
2153 : // the index we use here really is the remotely persistent one).
2154 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Downloading index_part from parent");
2155 0 : let result = timeline.remote_client
2156 0 : .download_index_file(&self.cancel)
2157 0 : .instrument(info_span!("download_index_file", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%timeline.timeline_id))
2158 0 : .await?;
2159 0 : let index_part = match result {
2160 : MaybeDeletedIndexPart::Deleted(_) => {
2161 0 : anyhow::bail!("Timeline deletion happened concurrently with split")
2162 : }
2163 0 : MaybeDeletedIndexPart::IndexPart(p) => p,
2164 : };
2165 :
2166 0 : for child_shard in child_shards {
2167 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Uploading index_part for child {}", child_shard.to_index());
2168 0 : upload_index_part(
2169 0 : &self.remote_storage,
2170 0 : child_shard,
2171 0 : &timeline.timeline_id,
2172 0 : self.generation,
2173 0 : &index_part,
2174 0 : &self.cancel,
2175 0 : )
2176 0 : .await?;
2177 : }
2178 : }
2179 :
2180 0 : Ok(())
2181 0 : }
2182 :
2183 0 : pub(crate) fn get_sizes(&self) -> TopTenantShardItem {
2184 0 : let mut result = TopTenantShardItem {
2185 0 : id: self.tenant_shard_id,
2186 0 : resident_size: 0,
2187 0 : physical_size: 0,
2188 0 : max_logical_size: 0,
2189 0 : };
2190 :
2191 0 : for timeline in self.timelines.lock().unwrap().values() {
2192 0 : result.resident_size += timeline.metrics.resident_physical_size_gauge.get();
2193 0 :
2194 0 : result.physical_size += timeline
2195 0 : .remote_client
2196 0 : .metrics
2197 0 : .remote_physical_size_gauge
2198 0 : .get();
2199 0 : result.max_logical_size = std::cmp::max(
2200 0 : result.max_logical_size,
2201 0 : timeline.metrics.current_logical_size_gauge.get(),
2202 0 : );
2203 0 : }
2204 :
2205 0 : result
2206 0 : }
2207 : }
2208 :
2209 : /// Given a map of timelines and their ancestors (timeline_id -> ancestor_id),
2210 : /// perform a topological sort, so that the parent of each timeline comes
2211 : /// before the children.
2212 : /// `E` extracts the ancestor from `T`.
2213 : /// This allows `T` to vary: it can be `TimelineMetadata`, `Timeline` itself, etc.
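: ///
: /// A minimal usage sketch (assuming `T = TimelineMetadata`):
: ///
: /// ```ignore
: /// let sorted = tree_sort_timelines(timelines, |meta| meta.ancestor_timeline())?;
: /// for (timeline_id, meta) in sorted {
: ///     // Every ancestor appears before its descendants here.
: /// }
: /// ```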
2214 161 : fn tree_sort_timelines<T, E>(
2215 161 : timelines: HashMap<TimelineId, T>,
2216 161 : extractor: E,
2217 161 : ) -> anyhow::Result<Vec<(TimelineId, T)>>
2218 161 : where
2219 161 : E: Fn(&T) -> Option<TimelineId>,
2220 161 : {
2221 161 : let mut result = Vec::with_capacity(timelines.len());
2222 161 :
2223 161 : let mut now = Vec::with_capacity(timelines.len());
2224 161 : // (ancestor, children)
2225 161 : let mut later: HashMap<TimelineId, Vec<(TimelineId, T)>> =
2226 161 : HashMap::with_capacity(timelines.len());
2227 :
2228 167 : for (timeline_id, value) in timelines {
2229 6 : if let Some(ancestor_id) = extractor(&value) {
2230 2 : let children = later.entry(ancestor_id).or_default();
2231 2 : children.push((timeline_id, value));
2232 4 : } else {
2233 4 : now.push((timeline_id, value));
2234 4 : }
2235 : }
2236 :
2237 167 : while let Some((timeline_id, metadata)) = now.pop() {
2238 6 : result.push((timeline_id, metadata));
2239 : // All children of this can be loaded now
2240 6 : if let Some(mut children) = later.remove(&timeline_id) {
2241 2 : now.append(&mut children);
2242 4 : }
2243 : }
2244 :
2245 : // All timelines should be visited now. Unless there were timelines with missing ancestors.
2246 161 : if !later.is_empty() {
2247 0 : for (missing_id, orphan_ids) in later {
2248 0 : for (orphan_id, _) in orphan_ids {
2249 0 : error!("could not load timeline {orphan_id} because its ancestor timeline {missing_id} could not be loaded");
2250 : }
2251 : }
2252 0 : bail!("could not load tenant because some timelines are missing ancestors");
2253 161 : }
2254 161 :
2255 161 : Ok(result)
2256 161 : }
2257 :
2258 : impl Tenant {
2259 0 : pub fn tenant_specific_overrides(&self) -> TenantConfOpt {
2260 0 : self.tenant_conf.load().tenant_conf.clone()
2261 0 : }
2262 :
2263 0 : pub fn effective_config(&self) -> TenantConf {
2264 0 : self.tenant_specific_overrides()
2265 0 : .merge(self.conf.default_tenant_conf.clone())
2266 0 : }
2267 :
2268 0 : pub fn get_checkpoint_distance(&self) -> u64 {
2269 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2270 0 : tenant_conf
2271 0 : .checkpoint_distance
2272 0 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
2273 0 : }
2274 :
2275 0 : pub fn get_checkpoint_timeout(&self) -> Duration {
2276 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2277 0 : tenant_conf
2278 0 : .checkpoint_timeout
2279 0 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
2280 0 : }
2281 :
2282 0 : pub fn get_compaction_target_size(&self) -> u64 {
2283 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2284 0 : tenant_conf
2285 0 : .compaction_target_size
2286 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
2287 0 : }
2288 :
2289 0 : pub fn get_compaction_period(&self) -> Duration {
2290 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2291 0 : tenant_conf
2292 0 : .compaction_period
2293 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_period)
2294 0 : }
2295 :
2296 0 : pub fn get_compaction_threshold(&self) -> usize {
2297 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2298 0 : tenant_conf
2299 0 : .compaction_threshold
2300 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
2301 0 : }
2302 :
2303 0 : pub fn get_gc_horizon(&self) -> u64 {
2304 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2305 0 : tenant_conf
2306 0 : .gc_horizon
2307 0 : .unwrap_or(self.conf.default_tenant_conf.gc_horizon)
2308 0 : }
2309 :
2310 0 : pub fn get_gc_period(&self) -> Duration {
2311 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2312 0 : tenant_conf
2313 0 : .gc_period
2314 0 : .unwrap_or(self.conf.default_tenant_conf.gc_period)
2315 0 : }
2316 :
2317 0 : pub fn get_image_creation_threshold(&self) -> usize {
2318 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2319 0 : tenant_conf
2320 0 : .image_creation_threshold
2321 0 : .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
2322 0 : }
2323 :
2324 0 : pub fn get_pitr_interval(&self) -> Duration {
2325 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2326 0 : tenant_conf
2327 0 : .pitr_interval
2328 0 : .unwrap_or(self.conf.default_tenant_conf.pitr_interval)
2329 0 : }
2330 :
2331 0 : pub fn get_trace_read_requests(&self) -> bool {
2332 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2333 0 : tenant_conf
2334 0 : .trace_read_requests
2335 0 : .unwrap_or(self.conf.default_tenant_conf.trace_read_requests)
2336 0 : }
2337 :
2338 0 : pub fn get_min_resident_size_override(&self) -> Option<u64> {
2339 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2340 0 : tenant_conf
2341 0 : .min_resident_size_override
2342 0 : .or(self.conf.default_tenant_conf.min_resident_size_override)
2343 0 : }
2344 :
2345 0 : pub fn get_heatmap_period(&self) -> Option<Duration> {
2346 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2347 0 : let heatmap_period = tenant_conf
2348 0 : .heatmap_period
2349 0 : .unwrap_or(self.conf.default_tenant_conf.heatmap_period);
2350 0 : if heatmap_period.is_zero() {
2351 0 : None
2352 : } else {
2353 0 : Some(heatmap_period)
2354 : }
2355 0 : }
2356 :
2357 0 : pub fn get_lsn_lease_length(&self) -> Duration {
2358 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2359 0 : tenant_conf
2360 0 : .lsn_lease_length
2361 0 : .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length)
2362 0 : }
2363 :
2364 0 : pub fn set_new_tenant_config(&self, new_tenant_conf: TenantConfOpt) {
2365 0 : // Use read-copy-update in order to avoid overwriting the location config
2366 0 : // state if this races with [`Tenant::set_new_location_config`]. Note that
2367 0 : // this race is not possible if both request types come from the storage
2368 0 : // controller (as they should!) because an exclusive op lock is required
2369 0 : // on the storage controller side.
2370 0 : self.tenant_conf.rcu(|inner| {
2371 0 : Arc::new(AttachedTenantConf {
2372 0 : tenant_conf: new_tenant_conf.clone(),
2373 0 : location: inner.location,
2374 0 : })
2375 0 : });
2376 0 :
2377 0 : self.tenant_conf_updated(&new_tenant_conf);
2378 0 : // Don't hold self.timelines.lock() during the notifies.
2379 0 : // There's no risk of deadlock right now, but there could be if we consolidate
2380 0 : // mutexes in struct Timeline in the future.
2381 0 : let timelines = self.list_timelines();
2382 0 : for timeline in timelines {
2383 0 : timeline.tenant_conf_updated(&new_tenant_conf);
2384 0 : }
2385 0 : }
2386 :
2387 8 : pub(crate) fn set_new_location_config(&self, new_conf: AttachedTenantConf) {
2388 8 : let new_tenant_conf = new_conf.tenant_conf.clone();
2389 8 :
2390 8 : self.tenant_conf.store(Arc::new(new_conf));
2391 8 :
2392 8 : self.tenant_conf_updated(&new_tenant_conf);
2393 8 : // Don't hold self.timelines.lock() during the notifies.
2394 8 : // There's no risk of deadlock right now, but there could be if we consolidate
2395 8 : // mutexes in struct Timeline in the future.
2396 8 : let timelines = self.list_timelines();
2397 16 : for timeline in timelines {
2398 8 : timeline.tenant_conf_updated(&new_tenant_conf);
2399 8 : }
2400 8 : }
2401 :
2402 169 : fn get_timeline_get_throttle_config(
2403 169 : psconf: &'static PageServerConf,
2404 169 : overrides: &TenantConfOpt,
2405 169 : ) -> throttle::Config {
2406 169 : overrides
2407 169 : .timeline_get_throttle
2408 169 : .clone()
2409 169 : .unwrap_or(psconf.default_tenant_conf.timeline_get_throttle.clone())
2410 169 : }
2411 :
2412 8 : pub(crate) fn tenant_conf_updated(&self, new_conf: &TenantConfOpt) {
2413 8 : let conf = Self::get_timeline_get_throttle_config(self.conf, new_conf);
2414 8 : self.timeline_get_throttle.reconfigure(conf)
2415 8 : }
2416 :
2417 : /// Helper function to create a new Timeline struct.
2418 : ///
2419 : /// The returned Timeline is in Loading state. The caller is responsible for
2420 : /// initializing any on-disk state, and for inserting the Timeline to the 'timelines'
2421 : /// map.
2422 : ///
2423 : /// `validate_ancestor == false` is used when a timeline is created for deletion
2424 : /// and we might not have the ancestor present anymore, which is fine for
2425 : /// to-be-deleted timelines.
2426 383 : fn create_timeline_struct(
2427 383 : &self,
2428 383 : new_timeline_id: TimelineId,
2429 383 : new_metadata: &TimelineMetadata,
2430 383 : ancestor: Option<Arc<Timeline>>,
2431 383 : resources: TimelineResources,
2432 383 : cause: CreateTimelineCause,
2433 383 : last_aux_file_policy: Option<AuxFilePolicy>,
2434 383 : ) -> anyhow::Result<Arc<Timeline>> {
2435 383 : let state = match cause {
2436 : CreateTimelineCause::Load => {
2437 383 : let ancestor_id = new_metadata.ancestor_timeline();
2438 383 : anyhow::ensure!(
2439 383 : ancestor_id == ancestor.as_ref().map(|t| t.timeline_id),
2440 0 : "Timeline's {new_timeline_id} ancestor {ancestor_id:?} was not found"
2441 : );
2442 383 : TimelineState::Loading
2443 : }
2444 0 : CreateTimelineCause::Delete => TimelineState::Stopping,
2445 : };
2446 :
2447 383 : let pg_version = new_metadata.pg_version();
2448 383 :
2449 383 : let timeline = Timeline::new(
2450 383 : self.conf,
2451 383 : Arc::clone(&self.tenant_conf),
2452 383 : new_metadata,
2453 383 : ancestor,
2454 383 : new_timeline_id,
2455 383 : self.tenant_shard_id,
2456 383 : self.generation,
2457 383 : self.shard_identity,
2458 383 : self.walredo_mgr.clone(),
2459 383 : resources,
2460 383 : pg_version,
2461 383 : state,
2462 383 : last_aux_file_policy,
2463 383 : self.cancel.child_token(),
2464 383 : );
2465 383 :
2466 383 : Ok(timeline)
2467 383 : }
2468 :
2469 : // Allow too_many_arguments because a constructor's argument list naturally grows with the
2470 : // number of attributes in the struct: breaking these out into a builder wouldn't be helpful.
2471 : #[allow(clippy::too_many_arguments)]
2472 161 : fn new(
2473 161 : state: TenantState,
2474 161 : conf: &'static PageServerConf,
2475 161 : attached_conf: AttachedTenantConf,
2476 161 : shard_identity: ShardIdentity,
2477 161 : walredo_mgr: Option<Arc<WalRedoManager>>,
2478 161 : tenant_shard_id: TenantShardId,
2479 161 : remote_storage: GenericRemoteStorage,
2480 161 : deletion_queue_client: DeletionQueueClient,
2481 161 : ) -> Tenant {
2482 161 : let (state, mut rx) = watch::channel(state);
2483 161 :
2484 161 : tokio::spawn(async move {
2485 161 : // reflect tenant state in metrics:
2486 161 : // - global per tenant state: TENANT_STATE_METRIC
2487 161 : // - "set" of broken tenants: BROKEN_TENANTS_SET
2488 161 : //
2489 161 : // set of broken tenants should not have zero counts so that it remains accessible for
2490 161 : // alerting.
2491 161 :
2492 161 : let tid = tenant_shard_id.to_string();
2493 161 : let shard_id = tenant_shard_id.shard_slug().to_string();
2494 161 : let set_key = &[tid.as_str(), shard_id.as_str()][..];
2495 161 :
2496 318 : fn inspect_state(state: &TenantState) -> ([&'static str; 1], bool) {
2497 318 : ([state.into()], matches!(state, TenantState::Broken { .. }))
2498 318 : }
2499 161 :
2500 161 : let mut tuple = inspect_state(&rx.borrow_and_update());
2501 161 :
2502 161 : let is_broken = tuple.1;
2503 161 : let mut counted_broken = if is_broken {
2504 : // add the id to the set right away, there should not be any updates on the channel
2505 : // after before tenant is removed, if ever
2506 0 : BROKEN_TENANTS_SET.with_label_values(set_key).set(1);
2507 0 : true
2508 : } else {
2509 161 : false
2510 : };
2511 :
2512 318 : loop {
2513 318 : let labels = &tuple.0;
2514 318 : let current = TENANT_STATE_METRIC.with_label_values(labels);
2515 318 : current.inc();
2516 318 :
2517 318 : if rx.changed().await.is_err() {
2518 : // tenant has been dropped
2519 16 : current.dec();
2520 16 : drop(BROKEN_TENANTS_SET.remove_label_values(set_key));
2521 16 : break;
2522 157 : }
2523 157 :
2524 157 : current.dec();
2525 157 : tuple = inspect_state(&rx.borrow_and_update());
2526 157 :
2527 157 : let is_broken = tuple.1;
2528 157 : if is_broken && !counted_broken {
2529 0 : counted_broken = true;
2530 0 : // insert the tenant_id (back) into the set while avoiding needless counter
2531 0 : // access
2532 0 : BROKEN_TENANTS_SET.with_label_values(set_key).set(1);
2533 157 : }
2534 : }
2535 161 : });
2536 161 :
2537 161 : Tenant {
2538 161 : tenant_shard_id,
2539 161 : shard_identity,
2540 161 : generation: attached_conf.location.generation,
2541 161 : conf,
2542 161 : // using now here is a good enough approximation to catch tenants with really long
2543 161 : // activation times.
2544 161 : constructed_at: Instant::now(),
2545 161 : timelines: Mutex::new(HashMap::new()),
2546 161 : timelines_creating: Mutex::new(HashSet::new()),
2547 161 : gc_cs: tokio::sync::Mutex::new(()),
2548 161 : walredo_mgr,
2549 161 : remote_storage,
2550 161 : deletion_queue_client,
2551 161 : state,
2552 161 : cached_logical_sizes: tokio::sync::Mutex::new(HashMap::new()),
2553 161 : cached_synthetic_tenant_size: Arc::new(AtomicU64::new(0)),
2554 161 : eviction_task_tenant_state: tokio::sync::Mutex::new(EvictionTaskTenantState::default()),
2555 161 : activate_now_sem: tokio::sync::Semaphore::new(0),
2556 161 : cancel: CancellationToken::default(),
2557 161 : gate: Gate::default(),
2558 161 : timeline_get_throttle: Arc::new(throttle::Throttle::new(
2559 161 : Tenant::get_timeline_get_throttle_config(conf, &attached_conf.tenant_conf),
2560 161 : &crate::metrics::tenant_throttling::TIMELINE_GET,
2561 161 : )),
2562 161 : tenant_conf: Arc::new(ArcSwap::from_pointee(attached_conf)),
2563 161 : ongoing_timeline_detach: std::sync::Mutex::default(),
2564 161 : }
2565 161 : }
2566 :
2567 : /// Locate and load config
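: ///
: /// The new-style location config file takes precedence over the legacy per-tenant
: /// config, which carries only a `[tenant_config]` table of `TenantConfOpt`
: /// overrides, e.g. (illustrative values, not a complete file):
: ///
: /// ```toml
: /// [tenant_config]
: /// gc_horizon = 67108864
: /// pitr_interval = "7 days"
: /// ```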
2568 0 : pub(super) fn load_tenant_config(
2569 0 : conf: &'static PageServerConf,
2570 0 : tenant_shard_id: &TenantShardId,
2571 0 : ) -> anyhow::Result<LocationConf> {
2572 0 : let legacy_config_path = conf.tenant_config_path(tenant_shard_id);
2573 0 : let config_path = conf.tenant_location_config_path(tenant_shard_id);
2574 0 :
2575 0 : if config_path.exists() {
2576 : // New-style config takes precedence
2577 0 : let deserialized = Self::read_config(&config_path)?;
2578 0 : Ok(toml_edit::de::from_document::<LocationConf>(deserialized)?)
2579 0 : } else if legacy_config_path.exists() {
2580 : // Upgrade path: found an old-style configuration only
2581 0 : let deserialized = Self::read_config(&legacy_config_path)?;
2582 :
2583 0 : let mut tenant_conf = TenantConfOpt::default();
2584 0 : for (key, item) in deserialized.iter() {
2585 0 : match key {
2586 0 : "tenant_config" => {
2587 0 : tenant_conf = TenantConfOpt::try_from(item.to_owned()).context(format!("Failed to parse config from file '{legacy_config_path}' as pageserver config"))?;
2588 : }
2589 0 : _ => bail!(
2590 0 : "config file {legacy_config_path} has unrecognized pageserver option '{key}'"
2591 0 : ),
2592 : }
2593 : }
2594 :
2595 : // Legacy configs are implicitly in attached state, and do not support sharding
2596 0 : Ok(LocationConf::attached_single(
2597 0 : tenant_conf,
2598 0 : Generation::none(),
2599 0 : &models::ShardParameters::default(),
2600 0 : ))
2601 : } else {
2602 : // FIXME If the config file is not found, assume that we're attaching
2603 : // a detached tenant and config is passed via attach command.
2604 : // https://github.com/neondatabase/neon/issues/1555
2605 : // OR: we're loading after incomplete deletion that managed to remove config.
2606 0 : info!(
2607 0 : "tenant config not found in {} or {}",
2608 : config_path, legacy_config_path
2609 : );
2610 0 : Ok(LocationConf::default())
2611 : }
2612 0 : }
2613 :
2614 0 : fn read_config(path: &Utf8Path) -> anyhow::Result<toml_edit::Document> {
2615 0 : info!("loading tenant configuration from {path}");
2616 :
2617 : // load and parse file
2618 0 : let config = fs::read_to_string(path)
2619 0 : .with_context(|| format!("Failed to load config from path '{path}'"))?;
2620 :
2621 0 : config
2622 0 : .parse::<toml_edit::Document>()
2623 0 : .with_context(|| format!("Failed to parse config from file '{path}' as toml file"))
2624 0 : }
2625 :
2626 0 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
2627 : pub(super) async fn persist_tenant_config(
2628 : conf: &'static PageServerConf,
2629 : tenant_shard_id: &TenantShardId,
2630 : location_conf: &LocationConf,
2631 : ) -> anyhow::Result<()> {
2632 : let legacy_config_path = conf.tenant_config_path(tenant_shard_id);
2633 : let config_path = conf.tenant_location_config_path(tenant_shard_id);
2634 :
2635 : Self::persist_tenant_config_at(
2636 : tenant_shard_id,
2637 : &config_path,
2638 : &legacy_config_path,
2639 : location_conf,
2640 : )
2641 : .await
2642 : }
2643 :
2644 0 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
2645 : pub(super) async fn persist_tenant_config_at(
2646 : tenant_shard_id: &TenantShardId,
2647 : config_path: &Utf8Path,
2648 : legacy_config_path: &Utf8Path,
2649 : location_conf: &LocationConf,
2650 : ) -> anyhow::Result<()> {
2651 : if let LocationMode::Attached(attach_conf) = &location_conf.mode {
2652 : // The modern-style LocationConf config file requires a generation to be set. In case someone
2653 : // is running a pageserver without the infrastructure to set generations, write out the legacy-style
2654 : // config file that only contains TenantConf.
2655 : //
2656 : // This will eventually be removed in https://github.com/neondatabase/neon/issues/5388
2657 :
2658 : if attach_conf.generation.is_none() {
2659 : tracing::info!(
2660 : "Running without generations, writing legacy-style tenant config file"
2661 : );
2662 : Self::persist_tenant_config_legacy(
2663 : tenant_shard_id,
2664 : legacy_config_path,
2665 : &location_conf.tenant_conf,
2666 : )
2667 : .await?;
2668 :
2669 : return Ok(());
2670 : }
2671 : }
2672 :
2673 : debug!("persisting tenantconf to {config_path}");
2674 :
2675 : let mut conf_content = r#"# This file contains a specific per-tenant's config.
2676 : # It is read in case of pageserver restart.
2677 : "#
2678 : .to_string();
2679 :
2680 0 : fail::fail_point!("tenant-config-before-write", |_| {
2681 0 : anyhow::bail!("tenant-config-before-write");
2682 0 : });
2683 :
2684 : // Convert the config to a toml file.
2685 : conf_content += &toml_edit::ser::to_string_pretty(&location_conf)?;
2686 :
2687 : let temp_path = path_with_suffix_extension(config_path, TEMP_FILE_SUFFIX);
2688 :
2689 : let tenant_shard_id = *tenant_shard_id;
2690 : let config_path = config_path.to_owned();
2691 : let conf_content = conf_content.into_bytes();
2692 : VirtualFile::crashsafe_overwrite(config_path.clone(), temp_path, conf_content)
2693 : .await
2694 0 : .with_context(|| format!("write tenant {tenant_shard_id} config to {config_path}"))?;
2695 :
2696 : Ok(())
2697 : }
2698 :
2699 0 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
2700 : async fn persist_tenant_config_legacy(
2701 : tenant_shard_id: &TenantShardId,
2702 : target_config_path: &Utf8Path,
2703 : tenant_conf: &TenantConfOpt,
2704 : ) -> anyhow::Result<()> {
2705 : debug!("persisting tenantconf to {target_config_path}");
2706 :
2707 : let mut conf_content = r#"# This file contains a specific per-tenant's config.
2708 : # It is read in case of pageserver restart.
2709 :
2710 : [tenant_config]
2711 : "#
2712 : .to_string();
2713 :
2714 : // Convert the config to a toml file.
2715 : conf_content += &toml_edit::ser::to_string(&tenant_conf)?;
2716 :
2717 : let temp_path = path_with_suffix_extension(target_config_path, TEMP_FILE_SUFFIX);
2718 :
2719 : let tenant_shard_id = *tenant_shard_id;
2720 : let target_config_path = target_config_path.to_owned();
2721 : let conf_content = conf_content.into_bytes();
2722 : VirtualFile::crashsafe_overwrite(target_config_path.clone(), temp_path, conf_content)
2723 : .await
2724 0 : .with_context(|| {
2725 0 : format!("write tenant {tenant_shard_id} config to {target_config_path}")
2726 0 : })?;
2727 : Ok(())
2728 : }
2729 :
2730 : //
2731 : // How garbage collection works:
2732 : //
2733 : // +--bar------------->
2734 : // /
2735 : // +----+-----foo---------------->
2736 : // /
2737 : // ----main--+-------------------------->
2738 : // \
2739 : // +-----baz-------->
2740 : //
2741 : //
2742 : // 1. Grab 'gc_cs' mutex to prevent new timelines from being created while Timeline's
2743 : // `gc_infos` are being refreshed
2744 : // 2. Scan collected timelines, and on each timeline, make note of
2745 : // all the points where other timelines have been branched off.
2746 : // We will refrain from removing page versions at those LSNs.
2747 : // 3. For each timeline, scan all layer files on the timeline.
2748 : // Remove all files for which a newer file exists and which
2749 : // don't cover any branch point LSNs.
2750 : //
2751 : // TODO:
2752 : // - if a relation has a non-incremental persistent layer on a child branch, then we
2753 : // don't need to keep that in the parent anymore. But currently
2754 : // we do.
2755 754 : async fn gc_iteration_internal(
2756 754 : &self,
2757 754 : target_timeline_id: Option<TimelineId>,
2758 754 : horizon: u64,
2759 754 : pitr: Duration,
2760 754 : cancel: &CancellationToken,
2761 754 : ctx: &RequestContext,
2762 754 : ) -> Result<GcResult, GcError> {
2763 754 : let mut totals: GcResult = Default::default();
2764 754 : let now = Instant::now();
2765 :
2766 754 : let gc_timelines = self
2767 754 : .refresh_gc_info_internal(target_timeline_id, horizon, pitr, cancel, ctx)
2768 695 : .await?;
2769 :
2770 754 : failpoint_support::sleep_millis_async!("gc_iteration_internal_after_getting_gc_timelines");
2771 :
2772 : // If there is nothing to GC, we don't want any messages in the INFO log.
2773 754 : if !gc_timelines.is_empty() {
2774 754 : info!("{} timelines need GC", gc_timelines.len());
2775 : } else {
2776 0 : debug!("{} timelines need GC", gc_timelines.len());
2777 : }
2778 :
2779 : // Perform GC for each timeline.
2780 : //
2781 : // Note that we don't hold the `Tenant::gc_cs` lock here because we don't want to delay the
2782 : // branch creation task, which requires the GC lock. A GC iteration can run concurrently
2783 : // with branch creation.
2784 : //
2785 : // See comments in [`Tenant::branch_timeline`] for more information about why branch
2786 : // creation task can run concurrently with timeline's GC iteration.
2787 1508 : for timeline in gc_timelines {
2788 754 : if cancel.is_cancelled() {
2789 : // We were requested to shut down. Stop and return with the progress we
2790 : // made.
2791 0 : break;
2792 754 : }
2793 754 : let result = match timeline.gc().await {
2794 : Err(GcError::TimelineCancelled) => {
2795 0 : if target_timeline_id.is_some() {
2796 0 : // If we were targeting this specific timeline, surface cancellation to the caller
2797 0 : return Err(GcError::TimelineCancelled);
2798 : } else {
2799 : // A timeline may be shutting down independently of the tenant's lifecycle: we should
2800 : // skip past this and proceed to try GC on other timelines.
2801 0 : continue;
2802 : }
2803 : }
2804 754 : r => r?,
2805 : };
2806 754 : totals += result;
2807 : }
2808 :
2809 754 : totals.elapsed = now.elapsed();
2810 754 : Ok(totals)
2811 754 : }
2812 :
2813 : /// Refreshes the Timeline::gc_info for all timelines, returning the
2814 : /// vector of timelines which have [`Timeline::get_last_record_lsn`] past
2815 : /// [`Tenant::get_gc_horizon`].
2816 : ///
2817 : /// This is usually executed as part of periodic gc, but can now be triggered more often.
2818 0 : pub(crate) async fn refresh_gc_info(
2819 0 : &self,
2820 0 : cancel: &CancellationToken,
2821 0 : ctx: &RequestContext,
2822 0 : ) -> Result<Vec<Arc<Timeline>>, GcError> {
2823 0 : // since this method can now be called at different rates than the configured gc loop,
2824 0 : // these configuration values may get applied faster than they were previously, when they
2825 0 : // were read only by the gc task.
2826 0 : let horizon = self.get_gc_horizon();
2827 0 : let pitr = self.get_pitr_interval();
2828 0 :
2829 0 : // refresh all timelines
2830 0 : let target_timeline_id = None;
2831 0 :
2832 0 : self.refresh_gc_info_internal(target_timeline_id, horizon, pitr, cancel, ctx)
2833 0 : .await
2834 0 : }
2835 :
2836 754 : async fn refresh_gc_info_internal(
2837 754 : &self,
2838 754 : target_timeline_id: Option<TimelineId>,
2839 754 : horizon: u64,
2840 754 : pitr: Duration,
2841 754 : cancel: &CancellationToken,
2842 754 : ctx: &RequestContext,
2843 754 : ) -> Result<Vec<Arc<Timeline>>, GcError> {
2844 754 : // before taking the gc_cs lock, do the heavier weight finding of gc_cutoff points for
2845 754 : // currently visible timelines.
2846 754 : let timelines = self
2847 754 : .timelines
2848 754 : .lock()
2849 754 : .unwrap()
2850 754 : .values()
2851 3310 : .filter(|tl| match target_timeline_id.as_ref() {
2852 3310 : Some(target) => &tl.timeline_id == target,
2853 0 : None => true,
2854 3310 : })
2855 754 : .cloned()
2856 754 : .collect::<Vec<_>>();
2857 754 :
2858 754 : let mut gc_cutoffs: HashMap<TimelineId, GcCutoffs> =
2859 754 : HashMap::with_capacity(timelines.len());
2860 :
2861 754 : for timeline in timelines.iter() {
2862 754 : let cutoff = timeline
2863 754 : .get_last_record_lsn()
2864 754 : .checked_sub(horizon)
2865 754 : .unwrap_or(Lsn(0));
2866 :
2867 754 : let cutoffs = timeline.find_gc_cutoffs(cutoff, pitr, cancel, ctx).await?;
2868 754 : let old = gc_cutoffs.insert(timeline.timeline_id, cutoffs);
2869 754 : assert!(old.is_none());
2870 : }
2871 :
2872 754 : if !self.is_active() || self.cancel.is_cancelled() {
2873 0 : return Err(GcError::TenantCancelled);
2874 754 : }
2875 :
2876 : // grab mutex to prevent new timelines from being created here; avoid doing long operations
2877 : // because that will stall branch creation.
2878 754 : let gc_cs = self.gc_cs.lock().await;
2879 :
2880 : // Scan all timelines. For each timeline, remember the timeline ID and
2881 : // the branch point where it was created.
2882 754 : let (all_branchpoints, timelines): (BTreeSet<(TimelineId, Lsn)>, _) = {
2883 754 : let timelines = self.timelines.lock().unwrap();
2884 754 : let mut all_branchpoints = BTreeSet::new();
2885 754 : let timelines = {
2886 754 : if let Some(target_timeline_id) = target_timeline_id.as_ref() {
2887 754 : if timelines.get(target_timeline_id).is_none() {
2888 0 : return Err(GcError::TimelineNotFound);
2889 754 : }
2890 0 : };
2891 :
2892 754 : timelines
2893 754 : .iter()
2894 3310 : .map(|(_timeline_id, timeline_entry)| {
2895 2556 : if let Some(ancestor_timeline_id) =
2896 3310 : &timeline_entry.get_ancestor_timeline_id()
2897 : {
2898 : // If target_timeline is specified, we only need to know branchpoints of its children
2899 2556 : if let Some(timeline_id) = target_timeline_id {
2900 2556 : if ancestor_timeline_id == &timeline_id {
2901 6 : all_branchpoints.insert((
2902 6 : *ancestor_timeline_id,
2903 6 : timeline_entry.get_ancestor_lsn(),
2904 6 : ));
2905 2550 : }
2906 : }
2907 : // Collect branchpoints for all timelines
2908 0 : else {
2909 0 : all_branchpoints.insert((
2910 0 : *ancestor_timeline_id,
2911 0 : timeline_entry.get_ancestor_lsn(),
2912 0 : ));
2913 0 : }
2914 754 : }
2915 :
2916 3310 : timeline_entry.clone()
2917 3310 : })
2918 754 : .collect::<Vec<_>>()
2919 754 : };
2920 754 : (all_branchpoints, timelines)
2921 754 : };
2922 754 :
2923 754 : // Ok, we now know all the branch points.
2924 754 : // Update the GC information for each timeline.
2925 754 : let mut gc_timelines = Vec::with_capacity(timelines.len());
2926 4064 : for timeline in timelines {
2927 : // If target_timeline is specified, ignore all other timelines
2928 3310 : if let Some(target_timeline_id) = target_timeline_id {
2929 3310 : if timeline.timeline_id != target_timeline_id {
2930 2556 : continue;
2931 754 : }
2932 0 : }
2933 :
2934 754 : let branchpoints: Vec<Lsn> = all_branchpoints
2935 754 : .range((
2936 754 : Included((timeline.timeline_id, Lsn(0))),
2937 754 : Included((timeline.timeline_id, Lsn(u64::MAX))),
2938 754 : ))
2939 754 : .map(|&x| x.1)
2940 754 : .collect();
2941 754 :
2942 754 : {
2943 754 : let mut target = timeline.gc_info.write().unwrap();
2944 754 :
2945 754 : let now = SystemTime::now();
2946 754 : target.leases.retain(|_, lease| !lease.is_expired(&now));
2947 754 :
2948 754 : timeline
2949 754 : .metrics
2950 754 : .valid_lsn_lease_count_gauge
2951 754 : .set(target.leases.len() as u64);
2952 754 :
2953 754 : match gc_cutoffs.remove(&timeline.timeline_id) {
2954 754 : Some(cutoffs) => {
2955 754 : target.retain_lsns = branchpoints;
2956 754 : target.cutoffs = cutoffs;
2957 754 : }
2958 0 : None => {
2959 0 : // reasons for this being unavailable:
2960 0 : // - this timeline was created while we were finding cutoffs
2961 0 : // - lsn for timestamp search fails for this timeline repeatedly
2962 0 : //
2963 0 : // in both cases, refreshing the branchpoints is correct.
2964 0 : target.retain_lsns = branchpoints;
2965 0 : }
2966 : };
2967 : }
2968 :
2969 754 : gc_timelines.push(timeline);
2970 : }
2971 754 : drop(gc_cs);
2972 754 : Ok(gc_timelines)
2973 754 : }
2974 :
2975 : /// A substitute for `branch_timeline` for use in unit tests.
2976 : /// The returned timeline will have state value `Active` to make various `anyhow::ensure!()`
2977 : /// calls pass, but we do not actually call `.activate()` under the hood, so none of the
2978 : /// timeline background tasks are launched, except the flush loop.
2979 : #[cfg(test)]
2980 228 : async fn branch_timeline_test(
2981 228 : &self,
2982 228 : src_timeline: &Arc<Timeline>,
2983 228 : dst_id: TimelineId,
2984 228 : ancestor_lsn: Option<Lsn>,
2985 228 : ctx: &RequestContext,
2986 228 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
2987 228 : let create_guard = self.create_timeline_create_guard(dst_id).unwrap();
2988 228 : let tl = self
2989 228 : .branch_timeline_impl(src_timeline, dst_id, ancestor_lsn, create_guard, ctx)
2990 4 : .await?;
2991 224 : tl.set_state(TimelineState::Active);
2992 224 : Ok(tl)
2993 228 : }
2994 :
2995 : /// Helper for unit tests to branch a timeline with some pre-loaded states.
2996 : #[cfg(test)]
2997 : #[allow(clippy::too_many_arguments)]
2998 4 : pub async fn branch_timeline_test_with_layers(
2999 4 : &self,
3000 4 : src_timeline: &Arc<Timeline>,
3001 4 : dst_id: TimelineId,
3002 4 : ancestor_lsn: Option<Lsn>,
3003 4 : ctx: &RequestContext,
3004 4 : delta_layer_desc: Vec<Vec<(pageserver_api::key::Key, Lsn, crate::repository::Value)>>,
3005 4 : image_layer_desc: Vec<(Lsn, Vec<(pageserver_api::key::Key, bytes::Bytes)>)>,
3006 4 : end_lsn: Lsn,
3007 4 : ) -> anyhow::Result<Arc<Timeline>> {
3008 4 : let tline = self
3009 4 : .branch_timeline_test(src_timeline, dst_id, ancestor_lsn, ctx)
3010 0 : .await?;
3011 4 : let ancestor_lsn = if let Some(ancestor_lsn) = ancestor_lsn {
3012 4 : ancestor_lsn
3013 : } else {
3014 0 : tline.get_last_record_lsn()
3015 : };
3016 4 : assert!(end_lsn >= ancestor_lsn);
3017 4 : tline.force_advance_lsn(end_lsn);
3018 4 : for deltas in delta_layer_desc {
3019 0 : tline
3020 0 : .force_create_delta_layer(deltas, Some(ancestor_lsn), ctx)
3021 0 : .await?;
3022 : }
3023 8 : for (lsn, images) in image_layer_desc {
3024 4 : tline
3025 4 : .force_create_image_layer(lsn, images, Some(ancestor_lsn), ctx)
3026 14 : .await?;
3027 : }
3028 4 : Ok(tline)
3029 4 : }
3030 :
3031 : /// Branch an existing timeline.
3032 : ///
3033 : /// The caller is responsible for activating the returned timeline.
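 : ///
 : /// A minimal usage sketch (hypothetical caller: `src`, `child_id`, `branch_lsn` and
 : /// `ctx` are assumed to be in scope, with `branch_lsn` inside the PITR window):
 : ///
 : /// ```ignore
 : /// let guard = tenant.create_timeline_create_guard(child_id)?;
 : /// let child = tenant
 : ///     .branch_timeline(&src, child_id, Some(branch_lsn), guard, &ctx)
 : ///     .await?;
 : /// // Activation is the caller's responsibility.
 : /// ```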
3034 0 : async fn branch_timeline(
3035 0 : &self,
3036 0 : src_timeline: &Arc<Timeline>,
3037 0 : dst_id: TimelineId,
3038 0 : start_lsn: Option<Lsn>,
3039 0 : timeline_create_guard: TimelineCreateGuard<'_>,
3040 0 : ctx: &RequestContext,
3041 0 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
3042 0 : self.branch_timeline_impl(src_timeline, dst_id, start_lsn, timeline_create_guard, ctx)
3043 0 : .await
3044 0 : }
3045 :
3046 228 : async fn branch_timeline_impl(
3047 228 : &self,
3048 228 : src_timeline: &Arc<Timeline>,
3049 228 : dst_id: TimelineId,
3050 228 : start_lsn: Option<Lsn>,
3051 228 : timeline_create_guard: TimelineCreateGuard<'_>,
3052 228 : _ctx: &RequestContext,
3053 228 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
3054 228 : let src_id = src_timeline.timeline_id;
3055 :
3056 : // We will validate our ancestor LSN in this function. Acquire the GC lock so that
3057 : // this check cannot race with GC, and the ancestor LSN is guaranteed to remain
3058 : // valid while we are creating the branch.
3059 228 : let _gc_cs = self.gc_cs.lock().await;
3060 :
3061 : // If no start LSN is specified, we branch the new timeline from the source timeline's last record LSN
3062 228 : let start_lsn = start_lsn.unwrap_or_else(|| {
3063 2 : let lsn = src_timeline.get_last_record_lsn();
3064 2 : info!("branching timeline {dst_id} from timeline {src_id} at last record LSN: {lsn}");
3065 2 : lsn
3066 228 : });
3067 228 :
3068 228 : // Ensure that `start_lsn` is valid, i.e. the LSN is within the PITR
3069 228 : // horizon on the source timeline
3070 228 : //
3071 228 : // We check it against both the planned GC cutoff stored in 'gc_info',
3072 228 : // and the 'latest_gc_cutoff' of the last GC that was performed. The
3073 228 : // planned GC cutoff in 'gc_info' is normally larger than
3074 228 : // 'latest_gc_cutoff_lsn', but beware of corner cases: for example, if you just
3075 228 : // changed the GC settings for the tenant to make the PITR window
3076 228 : // larger, some of the data may already have been removed by an earlier GC
3077 228 : // iteration.
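 : // Worked example (hypothetical numbers): if PITR was just widened from 1h to 24h,
 : // the planned cutoff in 'gc_info' jumps back to now-24h, while 'latest_gc_cutoff_lsn'
 : // still reflects the last GC run at now-1h. An LSN between the two passes the
 : // planned-cutoff check but may point at already-removed data, hence both checks.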
3078 228 :
3079 228 : // check against last actual 'latest_gc_cutoff' first
3080 228 : let latest_gc_cutoff_lsn = src_timeline.get_latest_gc_cutoff_lsn();
3081 228 : src_timeline
3082 228 : .check_lsn_is_in_scope(start_lsn, &latest_gc_cutoff_lsn)
3083 228 : .context(format!(
3084 228 : "invalid branch start lsn: less than latest GC cutoff {}",
3085 228 : *latest_gc_cutoff_lsn,
3086 228 : ))
3087 228 : .map_err(CreateTimelineError::AncestorLsn)?;
3088 :
3089 : // and then the planned GC cutoff
3090 : {
3091 224 : let gc_info = src_timeline.gc_info.read().unwrap();
3092 224 : let cutoff = gc_info.min_cutoff();
3093 224 : if start_lsn < cutoff {
3094 0 : return Err(CreateTimelineError::AncestorLsn(anyhow::anyhow!(
3095 0 : "invalid branch start lsn: less than planned GC cutoff {cutoff}"
3096 0 : )));
3097 224 : }
3098 224 : }
3099 224 :
3100 224 : //
3101 224 : // The branch point is valid, and we are still holding the 'gc_cs' lock
3102 224 : // so that GC cannot advance the GC cutoff until we are finished.
3103 224 : // Proceed with the branch creation.
3104 224 : //
3105 224 :
3106 224 : // Determine prev-LSN for the new timeline. We can only determine it if
3107 224 : // the timeline was branched at the current end of the source timeline.
3108 224 : let RecordLsn {
3109 224 : last: src_last,
3110 224 : prev: src_prev,
3111 224 : } = src_timeline.get_last_record_rlsn();
3112 224 : let dst_prev = if src_last == start_lsn {
3113 214 : Some(src_prev)
3114 : } else {
3115 10 : None
3116 : };
3117 :
3118 : // Create the metadata file, noting the ancestor of the new timeline.
3119 : // There is initially no data in it, but all the read calls know to look
3120 : // into the ancestor.
3121 224 : let metadata = TimelineMetadata::new(
3122 224 : start_lsn,
3123 224 : dst_prev,
3124 224 : Some(src_id),
3125 224 : start_lsn,
3126 224 : *src_timeline.latest_gc_cutoff_lsn.read(), // FIXME: should we hold onto this guard longer?
3127 224 : src_timeline.initdb_lsn,
3128 224 : src_timeline.pg_version,
3129 224 : );
3130 :
3131 224 : let uninitialized_timeline = self
3132 224 : .prepare_new_timeline(
3133 224 : dst_id,
3134 224 : &metadata,
3135 224 : timeline_create_guard,
3136 224 : start_lsn + 1,
3137 224 : Some(Arc::clone(src_timeline)),
3138 224 : src_timeline.last_aux_file_policy.load(),
3139 224 : )
3140 0 : .await?;
3141 :
3142 224 : let new_timeline = uninitialized_timeline.finish_creation()?;
3143 :
3144 : // Root timeline gets its layers during creation and uploads them along with the metadata.
3145 : // A branch timeline, though, may receive no writes for some time after creation, and hence have no layers yet.
3146 : // We still need to upload its metadata eagerly: if other nodes `attach` the tenant and miss this timeline, their GC
3147 : // could get incorrect information and remove more layers than needed.
3148 : // See also https://github.com/neondatabase/neon/issues/3865
3149 224 : new_timeline
3150 224 : .remote_client
3151 224 : .schedule_index_upload_for_full_metadata_update(&metadata)
3152 224 : .context("branch initial metadata upload")?;
3153 :
3154 224 : Ok(new_timeline)
3155 228 : }
3156 :
3157 : /// For unit tests, make this visible so that other modules can directly create timelines
3158 : #[cfg(test)]
3159 4 : #[tracing::instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), %timeline_id))]
3160 : pub(crate) async fn bootstrap_timeline_test(
3161 : &self,
3162 : timeline_id: TimelineId,
3163 : pg_version: u32,
3164 : load_existing_initdb: Option<TimelineId>,
3165 : ctx: &RequestContext,
3166 : ) -> anyhow::Result<Arc<Timeline>> {
3167 : let create_guard = self.create_timeline_create_guard(timeline_id).unwrap();
3168 : self.bootstrap_timeline(
3169 : timeline_id,
3170 : pg_version,
3171 : load_existing_initdb,
3172 : create_guard,
3173 : ctx,
3174 : )
3175 : .await
3176 : }
3177 :
3178 0 : async fn upload_initdb(
3179 0 : &self,
3180 0 : timelines_path: &Utf8PathBuf,
3181 0 : pgdata_path: &Utf8PathBuf,
3182 0 : timeline_id: &TimelineId,
3183 0 : ) -> anyhow::Result<()> {
3184 0 : let temp_path = timelines_path.join(format!(
3185 0 : "{INITDB_PATH}.upload-{timeline_id}.{TEMP_FILE_SUFFIX}"
3186 0 : ));
3187 :
3188 : scopeguard::defer! {
3189 : if let Err(e) = fs::remove_file(&temp_path) {
3190 : error!("Failed to remove temporary initdb archive '{temp_path}': {e}");
3191 : }
3192 : }
3193 :
3194 0 : let (pgdata_zstd, tar_zst_size) = create_zst_tarball(pgdata_path, &temp_path).await?;
3195 : const INITDB_TAR_ZST_WARN_LIMIT: u64 = 2 * 1024 * 1024;
3196 0 : if tar_zst_size > INITDB_TAR_ZST_WARN_LIMIT {
3197 0 : warn!(
3198 0 : "compressed {temp_path} size of {tar_zst_size} is above limit {INITDB_TAR_ZST_WARN_LIMIT}."
3199 : );
3200 0 : }
3201 :
3202 : pausable_failpoint!("before-initdb-upload");
3203 :
3204 0 : backoff::retry(
3205 0 : || async {
3206 0 : self::remote_timeline_client::upload_initdb_dir(
3207 0 : &self.remote_storage,
3208 0 : &self.tenant_shard_id.tenant_id,
3209 0 : timeline_id,
3210 0 : pgdata_zstd.try_clone().await?,
3211 0 : tar_zst_size,
3212 0 : &self.cancel,
3213 0 : )
3214 0 : .await
3215 0 : },
3216 0 : |_| false,
3217 0 : 3,
3218 0 : u32::MAX,
3219 0 : "persist_initdb_tar_zst",
3220 0 : &self.cancel,
3221 0 : )
3222 0 : .await
3223 0 : .ok_or_else(|| anyhow::Error::new(TimeoutOrCancel::Cancel))
3224 0 : .and_then(|x| x)
3225 0 : }
3226 :
3227 : /// - run initdb to init a temporary instance and get bootstrap data
3228 : /// - after initialization completes, tar up the temp dir and upload it to S3.
3229 : ///
3230 : /// The caller is responsible for activating the returned timeline.
3231 2 : async fn bootstrap_timeline(
3232 2 : &self,
3233 2 : timeline_id: TimelineId,
3234 2 : pg_version: u32,
3235 2 : load_existing_initdb: Option<TimelineId>,
3236 2 : timeline_create_guard: TimelineCreateGuard<'_>,
3237 2 : ctx: &RequestContext,
3238 2 : ) -> anyhow::Result<Arc<Timeline>> {
3239 2 : // create a `tenant/{tenant_id}/timelines/basebackup-{timeline_id}.{TEMP_FILE_SUFFIX}/`
3240 2 : // temporary directory for basebackup files for the given timeline.
3241 2 :
3242 2 : let timelines_path = self.conf.timelines_path(&self.tenant_shard_id);
3243 2 : let pgdata_path = path_with_suffix_extension(
3244 2 : timelines_path.join(format!("basebackup-{timeline_id}")),
3245 2 : TEMP_FILE_SUFFIX,
3246 2 : );
3247 2 :
3248 2 : // Remove whatever was left from the previous runs: safe because TimelineCreateGuard guarantees
3249 2 : // we won't race with other creations or existing timelines with the same path.
3250 2 : if pgdata_path.exists() {
3251 0 : fs::remove_dir_all(&pgdata_path).with_context(|| {
3252 0 : format!("Failed to remove already existing initdb directory: {pgdata_path}")
3253 0 : })?;
3254 2 : }
3255 :
3256 : // This directory is only needed during bootstrap: the scopeguard below removes it as soon as we are done.
3257 : scopeguard::defer! {
3258 : if let Err(e) = fs::remove_dir_all(&pgdata_path) {
3259 : // this is unlikely, but we will remove the directory on pageserver restart or another bootstrap call
3260 : error!("Failed to remove temporary initdb directory '{pgdata_path}': {e}");
3261 : }
3262 : }
3263 2 : if let Some(existing_initdb_timeline_id) = load_existing_initdb {
3264 2 : if existing_initdb_timeline_id != timeline_id {
3265 0 : let source_path = &remote_initdb_archive_path(
3266 0 : &self.tenant_shard_id.tenant_id,
3267 0 : &existing_initdb_timeline_id,
3268 0 : );
3269 0 : let dest_path =
3270 0 : &remote_initdb_archive_path(&self.tenant_shard_id.tenant_id, &timeline_id);
3271 0 :
3272 0 : // if this fails, it will get retried by retried control plane requests
3273 0 : self.remote_storage
3274 0 : .copy_object(source_path, dest_path, &self.cancel)
3275 0 : .await
3276 0 : .context("copy initdb tar")?;
3277 2 : }
3278 2 : let (initdb_tar_zst_path, initdb_tar_zst) =
3279 2 : self::remote_timeline_client::download_initdb_tar_zst(
3280 2 : self.conf,
3281 2 : &self.remote_storage,
3282 2 : &self.tenant_shard_id,
3283 2 : &existing_initdb_timeline_id,
3284 2 : &self.cancel,
3285 2 : )
3286 723 : .await
3287 2 : .context("download initdb tar")?;
3288 :
3289 : scopeguard::defer! {
3290 : if let Err(e) = fs::remove_file(&initdb_tar_zst_path) {
3291 : error!("Failed to remove temporary initdb archive '{initdb_tar_zst_path}': {e}");
3292 : }
3293 : }
3294 :
3295 2 : let buf_read =
3296 2 : BufReader::with_capacity(remote_timeline_client::BUFFER_SIZE, initdb_tar_zst);
3297 2 : extract_zst_tarball(&pgdata_path, buf_read)
3298 9005 : .await
3299 2 : .context("extract initdb tar")?;
3300 : } else {
3301 : // Init a temporary repo to get bootstrap data; this creates a directory at `pgdata_path`
3302 0 : run_initdb(self.conf, &pgdata_path, pg_version, &self.cancel).await?;
3303 :
3304 : // Upload the created data dir to S3
3305 0 : if self.tenant_shard_id().is_shard_zero() {
3306 0 : self.upload_initdb(&timelines_path, &pgdata_path, &timeline_id)
3307 0 : .await?;
3308 0 : }
3309 : }
3310 2 : let pgdata_lsn = import_datadir::get_lsn_from_controlfile(&pgdata_path)?.align();
3311 2 :
3312 2 : // Import the contents of the data directory at the initial checkpoint
3313 2 : // LSN, and any WAL after that.
3314 2 : // Initdb lsn will be equal to last_record_lsn which will be set after import.
3315 2 : // Because we know it upfront avoid having an option or dummy zero value by passing it to the metadata.
3316 2 : let new_metadata = TimelineMetadata::new(
3317 2 : Lsn(0),
3318 2 : None,
3319 2 : None,
3320 2 : Lsn(0),
3321 2 : pgdata_lsn,
3322 2 : pgdata_lsn,
3323 2 : pg_version,
3324 2 : );
3325 2 : let raw_timeline = self
3326 2 : .prepare_new_timeline(
3327 2 : timeline_id,
3328 2 : &new_metadata,
3329 2 : timeline_create_guard,
3330 2 : pgdata_lsn,
3331 2 : None,
3332 2 : None,
3333 2 : )
3334 0 : .await?;
3335 :
3336 2 : let tenant_shard_id = raw_timeline.owning_tenant.tenant_shard_id;
3337 2 : let unfinished_timeline = raw_timeline.raw_timeline()?;
3338 :
3339 : // Flush the new layer files to disk before we make the timeline available to
3340 : // the outside world.
3341 : //
3342 : // Flush loop needs to be spawned in order to be able to flush.
3343 2 : unfinished_timeline.maybe_spawn_flush_loop();
3344 2 :
3345 2 : import_datadir::import_timeline_from_postgres_datadir(
3346 2 : unfinished_timeline,
3347 2 : &pgdata_path,
3348 2 : pgdata_lsn,
3349 2 : ctx,
3350 2 : )
3351 7857 : .await
3352 2 : .with_context(|| {
3353 0 : format!("Failed to import pgdatadir for timeline {tenant_shard_id}/{timeline_id}")
3354 2 : })?;
3355 :
3356 2 : fail::fail_point!("before-checkpoint-new-timeline", |_| {
3357 0 : anyhow::bail!("failpoint before-checkpoint-new-timeline");
3358 2 : });
3359 :
3360 2 : unfinished_timeline
3361 2 : .freeze_and_flush()
3362 2 : .await
3363 2 : .with_context(|| {
3364 0 : format!(
3365 0 : "Failed to flush after pgdatadir import for timeline {tenant_shard_id}/{timeline_id}"
3366 0 : )
3367 2 : })?;
3368 :
3369 : // All done!
3370 2 : let timeline = raw_timeline.finish_creation()?;
3371 :
3372 2 : Ok(timeline)
3373 2 : }
3374 :
3375 : /// Call this before constructing a timeline, to build its required structures
3376 377 : fn build_timeline_resources(&self, timeline_id: TimelineId) -> TimelineResources {
3377 377 : let remote_client = RemoteTimelineClient::new(
3378 377 : self.remote_storage.clone(),
3379 377 : self.deletion_queue_client.clone(),
3380 377 : self.conf,
3381 377 : self.tenant_shard_id,
3382 377 : timeline_id,
3383 377 : self.generation,
3384 377 : );
3385 377 : TimelineResources {
3386 377 : remote_client,
3387 377 : timeline_get_throttle: self.timeline_get_throttle.clone(),
3388 377 : }
3389 377 : }
3390 :
3391 : /// Creates intermediate timeline structure and its files.
3392 : ///
3393 : /// An empty layer map is initialized, and new data and WAL can be imported starting
3394 : /// at 'disk_consistent_lsn'. After any initial data has been imported, call
3395 : /// `finish_creation` to insert the Timeline into the timelines map.
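 : ///
 : /// A condensed sketch of the flow, as used by `bootstrap_timeline` above (all
 : /// names come from that function; this is not a standalone example):
 : ///
 : /// ```ignore
 : /// let raw = self.prepare_new_timeline(id, &metadata, guard, start_lsn, None, None).await?;
 : /// let tl = raw.raw_timeline()?;           // import initial data into `tl` here
 : /// tl.freeze_and_flush().await?;           // persist before publishing
 : /// let timeline = raw.finish_creation()?;  // insert into the timelines map
 : /// ```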
3396 377 : async fn prepare_new_timeline<'a>(
3397 377 : &'a self,
3398 377 : new_timeline_id: TimelineId,
3399 377 : new_metadata: &TimelineMetadata,
3400 377 : create_guard: TimelineCreateGuard<'a>,
3401 377 : start_lsn: Lsn,
3402 377 : ancestor: Option<Arc<Timeline>>,
3403 377 : last_aux_file_policy: Option<AuxFilePolicy>,
3404 377 : ) -> anyhow::Result<UninitializedTimeline> {
3405 377 : let tenant_shard_id = self.tenant_shard_id;
3406 377 :
3407 377 : let resources = self.build_timeline_resources(new_timeline_id);
3408 377 : resources
3409 377 : .remote_client
3410 377 : .init_upload_queue_for_empty_remote(new_metadata)?;
3411 :
3412 377 : let timeline_struct = self
3413 377 : .create_timeline_struct(
3414 377 : new_timeline_id,
3415 377 : new_metadata,
3416 377 : ancestor,
3417 377 : resources,
3418 377 : CreateTimelineCause::Load,
3419 377 : last_aux_file_policy,
3420 377 : )
3421 377 : .context("Failed to create timeline data structure")?;
3422 :
3423 377 : timeline_struct.init_empty_layer_map(start_lsn);
3424 :
3425 377 : if let Err(e) = self
3426 377 : .create_timeline_files(&create_guard.timeline_path)
3427 0 : .await
3428 : {
3429 1 : error!("Failed to create initial files for timeline {tenant_shard_id}/{new_timeline_id}, cleaning up: {e:?}");
3430 1 : cleanup_timeline_directory(create_guard);
3431 1 : return Err(e);
3432 376 : }
3433 376 :
3434 376 : debug!(
3435 0 : "Successfully created initial files for timeline {tenant_shard_id}/{new_timeline_id}"
3436 : );
3437 :
3438 376 : Ok(UninitializedTimeline::new(
3439 376 : self,
3440 376 : new_timeline_id,
3441 376 : Some((timeline_struct, create_guard)),
3442 376 : ))
3443 377 : }
3444 :
3445 377 : async fn create_timeline_files(&self, timeline_path: &Utf8Path) -> anyhow::Result<()> {
3446 377 : crashsafe::create_dir(timeline_path).context("Failed to create timeline directory")?;
3447 :
3448 376 : fail::fail_point!("after-timeline-dir-creation", |_| {
3449 0 : anyhow::bail!("failpoint after-timeline-dir-creation");
3450 376 : });
3451 :
3452 376 : Ok(())
3453 377 : }
3454 :
3455 : /// Get a guard that provides exclusive access to the timeline directory, preventing
3456 : /// concurrent attempts to create the same timeline.
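 : ///
 : /// A sketch of the intended exclusion behavior: while a guard for a timeline ID is
 : /// alive, a second guard for the same ID cannot be taken.
 : ///
 : /// ```ignore
 : /// let _guard = tenant.create_timeline_create_guard(timeline_id)?;
 : /// assert!(tenant.create_timeline_create_guard(timeline_id).is_err());
 : /// ```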
3457 383 : fn create_timeline_create_guard(
3458 383 : &self,
3459 383 : timeline_id: TimelineId,
3460 383 : ) -> Result<TimelineCreateGuard, TimelineExclusionError> {
3461 383 : let tenant_shard_id = self.tenant_shard_id;
3462 383 :
3463 383 : let timeline_path = self.conf.timeline_path(&tenant_shard_id, &timeline_id);
3464 :
3465 383 : let create_guard = TimelineCreateGuard::new(self, timeline_id, timeline_path.clone())?;
3466 :
3467 : // At this stage, we have got exclusive access to in-memory state for this timeline ID
3468 : // for creation.
3469 : // A timeline directory should never exist on disk already:
3470 : // - a previous failed creation would have cleaned up after itself
3471 : // - a pageserver restart would clean up timeline directories that don't have valid remote state
3472 : //
3473 : // Therefore it is an unexpected internal error to encounter a timeline directory already existing here,
3474 : // this error may indicate a bug in cleanup on failed creations.
3475 381 : if timeline_path.exists() {
3476 0 : return Err(TimelineExclusionError::Other(anyhow::anyhow!(
3477 0 : "Timeline directory already exists! This is a bug."
3478 0 : )));
3479 381 : }
3480 381 :
3481 381 : Ok(create_guard)
3482 383 : }
3483 :
3484 : /// Gathers inputs from all of the timelines to produce a sizing model input.
3485 : ///
3486 : /// Future is cancellation safe. Only one calculation can be running at once per tenant.
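 : ///
 : /// The canonical call sequence is `calculate_synthetic_size` below: gather the
 : /// inputs once, then run the pure `inputs.calculate()` on them.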
3487 0 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
3488 : pub async fn gather_size_inputs(
3489 : &self,
3490 : // `max_retention_period` overrides the cutoff that is used to calculate the size
3491 : // (only if it is shorter than the real cutoff).
3492 : max_retention_period: Option<u64>,
3493 : cause: LogicalSizeCalculationCause,
3494 : cancel: &CancellationToken,
3495 : ctx: &RequestContext,
3496 : ) -> Result<size::ModelInputs, size::CalculateSyntheticSizeError> {
3497 : let logical_sizes_at_once = self
3498 : .conf
3499 : .concurrent_tenant_size_logical_size_queries
3500 : .inner();
3501 :
3502 : // TODO: Having a single mutex block concurrent reads is not great for performance.
3503 : //
3504 : // But the only case where we need to run multiple of these at once is when we
3505 : // request a size for a tenant manually via API, while another background calculation
3506 : // is in progress (which is not a common case).
3507 : //
3508 : // See more for on the issue #2748 condenced out of the initial PR review.
3509 : let mut shared_cache = tokio::select! {
3510 : locked = self.cached_logical_sizes.lock() => locked,
3511 : _ = cancel.cancelled() => return Err(size::CalculateSyntheticSizeError::Cancelled),
3512 : _ = self.cancel.cancelled() => return Err(size::CalculateSyntheticSizeError::Cancelled),
3513 : };
3514 :
3515 : size::gather_inputs(
3516 : self,
3517 : logical_sizes_at_once,
3518 : max_retention_period,
3519 : &mut shared_cache,
3520 : cause,
3521 : cancel,
3522 : ctx,
3523 : )
3524 : .await
3525 : }
3526 :
3527 : /// Calculate synthetic tenant size and cache the result.
3528 : /// This is periodically called by a background worker.
3529 : /// The result is cached in the tenant struct.
3530 0 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
3531 : pub async fn calculate_synthetic_size(
3532 : &self,
3533 : cause: LogicalSizeCalculationCause,
3534 : cancel: &CancellationToken,
3535 : ctx: &RequestContext,
3536 : ) -> Result<u64, size::CalculateSyntheticSizeError> {
3537 : let inputs = self.gather_size_inputs(None, cause, cancel, ctx).await?;
3538 :
3539 : let size = inputs.calculate();
3540 :
3541 : self.set_cached_synthetic_size(size);
3542 :
3543 : Ok(size)
3544 : }
3545 :
3546 : /// Cache given synthetic size and update the metric value
3547 0 : pub fn set_cached_synthetic_size(&self, size: u64) {
3548 0 : self.cached_synthetic_tenant_size
3549 0 : .store(size, Ordering::Relaxed);
3550 0 :
3551 0 : // Only shard zero should be calculating synthetic sizes
3552 0 : debug_assert!(self.shard_identity.is_shard_zero());
3553 :
3554 0 : TENANT_SYNTHETIC_SIZE_METRIC
3555 0 : .get_metric_with_label_values(&[&self.tenant_shard_id.tenant_id.to_string()])
3556 0 : .unwrap()
3557 0 : .set(size);
3558 0 : }
3559 :
3560 0 : pub fn cached_synthetic_size(&self) -> u64 {
3561 0 : self.cached_synthetic_tenant_size.load(Ordering::Relaxed)
3562 0 : }
3563 :
3564 : /// Flush any in-progress layers, schedule uploads, and wait for uploads to complete.
3565 : ///
3566 : /// This function can take a long time: callers should wrap it in a timeout if calling
3567 : /// from an external API handler.
3568 : ///
3569 : /// Cancel-safety: cancelling this function may leave I/O running, but such I/O is
3570 : /// still bounded by tenant/timeline shutdown.
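 : ///
 : /// An API-path caller would typically bound it, e.g. (a sketch; the timeout value
 : /// is illustrative):
 : ///
 : /// ```ignore
 : /// tokio::time::timeout(Duration::from_secs(30), tenant.flush_remote()).await??;
 : /// ```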
3571 0 : #[tracing::instrument(skip_all)]
3572 : pub(crate) async fn flush_remote(&self) -> anyhow::Result<()> {
3573 : let timelines = self.timelines.lock().unwrap().clone();
3574 :
3575 0 : async fn flush_timeline(_gate: GateGuard, timeline: Arc<Timeline>) -> anyhow::Result<()> {
3576 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Flushing...");
3577 0 : timeline.freeze_and_flush().await?;
3578 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Waiting for uploads...");
3579 0 : timeline.remote_client.wait_completion().await?;
3580 :
3581 0 : Ok(())
3582 0 : }
3583 :
3584 : // We do not use a JoinSet for these tasks, because we don't want them to be
3585 : // aborted when this function's future is cancelled: they should stay alive
3586 : // holding their GateGuard until they complete, to ensure their I/Os complete
3587 : // before Timeline shutdown completes.
3588 : let mut results = FuturesUnordered::new();
3589 :
3590 : for (_timeline_id, timeline) in timelines {
3591 : // Run each timeline's flush in a task holding the timeline's gate: this
3592 : // means that if this function's future is cancelled, the Timeline shutdown
3593 : // will still wait for any I/O in here to complete.
3594 : let Ok(gate) = timeline.gate.enter() else {
3595 : continue;
3596 : };
3597 0 : let jh = tokio::task::spawn(async move { flush_timeline(gate, timeline).await });
3598 : results.push(jh);
3599 : }
3600 :
3601 : while let Some(r) = results.next().await {
3602 : if let Err(e) = r {
3603 : if !e.is_cancelled() && !e.is_panic() {
3604 : tracing::error!("unexpected join error: {e:?}");
3605 : }
3606 : }
3607 : }
3608 :
3609 : // The flushes we did above were just writes, but the Tenant might also have had
3610 : // pending deletions from recent compaction/gc: we want to flush those
3611 : // as well. This requires flushing the global delete queue, which is cheap
3612 : // because it's typically a no-op.
3613 : match self.deletion_queue_client.flush_execute().await {
3614 : Ok(_) => {}
3615 : Err(DeletionQueueError::ShuttingDown) => {}
3616 : }
3617 :
3618 : Ok(())
3619 : }
3620 :
3621 0 : pub(crate) fn get_tenant_conf(&self) -> TenantConfOpt {
3622 0 : self.tenant_conf.load().tenant_conf.clone()
3623 0 : }
3624 : }
3625 :
3626 : /// Create the cluster temporarily in the 'initdb_target_dir' directory inside the repository
3627 : /// to get bootstrap data for timeline initialization.
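 : ///
 : /// Roughly the command this spawns (see the invocation below):
 : ///
 : /// ```text
 : /// initdb -D <initdb_target_dir> -U <superuser> -E utf8 --no-instructions --no-sync
 : /// ```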
3628 0 : async fn run_initdb(
3629 0 : conf: &'static PageServerConf,
3630 0 : initdb_target_dir: &Utf8Path,
3631 0 : pg_version: u32,
3632 0 : cancel: &CancellationToken,
3633 0 : ) -> Result<(), InitdbError> {
3634 0 : let initdb_bin_path = conf
3635 0 : .pg_bin_dir(pg_version)
3636 0 : .map_err(InitdbError::Other)?
3637 0 : .join("initdb");
3638 0 : let initdb_lib_dir = conf.pg_lib_dir(pg_version).map_err(InitdbError::Other)?;
3639 0 : info!(
3640 0 : "running {} in {}, libdir: {}",
3641 : initdb_bin_path, initdb_target_dir, initdb_lib_dir,
3642 : );
3643 :
3644 0 : let _permit = INIT_DB_SEMAPHORE.acquire().await;
3645 :
3646 0 : let initdb_command = tokio::process::Command::new(&initdb_bin_path)
3647 0 : .args(["-D", initdb_target_dir.as_ref()])
3648 0 : .args(["-U", &conf.superuser])
3649 0 : .args(["-E", "utf8"])
3650 0 : .arg("--no-instructions")
3651 0 : .arg("--no-sync")
3652 0 : .env_clear()
3653 0 : .env("LD_LIBRARY_PATH", &initdb_lib_dir)
3654 0 : .env("DYLD_LIBRARY_PATH", &initdb_lib_dir)
3655 0 : .stdin(std::process::Stdio::null())
3656 0 : // stdout invocation produces the same output every time, we don't need it
3657 0 : .stdout(std::process::Stdio::null())
3658 0 : // we would be interested in the stderr output, if there was any
3659 0 : .stderr(std::process::Stdio::piped())
3660 0 : .spawn()?;
3661 :
3662 : // Ideally we'd select here with the cancellation token, but the problem is that
3663 : // we can't safely terminate initdb: it launches processes of its own, and killing
3664 : // initdb doesn't kill them. After we return from this function, we want the target
3665 : // directory to be able to be cleaned up.
3666 : // See https://github.com/neondatabase/neon/issues/6385
3667 0 : let initdb_output = initdb_command.wait_with_output().await?;
3668 0 : if !initdb_output.status.success() {
3669 0 : return Err(InitdbError::Failed(
3670 0 : initdb_output.status,
3671 0 : initdb_output.stderr,
3672 0 : ));
3673 0 : }
3674 0 :
3675 0 : // This isn't true cancellation support, see above. Still return an error to
3676 0 : // exercise the cancellation code path.
3677 0 : if cancel.is_cancelled() {
3678 0 : return Err(InitdbError::Cancelled);
3679 0 : }
3680 0 :
3681 0 : Ok(())
3682 0 : }
3683 :
3684 : /// Dump contents of a layer file to stdout.
3685 0 : pub async fn dump_layerfile_from_path(
3686 0 : path: &Utf8Path,
3687 0 : verbose: bool,
3688 0 : ctx: &RequestContext,
3689 0 : ) -> anyhow::Result<()> {
3690 : use std::os::unix::fs::FileExt;
3691 :
3692 : // All layer files start with a two-byte "magic" value, to identify the kind of
3693 : // file.
3694 0 : let file = File::open(path)?;
3695 0 : let mut header_buf = [0u8; 2];
3696 0 : file.read_exact_at(&mut header_buf, 0)?;
3697 :
3698 0 : match u16::from_be_bytes(header_buf) {
3699 : crate::IMAGE_FILE_MAGIC => {
3700 0 : ImageLayer::new_for_path(path, file)?
3701 0 : .dump(verbose, ctx)
3702 0 : .await?
3703 : }
3704 : crate::DELTA_FILE_MAGIC => {
3705 0 : DeltaLayer::new_for_path(path, file)?
3706 0 : .dump(verbose, ctx)
3707 0 : .await?
3708 : }
3709 0 : magic => bail!("unrecognized magic identifier: {:?}", magic),
3710 : }
3711 :
3712 0 : Ok(())
3713 0 : }
3714 :
3715 : #[cfg(test)]
3716 : pub(crate) mod harness {
3717 : use bytes::{Bytes, BytesMut};
3718 : use once_cell::sync::OnceCell;
3719 : use pageserver_api::models::ShardParameters;
3720 : use pageserver_api::shard::ShardIndex;
3721 : use utils::logging;
3722 :
3723 : use crate::deletion_queue::mock::MockDeletionQueue;
3724 : use crate::walredo::apply_neon;
3725 : use crate::{repository::Key, walrecord::NeonWalRecord};
3726 :
3727 : use super::*;
3728 : use hex_literal::hex;
3729 : use utils::id::TenantId;
3730 :
3731 : pub const TIMELINE_ID: TimelineId =
3732 : TimelineId::from_array(hex!("11223344556677881122334455667788"));
3733 : pub const NEW_TIMELINE_ID: TimelineId =
3734 : TimelineId::from_array(hex!("AA223344556677881122334455667788"));
3735 :
3736 : /// Convenience function to create a page image with given string as the only content
3737 5028400 : pub fn test_img(s: &str) -> Bytes {
3738 5028400 : let mut buf = BytesMut::new();
3739 5028400 : buf.extend_from_slice(s.as_bytes());
3740 5028400 : buf.resize(64, 0);
3741 5028400 :
3742 5028400 : buf.freeze()
3743 5028400 : }
3744 :
3745 : impl From<TenantConf> for TenantConfOpt {
3746 161 : fn from(tenant_conf: TenantConf) -> Self {
3747 161 : Self {
3748 161 : checkpoint_distance: Some(tenant_conf.checkpoint_distance),
3749 161 : checkpoint_timeout: Some(tenant_conf.checkpoint_timeout),
3750 161 : compaction_target_size: Some(tenant_conf.compaction_target_size),
3751 161 : compaction_period: Some(tenant_conf.compaction_period),
3752 161 : compaction_threshold: Some(tenant_conf.compaction_threshold),
3753 161 : compaction_algorithm: Some(tenant_conf.compaction_algorithm),
3754 161 : gc_horizon: Some(tenant_conf.gc_horizon),
3755 161 : gc_period: Some(tenant_conf.gc_period),
3756 161 : image_creation_threshold: Some(tenant_conf.image_creation_threshold),
3757 161 : pitr_interval: Some(tenant_conf.pitr_interval),
3758 161 : walreceiver_connect_timeout: Some(tenant_conf.walreceiver_connect_timeout),
3759 161 : lagging_wal_timeout: Some(tenant_conf.lagging_wal_timeout),
3760 161 : max_lsn_wal_lag: Some(tenant_conf.max_lsn_wal_lag),
3761 161 : trace_read_requests: Some(tenant_conf.trace_read_requests),
3762 161 : eviction_policy: Some(tenant_conf.eviction_policy),
3763 161 : min_resident_size_override: tenant_conf.min_resident_size_override,
3764 161 : evictions_low_residence_duration_metric_threshold: Some(
3765 161 : tenant_conf.evictions_low_residence_duration_metric_threshold,
3766 161 : ),
3767 161 : heatmap_period: Some(tenant_conf.heatmap_period),
3768 161 : lazy_slru_download: Some(tenant_conf.lazy_slru_download),
3769 161 : timeline_get_throttle: Some(tenant_conf.timeline_get_throttle),
3770 161 : image_layer_creation_check_threshold: Some(
3771 161 : tenant_conf.image_layer_creation_check_threshold,
3772 161 : ),
3773 161 : switch_aux_file_policy: Some(tenant_conf.switch_aux_file_policy),
3774 161 : lsn_lease_length: Some(tenant_conf.lsn_lease_length),
3775 161 : lsn_lease_length_for_ts: Some(tenant_conf.lsn_lease_length_for_ts),
3776 161 : }
3777 161 : }
3778 : }
3779 :
3780 : pub struct TenantHarness {
3781 : pub conf: &'static PageServerConf,
3782 : pub tenant_conf: TenantConf,
3783 : pub tenant_shard_id: TenantShardId,
3784 : pub generation: Generation,
3785 : pub shard: ShardIndex,
3786 : pub remote_storage: GenericRemoteStorage,
3787 : pub remote_fs_dir: Utf8PathBuf,
3788 : pub deletion_queue: MockDeletionQueue,
3789 : }
3790 :
3791 : static LOG_HANDLE: OnceCell<()> = OnceCell::new();
3792 :
3793 167 : pub(crate) fn setup_logging() {
3794 167 : LOG_HANDLE.get_or_init(|| {
3795 155 : logging::init(
3796 155 : logging::LogFormat::Test,
3797 155 : // enable it in case the tests exercise code paths that use
3798 155 : // debug_assert_current_span_has_tenant_and_timeline_id
3799 155 : logging::TracingErrorLayerEnablement::EnableWithRustLogFilter,
3800 155 : logging::Output::Stdout,
3801 155 : )
3802 155 : .expect("Failed to init test logging")
3803 167 : });
3804 167 : }
3805 :
3806 : impl TenantHarness {
3807 161 : pub fn create_custom(
3808 161 : test_name: &'static str,
3809 161 : tenant_conf: TenantConf,
3810 161 : tenant_id: TenantId,
3811 161 : shard_identity: ShardIdentity,
3812 161 : generation: Generation,
3813 161 : ) -> anyhow::Result<Self> {
3814 161 : setup_logging();
3815 161 :
3816 161 : let repo_dir = PageServerConf::test_repo_dir(test_name);
3817 161 : let _ = fs::remove_dir_all(&repo_dir);
3818 161 : fs::create_dir_all(&repo_dir)?;
3819 :
3820 161 : let conf = PageServerConf::dummy_conf(repo_dir);
3821 161 : // Make a static copy of the config. This can never be freed, but that's
3822 161 : // OK in a test.
3823 161 : let conf: &'static PageServerConf = Box::leak(Box::new(conf));
3824 161 :
3825 161 : let shard = shard_identity.shard_index();
3826 161 : let tenant_shard_id = TenantShardId {
3827 161 : tenant_id,
3828 161 : shard_number: shard.shard_number,
3829 161 : shard_count: shard.shard_count,
3830 161 : };
3831 161 : fs::create_dir_all(conf.tenant_path(&tenant_shard_id))?;
3832 161 : fs::create_dir_all(conf.timelines_path(&tenant_shard_id))?;
3833 :
3834 : use remote_storage::{RemoteStorageConfig, RemoteStorageKind};
3835 161 : let remote_fs_dir = conf.workdir.join("localfs");
3836 161 : std::fs::create_dir_all(&remote_fs_dir).unwrap();
3837 161 : let config = RemoteStorageConfig {
3838 161 : storage: RemoteStorageKind::LocalFs {
3839 161 : local_path: remote_fs_dir.clone(),
3840 161 : },
3841 161 : timeout: RemoteStorageConfig::DEFAULT_TIMEOUT,
3842 161 : };
3843 161 : let remote_storage = GenericRemoteStorage::from_config(&config).unwrap();
3844 161 : let deletion_queue = MockDeletionQueue::new(Some(remote_storage.clone()));
3845 161 :
3846 161 : Ok(Self {
3847 161 : conf,
3848 161 : tenant_conf,
3849 161 : tenant_shard_id,
3850 161 : generation,
3851 161 : shard,
3852 161 : remote_storage,
3853 161 : remote_fs_dir,
3854 161 : deletion_queue,
3855 161 : })
3856 161 : }
3857 :
3858 149 : pub fn create(test_name: &'static str) -> anyhow::Result<Self> {
3859 149 : // Disable automatic GC and compaction to make the unit tests more deterministic.
3860 149 : // The tests perform them manually if needed.
3861 149 : let tenant_conf = TenantConf {
3862 149 : gc_period: Duration::ZERO,
3863 149 : compaction_period: Duration::ZERO,
3864 149 : ..TenantConf::default()
3865 149 : };
3866 149 : let tenant_id = TenantId::generate();
3867 149 : let shard = ShardIdentity::unsharded();
3868 149 : Self::create_custom(
3869 149 : test_name,
3870 149 : tenant_conf,
3871 149 : tenant_id,
3872 149 : shard,
3873 149 : Generation::new(0xdeadbeef),
3874 149 : )
3875 149 : }
3876 :
3877 18 : pub fn span(&self) -> tracing::Span {
3878 18 : info_span!("TenantHarness", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug())
3879 18 : }
3880 :
3881 161 : pub(crate) async fn load(&self) -> (Arc<Tenant>, RequestContext) {
3882 161 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
3883 161 : (
3884 161 : self.do_try_load(&ctx)
3885 638 : .await
3886 161 : .expect("failed to load test tenant"),
3887 161 : ctx,
3888 161 : )
3889 161 : }
3890 :
3891 322 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
3892 : pub(crate) async fn do_try_load(
3893 : &self,
3894 : ctx: &RequestContext,
3895 : ) -> anyhow::Result<Arc<Tenant>> {
3896 : let walredo_mgr = Arc::new(WalRedoManager::from(TestRedoManager));
3897 :
3898 : let tenant = Arc::new(Tenant::new(
3899 : TenantState::Loading,
3900 : self.conf,
3901 : AttachedTenantConf::try_from(LocationConf::attached_single(
3902 : TenantConfOpt::from(self.tenant_conf.clone()),
3903 : self.generation,
3904 : &ShardParameters::default(),
3905 : ))
3906 : .unwrap(),
3907 : // This is a legacy/test code path: sharding isn't supported here.
3908 : ShardIdentity::unsharded(),
3909 : Some(walredo_mgr),
3910 : self.tenant_shard_id,
3911 : self.remote_storage.clone(),
3912 : self.deletion_queue.new_client(),
3913 : ));
3914 :
3915 : let preload = tenant
3916 : .preload(&self.remote_storage, CancellationToken::new())
3917 : .await?;
3918 : tenant.attach(Some(preload), ctx).await?;
3919 :
3920 : tenant.state.send_replace(TenantState::Active);
3921 : for timeline in tenant.timelines.lock().unwrap().values() {
3922 : timeline.set_state(TimelineState::Active);
3923 : }
3924 : Ok(tenant)
3925 : }
3926 :
3927 2 : pub fn timeline_path(&self, timeline_id: &TimelineId) -> Utf8PathBuf {
3928 2 : self.conf.timeline_path(&self.tenant_shard_id, timeline_id)
3929 2 : }
3930 : }
3931 :
3932 : // Mock WAL redo manager that doesn't do much
3933 : pub(crate) struct TestRedoManager;
3934 :
3935 : impl TestRedoManager {
3936 : /// # Cancel-Safety
3937 : ///
3938 : /// This method is cancellation-safe.
3939 58 : pub async fn request_redo(
3940 58 : &self,
3941 58 : key: Key,
3942 58 : lsn: Lsn,
3943 58 : base_img: Option<(Lsn, Bytes)>,
3944 58 : records: Vec<(Lsn, NeonWalRecord)>,
3945 58 : _pg_version: u32,
3946 58 : ) -> anyhow::Result<Bytes> {
3947 76 : let records_neon = records.iter().all(|r| apply_neon::can_apply_in_neon(&r.1));
3948 58 : if records_neon {
3949 : // For Neon wal records, we can decode without spawning postgres, so do so.
3950 58 : let base_img = base_img.expect("Neon WAL redo requires base image").1;
3951 58 : let mut page = BytesMut::new();
3952 58 : page.extend_from_slice(&base_img);
3953 134 : for (record_lsn, record) in records {
3954 76 : apply_neon::apply_in_neon(&record, record_lsn, key, &mut page)?;
3955 : }
3956 58 : Ok(page.freeze())
3957 : } else {
3958 : // We never spawn a postgres walredo process in unit tests: just log what we might have done.
3959 0 : let s = format!(
3960 0 : "redo for {} to get to {}, with {} and {} records",
3961 0 : key,
3962 0 : lsn,
3963 0 : if base_img.is_some() {
3964 0 : "base image"
3965 : } else {
3966 0 : "no base image"
3967 : },
3968 0 : records.len()
3969 0 : );
3970 0 : println!("{s}");
3971 0 :
3972 0 : Ok(test_img(&s))
3973 : }
3974 58 : }
3975 : }
3976 : }
3977 :
3978 : #[cfg(test)]
3979 : mod tests {
3980 : use std::collections::BTreeMap;
3981 :
3982 : use super::*;
3983 : use crate::keyspace::KeySpaceAccum;
3984 : use crate::pgdatadir_mapping::AuxFilesDirectory;
3985 : use crate::repository::{Key, Value};
3986 : use crate::tenant::harness::*;
3987 : use crate::tenant::timeline::CompactFlags;
3988 : use crate::walrecord::NeonWalRecord;
3989 : use crate::DEFAULT_PG_VERSION;
3990 : use bytes::{Bytes, BytesMut};
3991 : use hex_literal::hex;
3992 : use itertools::Itertools;
3993 : use pageserver_api::key::{AUX_FILES_KEY, AUX_KEY_PREFIX, NON_INHERITED_RANGE};
3994 : use pageserver_api::keyspace::KeySpace;
3995 : use pageserver_api::models::{CompactionAlgorithm, CompactionAlgorithmSettings};
3996 : use rand::{thread_rng, Rng};
3997 : use storage_layer::PersistentLayerKey;
3998 : use tests::storage_layer::ValuesReconstructState;
3999 : use tests::timeline::{GetVectoredError, ShutdownMode};
4000 : use timeline::GcInfo;
4001 : use utils::bin_ser::BeSer;
4002 : use utils::id::TenantId;
4003 :
4004 : static TEST_KEY: Lazy<Key> =
4005 18 : Lazy::new(|| Key::from_slice(&hex!("010000000033333333444444445500000001")));
4006 :
4007 : #[tokio::test]
4008 2 : async fn test_basic() -> anyhow::Result<()> {
4009 8 : let (tenant, ctx) = TenantHarness::create("test_basic")?.load().await;
4010 2 : let tline = tenant
4011 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
4012 6 : .await?;
4013 2 :
4014 2 : let mut writer = tline.writer().await;
4015 2 : writer
4016 2 : .put(
4017 2 : *TEST_KEY,
4018 2 : Lsn(0x10),
4019 2 : &Value::Image(test_img("foo at 0x10")),
4020 2 : &ctx,
4021 2 : )
4022 2 : .await?;
4023 2 : writer.finish_write(Lsn(0x10));
4024 2 : drop(writer);
4025 2 :
4026 2 : let mut writer = tline.writer().await;
4027 2 : writer
4028 2 : .put(
4029 2 : *TEST_KEY,
4030 2 : Lsn(0x20),
4031 2 : &Value::Image(test_img("foo at 0x20")),
4032 2 : &ctx,
4033 2 : )
4034 2 : .await?;
4035 2 : writer.finish_write(Lsn(0x20));
4036 2 : drop(writer);
4037 2 :
4038 2 : assert_eq!(
4039 2 : tline.get(*TEST_KEY, Lsn(0x10), &ctx).await?,
4040 2 : test_img("foo at 0x10")
4041 2 : );
4042 2 : assert_eq!(
4043 2 : tline.get(*TEST_KEY, Lsn(0x1f), &ctx).await?,
4044 2 : test_img("foo at 0x10")
4045 2 : );
4046 2 : assert_eq!(
4047 2 : tline.get(*TEST_KEY, Lsn(0x20), &ctx).await?,
4048 2 : test_img("foo at 0x20")
4049 2 : );
4050 2 :
4051 2 : Ok(())
4052 2 : }
4053 :
4054 : #[tokio::test]
4055 2 : async fn no_duplicate_timelines() -> anyhow::Result<()> {
4056 2 : let (tenant, ctx) = TenantHarness::create("no_duplicate_timelines")?
4057 2 : .load()
4058 8 : .await;
4059 2 : let _ = tenant
4060 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4061 6 : .await?;
4062 2 :
4063 2 : match tenant
4064 2 : .create_empty_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4065 2 : .await
4066 2 : {
4067 2 : Ok(_) => panic!("duplicate timeline creation should fail"),
4068 2 : Err(e) => assert_eq!(e.to_string(), "Already exists".to_string()),
4069 2 : }
4070 2 :
4071 2 : Ok(())
4072 2 : }
4073 :
4074 : /// Convenience function to create a page image with given string as the only content
4075 10 : pub fn test_value(s: &str) -> Value {
4076 10 : let mut buf = BytesMut::new();
4077 10 : buf.extend_from_slice(s.as_bytes());
4078 10 : Value::Image(buf.freeze())
4079 10 : }
4080 :
4081 : ///
4082 : /// Test branch creation
4083 : ///
4084 : #[tokio::test]
4085 2 : async fn test_branch() -> anyhow::Result<()> {
4086 2 : use std::str::from_utf8;
4087 2 :
4088 5 : let (tenant, ctx) = TenantHarness::create("test_branch")?.load().await;
4089 2 : let tline = tenant
4090 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4091 6 : .await?;
4092 2 : let mut writer = tline.writer().await;
4093 2 :
4094 2 : #[allow(non_snake_case)]
4095 2 : let TEST_KEY_A: Key = Key::from_hex("110000000033333333444444445500000001").unwrap();
4096 2 : #[allow(non_snake_case)]
4097 2 : let TEST_KEY_B: Key = Key::from_hex("110000000033333333444444445500000002").unwrap();
4098 2 :
4099 2 : // Insert a value on the timeline
4100 2 : writer
4101 2 : .put(TEST_KEY_A, Lsn(0x20), &test_value("foo at 0x20"), &ctx)
4102 2 : .await?;
4103 2 : writer
4104 2 : .put(TEST_KEY_B, Lsn(0x20), &test_value("foobar at 0x20"), &ctx)
4105 2 : .await?;
4106 2 : writer.finish_write(Lsn(0x20));
4107 2 :
4108 2 : writer
4109 2 : .put(TEST_KEY_A, Lsn(0x30), &test_value("foo at 0x30"), &ctx)
4110 2 : .await?;
4111 2 : writer.finish_write(Lsn(0x30));
4112 2 : writer
4113 2 : .put(TEST_KEY_A, Lsn(0x40), &test_value("foo at 0x40"), &ctx)
4114 2 : .await?;
4115 2 : writer.finish_write(Lsn(0x40));
4116 2 :
4117 2 : //assert_current_logical_size(&tline, Lsn(0x40));
4118 2 :
4119 2 : // Branch the history, modify relation differently on the new timeline
4120 2 : tenant
4121 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x30)), &ctx)
4122 2 : .await?;
4123 2 : let newtline = tenant
4124 2 : .get_timeline(NEW_TIMELINE_ID, true)
4125 2 : .expect("Should have a local timeline");
4126 2 : let mut new_writer = newtline.writer().await;
4127 2 : new_writer
4128 2 : .put(TEST_KEY_A, Lsn(0x40), &test_value("bar at 0x40"), &ctx)
4129 2 : .await?;
4130 2 : new_writer.finish_write(Lsn(0x40));
4131 2 :
4132 2 : // Check page contents on both branches
4133 2 : assert_eq!(
4134 2 : from_utf8(&tline.get(TEST_KEY_A, Lsn(0x40), &ctx).await?)?,
4135 2 : "foo at 0x40"
4136 2 : );
4137 2 : assert_eq!(
4138 2 : from_utf8(&newtline.get(TEST_KEY_A, Lsn(0x40), &ctx).await?)?,
4139 2 : "bar at 0x40"
4140 2 : );
4141 2 : assert_eq!(
4142 2 : from_utf8(&newtline.get(TEST_KEY_B, Lsn(0x40), &ctx).await?)?,
4143 2 : "foobar at 0x20"
4144 2 : );
4145 2 :
4146 2 : //assert_current_logical_size(&tline, Lsn(0x40));
4147 2 :
4148 2 : Ok(())
4149 2 : }
4150 :
4151 20 : async fn make_some_layers(
4152 20 : tline: &Timeline,
4153 20 : start_lsn: Lsn,
4154 20 : ctx: &RequestContext,
4155 20 : ) -> anyhow::Result<()> {
4156 20 : let mut lsn = start_lsn;
4157 : {
4158 20 : let mut writer = tline.writer().await;
4159 : // Create a relation on the timeline
4160 20 : writer
4161 20 : .put(
4162 20 : *TEST_KEY,
4163 20 : lsn,
4164 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4165 20 : ctx,
4166 20 : )
4167 10 : .await?;
4168 20 : writer.finish_write(lsn);
4169 20 : lsn += 0x10;
4170 20 : writer
4171 20 : .put(
4172 20 : *TEST_KEY,
4173 20 : lsn,
4174 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4175 20 : ctx,
4176 20 : )
4177 0 : .await?;
4178 20 : writer.finish_write(lsn);
4179 20 : lsn += 0x10;
4180 20 : }
4181 20 : tline.freeze_and_flush().await?;
4182 : {
4183 20 : let mut writer = tline.writer().await;
4184 20 : writer
4185 20 : .put(
4186 20 : *TEST_KEY,
4187 20 : lsn,
4188 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4189 20 : ctx,
4190 20 : )
4191 10 : .await?;
4192 20 : writer.finish_write(lsn);
4193 20 : lsn += 0x10;
4194 20 : writer
4195 20 : .put(
4196 20 : *TEST_KEY,
4197 20 : lsn,
4198 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4199 20 : ctx,
4200 20 : )
4201 0 : .await?;
4202 20 : writer.finish_write(lsn);
4203 20 : }
4204 20 : tline.freeze_and_flush().await.map_err(|e| e.into())
4205 20 : }
4206 :
4207 : #[tokio::test]
4208 2 : async fn test_prohibit_branch_creation_on_garbage_collected_data() -> anyhow::Result<()> {
4209 2 : let (tenant, ctx) =
4210 2 : TenantHarness::create("test_prohibit_branch_creation_on_garbage_collected_data")?
4211 2 : .load()
4212 8 : .await;
4213 2 : let tline = tenant
4214 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4215 6 : .await?;
4216 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4217 2 :
4218 2 : // this removes layers before lsn 40 (50 minus 10), so there are two remaining layers, image and delta for 31-50
4219 2 : // FIXME: this doesn't actually remove any layer currently, given how the flushing
4220 2 : // and compaction work. But it does set the 'cutoff' point so that the cross check
4221 2 : // below should fail.
4222 2 : tenant
4223 2 : .gc_iteration(
4224 2 : Some(TIMELINE_ID),
4225 2 : 0x10,
4226 2 : Duration::ZERO,
4227 2 : &CancellationToken::new(),
4228 2 : &ctx,
4229 2 : )
4230 2 : .await?;
4231 2 :
4232 2 : // try to branch at lsn 25, should fail because we already garbage collected the data
4233 2 : match tenant
4234 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x25)), &ctx)
4235 2 : .await
4236 2 : {
4237 2 : Ok(_) => panic!("branching should have failed"),
4238 2 : Err(err) => {
4239 2 : let CreateTimelineError::AncestorLsn(err) = err else {
4240 2 : panic!("wrong error type")
4241 2 : };
4242 2 : assert!(err.to_string().contains("invalid branch start lsn"));
4243 2 : assert!(err
4244 2 : .source()
4245 2 : .unwrap()
4246 2 : .to_string()
4247 2 : .contains("we might've already garbage collected needed data"))
4248 2 : }
4249 2 : }
4250 2 :
4251 2 : Ok(())
4252 2 : }
4253 :
4254 : #[tokio::test]
4255 2 : async fn test_prohibit_branch_creation_on_pre_initdb_lsn() -> anyhow::Result<()> {
4256 2 : let (tenant, ctx) =
4257 2 : TenantHarness::create("test_prohibit_branch_creation_on_pre_initdb_lsn")?
4258 2 : .load()
4259 8 : .await;
4260 2 :
4261 2 : let tline = tenant
4262 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x50), DEFAULT_PG_VERSION, &ctx)
4263 6 : .await?;
4264 2 : // try to branch at lsn 0x25, should fail because initdb lsn is 0x50
4265 2 : match tenant
4266 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x25)), &ctx)
4267 2 : .await
4268 2 : {
4269 2 : Ok(_) => panic!("branching should have failed"),
4270 2 : Err(err) => {
4271 2 : let CreateTimelineError::AncestorLsn(err) = err else {
4272 2 : panic!("wrong error type");
4273 2 : };
4274 2 : assert!(&err.to_string().contains("invalid branch start lsn"));
4275 2 : assert!(&err
4276 2 : .source()
4277 2 : .unwrap()
4278 2 : .to_string()
4279 2 : .contains("is earlier than latest GC horizon"));
4280 2 : }
4281 2 : }
4282 2 :
4283 2 : Ok(())
4284 2 : }
4285 :
4286 : /*
4287 : // FIXME: This currently fails to error out. Calling GC doesn't currently
4288 : // remove the old value, we'd need to work a little harder
4289 : #[tokio::test]
4290 : async fn test_prohibit_get_for_garbage_collected_data() -> anyhow::Result<()> {
4291 : let repo =
4292 : RepoHarness::create("test_prohibit_get_for_garbage_collected_data")?
4293 : .load();
4294 :
4295 : let tline = repo.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
4296 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4297 :
4298 : repo.gc_iteration(Some(TIMELINE_ID), 0x10, Duration::ZERO)?;
4299 : let latest_gc_cutoff_lsn = tline.get_latest_gc_cutoff_lsn();
4300 : assert!(*latest_gc_cutoff_lsn > Lsn(0x25));
4301 : match tline.get(*TEST_KEY, Lsn(0x25)) {
4302 : Ok(_) => panic!("request for page should have failed"),
4303 : Err(err) => assert!(err.to_string().contains("not found at")),
4304 : }
4305 : Ok(())
4306 : }
4307 : */
4308 :
4309 : #[tokio::test]
4310 2 : async fn test_get_branchpoints_from_an_inactive_timeline() -> anyhow::Result<()> {
4311 2 : let (tenant, ctx) =
4312 2 : TenantHarness::create("test_get_branchpoints_from_an_inactive_timeline")?
4313 2 : .load()
4314 8 : .await;
4315 2 : let tline = tenant
4316 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4317 5 : .await?;
4318 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4319 2 :
4320 2 : tenant
4321 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4322 2 : .await?;
4323 2 : let newtline = tenant
4324 2 : .get_timeline(NEW_TIMELINE_ID, true)
4325 2 : .expect("Should have a local timeline");
4326 2 :
4327 6 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
4328 2 :
4329 2 : tline.set_broken("test".to_owned());
4330 2 :
4331 2 : tenant
4332 2 : .gc_iteration(
4333 2 : Some(TIMELINE_ID),
4334 2 : 0x10,
4335 2 : Duration::ZERO,
4336 2 : &CancellationToken::new(),
4337 2 : &ctx,
4338 2 : )
4339 2 : .await?;
4340 2 :
4341 2 : // The branchpoints should contain all timelines, even ones marked
4342 2 : // as Broken.
4343 2 : {
4344 2 : let branchpoints = &tline.gc_info.read().unwrap().retain_lsns;
4345 2 : assert_eq!(branchpoints.len(), 1);
4346 2 : assert_eq!(branchpoints[0], Lsn(0x40));
4347 2 : }
4348 2 :
4349 2 : // You can read the key from the child branch even though the parent is
4350 2 : // Broken, as long as you don't need to access data from the parent.
4351 2 : assert_eq!(
4352 4 : newtline.get(*TEST_KEY, Lsn(0x70), &ctx).await?,
4353 2 : test_img(&format!("foo at {}", Lsn(0x70)))
4354 2 : );
4355 2 :
4356 2 : // This needs to traverse to the parent, and fails.
4357 2 : let err = newtline.get(*TEST_KEY, Lsn(0x50), &ctx).await.unwrap_err();
4358 2 : assert!(err.to_string().starts_with(&format!(
4359 2 : "Bad state on timeline {}: Broken",
4360 2 : tline.timeline_id
4361 2 : )));
4362 2 :
4363 2 : Ok(())
4364 2 : }
4365 :
4366 : #[tokio::test]
4367 2 : async fn test_retain_data_in_parent_which_is_needed_for_child() -> anyhow::Result<()> {
4368 2 : let (tenant, ctx) =
4369 2 : TenantHarness::create("test_retain_data_in_parent_which_is_needed_for_child")?
4370 2 : .load()
4371 8 : .await;
4372 2 : let tline = tenant
4373 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4374 6 : .await?;
4375 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4376 2 :
4377 2 : tenant
4378 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4379 2 : .await?;
4380 2 : let newtline = tenant
4381 2 : .get_timeline(NEW_TIMELINE_ID, true)
4382 2 : .expect("Should have a local timeline");
4383 2 : // this removes layers before lsn 40 (50 minus 10), so there are two remaining layers, image and delta for 31-50
4384 2 : tenant
4385 2 : .gc_iteration(
4386 2 : Some(TIMELINE_ID),
4387 2 : 0x10,
4388 2 : Duration::ZERO,
4389 2 : &CancellationToken::new(),
4390 2 : &ctx,
4391 2 : )
4392 2 : .await?;
4393 4 : assert!(newtline.get(*TEST_KEY, Lsn(0x25), &ctx).await.is_ok());
4394 2 :
4395 2 : Ok(())
4396 2 : }
4397 : #[tokio::test]
4398 2 : async fn test_parent_keeps_data_forever_after_branching() -> anyhow::Result<()> {
4399 2 : let (tenant, ctx) =
4400 2 : TenantHarness::create("test_parent_keeps_data_forever_after_branching")?
4401 2 : .load()
4402 8 : .await;
4403 2 : let tline = tenant
4404 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4405 6 : .await?;
4406 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4407 2 :
4408 2 : tenant
4409 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4410 2 : .await?;
4411 2 : let newtline = tenant
4412 2 : .get_timeline(NEW_TIMELINE_ID, true)
4413 2 : .expect("Should have a local timeline");
4414 2 :
4415 6 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
4416 2 :
4417 2 : // run gc on parent
4418 2 : tenant
4419 2 : .gc_iteration(
4420 2 : Some(TIMELINE_ID),
4421 2 : 0x10,
4422 2 : Duration::ZERO,
4423 2 : &CancellationToken::new(),
4424 2 : &ctx,
4425 2 : )
4426 2 : .await?;
4427 2 :
4428 2 : // Check that the data is still accessible on the branch.
4429 2 : assert_eq!(
4430 7 : newtline.get(*TEST_KEY, Lsn(0x50), &ctx).await?,
4431 2 : test_img(&format!("foo at {}", Lsn(0x40)))
4432 2 : );
4433 2 :
4434 2 : Ok(())
4435 2 : }
4436 :
4437 : #[tokio::test]
4438 2 : async fn timeline_load() -> anyhow::Result<()> {
4439 2 : const TEST_NAME: &str = "timeline_load";
4440 2 : let harness = TenantHarness::create(TEST_NAME)?;
4441 2 : {
4442 8 : let (tenant, ctx) = harness.load().await;
4443 2 : let tline = tenant
4444 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x7000), DEFAULT_PG_VERSION, &ctx)
4445 6 : .await?;
4446 6 : make_some_layers(tline.as_ref(), Lsn(0x8000), &ctx).await?;
4447 2 : // so that all uploads finish & we can call harness.load() below again
4448 2 : tenant
4449 2 : .shutdown(Default::default(), ShutdownMode::FreezeAndFlush)
4450 2 : .instrument(harness.span())
4451 2 : .await
4452 2 : .ok()
4453 2 : .unwrap();
4454 2 : }
4455 2 :
4456 7 : let (tenant, _ctx) = harness.load().await;
4457 2 : tenant
4458 2 : .get_timeline(TIMELINE_ID, true)
4459 2 : .expect("cannot load timeline");
4460 2 :
4461 2 : Ok(())
4462 2 : }
4463 :
4464 : #[tokio::test]
4465 2 : async fn timeline_load_with_ancestor() -> anyhow::Result<()> {
4466 2 : const TEST_NAME: &str = "timeline_load_with_ancestor";
4467 2 : let harness = TenantHarness::create(TEST_NAME)?;
4468 2 : // create two timelines
4469 2 : {
4470 8 : let (tenant, ctx) = harness.load().await;
4471 2 : let tline = tenant
4472 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4473 6 : .await?;
4474 2 :
4475 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4476 2 :
4477 2 : let child_tline = tenant
4478 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4479 2 : .await?;
4480 2 : child_tline.set_state(TimelineState::Active);
4481 2 :
4482 2 : let newtline = tenant
4483 2 : .get_timeline(NEW_TIMELINE_ID, true)
4484 2 : .expect("Should have a local timeline");
4485 2 :
4486 6 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
4487 2 :
4488 2 : // so that all uploads finish & we can call harness.load() below again
4489 2 : tenant
4490 2 : .shutdown(Default::default(), ShutdownMode::FreezeAndFlush)
4491 2 : .instrument(harness.span())
4492 4 : .await
4493 2 : .ok()
4494 2 : .unwrap();
4495 2 : }
4496 2 :
4497 2 : // check that both of them are initially unloaded
4498 12 : let (tenant, _ctx) = harness.load().await;
4499 2 :
4500 2 : // check that both, child and ancestor are loaded
4501 2 : let _child_tline = tenant
4502 2 : .get_timeline(NEW_TIMELINE_ID, true)
4503 2 : .expect("cannot get child timeline loaded");
4504 2 :
4505 2 : let _ancestor_tline = tenant
4506 2 : .get_timeline(TIMELINE_ID, true)
4507 2 : .expect("cannot get ancestor timeline loaded");
4508 2 :
4509 2 : Ok(())
4510 2 : }
4511 :
4512 : #[tokio::test]
4513 2 : async fn delta_layer_dumping() -> anyhow::Result<()> {
4514 2 : use storage_layer::AsLayerDesc;
4515 8 : let (tenant, ctx) = TenantHarness::create("test_layer_dumping")?.load().await;
4516 2 : let tline = tenant
4517 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4518 6 : .await?;
4519 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4520 2 :
4521 2 : let layer_map = tline.layers.read().await;
4522 2 : let level0_deltas = layer_map
4523 2 : .layer_map()
4524 2 : .get_level0_deltas()?
4525 2 : .into_iter()
4526 4 : .map(|desc| layer_map.get_from_desc(&desc))
4527 2 : .collect::<Vec<_>>();
4528 2 :
4529 2 : assert!(!level0_deltas.is_empty());
4530 2 :
4531 6 : for delta in level0_deltas {
4532 2 : // Ensure we are dumping a delta layer here
4533 4 : assert!(delta.layer_desc().is_delta);
4534 8 : delta.dump(true, &ctx).await.unwrap();
4535 2 : }
4536 2 :
4537 2 : Ok(())
4538 2 : }
4539 :
4540 : #[tokio::test]
4541 2 : async fn test_images() -> anyhow::Result<()> {
4542 8 : let (tenant, ctx) = TenantHarness::create("test_images")?.load().await;
4543 2 : let tline = tenant
4544 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
4545 5 : .await?;
4546 2 :
4547 2 : let mut writer = tline.writer().await;
4548 2 : writer
4549 2 : .put(
4550 2 : *TEST_KEY,
4551 2 : Lsn(0x10),
4552 2 : &Value::Image(test_img("foo at 0x10")),
4553 2 : &ctx,
4554 2 : )
4555 2 : .await?;
4556 2 : writer.finish_write(Lsn(0x10));
4557 2 : drop(writer);
4558 2 :
4559 2 : tline.freeze_and_flush().await?;
4560 2 : tline
4561 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4562 2 : .await?;
4563 2 :
4564 2 : let mut writer = tline.writer().await;
4565 2 : writer
4566 2 : .put(
4567 2 : *TEST_KEY,
4568 2 : Lsn(0x20),
4569 2 : &Value::Image(test_img("foo at 0x20")),
4570 2 : &ctx,
4571 2 : )
4572 2 : .await?;
4573 2 : writer.finish_write(Lsn(0x20));
4574 2 : drop(writer);
4575 2 :
4576 2 : tline.freeze_and_flush().await?;
4577 2 : tline
4578 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4579 2 : .await?;
4580 2 :
4581 2 : let mut writer = tline.writer().await;
4582 2 : writer
4583 2 : .put(
4584 2 : *TEST_KEY,
4585 2 : Lsn(0x30),
4586 2 : &Value::Image(test_img("foo at 0x30")),
4587 2 : &ctx,
4588 2 : )
4589 2 : .await?;
4590 2 : writer.finish_write(Lsn(0x30));
4591 2 : drop(writer);
4592 2 :
4593 2 : tline.freeze_and_flush().await?;
4594 2 : tline
4595 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4596 2 : .await?;
4597 2 :
4598 2 : let mut writer = tline.writer().await;
4599 2 : writer
4600 2 : .put(
4601 2 : *TEST_KEY,
4602 2 : Lsn(0x40),
4603 2 : &Value::Image(test_img("foo at 0x40")),
4604 2 : &ctx,
4605 2 : )
4606 2 : .await?;
4607 2 : writer.finish_write(Lsn(0x40));
4608 2 : drop(writer);
4609 2 :
4610 2 : tline.freeze_and_flush().await?;
4611 2 : tline
4612 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4613 2 : .await?;
4614 2 :
4615 2 : assert_eq!(
4616 4 : tline.get(*TEST_KEY, Lsn(0x10), &ctx).await?,
4617 2 : test_img("foo at 0x10")
4618 2 : );
4619 2 : assert_eq!(
4620 3 : tline.get(*TEST_KEY, Lsn(0x1f), &ctx).await?,
4621 2 : test_img("foo at 0x10")
4622 2 : );
4623 2 : assert_eq!(
4624 2 : tline.get(*TEST_KEY, Lsn(0x20), &ctx).await?,
4625 2 : test_img("foo at 0x20")
4626 2 : );
4627 2 : assert_eq!(
4628 4 : tline.get(*TEST_KEY, Lsn(0x30), &ctx).await?,
4629 2 : test_img("foo at 0x30")
4630 2 : );
4631 2 : assert_eq!(
4632 4 : tline.get(*TEST_KEY, Lsn(0x40), &ctx).await?,
4633 2 : test_img("foo at 0x40")
4634 2 : );
4635 2 :
4636 2 : Ok(())
4637 2 : }
4638 :
4639 4 : async fn bulk_insert_compact_gc(
4640 4 : tenant: &Tenant,
4641 4 : timeline: &Arc<Timeline>,
4642 4 : ctx: &RequestContext,
4643 4 : lsn: Lsn,
4644 4 : repeat: usize,
4645 4 : key_count: usize,
4646 4 : ) -> anyhow::Result<()> {
4647 4 : let compact = true;
4648 72773 : bulk_insert_maybe_compact_gc(tenant, timeline, ctx, lsn, repeat, key_count, compact).await
4649 4 : }
4650 :
4651 8 : async fn bulk_insert_maybe_compact_gc(
4652 8 : tenant: &Tenant,
4653 8 : timeline: &Arc<Timeline>,
4654 8 : ctx: &RequestContext,
4655 8 : mut lsn: Lsn,
4656 8 : repeat: usize,
4657 8 : key_count: usize,
4658 8 : compact: bool,
4659 8 : ) -> anyhow::Result<()> {
4660 8 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
4661 8 : let mut blknum = 0;
4662 8 :
4663 8 : // Enforce that key range is monotonously increasing
4664 8 : let mut keyspace = KeySpaceAccum::new();
4665 8 :
4666 8 : let cancel = CancellationToken::new();
4667 8 :
4668 8 : for _ in 0..repeat {
4669 400 : for _ in 0..key_count {
4670 4000000 : test_key.field6 = blknum;
4671 4000000 : let mut writer = timeline.writer().await;
4672 4000000 : writer
4673 4000000 : .put(
4674 4000000 : test_key,
4675 4000000 : lsn,
4676 4000000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
4677 4000000 : ctx,
4678 4000000 : )
4679 2600 : .await?;
4680 4000000 : writer.finish_write(lsn);
4681 4000000 : drop(writer);
4682 4000000 :
4683 4000000 : keyspace.add_key(test_key);
4684 4000000 :
4685 4000000 : lsn = Lsn(lsn.0 + 0x10);
4686 4000000 : blknum += 1;
4687 : }
4688 :
4689 400 : timeline.freeze_and_flush().await?;
4690 400 : if compact {
4691 : // this requires timeline to be &Arc<Timeline>
4692 40174 : timeline.compact(&cancel, EnumSet::empty(), ctx).await?;
4693 200 : }
4694 :
4695 : // this doesn't really need to target the timeline_id, but it is closer to what the
4696 : // test originally did.
4697 400 : let res = tenant
4698 400 : .gc_iteration(Some(timeline.timeline_id), 0, Duration::ZERO, &cancel, ctx)
4699 399 : .await?;
4700 :
4701 400 : assert_eq!(res.layers_removed, 0, "this never removes anything");
4702 : }
4703 :
4704 8 : Ok(())
4705 8 : }
4706 :
4707 : //
4708 : // Insert 1000 key-value pairs with increasing keys, flush, compact, GC.
4709 : // Repeat 50 times.
4710 : //
4711 : #[tokio::test]
4712 2 : async fn test_bulk_insert() -> anyhow::Result<()> {
4713 2 : let harness = TenantHarness::create("test_bulk_insert")?;
4714 8 : let (tenant, ctx) = harness.load().await;
4715 2 : let tline = tenant
4716 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
4717 5 : .await?;
4718 2 :
4719 2 : let lsn = Lsn(0x10);
4720 36386 : bulk_insert_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000).await?;
4721 2 :
4722 2 : Ok(())
4723 2 : }
4724 :
4725 : // Test the vectored get real implementation against a simple sequential implementation.
4726 : //
4727 : // The test generates a keyspace by repeatedly flushing the in-memory layer and compacting.
4728 : // Projected to 2D the key space looks like below. Lsn grows upwards on the Y axis and keys
4729 : // grow to the right on the X axis.
4730 : // [Delta]
4731 : // [Delta]
4732 : // [Delta]
4733 : // [Delta]
4734 : // ------------ Image ---------------
4735 : //
4736 : // After layer generation we pick the ranges to query as follows:
4737 : // 1. The beginning of each delta layer
4738 : // 2. At the seam between two adjacent delta layers
4739 : //
4740 : // There's one major downside to this test: delta layers only contain images,
4741 : // so the search can stop at the first delta layer and doesn't traverse any deeper.
4742 : #[tokio::test]
4743 2 : async fn test_get_vectored() -> anyhow::Result<()> {
4744 2 : let harness = TenantHarness::create("test_get_vectored")?;
4745 8 : let (tenant, ctx) = harness.load().await;
4746 2 : let tline = tenant
4747 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
4748 5 : .await?;
4749 2 :
4750 2 : let lsn = Lsn(0x10);
4751 36387 : bulk_insert_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000).await?;
4752 2 :
4753 2 : let guard = tline.layers.read().await;
4754 2 : guard.layer_map().dump(true, &ctx).await?;
4755 2 :
4756 2 : let mut reads = Vec::new();
4757 2 : let mut prev = None;
4758 12 : guard.layer_map().iter_historic_layers().for_each(|desc| {
4759 12 : if !desc.is_delta() {
4760 2 : prev = Some(desc.clone());
4761 2 : return;
4762 10 : }
4763 10 :
4764 10 : let start = desc.key_range.start;
4765 10 : let end = desc
4766 10 : .key_range
4767 10 : .start
4768 10 : .add(Timeline::MAX_GET_VECTORED_KEYS.try_into().unwrap());
4769 10 : reads.push(KeySpace {
4770 10 : ranges: vec![start..end],
4771 10 : });
4772 2 :
4773 10 : if let Some(prev) = &prev {
4774 10 : if !prev.is_delta() {
4775 10 : return;
4776 2 : }
4777 0 :
4778 0 : let first_range = Key {
4779 0 : field6: prev.key_range.end.field6 - 4,
4780 0 : ..prev.key_range.end
4781 0 : }..prev.key_range.end;
4782 0 :
4783 0 : let second_range = desc.key_range.start..Key {
4784 0 : field6: desc.key_range.start.field6 + 4,
4785 0 : ..desc.key_range.start
4786 0 : };
4787 0 :
4788 0 : reads.push(KeySpace {
4789 0 : ranges: vec![first_range, second_range],
4790 0 : });
4791 2 : };
4792 2 :
4793 2 : prev = Some(desc.clone());
4794 12 : });
4795 2 :
4796 2 : drop(guard);
4797 2 :
4798 2 : // Pick a big LSN such that we query over all the changes.
4799 2 : let reads_lsn = Lsn(u64::MAX - 1);
4800 2 :
4801 12 : for read in reads {
4802 10 : info!("Doing vectored read on {:?}", read);
4803 2 :
4804 10 : let vectored_res = tline
4805 10 : .get_vectored_impl(
4806 10 : read.clone(),
4807 10 : reads_lsn,
4808 10 : &mut ValuesReconstructState::new(),
4809 10 : &ctx,
4810 10 : )
4811 25 : .await;
4812 10 : tline
4813 10 : .validate_get_vectored_impl(&vectored_res, read, reads_lsn, &ctx)
4814 19 : .await;
4815 2 : }
4816 2 :
4817 2 : Ok(())
4818 2 : }
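 : 
 :     // Editor's sketch (hypothetical helper, std-only; not the LayerMap API):
 :     // the read-range selection described above, projected onto plain u32 key
 :     // ranges: one probe at the start of each delta layer, plus one straddling
 :     // the seam between two adjacent delta layers.
 :     fn pick_read_ranges(
 :         delta_layers: &[std::ops::Range<u32>],
 :         probe_width: u32,
 :     ) -> Vec<Vec<std::ops::Range<u32>>> {
 :         let mut reads = Vec::new();
 :         let mut prev: Option<&std::ops::Range<u32>> = None;
 :         for layer in delta_layers {
 :             // 1. The beginning of each delta layer.
 :             reads.push(vec![layer.start..layer.start + probe_width]);
 :             // 2. The seam between two adjacent delta layers: a short range at
 :             //    the end of the previous layer plus one at the start of this one.
 :             if let Some(prev) = prev {
 :                 reads.push(vec![
 :                     prev.end.saturating_sub(probe_width)..prev.end,
 :                     layer.start..layer.start + probe_width,
 :                 ]);
 :             }
 :             prev = Some(layer);
 :         }
 :         reads
 :     }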
4819 :
4820 : #[tokio::test]
4821 2 : async fn test_get_vectored_aux_files() -> anyhow::Result<()> {
4822 2 : let harness = TenantHarness::create("test_get_vectored_aux_files")?;
4823 2 :
4824 8 : let (tenant, ctx) = harness.load().await;
4825 2 : let tline = tenant
4826 2 : .create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION, &ctx)
4827 2 : .await?;
4828 2 : let tline = tline.raw_timeline().unwrap();
4829 2 :
4830 2 : let mut modification = tline.begin_modification(Lsn(0x1000));
4831 2 : modification.put_file("foo/bar1", b"content1", &ctx).await?;
4832 2 : modification.set_lsn(Lsn(0x1008))?;
4833 2 : modification.put_file("foo/bar2", b"content2", &ctx).await?;
4834 2 : modification.commit(&ctx).await?;
4835 2 :
4836 2 : let child_timeline_id = TimelineId::generate();
4837 2 : tenant
4838 2 : .branch_timeline_test(
4839 2 : tline,
4840 2 : child_timeline_id,
4841 2 : Some(tline.get_last_record_lsn()),
4842 2 : &ctx,
4843 2 : )
4844 2 : .await?;
4845 2 :
4846 2 : let child_timeline = tenant
4847 2 : .get_timeline(child_timeline_id, true)
4848 2 : .expect("Should have the branched timeline");
4849 2 :
4850 2 : let aux_keyspace = KeySpace {
4851 2 : ranges: vec![NON_INHERITED_RANGE],
4852 2 : };
4853 2 : let read_lsn = child_timeline.get_last_record_lsn();
4854 2 :
4855 2 : let vectored_res = child_timeline
4856 2 : .get_vectored_impl(
4857 2 : aux_keyspace.clone(),
4858 2 : read_lsn,
4859 2 : &mut ValuesReconstructState::new(),
4860 2 : &ctx,
4861 2 : )
4862 2 : .await;
4863 2 :
4864 2 : child_timeline
4865 2 : .validate_get_vectored_impl(&vectored_res, aux_keyspace, read_lsn, &ctx)
4866 2 : .await;
4867 2 :
4868 2 : let images = vectored_res?;
4869 2 : assert!(images.is_empty());
4870 2 : Ok(())
4871 2 : }
4872 :
4873 : // Test that vectored get handles layer gaps correctly
4874 : // by advancing into the next ancestor timeline if required.
4875 : //
4876 : // The test generates timelines that look like the diagram below.
4877 : // We leave a gap in one of the L1 layers at `gap_at_key` (`/` in the diagram).
4878 : // The reconstruct data for that key lies in the ancestor timeline (`X` in the diagram).
4879 : //
4880 : // ```
4881 : //-------------------------------+
4882 : // ... |
4883 : // [ L1 ] |
4884 : // [ / L1 ] | Child Timeline
4885 : // ... |
4886 : // ------------------------------+
4887 : // [ X L1 ] | Parent Timeline
4888 : // ------------------------------+
4889 : // ```
4890 : #[tokio::test]
4891 2 : async fn test_get_vectored_key_gap() -> anyhow::Result<()> {
4892 2 : let tenant_conf = TenantConf {
4893 2 : // Make compaction deterministic
4894 2 : gc_period: Duration::ZERO,
4895 2 : compaction_period: Duration::ZERO,
4896 2 : // Encourage creation of L1 layers
4897 2 : checkpoint_distance: 16 * 1024,
4898 2 : compaction_target_size: 8 * 1024,
4899 2 : ..TenantConf::default()
4900 2 : };
4901 2 :
4902 2 : let harness = TenantHarness::create_custom(
4903 2 : "test_get_vectored_key_gap",
4904 2 : tenant_conf,
4905 2 : TenantId::generate(),
4906 2 : ShardIdentity::unsharded(),
4907 2 : Generation::new(0xdeadbeef),
4908 2 : )?;
4909 8 : let (tenant, ctx) = harness.load().await;
4910 2 :
4911 2 : let mut current_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
4912 2 : let gap_at_key = current_key.add(100);
4913 2 : let mut current_lsn = Lsn(0x10);
4914 2 :
4915 2 : const KEY_COUNT: usize = 10_000;
4916 2 :
4917 2 : let timeline_id = TimelineId::generate();
4918 2 : let current_timeline = tenant
4919 2 : .create_test_timeline(timeline_id, current_lsn, DEFAULT_PG_VERSION, &ctx)
4920 6 : .await?;
4921 2 :
4922 2 : current_lsn += 0x100;
4923 2 :
4924 2 : let mut writer = current_timeline.writer().await;
4925 2 : writer
4926 2 : .put(
4927 2 : gap_at_key,
4928 2 : current_lsn,
4929 2 : &Value::Image(test_img(&format!("{} at {}", gap_at_key, current_lsn))),
4930 2 : &ctx,
4931 2 : )
4932 2 : .await?;
4933 2 : writer.finish_write(current_lsn);
4934 2 : drop(writer);
4935 2 :
4936 2 : let mut latest_lsns = HashMap::new();
4937 2 : latest_lsns.insert(gap_at_key, current_lsn);
4938 2 :
4939 2 : current_timeline.freeze_and_flush().await?;
4940 2 :
4941 2 : let child_timeline_id = TimelineId::generate();
4942 2 :
4943 2 : tenant
4944 2 : .branch_timeline_test(
4945 2 : ¤t_timeline,
4946 2 : child_timeline_id,
4947 2 : Some(current_lsn),
4948 2 : &ctx,
4949 2 : )
4950 2 : .await?;
4951 2 : let child_timeline = tenant
4952 2 : .get_timeline(child_timeline_id, true)
4953 2 : .expect("Should have the branched timeline");
4954 2 :
4955 20002 : for i in 0..KEY_COUNT {
4956 20000 : if current_key == gap_at_key {
4957 2 : current_key = current_key.next();
4958 2 : continue;
4959 19998 : }
4960 19998 :
4961 19998 : current_lsn += 0x10;
4962 2 :
4963 19998 : let mut writer = child_timeline.writer().await;
4964 19998 : writer
4965 19998 : .put(
4966 19998 : current_key,
4967 19998 : current_lsn,
4968 19998 : &Value::Image(test_img(&format!("{} at {}", current_key, current_lsn))),
4969 19998 : &ctx,
4970 19998 : )
4971 73 : .await?;
4972 19998 : writer.finish_write(current_lsn);
4973 19998 : drop(writer);
4974 19998 :
4975 19998 : latest_lsns.insert(current_key, current_lsn);
4976 19998 : current_key = current_key.next();
4977 19998 :
4978 19998 : // Flush every now and then to encourage layer file creation.
4979 19998 : if i % 500 == 0 {
4980 41 : child_timeline.freeze_and_flush().await?;
4981 19958 : }
4982 2 : }
4983 2 :
4984 3 : child_timeline.freeze_and_flush().await?;
4985 2 : let mut flags = EnumSet::new();
4986 2 : flags.insert(CompactFlags::ForceRepartition);
4987 2 : child_timeline
4988 2 : .compact(&CancellationToken::new(), flags, &ctx)
4989 2184 : .await?;
4990 2 :
4991 2 : let key_near_end = {
4992 2 : let mut tmp = current_key;
4993 2 : tmp.field6 -= 10;
4994 2 : tmp
4995 2 : };
4996 2 :
4997 2 : let key_near_gap = {
4998 2 : let mut tmp = gap_at_key;
4999 2 : tmp.field6 -= 10;
5000 2 : tmp
5001 2 : };
5002 2 :
5003 2 : let read = KeySpace {
5004 2 : ranges: vec![key_near_gap..gap_at_key.next(), key_near_end..current_key],
5005 2 : };
5006 2 : let results = child_timeline
5007 2 : .get_vectored_impl(
5008 2 : read.clone(),
5009 2 : current_lsn,
5010 2 : &mut ValuesReconstructState::new(),
5011 2 : &ctx,
5012 2 : )
5013 15 : .await?;
5014 2 :
5015 44 : for (key, img_res) in results {
5016 42 : let expected = test_img(&format!("{} at {}", key, latest_lsns[&key]));
5017 42 : assert_eq!(img_res?, expected);
5018 2 : }
5019 2 :
5020 2 : Ok(())
5021 2 : }
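 : 
 :     // Editor's sketch (std-only; the maps below stand in for the child and
 :     // parent timelines, names are hypothetical): the fallback behaviour the gap
 :     // test checks is simply "try the child first, descend into the ancestor on
 :     // a miss".
 :     fn get_with_ancestor_fallback<'a>(
 :         child: &'a std::collections::HashMap<u32, String>,
 :         parent: &'a std::collections::HashMap<u32, String>,
 :         key: u32,
 :     ) -> Option<&'a String> {
 :         // Only the key that falls into the child's layer gap needs the parent.
 :         child.get(&key).or_else(|| parent.get(&key))
 :     }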
5022 :
5023 : // Test that vectored get descends into ancestor timelines correctly and
5024 : // does not return an image that's newer than requested.
5025 : //
5026 : // The diagram below illustrates an interesting case. We have a parent timeline
5027 : // (top of the Lsn range) and a child timeline. The request key cannot be reconstructed
5028 : // from the child timeline, so the parent timeline must be visited. When advancing into
5029 : // the parent timeline, the read path needs to remember what the requested Lsn was in
5030 : // order to avoid returning an image that's too new. The test below constructs such
5031 : // a timeline setup and does a few queries around the Lsn of each page image.
5032 : // ```
5033 : // LSN
5034 : // ^
5035 : // |
5036 : // |
5037 : // 500 | --------------------------------------> branch point
5038 : // 400 | X
5039 : // 300 | X
5040 : // 200 | --------------------------------------> requested lsn
5041 : // 100 | X
5042 : // |---------------------------------------> Key
5043 : // |
5044 : // ------> requested key
5045 : //
5046 : // Legend:
5047 : // * X - page images
5048 : // ```
5049 : #[tokio::test]
5050 2 : async fn test_get_vectored_ancestor_descent() -> anyhow::Result<()> {
5051 2 : let harness = TenantHarness::create("test_get_vectored_on_lsn_axis")?;
5052 8 : let (tenant, ctx) = harness.load().await;
5053 2 :
5054 2 : let start_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5055 2 : let end_key = start_key.add(1000);
5056 2 : let child_gap_at_key = start_key.add(500);
5057 2 : let mut parent_gap_lsns: BTreeMap<Lsn, String> = BTreeMap::new();
5058 2 :
5059 2 : let mut current_lsn = Lsn(0x10);
5060 2 :
5061 2 : let timeline_id = TimelineId::generate();
5062 2 : let parent_timeline = tenant
5063 2 : .create_test_timeline(timeline_id, current_lsn, DEFAULT_PG_VERSION, &ctx)
5064 5 : .await?;
5065 2 :
5066 2 : current_lsn += 0x100;
5067 2 :
5068 8 : for _ in 0..3 {
5069 6 : let mut key = start_key;
5070 6006 : while key < end_key {
5071 6000 : current_lsn += 0x10;
5072 6000 :
5073 6000 : let image_value = format!("{} at {}", child_gap_at_key, current_lsn);
5074 2 :
5075 6000 : let mut writer = parent_timeline.writer().await;
5076 6000 : writer
5077 6000 : .put(
5078 6000 : key,
5079 6000 : current_lsn,
5080 6000 : &Value::Image(test_img(&image_value)),
5081 6000 : &ctx,
5082 6000 : )
5083 6 : .await?;
5084 6000 : writer.finish_write(current_lsn);
5085 6000 :
5086 6000 : if key == child_gap_at_key {
5087 6 : parent_gap_lsns.insert(current_lsn, image_value);
5088 5994 : }
5089 2 :
5090 6000 : key = key.next();
5091 2 : }
5092 2 :
5093 6 : parent_timeline.freeze_and_flush().await?;
5094 2 : }
5095 2 :
5096 2 : let child_timeline_id = TimelineId::generate();
5097 2 :
5098 2 : let child_timeline = tenant
5099 2 : .branch_timeline_test(&parent_timeline, child_timeline_id, Some(current_lsn), &ctx)
5100 2 : .await?;
5101 2 :
5102 2 : let mut key = start_key;
5103 2002 : while key < end_key {
5104 2000 : if key == child_gap_at_key {
5105 2 : key = key.next();
5106 2 : continue;
5107 1998 : }
5108 1998 :
5109 1998 : current_lsn += 0x10;
5110 2 :
5111 1998 : let mut writer = child_timeline.writer().await;
5112 1998 : writer
5113 1998 : .put(
5114 1998 : key,
5115 1998 : current_lsn,
5116 1998 : &Value::Image(test_img(&format!("{} at {}", key, current_lsn))),
5117 1998 : &ctx,
5118 1998 : )
5119 17 : .await?;
5120 1998 : writer.finish_write(current_lsn);
5121 1998 :
5122 1998 : key = key.next();
5123 2 : }
5124 2 :
5125 2 : child_timeline.freeze_and_flush().await?;
5126 2 :
5127 2 : let lsn_offsets: [i64; 5] = [-10, -1, 0, 1, 10];
5128 2 : let mut query_lsns = Vec::new();
5129 6 : for image_lsn in parent_gap_lsns.keys().rev() {
5130 36 : for offset in lsn_offsets {
5131 30 : query_lsns.push(Lsn(image_lsn
5132 30 : .0
5133 30 : .checked_add_signed(offset)
5134 30 : .expect("Shouldn't overflow")));
5135 30 : }
5136 2 : }
5137 2 :
5138 32 : for query_lsn in query_lsns {
5139 30 : let results = child_timeline
5140 30 : .get_vectored_impl(
5141 30 : KeySpace {
5142 30 : ranges: vec![child_gap_at_key..child_gap_at_key.next()],
5143 30 : },
5144 30 : query_lsn,
5145 30 : &mut ValuesReconstructState::new(),
5146 30 : &ctx,
5147 30 : )
5148 29 : .await;
5149 2 :
5150 30 : let expected_item = parent_gap_lsns
5151 30 : .iter()
5152 30 : .rev()
5153 68 : .find(|(lsn, _)| **lsn <= query_lsn);
5154 30 :
5155 30 : info!(
5156 2 : "Doing vectored read at LSN {}. Expecting image to be: {:?}",
5157 2 : query_lsn, expected_item
5158 2 : );
5159 2 :
5160 30 : match expected_item {
5161 26 : Some((_, img_value)) => {
5162 26 : let key_results = results.expect("No vectored get error expected");
5163 26 : let key_result = &key_results[&child_gap_at_key];
5164 26 : let returned_img = key_result
5165 26 : .as_ref()
5166 26 : .expect("No page reconstruct error expected");
5167 26 :
5168 26 : info!(
5169 2 : "Vectored read at LSN {} returned image {}",
5170 0 : query_lsn,
5171 0 : std::str::from_utf8(returned_img)?
5172 2 : );
5173 26 : assert_eq!(*returned_img, test_img(img_value));
5174 2 : }
5175 2 : None => {
5176 4 : assert!(matches!(results, Err(GetVectoredError::MissingKey(_))));
5177 2 : }
5178 2 : }
5179 2 : }
5180 2 :
5181 2 : Ok(())
5182 2 : }
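 : 
 :     // Editor's sketch (std-only): the expected-image lookup in the test above,
 :     // "newest image whose LSN is <= the requested LSN", is exactly the query a
 :     // BTreeMap range scan answers; an equivalent of the `iter().rev().find(...)`
 :     // used above:
 :     fn latest_image_at_or_below(
 :         images: &std::collections::BTreeMap<u64, String>,
 :         query_lsn: u64,
 :     ) -> Option<(&u64, &String)> {
 :         // Walk the ordered map down from the top of the allowed range; the
 :         // first entry found is the newest qualifying image, in O(log n).
 :         images.range(..=query_lsn).next_back()
 :     }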
5183 :
5184 : #[tokio::test]
5185 2 : async fn test_random_updates() -> anyhow::Result<()> {
5186 2 : let names_algorithms = [
5187 2 : ("test_random_updates_legacy", CompactionAlgorithm::Legacy),
5188 2 : ("test_random_updates_tiered", CompactionAlgorithm::Tiered),
5189 2 : ];
5190 6 : for (name, algorithm) in names_algorithms {
5191 17825 : test_random_updates_algorithm(name, algorithm).await?;
5192 2 : }
5193 2 : Ok(())
5194 2 : }
5195 :
5196 4 : async fn test_random_updates_algorithm(
5197 4 : name: &'static str,
5198 4 : compaction_algorithm: CompactionAlgorithm,
5199 4 : ) -> anyhow::Result<()> {
5200 4 : let mut harness = TenantHarness::create(name)?;
5201 4 : harness.tenant_conf.compaction_algorithm = CompactionAlgorithmSettings {
5202 4 : kind: compaction_algorithm,
5203 4 : };
5204 16 : let (tenant, ctx) = harness.load().await;
5205 4 : let tline = tenant
5206 4 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5207 10 : .await?;
5208 :
5209 : const NUM_KEYS: usize = 1000;
5210 4 : let cancel = CancellationToken::new();
5211 4 :
5212 4 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5213 4 : let mut test_key_end = test_key;
5214 4 : test_key_end.field6 = NUM_KEYS as u32;
5215 4 : tline.add_extra_test_dense_keyspace(KeySpace::single(test_key..test_key_end));
5216 4 :
5217 4 : let mut keyspace = KeySpaceAccum::new();
5218 4 :
5219 4 : // Track when each page was last modified. Used to assert that
5220 4 : // a read sees the latest page version.
5221 4 : let mut updated = [Lsn(0); NUM_KEYS];
5222 4 :
5223 4 : let mut lsn = Lsn(0x10);
5224 : #[allow(clippy::needless_range_loop)]
5225 4004 : for blknum in 0..NUM_KEYS {
5226 4000 : lsn = Lsn(lsn.0 + 0x10);
5227 4000 : test_key.field6 = blknum as u32;
5228 4000 : let mut writer = tline.writer().await;
5229 4000 : writer
5230 4000 : .put(
5231 4000 : test_key,
5232 4000 : lsn,
5233 4000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5234 4000 : &ctx,
5235 4000 : )
5236 4 : .await?;
5237 4000 : writer.finish_write(lsn);
5238 4000 : updated[blknum] = lsn;
5239 4000 : drop(writer);
5240 4000 :
5241 4000 : keyspace.add_key(test_key);
5242 : }
5243 :
5244 204 : for _ in 0..50 {
5245 200200 : for _ in 0..NUM_KEYS {
5246 200000 : lsn = Lsn(lsn.0 + 0x10);
5247 200000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5248 200000 : test_key.field6 = blknum as u32;
5249 200000 : let mut writer = tline.writer().await;
5250 200000 : writer
5251 200000 : .put(
5252 200000 : test_key,
5253 200000 : lsn,
5254 200000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5255 200000 : &ctx,
5256 200000 : )
5257 198 : .await?;
5258 200000 : writer.finish_write(lsn);
5259 200000 : drop(writer);
5260 200000 : updated[blknum] = lsn;
5261 : }
5262 :
5263 : // Read all the blocks
5264 200000 : for (blknum, last_lsn) in updated.iter().enumerate() {
5265 200000 : test_key.field6 = blknum as u32;
5266 200000 : assert_eq!(
5267 200000 : tline.get(test_key, lsn, &ctx).await?,
5268 200000 : test_img(&format!("{} at {}", blknum, last_lsn))
5269 : );
5270 : }
5271 :
5272 : // Perform a cycle of flush, and GC
5273 202 : tline.freeze_and_flush().await?;
5274 200 : tenant
5275 200 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
5276 175 : .await?;
5277 : }
5278 :
5279 4 : Ok(())
5280 4 : }
5281 :
5282 : #[tokio::test]
5283 2 : async fn test_traverse_branches() -> anyhow::Result<()> {
5284 2 : let (tenant, ctx) = TenantHarness::create("test_traverse_branches")?
5285 2 : .load()
5286 8 : .await;
5287 2 : let mut tline = tenant
5288 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5289 6 : .await?;
5290 2 :
5291 2 : const NUM_KEYS: usize = 1000;
5292 2 :
5293 2 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5294 2 :
5295 2 : let mut keyspace = KeySpaceAccum::new();
5296 2 :
5297 2 : let cancel = CancellationToken::new();
5298 2 :
5299 2 : // Track when each page was last modified. Used to assert that
5300 2 : // a read sees the latest page version.
5301 2 : let mut updated = [Lsn(0); NUM_KEYS];
5302 2 :
5303 2 : let mut lsn = Lsn(0x10);
5304 2 : #[allow(clippy::needless_range_loop)]
5305 2002 : for blknum in 0..NUM_KEYS {
5306 2000 : lsn = Lsn(lsn.0 + 0x10);
5307 2000 : test_key.field6 = blknum as u32;
5308 2000 : let mut writer = tline.writer().await;
5309 2000 : writer
5310 2000 : .put(
5311 2000 : test_key,
5312 2000 : lsn,
5313 2000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5314 2000 : &ctx,
5315 2000 : )
5316 2 : .await?;
5317 2000 : writer.finish_write(lsn);
5318 2000 : updated[blknum] = lsn;
5319 2000 : drop(writer);
5320 2000 :
5321 2000 : keyspace.add_key(test_key);
5322 2 : }
5323 2 :
5324 102 : for _ in 0..50 {
5325 100 : let new_tline_id = TimelineId::generate();
5326 100 : tenant
5327 100 : .branch_timeline_test(&tline, new_tline_id, Some(lsn), &ctx)
5328 2 : .await?;
5329 100 : tline = tenant
5330 100 : .get_timeline(new_tline_id, true)
5331 100 : .expect("Should have the branched timeline");
5332 2 :
5333 100100 : for _ in 0..NUM_KEYS {
5334 100000 : lsn = Lsn(lsn.0 + 0x10);
5335 100000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5336 100000 : test_key.field6 = blknum as u32;
5337 100000 : let mut writer = tline.writer().await;
5338 100000 : writer
5339 100000 : .put(
5340 100000 : test_key,
5341 100000 : lsn,
5342 100000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5343 100000 : &ctx,
5344 100000 : )
5345 822 : .await?;
5346 100000 : println!("updating {} at {}", blknum, lsn);
5347 100000 : writer.finish_write(lsn);
5348 100000 : drop(writer);
5349 100000 : updated[blknum] = lsn;
5350 2 : }
5351 2 :
5352 2 : // Read all the blocks
5353 100000 : for (blknum, last_lsn) in updated.iter().enumerate() {
5354 100000 : test_key.field6 = blknum as u32;
5355 100000 : assert_eq!(
5356 100000 : tline.get(test_key, lsn, &ctx).await?,
5357 100000 : test_img(&format!("{} at {}", blknum, last_lsn))
5358 2 : );
5359 2 : }
5360 2 :
5361 2 : // Perform a cycle of flush, compact, and GC
5362 101 : tline.freeze_and_flush().await?;
5363 13099 : tline.compact(&cancel, EnumSet::empty(), &ctx).await?;
5364 100 : tenant
5365 100 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
5366 81 : .await?;
5367 2 : }
5368 2 :
5369 2 : Ok(())
5370 2 : }
5371 :
5372 : #[tokio::test]
5373 2 : async fn test_traverse_ancestors() -> anyhow::Result<()> {
5374 2 : let (tenant, ctx) = TenantHarness::create("test_traverse_ancestors")?
5375 2 : .load()
5376 8 : .await;
5377 2 : let mut tline = tenant
5378 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5379 5 : .await?;
5380 2 :
5381 2 : const NUM_KEYS: usize = 100;
5382 2 : const NUM_TLINES: usize = 50;
5383 2 :
5384 2 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5385 2 : // Track page mutation lsns across different timelines.
5386 2 : let mut updated = [[Lsn(0); NUM_KEYS]; NUM_TLINES];
5387 2 :
5388 2 : let mut lsn = Lsn(0x10);
5389 2 :
5390 2 : #[allow(clippy::needless_range_loop)]
5391 102 : for idx in 0..NUM_TLINES {
5392 100 : let new_tline_id = TimelineId::generate();
5393 100 : tenant
5394 100 : .branch_timeline_test(&tline, new_tline_id, Some(lsn), &ctx)
5395 2 : .await?;
5396 100 : tline = tenant
5397 100 : .get_timeline(new_tline_id, true)
5398 100 : .expect("Should have the branched timeline");
5399 2 :
5400 10100 : for _ in 0..NUM_KEYS {
5401 10000 : lsn = Lsn(lsn.0 + 0x10);
5402 10000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5403 10000 : test_key.field6 = blknum as u32;
5404 10000 : let mut writer = tline.writer().await;
5405 10000 : writer
5406 10000 : .put(
5407 10000 : test_key,
5408 10000 : lsn,
5409 10000 : &Value::Image(test_img(&format!("{} {} at {}", idx, blknum, lsn))),
5410 10000 : &ctx,
5411 10000 : )
5412 88 : .await?;
5413 10000 : println!("updating [{}][{}] at {}", idx, blknum, lsn);
5414 10000 : writer.finish_write(lsn);
5415 10000 : drop(writer);
5416 10000 : updated[idx][blknum] = lsn;
5417 2 : }
5418 2 : }
5419 2 :
5420 2 : // Read pages from leaf timeline across all ancestors.
5421 100 : for (idx, lsns) in updated.iter().enumerate() {
5422 10000 : for (blknum, lsn) in lsns.iter().enumerate() {
5423 2 : // Skip empty mutations.
5424 10000 : if lsn.0 == 0 {
5425 3667 : continue;
5426 6333 : }
5427 6333 : println!("checking [{idx}][{blknum}] at {lsn}");
5428 6333 : test_key.field6 = blknum as u32;
5429 6333 : assert_eq!(
5430 6333 : tline.get(test_key, *lsn, &ctx).await?,
5431 6333 : test_img(&format!("{idx} {blknum} at {lsn}"))
5432 2 : );
5433 2 : }
5434 2 : }
5435 2 : Ok(())
5436 2 : }
5437 :
5438 : #[tokio::test]
5439 2 : async fn test_write_at_initdb_lsn_takes_optimization_code_path() -> anyhow::Result<()> {
5440 2 : let (tenant, ctx) = TenantHarness::create("test_empty_test_timeline_is_usable")?
5441 2 : .load()
5442 8 : .await;
5443 2 :
5444 2 : let initdb_lsn = Lsn(0x20);
5445 2 : let utline = tenant
5446 2 : .create_empty_timeline(TIMELINE_ID, initdb_lsn, DEFAULT_PG_VERSION, &ctx)
5447 2 : .await?;
5448 2 : let tline = utline.raw_timeline().unwrap();
5449 2 :
5450 2 : // Spawn flush loop now so that we can set the `expect_initdb_optimization`
5451 2 : tline.maybe_spawn_flush_loop();
5452 2 :
5453 2 : // Make sure the timeline has the minimum set of required keys for operation.
5454 2 : // The only operation you can always do on an empty timeline is to `put` new data.
5455 2 : // Except if you `put` at `initdb_lsn`.
5456 2 : // In that case, there's an optimization to directly create image layers instead of delta layers.
5457 2 : // It uses `repartition()`, which assumes some keys to be present.
5458 2 : // Let's make sure the test timeline can handle that case.
5459 2 : {
5460 2 : let mut state = tline.flush_loop_state.lock().unwrap();
5461 2 : assert_eq!(
5462 2 : timeline::FlushLoopState::Running {
5463 2 : expect_initdb_optimization: false,
5464 2 : initdb_optimization_count: 0,
5465 2 : },
5466 2 : *state
5467 2 : );
5468 2 : *state = timeline::FlushLoopState::Running {
5469 2 : expect_initdb_optimization: true,
5470 2 : initdb_optimization_count: 0,
5471 2 : };
5472 2 : }
5473 2 :
5474 2 : // Make writes at the initdb_lsn. When we flush it below, it should be handled by the optimization.
5475 2 : // As explained above, the optimization requires some keys to be present.
5476 2 : // As per `create_empty_timeline` documentation, use init_empty to set them.
5477 2 : // This is what `create_test_timeline` does, by the way.
5478 2 : let mut modification = tline.begin_modification(initdb_lsn);
5479 2 : modification
5480 2 : .init_empty_test_timeline()
5481 2 : .context("init_empty_test_timeline")?;
5482 2 : modification
5483 2 : .commit(&ctx)
5484 2 : .await
5485 2 : .context("commit init_empty_test_timeline modification")?;
5486 2 :
5487 2 : // Do the flush. The flush code will check the expectations that we set above.
5488 2 : tline.freeze_and_flush().await?;
5489 2 :
5490 2 : // assert freeze_and_flush exercised the initdb optimization
5491 2 : {
5492 2 : let state = tline.flush_loop_state.lock().unwrap();
5493 2 : let timeline::FlushLoopState::Running {
5494 2 : expect_initdb_optimization,
5495 2 : initdb_optimization_count,
5496 2 : } = *state
5497 2 : else {
5498 2 : panic!("unexpected state: {:?}", *state);
5499 2 : };
5500 2 : assert!(expect_initdb_optimization);
5501 2 : assert!(initdb_optimization_count > 0);
5502 2 : }
5503 2 : Ok(())
5504 2 : }
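 : 
 :     // Editor's sketch (hypothetical names, std-only): the expectation pattern
 :     // used above in miniature. The test plants an expectation in shared state,
 :     // the flush path bumps a counter when it takes the optimized path, and the
 :     // test asserts on the counter afterwards.
 :     struct SketchFlushState {
 :         expect_initdb_optimization: bool,
 :         initdb_optimization_count: usize,
 :     }
 : 
 :     fn sketch_flush(state: &std::sync::Mutex<SketchFlushState>, at_initdb_lsn: bool) {
 :         let mut state = state.lock().unwrap();
 :         if at_initdb_lsn {
 :             // The optimized path: create image layers directly.
 :             state.initdb_optimization_count += 1;
 :         } else {
 :             // The delta path; fail fast if the test expected the optimization.
 :             assert!(
 :                 !state.expect_initdb_optimization,
 :                 "expected the initdb optimization"
 :             );
 :         }
 :     }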
5505 :
5506 : #[tokio::test]
5507 2 : async fn test_create_guard_crash() -> anyhow::Result<()> {
5508 2 : let name = "test_create_guard_crash";
5509 2 : let harness = TenantHarness::create(name)?;
5510 2 : {
5511 8 : let (tenant, ctx) = harness.load().await;
5512 2 : let tline = tenant
5513 2 : .create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION, &ctx)
5514 2 : .await?;
5515 2 : // Leave the timeline ID in [`Tenant::timelines_creating`] to exclude any attempt to create it again
5516 2 : let raw_tline = tline.raw_timeline().unwrap();
5517 2 : raw_tline
5518 2 : .shutdown(super::timeline::ShutdownMode::Hard)
5519 2 : .instrument(info_span!("test_shutdown", tenant_id=%raw_tline.tenant_shard_id, shard_id=%raw_tline.tenant_shard_id.shard_slug(), timeline_id=%TIMELINE_ID))
5520 2 : .await;
5521 2 : std::mem::forget(tline);
5522 2 : }
5523 2 :
5524 8 : let (tenant, _) = harness.load().await;
5525 2 : match tenant.get_timeline(TIMELINE_ID, false) {
5526 2 : Ok(_) => panic!("timeline should've been removed during load"),
5527 2 : Err(e) => {
5528 2 : assert_eq!(
5529 2 : e,
5530 2 : GetTimelineError::NotFound {
5531 2 : tenant_id: tenant.tenant_shard_id,
5532 2 : timeline_id: TIMELINE_ID,
5533 2 : }
5534 2 : )
5535 2 : }
5536 2 : }
5537 2 :
5538 2 : assert!(!harness
5539 2 : .conf
5540 2 : .timeline_path(&tenant.tenant_shard_id, &TIMELINE_ID)
5541 2 : .exists());
5542 2 :
5543 2 : Ok(())
5544 2 : }
5545 :
5546 : #[tokio::test]
5547 2 : async fn test_read_at_max_lsn() -> anyhow::Result<()> {
5548 2 : let names_algorithms = [
5549 2 : ("test_read_at_max_lsn_legacy", CompactionAlgorithm::Legacy),
5550 2 : ("test_read_at_max_lsn_tiered", CompactionAlgorithm::Tiered),
5551 2 : ];
5552 6 : for (name, algorithm) in names_algorithms {
5553 32937 : test_read_at_max_lsn_algorithm(name, algorithm).await?;
5554 2 : }
5555 2 : Ok(())
5556 2 : }
5557 :
5558 4 : async fn test_read_at_max_lsn_algorithm(
5559 4 : name: &'static str,
5560 4 : compaction_algorithm: CompactionAlgorithm,
5561 4 : ) -> anyhow::Result<()> {
5562 4 : let mut harness = TenantHarness::create(name)?;
5563 4 : harness.tenant_conf.compaction_algorithm = CompactionAlgorithmSettings {
5564 4 : kind: compaction_algorithm,
5565 4 : };
5566 16 : let (tenant, ctx) = harness.load().await;
5567 4 : let tline = tenant
5568 4 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
5569 11 : .await?;
5570 :
5571 4 : let lsn = Lsn(0x10);
5572 4 : let compact = false;
5573 32600 : bulk_insert_maybe_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000, compact).await?;
5574 :
5575 4 : let test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5576 4 : let read_lsn = Lsn(u64::MAX - 1);
5577 :
5578 310 : let result = tline.get(test_key, read_lsn, &ctx).await;
5579 4 : assert!(result.is_ok(), "result is not Ok: {}", result.unwrap_err());
5580 :
5581 4 : Ok(())
5582 4 : }
5583 :
5584 : #[tokio::test]
5585 2 : async fn test_metadata_scan() -> anyhow::Result<()> {
5586 2 : let harness = TenantHarness::create("test_metadata_scan")?;
5587 8 : let (tenant, ctx) = harness.load().await;
5588 2 : let tline = tenant
5589 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5590 6 : .await?;
5591 2 :
5592 2 : const NUM_KEYS: usize = 1000;
5593 2 : const STEP: usize = 10000; // random update + scan base_key + idx * STEP
5594 2 :
5595 2 : let cancel = CancellationToken::new();
5596 2 :
5597 2 : let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
5598 2 : base_key.field1 = AUX_KEY_PREFIX;
5599 2 : let mut test_key = base_key;
5600 2 :
5601 2 : // Track when each page was last modified. Used to assert that
5602 2 : // a read sees the latest page version.
5603 2 : let mut updated = [Lsn(0); NUM_KEYS];
5604 2 :
5605 2 : let mut lsn = Lsn(0x10);
5606 2 : #[allow(clippy::needless_range_loop)]
5607 2002 : for blknum in 0..NUM_KEYS {
5608 2000 : lsn = Lsn(lsn.0 + 0x10);
5609 2000 : test_key.field6 = (blknum * STEP) as u32;
5610 2000 : let mut writer = tline.writer().await;
5611 2000 : writer
5612 2000 : .put(
5613 2000 : test_key,
5614 2000 : lsn,
5615 2000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5616 2000 : &ctx,
5617 2000 : )
5618 2 : .await?;
5619 2000 : writer.finish_write(lsn);
5620 2000 : updated[blknum] = lsn;
5621 2000 : drop(writer);
5622 2 : }
5623 2 :
5624 2 : let keyspace = KeySpace::single(base_key..base_key.add((NUM_KEYS * STEP) as u32));
5625 2 :
5626 24 : for iter in 0..=10 {
5627 2 : // Read all the blocks
5628 22000 : for (blknum, last_lsn) in updated.iter().enumerate() {
5629 22000 : test_key.field6 = (blknum * STEP) as u32;
5630 22000 : assert_eq!(
5631 22000 : tline.get(test_key, lsn, &ctx).await?,
5632 22000 : test_img(&format!("{} at {}", blknum, last_lsn))
5633 2 : );
5634 2 : }
5635 2 :
5636 22 : let mut cnt = 0;
5637 22000 : for (key, value) in tline
5638 22 : .get_vectored_impl(
5639 22 : keyspace.clone(),
5640 22 : lsn,
5641 22 : &mut ValuesReconstructState::default(),
5642 22 : &ctx,
5643 22 : )
5644 5559 : .await?
5645 2 : {
5646 22000 : let blknum = key.field6 as usize;
5647 22000 : let value = value?;
5648 22000 : assert!(blknum % STEP == 0);
5649 22000 : let blknum = blknum / STEP;
5650 22000 : assert_eq!(
5651 22000 : value,
5652 22000 : test_img(&format!("{} at {}", blknum, updated[blknum]))
5653 22000 : );
5654 22000 : cnt += 1;
5655 2 : }
5656 2 :
5657 22 : assert_eq!(cnt, NUM_KEYS);
5658 2 :
5659 22022 : for _ in 0..NUM_KEYS {
5660 22000 : lsn = Lsn(lsn.0 + 0x10);
5661 22000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5662 22000 : test_key.field6 = (blknum * STEP) as u32;
5663 22000 : let mut writer = tline.writer().await;
5664 22000 : writer
5665 22000 : .put(
5666 22000 : test_key,
5667 22000 : lsn,
5668 22000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5669 22000 : &ctx,
5670 22000 : )
5671 126 : .await?;
5672 22000 : writer.finish_write(lsn);
5673 22000 : drop(writer);
5674 22000 : updated[blknum] = lsn;
5675 2 : }
5676 2 :
5677 2 : // Perform two cycles of flush, compact, and GC
5678 66 : for round in 0..2 {
5679 44 : tline.freeze_and_flush().await?;
5680 44 : tline
5681 44 : .compact(
5682 44 : &cancel,
5683 44 : if iter % 5 == 0 && round == 0 {
5684 6 : let mut flags = EnumSet::new();
5685 6 : flags.insert(CompactFlags::ForceImageLayerCreation);
5686 6 : flags.insert(CompactFlags::ForceRepartition);
5687 6 : flags
5688 2 : } else {
5689 38 : EnumSet::empty()
5690 2 : },
5691 44 : &ctx,
5692 2 : )
5693 8922 : .await?;
5694 44 : tenant
5695 44 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
5696 31 : .await?;
5697 2 : }
5698 2 : }
5699 2 :
5700 2 : Ok(())
5701 2 : }
5702 :
5703 : #[tokio::test]
5704 2 : async fn test_metadata_compaction_trigger() -> anyhow::Result<()> {
5705 2 : let harness = TenantHarness::create("test_metadata_compaction_trigger")?;
5706 8 : let (tenant, ctx) = harness.load().await;
5707 2 : let tline = tenant
5708 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5709 6 : .await?;
5710 2 :
5711 2 : let cancel = CancellationToken::new();
5712 2 :
5713 2 : let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
5714 2 : base_key.field1 = AUX_KEY_PREFIX;
5715 2 : let test_key = base_key;
5716 2 : let mut lsn = Lsn(0x10);
5717 2 :
5718 42 : for _ in 0..20 {
5719 40 : lsn = Lsn(lsn.0 + 0x10);
5720 40 : let mut writer = tline.writer().await;
5721 40 : writer
5722 40 : .put(
5723 40 : test_key,
5724 40 : lsn,
5725 40 : &Value::Image(test_img(&format!("{} at {}", 0, lsn))),
5726 40 : &ctx,
5727 40 : )
5728 20 : .await?;
5729 40 : writer.finish_write(lsn);
5730 40 : drop(writer);
5731 41 : tline.freeze_and_flush().await?; // force create a delta layer
5732 2 : }
5733 2 :
5734 2 : let before_num_l0_delta_files = tline
5735 2 : .layers
5736 2 : .read()
5737 2 : .await
5738 2 : .layer_map()
5739 2 : .get_level0_deltas()?
5740 2 : .len();
5741 2 :
5742 110 : tline.compact(&cancel, EnumSet::empty(), &ctx).await?;
5743 2 :
5744 2 : let after_num_l0_delta_files = tline
5745 2 : .layers
5746 2 : .read()
5747 2 : .await
5748 2 : .layer_map()
5749 2 : .get_level0_deltas()?
5750 2 : .len();
5751 2 :
5752 2 : assert!(after_num_l0_delta_files < before_num_l0_delta_files, "after_num_l0_delta_files={after_num_l0_delta_files}, before_num_l0_delta_files={before_num_l0_delta_files}");
5753 2 :
5754 2 : assert_eq!(
5755 4 : tline.get(test_key, lsn, &ctx).await?,
5756 2 : test_img(&format!("{} at {}", 0, lsn))
5757 2 : );
5758 2 :
5759 2 : Ok(())
5760 2 : }
5761 :
5762 : #[tokio::test]
5763 2 : async fn test_branch_copies_dirty_aux_file_flag() {
5764 2 : let harness = TenantHarness::create("test_branch_copies_dirty_aux_file_flag").unwrap();
5765 2 :
5766 2 : // the default aux file policy to switch to is v1, unless set by the admins
5767 2 : assert_eq!(
5768 2 : harness.tenant_conf.switch_aux_file_policy,
5769 2 : AuxFilePolicy::V1
5770 2 : );
5771 8 : let (tenant, ctx) = harness.load().await;
5772 2 :
5773 2 : let mut lsn = Lsn(0x08);
5774 2 :
5775 2 : let tline: Arc<Timeline> = tenant
5776 2 : .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx)
5777 6 : .await
5778 2 : .unwrap();
5779 2 :
5780 2 : // no aux file is written at this point, so the persistent flag should be unset
5781 2 : assert_eq!(tline.last_aux_file_policy.load(), None);
5782 2 :
5783 2 : {
5784 2 : lsn += 8;
5785 2 : let mut modification = tline.begin_modification(lsn);
5786 2 : modification
5787 2 : .put_file("pg_logical/mappings/test1", b"first", &ctx)
5788 4 : .await
5789 2 : .unwrap();
5790 2 : modification.commit(&ctx).await.unwrap();
5791 2 : }
5792 2 :
5793 2 : // there is no tenant manager to pass the configuration through, so let's mimic it
5794 2 : tenant.set_new_location_config(
5795 2 : AttachedTenantConf::try_from(LocationConf::attached_single(
5796 2 : TenantConfOpt {
5797 2 : switch_aux_file_policy: Some(AuxFilePolicy::V2),
5798 2 : ..Default::default()
5799 2 : },
5800 2 : tenant.generation,
5801 2 : &pageserver_api::models::ShardParameters::default(),
5802 2 : ))
5803 2 : .unwrap(),
5804 2 : );
5805 2 :
5806 2 : assert_eq!(
5807 2 : tline.get_switch_aux_file_policy(),
5808 2 : AuxFilePolicy::V2,
5809 2 : "wanted state has been updated"
5810 2 : );
5811 2 : assert_eq!(
5812 2 : tline.last_aux_file_policy.load(),
5813 2 : Some(AuxFilePolicy::V1),
5814 2 : "aux file is written with switch_aux_file_policy unset (which is v1), so we should keep v1"
5815 2 : );
5816 2 :
5817 2 : // we can read everything from the storage
5818 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5819 2 : assert_eq!(
5820 2 : files.get("pg_logical/mappings/test1"),
5821 2 : Some(&bytes::Bytes::from_static(b"first"))
5822 2 : );
5823 2 :
5824 2 : {
5825 2 : lsn += 8;
5826 2 : let mut modification = tline.begin_modification(lsn);
5827 2 : modification
5828 2 : .put_file("pg_logical/mappings/test2", b"second", &ctx)
5829 2 : .await
5830 2 : .unwrap();
5831 2 : modification.commit(&ctx).await.unwrap();
5832 2 : }
5833 2 :
5834 2 : assert_eq!(
5835 2 : tline.last_aux_file_policy.load(),
5836 2 : Some(AuxFilePolicy::V1),
5837 2 : "keep v1 storage format when new files are written"
5838 2 : );
5839 2 :
5840 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5841 2 : assert_eq!(
5842 2 : files.get("pg_logical/mappings/test2"),
5843 2 : Some(&bytes::Bytes::from_static(b"second"))
5844 2 : );
5845 2 :
5846 2 : let child = tenant
5847 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(lsn), &ctx)
5848 2 : .await
5849 2 : .unwrap();
5850 2 :
5851 2 : // child copies the last flag even if that is not on remote storage yet
5852 2 : assert_eq!(child.get_switch_aux_file_policy(), AuxFilePolicy::V2);
5853 2 : assert_eq!(child.last_aux_file_policy.load(), Some(AuxFilePolicy::V1));
5854 2 :
5855 2 : let files = child.list_aux_files(lsn, &ctx).await.unwrap();
5856 2 : assert_eq!(files.get("pg_logical/mappings/test1"), None);
5857 2 : assert_eq!(files.get("pg_logical/mappings/test2"), None);
5858 2 :
5859 2 : // even if we crash here without flushing the parent timeline with its new
5860 2 : // last_aux_file_policy, we are safe, because the child was never meant to access the
5861 2 : // ancestor's files. the ancestor can even safely switch back to V1 as part of a migration.
5862 2 : }
5863 :
5864 : #[tokio::test]
5865 2 : async fn aux_file_policy_switch() {
5866 2 : let mut harness = TenantHarness::create("aux_file_policy_switch").unwrap();
5867 2 : harness.tenant_conf.switch_aux_file_policy = AuxFilePolicy::CrossValidation; // set to cross-validation mode
5868 8 : let (tenant, ctx) = harness.load().await;
5869 2 :
5870 2 : let mut lsn = Lsn(0x08);
5871 2 :
5872 2 : let tline: Arc<Timeline> = tenant
5873 2 : .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx)
5874 6 : .await
5875 2 : .unwrap();
5876 2 :
5877 2 : assert_eq!(
5878 2 : tline.last_aux_file_policy.load(),
5879 2 : None,
5880 2 : "no aux file is written so it should be unset"
5881 2 : );
5882 2 :
5883 2 : {
5884 2 : lsn += 8;
5885 2 : let mut modification = tline.begin_modification(lsn);
5886 2 : modification
5887 2 : .put_file("pg_logical/mappings/test1", b"first", &ctx)
5888 4 : .await
5889 2 : .unwrap();
5890 2 : modification.commit(&ctx).await.unwrap();
5891 2 : }
5892 2 :
5893 2 : // there is no tenant manager to pass the configuration through, so let's mimic it
5894 2 : tenant.set_new_location_config(
5895 2 : AttachedTenantConf::try_from(LocationConf::attached_single(
5896 2 : TenantConfOpt {
5897 2 : switch_aux_file_policy: Some(AuxFilePolicy::V2),
5898 2 : ..Default::default()
5899 2 : },
5900 2 : tenant.generation,
5901 2 : &pageserver_api::models::ShardParameters::default(),
5902 2 : ))
5903 2 : .unwrap(),
5904 2 : );
5905 2 :
5906 2 : assert_eq!(
5907 2 : tline.get_switch_aux_file_policy(),
5908 2 : AuxFilePolicy::V2,
5909 2 : "wanted state has been updated"
5910 2 : );
5911 2 : assert_eq!(
5912 2 : tline.last_aux_file_policy.load(),
5913 2 : Some(AuxFilePolicy::CrossValidation),
5914 2 : "dirty index_part.json reflected state is yet to be updated"
5915 2 : );
5916 2 :
5917 2 : // we can still read the v1 aux files before we ingest anything new
5918 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5919 2 : assert_eq!(
5920 2 : files.get("pg_logical/mappings/test1"),
5921 2 : Some(&bytes::Bytes::from_static(b"first"))
5922 2 : );
5923 2 :
5924 2 : {
5925 2 : lsn += 8;
5926 2 : let mut modification = tline.begin_modification(lsn);
5927 2 : modification
5928 2 : .put_file("pg_logical/mappings/test2", b"second", &ctx)
5929 2 : .await
5930 2 : .unwrap();
5931 2 : modification.commit(&ctx).await.unwrap();
5932 2 : }
5933 2 :
5934 2 : assert_eq!(
5935 2 : tline.last_aux_file_policy.load(),
5936 2 : Some(AuxFilePolicy::V2),
5937 2 : "ingesting a file should apply the wanted switch state when applicable"
5938 2 : );
5939 2 :
5940 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5941 2 : assert_eq!(
5942 2 : files.get("pg_logical/mappings/test1"),
5943 2 : Some(&bytes::Bytes::from_static(b"first")),
5944 2 : "cross validation writes to both v1 and v2 so this should be available in v2"
5945 2 : );
5946 2 : assert_eq!(
5947 2 : files.get("pg_logical/mappings/test2"),
5948 2 : Some(&bytes::Bytes::from_static(b"second"))
5949 2 : );
5950 2 :
5951 2 : // mimic again by trying to flip it from V2 to V1 (which will not be switched to while ingesting a file)
5952 2 : tenant.set_new_location_config(
5953 2 : AttachedTenantConf::try_from(LocationConf::attached_single(
5954 2 : TenantConfOpt {
5955 2 : switch_aux_file_policy: Some(AuxFilePolicy::V1),
5956 2 : ..Default::default()
5957 2 : },
5958 2 : tenant.generation,
5959 2 : &pageserver_api::models::ShardParameters::default(),
5960 2 : ))
5961 2 : .unwrap(),
5962 2 : );
5963 2 :
5964 2 : {
5965 2 : lsn += 8;
5966 2 : let mut modification = tline.begin_modification(lsn);
5967 2 : modification
5968 2 : .put_file("pg_logical/mappings/test2", b"third", &ctx)
5969 2 : .await
5970 2 : .unwrap();
5971 2 : modification.commit(&ctx).await.unwrap();
5972 2 : }
5973 2 :
5974 2 : assert_eq!(
5975 2 : tline.get_switch_aux_file_policy(),
5976 2 : AuxFilePolicy::V1,
5977 2 : "wanted state has been updated again, even if invalid request"
5978 2 : );
5979 2 :
5980 2 : assert_eq!(
5981 2 : tline.last_aux_file_policy.load(),
5982 2 : Some(AuxFilePolicy::V2),
5983 2 : "ingesting a file should apply the wanted switch state when applicable"
5984 2 : );
5985 2 :
5986 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5987 2 : assert_eq!(
5988 2 : files.get("pg_logical/mappings/test1"),
5989 2 : Some(&bytes::Bytes::from_static(b"first"))
5990 2 : );
5991 2 : assert_eq!(
5992 2 : files.get("pg_logical/mappings/test2"),
5993 2 : Some(&bytes::Bytes::from_static(b"third"))
5994 2 : );
5995 2 :
5996 2 : // mimic again by trying to flip it from V1 to V2 (which will not be switched to while ingesting a file)
5997 2 : tenant.set_new_location_config(
5998 2 : AttachedTenantConf::try_from(LocationConf::attached_single(
5999 2 : TenantConfOpt {
6000 2 : switch_aux_file_policy: Some(AuxFilePolicy::V2),
6001 2 : ..Default::default()
6002 2 : },
6003 2 : tenant.generation,
6004 2 : &pageserver_api::models::ShardParameters::default(),
6005 2 : ))
6006 2 : .unwrap(),
6007 2 : );
6008 2 :
6009 2 : {
6010 2 : lsn += 8;
6011 2 : let mut modification = tline.begin_modification(lsn);
6012 2 : modification
6013 2 : .put_file("pg_logical/mappings/test3", b"last", &ctx)
6014 2 : .await
6015 2 : .unwrap();
6016 2 : modification.commit(&ctx).await.unwrap();
6017 2 : }
6018 2 :
6019 2 : assert_eq!(tline.get_switch_aux_file_policy(), AuxFilePolicy::V2);
6020 2 :
6021 2 : assert_eq!(tline.last_aux_file_policy.load(), Some(AuxFilePolicy::V2));
6022 2 :
6023 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
6024 2 : assert_eq!(
6025 2 : files.get("pg_logical/mappings/test1"),
6026 2 : Some(&bytes::Bytes::from_static(b"first"))
6027 2 : );
6028 2 : assert_eq!(
6029 2 : files.get("pg_logical/mappings/test2"),
6030 2 : Some(&bytes::Bytes::from_static(b"third"))
6031 2 : );
6032 2 : assert_eq!(
6033 2 : files.get("pg_logical/mappings/test3"),
6034 2 : Some(&bytes::Bytes::from_static(b"last"))
6035 2 : );
6036 2 : }
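 : 
 :     // Editor's sketch (a simplified model inferred from the tests above, not
 :     // the pageserver implementation; it ignores the auto-detection of
 :     // pre-existing v1 files exercised further below): the persisted aux file
 :     // policy only changes when a new aux file is ingested, CrossValidation can
 :     // be upgraded by the wanted setting, and a committed V1/V2 choice is sticky
 :     // unless force-switched.
 :     fn sketch_policy_on_ingest(
 :         persisted: Option<AuxFilePolicy>,
 :         wanted: AuxFilePolicy,
 :     ) -> AuxFilePolicy {
 :         match persisted {
 :             // First aux file ever: the wanted policy applies directly.
 :             None => wanted,
 :             // Cross-validation mode may be upgraded by the wanted setting.
 :             Some(AuxFilePolicy::CrossValidation) => wanted,
 :             // A committed V1/V2 choice is sticky across config changes.
 :             Some(current) => current,
 :         }
 :     }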
6037 :
6038 : #[tokio::test]
6039 2 : async fn aux_file_policy_force_switch() {
6040 2 : let mut harness = TenantHarness::create("aux_file_policy_force_switch").unwrap();
6041 2 : harness.tenant_conf.switch_aux_file_policy = AuxFilePolicy::V1;
6042 8 : let (tenant, ctx) = harness.load().await;
6043 2 :
6044 2 : let mut lsn = Lsn(0x08);
6045 2 :
6046 2 : let tline: Arc<Timeline> = tenant
6047 2 : .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx)
6048 6 : .await
6049 2 : .unwrap();
6050 2 :
6051 2 : assert_eq!(
6052 2 : tline.last_aux_file_policy.load(),
6053 2 : None,
6054 2 : "no aux file is written so it should be unset"
6055 2 : );
6056 2 :
6057 2 : {
6058 2 : lsn += 8;
6059 2 : let mut modification = tline.begin_modification(lsn);
6060 2 : modification
6061 2 : .put_file("pg_logical/mappings/test1", b"first", &ctx)
6062 4 : .await
6063 2 : .unwrap();
6064 2 : modification.commit(&ctx).await.unwrap();
6065 2 : }
6066 2 :
6067 2 : tline.do_switch_aux_policy(AuxFilePolicy::V2).unwrap();
6068 2 :
6069 2 : assert_eq!(
6070 2 : tline.last_aux_file_policy.load(),
6071 2 : Some(AuxFilePolicy::V2),
6072 2 : "dirty index_part.json reflected state is yet to be updated"
6073 2 : );
6074 2 :
6075 2 : // lose all data from v1
6076 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
6077 2 : assert_eq!(files.get("pg_logical/mappings/test1"), None);
6078 2 :
6079 2 : {
6080 2 : lsn += 8;
6081 2 : let mut modification = tline.begin_modification(lsn);
6082 2 : modification
6083 2 : .put_file("pg_logical/mappings/test2", b"second", &ctx)
6084 2 : .await
6085 2 : .unwrap();
6086 2 : modification.commit(&ctx).await.unwrap();
6087 2 : }
6088 2 :
6089 2 : // read data ingested in v2
6090 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
6091 2 : assert_eq!(
6092 2 : files.get("pg_logical/mappings/test2"),
6093 2 : Some(&bytes::Bytes::from_static(b"second"))
6094 2 : );
6095 2 : // lose all data from v1
6096 2 : assert_eq!(files.get("pg_logical/mappings/test1"), None);
6097 2 : }
6098 :
6099 : #[tokio::test]
6100 2 : async fn aux_file_policy_auto_detect() {
6101 2 : let mut harness = TenantHarness::create("aux_file_policy_auto_detect").unwrap();
6102 2 : harness.tenant_conf.switch_aux_file_policy = AuxFilePolicy::V2; // set to v2 mode
6103 8 : let (tenant, ctx) = harness.load().await;
6104 2 :
6105 2 : let mut lsn = Lsn(0x08);
6106 2 :
6107 2 : let tline: Arc<Timeline> = tenant
6108 2 : .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx)
6109 6 : .await
6110 2 : .unwrap();
6111 2 :
6112 2 : assert_eq!(
6113 2 : tline.last_aux_file_policy.load(),
6114 2 : None,
6115 2 : "no aux file is written so it should be unset"
6116 2 : );
6117 2 :
6118 2 : {
6119 2 : lsn += 8;
6120 2 : let mut modification = tline.begin_modification(lsn);
6121 2 : let buf = AuxFilesDirectory::ser(&AuxFilesDirectory {
6122 2 : files: vec![(
6123 2 : "test_file".to_string(),
6124 2 : Bytes::copy_from_slice(b"test_file"),
6125 2 : )]
6126 2 : .into_iter()
6127 2 : .collect(),
6128 2 : })
6129 2 : .unwrap();
6130 2 : modification.put_for_test(AUX_FILES_KEY, Value::Image(Bytes::from(buf)));
6131 2 : modification.commit(&ctx).await.unwrap();
6132 2 : }
6133 2 :
6134 2 : {
6135 2 : lsn += 8;
6136 2 : let mut modification = tline.begin_modification(lsn);
6137 2 : modification
6138 2 : .put_file("pg_logical/mappings/test1", b"first", &ctx)
6139 2 : .await
6140 2 : .unwrap();
6141 2 : modification.commit(&ctx).await.unwrap();
6142 2 : }
6143 2 :
6144 2 : assert_eq!(
6145 2 : tline.last_aux_file_policy.load(),
6146 2 : Some(AuxFilePolicy::V1),
6147 2 : "keep using v1 because there are aux files writting with v1"
6148 2 : );
6149 2 :
6150 2 : // we can still read the auxfile v1
6151 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
6152 2 : assert_eq!(
6153 2 : files.get("pg_logical/mappings/test1"),
6154 2 : Some(&bytes::Bytes::from_static(b"first"))
6155 2 : );
6156 2 : assert_eq!(
6157 2 : files.get("test_file"),
6158 2 : Some(&bytes::Bytes::from_static(b"test_file"))
6159 2 : );
6160 2 : }
6161 :
6162 : #[tokio::test]
6163 2 : async fn test_metadata_image_creation() -> anyhow::Result<()> {
6164 2 : let harness = TenantHarness::create("test_metadata_image_creation")?;
6165 8 : let (tenant, ctx) = harness.load().await;
6166 2 : let tline = tenant
6167 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6168 6 : .await?;
6169 2 :
6170 2 : const NUM_KEYS: usize = 1000;
6171 2 : const STEP: usize = 10000; // random update + scan base_key + idx * STEP
6172 2 :
6173 2 : let cancel = CancellationToken::new();
6174 2 :
6175 2 : let base_key = Key::from_hex("620000000033333333444444445500000000").unwrap();
6176 2 : assert_eq!(base_key.field1, AUX_KEY_PREFIX); // in case someone accidentally changed the prefix...
6177 2 : let mut test_key = base_key;
6178 2 : let mut lsn = Lsn(0x10);
6179 2 :
6180 8 : async fn scan_with_statistics(
6181 8 : tline: &Timeline,
6182 8 : keyspace: &KeySpace,
6183 8 : lsn: Lsn,
6184 8 : ctx: &RequestContext,
6185 8 : ) -> anyhow::Result<(BTreeMap<Key, Result<Bytes, PageReconstructError>>, usize)> {
6186 8 : let mut reconstruct_state = ValuesReconstructState::default();
6187 8 : let res = tline
6188 8 : .get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx)
6189 1666 : .await?;
6190 8 : Ok((res, reconstruct_state.get_delta_layers_visited() as usize))
6191 8 : }
6192 2 :
6193 2 : #[allow(clippy::needless_range_loop)]
6194 2002 : for blknum in 0..NUM_KEYS {
6195 2000 : lsn = Lsn(lsn.0 + 0x10);
6196 2000 : test_key.field6 = (blknum * STEP) as u32;
6197 2000 : let mut writer = tline.writer().await;
6198 2000 : writer
6199 2000 : .put(
6200 2000 : test_key,
6201 2000 : lsn,
6202 2000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
6203 2000 : &ctx,
6204 2000 : )
6205 2 : .await?;
6206 2000 : writer.finish_write(lsn);
6207 2000 : drop(writer);
6208 2 : }
6209 2 :
6210 2 : let keyspace = KeySpace::single(base_key..base_key.add((NUM_KEYS * STEP) as u32));
6211 2 :
6212 22 : for iter in 1..=10 {
6213 20020 : for _ in 0..NUM_KEYS {
6214 20000 : lsn = Lsn(lsn.0 + 0x10);
6215 20000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
6216 20000 : test_key.field6 = (blknum * STEP) as u32;
6217 20000 : let mut writer = tline.writer().await;
6218 20000 : writer
6219 20000 : .put(
6220 20000 : test_key,
6221 20000 : lsn,
6222 20000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
6223 20000 : &ctx,
6224 20000 : )
6225 19 : .await?;
6226 20000 : writer.finish_write(lsn);
6227 20000 : drop(writer);
6228 2 : }
6229 2 :
6230 20 : tline.freeze_and_flush().await?;
6231 2 :
6232 20 : if iter % 5 == 0 {
6233 4 : let (_, before_delta_file_accessed) =
6234 1658 : scan_with_statistics(&tline, &keyspace, lsn, &ctx).await?;
6235 4 : tline
6236 4 : .compact(
6237 4 : &cancel,
6238 4 : {
6239 4 : let mut flags = EnumSet::new();
6240 4 : flags.insert(CompactFlags::ForceImageLayerCreation);
6241 4 : flags.insert(CompactFlags::ForceRepartition);
6242 4 : flags
6243 4 : },
6244 4 : &ctx,
6245 4 : )
6246 6509 : .await?;
6247 4 : let (_, after_delta_file_accessed) =
6248 8 : scan_with_statistics(&tline, &keyspace, lsn, &ctx).await?;
6249 4 : assert!(after_delta_file_accessed < before_delta_file_accessed, "after_delta_file_accessed={after_delta_file_accessed}, before_delta_file_accessed={before_delta_file_accessed}");
6250 2 : // Given that we already produced an image layer, no delta layer should be needed for the scan; we still allow a small threshold for unforeseen circumstances.
6251 4 : assert!(
6252 4 : after_delta_file_accessed <= 2,
6253 2 : "after_delta_file_accessed={after_delta_file_accessed}"
6254 2 : );
6255 16 : }
6256 2 : }
6257 2 :
6258 2 : Ok(())
6259 2 : }
6260 :
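      : // Missing *data* keys: a vectored read of a data key that has no layer
      : // entry is an error. The parent timeline only has `base_key`; the child
      : // branch adds `base_key_child`, and the whole range is registered via
      : // `add_extra_test_dense_keyspace`. Reads of keys absent on a given
      : // timeline must surface `is_missing_key_error()` rather than return `None`.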
6261 : #[tokio::test]
6262 2 : async fn test_vectored_missing_data_key_reads() -> anyhow::Result<()> {
6263 2 : let harness = TenantHarness::create("test_vectored_missing_data_key_reads")?;
6264 8 : let (tenant, ctx) = harness.load().await;
6265 2 :
6266 2 : let base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
6267 2 : let base_key_child = Key::from_hex("000000000033333333444444445500000001").unwrap();
6268 2 : let base_key_nonexist = Key::from_hex("000000000033333333444444445500000002").unwrap();
6269 2 :
6270 2 : let tline = tenant
6271 2 : .create_test_timeline_with_layers(
6272 2 : TIMELINE_ID,
6273 2 : Lsn(0x10),
6274 2 : DEFAULT_PG_VERSION,
6275 2 : &ctx,
6276 2 : Vec::new(), // delta layers
6277 2 : vec![(Lsn(0x20), vec![(base_key, test_img("data key 1"))])], // image layers
6278 2 : Lsn(0x20), // it's fine not to advance the last-record LSN to 0x30 while reading at 0x30 below, because `get_vectored_impl` does not wait for the LSN
6279 2 : )
6280 13 : .await?;
6281 2 : tline.add_extra_test_dense_keyspace(KeySpace::single(base_key..(base_key_nonexist.next())));
6282 2 :
6283 2 : let child = tenant
6284 2 : .branch_timeline_test_with_layers(
6285 2 : &tline,
6286 2 : NEW_TIMELINE_ID,
6287 2 : Some(Lsn(0x20)),
6288 2 : &ctx,
6289 2 : Vec::new(), // delta layers
6290 2 : vec![(Lsn(0x30), vec![(base_key_child, test_img("data key 2"))])], // image layers
6291 2 : Lsn(0x30),
6292 2 : )
6293 7 : .await
6294 2 : .unwrap();
6295 2 :
6296 12 : async fn get_vectored_impl_wrapper(
6297 12 : tline: &Arc<Timeline>,
6298 12 : key: Key,
6299 12 : lsn: Lsn,
6300 12 : ctx: &RequestContext,
6301 12 : ) -> Result<Option<Bytes>, GetVectoredError> {
6302 12 : let mut reconstruct_state = ValuesReconstructState::new();
6303 12 : let mut res = tline
6304 12 : .get_vectored_impl(
6305 12 : KeySpace::single(key..key.next()),
6306 12 : lsn,
6307 12 : &mut reconstruct_state,
6308 12 : ctx,
6309 12 : )
6310 12 : .await?;
6311 6 : Ok(res.pop_last().map(|(k, v)| {
6312 6 : assert_eq!(k, key);
6313 6 : v.unwrap()
6314 6 : }))
6315 12 : }
6316 2 :
6317 2 : let lsn = Lsn(0x30);
6318 2 :
6319 2 : // test vectored get on parent timeline
6320 2 : assert_eq!(
6321 4 : get_vectored_impl_wrapper(&tline, base_key, lsn, &ctx).await?,
6322 2 : Some(test_img("data key 1"))
6323 2 : );
6324 2 : assert!(get_vectored_impl_wrapper(&tline, base_key_child, lsn, &ctx)
6325 3 : .await
6326 2 : .unwrap_err()
6327 2 : .is_missing_key_error());
6328 2 : assert!(
6329 2 : get_vectored_impl_wrapper(&tline, base_key_nonexist, lsn, &ctx)
6330 2 : .await
6331 2 : .unwrap_err()
6332 2 : .is_missing_key_error()
6333 2 : );
6334 2 :
6335 2 : // test vectored get on child timeline
6336 2 : assert_eq!(
6337 2 : get_vectored_impl_wrapper(&child, base_key, lsn, &ctx).await?,
6338 2 : Some(test_img("data key 1"))
6339 2 : );
6340 2 : assert_eq!(
6341 4 : get_vectored_impl_wrapper(&child, base_key_child, lsn, &ctx).await?,
6342 2 : Some(test_img("data key 2"))
6343 2 : );
6344 2 : assert!(
6345 2 : get_vectored_impl_wrapper(&child, base_key_nonexist, lsn, &ctx)
6346 2 : .await
6347 2 : .unwrap_err()
6348 2 : .is_missing_key_error()
6349 2 : );
6350 2 :
6351 2 : Ok(())
6352 2 : }
6353 :
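      : // Missing *metadata* keys: unlike data keys, a vectored read of an absent
      : // metadata (AUX-prefixed) key returns `None` instead of a missing-key
      : // error. In this setup even the parent's key reads back as `None` on the
      : // child branch.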
6354 : #[tokio::test]
6355 3 : async fn test_vectored_missing_metadata_key_reads() -> anyhow::Result<()> {
6356 3 : let harness = TenantHarness::create("test_vectored_missing_metadata_key_reads")?;
6357 12 : let (tenant, ctx) = harness.load().await;
6358 3 :
6359 3 : let base_key = Key::from_hex("620000000033333333444444445500000000").unwrap();
6360 3 : let base_key_child = Key::from_hex("620000000033333333444444445500000001").unwrap();
6361 3 : let base_key_nonexist = Key::from_hex("620000000033333333444444445500000002").unwrap();
6362 3 : assert_eq!(base_key.field1, AUX_KEY_PREFIX); // in case someone accidentally changed the prefix...
6363 3 :
6364 3 : let tline = tenant
6365 3 : .create_test_timeline_with_layers(
6366 3 : TIMELINE_ID,
6367 3 : Lsn(0x10),
6368 3 : DEFAULT_PG_VERSION,
6369 3 : &ctx,
6370 3 : Vec::new(), // delta layers
6371 3 : vec![(Lsn(0x20), vec![(base_key, test_img("metadata key 1"))])], // image layers
6372 3 : Lsn(0x20), // it's fine not to advance the last-record LSN to 0x30 while reading at 0x30 below, because `get_vectored_impl` does not wait for the LSN
6373 3 : )
6374 13 : .await?;
6375 3 :
6376 3 : let child = tenant
6377 2 : .branch_timeline_test_with_layers(
6378 2 : &tline,
6379 2 : NEW_TIMELINE_ID,
6380 2 : Some(Lsn(0x20)),
6381 2 : &ctx,
6382 2 : Vec::new(), // delta layers
6383 2 : vec![(
6384 2 : Lsn(0x30),
6385 2 : vec![(base_key_child, test_img("metadata key 2"))],
6386 2 : )], // image layers
6387 2 : Lsn(0x30),
6388 2 : )
6389 7 : .await
6390 3 : .unwrap();
6391 2 :
6392 12 : async fn get_vectored_impl_wrapper(
6393 12 : tline: &Arc<Timeline>,
6394 12 : key: Key,
6395 12 : lsn: Lsn,
6396 12 : ctx: &RequestContext,
6397 12 : ) -> Result<Option<Bytes>, GetVectoredError> {
6398 12 : let mut reconstruct_state = ValuesReconstructState::new();
6399 12 : let mut res = tline
6400 12 : .get_vectored_impl(
6401 12 : KeySpace::single(key..key.next()),
6402 12 : lsn,
6403 12 : &mut reconstruct_state,
6404 12 : ctx,
6405 12 : )
6406 8 : .await?;
6407 12 : Ok(res.pop_last().map(|(k, v)| {
6408 4 : assert_eq!(k, key);
6409 4 : v.unwrap()
6410 12 : }))
6411 12 : }
6412 2 :
6413 2 : let lsn = Lsn(0x30);
6414 3 :
6415 3 : // test vectored get on parent timeline
6416 3 : assert_eq!(
6417 4 : get_vectored_impl_wrapper(&tline, base_key, lsn, &ctx).await?,
6418 3 : Some(test_img("metadata key 1"))
6419 3 : );
6420 3 : assert_eq!(
6421 3 : get_vectored_impl_wrapper(&tline, base_key_child, lsn, &ctx).await?,
6422 3 : None
6423 3 : );
6424 3 : assert_eq!(
6425 3 : get_vectored_impl_wrapper(&tline, base_key_nonexist, lsn, &ctx).await?,
6426 3 : None
6427 3 : );
6428 3 :
6429 3 : // test vectored get on child timeline
6430 3 : assert_eq!(
6431 3 : get_vectored_impl_wrapper(&child, base_key, lsn, &ctx).await?,
6432 3 : None
6433 3 : );
6434 3 : assert_eq!(
6435 4 : get_vectored_impl_wrapper(&child, base_key_child, lsn, &ctx).await?,
6436 3 : Some(test_img("metadata key 2"))
6437 3 : );
6438 3 : assert_eq!(
6439 3 : get_vectored_impl_wrapper(&child, base_key_nonexist, lsn, &ctx).await?,
6440 3 : None
6441 3 : );
6442 3 :
6443 3 : Ok(())
6444 3 : }
6445 :
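      : // Shared single-key wrapper around `get_vectored_impl`, used by the
      : // tombstone tests below (the two tests above carry local copies of the
      : // same helper). It reads `key..key.next()` and collapses the result into
      : // an `Option<Bytes>`, e.g.:
      : //
      : //     let val = get_vectored_impl_wrapper(&tline, key, lsn, &ctx).await?;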
6446 12 : async fn get_vectored_impl_wrapper(
6447 12 : tline: &Arc<Timeline>,
6448 12 : key: Key,
6449 12 : lsn: Lsn,
6450 12 : ctx: &RequestContext,
6451 12 : ) -> Result<Option<Bytes>, GetVectoredError> {
6452 12 : let mut reconstruct_state = ValuesReconstructState::new();
6453 12 : let mut res = tline
6454 12 : .get_vectored_impl(
6455 12 : KeySpace::single(key..key.next()),
6456 12 : lsn,
6457 12 : &mut reconstruct_state,
6458 12 : ctx,
6459 12 : )
6460 13 : .await?;
6461 12 : Ok(res.pop_last().map(|(k, v)| {
6462 8 : assert_eq!(k, key);
6463 8 : v.unwrap()
6464 12 : }))
6465 12 : }
6466 :
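      : // Tombstone reads: a metadata key is deleted by writing an empty image,
      : // e.g. `(key1, Lsn(0x20), Value::Image(Bytes::new()))` in the delta layers
      : // below. At an LSN past the tombstone the key reads back as `None`; at the
      : // tombstone's own LSN it reads back as the empty `Bytes`.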
6467 : #[tokio::test]
6468 2 : async fn test_metadata_tombstone_reads() -> anyhow::Result<()> {
6469 2 : let harness = TenantHarness::create("test_metadata_tombstone_reads")?;
6470 8 : let (tenant, ctx) = harness.load().await;
6471 2 : let key0 = Key::from_hex("620000000033333333444444445500000000").unwrap();
6472 2 : let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap();
6473 2 : let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap();
6474 2 : let key3 = Key::from_hex("620000000033333333444444445500000003").unwrap();
6475 2 :
6476 2 : // We emulate the situation where the compaction algorithm creates an image layer that removes the tombstones:
6477 2 : // Lsn 0x30 key0, key3, no key1+key2
6478 2 : // Lsn 0x20 key1+key2 tombstones
6479 2 : // Lsn 0x10 key1 in image, key2 in delta
6480 2 : let tline = tenant
6481 2 : .create_test_timeline_with_layers(
6482 2 : TIMELINE_ID,
6483 2 : Lsn(0x10),
6484 2 : DEFAULT_PG_VERSION,
6485 2 : &ctx,
6486 2 : // delta layers
6487 2 : vec![
6488 2 : vec![(key2, Lsn(0x10), Value::Image(test_img("metadata key 2")))],
6489 2 : vec![(key1, Lsn(0x20), Value::Image(Bytes::new()))],
6490 2 : vec![(key2, Lsn(0x20), Value::Image(Bytes::new()))],
6491 2 : ],
6492 2 : // image layers
6493 2 : vec![
6494 2 : (Lsn(0x10), vec![(key1, test_img("metadata key 1"))]),
6495 2 : (
6496 2 : Lsn(0x30),
6497 2 : vec![
6498 2 : (key0, test_img("metadata key 0")),
6499 2 : (key3, test_img("metadata key 3")),
6500 2 : ],
6501 2 : ),
6502 2 : ],
6503 2 : Lsn(0x30),
6504 2 : )
6505 40 : .await?;
6506 2 :
6507 2 : let lsn = Lsn(0x30);
6508 2 : let old_lsn = Lsn(0x20);
6509 2 :
6510 2 : assert_eq!(
6511 4 : get_vectored_impl_wrapper(&tline, key0, lsn, &ctx).await?,
6512 2 : Some(test_img("metadata key 0"))
6513 2 : );
6514 2 : assert_eq!(
6515 2 : get_vectored_impl_wrapper(&tline, key1, lsn, &ctx).await?,
6516 2 : None,
6517 2 : );
6518 2 : assert_eq!(
6519 2 : get_vectored_impl_wrapper(&tline, key2, lsn, &ctx).await?,
6520 2 : None,
6521 2 : );
6522 2 : assert_eq!(
6523 4 : get_vectored_impl_wrapper(&tline, key1, old_lsn, &ctx).await?,
6524 2 : Some(Bytes::new()),
6525 2 : );
6526 2 : assert_eq!(
6527 4 : get_vectored_impl_wrapper(&tline, key2, old_lsn, &ctx).await?,
6528 2 : Some(Bytes::new()),
6529 2 : );
6530 2 : assert_eq!(
6531 2 : get_vectored_impl_wrapper(&tline, key3, lsn, &ctx).await?,
6532 2 : Some(test_img("metadata key 3"))
6533 2 : );
6534 2 :
6535 2 : Ok(())
6536 2 : }
6537 :
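      : // Tombstones and image creation: after forcing image layer creation across
      : // a mix of live keys (key0, key3) and tombstoned keys (key1, key2), the
      : // image at the last record LSN must contain only the two live keys.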
6538 : #[tokio::test]
6539 2 : async fn test_metadata_tombstone_image_creation() {
6540 2 : let harness = TenantHarness::create("test_metadata_tombstone_image_creation").unwrap();
6541 8 : let (tenant, ctx) = harness.load().await;
6542 2 :
6543 2 : let key0 = Key::from_hex("620000000033333333444444445500000000").unwrap();
6544 2 : let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap();
6545 2 : let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap();
6546 2 : let key3 = Key::from_hex("620000000033333333444444445500000003").unwrap();
6547 2 :
6548 2 : let tline = tenant
6549 2 : .create_test_timeline_with_layers(
6550 2 : TIMELINE_ID,
6551 2 : Lsn(0x10),
6552 2 : DEFAULT_PG_VERSION,
6553 2 : &ctx,
6554 2 : // delta layers
6555 2 : vec![
6556 2 : vec![(key2, Lsn(0x10), Value::Image(test_img("metadata key 2")))],
6557 2 : vec![(key1, Lsn(0x20), Value::Image(Bytes::new()))],
6558 2 : vec![(key2, Lsn(0x20), Value::Image(Bytes::new()))],
6559 2 : vec![
6560 2 : (key0, Lsn(0x30), Value::Image(test_img("metadata key 0"))),
6561 2 : (key3, Lsn(0x30), Value::Image(test_img("metadata key 3"))),
6562 2 : ],
6563 2 : ],
6564 2 : // image layers
6565 2 : vec![(Lsn(0x10), vec![(key1, test_img("metadata key 1"))])],
6566 2 : Lsn(0x30),
6567 2 : )
6568 37 : .await
6569 2 : .unwrap();
6570 2 :
6571 2 : let cancel = CancellationToken::new();
6572 2 :
6573 2 : tline
6574 2 : .compact(
6575 2 : &cancel,
6576 2 : {
6577 2 : let mut flags = EnumSet::new();
6578 2 : flags.insert(CompactFlags::ForceImageLayerCreation);
6579 2 : flags.insert(CompactFlags::ForceRepartition);
6580 2 : flags
6581 2 : },
6582 2 : &ctx,
6583 2 : )
6584 49 : .await
6585 2 : .unwrap();
6586 2 :
6587 2 : // Image layers are created at last_record_lsn
6588 2 : let images = tline
6589 2 : .inspect_image_layers(Lsn(0x30), &ctx)
6590 8 : .await
6591 2 : .unwrap()
6592 2 : .into_iter()
6593 20 : .filter(|(k, _)| k.is_metadata_key())
6594 2 : .collect::<Vec<_>>();
6595 2 : assert_eq!(images.len(), 2); // the image layer should contain only the two live keys; tombstones should be removed.
6596 2 : }
6597 :
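      : // Degenerate case of the previous test: every metadata key is tombstoned
      : // by the last record LSN, so the forced image creation must produce no
      : // metadata image entries at all.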
6598 : #[tokio::test]
6599 2 : async fn test_metadata_tombstone_empty_image_creation() {
6600 2 : let harness =
6601 2 : TenantHarness::create("test_metadata_tombstone_empty_image_creation").unwrap();
6602 8 : let (tenant, ctx) = harness.load().await;
6603 2 :
6604 2 : let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap();
6605 2 : let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap();
6606 2 :
6607 2 : let tline = tenant
6608 2 : .create_test_timeline_with_layers(
6609 2 : TIMELINE_ID,
6610 2 : Lsn(0x10),
6611 2 : DEFAULT_PG_VERSION,
6612 2 : &ctx,
6613 2 : // delta layers
6614 2 : vec![
6615 2 : vec![(key2, Lsn(0x10), Value::Image(test_img("metadata key 2")))],
6616 2 : vec![(key1, Lsn(0x20), Value::Image(Bytes::new()))],
6617 2 : vec![(key2, Lsn(0x20), Value::Image(Bytes::new()))],
6618 2 : ],
6619 2 : // image layers
6620 2 : vec![(Lsn(0x10), vec![(key1, test_img("metadata key 1"))])],
6621 2 : Lsn(0x30),
6622 2 : )
6623 31 : .await
6624 2 : .unwrap();
6625 2 :
6626 2 : let cancel = CancellationToken::new();
6627 2 :
6628 2 : tline
6629 2 : .compact(
6630 2 : &cancel,
6631 2 : {
6632 2 : let mut flags = EnumSet::new();
6633 2 : flags.insert(CompactFlags::ForceImageLayerCreation);
6634 2 : flags.insert(CompactFlags::ForceRepartition);
6635 2 : flags
6636 2 : },
6637 2 : &ctx,
6638 2 : )
6639 37 : .await
6640 2 : .unwrap();
6641 2 :
6642 2 : // Image layers are created at last_record_lsn
6643 2 : let images = tline
6644 2 : .inspect_image_layers(Lsn(0x30), &ctx)
6645 4 : .await
6646 2 : .unwrap()
6647 2 : .into_iter()
6648 16 : .filter(|(k, _)| k.is_metadata_key())
6649 2 : .collect::<Vec<_>>();
6650 2 : assert_eq!(images.len(), 0); // the image layer should not contain tombstones; if every key is tombstoned, no image entries are created at all
6651 2 : }
6652 :
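      : // Bottom-most (GC) compaction with image values: deltas below the GC
      : // horizon are squashed into a new image layer at the horizon, deltas
      : // entirely above it are left alone, and a delta crossing the horizon is
      : // cut. Reads at the head LSN must be unchanged, and the resulting layer
      : // map is checked exactly.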
6653 : #[tokio::test]
6654 2 : async fn test_simple_bottom_most_compaction_images() -> anyhow::Result<()> {
6655 2 : let harness = TenantHarness::create("test_simple_bottom_most_compaction_images")?;
6656 8 : let (tenant, ctx) = harness.load().await;
6657 2 :
6658 104 : fn get_key(id: u32) -> Key {
6659 104 : // using aux key here b/c they are guaranteed to be inside `collect_keyspace`.
6660 104 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
6661 104 : key.field6 = id;
6662 104 : key
6663 104 : }
6664 2 :
6665 2 : // We create one bottom-most image layer, a delta layer D1 crossing the GC horizon, D2 below the horizon, and D3 above the horizon.
6666 2 : //
6667 2 : // | D1 | | D3 |
6668 2 : // -| |-- gc horizon -----------------
6669 2 : // | | | D2 |
6670 2 : // --------- img layer ------------------
6671 2 : //
6672 2 : // What we should expect from this compaction is:
6673 2 : // | Part of D1 | | D3 |
6674 2 : // --------- img layer with D1+D2 at GC horizon------------------
6675 2 :
6676 2 : // img layer at 0x10
6677 2 : let img_layer = (0..10)
6678 20 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
6679 2 : .collect_vec();
6680 2 :
6681 2 : let delta1 = vec![
6682 2 : (
6683 2 : get_key(1),
6684 2 : Lsn(0x20),
6685 2 : Value::Image(Bytes::from("value 1@0x20")),
6686 2 : ),
6687 2 : (
6688 2 : get_key(2),
6689 2 : Lsn(0x30),
6690 2 : Value::Image(Bytes::from("value 2@0x30")),
6691 2 : ),
6692 2 : (
6693 2 : get_key(3),
6694 2 : Lsn(0x40),
6695 2 : Value::Image(Bytes::from("value 3@0x40")),
6696 2 : ),
6697 2 : ];
6698 2 : let delta2 = vec![
6699 2 : (
6700 2 : get_key(5),
6701 2 : Lsn(0x20),
6702 2 : Value::Image(Bytes::from("value 5@0x20")),
6703 2 : ),
6704 2 : (
6705 2 : get_key(6),
6706 2 : Lsn(0x20),
6707 2 : Value::Image(Bytes::from("value 6@0x20")),
6708 2 : ),
6709 2 : ];
6710 2 : let delta3 = vec![
6711 2 : (
6712 2 : get_key(8),
6713 2 : Lsn(0x40),
6714 2 : Value::Image(Bytes::from("value 8@0x40")),
6715 2 : ),
6716 2 : (
6717 2 : get_key(9),
6718 2 : Lsn(0x40),
6719 2 : Value::Image(Bytes::from("value 9@0x40")),
6720 2 : ),
6721 2 : ];
6722 2 :
6723 2 : let tline = tenant
6724 2 : .create_test_timeline_with_layers(
6725 2 : TIMELINE_ID,
6726 2 : Lsn(0x10),
6727 2 : DEFAULT_PG_VERSION,
6728 2 : &ctx,
6729 2 : vec![delta1, delta2, delta3], // delta layers
6730 2 : vec![(Lsn(0x10), img_layer)], // image layers
6731 2 : Lsn(0x50),
6732 2 : )
6733 49 : .await?;
6734 2 : {
6735 2 : // Update GC info
6736 2 : let mut guard = tline.gc_info.write().unwrap();
6737 2 : guard.cutoffs.pitr = Lsn(0x30);
6738 2 : guard.cutoffs.horizon = Lsn(0x30);
6739 2 : }
6740 2 :
6741 2 : let expected_result = [
6742 2 : Bytes::from_static(b"value 0@0x10"),
6743 2 : Bytes::from_static(b"value 1@0x20"),
6744 2 : Bytes::from_static(b"value 2@0x30"),
6745 2 : Bytes::from_static(b"value 3@0x40"),
6746 2 : Bytes::from_static(b"value 4@0x10"),
6747 2 : Bytes::from_static(b"value 5@0x20"),
6748 2 : Bytes::from_static(b"value 6@0x20"),
6749 2 : Bytes::from_static(b"value 7@0x10"),
6750 2 : Bytes::from_static(b"value 8@0x40"),
6751 2 : Bytes::from_static(b"value 9@0x40"),
6752 2 : ];
6753 2 :
6754 20 : for (idx, expected) in expected_result.iter().enumerate() {
6755 20 : assert_eq!(
6756 20 : tline
6757 20 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
6758 17 : .await
6759 20 : .unwrap(),
6760 2 : expected
6761 2 : );
6762 2 : }
6763 2 :
6764 2 : let cancel = CancellationToken::new();
6765 52 : tline.compact_with_gc(&cancel, &ctx).await.unwrap();
6766 2 :
6767 20 : for (idx, expected) in expected_result.iter().enumerate() {
6768 20 : assert_eq!(
6769 20 : tline
6770 20 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
6771 8 : .await
6772 20 : .unwrap(),
6773 2 : expected
6774 2 : );
6775 2 : }
6776 2 :
6777 2 : // Check if the image layer at the GC horizon contains exactly what we want
6778 2 : let image_at_gc_horizon = tline
6779 2 : .inspect_image_layers(Lsn(0x30), &ctx)
6780 2 : .await
6781 2 : .unwrap()
6782 2 : .into_iter()
6783 36 : .filter(|(k, _)| k.is_metadata_key())
6784 2 : .collect::<Vec<_>>();
6785 2 :
6786 2 : assert_eq!(image_at_gc_horizon.len(), 10);
6787 2 : let expected_result = [
6788 2 : Bytes::from_static(b"value 0@0x10"),
6789 2 : Bytes::from_static(b"value 1@0x20"),
6790 2 : Bytes::from_static(b"value 2@0x30"),
6791 2 : Bytes::from_static(b"value 3@0x10"),
6792 2 : Bytes::from_static(b"value 4@0x10"),
6793 2 : Bytes::from_static(b"value 5@0x20"),
6794 2 : Bytes::from_static(b"value 6@0x20"),
6795 2 : Bytes::from_static(b"value 7@0x10"),
6796 2 : Bytes::from_static(b"value 8@0x10"),
6797 2 : Bytes::from_static(b"value 9@0x10"),
6798 2 : ];
6799 22 : for idx in 0..10 {
6800 20 : assert_eq!(
6801 20 : image_at_gc_horizon[idx],
6802 20 : (get_key(idx as u32), expected_result[idx].clone())
6803 20 : );
6804 2 : }
6805 2 :
6806 2 : // Check if old layers are removed / new layers have the expected LSN
6807 2 : let mut all_layers = tline.inspect_historic_layers().await.unwrap();
6808 4 : all_layers.sort_by(|k1, k2| {
6809 4 : (
6810 4 : k1.is_delta,
6811 4 : k1.key_range.start,
6812 4 : k1.key_range.end,
6813 4 : k1.lsn_range.start,
6814 4 : k1.lsn_range.end,
6815 4 : )
6816 4 : .cmp(&(
6817 4 : k2.is_delta,
6818 4 : k2.key_range.start,
6819 4 : k2.key_range.end,
6820 4 : k2.lsn_range.start,
6821 4 : k2.lsn_range.end,
6822 4 : ))
6823 4 : });
6824 2 : assert_eq!(
6825 2 : all_layers,
6826 2 : vec![
6827 2 : // Image layer at GC horizon
6828 2 : PersistentLayerKey {
6829 2 : key_range: Key::MIN..get_key(10),
6830 2 : lsn_range: Lsn(0x30)..Lsn(0x31),
6831 2 : is_delta: false
6832 2 : },
6833 2 : // The delta layer that is cut in the middle
6834 2 : PersistentLayerKey {
6835 2 : key_range: get_key(3)..get_key(4),
6836 2 : lsn_range: Lsn(0x30)..Lsn(0x41),
6837 2 : is_delta: true
6838 2 : },
6839 2 : // The delta layer we created that should not be picked up by this compaction
6840 2 : PersistentLayerKey {
6841 2 : key_range: get_key(8)..get_key(10),
6842 2 : lsn_range: Lsn(0x40)..Lsn(0x41),
6843 2 : is_delta: true
6844 2 : }
6845 2 : ]
6846 2 : );
6847 2 :
6848 2 : Ok(())
6849 2 : }
6850 :
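      : // Neon test WAL records: `NeonWalRecord::wal_append` appends to the base
      : // image during redo, so key 1 reconstructs to "0x10,0x20,0x30". The
      : // `wal_clear`/`wal_init` assertions are commented out below until the
      : // "Neon WAL redo requires base image" limitation is lifted.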
6851 : #[tokio::test]
6852 2 : async fn test_neon_test_record() -> anyhow::Result<()> {
6853 2 : let harness = TenantHarness::create("test_neon_test_record")?;
6854 8 : let (tenant, ctx) = harness.load().await;
6855 2 :
6856 24 : fn get_key(id: u32) -> Key {
6857 24 : // using aux key here b/c they are guaranteed to be inside `collect_keyspace`.
6858 24 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
6859 24 : key.field6 = id;
6860 24 : key
6861 24 : }
6862 2 :
6863 2 : let delta1 = vec![
6864 2 : (
6865 2 : get_key(1),
6866 2 : Lsn(0x20),
6867 2 : Value::WalRecord(NeonWalRecord::wal_append(",0x20")),
6868 2 : ),
6869 2 : (
6870 2 : get_key(1),
6871 2 : Lsn(0x30),
6872 2 : Value::WalRecord(NeonWalRecord::wal_append(",0x30")),
6873 2 : ),
6874 2 : (get_key(2), Lsn(0x10), Value::Image("0x10".into())),
6875 2 : (
6876 2 : get_key(2),
6877 2 : Lsn(0x20),
6878 2 : Value::WalRecord(NeonWalRecord::wal_append(",0x20")),
6879 2 : ),
6880 2 : (
6881 2 : get_key(2),
6882 2 : Lsn(0x30),
6883 2 : Value::WalRecord(NeonWalRecord::wal_append(",0x30")),
6884 2 : ),
6885 2 : (get_key(3), Lsn(0x10), Value::Image("0x10".into())),
6886 2 : (
6887 2 : get_key(3),
6888 2 : Lsn(0x20),
6889 2 : Value::WalRecord(NeonWalRecord::wal_clear()),
6890 2 : ),
6891 2 : (get_key(4), Lsn(0x10), Value::Image("0x10".into())),
6892 2 : (
6893 2 : get_key(4),
6894 2 : Lsn(0x20),
6895 2 : Value::WalRecord(NeonWalRecord::wal_init()),
6896 2 : ),
6897 2 : ];
6898 2 : let image1 = vec![(get_key(1), "0x10".into())];
6899 2 :
6900 2 : let tline = tenant
6901 2 : .create_test_timeline_with_layers(
6902 2 : TIMELINE_ID,
6903 2 : Lsn(0x10),
6904 2 : DEFAULT_PG_VERSION,
6905 2 : &ctx,
6906 2 : vec![delta1], // delta layers
6907 2 : vec![(Lsn(0x10), image1)], // image layers
6908 2 : Lsn(0x50),
6909 2 : )
6910 19 : .await?;
6911 2 :
6912 2 : assert_eq!(
6913 8 : tline.get(get_key(1), Lsn(0x50), &ctx).await?,
6914 2 : Bytes::from_static(b"0x10,0x20,0x30")
6915 2 : );
6916 2 : assert_eq!(
6917 2 : tline.get(get_key(2), Lsn(0x50), &ctx).await?,
6918 2 : Bytes::from_static(b"0x10,0x20,0x30")
6919 2 : );
6920 2 :
6921 2 : // The following assertions require lifting the "Neon WAL redo requires base image" limitation.
6922 2 :
6923 2 : // assert_eq!(tline.get(get_key(3), Lsn(0x50), &ctx).await?, Bytes::new());
6924 2 : // assert_eq!(tline.get(get_key(4), Lsn(0x50), &ctx).await?, Bytes::new());
6925 2 :
6926 2 : Ok(())
6927 2 : }
6928 :
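      : // LSN leases: leases at 0/30, 0/50 and 0/70 must pin their layers across
      : // GC. Renewing with a shorter duration is a no-op; renewing with a longer
      : // one extends `valid_until`. After GC, taking a lease below the new
      : // latest_gc_cutoff must fail, while renewing a still-valid lease succeeds.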
6929 : #[tokio::test]
6930 2 : async fn test_lsn_lease() -> anyhow::Result<()> {
6931 8 : let (tenant, ctx) = TenantHarness::create("test_lsn_lease")?.load().await;
6932 2 : let key = Key::from_hex("010000000033333333444444445500000000").unwrap();
6933 2 :
6934 2 : let end_lsn = Lsn(0x100);
6935 2 : let image_layers = (0x20..=0x90)
6936 2 : .step_by(0x10)
6937 16 : .map(|n| {
6938 16 : (
6939 16 : Lsn(n),
6940 16 : vec![(key, test_img(&format!("data key at {:x}", n)))],
6941 16 : )
6942 16 : })
6943 2 : .collect();
6944 2 :
6945 2 : let timeline = tenant
6946 2 : .create_test_timeline_with_layers(
6947 2 : TIMELINE_ID,
6948 2 : Lsn(0x10),
6949 2 : DEFAULT_PG_VERSION,
6950 2 : &ctx,
6951 2 : Vec::new(),
6952 2 : image_layers,
6953 2 : end_lsn,
6954 2 : )
6955 61 : .await?;
6956 2 :
6957 2 : let leased_lsns = [0x30, 0x50, 0x70];
6958 2 : let mut leases = Vec::new();
6959 6 : let _: anyhow::Result<_> = leased_lsns.iter().try_for_each(|n| {
6960 6 : leases.push(timeline.make_lsn_lease(Lsn(*n), timeline.get_lsn_lease_length(), &ctx)?);
6961 6 : Ok(())
6962 6 : });
6963 2 :
6964 2 : // Renewing with a shorter lease should not change the lease.
6965 2 : let updated_lease_0 =
6966 2 : timeline.make_lsn_lease(Lsn(leased_lsns[0]), Duration::from_secs(0), &ctx)?;
6967 2 : assert_eq!(updated_lease_0.valid_until, leases[0].valid_until);
6968 2 :
6969 2 : // Renewing with a longer lease should produce a later expiration time.
6970 2 : let updated_lease_1 = timeline.make_lsn_lease(
6971 2 : Lsn(leased_lsns[1]),
6972 2 : timeline.get_lsn_lease_length() * 2,
6973 2 : &ctx,
6974 2 : )?;
6975 2 :
6976 2 : assert!(updated_lease_1.valid_until > leases[1].valid_until);
6977 2 :
6978 2 : // Force-set the disk-consistent LSN so we can get the cutoff at `end_lsn`.
6979 2 : info!(
6980 2 : "latest_gc_cutoff_lsn: {}",
6981 0 : *timeline.get_latest_gc_cutoff_lsn()
6982 2 : );
6983 2 : timeline.force_set_disk_consistent_lsn(end_lsn);
6984 2 :
6985 2 : let res = tenant
6986 2 : .gc_iteration(
6987 2 : Some(TIMELINE_ID),
6988 2 : 0,
6989 2 : Duration::ZERO,
6990 2 : &CancellationToken::new(),
6991 2 : &ctx,
6992 2 : )
6993 2 : .await?;
6994 2 :
6995 2 : // Keeping everything below Lsn(0x80) b/c of the leases:
6996 2 : // 0/10: initdb layer
6997 2 : // (0/20..=0/70).step_by(0x10): image layers added when creating the timeline.
6998 2 : assert_eq!(res.layers_needed_by_leases, 7);
6999 2 : // Keeping 0/90 b/c it is the latest layer.
7000 2 : assert_eq!(res.layers_not_updated, 1);
7001 2 : // Removed 0/80.
7002 2 : assert_eq!(res.layers_removed, 1);
7003 2 :
7004 2 : // Try to make a lease on an already GC-ed LSN.
7005 2 : // 0/80 has no valid lease and is below latest_gc_cutoff
7006 2 : assert!(Lsn(0x80) < *timeline.get_latest_gc_cutoff_lsn());
7007 2 : let res = timeline.make_lsn_lease(Lsn(0x80), timeline.get_lsn_lease_length(), &ctx);
7008 2 : assert!(res.is_err());
7009 2 :
7010 2 : // Should still be able to renew a currently valid lease
7011 2 : // Assumption: the original lease for 0/50 is still valid.
7012 2 : let _ =
7013 2 : timeline.make_lsn_lease(Lsn(leased_lsns[1]), timeline.get_lsn_lease_length(), &ctx)?;
7014 2 :
7015 2 : Ok(())
7016 2 : }
7017 :
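      : // Same layer layout as `test_simple_bottom_most_compaction_images`, but the
      : // deltas are `NeonWalRecord::wal_append` records rather than images, so the
      : // compaction must replay WAL to materialize values at the GC horizon. Reads
      : // at both the head LSN (0x50) and the horizon (0x30) are verified before
      : // and after `compact_with_gc`.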
7018 : #[tokio::test]
7019 2 : async fn test_simple_bottom_most_compaction_deltas() -> anyhow::Result<()> {
7020 2 : let harness = TenantHarness::create("test_simple_bottom_most_compaction_deltas")?;
7021 8 : let (tenant, ctx) = harness.load().await;
7022 2 :
7023 118 : fn get_key(id: u32) -> Key {
7024 118 : // using aux key here b/c they are guaranteed to be inside `collect_keyspace`.
7025 118 : let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap();
7026 118 : key.field6 = id;
7027 118 : key
7028 118 : }
7029 2 :
7030 2 : // We create one bottom-most image layer, a delta layer D1 crossing the GC horizon, D2 below the horizon, and D3 above the horizon.
7031 2 : //
7032 2 : // | D1 | | D3 |
7033 2 : // -| |-- gc horizon -----------------
7034 2 : // | | | D2 |
7035 2 : // --------- img layer ------------------
7036 2 : //
7037 2 : // What we should expect from this compaction is:
7038 2 : // | Part of D1 | | D3 |
7039 2 : // --------- img layer with D1+D2 at GC horizon------------------
7040 2 :
7041 2 : // img layer at 0x10
7042 2 : let img_layer = (0..10)
7043 20 : .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10"))))
7044 2 : .collect_vec();
7045 2 :
7046 2 : let delta1 = vec![
7047 2 : (
7048 2 : get_key(1),
7049 2 : Lsn(0x20),
7050 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
7051 2 : ),
7052 2 : (
7053 2 : get_key(2),
7054 2 : Lsn(0x30),
7055 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
7056 2 : ),
7057 2 : (
7058 2 : get_key(3),
7059 2 : Lsn(0x28),
7060 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x28")),
7061 2 : ),
7062 2 : (
7063 2 : get_key(3),
7064 2 : Lsn(0x30),
7065 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x30")),
7066 2 : ),
7067 2 : (
7068 2 : get_key(3),
7069 2 : Lsn(0x40),
7070 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x40")),
7071 2 : ),
7072 2 : ];
7073 2 : let delta2 = vec![
7074 2 : (
7075 2 : get_key(5),
7076 2 : Lsn(0x20),
7077 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
7078 2 : ),
7079 2 : (
7080 2 : get_key(6),
7081 2 : Lsn(0x20),
7082 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x20")),
7083 2 : ),
7084 2 : ];
7085 2 : let delta3 = vec![
7086 2 : (
7087 2 : get_key(8),
7088 2 : Lsn(0x40),
7089 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x40")),
7090 2 : ),
7091 2 : (
7092 2 : get_key(9),
7093 2 : Lsn(0x40),
7094 2 : Value::WalRecord(NeonWalRecord::wal_append("@0x40")),
7095 2 : ),
7096 2 : ];
7097 2 :
7098 2 : let tline = tenant
7099 2 : .create_test_timeline_with_layers(
7100 2 : TIMELINE_ID,
7101 2 : Lsn(0x10),
7102 2 : DEFAULT_PG_VERSION,
7103 2 : &ctx,
7104 2 : vec![delta1, delta2, delta3], // delta layers
7105 2 : vec![(Lsn(0x10), img_layer)], // image layers
7106 2 : Lsn(0x50),
7107 2 : )
7108 48 : .await?;
7109 2 : {
7110 2 : // Update GC info
7111 2 : let mut guard = tline.gc_info.write().unwrap();
7112 2 : *guard = GcInfo {
7113 2 : retain_lsns: vec![],
7114 2 : cutoffs: GcCutoffs {
7115 2 : pitr: Lsn(0x30),
7116 2 : horizon: Lsn(0x30),
7117 2 : },
7118 2 : leases: Default::default(),
7119 2 : };
7120 2 : }
7121 2 :
7122 2 : let expected_result = [
7123 2 : Bytes::from_static(b"value 0@0x10"),
7124 2 : Bytes::from_static(b"value 1@0x10@0x20"),
7125 2 : Bytes::from_static(b"value 2@0x10@0x30"),
7126 2 : Bytes::from_static(b"value 3@0x10@0x28@0x30@0x40"),
7127 2 : Bytes::from_static(b"value 4@0x10"),
7128 2 : Bytes::from_static(b"value 5@0x10@0x20"),
7129 2 : Bytes::from_static(b"value 6@0x10@0x20"),
7130 2 : Bytes::from_static(b"value 7@0x10"),
7131 2 : Bytes::from_static(b"value 8@0x10@0x40"),
7132 2 : Bytes::from_static(b"value 9@0x10@0x40"),
7133 2 : ];
7134 2 :
7135 2 : let expected_result_at_gc_horizon = [
7136 2 : Bytes::from_static(b"value 0@0x10"),
7137 2 : Bytes::from_static(b"value 1@0x10@0x20"),
7138 2 : Bytes::from_static(b"value 2@0x10@0x30"),
7139 2 : Bytes::from_static(b"value 3@0x10@0x28@0x30"),
7140 2 : Bytes::from_static(b"value 4@0x10"),
7141 2 : Bytes::from_static(b"value 5@0x10@0x20"),
7142 2 : Bytes::from_static(b"value 6@0x10@0x20"),
7143 2 : Bytes::from_static(b"value 7@0x10"),
7144 2 : Bytes::from_static(b"value 8@0x10"),
7145 2 : Bytes::from_static(b"value 9@0x10"),
7146 2 : ];
7147 2 :
7148 22 : for idx in 0..10 {
7149 20 : assert_eq!(
7150 20 : tline
7151 20 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
7152 17 : .await
7153 20 : .unwrap(),
7154 20 : &expected_result[idx]
7155 2 : );
7156 20 : assert_eq!(
7157 20 : tline
7158 20 : .get(get_key(idx as u32), Lsn(0x30), &ctx)
7159 2 : .await
7160 20 : .unwrap(),
7161 20 : &expected_result_at_gc_horizon[idx]
7162 2 : );
7163 2 : }
7164 2 :
7165 2 : let cancel = CancellationToken::new();
7166 52 : tline.compact_with_gc(&cancel, &ctx).await.unwrap();
7167 2 :
7168 22 : for idx in 0..10 {
7169 20 : assert_eq!(
7170 20 : tline
7171 20 : .get(get_key(idx as u32), Lsn(0x50), &ctx)
7172 8 : .await
7173 20 : .unwrap(),
7174 20 : &expected_result[idx]
7175 2 : );
7176 20 : assert_eq!(
7177 20 : tline
7178 20 : .get(get_key(idx as u32), Lsn(0x30), &ctx)
7179 2 : .await
7180 20 : .unwrap(),
7181 20 : &expected_result_at_gc_horizon[idx]
7182 2 : );
7183 2 : }
7184 2 :
7185 2 : Ok(())
7186 2 : }
7187 : }