Line data Source code
1 : //!
2 : //! Timeline repository implementation that keeps old data in files on disk, and
3 : //! the recent changes in memory. See tenant/*_layer.rs files.
4 : //! The functions here are responsible for locating the correct layer for the
5 : //! get/put call, walking back the timeline branching history as needed.
6 : //!
7 : //! The files are stored in the .neon/tenants/<tenant_id>/timelines/<timeline_id>
8 : //! directory. See docs/pageserver-storage.md for how the files are managed.
9 : //! In addition to the layer files, there is a metadata file in the same
10 : //! directory that contains information about the timeline, in particular its
11 : //! parent timeline, and the last LSN that has been written to disk.
12 : //!
13 :
14 : use anyhow::{bail, Context};
15 : use arc_swap::ArcSwap;
16 : use camino::Utf8Path;
17 : use camino::Utf8PathBuf;
18 : use enumset::EnumSet;
19 : use futures::stream::FuturesUnordered;
20 : use futures::FutureExt;
21 : use futures::StreamExt;
22 : use pageserver_api::models;
23 : use pageserver_api::models::AuxFilePolicy;
24 : use pageserver_api::models::TimelineState;
25 : use pageserver_api::models::TopTenantShardItem;
26 : use pageserver_api::models::WalRedoManagerStatus;
27 : use pageserver_api::shard::ShardIdentity;
28 : use pageserver_api::shard::ShardStripeSize;
29 : use pageserver_api::shard::TenantShardId;
30 : use remote_storage::DownloadError;
31 : use remote_storage::GenericRemoteStorage;
32 : use remote_storage::TimeoutOrCancel;
33 : use std::fmt;
34 : use storage_broker::BrokerClientChannel;
35 : use tokio::io::BufReader;
36 : use tokio::sync::watch;
37 : use tokio::task::JoinSet;
38 : use tokio_util::sync::CancellationToken;
39 : use tracing::*;
40 : use utils::backoff;
41 : use utils::completion;
42 : use utils::crashsafe::path_with_suffix_extension;
43 : use utils::failpoint_support;
44 : use utils::fs_ext;
45 : use utils::sync::gate::Gate;
46 : use utils::sync::gate::GateGuard;
47 : use utils::timeout::timeout_cancellable;
48 : use utils::timeout::TimeoutCancellableError;
49 : use utils::zstd::create_zst_tarball;
50 : use utils::zstd::extract_zst_tarball;
51 :
52 : use self::config::AttachedLocationConfig;
53 : use self::config::AttachmentMode;
54 : use self::config::LocationConf;
55 : use self::config::TenantConf;
56 : use self::delete::DeleteTenantFlow;
57 : use self::metadata::TimelineMetadata;
58 : use self::mgr::GetActiveTenantError;
59 : use self::mgr::GetTenantError;
60 : use self::mgr::TenantsMap;
61 : use self::remote_timeline_client::upload::upload_index_part;
62 : use self::remote_timeline_client::RemoteTimelineClient;
63 : use self::timeline::uninit::TimelineCreateGuard;
64 : use self::timeline::uninit::TimelineExclusionError;
65 : use self::timeline::uninit::UninitializedTimeline;
66 : use self::timeline::EvictionTaskTenantState;
67 : use self::timeline::TimelineResources;
68 : use self::timeline::WaitLsnError;
69 : use self::timeline::{GcCutoffs, GcInfo};
70 : use crate::config::PageServerConf;
71 : use crate::context::{DownloadBehavior, RequestContext};
72 : use crate::deletion_queue::DeletionQueueClient;
73 : use crate::deletion_queue::DeletionQueueError;
74 : use crate::import_datadir;
75 : use crate::is_uninit_mark;
76 : use crate::metrics::TENANT;
77 : use crate::metrics::{
78 : remove_tenant_metrics, BROKEN_TENANTS_SET, TENANT_STATE_METRIC, TENANT_SYNTHETIC_SIZE_METRIC,
79 : };
80 : use crate::repository::GcResult;
81 : use crate::task_mgr;
82 : use crate::task_mgr::TaskKind;
83 : use crate::tenant::config::LocationMode;
84 : use crate::tenant::config::TenantConfOpt;
85 : pub use crate::tenant::remote_timeline_client::index::IndexPart;
86 : use crate::tenant::remote_timeline_client::remote_initdb_archive_path;
87 : use crate::tenant::remote_timeline_client::MaybeDeletedIndexPart;
88 : use crate::tenant::remote_timeline_client::INITDB_PATH;
89 : use crate::tenant::storage_layer::DeltaLayer;
90 : use crate::tenant::storage_layer::ImageLayer;
91 : use crate::InitializationOrder;
92 : use std::collections::hash_map::Entry;
93 : use std::collections::BTreeSet;
94 : use std::collections::HashMap;
95 : use std::collections::HashSet;
96 : use std::fmt::Debug;
97 : use std::fmt::Display;
98 : use std::fs;
99 : use std::fs::File;
100 : use std::ops::Bound::Included;
101 : use std::sync::atomic::AtomicU64;
102 : use std::sync::atomic::Ordering;
103 : use std::sync::Arc;
104 : use std::sync::Mutex;
105 : use std::time::{Duration, Instant};
106 :
107 : use crate::span;
108 : use crate::tenant::timeline::delete::DeleteTimelineFlow;
109 : use crate::tenant::timeline::uninit::cleanup_timeline_directory;
110 : use crate::virtual_file::VirtualFile;
111 : use crate::walredo::PostgresRedoManager;
112 : use crate::TEMP_FILE_SUFFIX;
113 : use once_cell::sync::Lazy;
114 : pub use pageserver_api::models::TenantState;
115 : use tokio::sync::Semaphore;
116 :
117 0 : static INIT_DB_SEMAPHORE: Lazy<Semaphore> = Lazy::new(|| Semaphore::new(8));
118 : use utils::{
119 : crashsafe,
120 : generation::Generation,
121 : id::TimelineId,
122 : lsn::{Lsn, RecordLsn},
123 : };
124 :
125 : /// Declare a failpoint that can use the `pause` failpoint action.
126 : /// We don't want to block the executor thread, hence, spawn_blocking + await.
127 : macro_rules! pausable_failpoint {
128 : ($name:literal) => {
129 : if cfg!(feature = "testing") {
130 : tokio::task::spawn_blocking({
131 : let current = tracing::Span::current();
132 5463 : move || {
133 5463 : let _entered = current.entered();
134 5463 : tracing::info!("at failpoint {}", $name);
135 : fail::fail_point!($name);
136 5463 : }
137 : })
138 : .await
139 : .expect("spawn_blocking");
140 : }
141 : };
142 : ($name:literal, $cond:expr) => {
143 : if cfg!(feature = "testing") {
144 : if $cond {
145 : pausable_failpoint!($name)
146 : }
147 : }
148 : };
149 : }
150 :
151 : pub mod blob_io;
152 : pub mod block_io;
153 : pub mod vectored_blob_io;
154 :
155 : pub mod disk_btree;
156 : pub(crate) mod ephemeral_file;
157 : pub mod layer_map;
158 :
159 : pub mod metadata;
160 : pub mod remote_timeline_client;
161 : pub mod storage_layer;
162 :
163 : pub mod config;
164 : pub mod delete;
165 : pub mod mgr;
166 : pub mod secondary;
167 : pub mod tasks;
168 : pub mod upload_queue;
169 :
170 : pub(crate) mod timeline;
171 :
172 : pub mod size;
173 :
174 : pub(crate) mod throttle;
175 :
176 : pub(crate) use crate::span::debug_assert_current_span_has_tenant_and_timeline_id;
177 : pub(crate) use timeline::{LogicalSizeCalculationCause, PageReconstructError, Timeline};
178 :
179 : // re-export for use in walreceiver
180 : pub use crate::tenant::timeline::WalReceiverInfo;
181 :
182 : /// The "tenants" part of `tenants/<tenant>/timelines...`
183 : pub const TENANTS_SEGMENT_NAME: &str = "tenants";
184 :
185 : /// Parts of the `.neon/tenants/<tenant_id>/timelines/<timeline_id>` directory prefix.
186 : pub const TIMELINES_SEGMENT_NAME: &str = "timelines";
187 :
188 : pub const TENANT_DELETED_MARKER_FILE_NAME: &str = "deleted";
189 :
190 : /// References to shared objects that are passed into each tenant, such
191 : /// as the shared remote storage client and process initialization state.
192 : #[derive(Clone)]
193 : pub struct TenantSharedResources {
194 : pub broker_client: storage_broker::BrokerClientChannel,
195 : pub remote_storage: GenericRemoteStorage,
196 : pub deletion_queue_client: DeletionQueueClient,
197 : }
198 :
199 : /// A [`Tenant`] is really an _attached_ tenant. The configuration
200 : /// for an attached tenant is a subset of the [`LocationConf`], represented
201 : /// in this struct.
202 : pub(super) struct AttachedTenantConf {
203 : tenant_conf: TenantConfOpt,
204 : location: AttachedLocationConfig,
205 : }
206 :
207 : impl AttachedTenantConf {
208 0 : fn new(tenant_conf: TenantConfOpt, location: AttachedLocationConfig) -> Self {
209 0 : Self {
210 0 : tenant_conf,
211 0 : location,
212 0 : }
213 0 : }
214 :
215 138 : fn try_from(location_conf: LocationConf) -> anyhow::Result<Self> {
216 138 : match &location_conf.mode {
217 138 : LocationMode::Attached(attach_conf) => Ok(Self {
218 138 : tenant_conf: location_conf.tenant_conf,
219 138 : location: *attach_conf,
220 138 : }),
221 : LocationMode::Secondary(_) => {
222 0 : anyhow::bail!("Attempted to construct AttachedTenantConf from a LocationConf in secondary mode")
223 : }
224 : }
225 138 : }
226 : }
227 : struct TimelinePreload {
228 : timeline_id: TimelineId,
229 : client: RemoteTimelineClient,
230 : index_part: Result<MaybeDeletedIndexPart, DownloadError>,
231 : }
232 :
233 : pub(crate) struct TenantPreload {
234 : deleting: bool,
235 : timelines: HashMap<TimelineId, TimelinePreload>,
236 : }
237 :
238 : /// When we spawn a tenant, there is a special mode for tenant creation that
239 : /// avoids trying to read anything from remote storage.
240 : pub(crate) enum SpawnMode {
241 : /// Activate as soon as possible
242 : Eager,
243 : /// Lazy activation in the background, with the option to skip the queue if the need comes up
244 : Lazy,
245 : /// Tenant has been created during the lifetime of this process
246 : Create,
247 : }
248 :
249 : ///
250 : /// Tenant consists of multiple timelines. Keep them in a hash table.
251 : ///
252 : pub struct Tenant {
253 : // Global pageserver config parameters
254 : pub conf: &'static PageServerConf,
255 :
256 : /// The value creation timestamp, used to measure activation delay, see:
257 : /// <https://github.com/neondatabase/neon/issues/4025>
258 : constructed_at: Instant,
259 :
260 : state: watch::Sender<TenantState>,
261 :
262 : // Overridden tenant-specific config parameters.
263 : // We keep TenantConfOpt sturct here to preserve the information
264 : // about parameters that are not set.
265 : // This is necessary to allow global config updates.
266 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
267 :
268 : tenant_shard_id: TenantShardId,
269 :
270 : // The detailed sharding information, beyond the number/count in tenant_shard_id
271 : shard_identity: ShardIdentity,
272 :
273 : /// The remote storage generation, used to protect S3 objects from split-brain.
274 : /// Does not change over the lifetime of the [`Tenant`] object.
275 : ///
276 : /// This duplicates the generation stored in LocationConf, but that structure is mutable:
277 : /// this copy enforces the invariant that generatio doesn't change during a Tenant's lifetime.
278 : generation: Generation,
279 :
280 : timelines: Mutex<HashMap<TimelineId, Arc<Timeline>>>,
281 :
282 : /// During timeline creation, we first insert the TimelineId to the
283 : /// creating map, then `timelines`, then remove it from the creating map.
284 : /// **Lock order**: if acquring both, acquire`timelines` before `timelines_creating`
285 : timelines_creating: std::sync::Mutex<HashSet<TimelineId>>,
286 :
287 : // This mutex prevents creation of new timelines during GC.
288 : // Adding yet another mutex (in addition to `timelines`) is needed because holding
289 : // `timelines` mutex during all GC iteration
290 : // may block for a long time `get_timeline`, `get_timelines_state`,... and other operations
291 : // with timelines, which in turn may cause dropping replication connection, expiration of wait_for_lsn
292 : // timeout...
293 : gc_cs: tokio::sync::Mutex<()>,
294 : walredo_mgr: Option<Arc<WalRedoManager>>,
295 :
296 : // provides access to timeline data sitting in the remote storage
297 : pub(crate) remote_storage: GenericRemoteStorage,
298 :
299 : // Access to global deletion queue for when this tenant wants to schedule a deletion
300 : deletion_queue_client: DeletionQueueClient,
301 :
302 : /// Cached logical sizes updated updated on each [`Tenant::gather_size_inputs`].
303 : cached_logical_sizes: tokio::sync::Mutex<HashMap<(TimelineId, Lsn), u64>>,
304 : cached_synthetic_tenant_size: Arc<AtomicU64>,
305 :
306 : eviction_task_tenant_state: tokio::sync::Mutex<EvictionTaskTenantState>,
307 :
308 : /// If the tenant is in Activating state, notify this to encourage it
309 : /// to proceed to Active as soon as possible, rather than waiting for lazy
310 : /// background warmup.
311 : pub(crate) activate_now_sem: tokio::sync::Semaphore,
312 :
313 : pub(crate) delete_progress: Arc<tokio::sync::Mutex<DeleteTenantFlow>>,
314 :
315 : // Cancellation token fires when we have entered shutdown(). This is a parent of
316 : // Timelines' cancellation token.
317 : pub(crate) cancel: CancellationToken,
318 :
319 : // Users of the Tenant such as the page service must take this Gate to avoid
320 : // trying to use a Tenant which is shutting down.
321 : pub(crate) gate: Gate,
322 :
323 : /// Throttle applied at the top of [`Timeline::get`].
324 : /// All [`Tenant::timelines`] of a given [`Tenant`] instance share the same [`throttle::Throttle`] instance.
325 : pub(crate) timeline_get_throttle:
326 : Arc<throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>>,
327 :
328 : /// An ongoing timeline detach must be checked during attempts to GC or compact a timeline.
329 : ongoing_timeline_detach: std::sync::Mutex<Option<(TimelineId, utils::completion::Barrier)>>,
330 : }
331 :
332 : impl std::fmt::Debug for Tenant {
333 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
334 0 : write!(f, "{} ({})", self.tenant_shard_id, self.current_state())
335 0 : }
336 : }
337 :
338 : pub(crate) enum WalRedoManager {
339 : Prod(PostgresRedoManager),
340 : #[cfg(test)]
341 : Test(harness::TestRedoManager),
342 : }
343 :
344 : impl From<PostgresRedoManager> for WalRedoManager {
345 0 : fn from(mgr: PostgresRedoManager) -> Self {
346 0 : Self::Prod(mgr)
347 0 : }
348 : }
349 :
350 : #[cfg(test)]
351 : impl From<harness::TestRedoManager> for WalRedoManager {
352 130 : fn from(mgr: harness::TestRedoManager) -> Self {
353 130 : Self::Test(mgr)
354 130 : }
355 : }
356 :
357 : impl WalRedoManager {
358 0 : pub(crate) fn maybe_quiesce(&self, idle_timeout: Duration) {
359 0 : match self {
360 0 : Self::Prod(mgr) => mgr.maybe_quiesce(idle_timeout),
361 0 : #[cfg(test)]
362 0 : Self::Test(_) => {
363 0 : // Not applicable to test redo manager
364 0 : }
365 0 : }
366 0 : }
367 :
368 : /// # Cancel-Safety
369 : ///
370 : /// This method is cancellation-safe.
371 12 : pub async fn request_redo(
372 12 : &self,
373 12 : key: crate::repository::Key,
374 12 : lsn: Lsn,
375 12 : base_img: Option<(Lsn, bytes::Bytes)>,
376 12 : records: Vec<(Lsn, crate::walrecord::NeonWalRecord)>,
377 12 : pg_version: u32,
378 12 : ) -> anyhow::Result<bytes::Bytes> {
379 12 : match self {
380 0 : Self::Prod(mgr) => {
381 0 : mgr.request_redo(key, lsn, base_img, records, pg_version)
382 0 : .await
383 : }
384 : #[cfg(test)]
385 12 : Self::Test(mgr) => {
386 12 : mgr.request_redo(key, lsn, base_img, records, pg_version)
387 0 : .await
388 : }
389 : }
390 12 : }
391 :
392 0 : pub(crate) fn status(&self) -> Option<WalRedoManagerStatus> {
393 0 : match self {
394 0 : WalRedoManager::Prod(m) => Some(m.status()),
395 0 : #[cfg(test)]
396 0 : WalRedoManager::Test(_) => None,
397 0 : }
398 0 : }
399 : }
400 :
401 0 : #[derive(Debug, thiserror::Error, PartialEq, Eq)]
402 : pub enum GetTimelineError {
403 : #[error("Timeline {tenant_id}/{timeline_id} is not active, state: {state:?}")]
404 : NotActive {
405 : tenant_id: TenantShardId,
406 : timeline_id: TimelineId,
407 : state: TimelineState,
408 : },
409 : #[error("Timeline {tenant_id}/{timeline_id} was not found")]
410 : NotFound {
411 : tenant_id: TenantShardId,
412 : timeline_id: TimelineId,
413 : },
414 : }
415 :
416 0 : #[derive(Debug, thiserror::Error)]
417 : pub enum LoadLocalTimelineError {
418 : #[error("FailedToLoad")]
419 : Load(#[source] anyhow::Error),
420 : #[error("FailedToResumeDeletion")]
421 : ResumeDeletion(#[source] anyhow::Error),
422 : }
423 :
424 0 : #[derive(thiserror::Error)]
425 : pub enum DeleteTimelineError {
426 : #[error("NotFound")]
427 : NotFound,
428 :
429 : #[error("HasChildren")]
430 : HasChildren(Vec<TimelineId>),
431 :
432 : #[error("Timeline deletion is already in progress")]
433 : AlreadyInProgress(Arc<tokio::sync::Mutex<DeleteTimelineFlow>>),
434 :
435 : #[error(transparent)]
436 : Other(#[from] anyhow::Error),
437 : }
438 :
439 : impl Debug for DeleteTimelineError {
440 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
441 0 : match self {
442 0 : Self::NotFound => write!(f, "NotFound"),
443 0 : Self::HasChildren(c) => f.debug_tuple("HasChildren").field(c).finish(),
444 0 : Self::AlreadyInProgress(_) => f.debug_tuple("AlreadyInProgress").finish(),
445 0 : Self::Other(e) => f.debug_tuple("Other").field(e).finish(),
446 : }
447 0 : }
448 : }
449 :
450 : pub enum SetStoppingError {
451 : AlreadyStopping(completion::Barrier),
452 : Broken,
453 : }
454 :
455 : impl Debug for SetStoppingError {
456 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
457 0 : match self {
458 0 : Self::AlreadyStopping(_) => f.debug_tuple("AlreadyStopping").finish(),
459 0 : Self::Broken => write!(f, "Broken"),
460 : }
461 0 : }
462 : }
463 :
464 0 : #[derive(thiserror::Error, Debug)]
465 : pub enum CreateTimelineError {
466 : #[error("creation of timeline with the given ID is in progress")]
467 : AlreadyCreating,
468 : #[error("timeline already exists with different parameters")]
469 : Conflict,
470 : #[error(transparent)]
471 : AncestorLsn(anyhow::Error),
472 : #[error("ancestor timeline is not active")]
473 : AncestorNotActive,
474 : #[error("tenant shutting down")]
475 : ShuttingDown,
476 : #[error(transparent)]
477 : Other(#[from] anyhow::Error),
478 : }
479 :
480 : #[derive(thiserror::Error, Debug)]
481 : enum InitdbError {
482 : Other(anyhow::Error),
483 : Cancelled,
484 : Spawn(std::io::Result<()>),
485 : Failed(std::process::ExitStatus, Vec<u8>),
486 : }
487 :
488 : impl fmt::Display for InitdbError {
489 0 : fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
490 0 : match self {
491 0 : InitdbError::Cancelled => write!(f, "Operation was cancelled"),
492 0 : InitdbError::Spawn(e) => write!(f, "Spawn error: {:?}", e),
493 0 : InitdbError::Failed(status, stderr) => write!(
494 0 : f,
495 0 : "Command failed with status {:?}: {}",
496 0 : status,
497 0 : String::from_utf8_lossy(stderr)
498 0 : ),
499 0 : InitdbError::Other(e) => write!(f, "Error: {:?}", e),
500 : }
501 0 : }
502 : }
503 :
504 : impl From<std::io::Error> for InitdbError {
505 0 : fn from(error: std::io::Error) -> Self {
506 0 : InitdbError::Spawn(Err(error))
507 0 : }
508 : }
509 :
510 : enum CreateTimelineCause {
511 : Load,
512 : Delete,
513 : }
514 :
515 : impl Tenant {
516 : /// Yet another helper for timeline initialization.
517 : ///
518 : /// - Initializes the Timeline struct and inserts it into the tenant's hash map
519 : /// - Scans the local timeline directory for layer files and builds the layer map
520 : /// - Downloads remote index file and adds remote files to the layer map
521 : /// - Schedules remote upload tasks for any files that are present locally but missing from remote storage.
522 : ///
523 : /// If the operation fails, the timeline is left in the tenant's hash map in Broken state. On success,
524 : /// it is marked as Active.
525 : #[allow(clippy::too_many_arguments)]
526 6 : async fn timeline_init_and_sync(
527 6 : &self,
528 6 : timeline_id: TimelineId,
529 6 : resources: TimelineResources,
530 6 : index_part: Option<IndexPart>,
531 6 : metadata: TimelineMetadata,
532 6 : ancestor: Option<Arc<Timeline>>,
533 6 : last_aux_file_policy: Option<AuxFilePolicy>,
534 6 : _ctx: &RequestContext,
535 6 : ) -> anyhow::Result<()> {
536 6 : let tenant_id = self.tenant_shard_id;
537 :
538 6 : let timeline = self.create_timeline_struct(
539 6 : timeline_id,
540 6 : &metadata,
541 6 : ancestor.clone(),
542 6 : resources,
543 6 : CreateTimelineCause::Load,
544 6 : // This could be derived from ancestor branch + index part. Though the only caller of `timeline_init_and_sync` is `load_remote_timeline`,
545 6 : // there will potentially be other caller of this function in the future, and we don't know whether `index_part` or `ancestor` takes precedence.
546 6 : // Therefore, we pass this field explicitly for now, and remove it once we fully migrate to aux file v2.
547 6 : last_aux_file_policy,
548 6 : )?;
549 6 : let disk_consistent_lsn = timeline.get_disk_consistent_lsn();
550 6 : anyhow::ensure!(
551 6 : disk_consistent_lsn.is_valid(),
552 0 : "Timeline {tenant_id}/{timeline_id} has invalid disk_consistent_lsn"
553 : );
554 6 : assert_eq!(
555 6 : disk_consistent_lsn,
556 6 : metadata.disk_consistent_lsn(),
557 0 : "these are used interchangeably"
558 : );
559 :
560 6 : if let Some(index_part) = index_part.as_ref() {
561 6 : timeline.remote_client.init_upload_queue(index_part)?;
562 :
563 6 : timeline
564 6 : .last_aux_file_policy
565 6 : .store(index_part.last_aux_file_policy());
566 : } else {
567 : // No data on the remote storage, but we have local metadata file. We can end up
568 : // here with timeline_create being interrupted before finishing index part upload.
569 : // By doing what we do here, the index part upload is retried.
570 : // If control plane retries timeline creation in the meantime, the mgmt API handler
571 : // for timeline creation will coalesce on the upload we queue here.
572 :
573 : // FIXME: this branch should be dead code as we no longer write local metadata.
574 :
575 0 : timeline
576 0 : .remote_client
577 0 : .init_upload_queue_for_empty_remote(&metadata)?;
578 0 : timeline
579 0 : .remote_client
580 0 : .schedule_index_upload_for_full_metadata_update(&metadata)?;
581 : }
582 :
583 6 : timeline
584 6 : .load_layer_map(disk_consistent_lsn, index_part)
585 5 : .await
586 6 : .with_context(|| {
587 0 : format!("Failed to load layermap for timeline {tenant_id}/{timeline_id}")
588 6 : })?;
589 :
590 : {
591 : // avoiding holding it across awaits
592 6 : let mut timelines_accessor = self.timelines.lock().unwrap();
593 6 : match timelines_accessor.entry(timeline_id) {
594 : // We should never try and load the same timeline twice during startup
595 : Entry::Occupied(_) => {
596 0 : unreachable!(
597 0 : "Timeline {tenant_id}/{timeline_id} already exists in the tenant map"
598 0 : );
599 : }
600 6 : Entry::Vacant(v) => {
601 6 : v.insert(Arc::clone(&timeline));
602 6 : timeline.maybe_spawn_flush_loop();
603 6 : }
604 6 : }
605 6 : };
606 6 :
607 6 : // Sanity check: a timeline should have some content.
608 6 : anyhow::ensure!(
609 6 : ancestor.is_some()
610 4 : || timeline
611 4 : .layers
612 4 : .read()
613 0 : .await
614 4 : .layer_map()
615 4 : .iter_historic_layers()
616 4 : .next()
617 4 : .is_some(),
618 0 : "Timeline has no ancestor and no layer files"
619 : );
620 :
621 6 : Ok(())
622 6 : }
623 :
624 : /// Attach a tenant that's available in cloud storage.
625 : ///
626 : /// This returns quickly, after just creating the in-memory object
627 : /// Tenant struct and launching a background task to download
628 : /// the remote index files. On return, the tenant is most likely still in
629 : /// Attaching state, and it will become Active once the background task
630 : /// finishes. You can use wait_until_active() to wait for the task to
631 : /// complete.
632 : ///
633 : #[allow(clippy::too_many_arguments)]
634 0 : pub(crate) fn spawn(
635 0 : conf: &'static PageServerConf,
636 0 : tenant_shard_id: TenantShardId,
637 0 : resources: TenantSharedResources,
638 0 : attached_conf: AttachedTenantConf,
639 0 : shard_identity: ShardIdentity,
640 0 : init_order: Option<InitializationOrder>,
641 0 : tenants: &'static std::sync::RwLock<TenantsMap>,
642 0 : mode: SpawnMode,
643 0 : ctx: &RequestContext,
644 0 : ) -> anyhow::Result<Arc<Tenant>> {
645 0 : let wal_redo_manager = Arc::new(WalRedoManager::from(PostgresRedoManager::new(
646 0 : conf,
647 0 : tenant_shard_id,
648 0 : )));
649 0 :
650 0 : let TenantSharedResources {
651 0 : broker_client,
652 0 : remote_storage,
653 0 : deletion_queue_client,
654 0 : } = resources;
655 0 :
656 0 : let attach_mode = attached_conf.location.attach_mode;
657 0 : let generation = attached_conf.location.generation;
658 0 :
659 0 : let tenant = Arc::new(Tenant::new(
660 0 : TenantState::Attaching,
661 0 : conf,
662 0 : attached_conf,
663 0 : shard_identity,
664 0 : Some(wal_redo_manager),
665 0 : tenant_shard_id,
666 0 : remote_storage.clone(),
667 0 : deletion_queue_client,
668 0 : ));
669 0 :
670 0 : // The attach task will carry a GateGuard, so that shutdown() reliably waits for it to drop out if
671 0 : // we shut down while attaching.
672 0 : let attach_gate_guard = tenant
673 0 : .gate
674 0 : .enter()
675 0 : .expect("We just created the Tenant: nothing else can have shut it down yet");
676 0 :
677 0 : // Do all the hard work in the background
678 0 : let tenant_clone = Arc::clone(&tenant);
679 0 : let ctx = ctx.detached_child(TaskKind::Attach, DownloadBehavior::Warn);
680 0 : task_mgr::spawn(
681 0 : &tokio::runtime::Handle::current(),
682 0 : TaskKind::Attach,
683 0 : Some(tenant_shard_id),
684 0 : None,
685 0 : "attach tenant",
686 : false,
687 0 : async move {
688 0 :
689 0 : info!(
690 : ?attach_mode,
691 0 : "Attaching tenant"
692 : );
693 :
694 0 : let _gate_guard = attach_gate_guard;
695 0 :
696 0 : // Is this tenant being spawned as part of process startup?
697 0 : let starting_up = init_order.is_some();
698 : scopeguard::defer! {
699 : if starting_up {
700 : TENANT.startup_complete.inc();
701 : }
702 : }
703 :
704 : // Ideally we should use Tenant::set_broken_no_wait, but it is not supposed to be used when tenant is in loading state.
705 : enum BrokenVerbosity {
706 : Error,
707 : Info
708 : }
709 0 : let make_broken =
710 0 : |t: &Tenant, err: anyhow::Error, verbosity: BrokenVerbosity| {
711 0 : match verbosity {
712 : BrokenVerbosity::Info => {
713 0 : info!("attach cancelled, setting tenant state to Broken: {err}");
714 : },
715 : BrokenVerbosity::Error => {
716 0 : error!("attach failed, setting tenant state to Broken: {err:?}");
717 : }
718 : }
719 0 : t.state.send_modify(|state| {
720 0 : // The Stopping case is for when we have passed control on to DeleteTenantFlow:
721 0 : // if it errors, we will call make_broken when tenant is already in Stopping.
722 0 : assert!(
723 0 : matches!(*state, TenantState::Attaching | TenantState::Stopping { .. }),
724 0 : "the attach task owns the tenant state until activation is complete"
725 : );
726 :
727 0 : *state = TenantState::broken_from_reason(err.to_string());
728 0 : });
729 0 : };
730 :
731 0 : let mut init_order = init_order;
732 0 : // take the completion because initial tenant loading will complete when all of
733 0 : // these tasks complete.
734 0 : let _completion = init_order
735 0 : .as_mut()
736 0 : .and_then(|x| x.initial_tenant_load.take());
737 0 : let remote_load_completion = init_order
738 0 : .as_mut()
739 0 : .and_then(|x| x.initial_tenant_load_remote.take());
740 :
741 : enum AttachType<'a> {
742 : /// We are attaching this tenant lazily in the background.
743 : Warmup {
744 : _permit: tokio::sync::SemaphorePermit<'a>,
745 : during_startup: bool
746 : },
747 : /// We are attaching this tenant as soon as we can, because for example an
748 : /// endpoint tried to access it.
749 : OnDemand,
750 : /// During normal operations after startup, we are attaching a tenant, and
751 : /// eager attach was requested.
752 : Normal,
753 : }
754 :
755 0 : let attach_type = if matches!(mode, SpawnMode::Lazy) {
756 : // Before doing any I/O, wait for at least one of:
757 : // - A client attempting to access to this tenant (on-demand loading)
758 : // - A permit becoming available in the warmup semaphore (background warmup)
759 :
760 : tokio::select!(
761 : permit = tenant_clone.activate_now_sem.acquire() => {
762 : let _ = permit.expect("activate_now_sem is never closed");
763 : tracing::info!("Activating tenant (on-demand)");
764 : AttachType::OnDemand
765 : },
766 : permit = conf.concurrent_tenant_warmup.inner().acquire() => {
767 : let _permit = permit.expect("concurrent_tenant_warmup semaphore is never closed");
768 : tracing::info!("Activating tenant (warmup)");
769 : AttachType::Warmup {
770 : _permit,
771 : during_startup: init_order.is_some()
772 : }
773 : }
774 : _ = tenant_clone.cancel.cancelled() => {
775 : // This is safe, but should be pretty rare: it is interesting if a tenant
776 : // stayed in Activating for such a long time that shutdown found it in
777 : // that state.
778 : tracing::info!(state=%tenant_clone.current_state(), "Tenant shut down before activation");
779 : // Make the tenant broken so that set_stopping will not hang waiting for it to leave
780 : // the Attaching state. This is an over-reaction (nothing really broke, the tenant is
781 : // just shutting down), but ensures progress.
782 : make_broken(&tenant_clone, anyhow::anyhow!("Shut down while Attaching"), BrokenVerbosity::Info);
783 : return Ok(());
784 : },
785 : )
786 : } else {
787 : // SpawnMode::{Create,Eager} always cause jumping ahead of the
788 : // concurrent_tenant_warmup queue
789 0 : AttachType::Normal
790 : };
791 :
792 0 : let preload = match &mode {
793 : SpawnMode::Create => {
794 0 : None
795 : },
796 : SpawnMode::Eager | SpawnMode::Lazy => {
797 0 : let _preload_timer = TENANT.preload.start_timer();
798 0 : let res = tenant_clone
799 0 : .preload(&remote_storage, task_mgr::shutdown_token())
800 0 : .await;
801 0 : match res {
802 0 : Ok(p) => Some(p),
803 0 : Err(e) => {
804 0 : make_broken(&tenant_clone, anyhow::anyhow!(e), BrokenVerbosity::Error);
805 0 : return Ok(());
806 : }
807 : }
808 : }
809 :
810 : };
811 :
812 : // Remote preload is complete.
813 0 : drop(remote_load_completion);
814 :
815 0 : let pending_deletion = {
816 0 : match DeleteTenantFlow::should_resume_deletion(
817 0 : conf,
818 0 : preload.as_ref().map(|p| p.deleting).unwrap_or(false),
819 0 : &tenant_clone,
820 0 : )
821 0 : .await
822 : {
823 0 : Ok(should_resume_deletion) => should_resume_deletion,
824 0 : Err(err) => {
825 0 : make_broken(&tenant_clone, anyhow::anyhow!(err), BrokenVerbosity::Error);
826 0 : return Ok(());
827 : }
828 : }
829 : };
830 :
831 0 : info!("pending_deletion {}", pending_deletion.is_some());
832 :
833 0 : if let Some(deletion) = pending_deletion {
834 : // as we are no longer loading, signal completion by dropping
835 : // the completion while we resume deletion
836 0 : drop(_completion);
837 0 : let background_jobs_can_start =
838 0 : init_order.as_ref().map(|x| &x.background_jobs_can_start);
839 0 : if let Some(background) = background_jobs_can_start {
840 0 : info!("waiting for backgound jobs barrier");
841 0 : background.clone().wait().await;
842 0 : info!("ready for backgound jobs barrier");
843 0 : }
844 :
845 0 : let deleted = DeleteTenantFlow::resume_from_attach(
846 0 : deletion,
847 0 : &tenant_clone,
848 0 : preload,
849 0 : tenants,
850 0 : &ctx,
851 0 : )
852 0 : .await;
853 :
854 0 : if let Err(e) = deleted {
855 0 : make_broken(&tenant_clone, anyhow::anyhow!(e), BrokenVerbosity::Error);
856 0 : }
857 :
858 0 : return Ok(());
859 0 : }
860 :
861 : // We will time the duration of the attach phase unless this is a creation (attach will do no work)
862 0 : let attached = {
863 0 : let _attach_timer = match mode {
864 0 : SpawnMode::Create => None,
865 0 : SpawnMode::Eager | SpawnMode::Lazy => Some(TENANT.attach.start_timer()),
866 : };
867 0 : tenant_clone.attach(preload, mode, &ctx).await
868 : };
869 :
870 0 : match attached {
871 : Ok(()) => {
872 0 : info!("attach finished, activating");
873 0 : tenant_clone.activate(broker_client, None, &ctx);
874 : }
875 0 : Err(e) => {
876 0 : make_broken(&tenant_clone, anyhow::anyhow!(e), BrokenVerbosity::Error);
877 0 : }
878 : }
879 :
880 : // If we are doing an opportunistic warmup attachment at startup, initialize
881 : // logical size at the same time. This is better than starting a bunch of idle tenants
882 : // with cold caches and then coming back later to initialize their logical sizes.
883 : //
884 : // It also prevents the warmup proccess competing with the concurrency limit on
885 : // logical size calculations: if logical size calculation semaphore is saturated,
886 : // then warmup will wait for that before proceeding to the next tenant.
887 0 : if matches!(attach_type, AttachType::Warmup { during_startup: true, .. }) {
888 0 : let mut futs: FuturesUnordered<_> = tenant_clone.timelines.lock().unwrap().values().cloned().map(|t| t.await_initial_logical_size()).collect();
889 0 : tracing::info!("Waiting for initial logical sizes while warming up...");
890 0 : while futs.next().await.is_some() {}
891 0 : tracing::info!("Warm-up complete");
892 0 : }
893 :
894 0 : Ok(())
895 0 : }
896 0 : .instrument(tracing::info_span!(parent: None, "attach", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), gen=?generation)),
897 : );
898 0 : Ok(tenant)
899 0 : }
900 :
901 260 : #[instrument(skip_all)]
902 : pub(crate) async fn preload(
903 : self: &Arc<Self>,
904 : remote_storage: &GenericRemoteStorage,
905 : cancel: CancellationToken,
906 : ) -> anyhow::Result<TenantPreload> {
907 : span::debug_assert_current_span_has_tenant_id();
908 : // Get list of remote timelines
909 : // download index files for every tenant timeline
910 : info!("listing remote timelines");
911 : let (remote_timeline_ids, other_keys) = remote_timeline_client::list_remote_timelines(
912 : remote_storage,
913 : self.tenant_shard_id,
914 : cancel.clone(),
915 : )
916 : .await?;
917 :
918 : let deleting = other_keys.contains(TENANT_DELETED_MARKER_FILE_NAME);
919 : info!(
920 : "found {} timelines, deleting={}",
921 : remote_timeline_ids.len(),
922 : deleting
923 : );
924 :
925 : for k in other_keys {
926 : if k != TENANT_DELETED_MARKER_FILE_NAME {
927 : warn!("Unexpected non timeline key {k}");
928 : }
929 : }
930 :
931 : Ok(TenantPreload {
932 : deleting,
933 : timelines: Self::load_timeline_metadata(
934 : self,
935 : remote_timeline_ids,
936 : remote_storage,
937 : cancel,
938 : )
939 : .await?,
940 : })
941 : }
942 :
943 : ///
944 : /// Background task that downloads all data for a tenant and brings it to Active state.
945 : ///
946 : /// No background tasks are started as part of this routine.
947 : ///
948 130 : async fn attach(
949 130 : self: &Arc<Tenant>,
950 130 : preload: Option<TenantPreload>,
951 130 : mode: SpawnMode,
952 130 : ctx: &RequestContext,
953 130 : ) -> anyhow::Result<()> {
954 130 : span::debug_assert_current_span_has_tenant_id();
955 130 :
956 130 : failpoint_support::sleep_millis_async!("before-attaching-tenant");
957 :
958 130 : let preload = match (preload, mode) {
959 130 : (Some(p), _) => p,
960 0 : (None, SpawnMode::Create) => TenantPreload {
961 0 : deleting: false,
962 0 : timelines: HashMap::new(),
963 0 : },
964 : (None, _) => {
965 0 : anyhow::bail!("local-only deployment is no longer supported, https://github.com/neondatabase/neon/issues/5624");
966 : }
967 : };
968 :
969 130 : let mut timelines_to_resume_deletions = vec![];
970 130 :
971 130 : let mut remote_index_and_client = HashMap::new();
972 130 : let mut timeline_ancestors = HashMap::new();
973 130 : let mut existent_timelines = HashSet::new();
974 136 : for (timeline_id, preload) in preload.timelines {
975 6 : let index_part = match preload.index_part {
976 6 : Ok(i) => {
977 6 : debug!("remote index part exists for timeline {timeline_id}");
978 : // We found index_part on the remote, this is the standard case.
979 6 : existent_timelines.insert(timeline_id);
980 6 : i
981 : }
982 : Err(DownloadError::NotFound) => {
983 : // There is no index_part on the remote. We only get here
984 : // if there is some prefix for the timeline in the remote storage.
985 : // This can e.g. be the initdb.tar.zst archive, maybe a
986 : // remnant from a prior incomplete creation or deletion attempt.
987 : // Delete the local directory as the deciding criterion for a
988 : // timeline's existence is presence of index_part.
989 0 : info!(%timeline_id, "index_part not found on remote");
990 0 : continue;
991 : }
992 0 : Err(e) => {
993 0 : // Some (possibly ephemeral) error happened during index_part download.
994 0 : // Pretend the timeline exists to not delete the timeline directory,
995 0 : // as it might be a temporary issue and we don't want to re-download
996 0 : // everything after it resolves.
997 0 : warn!(%timeline_id, "Failed to load index_part from remote storage, failed creation? ({e})");
998 :
999 0 : existent_timelines.insert(timeline_id);
1000 0 : continue;
1001 : }
1002 : };
1003 6 : match index_part {
1004 6 : MaybeDeletedIndexPart::IndexPart(index_part) => {
1005 6 : timeline_ancestors.insert(timeline_id, index_part.metadata.clone());
1006 6 : remote_index_and_client.insert(timeline_id, (index_part, preload.client));
1007 6 : }
1008 0 : MaybeDeletedIndexPart::Deleted(index_part) => {
1009 0 : info!(
1010 0 : "timeline {} is deleted, picking to resume deletion",
1011 : timeline_id
1012 : );
1013 0 : timelines_to_resume_deletions.push((timeline_id, index_part, preload.client));
1014 : }
1015 : }
1016 : }
1017 :
1018 : // For every timeline, download the metadata file, scan the local directory,
1019 : // and build a layer map that contains an entry for each remote and local
1020 : // layer file.
1021 130 : let sorted_timelines = tree_sort_timelines(timeline_ancestors, |m| m.ancestor_timeline())?;
1022 136 : for (timeline_id, remote_metadata) in sorted_timelines {
1023 6 : let (index_part, remote_client) = remote_index_and_client
1024 6 : .remove(&timeline_id)
1025 6 : .expect("just put it in above");
1026 6 :
1027 6 : // TODO again handle early failure
1028 6 : self.load_remote_timeline(
1029 6 : timeline_id,
1030 6 : index_part,
1031 6 : remote_metadata,
1032 6 : TimelineResources {
1033 6 : remote_client,
1034 6 : deletion_queue_client: self.deletion_queue_client.clone(),
1035 6 : timeline_get_throttle: self.timeline_get_throttle.clone(),
1036 6 : },
1037 6 : ctx,
1038 6 : )
1039 11 : .await
1040 6 : .with_context(|| {
1041 0 : format!(
1042 0 : "failed to load remote timeline {} for tenant {}",
1043 0 : timeline_id, self.tenant_shard_id
1044 0 : )
1045 6 : })?;
1046 : }
1047 :
1048 : // Walk through deleted timelines, resume deletion
1049 130 : for (timeline_id, index_part, remote_timeline_client) in timelines_to_resume_deletions {
1050 0 : remote_timeline_client
1051 0 : .init_upload_queue_stopped_to_continue_deletion(&index_part)
1052 0 : .context("init queue stopped")
1053 0 : .map_err(LoadLocalTimelineError::ResumeDeletion)?;
1054 :
1055 0 : DeleteTimelineFlow::resume_deletion(
1056 0 : Arc::clone(self),
1057 0 : timeline_id,
1058 0 : &index_part.metadata,
1059 0 : remote_timeline_client,
1060 0 : self.deletion_queue_client.clone(),
1061 0 : )
1062 0 : .instrument(tracing::info_span!("timeline_delete", %timeline_id))
1063 0 : .await
1064 0 : .context("resume_deletion")
1065 0 : .map_err(LoadLocalTimelineError::ResumeDeletion)?;
1066 : }
1067 :
1068 : // The local filesystem contents are a cache of what's in the remote IndexPart;
1069 : // IndexPart is the source of truth.
1070 130 : self.clean_up_timelines(&existent_timelines)?;
1071 :
1072 130 : fail::fail_point!("attach-before-activate", |_| {
1073 0 : anyhow::bail!("attach-before-activate");
1074 130 : });
1075 130 : failpoint_support::sleep_millis_async!("attach-before-activate-sleep", &self.cancel);
1076 :
1077 130 : info!("Done");
1078 :
1079 130 : Ok(())
1080 130 : }
1081 :
1082 : /// Check for any local timeline directories that are temporary, or do not correspond to a
1083 : /// timeline that still exists: this can happen if we crashed during a deletion/creation, or
1084 : /// if a timeline was deleted while the tenant was attached to a different pageserver.
1085 130 : fn clean_up_timelines(&self, existent_timelines: &HashSet<TimelineId>) -> anyhow::Result<()> {
1086 130 : let timelines_dir = self.conf.timelines_path(&self.tenant_shard_id);
1087 :
1088 130 : let entries = match timelines_dir.read_dir_utf8() {
1089 130 : Ok(d) => d,
1090 0 : Err(e) => {
1091 0 : if e.kind() == std::io::ErrorKind::NotFound {
1092 0 : return Ok(());
1093 : } else {
1094 0 : return Err(e).context("list timelines directory for tenant");
1095 : }
1096 : }
1097 : };
1098 :
1099 138 : for entry in entries {
1100 8 : let entry = entry.context("read timeline dir entry")?;
1101 8 : let entry_path = entry.path();
1102 :
1103 8 : let purge = if crate::is_temporary(entry_path)
1104 : // TODO: remove uninit mark code (https://github.com/neondatabase/neon/issues/5718)
1105 8 : || is_uninit_mark(entry_path)
1106 8 : || crate::is_delete_mark(entry_path)
1107 : {
1108 0 : true
1109 : } else {
1110 8 : match TimelineId::try_from(entry_path.file_name()) {
1111 8 : Ok(i) => {
1112 8 : // Purge if the timeline ID does not exist in remote storage: remote storage is the authority.
1113 8 : !existent_timelines.contains(&i)
1114 : }
1115 0 : Err(e) => {
1116 0 : tracing::warn!(
1117 0 : "Unparseable directory in timelines directory: {entry_path}, ignoring ({e})"
1118 : );
1119 : // Do not purge junk: if we don't recognize it, be cautious and leave it for a human.
1120 0 : false
1121 : }
1122 : }
1123 : };
1124 :
1125 8 : if purge {
1126 2 : tracing::info!("Purging stale timeline dentry {entry_path}");
1127 2 : if let Err(e) = match entry.file_type() {
1128 2 : Ok(t) => if t.is_dir() {
1129 2 : std::fs::remove_dir_all(entry_path)
1130 : } else {
1131 0 : std::fs::remove_file(entry_path)
1132 : }
1133 2 : .or_else(fs_ext::ignore_not_found),
1134 0 : Err(e) => Err(e),
1135 : } {
1136 0 : tracing::warn!("Failed to purge stale timeline dentry {entry_path}: {e}");
1137 2 : }
1138 6 : }
1139 : }
1140 :
1141 130 : Ok(())
1142 130 : }
1143 :
1144 : /// Get sum of all remote timelines sizes
1145 : ///
1146 : /// This function relies on the index_part instead of listing the remote storage
1147 0 : pub fn remote_size(&self) -> u64 {
1148 0 : let mut size = 0;
1149 :
1150 0 : for timeline in self.list_timelines() {
1151 0 : size += timeline.remote_client.get_remote_physical_size();
1152 0 : }
1153 :
1154 0 : size
1155 0 : }
1156 :
1157 12 : #[instrument(skip_all, fields(timeline_id=%timeline_id))]
1158 : async fn load_remote_timeline(
1159 : &self,
1160 : timeline_id: TimelineId,
1161 : index_part: IndexPart,
1162 : remote_metadata: TimelineMetadata,
1163 : resources: TimelineResources,
1164 : ctx: &RequestContext,
1165 : ) -> anyhow::Result<()> {
1166 : span::debug_assert_current_span_has_tenant_id();
1167 :
1168 : info!("downloading index file for timeline {}", timeline_id);
1169 : tokio::fs::create_dir_all(self.conf.timeline_path(&self.tenant_shard_id, &timeline_id))
1170 : .await
1171 : .context("Failed to create new timeline directory")?;
1172 :
1173 : let ancestor = if let Some(ancestor_id) = remote_metadata.ancestor_timeline() {
1174 : let timelines = self.timelines.lock().unwrap();
1175 : Some(Arc::clone(timelines.get(&ancestor_id).ok_or_else(
1176 0 : || {
1177 0 : anyhow::anyhow!(
1178 0 : "cannot find ancestor timeline {ancestor_id} for timeline {timeline_id}"
1179 0 : )
1180 0 : },
1181 : )?))
1182 : } else {
1183 : None
1184 : };
1185 :
1186 : let last_aux_file_policy = index_part.last_aux_file_policy();
1187 :
1188 : self.timeline_init_and_sync(
1189 : timeline_id,
1190 : resources,
1191 : Some(index_part),
1192 : remote_metadata,
1193 : ancestor,
1194 : last_aux_file_policy,
1195 : ctx,
1196 : )
1197 : .await
1198 : }
1199 :
1200 : /// Create a placeholder Tenant object for a broken tenant
1201 0 : pub fn create_broken_tenant(
1202 0 : conf: &'static PageServerConf,
1203 0 : tenant_shard_id: TenantShardId,
1204 0 : remote_storage: GenericRemoteStorage,
1205 0 : reason: String,
1206 0 : ) -> Arc<Tenant> {
1207 0 : Arc::new(Tenant::new(
1208 0 : TenantState::Broken {
1209 0 : reason,
1210 0 : backtrace: String::new(),
1211 0 : },
1212 0 : conf,
1213 0 : AttachedTenantConf::try_from(LocationConf::default()).unwrap(),
1214 0 : // Shard identity isn't meaningful for a broken tenant: it's just a placeholder
1215 0 : // to occupy the slot for this TenantShardId.
1216 0 : ShardIdentity::broken(tenant_shard_id.shard_number, tenant_shard_id.shard_count),
1217 0 : None,
1218 0 : tenant_shard_id,
1219 0 : remote_storage,
1220 0 : DeletionQueueClient::broken(),
1221 0 : ))
1222 0 : }
1223 :
1224 130 : async fn load_timeline_metadata(
1225 130 : self: &Arc<Tenant>,
1226 130 : timeline_ids: HashSet<TimelineId>,
1227 130 : remote_storage: &GenericRemoteStorage,
1228 130 : cancel: CancellationToken,
1229 130 : ) -> anyhow::Result<HashMap<TimelineId, TimelinePreload>> {
1230 130 : let mut part_downloads = JoinSet::new();
1231 136 : for timeline_id in timeline_ids {
1232 6 : let client = RemoteTimelineClient::new(
1233 6 : remote_storage.clone(),
1234 6 : self.deletion_queue_client.clone(),
1235 6 : self.conf,
1236 6 : self.tenant_shard_id,
1237 6 : timeline_id,
1238 6 : self.generation,
1239 6 : );
1240 6 : let cancel_clone = cancel.clone();
1241 6 : part_downloads.spawn(
1242 6 : async move {
1243 6 : debug!("starting index part download");
1244 :
1245 24 : let index_part = client.download_index_file(&cancel_clone).await;
1246 :
1247 6 : debug!("finished index part download");
1248 :
1249 6 : Result::<_, anyhow::Error>::Ok(TimelinePreload {
1250 6 : client,
1251 6 : timeline_id,
1252 6 : index_part,
1253 6 : })
1254 6 : }
1255 6 : .map(move |res| {
1256 6 : res.with_context(|| format!("download index part for timeline {timeline_id}"))
1257 6 : })
1258 6 : .instrument(info_span!("download_index_part", %timeline_id)),
1259 : );
1260 : }
1261 :
1262 130 : let mut timeline_preloads: HashMap<TimelineId, TimelinePreload> = HashMap::new();
1263 :
1264 136 : loop {
1265 136 : tokio::select!(
1266 : next = part_downloads.join_next() => {
1267 : match next {
1268 : Some(result) => {
1269 : let preload_result = result.context("join preload task")?;
1270 : let preload = preload_result?;
1271 : timeline_preloads.insert(preload.timeline_id, preload);
1272 : },
1273 : None => {
1274 : break;
1275 : }
1276 : }
1277 : },
1278 : _ = cancel.cancelled() => {
1279 : anyhow::bail!("Cancelled while waiting for remote index download")
1280 : }
1281 136 : )
1282 136 : }
1283 :
1284 130 : Ok(timeline_preloads)
1285 130 : }
1286 :
1287 4 : pub(crate) fn tenant_shard_id(&self) -> TenantShardId {
1288 4 : self.tenant_shard_id
1289 4 : }
1290 :
1291 : /// Get Timeline handle for given Neon timeline ID.
1292 : /// This function is idempotent. It doesn't change internal state in any way.
1293 3536 : pub fn get_timeline(
1294 3536 : &self,
1295 3536 : timeline_id: TimelineId,
1296 3536 : active_only: bool,
1297 3536 : ) -> Result<Arc<Timeline>, GetTimelineError> {
1298 3536 : let timelines_accessor = self.timelines.lock().unwrap();
1299 3536 : let timeline = timelines_accessor
1300 3536 : .get(&timeline_id)
1301 3536 : .ok_or(GetTimelineError::NotFound {
1302 3536 : tenant_id: self.tenant_shard_id,
1303 3536 : timeline_id,
1304 3536 : })?;
1305 :
1306 3534 : if active_only && !timeline.is_active() {
1307 0 : Err(GetTimelineError::NotActive {
1308 0 : tenant_id: self.tenant_shard_id,
1309 0 : timeline_id,
1310 0 : state: timeline.current_state(),
1311 0 : })
1312 : } else {
1313 3534 : Ok(Arc::clone(timeline))
1314 : }
1315 3536 : }
1316 :
1317 : /// Lists timelines the tenant contains.
1318 : /// Up to tenant's implementation to omit certain timelines that ar not considered ready for use.
1319 8 : pub fn list_timelines(&self) -> Vec<Arc<Timeline>> {
1320 8 : self.timelines
1321 8 : .lock()
1322 8 : .unwrap()
1323 8 : .values()
1324 8 : .map(Arc::clone)
1325 8 : .collect()
1326 8 : }
1327 :
1328 0 : pub fn list_timeline_ids(&self) -> Vec<TimelineId> {
1329 0 : self.timelines.lock().unwrap().keys().cloned().collect()
1330 0 : }
1331 :
1332 : /// This is used to create the initial 'main' timeline during bootstrapping,
1333 : /// or when importing a new base backup. The caller is expected to load an
1334 : /// initial image of the datadir to the new timeline after this.
1335 : ///
1336 : /// Until that happens, the on-disk state is invalid (disk_consistent_lsn=Lsn(0))
1337 : /// and the timeline will fail to load at a restart.
1338 : ///
1339 : /// For tests, use `DatadirModification::init_empty_test_timeline` + `commit` to setup the
1340 : /// minimum amount of keys required to get a writable timeline.
1341 : /// (Without it, `put` might fail due to `repartition` failing.)
1342 122 : pub(crate) async fn create_empty_timeline(
1343 122 : &self,
1344 122 : new_timeline_id: TimelineId,
1345 122 : initdb_lsn: Lsn,
1346 122 : pg_version: u32,
1347 122 : _ctx: &RequestContext,
1348 122 : ) -> anyhow::Result<UninitializedTimeline> {
1349 122 : anyhow::ensure!(
1350 122 : self.is_active(),
1351 0 : "Cannot create empty timelines on inactive tenant"
1352 : );
1353 :
1354 : // Protect against concurrent attempts to use this TimelineId
1355 122 : let create_guard = self.create_timeline_create_guard(new_timeline_id)?;
1356 :
1357 120 : let new_metadata = TimelineMetadata::new(
1358 120 : // Initialize disk_consistent LSN to 0, The caller must import some data to
1359 120 : // make it valid, before calling finish_creation()
1360 120 : Lsn(0),
1361 120 : None,
1362 120 : None,
1363 120 : Lsn(0),
1364 120 : initdb_lsn,
1365 120 : initdb_lsn,
1366 120 : pg_version,
1367 120 : );
1368 120 : self.prepare_new_timeline(
1369 120 : new_timeline_id,
1370 120 : &new_metadata,
1371 120 : create_guard,
1372 120 : initdb_lsn,
1373 120 : None,
1374 120 : None,
1375 120 : )
1376 0 : .await
1377 122 : }
1378 :
1379 : /// Helper for unit tests to create an empty timeline.
1380 : ///
1381 : /// The timeline is has state value `Active` but its background loops are not running.
1382 : // This makes the various functions which anyhow::ensure! for Active state work in tests.
1383 : // Our current tests don't need the background loops.
1384 : #[cfg(test)]
1385 112 : pub async fn create_test_timeline(
1386 112 : &self,
1387 112 : new_timeline_id: TimelineId,
1388 112 : initdb_lsn: Lsn,
1389 112 : pg_version: u32,
1390 112 : ctx: &RequestContext,
1391 112 : ) -> anyhow::Result<Arc<Timeline>> {
1392 112 : let uninit_tl = self
1393 112 : .create_empty_timeline(new_timeline_id, initdb_lsn, pg_version, ctx)
1394 0 : .await?;
1395 112 : let tline = uninit_tl.raw_timeline().expect("we just created it");
1396 112 : assert_eq!(tline.get_last_record_lsn(), Lsn(0));
1397 :
1398 : // Setup minimum keys required for the timeline to be usable.
1399 112 : let mut modification = tline.begin_modification(initdb_lsn);
1400 112 : modification
1401 112 : .init_empty_test_timeline()
1402 112 : .context("init_empty_test_timeline")?;
1403 112 : modification
1404 112 : .commit(ctx)
1405 110 : .await
1406 112 : .context("commit init_empty_test_timeline modification")?;
1407 :
1408 : // Flush to disk so that uninit_tl's check for valid disk_consistent_lsn passes.
1409 112 : tline.maybe_spawn_flush_loop();
1410 112 : tline.freeze_and_flush().await.context("freeze_and_flush")?;
1411 :
1412 : // Make sure the freeze_and_flush reaches remote storage.
1413 112 : tline.remote_client.wait_completion().await.unwrap();
1414 :
1415 112 : let tl = uninit_tl.finish_creation()?;
1416 : // The non-test code would call tl.activate() here.
1417 112 : tl.set_state(TimelineState::Active);
1418 112 : Ok(tl)
1419 112 : }
1420 :
1421 : /// Create a new timeline.
1422 : ///
1423 : /// Returns the new timeline ID and reference to its Timeline object.
1424 : ///
1425 : /// If the caller specified the timeline ID to use (`new_timeline_id`), and timeline with
1426 : /// the same timeline ID already exists, returns CreateTimelineError::AlreadyExists.
1427 : #[allow(clippy::too_many_arguments)]
1428 0 : pub(crate) async fn create_timeline(
1429 0 : self: &Arc<Tenant>,
1430 0 : new_timeline_id: TimelineId,
1431 0 : ancestor_timeline_id: Option<TimelineId>,
1432 0 : mut ancestor_start_lsn: Option<Lsn>,
1433 0 : pg_version: u32,
1434 0 : load_existing_initdb: Option<TimelineId>,
1435 0 : broker_client: storage_broker::BrokerClientChannel,
1436 0 : ctx: &RequestContext,
1437 0 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
1438 0 : if !self.is_active() {
1439 0 : if matches!(self.current_state(), TenantState::Stopping { .. }) {
1440 0 : return Err(CreateTimelineError::ShuttingDown);
1441 : } else {
1442 0 : return Err(CreateTimelineError::Other(anyhow::anyhow!(
1443 0 : "Cannot create timelines on inactive tenant"
1444 0 : )));
1445 : }
1446 0 : }
1447 :
1448 0 : let _gate = self
1449 0 : .gate
1450 0 : .enter()
1451 0 : .map_err(|_| CreateTimelineError::ShuttingDown)?;
1452 :
1453 : // Get exclusive access to the timeline ID: this ensures that it does not already exist,
1454 : // and that no other creation attempts will be allowed in while we are working.
1455 0 : let create_guard = match self.create_timeline_create_guard(new_timeline_id) {
1456 0 : Ok(m) => m,
1457 : Err(TimelineExclusionError::AlreadyCreating) => {
1458 : // Creation is in progress, we cannot create it again, and we cannot
1459 : // check if this request matches the existing one, so caller must try
1460 : // again later.
1461 0 : return Err(CreateTimelineError::AlreadyCreating);
1462 : }
1463 0 : Err(TimelineExclusionError::Other(e)) => {
1464 0 : return Err(CreateTimelineError::Other(e));
1465 : }
1466 0 : Err(TimelineExclusionError::AlreadyExists(existing)) => {
1467 0 : debug!("timeline {new_timeline_id} already exists");
1468 :
1469 : // Idempotency: creating the same timeline twice is not an error, unless
1470 : // the second creation has different parameters.
1471 0 : if existing.get_ancestor_timeline_id() != ancestor_timeline_id
1472 0 : || existing.pg_version != pg_version
1473 0 : || (ancestor_start_lsn.is_some()
1474 0 : && ancestor_start_lsn != Some(existing.get_ancestor_lsn()))
1475 : {
1476 0 : return Err(CreateTimelineError::Conflict);
1477 0 : }
1478 0 :
1479 0 : // Wait for uploads to complete, so that when we return Ok, the timeline
1480 0 : // is known to be durable on remote storage. Just like we do at the end of
1481 0 : // this function, after we have created the timeline ourselves.
1482 0 : //
1483 0 : // We only really care that the initial version of `index_part.json` has
1484 0 : // been uploaded. That's enough to remember that the timeline
1485 0 : // exists. However, there is no function to wait specifically for that so
1486 0 : // we just wait for all in-progress uploads to finish.
1487 0 : existing
1488 0 : .remote_client
1489 0 : .wait_completion()
1490 0 : .await
1491 0 : .context("wait for timeline uploads to complete")?;
1492 :
1493 0 : return Ok(existing);
1494 : }
1495 : };
1496 :
1497 : pausable_failpoint!("timeline-creation-after-uninit");
1498 :
1499 0 : let loaded_timeline = match ancestor_timeline_id {
1500 0 : Some(ancestor_timeline_id) => {
1501 0 : let ancestor_timeline = self
1502 0 : .get_timeline(ancestor_timeline_id, false)
1503 0 : .context("Cannot branch off the timeline that's not present in pageserver")?;
1504 :
1505 : // instead of waiting around, just deny the request because ancestor is not yet
1506 : // ready for other purposes either.
1507 0 : if !ancestor_timeline.is_active() {
1508 0 : return Err(CreateTimelineError::AncestorNotActive);
1509 0 : }
1510 :
1511 0 : if let Some(lsn) = ancestor_start_lsn.as_mut() {
1512 0 : *lsn = lsn.align();
1513 0 :
1514 0 : let ancestor_ancestor_lsn = ancestor_timeline.get_ancestor_lsn();
1515 0 : if ancestor_ancestor_lsn > *lsn {
1516 : // can we safely just branch from the ancestor instead?
1517 0 : return Err(CreateTimelineError::AncestorLsn(anyhow::anyhow!(
1518 0 : "invalid start lsn {} for ancestor timeline {}: less than timeline ancestor lsn {}",
1519 0 : lsn,
1520 0 : ancestor_timeline_id,
1521 0 : ancestor_ancestor_lsn,
1522 0 : )));
1523 0 : }
1524 0 :
1525 0 : // Wait for the WAL to arrive and be processed on the parent branch up
1526 0 : // to the requested branch point. The repository code itself doesn't
1527 0 : // require it, but if we start to receive WAL on the new timeline,
1528 0 : // decoding the new WAL might need to look up previous pages, relation
1529 0 : // sizes etc. and that would get confused if the previous page versions
1530 0 : // are not in the repository yet.
1531 0 : ancestor_timeline
1532 0 : .wait_lsn(*lsn, timeline::WaitLsnWaiter::Tenant, ctx)
1533 0 : .await
1534 0 : .map_err(|e| match e {
1535 0 : e @ (WaitLsnError::Timeout(_) | WaitLsnError::BadState) => {
1536 0 : CreateTimelineError::AncestorLsn(anyhow::anyhow!(e))
1537 : }
1538 0 : WaitLsnError::Shutdown => CreateTimelineError::ShuttingDown,
1539 0 : })?;
1540 0 : }
1541 :
1542 0 : self.branch_timeline(
1543 0 : &ancestor_timeline,
1544 0 : new_timeline_id,
1545 0 : ancestor_start_lsn,
1546 0 : create_guard,
1547 0 : ctx,
1548 0 : )
1549 0 : .await?
1550 : }
1551 : None => {
1552 0 : self.bootstrap_timeline(
1553 0 : new_timeline_id,
1554 0 : pg_version,
1555 0 : load_existing_initdb,
1556 0 : create_guard,
1557 0 : ctx,
1558 0 : )
1559 0 : .await?
1560 : }
1561 : };
1562 :
1563 : // At this point we have dropped our guard on [`Self::timelines_creating`], and
1564 : // the timeline is visible in [`Self::timelines`], but it is _not_ durable yet. We must
1565 : // not send a success to the caller until it is. The same applies to handling retries,
1566 : // see the handling of [`TimelineExclusionError::AlreadyExists`] above.
1567 0 : let kind = ancestor_timeline_id
1568 0 : .map(|_| "branched")
1569 0 : .unwrap_or("bootstrapped");
1570 0 : loaded_timeline
1571 0 : .remote_client
1572 0 : .wait_completion()
1573 0 : .await
1574 0 : .with_context(|| format!("wait for {} timeline initial uploads to complete", kind))?;
1575 :
1576 0 : loaded_timeline.activate(self.clone(), broker_client, None, ctx);
1577 0 :
1578 0 : Ok(loaded_timeline)
1579 0 : }
1580 :
1581 0 : pub(crate) async fn delete_timeline(
1582 0 : self: Arc<Self>,
1583 0 : timeline_id: TimelineId,
1584 0 : ) -> Result<(), DeleteTimelineError> {
1585 0 : DeleteTimelineFlow::run(&self, timeline_id, false).await?;
1586 :
1587 0 : Ok(())
1588 0 : }
1589 :
1590 : /// perform one garbage collection iteration, removing old data files from disk.
1591 : /// this function is periodically called by gc task.
1592 : /// also it can be explicitly requested through page server api 'do_gc' command.
1593 : ///
1594 : /// `target_timeline_id` specifies the timeline to GC, or None for all.
1595 : ///
1596 : /// The `horizon` an `pitr` parameters determine how much WAL history needs to be retained.
1597 : /// Also known as the retention period, or the GC cutoff point. `horizon` specifies
1598 : /// the amount of history, as LSN difference from current latest LSN on each timeline.
1599 : /// `pitr` specifies the same as a time difference from the current time. The effective
1600 : /// GC cutoff point is determined conservatively by either `horizon` and `pitr`, whichever
1601 : /// requires more history to be retained.
1602 : //
1603 756 : pub async fn gc_iteration(
1604 756 : &self,
1605 756 : target_timeline_id: Option<TimelineId>,
1606 756 : horizon: u64,
1607 756 : pitr: Duration,
1608 756 : cancel: &CancellationToken,
1609 756 : ctx: &RequestContext,
1610 756 : ) -> anyhow::Result<GcResult> {
1611 756 : // Don't start doing work during shutdown
1612 756 : if let TenantState::Stopping { .. } = self.current_state() {
1613 0 : return Ok(GcResult::default());
1614 756 : }
1615 756 :
1616 756 : // there is a global allowed_error for this
1617 756 : anyhow::ensure!(
1618 756 : self.is_active(),
1619 0 : "Cannot run GC iteration on inactive tenant"
1620 : );
1621 :
1622 : {
1623 756 : let conf = self.tenant_conf.load();
1624 756 :
1625 756 : if !conf.location.may_delete_layers_hint() {
1626 0 : info!("Skipping GC in location state {:?}", conf.location);
1627 0 : return Ok(GcResult::default());
1628 756 : }
1629 756 : }
1630 756 :
1631 756 : self.gc_iteration_internal(target_timeline_id, horizon, pitr, cancel, ctx)
1632 718 : .await
1633 756 : }
1634 :
1635 : /// Perform one compaction iteration.
1636 : /// This function is periodically called by compactor task.
1637 : /// Also it can be explicitly requested per timeline through page server
1638 : /// api's 'compact' command.
1639 0 : async fn compaction_iteration(
1640 0 : &self,
1641 0 : cancel: &CancellationToken,
1642 0 : ctx: &RequestContext,
1643 0 : ) -> anyhow::Result<(), timeline::CompactionError> {
1644 0 : // Don't start doing work during shutdown, or when broken, we do not need those in the logs
1645 0 : if !self.is_active() {
1646 0 : return Ok(());
1647 0 : }
1648 0 :
1649 0 : {
1650 0 : let conf = self.tenant_conf.load();
1651 0 : if !conf.location.may_delete_layers_hint() || !conf.location.may_upload_layers_hint() {
1652 0 : info!("Skipping compaction in location state {:?}", conf.location);
1653 0 : return Ok(());
1654 0 : }
1655 0 : }
1656 0 :
1657 0 : // Scan through the hashmap and collect a list of all the timelines,
1658 0 : // while holding the lock. Then drop the lock and actually perform the
1659 0 : // compactions. We don't want to block everything else while the
1660 0 : // compaction runs.
1661 0 : let timelines_to_compact = {
1662 0 : let timelines = self.timelines.lock().unwrap();
1663 0 : let timelines_to_compact = timelines
1664 0 : .iter()
1665 0 : .filter_map(|(timeline_id, timeline)| {
1666 0 : if timeline.is_active() {
1667 0 : Some((*timeline_id, timeline.clone()))
1668 : } else {
1669 0 : None
1670 : }
1671 0 : })
1672 0 : .collect::<Vec<_>>();
1673 0 : drop(timelines);
1674 0 : timelines_to_compact
1675 : };
1676 :
1677 0 : for (timeline_id, timeline) in &timelines_to_compact {
1678 0 : timeline
1679 0 : .compact(cancel, EnumSet::empty(), ctx)
1680 0 : .instrument(info_span!("compact_timeline", %timeline_id))
1681 0 : .await?;
1682 : }
1683 :
1684 0 : Ok(())
1685 0 : }
1686 :
1687 : // Call through to all timelines to freeze ephemeral layers if needed. Usually
1688 : // this happens during ingest: this background housekeeping is for freezing layers
1689 : // that are open but haven't been written to for some time.
1690 0 : async fn ingest_housekeeping(&self) {
1691 0 : // Scan through the hashmap and collect a list of all the timelines,
1692 0 : // while holding the lock. Then drop the lock and actually perform the
1693 0 : // compactions. We don't want to block everything else while the
1694 0 : // compaction runs.
1695 0 : let timelines = {
1696 0 : self.timelines
1697 0 : .lock()
1698 0 : .unwrap()
1699 0 : .values()
1700 0 : .filter_map(|timeline| {
1701 0 : if timeline.is_active() {
1702 0 : Some(timeline.clone())
1703 : } else {
1704 0 : None
1705 : }
1706 0 : })
1707 0 : .collect::<Vec<_>>()
1708 : };
1709 :
1710 0 : for timeline in &timelines {
1711 0 : timeline.maybe_freeze_ephemeral_layer().await;
1712 : }
1713 0 : }
1714 :
1715 2396 : pub fn current_state(&self) -> TenantState {
1716 2396 : self.state.borrow().clone()
1717 2396 : }
1718 :
1719 1634 : pub fn is_active(&self) -> bool {
1720 1634 : self.current_state() == TenantState::Active
1721 1634 : }
1722 :
1723 0 : pub fn generation(&self) -> Generation {
1724 0 : self.generation
1725 0 : }
1726 :
1727 0 : pub(crate) fn wal_redo_manager_status(&self) -> Option<WalRedoManagerStatus> {
1728 0 : self.walredo_mgr.as_ref().and_then(|mgr| mgr.status())
1729 0 : }
1730 :
1731 : /// Changes tenant status to active, unless shutdown was already requested.
1732 : ///
1733 : /// `background_jobs_can_start` is an optional barrier set to a value during pageserver startup
1734 : /// to delay background jobs. Background jobs can be started right away when None is given.
1735 0 : fn activate(
1736 0 : self: &Arc<Self>,
1737 0 : broker_client: BrokerClientChannel,
1738 0 : background_jobs_can_start: Option<&completion::Barrier>,
1739 0 : ctx: &RequestContext,
1740 0 : ) {
1741 0 : span::debug_assert_current_span_has_tenant_id();
1742 0 :
1743 0 : let mut activating = false;
1744 0 : self.state.send_modify(|current_state| {
1745 0 : use pageserver_api::models::ActivatingFrom;
1746 0 : match &*current_state {
1747 : TenantState::Activating(_) | TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => {
1748 0 : panic!("caller is responsible for calling activate() only on Loading / Attaching tenants, got {state:?}", state = current_state);
1749 : }
1750 0 : TenantState::Loading => {
1751 0 : *current_state = TenantState::Activating(ActivatingFrom::Loading);
1752 0 : }
1753 0 : TenantState::Attaching => {
1754 0 : *current_state = TenantState::Activating(ActivatingFrom::Attaching);
1755 0 : }
1756 : }
1757 0 : debug!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), "Activating tenant");
1758 0 : activating = true;
1759 0 : // Continue outside the closure. We need to grab timelines.lock()
1760 0 : // and we plan to turn it into a tokio::sync::Mutex in a future patch.
1761 0 : });
1762 0 :
1763 0 : if activating {
1764 0 : let timelines_accessor = self.timelines.lock().unwrap();
1765 0 : let timelines_to_activate = timelines_accessor
1766 0 : .values()
1767 0 : .filter(|timeline| !(timeline.is_broken() || timeline.is_stopping()));
1768 0 :
1769 0 : // Spawn gc and compaction loops. The loops will shut themselves
1770 0 : // down when they notice that the tenant is inactive.
1771 0 : tasks::start_background_loops(self, background_jobs_can_start);
1772 0 :
1773 0 : let mut activated_timelines = 0;
1774 :
1775 0 : for timeline in timelines_to_activate {
1776 0 : timeline.activate(
1777 0 : self.clone(),
1778 0 : broker_client.clone(),
1779 0 : background_jobs_can_start,
1780 0 : ctx,
1781 0 : );
1782 0 : activated_timelines += 1;
1783 0 : }
1784 :
1785 0 : self.state.send_modify(move |current_state| {
1786 0 : assert!(
1787 0 : matches!(current_state, TenantState::Activating(_)),
1788 0 : "set_stopping and set_broken wait for us to leave Activating state",
1789 : );
1790 0 : *current_state = TenantState::Active;
1791 0 :
1792 0 : let elapsed = self.constructed_at.elapsed();
1793 0 : let total_timelines = timelines_accessor.len();
1794 0 :
1795 0 : // log a lot of stuff, because some tenants sometimes suffer from user-visible
1796 0 : // times to activate. see https://github.com/neondatabase/neon/issues/4025
1797 0 : info!(
1798 0 : since_creation_millis = elapsed.as_millis(),
1799 0 : tenant_id = %self.tenant_shard_id.tenant_id,
1800 0 : shard_id = %self.tenant_shard_id.shard_slug(),
1801 0 : activated_timelines,
1802 0 : total_timelines,
1803 0 : post_state = <&'static str>::from(&*current_state),
1804 0 : "activation attempt finished"
1805 : );
1806 :
1807 0 : TENANT.activation.observe(elapsed.as_secs_f64());
1808 0 : });
1809 0 : }
1810 0 : }
1811 :
1812 : /// Shutdown the tenant and join all of the spawned tasks.
1813 : ///
1814 : /// The method caters for all use-cases:
1815 : /// - pageserver shutdown (freeze_and_flush == true)
1816 : /// - detach + ignore (freeze_and_flush == false)
1817 : ///
1818 : /// This will attempt to shutdown even if tenant is broken.
1819 : ///
1820 : /// `shutdown_progress` is a [`completion::Barrier`] for the shutdown initiated by this call.
1821 : /// If the tenant is already shutting down, we return a clone of the first shutdown call's
1822 : /// `Barrier` as an `Err`. This not-first caller can use the returned barrier to join with
1823 : /// the ongoing shutdown.
1824 6 : async fn shutdown(
1825 6 : &self,
1826 6 : shutdown_progress: completion::Barrier,
1827 6 : shutdown_mode: timeline::ShutdownMode,
1828 6 : ) -> Result<(), completion::Barrier> {
1829 6 : span::debug_assert_current_span_has_tenant_id();
1830 :
1831 : // Set tenant (and its timlines) to Stoppping state.
1832 : //
1833 : // Since we can only transition into Stopping state after activation is complete,
1834 : // run it in a JoinSet so all tenants have a chance to stop before we get SIGKILLed.
1835 : //
1836 : // Transitioning tenants to Stopping state has a couple of non-obvious side effects:
1837 : // 1. Lock out any new requests to the tenants.
1838 : // 2. Signal cancellation to WAL receivers (we wait on it below).
1839 : // 3. Signal cancellation for other tenant background loops.
1840 : // 4. ???
1841 : //
1842 : // The waiting for the cancellation is not done uniformly.
1843 : // We certainly wait for WAL receivers to shut down.
1844 : // That is necessary so that no new data comes in before the freeze_and_flush.
1845 : // But the tenant background loops are joined-on in our caller.
1846 : // It's mesed up.
1847 : // we just ignore the failure to stop
1848 :
1849 : // If we're still attaching, fire the cancellation token early to drop out: this
1850 : // will prevent us flushing, but ensures timely shutdown if some I/O during attach
1851 : // is very slow.
1852 6 : if matches!(self.current_state(), TenantState::Attaching) {
1853 0 : self.cancel.cancel();
1854 6 : }
1855 :
1856 6 : match self.set_stopping(shutdown_progress, false, false).await {
1857 6 : Ok(()) => {}
1858 0 : Err(SetStoppingError::Broken) => {
1859 0 : // assume that this is acceptable
1860 0 : }
1861 0 : Err(SetStoppingError::AlreadyStopping(other)) => {
1862 0 : // give caller the option to wait for this this shutdown
1863 0 : info!("Tenant::shutdown: AlreadyStopping");
1864 0 : return Err(other);
1865 : }
1866 : };
1867 :
1868 6 : let mut js = tokio::task::JoinSet::new();
1869 6 : {
1870 6 : let timelines = self.timelines.lock().unwrap();
1871 6 : timelines.values().for_each(|timeline| {
1872 6 : let timeline = Arc::clone(timeline);
1873 6 : let timeline_id = timeline.timeline_id;
1874 6 : let span = tracing::info_span!("timeline_shutdown", %timeline_id, ?shutdown_mode);
1875 16 : js.spawn(async move { timeline.shutdown(shutdown_mode).instrument(span).await });
1876 6 : })
1877 6 : };
1878 6 : // test_long_timeline_create_then_tenant_delete is leaning on this message
1879 6 : tracing::info!("Waiting for timelines...");
1880 12 : while let Some(res) = js.join_next().await {
1881 0 : match res {
1882 6 : Ok(()) => {}
1883 0 : Err(je) if je.is_cancelled() => unreachable!("no cancelling used"),
1884 0 : Err(je) if je.is_panic() => { /* logged already */ }
1885 0 : Err(je) => warn!("unexpected JoinError: {je:?}"),
1886 : }
1887 : }
1888 :
1889 : // We cancel the Tenant's cancellation token _after_ the timelines have all shut down. This permits
1890 : // them to continue to do work during their shutdown methods, e.g. flushing data.
1891 6 : tracing::debug!("Cancelling CancellationToken");
1892 6 : self.cancel.cancel();
1893 6 :
1894 6 : // shutdown all tenant and timeline tasks: gc, compaction, page service
1895 6 : // No new tasks will be started for this tenant because it's in `Stopping` state.
1896 6 : //
1897 6 : // this will additionally shutdown and await all timeline tasks.
1898 6 : tracing::debug!("Waiting for tasks...");
1899 6 : task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), None).await;
1900 :
1901 : // Wait for any in-flight operations to complete
1902 6 : self.gate.close().await;
1903 :
1904 6 : remove_tenant_metrics(&self.tenant_shard_id);
1905 6 :
1906 6 : Ok(())
1907 6 : }
1908 :
1909 : /// Change tenant status to Stopping, to mark that it is being shut down.
1910 : ///
1911 : /// This function waits for the tenant to become active if it isn't already, before transitioning it into Stopping state.
1912 : ///
1913 : /// This function is not cancel-safe!
1914 : ///
1915 : /// `allow_transition_from_loading` is needed for the special case of loading task deleting the tenant.
1916 : /// `allow_transition_from_attaching` is needed for the special case of attaching deleted tenant.
1917 6 : async fn set_stopping(
1918 6 : &self,
1919 6 : progress: completion::Barrier,
1920 6 : allow_transition_from_loading: bool,
1921 6 : allow_transition_from_attaching: bool,
1922 6 : ) -> Result<(), SetStoppingError> {
1923 6 : let mut rx = self.state.subscribe();
1924 6 :
1925 6 : // cannot stop before we're done activating, so wait out until we're done activating
1926 6 : rx.wait_for(|state| match state {
1927 0 : TenantState::Attaching if allow_transition_from_attaching => true,
1928 : TenantState::Activating(_) | TenantState::Attaching => {
1929 0 : info!(
1930 0 : "waiting for {} to turn Active|Broken|Stopping",
1931 0 : <&'static str>::from(state)
1932 : );
1933 0 : false
1934 : }
1935 0 : TenantState::Loading => allow_transition_from_loading,
1936 6 : TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => true,
1937 6 : })
1938 0 : .await
1939 6 : .expect("cannot drop self.state while on a &self method");
1940 6 :
1941 6 : // we now know we're done activating, let's see whether this task is the winner to transition into Stopping
1942 6 : let mut err = None;
1943 6 : let stopping = self.state.send_if_modified(|current_state| match current_state {
1944 : TenantState::Activating(_) => {
1945 0 : unreachable!("1we ensured above that we're done with activation, and, there is no re-activation")
1946 : }
1947 : TenantState::Attaching => {
1948 0 : if !allow_transition_from_attaching {
1949 0 : unreachable!("2we ensured above that we're done with activation, and, there is no re-activation")
1950 0 : };
1951 0 : *current_state = TenantState::Stopping { progress };
1952 0 : true
1953 : }
1954 : TenantState::Loading => {
1955 0 : if !allow_transition_from_loading {
1956 0 : unreachable!("3we ensured above that we're done with activation, and, there is no re-activation")
1957 0 : };
1958 0 : *current_state = TenantState::Stopping { progress };
1959 0 : true
1960 : }
1961 : TenantState::Active => {
1962 : // FIXME: due to time-of-check vs time-of-use issues, it can happen that new timelines
1963 : // are created after the transition to Stopping. That's harmless, as the Timelines
1964 : // won't be accessible to anyone afterwards, because the Tenant is in Stopping state.
1965 6 : *current_state = TenantState::Stopping { progress };
1966 6 : // Continue stopping outside the closure. We need to grab timelines.lock()
1967 6 : // and we plan to turn it into a tokio::sync::Mutex in a future patch.
1968 6 : true
1969 : }
1970 0 : TenantState::Broken { reason, .. } => {
1971 0 : info!(
1972 0 : "Cannot set tenant to Stopping state, it is in Broken state due to: {reason}"
1973 : );
1974 0 : err = Some(SetStoppingError::Broken);
1975 0 : false
1976 : }
1977 0 : TenantState::Stopping { progress } => {
1978 0 : info!("Tenant is already in Stopping state");
1979 0 : err = Some(SetStoppingError::AlreadyStopping(progress.clone()));
1980 0 : false
1981 : }
1982 6 : });
1983 6 : match (stopping, err) {
1984 6 : (true, None) => {} // continue
1985 0 : (false, Some(err)) => return Err(err),
1986 0 : (true, Some(_)) => unreachable!(
1987 0 : "send_if_modified closure must error out if not transitioning to Stopping"
1988 0 : ),
1989 0 : (false, None) => unreachable!(
1990 0 : "send_if_modified closure must return true if transitioning to Stopping"
1991 0 : ),
1992 : }
1993 :
1994 6 : let timelines_accessor = self.timelines.lock().unwrap();
1995 6 : let not_broken_timelines = timelines_accessor
1996 6 : .values()
1997 6 : .filter(|timeline| !timeline.is_broken());
1998 12 : for timeline in not_broken_timelines {
1999 6 : timeline.set_state(TimelineState::Stopping);
2000 6 : }
2001 6 : Ok(())
2002 6 : }
2003 :
2004 : /// Method for tenant::mgr to transition us into Broken state in case of a late failure in
2005 : /// `remove_tenant_from_memory`
2006 : ///
2007 : /// This function waits for the tenant to become active if it isn't already, before transitioning it into Stopping state.
2008 : ///
2009 : /// In tests, we also use this to set tenants to Broken state on purpose.
2010 0 : pub(crate) async fn set_broken(&self, reason: String) {
2011 0 : let mut rx = self.state.subscribe();
2012 0 :
2013 0 : // The load & attach routines own the tenant state until it has reached `Active`.
2014 0 : // So, wait until it's done.
2015 0 : rx.wait_for(|state| match state {
2016 : TenantState::Activating(_) | TenantState::Loading | TenantState::Attaching => {
2017 0 : info!(
2018 0 : "waiting for {} to turn Active|Broken|Stopping",
2019 0 : <&'static str>::from(state)
2020 : );
2021 0 : false
2022 : }
2023 0 : TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => true,
2024 0 : })
2025 0 : .await
2026 0 : .expect("cannot drop self.state while on a &self method");
2027 0 :
2028 0 : // we now know we're done activating, let's see whether this task is the winner to transition into Broken
2029 0 : self.set_broken_no_wait(reason)
2030 0 : }
2031 :
2032 0 : pub(crate) fn set_broken_no_wait(&self, reason: impl Display) {
2033 0 : let reason = reason.to_string();
2034 0 : self.state.send_modify(|current_state| {
2035 0 : match *current_state {
2036 : TenantState::Activating(_) | TenantState::Loading | TenantState::Attaching => {
2037 0 : unreachable!("we ensured above that we're done with activation, and, there is no re-activation")
2038 : }
2039 : TenantState::Active => {
2040 0 : if cfg!(feature = "testing") {
2041 0 : warn!("Changing Active tenant to Broken state, reason: {}", reason);
2042 0 : *current_state = TenantState::broken_from_reason(reason);
2043 : } else {
2044 0 : unreachable!("not allowed to call set_broken on Active tenants in non-testing builds")
2045 : }
2046 : }
2047 : TenantState::Broken { .. } => {
2048 0 : warn!("Tenant is already in Broken state");
2049 : }
2050 : // This is the only "expected" path, any other path is a bug.
2051 : TenantState::Stopping { .. } => {
2052 0 : warn!(
2053 0 : "Marking Stopping tenant as Broken state, reason: {}",
2054 : reason
2055 : );
2056 0 : *current_state = TenantState::broken_from_reason(reason);
2057 : }
2058 : }
2059 0 : });
2060 0 : }
2061 :
2062 0 : pub fn subscribe_for_state_updates(&self) -> watch::Receiver<TenantState> {
2063 0 : self.state.subscribe()
2064 0 : }
2065 :
2066 : /// The activate_now semaphore is initialized with zero units. As soon as
2067 : /// we add a unit, waiters will be able to acquire a unit and proceed.
2068 0 : pub(crate) fn activate_now(&self) {
2069 0 : self.activate_now_sem.add_permits(1);
2070 0 : }
2071 :
2072 0 : pub(crate) async fn wait_to_become_active(
2073 0 : &self,
2074 0 : timeout: Duration,
2075 0 : ) -> Result<(), GetActiveTenantError> {
2076 0 : let mut receiver = self.state.subscribe();
2077 0 : loop {
2078 0 : let current_state = receiver.borrow_and_update().clone();
2079 0 : match current_state {
2080 : TenantState::Loading | TenantState::Attaching | TenantState::Activating(_) => {
2081 : // in these states, there's a chance that we can reach ::Active
2082 0 : self.activate_now();
2083 0 : match timeout_cancellable(timeout, &self.cancel, receiver.changed()).await {
2084 0 : Ok(r) => {
2085 0 : r.map_err(
2086 0 : |_e: tokio::sync::watch::error::RecvError|
2087 : // Tenant existed but was dropped: report it as non-existent
2088 0 : GetActiveTenantError::NotFound(GetTenantError::NotFound(self.tenant_shard_id.tenant_id))
2089 0 : )?
2090 : }
2091 : Err(TimeoutCancellableError::Cancelled) => {
2092 0 : return Err(GetActiveTenantError::Cancelled);
2093 : }
2094 : Err(TimeoutCancellableError::Timeout) => {
2095 0 : return Err(GetActiveTenantError::WaitForActiveTimeout {
2096 0 : latest_state: Some(self.current_state()),
2097 0 : wait_time: timeout,
2098 0 : });
2099 : }
2100 : }
2101 : }
2102 : TenantState::Active { .. } => {
2103 0 : return Ok(());
2104 : }
2105 0 : TenantState::Broken { reason, .. } => {
2106 0 : // This is fatal, and reported distinctly from the general case of "will never be active" because
2107 0 : // it's logically a 500 to external API users (broken is always a bug).
2108 0 : return Err(GetActiveTenantError::Broken(reason));
2109 : }
2110 : TenantState::Stopping { .. } => {
2111 : // There's no chance the tenant can transition back into ::Active
2112 0 : return Err(GetActiveTenantError::WillNotBecomeActive(current_state));
2113 : }
2114 : }
2115 : }
2116 0 : }
2117 :
2118 0 : pub(crate) fn get_attach_mode(&self) -> AttachmentMode {
2119 0 : self.tenant_conf.load().location.attach_mode
2120 0 : }
2121 :
2122 : /// For API access: generate a LocationConfig equivalent to the one that would be used to
2123 : /// create a Tenant in the same state. Do not use this in hot paths: it's for relatively
2124 : /// rare external API calls, like a reconciliation at startup.
2125 0 : pub(crate) fn get_location_conf(&self) -> models::LocationConfig {
2126 0 : let conf = self.tenant_conf.load();
2127 :
2128 0 : let location_config_mode = match conf.location.attach_mode {
2129 0 : AttachmentMode::Single => models::LocationConfigMode::AttachedSingle,
2130 0 : AttachmentMode::Multi => models::LocationConfigMode::AttachedMulti,
2131 0 : AttachmentMode::Stale => models::LocationConfigMode::AttachedStale,
2132 : };
2133 :
2134 : // We have a pageserver TenantConf, we need the API-facing TenantConfig.
2135 0 : let tenant_config: models::TenantConfig = conf.tenant_conf.clone().into();
2136 0 :
2137 0 : models::LocationConfig {
2138 0 : mode: location_config_mode,
2139 0 : generation: self.generation.into(),
2140 0 : secondary_conf: None,
2141 0 : shard_number: self.shard_identity.number.0,
2142 0 : shard_count: self.shard_identity.count.literal(),
2143 0 : shard_stripe_size: self.shard_identity.stripe_size.0,
2144 0 : tenant_conf: tenant_config,
2145 0 : }
2146 0 : }
2147 :
2148 0 : pub(crate) fn get_tenant_shard_id(&self) -> &TenantShardId {
2149 0 : &self.tenant_shard_id
2150 0 : }
2151 :
2152 0 : pub(crate) fn get_shard_stripe_size(&self) -> ShardStripeSize {
2153 0 : self.shard_identity.stripe_size
2154 0 : }
2155 :
2156 0 : pub(crate) fn get_generation(&self) -> Generation {
2157 0 : self.generation
2158 0 : }
2159 :
2160 : /// This function partially shuts down the tenant (it shuts down the Timelines) and is fallible,
2161 : /// and can leave the tenant in a bad state if it fails. The caller is responsible for
2162 : /// resetting this tenant to a valid state if we fail.
2163 0 : pub(crate) async fn split_prepare(
2164 0 : &self,
2165 0 : child_shards: &Vec<TenantShardId>,
2166 0 : ) -> anyhow::Result<()> {
2167 0 : let timelines = self.timelines.lock().unwrap().clone();
2168 0 : for timeline in timelines.values() {
2169 : // We do not block timeline creation/deletion during splits inside the pageserver: it is up to higher levels
2170 : // to ensure that they do not start a split if currently in the process of doing these.
2171 :
2172 : // Upload an index from the parent: this is partly to provide freshness for the
2173 : // child tenants that will copy it, and partly for general ease-of-debugging: there will
2174 : // always be a parent shard index in the same generation as we wrote the child shard index.
2175 0 : timeline
2176 0 : .remote_client
2177 0 : .schedule_index_upload_for_file_changes()?;
2178 0 : timeline.remote_client.wait_completion().await?;
2179 :
2180 : // Shut down the timeline's remote client: this means that the indices we write
2181 : // for child shards will not be invalidated by the parent shard deleting layers.
2182 0 : timeline.remote_client.shutdown().await;
2183 :
2184 : // Download methods can still be used after shutdown, as they don't flow through the remote client's
2185 : // queue. In principal the RemoteTimelineClient could provide this without downloading it, but this
2186 : // operation is rare, so it's simpler to just download it (and robustly guarantees that the index
2187 : // we use here really is the remotely persistent one).
2188 0 : let result = timeline.remote_client
2189 0 : .download_index_file(&self.cancel)
2190 0 : .instrument(info_span!("download_index_file", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%timeline.timeline_id))
2191 0 : .await?;
2192 0 : let index_part = match result {
2193 : MaybeDeletedIndexPart::Deleted(_) => {
2194 0 : anyhow::bail!("Timeline deletion happened concurrently with split")
2195 : }
2196 0 : MaybeDeletedIndexPart::IndexPart(p) => p,
2197 : };
2198 :
2199 0 : for child_shard in child_shards {
2200 0 : upload_index_part(
2201 0 : &self.remote_storage,
2202 0 : child_shard,
2203 0 : &timeline.timeline_id,
2204 0 : self.generation,
2205 0 : &index_part,
2206 0 : &self.cancel,
2207 0 : )
2208 0 : .await?;
2209 : }
2210 : }
2211 :
2212 0 : Ok(())
2213 0 : }
2214 :
2215 0 : pub(crate) fn get_sizes(&self) -> TopTenantShardItem {
2216 0 : let mut result = TopTenantShardItem {
2217 0 : id: self.tenant_shard_id,
2218 0 : resident_size: 0,
2219 0 : physical_size: 0,
2220 0 : max_logical_size: 0,
2221 0 : };
2222 :
2223 0 : for timeline in self.timelines.lock().unwrap().values() {
2224 0 : result.resident_size += timeline.metrics.resident_physical_size_gauge.get();
2225 0 :
2226 0 : result.physical_size += timeline
2227 0 : .remote_client
2228 0 : .metrics
2229 0 : .remote_physical_size_gauge
2230 0 : .get();
2231 0 : result.max_logical_size = std::cmp::max(
2232 0 : result.max_logical_size,
2233 0 : timeline.metrics.current_logical_size_gauge.get(),
2234 0 : );
2235 0 : }
2236 :
2237 0 : result
2238 0 : }
2239 : }
2240 :
2241 : /// Given a Vec of timelines and their ancestors (timeline_id, ancestor_id),
2242 : /// perform a topological sort, so that the parent of each timeline comes
2243 : /// before the children.
2244 : /// E extracts the ancestor from T
2245 : /// This allows for T to be different. It can be TimelineMetadata, can be Timeline itself, etc.
2246 130 : fn tree_sort_timelines<T, E>(
2247 130 : timelines: HashMap<TimelineId, T>,
2248 130 : extractor: E,
2249 130 : ) -> anyhow::Result<Vec<(TimelineId, T)>>
2250 130 : where
2251 130 : E: Fn(&T) -> Option<TimelineId>,
2252 130 : {
2253 130 : let mut result = Vec::with_capacity(timelines.len());
2254 130 :
2255 130 : let mut now = Vec::with_capacity(timelines.len());
2256 130 : // (ancestor, children)
2257 130 : let mut later: HashMap<TimelineId, Vec<(TimelineId, T)>> =
2258 130 : HashMap::with_capacity(timelines.len());
2259 :
2260 136 : for (timeline_id, value) in timelines {
2261 6 : if let Some(ancestor_id) = extractor(&value) {
2262 2 : let children = later.entry(ancestor_id).or_default();
2263 2 : children.push((timeline_id, value));
2264 4 : } else {
2265 4 : now.push((timeline_id, value));
2266 4 : }
2267 : }
2268 :
2269 136 : while let Some((timeline_id, metadata)) = now.pop() {
2270 6 : result.push((timeline_id, metadata));
2271 : // All children of this can be loaded now
2272 6 : if let Some(mut children) = later.remove(&timeline_id) {
2273 2 : now.append(&mut children);
2274 4 : }
2275 : }
2276 :
2277 : // All timelines should be visited now. Unless there were timelines with missing ancestors.
2278 130 : if !later.is_empty() {
2279 0 : for (missing_id, orphan_ids) in later {
2280 0 : for (orphan_id, _) in orphan_ids {
2281 0 : error!("could not load timeline {orphan_id} because its ancestor timeline {missing_id} could not be loaded");
2282 : }
2283 : }
2284 0 : bail!("could not load tenant because some timelines are missing ancestors");
2285 130 : }
2286 130 :
2287 130 : Ok(result)
2288 130 : }
2289 :
2290 : impl Tenant {
2291 0 : pub fn tenant_specific_overrides(&self) -> TenantConfOpt {
2292 0 : self.tenant_conf.load().tenant_conf.clone()
2293 0 : }
2294 :
2295 0 : pub fn effective_config(&self) -> TenantConf {
2296 0 : self.tenant_specific_overrides()
2297 0 : .merge(self.conf.default_tenant_conf.clone())
2298 0 : }
2299 :
2300 0 : pub fn get_checkpoint_distance(&self) -> u64 {
2301 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2302 0 : tenant_conf
2303 0 : .checkpoint_distance
2304 0 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
2305 0 : }
2306 :
2307 0 : pub fn get_checkpoint_timeout(&self) -> Duration {
2308 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2309 0 : tenant_conf
2310 0 : .checkpoint_timeout
2311 0 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
2312 0 : }
2313 :
2314 0 : pub fn get_compaction_target_size(&self) -> u64 {
2315 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2316 0 : tenant_conf
2317 0 : .compaction_target_size
2318 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
2319 0 : }
2320 :
2321 0 : pub fn get_compaction_period(&self) -> Duration {
2322 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2323 0 : tenant_conf
2324 0 : .compaction_period
2325 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_period)
2326 0 : }
2327 :
2328 0 : pub fn get_compaction_threshold(&self) -> usize {
2329 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2330 0 : tenant_conf
2331 0 : .compaction_threshold
2332 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
2333 0 : }
2334 :
2335 0 : pub fn get_gc_horizon(&self) -> u64 {
2336 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2337 0 : tenant_conf
2338 0 : .gc_horizon
2339 0 : .unwrap_or(self.conf.default_tenant_conf.gc_horizon)
2340 0 : }
2341 :
2342 0 : pub fn get_gc_period(&self) -> Duration {
2343 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2344 0 : tenant_conf
2345 0 : .gc_period
2346 0 : .unwrap_or(self.conf.default_tenant_conf.gc_period)
2347 0 : }
2348 :
2349 0 : pub fn get_image_creation_threshold(&self) -> usize {
2350 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2351 0 : tenant_conf
2352 0 : .image_creation_threshold
2353 0 : .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
2354 0 : }
2355 :
2356 0 : pub fn get_pitr_interval(&self) -> Duration {
2357 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2358 0 : tenant_conf
2359 0 : .pitr_interval
2360 0 : .unwrap_or(self.conf.default_tenant_conf.pitr_interval)
2361 0 : }
2362 :
2363 0 : pub fn get_trace_read_requests(&self) -> bool {
2364 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2365 0 : tenant_conf
2366 0 : .trace_read_requests
2367 0 : .unwrap_or(self.conf.default_tenant_conf.trace_read_requests)
2368 0 : }
2369 :
2370 0 : pub fn get_min_resident_size_override(&self) -> Option<u64> {
2371 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2372 0 : tenant_conf
2373 0 : .min_resident_size_override
2374 0 : .or(self.conf.default_tenant_conf.min_resident_size_override)
2375 0 : }
2376 :
2377 0 : pub fn get_heatmap_period(&self) -> Option<Duration> {
2378 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2379 0 : let heatmap_period = tenant_conf
2380 0 : .heatmap_period
2381 0 : .unwrap_or(self.conf.default_tenant_conf.heatmap_period);
2382 0 : if heatmap_period.is_zero() {
2383 0 : None
2384 : } else {
2385 0 : Some(heatmap_period)
2386 : }
2387 0 : }
2388 :
2389 0 : pub fn set_new_tenant_config(&self, new_tenant_conf: TenantConfOpt) {
2390 0 : // Use read-copy-update in order to avoid overwriting the location config
2391 0 : // state if this races with [`Tenant::set_new_location_config`]. Note that
2392 0 : // this race is not possible if both request types come from the storage
2393 0 : // controller (as they should!) because an exclusive op lock is required
2394 0 : // on the storage controller side.
2395 0 : self.tenant_conf.rcu(|inner| {
2396 0 : Arc::new(AttachedTenantConf {
2397 0 : tenant_conf: new_tenant_conf.clone(),
2398 0 : location: inner.location,
2399 0 : })
2400 0 : });
2401 0 :
2402 0 : self.tenant_conf_updated(&new_tenant_conf);
2403 0 : // Don't hold self.timelines.lock() during the notifies.
2404 0 : // There's no risk of deadlock right now, but there could be if we consolidate
2405 0 : // mutexes in struct Timeline in the future.
2406 0 : let timelines = self.list_timelines();
2407 0 : for timeline in timelines {
2408 0 : timeline.tenant_conf_updated(&new_tenant_conf);
2409 0 : }
2410 0 : }
2411 :
2412 8 : pub(crate) fn set_new_location_config(&self, new_conf: AttachedTenantConf) {
2413 8 : let new_tenant_conf = new_conf.tenant_conf.clone();
2414 8 :
2415 8 : self.tenant_conf.store(Arc::new(new_conf));
2416 8 :
2417 8 : self.tenant_conf_updated(&new_tenant_conf);
2418 8 : // Don't hold self.timelines.lock() during the notifies.
2419 8 : // There's no risk of deadlock right now, but there could be if we consolidate
2420 8 : // mutexes in struct Timeline in the future.
2421 8 : let timelines = self.list_timelines();
2422 16 : for timeline in timelines {
2423 8 : timeline.tenant_conf_updated(&new_tenant_conf);
2424 8 : }
2425 8 : }
2426 :
2427 138 : fn get_timeline_get_throttle_config(
2428 138 : psconf: &'static PageServerConf,
2429 138 : overrides: &TenantConfOpt,
2430 138 : ) -> throttle::Config {
2431 138 : overrides
2432 138 : .timeline_get_throttle
2433 138 : .clone()
2434 138 : .unwrap_or(psconf.default_tenant_conf.timeline_get_throttle.clone())
2435 138 : }
2436 :
2437 8 : pub(crate) fn tenant_conf_updated(&self, new_conf: &TenantConfOpt) {
2438 8 : let conf = Self::get_timeline_get_throttle_config(self.conf, new_conf);
2439 8 : self.timeline_get_throttle.reconfigure(conf)
2440 8 : }
2441 :
2442 : /// Helper function to create a new Timeline struct.
2443 : ///
2444 : /// The returned Timeline is in Loading state. The caller is responsible for
2445 : /// initializing any on-disk state, and for inserting the Timeline to the 'timelines'
2446 : /// map.
2447 : ///
2448 : /// `validate_ancestor == false` is used when a timeline is created for deletion
2449 : /// and we might not have the ancestor present anymore which is fine for to be
2450 : /// deleted timelines.
2451 352 : fn create_timeline_struct(
2452 352 : &self,
2453 352 : new_timeline_id: TimelineId,
2454 352 : new_metadata: &TimelineMetadata,
2455 352 : ancestor: Option<Arc<Timeline>>,
2456 352 : resources: TimelineResources,
2457 352 : cause: CreateTimelineCause,
2458 352 : last_aux_file_policy: Option<AuxFilePolicy>,
2459 352 : ) -> anyhow::Result<Arc<Timeline>> {
2460 352 : let state = match cause {
2461 : CreateTimelineCause::Load => {
2462 352 : let ancestor_id = new_metadata.ancestor_timeline();
2463 352 : anyhow::ensure!(
2464 352 : ancestor_id == ancestor.as_ref().map(|t| t.timeline_id),
2465 0 : "Timeline's {new_timeline_id} ancestor {ancestor_id:?} was not found"
2466 : );
2467 352 : TimelineState::Loading
2468 : }
2469 0 : CreateTimelineCause::Delete => TimelineState::Stopping,
2470 : };
2471 :
2472 352 : let pg_version = new_metadata.pg_version();
2473 352 :
2474 352 : let timeline = Timeline::new(
2475 352 : self.conf,
2476 352 : Arc::clone(&self.tenant_conf),
2477 352 : new_metadata,
2478 352 : ancestor,
2479 352 : new_timeline_id,
2480 352 : self.tenant_shard_id,
2481 352 : self.generation,
2482 352 : self.shard_identity,
2483 352 : self.walredo_mgr.clone(),
2484 352 : resources,
2485 352 : pg_version,
2486 352 : state,
2487 352 : last_aux_file_policy,
2488 352 : self.cancel.child_token(),
2489 352 : );
2490 352 :
2491 352 : Ok(timeline)
2492 352 : }
2493 :
2494 : // Allow too_many_arguments because a constructor's argument list naturally grows with the
2495 : // number of attributes in the struct: breaking these out into a builder wouldn't be helpful.
2496 : #[allow(clippy::too_many_arguments)]
2497 130 : fn new(
2498 130 : state: TenantState,
2499 130 : conf: &'static PageServerConf,
2500 130 : attached_conf: AttachedTenantConf,
2501 130 : shard_identity: ShardIdentity,
2502 130 : walredo_mgr: Option<Arc<WalRedoManager>>,
2503 130 : tenant_shard_id: TenantShardId,
2504 130 : remote_storage: GenericRemoteStorage,
2505 130 : deletion_queue_client: DeletionQueueClient,
2506 130 : ) -> Tenant {
2507 130 : let (state, mut rx) = watch::channel(state);
2508 130 :
2509 130 : tokio::spawn(async move {
2510 130 : // reflect tenant state in metrics:
2511 130 : // - global per tenant state: TENANT_STATE_METRIC
2512 130 : // - "set" of broken tenants: BROKEN_TENANTS_SET
2513 130 : //
2514 130 : // set of broken tenants should not have zero counts so that it remains accessible for
2515 130 : // alerting.
2516 130 :
2517 130 : let tid = tenant_shard_id.to_string();
2518 130 : let shard_id = tenant_shard_id.shard_slug().to_string();
2519 130 : let set_key = &[tid.as_str(), shard_id.as_str()][..];
2520 130 :
2521 258 : fn inspect_state(state: &TenantState) -> ([&'static str; 1], bool) {
2522 258 : ([state.into()], matches!(state, TenantState::Broken { .. }))
2523 258 : }
2524 130 :
2525 130 : let mut tuple = inspect_state(&rx.borrow_and_update());
2526 130 :
2527 130 : let is_broken = tuple.1;
2528 130 : let mut counted_broken = if is_broken {
2529 : // add the id to the set right away, there should not be any updates on the channel
2530 : // after before tenant is removed, if ever
2531 0 : BROKEN_TENANTS_SET.with_label_values(set_key).set(1);
2532 0 : true
2533 : } else {
2534 130 : false
2535 : };
2536 :
2537 258 : loop {
2538 258 : let labels = &tuple.0;
2539 258 : let current = TENANT_STATE_METRIC.with_label_values(labels);
2540 258 : current.inc();
2541 258 :
2542 258 : if rx.changed().await.is_err() {
2543 : // tenant has been dropped
2544 10 : current.dec();
2545 10 : drop(BROKEN_TENANTS_SET.remove_label_values(set_key));
2546 10 : break;
2547 128 : }
2548 128 :
2549 128 : current.dec();
2550 128 : tuple = inspect_state(&rx.borrow_and_update());
2551 128 :
2552 128 : let is_broken = tuple.1;
2553 128 : if is_broken && !counted_broken {
2554 0 : counted_broken = true;
2555 0 : // insert the tenant_id (back) into the set while avoiding needless counter
2556 0 : // access
2557 0 : BROKEN_TENANTS_SET.with_label_values(set_key).set(1);
2558 128 : }
2559 : }
2560 130 : });
2561 130 :
2562 130 : Tenant {
2563 130 : tenant_shard_id,
2564 130 : shard_identity,
2565 130 : generation: attached_conf.location.generation,
2566 130 : conf,
2567 130 : // using now here is good enough approximation to catch tenants with really long
2568 130 : // activation times.
2569 130 : constructed_at: Instant::now(),
2570 130 : timelines: Mutex::new(HashMap::new()),
2571 130 : timelines_creating: Mutex::new(HashSet::new()),
2572 130 : gc_cs: tokio::sync::Mutex::new(()),
2573 130 : walredo_mgr,
2574 130 : remote_storage,
2575 130 : deletion_queue_client,
2576 130 : state,
2577 130 : cached_logical_sizes: tokio::sync::Mutex::new(HashMap::new()),
2578 130 : cached_synthetic_tenant_size: Arc::new(AtomicU64::new(0)),
2579 130 : eviction_task_tenant_state: tokio::sync::Mutex::new(EvictionTaskTenantState::default()),
2580 130 : activate_now_sem: tokio::sync::Semaphore::new(0),
2581 130 : delete_progress: Arc::new(tokio::sync::Mutex::new(DeleteTenantFlow::default())),
2582 130 : cancel: CancellationToken::default(),
2583 130 : gate: Gate::default(),
2584 130 : timeline_get_throttle: Arc::new(throttle::Throttle::new(
2585 130 : Tenant::get_timeline_get_throttle_config(conf, &attached_conf.tenant_conf),
2586 130 : &crate::metrics::tenant_throttling::TIMELINE_GET,
2587 130 : )),
2588 130 : tenant_conf: Arc::new(ArcSwap::from_pointee(attached_conf)),
2589 130 : ongoing_timeline_detach: std::sync::Mutex::default(),
2590 130 : }
2591 130 : }
2592 :
2593 : /// Locate and load config
2594 0 : pub(super) fn load_tenant_config(
2595 0 : conf: &'static PageServerConf,
2596 0 : tenant_shard_id: &TenantShardId,
2597 0 : ) -> anyhow::Result<LocationConf> {
2598 0 : let legacy_config_path = conf.tenant_config_path(tenant_shard_id);
2599 0 : let config_path = conf.tenant_location_config_path(tenant_shard_id);
2600 0 :
2601 0 : if config_path.exists() {
2602 : // New-style config takes precedence
2603 0 : let deserialized = Self::read_config(&config_path)?;
2604 0 : Ok(toml_edit::de::from_document::<LocationConf>(deserialized)?)
2605 0 : } else if legacy_config_path.exists() {
2606 : // Upgrade path: found an old-style configuration only
2607 0 : let deserialized = Self::read_config(&legacy_config_path)?;
2608 :
2609 0 : let mut tenant_conf = TenantConfOpt::default();
2610 0 : for (key, item) in deserialized.iter() {
2611 0 : match key {
2612 0 : "tenant_config" => {
2613 0 : tenant_conf = TenantConfOpt::try_from(item.to_owned()).context(format!("Failed to parse config from file '{legacy_config_path}' as pageserver config"))?;
2614 : }
2615 0 : _ => bail!(
2616 0 : "config file {legacy_config_path} has unrecognized pageserver option '{key}'"
2617 0 : ),
2618 : }
2619 : }
2620 :
2621 : // Legacy configs are implicitly in attached state, and do not support sharding
2622 0 : Ok(LocationConf::attached_single(
2623 0 : tenant_conf,
2624 0 : Generation::none(),
2625 0 : &models::ShardParameters::default(),
2626 0 : ))
2627 : } else {
2628 : // FIXME If the config file is not found, assume that we're attaching
2629 : // a detached tenant and config is passed via attach command.
2630 : // https://github.com/neondatabase/neon/issues/1555
2631 : // OR: we're loading after incomplete deletion that managed to remove config.
2632 0 : info!(
2633 0 : "tenant config not found in {} or {}",
2634 : config_path, legacy_config_path
2635 : );
2636 0 : Ok(LocationConf::default())
2637 : }
2638 0 : }
2639 :
2640 0 : fn read_config(path: &Utf8Path) -> anyhow::Result<toml_edit::Document> {
2641 0 : info!("loading tenant configuration from {path}");
2642 :
2643 : // load and parse file
2644 0 : let config = fs::read_to_string(path)
2645 0 : .with_context(|| format!("Failed to load config from path '{path}'"))?;
2646 :
2647 0 : config
2648 0 : .parse::<toml_edit::Document>()
2649 0 : .with_context(|| format!("Failed to parse config from file '{path}' as toml file"))
2650 0 : }
2651 :
2652 0 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
2653 : pub(super) async fn persist_tenant_config(
2654 : conf: &'static PageServerConf,
2655 : tenant_shard_id: &TenantShardId,
2656 : location_conf: &LocationConf,
2657 : ) -> anyhow::Result<()> {
2658 : let legacy_config_path = conf.tenant_config_path(tenant_shard_id);
2659 : let config_path = conf.tenant_location_config_path(tenant_shard_id);
2660 :
2661 : Self::persist_tenant_config_at(
2662 : tenant_shard_id,
2663 : &config_path,
2664 : &legacy_config_path,
2665 : location_conf,
2666 : )
2667 : .await
2668 : }
2669 :
2670 0 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
2671 : pub(super) async fn persist_tenant_config_at(
2672 : tenant_shard_id: &TenantShardId,
2673 : config_path: &Utf8Path,
2674 : legacy_config_path: &Utf8Path,
2675 : location_conf: &LocationConf,
2676 : ) -> anyhow::Result<()> {
2677 : if let LocationMode::Attached(attach_conf) = &location_conf.mode {
2678 : // The modern-style LocationConf config file requires a generation to be set. In case someone
2679 : // is running a pageserver without the infrastructure to set generations, write out the legacy-style
2680 : // config file that only contains TenantConf.
2681 : //
2682 : // This will eventually be removed in https://github.com/neondatabase/neon/issues/5388
2683 :
2684 : if attach_conf.generation.is_none() {
2685 : tracing::info!(
2686 : "Running without generations, writing legacy-style tenant config file"
2687 : );
2688 : Self::persist_tenant_config_legacy(
2689 : tenant_shard_id,
2690 : legacy_config_path,
2691 : &location_conf.tenant_conf,
2692 : )
2693 : .await?;
2694 :
2695 : return Ok(());
2696 : }
2697 : }
2698 :
2699 : debug!("persisting tenantconf to {config_path}");
2700 :
2701 : let mut conf_content = r#"# This file contains a specific per-tenant's config.
2702 : # It is read in case of pageserver restart.
2703 : "#
2704 : .to_string();
2705 :
2706 0 : fail::fail_point!("tenant-config-before-write", |_| {
2707 0 : anyhow::bail!("tenant-config-before-write");
2708 0 : });
2709 :
2710 : // Convert the config to a toml file.
2711 : conf_content += &toml_edit::ser::to_string_pretty(&location_conf)?;
2712 :
2713 : let temp_path = path_with_suffix_extension(config_path, TEMP_FILE_SUFFIX);
2714 :
2715 : let tenant_shard_id = *tenant_shard_id;
2716 : let config_path = config_path.to_owned();
2717 : let conf_content = conf_content.into_bytes();
2718 : VirtualFile::crashsafe_overwrite(config_path.clone(), temp_path, conf_content)
2719 : .await
2720 0 : .with_context(|| format!("write tenant {tenant_shard_id} config to {config_path}"))?;
2721 :
2722 : Ok(())
2723 : }
2724 :
2725 0 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
2726 : async fn persist_tenant_config_legacy(
2727 : tenant_shard_id: &TenantShardId,
2728 : target_config_path: &Utf8Path,
2729 : tenant_conf: &TenantConfOpt,
2730 : ) -> anyhow::Result<()> {
2731 : debug!("persisting tenantconf to {target_config_path}");
2732 :
2733 : let mut conf_content = r#"# This file contains a specific per-tenant's config.
2734 : # It is read in case of pageserver restart.
2735 :
2736 : [tenant_config]
2737 : "#
2738 : .to_string();
2739 :
2740 : // Convert the config to a toml file.
2741 : conf_content += &toml_edit::ser::to_string(&tenant_conf)?;
2742 :
2743 : let temp_path = path_with_suffix_extension(target_config_path, TEMP_FILE_SUFFIX);
2744 :
2745 : let tenant_shard_id = *tenant_shard_id;
2746 : let target_config_path = target_config_path.to_owned();
2747 : let conf_content = conf_content.into_bytes();
2748 : VirtualFile::crashsafe_overwrite(target_config_path.clone(), temp_path, conf_content)
2749 : .await
2750 0 : .with_context(|| {
2751 0 : format!("write tenant {tenant_shard_id} config to {target_config_path}")
2752 0 : })?;
2753 : Ok(())
2754 : }
2755 :
2756 : //
2757 : // How garbage collection works:
2758 : //
2759 : // +--bar------------->
2760 : // /
2761 : // +----+-----foo---------------->
2762 : // /
2763 : // ----main--+-------------------------->
2764 : // \
2765 : // +-----baz-------->
2766 : //
2767 : //
2768 : // 1. Grab 'gc_cs' mutex to prevent new timelines from being created while Timeline's
2769 : // `gc_infos` are being refreshed
2770 : // 2. Scan collected timelines, and on each timeline, make note of the
2771 : // all the points where other timelines have been branched off.
2772 : // We will refrain from removing page versions at those LSNs.
2773 : // 3. For each timeline, scan all layer files on the timeline.
2774 : // Remove all files for which a newer file exists and which
2775 : // don't cover any branch point LSNs.
2776 : //
2777 : // TODO:
2778 : // - if a relation has a non-incremental persistent layer on a child branch, then we
2779 : // don't need to keep that in the parent anymore. But currently
2780 : // we do.
2781 756 : async fn gc_iteration_internal(
2782 756 : &self,
2783 756 : target_timeline_id: Option<TimelineId>,
2784 756 : horizon: u64,
2785 756 : pitr: Duration,
2786 756 : cancel: &CancellationToken,
2787 756 : ctx: &RequestContext,
2788 756 : ) -> anyhow::Result<GcResult> {
2789 756 : let mut totals: GcResult = Default::default();
2790 756 : let now = Instant::now();
2791 :
2792 756 : let gc_timelines = match self
2793 756 : .refresh_gc_info_internal(target_timeline_id, horizon, pitr, cancel, ctx)
2794 718 : .await
2795 : {
2796 756 : Ok(result) => result,
2797 0 : Err(e) => {
2798 0 : if let Some(PageReconstructError::Cancelled) =
2799 0 : e.downcast_ref::<PageReconstructError>()
2800 : {
2801 : // Handle cancellation
2802 0 : totals.elapsed = now.elapsed();
2803 0 : return Ok(totals);
2804 : } else {
2805 : // Propagate other errors
2806 0 : return Err(e);
2807 : }
2808 : }
2809 : };
2810 :
2811 756 : failpoint_support::sleep_millis_async!("gc_iteration_internal_after_getting_gc_timelines");
2812 :
2813 : // If there is nothing to GC, we don't want any messages in the INFO log.
2814 756 : if !gc_timelines.is_empty() {
2815 756 : info!("{} timelines need GC", gc_timelines.len());
2816 : } else {
2817 0 : debug!("{} timelines need GC", gc_timelines.len());
2818 : }
2819 :
2820 : // Perform GC for each timeline.
2821 : //
2822 : // Note that we don't hold the `Tenant::gc_cs` lock here because we don't want to delay the
2823 : // branch creation task, which requires the GC lock. A GC iteration can run concurrently
2824 : // with branch creation.
2825 : //
2826 : // See comments in [`Tenant::branch_timeline`] for more information about why branch
2827 : // creation task can run concurrently with timeline's GC iteration.
2828 1512 : for timeline in gc_timelines {
2829 756 : if cancel.is_cancelled() {
2830 : // We were requested to shut down. Stop and return with the progress we
2831 : // made.
2832 0 : break;
2833 756 : }
2834 756 : let result = timeline.gc().await?;
2835 756 : totals += result;
2836 : }
2837 :
2838 756 : totals.elapsed = now.elapsed();
2839 756 : Ok(totals)
2840 756 : }
2841 :
2842 : /// Refreshes the Timeline::gc_info for all timelines, returning the
2843 : /// vector of timelines which have [`Timeline::get_last_record_lsn`] past
2844 : /// [`Tenant::get_gc_horizon`].
2845 : ///
2846 : /// This is usually executed as part of periodic gc, but can now be triggered more often.
2847 0 : pub async fn refresh_gc_info(
2848 0 : &self,
2849 0 : cancel: &CancellationToken,
2850 0 : ctx: &RequestContext,
2851 0 : ) -> anyhow::Result<Vec<Arc<Timeline>>> {
2852 0 : // since this method can now be called at different rates than the configured gc loop, it
2853 0 : // might be that these configuration values get applied faster than what it was previously,
2854 0 : // since these were only read from the gc task.
2855 0 : let horizon = self.get_gc_horizon();
2856 0 : let pitr = self.get_pitr_interval();
2857 0 :
2858 0 : // refresh all timelines
2859 0 : let target_timeline_id = None;
2860 0 :
2861 0 : self.refresh_gc_info_internal(target_timeline_id, horizon, pitr, cancel, ctx)
2862 0 : .await
2863 0 : }
2864 :
2865 756 : async fn refresh_gc_info_internal(
2866 756 : &self,
2867 756 : target_timeline_id: Option<TimelineId>,
2868 756 : horizon: u64,
2869 756 : pitr: Duration,
2870 756 : cancel: &CancellationToken,
2871 756 : ctx: &RequestContext,
2872 756 : ) -> anyhow::Result<Vec<Arc<Timeline>>> {
2873 756 : // before taking the gc_cs lock, do the heavier weight finding of gc_cutoff points for
2874 756 : // currently visible timelines.
2875 756 : let timelines = self
2876 756 : .timelines
2877 756 : .lock()
2878 756 : .unwrap()
2879 756 : .values()
2880 3314 : .filter(|tl| match target_timeline_id.as_ref() {
2881 3314 : Some(target) => &tl.timeline_id == target,
2882 0 : None => true,
2883 3314 : })
2884 756 : .cloned()
2885 756 : .collect::<Vec<_>>();
2886 756 :
2887 756 : let mut gc_cutoffs: HashMap<TimelineId, GcCutoffs> =
2888 756 : HashMap::with_capacity(timelines.len());
2889 :
2890 756 : for timeline in timelines.iter() {
2891 756 : let cutoff = timeline
2892 756 : .get_last_record_lsn()
2893 756 : .checked_sub(horizon)
2894 756 : .unwrap_or(Lsn(0));
2895 :
2896 756 : let res = timeline.find_gc_cutoffs(cutoff, pitr, cancel, ctx).await;
2897 :
2898 756 : match res {
2899 756 : Ok(cutoffs) => {
2900 756 : let old = gc_cutoffs.insert(timeline.timeline_id, cutoffs);
2901 756 : assert!(old.is_none());
2902 : }
2903 0 : Err(e) => {
2904 0 : tracing::warn!(timeline_id = %timeline.timeline_id, "ignoring failure to find gc cutoffs: {e:#}");
2905 : }
2906 : }
2907 : }
2908 :
2909 756 : if !self.is_active() {
2910 0 : anyhow::bail!("shutting down");
2911 756 : }
2912 :
2913 : // grab mutex to prevent new timelines from being created here; avoid doing long operations
2914 : // because that will stall branch creation.
2915 756 : let gc_cs = self.gc_cs.lock().await;
2916 :
2917 : // Scan all timelines. For each timeline, remember the timeline ID and
2918 : // the branch point where it was created.
2919 756 : let (all_branchpoints, timeline_ids): (BTreeSet<(TimelineId, Lsn)>, _) = {
2920 756 : let timelines = self.timelines.lock().unwrap();
2921 756 : let mut all_branchpoints = BTreeSet::new();
2922 756 : let timeline_ids = {
2923 756 : if let Some(target_timeline_id) = target_timeline_id.as_ref() {
2924 756 : if timelines.get(target_timeline_id).is_none() {
2925 0 : bail!("gc target timeline does not exist")
2926 756 : }
2927 0 : };
2928 :
2929 756 : timelines
2930 756 : .iter()
2931 3314 : .map(|(timeline_id, timeline_entry)| {
2932 2558 : if let Some(ancestor_timeline_id) =
2933 3314 : &timeline_entry.get_ancestor_timeline_id()
2934 : {
2935 : // If target_timeline is specified, we only need to know branchpoints of its children
2936 2558 : if let Some(timeline_id) = target_timeline_id {
2937 2558 : if ancestor_timeline_id == &timeline_id {
2938 6 : all_branchpoints.insert((
2939 6 : *ancestor_timeline_id,
2940 6 : timeline_entry.get_ancestor_lsn(),
2941 6 : ));
2942 2552 : }
2943 : }
2944 : // Collect branchpoints for all timelines
2945 0 : else {
2946 0 : all_branchpoints.insert((
2947 0 : *ancestor_timeline_id,
2948 0 : timeline_entry.get_ancestor_lsn(),
2949 0 : ));
2950 0 : }
2951 756 : }
2952 :
2953 3314 : *timeline_id
2954 3314 : })
2955 756 : .collect::<Vec<_>>()
2956 756 : };
2957 756 : (all_branchpoints, timeline_ids)
2958 756 : };
2959 756 :
2960 756 : // Ok, we now know all the branch points.
2961 756 : // Update the GC information for each timeline.
2962 756 : let mut gc_timelines = Vec::with_capacity(timeline_ids.len());
2963 4070 : for timeline_id in timeline_ids {
2964 : // Timeline is known to be local and loaded.
2965 3314 : let timeline = self
2966 3314 : .get_timeline(timeline_id, false)
2967 3314 : .with_context(|| format!("Timeline {timeline_id} was not found"))?;
2968 :
2969 : // If target_timeline is specified, ignore all other timelines
2970 3314 : if let Some(target_timeline_id) = target_timeline_id {
2971 3314 : if timeline_id != target_timeline_id {
2972 2558 : continue;
2973 756 : }
2974 0 : }
2975 :
2976 756 : let branchpoints: Vec<Lsn> = all_branchpoints
2977 756 : .range((
2978 756 : Included((timeline_id, Lsn(0))),
2979 756 : Included((timeline_id, Lsn(u64::MAX))),
2980 756 : ))
2981 756 : .map(|&x| x.1)
2982 756 : .collect();
2983 756 :
2984 756 : {
2985 756 : let mut target = timeline.gc_info.write().unwrap();
2986 756 :
2987 756 : match gc_cutoffs.remove(&timeline_id) {
2988 756 : Some(cutoffs) => {
2989 756 : *target = GcInfo {
2990 756 : retain_lsns: branchpoints,
2991 756 : cutoffs,
2992 756 : };
2993 756 : }
2994 0 : None => {
2995 0 : // reasons for this being unavailable:
2996 0 : // - this timeline was created while we were finding cutoffs
2997 0 : // - lsn for timestamp search fails for this timeline repeatedly
2998 0 : //
2999 0 : // in both cases, refreshing the branchpoints is correct.
3000 0 : target.retain_lsns = branchpoints;
3001 0 : }
3002 : };
3003 : }
3004 :
3005 756 : gc_timelines.push(timeline);
3006 : }
3007 756 : drop(gc_cs);
3008 756 : Ok(gc_timelines)
3009 756 : }
3010 :
3011 : /// A substitute for `branch_timeline` for use in unit tests.
3012 : /// The returned timeline will have state value `Active` to make various `anyhow::ensure!()`
3013 : /// calls pass, but, we do not actually call `.activate()` under the hood. So, none of the
3014 : /// timeline background tasks are launched, except the flush loop.
3015 : #[cfg(test)]
3016 228 : async fn branch_timeline_test(
3017 228 : &self,
3018 228 : src_timeline: &Arc<Timeline>,
3019 228 : dst_id: TimelineId,
3020 228 : start_lsn: Option<Lsn>,
3021 228 : ctx: &RequestContext,
3022 228 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
3023 228 : let create_guard = self.create_timeline_create_guard(dst_id).unwrap();
3024 228 : let tl = self
3025 228 : .branch_timeline_impl(src_timeline, dst_id, start_lsn, create_guard, ctx)
3026 4 : .await?;
3027 224 : tl.set_state(TimelineState::Active);
3028 224 : Ok(tl)
3029 228 : }
3030 :
3031 : /// Branch an existing timeline.
3032 : ///
3033 : /// The caller is responsible for activating the returned timeline.
3034 0 : async fn branch_timeline(
3035 0 : &self,
3036 0 : src_timeline: &Arc<Timeline>,
3037 0 : dst_id: TimelineId,
3038 0 : start_lsn: Option<Lsn>,
3039 0 : timeline_create_guard: TimelineCreateGuard<'_>,
3040 0 : ctx: &RequestContext,
3041 0 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
3042 0 : self.branch_timeline_impl(src_timeline, dst_id, start_lsn, timeline_create_guard, ctx)
3043 0 : .await
3044 0 : }
3045 :
3046 228 : async fn branch_timeline_impl(
3047 228 : &self,
3048 228 : src_timeline: &Arc<Timeline>,
3049 228 : dst_id: TimelineId,
3050 228 : start_lsn: Option<Lsn>,
3051 228 : timeline_create_guard: TimelineCreateGuard<'_>,
3052 228 : _ctx: &RequestContext,
3053 228 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
3054 228 : let src_id = src_timeline.timeline_id;
3055 :
3056 : // We will validate our ancestor LSN in this function. Acquire the GC lock so that
3057 : // this check cannot race with GC, and the ancestor LSN is guaranteed to remain
3058 : // valid while we are creating the branch.
3059 228 : let _gc_cs = self.gc_cs.lock().await;
3060 :
3061 : // If no start LSN is specified, we branch the new timeline from the source timeline's last record LSN
3062 228 : let start_lsn = start_lsn.unwrap_or_else(|| {
3063 2 : let lsn = src_timeline.get_last_record_lsn();
3064 2 : info!("branching timeline {dst_id} from timeline {src_id} at last record LSN: {lsn}");
3065 2 : lsn
3066 228 : });
3067 228 :
3068 228 : // Ensure that `start_lsn` is valid, i.e. the LSN is within the PITR
3069 228 : // horizon on the source timeline
3070 228 : //
3071 228 : // We check it against both the planned GC cutoff stored in 'gc_info',
3072 228 : // and the 'latest_gc_cutoff' of the last GC that was performed. The
3073 228 : // planned GC cutoff in 'gc_info' is normally larger than
3074 228 : // 'latest_gc_cutoff_lsn', but beware of corner cases like if you just
3075 228 : // changed the GC settings for the tenant to make the PITR window
3076 228 : // larger, but some of the data was already removed by an earlier GC
3077 228 : // iteration.
3078 228 :
3079 228 : // check against last actual 'latest_gc_cutoff' first
3080 228 : let latest_gc_cutoff_lsn = src_timeline.get_latest_gc_cutoff_lsn();
3081 228 : src_timeline
3082 228 : .check_lsn_is_in_scope(start_lsn, &latest_gc_cutoff_lsn)
3083 228 : .context(format!(
3084 228 : "invalid branch start lsn: less than latest GC cutoff {}",
3085 228 : *latest_gc_cutoff_lsn,
3086 228 : ))
3087 228 : .map_err(CreateTimelineError::AncestorLsn)?;
3088 :
3089 : // and then the planned GC cutoff
3090 : {
3091 224 : let gc_info = src_timeline.gc_info.read().unwrap();
3092 224 : let cutoff = gc_info.min_cutoff();
3093 224 : if start_lsn < cutoff {
3094 0 : return Err(CreateTimelineError::AncestorLsn(anyhow::anyhow!(
3095 0 : "invalid branch start lsn: less than planned GC cutoff {cutoff}"
3096 0 : )));
3097 224 : }
3098 224 : }
3099 224 :
3100 224 : //
3101 224 : // The branch point is valid, and we are still holding the 'gc_cs' lock
3102 224 : // so that GC cannot advance the GC cutoff until we are finished.
3103 224 : // Proceed with the branch creation.
3104 224 : //
3105 224 :
3106 224 : // Determine prev-LSN for the new timeline. We can only determine it if
3107 224 : // the timeline was branched at the current end of the source timeline.
3108 224 : let RecordLsn {
3109 224 : last: src_last,
3110 224 : prev: src_prev,
3111 224 : } = src_timeline.get_last_record_rlsn();
3112 224 : let dst_prev = if src_last == start_lsn {
3113 214 : Some(src_prev)
3114 : } else {
3115 10 : None
3116 : };
3117 :
3118 : // Create the metadata file, noting the ancestor of the new timeline.
3119 : // There is initially no data in it, but all the read-calls know to look
3120 : // into the ancestor.
3121 224 : let metadata = TimelineMetadata::new(
3122 224 : start_lsn,
3123 224 : dst_prev,
3124 224 : Some(src_id),
3125 224 : start_lsn,
3126 224 : *src_timeline.latest_gc_cutoff_lsn.read(), // FIXME: should we hold onto this guard longer?
3127 224 : src_timeline.initdb_lsn,
3128 224 : src_timeline.pg_version,
3129 224 : );
3130 :
3131 224 : let uninitialized_timeline = self
3132 224 : .prepare_new_timeline(
3133 224 : dst_id,
3134 224 : &metadata,
3135 224 : timeline_create_guard,
3136 224 : start_lsn + 1,
3137 224 : Some(Arc::clone(src_timeline)),
3138 224 : src_timeline.last_aux_file_policy.load(),
3139 224 : )
3140 0 : .await?;
3141 :
3142 224 : let new_timeline = uninitialized_timeline.finish_creation()?;
3143 :
3144 : // Root timeline gets its layers during creation and uploads them along with the metadata.
3145 : // A branch timeline though, when created, can get no writes for some time, hence won't get any layers created.
3146 : // We still need to upload its metadata eagerly: if other nodes `attach` the tenant and miss this timeline, their GC
3147 : // could get incorrect information and remove more layers, than needed.
3148 : // See also https://github.com/neondatabase/neon/issues/3865
3149 224 : new_timeline
3150 224 : .remote_client
3151 224 : .schedule_index_upload_for_full_metadata_update(&metadata)
3152 224 : .context("branch initial metadata upload")?;
3153 :
3154 224 : Ok(new_timeline)
3155 228 : }
3156 :
3157 : /// For unit tests, make this visible so that other modules can directly create timelines
3158 : #[cfg(test)]
3159 4 : #[tracing::instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), %timeline_id))]
3160 : pub(crate) async fn bootstrap_timeline_test(
3161 : &self,
3162 : timeline_id: TimelineId,
3163 : pg_version: u32,
3164 : load_existing_initdb: Option<TimelineId>,
3165 : ctx: &RequestContext,
3166 : ) -> anyhow::Result<Arc<Timeline>> {
3167 : let create_guard = self.create_timeline_create_guard(timeline_id).unwrap();
3168 : self.bootstrap_timeline(
3169 : timeline_id,
3170 : pg_version,
3171 : load_existing_initdb,
3172 : create_guard,
3173 : ctx,
3174 : )
3175 : .await
3176 : }
3177 :
3178 0 : async fn upload_initdb(
3179 0 : &self,
3180 0 : timelines_path: &Utf8PathBuf,
3181 0 : pgdata_path: &Utf8PathBuf,
3182 0 : timeline_id: &TimelineId,
3183 0 : ) -> anyhow::Result<()> {
3184 0 : let temp_path = timelines_path.join(format!(
3185 0 : "{INITDB_PATH}.upload-{timeline_id}.{TEMP_FILE_SUFFIX}"
3186 0 : ));
3187 :
3188 : scopeguard::defer! {
3189 : if let Err(e) = fs::remove_file(&temp_path) {
3190 : error!("Failed to remove temporary initdb archive '{temp_path}': {e}");
3191 : }
3192 : }
3193 :
3194 0 : let (pgdata_zstd, tar_zst_size) = create_zst_tarball(pgdata_path, &temp_path).await?;
3195 : const INITDB_TAR_ZST_WARN_LIMIT: u64 = 2 * 1024 * 1024;
3196 0 : if tar_zst_size > INITDB_TAR_ZST_WARN_LIMIT {
3197 0 : warn!(
3198 0 : "compressed {temp_path} size of {tar_zst_size} is above limit {INITDB_TAR_ZST_WARN_LIMIT}."
3199 : );
3200 0 : }
3201 :
3202 : pausable_failpoint!("before-initdb-upload");
3203 :
3204 0 : backoff::retry(
3205 0 : || async {
3206 0 : self::remote_timeline_client::upload_initdb_dir(
3207 0 : &self.remote_storage,
3208 0 : &self.tenant_shard_id.tenant_id,
3209 0 : timeline_id,
3210 0 : pgdata_zstd.try_clone().await?,
3211 0 : tar_zst_size,
3212 0 : &self.cancel,
3213 0 : )
3214 0 : .await
3215 0 : },
3216 0 : |_| false,
3217 0 : 3,
3218 0 : u32::MAX,
3219 0 : "persist_initdb_tar_zst",
3220 0 : &self.cancel,
3221 0 : )
3222 0 : .await
3223 0 : .ok_or_else(|| anyhow::Error::new(TimeoutOrCancel::Cancel))
3224 0 : .and_then(|x| x)
3225 0 : }
3226 :
3227 : /// - run initdb to init temporary instance and get bootstrap data
3228 : /// - after initialization completes, tar up the temp dir and upload it to S3.
3229 : ///
3230 : /// The caller is responsible for activating the returned timeline.
3231 2 : async fn bootstrap_timeline(
3232 2 : &self,
3233 2 : timeline_id: TimelineId,
3234 2 : pg_version: u32,
3235 2 : load_existing_initdb: Option<TimelineId>,
3236 2 : timeline_create_guard: TimelineCreateGuard<'_>,
3237 2 : ctx: &RequestContext,
3238 2 : ) -> anyhow::Result<Arc<Timeline>> {
3239 2 : // create a `tenant/{tenant_id}/timelines/basebackup-{timeline_id}.{TEMP_FILE_SUFFIX}/`
3240 2 : // temporary directory for basebackup files for the given timeline.
3241 2 :
3242 2 : let timelines_path = self.conf.timelines_path(&self.tenant_shard_id);
3243 2 : let pgdata_path = path_with_suffix_extension(
3244 2 : timelines_path.join(format!("basebackup-{timeline_id}")),
3245 2 : TEMP_FILE_SUFFIX,
3246 2 : );
3247 2 :
3248 2 : // Remove whatever was left from the previous runs: safe because TimelineCreateGuard guarantees
3249 2 : // we won't race with other creations or existent timelines with the same path.
3250 2 : if pgdata_path.exists() {
3251 0 : fs::remove_dir_all(&pgdata_path).with_context(|| {
3252 0 : format!("Failed to remove already existing initdb directory: {pgdata_path}")
3253 0 : })?;
3254 2 : }
3255 :
3256 : // this new directory is very temporary, set to remove it immediately after bootstrap, we don't need it
3257 : scopeguard::defer! {
3258 : if let Err(e) = fs::remove_dir_all(&pgdata_path) {
3259 : // this is unlikely, but we will remove the directory on pageserver restart or another bootstrap call
3260 : error!("Failed to remove temporary initdb directory '{pgdata_path}': {e}");
3261 : }
3262 : }
3263 2 : if let Some(existing_initdb_timeline_id) = load_existing_initdb {
3264 2 : if existing_initdb_timeline_id != timeline_id {
3265 0 : let source_path = &remote_initdb_archive_path(
3266 0 : &self.tenant_shard_id.tenant_id,
3267 0 : &existing_initdb_timeline_id,
3268 0 : );
3269 0 : let dest_path =
3270 0 : &remote_initdb_archive_path(&self.tenant_shard_id.tenant_id, &timeline_id);
3271 0 :
3272 0 : // if this fails, it will get retried by retried control plane requests
3273 0 : self.remote_storage
3274 0 : .copy_object(source_path, dest_path, &self.cancel)
3275 0 : .await
3276 0 : .context("copy initdb tar")?;
3277 2 : }
3278 2 : let (initdb_tar_zst_path, initdb_tar_zst) =
3279 2 : self::remote_timeline_client::download_initdb_tar_zst(
3280 2 : self.conf,
3281 2 : &self.remote_storage,
3282 2 : &self.tenant_shard_id,
3283 2 : &existing_initdb_timeline_id,
3284 2 : &self.cancel,
3285 2 : )
3286 735 : .await
3287 2 : .context("download initdb tar")?;
3288 :
3289 : scopeguard::defer! {
3290 : if let Err(e) = fs::remove_file(&initdb_tar_zst_path) {
3291 : error!("Failed to remove temporary initdb archive '{initdb_tar_zst_path}': {e}");
3292 : }
3293 : }
3294 :
3295 2 : let buf_read =
3296 2 : BufReader::with_capacity(remote_timeline_client::BUFFER_SIZE, initdb_tar_zst);
3297 2 : extract_zst_tarball(&pgdata_path, buf_read)
3298 10221 : .await
3299 2 : .context("extract initdb tar")?;
3300 : } else {
3301 : // Init temporarily repo to get bootstrap data, this creates a directory in the `pgdata_path` path
3302 0 : run_initdb(self.conf, &pgdata_path, pg_version, &self.cancel).await?;
3303 :
3304 : // Upload the created data dir to S3
3305 0 : if self.tenant_shard_id().is_shard_zero() {
3306 0 : self.upload_initdb(&timelines_path, &pgdata_path, &timeline_id)
3307 0 : .await?;
3308 0 : }
3309 : }
3310 2 : let pgdata_lsn = import_datadir::get_lsn_from_controlfile(&pgdata_path)?.align();
3311 2 :
3312 2 : // Import the contents of the data directory at the initial checkpoint
3313 2 : // LSN, and any WAL after that.
3314 2 : // Initdb lsn will be equal to last_record_lsn which will be set after import.
3315 2 : // Because we know it upfront avoid having an option or dummy zero value by passing it to the metadata.
3316 2 : let new_metadata = TimelineMetadata::new(
3317 2 : Lsn(0),
3318 2 : None,
3319 2 : None,
3320 2 : Lsn(0),
3321 2 : pgdata_lsn,
3322 2 : pgdata_lsn,
3323 2 : pg_version,
3324 2 : );
3325 2 : let raw_timeline = self
3326 2 : .prepare_new_timeline(
3327 2 : timeline_id,
3328 2 : &new_metadata,
3329 2 : timeline_create_guard,
3330 2 : pgdata_lsn,
3331 2 : None,
3332 2 : None,
3333 2 : )
3334 0 : .await?;
3335 :
3336 2 : let tenant_shard_id = raw_timeline.owning_tenant.tenant_shard_id;
3337 2 : let unfinished_timeline = raw_timeline.raw_timeline()?;
3338 :
3339 2 : import_datadir::import_timeline_from_postgres_datadir(
3340 2 : unfinished_timeline,
3341 2 : &pgdata_path,
3342 2 : pgdata_lsn,
3343 2 : ctx,
3344 2 : )
3345 9083 : .await
3346 2 : .with_context(|| {
3347 0 : format!("Failed to import pgdatadir for timeline {tenant_shard_id}/{timeline_id}")
3348 2 : })?;
3349 :
3350 : // Flush the new layer files to disk, before we make the timeline as available to
3351 : // the outside world.
3352 : //
3353 : // Flush loop needs to be spawned in order to be able to flush.
3354 2 : unfinished_timeline.maybe_spawn_flush_loop();
3355 2 :
3356 2 : fail::fail_point!("before-checkpoint-new-timeline", |_| {
3357 0 : anyhow::bail!("failpoint before-checkpoint-new-timeline");
3358 2 : });
3359 :
3360 2 : unfinished_timeline
3361 2 : .freeze_and_flush()
3362 2 : .await
3363 2 : .with_context(|| {
3364 0 : format!(
3365 0 : "Failed to flush after pgdatadir import for timeline {tenant_shard_id}/{timeline_id}"
3366 0 : )
3367 2 : })?;
3368 :
3369 : // All done!
3370 2 : let timeline = raw_timeline.finish_creation()?;
3371 :
3372 2 : Ok(timeline)
3373 2 : }
3374 :
3375 : /// Call this before constructing a timeline, to build its required structures
3376 346 : fn build_timeline_resources(&self, timeline_id: TimelineId) -> TimelineResources {
3377 346 : let remote_client = RemoteTimelineClient::new(
3378 346 : self.remote_storage.clone(),
3379 346 : self.deletion_queue_client.clone(),
3380 346 : self.conf,
3381 346 : self.tenant_shard_id,
3382 346 : timeline_id,
3383 346 : self.generation,
3384 346 : );
3385 346 : TimelineResources {
3386 346 : remote_client,
3387 346 : deletion_queue_client: self.deletion_queue_client.clone(),
3388 346 : timeline_get_throttle: self.timeline_get_throttle.clone(),
3389 346 : }
3390 346 : }
3391 :
3392 : /// Creates intermediate timeline structure and its files.
3393 : ///
3394 : /// An empty layer map is initialized, and new data and WAL can be imported starting
3395 : /// at 'disk_consistent_lsn'. After any initial data has been imported, call
3396 : /// `finish_creation` to insert the Timeline into the timelines map.
3397 346 : async fn prepare_new_timeline<'a>(
3398 346 : &'a self,
3399 346 : new_timeline_id: TimelineId,
3400 346 : new_metadata: &TimelineMetadata,
3401 346 : create_guard: TimelineCreateGuard<'a>,
3402 346 : start_lsn: Lsn,
3403 346 : ancestor: Option<Arc<Timeline>>,
3404 346 : last_aux_file_policy: Option<AuxFilePolicy>,
3405 346 : ) -> anyhow::Result<UninitializedTimeline> {
3406 346 : let tenant_shard_id = self.tenant_shard_id;
3407 346 :
3408 346 : let resources = self.build_timeline_resources(new_timeline_id);
3409 346 : resources
3410 346 : .remote_client
3411 346 : .init_upload_queue_for_empty_remote(new_metadata)?;
3412 :
3413 346 : let timeline_struct = self
3414 346 : .create_timeline_struct(
3415 346 : new_timeline_id,
3416 346 : new_metadata,
3417 346 : ancestor,
3418 346 : resources,
3419 346 : CreateTimelineCause::Load,
3420 346 : last_aux_file_policy,
3421 346 : )
3422 346 : .context("Failed to create timeline data structure")?;
3423 :
3424 346 : timeline_struct.init_empty_layer_map(start_lsn);
3425 :
3426 346 : if let Err(e) = self
3427 346 : .create_timeline_files(&create_guard.timeline_path)
3428 0 : .await
3429 : {
3430 0 : error!("Failed to create initial files for timeline {tenant_shard_id}/{new_timeline_id}, cleaning up: {e:?}");
3431 0 : cleanup_timeline_directory(create_guard);
3432 0 : return Err(e);
3433 346 : }
3434 346 :
3435 346 : debug!(
3436 0 : "Successfully created initial files for timeline {tenant_shard_id}/{new_timeline_id}"
3437 : );
3438 :
3439 346 : Ok(UninitializedTimeline::new(
3440 346 : self,
3441 346 : new_timeline_id,
3442 346 : Some((timeline_struct, create_guard)),
3443 346 : ))
3444 346 : }
3445 :
3446 346 : async fn create_timeline_files(&self, timeline_path: &Utf8Path) -> anyhow::Result<()> {
3447 346 : crashsafe::create_dir(timeline_path).context("Failed to create timeline directory")?;
3448 :
3449 346 : fail::fail_point!("after-timeline-dir-creation", |_| {
3450 0 : anyhow::bail!("failpoint after-timeline-dir-creation");
3451 346 : });
3452 :
3453 346 : Ok(())
3454 346 : }
3455 :
3456 : /// Get a guard that provides exclusive access to the timeline directory, preventing
3457 : /// concurrent attempts to create the same timeline.
3458 352 : fn create_timeline_create_guard(
3459 352 : &self,
3460 352 : timeline_id: TimelineId,
3461 352 : ) -> Result<TimelineCreateGuard, TimelineExclusionError> {
3462 352 : let tenant_shard_id = self.tenant_shard_id;
3463 352 :
3464 352 : let timeline_path = self.conf.timeline_path(&tenant_shard_id, &timeline_id);
3465 :
3466 352 : let create_guard = TimelineCreateGuard::new(self, timeline_id, timeline_path.clone())?;
3467 :
3468 : // At this stage, we have got exclusive access to in-memory state for this timeline ID
3469 : // for creation.
3470 : // A timeline directory should never exist on disk already:
3471 : // - a previous failed creation would have cleaned up after itself
3472 : // - a pageserver restart would clean up timeline directories that don't have valid remote state
3473 : //
3474 : // Therefore it is an unexpected internal error to encounter a timeline directory already existing here,
3475 : // this error may indicate a bug in cleanup on failed creations.
3476 350 : if timeline_path.exists() {
3477 0 : return Err(TimelineExclusionError::Other(anyhow::anyhow!(
3478 0 : "Timeline directory already exists! This is a bug."
3479 0 : )));
3480 350 : }
3481 350 :
3482 350 : Ok(create_guard)
3483 352 : }
3484 :
3485 : /// Gathers inputs from all of the timelines to produce a sizing model input.
3486 : ///
3487 : /// Future is cancellation safe. Only one calculation can be running at once per tenant.
3488 0 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
3489 : pub async fn gather_size_inputs(
3490 : &self,
3491 : // `max_retention_period` overrides the cutoff that is used to calculate the size
3492 : // (only if it is shorter than the real cutoff).
3493 : max_retention_period: Option<u64>,
3494 : cause: LogicalSizeCalculationCause,
3495 : cancel: &CancellationToken,
3496 : ctx: &RequestContext,
3497 : ) -> anyhow::Result<size::ModelInputs> {
3498 : let logical_sizes_at_once = self
3499 : .conf
3500 : .concurrent_tenant_size_logical_size_queries
3501 : .inner();
3502 :
3503 : // TODO: Having a single mutex block concurrent reads is not great for performance.
3504 : //
3505 : // But the only case where we need to run multiple of these at once is when we
3506 : // request a size for a tenant manually via API, while another background calculation
3507 : // is in progress (which is not a common case).
3508 : //
3509 : // See more for on the issue #2748 condenced out of the initial PR review.
3510 : let mut shared_cache = tokio::select! {
3511 : locked = self.cached_logical_sizes.lock() => locked,
3512 : _ = cancel.cancelled() => anyhow::bail!("cancelled"),
3513 : _ = self.cancel.cancelled() => anyhow::bail!("tenant is shutting down"),
3514 : };
3515 :
3516 : size::gather_inputs(
3517 : self,
3518 : logical_sizes_at_once,
3519 : max_retention_period,
3520 : &mut shared_cache,
3521 : cause,
3522 : cancel,
3523 : ctx,
3524 : )
3525 : .await
3526 : }
3527 :
3528 : /// Calculate synthetic tenant size and cache the result.
3529 : /// This is periodically called by background worker.
3530 : /// result is cached in tenant struct
3531 0 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
3532 : pub async fn calculate_synthetic_size(
3533 : &self,
3534 : cause: LogicalSizeCalculationCause,
3535 : cancel: &CancellationToken,
3536 : ctx: &RequestContext,
3537 : ) -> anyhow::Result<u64> {
3538 : let inputs = self.gather_size_inputs(None, cause, cancel, ctx).await?;
3539 :
3540 : let size = inputs.calculate()?;
3541 :
3542 : self.set_cached_synthetic_size(size);
3543 :
3544 : Ok(size)
3545 : }
3546 :
3547 : /// Cache given synthetic size and update the metric value
3548 0 : pub fn set_cached_synthetic_size(&self, size: u64) {
3549 0 : self.cached_synthetic_tenant_size
3550 0 : .store(size, Ordering::Relaxed);
3551 0 :
3552 0 : // Only shard zero should be calculating synthetic sizes
3553 0 : debug_assert!(self.shard_identity.is_shard_zero());
3554 :
3555 0 : TENANT_SYNTHETIC_SIZE_METRIC
3556 0 : .get_metric_with_label_values(&[&self.tenant_shard_id.tenant_id.to_string()])
3557 0 : .unwrap()
3558 0 : .set(size);
3559 0 : }
3560 :
3561 0 : pub fn cached_synthetic_size(&self) -> u64 {
3562 0 : self.cached_synthetic_tenant_size.load(Ordering::Relaxed)
3563 0 : }
3564 :
3565 : /// Flush any in-progress layers, schedule uploads, and wait for uploads to complete.
3566 : ///
3567 : /// This function can take a long time: callers should wrap it in a timeout if calling
3568 : /// from an external API handler.
3569 : ///
3570 : /// Cancel-safety: cancelling this function may leave I/O running, but such I/O is
3571 : /// still bounded by tenant/timeline shutdown.
3572 0 : #[tracing::instrument(skip_all)]
3573 : pub(crate) async fn flush_remote(&self) -> anyhow::Result<()> {
3574 : let timelines = self.timelines.lock().unwrap().clone();
3575 :
3576 0 : async fn flush_timeline(_gate: GateGuard, timeline: Arc<Timeline>) -> anyhow::Result<()> {
3577 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Flushing...");
3578 0 : timeline.freeze_and_flush().await?;
3579 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Waiting for uploads...");
3580 0 : timeline.remote_client.wait_completion().await?;
3581 :
3582 0 : Ok(())
3583 0 : }
3584 :
3585 : // We do not use a JoinSet for these tasks, because we don't want them to be
3586 : // aborted when this function's future is cancelled: they should stay alive
3587 : // holding their GateGuard until they complete, to ensure their I/Os complete
3588 : // before Timeline shutdown completes.
3589 : let mut results = FuturesUnordered::new();
3590 :
3591 : for (_timeline_id, timeline) in timelines {
3592 : // Run each timeline's flush in a task holding the timeline's gate: this
3593 : // means that if this function's future is cancelled, the Timeline shutdown
3594 : // will still wait for any I/O in here to complete.
3595 : let Ok(gate) = timeline.gate.enter() else {
3596 : continue;
3597 : };
3598 0 : let jh = tokio::task::spawn(async move { flush_timeline(gate, timeline).await });
3599 : results.push(jh);
3600 : }
3601 :
3602 : while let Some(r) = results.next().await {
3603 : if let Err(e) = r {
3604 : if !e.is_cancelled() && !e.is_panic() {
3605 : tracing::error!("unexpected join error: {e:?}");
3606 : }
3607 : }
3608 : }
3609 :
3610 : // The flushes we did above were just writes, but the Tenant might have had
3611 : // pending deletions as well from recent compaction/gc: we want to flush those
3612 : // as well. This requires flushing the global delete queue. This is cheap
3613 : // because it's typically a no-op.
3614 : match self.deletion_queue_client.flush_execute().await {
3615 : Ok(_) => {}
3616 : Err(DeletionQueueError::ShuttingDown) => {}
3617 : }
3618 :
3619 : Ok(())
3620 : }
3621 :
3622 0 : pub(crate) fn get_tenant_conf(&self) -> TenantConfOpt {
3623 0 : self.tenant_conf.load().tenant_conf.clone()
3624 0 : }
3625 : }
3626 :
3627 : /// Create the cluster temporarily in 'initdbpath' directory inside the repository
3628 : /// to get bootstrap data for timeline initialization.
3629 0 : async fn run_initdb(
3630 0 : conf: &'static PageServerConf,
3631 0 : initdb_target_dir: &Utf8Path,
3632 0 : pg_version: u32,
3633 0 : cancel: &CancellationToken,
3634 0 : ) -> Result<(), InitdbError> {
3635 0 : let initdb_bin_path = conf
3636 0 : .pg_bin_dir(pg_version)
3637 0 : .map_err(InitdbError::Other)?
3638 0 : .join("initdb");
3639 0 : let initdb_lib_dir = conf.pg_lib_dir(pg_version).map_err(InitdbError::Other)?;
3640 0 : info!(
3641 0 : "running {} in {}, libdir: {}",
3642 : initdb_bin_path, initdb_target_dir, initdb_lib_dir,
3643 : );
3644 :
3645 0 : let _permit = INIT_DB_SEMAPHORE.acquire().await;
3646 :
3647 0 : let initdb_command = tokio::process::Command::new(&initdb_bin_path)
3648 0 : .args(["-D", initdb_target_dir.as_ref()])
3649 0 : .args(["-U", &conf.superuser])
3650 0 : .args(["-E", "utf8"])
3651 0 : .arg("--no-instructions")
3652 0 : .arg("--no-sync")
3653 0 : .env_clear()
3654 0 : .env("LD_LIBRARY_PATH", &initdb_lib_dir)
3655 0 : .env("DYLD_LIBRARY_PATH", &initdb_lib_dir)
3656 0 : .stdin(std::process::Stdio::null())
3657 0 : // stdout invocation produces the same output every time, we don't need it
3658 0 : .stdout(std::process::Stdio::null())
3659 0 : // we would be interested in the stderr output, if there was any
3660 0 : .stderr(std::process::Stdio::piped())
3661 0 : .spawn()?;
3662 :
3663 : // Ideally we'd select here with the cancellation token, but the problem is that
3664 : // we can't safely terminate initdb: it launches processes of its own, and killing
3665 : // initdb doesn't kill them. After we return from this function, we want the target
3666 : // directory to be able to be cleaned up.
3667 : // See https://github.com/neondatabase/neon/issues/6385
3668 0 : let initdb_output = initdb_command.wait_with_output().await?;
3669 0 : if !initdb_output.status.success() {
3670 0 : return Err(InitdbError::Failed(
3671 0 : initdb_output.status,
3672 0 : initdb_output.stderr,
3673 0 : ));
3674 0 : }
3675 0 :
3676 0 : // This isn't true cancellation support, see above. Still return an error to
3677 0 : // excercise the cancellation code path.
3678 0 : if cancel.is_cancelled() {
3679 0 : return Err(InitdbError::Cancelled);
3680 0 : }
3681 0 :
3682 0 : Ok(())
3683 0 : }
3684 :
3685 : /// Dump contents of a layer file to stdout.
3686 0 : pub async fn dump_layerfile_from_path(
3687 0 : path: &Utf8Path,
3688 0 : verbose: bool,
3689 0 : ctx: &RequestContext,
3690 0 : ) -> anyhow::Result<()> {
3691 : use std::os::unix::fs::FileExt;
3692 :
3693 : // All layer files start with a two-byte "magic" value, to identify the kind of
3694 : // file.
3695 0 : let file = File::open(path)?;
3696 0 : let mut header_buf = [0u8; 2];
3697 0 : file.read_exact_at(&mut header_buf, 0)?;
3698 :
3699 0 : match u16::from_be_bytes(header_buf) {
3700 : crate::IMAGE_FILE_MAGIC => {
3701 0 : ImageLayer::new_for_path(path, file)?
3702 0 : .dump(verbose, ctx)
3703 0 : .await?
3704 : }
3705 : crate::DELTA_FILE_MAGIC => {
3706 0 : DeltaLayer::new_for_path(path, file)?
3707 0 : .dump(verbose, ctx)
3708 0 : .await?
3709 : }
3710 0 : magic => bail!("unrecognized magic identifier: {:?}", magic),
3711 : }
3712 :
3713 0 : Ok(())
3714 0 : }
3715 :
3716 : #[cfg(test)]
3717 : pub(crate) mod harness {
3718 : use bytes::{Bytes, BytesMut};
3719 : use once_cell::sync::OnceCell;
3720 : use pageserver_api::models::ShardParameters;
3721 : use pageserver_api::shard::ShardIndex;
3722 : use utils::logging;
3723 :
3724 : use crate::deletion_queue::mock::MockDeletionQueue;
3725 : use crate::walredo::apply_neon;
3726 : use crate::{repository::Key, walrecord::NeonWalRecord};
3727 :
3728 : use super::*;
3729 : use hex_literal::hex;
3730 : use utils::id::TenantId;
3731 :
3732 : pub const TIMELINE_ID: TimelineId =
3733 : TimelineId::from_array(hex!("11223344556677881122334455667788"));
3734 : pub const NEW_TIMELINE_ID: TimelineId =
3735 : TimelineId::from_array(hex!("AA223344556677881122334455667788"));
3736 :
3737 : /// Convenience function to create a page image with given string as the only content
3738 5028415 : pub fn test_img(s: &str) -> Bytes {
3739 5028415 : let mut buf = BytesMut::new();
3740 5028415 : buf.extend_from_slice(s.as_bytes());
3741 5028415 : buf.resize(64, 0);
3742 5028415 :
3743 5028415 : buf.freeze()
3744 5028415 : }
3745 :
3746 : impl From<TenantConf> for TenantConfOpt {
3747 130 : fn from(tenant_conf: TenantConf) -> Self {
3748 130 : Self {
3749 130 : checkpoint_distance: Some(tenant_conf.checkpoint_distance),
3750 130 : checkpoint_timeout: Some(tenant_conf.checkpoint_timeout),
3751 130 : compaction_target_size: Some(tenant_conf.compaction_target_size),
3752 130 : compaction_period: Some(tenant_conf.compaction_period),
3753 130 : compaction_threshold: Some(tenant_conf.compaction_threshold),
3754 130 : compaction_algorithm: Some(tenant_conf.compaction_algorithm),
3755 130 : gc_horizon: Some(tenant_conf.gc_horizon),
3756 130 : gc_period: Some(tenant_conf.gc_period),
3757 130 : image_creation_threshold: Some(tenant_conf.image_creation_threshold),
3758 130 : pitr_interval: Some(tenant_conf.pitr_interval),
3759 130 : walreceiver_connect_timeout: Some(tenant_conf.walreceiver_connect_timeout),
3760 130 : lagging_wal_timeout: Some(tenant_conf.lagging_wal_timeout),
3761 130 : max_lsn_wal_lag: Some(tenant_conf.max_lsn_wal_lag),
3762 130 : trace_read_requests: Some(tenant_conf.trace_read_requests),
3763 130 : eviction_policy: Some(tenant_conf.eviction_policy),
3764 130 : min_resident_size_override: tenant_conf.min_resident_size_override,
3765 130 : evictions_low_residence_duration_metric_threshold: Some(
3766 130 : tenant_conf.evictions_low_residence_duration_metric_threshold,
3767 130 : ),
3768 130 : heatmap_period: Some(tenant_conf.heatmap_period),
3769 130 : lazy_slru_download: Some(tenant_conf.lazy_slru_download),
3770 130 : timeline_get_throttle: Some(tenant_conf.timeline_get_throttle),
3771 130 : image_layer_creation_check_threshold: Some(
3772 130 : tenant_conf.image_layer_creation_check_threshold,
3773 130 : ),
3774 130 : switch_aux_file_policy: Some(tenant_conf.switch_aux_file_policy),
3775 130 : }
3776 130 : }
3777 : }
3778 :
3779 : pub struct TenantHarness {
3780 : pub conf: &'static PageServerConf,
3781 : pub tenant_conf: TenantConf,
3782 : pub tenant_shard_id: TenantShardId,
3783 : pub generation: Generation,
3784 : pub shard: ShardIndex,
3785 : pub remote_storage: GenericRemoteStorage,
3786 : pub remote_fs_dir: Utf8PathBuf,
3787 : pub deletion_queue: MockDeletionQueue,
3788 : }
3789 :
3790 : static LOG_HANDLE: OnceCell<()> = OnceCell::new();
3791 :
3792 136 : pub(crate) fn setup_logging() {
3793 136 : LOG_HANDLE.get_or_init(|| {
3794 132 : logging::init(
3795 132 : logging::LogFormat::Test,
3796 132 : // enable it in case the tests exercise code paths that use
3797 132 : // debug_assert_current_span_has_tenant_and_timeline_id
3798 132 : logging::TracingErrorLayerEnablement::EnableWithRustLogFilter,
3799 132 : logging::Output::Stdout,
3800 132 : )
3801 132 : .expect("Failed to init test logging")
3802 136 : });
3803 136 : }
3804 :
3805 : impl TenantHarness {
3806 130 : pub fn create_custom(
3807 130 : test_name: &'static str,
3808 130 : tenant_conf: TenantConf,
3809 130 : ) -> anyhow::Result<Self> {
3810 130 : setup_logging();
3811 130 :
3812 130 : let repo_dir = PageServerConf::test_repo_dir(test_name);
3813 130 : let _ = fs::remove_dir_all(&repo_dir);
3814 130 : fs::create_dir_all(&repo_dir)?;
3815 :
3816 130 : let conf = PageServerConf::dummy_conf(repo_dir);
3817 130 : // Make a static copy of the config. This can never be free'd, but that's
3818 130 : // OK in a test.
3819 130 : let conf: &'static PageServerConf = Box::leak(Box::new(conf));
3820 130 :
3821 130 : let tenant_id = TenantId::generate();
3822 130 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
3823 130 : fs::create_dir_all(conf.tenant_path(&tenant_shard_id))?;
3824 130 : fs::create_dir_all(conf.timelines_path(&tenant_shard_id))?;
3825 :
3826 : use remote_storage::{RemoteStorageConfig, RemoteStorageKind};
3827 130 : let remote_fs_dir = conf.workdir.join("localfs");
3828 130 : std::fs::create_dir_all(&remote_fs_dir).unwrap();
3829 130 : let config = RemoteStorageConfig {
3830 130 : storage: RemoteStorageKind::LocalFs(remote_fs_dir.clone()),
3831 130 : timeout: RemoteStorageConfig::DEFAULT_TIMEOUT,
3832 130 : };
3833 130 : let remote_storage = GenericRemoteStorage::from_config(&config).unwrap();
3834 130 : let deletion_queue = MockDeletionQueue::new(Some(remote_storage.clone()));
3835 130 :
3836 130 : Ok(Self {
3837 130 : conf,
3838 130 : tenant_conf,
3839 130 : tenant_shard_id,
3840 130 : generation: Generation::new(0xdeadbeef),
3841 130 : shard: ShardIndex::unsharded(),
3842 130 : remote_storage,
3843 130 : remote_fs_dir,
3844 130 : deletion_queue,
3845 130 : })
3846 130 : }
3847 :
3848 128 : pub fn create(test_name: &'static str) -> anyhow::Result<Self> {
3849 128 : // Disable automatic GC and compaction to make the unit tests more deterministic.
3850 128 : // The tests perform them manually if needed.
3851 128 : let tenant_conf = TenantConf {
3852 128 : gc_period: Duration::ZERO,
3853 128 : compaction_period: Duration::ZERO,
3854 128 : ..TenantConf::default()
3855 128 : };
3856 128 :
3857 128 : Self::create_custom(test_name, tenant_conf)
3858 128 : }
3859 :
3860 18 : pub fn span(&self) -> tracing::Span {
3861 18 : info_span!("TenantHarness", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug())
3862 18 : }
3863 :
3864 130 : pub(crate) async fn load(&self) -> (Arc<Tenant>, RequestContext) {
3865 130 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
3866 130 : (
3867 130 : self.do_try_load(&ctx)
3868 520 : .await
3869 130 : .expect("failed to load test tenant"),
3870 130 : ctx,
3871 130 : )
3872 130 : }
3873 :
3874 260 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
3875 : pub(crate) async fn do_try_load(
3876 : &self,
3877 : ctx: &RequestContext,
3878 : ) -> anyhow::Result<Arc<Tenant>> {
3879 : let walredo_mgr = Arc::new(WalRedoManager::from(TestRedoManager));
3880 :
3881 : let tenant = Arc::new(Tenant::new(
3882 : TenantState::Loading,
3883 : self.conf,
3884 : AttachedTenantConf::try_from(LocationConf::attached_single(
3885 : TenantConfOpt::from(self.tenant_conf.clone()),
3886 : self.generation,
3887 : &ShardParameters::default(),
3888 : ))
3889 : .unwrap(),
3890 : // This is a legacy/test code path: sharding isn't supported here.
3891 : ShardIdentity::unsharded(),
3892 : Some(walredo_mgr),
3893 : self.tenant_shard_id,
3894 : self.remote_storage.clone(),
3895 : self.deletion_queue.new_client(),
3896 : ));
3897 :
3898 : let preload = tenant
3899 : .preload(&self.remote_storage, CancellationToken::new())
3900 : .await?;
3901 : tenant.attach(Some(preload), SpawnMode::Eager, ctx).await?;
3902 :
3903 : tenant.state.send_replace(TenantState::Active);
3904 : for timeline in tenant.timelines.lock().unwrap().values() {
3905 : timeline.set_state(TimelineState::Active);
3906 : }
3907 : Ok(tenant)
3908 : }
3909 :
3910 2 : pub fn timeline_path(&self, timeline_id: &TimelineId) -> Utf8PathBuf {
3911 2 : self.conf.timeline_path(&self.tenant_shard_id, timeline_id)
3912 2 : }
3913 : }
3914 :
3915 : // Mock WAL redo manager that doesn't do much
3916 : pub(crate) struct TestRedoManager;
3917 :
3918 : impl TestRedoManager {
3919 : /// # Cancel-Safety
3920 : ///
3921 : /// This method is cancellation-safe.
3922 12 : pub async fn request_redo(
3923 12 : &self,
3924 12 : key: Key,
3925 12 : lsn: Lsn,
3926 12 : base_img: Option<(Lsn, Bytes)>,
3927 12 : records: Vec<(Lsn, NeonWalRecord)>,
3928 12 : _pg_version: u32,
3929 12 : ) -> anyhow::Result<Bytes> {
3930 18 : let records_neon = records.iter().all(|r| apply_neon::can_apply_in_neon(&r.1));
3931 12 : if records_neon {
3932 : // For Neon wal records, we can decode without spawning postgres, so do so.
3933 12 : let base_img = base_img.expect("Neon WAL redo requires base image").1;
3934 12 : let mut page = BytesMut::new();
3935 12 : page.extend_from_slice(&base_img);
3936 30 : for (_record_lsn, record) in records {
3937 18 : apply_neon::apply_in_neon(&record, key, &mut page)?;
3938 : }
3939 12 : Ok(page.freeze())
3940 : } else {
3941 : // We never spawn a postgres walredo process in unit tests: just log what we might have done.
3942 0 : let s = format!(
3943 0 : "redo for {} to get to {}, with {} and {} records",
3944 0 : key,
3945 0 : lsn,
3946 0 : if base_img.is_some() {
3947 0 : "base image"
3948 : } else {
3949 0 : "no base image"
3950 : },
3951 0 : records.len()
3952 0 : );
3953 0 : println!("{s}");
3954 0 :
3955 0 : Ok(test_img(&s))
3956 : }
3957 12 : }
3958 : }
3959 : }
3960 :
3961 : #[cfg(test)]
3962 : mod tests {
3963 : use std::collections::BTreeMap;
3964 :
3965 : use super::*;
3966 : use crate::keyspace::KeySpaceAccum;
3967 : use crate::repository::{Key, Value};
3968 : use crate::tenant::harness::*;
3969 : use crate::tenant::timeline::CompactFlags;
3970 : use crate::DEFAULT_PG_VERSION;
3971 : use bytes::{Bytes, BytesMut};
3972 : use hex_literal::hex;
3973 : use pageserver_api::key::{AUX_FILES_KEY, AUX_KEY_PREFIX, NON_INHERITED_RANGE};
3974 : use pageserver_api::keyspace::KeySpace;
3975 : use pageserver_api::models::CompactionAlgorithm;
3976 : use rand::{thread_rng, Rng};
3977 : use tests::storage_layer::ValuesReconstructState;
3978 : use tests::timeline::{GetVectoredError, ShutdownMode};
3979 :
3980 : static TEST_KEY: Lazy<Key> =
3981 18 : Lazy::new(|| Key::from_slice(&hex!("010000000033333333444444445500000001")));
3982 :
3983 : #[tokio::test]
3984 2 : async fn test_basic() -> anyhow::Result<()> {
3985 8 : let (tenant, ctx) = TenantHarness::create("test_basic")?.load().await;
3986 2 : let tline = tenant
3987 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
3988 6 : .await?;
3989 2 :
3990 2 : let mut writer = tline.writer().await;
3991 2 : writer
3992 2 : .put(
3993 2 : *TEST_KEY,
3994 2 : Lsn(0x10),
3995 2 : &Value::Image(test_img("foo at 0x10")),
3996 2 : &ctx,
3997 2 : )
3998 2 : .await?;
3999 2 : writer.finish_write(Lsn(0x10));
4000 2 : drop(writer);
4001 2 :
4002 2 : let mut writer = tline.writer().await;
4003 2 : writer
4004 2 : .put(
4005 2 : *TEST_KEY,
4006 2 : Lsn(0x20),
4007 2 : &Value::Image(test_img("foo at 0x20")),
4008 2 : &ctx,
4009 2 : )
4010 2 : .await?;
4011 2 : writer.finish_write(Lsn(0x20));
4012 2 : drop(writer);
4013 2 :
4014 2 : assert_eq!(
4015 2 : tline.get(*TEST_KEY, Lsn(0x10), &ctx).await?,
4016 2 : test_img("foo at 0x10")
4017 2 : );
4018 2 : assert_eq!(
4019 2 : tline.get(*TEST_KEY, Lsn(0x1f), &ctx).await?,
4020 2 : test_img("foo at 0x10")
4021 2 : );
4022 2 : assert_eq!(
4023 2 : tline.get(*TEST_KEY, Lsn(0x20), &ctx).await?,
4024 2 : test_img("foo at 0x20")
4025 2 : );
4026 2 :
4027 2 : Ok(())
4028 2 : }
4029 :
4030 : #[tokio::test]
4031 2 : async fn no_duplicate_timelines() -> anyhow::Result<()> {
4032 2 : let (tenant, ctx) = TenantHarness::create("no_duplicate_timelines")?
4033 2 : .load()
4034 8 : .await;
4035 2 : let _ = tenant
4036 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4037 6 : .await?;
4038 2 :
4039 2 : match tenant
4040 2 : .create_empty_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4041 2 : .await
4042 2 : {
4043 2 : Ok(_) => panic!("duplicate timeline creation should fail"),
4044 2 : Err(e) => assert_eq!(e.to_string(), "Already exists".to_string()),
4045 2 : }
4046 2 :
4047 2 : Ok(())
4048 2 : }
4049 :
4050 : /// Convenience function to create a page image with given string as the only content
4051 10 : pub fn test_value(s: &str) -> Value {
4052 10 : let mut buf = BytesMut::new();
4053 10 : buf.extend_from_slice(s.as_bytes());
4054 10 : Value::Image(buf.freeze())
4055 10 : }
4056 :
4057 : ///
4058 : /// Test branch creation
4059 : ///
4060 : #[tokio::test]
4061 2 : async fn test_branch() -> anyhow::Result<()> {
4062 2 : use std::str::from_utf8;
4063 2 :
4064 8 : let (tenant, ctx) = TenantHarness::create("test_branch")?.load().await;
4065 2 : let tline = tenant
4066 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4067 6 : .await?;
4068 2 : let mut writer = tline.writer().await;
4069 2 :
4070 2 : #[allow(non_snake_case)]
4071 2 : let TEST_KEY_A: Key = Key::from_hex("110000000033333333444444445500000001").unwrap();
4072 2 : #[allow(non_snake_case)]
4073 2 : let TEST_KEY_B: Key = Key::from_hex("110000000033333333444444445500000002").unwrap();
4074 2 :
4075 2 : // Insert a value on the timeline
4076 2 : writer
4077 2 : .put(TEST_KEY_A, Lsn(0x20), &test_value("foo at 0x20"), &ctx)
4078 2 : .await?;
4079 2 : writer
4080 2 : .put(TEST_KEY_B, Lsn(0x20), &test_value("foobar at 0x20"), &ctx)
4081 2 : .await?;
4082 2 : writer.finish_write(Lsn(0x20));
4083 2 :
4084 2 : writer
4085 2 : .put(TEST_KEY_A, Lsn(0x30), &test_value("foo at 0x30"), &ctx)
4086 2 : .await?;
4087 2 : writer.finish_write(Lsn(0x30));
4088 2 : writer
4089 2 : .put(TEST_KEY_A, Lsn(0x40), &test_value("foo at 0x40"), &ctx)
4090 2 : .await?;
4091 2 : writer.finish_write(Lsn(0x40));
4092 2 :
4093 2 : //assert_current_logical_size(&tline, Lsn(0x40));
4094 2 :
4095 2 : // Branch the history, modify relation differently on the new timeline
4096 2 : tenant
4097 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x30)), &ctx)
4098 2 : .await?;
4099 2 : let newtline = tenant
4100 2 : .get_timeline(NEW_TIMELINE_ID, true)
4101 2 : .expect("Should have a local timeline");
4102 2 : let mut new_writer = newtline.writer().await;
4103 2 : new_writer
4104 2 : .put(TEST_KEY_A, Lsn(0x40), &test_value("bar at 0x40"), &ctx)
4105 2 : .await?;
4106 2 : new_writer.finish_write(Lsn(0x40));
4107 2 :
4108 2 : // Check page contents on both branches
4109 2 : assert_eq!(
4110 2 : from_utf8(&tline.get(TEST_KEY_A, Lsn(0x40), &ctx).await?)?,
4111 2 : "foo at 0x40"
4112 2 : );
4113 2 : assert_eq!(
4114 2 : from_utf8(&newtline.get(TEST_KEY_A, Lsn(0x40), &ctx).await?)?,
4115 2 : "bar at 0x40"
4116 2 : );
4117 2 : assert_eq!(
4118 2 : from_utf8(&newtline.get(TEST_KEY_B, Lsn(0x40), &ctx).await?)?,
4119 2 : "foobar at 0x20"
4120 2 : );
4121 2 :
4122 2 : //assert_current_logical_size(&tline, Lsn(0x40));
4123 2 :
4124 2 : Ok(())
4125 2 : }
4126 :
4127 20 : async fn make_some_layers(
4128 20 : tline: &Timeline,
4129 20 : start_lsn: Lsn,
4130 20 : ctx: &RequestContext,
4131 20 : ) -> anyhow::Result<()> {
4132 20 : let mut lsn = start_lsn;
4133 : {
4134 20 : let mut writer = tline.writer().await;
4135 : // Create a relation on the timeline
4136 20 : writer
4137 20 : .put(
4138 20 : *TEST_KEY,
4139 20 : lsn,
4140 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4141 20 : ctx,
4142 20 : )
4143 10 : .await?;
4144 20 : writer.finish_write(lsn);
4145 20 : lsn += 0x10;
4146 20 : writer
4147 20 : .put(
4148 20 : *TEST_KEY,
4149 20 : lsn,
4150 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4151 20 : ctx,
4152 20 : )
4153 0 : .await?;
4154 20 : writer.finish_write(lsn);
4155 20 : lsn += 0x10;
4156 20 : }
4157 20 : tline.freeze_and_flush().await?;
4158 : {
4159 20 : let mut writer = tline.writer().await;
4160 20 : writer
4161 20 : .put(
4162 20 : *TEST_KEY,
4163 20 : lsn,
4164 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4165 20 : ctx,
4166 20 : )
4167 10 : .await?;
4168 20 : writer.finish_write(lsn);
4169 20 : lsn += 0x10;
4170 20 : writer
4171 20 : .put(
4172 20 : *TEST_KEY,
4173 20 : lsn,
4174 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4175 20 : ctx,
4176 20 : )
4177 0 : .await?;
4178 20 : writer.finish_write(lsn);
4179 20 : }
4180 20 : tline.freeze_and_flush().await
4181 20 : }
4182 :
4183 : #[tokio::test]
4184 2 : async fn test_prohibit_branch_creation_on_garbage_collected_data() -> anyhow::Result<()> {
4185 2 : let (tenant, ctx) =
4186 2 : TenantHarness::create("test_prohibit_branch_creation_on_garbage_collected_data")?
4187 2 : .load()
4188 8 : .await;
4189 2 : let tline = tenant
4190 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4191 6 : .await?;
4192 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4193 2 :
4194 2 : // this removes layers before lsn 40 (50 minus 10), so there are two remaining layers, image and delta for 31-50
4195 2 : // FIXME: this doesn't actually remove any layer currently, given how the flushing
4196 2 : // and compaction works. But it does set the 'cutoff' point so that the cross check
4197 2 : // below should fail.
4198 2 : tenant
4199 2 : .gc_iteration(
4200 2 : Some(TIMELINE_ID),
4201 2 : 0x10,
4202 2 : Duration::ZERO,
4203 2 : &CancellationToken::new(),
4204 2 : &ctx,
4205 2 : )
4206 2 : .await?;
4207 2 :
4208 2 : // try to branch at lsn 25, should fail because we already garbage collected the data
4209 2 : match tenant
4210 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x25)), &ctx)
4211 2 : .await
4212 2 : {
4213 2 : Ok(_) => panic!("branching should have failed"),
4214 2 : Err(err) => {
4215 2 : let CreateTimelineError::AncestorLsn(err) = err else {
4216 2 : panic!("wrong error type")
4217 2 : };
4218 2 : assert!(err.to_string().contains("invalid branch start lsn"));
4219 2 : assert!(err
4220 2 : .source()
4221 2 : .unwrap()
4222 2 : .to_string()
4223 2 : .contains("we might've already garbage collected needed data"))
4224 2 : }
4225 2 : }
4226 2 :
4227 2 : Ok(())
4228 2 : }
4229 :
4230 : #[tokio::test]
4231 2 : async fn test_prohibit_branch_creation_on_pre_initdb_lsn() -> anyhow::Result<()> {
4232 2 : let (tenant, ctx) =
4233 2 : TenantHarness::create("test_prohibit_branch_creation_on_pre_initdb_lsn")?
4234 2 : .load()
4235 8 : .await;
4236 2 :
4237 2 : let tline = tenant
4238 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x50), DEFAULT_PG_VERSION, &ctx)
4239 6 : .await?;
4240 2 : // try to branch at lsn 0x25, should fail because initdb lsn is 0x50
4241 2 : match tenant
4242 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x25)), &ctx)
4243 2 : .await
4244 2 : {
4245 2 : Ok(_) => panic!("branching should have failed"),
4246 2 : Err(err) => {
4247 2 : let CreateTimelineError::AncestorLsn(err) = err else {
4248 2 : panic!("wrong error type");
4249 2 : };
4250 2 : assert!(&err.to_string().contains("invalid branch start lsn"));
4251 2 : assert!(&err
4252 2 : .source()
4253 2 : .unwrap()
4254 2 : .to_string()
4255 2 : .contains("is earlier than latest GC horizon"));
4256 2 : }
4257 2 : }
4258 2 :
4259 2 : Ok(())
4260 2 : }
4261 :
4262 : /*
4263 : // FIXME: This currently fails to error out. Calling GC doesn't currently
4264 : // remove the old value, we'd need to work a little harder
4265 : #[tokio::test]
4266 : async fn test_prohibit_get_for_garbage_collected_data() -> anyhow::Result<()> {
4267 : let repo =
4268 : RepoHarness::create("test_prohibit_get_for_garbage_collected_data")?
4269 : .load();
4270 :
4271 : let tline = repo.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
4272 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4273 :
4274 : repo.gc_iteration(Some(TIMELINE_ID), 0x10, Duration::ZERO)?;
4275 : let latest_gc_cutoff_lsn = tline.get_latest_gc_cutoff_lsn();
4276 : assert!(*latest_gc_cutoff_lsn > Lsn(0x25));
4277 : match tline.get(*TEST_KEY, Lsn(0x25)) {
4278 : Ok(_) => panic!("request for page should have failed"),
4279 : Err(err) => assert!(err.to_string().contains("not found at")),
4280 : }
4281 : Ok(())
4282 : }
4283 : */
4284 :
4285 : #[tokio::test]
4286 2 : async fn test_get_branchpoints_from_an_inactive_timeline() -> anyhow::Result<()> {
4287 2 : let (tenant, ctx) =
4288 2 : TenantHarness::create("test_get_branchpoints_from_an_inactive_timeline")?
4289 2 : .load()
4290 8 : .await;
4291 2 : let tline = tenant
4292 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4293 6 : .await?;
4294 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4295 2 :
4296 2 : tenant
4297 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4298 2 : .await?;
4299 2 : let newtline = tenant
4300 2 : .get_timeline(NEW_TIMELINE_ID, true)
4301 2 : .expect("Should have a local timeline");
4302 2 :
4303 6 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
4304 2 :
4305 2 : tline.set_broken("test".to_owned());
4306 2 :
4307 2 : tenant
4308 2 : .gc_iteration(
4309 2 : Some(TIMELINE_ID),
4310 2 : 0x10,
4311 2 : Duration::ZERO,
4312 2 : &CancellationToken::new(),
4313 2 : &ctx,
4314 2 : )
4315 2 : .await?;
4316 2 :
4317 2 : // The branchpoints should contain all timelines, even ones marked
4318 2 : // as Broken.
4319 2 : {
4320 2 : let branchpoints = &tline.gc_info.read().unwrap().retain_lsns;
4321 2 : assert_eq!(branchpoints.len(), 1);
4322 2 : assert_eq!(branchpoints[0], Lsn(0x40));
4323 2 : }
4324 2 :
4325 2 : // You can read the key from the child branch even though the parent is
4326 2 : // Broken, as long as you don't need to access data from the parent.
4327 2 : assert_eq!(
4328 4 : newtline.get(*TEST_KEY, Lsn(0x70), &ctx).await?,
4329 2 : test_img(&format!("foo at {}", Lsn(0x70)))
4330 2 : );
4331 2 :
4332 2 : // This needs to traverse to the parent, and fails.
4333 2 : let err = newtline.get(*TEST_KEY, Lsn(0x50), &ctx).await.unwrap_err();
4334 2 : assert!(err
4335 2 : .to_string()
4336 2 : .contains("will not become active. Current state: Broken"));
4337 2 :
4338 2 : Ok(())
4339 2 : }
4340 :
4341 : #[tokio::test]
4342 2 : async fn test_retain_data_in_parent_which_is_needed_for_child() -> anyhow::Result<()> {
4343 2 : let (tenant, ctx) =
4344 2 : TenantHarness::create("test_retain_data_in_parent_which_is_needed_for_child")?
4345 2 : .load()
4346 8 : .await;
4347 2 : let tline = tenant
4348 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4349 6 : .await?;
4350 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4351 2 :
4352 2 : tenant
4353 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4354 2 : .await?;
4355 2 : let newtline = tenant
4356 2 : .get_timeline(NEW_TIMELINE_ID, true)
4357 2 : .expect("Should have a local timeline");
4358 2 : // this removes layers before lsn 40 (50 minus 10), so there are two remaining layers, image and delta for 31-50
4359 2 : tenant
4360 2 : .gc_iteration(
4361 2 : Some(TIMELINE_ID),
4362 2 : 0x10,
4363 2 : Duration::ZERO,
4364 2 : &CancellationToken::new(),
4365 2 : &ctx,
4366 2 : )
4367 2 : .await?;
4368 4 : assert!(newtline.get(*TEST_KEY, Lsn(0x25), &ctx).await.is_ok());
4369 2 :
4370 2 : Ok(())
4371 2 : }
4372 : #[tokio::test]
4373 2 : async fn test_parent_keeps_data_forever_after_branching() -> anyhow::Result<()> {
4374 2 : let (tenant, ctx) =
4375 2 : TenantHarness::create("test_parent_keeps_data_forever_after_branching")?
4376 2 : .load()
4377 8 : .await;
4378 2 : let tline = tenant
4379 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4380 6 : .await?;
4381 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4382 2 :
4383 2 : tenant
4384 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4385 2 : .await?;
4386 2 : let newtline = tenant
4387 2 : .get_timeline(NEW_TIMELINE_ID, true)
4388 2 : .expect("Should have a local timeline");
4389 2 :
4390 6 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
4391 2 :
4392 2 : // run gc on parent
4393 2 : tenant
4394 2 : .gc_iteration(
4395 2 : Some(TIMELINE_ID),
4396 2 : 0x10,
4397 2 : Duration::ZERO,
4398 2 : &CancellationToken::new(),
4399 2 : &ctx,
4400 2 : )
4401 2 : .await?;
4402 2 :
4403 2 : // Check that the data is still accessible on the branch.
4404 2 : assert_eq!(
4405 7 : newtline.get(*TEST_KEY, Lsn(0x50), &ctx).await?,
4406 2 : test_img(&format!("foo at {}", Lsn(0x40)))
4407 2 : );
4408 2 :
4409 2 : Ok(())
4410 2 : }
4411 :
4412 : #[tokio::test]
4413 2 : async fn timeline_load() -> anyhow::Result<()> {
4414 2 : const TEST_NAME: &str = "timeline_load";
4415 2 : let harness = TenantHarness::create(TEST_NAME)?;
4416 2 : {
4417 8 : let (tenant, ctx) = harness.load().await;
4418 2 : let tline = tenant
4419 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x7000), DEFAULT_PG_VERSION, &ctx)
4420 6 : .await?;
4421 6 : make_some_layers(tline.as_ref(), Lsn(0x8000), &ctx).await?;
4422 2 : // so that all uploads finish & we can call harness.load() below again
4423 2 : tenant
4424 2 : .shutdown(Default::default(), ShutdownMode::FreezeAndFlush)
4425 2 : .instrument(harness.span())
4426 2 : .await
4427 2 : .ok()
4428 2 : .unwrap();
4429 2 : }
4430 2 :
4431 7 : let (tenant, _ctx) = harness.load().await;
4432 2 : tenant
4433 2 : .get_timeline(TIMELINE_ID, true)
4434 2 : .expect("cannot load timeline");
4435 2 :
4436 2 : Ok(())
4437 2 : }
4438 :
4439 : #[tokio::test]
4440 2 : async fn timeline_load_with_ancestor() -> anyhow::Result<()> {
4441 2 : const TEST_NAME: &str = "timeline_load_with_ancestor";
4442 2 : let harness = TenantHarness::create(TEST_NAME)?;
4443 2 : // create two timelines
4444 2 : {
4445 8 : let (tenant, ctx) = harness.load().await;
4446 2 : let tline = tenant
4447 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4448 6 : .await?;
4449 2 :
4450 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4451 2 :
4452 2 : let child_tline = tenant
4453 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4454 2 : .await?;
4455 2 : child_tline.set_state(TimelineState::Active);
4456 2 :
4457 2 : let newtline = tenant
4458 2 : .get_timeline(NEW_TIMELINE_ID, true)
4459 2 : .expect("Should have a local timeline");
4460 2 :
4461 6 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
4462 2 :
4463 2 : // so that all uploads finish & we can call harness.load() below again
4464 2 : tenant
4465 2 : .shutdown(Default::default(), ShutdownMode::FreezeAndFlush)
4466 2 : .instrument(harness.span())
4467 4 : .await
4468 2 : .ok()
4469 2 : .unwrap();
4470 2 : }
4471 2 :
4472 2 : // check that both of them are initially unloaded
4473 13 : let (tenant, _ctx) = harness.load().await;
4474 2 :
4475 2 : // check that both, child and ancestor are loaded
4476 2 : let _child_tline = tenant
4477 2 : .get_timeline(NEW_TIMELINE_ID, true)
4478 2 : .expect("cannot get child timeline loaded");
4479 2 :
4480 2 : let _ancestor_tline = tenant
4481 2 : .get_timeline(TIMELINE_ID, true)
4482 2 : .expect("cannot get ancestor timeline loaded");
4483 2 :
4484 2 : Ok(())
4485 2 : }
4486 :
4487 : #[tokio::test]
4488 2 : async fn delta_layer_dumping() -> anyhow::Result<()> {
4489 2 : use storage_layer::AsLayerDesc;
4490 8 : let (tenant, ctx) = TenantHarness::create("test_layer_dumping")?.load().await;
4491 2 : let tline = tenant
4492 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4493 6 : .await?;
4494 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4495 2 :
4496 2 : let layer_map = tline.layers.read().await;
4497 2 : let level0_deltas = layer_map
4498 2 : .layer_map()
4499 2 : .get_level0_deltas()?
4500 2 : .into_iter()
4501 4 : .map(|desc| layer_map.get_from_desc(&desc))
4502 2 : .collect::<Vec<_>>();
4503 2 :
4504 2 : assert!(!level0_deltas.is_empty());
4505 2 :
4506 6 : for delta in level0_deltas {
4507 2 : // Ensure we are dumping a delta layer here
4508 4 : assert!(delta.layer_desc().is_delta);
4509 8 : delta.dump(true, &ctx).await.unwrap();
4510 2 : }
4511 2 :
4512 2 : Ok(())
4513 2 : }
4514 :
4515 : #[tokio::test]
4516 2 : async fn test_images() -> anyhow::Result<()> {
4517 8 : let (tenant, ctx) = TenantHarness::create("test_images")?.load().await;
4518 2 : let tline = tenant
4519 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
4520 6 : .await?;
4521 2 :
4522 2 : let mut writer = tline.writer().await;
4523 2 : writer
4524 2 : .put(
4525 2 : *TEST_KEY,
4526 2 : Lsn(0x10),
4527 2 : &Value::Image(test_img("foo at 0x10")),
4528 2 : &ctx,
4529 2 : )
4530 2 : .await?;
4531 2 : writer.finish_write(Lsn(0x10));
4532 2 : drop(writer);
4533 2 :
4534 2 : tline.freeze_and_flush().await?;
4535 2 : tline
4536 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4537 2 : .await?;
4538 2 :
4539 2 : let mut writer = tline.writer().await;
4540 2 : writer
4541 2 : .put(
4542 2 : *TEST_KEY,
4543 2 : Lsn(0x20),
4544 2 : &Value::Image(test_img("foo at 0x20")),
4545 2 : &ctx,
4546 2 : )
4547 2 : .await?;
4548 2 : writer.finish_write(Lsn(0x20));
4549 2 : drop(writer);
4550 2 :
4551 2 : tline.freeze_and_flush().await?;
4552 2 : tline
4553 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4554 2 : .await?;
4555 2 :
4556 2 : let mut writer = tline.writer().await;
4557 2 : writer
4558 2 : .put(
4559 2 : *TEST_KEY,
4560 2 : Lsn(0x30),
4561 2 : &Value::Image(test_img("foo at 0x30")),
4562 2 : &ctx,
4563 2 : )
4564 2 : .await?;
4565 2 : writer.finish_write(Lsn(0x30));
4566 2 : drop(writer);
4567 2 :
4568 2 : tline.freeze_and_flush().await?;
4569 2 : tline
4570 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4571 2 : .await?;
4572 2 :
4573 2 : let mut writer = tline.writer().await;
4574 2 : writer
4575 2 : .put(
4576 2 : *TEST_KEY,
4577 2 : Lsn(0x40),
4578 2 : &Value::Image(test_img("foo at 0x40")),
4579 2 : &ctx,
4580 2 : )
4581 2 : .await?;
4582 2 : writer.finish_write(Lsn(0x40));
4583 2 : drop(writer);
4584 2 :
4585 2 : tline.freeze_and_flush().await?;
4586 2 : tline
4587 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4588 2 : .await?;
4589 2 :
4590 2 : assert_eq!(
4591 4 : tline.get(*TEST_KEY, Lsn(0x10), &ctx).await?,
4592 2 : test_img("foo at 0x10")
4593 2 : );
4594 2 : assert_eq!(
4595 3 : tline.get(*TEST_KEY, Lsn(0x1f), &ctx).await?,
4596 2 : test_img("foo at 0x10")
4597 2 : );
4598 2 : assert_eq!(
4599 2 : tline.get(*TEST_KEY, Lsn(0x20), &ctx).await?,
4600 2 : test_img("foo at 0x20")
4601 2 : );
4602 2 : assert_eq!(
4603 4 : tline.get(*TEST_KEY, Lsn(0x30), &ctx).await?,
4604 2 : test_img("foo at 0x30")
4605 2 : );
4606 2 : assert_eq!(
4607 4 : tline.get(*TEST_KEY, Lsn(0x40), &ctx).await?,
4608 2 : test_img("foo at 0x40")
4609 2 : );
4610 2 :
4611 2 : Ok(())
4612 2 : }
4613 :
4614 4 : async fn bulk_insert_compact_gc(
4615 4 : tenant: &Tenant,
4616 4 : timeline: &Arc<Timeline>,
4617 4 : ctx: &RequestContext,
4618 4 : lsn: Lsn,
4619 4 : repeat: usize,
4620 4 : key_count: usize,
4621 4 : ) -> anyhow::Result<()> {
4622 4 : let compact = true;
4623 103674 : bulk_insert_maybe_compact_gc(tenant, timeline, ctx, lsn, repeat, key_count, compact).await
4624 4 : }
4625 :
4626 8 : async fn bulk_insert_maybe_compact_gc(
4627 8 : tenant: &Tenant,
4628 8 : timeline: &Arc<Timeline>,
4629 8 : ctx: &RequestContext,
4630 8 : mut lsn: Lsn,
4631 8 : repeat: usize,
4632 8 : key_count: usize,
4633 8 : compact: bool,
4634 8 : ) -> anyhow::Result<()> {
4635 8 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
4636 8 : let mut blknum = 0;
4637 8 :
4638 8 : // Enforce that key range is monotonously increasing
4639 8 : let mut keyspace = KeySpaceAccum::new();
4640 8 :
4641 8 : let cancel = CancellationToken::new();
4642 8 :
4643 8 : for _ in 0..repeat {
4644 400 : for _ in 0..key_count {
4645 4000000 : test_key.field6 = blknum;
4646 4000000 : let mut writer = timeline.writer().await;
4647 4000000 : writer
4648 4000000 : .put(
4649 4000000 : test_key,
4650 4000000 : lsn,
4651 4000000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
4652 4000000 : ctx,
4653 4000000 : )
4654 38400 : .await?;
4655 4000000 : writer.finish_write(lsn);
4656 4000000 : drop(writer);
4657 4000000 :
4658 4000000 : keyspace.add_key(test_key);
4659 4000000 :
4660 4000000 : lsn = Lsn(lsn.0 + 0x10);
4661 4000000 : blknum += 1;
4662 : }
4663 :
4664 400 : timeline.freeze_and_flush().await?;
4665 400 : if compact {
4666 : // this requires timeline to be &Arc<Timeline>
4667 40274 : timeline.compact(&cancel, EnumSet::empty(), ctx).await?;
4668 200 : }
4669 :
4670 : // this doesn't really need to use the timeline_id target, but it is closer to what it
4671 : // originally was.
4672 400 : let res = tenant
4673 400 : .gc_iteration(Some(timeline.timeline_id), 0, Duration::ZERO, &cancel, ctx)
4674 400 : .await?;
4675 :
4676 400 : assert_eq!(res.layers_removed, 0, "this never removes anything");
4677 : }
4678 :
4679 8 : Ok(())
4680 8 : }
4681 :
4682 : //
4683 : // Insert 1000 key-value pairs with increasing keys, flush, compact, GC.
4684 : // Repeat 50 times.
4685 : //
4686 : #[tokio::test]
4687 2 : async fn test_bulk_insert() -> anyhow::Result<()> {
4688 2 : let harness = TenantHarness::create("test_bulk_insert")?;
4689 8 : let (tenant, ctx) = harness.load().await;
4690 2 : let tline = tenant
4691 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
4692 6 : .await?;
4693 2 :
4694 2 : let lsn = Lsn(0x10);
4695 51837 : bulk_insert_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000).await?;
4696 2 :
4697 2 : Ok(())
4698 2 : }
4699 :
4700 : // Test the vectored get real implementation against a simple sequential implementation.
4701 : //
4702 : // The test generates a keyspace by repeatedly flushing the in-memory layer and compacting.
4703 : // Projected to 2D the key space looks like below. Lsn grows upwards on the Y axis and keys
4704 : // grow to the right on the X axis.
4705 : // [Delta]
4706 : // [Delta]
4707 : // [Delta]
4708 : // [Delta]
4709 : // ------------ Image ---------------
4710 : //
4711 : // After layer generation we pick the ranges to query as follows:
4712 : // 1. The beginning of each delta layer
4713 : // 2. At the seam between two adjacent delta layers
4714 : //
4715 : // There's one major downside to this test: delta layers only contains images,
4716 : // so the search can stop at the first delta layer and doesn't traverse any deeper.
4717 : #[tokio::test]
4718 2 : async fn test_get_vectored() -> anyhow::Result<()> {
4719 2 : let harness = TenantHarness::create("test_get_vectored")?;
4720 8 : let (tenant, ctx) = harness.load().await;
4721 2 : let tline = tenant
4722 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
4723 6 : .await?;
4724 2 :
4725 2 : let lsn = Lsn(0x10);
4726 51837 : bulk_insert_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000).await?;
4727 2 :
4728 2 : let guard = tline.layers.read().await;
4729 2 : guard.layer_map().dump(true, &ctx).await?;
4730 2 :
4731 2 : let mut reads = Vec::new();
4732 2 : let mut prev = None;
4733 12 : guard.layer_map().iter_historic_layers().for_each(|desc| {
4734 12 : if !desc.is_delta() {
4735 2 : prev = Some(desc.clone());
4736 2 : return;
4737 10 : }
4738 10 :
4739 10 : let start = desc.key_range.start;
4740 10 : let end = desc
4741 10 : .key_range
4742 10 : .start
4743 10 : .add(Timeline::MAX_GET_VECTORED_KEYS.try_into().unwrap());
4744 10 : reads.push(KeySpace {
4745 10 : ranges: vec![start..end],
4746 10 : });
4747 2 :
4748 10 : if let Some(prev) = &prev {
4749 10 : if !prev.is_delta() {
4750 10 : return;
4751 2 : }
4752 0 :
4753 0 : let first_range = Key {
4754 0 : field6: prev.key_range.end.field6 - 4,
4755 0 : ..prev.key_range.end
4756 0 : }..prev.key_range.end;
4757 0 :
4758 0 : let second_range = desc.key_range.start..Key {
4759 0 : field6: desc.key_range.start.field6 + 4,
4760 0 : ..desc.key_range.start
4761 0 : };
4762 0 :
4763 0 : reads.push(KeySpace {
4764 0 : ranges: vec![first_range, second_range],
4765 0 : });
4766 2 : };
4767 2 :
4768 2 : prev = Some(desc.clone());
4769 12 : });
4770 2 :
4771 2 : drop(guard);
4772 2 :
4773 2 : // Pick a big LSN such that we query over all the changes.
4774 2 : let reads_lsn = Lsn(u64::MAX - 1);
4775 2 :
4776 12 : for read in reads {
4777 10 : info!("Doing vectored read on {:?}", read);
4778 2 :
4779 10 : let vectored_res = tline
4780 10 : .get_vectored_impl(
4781 10 : read.clone(),
4782 10 : reads_lsn,
4783 10 : &mut ValuesReconstructState::new(),
4784 10 : &ctx,
4785 10 : )
4786 25 : .await;
4787 10 : tline
4788 10 : .validate_get_vectored_impl(&vectored_res, read, reads_lsn, &ctx)
4789 19 : .await;
4790 2 : }
4791 2 :
4792 2 : Ok(())
4793 2 : }
4794 :
4795 : #[tokio::test]
4796 2 : async fn test_get_vectored_aux_files() -> anyhow::Result<()> {
4797 2 : let harness = TenantHarness::create("test_get_vectored_aux_files")?;
4798 2 :
4799 8 : let (tenant, ctx) = harness.load().await;
4800 2 : let tline = tenant
4801 2 : .create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION, &ctx)
4802 2 : .await?;
4803 2 : let tline = tline.raw_timeline().unwrap();
4804 2 :
4805 2 : let mut modification = tline.begin_modification(Lsn(0x1000));
4806 2 : modification.put_file("foo/bar1", b"content1", &ctx).await?;
4807 2 : modification.set_lsn(Lsn(0x1008))?;
4808 2 : modification.put_file("foo/bar2", b"content2", &ctx).await?;
4809 2 : modification.commit(&ctx).await?;
4810 2 :
4811 2 : let child_timeline_id = TimelineId::generate();
4812 2 : tenant
4813 2 : .branch_timeline_test(
4814 2 : tline,
4815 2 : child_timeline_id,
4816 2 : Some(tline.get_last_record_lsn()),
4817 2 : &ctx,
4818 2 : )
4819 2 : .await?;
4820 2 :
4821 2 : let child_timeline = tenant
4822 2 : .get_timeline(child_timeline_id, true)
4823 2 : .expect("Should have the branched timeline");
4824 2 :
4825 2 : let aux_keyspace = KeySpace {
4826 2 : ranges: vec![NON_INHERITED_RANGE],
4827 2 : };
4828 2 : let read_lsn = child_timeline.get_last_record_lsn();
4829 2 :
4830 2 : let vectored_res = child_timeline
4831 2 : .get_vectored_impl(
4832 2 : aux_keyspace.clone(),
4833 2 : read_lsn,
4834 2 : &mut ValuesReconstructState::new(),
4835 2 : &ctx,
4836 2 : )
4837 2 : .await;
4838 2 :
4839 2 : child_timeline
4840 2 : .validate_get_vectored_impl(&vectored_res, aux_keyspace, read_lsn, &ctx)
4841 2 : .await;
4842 2 :
4843 2 : let images = vectored_res?;
4844 2 : assert!(images.is_empty());
4845 2 : Ok(())
4846 2 : }
4847 :
4848 : // Test that vectored get handles layer gaps correctly
4849 : // by advancing into the next ancestor timeline if required.
4850 : //
4851 : // The test generates timelines that look like the diagram below.
4852 : // We leave a gap in one of the L1 layers at `gap_at_key` (`/` in the diagram).
4853 : // The reconstruct data for that key lies in the ancestor timeline (`X` in the diagram).
4854 : //
4855 : // ```
4856 : //-------------------------------+
4857 : // ... |
4858 : // [ L1 ] |
4859 : // [ / L1 ] | Child Timeline
4860 : // ... |
4861 : // ------------------------------+
4862 : // [ X L1 ] | Parent Timeline
4863 : // ------------------------------+
4864 : // ```
4865 : #[tokio::test]
4866 2 : async fn test_get_vectored_key_gap() -> anyhow::Result<()> {
4867 2 : let tenant_conf = TenantConf {
4868 2 : // Make compaction deterministic
4869 2 : gc_period: Duration::ZERO,
4870 2 : compaction_period: Duration::ZERO,
4871 2 : // Encourage creation of L1 layers
4872 2 : checkpoint_distance: 16 * 1024,
4873 2 : compaction_target_size: 8 * 1024,
4874 2 : ..TenantConf::default()
4875 2 : };
4876 2 :
4877 2 : let harness = TenantHarness::create_custom("test_get_vectored_key_gap", tenant_conf)?;
4878 8 : let (tenant, ctx) = harness.load().await;
4879 2 :
4880 2 : let mut current_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
4881 2 : let gap_at_key = current_key.add(100);
4882 2 : let mut current_lsn = Lsn(0x10);
4883 2 :
4884 2 : const KEY_COUNT: usize = 10_000;
4885 2 :
4886 2 : let timeline_id = TimelineId::generate();
4887 2 : let current_timeline = tenant
4888 2 : .create_test_timeline(timeline_id, current_lsn, DEFAULT_PG_VERSION, &ctx)
4889 6 : .await?;
4890 2 :
4891 2 : current_lsn += 0x100;
4892 2 :
4893 2 : let mut writer = current_timeline.writer().await;
4894 2 : writer
4895 2 : .put(
4896 2 : gap_at_key,
4897 2 : current_lsn,
4898 2 : &Value::Image(test_img(&format!("{} at {}", gap_at_key, current_lsn))),
4899 2 : &ctx,
4900 2 : )
4901 2 : .await?;
4902 2 : writer.finish_write(current_lsn);
4903 2 : drop(writer);
4904 2 :
4905 2 : let mut latest_lsns = HashMap::new();
4906 2 : latest_lsns.insert(gap_at_key, current_lsn);
4907 2 :
4908 2 : current_timeline.freeze_and_flush().await?;
4909 2 :
4910 2 : let child_timeline_id = TimelineId::generate();
4911 2 :
4912 2 : tenant
4913 2 : .branch_timeline_test(
4914 2 : ¤t_timeline,
4915 2 : child_timeline_id,
4916 2 : Some(current_lsn),
4917 2 : &ctx,
4918 2 : )
4919 2 : .await?;
4920 2 : let child_timeline = tenant
4921 2 : .get_timeline(child_timeline_id, true)
4922 2 : .expect("Should have the branched timeline");
4923 2 :
4924 20002 : for i in 0..KEY_COUNT {
4925 20000 : if current_key == gap_at_key {
4926 2 : current_key = current_key.next();
4927 2 : continue;
4928 19998 : }
4929 19998 :
4930 19998 : current_lsn += 0x10;
4931 2 :
4932 19998 : let mut writer = child_timeline.writer().await;
4933 19998 : writer
4934 19998 : .put(
4935 19998 : current_key,
4936 19998 : current_lsn,
4937 19998 : &Value::Image(test_img(&format!("{} at {}", current_key, current_lsn))),
4938 19998 : &ctx,
4939 19998 : )
4940 321 : .await?;
4941 19998 : writer.finish_write(current_lsn);
4942 19998 : drop(writer);
4943 19998 :
4944 19998 : latest_lsns.insert(current_key, current_lsn);
4945 19998 : current_key = current_key.next();
4946 19998 :
4947 19998 : // Flush every now and then to encourage layer file creation.
4948 19998 : if i % 500 == 0 {
4949 40 : child_timeline.freeze_and_flush().await?;
4950 19958 : }
4951 2 : }
4952 2 :
4953 2 : child_timeline.freeze_and_flush().await?;
4954 2 : let mut flags = EnumSet::new();
4955 2 : flags.insert(CompactFlags::ForceRepartition);
4956 2 : child_timeline
4957 2 : .compact(&CancellationToken::new(), flags, &ctx)
4958 1086 : .await?;
4959 2 :
4960 2 : let key_near_end = {
4961 2 : let mut tmp = current_key;
4962 2 : tmp.field6 -= 10;
4963 2 : tmp
4964 2 : };
4965 2 :
4966 2 : let key_near_gap = {
4967 2 : let mut tmp = gap_at_key;
4968 2 : tmp.field6 -= 10;
4969 2 : tmp
4970 2 : };
4971 2 :
4972 2 : let read = KeySpace {
4973 2 : ranges: vec![key_near_gap..gap_at_key.next(), key_near_end..current_key],
4974 2 : };
4975 2 : let results = child_timeline
4976 2 : .get_vectored_impl(
4977 2 : read.clone(),
4978 2 : current_lsn,
4979 2 : &mut ValuesReconstructState::new(),
4980 2 : &ctx,
4981 2 : )
4982 15 : .await?;
4983 2 :
4984 44 : for (key, img_res) in results {
4985 42 : let expected = test_img(&format!("{} at {}", key, latest_lsns[&key]));
4986 42 : assert_eq!(img_res?, expected);
4987 2 : }
4988 2 :
4989 2 : Ok(())
4990 2 : }
4991 :
4992 : // Test that vectored get descends into ancestor timelines correctly and
4993 : // does not return an image that's newer than requested.
4994 : //
4995 : // The diagram below ilustrates an interesting case. We have a parent timeline
4996 : // (top of the Lsn range) and a child timeline. The request key cannot be reconstructed
4997 : // from the child timeline, so the parent timeline must be visited. When advacing into
4998 : // the child timeline, the read path needs to remember what the requested Lsn was in
4999 : // order to avoid returning an image that's too new. The test below constructs such
5000 : // a timeline setup and does a few queries around the Lsn of each page image.
5001 : // ```
5002 : // LSN
5003 : // ^
5004 : // |
5005 : // |
5006 : // 500 | --------------------------------------> branch point
5007 : // 400 | X
5008 : // 300 | X
5009 : // 200 | --------------------------------------> requested lsn
5010 : // 100 | X
5011 : // |---------------------------------------> Key
5012 : // |
5013 : // ------> requested key
5014 : //
5015 : // Legend:
5016 : // * X - page images
5017 : // ```
5018 : #[tokio::test]
5019 2 : async fn test_get_vectored_ancestor_descent() -> anyhow::Result<()> {
5020 2 : let harness = TenantHarness::create("test_get_vectored_on_lsn_axis")?;
5021 8 : let (tenant, ctx) = harness.load().await;
5022 2 :
5023 2 : let start_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5024 2 : let end_key = start_key.add(1000);
5025 2 : let child_gap_at_key = start_key.add(500);
5026 2 : let mut parent_gap_lsns: BTreeMap<Lsn, String> = BTreeMap::new();
5027 2 :
5028 2 : let mut current_lsn = Lsn(0x10);
5029 2 :
5030 2 : let timeline_id = TimelineId::generate();
5031 2 : let parent_timeline = tenant
5032 2 : .create_test_timeline(timeline_id, current_lsn, DEFAULT_PG_VERSION, &ctx)
5033 6 : .await?;
5034 2 :
5035 2 : current_lsn += 0x100;
5036 2 :
5037 8 : for _ in 0..3 {
5038 6 : let mut key = start_key;
5039 6006 : while key < end_key {
5040 6000 : current_lsn += 0x10;
5041 6000 :
5042 6000 : let image_value = format!("{} at {}", child_gap_at_key, current_lsn);
5043 2 :
5044 6000 : let mut writer = parent_timeline.writer().await;
5045 6000 : writer
5046 6000 : .put(
5047 6000 : key,
5048 6000 : current_lsn,
5049 6000 : &Value::Image(test_img(&image_value)),
5050 6000 : &ctx,
5051 6000 : )
5052 99 : .await?;
5053 6000 : writer.finish_write(current_lsn);
5054 6000 :
5055 6000 : if key == child_gap_at_key {
5056 6 : parent_gap_lsns.insert(current_lsn, image_value);
5057 5994 : }
5058 2 :
5059 6000 : key = key.next();
5060 2 : }
5061 2 :
5062 6 : parent_timeline.freeze_and_flush().await?;
5063 2 : }
5064 2 :
5065 2 : let child_timeline_id = TimelineId::generate();
5066 2 :
5067 2 : let child_timeline = tenant
5068 2 : .branch_timeline_test(&parent_timeline, child_timeline_id, Some(current_lsn), &ctx)
5069 2 : .await?;
5070 2 :
5071 2 : let mut key = start_key;
5072 2002 : while key < end_key {
5073 2000 : if key == child_gap_at_key {
5074 2 : key = key.next();
5075 2 : continue;
5076 1998 : }
5077 1998 :
5078 1998 : current_lsn += 0x10;
5079 2 :
5080 1998 : let mut writer = child_timeline.writer().await;
5081 1998 : writer
5082 1998 : .put(
5083 1998 : key,
5084 1998 : current_lsn,
5085 1998 : &Value::Image(test_img(&format!("{} at {}", key, current_lsn))),
5086 1998 : &ctx,
5087 1998 : )
5088 59 : .await?;
5089 1998 : writer.finish_write(current_lsn);
5090 1998 :
5091 1998 : key = key.next();
5092 2 : }
5093 2 :
5094 2 : child_timeline.freeze_and_flush().await?;
5095 2 :
5096 2 : let lsn_offsets: [i64; 5] = [-10, -1, 0, 1, 10];
5097 2 : let mut query_lsns = Vec::new();
5098 6 : for image_lsn in parent_gap_lsns.keys().rev() {
5099 36 : for offset in lsn_offsets {
5100 30 : query_lsns.push(Lsn(image_lsn
5101 30 : .0
5102 30 : .checked_add_signed(offset)
5103 30 : .expect("Shouldn't overflow")));
5104 30 : }
5105 2 : }
5106 2 :
5107 32 : for query_lsn in query_lsns {
5108 30 : let results = child_timeline
5109 30 : .get_vectored_impl(
5110 30 : KeySpace {
5111 30 : ranges: vec![child_gap_at_key..child_gap_at_key.next()],
5112 30 : },
5113 30 : query_lsn,
5114 30 : &mut ValuesReconstructState::new(),
5115 30 : &ctx,
5116 30 : )
5117 29 : .await;
5118 2 :
5119 30 : let expected_item = parent_gap_lsns
5120 30 : .iter()
5121 30 : .rev()
5122 68 : .find(|(lsn, _)| **lsn <= query_lsn);
5123 30 :
5124 30 : info!(
5125 2 : "Doing vectored read at LSN {}. Expecting image to be: {:?}",
5126 2 : query_lsn, expected_item
5127 2 : );
5128 2 :
5129 30 : match expected_item {
5130 26 : Some((_, img_value)) => {
5131 26 : let key_results = results.expect("No vectored get error expected");
5132 26 : let key_result = &key_results[&child_gap_at_key];
5133 26 : let returned_img = key_result
5134 26 : .as_ref()
5135 26 : .expect("No page reconstruct error expected");
5136 26 :
5137 26 : info!(
5138 2 : "Vectored read at LSN {} returned image {}",
5139 0 : query_lsn,
5140 0 : std::str::from_utf8(returned_img)?
5141 2 : );
5142 26 : assert_eq!(*returned_img, test_img(img_value));
5143 2 : }
5144 2 : None => {
5145 4 : assert!(matches!(results, Err(GetVectoredError::MissingKey(_))));
5146 2 : }
5147 2 : }
5148 2 : }
5149 2 :
5150 2 : Ok(())
5151 2 : }
5152 :
5153 : #[tokio::test]
5154 2 : async fn test_random_updates() -> anyhow::Result<()> {
5155 2 : let names_algorithms = [
5156 2 : ("test_random_updates_legacy", CompactionAlgorithm::Legacy),
5157 2 : ("test_random_updates_tiered", CompactionAlgorithm::Tiered),
5158 2 : ];
5159 6 : for (name, algorithm) in names_algorithms {
5160 22459 : test_random_updates_algorithm(name, algorithm).await?;
5161 2 : }
5162 2 : Ok(())
5163 2 : }
5164 :
5165 4 : async fn test_random_updates_algorithm(
5166 4 : name: &'static str,
5167 4 : compaction_algorithm: CompactionAlgorithm,
5168 4 : ) -> anyhow::Result<()> {
5169 4 : let mut harness = TenantHarness::create(name)?;
5170 4 : harness.tenant_conf.compaction_algorithm = compaction_algorithm;
5171 16 : let (tenant, ctx) = harness.load().await;
5172 4 : let tline = tenant
5173 4 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5174 11 : .await?;
5175 :
5176 : const NUM_KEYS: usize = 1000;
5177 4 : let cancel = CancellationToken::new();
5178 4 :
5179 4 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5180 4 :
5181 4 : let mut keyspace = KeySpaceAccum::new();
5182 4 :
5183 4 : // Track when each page was last modified. Used to assert that
5184 4 : // a read sees the latest page version.
5185 4 : let mut updated = [Lsn(0); NUM_KEYS];
5186 4 :
5187 4 : let mut lsn = Lsn(0x10);
5188 : #[allow(clippy::needless_range_loop)]
5189 4004 : for blknum in 0..NUM_KEYS {
5190 4000 : lsn = Lsn(lsn.0 + 0x10);
5191 4000 : test_key.field6 = blknum as u32;
5192 4000 : let mut writer = tline.writer().await;
5193 4000 : writer
5194 4000 : .put(
5195 4000 : test_key,
5196 4000 : lsn,
5197 4000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5198 4000 : &ctx,
5199 4000 : )
5200 66 : .await?;
5201 4000 : writer.finish_write(lsn);
5202 4000 : updated[blknum] = lsn;
5203 4000 : drop(writer);
5204 4000 :
5205 4000 : keyspace.add_key(test_key);
5206 : }
5207 :
5208 204 : for _ in 0..50 {
5209 200200 : for _ in 0..NUM_KEYS {
5210 200000 : lsn = Lsn(lsn.0 + 0x10);
5211 200000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5212 200000 : test_key.field6 = blknum as u32;
5213 200000 : let mut writer = tline.writer().await;
5214 200000 : writer
5215 200000 : .put(
5216 200000 : test_key,
5217 200000 : lsn,
5218 200000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5219 200000 : &ctx,
5220 200000 : )
5221 3280 : .await?;
5222 200000 : writer.finish_write(lsn);
5223 200000 : drop(writer);
5224 200000 : updated[blknum] = lsn;
5225 : }
5226 :
5227 : // Read all the blocks
5228 200000 : for (blknum, last_lsn) in updated.iter().enumerate() {
5229 200000 : test_key.field6 = blknum as u32;
5230 200000 : assert_eq!(
5231 200000 : tline.get(test_key, lsn, &ctx).await?,
5232 200000 : test_img(&format!("{} at {}", blknum, last_lsn))
5233 : );
5234 : }
5235 :
5236 : // Perform a cycle of flush, and GC
5237 201 : tline.freeze_and_flush().await?;
5238 200 : tenant
5239 200 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
5240 187 : .await?;
5241 : }
5242 :
5243 4 : Ok(())
5244 4 : }
5245 :
5246 : #[tokio::test]
5247 2 : async fn test_traverse_branches() -> anyhow::Result<()> {
5248 2 : let (tenant, ctx) = TenantHarness::create("test_traverse_branches")?
5249 2 : .load()
5250 8 : .await;
5251 2 : let mut tline = tenant
5252 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5253 6 : .await?;
5254 2 :
5255 2 : const NUM_KEYS: usize = 1000;
5256 2 :
5257 2 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5258 2 :
5259 2 : let mut keyspace = KeySpaceAccum::new();
5260 2 :
5261 2 : let cancel = CancellationToken::new();
5262 2 :
5263 2 : // Track when each page was last modified. Used to assert that
5264 2 : // a read sees the latest page version.
5265 2 : let mut updated = [Lsn(0); NUM_KEYS];
5266 2 :
5267 2 : let mut lsn = Lsn(0x10);
5268 2 : #[allow(clippy::needless_range_loop)]
5269 2002 : for blknum in 0..NUM_KEYS {
5270 2000 : lsn = Lsn(lsn.0 + 0x10);
5271 2000 : test_key.field6 = blknum as u32;
5272 2000 : let mut writer = tline.writer().await;
5273 2000 : writer
5274 2000 : .put(
5275 2000 : test_key,
5276 2000 : lsn,
5277 2000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5278 2000 : &ctx,
5279 2000 : )
5280 33 : .await?;
5281 2000 : writer.finish_write(lsn);
5282 2000 : updated[blknum] = lsn;
5283 2000 : drop(writer);
5284 2000 :
5285 2000 : keyspace.add_key(test_key);
5286 2 : }
5287 2 :
5288 102 : for _ in 0..50 {
5289 100 : let new_tline_id = TimelineId::generate();
5290 100 : tenant
5291 100 : .branch_timeline_test(&tline, new_tline_id, Some(lsn), &ctx)
5292 2 : .await?;
5293 100 : tline = tenant
5294 100 : .get_timeline(new_tline_id, true)
5295 100 : .expect("Should have the branched timeline");
5296 2 :
5297 100100 : for _ in 0..NUM_KEYS {
5298 100000 : lsn = Lsn(lsn.0 + 0x10);
5299 100000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5300 100000 : test_key.field6 = blknum as u32;
5301 100000 : let mut writer = tline.writer().await;
5302 100000 : writer
5303 100000 : .put(
5304 100000 : test_key,
5305 100000 : lsn,
5306 100000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5307 100000 : &ctx,
5308 100000 : )
5309 2916 : .await?;
5310 100000 : println!("updating {} at {}", blknum, lsn);
5311 100000 : writer.finish_write(lsn);
5312 100000 : drop(writer);
5313 100000 : updated[blknum] = lsn;
5314 2 : }
5315 2 :
5316 2 : // Read all the blocks
5317 100000 : for (blknum, last_lsn) in updated.iter().enumerate() {
5318 100000 : test_key.field6 = blknum as u32;
5319 100000 : assert_eq!(
5320 100000 : tline.get(test_key, lsn, &ctx).await?,
5321 100000 : test_img(&format!("{} at {}", blknum, last_lsn))
5322 2 : );
5323 2 : }
5324 2 :
5325 2 : // Perform a cycle of flush, compact, and GC
5326 104 : tline.freeze_and_flush().await?;
5327 13342 : tline.compact(&cancel, EnumSet::empty(), &ctx).await?;
5328 100 : tenant
5329 100 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
5330 88 : .await?;
5331 2 : }
5332 2 :
5333 2 : Ok(())
5334 2 : }
5335 :
5336 : #[tokio::test]
5337 2 : async fn test_traverse_ancestors() -> anyhow::Result<()> {
5338 2 : let (tenant, ctx) = TenantHarness::create("test_traverse_ancestors")?
5339 2 : .load()
5340 8 : .await;
5341 2 : let mut tline = tenant
5342 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5343 6 : .await?;
5344 2 :
5345 2 : const NUM_KEYS: usize = 100;
5346 2 : const NUM_TLINES: usize = 50;
5347 2 :
5348 2 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5349 2 : // Track page mutation lsns across different timelines.
5350 2 : let mut updated = [[Lsn(0); NUM_KEYS]; NUM_TLINES];
5351 2 :
5352 2 : let mut lsn = Lsn(0x10);
5353 2 :
5354 2 : #[allow(clippy::needless_range_loop)]
5355 102 : for idx in 0..NUM_TLINES {
5356 100 : let new_tline_id = TimelineId::generate();
5357 100 : tenant
5358 100 : .branch_timeline_test(&tline, new_tline_id, Some(lsn), &ctx)
5359 2 : .await?;
5360 100 : tline = tenant
5361 100 : .get_timeline(new_tline_id, true)
5362 100 : .expect("Should have the branched timeline");
5363 2 :
5364 10100 : for _ in 0..NUM_KEYS {
5365 10000 : lsn = Lsn(lsn.0 + 0x10);
5366 10000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5367 10000 : test_key.field6 = blknum as u32;
5368 10000 : let mut writer = tline.writer().await;
5369 10000 : writer
5370 10000 : .put(
5371 10000 : test_key,
5372 10000 : lsn,
5373 10000 : &Value::Image(test_img(&format!("{} {} at {}", idx, blknum, lsn))),
5374 10000 : &ctx,
5375 10000 : )
5376 318 : .await?;
5377 10000 : println!("updating [{}][{}] at {}", idx, blknum, lsn);
5378 10000 : writer.finish_write(lsn);
5379 10000 : drop(writer);
5380 10000 : updated[idx][blknum] = lsn;
5381 2 : }
5382 2 : }
5383 2 :
5384 2 : // Read pages from leaf timeline across all ancestors.
5385 100 : for (idx, lsns) in updated.iter().enumerate() {
5386 10000 : for (blknum, lsn) in lsns.iter().enumerate() {
5387 2 : // Skip empty mutations.
5388 10000 : if lsn.0 == 0 {
5389 3611 : continue;
5390 6389 : }
5391 6389 : println!("checking [{idx}][{blknum}] at {lsn}");
5392 6389 : test_key.field6 = blknum as u32;
5393 6389 : assert_eq!(
5394 6389 : tline.get(test_key, *lsn, &ctx).await?,
5395 6389 : test_img(&format!("{idx} {blknum} at {lsn}"))
5396 2 : );
5397 2 : }
5398 2 : }
5399 2 : Ok(())
5400 2 : }
5401 :
5402 : #[tokio::test]
5403 2 : async fn test_write_at_initdb_lsn_takes_optimization_code_path() -> anyhow::Result<()> {
5404 2 : let (tenant, ctx) = TenantHarness::create("test_empty_test_timeline_is_usable")?
5405 2 : .load()
5406 8 : .await;
5407 2 :
5408 2 : let initdb_lsn = Lsn(0x20);
5409 2 : let utline = tenant
5410 2 : .create_empty_timeline(TIMELINE_ID, initdb_lsn, DEFAULT_PG_VERSION, &ctx)
5411 2 : .await?;
5412 2 : let tline = utline.raw_timeline().unwrap();
5413 2 :
5414 2 : // Spawn flush loop now so that we can set the `expect_initdb_optimization`
5415 2 : tline.maybe_spawn_flush_loop();
5416 2 :
5417 2 : // Make sure the timeline has the minimum set of required keys for operation.
5418 2 : // The only operation you can always do on an empty timeline is to `put` new data.
5419 2 : // Except if you `put` at `initdb_lsn`.
5420 2 : // In that case, there's an optimization to directly create image layers instead of delta layers.
5421 2 : // It uses `repartition()`, which assumes some keys to be present.
5422 2 : // Let's make sure the test timeline can handle that case.
5423 2 : {
5424 2 : let mut state = tline.flush_loop_state.lock().unwrap();
5425 2 : assert_eq!(
5426 2 : timeline::FlushLoopState::Running {
5427 2 : expect_initdb_optimization: false,
5428 2 : initdb_optimization_count: 0,
5429 2 : },
5430 2 : *state
5431 2 : );
5432 2 : *state = timeline::FlushLoopState::Running {
5433 2 : expect_initdb_optimization: true,
5434 2 : initdb_optimization_count: 0,
5435 2 : };
5436 2 : }
5437 2 :
5438 2 : // Make writes at the initdb_lsn. When we flush it below, it should be handled by the optimization.
5439 2 : // As explained above, the optimization requires some keys to be present.
5440 2 : // As per `create_empty_timeline` documentation, use init_empty to set them.
5441 2 : // This is what `create_test_timeline` does, by the way.
5442 2 : let mut modification = tline.begin_modification(initdb_lsn);
5443 2 : modification
5444 2 : .init_empty_test_timeline()
5445 2 : .context("init_empty_test_timeline")?;
5446 2 : modification
5447 2 : .commit(&ctx)
5448 2 : .await
5449 2 : .context("commit init_empty_test_timeline modification")?;
5450 2 :
5451 2 : // Do the flush. The flush code will check the expectations that we set above.
5452 2 : tline.freeze_and_flush().await?;
5453 2 :
5454 2 : // assert freeze_and_flush exercised the initdb optimization
5455 2 : {
5456 2 : let state = tline.flush_loop_state.lock().unwrap();
5457 2 : let timeline::FlushLoopState::Running {
5458 2 : expect_initdb_optimization,
5459 2 : initdb_optimization_count,
5460 2 : } = *state
5461 2 : else {
5462 2 : panic!("unexpected state: {:?}", *state);
5463 2 : };
5464 2 : assert!(expect_initdb_optimization);
5465 2 : assert!(initdb_optimization_count > 0);
5466 2 : }
5467 2 : Ok(())
5468 2 : }
5469 :
5470 : #[tokio::test]
5471 2 : async fn test_create_guard_crash() -> anyhow::Result<()> {
5472 2 : let name = "test_create_guard_crash";
5473 2 : let harness = TenantHarness::create(name)?;
5474 2 : {
5475 5 : let (tenant, ctx) = harness.load().await;
5476 2 : let tline = tenant
5477 2 : .create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION, &ctx)
5478 2 : .await?;
5479 2 : // Leave the timeline ID in [`Tenant::timelines_creating`] to exclude attempting to create it again
5480 2 : let raw_tline = tline.raw_timeline().unwrap();
5481 2 : raw_tline
5482 2 : .shutdown(super::timeline::ShutdownMode::Hard)
5483 2 : .instrument(info_span!("test_shutdown", tenant_id=%raw_tline.tenant_shard_id, shard_id=%raw_tline.tenant_shard_id.shard_slug(), timeline_id=%TIMELINE_ID))
5484 2 : .await;
5485 2 : std::mem::forget(tline);
5486 2 : }
5487 2 :
5488 8 : let (tenant, _) = harness.load().await;
5489 2 : match tenant.get_timeline(TIMELINE_ID, false) {
5490 2 : Ok(_) => panic!("timeline should've been removed during load"),
5491 2 : Err(e) => {
5492 2 : assert_eq!(
5493 2 : e,
5494 2 : GetTimelineError::NotFound {
5495 2 : tenant_id: tenant.tenant_shard_id,
5496 2 : timeline_id: TIMELINE_ID,
5497 2 : }
5498 2 : )
5499 2 : }
5500 2 : }
5501 2 :
5502 2 : assert!(!harness
5503 2 : .conf
5504 2 : .timeline_path(&tenant.tenant_shard_id, &TIMELINE_ID)
5505 2 : .exists());
5506 2 :
5507 2 : Ok(())
5508 2 : }
5509 :
5510 : #[tokio::test]
5511 2 : async fn test_read_at_max_lsn() -> anyhow::Result<()> {
5512 2 : let names_algorithms = [
5513 2 : ("test_read_at_max_lsn_legacy", CompactionAlgorithm::Legacy),
5514 2 : ("test_read_at_max_lsn_tiered", CompactionAlgorithm::Tiered),
5515 2 : ];
5516 6 : for (name, algorithm) in names_algorithms {
5517 63736 : test_read_at_max_lsn_algorithm(name, algorithm).await?;
5518 2 : }
5519 2 : Ok(())
5520 2 : }
5521 :
5522 4 : async fn test_read_at_max_lsn_algorithm(
5523 4 : name: &'static str,
5524 4 : compaction_algorithm: CompactionAlgorithm,
5525 4 : ) -> anyhow::Result<()> {
5526 4 : let mut harness = TenantHarness::create(name)?;
5527 4 : harness.tenant_conf.compaction_algorithm = compaction_algorithm;
5528 16 : let (tenant, ctx) = harness.load().await;
5529 4 : let tline = tenant
5530 4 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
5531 10 : .await?;
5532 :
5533 4 : let lsn = Lsn(0x10);
5534 4 : let compact = false;
5535 63400 : bulk_insert_maybe_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000, compact).await?;
5536 :
5537 4 : let test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5538 4 : let read_lsn = Lsn(u64::MAX - 1);
5539 :
5540 310 : let result = tline.get(test_key, read_lsn, &ctx).await;
5541 4 : assert!(result.is_ok(), "result is not Ok: {}", result.unwrap_err());
5542 :
5543 4 : Ok(())
5544 4 : }
5545 :
5546 : #[tokio::test]
5547 2 : async fn test_metadata_scan() -> anyhow::Result<()> {
5548 2 : let harness = TenantHarness::create("test_metadata_scan")?;
5549 8 : let (tenant, ctx) = harness.load().await;
5550 2 : let tline = tenant
5551 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5552 6 : .await?;
5553 2 :
5554 2 : const NUM_KEYS: usize = 1000;
5555 2 : const STEP: usize = 10000; // random update + scan base_key + idx * STEP
5556 2 :
5557 2 : let cancel = CancellationToken::new();
5558 2 :
5559 2 : let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
5560 2 : base_key.field1 = AUX_KEY_PREFIX;
5561 2 : let mut test_key = base_key;
5562 2 :
5563 2 : // Track when each page was last modified. Used to assert that
5564 2 : // a read sees the latest page version.
5565 2 : let mut updated = [Lsn(0); NUM_KEYS];
5566 2 :
5567 2 : let mut lsn = Lsn(0x10);
5568 2 : #[allow(clippy::needless_range_loop)]
5569 2002 : for blknum in 0..NUM_KEYS {
5570 2000 : lsn = Lsn(lsn.0 + 0x10);
5571 2000 : test_key.field6 = (blknum * STEP) as u32;
5572 2000 : let mut writer = tline.writer().await;
5573 2000 : writer
5574 2000 : .put(
5575 2000 : test_key,
5576 2000 : lsn,
5577 2000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5578 2000 : &ctx,
5579 2000 : )
5580 33 : .await?;
5581 2000 : writer.finish_write(lsn);
5582 2000 : updated[blknum] = lsn;
5583 2000 : drop(writer);
5584 2 : }
5585 2 :
5586 2 : let keyspace = KeySpace::single(base_key..base_key.add((NUM_KEYS * STEP) as u32));
5587 2 :
5588 24 : for iter in 0..=10 {
5589 2 : // Read all the blocks
5590 22000 : for (blknum, last_lsn) in updated.iter().enumerate() {
5591 22000 : test_key.field6 = (blknum * STEP) as u32;
5592 22000 : assert_eq!(
5593 22000 : tline.get(test_key, lsn, &ctx).await?,
5594 22000 : test_img(&format!("{} at {}", blknum, last_lsn))
5595 2 : );
5596 2 : }
5597 2 :
5598 22 : let mut cnt = 0;
5599 22000 : for (key, value) in tline
5600 22 : .get_vectored_impl(
5601 22 : keyspace.clone(),
5602 22 : lsn,
5603 22 : &mut ValuesReconstructState::default(),
5604 22 : &ctx,
5605 22 : )
5606 5613 : .await?
5607 2 : {
5608 22000 : let blknum = key.field6 as usize;
5609 22000 : let value = value?;
5610 22000 : assert!(blknum % STEP == 0);
5611 22000 : let blknum = blknum / STEP;
5612 22000 : assert_eq!(
5613 22000 : value,
5614 22000 : test_img(&format!("{} at {}", blknum, updated[blknum]))
5615 22000 : );
5616 22000 : cnt += 1;
5617 2 : }
5618 2 :
5619 22 : assert_eq!(cnt, NUM_KEYS);
5620 2 :
5621 22022 : for _ in 0..NUM_KEYS {
5622 22000 : lsn = Lsn(lsn.0 + 0x10);
5623 22000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5624 22000 : test_key.field6 = (blknum * STEP) as u32;
5625 22000 : let mut writer = tline.writer().await;
5626 22000 : writer
5627 22000 : .put(
5628 22000 : test_key,
5629 22000 : lsn,
5630 22000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5631 22000 : &ctx,
5632 22000 : )
5633 625 : .await?;
5634 22000 : writer.finish_write(lsn);
5635 22000 : drop(writer);
5636 22000 : updated[blknum] = lsn;
5637 2 : }
5638 2 :
5639 2 : // Perform two cycles of flush, compact, and GC
5640 66 : for round in 0..2 {
5641 44 : tline.freeze_and_flush().await?;
5642 44 : tline
5643 44 : .compact(
5644 44 : &cancel,
5645 44 : if iter % 5 == 0 && round == 0 {
5646 6 : let mut flags = EnumSet::new();
5647 6 : flags.insert(CompactFlags::ForceImageLayerCreation);
5648 6 : flags.insert(CompactFlags::ForceRepartition);
5649 6 : flags
5650 2 : } else {
5651 38 : EnumSet::empty()
5652 2 : },
5653 44 : &ctx,
5654 2 : )
5655 8981 : .await?;
5656 44 : tenant
5657 44 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
5658 34 : .await?;
5659 2 : }
5660 2 : }
5661 2 :
5662 2 : Ok(())
5663 2 : }
5664 :
5665 : #[tokio::test]
5666 2 : async fn test_metadata_compaction_trigger() -> anyhow::Result<()> {
5667 2 : let harness = TenantHarness::create("test_metadata_compaction_trigger")?;
5668 8 : let (tenant, ctx) = harness.load().await;
5669 2 : let tline = tenant
5670 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5671 6 : .await?;
5672 2 :
5673 2 : let cancel = CancellationToken::new();
5674 2 :
5675 2 : let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
5676 2 : base_key.field1 = AUX_KEY_PREFIX;
5677 2 : let test_key = base_key;
5678 2 : let mut lsn = Lsn(0x10);
5679 2 :
5680 42 : for _ in 0..20 {
5681 40 : lsn = Lsn(lsn.0 + 0x10);
5682 40 : let mut writer = tline.writer().await;
5683 40 : writer
5684 40 : .put(
5685 40 : test_key,
5686 40 : lsn,
5687 40 : &Value::Image(test_img(&format!("{} at {}", 0, lsn))),
5688 40 : &ctx,
5689 40 : )
5690 20 : .await?;
5691 40 : writer.finish_write(lsn);
5692 40 : drop(writer);
5693 40 : tline.freeze_and_flush().await?; // force create a delta layer
5694 2 : }
5695 2 :
5696 2 : let before_num_l0_delta_files = tline
5697 2 : .layers
5698 2 : .read()
5699 2 : .await
5700 2 : .layer_map()
5701 2 : .get_level0_deltas()?
5702 2 : .len();
5703 2 :
5704 111 : tline.compact(&cancel, EnumSet::empty(), &ctx).await?;
5705 2 :
5706 2 : let after_num_l0_delta_files = tline
5707 2 : .layers
5708 2 : .read()
5709 2 : .await
5710 2 : .layer_map()
5711 2 : .get_level0_deltas()?
5712 2 : .len();
5713 2 :
5714 2 : assert!(after_num_l0_delta_files < before_num_l0_delta_files, "after_num_l0_delta_files={after_num_l0_delta_files}, before_num_l0_delta_files={before_num_l0_delta_files}");
5715 2 :
5716 2 : assert_eq!(
5717 4 : tline.get(test_key, lsn, &ctx).await?,
5718 2 : test_img(&format!("{} at {}", 0, lsn))
5719 2 : );
5720 2 :
5721 2 : Ok(())
5722 2 : }
5723 :
5724 : #[tokio::test]
5725 2 : async fn test_branch_copies_dirty_aux_file_flag() {
5726 2 : let harness = TenantHarness::create("test_branch_copies_dirty_aux_file_flag").unwrap();
5727 2 :
5728 2 : // the default aux file policy to switch is v1 if not set by the admins
5729 2 : assert_eq!(
5730 2 : harness.tenant_conf.switch_aux_file_policy,
5731 2 : AuxFilePolicy::V1
5732 2 : );
5733 8 : let (tenant, ctx) = harness.load().await;
5734 2 :
5735 2 : let mut lsn = Lsn(0x08);
5736 2 :
5737 2 : let tline: Arc<Timeline> = tenant
5738 2 : .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx)
5739 6 : .await
5740 2 : .unwrap();
5741 2 :
5742 2 : // no aux file is written at this point, so the persistent flag should be unset
5743 2 : assert_eq!(tline.last_aux_file_policy.load(), None);
5744 2 :
5745 2 : {
5746 2 : lsn += 8;
5747 2 : let mut modification = tline.begin_modification(lsn);
5748 2 : modification
5749 2 : .put_file("pg_logical/mappings/test1", b"first", &ctx)
5750 4 : .await
5751 2 : .unwrap();
5752 2 : modification.commit(&ctx).await.unwrap();
5753 2 : }
5754 2 :
5755 2 : // there is no tenant manager to pass the configuration through, so lets mimic it
5756 2 : tenant.set_new_location_config(
5757 2 : AttachedTenantConf::try_from(LocationConf::attached_single(
5758 2 : TenantConfOpt {
5759 2 : switch_aux_file_policy: Some(AuxFilePolicy::V2),
5760 2 : ..Default::default()
5761 2 : },
5762 2 : tenant.generation,
5763 2 : &pageserver_api::models::ShardParameters::default(),
5764 2 : ))
5765 2 : .unwrap(),
5766 2 : );
5767 2 :
5768 2 : assert_eq!(
5769 2 : tline.get_switch_aux_file_policy(),
5770 2 : AuxFilePolicy::V2,
5771 2 : "wanted state has been updated"
5772 2 : );
5773 2 : assert_eq!(
5774 2 : tline.last_aux_file_policy.load(),
5775 2 : Some(AuxFilePolicy::V1),
5776 2 : "aux file is written with switch_aux_file_policy unset (which is v1), so we should keep v1"
5777 2 : );
5778 2 :
5779 2 : // we can read everything from the storage
5780 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5781 2 : assert_eq!(
5782 2 : files.get("pg_logical/mappings/test1"),
5783 2 : Some(&bytes::Bytes::from_static(b"first"))
5784 2 : );
5785 2 :
5786 2 : {
5787 2 : lsn += 8;
5788 2 : let mut modification = tline.begin_modification(lsn);
5789 2 : modification
5790 2 : .put_file("pg_logical/mappings/test2", b"second", &ctx)
5791 2 : .await
5792 2 : .unwrap();
5793 2 : modification.commit(&ctx).await.unwrap();
5794 2 : }
5795 2 :
5796 2 : assert_eq!(
5797 2 : tline.last_aux_file_policy.load(),
5798 2 : Some(AuxFilePolicy::V1),
5799 2 : "keep v1 storage format when new files are written"
5800 2 : );
5801 2 :
5802 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5803 2 : assert_eq!(
5804 2 : files.get("pg_logical/mappings/test2"),
5805 2 : Some(&bytes::Bytes::from_static(b"second"))
5806 2 : );
5807 2 :
5808 2 : let child = tenant
5809 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(lsn), &ctx)
5810 2 : .await
5811 2 : .unwrap();
5812 2 :
5813 2 : // child copies the last flag even if that is not on remote storage yet
5814 2 : assert_eq!(child.get_switch_aux_file_policy(), AuxFilePolicy::V2);
5815 2 : assert_eq!(child.last_aux_file_policy.load(), Some(AuxFilePolicy::V1));
5816 2 :
5817 2 : let files = child.list_aux_files(lsn, &ctx).await.unwrap();
5818 2 : assert_eq!(files.get("pg_logical/mappings/test1"), None);
5819 2 : assert_eq!(files.get("pg_logical/mappings/test2"), None);
5820 2 :
5821 2 : // even if we crash here without flushing parent timeline with it's new
5822 2 : // last_aux_file_policy we are safe, because child was never meant to access ancestor's
5823 2 : // files. the ancestor can even switch back to V1 because of a migration safely.
5824 2 : }
5825 :
5826 : #[tokio::test]
5827 2 : async fn aux_file_policy_switch() {
5828 2 : let mut harness = TenantHarness::create("aux_file_policy_switch").unwrap();
5829 2 : harness.tenant_conf.switch_aux_file_policy = AuxFilePolicy::CrossValidation; // set to cross-validation mode
5830 8 : let (tenant, ctx) = harness.load().await;
5831 2 :
5832 2 : let mut lsn = Lsn(0x08);
5833 2 :
5834 2 : let tline: Arc<Timeline> = tenant
5835 2 : .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx)
5836 6 : .await
5837 2 : .unwrap();
5838 2 :
5839 2 : assert_eq!(
5840 2 : tline.last_aux_file_policy.load(),
5841 2 : None,
5842 2 : "no aux file is written so it should be unset"
5843 2 : );
5844 2 :
5845 2 : {
5846 2 : lsn += 8;
5847 2 : let mut modification = tline.begin_modification(lsn);
5848 2 : modification
5849 2 : .put_file("pg_logical/mappings/test1", b"first", &ctx)
5850 4 : .await
5851 2 : .unwrap();
5852 2 : modification.commit(&ctx).await.unwrap();
5853 2 : }
5854 2 :
5855 2 : // there is no tenant manager to pass the configuration through, so lets mimic it
5856 2 : tenant.set_new_location_config(
5857 2 : AttachedTenantConf::try_from(LocationConf::attached_single(
5858 2 : TenantConfOpt {
5859 2 : switch_aux_file_policy: Some(AuxFilePolicy::V2),
5860 2 : ..Default::default()
5861 2 : },
5862 2 : tenant.generation,
5863 2 : &pageserver_api::models::ShardParameters::default(),
5864 2 : ))
5865 2 : .unwrap(),
5866 2 : );
5867 2 :
5868 2 : assert_eq!(
5869 2 : tline.get_switch_aux_file_policy(),
5870 2 : AuxFilePolicy::V2,
5871 2 : "wanted state has been updated"
5872 2 : );
5873 2 : assert_eq!(
5874 2 : tline.last_aux_file_policy.load(),
5875 2 : Some(AuxFilePolicy::CrossValidation),
5876 2 : "dirty index_part.json reflected state is yet to be updated"
5877 2 : );
5878 2 :
5879 2 : // we can still read the auxfile v1 before we ingest anything new
5880 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5881 2 : assert_eq!(
5882 2 : files.get("pg_logical/mappings/test1"),
5883 2 : Some(&bytes::Bytes::from_static(b"first"))
5884 2 : );
5885 2 :
5886 2 : {
5887 2 : lsn += 8;
5888 2 : let mut modification = tline.begin_modification(lsn);
5889 2 : modification
5890 2 : .put_file("pg_logical/mappings/test2", b"second", &ctx)
5891 2 : .await
5892 2 : .unwrap();
5893 2 : modification.commit(&ctx).await.unwrap();
5894 2 : }
5895 2 :
5896 2 : assert_eq!(
5897 2 : tline.last_aux_file_policy.load(),
5898 2 : Some(AuxFilePolicy::V2),
5899 2 : "ingesting a file should apply the wanted switch state when applicable"
5900 2 : );
5901 2 :
5902 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5903 2 : assert_eq!(
5904 2 : files.get("pg_logical/mappings/test1"),
5905 2 : Some(&bytes::Bytes::from_static(b"first")),
5906 2 : "cross validation writes to both v1 and v2 so this should be available in v2"
5907 2 : );
5908 2 : assert_eq!(
5909 2 : files.get("pg_logical/mappings/test2"),
5910 2 : Some(&bytes::Bytes::from_static(b"second"))
5911 2 : );
5912 2 :
5913 2 : // mimic again by trying to flip it from V2 to V1 (not switched to while ingesting a file)
5914 2 : tenant.set_new_location_config(
5915 2 : AttachedTenantConf::try_from(LocationConf::attached_single(
5916 2 : TenantConfOpt {
5917 2 : switch_aux_file_policy: Some(AuxFilePolicy::V1),
5918 2 : ..Default::default()
5919 2 : },
5920 2 : tenant.generation,
5921 2 : &pageserver_api::models::ShardParameters::default(),
5922 2 : ))
5923 2 : .unwrap(),
5924 2 : );
5925 2 :
5926 2 : {
5927 2 : lsn += 8;
5928 2 : let mut modification = tline.begin_modification(lsn);
5929 2 : modification
5930 2 : .put_file("pg_logical/mappings/test2", b"third", &ctx)
5931 2 : .await
5932 2 : .unwrap();
5933 2 : modification.commit(&ctx).await.unwrap();
5934 2 : }
5935 2 :
5936 2 : assert_eq!(
5937 2 : tline.get_switch_aux_file_policy(),
5938 2 : AuxFilePolicy::V1,
5939 2 : "wanted state has been updated again, even if invalid request"
5940 2 : );
5941 2 :
5942 2 : assert_eq!(
5943 2 : tline.last_aux_file_policy.load(),
5944 2 : Some(AuxFilePolicy::V2),
5945 2 : "ingesting a file should apply the wanted switch state when applicable"
5946 2 : );
5947 2 :
5948 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5949 2 : assert_eq!(
5950 2 : files.get("pg_logical/mappings/test1"),
5951 2 : Some(&bytes::Bytes::from_static(b"first"))
5952 2 : );
5953 2 : assert_eq!(
5954 2 : files.get("pg_logical/mappings/test2"),
5955 2 : Some(&bytes::Bytes::from_static(b"third"))
5956 2 : );
5957 2 :
5958 2 : // mimic again by trying to flip it from from V1 to V2 (not switched to while ingesting a file)
5959 2 : tenant.set_new_location_config(
5960 2 : AttachedTenantConf::try_from(LocationConf::attached_single(
5961 2 : TenantConfOpt {
5962 2 : switch_aux_file_policy: Some(AuxFilePolicy::V2),
5963 2 : ..Default::default()
5964 2 : },
5965 2 : tenant.generation,
5966 2 : &pageserver_api::models::ShardParameters::default(),
5967 2 : ))
5968 2 : .unwrap(),
5969 2 : );
5970 2 :
5971 2 : {
5972 2 : lsn += 8;
5973 2 : let mut modification = tline.begin_modification(lsn);
5974 2 : modification
5975 2 : .put_file("pg_logical/mappings/test3", b"last", &ctx)
5976 2 : .await
5977 2 : .unwrap();
5978 2 : modification.commit(&ctx).await.unwrap();
5979 2 : }
5980 2 :
5981 2 : assert_eq!(tline.get_switch_aux_file_policy(), AuxFilePolicy::V2);
5982 2 :
5983 2 : assert_eq!(tline.last_aux_file_policy.load(), Some(AuxFilePolicy::V2));
5984 2 :
5985 2 : let files = tline.list_aux_files(lsn, &ctx).await.unwrap();
5986 2 : assert_eq!(
5987 2 : files.get("pg_logical/mappings/test1"),
5988 2 : Some(&bytes::Bytes::from_static(b"first"))
5989 2 : );
5990 2 : assert_eq!(
5991 2 : files.get("pg_logical/mappings/test2"),
5992 2 : Some(&bytes::Bytes::from_static(b"third"))
5993 2 : );
5994 2 : assert_eq!(
5995 2 : files.get("pg_logical/mappings/test3"),
5996 2 : Some(&bytes::Bytes::from_static(b"last"))
5997 2 : );
5998 2 :
5999 2 : // Check that we are going to remove v1 aux files.
6000 2 : let (mut dense_keyspace, _) = tline.collect_keyspace(lsn, &ctx).await.unwrap();
6001 2 : assert!(dense_keyspace.remove_overlapping_with(&KeySpace::single(AUX_FILES_KEY..AUX_FILES_KEY.next())).is_empty());
6002 2 : }
6003 :
6004 : #[tokio::test]
6005 2 : async fn test_metadata_image_creation() -> anyhow::Result<()> {
6006 2 : let harness = TenantHarness::create("test_metadata_image_creation")?;
6007 8 : let (tenant, ctx) = harness.load().await;
6008 2 : let tline = tenant
6009 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6010 6 : .await?;
6011 2 :
6012 2 : const NUM_KEYS: usize = 1000;
6013 2 : const STEP: usize = 10000; // random update + scan base_key + idx * STEP
6014 2 :
6015 2 : let cancel = CancellationToken::new();
6016 2 :
6017 2 : let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
6018 2 : base_key.field1 = AUX_KEY_PREFIX;
6019 2 : let mut test_key = base_key;
6020 2 : let mut lsn = Lsn(0x10);
6021 2 :
6022 8 : async fn scan_with_statistics(
6023 8 : tline: &Timeline,
6024 8 : keyspace: &KeySpace,
6025 8 : lsn: Lsn,
6026 8 : ctx: &RequestContext,
6027 8 : ) -> anyhow::Result<(BTreeMap<Key, Result<Bytes, PageReconstructError>>, usize)> {
6028 8 : let mut reconstruct_state = ValuesReconstructState::default();
6029 8 : let res = tline
6030 8 : .get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx)
6031 1691 : .await?;
6032 8 : Ok((res, reconstruct_state.get_delta_layers_visited() as usize))
6033 8 : }
6034 2 :
6035 2 : #[allow(clippy::needless_range_loop)]
6036 2002 : for blknum in 0..NUM_KEYS {
6037 2000 : lsn = Lsn(lsn.0 + 0x10);
6038 2000 : test_key.field6 = (blknum * STEP) as u32;
6039 2000 : let mut writer = tline.writer().await;
6040 2000 : writer
6041 2000 : .put(
6042 2000 : test_key,
6043 2000 : lsn,
6044 2000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
6045 2000 : &ctx,
6046 2000 : )
6047 33 : .await?;
6048 2000 : writer.finish_write(lsn);
6049 2000 : drop(writer);
6050 2 : }
6051 2 :
6052 2 : let keyspace = KeySpace::single(base_key..base_key.add((NUM_KEYS * STEP) as u32));
6053 2 :
6054 22 : for iter in 1..=10 {
6055 20020 : for _ in 0..NUM_KEYS {
6056 20000 : lsn = Lsn(lsn.0 + 0x10);
6057 20000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
6058 20000 : test_key.field6 = (blknum * STEP) as u32;
6059 20000 : let mut writer = tline.writer().await;
6060 20000 : writer
6061 20000 : .put(
6062 20000 : test_key,
6063 20000 : lsn,
6064 20000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
6065 20000 : &ctx,
6066 20000 : )
6067 320 : .await?;
6068 20000 : writer.finish_write(lsn);
6069 20000 : drop(writer);
6070 2 : }
6071 2 :
6072 20 : tline.freeze_and_flush().await?;
6073 2 :
6074 20 : if iter % 5 == 0 {
6075 4 : let (_, before_delta_file_accessed) =
6076 1683 : scan_with_statistics(&tline, &keyspace, lsn, &ctx).await?;
6077 4 : tline
6078 4 : .compact(
6079 4 : &cancel,
6080 4 : {
6081 4 : let mut flags = EnumSet::new();
6082 4 : flags.insert(CompactFlags::ForceImageLayerCreation);
6083 4 : flags.insert(CompactFlags::ForceRepartition);
6084 4 : flags
6085 4 : },
6086 4 : &ctx,
6087 4 : )
6088 6531 : .await?;
6089 4 : let (_, after_delta_file_accessed) =
6090 8 : scan_with_statistics(&tline, &keyspace, lsn, &ctx).await?;
6091 4 : assert!(after_delta_file_accessed < before_delta_file_accessed, "after_delta_file_accessed={after_delta_file_accessed}, before_delta_file_accessed={before_delta_file_accessed}");
6092 2 : // Given that we already produced an image layer, there should be no delta layer needed for the scan, but still setting a low threshold there for unforeseen circumstances.
6093 4 : assert!(
6094 4 : after_delta_file_accessed <= 2,
6095 2 : "after_delta_file_accessed={after_delta_file_accessed}"
6096 2 : );
6097 16 : }
6098 2 : }
6099 2 :
6100 2 : Ok(())
6101 2 : }
6102 :
6103 : #[tokio::test]
6104 2 : async fn test_vectored_missing_data_key_reads() -> anyhow::Result<()> {
6105 2 : let harness = TenantHarness::create("test_vectored_missing_data_key_reads")?;
6106 8 : let (tenant, ctx) = harness.load().await;
6107 2 : let tline = tenant
6108 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6109 6 : .await?;
6110 2 :
6111 2 : let cancel = CancellationToken::new();
6112 2 :
6113 2 : let base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
6114 2 : let base_key_child = Key::from_hex("000000000033333333444444445500000001").unwrap();
6115 2 : let base_key_nonexist = Key::from_hex("000000000033333333444444445500000002").unwrap();
6116 2 :
6117 2 : let mut lsn = Lsn(0x20);
6118 2 :
6119 2 : {
6120 2 : let mut writer = tline.writer().await;
6121 2 : writer
6122 2 : .put(base_key, lsn, &Value::Image(test_img("data key 1")), &ctx)
6123 2 : .await?;
6124 2 : writer.finish_write(lsn);
6125 2 : drop(writer);
6126 2 :
6127 2 : tline.freeze_and_flush().await?; // this will create a image layer
6128 2 : }
6129 2 :
6130 2 : let child = tenant
6131 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(lsn), &ctx)
6132 2 : .await
6133 2 : .unwrap();
6134 2 :
6135 2 : lsn.0 += 0x10;
6136 2 :
6137 2 : {
6138 2 : let mut writer = child.writer().await;
6139 2 : writer
6140 2 : .put(
6141 2 : base_key_child,
6142 2 : lsn,
6143 2 : &Value::Image(test_img("data key 2")),
6144 2 : &ctx,
6145 2 : )
6146 2 : .await?;
6147 2 : writer.finish_write(lsn);
6148 2 : drop(writer);
6149 2 :
6150 2 : child.freeze_and_flush().await?; // this will create a delta
6151 2 :
6152 2 : {
6153 2 : // update the partitioning to include the test key space, otherwise they
6154 2 : // will be dropped by image layer creation
6155 2 : let mut guard = child.partitioning.lock().await;
6156 2 : let ((partitioning, _), partition_lsn) = &mut *guard;
6157 2 : partitioning
6158 2 : .parts
6159 2 : .push(KeySpace::single(base_key..base_key_nonexist)); // exclude the nonexist key
6160 2 : *partition_lsn = lsn;
6161 2 : }
6162 2 :
6163 2 : child
6164 2 : .compact(
6165 2 : &cancel,
6166 2 : {
6167 2 : let mut set = EnumSet::empty();
6168 2 : set.insert(CompactFlags::ForceImageLayerCreation);
6169 2 : set
6170 2 : },
6171 2 : &ctx,
6172 2 : )
6173 19 : .await?; // force create an image layer for the keys, TODO: check if the image layer is created
6174 2 : }
6175 2 :
6176 12 : async fn get_vectored_impl_wrapper(
6177 12 : tline: &Arc<Timeline>,
6178 12 : key: Key,
6179 12 : lsn: Lsn,
6180 12 : ctx: &RequestContext,
6181 12 : ) -> Result<Option<Bytes>, GetVectoredError> {
6182 12 : let mut reconstruct_state = ValuesReconstructState::new();
6183 12 : let mut res = tline
6184 12 : .get_vectored_impl(
6185 12 : KeySpace::single(key..key.next()),
6186 12 : lsn,
6187 12 : &mut reconstruct_state,
6188 12 : ctx,
6189 12 : )
6190 9 : .await?;
6191 6 : Ok(res.pop_last().map(|(k, v)| {
6192 6 : assert_eq!(k, key);
6193 6 : v.unwrap()
6194 6 : }))
6195 12 : }
6196 2 :
6197 2 : // test vectored get on parent timeline
6198 2 : assert_eq!(
6199 2 : get_vectored_impl_wrapper(&tline, base_key, lsn, &ctx).await?,
6200 2 : Some(test_img("data key 1"))
6201 2 : );
6202 2 : assert!(get_vectored_impl_wrapper(&tline, base_key_child, lsn, &ctx)
6203 3 : .await
6204 2 : .unwrap_err()
6205 2 : .is_missing_key_error());
6206 2 : assert!(
6207 2 : get_vectored_impl_wrapper(&tline, base_key_nonexist, lsn, &ctx)
6208 2 : .await
6209 2 : .unwrap_err()
6210 2 : .is_missing_key_error()
6211 2 : );
6212 2 :
6213 2 : // test vectored get on child timeline
6214 2 : assert_eq!(
6215 4 : get_vectored_impl_wrapper(&child, base_key, lsn, &ctx).await?,
6216 2 : Some(test_img("data key 1"))
6217 2 : );
6218 2 : assert_eq!(
6219 2 : get_vectored_impl_wrapper(&child, base_key_child, lsn, &ctx).await?,
6220 2 : Some(test_img("data key 2"))
6221 2 : );
6222 2 : assert!(
6223 2 : get_vectored_impl_wrapper(&child, base_key_nonexist, lsn, &ctx)
6224 2 : .await
6225 2 : .unwrap_err()
6226 2 : .is_missing_key_error()
6227 2 : );
6228 2 :
6229 2 : Ok(())
6230 2 : }
6231 :
6232 : #[tokio::test]
6233 2 : async fn test_vectored_missing_metadata_key_reads() -> anyhow::Result<()> {
6234 2 : let harness = TenantHarness::create("test_vectored_missing_metadata_key_reads")?;
6235 8 : let (tenant, ctx) = harness.load().await;
6236 2 : let tline = tenant
6237 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
6238 6 : .await?;
6239 2 :
6240 2 : let cancel = CancellationToken::new();
6241 2 :
6242 2 : let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
6243 2 : let mut base_key_child = Key::from_hex("000000000033333333444444445500000001").unwrap();
6244 2 : let mut base_key_nonexist = Key::from_hex("000000000033333333444444445500000002").unwrap();
6245 2 : base_key.field1 = AUX_KEY_PREFIX;
6246 2 : base_key_child.field1 = AUX_KEY_PREFIX;
6247 2 : base_key_nonexist.field1 = AUX_KEY_PREFIX;
6248 2 :
6249 2 : let mut lsn = Lsn(0x20);
6250 2 :
6251 2 : {
6252 2 : let mut writer = tline.writer().await;
6253 2 : writer
6254 2 : .put(
6255 2 : base_key,
6256 2 : lsn,
6257 2 : &Value::Image(test_img("metadata key 1")),
6258 2 : &ctx,
6259 2 : )
6260 2 : .await?;
6261 2 : writer.finish_write(lsn);
6262 2 : drop(writer);
6263 2 :
6264 2 : tline.freeze_and_flush().await?; // this will create an image layer
6265 2 :
6266 2 : tline
6267 2 : .compact(
6268 2 : &cancel,
6269 2 : {
6270 2 : let mut set = EnumSet::empty();
6271 2 : set.insert(CompactFlags::ForceImageLayerCreation);
6272 2 : set.insert(CompactFlags::ForceRepartition);
6273 2 : set
6274 2 : },
6275 2 : &ctx,
6276 2 : )
6277 39 : .await?; // force create an image layer for metadata keys
6278 2 : tenant
6279 2 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
6280 2 : .await?;
6281 2 : }
6282 2 :
6283 2 : let child = tenant
6284 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(lsn), &ctx)
6285 2 : .await
6286 2 : .unwrap();
6287 2 :
6288 2 : lsn.0 += 0x10;
6289 2 :
6290 2 : {
6291 2 : let mut writer = child.writer().await;
6292 2 : writer
6293 2 : .put(
6294 2 : base_key_child,
6295 2 : lsn,
6296 2 : &Value::Image(test_img("metadata key 2")),
6297 2 : &ctx,
6298 2 : )
6299 2 : .await?;
6300 2 : writer.finish_write(lsn);
6301 2 : drop(writer);
6302 2 :
6303 2 : child.freeze_and_flush().await?;
6304 2 :
6305 2 : child
6306 2 : .compact(
6307 2 : &cancel,
6308 2 : {
6309 2 : let mut set = EnumSet::empty();
6310 2 : set.insert(CompactFlags::ForceImageLayerCreation);
6311 2 : set.insert(CompactFlags::ForceRepartition);
6312 2 : set
6313 2 : },
6314 2 : &ctx,
6315 2 : )
6316 37 : .await?; // force create an image layer for metadata keys
6317 2 : tenant
6318 2 : .gc_iteration(Some(child.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
6319 2 : .await?;
6320 2 : }
6321 2 :
6322 12 : async fn get_vectored_impl_wrapper(
6323 12 : tline: &Arc<Timeline>,
6324 12 : key: Key,
6325 12 : lsn: Lsn,
6326 12 : ctx: &RequestContext,
6327 12 : ) -> Result<Option<Bytes>, GetVectoredError> {
6328 12 : let mut reconstruct_state = ValuesReconstructState::new();
6329 12 : let mut res = tline
6330 12 : .get_vectored_impl(
6331 12 : KeySpace::single(key..key.next()),
6332 12 : lsn,
6333 12 : &mut reconstruct_state,
6334 12 : ctx,
6335 12 : )
6336 8 : .await?;
6337 12 : Ok(res.pop_last().map(|(k, v)| {
6338 4 : assert_eq!(k, key);
6339 4 : v.unwrap()
6340 12 : }))
6341 12 : }
6342 2 :
6343 2 : // test vectored get on parent timeline
6344 2 : assert_eq!(
6345 4 : get_vectored_impl_wrapper(&tline, base_key, lsn, &ctx).await?,
6346 2 : Some(test_img("metadata key 1"))
6347 2 : );
6348 2 : assert_eq!(
6349 2 : get_vectored_impl_wrapper(&tline, base_key_child, lsn, &ctx).await?,
6350 2 : None
6351 2 : );
6352 2 : assert_eq!(
6353 2 : get_vectored_impl_wrapper(&tline, base_key_nonexist, lsn, &ctx).await?,
6354 2 : None
6355 2 : );
6356 2 :
6357 2 : // test vectored get on child timeline
6358 2 : assert_eq!(
6359 3 : get_vectored_impl_wrapper(&child, base_key, lsn, &ctx).await?,
6360 2 : None
6361 2 : );
6362 2 : assert_eq!(
6363 2 : get_vectored_impl_wrapper(&child, base_key_child, lsn, &ctx).await?,
6364 2 : Some(test_img("metadata key 2"))
6365 2 : );
6366 2 : assert_eq!(
6367 2 : get_vectored_impl_wrapper(&child, base_key_nonexist, lsn, &ctx).await?,
6368 2 : None
6369 2 : );
6370 2 :
6371 2 : Ok(())
6372 2 : }
6373 : }
|