Line data Source code
1 : //!
2 : //! Timeline repository implementation that keeps old data in files on disk, and
3 : //! the recent changes in memory. See tenant/*_layer.rs files.
4 : //! The functions here are responsible for locating the correct layer for the
5 : //! get/put call, walking back the timeline branching history as needed.
6 : //!
7 : //! The files are stored in the .neon/tenants/<tenant_id>/timelines/<timeline_id>
8 : //! directory. See docs/pageserver-storage.md for how the files are managed.
9 : //! In addition to the layer files, there is a metadata file in the same
10 : //! directory that contains information about the timeline, in particular its
11 : //! parent timeline, and the last LSN that has been written to disk.
12 : //!
13 :
14 : use anyhow::{bail, Context};
15 : use arc_swap::ArcSwap;
16 : use camino::Utf8Path;
17 : use camino::Utf8PathBuf;
18 : use enumset::EnumSet;
19 : use futures::stream::FuturesUnordered;
20 : use futures::FutureExt;
21 : use futures::StreamExt;
22 : use pageserver_api::models;
23 : use pageserver_api::models::TimelineState;
24 : use pageserver_api::models::WalRedoManagerStatus;
25 : use pageserver_api::shard::ShardIdentity;
26 : use pageserver_api::shard::ShardStripeSize;
27 : use pageserver_api::shard::TenantShardId;
28 : use remote_storage::DownloadError;
29 : use remote_storage::GenericRemoteStorage;
30 : use remote_storage::TimeoutOrCancel;
31 : use std::fmt;
32 : use storage_broker::BrokerClientChannel;
33 : use tokio::io::BufReader;
34 : use tokio::sync::watch;
35 : use tokio::task::JoinSet;
36 : use tokio_util::sync::CancellationToken;
37 : use tracing::*;
38 : use utils::backoff;
39 : use utils::completion;
40 : use utils::crashsafe::path_with_suffix_extension;
41 : use utils::failpoint_support;
42 : use utils::fs_ext;
43 : use utils::sync::gate::Gate;
44 : use utils::sync::gate::GateGuard;
45 : use utils::timeout::timeout_cancellable;
46 : use utils::timeout::TimeoutCancellableError;
47 : use utils::zstd::create_zst_tarball;
48 : use utils::zstd::extract_zst_tarball;
49 :
50 : use self::config::AttachedLocationConfig;
51 : use self::config::AttachmentMode;
52 : use self::config::LocationConf;
53 : use self::config::TenantConf;
54 : use self::delete::DeleteTenantFlow;
55 : use self::metadata::TimelineMetadata;
56 : use self::mgr::GetActiveTenantError;
57 : use self::mgr::GetTenantError;
58 : use self::mgr::TenantsMap;
59 : use self::remote_timeline_client::upload::upload_index_part;
60 : use self::remote_timeline_client::RemoteTimelineClient;
61 : use self::timeline::uninit::TimelineCreateGuard;
62 : use self::timeline::uninit::TimelineExclusionError;
63 : use self::timeline::uninit::UninitializedTimeline;
64 : use self::timeline::EvictionTaskTenantState;
65 : use self::timeline::TimelineResources;
66 : use self::timeline::WaitLsnError;
67 : use self::timeline::{GcCutoffs, GcInfo};
68 : use crate::config::PageServerConf;
69 : use crate::context::{DownloadBehavior, RequestContext};
70 : use crate::deletion_queue::DeletionQueueClient;
71 : use crate::deletion_queue::DeletionQueueError;
72 : use crate::import_datadir;
73 : use crate::is_uninit_mark;
74 : use crate::metrics::TENANT;
75 : use crate::metrics::{
76 : remove_tenant_metrics, BROKEN_TENANTS_SET, TENANT_STATE_METRIC, TENANT_SYNTHETIC_SIZE_METRIC,
77 : };
78 : use crate::repository::GcResult;
79 : use crate::task_mgr;
80 : use crate::task_mgr::TaskKind;
81 : use crate::tenant::config::LocationMode;
82 : use crate::tenant::config::TenantConfOpt;
83 : pub use crate::tenant::remote_timeline_client::index::IndexPart;
84 : use crate::tenant::remote_timeline_client::remote_initdb_archive_path;
85 : use crate::tenant::remote_timeline_client::MaybeDeletedIndexPart;
86 : use crate::tenant::remote_timeline_client::INITDB_PATH;
87 : use crate::tenant::storage_layer::DeltaLayer;
88 : use crate::tenant::storage_layer::ImageLayer;
89 : use crate::InitializationOrder;
90 : use std::collections::hash_map::Entry;
91 : use std::collections::BTreeSet;
92 : use std::collections::HashMap;
93 : use std::collections::HashSet;
94 : use std::fmt::Debug;
95 : use std::fmt::Display;
96 : use std::fs;
97 : use std::fs::File;
98 : use std::ops::Bound::Included;
99 : use std::sync::atomic::AtomicU64;
100 : use std::sync::atomic::Ordering;
101 : use std::sync::Arc;
102 : use std::sync::Mutex;
103 : use std::time::{Duration, Instant};
104 :
105 : use crate::span;
106 : use crate::tenant::timeline::delete::DeleteTimelineFlow;
107 : use crate::tenant::timeline::uninit::cleanup_timeline_directory;
108 : use crate::virtual_file::VirtualFile;
109 : use crate::walredo::PostgresRedoManager;
110 : use crate::TEMP_FILE_SUFFIX;
111 : use once_cell::sync::Lazy;
112 : pub use pageserver_api::models::TenantState;
113 : use tokio::sync::Semaphore;
114 :
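/// Semaphore bounding how many `initdb` invocations may run at once on this pageserver;
/// created with 8 permits below (descriptive comment added here; the bound itself comes
/// from the initializer on the next line).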
115 0 : static INIT_DB_SEMAPHORE: Lazy<Semaphore> = Lazy::new(|| Semaphore::new(8));
116 : use utils::{
117 : crashsafe,
118 : generation::Generation,
119 : id::TimelineId,
120 : lsn::{Lsn, RecordLsn},
121 : };
122 :
123 : /// Declare a failpoint that can use the `pause` failpoint action.
124 : /// We don't want to block the executor thread, hence we use spawn_blocking + await.
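///
/// A minimal usage sketch (the first failpoint name appears later in this file; the
/// conditional form takes any boolean expression and only fires when it evaluates to true,
/// with `some-failpoint-name` and `some_condition` being placeholders):
/// ```ignore
/// pausable_failpoint!("timeline-creation-after-uninit");
/// pausable_failpoint!("some-failpoint-name", some_condition);
/// ```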
125 : macro_rules! pausable_failpoint {
126 : ($name:literal) => {
127 : if cfg!(feature = "testing") {
128 : tokio::task::spawn_blocking({
129 : let current = tracing::Span::current();
130 4977 : move || {
131 4977 : let _entered = current.entered();
132 4977 : tracing::info!("at failpoint {}", $name);
133 : fail::fail_point!($name);
134 4977 : }
135 : })
136 : .await
137 : .expect("spawn_blocking");
138 : }
139 : };
140 : ($name:literal, $cond:expr) => {
141 : if cfg!(feature = "testing") {
142 : if $cond {
143 : pausable_failpoint!($name)
144 : }
145 : }
146 : };
147 : }
148 :
149 : pub mod blob_io;
150 : pub mod block_io;
151 : pub mod vectored_blob_io;
152 :
153 : pub mod disk_btree;
154 : pub(crate) mod ephemeral_file;
155 : pub mod layer_map;
156 :
157 : pub mod metadata;
158 : pub mod remote_timeline_client;
159 : pub mod storage_layer;
160 :
161 : pub mod config;
162 : pub mod delete;
163 : pub mod mgr;
164 : pub mod secondary;
165 : pub mod tasks;
166 : pub mod upload_queue;
167 :
168 : pub(crate) mod timeline;
169 :
170 : pub mod size;
171 :
172 : pub(crate) mod throttle;
173 :
174 : pub(crate) use crate::span::debug_assert_current_span_has_tenant_and_timeline_id;
175 : pub(crate) use timeline::{LogicalSizeCalculationCause, PageReconstructError, Timeline};
176 :
177 : // re-export for use in walreceiver
178 : pub use crate::tenant::timeline::WalReceiverInfo;
179 :
180 : /// The "tenants" part of `tenants/<tenant>/timelines...`
181 : pub const TENANTS_SEGMENT_NAME: &str = "tenants";
182 :
183 : /// The "timelines" part of the `.neon/tenants/<tenant_id>/timelines/<timeline_id>` directory prefix.
184 : pub const TIMELINES_SEGMENT_NAME: &str = "timelines";
185 :
186 : pub const TENANT_DELETED_MARKER_FILE_NAME: &str = "deleted";
187 :
188 : /// References to shared objects that are passed into each tenant, such
189 : /// as the shared remote storage client and process initialization state.
190 : #[derive(Clone)]
191 : pub struct TenantSharedResources {
192 : pub broker_client: storage_broker::BrokerClientChannel,
193 : pub remote_storage: Option<GenericRemoteStorage>,
194 : pub deletion_queue_client: DeletionQueueClient,
195 : }
196 :
197 : /// A [`Tenant`] is really an _attached_ tenant. The configuration
198 : /// for an attached tenant is a subset of the [`LocationConf`], represented
199 : /// in this struct.
200 : pub(super) struct AttachedTenantConf {
201 : tenant_conf: TenantConfOpt,
202 : location: AttachedLocationConfig,
203 : }
204 :
205 : impl AttachedTenantConf {
206 0 : fn new(tenant_conf: TenantConfOpt, location: AttachedLocationConfig) -> Self {
207 0 : Self {
208 0 : tenant_conf,
209 0 : location,
210 0 : }
211 0 : }
212 :
213 118 : fn try_from(location_conf: LocationConf) -> anyhow::Result<Self> {
214 118 : match &location_conf.mode {
215 118 : LocationMode::Attached(attach_conf) => Ok(Self {
216 118 : tenant_conf: location_conf.tenant_conf,
217 118 : location: *attach_conf,
218 118 : }),
219 : LocationMode::Secondary(_) => {
220 0 : anyhow::bail!("Attempted to construct AttachedTenantConf from a LocationConf in secondary mode")
221 : }
222 : }
223 118 : }
224 : }
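/// Per-timeline result of [`Tenant::preload`]: the timeline's remote client plus the
/// outcome of its `index_part.json` download.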
225 : struct TimelinePreload {
226 : timeline_id: TimelineId,
227 : client: RemoteTimelineClient,
228 : index_part: Result<MaybeDeletedIndexPart, DownloadError>,
229 : }
230 :
231 : pub(crate) struct TenantPreload {
232 : deleting: bool,
233 : timelines: HashMap<TimelineId, TimelinePreload>,
234 : }
235 :
236 : /// How a tenant is spawned. `Create` is a special mode for tenant creation that
237 : /// avoids trying to read anything from remote storage.
238 : pub(crate) enum SpawnMode {
239 : /// Activate as soon as possible
240 : Eager,
241 : /// Lazy activation in the background, with the option to skip the queue if the need comes up
242 : Lazy,
243 : /// Tenant has been created during the lifetime of this process
244 : Create,
245 : }
246 :
247 : ///
248 : /// Tenant consists of multiple timelines. Keep them in a hash table.
249 : ///
250 : pub struct Tenant {
251 : // Global pageserver config parameters
252 : pub conf: &'static PageServerConf,
253 :
254 : /// The value creation timestamp, used to measure activation delay, see:
255 : /// <https://github.com/neondatabase/neon/issues/4025>
256 : constructed_at: Instant,
257 :
258 : state: watch::Sender<TenantState>,
259 :
260 : // Overridden tenant-specific config parameters.
261 : // We keep the TenantConfOpt struct here to preserve the information
262 : // about parameters that are not set.
263 : // This is necessary to allow global config updates.
264 : tenant_conf: Arc<ArcSwap<AttachedTenantConf>>,
265 :
266 : tenant_shard_id: TenantShardId,
267 :
268 : // The detailed sharding information, beyond the number/count in tenant_shard_id
269 : shard_identity: ShardIdentity,
270 :
271 : /// The remote storage generation, used to protect S3 objects from split-brain.
272 : /// Does not change over the lifetime of the [`Tenant`] object.
273 : ///
274 : /// This duplicates the generation stored in LocationConf, but that structure is mutable:
275 : /// this copy enforces the invariant that the generation doesn't change during a Tenant's lifetime.
276 : generation: Generation,
277 :
278 : timelines: Mutex<HashMap<TimelineId, Arc<Timeline>>>,
279 :
280 : /// During timeline creation, we first insert the TimelineId into the
281 : /// creating map, then into `timelines`, then remove it from the creating map.
282 : /// **Lock order**: if acquiring both, acquire `timelines` before `timelines_creating`.
283 : timelines_creating: std::sync::Mutex<HashSet<TimelineId>>,
284 :
285 : // This mutex prevents creation of new timelines during GC.
286 : // Adding yet another mutex (in addition to `timelines`) is needed because holding
287 : // the `timelines` mutex for the whole GC iteration
288 : // may block `get_timeline`, `get_timelines_state`, and other timeline operations
289 : // for a long time, which in turn may cause replication connections to drop and
290 : // wait_for_lsn timeouts to expire.
291 : gc_cs: tokio::sync::Mutex<()>,
292 : walredo_mgr: Option<Arc<WalRedoManager>>,
293 :
294 : // provides access to timeline data sitting in the remote storage
295 : pub(crate) remote_storage: Option<GenericRemoteStorage>,
296 :
297 : // Access to global deletion queue for when this tenant wants to schedule a deletion
298 : deletion_queue_client: DeletionQueueClient,
299 :
300 : /// Cached logical sizes, updated on each [`Tenant::gather_size_inputs`].
301 : cached_logical_sizes: tokio::sync::Mutex<HashMap<(TimelineId, Lsn), u64>>,
302 : cached_synthetic_tenant_size: Arc<AtomicU64>,
303 :
304 : eviction_task_tenant_state: tokio::sync::Mutex<EvictionTaskTenantState>,
305 :
306 : /// If the tenant is in Activating state, notify this to encourage it
307 : /// to proceed to Active as soon as possible, rather than waiting for lazy
308 : /// background warmup.
309 : pub(crate) activate_now_sem: tokio::sync::Semaphore,
310 :
311 : pub(crate) delete_progress: Arc<tokio::sync::Mutex<DeleteTenantFlow>>,
312 :
313 : // Cancellation token fires when we have entered shutdown(). This is a parent of
314 : // Timelines' cancellation token.
315 : pub(crate) cancel: CancellationToken,
316 :
317 : // Users of the Tenant such as the page service must take this Gate to avoid
318 : // trying to use a Tenant which is shutting down.
319 : pub(crate) gate: Gate,
320 :
321 : /// Throttle applied at the top of [`Timeline::get`].
322 : /// All [`Tenant::timelines`] of a given [`Tenant`] instance share the same [`throttle::Throttle`] instance.
323 : pub(crate) timeline_get_throttle:
324 : Arc<throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>>,
325 :
326 : /// An ongoing timeline detach must be checked during attempts to GC or compact a timeline.
327 : ongoing_timeline_detach: std::sync::Mutex<Option<(TimelineId, utils::completion::Barrier)>>,
328 : }
329 :
330 : impl std::fmt::Debug for Tenant {
331 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
332 0 : write!(f, "{} ({})", self.tenant_shard_id, self.current_state())
333 0 : }
334 : }
335 :
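/// Dispatches WAL redo requests either to the production Postgres-based redo manager
/// or, in test builds, to the test redo manager.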
336 : pub(crate) enum WalRedoManager {
337 : Prod(PostgresRedoManager),
338 : #[cfg(test)]
339 : Test(harness::TestRedoManager),
340 : }
341 :
342 : impl From<PostgresRedoManager> for WalRedoManager {
343 0 : fn from(mgr: PostgresRedoManager) -> Self {
344 0 : Self::Prod(mgr)
345 0 : }
346 : }
347 :
348 : #[cfg(test)]
349 : impl From<harness::TestRedoManager> for WalRedoManager {
350 118 : fn from(mgr: harness::TestRedoManager) -> Self {
351 118 : Self::Test(mgr)
352 118 : }
353 : }
354 :
355 : impl WalRedoManager {
356 0 : pub(crate) fn maybe_quiesce(&self, idle_timeout: Duration) {
357 0 : match self {
358 0 : Self::Prod(mgr) => mgr.maybe_quiesce(idle_timeout),
359 0 : #[cfg(test)]
360 0 : Self::Test(_) => {
361 0 : // Not applicable to test redo manager
362 0 : }
363 0 : }
364 0 : }
365 :
366 : /// # Cancel-Safety
367 : ///
368 : /// This method is cancellation-safe.
369 6 : pub async fn request_redo(
370 6 : &self,
371 6 : key: crate::repository::Key,
372 6 : lsn: Lsn,
373 6 : base_img: Option<(Lsn, bytes::Bytes)>,
374 6 : records: Vec<(Lsn, crate::walrecord::NeonWalRecord)>,
375 6 : pg_version: u32,
376 6 : ) -> anyhow::Result<bytes::Bytes> {
377 6 : match self {
378 0 : Self::Prod(mgr) => {
379 0 : mgr.request_redo(key, lsn, base_img, records, pg_version)
380 0 : .await
381 : }
382 : #[cfg(test)]
383 6 : Self::Test(mgr) => {
384 6 : mgr.request_redo(key, lsn, base_img, records, pg_version)
385 0 : .await
386 : }
387 : }
388 6 : }
389 :
390 0 : pub(crate) fn status(&self) -> Option<WalRedoManagerStatus> {
391 0 : match self {
392 0 : WalRedoManager::Prod(m) => Some(m.status()),
393 0 : #[cfg(test)]
394 0 : WalRedoManager::Test(_) => None,
395 0 : }
396 0 : }
397 : }
398 :
399 0 : #[derive(Debug, thiserror::Error, PartialEq, Eq)]
400 : pub enum GetTimelineError {
401 : #[error("Timeline {tenant_id}/{timeline_id} is not active, state: {state:?}")]
402 : NotActive {
403 : tenant_id: TenantShardId,
404 : timeline_id: TimelineId,
405 : state: TimelineState,
406 : },
407 : #[error("Timeline {tenant_id}/{timeline_id} was not found")]
408 : NotFound {
409 : tenant_id: TenantShardId,
410 : timeline_id: TimelineId,
411 : },
412 : }
413 :
414 0 : #[derive(Debug, thiserror::Error)]
415 : pub enum LoadLocalTimelineError {
416 : #[error("FailedToLoad")]
417 : Load(#[source] anyhow::Error),
418 : #[error("FailedToResumeDeletion")]
419 : ResumeDeletion(#[source] anyhow::Error),
420 : }
421 :
422 0 : #[derive(thiserror::Error)]
423 : pub enum DeleteTimelineError {
424 : #[error("NotFound")]
425 : NotFound,
426 :
427 : #[error("HasChildren")]
428 : HasChildren(Vec<TimelineId>),
429 :
430 : #[error("Timeline deletion is already in progress")]
431 : AlreadyInProgress(Arc<tokio::sync::Mutex<DeleteTimelineFlow>>),
432 :
433 : #[error(transparent)]
434 : Other(#[from] anyhow::Error),
435 : }
436 :
437 : impl Debug for DeleteTimelineError {
438 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
439 0 : match self {
440 0 : Self::NotFound => write!(f, "NotFound"),
441 0 : Self::HasChildren(c) => f.debug_tuple("HasChildren").field(c).finish(),
442 0 : Self::AlreadyInProgress(_) => f.debug_tuple("AlreadyInProgress").finish(),
443 0 : Self::Other(e) => f.debug_tuple("Other").field(e).finish(),
444 : }
445 0 : }
446 : }
447 :
448 : pub enum SetStoppingError {
449 : AlreadyStopping(completion::Barrier),
450 : Broken,
451 : }
452 :
453 : impl Debug for SetStoppingError {
454 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
455 0 : match self {
456 0 : Self::AlreadyStopping(_) => f.debug_tuple("AlreadyStopping").finish(),
457 0 : Self::Broken => write!(f, "Broken"),
458 : }
459 0 : }
460 : }
461 :
462 0 : #[derive(thiserror::Error, Debug)]
463 : pub enum CreateTimelineError {
464 : #[error("creation of timeline with the given ID is in progress")]
465 : AlreadyCreating,
466 : #[error("timeline already exists with different parameters")]
467 : Conflict,
468 : #[error(transparent)]
469 : AncestorLsn(anyhow::Error),
470 : #[error("ancestor timeline is not active")]
471 : AncestorNotActive,
472 : #[error("tenant shutting down")]
473 : ShuttingDown,
474 : #[error(transparent)]
475 : Other(#[from] anyhow::Error),
476 : }
477 :
478 : #[derive(thiserror::Error, Debug)]
479 : enum InitdbError {
480 : Other(anyhow::Error),
481 : Cancelled,
482 : Spawn(std::io::Result<()>),
483 : Failed(std::process::ExitStatus, Vec<u8>),
484 : }
485 :
486 : impl fmt::Display for InitdbError {
487 0 : fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
488 0 : match self {
489 0 : InitdbError::Cancelled => write!(f, "Operation was cancelled"),
490 0 : InitdbError::Spawn(e) => write!(f, "Spawn error: {:?}", e),
491 0 : InitdbError::Failed(status, stderr) => write!(
492 0 : f,
493 0 : "Command failed with status {:?}: {}",
494 0 : status,
495 0 : String::from_utf8_lossy(stderr)
496 0 : ),
497 0 : InitdbError::Other(e) => write!(f, "Error: {:?}", e),
498 : }
499 0 : }
500 : }
501 :
502 : impl From<std::io::Error> for InitdbError {
503 0 : fn from(error: std::io::Error) -> Self {
504 0 : InitdbError::Spawn(Err(error))
505 0 : }
506 : }
507 :
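/// Why a `Timeline` struct is being constructed (normal load vs. deletion); passed to
/// `create_timeline_struct`, see e.g. `timeline_init_and_sync` below.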
508 : enum CreateTimelineCause {
509 : Load,
510 : Delete,
511 : }
512 :
513 : impl Tenant {
514 : /// Yet another helper for timeline initialization.
515 : ///
516 : /// - Initializes the Timeline struct and inserts it into the tenant's hash map
517 : /// - Scans the local timeline directory for layer files and builds the layer map
518 : /// - Downloads remote index file and adds remote files to the layer map
519 : /// - Schedules remote upload tasks for any files that are present locally but missing from remote storage.
520 : ///
521 : /// If the operation fails, the timeline is left in the tenant's hash map in Broken state. On success,
522 : /// it is marked as Active.
523 : #[allow(clippy::too_many_arguments)]
524 6 : async fn timeline_init_and_sync(
525 6 : &self,
526 6 : timeline_id: TimelineId,
527 6 : resources: TimelineResources,
528 6 : index_part: Option<IndexPart>,
529 6 : metadata: TimelineMetadata,
530 6 : ancestor: Option<Arc<Timeline>>,
531 6 : _ctx: &RequestContext,
532 6 : ) -> anyhow::Result<()> {
533 6 : let tenant_id = self.tenant_shard_id;
534 :
535 6 : let timeline = self.create_timeline_struct(
536 6 : timeline_id,
537 6 : &metadata,
538 6 : ancestor.clone(),
539 6 : resources,
540 6 : CreateTimelineCause::Load,
541 6 : )?;
542 6 : let disk_consistent_lsn = timeline.get_disk_consistent_lsn();
543 6 : anyhow::ensure!(
544 6 : disk_consistent_lsn.is_valid(),
545 0 : "Timeline {tenant_id}/{timeline_id} has invalid disk_consistent_lsn"
546 : );
547 6 : assert_eq!(
548 6 : disk_consistent_lsn,
549 6 : metadata.disk_consistent_lsn(),
550 0 : "these are used interchangeably"
551 : );
552 :
553 6 : if let Some(index_part) = index_part.as_ref() {
554 6 : timeline
555 6 : .remote_client
556 6 : .as_ref()
557 6 : .unwrap()
558 6 : .init_upload_queue(index_part)?;
559 0 : } else if self.remote_storage.is_some() {
560 : // No data on the remote storage, but we have local metadata file. We can end up
561 : // here with timeline_create being interrupted before finishing index part upload.
562 : // By doing what we do here, the index part upload is retried.
563 : // If control plane retries timeline creation in the meantime, the mgmt API handler
564 : // for timeline creation will coalesce on the upload we queue here.
565 : // FIXME: this branch should be dead code as we no longer write local metadata.
566 0 : let rtc = timeline.remote_client.as_ref().unwrap();
567 0 : rtc.init_upload_queue_for_empty_remote(&metadata)?;
568 0 : rtc.schedule_index_upload_for_full_metadata_update(&metadata)?;
569 0 : }
570 :
571 6 : timeline
572 6 : .load_layer_map(disk_consistent_lsn, index_part)
573 6 : .await
574 6 : .with_context(|| {
575 0 : format!("Failed to load layermap for timeline {tenant_id}/{timeline_id}")
576 6 : })?;
577 :
578 : {
579 : // avoiding holding it across awaits
580 6 : let mut timelines_accessor = self.timelines.lock().unwrap();
581 6 : match timelines_accessor.entry(timeline_id) {
582 : // We should never try and load the same timeline twice during startup
583 : Entry::Occupied(_) => {
584 0 : unreachable!(
585 0 : "Timeline {tenant_id}/{timeline_id} already exists in the tenant map"
586 0 : );
587 : }
588 6 : Entry::Vacant(v) => {
589 6 : v.insert(Arc::clone(&timeline));
590 6 : timeline.maybe_spawn_flush_loop();
591 6 : }
592 6 : }
593 6 : };
594 6 :
595 6 : // Sanity check: a timeline should have some content.
596 6 : anyhow::ensure!(
597 6 : ancestor.is_some()
598 4 : || timeline
599 4 : .layers
600 4 : .read()
601 0 : .await
602 4 : .layer_map()
603 4 : .iter_historic_layers()
604 4 : .next()
605 4 : .is_some(),
606 0 : "Timeline has no ancestor and no layer files"
607 : );
608 :
609 6 : Ok(())
610 6 : }
611 :
612 : /// Attach a tenant that's available in cloud storage.
613 : ///
614 : /// This returns quickly, after just creating the in-memory
615 : /// Tenant object and launching a background task to download
616 : /// the remote index files. On return, the tenant is most likely still in
617 : /// Attaching state, and it will become Active once the background task
618 : /// finishes. You can use wait_until_active() to wait for the task to
619 : /// complete.
620 : ///
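/// A rough call sketch (illustrative only; the argument values depend entirely on the
/// caller's context, and `SpawnMode::Lazy` is just one of the possible modes):
/// ```ignore
/// let tenant = Tenant::spawn(conf, tenant_shard_id, resources, attached_conf,
///                            shard_identity, None, tenants, SpawnMode::Lazy, &ctx)?;
/// // The tenant is most likely still Attaching at this point; wait for it to become
/// // Active, as described above, before relying on its timelines.
/// ```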
621 : #[allow(clippy::too_many_arguments)]
622 0 : pub(crate) fn spawn(
623 0 : conf: &'static PageServerConf,
624 0 : tenant_shard_id: TenantShardId,
625 0 : resources: TenantSharedResources,
626 0 : attached_conf: AttachedTenantConf,
627 0 : shard_identity: ShardIdentity,
628 0 : init_order: Option<InitializationOrder>,
629 0 : tenants: &'static std::sync::RwLock<TenantsMap>,
630 0 : mode: SpawnMode,
631 0 : ctx: &RequestContext,
632 0 : ) -> anyhow::Result<Arc<Tenant>> {
633 0 : let wal_redo_manager = Arc::new(WalRedoManager::from(PostgresRedoManager::new(
634 0 : conf,
635 0 : tenant_shard_id,
636 0 : )));
637 0 :
638 0 : let TenantSharedResources {
639 0 : broker_client,
640 0 : remote_storage,
641 0 : deletion_queue_client,
642 0 : } = resources;
643 0 :
644 0 : let attach_mode = attached_conf.location.attach_mode;
645 0 : let generation = attached_conf.location.generation;
646 0 :
647 0 : let tenant = Arc::new(Tenant::new(
648 0 : TenantState::Attaching,
649 0 : conf,
650 0 : attached_conf,
651 0 : shard_identity,
652 0 : Some(wal_redo_manager),
653 0 : tenant_shard_id,
654 0 : remote_storage.clone(),
655 0 : deletion_queue_client,
656 0 : ));
657 0 :
658 0 : // The attach task will carry a GateGuard, so that shutdown() reliably waits for it to drop out if
659 0 : // we shut down while attaching.
660 0 : let attach_gate_guard = tenant
661 0 : .gate
662 0 : .enter()
663 0 : .expect("We just created the Tenant: nothing else can have shut it down yet");
664 0 :
665 0 : // Do all the hard work in the background
666 0 : let tenant_clone = Arc::clone(&tenant);
667 0 : let ctx = ctx.detached_child(TaskKind::Attach, DownloadBehavior::Warn);
668 0 : task_mgr::spawn(
669 0 : &tokio::runtime::Handle::current(),
670 0 : TaskKind::Attach,
671 0 : Some(tenant_shard_id),
672 0 : None,
673 0 : "attach tenant",
674 : false,
675 0 : async move {
676 0 :
677 0 : info!(
678 : ?attach_mode,
679 0 : "Attaching tenant"
680 : );
681 :
682 0 : let _gate_guard = attach_gate_guard;
683 0 :
684 0 : // Is this tenant being spawned as part of process startup?
685 0 : let starting_up = init_order.is_some();
686 : scopeguard::defer! {
687 : if starting_up {
688 : TENANT.startup_complete.inc();
689 : }
690 : }
691 :
692 : // Ideally we should use Tenant::set_broken_no_wait, but it is not supposed to be used when tenant is in loading state.
693 : enum BrokenVerbosity {
694 : Error,
695 : Info
696 : }
697 0 : let make_broken =
698 0 : |t: &Tenant, err: anyhow::Error, verbosity: BrokenVerbosity| {
699 0 : match verbosity {
700 : BrokenVerbosity::Info => {
701 0 : info!("attach cancelled, setting tenant state to Broken: {err}");
702 : },
703 : BrokenVerbosity::Error => {
704 0 : error!("attach failed, setting tenant state to Broken: {err:?}");
705 : }
706 : }
707 0 : t.state.send_modify(|state| {
708 0 : // The Stopping case is for when we have passed control on to DeleteTenantFlow:
709 0 : // if it errors, we will call make_broken when tenant is already in Stopping.
710 0 : assert!(
711 0 : matches!(*state, TenantState::Attaching | TenantState::Stopping { .. }),
712 0 : "the attach task owns the tenant state until activation is complete"
713 : );
714 :
715 0 : *state = TenantState::broken_from_reason(err.to_string());
716 0 : });
717 0 : };
718 :
719 0 : let mut init_order = init_order;
720 0 : // take the completion because initial tenant loading will complete when all of
721 0 : // these tasks complete.
722 0 : let _completion = init_order
723 0 : .as_mut()
724 0 : .and_then(|x| x.initial_tenant_load.take());
725 0 : let remote_load_completion = init_order
726 0 : .as_mut()
727 0 : .and_then(|x| x.initial_tenant_load_remote.take());
728 :
729 : enum AttachType<'a> {
730 : /// We are attaching this tenant lazily in the background.
731 : Warmup {
732 : _permit: tokio::sync::SemaphorePermit<'a>,
733 : during_startup: bool
734 : },
735 : /// We are attaching this tenant as soon as we can, because for example an
736 : /// endpoint tried to access it.
737 : OnDemand,
738 : /// During normal operations after startup, we are attaching a tenant, and
739 : /// eager attach was requested.
740 : Normal,
741 : }
742 :
743 0 : let attach_type = if matches!(mode, SpawnMode::Lazy) {
744 : // Before doing any I/O, wait for at least one of:
745 : // - A client attempting to access to this tenant (on-demand loading)
746 : // - A permit becoming available in the warmup semaphore (background warmup)
747 :
748 : tokio::select!(
749 : permit = tenant_clone.activate_now_sem.acquire() => {
750 : let _ = permit.expect("activate_now_sem is never closed");
751 : tracing::info!("Activating tenant (on-demand)");
752 : AttachType::OnDemand
753 : },
754 : permit = conf.concurrent_tenant_warmup.inner().acquire() => {
755 : let _permit = permit.expect("concurrent_tenant_warmup semaphore is never closed");
756 : tracing::info!("Activating tenant (warmup)");
757 : AttachType::Warmup {
758 : _permit,
759 : during_startup: init_order.is_some()
760 : }
761 : }
762 : _ = tenant_clone.cancel.cancelled() => {
763 : // This is safe, but should be pretty rare: it is interesting if a tenant
764 : // stayed in Activating for such a long time that shutdown found it in
765 : // that state.
766 : tracing::info!(state=%tenant_clone.current_state(), "Tenant shut down before activation");
767 : // Make the tenant broken so that set_stopping will not hang waiting for it to leave
768 : // the Attaching state. This is an over-reaction (nothing really broke, the tenant is
769 : // just shutting down), but ensures progress.
770 : make_broken(&tenant_clone, anyhow::anyhow!("Shut down while Attaching"), BrokenVerbosity::Info);
771 : return Ok(());
772 : },
773 : )
774 : } else {
775 : // SpawnMode::{Create,Eager} always cause jumping ahead of the
776 : // concurrent_tenant_warmup queue
777 0 : AttachType::Normal
778 : };
779 :
780 0 : let preload = match (&mode, &remote_storage) {
781 : (SpawnMode::Create, _) => {
782 0 : None
783 : },
784 0 : (SpawnMode::Eager | SpawnMode::Lazy, Some(remote_storage)) => {
785 0 : let _preload_timer = TENANT.preload.start_timer();
786 0 : let res = tenant_clone
787 0 : .preload(remote_storage, task_mgr::shutdown_token())
788 0 : .await;
789 0 : match res {
790 0 : Ok(p) => Some(p),
791 0 : Err(e) => {
792 0 : make_broken(&tenant_clone, anyhow::anyhow!(e), BrokenVerbosity::Error);
793 0 : return Ok(());
794 : }
795 : }
796 : }
797 : (_, None) => {
798 0 : let _preload_timer = TENANT.preload.start_timer();
799 0 : None
800 : }
801 : };
802 :
803 : // Remote preload is complete.
804 0 : drop(remote_load_completion);
805 :
806 0 : let pending_deletion = {
807 0 : match DeleteTenantFlow::should_resume_deletion(
808 0 : conf,
809 0 : preload.as_ref().map(|p| p.deleting).unwrap_or(false),
810 0 : &tenant_clone,
811 0 : )
812 0 : .await
813 : {
814 0 : Ok(should_resume_deletion) => should_resume_deletion,
815 0 : Err(err) => {
816 0 : make_broken(&tenant_clone, anyhow::anyhow!(err), BrokenVerbosity::Error);
817 0 : return Ok(());
818 : }
819 : }
820 : };
821 :
822 0 : info!("pending_deletion {}", pending_deletion.is_some());
823 :
824 0 : if let Some(deletion) = pending_deletion {
825 : // as we are no longer loading, signal completion by dropping
826 : // the completion while we resume deletion
827 0 : drop(_completion);
828 0 : let background_jobs_can_start =
829 0 : init_order.as_ref().map(|x| &x.background_jobs_can_start);
830 0 : if let Some(background) = background_jobs_can_start {
831 0 : info!("waiting for backgound jobs barrier");
832 0 : background.clone().wait().await;
833 0 : info!("ready for backgound jobs barrier");
834 0 : }
835 :
836 0 : let deleted = DeleteTenantFlow::resume_from_attach(
837 0 : deletion,
838 0 : &tenant_clone,
839 0 : preload,
840 0 : tenants,
841 0 : &ctx,
842 0 : )
843 0 : .await;
844 :
845 0 : if let Err(e) = deleted {
846 0 : make_broken(&tenant_clone, anyhow::anyhow!(e), BrokenVerbosity::Error);
847 0 : }
848 :
849 0 : return Ok(());
850 0 : }
851 :
852 : // We will time the duration of the attach phase unless this is a creation (attach will do no work)
853 0 : let attached = {
854 0 : let _attach_timer = match mode {
855 0 : SpawnMode::Create => None,
856 0 : SpawnMode::Eager | SpawnMode::Lazy => Some(TENANT.attach.start_timer()),
857 : };
858 0 : tenant_clone.attach(preload, mode, &ctx).await
859 : };
860 :
861 0 : match attached {
862 : Ok(()) => {
863 0 : info!("attach finished, activating");
864 0 : tenant_clone.activate(broker_client, None, &ctx);
865 : }
866 0 : Err(e) => {
867 0 : make_broken(&tenant_clone, anyhow::anyhow!(e), BrokenVerbosity::Error);
868 0 : }
869 : }
870 :
871 : // If we are doing an opportunistic warmup attachment at startup, initialize
872 : // logical size at the same time. This is better than starting a bunch of idle tenants
873 : // with cold caches and then coming back later to initialize their logical sizes.
874 : //
875 : // It also prevents the warmup process from competing with the concurrency limit on
876 : // logical size calculations: if the logical size calculation semaphore is saturated,
877 : // then warmup will wait for that before proceeding to the next tenant.
878 0 : if matches!(attach_type, AttachType::Warmup { during_startup: true, .. }) {
879 0 : let mut futs: FuturesUnordered<_> = tenant_clone.timelines.lock().unwrap().values().cloned().map(|t| t.await_initial_logical_size()).collect();
880 0 : tracing::info!("Waiting for initial logical sizes while warming up...");
881 0 : while futs.next().await.is_some() {}
882 0 : tracing::info!("Warm-up complete");
883 0 : }
884 :
885 0 : Ok(())
886 0 : }
887 0 : .instrument(tracing::info_span!(parent: None, "attach", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), gen=?generation)),
888 : );
889 0 : Ok(tenant)
890 0 : }
891 :
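/// List this tenant's timelines in remote storage and download their `index_part.json`
/// files, without mutating any local timeline state yet. Also records whether the
/// tenant-level "deleted" marker is present. The result is consumed by [`Tenant::attach`].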
892 236 : #[instrument(skip_all)]
893 : pub(crate) async fn preload(
894 : self: &Arc<Self>,
895 : remote_storage: &GenericRemoteStorage,
896 : cancel: CancellationToken,
897 : ) -> anyhow::Result<TenantPreload> {
898 : span::debug_assert_current_span_has_tenant_id();
899 : // Get list of remote timelines
900 : // download index files for every tenant timeline
901 : info!("listing remote timelines");
902 : let (remote_timeline_ids, other_keys) = remote_timeline_client::list_remote_timelines(
903 : remote_storage,
904 : self.tenant_shard_id,
905 : cancel.clone(),
906 : )
907 : .await?;
908 :
909 : let deleting = other_keys.contains(TENANT_DELETED_MARKER_FILE_NAME);
910 : info!(
911 : "found {} timelines, deleting={}",
912 : remote_timeline_ids.len(),
913 : deleting
914 : );
915 :
916 : for k in other_keys {
917 : if k != TENANT_DELETED_MARKER_FILE_NAME {
918 : warn!("Unexpected non timeline key {k}");
919 : }
920 : }
921 :
922 : Ok(TenantPreload {
923 : deleting,
924 : timelines: Self::load_timeline_metadata(
925 : self,
926 : remote_timeline_ids,
927 : remote_storage,
928 : cancel,
929 : )
930 : .await?,
931 : })
932 : }
933 :
934 : ///
935 : /// Background task that downloads all data for a tenant and brings it to Active state.
936 : ///
937 : /// No background tasks are started as part of this routine.
938 : ///
939 118 : async fn attach(
940 118 : self: &Arc<Tenant>,
941 118 : preload: Option<TenantPreload>,
942 118 : mode: SpawnMode,
943 118 : ctx: &RequestContext,
944 118 : ) -> anyhow::Result<()> {
945 118 : span::debug_assert_current_span_has_tenant_id();
946 118 :
947 118 : failpoint_support::sleep_millis_async!("before-attaching-tenant");
948 :
949 118 : let preload = match (preload, mode) {
950 118 : (Some(p), _) => p,
951 0 : (None, SpawnMode::Create) => TenantPreload {
952 0 : deleting: false,
953 0 : timelines: HashMap::new(),
954 0 : },
955 : (None, _) => {
956 0 : anyhow::bail!("local-only deployment is no longer supported, https://github.com/neondatabase/neon/issues/5624");
957 : }
958 : };
959 :
960 118 : let mut timelines_to_resume_deletions = vec![];
961 118 :
962 118 : let mut remote_index_and_client = HashMap::new();
963 118 : let mut timeline_ancestors = HashMap::new();
964 118 : let mut existent_timelines = HashSet::new();
965 124 : for (timeline_id, preload) in preload.timelines {
966 6 : let index_part = match preload.index_part {
967 6 : Ok(i) => {
968 6 : debug!("remote index part exists for timeline {timeline_id}");
969 : // We found index_part on the remote, this is the standard case.
970 6 : existent_timelines.insert(timeline_id);
971 6 : i
972 : }
973 : Err(DownloadError::NotFound) => {
974 : // There is no index_part on the remote. We only get here
975 : // if there is some prefix for the timeline in the remote storage.
976 : // This can e.g. be the initdb.tar.zst archive, maybe a
977 : // remnant from a prior incomplete creation or deletion attempt.
978 : // Delete the local directory, since the deciding criterion for a
979 : // timeline's existence is the presence of index_part.
980 0 : info!(%timeline_id, "index_part not found on remote");
981 0 : continue;
982 : }
983 0 : Err(e) => {
984 0 : // Some (possibly ephemeral) error happened during index_part download.
985 0 : // Pretend the timeline exists to not delete the timeline directory,
986 0 : // as it might be a temporary issue and we don't want to re-download
987 0 : // everything after it resolves.
988 0 : warn!(%timeline_id, "Failed to load index_part from remote storage, failed creation? ({e})");
989 :
990 0 : existent_timelines.insert(timeline_id);
991 0 : continue;
992 : }
993 : };
994 6 : match index_part {
995 6 : MaybeDeletedIndexPart::IndexPart(index_part) => {
996 6 : timeline_ancestors.insert(timeline_id, index_part.metadata.clone());
997 6 : remote_index_and_client.insert(timeline_id, (index_part, preload.client));
998 6 : }
999 0 : MaybeDeletedIndexPart::Deleted(index_part) => {
1000 0 : info!(
1001 0 : "timeline {} is deleted, picking to resume deletion",
1002 : timeline_id
1003 : );
1004 0 : timelines_to_resume_deletions.push((timeline_id, index_part, preload.client));
1005 : }
1006 : }
1007 : }
1008 :
1009 : // For every timeline, download the metadata file, scan the local directory,
1010 : // and build a layer map that contains an entry for each remote and local
1011 : // layer file.
1012 118 : let sorted_timelines = tree_sort_timelines(timeline_ancestors, |m| m.ancestor_timeline())?;
1013 124 : for (timeline_id, remote_metadata) in sorted_timelines {
1014 6 : let (index_part, remote_client) = remote_index_and_client
1015 6 : .remove(&timeline_id)
1016 6 : .expect("just put it in above");
1017 6 :
1018 6 : // TODO again handle early failure
1019 6 : self.load_remote_timeline(
1020 6 : timeline_id,
1021 6 : index_part,
1022 6 : remote_metadata,
1023 6 : TimelineResources {
1024 6 : remote_client: Some(remote_client),
1025 6 : deletion_queue_client: self.deletion_queue_client.clone(),
1026 6 : timeline_get_throttle: self.timeline_get_throttle.clone(),
1027 6 : },
1028 6 : ctx,
1029 6 : )
1030 12 : .await
1031 6 : .with_context(|| {
1032 0 : format!(
1033 0 : "failed to load remote timeline {} for tenant {}",
1034 0 : timeline_id, self.tenant_shard_id
1035 0 : )
1036 6 : })?;
1037 : }
1038 :
1039 : // Walk through deleted timelines, resume deletion
1040 118 : for (timeline_id, index_part, remote_timeline_client) in timelines_to_resume_deletions {
1041 0 : remote_timeline_client
1042 0 : .init_upload_queue_stopped_to_continue_deletion(&index_part)
1043 0 : .context("init queue stopped")
1044 0 : .map_err(LoadLocalTimelineError::ResumeDeletion)?;
1045 :
1046 0 : DeleteTimelineFlow::resume_deletion(
1047 0 : Arc::clone(self),
1048 0 : timeline_id,
1049 0 : &index_part.metadata,
1050 0 : Some(remote_timeline_client),
1051 0 : self.deletion_queue_client.clone(),
1052 0 : )
1053 0 : .instrument(tracing::info_span!("timeline_delete", %timeline_id))
1054 0 : .await
1055 0 : .context("resume_deletion")
1056 0 : .map_err(LoadLocalTimelineError::ResumeDeletion)?;
1057 : }
1058 :
1059 : // The local filesystem contents are a cache of what's in the remote IndexPart;
1060 : // IndexPart is the source of truth.
1061 118 : self.clean_up_timelines(&existent_timelines)?;
1062 :
1063 118 : fail::fail_point!("attach-before-activate", |_| {
1064 0 : anyhow::bail!("attach-before-activate");
1065 118 : });
1066 118 : failpoint_support::sleep_millis_async!("attach-before-activate-sleep", &self.cancel);
1067 :
1068 118 : info!("Done");
1069 :
1070 118 : Ok(())
1071 118 : }
1072 :
1073 : /// Check for any local timeline directories that are temporary, or do not correspond to a
1074 : /// timeline that still exists: this can happen if we crashed during a deletion/creation, or
1075 : /// if a timeline was deleted while the tenant was attached to a different pageserver.
1076 118 : fn clean_up_timelines(&self, existent_timelines: &HashSet<TimelineId>) -> anyhow::Result<()> {
1077 118 : let timelines_dir = self.conf.timelines_path(&self.tenant_shard_id);
1078 :
1079 118 : let entries = match timelines_dir.read_dir_utf8() {
1080 118 : Ok(d) => d,
1081 0 : Err(e) => {
1082 0 : if e.kind() == std::io::ErrorKind::NotFound {
1083 0 : return Ok(());
1084 : } else {
1085 0 : return Err(e).context("list timelines directory for tenant");
1086 : }
1087 : }
1088 : };
1089 :
1090 126 : for entry in entries {
1091 8 : let entry = entry.context("read timeline dir entry")?;
1092 8 : let entry_path = entry.path();
1093 :
1094 8 : let purge = if crate::is_temporary(entry_path)
1095 : // TODO: remove uninit mark code (https://github.com/neondatabase/neon/issues/5718)
1096 8 : || is_uninit_mark(entry_path)
1097 8 : || crate::is_delete_mark(entry_path)
1098 : {
1099 0 : true
1100 : } else {
1101 8 : match TimelineId::try_from(entry_path.file_name()) {
1102 8 : Ok(i) => {
1103 8 : // Purge if the timeline ID does not exist in remote storage: remote storage is the authority.
1104 8 : !existent_timelines.contains(&i)
1105 : }
1106 0 : Err(e) => {
1107 0 : tracing::warn!(
1108 0 : "Unparseable directory in timelines directory: {entry_path}, ignoring ({e})"
1109 : );
1110 : // Do not purge junk: if we don't recognize it, be cautious and leave it for a human.
1111 0 : false
1112 : }
1113 : }
1114 : };
1115 :
1116 8 : if purge {
1117 2 : tracing::info!("Purging stale timeline dentry {entry_path}");
1118 2 : if let Err(e) = match entry.file_type() {
1119 2 : Ok(t) => if t.is_dir() {
1120 2 : std::fs::remove_dir_all(entry_path)
1121 : } else {
1122 0 : std::fs::remove_file(entry_path)
1123 : }
1124 2 : .or_else(fs_ext::ignore_not_found),
1125 0 : Err(e) => Err(e),
1126 : } {
1127 0 : tracing::warn!("Failed to purge stale timeline dentry {entry_path}: {e}");
1128 2 : }
1129 6 : }
1130 : }
1131 :
1132 118 : Ok(())
1133 118 : }
1134 :
1135 : /// Get sum of all remote timelines sizes
1136 : ///
1137 : /// This function relies on the index_part instead of listing the remote storage
1138 0 : pub fn remote_size(&self) -> u64 {
1139 0 : let mut size = 0;
1140 :
1141 0 : for timeline in self.list_timelines() {
1142 0 : if let Some(remote_client) = &timeline.remote_client {
1143 0 : size += remote_client.get_remote_physical_size();
1144 0 : }
1145 : }
1146 :
1147 0 : size
1148 0 : }
1149 :
1150 12 : #[instrument(skip_all, fields(timeline_id=%timeline_id))]
1151 : async fn load_remote_timeline(
1152 : &self,
1153 : timeline_id: TimelineId,
1154 : index_part: IndexPart,
1155 : remote_metadata: TimelineMetadata,
1156 : resources: TimelineResources,
1157 : ctx: &RequestContext,
1158 : ) -> anyhow::Result<()> {
1159 : span::debug_assert_current_span_has_tenant_id();
1160 :
1161 : info!("downloading index file for timeline {}", timeline_id);
1162 : tokio::fs::create_dir_all(self.conf.timeline_path(&self.tenant_shard_id, &timeline_id))
1163 : .await
1164 : .context("Failed to create new timeline directory")?;
1165 :
1166 : let ancestor = if let Some(ancestor_id) = remote_metadata.ancestor_timeline() {
1167 : let timelines = self.timelines.lock().unwrap();
1168 : Some(Arc::clone(timelines.get(&ancestor_id).ok_or_else(
1169 0 : || {
1170 0 : anyhow::anyhow!(
1171 0 : "cannot find ancestor timeline {ancestor_id} for timeline {timeline_id}"
1172 0 : )
1173 0 : },
1174 : )?))
1175 : } else {
1176 : None
1177 : };
1178 :
1179 : self.timeline_init_and_sync(
1180 : timeline_id,
1181 : resources,
1182 : Some(index_part),
1183 : remote_metadata,
1184 : ancestor,
1185 : ctx,
1186 : )
1187 : .await
1188 : }
1189 :
1190 : /// Create a placeholder Tenant object for a broken tenant
1191 0 : pub fn create_broken_tenant(
1192 0 : conf: &'static PageServerConf,
1193 0 : tenant_shard_id: TenantShardId,
1194 0 : reason: String,
1195 0 : ) -> Arc<Tenant> {
1196 0 : Arc::new(Tenant::new(
1197 0 : TenantState::Broken {
1198 0 : reason,
1199 0 : backtrace: String::new(),
1200 0 : },
1201 0 : conf,
1202 0 : AttachedTenantConf::try_from(LocationConf::default()).unwrap(),
1203 0 : // Shard identity isn't meaningful for a broken tenant: it's just a placeholder
1204 0 : // to occupy the slot for this TenantShardId.
1205 0 : ShardIdentity::broken(tenant_shard_id.shard_number, tenant_shard_id.shard_count),
1206 0 : None,
1207 0 : tenant_shard_id,
1208 0 : None,
1209 0 : DeletionQueueClient::broken(),
1210 0 : ))
1211 0 : }
1212 :
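/// Download `index_part.json` for each of the given timelines concurrently (one
/// [`JoinSet`] task per timeline), bailing out early if `cancel` fires.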
1213 118 : async fn load_timeline_metadata(
1214 118 : self: &Arc<Tenant>,
1215 118 : timeline_ids: HashSet<TimelineId>,
1216 118 : remote_storage: &GenericRemoteStorage,
1217 118 : cancel: CancellationToken,
1218 118 : ) -> anyhow::Result<HashMap<TimelineId, TimelinePreload>> {
1219 118 : let mut part_downloads = JoinSet::new();
1220 124 : for timeline_id in timeline_ids {
1221 6 : let client = RemoteTimelineClient::new(
1222 6 : remote_storage.clone(),
1223 6 : self.deletion_queue_client.clone(),
1224 6 : self.conf,
1225 6 : self.tenant_shard_id,
1226 6 : timeline_id,
1227 6 : self.generation,
1228 6 : );
1229 6 : let cancel_clone = cancel.clone();
1230 6 : part_downloads.spawn(
1231 6 : async move {
1232 6 : debug!("starting index part download");
1233 :
1234 23 : let index_part = client.download_index_file(&cancel_clone).await;
1235 :
1236 6 : debug!("finished index part download");
1237 :
1238 6 : Result::<_, anyhow::Error>::Ok(TimelinePreload {
1239 6 : client,
1240 6 : timeline_id,
1241 6 : index_part,
1242 6 : })
1243 6 : }
1244 6 : .map(move |res| {
1245 6 : res.with_context(|| format!("download index part for timeline {timeline_id}"))
1246 6 : })
1247 6 : .instrument(info_span!("download_index_part", %timeline_id)),
1248 : );
1249 : }
1250 :
1251 118 : let mut timeline_preloads: HashMap<TimelineId, TimelinePreload> = HashMap::new();
1252 :
1253 124 : loop {
1254 124 : tokio::select!(
1255 : next = part_downloads.join_next() => {
1256 : match next {
1257 : Some(result) => {
1258 : let preload_result = result.context("join preload task")?;
1259 : let preload = preload_result?;
1260 : timeline_preloads.insert(preload.timeline_id, preload);
1261 : },
1262 : None => {
1263 : break;
1264 : }
1265 : }
1266 : },
1267 : _ = cancel.cancelled() => {
1268 : anyhow::bail!("Cancelled while waiting for remote index download")
1269 : }
1270 124 : )
1271 124 : }
1272 :
1273 118 : Ok(timeline_preloads)
1274 118 : }
1275 :
1276 4 : pub(crate) fn tenant_shard_id(&self) -> TenantShardId {
1277 4 : self.tenant_shard_id
1278 4 : }
1279 :
1280 : /// Get Timeline handle for given Neon timeline ID.
1281 : /// This function is idempotent. It doesn't change internal state in any way.
1282 3506 : pub fn get_timeline(
1283 3506 : &self,
1284 3506 : timeline_id: TimelineId,
1285 3506 : active_only: bool,
1286 3506 : ) -> Result<Arc<Timeline>, GetTimelineError> {
1287 3506 : let timelines_accessor = self.timelines.lock().unwrap();
1288 3506 : let timeline = timelines_accessor
1289 3506 : .get(&timeline_id)
1290 3506 : .ok_or(GetTimelineError::NotFound {
1291 3506 : tenant_id: self.tenant_shard_id,
1292 3506 : timeline_id,
1293 3506 : })?;
1294 :
1295 3504 : if active_only && !timeline.is_active() {
1296 0 : Err(GetTimelineError::NotActive {
1297 0 : tenant_id: self.tenant_shard_id,
1298 0 : timeline_id,
1299 0 : state: timeline.current_state(),
1300 0 : })
1301 : } else {
1302 3504 : Ok(Arc::clone(timeline))
1303 : }
1304 3506 : }
1305 :
1306 : /// Lists timelines the tenant contains.
1307 : /// It is up to the tenant's implementation to omit certain timelines that are not considered ready for use.
1308 0 : pub fn list_timelines(&self) -> Vec<Arc<Timeline>> {
1309 0 : self.timelines
1310 0 : .lock()
1311 0 : .unwrap()
1312 0 : .values()
1313 0 : .map(Arc::clone)
1314 0 : .collect()
1315 0 : }
1316 :
1317 0 : pub fn list_timeline_ids(&self) -> Vec<TimelineId> {
1318 0 : self.timelines.lock().unwrap().keys().cloned().collect()
1319 0 : }
1320 :
1321 : /// This is used to create the initial 'main' timeline during bootstrapping,
1322 : /// or when importing a new base backup. The caller is expected to load an
1323 : /// initial image of the datadir to the new timeline after this.
1324 : ///
1325 : /// Until that happens, the on-disk state is invalid (disk_consistent_lsn=Lsn(0))
1326 : /// and the timeline will fail to load at a restart.
1327 : ///
1328 : /// For tests, use `DatadirModification::init_empty_test_timeline` + `commit` to setup the
1329 : /// minimum amount of keys required to get a writable timeline.
1330 : /// (Without it, `put` might fail due to `repartition` failing.)
1331 110 : pub(crate) async fn create_empty_timeline(
1332 110 : &self,
1333 110 : new_timeline_id: TimelineId,
1334 110 : initdb_lsn: Lsn,
1335 110 : pg_version: u32,
1336 110 : _ctx: &RequestContext,
1337 110 : ) -> anyhow::Result<UninitializedTimeline> {
1338 110 : anyhow::ensure!(
1339 110 : self.is_active(),
1340 0 : "Cannot create empty timelines on inactive tenant"
1341 : );
1342 :
1343 : // Protect against concurrent attempts to use this TimelineId
1344 110 : let create_guard = self.create_timeline_create_guard(new_timeline_id)?;
1345 :
1346 108 : let new_metadata = TimelineMetadata::new(
1347 108 : // Initialize disk_consistent LSN to 0. The caller must import some data to
1348 108 : // make it valid before calling finish_creation().
1349 108 : Lsn(0),
1350 108 : None,
1351 108 : None,
1352 108 : Lsn(0),
1353 108 : initdb_lsn,
1354 108 : initdb_lsn,
1355 108 : pg_version,
1356 108 : );
1357 108 : self.prepare_new_timeline(
1358 108 : new_timeline_id,
1359 108 : &new_metadata,
1360 108 : create_guard,
1361 108 : initdb_lsn,
1362 108 : None,
1363 108 : )
1364 0 : .await
1365 110 : }
1366 :
1367 : /// Helper for unit tests to create an empty timeline.
1368 : ///
1369 : /// The timeline has state value `Active`, but its background loops are not running.
1370 : // This makes the various functions which anyhow::ensure! for Active state work in tests.
1371 : // Our current tests don't need the background loops.
1372 : #[cfg(test)]
1373 100 : pub async fn create_test_timeline(
1374 100 : &self,
1375 100 : new_timeline_id: TimelineId,
1376 100 : initdb_lsn: Lsn,
1377 100 : pg_version: u32,
1378 100 : ctx: &RequestContext,
1379 100 : ) -> anyhow::Result<Arc<Timeline>> {
1380 100 : let uninit_tl = self
1381 100 : .create_empty_timeline(new_timeline_id, initdb_lsn, pg_version, ctx)
1382 0 : .await?;
1383 100 : let tline = uninit_tl.raw_timeline().expect("we just created it");
1384 100 : assert_eq!(tline.get_last_record_lsn(), Lsn(0));
1385 :
1386 : // Setup minimum keys required for the timeline to be usable.
1387 100 : let mut modification = tline.begin_modification(initdb_lsn);
1388 100 : modification
1389 100 : .init_empty_test_timeline()
1390 100 : .context("init_empty_test_timeline")?;
1391 100 : modification
1392 100 : .commit(ctx)
1393 98 : .await
1394 100 : .context("commit init_empty_test_timeline modification")?;
1395 :
1396 : // Flush to disk so that uninit_tl's check for valid disk_consistent_lsn passes.
1397 100 : tline.maybe_spawn_flush_loop();
1398 100 : tline.freeze_and_flush().await.context("freeze_and_flush")?;
1399 :
1400 : // Make sure the freeze_and_flush reaches remote storage.
1401 100 : tline
1402 100 : .remote_client
1403 100 : .as_ref()
1404 100 : .unwrap()
1405 100 : .wait_completion()
1406 88 : .await
1407 100 : .unwrap();
1408 :
1409 100 : let tl = uninit_tl.finish_creation()?;
1410 : // The non-test code would call tl.activate() here.
1411 100 : tl.set_state(TimelineState::Active);
1412 100 : Ok(tl)
1413 100 : }
1414 :
1415 : /// Create a new timeline.
1416 : ///
1417 : /// Returns a reference to the new Timeline object.
1418 : ///
1419 : /// If a timeline with the requested ID (`new_timeline_id`) already exists, the call is
1420 : /// idempotent: the existing timeline is returned if its parameters match, otherwise `CreateTimelineError::Conflict` is returned.
1421 : #[allow(clippy::too_many_arguments)]
1422 0 : pub(crate) async fn create_timeline(
1423 0 : self: &Arc<Tenant>,
1424 0 : new_timeline_id: TimelineId,
1425 0 : ancestor_timeline_id: Option<TimelineId>,
1426 0 : mut ancestor_start_lsn: Option<Lsn>,
1427 0 : pg_version: u32,
1428 0 : load_existing_initdb: Option<TimelineId>,
1429 0 : broker_client: storage_broker::BrokerClientChannel,
1430 0 : ctx: &RequestContext,
1431 0 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
1432 0 : if !self.is_active() {
1433 0 : if matches!(self.current_state(), TenantState::Stopping { .. }) {
1434 0 : return Err(CreateTimelineError::ShuttingDown);
1435 : } else {
1436 0 : return Err(CreateTimelineError::Other(anyhow::anyhow!(
1437 0 : "Cannot create timelines on inactive tenant"
1438 0 : )));
1439 : }
1440 0 : }
1441 :
1442 0 : let _gate = self
1443 0 : .gate
1444 0 : .enter()
1445 0 : .map_err(|_| CreateTimelineError::ShuttingDown)?;
1446 :
1447 : // Get exclusive access to the timeline ID: this ensures that it does not already exist,
1448 : // and that no other creation attempts will be allowed in while we are working.
1449 0 : let create_guard = match self.create_timeline_create_guard(new_timeline_id) {
1450 0 : Ok(m) => m,
1451 : Err(TimelineExclusionError::AlreadyCreating) => {
1452 : // Creation is in progress, we cannot create it again, and we cannot
1453 : // check if this request matches the existing one, so caller must try
1454 : // again later.
1455 0 : return Err(CreateTimelineError::AlreadyCreating);
1456 : }
1457 0 : Err(TimelineExclusionError::Other(e)) => {
1458 0 : return Err(CreateTimelineError::Other(e));
1459 : }
1460 0 : Err(TimelineExclusionError::AlreadyExists(existing)) => {
1461 0 : debug!("timeline {new_timeline_id} already exists");
1462 :
1463 : // Idempotency: creating the same timeline twice is not an error, unless
1464 : // the second creation has different parameters.
1465 0 : if existing.get_ancestor_timeline_id() != ancestor_timeline_id
1466 0 : || existing.pg_version != pg_version
1467 0 : || (ancestor_start_lsn.is_some()
1468 0 : && ancestor_start_lsn != Some(existing.get_ancestor_lsn()))
1469 : {
1470 0 : return Err(CreateTimelineError::Conflict);
1471 0 : }
1472 :
1473 0 : if let Some(remote_client) = existing.remote_client.as_ref() {
1474 : // Wait for uploads to complete, so that when we return Ok, the timeline
1475 : // is known to be durable on remote storage. Just like we do at the end of
1476 : // this function, after we have created the timeline ourselves.
1477 : //
1478 : // We only really care that the initial version of `index_part.json` has
1479 : // been uploaded. That's enough to remember that the timeline
1480 : // exists. However, there is no function to wait specifically for that so
1481 : // we just wait for all in-progress uploads to finish.
1482 0 : remote_client
1483 0 : .wait_completion()
1484 0 : .await
1485 0 : .context("wait for timeline uploads to complete")?;
1486 0 : }
1487 :
1488 0 : return Ok(existing);
1489 : }
1490 : };
1491 :
1492 : pausable_failpoint!("timeline-creation-after-uninit");
1493 :
1494 0 : let loaded_timeline = match ancestor_timeline_id {
1495 0 : Some(ancestor_timeline_id) => {
1496 0 : let ancestor_timeline = self
1497 0 : .get_timeline(ancestor_timeline_id, false)
1498 0 : .context("Cannot branch off the timeline that's not present in pageserver")?;
1499 :
1500 : // instead of waiting around, just deny the request because ancestor is not yet
1501 : // ready for other purposes either.
1502 0 : if !ancestor_timeline.is_active() {
1503 0 : return Err(CreateTimelineError::AncestorNotActive);
1504 0 : }
1505 :
1506 0 : if let Some(lsn) = ancestor_start_lsn.as_mut() {
1507 0 : *lsn = lsn.align();
1508 0 :
1509 0 : let ancestor_ancestor_lsn = ancestor_timeline.get_ancestor_lsn();
1510 0 : if ancestor_ancestor_lsn > *lsn {
1511 : // can we safely just branch from the ancestor instead?
1512 0 : return Err(CreateTimelineError::AncestorLsn(anyhow::anyhow!(
1513 0 : "invalid start lsn {} for ancestor timeline {}: less than timeline ancestor lsn {}",
1514 0 : lsn,
1515 0 : ancestor_timeline_id,
1516 0 : ancestor_ancestor_lsn,
1517 0 : )));
1518 0 : }
1519 0 :
1520 0 : // Wait for the WAL to arrive and be processed on the parent branch up
1521 0 : // to the requested branch point. The repository code itself doesn't
1522 0 : // require it, but if we start to receive WAL on the new timeline,
1523 0 : // decoding the new WAL might need to look up previous pages, relation
1524 0 : // sizes etc. and that would get confused if the previous page versions
1525 0 : // are not in the repository yet.
1526 0 : ancestor_timeline
1527 0 : .wait_lsn(*lsn, timeline::WaitLsnWaiter::Tenant, ctx)
1528 0 : .await
1529 0 : .map_err(|e| match e {
1530 0 : e @ (WaitLsnError::Timeout(_) | WaitLsnError::BadState) => {
1531 0 : CreateTimelineError::AncestorLsn(anyhow::anyhow!(e))
1532 : }
1533 0 : WaitLsnError::Shutdown => CreateTimelineError::ShuttingDown,
1534 0 : })?;
1535 0 : }
1536 :
1537 0 : self.branch_timeline(
1538 0 : &ancestor_timeline,
1539 0 : new_timeline_id,
1540 0 : ancestor_start_lsn,
1541 0 : create_guard,
1542 0 : ctx,
1543 0 : )
1544 0 : .await?
1545 : }
1546 : None => {
1547 0 : self.bootstrap_timeline(
1548 0 : new_timeline_id,
1549 0 : pg_version,
1550 0 : load_existing_initdb,
1551 0 : create_guard,
1552 0 : ctx,
1553 0 : )
1554 0 : .await?
1555 : }
1556 : };
1557 :
1558 : // At this point we have dropped our guard on [`Self::timelines_creating`], and
1559 : // the timeline is visible in [`Self::timelines`], but it is _not_ durable yet. We must
1560 : // not send a success to the caller until it is. The same applies to handling retries,
1561 : // see the handling of [`TimelineExclusionError::AlreadyExists`] above.
1562 0 : if let Some(remote_client) = loaded_timeline.remote_client.as_ref() {
1563 0 : let kind = ancestor_timeline_id
1564 0 : .map(|_| "branched")
1565 0 : .unwrap_or("bootstrapped");
1566 0 : remote_client.wait_completion().await.with_context(|| {
1567 0 : format!("wait for {} timeline initial uploads to complete", kind)
1568 0 : })?;
1569 0 : }
1570 :
1571 0 : loaded_timeline.activate(self.clone(), broker_client, None, ctx);
1572 0 :
1573 0 : Ok(loaded_timeline)
1574 0 : }
1575 :
1576 0 : pub(crate) async fn delete_timeline(
1577 0 : self: Arc<Self>,
1578 0 : timeline_id: TimelineId,
1579 0 : ) -> Result<(), DeleteTimelineError> {
1580 0 : DeleteTimelineFlow::run(&self, timeline_id, false).await?;
1581 :
1582 0 : Ok(())
1583 0 : }
1584 :
1585 :     /// Perform one garbage collection iteration, removing old data files from disk.
1586 :     /// This function is periodically called by the GC task.
1587 :     /// It can also be explicitly requested through the page server API's 'do_gc' command.
1588 :     ///
1589 :     /// `target_timeline_id` specifies the timeline to GC, or None for all.
1590 :     ///
1591 :     /// The `horizon` and `pitr` parameters determine how much WAL history needs to be retained.
1592 :     /// Also known as the retention period, or the GC cutoff point. `horizon` specifies
1593 :     /// the amount of history, as an LSN difference from the current latest LSN on each timeline.
1594 :     /// `pitr` specifies the same as a time difference from the current time. The effective
1595 :     /// GC cutoff point is determined conservatively by either `horizon` or `pitr`, whichever
1596 :     /// requires more history to be retained.
1597 : //
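     :     // Illustrative sketch only (added here, not part of the original source):
     :     // with `horizon` = 64 MiB of WAL and `pitr` = 7 days, the effective cutoff is
     :     // roughly the lower (more conservative) of the two candidate LSNs, e.g.
     :     //   cutoff = min(last_record_lsn - horizon, lsn_at(now - pitr))
     :     // so whichever setting retains more history wins.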
1598 728 : pub async fn gc_iteration(
1599 728 : &self,
1600 728 : target_timeline_id: Option<TimelineId>,
1601 728 : horizon: u64,
1602 728 : pitr: Duration,
1603 728 : cancel: &CancellationToken,
1604 728 : ctx: &RequestContext,
1605 728 : ) -> anyhow::Result<GcResult> {
1606 728 : // Don't start doing work during shutdown
1607 728 : if let TenantState::Stopping { .. } = self.current_state() {
1608 0 : return Ok(GcResult::default());
1609 728 : }
1610 728 :
1611 728 : // there is a global allowed_error for this
1612 728 : anyhow::ensure!(
1613 728 : self.is_active(),
1614 0 : "Cannot run GC iteration on inactive tenant"
1615 : );
1616 :
1617 : {
1618 728 : let conf = self.tenant_conf.load();
1619 728 :
1620 728 : if !conf.location.may_delete_layers_hint() {
1621 0 : info!("Skipping GC in location state {:?}", conf.location);
1622 0 : return Ok(GcResult::default());
1623 728 : }
1624 728 : }
1625 728 :
1626 728 : self.gc_iteration_internal(target_timeline_id, horizon, pitr, cancel, ctx)
1627 728 : .await
1628 728 : }
1629 :
1630 : /// Perform one compaction iteration.
1631 : /// This function is periodically called by compactor task.
1632 : /// Also it can be explicitly requested per timeline through page server
1633 : /// api's 'compact' command.
1634 0 : async fn compaction_iteration(
1635 0 : &self,
1636 0 : cancel: &CancellationToken,
1637 0 : ctx: &RequestContext,
1638 0 : ) -> anyhow::Result<(), timeline::CompactionError> {
1639 0 : // Don't start doing work during shutdown, or when broken, we do not need those in the logs
1640 0 : if !self.is_active() {
1641 0 : return Ok(());
1642 0 : }
1643 0 :
1644 0 : {
1645 0 : let conf = self.tenant_conf.load();
1646 0 : if !conf.location.may_delete_layers_hint() || !conf.location.may_upload_layers_hint() {
1647 0 : info!("Skipping compaction in location state {:?}", conf.location);
1648 0 : return Ok(());
1649 0 : }
1650 0 : }
1651 0 :
1652 0 : // Scan through the hashmap and collect a list of all the timelines,
1653 0 : // while holding the lock. Then drop the lock and actually perform the
1654 0 : // compactions. We don't want to block everything else while the
1655 0 : // compaction runs.
1656 0 : let timelines_to_compact = {
1657 0 : let timelines = self.timelines.lock().unwrap();
1658 0 : let timelines_to_compact = timelines
1659 0 : .iter()
1660 0 : .filter_map(|(timeline_id, timeline)| {
1661 0 : if timeline.is_active() {
1662 0 : Some((*timeline_id, timeline.clone()))
1663 : } else {
1664 0 : None
1665 : }
1666 0 : })
1667 0 : .collect::<Vec<_>>();
1668 0 : drop(timelines);
1669 0 : timelines_to_compact
1670 : };
1671 :
1672 0 : for (timeline_id, timeline) in &timelines_to_compact {
1673 0 : timeline
1674 0 : .compact(cancel, EnumSet::empty(), ctx)
1675 0 : .instrument(info_span!("compact_timeline", %timeline_id))
1676 0 : .await?;
1677 : }
1678 :
1679 0 : Ok(())
1680 0 : }
1681 :
1682 : // Call through to all timelines to freeze ephemeral layers if needed. Usually
1683 : // this happens during ingest: this background housekeeping is for freezing layers
1684 : // that are open but haven't been written to for some time.
1685 0 : async fn ingest_housekeeping(&self) {
1686 0 : // Scan through the hashmap and collect a list of all the timelines,
1687 0 :         // while holding the lock. Then drop the lock and do the actual work
1688 0 :         // (freezing ephemeral layers where needed). We don't want to block
1689 0 :         // everything else while that runs.
1690 0 : let timelines = {
1691 0 : self.timelines
1692 0 : .lock()
1693 0 : .unwrap()
1694 0 : .values()
1695 0 : .filter_map(|timeline| {
1696 0 : if timeline.is_active() {
1697 0 : Some(timeline.clone())
1698 : } else {
1699 0 : None
1700 : }
1701 0 : })
1702 0 : .collect::<Vec<_>>()
1703 : };
1704 :
1705 0 : for timeline in &timelines {
1706 0 : timeline.maybe_freeze_ephemeral_layer().await;
1707 : }
1708 0 : }
1709 :
1710 2300 : pub fn current_state(&self) -> TenantState {
1711 2300 : self.state.borrow().clone()
1712 2300 : }
1713 :
1714 1566 : pub fn is_active(&self) -> bool {
1715 1566 : self.current_state() == TenantState::Active
1716 1566 : }
1717 :
1718 0 : pub fn generation(&self) -> Generation {
1719 0 : self.generation
1720 0 : }
1721 :
1722 0 : pub(crate) fn wal_redo_manager_status(&self) -> Option<WalRedoManagerStatus> {
1723 0 : self.walredo_mgr.as_ref().and_then(|mgr| mgr.status())
1724 0 : }
1725 :
1726 : /// Changes tenant status to active, unless shutdown was already requested.
1727 : ///
1728 : /// `background_jobs_can_start` is an optional barrier set to a value during pageserver startup
1729 : /// to delay background jobs. Background jobs can be started right away when None is given.
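     :     // (Illustration, assumed usage rather than taken from this source: during
     :     // pageserver startup the caller would pass `Some(&barrier)` so the GC and
     :     // compaction loops hold off until startup releases the barrier; passing
     :     // `None` lets them start immediately.)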
1730 0 : fn activate(
1731 0 : self: &Arc<Self>,
1732 0 : broker_client: BrokerClientChannel,
1733 0 : background_jobs_can_start: Option<&completion::Barrier>,
1734 0 : ctx: &RequestContext,
1735 0 : ) {
1736 0 : span::debug_assert_current_span_has_tenant_id();
1737 0 :
1738 0 : let mut activating = false;
1739 0 : self.state.send_modify(|current_state| {
1740 0 : use pageserver_api::models::ActivatingFrom;
1741 0 : match &*current_state {
1742 : TenantState::Activating(_) | TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => {
1743 0 : panic!("caller is responsible for calling activate() only on Loading / Attaching tenants, got {state:?}", state = current_state);
1744 : }
1745 0 : TenantState::Loading => {
1746 0 : *current_state = TenantState::Activating(ActivatingFrom::Loading);
1747 0 : }
1748 0 : TenantState::Attaching => {
1749 0 : *current_state = TenantState::Activating(ActivatingFrom::Attaching);
1750 0 : }
1751 : }
1752 0 : debug!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), "Activating tenant");
1753 0 : activating = true;
1754 0 : // Continue outside the closure. We need to grab timelines.lock()
1755 0 : // and we plan to turn it into a tokio::sync::Mutex in a future patch.
1756 0 : });
1757 0 :
1758 0 : if activating {
1759 0 : let timelines_accessor = self.timelines.lock().unwrap();
1760 0 : let timelines_to_activate = timelines_accessor
1761 0 : .values()
1762 0 : .filter(|timeline| !(timeline.is_broken() || timeline.is_stopping()));
1763 0 :
1764 0 : // Spawn gc and compaction loops. The loops will shut themselves
1765 0 : // down when they notice that the tenant is inactive.
1766 0 : tasks::start_background_loops(self, background_jobs_can_start);
1767 0 :
1768 0 : let mut activated_timelines = 0;
1769 :
1770 0 : for timeline in timelines_to_activate {
1771 0 : timeline.activate(
1772 0 : self.clone(),
1773 0 : broker_client.clone(),
1774 0 : background_jobs_can_start,
1775 0 : ctx,
1776 0 : );
1777 0 : activated_timelines += 1;
1778 0 : }
1779 :
1780 0 : self.state.send_modify(move |current_state| {
1781 0 : assert!(
1782 0 : matches!(current_state, TenantState::Activating(_)),
1783 0 : "set_stopping and set_broken wait for us to leave Activating state",
1784 : );
1785 0 : *current_state = TenantState::Active;
1786 0 :
1787 0 : let elapsed = self.constructed_at.elapsed();
1788 0 : let total_timelines = timelines_accessor.len();
1789 0 :
1790 0 :                 // log a lot of stuff, because some tenants sometimes suffer from long,
1791 0 :                 // user-visible activation times. See https://github.com/neondatabase/neon/issues/4025
1792 0 : info!(
1793 0 : since_creation_millis = elapsed.as_millis(),
1794 0 : tenant_id = %self.tenant_shard_id.tenant_id,
1795 0 : shard_id = %self.tenant_shard_id.shard_slug(),
1796 0 : activated_timelines,
1797 0 : total_timelines,
1798 0 : post_state = <&'static str>::from(&*current_state),
1799 0 : "activation attempt finished"
1800 : );
1801 :
1802 0 : TENANT.activation.observe(elapsed.as_secs_f64());
1803 0 : });
1804 0 : }
1805 0 : }
1806 :
1807 : /// Shutdown the tenant and join all of the spawned tasks.
1808 : ///
1809 : /// The method caters for all use-cases:
1810 : /// - pageserver shutdown (freeze_and_flush == true)
1811 : /// - detach + ignore (freeze_and_flush == false)
1812 : ///
1813 :     /// This will attempt to shut down even if the tenant is broken.
1814 : ///
1815 : /// `shutdown_progress` is a [`completion::Barrier`] for the shutdown initiated by this call.
1816 : /// If the tenant is already shutting down, we return a clone of the first shutdown call's
1817 : /// `Barrier` as an `Err`. This not-first caller can use the returned barrier to join with
1818 : /// the ongoing shutdown.
1819 6 : async fn shutdown(
1820 6 : &self,
1821 6 : shutdown_progress: completion::Barrier,
1822 6 : shutdown_mode: timeline::ShutdownMode,
1823 6 : ) -> Result<(), completion::Barrier> {
1824 6 : span::debug_assert_current_span_has_tenant_id();
1825 :
1826 :         // Set tenant (and its timelines) to Stopping state.
1827 : //
1828 : // Since we can only transition into Stopping state after activation is complete,
1829 : // run it in a JoinSet so all tenants have a chance to stop before we get SIGKILLed.
1830 : //
1831 : // Transitioning tenants to Stopping state has a couple of non-obvious side effects:
1832 : // 1. Lock out any new requests to the tenants.
1833 : // 2. Signal cancellation to WAL receivers (we wait on it below).
1834 : // 3. Signal cancellation for other tenant background loops.
1835 : // 4. ???
1836 : //
1837 : // The waiting for the cancellation is not done uniformly.
1838 : // We certainly wait for WAL receivers to shut down.
1839 : // That is necessary so that no new data comes in before the freeze_and_flush.
1840 : // But the tenant background loops are joined-on in our caller.
1841 :         // It's messed up.
1842 :         // We just ignore the failure to stop.
1843 :
1844 : // If we're still attaching, fire the cancellation token early to drop out: this
1845 : // will prevent us flushing, but ensures timely shutdown if some I/O during attach
1846 : // is very slow.
1847 6 : if matches!(self.current_state(), TenantState::Attaching) {
1848 0 : self.cancel.cancel();
1849 6 : }
1850 :
1851 6 : match self.set_stopping(shutdown_progress, false, false).await {
1852 6 : Ok(()) => {}
1853 0 : Err(SetStoppingError::Broken) => {
1854 0 : // assume that this is acceptable
1855 0 : }
1856 0 : Err(SetStoppingError::AlreadyStopping(other)) => {
1857 0 :                 // give the caller the option to wait for this shutdown
1858 0 : info!("Tenant::shutdown: AlreadyStopping");
1859 0 : return Err(other);
1860 : }
1861 : };
1862 :
1863 6 : let mut js = tokio::task::JoinSet::new();
1864 6 : {
1865 6 : let timelines = self.timelines.lock().unwrap();
1866 6 : timelines.values().for_each(|timeline| {
1867 6 : let timeline = Arc::clone(timeline);
1868 6 : let timeline_id = timeline.timeline_id;
1869 6 : let span = tracing::info_span!("timeline_shutdown", %timeline_id, ?shutdown_mode);
1870 16 : js.spawn(async move { timeline.shutdown(shutdown_mode).instrument(span).await });
1871 6 : })
1872 6 : };
1873 6 : // test_long_timeline_create_then_tenant_delete is leaning on this message
1874 6 : tracing::info!("Waiting for timelines...");
1875 12 : while let Some(res) = js.join_next().await {
1876 0 : match res {
1877 6 : Ok(()) => {}
1878 0 : Err(je) if je.is_cancelled() => unreachable!("no cancelling used"),
1879 0 : Err(je) if je.is_panic() => { /* logged already */ }
1880 0 : Err(je) => warn!("unexpected JoinError: {je:?}"),
1881 : }
1882 : }
1883 :
1884 : // We cancel the Tenant's cancellation token _after_ the timelines have all shut down. This permits
1885 : // them to continue to do work during their shutdown methods, e.g. flushing data.
1886 6 : tracing::debug!("Cancelling CancellationToken");
1887 6 : self.cancel.cancel();
1888 6 :
1889 6 : // shutdown all tenant and timeline tasks: gc, compaction, page service
1890 6 : // No new tasks will be started for this tenant because it's in `Stopping` state.
1891 6 : //
1892 6 : // this will additionally shutdown and await all timeline tasks.
1893 6 : tracing::debug!("Waiting for tasks...");
1894 6 : task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), None).await;
1895 :
1896 : // Wait for any in-flight operations to complete
1897 6 : self.gate.close().await;
1898 :
1899 6 : remove_tenant_metrics(&self.tenant_shard_id);
1900 6 :
1901 6 : Ok(())
1902 6 : }
1903 :
1904 : /// Change tenant status to Stopping, to mark that it is being shut down.
1905 : ///
1906 : /// This function waits for the tenant to become active if it isn't already, before transitioning it into Stopping state.
1907 : ///
1908 : /// This function is not cancel-safe!
1909 : ///
1910 :     /// `allow_transition_from_loading` is needed for the special case of the loading task deleting the tenant.
1911 :     /// `allow_transition_from_attaching` is needed for the special case of attaching a deleted tenant.
1912 6 : async fn set_stopping(
1913 6 : &self,
1914 6 : progress: completion::Barrier,
1915 6 : allow_transition_from_loading: bool,
1916 6 : allow_transition_from_attaching: bool,
1917 6 : ) -> Result<(), SetStoppingError> {
1918 6 : let mut rx = self.state.subscribe();
1919 6 :
1920 6 : // cannot stop before we're done activating, so wait out until we're done activating
1921 6 : rx.wait_for(|state| match state {
1922 0 : TenantState::Attaching if allow_transition_from_attaching => true,
1923 : TenantState::Activating(_) | TenantState::Attaching => {
1924 0 : info!(
1925 0 : "waiting for {} to turn Active|Broken|Stopping",
1926 0 : <&'static str>::from(state)
1927 : );
1928 0 : false
1929 : }
1930 0 : TenantState::Loading => allow_transition_from_loading,
1931 6 : TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => true,
1932 6 : })
1933 0 : .await
1934 6 : .expect("cannot drop self.state while on a &self method");
1935 6 :
1936 6 : // we now know we're done activating, let's see whether this task is the winner to transition into Stopping
1937 6 : let mut err = None;
1938 6 : let stopping = self.state.send_if_modified(|current_state| match current_state {
1939 : TenantState::Activating(_) => {
1940 0 : unreachable!("1we ensured above that we're done with activation, and, there is no re-activation")
1941 : }
1942 : TenantState::Attaching => {
1943 0 : if !allow_transition_from_attaching {
1944 0 : unreachable!("2we ensured above that we're done with activation, and, there is no re-activation")
1945 0 : };
1946 0 : *current_state = TenantState::Stopping { progress };
1947 0 : true
1948 : }
1949 : TenantState::Loading => {
1950 0 : if !allow_transition_from_loading {
1951 0 : unreachable!("3we ensured above that we're done with activation, and, there is no re-activation")
1952 0 : };
1953 0 : *current_state = TenantState::Stopping { progress };
1954 0 : true
1955 : }
1956 : TenantState::Active => {
1957 : // FIXME: due to time-of-check vs time-of-use issues, it can happen that new timelines
1958 : // are created after the transition to Stopping. That's harmless, as the Timelines
1959 : // won't be accessible to anyone afterwards, because the Tenant is in Stopping state.
1960 6 : *current_state = TenantState::Stopping { progress };
1961 6 : // Continue stopping outside the closure. We need to grab timelines.lock()
1962 6 : // and we plan to turn it into a tokio::sync::Mutex in a future patch.
1963 6 : true
1964 : }
1965 0 : TenantState::Broken { reason, .. } => {
1966 0 : info!(
1967 0 : "Cannot set tenant to Stopping state, it is in Broken state due to: {reason}"
1968 : );
1969 0 : err = Some(SetStoppingError::Broken);
1970 0 : false
1971 : }
1972 0 : TenantState::Stopping { progress } => {
1973 0 : info!("Tenant is already in Stopping state");
1974 0 : err = Some(SetStoppingError::AlreadyStopping(progress.clone()));
1975 0 : false
1976 : }
1977 6 : });
1978 6 : match (stopping, err) {
1979 6 : (true, None) => {} // continue
1980 0 : (false, Some(err)) => return Err(err),
1981 0 : (true, Some(_)) => unreachable!(
1982 0 : "send_if_modified closure must error out if not transitioning to Stopping"
1983 0 : ),
1984 0 : (false, None) => unreachable!(
1985 0 : "send_if_modified closure must return true if transitioning to Stopping"
1986 0 : ),
1987 : }
1988 :
1989 6 : let timelines_accessor = self.timelines.lock().unwrap();
1990 6 : let not_broken_timelines = timelines_accessor
1991 6 : .values()
1992 6 : .filter(|timeline| !timeline.is_broken());
1993 12 : for timeline in not_broken_timelines {
1994 6 : timeline.set_state(TimelineState::Stopping);
1995 6 : }
1996 6 : Ok(())
1997 6 : }
1998 :
1999 : /// Method for tenant::mgr to transition us into Broken state in case of a late failure in
2000 : /// `remove_tenant_from_memory`
2001 : ///
2002 :     /// This function waits for the tenant to become active if it isn't already, before transitioning it into Broken state.
2003 : ///
2004 : /// In tests, we also use this to set tenants to Broken state on purpose.
2005 0 : pub(crate) async fn set_broken(&self, reason: String) {
2006 0 : let mut rx = self.state.subscribe();
2007 0 :
2008 0 : // The load & attach routines own the tenant state until it has reached `Active`.
2009 0 : // So, wait until it's done.
2010 0 : rx.wait_for(|state| match state {
2011 : TenantState::Activating(_) | TenantState::Loading | TenantState::Attaching => {
2012 0 : info!(
2013 0 : "waiting for {} to turn Active|Broken|Stopping",
2014 0 : <&'static str>::from(state)
2015 : );
2016 0 : false
2017 : }
2018 0 : TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => true,
2019 0 : })
2020 0 : .await
2021 0 : .expect("cannot drop self.state while on a &self method");
2022 0 :
2023 0 : // we now know we're done activating, let's see whether this task is the winner to transition into Broken
2024 0 : self.set_broken_no_wait(reason)
2025 0 : }
2026 :
2027 0 : pub(crate) fn set_broken_no_wait(&self, reason: impl Display) {
2028 0 : let reason = reason.to_string();
2029 0 : self.state.send_modify(|current_state| {
2030 0 : match *current_state {
2031 : TenantState::Activating(_) | TenantState::Loading | TenantState::Attaching => {
2032 0 : unreachable!("we ensured above that we're done with activation, and, there is no re-activation")
2033 : }
2034 : TenantState::Active => {
2035 0 : if cfg!(feature = "testing") {
2036 0 : warn!("Changing Active tenant to Broken state, reason: {}", reason);
2037 0 : *current_state = TenantState::broken_from_reason(reason);
2038 : } else {
2039 0 : unreachable!("not allowed to call set_broken on Active tenants in non-testing builds")
2040 : }
2041 : }
2042 : TenantState::Broken { .. } => {
2043 0 : warn!("Tenant is already in Broken state");
2044 : }
2045 :                 // This is the only "expected" path; any other path is a bug.
2046 : TenantState::Stopping { .. } => {
2047 0 : warn!(
2048 0 : "Marking Stopping tenant as Broken state, reason: {}",
2049 : reason
2050 : );
2051 0 : *current_state = TenantState::broken_from_reason(reason);
2052 : }
2053 : }
2054 0 : });
2055 0 : }
2056 :
2057 0 : pub fn subscribe_for_state_updates(&self) -> watch::Receiver<TenantState> {
2058 0 : self.state.subscribe()
2059 0 : }
2060 :
2061 : /// The activate_now semaphore is initialized with zero units. As soon as
2062 : /// we add a unit, waiters will be able to acquire a unit and proceed.
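     :     // For example, `wait_to_become_active()` below calls `activate_now()` so that
     :     // a lazily-activated tenant gets scheduled for activation as soon as a caller
     :     // needs it.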
2063 0 : pub(crate) fn activate_now(&self) {
2064 0 : self.activate_now_sem.add_permits(1);
2065 0 : }
2066 :
2067 0 : pub(crate) async fn wait_to_become_active(
2068 0 : &self,
2069 0 : timeout: Duration,
2070 0 : ) -> Result<(), GetActiveTenantError> {
2071 0 : let mut receiver = self.state.subscribe();
2072 0 : loop {
2073 0 : let current_state = receiver.borrow_and_update().clone();
2074 0 : match current_state {
2075 : TenantState::Loading | TenantState::Attaching | TenantState::Activating(_) => {
2076 : // in these states, there's a chance that we can reach ::Active
2077 0 : self.activate_now();
2078 0 : match timeout_cancellable(timeout, &self.cancel, receiver.changed()).await {
2079 0 : Ok(r) => {
2080 0 : r.map_err(
2081 0 : |_e: tokio::sync::watch::error::RecvError|
2082 : // Tenant existed but was dropped: report it as non-existent
2083 0 : GetActiveTenantError::NotFound(GetTenantError::NotFound(self.tenant_shard_id.tenant_id))
2084 0 : )?
2085 : }
2086 : Err(TimeoutCancellableError::Cancelled) => {
2087 0 : return Err(GetActiveTenantError::Cancelled);
2088 : }
2089 : Err(TimeoutCancellableError::Timeout) => {
2090 0 : return Err(GetActiveTenantError::WaitForActiveTimeout {
2091 0 : latest_state: Some(self.current_state()),
2092 0 : wait_time: timeout,
2093 0 : });
2094 : }
2095 : }
2096 : }
2097 : TenantState::Active { .. } => {
2098 0 : return Ok(());
2099 : }
2100 0 : TenantState::Broken { reason, .. } => {
2101 0 : // This is fatal, and reported distinctly from the general case of "will never be active" because
2102 0 : // it's logically a 500 to external API users (broken is always a bug).
2103 0 : return Err(GetActiveTenantError::Broken(reason));
2104 : }
2105 : TenantState::Stopping { .. } => {
2106 : // There's no chance the tenant can transition back into ::Active
2107 0 : return Err(GetActiveTenantError::WillNotBecomeActive(current_state));
2108 : }
2109 : }
2110 : }
2111 0 : }
2112 :
2113 0 : pub(crate) fn get_attach_mode(&self) -> AttachmentMode {
2114 0 : self.tenant_conf.load().location.attach_mode
2115 0 : }
2116 :
2117 : /// For API access: generate a LocationConfig equivalent to the one that would be used to
2118 : /// create a Tenant in the same state. Do not use this in hot paths: it's for relatively
2119 : /// rare external API calls, like a reconciliation at startup.
2120 0 : pub(crate) fn get_location_conf(&self) -> models::LocationConfig {
2121 0 : let conf = self.tenant_conf.load();
2122 :
2123 0 : let location_config_mode = match conf.location.attach_mode {
2124 0 : AttachmentMode::Single => models::LocationConfigMode::AttachedSingle,
2125 0 : AttachmentMode::Multi => models::LocationConfigMode::AttachedMulti,
2126 0 : AttachmentMode::Stale => models::LocationConfigMode::AttachedStale,
2127 : };
2128 :
2129 : // We have a pageserver TenantConf, we need the API-facing TenantConfig.
2130 0 : let tenant_config: models::TenantConfig = conf.tenant_conf.clone().into();
2131 0 :
2132 0 : models::LocationConfig {
2133 0 : mode: location_config_mode,
2134 0 : generation: self.generation.into(),
2135 0 : secondary_conf: None,
2136 0 : shard_number: self.shard_identity.number.0,
2137 0 : shard_count: self.shard_identity.count.literal(),
2138 0 : shard_stripe_size: self.shard_identity.stripe_size.0,
2139 0 : tenant_conf: tenant_config,
2140 0 : }
2141 0 : }
2142 :
2143 0 : pub(crate) fn get_tenant_shard_id(&self) -> &TenantShardId {
2144 0 : &self.tenant_shard_id
2145 0 : }
2146 :
2147 0 : pub(crate) fn get_shard_stripe_size(&self) -> ShardStripeSize {
2148 0 : self.shard_identity.stripe_size
2149 0 : }
2150 :
2151 0 : pub(crate) fn get_generation(&self) -> Generation {
2152 0 : self.generation
2153 0 : }
2154 :
2155 : /// This function partially shuts down the tenant (it shuts down the Timelines) and is fallible,
2156 : /// and can leave the tenant in a bad state if it fails. The caller is responsible for
2157 : /// resetting this tenant to a valid state if we fail.
2158 0 : pub(crate) async fn split_prepare(
2159 0 : &self,
2160 0 : child_shards: &Vec<TenantShardId>,
2161 0 : ) -> anyhow::Result<()> {
2162 0 : let timelines = self.timelines.lock().unwrap().clone();
2163 0 : for timeline in timelines.values() {
2164 0 : let Some(tl_client) = &timeline.remote_client else {
2165 0 : anyhow::bail!("Remote storage is mandatory");
2166 : };
2167 :
2168 0 : let Some(remote_storage) = &self.remote_storage else {
2169 0 : anyhow::bail!("Remote storage is mandatory");
2170 : };
2171 :
2172 : // We do not block timeline creation/deletion during splits inside the pageserver: it is up to higher levels
2173 :             // to ensure that a split is not started while those operations are in progress.
2174 :
2175 : // Upload an index from the parent: this is partly to provide freshness for the
2176 : // child tenants that will copy it, and partly for general ease-of-debugging: there will
2177 : // always be a parent shard index in the same generation as we wrote the child shard index.
2178 :             // always be a parent shard index in the same generation as the child shard indices we write.
2179 0 : tl_client.wait_completion().await?;
2180 :
2181 : // Shut down the timeline's remote client: this means that the indices we write
2182 : // for child shards will not be invalidated by the parent shard deleting layers.
2183 0 : tl_client.shutdown().await;
2184 :
2185 : // Download methods can still be used after shutdown, as they don't flow through the remote client's
2186 :             // queue. In principle the RemoteTimelineClient could provide this without downloading it, but this
2187 : // operation is rare, so it's simpler to just download it (and robustly guarantees that the index
2188 : // we use here really is the remotely persistent one).
2189 0 : let result = tl_client
2190 0 : .download_index_file(&self.cancel)
2191 0 : .instrument(info_span!("download_index_file", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%timeline.timeline_id))
2192 0 : .await?;
2193 0 : let index_part = match result {
2194 : MaybeDeletedIndexPart::Deleted(_) => {
2195 0 : anyhow::bail!("Timeline deletion happened concurrently with split")
2196 : }
2197 0 : MaybeDeletedIndexPart::IndexPart(p) => p,
2198 : };
2199 :
2200 0 : for child_shard in child_shards {
2201 0 : upload_index_part(
2202 0 : remote_storage,
2203 0 : child_shard,
2204 0 : &timeline.timeline_id,
2205 0 : self.generation,
2206 0 : &index_part,
2207 0 : &self.cancel,
2208 0 : )
2209 0 : .await?;
2210 : }
2211 : }
2212 :
2213 0 : Ok(())
2214 0 : }
2215 : }
2216 :
2217 : /// Given a map of timelines and their ancestors (timeline_id, ancestor_id),
2218 : /// perform a topological sort, so that the parent of each timeline comes
2219 : /// before its children.
2220 : /// `E` extracts the ancestor from `T`.
2221 : /// This allows `T` to vary: it can be TimelineMetadata, Timeline itself, etc.
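     : // Example (illustrative only; the `meta_*` helpers are hypothetical):
     : //   let input = HashMap::from([
     : //       (child_id, meta_with_ancestor(parent_id)),
     : //       (parent_id, meta_root()),
     : //   ]);
     : //   let sorted = tree_sort_timelines(input, |m| m.ancestor_timeline())?;
     : //   // `sorted` now lists `parent_id` before `child_id`.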
2222 118 : fn tree_sort_timelines<T, E>(
2223 118 : timelines: HashMap<TimelineId, T>,
2224 118 : extractor: E,
2225 118 : ) -> anyhow::Result<Vec<(TimelineId, T)>>
2226 118 : where
2227 118 : E: Fn(&T) -> Option<TimelineId>,
2228 118 : {
2229 118 : let mut result = Vec::with_capacity(timelines.len());
2230 118 :
2231 118 : let mut now = Vec::with_capacity(timelines.len());
2232 118 : // (ancestor, children)
2233 118 : let mut later: HashMap<TimelineId, Vec<(TimelineId, T)>> =
2234 118 : HashMap::with_capacity(timelines.len());
2235 :
2236 124 : for (timeline_id, value) in timelines {
2237 6 : if let Some(ancestor_id) = extractor(&value) {
2238 2 : let children = later.entry(ancestor_id).or_default();
2239 2 : children.push((timeline_id, value));
2240 4 : } else {
2241 4 : now.push((timeline_id, value));
2242 4 : }
2243 : }
2244 :
2245 124 : while let Some((timeline_id, metadata)) = now.pop() {
2246 6 : result.push((timeline_id, metadata));
2247 : // All children of this can be loaded now
2248 6 : if let Some(mut children) = later.remove(&timeline_id) {
2249 2 : now.append(&mut children);
2250 4 : }
2251 : }
2252 :
2253 :     // All timelines should have been visited now, unless there were timelines with missing ancestors.
2254 118 : if !later.is_empty() {
2255 0 : for (missing_id, orphan_ids) in later {
2256 0 : for (orphan_id, _) in orphan_ids {
2257 0 : error!("could not load timeline {orphan_id} because its ancestor timeline {missing_id} could not be loaded");
2258 : }
2259 : }
2260 0 : bail!("could not load tenant because some timelines are missing ancestors");
2261 118 : }
2262 118 :
2263 118 : Ok(result)
2264 118 : }
2265 :
2266 : impl Tenant {
2267 0 : pub fn tenant_specific_overrides(&self) -> TenantConfOpt {
2268 0 : self.tenant_conf.load().tenant_conf.clone()
2269 0 : }
2270 :
2271 0 : pub fn effective_config(&self) -> TenantConf {
2272 0 : self.tenant_specific_overrides()
2273 0 : .merge(self.conf.default_tenant_conf.clone())
2274 0 : }
2275 :
2276 0 : pub fn get_checkpoint_distance(&self) -> u64 {
2277 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2278 0 : tenant_conf
2279 0 : .checkpoint_distance
2280 0 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_distance)
2281 0 : }
2282 :
2283 0 : pub fn get_checkpoint_timeout(&self) -> Duration {
2284 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2285 0 : tenant_conf
2286 0 : .checkpoint_timeout
2287 0 : .unwrap_or(self.conf.default_tenant_conf.checkpoint_timeout)
2288 0 : }
2289 :
2290 0 : pub fn get_compaction_target_size(&self) -> u64 {
2291 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2292 0 : tenant_conf
2293 0 : .compaction_target_size
2294 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_target_size)
2295 0 : }
2296 :
2297 0 : pub fn get_compaction_period(&self) -> Duration {
2298 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2299 0 : tenant_conf
2300 0 : .compaction_period
2301 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_period)
2302 0 : }
2303 :
2304 0 : pub fn get_compaction_threshold(&self) -> usize {
2305 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2306 0 : tenant_conf
2307 0 : .compaction_threshold
2308 0 : .unwrap_or(self.conf.default_tenant_conf.compaction_threshold)
2309 0 : }
2310 :
2311 0 : pub fn get_gc_horizon(&self) -> u64 {
2312 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2313 0 : tenant_conf
2314 0 : .gc_horizon
2315 0 : .unwrap_or(self.conf.default_tenant_conf.gc_horizon)
2316 0 : }
2317 :
2318 0 : pub fn get_gc_period(&self) -> Duration {
2319 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2320 0 : tenant_conf
2321 0 : .gc_period
2322 0 : .unwrap_or(self.conf.default_tenant_conf.gc_period)
2323 0 : }
2324 :
2325 0 : pub fn get_image_creation_threshold(&self) -> usize {
2326 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2327 0 : tenant_conf
2328 0 : .image_creation_threshold
2329 0 : .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
2330 0 : }
2331 :
2332 0 : pub fn get_pitr_interval(&self) -> Duration {
2333 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2334 0 : tenant_conf
2335 0 : .pitr_interval
2336 0 : .unwrap_or(self.conf.default_tenant_conf.pitr_interval)
2337 0 : }
2338 :
2339 0 : pub fn get_trace_read_requests(&self) -> bool {
2340 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2341 0 : tenant_conf
2342 0 : .trace_read_requests
2343 0 : .unwrap_or(self.conf.default_tenant_conf.trace_read_requests)
2344 0 : }
2345 :
2346 0 : pub fn get_min_resident_size_override(&self) -> Option<u64> {
2347 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2348 0 : tenant_conf
2349 0 : .min_resident_size_override
2350 0 : .or(self.conf.default_tenant_conf.min_resident_size_override)
2351 0 : }
2352 :
2353 0 : pub fn get_heatmap_period(&self) -> Option<Duration> {
2354 0 : let tenant_conf = self.tenant_conf.load().tenant_conf.clone();
2355 0 : let heatmap_period = tenant_conf
2356 0 : .heatmap_period
2357 0 : .unwrap_or(self.conf.default_tenant_conf.heatmap_period);
2358 0 : if heatmap_period.is_zero() {
2359 0 : None
2360 : } else {
2361 0 : Some(heatmap_period)
2362 : }
2363 0 : }
2364 :
2365 0 : pub fn set_new_tenant_config(&self, new_tenant_conf: TenantConfOpt) {
2366 0 : // Use read-copy-update in order to avoid overwriting the location config
2367 0 : // state if this races with [`Tenant::set_new_location_config`]. Note that
2368 0 : // this race is not possible if both request types come from the storage
2369 0 : // controller (as they should!) because an exclusive op lock is required
2370 0 : // on the storage controller side.
2371 0 : self.tenant_conf.rcu(|inner| {
2372 0 : Arc::new(AttachedTenantConf {
2373 0 : tenant_conf: new_tenant_conf.clone(),
2374 0 : location: inner.location,
2375 0 : })
2376 0 : });
2377 0 :
2378 0 : self.tenant_conf_updated(&new_tenant_conf);
2379 0 : // Don't hold self.timelines.lock() during the notifies.
2380 0 : // There's no risk of deadlock right now, but there could be if we consolidate
2381 0 : // mutexes in struct Timeline in the future.
2382 0 : let timelines = self.list_timelines();
2383 0 : for timeline in timelines {
2384 0 : timeline.tenant_conf_updated(&new_tenant_conf);
2385 0 : }
2386 0 : }
2387 :
2388 0 : pub(crate) fn set_new_location_config(&self, new_conf: AttachedTenantConf) {
2389 0 : let new_tenant_conf = new_conf.tenant_conf.clone();
2390 0 :
2391 0 : self.tenant_conf.store(Arc::new(new_conf));
2392 0 :
2393 0 : self.tenant_conf_updated(&new_tenant_conf);
2394 0 : // Don't hold self.timelines.lock() during the notifies.
2395 0 : // There's no risk of deadlock right now, but there could be if we consolidate
2396 0 : // mutexes in struct Timeline in the future.
2397 0 : let timelines = self.list_timelines();
2398 0 : for timeline in timelines {
2399 0 : timeline.tenant_conf_updated(&new_tenant_conf);
2400 0 : }
2401 0 : }
2402 :
2403 118 : fn get_timeline_get_throttle_config(
2404 118 : psconf: &'static PageServerConf,
2405 118 : overrides: &TenantConfOpt,
2406 118 : ) -> throttle::Config {
2407 118 : overrides
2408 118 : .timeline_get_throttle
2409 118 : .clone()
2410 118 : .unwrap_or(psconf.default_tenant_conf.timeline_get_throttle.clone())
2411 118 : }
2412 :
2413 0 : pub(crate) fn tenant_conf_updated(&self, new_conf: &TenantConfOpt) {
2414 0 : let conf = Self::get_timeline_get_throttle_config(self.conf, new_conf);
2415 0 : self.timeline_get_throttle.reconfigure(conf)
2416 0 : }
2417 :
2418 : /// Helper function to create a new Timeline struct.
2419 : ///
2420 : /// The returned Timeline is in Loading state. The caller is responsible for
2421 : /// initializing any on-disk state, and for inserting the Timeline to the 'timelines'
2422 : /// map.
2423 : ///
2424 :     /// `CreateTimelineCause::Delete` is used when a timeline is created for deletion,
2425 :     /// and we might not have the ancestor present anymore, which is fine for
2426 :     /// to-be-deleted timelines.
2427 334 : fn create_timeline_struct(
2428 334 : &self,
2429 334 : new_timeline_id: TimelineId,
2430 334 : new_metadata: &TimelineMetadata,
2431 334 : ancestor: Option<Arc<Timeline>>,
2432 334 : resources: TimelineResources,
2433 334 : cause: CreateTimelineCause,
2434 334 : ) -> anyhow::Result<Arc<Timeline>> {
2435 334 : let state = match cause {
2436 : CreateTimelineCause::Load => {
2437 334 : let ancestor_id = new_metadata.ancestor_timeline();
2438 334 : anyhow::ensure!(
2439 334 : ancestor_id == ancestor.as_ref().map(|t| t.timeline_id),
2440 0 : "Timeline's {new_timeline_id} ancestor {ancestor_id:?} was not found"
2441 : );
2442 334 : TimelineState::Loading
2443 : }
2444 0 : CreateTimelineCause::Delete => TimelineState::Stopping,
2445 : };
2446 :
2447 334 : let pg_version = new_metadata.pg_version();
2448 334 :
2449 334 : let timeline = Timeline::new(
2450 334 : self.conf,
2451 334 : Arc::clone(&self.tenant_conf),
2452 334 : new_metadata,
2453 334 : ancestor,
2454 334 : new_timeline_id,
2455 334 : self.tenant_shard_id,
2456 334 : self.generation,
2457 334 : self.shard_identity,
2458 334 : self.walredo_mgr.clone(),
2459 334 : resources,
2460 334 : pg_version,
2461 334 : state,
2462 334 : self.cancel.child_token(),
2463 334 : );
2464 334 :
2465 334 : Ok(timeline)
2466 334 : }
2467 :
2468 : // Allow too_many_arguments because a constructor's argument list naturally grows with the
2469 : // number of attributes in the struct: breaking these out into a builder wouldn't be helpful.
2470 : #[allow(clippy::too_many_arguments)]
2471 118 : fn new(
2472 118 : state: TenantState,
2473 118 : conf: &'static PageServerConf,
2474 118 : attached_conf: AttachedTenantConf,
2475 118 : shard_identity: ShardIdentity,
2476 118 : walredo_mgr: Option<Arc<WalRedoManager>>,
2477 118 : tenant_shard_id: TenantShardId,
2478 118 : remote_storage: Option<GenericRemoteStorage>,
2479 118 : deletion_queue_client: DeletionQueueClient,
2480 118 : ) -> Tenant {
2481 118 : let (state, mut rx) = watch::channel(state);
2482 118 :
2483 118 : tokio::spawn(async move {
2484 118 : // reflect tenant state in metrics:
2485 118 : // - global per tenant state: TENANT_STATE_METRIC
2486 118 : // - "set" of broken tenants: BROKEN_TENANTS_SET
2487 118 : //
2488 118 : // set of broken tenants should not have zero counts so that it remains accessible for
2489 118 : // alerting.
2490 118 :
2491 118 : let tid = tenant_shard_id.to_string();
2492 118 : let shard_id = tenant_shard_id.shard_slug().to_string();
2493 118 : let set_key = &[tid.as_str(), shard_id.as_str()][..];
2494 118 :
2495 234 : fn inspect_state(state: &TenantState) -> ([&'static str; 1], bool) {
2496 234 : ([state.into()], matches!(state, TenantState::Broken { .. }))
2497 234 : }
2498 118 :
2499 118 : let mut tuple = inspect_state(&rx.borrow_and_update());
2500 118 :
2501 118 : let is_broken = tuple.1;
2502 118 : let mut counted_broken = if is_broken {
2503 :                 // add the id to the set right away; there should not be any further updates
2504 :                 // on the channel before the tenant is removed, if ever
2505 0 : BROKEN_TENANTS_SET.with_label_values(set_key).set(1);
2506 0 : true
2507 : } else {
2508 118 : false
2509 : };
2510 :
2511 234 : loop {
2512 234 : let labels = &tuple.0;
2513 234 : let current = TENANT_STATE_METRIC.with_label_values(labels);
2514 234 : current.inc();
2515 234 :
2516 234 : if rx.changed().await.is_err() {
2517 : // tenant has been dropped
2518 10 : current.dec();
2519 10 : drop(BROKEN_TENANTS_SET.remove_label_values(set_key));
2520 10 : break;
2521 116 : }
2522 116 :
2523 116 : current.dec();
2524 116 : tuple = inspect_state(&rx.borrow_and_update());
2525 116 :
2526 116 : let is_broken = tuple.1;
2527 116 : if is_broken && !counted_broken {
2528 0 : counted_broken = true;
2529 0 : // insert the tenant_id (back) into the set while avoiding needless counter
2530 0 : // access
2531 0 : BROKEN_TENANTS_SET.with_label_values(set_key).set(1);
2532 116 : }
2533 : }
2534 118 : });
2535 118 :
2536 118 : Tenant {
2537 118 : tenant_shard_id,
2538 118 : shard_identity,
2539 118 : generation: attached_conf.location.generation,
2540 118 : conf,
2541 118 :             // using `now` here is a good enough approximation to catch tenants with really long
2542 118 : // activation times.
2543 118 : constructed_at: Instant::now(),
2544 118 : timelines: Mutex::new(HashMap::new()),
2545 118 : timelines_creating: Mutex::new(HashSet::new()),
2546 118 : gc_cs: tokio::sync::Mutex::new(()),
2547 118 : walredo_mgr,
2548 118 : remote_storage,
2549 118 : deletion_queue_client,
2550 118 : state,
2551 118 : cached_logical_sizes: tokio::sync::Mutex::new(HashMap::new()),
2552 118 : cached_synthetic_tenant_size: Arc::new(AtomicU64::new(0)),
2553 118 : eviction_task_tenant_state: tokio::sync::Mutex::new(EvictionTaskTenantState::default()),
2554 118 : activate_now_sem: tokio::sync::Semaphore::new(0),
2555 118 : delete_progress: Arc::new(tokio::sync::Mutex::new(DeleteTenantFlow::default())),
2556 118 : cancel: CancellationToken::default(),
2557 118 : gate: Gate::default(),
2558 118 : timeline_get_throttle: Arc::new(throttle::Throttle::new(
2559 118 : Tenant::get_timeline_get_throttle_config(conf, &attached_conf.tenant_conf),
2560 118 : &crate::metrics::tenant_throttling::TIMELINE_GET,
2561 118 : )),
2562 118 : tenant_conf: Arc::new(ArcSwap::from_pointee(attached_conf)),
2563 118 : ongoing_timeline_detach: std::sync::Mutex::default(),
2564 118 : }
2565 118 : }
2566 :
2567 : /// Locate and load config
2568 0 : pub(super) fn load_tenant_config(
2569 0 : conf: &'static PageServerConf,
2570 0 : tenant_shard_id: &TenantShardId,
2571 0 : ) -> anyhow::Result<LocationConf> {
2572 0 : let legacy_config_path = conf.tenant_config_path(tenant_shard_id);
2573 0 : let config_path = conf.tenant_location_config_path(tenant_shard_id);
2574 0 :
2575 0 : if config_path.exists() {
2576 : // New-style config takes precedence
2577 0 : let deserialized = Self::read_config(&config_path)?;
2578 0 : Ok(toml_edit::de::from_document::<LocationConf>(deserialized)?)
2579 0 : } else if legacy_config_path.exists() {
2580 : // Upgrade path: found an old-style configuration only
2581 0 : let deserialized = Self::read_config(&legacy_config_path)?;
2582 :
2583 0 : let mut tenant_conf = TenantConfOpt::default();
2584 0 : for (key, item) in deserialized.iter() {
2585 0 : match key {
2586 0 : "tenant_config" => {
2587 0 : tenant_conf = TenantConfOpt::try_from(item.to_owned()).context(format!("Failed to parse config from file '{legacy_config_path}' as pageserver config"))?;
2588 : }
2589 0 : _ => bail!(
2590 0 : "config file {legacy_config_path} has unrecognized pageserver option '{key}'"
2591 0 : ),
2592 : }
2593 : }
2594 :
2595 : // Legacy configs are implicitly in attached state, and do not support sharding
2596 0 : Ok(LocationConf::attached_single(
2597 0 : tenant_conf,
2598 0 : Generation::none(),
2599 0 : &models::ShardParameters::default(),
2600 0 : ))
2601 : } else {
2602 : // FIXME If the config file is not found, assume that we're attaching
2603 : // a detached tenant and config is passed via attach command.
2604 : // https://github.com/neondatabase/neon/issues/1555
2605 : // OR: we're loading after incomplete deletion that managed to remove config.
2606 0 : info!(
2607 0 : "tenant config not found in {} or {}",
2608 : config_path, legacy_config_path
2609 : );
2610 0 : Ok(LocationConf::default())
2611 : }
2612 0 : }
2613 :
2614 0 : fn read_config(path: &Utf8Path) -> anyhow::Result<toml_edit::Document> {
2615 0 : info!("loading tenant configuration from {path}");
2616 :
2617 : // load and parse file
2618 0 : let config = fs::read_to_string(path)
2619 0 : .with_context(|| format!("Failed to load config from path '{path}'"))?;
2620 :
2621 0 : config
2622 0 : .parse::<toml_edit::Document>()
2623 0 : .with_context(|| format!("Failed to parse config from file '{path}' as toml file"))
2624 0 : }
2625 :
2626 0 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
2627 : pub(super) async fn persist_tenant_config(
2628 : conf: &'static PageServerConf,
2629 : tenant_shard_id: &TenantShardId,
2630 : location_conf: &LocationConf,
2631 : ) -> anyhow::Result<()> {
2632 : let legacy_config_path = conf.tenant_config_path(tenant_shard_id);
2633 : let config_path = conf.tenant_location_config_path(tenant_shard_id);
2634 :
2635 : Self::persist_tenant_config_at(
2636 : tenant_shard_id,
2637 : &config_path,
2638 : &legacy_config_path,
2639 : location_conf,
2640 : )
2641 : .await
2642 : }
2643 :
2644 0 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
2645 : pub(super) async fn persist_tenant_config_at(
2646 : tenant_shard_id: &TenantShardId,
2647 : config_path: &Utf8Path,
2648 : legacy_config_path: &Utf8Path,
2649 : location_conf: &LocationConf,
2650 : ) -> anyhow::Result<()> {
2651 : if let LocationMode::Attached(attach_conf) = &location_conf.mode {
2652 : // The modern-style LocationConf config file requires a generation to be set. In case someone
2653 : // is running a pageserver without the infrastructure to set generations, write out the legacy-style
2654 : // config file that only contains TenantConf.
2655 : //
2656 : // This will eventually be removed in https://github.com/neondatabase/neon/issues/5388
2657 :
2658 : if attach_conf.generation.is_none() {
2659 : tracing::info!(
2660 : "Running without generations, writing legacy-style tenant config file"
2661 : );
2662 : Self::persist_tenant_config_legacy(
2663 : tenant_shard_id,
2664 : legacy_config_path,
2665 : &location_conf.tenant_conf,
2666 : )
2667 : .await?;
2668 :
2669 : return Ok(());
2670 : }
2671 : }
2672 :
2673 : debug!("persisting tenantconf to {config_path}");
2674 :
2675 : let mut conf_content = r#"# This file contains a specific per-tenant's config.
2676 : # It is read in case of pageserver restart.
2677 : "#
2678 : .to_string();
2679 :
2680 0 : fail::fail_point!("tenant-config-before-write", |_| {
2681 0 : anyhow::bail!("tenant-config-before-write");
2682 0 : });
2683 :
2684 : // Convert the config to a toml file.
2685 : conf_content += &toml_edit::ser::to_string_pretty(&location_conf)?;
2686 :
2687 : let temp_path = path_with_suffix_extension(config_path, TEMP_FILE_SUFFIX);
2688 :
2689 : let tenant_shard_id = *tenant_shard_id;
2690 : let config_path = config_path.to_owned();
2691 : let conf_content = conf_content.into_bytes();
2692 : VirtualFile::crashsafe_overwrite(config_path.clone(), temp_path, conf_content)
2693 : .await
2694 0 : .with_context(|| format!("write tenant {tenant_shard_id} config to {config_path}"))?;
2695 :
2696 : Ok(())
2697 : }
2698 :
2699 0 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
2700 : async fn persist_tenant_config_legacy(
2701 : tenant_shard_id: &TenantShardId,
2702 : target_config_path: &Utf8Path,
2703 : tenant_conf: &TenantConfOpt,
2704 : ) -> anyhow::Result<()> {
2705 : debug!("persisting tenantconf to {target_config_path}");
2706 :
2707 : let mut conf_content = r#"# This file contains a specific per-tenant's config.
2708 : # It is read in case of pageserver restart.
2709 :
2710 : [tenant_config]
2711 : "#
2712 : .to_string();
2713 :
2714 : // Convert the config to a toml file.
2715 : conf_content += &toml_edit::ser::to_string(&tenant_conf)?;
2716 :
2717 : let temp_path = path_with_suffix_extension(target_config_path, TEMP_FILE_SUFFIX);
2718 :
2719 : let tenant_shard_id = *tenant_shard_id;
2720 : let target_config_path = target_config_path.to_owned();
2721 : let conf_content = conf_content.into_bytes();
2722 : VirtualFile::crashsafe_overwrite(target_config_path.clone(), temp_path, conf_content)
2723 : .await
2724 0 : .with_context(|| {
2725 0 : format!("write tenant {tenant_shard_id} config to {target_config_path}")
2726 0 : })?;
2727 : Ok(())
2728 : }
2729 :
2730 : //
2731 : // How garbage collection works:
2732 : //
2733 : // +--bar------------->
2734 : // /
2735 : // +----+-----foo---------------->
2736 : // /
2737 : // ----main--+-------------------------->
2738 : // \
2739 : // +-----baz-------->
2740 : //
2741 : //
2742 : // 1. Grab 'gc_cs' mutex to prevent new timelines from being created while Timeline's
2743 : // `gc_infos` are being refreshed
2744 :     // 2. Scan collected timelines, and on each timeline, make note of
2745 :     //    all the points where other timelines have been branched off.
2746 : // We will refrain from removing page versions at those LSNs.
2747 : // 3. For each timeline, scan all layer files on the timeline.
2748 : // Remove all files for which a newer file exists and which
2749 : // don't cover any branch point LSNs.
2750 : //
2751 : // TODO:
2752 : // - if a relation has a non-incremental persistent layer on a child branch, then we
2753 : // don't need to keep that in the parent anymore. But currently
2754 : // we do.
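     :     // Illustration (not from the original source): if 'bar' above was branched off
     :     // 'foo' at LSN 0/5000, then GC on 'foo' must retain the page versions at 0/5000
     :     // even when they fall below the horizon, because 'bar' may still read them
     :     // through its ancestor.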
2755 728 : async fn gc_iteration_internal(
2756 728 : &self,
2757 728 : target_timeline_id: Option<TimelineId>,
2758 728 : horizon: u64,
2759 728 : pitr: Duration,
2760 728 : cancel: &CancellationToken,
2761 728 : ctx: &RequestContext,
2762 728 : ) -> anyhow::Result<GcResult> {
2763 728 : let mut totals: GcResult = Default::default();
2764 728 : let now = Instant::now();
2765 :
2766 728 : let gc_timelines = match self
2767 728 : .refresh_gc_info_internal(target_timeline_id, horizon, pitr, cancel, ctx)
2768 728 : .await
2769 : {
2770 728 : Ok(result) => result,
2771 0 : Err(e) => {
2772 0 : if let Some(PageReconstructError::Cancelled) =
2773 0 : e.downcast_ref::<PageReconstructError>()
2774 : {
2775 : // Handle cancellation
2776 0 : totals.elapsed = now.elapsed();
2777 0 : return Ok(totals);
2778 : } else {
2779 : // Propagate other errors
2780 0 : return Err(e);
2781 : }
2782 : }
2783 : };
2784 :
2785 728 : failpoint_support::sleep_millis_async!("gc_iteration_internal_after_getting_gc_timelines");
2786 :
2787 : // If there is nothing to GC, we don't want any messages in the INFO log.
2788 728 : if !gc_timelines.is_empty() {
2789 728 : info!("{} timelines need GC", gc_timelines.len());
2790 : } else {
2791 0 : debug!("{} timelines need GC", gc_timelines.len());
2792 : }
2793 :
2794 : // Perform GC for each timeline.
2795 : //
2796 : // Note that we don't hold the `Tenant::gc_cs` lock here because we don't want to delay the
2797 : // branch creation task, which requires the GC lock. A GC iteration can run concurrently
2798 : // with branch creation.
2799 : //
2800 : // See comments in [`Tenant::branch_timeline`] for more information about why branch
2801 : // creation task can run concurrently with timeline's GC iteration.
2802 1456 : for timeline in gc_timelines {
2803 728 : if task_mgr::is_shutdown_requested() || cancel.is_cancelled() {
2804 : // We were requested to shut down. Stop and return with the progress we
2805 : // made.
2806 0 : break;
2807 728 : }
2808 728 : let result = timeline.gc().await?;
2809 728 : totals += result;
2810 : }
2811 :
2812 728 : totals.elapsed = now.elapsed();
2813 728 : Ok(totals)
2814 728 : }
2815 :
2816 : /// Refreshes the Timeline::gc_info for all timelines, returning the
2817 : /// vector of timelines which have [`Timeline::get_last_record_lsn`] past
2818 : /// [`Tenant::get_gc_horizon`].
2819 : ///
2820 : /// This is usually executed as part of periodic gc, but can now be triggered more often.
2821 0 : pub async fn refresh_gc_info(
2822 0 : &self,
2823 0 : cancel: &CancellationToken,
2824 0 : ctx: &RequestContext,
2825 0 : ) -> anyhow::Result<Vec<Arc<Timeline>>> {
2826 0 : // since this method can now be called at different rates than the configured gc loop, it
2827 0 :         // might be that these configuration values get applied faster than they were previously,
2828 0 : // since these were only read from the gc task.
2829 0 : let horizon = self.get_gc_horizon();
2830 0 : let pitr = self.get_pitr_interval();
2831 0 :
2832 0 : // refresh all timelines
2833 0 : let target_timeline_id = None;
2834 0 :
2835 0 : self.refresh_gc_info_internal(target_timeline_id, horizon, pitr, cancel, ctx)
2836 0 : .await
2837 0 : }
2838 :
2839 728 : async fn refresh_gc_info_internal(
2840 728 : &self,
2841 728 : target_timeline_id: Option<TimelineId>,
2842 728 : horizon: u64,
2843 728 : pitr: Duration,
2844 728 : cancel: &CancellationToken,
2845 728 : ctx: &RequestContext,
2846 728 : ) -> anyhow::Result<Vec<Arc<Timeline>>> {
2847 728 :         // before taking the gc_cs lock, do the heavier-weight work of finding gc_cutoff points
2848 728 :         // for currently visible timelines.
2849 728 : let timelines = self
2850 728 : .timelines
2851 728 : .lock()
2852 728 : .unwrap()
2853 728 : .values()
2854 3284 : .filter(|tl| match target_timeline_id.as_ref() {
2855 3284 : Some(target) => &tl.timeline_id == target,
2856 0 : None => true,
2857 3284 : })
2858 728 : .cloned()
2859 728 : .collect::<Vec<_>>();
2860 728 :
2861 728 : let mut gc_cutoffs: HashMap<TimelineId, GcCutoffs> =
2862 728 : HashMap::with_capacity(timelines.len());
2863 :
2864 728 : for timeline in timelines.iter() {
2865 728 : let cutoff = timeline
2866 728 : .get_last_record_lsn()
2867 728 : .checked_sub(horizon)
2868 728 : .unwrap_or(Lsn(0));
2869 :
2870 728 : let res = timeline.find_gc_cutoffs(cutoff, pitr, cancel, ctx).await;
2871 :
2872 728 : match res {
2873 728 : Ok(cutoffs) => {
2874 728 : let old = gc_cutoffs.insert(timeline.timeline_id, cutoffs);
2875 728 : assert!(old.is_none());
2876 : }
2877 0 : Err(e) => {
2878 0 : tracing::warn!(timeline_id = %timeline.timeline_id, "ignoring failure to find gc cutoffs: {e:#}");
2879 : }
2880 : }
2881 : }
2882 :
2883 728 : if !self.is_active() {
2884 0 : anyhow::bail!("shutting down");
2885 728 : }
2886 :
2887 : // grab mutex to prevent new timelines from being created here; avoid doing long operations
2888 : // because that will stall branch creation.
2889 728 : let gc_cs = self.gc_cs.lock().await;
2890 :
2891 : // Scan all timelines. For each timeline, remember the timeline ID and
2892 : // the branch point where it was created.
2893 728 : let (all_branchpoints, timeline_ids): (BTreeSet<(TimelineId, Lsn)>, _) = {
2894 728 : let timelines = self.timelines.lock().unwrap();
2895 728 : let mut all_branchpoints = BTreeSet::new();
2896 728 : let timeline_ids = {
2897 728 : if let Some(target_timeline_id) = target_timeline_id.as_ref() {
2898 728 : if timelines.get(target_timeline_id).is_none() {
2899 0 : bail!("gc target timeline does not exist")
2900 728 : }
2901 0 : };
2902 :
2903 728 : timelines
2904 728 : .iter()
2905 3284 : .map(|(timeline_id, timeline_entry)| {
2906 2556 : if let Some(ancestor_timeline_id) =
2907 3284 : &timeline_entry.get_ancestor_timeline_id()
2908 : {
2909 : // If target_timeline is specified, we only need to know branchpoints of its children
2910 2556 : if let Some(timeline_id) = target_timeline_id {
2911 2556 : if ancestor_timeline_id == &timeline_id {
2912 6 : all_branchpoints.insert((
2913 6 : *ancestor_timeline_id,
2914 6 : timeline_entry.get_ancestor_lsn(),
2915 6 : ));
2916 2550 : }
2917 : }
2918 : // Collect branchpoints for all timelines
2919 0 : else {
2920 0 : all_branchpoints.insert((
2921 0 : *ancestor_timeline_id,
2922 0 : timeline_entry.get_ancestor_lsn(),
2923 0 : ));
2924 0 : }
2925 728 : }
2926 :
2927 3284 : *timeline_id
2928 3284 : })
2929 728 : .collect::<Vec<_>>()
2930 728 : };
2931 728 : (all_branchpoints, timeline_ids)
2932 728 : };
2933 728 :
2934 728 : // Ok, we now know all the branch points.
2935 728 : // Update the GC information for each timeline.
2936 728 : let mut gc_timelines = Vec::with_capacity(timeline_ids.len());
2937 4012 : for timeline_id in timeline_ids {
2938 : // Timeline is known to be local and loaded.
2939 3284 : let timeline = self
2940 3284 : .get_timeline(timeline_id, false)
2941 3284 : .with_context(|| format!("Timeline {timeline_id} was not found"))?;
2942 :
2943 : // If target_timeline is specified, ignore all other timelines
2944 3284 : if let Some(target_timeline_id) = target_timeline_id {
2945 3284 : if timeline_id != target_timeline_id {
2946 2556 : continue;
2947 728 : }
2948 0 : }
2949 :
2950 728 : let branchpoints: Vec<Lsn> = all_branchpoints
2951 728 : .range((
2952 728 : Included((timeline_id, Lsn(0))),
2953 728 : Included((timeline_id, Lsn(u64::MAX))),
2954 728 : ))
2955 728 : .map(|&x| x.1)
2956 728 : .collect();
2957 728 :
2958 728 : {
2959 728 : let mut target = timeline.gc_info.write().unwrap();
2960 728 :
2961 728 : match gc_cutoffs.remove(&timeline_id) {
2962 728 : Some(cutoffs) => {
2963 728 : *target = GcInfo {
2964 728 : retain_lsns: branchpoints,
2965 728 : cutoffs,
2966 728 : };
2967 728 : }
2968 0 : None => {
2969 0 : // reasons for this being unavailable:
2970 0 : // - this timeline was created while we were finding cutoffs
2971 0 : // - lsn for timestamp search fails for this timeline repeatedly
2972 0 : //
2973 0 : // in both cases, refreshing the branchpoints is correct.
2974 0 : target.retain_lsns = branchpoints;
2975 0 : }
2976 : };
2977 : }
2978 :
2979 728 : gc_timelines.push(timeline);
2980 : }
2981 728 : drop(gc_cs);
2982 728 : Ok(gc_timelines)
2983 728 : }
2984 :
2985 : /// A substitute for `branch_timeline` for use in unit tests.
2986 : /// The returned timeline will have state value `Active` to make various `anyhow::ensure!()`
2987 : /// calls pass, but we do not actually call `.activate()` under the hood, so none of the
2988 : /// timeline background tasks are launched, except the flush loop.
2989 : #[cfg(test)]
2990 222 : async fn branch_timeline_test(
2991 222 : &self,
2992 222 : src_timeline: &Arc<Timeline>,
2993 222 : dst_id: TimelineId,
2994 222 : start_lsn: Option<Lsn>,
2995 222 : ctx: &RequestContext,
2996 222 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
2997 222 : let create_guard = self.create_timeline_create_guard(dst_id).unwrap();
2998 222 : let tl = self
2999 222 : .branch_timeline_impl(src_timeline, dst_id, start_lsn, create_guard, ctx)
3000 4 : .await?;
3001 218 : tl.set_state(TimelineState::Active);
3002 218 : Ok(tl)
3003 222 : }
3004 :
3005 : /// Branch an existing timeline.
3006 : ///
3007 : /// The caller is responsible for activating the returned timeline.
3008 0 : async fn branch_timeline(
3009 0 : &self,
3010 0 : src_timeline: &Arc<Timeline>,
3011 0 : dst_id: TimelineId,
3012 0 : start_lsn: Option<Lsn>,
3013 0 : timeline_create_guard: TimelineCreateGuard<'_>,
3014 0 : ctx: &RequestContext,
3015 0 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
3016 0 : self.branch_timeline_impl(src_timeline, dst_id, start_lsn, timeline_create_guard, ctx)
3017 0 : .await
3018 0 : }
3019 :
3020 222 : async fn branch_timeline_impl(
3021 222 : &self,
3022 222 : src_timeline: &Arc<Timeline>,
3023 222 : dst_id: TimelineId,
3024 222 : start_lsn: Option<Lsn>,
3025 222 : timeline_create_guard: TimelineCreateGuard<'_>,
3026 222 : _ctx: &RequestContext,
3027 222 : ) -> Result<Arc<Timeline>, CreateTimelineError> {
3028 222 : let src_id = src_timeline.timeline_id;
3029 :
3030 : // We will validate our ancestor LSN in this function. Acquire the GC lock so that
3031 : // this check cannot race with GC, and the ancestor LSN is guaranteed to remain
3032 : // valid while we are creating the branch.
3033 222 : let _gc_cs = self.gc_cs.lock().await;
3034 :
3035 : // If no start LSN is specified, we branch the new timeline from the source timeline's last record LSN
3036 222 : let start_lsn = start_lsn.unwrap_or_else(|| {
3037 2 : let lsn = src_timeline.get_last_record_lsn();
3038 2 : info!("branching timeline {dst_id} from timeline {src_id} at last record LSN: {lsn}");
3039 2 : lsn
3040 222 : });
3041 222 :
3042 222 : // Ensure that `start_lsn` is valid, i.e. the LSN is within the PITR
3043 222 : // horizon on the source timeline
3044 222 : //
3045 222 : // We check it against both the planned GC cutoff stored in 'gc_info',
3046 222 : // and the 'latest_gc_cutoff' of the last GC that was performed. The
3047 222 : // planned GC cutoff in 'gc_info' is normally larger than
3048 222 : // 'latest_gc_cutoff_lsn', but beware of corner cases like if you just
3049 222 : // changed the GC settings for the tenant to make the PITR window
3050 222 : // larger, but some of the data was already removed by an earlier GC
3051 222 : // iteration.
3052 222 :
3053 222 : // check against last actual 'latest_gc_cutoff' first
3054 222 : let latest_gc_cutoff_lsn = src_timeline.get_latest_gc_cutoff_lsn();
3055 222 : src_timeline
3056 222 : .check_lsn_is_in_scope(start_lsn, &latest_gc_cutoff_lsn)
3057 222 : .context(format!(
3058 222 : "invalid branch start lsn: less than latest GC cutoff {}",
3059 222 : *latest_gc_cutoff_lsn,
3060 222 : ))
3061 222 : .map_err(CreateTimelineError::AncestorLsn)?;
3062 :
3063 : // and then the planned GC cutoff
3064 : {
3065 218 : let gc_info = src_timeline.gc_info.read().unwrap();
3066 218 : let cutoff = gc_info.min_cutoff();
3067 218 : if start_lsn < cutoff {
3068 0 : return Err(CreateTimelineError::AncestorLsn(anyhow::anyhow!(
3069 0 : "invalid branch start lsn: less than planned GC cutoff {cutoff}"
3070 0 : )));
3071 218 : }
3072 218 : }
3073 218 :
3074 218 : //
3075 218 : // The branch point is valid, and we are still holding the 'gc_cs' lock
3076 218 : // so that GC cannot advance the GC cutoff until we are finished.
3077 218 : // Proceed with the branch creation.
3078 218 : //
3079 218 :
3080 218 : // Determine prev-LSN for the new timeline. We can only determine it if
3081 218 : // the timeline was branched at the current end of the source timeline.
3082 218 : let RecordLsn {
3083 218 : last: src_last,
3084 218 : prev: src_prev,
3085 218 : } = src_timeline.get_last_record_rlsn();
3086 218 : let dst_prev = if src_last == start_lsn {
3087 208 : Some(src_prev)
3088 : } else {
3089 10 : None
3090 : };
3091 :
3092 : // Create the metadata file, noting the ancestor of the new timeline.
3093 : // There is initially no data in it, but all the read-calls know to look
3094 : // into the ancestor.
3095 218 : let metadata = TimelineMetadata::new(
3096 218 : start_lsn,
3097 218 : dst_prev,
3098 218 : Some(src_id),
3099 218 : start_lsn,
3100 218 : *src_timeline.latest_gc_cutoff_lsn.read(), // FIXME: should we hold onto this guard longer?
3101 218 : src_timeline.initdb_lsn,
3102 218 : src_timeline.pg_version,
3103 218 : );
3104 :
3105 218 : let uninitialized_timeline = self
3106 218 : .prepare_new_timeline(
3107 218 : dst_id,
3108 218 : &metadata,
3109 218 : timeline_create_guard,
3110 218 : start_lsn + 1,
3111 218 : Some(Arc::clone(src_timeline)),
3112 218 : )
3113 0 : .await?;
3114 :
3115 218 : let new_timeline = uninitialized_timeline.finish_creation()?;
3116 :
3117 : // Root timeline gets its layers during creation and uploads them along with the metadata.
3118 : // A branch timeline, however, may receive no writes for some time after creation, and hence gets no layers.
3119 : // We still need to upload its metadata eagerly: if other nodes `attach` the tenant and miss this timeline, their GC
3120 : // could get incorrect information and remove more layers than needed.
3121 : // See also https://github.com/neondatabase/neon/issues/3865
3122 218 : if let Some(remote_client) = new_timeline.remote_client.as_ref() {
3123 218 : remote_client
3124 218 : .schedule_index_upload_for_full_metadata_update(&metadata)
3125 218 : .context("branch initial metadata upload")?;
3126 0 : }
3127 :
3128 218 : Ok(new_timeline)
3129 222 : }
3130 :
3131 : /// For unit tests, make this visible so that other modules can directly create timelines
3132 : #[cfg(test)]
3133 4 : #[tracing::instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), %timeline_id))]
3134 : pub(crate) async fn bootstrap_timeline_test(
3135 : &self,
3136 : timeline_id: TimelineId,
3137 : pg_version: u32,
3138 : load_existing_initdb: Option<TimelineId>,
3139 : ctx: &RequestContext,
3140 : ) -> anyhow::Result<Arc<Timeline>> {
3141 : let create_guard = self.create_timeline_create_guard(timeline_id).unwrap();
3142 : self.bootstrap_timeline(
3143 : timeline_id,
3144 : pg_version,
3145 : load_existing_initdb,
3146 : create_guard,
3147 : ctx,
3148 : )
3149 : .await
3150 : }
3151 :
3152 0 : async fn upload_initdb(
3153 0 : &self,
3154 0 : timelines_path: &Utf8PathBuf,
3155 0 : pgdata_path: &Utf8PathBuf,
3156 0 : timeline_id: &TimelineId,
3157 0 : ) -> anyhow::Result<()> {
3158 0 : let Some(storage) = &self.remote_storage else {
3159 : // No remote storage? No upload.
3160 0 : return Ok(());
3161 : };
3162 :
3163 0 : let temp_path = timelines_path.join(format!(
3164 0 : "{INITDB_PATH}.upload-{timeline_id}.{TEMP_FILE_SUFFIX}"
3165 0 : ));
3166 :
3167 : scopeguard::defer! {
3168 : if let Err(e) = fs::remove_file(&temp_path) {
3169 : error!("Failed to remove temporary initdb archive '{temp_path}': {e}");
3170 : }
3171 : }
3172 :
3173 0 : let (pgdata_zstd, tar_zst_size) = create_zst_tarball(pgdata_path, &temp_path).await?;
3174 : const INITDB_TAR_ZST_WARN_LIMIT: u64 = 2 * 1024 * 1024;
3175 0 : if tar_zst_size > INITDB_TAR_ZST_WARN_LIMIT {
3176 0 : warn!(
3177 0 : "compressed {temp_path} size of {tar_zst_size} is above limit {INITDB_TAR_ZST_WARN_LIMIT}."
3178 : );
3179 0 : }
3180 :
3181 : pausable_failpoint!("before-initdb-upload");
3182 :
3183 0 : backoff::retry(
3184 0 : || async {
3185 0 : self::remote_timeline_client::upload_initdb_dir(
3186 0 : storage,
3187 0 : &self.tenant_shard_id.tenant_id,
3188 0 : timeline_id,
3189 0 : pgdata_zstd.try_clone().await?,
3190 0 : tar_zst_size,
3191 0 : &self.cancel,
3192 0 : )
3193 0 : .await
3194 0 : },
3195 0 : |_| false,
3196 0 : 3,
3197 0 : u32::MAX,
3198 0 : "persist_initdb_tar_zst",
3199 0 : &self.cancel,
3200 0 : )
3201 0 : .await
3202 0 : .ok_or_else(|| anyhow::Error::new(TimeoutOrCancel::Cancel))
3203 0 : .and_then(|x| x)
3204 0 : }
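 :
 : // A standalone sketch (not pageserver code; the function name is made up for the example) of the
 : // `scopeguard::defer!` cleanup pattern used above: the deferred block runs when the enclosing
 : // scope exits, even on an early `?` return, so the temporary archive is removed no matter how the
 : // upload ends.
 : //
 : //     fn upload_scratch_file() -> std::io::Result<()> {
 : //         let temp_path = std::env::temp_dir().join("initdb-example.tmp");
 : //         std::fs::write(&temp_path, b"scratch")?;
 : //         scopeguard::defer! {
 : //             if let Err(e) = std::fs::remove_file(&temp_path) {
 : //                 eprintln!("failed to remove {}: {e}", temp_path.display());
 : //             }
 : //         }
 : //         // ... do the upload here, possibly returning early with `?` ...
 : //         Ok(())
 : //     }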
3205 :
3206 : /// - run initdb to initialize a temporary instance and get bootstrap data
3207 : /// - after initialization completes, tar up the temp dir and upload it to S3.
3208 : ///
3209 : /// The caller is responsible for activating the returned timeline.
3210 2 : async fn bootstrap_timeline(
3211 2 : &self,
3212 2 : timeline_id: TimelineId,
3213 2 : pg_version: u32,
3214 2 : load_existing_initdb: Option<TimelineId>,
3215 2 : timeline_create_guard: TimelineCreateGuard<'_>,
3216 2 : ctx: &RequestContext,
3217 2 : ) -> anyhow::Result<Arc<Timeline>> {
3218 2 : // create a `tenant/{tenant_id}/timelines/basebackup-{timeline_id}.{TEMP_FILE_SUFFIX}/`
3219 2 : // temporary directory for basebackup files for the given timeline.
3220 2 :
3221 2 : let timelines_path = self.conf.timelines_path(&self.tenant_shard_id);
3222 2 : let pgdata_path = path_with_suffix_extension(
3223 2 : timelines_path.join(format!("basebackup-{timeline_id}")),
3224 2 : TEMP_FILE_SUFFIX,
3225 2 : );
3226 2 :
3227 2 : // Remove whatever was left from the previous runs: safe because TimelineCreateGuard guarantees
3228 2 : // we won't race with other creations or existing timelines with the same path.
3229 2 : if pgdata_path.exists() {
3230 0 : fs::remove_dir_all(&pgdata_path).with_context(|| {
3231 0 : format!("Failed to remove already existing initdb directory: {pgdata_path}")
3232 0 : })?;
3233 2 : }
3234 :
3235 : // this new directory is only needed temporarily; arrange to remove it as soon as bootstrap is done, we don't need it afterwards
3236 : scopeguard::defer! {
3237 : if let Err(e) = fs::remove_dir_all(&pgdata_path) {
3238 : // this is unlikely, but we will remove the directory on pageserver restart or another bootstrap call
3239 : error!("Failed to remove temporary initdb directory '{pgdata_path}': {e}");
3240 : }
3241 : }
3242 2 : if let Some(existing_initdb_timeline_id) = load_existing_initdb {
3243 2 : let Some(storage) = &self.remote_storage else {
3244 0 : bail!("no storage configured but load_existing_initdb set to {existing_initdb_timeline_id}");
3245 : };
3246 2 : if existing_initdb_timeline_id != timeline_id {
3247 0 : let source_path = &remote_initdb_archive_path(
3248 0 : &self.tenant_shard_id.tenant_id,
3249 0 : &existing_initdb_timeline_id,
3250 0 : );
3251 0 : let dest_path =
3252 0 : &remote_initdb_archive_path(&self.tenant_shard_id.tenant_id, &timeline_id);
3253 0 :
3254 0 : // if this fails, it will get retried by retried control plane requests
3255 0 : storage
3256 0 : .copy_object(source_path, dest_path, &self.cancel)
3257 0 : .await
3258 0 : .context("copy initdb tar")?;
3259 2 : }
3260 2 : let (initdb_tar_zst_path, initdb_tar_zst) =
3261 2 : self::remote_timeline_client::download_initdb_tar_zst(
3262 2 : self.conf,
3263 2 : storage,
3264 2 : &self.tenant_shard_id,
3265 2 : &existing_initdb_timeline_id,
3266 2 : &self.cancel,
3267 2 : )
3268 704 : .await
3269 2 : .context("download initdb tar")?;
3270 :
3271 : scopeguard::defer! {
3272 : if let Err(e) = fs::remove_file(&initdb_tar_zst_path) {
3273 : error!("Failed to remove temporary initdb archive '{initdb_tar_zst_path}': {e}");
3274 : }
3275 : }
3276 :
3277 2 : let buf_read =
3278 2 : BufReader::with_capacity(remote_timeline_client::BUFFER_SIZE, initdb_tar_zst);
3279 2 : extract_zst_tarball(&pgdata_path, buf_read)
3280 11331 : .await
3281 2 : .context("extract initdb tar")?;
3282 : } else {
3283 : // Initialize a temporary instance to get bootstrap data; this creates a directory at the `pgdata_path` path
3284 0 : run_initdb(self.conf, &pgdata_path, pg_version, &self.cancel).await?;
3285 :
3286 : // Upload the created data dir to S3
3287 0 : if self.tenant_shard_id().is_shard_zero() {
3288 0 : self.upload_initdb(&timelines_path, &pgdata_path, &timeline_id)
3289 0 : .await?;
3290 0 : }
3291 : }
3292 2 : let pgdata_lsn = import_datadir::get_lsn_from_controlfile(&pgdata_path)?.align();
3293 2 :
3294 2 : // Import the contents of the data directory at the initial checkpoint
3295 2 : // LSN, and any WAL after that.
3296 2 : // The initdb LSN will be equal to last_record_lsn, which is set after the import.
3297 2 : // Because we know it upfront, we pass it to the metadata and avoid an Option or a dummy zero value.
3298 2 : let new_metadata = TimelineMetadata::new(
3299 2 : Lsn(0),
3300 2 : None,
3301 2 : None,
3302 2 : Lsn(0),
3303 2 : pgdata_lsn,
3304 2 : pgdata_lsn,
3305 2 : pg_version,
3306 2 : );
3307 2 : let raw_timeline = self
3308 2 : .prepare_new_timeline(
3309 2 : timeline_id,
3310 2 : &new_metadata,
3311 2 : timeline_create_guard,
3312 2 : pgdata_lsn,
3313 2 : None,
3314 2 : )
3315 0 : .await?;
3316 :
3317 2 : let tenant_shard_id = raw_timeline.owning_tenant.tenant_shard_id;
3318 2 : let unfinished_timeline = raw_timeline.raw_timeline()?;
3319 :
3320 2 : import_datadir::import_timeline_from_postgres_datadir(
3321 2 : unfinished_timeline,
3322 2 : &pgdata_path,
3323 2 : pgdata_lsn,
3324 2 : ctx,
3325 2 : )
3326 9529 : .await
3327 2 : .with_context(|| {
3328 0 : format!("Failed to import pgdatadir for timeline {tenant_shard_id}/{timeline_id}")
3329 2 : })?;
3330 :
3331 : // Flush the new layer files to disk before we make the timeline available to
3332 : // the outside world.
3333 : //
3334 : // Flush loop needs to be spawned in order to be able to flush.
3335 2 : unfinished_timeline.maybe_spawn_flush_loop();
3336 2 :
3337 2 : fail::fail_point!("before-checkpoint-new-timeline", |_| {
3338 0 : anyhow::bail!("failpoint before-checkpoint-new-timeline");
3339 2 : });
3340 :
3341 2 : unfinished_timeline
3342 2 : .freeze_and_flush()
3343 2 : .await
3344 2 : .with_context(|| {
3345 0 : format!(
3346 0 : "Failed to flush after pgdatadir import for timeline {tenant_shard_id}/{timeline_id}"
3347 0 : )
3348 2 : })?;
3349 :
3350 : // All done!
3351 2 : let timeline = raw_timeline.finish_creation()?;
3352 :
3353 2 : Ok(timeline)
3354 2 : }
3355 :
3356 : /// Call this before constructing a timeline, to build its required structures
3357 328 : fn build_timeline_resources(&self, timeline_id: TimelineId) -> TimelineResources {
3358 328 : let remote_client = if let Some(remote_storage) = self.remote_storage.as_ref() {
3359 328 : let remote_client = RemoteTimelineClient::new(
3360 328 : remote_storage.clone(),
3361 328 : self.deletion_queue_client.clone(),
3362 328 : self.conf,
3363 328 : self.tenant_shard_id,
3364 328 : timeline_id,
3365 328 : self.generation,
3366 328 : );
3367 328 : Some(remote_client)
3368 : } else {
3369 0 : None
3370 : };
3371 :
3372 328 : TimelineResources {
3373 328 : remote_client,
3374 328 : deletion_queue_client: self.deletion_queue_client.clone(),
3375 328 : timeline_get_throttle: self.timeline_get_throttle.clone(),
3376 328 : }
3377 328 : }
3378 :
3379 : /// Creates intermediate timeline structure and its files.
3380 : ///
3381 : /// An empty layer map is initialized, and new data and WAL can be imported starting
3382 : /// at 'disk_consistent_lsn'. After any initial data has been imported, call
3383 : /// `finish_creation` to insert the Timeline into the timelines map.
3384 328 : async fn prepare_new_timeline<'a>(
3385 328 : &'a self,
3386 328 : new_timeline_id: TimelineId,
3387 328 : new_metadata: &TimelineMetadata,
3388 328 : create_guard: TimelineCreateGuard<'a>,
3389 328 : start_lsn: Lsn,
3390 328 : ancestor: Option<Arc<Timeline>>,
3391 328 : ) -> anyhow::Result<UninitializedTimeline> {
3392 328 : let tenant_shard_id = self.tenant_shard_id;
3393 328 :
3394 328 : let resources = self.build_timeline_resources(new_timeline_id);
3395 328 : if let Some(remote_client) = &resources.remote_client {
3396 328 : remote_client.init_upload_queue_for_empty_remote(new_metadata)?;
3397 0 : }
3398 :
3399 328 : let timeline_struct = self
3400 328 : .create_timeline_struct(
3401 328 : new_timeline_id,
3402 328 : new_metadata,
3403 328 : ancestor,
3404 328 : resources,
3405 328 : CreateTimelineCause::Load,
3406 328 : )
3407 328 : .context("Failed to create timeline data structure")?;
3408 :
3409 328 : timeline_struct.init_empty_layer_map(start_lsn);
3410 :
3411 328 : if let Err(e) = self
3412 328 : .create_timeline_files(&create_guard.timeline_path)
3413 0 : .await
3414 : {
3415 0 : error!("Failed to create initial files for timeline {tenant_shard_id}/{new_timeline_id}, cleaning up: {e:?}");
3416 0 : cleanup_timeline_directory(create_guard);
3417 0 : return Err(e);
3418 328 : }
3419 328 :
3420 328 : debug!(
3421 0 : "Successfully created initial files for timeline {tenant_shard_id}/{new_timeline_id}"
3422 : );
3423 :
3424 328 : Ok(UninitializedTimeline::new(
3425 328 : self,
3426 328 : new_timeline_id,
3427 328 : Some((timeline_struct, create_guard)),
3428 328 : ))
3429 328 : }
3430 :
3431 328 : async fn create_timeline_files(&self, timeline_path: &Utf8Path) -> anyhow::Result<()> {
3432 328 : crashsafe::create_dir(timeline_path).context("Failed to create timeline directory")?;
3433 :
3434 328 : fail::fail_point!("after-timeline-dir-creation", |_| {
3435 0 : anyhow::bail!("failpoint after-timeline-dir-creation");
3436 328 : });
3437 :
3438 328 : Ok(())
3439 328 : }
3440 :
3441 : /// Get a guard that provides exclusive access to the timeline directory, preventing
3442 : /// concurrent attempts to create the same timeline.
3443 334 : fn create_timeline_create_guard(
3444 334 : &self,
3445 334 : timeline_id: TimelineId,
3446 334 : ) -> Result<TimelineCreateGuard, TimelineExclusionError> {
3447 334 : let tenant_shard_id = self.tenant_shard_id;
3448 334 :
3449 334 : let timeline_path = self.conf.timeline_path(&tenant_shard_id, &timeline_id);
3450 :
3451 334 : let create_guard = TimelineCreateGuard::new(self, timeline_id, timeline_path.clone())?;
3452 :
3453 : // At this stage, we have got exclusive access to in-memory state for this timeline ID
3454 : // for creation.
3455 : // A timeline directory should never exist on disk already:
3456 : // - a previous failed creation would have cleaned up after itself
3457 : // - a pageserver restart would clean up timeline directories that don't have valid remote state
3458 : //
3459 : // Therefore it is an unexpected internal error to encounter a timeline directory already existing here,
3460 : // this error may indicate a bug in cleanup on failed creations.
3461 332 : if timeline_path.exists() {
3462 0 : return Err(TimelineExclusionError::Other(anyhow::anyhow!(
3463 0 : "Timeline directory already exists! This is a bug."
3464 0 : )));
3465 332 : }
3466 332 :
3467 332 : Ok(create_guard)
3468 334 : }
3469 :
3470 : /// Gathers inputs from all of the timelines to produce a sizing model input.
3471 : ///
3472 : /// Future is cancellation safe. Only one calculation can be running at once per tenant.
3473 0 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
3474 : pub async fn gather_size_inputs(
3475 : &self,
3476 : // `max_retention_period` overrides the cutoff that is used to calculate the size
3477 : // (only if it is shorter than the real cutoff).
3478 : max_retention_period: Option<u64>,
3479 : cause: LogicalSizeCalculationCause,
3480 : cancel: &CancellationToken,
3481 : ctx: &RequestContext,
3482 : ) -> anyhow::Result<size::ModelInputs> {
3483 : let logical_sizes_at_once = self
3484 : .conf
3485 : .concurrent_tenant_size_logical_size_queries
3486 : .inner();
3487 :
3488 : // TODO: Having a single mutex block concurrent reads is not great for performance.
3489 : //
3490 : // But the only case where we need to run multiple of these at once is when we
3491 : // request a size for a tenant manually via API, while another background calculation
3492 : // is in progress (which is not a common case).
3493 : //
3494 : // See more on issue #2748, condensed out of the initial PR review.
3495 : let mut shared_cache = tokio::select! {
3496 : locked = self.cached_logical_sizes.lock() => locked,
3497 : _ = cancel.cancelled() => anyhow::bail!("cancelled"),
3498 : _ = self.cancel.cancelled() => anyhow::bail!("tenant is shutting down"),
3499 : };
3500 :
3501 : size::gather_inputs(
3502 : self,
3503 : logical_sizes_at_once,
3504 : max_retention_period,
3505 : &mut shared_cache,
3506 : cause,
3507 : cancel,
3508 : ctx,
3509 : )
3510 : .await
3511 : }
3512 :
3513 : /// Calculate synthetic tenant size and cache the result.
3514 : /// This is periodically called by background worker.
3515 : /// result is cached in tenant struct
3516 0 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
3517 : pub async fn calculate_synthetic_size(
3518 : &self,
3519 : cause: LogicalSizeCalculationCause,
3520 : cancel: &CancellationToken,
3521 : ctx: &RequestContext,
3522 : ) -> anyhow::Result<u64> {
3523 : let inputs = self.gather_size_inputs(None, cause, cancel, ctx).await?;
3524 :
3525 : let size = inputs.calculate()?;
3526 :
3527 : self.set_cached_synthetic_size(size);
3528 :
3529 : Ok(size)
3530 : }
3531 :
3532 : /// Cache given synthetic size and update the metric value
3533 0 : pub fn set_cached_synthetic_size(&self, size: u64) {
3534 0 : self.cached_synthetic_tenant_size
3535 0 : .store(size, Ordering::Relaxed);
3536 0 :
3537 0 : // Only shard zero should be calculating synthetic sizes
3538 0 : debug_assert!(self.shard_identity.is_shard_zero());
3539 :
3540 0 : TENANT_SYNTHETIC_SIZE_METRIC
3541 0 : .get_metric_with_label_values(&[&self.tenant_shard_id.tenant_id.to_string()])
3542 0 : .unwrap()
3543 0 : .set(size);
3544 0 : }
3545 :
3546 0 : pub fn cached_synthetic_size(&self) -> u64 {
3547 0 : self.cached_synthetic_tenant_size.load(Ordering::Relaxed)
3548 0 : }
3549 :
3550 : /// Flush any in-progress layers, schedule uploads, and wait for uploads to complete.
3551 : ///
3552 : /// This function can take a long time: callers should wrap it in a timeout if calling
3553 : /// from an external API handler.
3554 : ///
3555 : /// Cancel-safety: cancelling this function may leave I/O running, but such I/O is
3556 : /// still bounded by tenant/timeline shutdown.
3557 0 : #[tracing::instrument(skip_all)]
3558 : pub(crate) async fn flush_remote(&self) -> anyhow::Result<()> {
3559 : let timelines = self.timelines.lock().unwrap().clone();
3560 :
3561 0 : async fn flush_timeline(_gate: GateGuard, timeline: Arc<Timeline>) -> anyhow::Result<()> {
3562 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Flushing...");
3563 0 : timeline.freeze_and_flush().await?;
3564 0 : tracing::info!(timeline_id=%timeline.timeline_id, "Waiting for uploads...");
3565 0 : if let Some(client) = &timeline.remote_client {
3566 0 : client.wait_completion().await?;
3567 0 : }
3568 :
3569 0 : Ok(())
3570 0 : }
3571 :
3572 : // We do not use a JoinSet for these tasks, because we don't want them to be
3573 : // aborted when this function's future is cancelled: they should stay alive
3574 : // holding their GateGuard until they complete, to ensure their I/Os complete
3575 : // before Timeline shutdown completes.
3576 : let mut results = FuturesUnordered::new();
3577 :
3578 : for (_timeline_id, timeline) in timelines {
3579 : // Run each timeline's flush in a task holding the timeline's gate: this
3580 : // means that if this function's future is cancelled, the Timeline shutdown
3581 : // will still wait for any I/O in here to complete.
3582 : let Ok(gate) = timeline.gate.enter() else {
3583 : continue;
3584 : };
3585 0 : let jh = tokio::task::spawn(async move { flush_timeline(gate, timeline).await });
3586 : results.push(jh);
3587 : }
3588 :
3589 : while let Some(r) = results.next().await {
3590 : if let Err(e) = r {
3591 : if !e.is_cancelled() && !e.is_panic() {
3592 : tracing::error!("unexpected join error: {e:?}");
3593 : }
3594 : }
3595 : }
3596 :
3597 : // The flushes we did above were just writes, but the Tenant might have had
3598 : // pending deletions as well from recent compaction/gc: we want to flush those
3599 : // as well. This requires flushing the global delete queue. This is cheap
3600 : // because it's typically a no-op.
3601 : match self.deletion_queue_client.flush_execute().await {
3602 : Ok(_) => {}
3603 : Err(DeletionQueueError::ShuttingDown) => {}
3604 : }
3605 :
3606 : Ok(())
3607 : }
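 :
 : // A standalone sketch (not pageserver code; run inside an async context) of why a
 : // `FuturesUnordered` of `JoinHandle`s is used above instead of a `JoinSet`: spawned tasks keep
 : // running even if their handles are dropped, so cancelling the future that drains the results
 : // does not abort the in-flight flushes.
 : //
 : //     use futures::stream::{FuturesUnordered, StreamExt};
 : //
 : //     let mut results = FuturesUnordered::new();
 : //     for i in 0..3u64 {
 : //         results.push(tokio::spawn(async move { i * 2 }));
 : //     }
 : //     while let Some(r) = results.next().await {
 : //         match r {
 : //             Ok(v) => println!("task finished: {v}"),
 : //             Err(e) if e.is_cancelled() || e.is_panic() => println!("task aborted: {e}"),
 : //             Err(e) => println!("unexpected join error: {e:?}"),
 : //         }
 : //     }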
3608 :
3609 0 : pub(crate) fn get_tenant_conf(&self) -> TenantConfOpt {
3610 0 : self.tenant_conf.load().tenant_conf.clone()
3611 0 : }
3612 : }
3613 :
3614 : /// Create the cluster temporarily in the 'initdb_target_dir' directory inside the repository
3615 : /// to get bootstrap data for timeline initialization.
3616 0 : async fn run_initdb(
3617 0 : conf: &'static PageServerConf,
3618 0 : initdb_target_dir: &Utf8Path,
3619 0 : pg_version: u32,
3620 0 : cancel: &CancellationToken,
3621 0 : ) -> Result<(), InitdbError> {
3622 0 : let initdb_bin_path = conf
3623 0 : .pg_bin_dir(pg_version)
3624 0 : .map_err(InitdbError::Other)?
3625 0 : .join("initdb");
3626 0 : let initdb_lib_dir = conf.pg_lib_dir(pg_version).map_err(InitdbError::Other)?;
3627 0 : info!(
3628 0 : "running {} in {}, libdir: {}",
3629 : initdb_bin_path, initdb_target_dir, initdb_lib_dir,
3630 : );
3631 :
3632 0 : let _permit = INIT_DB_SEMAPHORE.acquire().await;
3633 :
3634 0 : let initdb_command = tokio::process::Command::new(&initdb_bin_path)
3635 0 : .args(["-D", initdb_target_dir.as_ref()])
3636 0 : .args(["-U", &conf.superuser])
3637 0 : .args(["-E", "utf8"])
3638 0 : .arg("--no-instructions")
3639 0 : .arg("--no-sync")
3640 0 : .env_clear()
3641 0 : .env("LD_LIBRARY_PATH", &initdb_lib_dir)
3642 0 : .env("DYLD_LIBRARY_PATH", &initdb_lib_dir)
3643 0 : .stdin(std::process::Stdio::null())
3644 0 : // initdb's stdout output is the same every time, we don't need it
3645 0 : .stdout(std::process::Stdio::null())
3646 0 : // we would be interested in the stderr output, if there was any
3647 0 : .stderr(std::process::Stdio::piped())
3648 0 : .spawn()?;
3649 :
3650 : // Ideally we'd select here with the cancellation token, but the problem is that
3651 : // we can't safely terminate initdb: it launches processes of its own, and killing
3652 : // initdb doesn't kill them. After we return from this function, we want the target
3653 : // directory to be safe to clean up.
3654 : // See https://github.com/neondatabase/neon/issues/6385
3655 0 : let initdb_output = initdb_command.wait_with_output().await?;
3656 0 : if !initdb_output.status.success() {
3657 0 : return Err(InitdbError::Failed(
3658 0 : initdb_output.status,
3659 0 : initdb_output.stderr,
3660 0 : ));
3661 0 : }
3662 0 :
3663 0 : // This isn't true cancellation support, see above. Still return an error to
3664 0 : // excercise the cancellation code path.
3665 0 : if cancel.is_cancelled() {
3666 0 : return Err(InitdbError::Cancelled);
3667 0 : }
3668 0 :
3669 0 : Ok(())
3670 0 : }
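 :
 : // Roughly, the invocation above is equivalent to running (paths are placeholders for the values
 : // computed from `conf`):
 : //
 : //     LD_LIBRARY_PATH=<pg_lib_dir> <pg_bin_dir>/initdb \
 : //         -D <initdb_target_dir> -U <superuser> -E utf8 --no-instructions --no-sync
 : //
 : // with a cleared environment, stdin and stdout discarded, and only stderr captured for error
 : // reporting.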
3671 :
3672 : /// Dump contents of a layer file to stdout.
3673 0 : pub async fn dump_layerfile_from_path(
3674 0 : path: &Utf8Path,
3675 0 : verbose: bool,
3676 0 : ctx: &RequestContext,
3677 0 : ) -> anyhow::Result<()> {
3678 : use std::os::unix::fs::FileExt;
3679 :
3680 : // All layer files start with a two-byte "magic" value, to identify the kind of
3681 : // file.
3682 0 : let file = File::open(path)?;
3683 0 : let mut header_buf = [0u8; 2];
3684 0 : file.read_exact_at(&mut header_buf, 0)?;
3685 :
3686 0 : match u16::from_be_bytes(header_buf) {
3687 : crate::IMAGE_FILE_MAGIC => {
3688 0 : ImageLayer::new_for_path(path, file)?
3689 0 : .dump(verbose, ctx)
3690 0 : .await?
3691 : }
3692 : crate::DELTA_FILE_MAGIC => {
3693 0 : DeltaLayer::new_for_path(path, file)?
3694 0 : .dump(verbose, ctx)
3695 0 : .await?
3696 : }
3697 0 : magic => bail!("unrecognized magic identifier: {:?}", magic),
3698 : }
3699 :
3700 0 : Ok(())
3701 0 : }
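 :
 : // A standalone sketch (not pageserver code; the function name is made up for the example) of the
 : // magic-byte dispatch above: read the first two bytes of a layer file and decode them as a
 : // big-endian u16 before matching on the file kind.
 : //
 : //     use std::fs::File;
 : //     use std::os::unix::fs::FileExt;
 : //
 : //     fn read_layer_magic(path: &str) -> std::io::Result<u16> {
 : //         let file = File::open(path)?;
 : //         let mut header = [0u8; 2];
 : //         file.read_exact_at(&mut header, 0)?;
 : //         Ok(u16::from_be_bytes(header))
 : //     }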
3702 :
3703 : #[cfg(test)]
3704 : pub(crate) mod harness {
3705 : use bytes::{Bytes, BytesMut};
3706 : use once_cell::sync::OnceCell;
3707 : use pageserver_api::models::ShardParameters;
3708 : use pageserver_api::shard::ShardIndex;
3709 : use utils::logging;
3710 :
3711 : use crate::deletion_queue::mock::MockDeletionQueue;
3712 : use crate::walredo::apply_neon;
3713 : use crate::{repository::Key, walrecord::NeonWalRecord};
3714 :
3715 : use super::*;
3716 : use hex_literal::hex;
3717 : use utils::id::TenantId;
3718 :
3719 : pub const TIMELINE_ID: TimelineId =
3720 : TimelineId::from_array(hex!("11223344556677881122334455667788"));
3721 : pub const NEW_TIMELINE_ID: TimelineId =
3722 : TimelineId::from_array(hex!("AA223344556677881122334455667788"));
3723 :
3724 : /// Convenience function to create a page image with the given string as the only content
3725 5000287 : pub fn test_img(s: &str) -> Bytes {
3726 5000287 : let mut buf = BytesMut::new();
3727 5000287 : buf.extend_from_slice(s.as_bytes());
3728 5000287 : buf.resize(64, 0);
3729 5000287 :
3730 5000287 : buf.freeze()
3731 5000287 : }
3732 :
3733 : impl From<TenantConf> for TenantConfOpt {
3734 118 : fn from(tenant_conf: TenantConf) -> Self {
3735 118 : Self {
3736 118 : checkpoint_distance: Some(tenant_conf.checkpoint_distance),
3737 118 : checkpoint_timeout: Some(tenant_conf.checkpoint_timeout),
3738 118 : compaction_target_size: Some(tenant_conf.compaction_target_size),
3739 118 : compaction_period: Some(tenant_conf.compaction_period),
3740 118 : compaction_threshold: Some(tenant_conf.compaction_threshold),
3741 118 : compaction_algorithm: Some(tenant_conf.compaction_algorithm),
3742 118 : gc_horizon: Some(tenant_conf.gc_horizon),
3743 118 : gc_period: Some(tenant_conf.gc_period),
3744 118 : image_creation_threshold: Some(tenant_conf.image_creation_threshold),
3745 118 : pitr_interval: Some(tenant_conf.pitr_interval),
3746 118 : walreceiver_connect_timeout: Some(tenant_conf.walreceiver_connect_timeout),
3747 118 : lagging_wal_timeout: Some(tenant_conf.lagging_wal_timeout),
3748 118 : max_lsn_wal_lag: Some(tenant_conf.max_lsn_wal_lag),
3749 118 : trace_read_requests: Some(tenant_conf.trace_read_requests),
3750 118 : eviction_policy: Some(tenant_conf.eviction_policy),
3751 118 : min_resident_size_override: tenant_conf.min_resident_size_override,
3752 118 : evictions_low_residence_duration_metric_threshold: Some(
3753 118 : tenant_conf.evictions_low_residence_duration_metric_threshold,
3754 118 : ),
3755 118 : heatmap_period: Some(tenant_conf.heatmap_period),
3756 118 : lazy_slru_download: Some(tenant_conf.lazy_slru_download),
3757 118 : timeline_get_throttle: Some(tenant_conf.timeline_get_throttle),
3758 118 : image_layer_creation_check_threshold: Some(
3759 118 : tenant_conf.image_layer_creation_check_threshold,
3760 118 : ),
3761 118 : switch_aux_file_policy: Some(tenant_conf.switch_aux_file_policy),
3762 118 : }
3763 118 : }
3764 : }
3765 :
3766 : pub struct TenantHarness {
3767 : pub conf: &'static PageServerConf,
3768 : pub tenant_conf: TenantConf,
3769 : pub tenant_shard_id: TenantShardId,
3770 : pub generation: Generation,
3771 : pub shard: ShardIndex,
3772 : pub remote_storage: GenericRemoteStorage,
3773 : pub remote_fs_dir: Utf8PathBuf,
3774 : pub deletion_queue: MockDeletionQueue,
3775 : }
3776 :
3777 : static LOG_HANDLE: OnceCell<()> = OnceCell::new();
3778 :
3779 124 : pub(crate) fn setup_logging() {
3780 124 : LOG_HANDLE.get_or_init(|| {
3781 120 : logging::init(
3782 120 : logging::LogFormat::Test,
3783 120 : // enable it in case the tests exercise code paths that use
3784 120 : // debug_assert_current_span_has_tenant_and_timeline_id
3785 120 : logging::TracingErrorLayerEnablement::EnableWithRustLogFilter,
3786 120 : logging::Output::Stdout,
3787 120 : )
3788 120 : .expect("Failed to init test logging")
3789 124 : });
3790 124 : }
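 :
 : // A standalone sketch (not pageserver code; the function name is made up for the example) of the
 : // `OnceCell::get_or_init` pattern used above: the closure runs at most once, even when many tests
 : // call the function concurrently.
 : //
 : //     use once_cell::sync::OnceCell;
 : //
 : //     static INIT: OnceCell<()> = OnceCell::new();
 : //
 : //     fn init_once() {
 : //         INIT.get_or_init(|| {
 : //             println!("initialising exactly once");
 : //         });
 : //     }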
3791 :
3792 : impl TenantHarness {
3793 118 : pub fn create_custom(
3794 118 : test_name: &'static str,
3795 118 : tenant_conf: TenantConf,
3796 118 : ) -> anyhow::Result<Self> {
3797 118 : setup_logging();
3798 118 :
3799 118 : let repo_dir = PageServerConf::test_repo_dir(test_name);
3800 118 : let _ = fs::remove_dir_all(&repo_dir);
3801 118 : fs::create_dir_all(&repo_dir)?;
3802 :
3803 118 : let conf = PageServerConf::dummy_conf(repo_dir);
3804 118 : // Make a static copy of the config. This can never be freed, but that's
3805 118 : // OK in a test.
3806 118 : let conf: &'static PageServerConf = Box::leak(Box::new(conf));
3807 118 :
3808 118 : let tenant_id = TenantId::generate();
3809 118 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
3810 118 : fs::create_dir_all(conf.tenant_path(&tenant_shard_id))?;
3811 118 : fs::create_dir_all(conf.timelines_path(&tenant_shard_id))?;
3812 :
3813 : use remote_storage::{RemoteStorageConfig, RemoteStorageKind};
3814 118 : let remote_fs_dir = conf.workdir.join("localfs");
3815 118 : std::fs::create_dir_all(&remote_fs_dir).unwrap();
3816 118 : let config = RemoteStorageConfig {
3817 118 : storage: RemoteStorageKind::LocalFs(remote_fs_dir.clone()),
3818 118 : timeout: RemoteStorageConfig::DEFAULT_TIMEOUT,
3819 118 : };
3820 118 : let remote_storage = GenericRemoteStorage::from_config(&config).unwrap();
3821 118 : let deletion_queue = MockDeletionQueue::new(Some(remote_storage.clone()));
3822 118 :
3823 118 : Ok(Self {
3824 118 : conf,
3825 118 : tenant_conf,
3826 118 : tenant_shard_id,
3827 118 : generation: Generation::new(0xdeadbeef),
3828 118 : shard: ShardIndex::unsharded(),
3829 118 : remote_storage,
3830 118 : remote_fs_dir,
3831 118 : deletion_queue,
3832 118 : })
3833 118 : }
3834 :
3835 116 : pub fn create(test_name: &'static str) -> anyhow::Result<Self> {
3836 116 : // Disable automatic GC and compaction to make the unit tests more deterministic.
3837 116 : // The tests perform them manually if needed.
3838 116 : let tenant_conf = TenantConf {
3839 116 : gc_period: Duration::ZERO,
3840 116 : compaction_period: Duration::ZERO,
3841 116 : ..TenantConf::default()
3842 116 : };
3843 116 :
3844 116 : Self::create_custom(test_name, tenant_conf)
3845 116 : }
3846 :
3847 18 : pub fn span(&self) -> tracing::Span {
3848 18 : info_span!("TenantHarness", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug())
3849 18 : }
3850 :
3851 118 : pub(crate) async fn load(&self) -> (Arc<Tenant>, RequestContext) {
3852 118 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
3853 118 : (
3854 118 : self.do_try_load(&ctx)
3855 472 : .await
3856 118 : .expect("failed to load test tenant"),
3857 118 : ctx,
3858 118 : )
3859 118 : }
3860 :
3861 236 : #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
3862 : pub(crate) async fn do_try_load(
3863 : &self,
3864 : ctx: &RequestContext,
3865 : ) -> anyhow::Result<Arc<Tenant>> {
3866 : let walredo_mgr = Arc::new(WalRedoManager::from(TestRedoManager));
3867 :
3868 : let tenant = Arc::new(Tenant::new(
3869 : TenantState::Loading,
3870 : self.conf,
3871 : AttachedTenantConf::try_from(LocationConf::attached_single(
3872 : TenantConfOpt::from(self.tenant_conf.clone()),
3873 : self.generation,
3874 : &ShardParameters::default(),
3875 : ))
3876 : .unwrap(),
3877 : // This is a legacy/test code path: sharding isn't supported here.
3878 : ShardIdentity::unsharded(),
3879 : Some(walredo_mgr),
3880 : self.tenant_shard_id,
3881 : Some(self.remote_storage.clone()),
3882 : self.deletion_queue.new_client(),
3883 : ));
3884 :
3885 : let preload = tenant
3886 : .preload(&self.remote_storage, CancellationToken::new())
3887 : .await?;
3888 : tenant.attach(Some(preload), SpawnMode::Eager, ctx).await?;
3889 :
3890 : tenant.state.send_replace(TenantState::Active);
3891 : for timeline in tenant.timelines.lock().unwrap().values() {
3892 : timeline.set_state(TimelineState::Active);
3893 : }
3894 : Ok(tenant)
3895 : }
3896 :
3897 2 : pub fn timeline_path(&self, timeline_id: &TimelineId) -> Utf8PathBuf {
3898 2 : self.conf.timeline_path(&self.tenant_shard_id, timeline_id)
3899 2 : }
3900 : }
3901 :
3902 : // Mock WAL redo manager that doesn't do much
3903 : pub(crate) struct TestRedoManager;
3904 :
3905 : impl TestRedoManager {
3906 : /// # Cancel-Safety
3907 : ///
3908 : /// This method is cancellation-safe.
3909 6 : pub async fn request_redo(
3910 6 : &self,
3911 6 : key: Key,
3912 6 : lsn: Lsn,
3913 6 : base_img: Option<(Lsn, Bytes)>,
3914 6 : records: Vec<(Lsn, NeonWalRecord)>,
3915 6 : _pg_version: u32,
3916 6 : ) -> anyhow::Result<Bytes> {
3917 10 : let records_neon = records.iter().all(|r| apply_neon::can_apply_in_neon(&r.1));
3918 6 : if records_neon {
3919 : // For Neon wal records, we can decode without spawning postgres, so do so.
3920 6 : let base_img = base_img.expect("Neon WAL redo requires base image").1;
3921 6 : let mut page = BytesMut::new();
3922 6 : page.extend_from_slice(&base_img);
3923 16 : for (_record_lsn, record) in records {
3924 10 : apply_neon::apply_in_neon(&record, key, &mut page)?;
3925 : }
3926 6 : Ok(page.freeze())
3927 : } else {
3928 : // We never spawn a postgres walredo process in unit tests: just log what we might have done.
3929 0 : let s = format!(
3930 0 : "redo for {} to get to {}, with {} and {} records",
3931 0 : key,
3932 0 : lsn,
3933 0 : if base_img.is_some() {
3934 0 : "base image"
3935 : } else {
3936 0 : "no base image"
3937 : },
3938 0 : records.len()
3939 0 : );
3940 0 : println!("{s}");
3941 0 :
3942 0 : Ok(test_img(&s))
3943 : }
3944 6 : }
3945 : }
3946 : }
3947 :
3948 : #[cfg(test)]
3949 : mod tests {
3950 : use std::collections::BTreeMap;
3951 :
3952 : use super::*;
3953 : use crate::keyspace::KeySpaceAccum;
3954 : use crate::repository::{Key, Value};
3955 : use crate::tenant::harness::*;
3956 : use crate::tenant::timeline::CompactFlags;
3957 : use crate::DEFAULT_PG_VERSION;
3958 : use bytes::BytesMut;
3959 : use hex_literal::hex;
3960 : use pageserver_api::key::{AUX_KEY_PREFIX, NON_INHERITED_RANGE};
3961 : use pageserver_api::keyspace::KeySpace;
3962 : use pageserver_api::models::CompactionAlgorithm;
3963 : use rand::{thread_rng, Rng};
3964 : use tests::storage_layer::ValuesReconstructState;
3965 : use tests::timeline::{GetVectoredError, ShutdownMode};
3966 :
3967 : static TEST_KEY: Lazy<Key> =
3968 18 : Lazy::new(|| Key::from_slice(&hex!("010000000033333333444444445500000001")));
3969 :
3970 : #[tokio::test]
3971 2 : async fn test_basic() -> anyhow::Result<()> {
3972 8 : let (tenant, ctx) = TenantHarness::create("test_basic")?.load().await;
3973 2 : let tline = tenant
3974 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
3975 5 : .await?;
3976 2 :
3977 2 : let mut writer = tline.writer().await;
3978 2 : writer
3979 2 : .put(
3980 2 : *TEST_KEY,
3981 2 : Lsn(0x10),
3982 2 : &Value::Image(test_img("foo at 0x10")),
3983 2 : &ctx,
3984 2 : )
3985 2 : .await?;
3986 2 : writer.finish_write(Lsn(0x10));
3987 2 : drop(writer);
3988 2 :
3989 2 : let mut writer = tline.writer().await;
3990 2 : writer
3991 2 : .put(
3992 2 : *TEST_KEY,
3993 2 : Lsn(0x20),
3994 2 : &Value::Image(test_img("foo at 0x20")),
3995 2 : &ctx,
3996 2 : )
3997 2 : .await?;
3998 2 : writer.finish_write(Lsn(0x20));
3999 2 : drop(writer);
4000 2 :
4001 2 : assert_eq!(
4002 2 : tline.get(*TEST_KEY, Lsn(0x10), &ctx).await?,
4003 2 : test_img("foo at 0x10")
4004 2 : );
4005 2 : assert_eq!(
4006 2 : tline.get(*TEST_KEY, Lsn(0x1f), &ctx).await?,
4007 2 : test_img("foo at 0x10")
4008 2 : );
4009 2 : assert_eq!(
4010 2 : tline.get(*TEST_KEY, Lsn(0x20), &ctx).await?,
4011 2 : test_img("foo at 0x20")
4012 2 : );
4013 2 :
4014 2 : Ok(())
4015 2 : }
4016 :
4017 : #[tokio::test]
4018 2 : async fn no_duplicate_timelines() -> anyhow::Result<()> {
4019 2 : let (tenant, ctx) = TenantHarness::create("no_duplicate_timelines")?
4020 2 : .load()
4021 8 : .await;
4022 2 : let _ = tenant
4023 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4024 6 : .await?;
4025 2 :
4026 2 : match tenant
4027 2 : .create_empty_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4028 2 : .await
4029 2 : {
4030 2 : Ok(_) => panic!("duplicate timeline creation should fail"),
4031 2 : Err(e) => assert_eq!(e.to_string(), "Already exists".to_string()),
4032 2 : }
4033 2 :
4034 2 : Ok(())
4035 2 : }
4036 :
4037 : /// Convenience function to create a page image with given string as the only content
4038 10 : pub fn test_value(s: &str) -> Value {
4039 10 : let mut buf = BytesMut::new();
4040 10 : buf.extend_from_slice(s.as_bytes());
4041 10 : Value::Image(buf.freeze())
4042 10 : }
4043 :
4044 : ///
4045 : /// Test branch creation
4046 : ///
4047 : #[tokio::test]
4048 2 : async fn test_branch() -> anyhow::Result<()> {
4049 2 : use std::str::from_utf8;
4050 2 :
4051 8 : let (tenant, ctx) = TenantHarness::create("test_branch")?.load().await;
4052 2 : let tline = tenant
4053 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4054 6 : .await?;
4055 2 : let mut writer = tline.writer().await;
4056 2 :
4057 2 : #[allow(non_snake_case)]
4058 2 : let TEST_KEY_A: Key = Key::from_hex("110000000033333333444444445500000001").unwrap();
4059 2 : #[allow(non_snake_case)]
4060 2 : let TEST_KEY_B: Key = Key::from_hex("110000000033333333444444445500000002").unwrap();
4061 2 :
4062 2 : // Insert a value on the timeline
4063 2 : writer
4064 2 : .put(TEST_KEY_A, Lsn(0x20), &test_value("foo at 0x20"), &ctx)
4065 2 : .await?;
4066 2 : writer
4067 2 : .put(TEST_KEY_B, Lsn(0x20), &test_value("foobar at 0x20"), &ctx)
4068 2 : .await?;
4069 2 : writer.finish_write(Lsn(0x20));
4070 2 :
4071 2 : writer
4072 2 : .put(TEST_KEY_A, Lsn(0x30), &test_value("foo at 0x30"), &ctx)
4073 2 : .await?;
4074 2 : writer.finish_write(Lsn(0x30));
4075 2 : writer
4076 2 : .put(TEST_KEY_A, Lsn(0x40), &test_value("foo at 0x40"), &ctx)
4077 2 : .await?;
4078 2 : writer.finish_write(Lsn(0x40));
4079 2 :
4080 2 : //assert_current_logical_size(&tline, Lsn(0x40));
4081 2 :
4082 2 : // Branch the history, modify relation differently on the new timeline
4083 2 : tenant
4084 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x30)), &ctx)
4085 2 : .await?;
4086 2 : let newtline = tenant
4087 2 : .get_timeline(NEW_TIMELINE_ID, true)
4088 2 : .expect("Should have a local timeline");
4089 2 : let mut new_writer = newtline.writer().await;
4090 2 : new_writer
4091 2 : .put(TEST_KEY_A, Lsn(0x40), &test_value("bar at 0x40"), &ctx)
4092 2 : .await?;
4093 2 : new_writer.finish_write(Lsn(0x40));
4094 2 :
4095 2 : // Check page contents on both branches
4096 2 : assert_eq!(
4097 2 : from_utf8(&tline.get(TEST_KEY_A, Lsn(0x40), &ctx).await?)?,
4098 2 : "foo at 0x40"
4099 2 : );
4100 2 : assert_eq!(
4101 2 : from_utf8(&newtline.get(TEST_KEY_A, Lsn(0x40), &ctx).await?)?,
4102 2 : "bar at 0x40"
4103 2 : );
4104 2 : assert_eq!(
4105 2 : from_utf8(&newtline.get(TEST_KEY_B, Lsn(0x40), &ctx).await?)?,
4106 2 : "foobar at 0x20"
4107 2 : );
4108 2 :
4109 2 : //assert_current_logical_size(&tline, Lsn(0x40));
4110 2 :
4111 2 : Ok(())
4112 2 : }
4113 :
4114 20 : async fn make_some_layers(
4115 20 : tline: &Timeline,
4116 20 : start_lsn: Lsn,
4117 20 : ctx: &RequestContext,
4118 20 : ) -> anyhow::Result<()> {
4119 20 : let mut lsn = start_lsn;
4120 : {
4121 20 : let mut writer = tline.writer().await;
4122 : // Create a relation on the timeline
4123 20 : writer
4124 20 : .put(
4125 20 : *TEST_KEY,
4126 20 : lsn,
4127 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4128 20 : ctx,
4129 20 : )
4130 10 : .await?;
4131 20 : writer.finish_write(lsn);
4132 20 : lsn += 0x10;
4133 20 : writer
4134 20 : .put(
4135 20 : *TEST_KEY,
4136 20 : lsn,
4137 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4138 20 : ctx,
4139 20 : )
4140 0 : .await?;
4141 20 : writer.finish_write(lsn);
4142 20 : lsn += 0x10;
4143 20 : }
4144 20 : tline.freeze_and_flush().await?;
4145 : {
4146 20 : let mut writer = tline.writer().await;
4147 20 : writer
4148 20 : .put(
4149 20 : *TEST_KEY,
4150 20 : lsn,
4151 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4152 20 : ctx,
4153 20 : )
4154 10 : .await?;
4155 20 : writer.finish_write(lsn);
4156 20 : lsn += 0x10;
4157 20 : writer
4158 20 : .put(
4159 20 : *TEST_KEY,
4160 20 : lsn,
4161 20 : &Value::Image(test_img(&format!("foo at {}", lsn))),
4162 20 : ctx,
4163 20 : )
4164 0 : .await?;
4165 20 : writer.finish_write(lsn);
4166 20 : }
4167 20 : tline.freeze_and_flush().await
4168 20 : }
4169 :
4170 : #[tokio::test]
4171 2 : async fn test_prohibit_branch_creation_on_garbage_collected_data() -> anyhow::Result<()> {
4172 2 : let (tenant, ctx) =
4173 2 : TenantHarness::create("test_prohibit_branch_creation_on_garbage_collected_data")?
4174 2 : .load()
4175 8 : .await;
4176 2 : let tline = tenant
4177 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4178 6 : .await?;
4179 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4180 2 :
4181 2 : // this removes layers before lsn 40 (50 minus 10), so there are two remaining layers, image and delta for 31-50
4182 2 : // FIXME: this doesn't actually remove any layer currently, given how flushing
4183 2 : // and compaction work. But it does set the 'cutoff' point so that the cross check
4184 2 : // below should fail.
4185 2 : tenant
4186 2 : .gc_iteration(
4187 2 : Some(TIMELINE_ID),
4188 2 : 0x10,
4189 2 : Duration::ZERO,
4190 2 : &CancellationToken::new(),
4191 2 : &ctx,
4192 2 : )
4193 2 : .await?;
4194 2 :
4195 2 : // try to branch at lsn 25, should fail because we already garbage collected the data
4196 2 : match tenant
4197 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x25)), &ctx)
4198 2 : .await
4199 2 : {
4200 2 : Ok(_) => panic!("branching should have failed"),
4201 2 : Err(err) => {
4202 2 : let CreateTimelineError::AncestorLsn(err) = err else {
4203 2 : panic!("wrong error type")
4204 2 : };
4205 2 : assert!(err.to_string().contains("invalid branch start lsn"));
4206 2 : assert!(err
4207 2 : .source()
4208 2 : .unwrap()
4209 2 : .to_string()
4210 2 : .contains("we might've already garbage collected needed data"))
4211 2 : }
4212 2 : }
4213 2 :
4214 2 : Ok(())
4215 2 : }
4216 :
4217 : #[tokio::test]
4218 2 : async fn test_prohibit_branch_creation_on_pre_initdb_lsn() -> anyhow::Result<()> {
4219 2 : let (tenant, ctx) =
4220 2 : TenantHarness::create("test_prohibit_branch_creation_on_pre_initdb_lsn")?
4221 2 : .load()
4222 8 : .await;
4223 2 :
4224 2 : let tline = tenant
4225 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x50), DEFAULT_PG_VERSION, &ctx)
4226 6 : .await?;
4227 2 : // try to branch at lsn 0x25, should fail because initdb lsn is 0x50
4228 2 : match tenant
4229 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x25)), &ctx)
4230 2 : .await
4231 2 : {
4232 2 : Ok(_) => panic!("branching should have failed"),
4233 2 : Err(err) => {
4234 2 : let CreateTimelineError::AncestorLsn(err) = err else {
4235 2 : panic!("wrong error type");
4236 2 : };
4237 2 : assert!(&err.to_string().contains("invalid branch start lsn"));
4238 2 : assert!(&err
4239 2 : .source()
4240 2 : .unwrap()
4241 2 : .to_string()
4242 2 : .contains("is earlier than latest GC horizon"));
4243 2 : }
4244 2 : }
4245 2 :
4246 2 : Ok(())
4247 2 : }
4248 :
4249 : /*
4250 : // FIXME: This currently fails to error out. Calling GC doesn't currently
4251 : // remove the old value, we'd need to work a little harder
4252 : #[tokio::test]
4253 : async fn test_prohibit_get_for_garbage_collected_data() -> anyhow::Result<()> {
4254 : let repo =
4255 : RepoHarness::create("test_prohibit_get_for_garbage_collected_data")?
4256 : .load();
4257 :
4258 : let tline = repo.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
4259 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4260 :
4261 : repo.gc_iteration(Some(TIMELINE_ID), 0x10, Duration::ZERO)?;
4262 : let latest_gc_cutoff_lsn = tline.get_latest_gc_cutoff_lsn();
4263 : assert!(*latest_gc_cutoff_lsn > Lsn(0x25));
4264 : match tline.get(*TEST_KEY, Lsn(0x25)) {
4265 : Ok(_) => panic!("request for page should have failed"),
4266 : Err(err) => assert!(err.to_string().contains("not found at")),
4267 : }
4268 : Ok(())
4269 : }
4270 : */
4271 :
4272 : #[tokio::test]
4273 2 : async fn test_get_branchpoints_from_an_inactive_timeline() -> anyhow::Result<()> {
4274 2 : let (tenant, ctx) =
4275 2 : TenantHarness::create("test_get_branchpoints_from_an_inactive_timeline")?
4276 2 : .load()
4277 8 : .await;
4278 2 : let tline = tenant
4279 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4280 6 : .await?;
4281 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4282 2 :
4283 2 : tenant
4284 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4285 2 : .await?;
4286 2 : let newtline = tenant
4287 2 : .get_timeline(NEW_TIMELINE_ID, true)
4288 2 : .expect("Should have a local timeline");
4289 2 :
4290 6 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
4291 2 :
4292 2 : tline.set_broken("test".to_owned());
4293 2 :
4294 2 : tenant
4295 2 : .gc_iteration(
4296 2 : Some(TIMELINE_ID),
4297 2 : 0x10,
4298 2 : Duration::ZERO,
4299 2 : &CancellationToken::new(),
4300 2 : &ctx,
4301 2 : )
4302 2 : .await?;
4303 2 :
4304 2 : // The branchpoints should contain all timelines, even ones marked
4305 2 : // as Broken.
4306 2 : {
4307 2 : let branchpoints = &tline.gc_info.read().unwrap().retain_lsns;
4308 2 : assert_eq!(branchpoints.len(), 1);
4309 2 : assert_eq!(branchpoints[0], Lsn(0x40));
4310 2 : }
4311 2 :
4312 2 : // You can read the key from the child branch even though the parent is
4313 2 : // Broken, as long as you don't need to access data from the parent.
4314 2 : assert_eq!(
4315 4 : newtline.get(*TEST_KEY, Lsn(0x70), &ctx).await?,
4316 2 : test_img(&format!("foo at {}", Lsn(0x70)))
4317 2 : );
4318 2 :
4319 2 : // This needs to traverse to the parent, and fails.
4320 2 : let err = newtline.get(*TEST_KEY, Lsn(0x50), &ctx).await.unwrap_err();
4321 2 : assert!(err
4322 2 : .to_string()
4323 2 : .contains("will not become active. Current state: Broken"));
4324 2 :
4325 2 : Ok(())
4326 2 : }
4327 :
4328 : #[tokio::test]
4329 2 : async fn test_retain_data_in_parent_which_is_needed_for_child() -> anyhow::Result<()> {
4330 2 : let (tenant, ctx) =
4331 2 : TenantHarness::create("test_retain_data_in_parent_which_is_needed_for_child")?
4332 2 : .load()
4333 8 : .await;
4334 2 : let tline = tenant
4335 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4336 6 : .await?;
4337 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4338 2 :
4339 2 : tenant
4340 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4341 2 : .await?;
4342 2 : let newtline = tenant
4343 2 : .get_timeline(NEW_TIMELINE_ID, true)
4344 2 : .expect("Should have a local timeline");
4345 2 : // this removes layers before lsn 40 (50 minus 10), so there are two remaining layers, image and delta for 31-50
4346 2 : tenant
4347 2 : .gc_iteration(
4348 2 : Some(TIMELINE_ID),
4349 2 : 0x10,
4350 2 : Duration::ZERO,
4351 2 : &CancellationToken::new(),
4352 2 : &ctx,
4353 2 : )
4354 2 : .await?;
4355 4 : assert!(newtline.get(*TEST_KEY, Lsn(0x25), &ctx).await.is_ok());
4356 2 :
4357 2 : Ok(())
4358 2 : }
4359 : #[tokio::test]
4360 2 : async fn test_parent_keeps_data_forever_after_branching() -> anyhow::Result<()> {
4361 2 : let (tenant, ctx) =
4362 2 : TenantHarness::create("test_parent_keeps_data_forever_after_branching")?
4363 2 : .load()
4364 8 : .await;
4365 2 : let tline = tenant
4366 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4367 4 : .await?;
4368 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4369 2 :
4370 2 : tenant
4371 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4372 2 : .await?;
4373 2 : let newtline = tenant
4374 2 : .get_timeline(NEW_TIMELINE_ID, true)
4375 2 : .expect("Should have a local timeline");
4376 2 :
4377 6 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
4378 2 :
4379 2 : // run gc on parent
4380 2 : tenant
4381 2 : .gc_iteration(
4382 2 : Some(TIMELINE_ID),
4383 2 : 0x10,
4384 2 : Duration::ZERO,
4385 2 : &CancellationToken::new(),
4386 2 : &ctx,
4387 2 : )
4388 2 : .await?;
4389 2 :
4390 2 : // Check that the data is still accessible on the branch.
4391 2 : assert_eq!(
4392 7 : newtline.get(*TEST_KEY, Lsn(0x50), &ctx).await?,
4393 2 : test_img(&format!("foo at {}", Lsn(0x40)))
4394 2 : );
4395 2 :
4396 2 : Ok(())
4397 2 : }
4398 :
4399 : #[tokio::test]
4400 2 : async fn timeline_load() -> anyhow::Result<()> {
4401 2 : const TEST_NAME: &str = "timeline_load";
4402 2 : let harness = TenantHarness::create(TEST_NAME)?;
4403 2 : {
4404 8 : let (tenant, ctx) = harness.load().await;
4405 2 : let tline = tenant
4406 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x7000), DEFAULT_PG_VERSION, &ctx)
4407 6 : .await?;
4408 6 : make_some_layers(tline.as_ref(), Lsn(0x8000), &ctx).await?;
4409 2 : // so that all uploads finish & we can call harness.load() below again
4410 2 : tenant
4411 2 : .shutdown(Default::default(), ShutdownMode::FreezeAndFlush)
4412 2 : .instrument(harness.span())
4413 2 : .await
4414 2 : .ok()
4415 2 : .unwrap();
4416 2 : }
4417 2 :
4418 8 : let (tenant, _ctx) = harness.load().await;
4419 2 : tenant
4420 2 : .get_timeline(TIMELINE_ID, true)
4421 2 : .expect("cannot load timeline");
4422 2 :
4423 2 : Ok(())
4424 2 : }
4425 :
4426 : #[tokio::test]
4427 2 : async fn timeline_load_with_ancestor() -> anyhow::Result<()> {
4428 2 : const TEST_NAME: &str = "timeline_load_with_ancestor";
4429 2 : let harness = TenantHarness::create(TEST_NAME)?;
4430 2 : // create two timelines
4431 2 : {
4432 8 : let (tenant, ctx) = harness.load().await;
4433 2 : let tline = tenant
4434 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4435 5 : .await?;
4436 2 :
4437 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4438 2 :
4439 2 : let child_tline = tenant
4440 2 : .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(Lsn(0x40)), &ctx)
4441 2 : .await?;
4442 2 : child_tline.set_state(TimelineState::Active);
4443 2 :
4444 2 : let newtline = tenant
4445 2 : .get_timeline(NEW_TIMELINE_ID, true)
4446 2 : .expect("Should have a local timeline");
4447 2 :
4448 6 : make_some_layers(newtline.as_ref(), Lsn(0x60), &ctx).await?;
4449 2 :
4450 2 : // so that all uploads finish & we can call harness.load() below again
4451 2 : tenant
4452 2 : .shutdown(Default::default(), ShutdownMode::FreezeAndFlush)
4453 2 : .instrument(harness.span())
4454 3 : .await
4455 2 : .ok()
4456 2 : .unwrap();
4457 2 : }
4458 2 :
4459 2 : // check that both of them are initially unloaded
4460 14 : let (tenant, _ctx) = harness.load().await;
4461 2 :
4462 2 : // check that both, child and ancestor are loaded
4463 2 : let _child_tline = tenant
4464 2 : .get_timeline(NEW_TIMELINE_ID, true)
4465 2 : .expect("cannot get child timeline loaded");
4466 2 :
4467 2 : let _ancestor_tline = tenant
4468 2 : .get_timeline(TIMELINE_ID, true)
4469 2 : .expect("cannot get ancestor timeline loaded");
4470 2 :
4471 2 : Ok(())
4472 2 : }
4473 :
4474 : #[tokio::test]
4475 2 : async fn delta_layer_dumping() -> anyhow::Result<()> {
4476 2 : use storage_layer::AsLayerDesc;
4477 8 : let (tenant, ctx) = TenantHarness::create("test_layer_dumping")?.load().await;
4478 2 : let tline = tenant
4479 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
4480 6 : .await?;
4481 6 : make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?;
4482 2 :
4483 2 : let layer_map = tline.layers.read().await;
4484 2 : let level0_deltas = layer_map
4485 2 : .layer_map()
4486 2 : .get_level0_deltas()?
4487 2 : .into_iter()
4488 4 : .map(|desc| layer_map.get_from_desc(&desc))
4489 2 : .collect::<Vec<_>>();
4490 2 :
4491 2 : assert!(!level0_deltas.is_empty());
4492 2 :
4493 6 : for delta in level0_deltas {
4494 2 : // Ensure we are dumping a delta layer here
4495 4 : assert!(delta.layer_desc().is_delta);
4496 8 : delta.dump(true, &ctx).await.unwrap();
4497 2 : }
4498 2 :
4499 2 : Ok(())
4500 2 : }
4501 :
4502 : #[tokio::test]
4503 2 : async fn test_images() -> anyhow::Result<()> {
4504 8 : let (tenant, ctx) = TenantHarness::create("test_images")?.load().await;
4505 2 : let tline = tenant
4506 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
4507 6 : .await?;
4508 2 :
4509 2 : let mut writer = tline.writer().await;
4510 2 : writer
4511 2 : .put(
4512 2 : *TEST_KEY,
4513 2 : Lsn(0x10),
4514 2 : &Value::Image(test_img("foo at 0x10")),
4515 2 : &ctx,
4516 2 : )
4517 2 : .await?;
4518 2 : writer.finish_write(Lsn(0x10));
4519 2 : drop(writer);
4520 2 :
4521 2 : tline.freeze_and_flush().await?;
4522 2 : tline
4523 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4524 2 : .await?;
4525 2 :
4526 2 : let mut writer = tline.writer().await;
4527 2 : writer
4528 2 : .put(
4529 2 : *TEST_KEY,
4530 2 : Lsn(0x20),
4531 2 : &Value::Image(test_img("foo at 0x20")),
4532 2 : &ctx,
4533 2 : )
4534 2 : .await?;
4535 2 : writer.finish_write(Lsn(0x20));
4536 2 : drop(writer);
4537 2 :
4538 2 : tline.freeze_and_flush().await?;
4539 2 : tline
4540 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4541 2 : .await?;
4542 2 :
4543 2 : let mut writer = tline.writer().await;
4544 2 : writer
4545 2 : .put(
4546 2 : *TEST_KEY,
4547 2 : Lsn(0x30),
4548 2 : &Value::Image(test_img("foo at 0x30")),
4549 2 : &ctx,
4550 2 : )
4551 2 : .await?;
4552 2 : writer.finish_write(Lsn(0x30));
4553 2 : drop(writer);
4554 2 :
4555 2 : tline.freeze_and_flush().await?;
4556 2 : tline
4557 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4558 2 : .await?;
4559 2 :
4560 2 : let mut writer = tline.writer().await;
4561 2 : writer
4562 2 : .put(
4563 2 : *TEST_KEY,
4564 2 : Lsn(0x40),
4565 2 : &Value::Image(test_img("foo at 0x40")),
4566 2 : &ctx,
4567 2 : )
4568 2 : .await?;
4569 2 : writer.finish_write(Lsn(0x40));
4570 2 : drop(writer);
4571 2 :
4572 2 : tline.freeze_and_flush().await?;
4573 2 : tline
4574 2 : .compact(&CancellationToken::new(), EnumSet::empty(), &ctx)
4575 2 : .await?;
4576 2 :
4577 2 : assert_eq!(
4578 4 : tline.get(*TEST_KEY, Lsn(0x10), &ctx).await?,
4579 2 : test_img("foo at 0x10")
4580 2 : );
4581 2 : assert_eq!(
4582 3 : tline.get(*TEST_KEY, Lsn(0x1f), &ctx).await?,
4583 2 : test_img("foo at 0x10")
4584 2 : );
4585 2 : assert_eq!(
4586 2 : tline.get(*TEST_KEY, Lsn(0x20), &ctx).await?,
4587 2 : test_img("foo at 0x20")
4588 2 : );
4589 2 : assert_eq!(
4590 4 : tline.get(*TEST_KEY, Lsn(0x30), &ctx).await?,
4591 2 : test_img("foo at 0x30")
4592 2 : );
4593 2 : assert_eq!(
4594 4 : tline.get(*TEST_KEY, Lsn(0x40), &ctx).await?,
4595 2 : test_img("foo at 0x40")
4596 2 : );
4597 2 :
4598 2 : Ok(())
4599 2 : }
4600 :
4601 4 : async fn bulk_insert_compact_gc(
4602 4 : tenant: &Tenant,
4603 4 : timeline: &Arc<Timeline>,
4604 4 : ctx: &RequestContext,
4605 4 : lsn: Lsn,
4606 4 : repeat: usize,
4607 4 : key_count: usize,
4608 4 : ) -> anyhow::Result<()> {
4609 4 : let compact = true;
4610 103574 : bulk_insert_maybe_compact_gc(tenant, timeline, ctx, lsn, repeat, key_count, compact).await
4611 4 : }
4612 :
4613 8 : async fn bulk_insert_maybe_compact_gc(
4614 8 : tenant: &Tenant,
4615 8 : timeline: &Arc<Timeline>,
4616 8 : ctx: &RequestContext,
4617 8 : mut lsn: Lsn,
4618 8 : repeat: usize,
4619 8 : key_count: usize,
4620 8 : compact: bool,
4621 8 : ) -> anyhow::Result<()> {
4622 8 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
4623 8 : let mut blknum = 0;
4624 8 :
4625 8 : // Enforce that the key range is monotonically increasing
4626 8 : let mut keyspace = KeySpaceAccum::new();
4627 8 :
4628 8 : let cancel = CancellationToken::new();
4629 8 :
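    // Worked example of the write volume per invocation (values taken from the
    // callers below): with repeat = 50 and key_count = 10_000, one call performs
    // 50 * 10_000 = 500_000 `put`s, plus one freeze-and-flush, an optional
    // compaction, and one GC iteration per repeat.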
4630 8 : for _ in 0..repeat {
4631 400 : for _ in 0..key_count {
4632 4000000 : test_key.field6 = blknum;
4633 4000000 : let mut writer = timeline.writer().await;
4634 4000000 : writer
4635 4000000 : .put(
4636 4000000 : test_key,
4637 4000000 : lsn,
4638 4000000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
4639 4000000 : ctx,
4640 4000000 : )
4641 38400 : .await?;
4642 4000000 : writer.finish_write(lsn);
4643 4000000 : drop(writer);
4644 4000000 :
4645 4000000 : keyspace.add_key(test_key);
4646 4000000 :
4647 4000000 : lsn = Lsn(lsn.0 + 0x10);
4648 4000000 : blknum += 1;
4649 : }
4650 :
4651 400 : timeline.freeze_and_flush().await?;
4652 400 : if compact {
4653 : // this requires timeline to be &Arc<Timeline>
4654 40174 : timeline.compact(&cancel, EnumSet::empty(), ctx).await?;
4655 200 : }
4656 :
4657 : // This doesn't strictly need to target this timeline_id, but doing so stays
4658 : // closer to the original behaviour.
4659 400 : let res = tenant
4660 400 : .gc_iteration(Some(timeline.timeline_id), 0, Duration::ZERO, &cancel, ctx)
4661 400 : .await?;
4662 :
4663 400 : assert_eq!(res.layers_removed, 0, "this never removes anything");
4664 : }
4665 :
4666 8 : Ok(())
4667 8 : }
4668 :
4669 : //
4670 : // Insert 1000 key-value pairs with increasing keys, flush, compact, GC.
4671 : // Repeat 50 times.
4672 : //
4673 : #[tokio::test]
4674 2 : async fn test_bulk_insert() -> anyhow::Result<()> {
4675 2 : let harness = TenantHarness::create("test_bulk_insert")?;
4676 8 : let (tenant, ctx) = harness.load().await;
4677 2 : let tline = tenant
4678 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
4679 6 : .await?;
4680 2 :
4681 2 : let lsn = Lsn(0x10);
4682 51787 : bulk_insert_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000).await?;
4683 2 :
4684 2 : Ok(())
4685 2 : }
4686 :
4687 : // Test the real vectored get implementation against a simple sequential implementation.
4688 : //
4689 : // The test generates a keyspace by repeatedly flushing the in-memory layer and compacting.
4690 : // Projected to 2D the key space looks like below. Lsn grows upwards on the Y axis and keys
4691 : // grow to the right on the X axis.
4692 : // [Delta]
4693 : // [Delta]
4694 : // [Delta]
4695 : // [Delta]
4696 : // ------------ Image ---------------
4697 : //
4698 : // After layer generation we pick the ranges to query as follows:
4699 : // 1. At the beginning of each delta layer
4700 : // 2. At the seam between two adjacent delta layers
4701 : //
4702 : // There's one major downside to this test: the delta layers only contain images,
4703 : // so the search can stop at the first delta layer and never needs to traverse any deeper.
4704 : #[tokio::test]
4705 2 : async fn test_get_vectored() -> anyhow::Result<()> {
4706 2 : let harness = TenantHarness::create("test_get_vectored")?;
4707 8 : let (tenant, ctx) = harness.load().await;
4708 2 : let tline = tenant
4709 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
4710 6 : .await?;
4711 2 :
4712 2 : let lsn = Lsn(0x10);
4713 51787 : bulk_insert_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000).await?;
4714 2 :
4715 2 : let guard = tline.layers.read().await;
4716 2 : guard.layer_map().dump(true, &ctx).await?;
4717 2 :
4718 2 : let mut reads = Vec::new();
4719 2 : let mut prev = None;
4720 12 : guard.layer_map().iter_historic_layers().for_each(|desc| {
4721 12 : if !desc.is_delta() {
4722 2 : prev = Some(desc.clone());
4723 2 : return;
4724 10 : }
4725 10 :
4726 10 : let start = desc.key_range.start;
4727 10 : let end = desc
4728 10 : .key_range
4729 10 : .start
4730 10 : .add(Timeline::MAX_GET_VECTORED_KEYS.try_into().unwrap());
4731 10 : reads.push(KeySpace {
4732 10 : ranges: vec![start..end],
4733 10 : });
4734 2 :
4735 10 : if let Some(prev) = &prev {
4736 10 : if !prev.is_delta() {
4737 10 : return;
4738 2 : }
4739 0 :
4740 0 : let first_range = Key {
4741 0 : field6: prev.key_range.end.field6 - 4,
4742 0 : ..prev.key_range.end
4743 0 : }..prev.key_range.end;
4744 0 :
4745 0 : let second_range = desc.key_range.start..Key {
4746 0 : field6: desc.key_range.start.field6 + 4,
4747 0 : ..desc.key_range.start
4748 0 : };
4749 0 :
4750 0 : reads.push(KeySpace {
4751 0 : ranges: vec![first_range, second_range],
4752 0 : });
4753 2 : };
4754 2 :
4755 2 : prev = Some(desc.clone());
4756 12 : });
4757 2 :
4758 2 : drop(guard);
4759 2 :
4760 2 : // Pick a big LSN such that we query over all the changes.
4761 2 : let reads_lsn = Lsn(u64::MAX - 1);
4762 2 :
4763 12 : for read in reads {
4764 10 : info!("Doing vectored read on {:?}", read);
4765 2 :
4766 10 : let vectored_res = tline
4767 10 : .get_vectored_impl(read.clone(), reads_lsn, ValuesReconstructState::new(), &ctx)
4768 25 : .await;
4769 10 : tline
4770 10 : .validate_get_vectored_impl(&vectored_res, read, reads_lsn, &ctx)
4771 19 : .await;
4772 2 : }
4773 2 :
4774 2 : Ok(())
4775 2 : }
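
// A minimal, hypothetical sketch (not part of the original suite) of the read
// selection described above test_get_vectored: for every pair of adjacent delta
// layers, query the last few keys of the previous layer together with the first
// few keys of the next one, so that a single vectored read straddles the seam.
// Plain u32 "keys" stand in for the real Key type; `probe` is the number of
// keys taken on each side of the seam.
fn seam_reads(layer_bounds: &[(u32, u32)], probe: u32) -> Vec<[std::ops::Range<u32>; 2]> {
    layer_bounds
        .windows(2)
        .map(|pair| {
            let (_, prev_end) = pair[0];
            let (next_start, _) = pair[1];
            [
                // last `probe` keys of the previous layer
                prev_end.saturating_sub(probe)..prev_end,
                // first `probe` keys of the next layer
                next_start..next_start + probe,
            ]
        })
        .collect()
}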
4776 :
4777 : #[tokio::test]
4778 2 : async fn test_get_vectored_aux_files() -> anyhow::Result<()> {
4779 2 : let harness = TenantHarness::create("test_get_vectored_aux_files")?;
4780 2 :
4781 8 : let (tenant, ctx) = harness.load().await;
4782 2 : let tline = tenant
4783 2 : .create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION, &ctx)
4784 2 : .await?;
4785 2 : let tline = tline.raw_timeline().unwrap();
4786 2 :
4787 2 : let mut modification = tline.begin_modification(Lsn(0x1000));
4788 2 : modification.put_file("foo/bar1", b"content1", &ctx).await?;
4789 2 : modification.set_lsn(Lsn(0x1008))?;
4790 2 : modification.put_file("foo/bar2", b"content2", &ctx).await?;
4791 2 : modification.commit(&ctx).await?;
4792 2 :
4793 2 : let child_timeline_id = TimelineId::generate();
4794 2 : tenant
4795 2 : .branch_timeline_test(
4796 2 : tline,
4797 2 : child_timeline_id,
4798 2 : Some(tline.get_last_record_lsn()),
4799 2 : &ctx,
4800 2 : )
4801 2 : .await?;
4802 2 :
4803 2 : let child_timeline = tenant
4804 2 : .get_timeline(child_timeline_id, true)
4805 2 : .expect("Should have the branched timeline");
4806 2 :
4807 2 : let aux_keyspace = KeySpace {
4808 2 : ranges: vec![NON_INHERITED_RANGE],
4809 2 : };
4810 2 : let read_lsn = child_timeline.get_last_record_lsn();
4811 2 :
4812 2 : let vectored_res = child_timeline
4813 2 : .get_vectored_impl(
4814 2 : aux_keyspace.clone(),
4815 2 : read_lsn,
4816 2 : ValuesReconstructState::new(),
4817 2 : &ctx,
4818 2 : )
4819 2 : .await;
4820 2 :
4821 2 : child_timeline
4822 2 : .validate_get_vectored_impl(&vectored_res, aux_keyspace, read_lsn, &ctx)
4823 2 : .await;
4824 2 :
4825 2 : let images = vectored_res?;
4826 2 : assert!(images.is_empty());
4827 2 : Ok(())
4828 2 : }
4829 :
4830 : // Test that vectored get handles layer gaps correctly
4831 : // by advancing into the next ancestor timeline if required.
4832 : //
4833 : // The test generates timelines that look like the diagram below.
4834 : // We leave a gap in one of the L1 layers at `gap_at_key` (`/` in the diagram).
4835 : // The reconstruct data for that key lies in the ancestor timeline (`X` in the diagram).
4836 : //
4837 : // ```
4838 : //-------------------------------+
4839 : // ... |
4840 : // [ L1 ] |
4841 : // [ / L1 ] | Child Timeline
4842 : // ... |
4843 : // ------------------------------+
4844 : // [ X L1 ] | Parent Timeline
4845 : // ------------------------------+
4846 : // ```
4847 : #[tokio::test]
4848 2 : async fn test_get_vectored_key_gap() -> anyhow::Result<()> {
4849 2 : let tenant_conf = TenantConf {
4850 2 : // Make compaction deterministic
4851 2 : gc_period: Duration::ZERO,
4852 2 : compaction_period: Duration::ZERO,
4853 2 : // Encourage creation of L1 layers
4854 2 : checkpoint_distance: 16 * 1024,
4855 2 : compaction_target_size: 8 * 1024,
4856 2 : ..TenantConf::default()
4857 2 : };
4858 2 :
4859 2 : let harness = TenantHarness::create_custom("test_get_vectored_key_gap", tenant_conf)?;
4860 8 : let (tenant, ctx) = harness.load().await;
4861 2 :
4862 2 : let mut current_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
4863 2 : let gap_at_key = current_key.add(100);
4864 2 : let mut current_lsn = Lsn(0x10);
4865 2 :
4866 2 : const KEY_COUNT: usize = 10_000;
4867 2 :
4868 2 : let timeline_id = TimelineId::generate();
4869 2 : let current_timeline = tenant
4870 2 : .create_test_timeline(timeline_id, current_lsn, DEFAULT_PG_VERSION, &ctx)
4871 6 : .await?;
4872 2 :
4873 2 : current_lsn += 0x100;
4874 2 :
4875 2 : let mut writer = current_timeline.writer().await;
4876 2 : writer
4877 2 : .put(
4878 2 : gap_at_key,
4879 2 : current_lsn,
4880 2 : &Value::Image(test_img(&format!("{} at {}", gap_at_key, current_lsn))),
4881 2 : &ctx,
4882 2 : )
4883 2 : .await?;
4884 2 : writer.finish_write(current_lsn);
4885 2 : drop(writer);
4886 2 :
4887 2 : let mut latest_lsns = HashMap::new();
4888 2 : latest_lsns.insert(gap_at_key, current_lsn);
4889 2 :
4890 2 : current_timeline.freeze_and_flush().await?;
4891 2 :
4892 2 : let child_timeline_id = TimelineId::generate();
4893 2 :
4894 2 : tenant
4895 2 : .branch_timeline_test(
4896 2 : &current_timeline,
4897 2 : child_timeline_id,
4898 2 : Some(current_lsn),
4899 2 : &ctx,
4900 2 : )
4901 2 : .await?;
4902 2 : let child_timeline = tenant
4903 2 : .get_timeline(child_timeline_id, true)
4904 2 : .expect("Should have the branched timeline");
4905 2 :
4906 20002 : for i in 0..KEY_COUNT {
4907 20000 : if current_key == gap_at_key {
4908 2 : current_key = current_key.next();
4909 2 : continue;
4910 19998 : }
4911 19998 :
4912 19998 : current_lsn += 0x10;
4913 2 :
4914 19998 : let mut writer = child_timeline.writer().await;
4915 19998 : writer
4916 19998 : .put(
4917 19998 : current_key,
4918 19998 : current_lsn,
4919 19998 : &Value::Image(test_img(&format!("{} at {}", current_key, current_lsn))),
4920 19998 : &ctx,
4921 19998 : )
4922 321 : .await?;
4923 19998 : writer.finish_write(current_lsn);
4924 19998 : drop(writer);
4925 19998 :
4926 19998 : latest_lsns.insert(current_key, current_lsn);
4927 19998 : current_key = current_key.next();
4928 19998 :
4929 19998 : // Flush every now and then to encourage layer file creation.
4930 19998 : if i % 500 == 0 {
4931 40 : child_timeline.freeze_and_flush().await?;
4932 19958 : }
4933 2 : }
4934 2 :
4935 2 : child_timeline.freeze_and_flush().await?;
4936 2 : let mut flags = EnumSet::new();
4937 2 : flags.insert(CompactFlags::ForceRepartition);
4938 2 : child_timeline
4939 2 : .compact(&CancellationToken::new(), flags, &ctx)
4940 1085 : .await?;
4941 2 :
4942 2 : let key_near_end = {
4943 2 : let mut tmp = current_key;
4944 2 : tmp.field6 -= 10;
4945 2 : tmp
4946 2 : };
4947 2 :
4948 2 : let key_near_gap = {
4949 2 : let mut tmp = gap_at_key;
4950 2 : tmp.field6 -= 10;
4951 2 : tmp
4952 2 : };
4953 2 :
4954 2 : let read = KeySpace {
4955 2 : ranges: vec![key_near_gap..gap_at_key.next(), key_near_end..current_key],
4956 2 : };
4957 2 : let results = child_timeline
4958 2 : .get_vectored_impl(
4959 2 : read.clone(),
4960 2 : current_lsn,
4961 2 : ValuesReconstructState::new(),
4962 2 : &ctx,
4963 2 : )
4964 15 : .await?;
4965 2 :
4966 44 : for (key, img_res) in results {
4967 42 : let expected = test_img(&format!("{} at {}", key, latest_lsns[&key]));
4968 42 : assert_eq!(img_res?, expected);
4969 2 : }
4970 2 :
4971 2 : Ok(())
4972 2 : }
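
// A minimal, hypothetical sketch (not part of the original suite) of the lookup
// behaviour the gap test above exercises: when the child timeline has no data
// for a key (the "gap"), the read must descend into the ancestor timeline.
// HashMaps stand in for the child's and parent's layer contents.
fn read_with_ancestor_fallback<'a>(
    child: &'a std::collections::HashMap<u32, String>,
    parent: &'a std::collections::HashMap<u32, String>,
    key: u32,
) -> Option<&'a String> {
    // Prefer the child's own data; only fall back to the ancestor on a miss.
    child.get(&key).or_else(|| parent.get(&key))
}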
4973 :
4974 : // Test that vectored get descends into ancestor timelines correctly and
4975 : // does not return an image that's newer than requested.
4976 : //
4977 : // The diagram below illustrates an interesting case. We have a parent timeline
4978 : // (top of the Lsn range) and a child timeline. The request key cannot be reconstructed
4979 : // from the child timeline, so the parent timeline must be visited. When advancing into
4980 : // the child timeline, the read path needs to remember what the requested Lsn was in
4981 : // order to avoid returning an image that's too new. The test below constructs such
4982 : // a timeline setup and does a few queries around the Lsn of each page image.
4983 : // ```
4984 : // LSN
4985 : // ^
4986 : // |
4987 : // |
4988 : // 500 | --------------------------------------> branch point
4989 : // 400 | X
4990 : // 300 | X
4991 : // 200 | --------------------------------------> requested lsn
4992 : // 100 | X
4993 : // |---------------------------------------> Key
4994 : // |
4995 : // ------> requested key
4996 : //
4997 : // Legend:
4998 : // * X - page images
4999 : // ```
5000 : #[tokio::test]
5001 2 : async fn test_get_vectored_ancestor_descent() -> anyhow::Result<()> {
5002 2 : let harness = TenantHarness::create("test_get_vectored_on_lsn_axis")?;
5003 8 : let (tenant, ctx) = harness.load().await;
5004 2 :
5005 2 : let start_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5006 2 : let end_key = start_key.add(1000);
5007 2 : let child_gap_at_key = start_key.add(500);
5008 2 : let mut parent_gap_lsns: BTreeMap<Lsn, String> = BTreeMap::new();
5009 2 :
5010 2 : let mut current_lsn = Lsn(0x10);
5011 2 :
5012 2 : let timeline_id = TimelineId::generate();
5013 2 : let parent_timeline = tenant
5014 2 : .create_test_timeline(timeline_id, current_lsn, DEFAULT_PG_VERSION, &ctx)
5015 5 : .await?;
5016 2 :
5017 2 : current_lsn += 0x100;
5018 2 :
5019 8 : for _ in 0..3 {
5020 6 : let mut key = start_key;
5021 6006 : while key < end_key {
5022 6000 : current_lsn += 0x10;
5023 6000 :
5024 6000 : let image_value = format!("{} at {}", child_gap_at_key, current_lsn);
5025 2 :
5026 6000 : let mut writer = parent_timeline.writer().await;
5027 6000 : writer
5028 6000 : .put(
5029 6000 : key,
5030 6000 : current_lsn,
5031 6000 : &Value::Image(test_img(&image_value)),
5032 6000 : &ctx,
5033 6000 : )
5034 99 : .await?;
5035 6000 : writer.finish_write(current_lsn);
5036 6000 :
5037 6000 : if key == child_gap_at_key {
5038 6 : parent_gap_lsns.insert(current_lsn, image_value);
5039 5994 : }
5040 2 :
5041 6000 : key = key.next();
5042 2 : }
5043 2 :
5044 6 : parent_timeline.freeze_and_flush().await?;
5045 2 : }
5046 2 :
5047 2 : let child_timeline_id = TimelineId::generate();
5048 2 :
5049 2 : let child_timeline = tenant
5050 2 : .branch_timeline_test(&parent_timeline, child_timeline_id, Some(current_lsn), &ctx)
5051 2 : .await?;
5052 2 :
5053 2 : let mut key = start_key;
5054 2002 : while key < end_key {
5055 2000 : if key == child_gap_at_key {
5056 2 : key = key.next();
5057 2 : continue;
5058 1998 : }
5059 1998 :
5060 1998 : current_lsn += 0x10;
5061 2 :
5062 1998 : let mut writer = child_timeline.writer().await;
5063 1998 : writer
5064 1998 : .put(
5065 1998 : key,
5066 1998 : current_lsn,
5067 1998 : &Value::Image(test_img(&format!("{} at {}", key, current_lsn))),
5068 1998 : &ctx,
5069 1998 : )
5070 59 : .await?;
5071 1998 : writer.finish_write(current_lsn);
5072 1998 :
5073 1998 : key = key.next();
5074 2 : }
5075 2 :
5076 2 : child_timeline.freeze_and_flush().await?;
5077 2 :
5078 2 : let lsn_offsets: [i64; 5] = [-10, -1, 0, 1, 10];
5079 2 : let mut query_lsns = Vec::new();
5080 6 : for image_lsn in parent_gap_lsns.keys().rev() {
5081 36 : for offset in lsn_offsets {
5082 30 : query_lsns.push(Lsn(image_lsn
5083 30 : .0
5084 30 : .checked_add_signed(offset)
5085 30 : .expect("Shouldn't overflow")));
5086 30 : }
5087 2 : }
5088 2 :
5089 32 : for query_lsn in query_lsns {
5090 30 : let results = child_timeline
5091 30 : .get_vectored_impl(
5092 30 : KeySpace {
5093 30 : ranges: vec![child_gap_at_key..child_gap_at_key.next()],
5094 30 : },
5095 30 : query_lsn,
5096 30 : ValuesReconstructState::new(),
5097 30 : &ctx,
5098 30 : )
5099 29 : .await;
5100 2 :
5101 30 : let expected_item = parent_gap_lsns
5102 30 : .iter()
5103 30 : .rev()
5104 68 : .find(|(lsn, _)| **lsn <= query_lsn);
5105 30 :
5106 30 : info!(
5107 2 : "Doing vectored read at LSN {}. Expecting image to be: {:?}",
5108 2 : query_lsn, expected_item
5109 2 : );
5110 2 :
5111 30 : match expected_item {
5112 26 : Some((_, img_value)) => {
5113 26 : let key_results = results.expect("No vectored get error expected");
5114 26 : let key_result = &key_results[&child_gap_at_key];
5115 26 : let returned_img = key_result
5116 26 : .as_ref()
5117 26 : .expect("No page reconstruct error expected");
5118 26 :
5119 26 : info!(
5120 2 : "Vectored read at LSN {} returned image {}",
5121 0 : query_lsn,
5122 0 : std::str::from_utf8(returned_img)?
5123 2 : );
5124 26 : assert_eq!(*returned_img, test_img(img_value));
5125 2 : }
5126 2 : None => {
5127 4 : assert!(matches!(results, Err(GetVectoredError::MissingKey(_))));
5128 2 : }
5129 2 : }
5130 2 : }
5131 2 :
5132 2 : Ok(())
5133 2 : }
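
// A minimal, hypothetical sketch (not part of the original suite) of the
// expectation computed above: a read at `query_lsn` must return the newest
// image whose LSN is at or below `query_lsn`, and must fail with a missing-key
// error when no such image exists. A BTreeMap keyed by raw u64 LSNs stands in
// for the layer stack.
fn expected_image_at(images: &std::collections::BTreeMap<u64, String>, query_lsn: u64) -> Option<&String> {
    // range(..=query_lsn) covers every image at or below the requested LSN;
    // next_back() picks the newest of those, mirroring `.rev().find(..)` above.
    images.range(..=query_lsn).next_back().map(|(_, img)| img)
}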
5134 :
5135 : #[tokio::test]
5136 2 : async fn test_random_updates() -> anyhow::Result<()> {
5137 2 : let names_algorithms = [
5138 2 : ("test_random_updates_legacy", CompactionAlgorithm::Legacy),
5139 2 : ("test_random_updates_tiered", CompactionAlgorithm::Tiered),
5140 2 : ];
5141 6 : for (name, algorithm) in names_algorithms {
5142 22416 : test_random_updates_algorithm(name, algorithm).await?;
5143 2 : }
5144 2 : Ok(())
5145 2 : }
5146 :
5147 4 : async fn test_random_updates_algorithm(
5148 4 : name: &'static str,
5149 4 : compaction_algorithm: CompactionAlgorithm,
5150 4 : ) -> anyhow::Result<()> {
5151 4 : let mut harness = TenantHarness::create(name)?;
5152 4 : harness.tenant_conf.compaction_algorithm = compaction_algorithm;
5153 16 : let (tenant, ctx) = harness.load().await;
5154 4 : let tline = tenant
5155 4 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5156 11 : .await?;
5157 :
5158 : const NUM_KEYS: usize = 1000;
5159 4 : let cancel = CancellationToken::new();
5160 4 :
5161 4 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5162 4 :
5163 4 : let mut keyspace = KeySpaceAccum::new();
5164 4 :
5165 4 : // Track when each page was last modified. Used to assert that
5166 4 : // a read sees the latest page version.
5167 4 : let mut updated = [Lsn(0); NUM_KEYS];
5168 4 :
5169 4 : let mut lsn = Lsn(0x10);
5170 : #[allow(clippy::needless_range_loop)]
5171 4004 : for blknum in 0..NUM_KEYS {
5172 4000 : lsn = Lsn(lsn.0 + 0x10);
5173 4000 : test_key.field6 = blknum as u32;
5174 4000 : let mut writer = tline.writer().await;
5175 4000 : writer
5176 4000 : .put(
5177 4000 : test_key,
5178 4000 : lsn,
5179 4000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5180 4000 : &ctx,
5181 4000 : )
5182 66 : .await?;
5183 4000 : writer.finish_write(lsn);
5184 4000 : updated[blknum] = lsn;
5185 4000 : drop(writer);
5186 4000 :
5187 4000 : keyspace.add_key(test_key);
5188 : }
5189 :
5190 204 : for _ in 0..50 {
5191 200200 : for _ in 0..NUM_KEYS {
5192 200000 : lsn = Lsn(lsn.0 + 0x10);
5193 200000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5194 200000 : test_key.field6 = blknum as u32;
5195 200000 : let mut writer = tline.writer().await;
5196 200000 : writer
5197 200000 : .put(
5198 200000 : test_key,
5199 200000 : lsn,
5200 200000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5201 200000 : &ctx,
5202 200000 : )
5203 3280 : .await?;
5204 200000 : writer.finish_write(lsn);
5205 200000 : drop(writer);
5206 200000 : updated[blknum] = lsn;
5207 : }
5208 :
5209 : // Read all the blocks
5210 200000 : for (blknum, last_lsn) in updated.iter().enumerate() {
5211 200000 : test_key.field6 = blknum as u32;
5212 200000 : assert_eq!(
5213 200000 : tline.get(test_key, lsn, &ctx).await?,
5214 200000 : test_img(&format!("{} at {}", blknum, last_lsn))
5215 : );
5216 : }
5217 :
5218 : // Perform a cycle of flush and GC
5219 203 : tline.freeze_and_flush().await?;
5220 200 : tenant
5221 200 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
5222 200 : .await?;
5223 : }
5224 :
5225 4 : Ok(())
5226 4 : }
5227 :
5228 : #[tokio::test]
5229 2 : async fn test_traverse_branches() -> anyhow::Result<()> {
5230 2 : let (tenant, ctx) = TenantHarness::create("test_traverse_branches")?
5231 2 : .load()
5232 8 : .await;
5233 2 : let mut tline = tenant
5234 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5235 6 : .await?;
5236 2 :
5237 2 : const NUM_KEYS: usize = 1000;
5238 2 :
5239 2 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5240 2 :
5241 2 : let mut keyspace = KeySpaceAccum::new();
5242 2 :
5243 2 : let cancel = CancellationToken::new();
5244 2 :
5245 2 : // Track when each page was last modified. Used to assert that
5246 2 : // a read sees the latest page version.
5247 2 : let mut updated = [Lsn(0); NUM_KEYS];
5248 2 :
5249 2 : let mut lsn = Lsn(0x10);
5250 2 : #[allow(clippy::needless_range_loop)]
5251 2002 : for blknum in 0..NUM_KEYS {
5252 2000 : lsn = Lsn(lsn.0 + 0x10);
5253 2000 : test_key.field6 = blknum as u32;
5254 2000 : let mut writer = tline.writer().await;
5255 2000 : writer
5256 2000 : .put(
5257 2000 : test_key,
5258 2000 : lsn,
5259 2000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5260 2000 : &ctx,
5261 2000 : )
5262 33 : .await?;
5263 2000 : writer.finish_write(lsn);
5264 2000 : updated[blknum] = lsn;
5265 2000 : drop(writer);
5266 2000 :
5267 2000 : keyspace.add_key(test_key);
5268 2 : }
5269 2 :
5270 102 : for _ in 0..50 {
5271 100 : let new_tline_id = TimelineId::generate();
5272 100 : tenant
5273 100 : .branch_timeline_test(&tline, new_tline_id, Some(lsn), &ctx)
5274 2 : .await?;
5275 100 : tline = tenant
5276 100 : .get_timeline(new_tline_id, true)
5277 100 : .expect("Should have the branched timeline");
5278 2 :
5279 100100 : for _ in 0..NUM_KEYS {
5280 100000 : lsn = Lsn(lsn.0 + 0x10);
5281 100000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5282 100000 : test_key.field6 = blknum as u32;
5283 100000 : let mut writer = tline.writer().await;
5284 100000 : writer
5285 100000 : .put(
5286 100000 : test_key,
5287 100000 : lsn,
5288 100000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5289 100000 : &ctx,
5290 100000 : )
5291 2950 : .await?;
5292 100000 : println!("updating {} at {}", blknum, lsn);
5293 100000 : writer.finish_write(lsn);
5294 100000 : drop(writer);
5295 100000 : updated[blknum] = lsn;
5296 2 : }
5297 2 :
5298 2 : // Read all the blocks
5299 100000 : for (blknum, last_lsn) in updated.iter().enumerate() {
5300 100000 : test_key.field6 = blknum as u32;
5301 100000 : assert_eq!(
5302 100000 : tline.get(test_key, lsn, &ctx).await?,
5303 100000 : test_img(&format!("{} at {}", blknum, last_lsn))
5304 2 : );
5305 2 : }
5306 2 :
5307 2 : // Perform a cycle of flush, compact, and GC
5308 100 : tline.freeze_and_flush().await?;
5309 13180 : tline.compact(&cancel, EnumSet::empty(), &ctx).await?;
5310 100 : tenant
5311 100 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
5312 100 : .await?;
5313 2 : }
5314 2 :
5315 2 : Ok(())
5316 2 : }
5317 :
5318 : #[tokio::test]
5319 2 : async fn test_traverse_ancestors() -> anyhow::Result<()> {
5320 2 : let (tenant, ctx) = TenantHarness::create("test_traverse_ancestors")?
5321 2 : .load()
5322 8 : .await;
5323 2 : let mut tline = tenant
5324 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5325 6 : .await?;
5326 2 :
5327 2 : const NUM_KEYS: usize = 100;
5328 2 : const NUM_TLINES: usize = 50;
5329 2 :
5330 2 : let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5331 2 : // Track page mutation lsns across different timelines.
5332 2 : let mut updated = [[Lsn(0); NUM_KEYS]; NUM_TLINES];
5333 2 :
5334 2 : let mut lsn = Lsn(0x10);
5335 2 :
5336 2 : #[allow(clippy::needless_range_loop)]
5337 102 : for idx in 0..NUM_TLINES {
5338 100 : let new_tline_id = TimelineId::generate();
5339 100 : tenant
5340 100 : .branch_timeline_test(&tline, new_tline_id, Some(lsn), &ctx)
5341 2 : .await?;
5342 100 : tline = tenant
5343 100 : .get_timeline(new_tline_id, true)
5344 100 : .expect("Should have the branched timeline");
5345 2 :
5346 10100 : for _ in 0..NUM_KEYS {
5347 10000 : lsn = Lsn(lsn.0 + 0x10);
5348 10000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5349 10000 : test_key.field6 = blknum as u32;
5350 10000 : let mut writer = tline.writer().await;
5351 10000 : writer
5352 10000 : .put(
5353 10000 : test_key,
5354 10000 : lsn,
5355 10000 : &Value::Image(test_img(&format!("{} {} at {}", idx, blknum, lsn))),
5356 10000 : &ctx,
5357 10000 : )
5358 318 : .await?;
5359 10000 : println!("updating [{}][{}] at {}", idx, blknum, lsn);
5360 10000 : writer.finish_write(lsn);
5361 10000 : drop(writer);
5362 10000 : updated[idx][blknum] = lsn;
5363 2 : }
5364 2 : }
5365 2 :
5366 2 : // Read pages from leaf timeline across all ancestors.
5367 100 : for (idx, lsns) in updated.iter().enumerate() {
5368 10000 : for (blknum, lsn) in lsns.iter().enumerate() {
5369 2 : // Skip empty mutations.
5370 10000 : if lsn.0 == 0 {
5371 3679 : continue;
5372 6321 : }
5373 6321 : println!("checking [{idx}][{blknum}] at {lsn}");
5374 6321 : test_key.field6 = blknum as u32;
5375 6321 : assert_eq!(
5376 6321 : tline.get(test_key, *lsn, &ctx).await?,
5377 6321 : test_img(&format!("{idx} {blknum} at {lsn}"))
5378 2 : );
5379 2 : }
5380 2 : }
5381 2 : Ok(())
5382 2 : }
5383 :
5384 : #[tokio::test]
5385 2 : async fn test_write_at_initdb_lsn_takes_optimization_code_path() -> anyhow::Result<()> {
5386 2 : let (tenant, ctx) = TenantHarness::create("test_empty_test_timeline_is_usable")?
5387 2 : .load()
5388 8 : .await;
5389 2 :
5390 2 : let initdb_lsn = Lsn(0x20);
5391 2 : let utline = tenant
5392 2 : .create_empty_timeline(TIMELINE_ID, initdb_lsn, DEFAULT_PG_VERSION, &ctx)
5393 2 : .await?;
5394 2 : let tline = utline.raw_timeline().unwrap();
5395 2 :
5396 2 : // Spawn the flush loop now so that we can set the `expect_initdb_optimization` flag below
5397 2 : tline.maybe_spawn_flush_loop();
5398 2 :
5399 2 : // Make sure the timeline has the minimum set of required keys for operation.
5400 2 : // The only operation you can always do on an empty timeline is to `put` new data.
5401 2 : // Except if you `put` at `initdb_lsn`.
5402 2 : // In that case, there's an optimization to directly create image layers instead of delta layers.
5403 2 : // It uses `repartition()`, which assumes some keys to be present.
5404 2 : // Let's make sure the test timeline can handle that case.
5405 2 : {
5406 2 : let mut state = tline.flush_loop_state.lock().unwrap();
5407 2 : assert_eq!(
5408 2 : timeline::FlushLoopState::Running {
5409 2 : expect_initdb_optimization: false,
5410 2 : initdb_optimization_count: 0,
5411 2 : },
5412 2 : *state
5413 2 : );
5414 2 : *state = timeline::FlushLoopState::Running {
5415 2 : expect_initdb_optimization: true,
5416 2 : initdb_optimization_count: 0,
5417 2 : };
5418 2 : }
5419 2 :
5420 2 : // Make writes at the initdb_lsn. When we flush it below, it should be handled by the optimization.
5421 2 : // As explained above, the optimization requires some keys to be present.
5422 2 : // As per `create_empty_timeline` documentation, use init_empty to set them.
5423 2 : // This is what `create_test_timeline` does, by the way.
5424 2 : let mut modification = tline.begin_modification(initdb_lsn);
5425 2 : modification
5426 2 : .init_empty_test_timeline()
5427 2 : .context("init_empty_test_timeline")?;
5428 2 : modification
5429 2 : .commit(&ctx)
5430 2 : .await
5431 2 : .context("commit init_empty_test_timeline modification")?;
5432 2 :
5433 2 : // Do the flush. The flush code will check the expectations that we set above.
5434 2 : tline.freeze_and_flush().await?;
5435 2 :
5436 2 : // assert freeze_and_flush exercised the initdb optimization
5437 2 : {
5438 2 : let state = tline.flush_loop_state.lock().unwrap();
5439 2 : let timeline::FlushLoopState::Running {
5440 2 : expect_initdb_optimization,
5441 2 : initdb_optimization_count,
5442 2 : } = *state
5443 2 : else {
5444 2 : panic!("unexpected state: {:?}", *state);
5445 2 : };
5446 2 : assert!(expect_initdb_optimization);
5447 2 : assert!(initdb_optimization_count > 0);
5448 2 : }
5449 2 : Ok(())
5450 2 : }
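
// A minimal, hypothetical sketch (not part of the original suite) of the choice
// the flush-loop expectation above is about: when the layer being flushed sits
// exactly at the initdb LSN, the flush may write image layers directly instead
// of going through a delta layer first. The enum and the exact condition are
// illustrative assumptions, not the pageserver's real types or logic.
#[derive(Debug, PartialEq)]
enum FlushOutput {
    ImageLayers, // the "initdb optimization" path checked by the test above
    DeltaLayer,  // the regular flush path
}

fn choose_flush_output(frozen_lsn_range: std::ops::Range<u64>, initdb_lsn: u64) -> FlushOutput {
    if frozen_lsn_range == (initdb_lsn..initdb_lsn + 1) {
        FlushOutput::ImageLayers
    } else {
        FlushOutput::DeltaLayer
    }
}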
5451 :
5452 : #[tokio::test]
5453 2 : async fn test_create_guard_crash() -> anyhow::Result<()> {
5454 2 : let name = "test_create_guard_crash";
5455 2 : let harness = TenantHarness::create(name)?;
5456 2 : {
5457 8 : let (tenant, ctx) = harness.load().await;
5458 2 : let tline = tenant
5459 2 : .create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION, &ctx)
5460 2 : .await?;
5461 2 : // Leave the timeline ID in [`Tenant::timelines_creating`] so that a later attempt to create it again is excluded
5462 2 : let raw_tline = tline.raw_timeline().unwrap();
5463 2 : raw_tline
5464 2 : .shutdown(super::timeline::ShutdownMode::Hard)
5465 2 : .instrument(info_span!("test_shutdown", tenant_id=%raw_tline.tenant_shard_id, shard_id=%raw_tline.tenant_shard_id.shard_slug(), timeline_id=%TIMELINE_ID))
5466 2 : .await;
5467 2 : std::mem::forget(tline);
5468 2 : }
5469 2 :
5470 8 : let (tenant, _) = harness.load().await;
5471 2 : match tenant.get_timeline(TIMELINE_ID, false) {
5472 2 : Ok(_) => panic!("timeline should've been removed during load"),
5473 2 : Err(e) => {
5474 2 : assert_eq!(
5475 2 : e,
5476 2 : GetTimelineError::NotFound {
5477 2 : tenant_id: tenant.tenant_shard_id,
5478 2 : timeline_id: TIMELINE_ID,
5479 2 : }
5480 2 : )
5481 2 : }
5482 2 : }
5483 2 :
5484 2 : assert!(!harness
5485 2 : .conf
5486 2 : .timeline_path(&tenant.tenant_shard_id, &TIMELINE_ID)
5487 2 : .exists());
5488 2 :
5489 2 : Ok(())
5490 2 : }
5491 :
5492 : #[tokio::test]
5493 2 : async fn test_read_at_max_lsn() -> anyhow::Result<()> {
5494 2 : let names_algorithms = [
5495 2 : ("test_read_at_max_lsn_legacy", CompactionAlgorithm::Legacy),
5496 2 : ("test_read_at_max_lsn_tiered", CompactionAlgorithm::Tiered),
5497 2 : ];
5498 6 : for (name, algorithm) in names_algorithms {
5499 63734 : test_read_at_max_lsn_algorithm(name, algorithm).await?;
5500 2 : }
5501 2 : Ok(())
5502 2 : }
5503 :
5504 4 : async fn test_read_at_max_lsn_algorithm(
5505 4 : name: &'static str,
5506 4 : compaction_algorithm: CompactionAlgorithm,
5507 4 : ) -> anyhow::Result<()> {
5508 4 : let mut harness = TenantHarness::create(name)?;
5509 4 : harness.tenant_conf.compaction_algorithm = compaction_algorithm;
5510 13 : let (tenant, ctx) = harness.load().await;
5511 4 : let tline = tenant
5512 4 : .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
5513 11 : .await?;
5514 :
5515 4 : let lsn = Lsn(0x10);
5516 4 : let compact = false;
5517 63400 : bulk_insert_maybe_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000, compact).await?;
5518 :
5519 4 : let test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
5520 4 : let read_lsn = Lsn(u64::MAX - 1);
5521 :
5522 310 : let result = tline.get(test_key, read_lsn, &ctx).await;
5523 4 : assert!(result.is_ok(), "result is not Ok: {}", result.unwrap_err());
5524 :
5525 4 : Ok(())
5526 4 : }
5527 :
5528 : #[tokio::test]
5529 2 : async fn test_metadata_scan() -> anyhow::Result<()> {
5530 2 : let harness = TenantHarness::create("test_metadata_scan")?;
5531 8 : let (tenant, ctx) = harness.load().await;
5532 2 : let tline = tenant
5533 2 : .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
5534 6 : .await?;
5535 2 :
5536 2 : const NUM_KEYS: usize = 1000;
5537 2 : const STEP: usize = 100; // random update + scan base_key + idx * STEP
5538 2 :
5539 2 : let cancel = CancellationToken::new();
5540 2 :
5541 2 : let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
5542 2 : base_key.field1 = AUX_KEY_PREFIX;
5543 2 : let mut test_key = base_key;
5544 2 :
5545 2 : // Track when each page was last modified. Used to assert that
5546 2 : // a read sees the latest page version.
5547 2 : let mut updated = [Lsn(0); NUM_KEYS];
5548 2 :
5549 2 : let mut lsn = Lsn(0x10);
5550 2 : #[allow(clippy::needless_range_loop)]
5551 2002 : for blknum in 0..NUM_KEYS {
5552 2000 : lsn = Lsn(lsn.0 + 0x10);
5553 2000 : test_key.field6 = (blknum * STEP) as u32;
5554 2000 : let mut writer = tline.writer().await;
5555 2000 : writer
5556 2000 : .put(
5557 2000 : test_key,
5558 2000 : lsn,
5559 2000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5560 2000 : &ctx,
5561 2000 : )
5562 33 : .await?;
5563 2000 : writer.finish_write(lsn);
5564 2000 : updated[blknum] = lsn;
5565 2000 : drop(writer);
5566 2 : }
5567 2 :
5568 2 : let keyspace = KeySpace::single(base_key..base_key.add((NUM_KEYS * STEP) as u32));
5569 2 :
5570 22 : for _ in 0..10 {
5571 2 : // Read all the blocks
5572 20000 : for (blknum, last_lsn) in updated.iter().enumerate() {
5573 20000 : test_key.field6 = (blknum * STEP) as u32;
5574 20000 : assert_eq!(
5575 20000 : tline.get(test_key, lsn, &ctx).await?,
5576 20000 : test_img(&format!("{} at {}", blknum, last_lsn))
5577 2 : );
5578 2 : }
5579 2 :
5580 20 : let mut cnt = 0;
5581 20000 : for (key, value) in tline
5582 20 : .get_vectored_impl(
5583 20 : keyspace.clone(),
5584 20 : lsn,
5585 20 : ValuesReconstructState::default(),
5586 20 : &ctx,
5587 20 : )
5588 7344 : .await?
5589 2 : {
5590 20000 : let blknum = key.field6 as usize;
5591 20000 : let value = value?;
5592 20000 : assert!(blknum % STEP == 0);
5593 20000 : let blknum = blknum / STEP;
5594 20000 : assert_eq!(
5595 20000 : value,
5596 20000 : test_img(&format!("{} at {}", blknum, updated[blknum]))
5597 20000 : );
5598 20000 : cnt += 1;
5599 2 : }
5600 2 :
5601 20 : assert_eq!(cnt, NUM_KEYS);
5602 2 :
5603 20020 : for _ in 0..NUM_KEYS {
5604 20000 : lsn = Lsn(lsn.0 + 0x10);
5605 20000 : let blknum = thread_rng().gen_range(0..NUM_KEYS);
5606 20000 : test_key.field6 = (blknum * STEP) as u32;
5607 20000 : let mut writer = tline.writer().await;
5608 20000 : writer
5609 20000 : .put(
5610 20000 : test_key,
5611 20000 : lsn,
5612 20000 : &Value::Image(test_img(&format!("{} at {}", blknum, lsn))),
5613 20000 : &ctx,
5614 20000 : )
5615 553 : .await?;
5616 20000 : writer.finish_write(lsn);
5617 20000 : drop(writer);
5618 20000 : updated[blknum] = lsn;
5619 2 : }
5620 2 :
5621 2 : // Perform a cycle of flush, compact, and GC
5622 21 : tline.freeze_and_flush().await?;
5623 494 : tline.compact(&cancel, EnumSet::empty(), &ctx).await?;
5624 20 : tenant
5625 20 : .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx)
5626 20 : .await?;
5627 2 : }
5628 2 :
5629 2 : Ok(())
5630 2 : }
5631 : }
|