Line data Source code
1 : use std::{
2 : collections::{HashMap, HashSet},
3 : pin::Pin,
4 : str::FromStr,
5 : sync::Arc,
6 : time::{Duration, Instant, SystemTime},
7 : };
8 :
9 : use crate::{
10 : config::PageServerConf,
11 : context::RequestContext,
12 : disk_usage_eviction_task::{
13 : finite_f32, DiskUsageEvictionInfo, EvictionCandidate, EvictionLayer, EvictionSecondaryLayer,
14 : },
15 : metrics::SECONDARY_MODE,
16 : tenant::{
17 : config::SecondaryLocationConfig,
18 : debug_assert_current_span_has_tenant_and_timeline_id,
19 : ephemeral_file::is_ephemeral_file,
20 : remote_timeline_client::{
21 : index::LayerFileMetadata, is_temp_download_file, FAILED_DOWNLOAD_WARN_THRESHOLD,
22 : FAILED_REMOTE_OP_RETRIES,
23 : },
24 : span::debug_assert_current_span_has_tenant_id,
25 : storage_layer::{layer::local_layer_path, LayerName},
26 : tasks::{warn_when_period_overrun, BackgroundLoopKind},
27 : },
28 : virtual_file::{on_fatal_io_error, MaybeFatalIo, VirtualFile},
29 : TEMP_FILE_SUFFIX,
30 : };
31 :
32 : use super::{
33 : heatmap::HeatMapLayer,
34 : scheduler::{
35 : self, period_jitter, period_warmup, Completion, JobGenerator, SchedulingResult,
36 : TenantBackgroundJobs,
37 : },
38 : SecondaryTenant,
39 : };
40 :
41 : use crate::tenant::{
42 : mgr::TenantManager,
43 : remote_timeline_client::{download::download_layer_file, remote_heatmap_path},
44 : };
45 :
46 : use camino::Utf8PathBuf;
47 : use chrono::format::{DelayedFormat, StrftimeItems};
48 : use futures::Future;
49 : use pageserver_api::models::SecondaryProgress;
50 : use pageserver_api::shard::TenantShardId;
51 : use remote_storage::{DownloadError, Etag, GenericRemoteStorage};
52 :
53 : use tokio_util::sync::CancellationToken;
54 : use tracing::{info_span, instrument, warn, Instrument};
55 : use utils::{
56 : backoff, completion::Barrier, crashsafe::path_with_suffix_extension, failpoint_support, fs_ext,
57 : id::TimelineId, serde_system_time,
58 : };
59 :
60 : use super::{
61 : heatmap::{HeatMapTenant, HeatMapTimeline},
62 : CommandRequest, DownloadCommand,
63 : };
64 :
65 : /// For each tenant, the default period that must have elapsed since the last download
66 : /// before we download again. This default is replaced with the value of
67 : /// [`HeatMapTenant::upload_period_ms`] after the first download, if the uploader populated it.
68 : const DEFAULT_DOWNLOAD_INTERVAL: Duration = Duration::from_millis(60000);
69 :
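: /// Entry point for the secondary download background loop: wraps a
: /// [`SecondaryDownloader`] in the generic tenant background-job scheduler and
: /// drives it until `cancel` fires, handling both periodic downloads and
: /// explicit download commands.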
70 0 : pub(super) async fn downloader_task(
71 0 : tenant_manager: Arc<TenantManager>,
72 0 : remote_storage: GenericRemoteStorage,
73 0 : command_queue: tokio::sync::mpsc::Receiver<CommandRequest<DownloadCommand>>,
74 0 : background_jobs_can_start: Barrier,
75 0 : cancel: CancellationToken,
76 0 : root_ctx: RequestContext,
77 0 : ) {
78 0 : let concurrency = tenant_manager.get_conf().secondary_download_concurrency;
79 0 :
80 0 : let generator = SecondaryDownloader {
81 0 : tenant_manager,
82 0 : remote_storage,
83 0 : root_ctx,
84 0 : };
85 0 : let mut scheduler = Scheduler::new(generator, concurrency);
86 0 :
87 0 : scheduler
88 0 : .run(command_queue, background_jobs_can_start, cancel)
89 0 : .instrument(info_span!("secondary_download_scheduler"))
90 0 : .await
91 0 : }
92 :
93 : struct SecondaryDownloader {
94 : tenant_manager: Arc<TenantManager>,
95 : remote_storage: GenericRemoteStorage,
96 : root_ctx: RequestContext,
97 : }
98 :
99 : #[derive(Debug, Clone)]
100 : pub(super) struct OnDiskState {
101 : metadata: LayerFileMetadata,
102 : access_time: SystemTime,
103 : local_path: Utf8PathBuf,
104 : }
105 :
106 : impl OnDiskState {
107 0 : fn new(
108 0 : _conf: &'static PageServerConf,
109 0 : _tenant_shard_id: &TenantShardId,
110 0 : _timeline_id: &TimelineId,
111 0 : _name: LayerName,
112 0 : metadata: LayerFileMetadata,
113 0 : access_time: SystemTime,
114 0 : local_path: Utf8PathBuf,
115 0 : ) -> Self {
116 0 : Self {
117 0 : metadata,
118 0 : access_time,
119 0 : local_path,
120 0 : }
121 0 : }
122 :
123 : // This is infallible, because all errors are either acceptable (ENOENT), or totally
124 : // unexpected (fatal).
125 0 : pub(super) fn remove_blocking(&self) {
126 0 : // We tolerate ENOENT, because between planning eviction and executing
127 0 : // it, the secondary downloader could have seen an updated heatmap that
128 0 : // resulted in a layer being deleted.
129 0 : // Other local I/O errors are process-fatal: these should never happen.
130 0 : std::fs::remove_file(&self.local_path)
131 0 : .or_else(fs_ext::ignore_not_found)
132 0 : .fatal_err("Deleting secondary layer")
133 0 : }
134 : }
135 :
136 : #[derive(Debug, Clone, Default)]
137 : pub(super) struct SecondaryDetailTimeline {
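: /// Layers that we believe are present on local disk, with the metadata and
: /// access time from the most recently processed heatmap.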
138 : pub(super) on_disk_layers: HashMap<LayerName, OnDiskState>,
139 :
140 : /// We remember when layers were evicted, to prevent re-downloading them.
141 : pub(super) evicted_at: HashMap<LayerName, SystemTime>,
142 : }
143 :
144 : // Aspects of a heatmap that we remember after downloading it
145 : #[derive(Clone, Debug)]
146 : struct DownloadSummary {
147 : etag: Etag,
148 : #[allow(unused)]
149 : mtime: SystemTime,
150 : upload_period: Duration,
151 : }
152 :
153 : /// This state is written by the secondary downloader; it is opaque
154 : /// to the TenantManager.
155 : #[derive(Debug)]
156 : pub(super) struct SecondaryDetail {
157 : pub(super) config: SecondaryLocationConfig,
158 :
159 : last_download: Option<DownloadSummary>,
160 : next_download: Option<Instant>,
161 : pub(super) timelines: HashMap<TimelineId, SecondaryDetailTimeline>,
162 : }
163 :
164 : /// Helper for logging SystemTime
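: /// (renders as e.g. `28/03/2024 12:34:56`)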
165 0 : fn strftime(t: &'_ SystemTime) -> DelayedFormat<StrftimeItems<'_>> {
166 0 : let datetime: chrono::DateTime<chrono::Utc> = (*t).into();
167 0 : datetime.format("%d/%m/%Y %T")
168 0 : }
169 :
170 : /// Information returned from download function when it detects the heatmap has changed
171 : struct HeatMapModified {
172 : etag: Etag,
173 : last_modified: SystemTime,
174 : bytes: Vec<u8>,
175 : }
176 :
177 : enum HeatMapDownload {
178 : // The heatmap's etag has changed: return the new etag, mtime and the body bytes
179 : Modified(HeatMapModified),
180 : // The heatmap's etag is unchanged
181 : Unmodified,
182 : }
183 :
184 : impl SecondaryDetail {
185 0 : pub(super) fn new(config: SecondaryLocationConfig) -> Self {
186 0 : Self {
187 0 : config,
188 0 : last_download: None,
189 0 : next_download: None,
190 0 : timelines: HashMap::new(),
191 0 : }
192 0 : }
193 :
194 : /// Additionally returns the total number of layers, used for more stable
195 : /// relative-access-time-based eviction.
196 0 : pub(super) fn get_layers_for_eviction(
197 0 : &self,
198 0 : parent: &Arc<SecondaryTenant>,
199 0 : ) -> (DiskUsageEvictionInfo, usize) {
200 0 : let mut result = DiskUsageEvictionInfo::default();
201 0 : let mut total_layers = 0;
202 :
203 0 : for (timeline_id, timeline_detail) in &self.timelines {
204 0 : result
205 0 : .resident_layers
206 0 : .extend(timeline_detail.on_disk_layers.iter().map(|(name, ods)| {
207 0 : EvictionCandidate {
208 0 : layer: EvictionLayer::Secondary(EvictionSecondaryLayer {
209 0 : secondary_tenant: parent.clone(),
210 0 : timeline_id: *timeline_id,
211 0 : name: name.clone(),
212 0 : metadata: ods.metadata.clone(),
213 0 : }),
214 0 : last_activity_ts: ods.access_time,
215 0 : relative_last_activity: finite_f32::FiniteF32::ZERO,
216 0 : }
217 0 : }));
218 0 :
219 0 : // The total might be missing layers that are currently downloading, but as a
220 0 : // lower-than-actual value it is a good enough approximation.
221 0 : total_layers += timeline_detail.on_disk_layers.len() + timeline_detail.evicted_at.len();
222 0 : }
223 0 : result.max_layer_size = result
224 0 : .resident_layers
225 0 : .iter()
226 0 : .map(|l| l.layer.get_file_size())
227 0 : .max();
228 0 :
229 0 : tracing::debug!(
230 0 : "eviction: secondary tenant {} found {} timelines, {} layers",
231 0 : parent.get_tenant_shard_id(),
232 0 : self.timelines.len(),
233 0 : result.resident_layers.len()
234 : );
235 :
236 0 : (result, total_layers)
237 0 : }
238 : }
239 :
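: /// A download job that has been selected to run, but not yet spawned.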
240 : struct PendingDownload {
241 : secondary_state: Arc<SecondaryTenant>,
242 : last_download: Option<DownloadSummary>,
243 : target_time: Option<Instant>,
244 : }
245 :
246 : impl scheduler::PendingJob for PendingDownload {
247 0 : fn get_tenant_shard_id(&self) -> &TenantShardId {
248 0 : self.secondary_state.get_tenant_shard_id()
249 0 : }
250 : }
251 :
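: /// A spawned download job; the barrier is released when the job's future
: /// finishes (its `Completion` half is dropped at the end of the future).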
252 : struct RunningDownload {
253 : barrier: Barrier,
254 : }
255 :
256 : impl scheduler::RunningJob for RunningDownload {
257 0 : fn get_barrier(&self) -> Barrier {
258 0 : self.barrier.clone()
259 0 : }
260 : }
261 :
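: /// Handed back to the scheduler when a download job finishes, so that the
: /// tenant's next download can be scheduled.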
262 : struct CompleteDownload {
263 : secondary_state: Arc<SecondaryTenant>,
264 : completed_at: Instant,
265 : }
266 :
267 : impl scheduler::Completion for CompleteDownload {
268 0 : fn get_tenant_shard_id(&self) -> &TenantShardId {
269 0 : self.secondary_state.get_tenant_shard_id()
270 0 : }
271 : }
272 :
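: /// The generic tenant background-job scheduler, specialized for secondary downloads.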
273 : type Scheduler = TenantBackgroundJobs<
274 : SecondaryDownloader,
275 : PendingDownload,
276 : RunningDownload,
277 : CompleteDownload,
278 : DownloadCommand,
279 : >;
280 :
281 : impl JobGenerator<PendingDownload, RunningDownload, CompleteDownload, DownloadCommand>
282 : for SecondaryDownloader
283 : {
284 0 : #[instrument(skip_all, fields(tenant_id=%completion.get_tenant_shard_id().tenant_id, shard_id=%completion.get_tenant_shard_id().shard_slug()))]
285 : fn on_completion(&mut self, completion: CompleteDownload) {
286 : let CompleteDownload {
287 : secondary_state,
288 : completed_at: _completed_at,
289 : } = completion;
290 :
291 : tracing::debug!("Secondary tenant download completed");
292 :
293 : let mut detail = secondary_state.detail.lock().unwrap();
294 :
295 : let period = detail
296 : .last_download
297 : .as_ref()
298 0 : .map(|d| d.upload_period)
299 : .unwrap_or(DEFAULT_DOWNLOAD_INTERVAL);
300 :
301 : // We advance next_download irrespective of errors: we don't want error cases to result in
302 : // expensive busy-polling.
303 : detail.next_download = Some(Instant::now() + period_jitter(period, 5));
304 : }
305 :
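: /// Periodic scheduling: pick out the secondary tenants whose next_download
: /// time has passed, most urgent (earliest target time) first.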
306 0 : async fn schedule(&mut self) -> SchedulingResult<PendingDownload> {
307 0 : let mut result = SchedulingResult {
308 0 : jobs: Vec::new(),
309 0 : want_interval: None,
310 0 : };
311 0 :
312 0 : // Step 1: identify some tenants that we may work on
313 0 : let mut tenants: Vec<Arc<SecondaryTenant>> = Vec::new();
314 0 : self.tenant_manager
315 0 : .foreach_secondary_tenants(|_id, secondary_state| {
316 0 : tenants.push(secondary_state.clone());
317 0 : });
318 0 :
319 0 : // Step 2: filter out tenants which are not yet eligible to run
320 0 : let now = Instant::now();
321 0 : result.jobs = tenants
322 0 : .into_iter()
323 0 : .filter_map(|secondary_tenant| {
324 0 : let (last_download, next_download) = {
325 0 : let mut detail = secondary_tenant.detail.lock().unwrap();
326 0 :
327 0 : if !detail.config.warm {
328 : // Downloads are disabled for this tenant
329 0 : detail.next_download = None;
330 0 : return None;
331 0 : }
332 0 :
333 0 : if detail.next_download.is_none() {
334 0 : // Initialize randomly in the range from 0 to our interval: this uniformly spreads the start times. Subsequent
335 0 : // rounds will use a smaller jitter to avoid accidentally synchronizing later.
336 0 : detail.next_download = Some(now.checked_add(period_warmup(DEFAULT_DOWNLOAD_INTERVAL)).expect(
337 0 : "Using our constant, which is known to be small compared with clock range",
338 0 : ));
339 0 : }
340 0 : (detail.last_download.clone(), detail.next_download.unwrap())
341 0 : };
342 0 :
343 0 : if now > next_download {
344 0 : Some(PendingDownload {
345 0 : secondary_state: secondary_tenant,
346 0 : last_download,
347 0 : target_time: Some(next_download),
348 0 : })
349 : } else {
350 0 : None
351 : }
352 0 : })
353 0 : .collect();
354 0 :
355 0 : // Step 3: sort by target execution time to run most urgent first.
356 0 : result.jobs.sort_by_key(|j| j.target_time);
357 0 :
358 0 : result
359 0 : }
360 :
361 0 : fn on_command(&mut self, command: DownloadCommand) -> anyhow::Result<PendingDownload> {
362 0 : let tenant_shard_id = command.get_tenant_shard_id();
363 0 :
364 0 : let tenant = self
365 0 : .tenant_manager
366 0 : .get_secondary_tenant_shard(*tenant_shard_id);
367 0 : let Some(tenant) = tenant else {
368 0 : return Err(anyhow::anyhow!("Not found or not in Secondary mode"));
369 : };
370 :
371 0 : Ok(PendingDownload {
372 0 : target_time: None,
373 0 : last_download: None,
374 0 : secondary_state: tenant,
375 0 : })
376 0 : }
377 :
378 0 : fn spawn(
379 0 : &mut self,
380 0 : job: PendingDownload,
381 0 : ) -> (
382 0 : RunningDownload,
383 0 : Pin<Box<dyn Future<Output = CompleteDownload> + Send>>,
384 0 : ) {
385 0 : let PendingDownload {
386 0 : secondary_state,
387 0 : last_download,
388 0 : target_time,
389 0 : } = job;
390 0 :
391 0 : let (completion, barrier) = utils::completion::channel();
392 0 : let remote_storage = self.remote_storage.clone();
393 0 : let conf = self.tenant_manager.get_conf();
394 0 : let tenant_shard_id = *secondary_state.get_tenant_shard_id();
395 0 : let download_ctx = self.root_ctx.attached_child();
396 0 : (RunningDownload { barrier }, Box::pin(async move {
397 0 : let _completion = completion;
398 0 :
399 0 : match TenantDownloader::new(conf, &remote_storage, &secondary_state)
400 0 : .download(&download_ctx)
401 0 : .await
402 : {
403 : Err(UpdateError::NoData) => {
404 0 : tracing::info!("No heatmap found for tenant. This is fine if it is new.");
405 : },
406 : Err(UpdateError::NoSpace) => {
407 0 : tracing::warn!("Insufficient space while downloading. Will retry later.");
408 : }
409 : Err(UpdateError::Cancelled) => {
410 0 : tracing::info!("Shut down while downloading");
411 : },
412 0 : Err(UpdateError::Deserialize(e)) => {
413 0 : tracing::error!("Corrupt content while downloading tenant: {e}");
414 : },
415 0 : Err(e @ (UpdateError::DownloadError(_) | UpdateError::Other(_))) => {
416 0 : tracing::error!("Error while downloading tenant: {e}");
417 : },
418 0 : Ok(()) => {}
419 : };
420 :
421 : // Irrespective of the result, we will reschedule ourselves to run after our usual period.
422 :
423 : // If the job had a target execution time, we may check our final execution
424 : // time against that for observability purposes.
425 0 : if let (Some(target_time), Some(last_download)) = (target_time, last_download) {
426 0 : // Elapsed time includes any scheduling lag as well as the execution of the job
427 0 : let elapsed = Instant::now().duration_since(target_time);
428 0 :
429 0 : warn_when_period_overrun(
430 0 : elapsed,
431 0 : last_download.upload_period,
432 0 : BackgroundLoopKind::SecondaryDownload,
433 0 : );
434 0 : }
435 :
436 0 : CompleteDownload {
437 0 : secondary_state,
438 0 : completed_at: Instant::now(),
439 0 : }
440 0 : }.instrument(info_span!(parent: None, "secondary_download", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))))
441 0 : }
442 : }
443 :
444 : /// This type is a convenience to group together the various functions involved in
445 : /// freshening a secondary tenant.
446 : struct TenantDownloader<'a> {
447 : conf: &'static PageServerConf,
448 : remote_storage: &'a GenericRemoteStorage,
449 : secondary_state: &'a SecondaryTenant,
450 : }
451 :
452 : /// Errors that may be encountered while updating a tenant
453 0 : #[derive(thiserror::Error, Debug)]
454 : enum UpdateError {
455 : #[error("No remote data found")]
456 : NoData,
457 : #[error("Insufficient local storage space")]
458 : NoSpace,
459 : #[error("Failed to download")]
460 : DownloadError(DownloadError),
461 : #[error(transparent)]
462 : Deserialize(#[from] serde_json::Error),
463 : #[error("Cancelled")]
464 : Cancelled,
465 : #[error(transparent)]
466 : Other(#[from] anyhow::Error),
467 : }
468 :
469 : impl From<DownloadError> for UpdateError {
470 0 : fn from(value: DownloadError) -> Self {
471 0 : match &value {
472 0 : DownloadError::Cancelled => Self::Cancelled,
473 0 : DownloadError::NotFound => Self::NoData,
474 0 : _ => Self::DownloadError(value),
475 : }
476 0 : }
477 : }
478 :
479 : impl From<std::io::Error> for UpdateError {
480 0 : fn from(value: std::io::Error) -> Self {
481 0 : if let Some(nix::errno::Errno::ENOSPC) = value.raw_os_error().map(nix::errno::from_i32) {
482 0 : UpdateError::NoSpace
483 0 : } else if value
484 0 : .get_ref()
485 0 : .and_then(|x| x.downcast_ref::<DownloadError>())
486 0 : .is_some()
487 : {
488 0 : UpdateError::from(DownloadError::from(value))
489 : } else {
490 : // An I/O error from e.g. tokio::io::copy_buf is most likely a remote storage issue
491 0 : UpdateError::Other(anyhow::anyhow!(value))
492 : }
493 0 : }
494 : }
495 :
496 : impl<'a> TenantDownloader<'a> {
497 0 : fn new(
498 0 : conf: &'static PageServerConf,
499 0 : remote_storage: &'a GenericRemoteStorage,
500 0 : secondary_state: &'a SecondaryTenant,
501 0 : ) -> Self {
502 0 : Self {
503 0 : conf,
504 0 : remote_storage,
505 0 : secondary_state,
506 0 : }
507 0 : }
508 :
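: /// Run one download pass for this tenant: fetch the heatmap (conditional on
: /// the last seen etag), persist it locally, delete local layers and timelines
: /// that are absent from it, then download any layers it lists that we lack.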
509 0 : async fn download(&self, ctx: &RequestContext) -> Result<(), UpdateError> {
510 0 : debug_assert_current_span_has_tenant_id();
511 :
512 : // For the duration of a download, we must hold the SecondaryTenant::gate, to
513 : // cover our access to local storage.
514 0 : let Ok(_guard) = self.secondary_state.gate.enter() else {
515 : // Shutting down
516 0 : return Err(UpdateError::Cancelled);
517 : };
518 :
519 0 : let tenant_shard_id = self.secondary_state.get_tenant_shard_id();
520 0 :
521 0 : // We will use the etag from last successful download to make the download conditional on changes
522 0 : let last_download = self
523 0 : .secondary_state
524 0 : .detail
525 0 : .lock()
526 0 : .unwrap()
527 0 : .last_download
528 0 : .clone();
529 :
530 : // Download the tenant's heatmap
531 : let HeatMapModified {
532 0 : last_modified: heatmap_mtime,
533 0 : etag: heatmap_etag,
534 0 : bytes: heatmap_bytes,
535 0 : } = match tokio::select!(
536 0 : bytes = self.download_heatmap(last_download.as_ref().map(|d| &d.etag)) => {bytes?},
537 0 : _ = self.secondary_state.cancel.cancelled() => return Ok(())
538 0 : ) {
539 : HeatMapDownload::Unmodified => {
540 0 : tracing::info!("Heatmap unchanged since last successful download");
541 0 : return Ok(());
542 : }
543 0 : HeatMapDownload::Modified(m) => m,
544 : };
545 :
546 0 : let heatmap = serde_json::from_slice::<HeatMapTenant>(&heatmap_bytes)?;
547 :
548 : // Save the heatmap: this will be useful on restart, allowing us to reconstruct
549 : // layer metadata without having to re-download it.
550 0 : let heatmap_path = self.conf.tenant_heatmap_path(tenant_shard_id);
551 0 :
552 0 : let temp_path = path_with_suffix_extension(&heatmap_path, TEMP_FILE_SUFFIX);
553 0 : let context_msg = format!("write tenant {tenant_shard_id} heatmap to {heatmap_path}");
554 0 : let heatmap_path_bg = heatmap_path.clone();
555 0 : VirtualFile::crashsafe_overwrite(heatmap_path_bg, temp_path, heatmap_bytes)
556 0 : .await
557 0 : .maybe_fatal_err(&context_msg)?;
558 :
559 0 : tracing::debug!(
560 0 : "Wrote local heatmap to {}, with {} timelines",
561 0 : heatmap_path,
562 0 : heatmap.timelines.len()
563 : );
564 :
565 : // Get or initialize the local disk state for the timelines we will update
566 0 : let mut timeline_states = HashMap::new();
567 0 : for timeline in &heatmap.timelines {
568 0 : let timeline_state = self
569 0 : .secondary_state
570 0 : .detail
571 0 : .lock()
572 0 : .unwrap()
573 0 : .timelines
574 0 : .get(&timeline.timeline_id)
575 0 : .cloned();
576 :
577 0 : let timeline_state = match timeline_state {
578 0 : Some(t) => t,
579 : None => {
580 : // We have no existing state: need to scan local disk for layers first.
581 0 : let timeline_state =
582 0 : init_timeline_state(self.conf, tenant_shard_id, timeline).await;
583 :
584 : // Re-acquire detail lock now that we're done with async load from local FS
585 0 : self.secondary_state
586 0 : .detail
587 0 : .lock()
588 0 : .unwrap()
589 0 : .timelines
590 0 : .insert(timeline.timeline_id, timeline_state.clone());
591 0 : timeline_state
592 : }
593 : };
594 :
595 0 : timeline_states.insert(timeline.timeline_id, timeline_state);
596 : }
597 :
598 : // Clean up any local layers that aren't in the heatmap. We do this first for all timelines, on the general
599 : // principle that deletions should be done before writes wherever possible, and so that we can use this
600 : // phase to initialize our SecondaryProgress.
601 0 : {
602 0 : *self.secondary_state.progress.lock().unwrap() =
603 0 : self.prepare_timelines(&heatmap, heatmap_mtime).await?;
604 : }
605 :
606 : // Download the layers in the heatmap
607 0 : for timeline in heatmap.timelines {
608 0 : let timeline_state = timeline_states
609 0 : .remove(&timeline.timeline_id)
610 0 : .expect("Just populated above");
611 0 :
612 0 : if self.secondary_state.cancel.is_cancelled() {
613 0 : tracing::debug!(
614 0 : "Cancelled before downloading timeline {}",
615 : timeline.timeline_id
616 : );
617 0 : return Ok(());
618 0 : }
619 0 :
620 0 : let timeline_id = timeline.timeline_id;
621 0 : self.download_timeline(timeline, timeline_state, ctx)
622 0 : .instrument(tracing::info_span!(
623 : "secondary_download_timeline",
624 : tenant_id=%tenant_shard_id.tenant_id,
625 0 : shard_id=%tenant_shard_id.shard_slug(),
626 : %timeline_id
627 : ))
628 0 : .await?;
629 : }
630 :
631 : // Only update last_etag after a fully successful download: this way we will not skip
632 : // the next download, even if the heatmap's actual etag is unchanged.
633 0 : self.secondary_state.detail.lock().unwrap().last_download = Some(DownloadSummary {
634 0 : etag: heatmap_etag,
635 0 : mtime: heatmap_mtime,
636 0 : upload_period: heatmap
637 0 : .upload_period_ms
638 0 : .map(|ms| Duration::from_millis(ms as u64))
639 0 : .unwrap_or(DEFAULT_DOWNLOAD_INTERVAL),
640 0 : });
641 0 :
642 0 : // Robustness: we should have updated progress properly, but in case we didn't, make sure
643 0 : // we don't leave the tenant in a state where we claim to have successfully downloaded
644 0 : // everything, but our progress is incomplete. The invariant here should be that if
645 0 : // we have set `last_download` to this heatmap's etag, then the next time we see that
646 0 : // etag we can safely do no work (i.e. we must be complete).
647 0 : let mut progress = self.secondary_state.progress.lock().unwrap();
648 0 : debug_assert!(progress.layers_downloaded == progress.layers_total);
649 0 : debug_assert!(progress.bytes_downloaded == progress.bytes_total);
650 0 : if progress.layers_downloaded != progress.layers_total
651 0 : || progress.bytes_downloaded != progress.bytes_total
652 : {
653 0 : tracing::warn!("Correcting drift in progress stats ({progress:?})");
654 0 : progress.layers_downloaded = progress.layers_total;
655 0 : progress.bytes_downloaded = progress.bytes_total;
656 0 : }
657 :
658 0 : Ok(())
659 0 : }
660 :
661 : /// Do any fast local cleanup that comes before the much slower process of downloading
662 : /// layers from remote storage. In the process, initialize the SecondaryProgress object
663 : /// that will later be updated incrementally as we download layers.
664 0 : async fn prepare_timelines(
665 0 : &self,
666 0 : heatmap: &HeatMapTenant,
667 0 : heatmap_mtime: SystemTime,
668 0 : ) -> Result<SecondaryProgress, UpdateError> {
669 0 : let heatmap_stats = heatmap.get_stats();
670 0 : // We will construct a progress object, and then populate its initial "downloaded" numbers
671 0 : // while iterating through local layer state in [`Self::prepare_timelines`]
672 0 : let mut progress = SecondaryProgress {
673 0 : layers_total: heatmap_stats.layers,
674 0 : bytes_total: heatmap_stats.bytes,
675 0 : heatmap_mtime: Some(serde_system_time::SystemTime(heatmap_mtime)),
676 0 : layers_downloaded: 0,
677 0 : bytes_downloaded: 0,
678 0 : };
679 0 : // Accumulate list of things to delete while holding the detail lock, for execution after dropping the lock
680 0 : let mut delete_layers = Vec::new();
681 0 : let mut delete_timelines = Vec::new();
682 0 : {
683 0 : let mut detail = self.secondary_state.detail.lock().unwrap();
684 0 : for (timeline_id, timeline_state) in &mut detail.timelines {
685 0 : let Some(heatmap_timeline_index) = heatmap
686 0 : .timelines
687 0 : .iter()
688 0 : .position(|t| t.timeline_id == *timeline_id)
689 : else {
690 : // This timeline is no longer referenced in the heatmap: delete it locally
691 0 : delete_timelines.push(*timeline_id);
692 0 : continue;
693 : };
694 :
695 0 : let heatmap_timeline = heatmap.timelines.get(heatmap_timeline_index).unwrap();
696 0 :
697 0 : let layers_in_heatmap = heatmap_timeline
698 0 : .layers
699 0 : .iter()
700 0 : .map(|l| (&l.name, l.metadata.generation))
701 0 : .collect::<HashSet<_>>();
702 0 : let layers_on_disk = timeline_state
703 0 : .on_disk_layers
704 0 : .iter()
705 0 : .map(|l| (l.0, l.1.metadata.generation))
706 0 : .collect::<HashSet<_>>();
707 0 :
708 0 : let mut layer_count = layers_on_disk.len();
709 0 : let mut layer_byte_count: u64 = timeline_state
710 0 : .on_disk_layers
711 0 : .values()
712 0 : .map(|l| l.metadata.file_size)
713 0 : .sum();
714 :
715 : // Remove on-disk layers that are no longer present in heatmap
716 0 : for (layer_file_name, generation) in layers_on_disk.difference(&layers_in_heatmap) {
717 0 : layer_count -= 1;
718 0 : layer_byte_count -= timeline_state
719 0 : .on_disk_layers
720 0 : .get(layer_file_name)
721 0 : .unwrap()
722 0 : .metadata
723 0 : .file_size;
724 0 :
725 0 : let local_path = local_layer_path(
726 0 : self.conf,
727 0 : self.secondary_state.get_tenant_shard_id(),
728 0 : timeline_id,
729 0 : layer_file_name,
730 0 : generation,
731 0 : );
732 0 :
733 0 : delete_layers.push((*timeline_id, (*layer_file_name).clone(), local_path));
734 0 : }
735 :
736 0 : progress.bytes_downloaded += layer_byte_count;
737 0 : progress.layers_downloaded += layer_count;
738 : }
739 :
740 0 : for delete_timeline in &delete_timelines {
741 0 : // We haven't removed from disk yet, but optimistically remove from in-memory state: if removal
742 0 : // from disk fails that will be a fatal error.
743 0 : detail.timelines.remove(delete_timeline);
744 0 : }
745 : }
746 :
747 : // Execute accumulated deletions
748 0 : for (timeline_id, layer_name, local_path) in delete_layers {
749 0 : tracing::info!(timeline_id=%timeline_id, "Removing secondary local layer {layer_name} because it's absent in heatmap",);
750 :
751 0 : tokio::fs::remove_file(&local_path)
752 0 : .await
753 0 : .or_else(fs_ext::ignore_not_found)
754 0 : .maybe_fatal_err("Removing secondary layer")?;
755 :
756 : // Update in-memory housekeeping to reflect the absence of the deleted layer
757 0 : let mut detail = self.secondary_state.detail.lock().unwrap();
758 0 : let Some(timeline_state) = detail.timelines.get_mut(&timeline_id) else {
759 0 : continue;
760 : };
761 0 : timeline_state.on_disk_layers.remove(&layer_name);
762 : }
763 :
764 0 : for timeline_id in delete_timelines {
765 0 : let timeline_path = self
766 0 : .conf
767 0 : .timeline_path(self.secondary_state.get_tenant_shard_id(), &timeline_id);
768 0 : tracing::info!(timeline_id=%timeline_id,
769 0 : "Timeline no longer in heatmap, removing from secondary location"
770 : );
771 0 : tokio::fs::remove_dir_all(&timeline_path)
772 0 : .await
773 0 : .or_else(fs_ext::ignore_not_found)
774 0 : .maybe_fatal_err("Removing secondary timeline")?;
775 : }
776 :
777 0 : Ok(progress)
778 0 : }
779 :
780 : /// Returns the downloaded heatmap bytes if the remote etag differs from `prev_etag`,
781 : /// or [`HeatMapDownload::Unmodified`] if the object still matches `prev_etag`.
782 0 : async fn download_heatmap(
783 0 : &self,
784 0 : prev_etag: Option<&Etag>,
785 0 : ) -> Result<HeatMapDownload, UpdateError> {
786 0 : debug_assert_current_span_has_tenant_id();
787 0 : let tenant_shard_id = self.secondary_state.get_tenant_shard_id();
788 0 : // TODO: pull up etag check into the request, to do a conditional GET rather than
789 0 : // issuing a GET and then maybe ignoring the response body
790 0 : // (https://github.com/neondatabase/neon/issues/6199)
791 0 : tracing::debug!("Downloading heatmap for secondary tenant",);
792 :
793 0 : let heatmap_path = remote_heatmap_path(tenant_shard_id);
794 0 : let cancel = &self.secondary_state.cancel;
795 0 :
796 0 : backoff::retry(
797 0 : || async {
798 0 : let download = self
799 0 : .remote_storage
800 0 : .download(&heatmap_path, cancel)
801 0 : .await
802 0 : .map_err(UpdateError::from)?;
803 0 :
804 0 : SECONDARY_MODE.download_heatmap.inc();
805 0 :
806 0 : if Some(&download.etag) == prev_etag {
807 0 : Ok(HeatMapDownload::Unmodified)
808 0 : } else {
809 0 : let mut heatmap_bytes = Vec::new();
810 0 : let mut body = tokio_util::io::StreamReader::new(download.download_stream);
811 0 : let _size = tokio::io::copy_buf(&mut body, &mut heatmap_bytes).await?;
812 0 : Ok(HeatMapDownload::Modified(HeatMapModified {
813 0 : etag: download.etag,
814 0 : last_modified: download.last_modified,
815 0 : bytes: heatmap_bytes,
816 0 : }))
817 0 : }
818 0 : },
819 0 : |e| matches!(e, UpdateError::NoData | UpdateError::Cancelled),
820 0 : FAILED_DOWNLOAD_WARN_THRESHOLD,
821 0 : FAILED_REMOTE_OP_RETRIES,
822 0 : "download heatmap",
823 0 : cancel,
824 0 : )
825 0 : .await
826 0 : .ok_or_else(|| UpdateError::Cancelled)
827 0 : .and_then(|x| x)
828 0 : }
829 :
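: /// Reconcile one timeline's local layers with its heatmap entry: download
: /// missing layers, refresh access times for layers we already hold, and skip
: /// layers that were evicted more recently than they were last accessed.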
830 0 : async fn download_timeline(
831 0 : &self,
832 0 : timeline: HeatMapTimeline,
833 0 : timeline_state: SecondaryDetailTimeline,
834 0 : ctx: &RequestContext,
835 0 : ) -> Result<(), UpdateError> {
836 0 : debug_assert_current_span_has_tenant_and_timeline_id();
837 0 : let tenant_shard_id = self.secondary_state.get_tenant_shard_id();
838 0 :
839 0 : // Accumulate updates to the state
840 0 : let mut touched = Vec::new();
841 0 :
842 0 : tracing::debug!(timeline_id=%timeline.timeline_id, "Downloading layers, {} in heatmap", timeline.layers.len());
843 :
844 : // Download heatmap layers that are not present on local disk, or update their
845 : // access time if they are already present.
846 0 : for layer in timeline.layers {
847 0 : if self.secondary_state.cancel.is_cancelled() {
848 0 : tracing::debug!("Cancelled -- dropping out of layer loop");
849 0 : return Err(UpdateError::Cancelled);
850 0 : }
851 :
852 : // Existing on-disk layers: just update their access time.
853 0 : if let Some(on_disk) = timeline_state.on_disk_layers.get(&layer.name) {
854 0 : tracing::debug!("Layer {} is already on disk", layer.name);
855 :
856 0 : if cfg!(debug_assertions) {
857 : // Debug for https://github.com/neondatabase/neon/issues/6966: check that the files we think
858 : // are already present on disk are really there.
859 0 : match tokio::fs::metadata(&on_disk.local_path).await {
860 0 : Ok(meta) => {
861 0 : tracing::debug!(
862 0 : "Layer {} present at {}, size {}",
863 0 : layer.name,
864 0 : on_disk.local_path,
865 0 : meta.len(),
866 : );
867 : }
868 0 : Err(e) => {
869 0 : tracing::warn!(
870 0 : "Layer {} not found at {} ({})",
871 : layer.name,
872 : on_disk.local_path,
873 : e
874 : );
875 0 : debug_assert!(false);
876 : }
877 : }
878 0 : }
879 :
880 0 : if on_disk.metadata != layer.metadata || on_disk.access_time != layer.access_time {
881 : // We already have this layer on disk. Update its access time.
882 0 : tracing::debug!(
883 0 : "Access time updated for layer {}: {} -> {}",
884 0 : layer.name,
885 0 : strftime(&on_disk.access_time),
886 0 : strftime(&layer.access_time)
887 : );
888 0 : touched.push(layer);
889 0 : }
890 0 : continue;
891 : } else {
892 0 : tracing::debug!("Layer {} not present on disk yet", layer.name);
893 : }
894 :
895 : // Eviction: if we evicted a layer, then do not re-download it unless it was accessed more
896 : // recently than it was evicted.
897 0 : if let Some(evicted_at) = timeline_state.evicted_at.get(&layer.name) {
898 0 : if &layer.access_time > evicted_at {
899 0 : tracing::info!(
900 0 : "Re-downloading evicted layer {}, accessed at {}, evicted at {}",
901 0 : layer.name,
902 0 : strftime(&layer.access_time),
903 0 : strftime(evicted_at)
904 : );
905 : } else {
906 0 : tracing::trace!(
907 0 : "Not re-downloading evicted layer {}, accessed at {}, evicted at {}",
908 0 : layer.name,
909 0 : strftime(&layer.access_time),
910 0 : strftime(evicted_at)
911 : );
912 0 : self.skip_layer(layer);
913 0 : continue;
914 : }
915 0 : }
916 :
917 0 : match self
918 0 : .download_layer(tenant_shard_id, &timeline.timeline_id, layer, ctx)
919 0 : .await?
920 : {
921 0 : Some(layer) => touched.push(layer),
922 0 : None => {
923 0 : // Not an error but we didn't download it: remote layer is missing. Don't add it to the list of
924 0 : // things to consider touched.
925 0 : }
926 : }
927 : }
928 :
929 : // Write updates to state to record layers we just downloaded or touched.
930 : {
931 0 : let mut detail = self.secondary_state.detail.lock().unwrap();
932 0 : let timeline_detail = detail.timelines.entry(timeline.timeline_id).or_default();
933 0 :
934 0 : tracing::info!("Wrote timeline_detail for {} touched layers", touched.len());
935 :
936 0 : for t in touched {
937 : use std::collections::hash_map::Entry;
938 0 : match timeline_detail.on_disk_layers.entry(t.name.clone()) {
939 0 : Entry::Occupied(mut v) => {
940 0 : v.get_mut().access_time = t.access_time;
941 0 : }
942 0 : Entry::Vacant(e) => {
943 0 : let local_path = local_layer_path(
944 0 : self.conf,
945 0 : tenant_shard_id,
946 0 : &timeline.timeline_id,
947 0 : &t.name,
948 0 : &t.metadata.generation,
949 0 : );
950 0 : e.insert(OnDiskState::new(
951 0 : self.conf,
952 0 : tenant_shard_id,
953 0 : &timeline.timeline_id,
954 0 : t.name,
955 0 : t.metadata.clone(),
956 0 : t.access_time,
957 0 : local_path,
958 0 : ));
959 0 : }
960 : }
961 : }
962 : }
963 :
964 0 : Ok(())
965 0 : }
966 :
967 : /// Call this during timeline download if a layer will _not_ be downloaded, to update progress statistics
968 0 : fn skip_layer(&self, layer: HeatMapLayer) {
969 0 : let mut progress = self.secondary_state.progress.lock().unwrap();
970 0 : progress.layers_total = progress.layers_total.saturating_sub(1);
971 0 : progress.bytes_total = progress
972 0 : .bytes_total
973 0 : .saturating_sub(layer.metadata.file_size);
974 0 : }
975 :
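: /// Download a single layer into the secondary location. Returns `Ok(None)`
: /// (after updating progress stats via [`Self::skip_layer`]) if the layer no
: /// longer exists in remote storage, e.g. removed by compaction or GC.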
976 0 : async fn download_layer(
977 0 : &self,
978 0 : tenant_shard_id: &TenantShardId,
979 0 : timeline_id: &TimelineId,
980 0 : layer: HeatMapLayer,
981 0 : ctx: &RequestContext,
982 0 : ) -> Result<Option<HeatMapLayer>, UpdateError> {
983 0 : // Failpoint for simulating slow remote storage
984 0 : failpoint_support::sleep_millis_async!(
985 : "secondary-layer-download-sleep",
986 0 : &self.secondary_state.cancel
987 : );
988 :
989 0 : let local_path = local_layer_path(
990 0 : self.conf,
991 0 : tenant_shard_id,
992 0 : timeline_id,
993 0 : &layer.name,
994 0 : &layer.metadata.generation,
995 0 : );
996 0 :
997 0 : // Note: no backoff::retry wrapper here because download_layer_file does its own retries internally
998 0 : tracing::info!(
999 0 : "Starting download of layer {}, size {}",
1000 : layer.name,
1001 : layer.metadata.file_size
1002 : );
1003 0 : let downloaded_bytes = download_layer_file(
1004 0 : self.conf,
1005 0 : self.remote_storage,
1006 0 : *tenant_shard_id,
1007 0 : *timeline_id,
1008 0 : &layer.name,
1009 0 : &layer.metadata,
1010 0 : &local_path,
1011 0 : &self.secondary_state.cancel,
1012 0 : ctx,
1013 0 : )
1014 0 : .await;
1015 :
1016 0 : let downloaded_bytes = match downloaded_bytes {
1017 0 : Ok(bytes) => bytes,
1018 : Err(DownloadError::NotFound) => {
1019 : // A heatmap might be out of date and refer to a layer that doesn't exist any more.
1020 : // This is harmless: continue to download the next layer. This is expected when
1021 : // the layer was removed by compaction or GC.
1022 0 : tracing::debug!(
1023 0 : "Skipped downloading missing layer {}, raced with compaction/gc?",
1024 : layer.name
1025 : );
1026 0 : self.skip_layer(layer);
1027 0 :
1028 0 : return Ok(None);
1029 : }
1030 0 : Err(e) => return Err(e.into()),
1031 : };
1032 :
1033 0 : if downloaded_bytes != layer.metadata.file_size {
1034 0 : let local_path = local_layer_path(
1035 0 : self.conf,
1036 0 : tenant_shard_id,
1037 0 : timeline_id,
1038 0 : &layer.name,
1039 0 : &layer.metadata.generation,
1040 0 : );
1041 0 :
1042 0 : tracing::warn!(
1043 0 : "Downloaded layer {} with unexpected size {} != {}. Removing download.",
1044 : layer.name,
1045 : downloaded_bytes,
1046 : layer.metadata.file_size
1047 : );
1048 :
1049 0 : tokio::fs::remove_file(&local_path)
1050 0 : .await
1051 0 : .or_else(fs_ext::ignore_not_found)?;
1052 : } else {
1053 0 : tracing::info!("Downloaded layer {}, size {}", layer.name, downloaded_bytes);
1054 0 : let mut progress = self.secondary_state.progress.lock().unwrap();
1055 0 : progress.bytes_downloaded += downloaded_bytes;
1056 0 : progress.layers_downloaded += 1;
1057 : }
1058 :
1059 0 : SECONDARY_MODE.download_layer.inc();
1060 0 :
1061 0 : Ok(Some(layer))
1062 0 : }
1063 : }
1064 :
1065 : /// Scan local storage and build up OnDiskState entries based on the metadata in a HeatMapTimeline
1066 0 : async fn init_timeline_state(
1067 0 : conf: &'static PageServerConf,
1068 0 : tenant_shard_id: &TenantShardId,
1069 0 : heatmap: &HeatMapTimeline,
1070 0 : ) -> SecondaryDetailTimeline {
1071 0 : let timeline_path = conf.timeline_path(tenant_shard_id, &heatmap.timeline_id);
1072 0 : let mut detail = SecondaryDetailTimeline::default();
1073 :
1074 0 : let mut dir = match tokio::fs::read_dir(&timeline_path).await {
1075 0 : Ok(d) => d,
1076 0 : Err(e) => {
1077 0 : if e.kind() == std::io::ErrorKind::NotFound {
1078 0 : let context = format!("Creating timeline directory {timeline_path}");
1079 0 : tracing::info!("{}", context);
1080 0 : tokio::fs::create_dir_all(&timeline_path)
1081 0 : .await
1082 0 : .fatal_err(&context);
1083 0 :
1084 0 : // No entries to report: drop out.
1085 0 : return detail;
1086 : } else {
1087 0 : on_fatal_io_error(&e, &format!("Reading timeline dir {timeline_path}"));
1088 : }
1089 : }
1090 : };
1091 :
1092 : // As we iterate through layers found on disk, we will look up their metadata from this map.
1093 : // Layers not present in metadata will be discarded.
1094 0 : let heatmap_metadata: HashMap<&LayerName, &HeatMapLayer> =
1095 0 : heatmap.layers.iter().map(|l| (&l.name, l)).collect();
1096 :
1097 0 : while let Some(dentry) = dir
1098 0 : .next_entry()
1099 0 : .await
1100 0 : .fatal_err(&format!("Listing {timeline_path}"))
1101 : {
1102 0 : let Ok(file_path) = Utf8PathBuf::from_path_buf(dentry.path()) else {
1103 0 : tracing::warn!("Malformed filename at {}", dentry.path().to_string_lossy());
1104 0 : continue;
1105 : };
1106 0 : let local_meta = dentry
1107 0 : .metadata()
1108 0 : .await
1109 0 : .fatal_err(&format!("Read metadata on {}", file_path));
1110 0 :
1111 0 : let file_name = file_path.file_name().expect("created it from the dentry");
1112 0 : if crate::is_temporary(&file_path)
1113 0 : || is_temp_download_file(&file_path)
1114 0 : || is_ephemeral_file(file_name)
1115 : {
1116 : // Temporary files are frequently left behind from restarting during downloads
1117 0 : tracing::info!("Cleaning up temporary file {file_path}");
1118 0 : if let Err(e) = tokio::fs::remove_file(&file_path)
1119 0 : .await
1120 0 : .or_else(fs_ext::ignore_not_found)
1121 : {
1122 0 : tracing::error!("Failed to remove temporary file {file_path}: {e}");
1123 0 : }
1124 0 : continue;
1125 0 : }
1126 0 :
1127 0 : match LayerName::from_str(file_name) {
1128 0 : Ok(name) => {
1129 0 : let remote_meta = heatmap_metadata.get(&name);
1130 0 : match remote_meta {
1131 0 : Some(remote_meta) => {
1132 0 : // TODO: checksums for layers (https://github.com/neondatabase/neon/issues/2784)
1133 0 : if local_meta.len() != remote_meta.metadata.file_size {
1134 : // This should not happen, because we do crashsafe write-then-rename when downloading
1135 : // layers, and layers in remote storage are immutable. Remove the local file because
1136 : // we cannot trust it.
1137 0 : tracing::warn!(
1138 0 : "Removing local layer {name} with unexpected local size {} != {}",
1139 0 : local_meta.len(),
1140 : remote_meta.metadata.file_size
1141 : );
1142 0 : } else {
1143 0 : // We expect the access time to be initialized immediately afterwards, when
1144 0 : // the latest heatmap is applied to the state.
1145 0 : detail.on_disk_layers.insert(
1146 0 : name.clone(),
1147 0 : OnDiskState::new(
1148 0 : conf,
1149 0 : tenant_shard_id,
1150 0 : &heatmap.timeline_id,
1151 0 : name,
1152 0 : remote_meta.metadata.clone(),
1153 0 : remote_meta.access_time,
1154 0 : file_path,
1155 0 : ),
1156 0 : );
1157 0 : }
1158 : }
1159 : None => {
1160 : // FIXME: consider some optimization when transitioning from attached to secondary: maybe
1161 : // wait until we have seen a heatmap that is more recent than the most recent on-disk state? Otherwise
1162 : // we will end up deleting any layers which were created+uploaded more recently than the heatmap.
1163 0 : tracing::info!(
1164 0 : "Removing secondary local layer {} because it's absent in heatmap",
1165 : name
1166 : );
1167 0 : tokio::fs::remove_file(&dentry.path())
1168 0 : .await
1169 0 : .or_else(fs_ext::ignore_not_found)
1170 0 : .fatal_err(&format!(
1171 0 : "Removing layer {}",
1172 0 : dentry.path().to_string_lossy()
1173 0 : ));
1174 : }
1175 : }
1176 : }
1177 : Err(_) => {
1178 : // Ignore it.
1179 0 : tracing::warn!("Unexpected file in timeline directory: {file_name}");
1180 : }
1181 : }
1182 : }
1183 :
1184 0 : detail
1185 0 : }