use std::{
    collections::HashMap,
    pin::Pin,
    sync::{Arc, Weak},
    time::{Duration, Instant},
};

use crate::{
    metrics::SECONDARY_MODE,
    tenant::{
        config::AttachmentMode,
        mgr::GetTenantError,
        mgr::TenantManager,
        remote_timeline_client::remote_heatmap_path,
        span::debug_assert_current_span_has_tenant_id,
        tasks::{warn_when_period_overrun, BackgroundLoopKind},
        Tenant,
    },
};

use futures::Future;
use pageserver_api::shard::TenantShardId;
use remote_storage::{GenericRemoteStorage, TimeoutOrCancel};

use super::{
    heatmap::HeatMapTenant,
    scheduler::{
        self, period_jitter, period_warmup, JobGenerator, RunningJob, SchedulingResult,
        TenantBackgroundJobs,
    },
    CommandRequest, UploadCommand,
};
use tokio_util::sync::CancellationToken;
use tracing::{info_span, instrument, Instrument};
use utils::{backoff, completion::Barrier, yielding_loop::yielding_loop};

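/// Top-level background task: owns the [`HeatmapUploader`] state and drives the generic
/// scheduler, uploading heatmaps periodically and on explicit command.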
pub(super) async fn heatmap_uploader_task(
    tenant_manager: Arc<TenantManager>,
    remote_storage: GenericRemoteStorage,
    command_queue: tokio::sync::mpsc::Receiver<CommandRequest<UploadCommand>>,
    background_jobs_can_start: Barrier,
    cancel: CancellationToken,
) {
    let concurrency = tenant_manager.get_conf().heatmap_upload_concurrency;

    let generator = HeatmapUploader {
        tenant_manager,
        remote_storage,
        cancel: cancel.clone(),
        tenants: HashMap::new(),
    };
    let mut scheduler = Scheduler::new(generator, concurrency);

    scheduler
        .run(command_queue, background_jobs_can_start, cancel)
        .instrument(info_span!("heatmap_upload_scheduler"))
        .await
}

/// This type is owned by a single task ([`heatmap_uploader_task`]) which runs an event
/// handling loop and mutates it as needed: there are no locks here, because that event loop
/// can hold &mut references to this type throughout.
struct HeatmapUploader {
    tenant_manager: Arc<TenantManager>,
    remote_storage: GenericRemoteStorage,
    cancel: CancellationToken,

    tenants: HashMap<TenantShardId, UploaderTenantState>,
}

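/// A spawned upload job: the barrier is released when the upload future completes.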
struct WriteInProgress {
    barrier: Barrier,
}

impl RunningJob for WriteInProgress {
    fn get_barrier(&self) -> Barrier {
        self.barrier.clone()
    }
}

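/// An upload that has been selected to run but not yet spawned. Carries the state `spawn` needs,
/// including the previous upload's digests so that unchanged heatmaps can be skipped.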
struct UploadPending {
    tenant: Arc<Tenant>,
    last_upload: Option<LastUploadState>,
    target_time: Option<Instant>,
    period: Option<Duration>,
}

impl scheduler::PendingJob for UploadPending {
    fn get_tenant_shard_id(&self) -> &TenantShardId {
        self.tenant.get_tenant_shard_id()
    }
}

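/// Produced by the upload future when it finishes; fed back to `on_completion` to update the
/// per-tenant state and schedule the next upload.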
struct WriteComplete {
    tenant_shard_id: TenantShardId,
    completed_at: Instant,
    uploaded: Option<LastUploadState>,
    next_upload: Option<Instant>,
}

impl scheduler::Completion for WriteComplete {
    fn get_tenant_shard_id(&self) -> &TenantShardId {
        &self.tenant_shard_id
    }
}

/// The heatmap uploader keeps a little bit of per-tenant state, mainly to remember
/// when we last did a write. We only populate this after doing at least one
/// write for a tenant -- this avoids holding state for tenants that have
/// uploads disabled.
struct UploaderTenantState {
    // This Weak only exists to enable culling idle instances of this type
    // when the Tenant has been deallocated.
    tenant: Weak<Tenant>,

    /// Digest of the serialized heatmap that we last successfully uploaded
    last_upload_state: Option<LastUploadState>,

    /// When the last upload attempt completed (whether it succeeded or failed)
    last_upload: Option<Instant>,

    /// When should we next do an upload? None means never.
    next_upload: Option<Instant>,
}

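// Shorthand for the generic background-job scheduler instantiated with the uploader's job types.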
type Scheduler = TenantBackgroundJobs<
    HeatmapUploader,
    UploadPending,
    WriteInProgress,
    WriteComplete,
    UploadCommand,
>;

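// This impl is what plugs the uploader into the generic scheduler: `schedule` picks the tenants
// that are due for an upload, `spawn` launches the upload future, `on_command` handles explicit
// upload requests, and `on_completion` records the outcome.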
impl JobGenerator<UploadPending, WriteInProgress, WriteComplete, UploadCommand>
    for HeatmapUploader
{
    async fn schedule(&mut self) -> SchedulingResult<UploadPending> {
        // Cull any entries in self.tenants whose Arc<Tenant> is gone
        self.tenants
            .retain(|_k, v| v.tenant.upgrade().is_some() && v.next_upload.is_some());

        let now = Instant::now();

        let mut result = SchedulingResult {
            jobs: Vec::new(),
            want_interval: None,
        };

        let tenants = self.tenant_manager.get_attached_active_tenant_shards();

        yielding_loop(1000, &self.cancel, tenants.into_iter(), |tenant| {
            let period = match tenant.get_heatmap_period() {
                None => {
                    // Heatmaps are disabled for this tenant
                    return;
                }
                Some(period) => {
                    // If any tenant has asked for uploads more frequent than our scheduling interval,
                    // reduce it to match so that we can keep up. This is mainly useful in testing, where
                    // we may set rather short intervals.
                    result.want_interval = match result.want_interval {
                        None => Some(period),
                        Some(existing) => Some(std::cmp::min(period, existing)),
                    };

                    period
                }
            };

            // Stale attachments do not upload anything: if we are in this state, there is probably some
            // other attachment in mode Single or Multi running on another pageserver, and we don't
            // want to thrash and overwrite their heatmap uploads.
            if tenant.get_attach_mode() == AttachmentMode::Stale {
                return;
            }

            // Create an entry in self.tenants if one doesn't already exist: this will later be updated
            // with the completion time in on_completion.
            let state = self
                .tenants
                .entry(*tenant.get_tenant_shard_id())
                .or_insert_with(|| UploaderTenantState {
                    tenant: Arc::downgrade(&tenant),
                    last_upload: None,
                    next_upload: Some(now.checked_add(period_warmup(period)).unwrap_or(now)),
                    last_upload_state: None,
                });

            // Decline to do the upload if insufficient time has passed
            if state.next_upload.map(|nu| nu > now).unwrap_or(false) {
                return;
            }

            let last_upload = state.last_upload_state.clone();
            result.jobs.push(UploadPending {
                tenant,
                last_upload,
                target_time: state.next_upload,
                period: Some(period),
            });
        })
        .await
        .ok();

        result
    }

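    // Turn a pending upload into a running one: the returned WriteInProgress barrier lets the
    // scheduler wait for the in-flight upload, and the boxed future resolves to a WriteComplete.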
    fn spawn(
        &mut self,
        job: UploadPending,
    ) -> (
        WriteInProgress,
        Pin<Box<dyn Future<Output = WriteComplete> + Send>>,
    ) {
        let UploadPending {
            tenant,
            last_upload,
            target_time,
            period,
        } = job;

        let remote_storage = self.remote_storage.clone();
        let (completion, barrier) = utils::completion::channel();
        let tenant_shard_id = *tenant.get_tenant_shard_id();
        (WriteInProgress { barrier }, Box::pin(async move {
            // Guard for the barrier in [`WriteInProgress`]
            let _completion = completion;

            let started_at = Instant::now();
            let uploaded = match upload_tenant_heatmap(remote_storage, &tenant, last_upload.clone()).await {
                Ok(UploadHeatmapOutcome::Uploaded(uploaded)) => {
                    let duration = Instant::now().duration_since(started_at);
                    SECONDARY_MODE
                        .upload_heatmap_duration
                        .observe(duration.as_secs_f64());
                    SECONDARY_MODE.upload_heatmap.inc();
                    Some(uploaded)
                }
                Ok(UploadHeatmapOutcome::NoChange | UploadHeatmapOutcome::Skipped) => last_upload,
                Err(UploadHeatmapError::Upload(e)) => {
                    tracing::warn!(
                        "Failed to upload heatmap for tenant {}: {e:#}",
                        tenant.get_tenant_shard_id(),
                    );
                    let duration = Instant::now().duration_since(started_at);
                    SECONDARY_MODE
                        .upload_heatmap_duration
                        .observe(duration.as_secs_f64());
                    SECONDARY_MODE.upload_heatmap_errors.inc();
                    last_upload
                }
                Err(UploadHeatmapError::Cancelled) => {
                    tracing::info!("Cancelled heatmap upload, shutting down");
                    last_upload
                }
            };

            let now = Instant::now();

            // If the job had a target execution time, we may check our final execution
            // time against that for observability purposes.
            if let (Some(target_time), Some(period)) = (target_time, period) {
                // Elapsed time includes any scheduling lag as well as the execution of the job
                let elapsed = now.duration_since(target_time);

                warn_when_period_overrun(elapsed, period, BackgroundLoopKind::HeatmapUpload);
            }

            let next_upload = tenant
                .get_heatmap_period()
                .and_then(|period| now.checked_add(period_jitter(period, 5)));

            WriteComplete {
                tenant_shard_id: *tenant.get_tenant_shard_id(),
                completed_at: now,
                uploaded,
                next_upload,
            }
        }.instrument(info_span!(parent: None, "heatmap_upload", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))))
    }

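    // Handle an explicitly requested upload arriving via the command queue: the last-upload
    // digest is deliberately dropped so the heatmap is written even if nothing changed.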
    fn on_command(&mut self, command: UploadCommand) -> anyhow::Result<UploadPending> {
        let tenant_shard_id = command.get_tenant_shard_id();

        tracing::info!(
            tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
            "Starting heatmap write on command");
        let tenant = self
            .tenant_manager
            .get_attached_tenant_shard(*tenant_shard_id)
            .map_err(|e| anyhow::anyhow!(e))?;
        if !tenant.is_active() {
            return Err(GetTenantError::NotActive(*tenant_shard_id).into());
        }

        Ok(UploadPending {
            // Ignore our state for last digest: this forces an upload even if nothing has changed
            last_upload: None,
            tenant,
            target_time: None,
            period: None,
        })
    }

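    // Apply the result of a finished upload to our per-tenant state, so the next scheduling
    // pass knows what was last uploaded and when this tenant is next due.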
    #[instrument(skip_all, fields(tenant_id=%completion.tenant_shard_id.tenant_id, shard_id=%completion.tenant_shard_id.shard_slug()))]
    fn on_completion(&mut self, completion: WriteComplete) {
        tracing::debug!("Heatmap upload completed");
        let WriteComplete {
            tenant_shard_id,
            completed_at,
            uploaded,
            next_upload,
        } = completion;
        use std::collections::hash_map::Entry;
        match self.tenants.entry(tenant_shard_id) {
            Entry::Vacant(_) => {
                // Tenant state was dropped, nothing to update.
            }
            Entry::Occupied(mut entry) => {
                entry.get_mut().last_upload = Some(completed_at);
                entry.get_mut().last_upload_state = uploaded;
                entry.get_mut().next_upload = next_upload
            }
        }
    }
}

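/// Non-error outcomes of a single upload attempt; failures are reported via [`UploadHeatmapError`].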
enum UploadHeatmapOutcome {
    /// We successfully wrote to remote storage, with this digest.
    Uploaded(LastUploadState),
    /// We did not upload because the heatmap digest was unchanged since the last upload
    NoChange,
    /// We skipped the upload for some reason, such as tenant/timeline not ready
    Skipped,
}

#[derive(thiserror::Error, Debug)]
enum UploadHeatmapError {
    #[error("Cancelled")]
    Cancelled,

    #[error(transparent)]
    Upload(#[from] anyhow::Error),
}

/// Digests describing the heatmap we most recently uploaded successfully.
///
/// md5 is generally a bad hash. We use it because it's convenient for interop with AWS S3's ETag,
/// which is also an md5sum.
#[derive(Clone)]
struct LastUploadState {
    // Digest of json-encoded HeatMapTenant
    uploaded_digest: md5::Digest,

    // Digest without atimes set.
    layers_only_digest: md5::Digest,
}

/// The inner upload operation. This will skip the upload if `last_upload` is Some and its digest
/// matches the digest of the object we would have uploaded.
async fn upload_tenant_heatmap(
    remote_storage: GenericRemoteStorage,
    tenant: &Arc<Tenant>,
    last_upload: Option<LastUploadState>,
) -> Result<UploadHeatmapOutcome, UploadHeatmapError> {
    debug_assert_current_span_has_tenant_id();

    let generation = tenant.get_generation();
    debug_assert!(!generation.is_none());
    if generation.is_none() {
        // We do not expect this: None generations should only appear in historic layer metadata, not in running Tenants
        tracing::warn!("Skipping heatmap upload for tenant with generation==None");
        return Ok(UploadHeatmapOutcome::Skipped);
    }

    let mut heatmap = HeatMapTenant {
        timelines: Vec::new(),
        generation,
        upload_period_ms: tenant.get_heatmap_period().map(|p| p.as_millis()),
    };
    let timelines = tenant.timelines.lock().unwrap().clone();

    // Ensure that Tenant::shutdown waits for any upload in flight: this is needed because otherwise
    // when we delete a tenant, we might race with an upload in flight and end up leaving a heatmap behind
    // in remote storage.
    let Ok(_guard) = tenant.gate.enter() else {
        tracing::info!("Skipping heatmap upload for tenant which is shutting down");
        return Err(UploadHeatmapError::Cancelled);
    };

    for (timeline_id, timeline) in timelines {
        let heatmap_timeline = timeline.generate_heatmap().await;
        match heatmap_timeline {
            None => {
                tracing::debug!(
                    "Skipping heatmap upload because timeline {timeline_id} is not ready"
                );
                return Ok(UploadHeatmapOutcome::Skipped);
            }
            Some(heatmap_timeline) => {
                heatmap.timelines.push(heatmap_timeline);
            }
        }
    }

    // Serialize the heatmap
    let bytes = serde_json::to_vec(&heatmap).map_err(|e| anyhow::anyhow!(e))?;

    // Drop out early if nothing changed since our last upload
    let digest = md5::compute(&bytes);
    if Some(&digest) == last_upload.as_ref().map(|d| &d.uploaded_digest) {
        return Ok(UploadHeatmapOutcome::NoChange);
    }

    // Calculate a digest that omits atimes, so that we can distinguish actual changes in
    // layers from changes only in atimes.
    let heatmap_size_bytes = heatmap.get_stats().bytes;
    let layers_only_bytes =
        serde_json::to_vec(&heatmap.strip_atimes()).map_err(|e| anyhow::anyhow!(e))?;
    let layers_only_digest = md5::compute(&layers_only_bytes);
    if heatmap_size_bytes < tenant.get_checkpoint_distance() {
        // For small tenants, skip upload if only atimes changed. This avoids doing frequent
        // uploads from long-idle tenants whose atimes are just incremented by periodic
        // size calculations.
        if Some(&layers_only_digest) == last_upload.as_ref().map(|d| &d.layers_only_digest) {
            return Ok(UploadHeatmapOutcome::NoChange);
        }
    }

    let bytes = bytes::Bytes::from(bytes);
    let size = bytes.len();

    let path = remote_heatmap_path(tenant.get_tenant_shard_id());

    let cancel = &tenant.cancel;

    tracing::debug!("Uploading {size} byte heatmap to {path}");
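    // Upload with retries; if we are cancelled (e.g. tenant shutdown) while waiting or retrying,
    // report UploadHeatmapError::Cancelled instead of treating it as an upload failure.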
    if let Err(e) = backoff::retry(
        || async {
            let bytes = futures::stream::once(futures::future::ready(Ok(bytes.clone())));
            remote_storage
                .upload_storage_object(bytes, size, &path, cancel)
                .await
        },
        TimeoutOrCancel::caused_by_cancel,
        3,
        u32::MAX,
        "Uploading heatmap",
        cancel,
    )
    .await
    .ok_or_else(|| anyhow::anyhow!("Shutting down"))
    .and_then(|x| x)
    {
        if cancel.is_cancelled() {
            return Err(UploadHeatmapError::Cancelled);
        } else {
            return Err(e.into());
        }
    }

    tracing::info!("Successfully uploaded {size} byte heatmap to {path}");

    Ok(UploadHeatmapOutcome::Uploaded(LastUploadState {
        uploaded_digest: digest,
        layers_only_digest,
    }))
}
|