use std::{
    collections::HashMap,
    pin::Pin,
    sync::{Arc, Weak},
    time::{Duration, Instant},
};

use crate::{
    metrics::SECONDARY_MODE,
    tenant::{
        config::AttachmentMode,
        mgr::GetTenantError,
        mgr::TenantManager,
        remote_timeline_client::remote_heatmap_path,
        span::debug_assert_current_span_has_tenant_id,
        tasks::{warn_when_period_overrun, BackgroundLoopKind},
        Tenant,
    },
};

use futures::Future;
use pageserver_api::shard::TenantShardId;
use remote_storage::{GenericRemoteStorage, TimeoutOrCancel};

use super::{
    heatmap::HeatMapTenant,
    scheduler::{
        self, period_jitter, period_warmup, JobGenerator, RunningJob, SchedulingResult,
        TenantBackgroundJobs,
    },
    CommandRequest, UploadCommand,
};
use tokio_util::sync::CancellationToken;
use tracing::{info_span, instrument, Instrument};
use utils::{backoff, completion::Barrier, yielding_loop::yielding_loop};

pub(super) async fn heatmap_uploader_task(
    tenant_manager: Arc<TenantManager>,
    remote_storage: GenericRemoteStorage,
    command_queue: tokio::sync::mpsc::Receiver<CommandRequest<UploadCommand>>,
    background_jobs_can_start: Barrier,
    cancel: CancellationToken,
) {
    let concurrency = tenant_manager.get_conf().heatmap_upload_concurrency;

    let generator = HeatmapUploader {
        tenant_manager,
        remote_storage,
        cancel: cancel.clone(),
        tenants: HashMap::new(),
    };
    let mut scheduler = Scheduler::new(generator, concurrency);

    scheduler
        .run(command_queue, background_jobs_can_start, cancel)
        .instrument(info_span!("heatmap_upload_scheduler"))
        .await
}

/// This type is owned by a single task ([`heatmap_uploader_task`]) which runs an event
/// handling loop and mutates it as needed: there are no locks here, because that event loop
/// can hold &mut references to this type throughout.
struct HeatmapUploader {
    tenant_manager: Arc<TenantManager>,
    remote_storage: GenericRemoteStorage,
    cancel: CancellationToken,

    tenants: HashMap<TenantShardId, UploaderTenantState>,
}

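/// Handle to an upload job that is currently executing: the barrier resolves once the
/// spawned upload future drops its completion guard (see the `_completion` guard in `spawn`).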
struct WriteInProgress {
    barrier: Barrier,
}

impl RunningJob for WriteInProgress {
    fn get_barrier(&self) -> Barrier {
        self.barrier.clone()
    }
}

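/// An upload the scheduler has selected but not yet spawned: carries the tenant handle,
/// the state of the last successful upload, and the timing information used for overrun
/// warnings in `spawn`.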
struct UploadPending {
    tenant: Arc<Tenant>,
    last_upload: Option<LastUploadState>,
    target_time: Option<Instant>,
    period: Option<Duration>,
}

impl scheduler::PendingJob for UploadPending {
    fn get_tenant_shard_id(&self) -> &TenantShardId {
        self.tenant.get_tenant_shard_id()
    }
}

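/// Result of a finished upload job, reported back to the scheduler and applied to the
/// per-tenant state in `on_completion`.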
struct WriteComplete {
    tenant_shard_id: TenantShardId,
    completed_at: Instant,
    uploaded: Option<LastUploadState>,
    next_upload: Option<Instant>,
}

impl scheduler::Completion for WriteComplete {
    fn get_tenant_shard_id(&self) -> &TenantShardId {
        &self.tenant_shard_id
    }
}

/// The heatmap uploader keeps a little bit of per-tenant state, mainly to remember
/// when we last did a write. We only populate this once a tenant is scheduled for
/// uploads -- this avoids holding state for tenants that have uploads disabled.
struct UploaderTenantState {
    // This Weak only exists to enable culling idle instances of this type
    // when the Tenant has been deallocated.
    tenant: Weak<Tenant>,

    /// Digest of the serialized heatmap that we last successfully uploaded
    last_upload_state: Option<LastUploadState>,

    /// When the last upload attempt completed (may have been successful or failed)
    last_upload: Option<Instant>,

    /// When should we next do an upload? None means never.
    next_upload: Option<Instant>,
}

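/// The shared background-job scheduler, bound to this module's concrete job, state,
/// completion and command types.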
type Scheduler = TenantBackgroundJobs<
    HeatmapUploader,
    UploadPending,
    WriteInProgress,
    WriteComplete,
    UploadCommand,
>;

impl JobGenerator<UploadPending, WriteInProgress, WriteComplete, UploadCommand>
    for HeatmapUploader
{
    async fn schedule(&mut self) -> SchedulingResult<UploadPending> {
        // Cull any entries in self.tenants whose Arc<Tenant> is gone
        self.tenants
            .retain(|_k, v| v.tenant.upgrade().is_some() && v.next_upload.is_some());

        let now = Instant::now();

        let mut result = SchedulingResult {
            jobs: Vec::new(),
            want_interval: None,
        };

        let tenants = self.tenant_manager.get_attached_active_tenant_shards();

        yielding_loop(1000, &self.cancel, tenants.into_iter(), |tenant| {
            let period = match tenant.get_heatmap_period() {
                None => {
                    // Heatmaps are disabled for this tenant
                    return;
                }
                Some(period) => {
                    // If any tenant has asked for uploads more frequent than our scheduling interval,
                    // reduce it to match so that we can keep up. This is mainly useful in testing, where
                    // we may set rather short intervals.
                    result.want_interval = match result.want_interval {
                        None => Some(period),
                        Some(existing) => Some(std::cmp::min(period, existing)),
                    };

                    period
                }
            };

            // Stale attachments do not upload anything: if we are in this state, there is probably some
            // other attachment in mode Single or Multi running on another pageserver, and we don't
            // want to thrash and overwrite their heatmap uploads.
            if tenant.get_attach_mode() == AttachmentMode::Stale {
                return;
            }

            // Create an entry in self.tenants if one doesn't already exist: this will later be updated
            // with the completion time in on_completion.
            let state = self
                .tenants
                .entry(*tenant.get_tenant_shard_id())
                .or_insert_with(|| UploaderTenantState {
                    tenant: Arc::downgrade(&tenant),
                    last_upload: None,
                    next_upload: Some(now.checked_add(period_warmup(period)).unwrap_or(now)),
                    last_upload_state: None,
                });

            // Decline to do the upload if insufficient time has passed
            if state.next_upload.map(|nu| nu > now).unwrap_or(false) {
                return;
            }

            let last_upload = state.last_upload_state.clone();
            result.jobs.push(UploadPending {
                tenant,
                last_upload,
                target_time: state.next_upload,
                period: Some(period),
            });
        })
        .await
        .ok();

        result
    }

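    /// Start one heatmap upload: returns the in-flight handle plus the future that the
    /// scheduler drives to completion, which performs the upload and reports a
    /// [`WriteComplete`] carrying the updated digest state and next upload time.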
    fn spawn(
        &mut self,
        job: UploadPending,
    ) -> (
        WriteInProgress,
        Pin<Box<dyn Future<Output = WriteComplete> + Send>>,
    ) {
        let UploadPending {
            tenant,
            last_upload,
            target_time,
            period,
        } = job;

        let remote_storage = self.remote_storage.clone();
        let (completion, barrier) = utils::completion::channel();
        let tenant_shard_id = *tenant.get_tenant_shard_id();
        (WriteInProgress { barrier }, Box::pin(async move {
            // Guard for the barrier in [`WriteInProgress`]
            let _completion = completion;

            let started_at = Instant::now();
            let uploaded = match upload_tenant_heatmap(remote_storage, &tenant, last_upload.clone()).await {
                Ok(UploadHeatmapOutcome::Uploaded(uploaded)) => {
                    let duration = Instant::now().duration_since(started_at);
                    SECONDARY_MODE
                        .upload_heatmap_duration
                        .observe(duration.as_secs_f64());
                    SECONDARY_MODE.upload_heatmap.inc();
                    Some(uploaded)
                }
                Ok(UploadHeatmapOutcome::NoChange | UploadHeatmapOutcome::Skipped) => last_upload,
                Err(UploadHeatmapError::Upload(e)) => {
                    tracing::warn!(
                        "Failed to upload heatmap for tenant {}: {e:#}",
                        tenant.get_tenant_shard_id(),
                    );
                    let duration = Instant::now().duration_since(started_at);
                    SECONDARY_MODE
                        .upload_heatmap_duration
                        .observe(duration.as_secs_f64());
                    SECONDARY_MODE.upload_heatmap_errors.inc();
                    last_upload
                }
                Err(UploadHeatmapError::Cancelled) => {
                    tracing::info!("Cancelled heatmap upload, shutting down");
                    last_upload
                }
            };

            let now = Instant::now();

            // If the job had a target execution time, we may check our final execution
            // time against that for observability purposes.
            if let (Some(target_time), Some(period)) = (target_time, period) {
                // Elapsed time includes any scheduling lag as well as the execution of the job
                let elapsed = now.duration_since(target_time);

                warn_when_period_overrun(elapsed, period, BackgroundLoopKind::HeatmapUpload);
            }

            let next_upload = tenant
                .get_heatmap_period()
                .and_then(|period| now.checked_add(period_jitter(period, 5)));

            WriteComplete {
                tenant_shard_id: *tenant.get_tenant_shard_id(),
                completed_at: now,
                uploaded,
                next_upload,
            }
        }.instrument(info_span!(parent: None, "heatmap_upload", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))))
    }

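    /// Handle an explicitly requested upload arriving via the command channel:
    /// `last_upload` is left as None so the upload happens even if nothing changed.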
    fn on_command(&mut self, command: UploadCommand) -> anyhow::Result<UploadPending> {
        let tenant_shard_id = command.get_tenant_shard_id();

        tracing::info!(
            tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
            "Starting heatmap write on command");
        let tenant = self
            .tenant_manager
            .get_attached_tenant_shard(*tenant_shard_id)
            .map_err(|e| anyhow::anyhow!(e))?;
        if !tenant.is_active() {
            return Err(GetTenantError::NotActive(*tenant_shard_id).into());
        }

        Ok(UploadPending {
            // Ignore our state for last digest: this forces an upload even if nothing has changed
            last_upload: None,
            tenant,
            target_time: None,
            period: None,
        })
    }

    #[instrument(skip_all, fields(tenant_id=%completion.tenant_shard_id.tenant_id, shard_id=%completion.tenant_shard_id.shard_slug()))]
    fn on_completion(&mut self, completion: WriteComplete) {
        tracing::debug!("Heatmap upload completed");
        let WriteComplete {
            tenant_shard_id,
            completed_at,
            uploaded,
            next_upload,
        } = completion;
        use std::collections::hash_map::Entry;
        match self.tenants.entry(tenant_shard_id) {
            Entry::Vacant(_) => {
                // Tenant state was dropped, nothing to update.
            }
            Entry::Occupied(mut entry) => {
                entry.get_mut().last_upload = Some(completed_at);
                entry.get_mut().last_upload_state = uploaded;
                entry.get_mut().next_upload = next_upload
            }
        }
    }
}

enum UploadHeatmapOutcome {
    /// We successfully wrote to remote storage, with this digest.
    Uploaded(LastUploadState),
    /// We did not upload because the heatmap digest was unchanged since the last upload
    NoChange,
    /// We skipped the upload for some reason, such as tenant/timeline not ready
    Skipped,
}

#[derive(thiserror::Error, Debug)]
enum UploadHeatmapError {
    #[error("Cancelled")]
    Cancelled,

    #[error(transparent)]
    Upload(#[from] anyhow::Error),
}

/// Digests describing the heatmap we most recently uploaded successfully.
///
/// md5 is generally a bad hash. We use it because it's convenient for interop with AWS S3's ETag,
/// which is also an md5sum.
#[derive(Clone)]
struct LastUploadState {
    // Digest of json-encoded HeatMapTenant
    uploaded_digest: md5::Digest,

    // Digest without atimes set.
    layers_only_digest: md5::Digest,
}

/// The inner upload operation. This will skip the upload if `last_upload` is Some and its
/// digest matches the digest of the object we would have uploaded.
async fn upload_tenant_heatmap(
    remote_storage: GenericRemoteStorage,
    tenant: &Arc<Tenant>,
    last_upload: Option<LastUploadState>,
) -> Result<UploadHeatmapOutcome, UploadHeatmapError> {
    debug_assert_current_span_has_tenant_id();

    let generation = tenant.get_generation();
    debug_assert!(!generation.is_none());
    if generation.is_none() {
        // We do not expect this: None generations should only appear in historic layer metadata, not in running Tenants
        tracing::warn!("Skipping heatmap upload for tenant with generation==None");
        return Ok(UploadHeatmapOutcome::Skipped);
    }

    let mut heatmap = HeatMapTenant {
        timelines: Vec::new(),
        generation,
        upload_period_ms: tenant.get_heatmap_period().map(|p| p.as_millis()),
    };
    let timelines = tenant.timelines.lock().unwrap().clone();

    // Ensure that Tenant::shutdown waits for any upload in flight: this is needed because otherwise
    // when we delete a tenant, we might race with an upload in flight and end up leaving a heatmap behind
    // in remote storage.
    let Ok(_guard) = tenant.gate.enter() else {
        tracing::info!("Skipping heatmap upload for tenant which is shutting down");
        return Err(UploadHeatmapError::Cancelled);
    };

    for (timeline_id, timeline) in timelines {
        let heatmap_timeline = timeline.generate_heatmap().await;
        match heatmap_timeline {
            None => {
                tracing::debug!(
                    "Skipping heatmap upload because timeline {timeline_id} is not ready"
                );
                return Ok(UploadHeatmapOutcome::Skipped);
            }
            Some(heatmap_timeline) => {
                heatmap.timelines.push(heatmap_timeline);
            }
        }
    }

    // Serialize the heatmap
    let bytes = serde_json::to_vec(&heatmap).map_err(|e| anyhow::anyhow!(e))?;

    // Drop out early if nothing changed since our last upload
    let digest = md5::compute(&bytes);
    if Some(&digest) == last_upload.as_ref().map(|d| &d.uploaded_digest) {
        return Ok(UploadHeatmapOutcome::NoChange);
    }

    // Calculate a digest that omits atimes, so that we can distinguish actual changes in
    // layers from changes only in atimes.
    let heatmap_size_bytes = heatmap.get_stats().bytes;
    let layers_only_bytes =
        serde_json::to_vec(&heatmap.strip_atimes()).map_err(|e| anyhow::anyhow!(e))?;
    let layers_only_digest = md5::compute(&layers_only_bytes);
    if heatmap_size_bytes < tenant.get_checkpoint_distance() {
        // For small tenants, skip upload if only atimes changed. This avoids doing frequent
        // uploads from long-idle tenants whose atimes are just incremented by periodic
        // size calculations.
        if Some(&layers_only_digest) == last_upload.as_ref().map(|d| &d.layers_only_digest) {
            return Ok(UploadHeatmapOutcome::NoChange);
        }
    }

    let bytes = bytes::Bytes::from(bytes);
    let size = bytes.len();

    let path = remote_heatmap_path(tenant.get_tenant_shard_id());

    let cancel = &tenant.cancel;

    tracing::debug!("Uploading {size} byte heatmap to {path}");
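    // Retry with backoff until the upload succeeds or the tenant's cancellation token fires;
    // a cancelled retry yields None, which is surfaced below as "Shutting down".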
    if let Err(e) = backoff::retry(
        || async {
            let bytes = futures::stream::once(futures::future::ready(Ok(bytes.clone())));
            remote_storage
                .upload_storage_object(bytes, size, &path, cancel)
                .await
        },
        TimeoutOrCancel::caused_by_cancel,
        3,
        u32::MAX,
        "Uploading heatmap",
        cancel,
    )
    .await
    .ok_or_else(|| anyhow::anyhow!("Shutting down"))
    .and_then(|x| x)
    {
        if cancel.is_cancelled() {
            return Err(UploadHeatmapError::Cancelled);
        } else {
            return Err(e.into());
        }
    }

    tracing::info!("Successfully uploaded {size} byte heatmap to {path}");

    Ok(UploadHeatmapOutcome::Uploaded(LastUploadState {
        uploaded_digest: digest,
        layers_only_digest,
    }))
}