Line data Source code
1 : use std::cmp;
2 : use std::collections::hash_map::Entry;
3 : use std::collections::{HashMap, HashSet};
4 : use std::sync::Arc;
5 :
6 : use tenant_size_model::svg::SvgBranchKind;
7 : use tokio::sync::oneshot::error::RecvError;
8 : use tokio::sync::Semaphore;
9 : use tokio_util::sync::CancellationToken;
10 :
11 : use crate::context::RequestContext;
12 : use crate::pgdatadir_mapping::CalculateLogicalSizeError;
13 :
14 : use super::{GcError, LogicalSizeCalculationCause, Tenant};
15 : use crate::tenant::{MaybeOffloaded, Timeline};
16 : use utils::id::TimelineId;
17 : use utils::lsn::Lsn;
18 :
19 : use tracing::*;
20 :
21 : use tenant_size_model::{Segment, StorageModel};
22 :
23 : /// Inputs to the actual tenant sizing model
24 : ///
25 : /// Implements [`serde::Serialize`] but is not meant to be part of the public API; instead it is
26 : /// meant to be a transferable format between execution environments and developers.
27 : ///
28 : /// This tracks more information than the actual StorageModel that the calculation
29 : /// needs. We will convert this into a StorageModel when it's time to perform
30 : /// the calculation.
31 : ///
32 12 : #[derive(Debug, serde::Serialize, serde::Deserialize)]
33 : pub struct ModelInputs {
34 : pub segments: Vec<SegmentMeta>,
35 : pub timeline_inputs: Vec<TimelineInputs>,
36 : }
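// Illustrative round-trip (a minimal sketch, not part of the public API): a serialized
// `ModelInputs` document, such as the JSON fixtures in the tests at the bottom of this
// file, can be fed back in and recomputed offline. The `json` binding below is hypothetical.
//
//     let inputs: ModelInputs = serde_json::from_str(json)?;
//     let synthetic_size: u64 = inputs.calculate();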
37 :
38 : /// A [`Segment`], with some extra information for display purposes
39 112 : #[derive(Debug, serde::Serialize, serde::Deserialize)]
40 : pub struct SegmentMeta {
41 : pub segment: Segment,
42 : pub timeline_id: TimelineId,
43 : pub kind: LsnKind,
44 : }
45 :
46 0 : #[derive(thiserror::Error, Debug)]
47 : pub(crate) enum CalculateSyntheticSizeError {
48 : /// Something went wrong internally during the calculation of logical size at a particular branch point
49 : #[error("Failed to calculate logical size on timeline {timeline_id} at {lsn}: {error}")]
50 : LogicalSize {
51 : timeline_id: TimelineId,
52 : lsn: Lsn,
53 : error: CalculateLogicalSizeError,
54 : },
55 :
56 : /// Something went wrong internally when calculating GC parameters at the start of the size calculation
57 : #[error(transparent)]
58 : GcInfo(GcError),
59 :
60 : /// Totally unexpected errors, like panics joining a task
61 : #[error(transparent)]
62 : Fatal(anyhow::Error),
63 :
64 : /// Tenant shut down while calculating size
65 : #[error("Cancelled")]
66 : Cancelled,
67 : }
68 :
69 : impl From<GcError> for CalculateSyntheticSizeError {
70 0 : fn from(value: GcError) -> Self {
71 0 : match value {
72 : GcError::TenantCancelled | GcError::TimelineCancelled => {
73 0 : CalculateSyntheticSizeError::Cancelled
74 : }
75 0 : other => CalculateSyntheticSizeError::GcInfo(other),
76 : }
77 0 : }
78 : }
79 :
80 : impl SegmentMeta {
81 0 : fn size_needed(&self) -> bool {
82 0 : match self.kind {
83 : LsnKind::BranchStart => {
84 : // If we don't have a later GcCutoff point on this branch, and
85 : // no ancestor, calculate size for the branch start point.
86 0 : self.segment.needed && self.segment.parent.is_none()
87 : }
88 0 : LsnKind::BranchPoint => true,
89 0 : LsnKind::GcCutOff => true,
90 0 : LsnKind::BranchEnd => false,
91 0 : LsnKind::LeasePoint => true,
92 0 : LsnKind::LeaseStart => false,
93 0 : LsnKind::LeaseEnd => false,
94 : }
95 0 : }
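// A quick summary of the match above (illustrative): logical sizes are only fetched for
// GcCutOff, BranchPoint and LeasePoint segments, plus BranchStart segments that are needed
// and have no parent; BranchEnd, LeaseStart and LeaseEnd segments never need one.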
96 : }
97 :
98 : #[derive(
99 56 : Debug, Clone, Copy, Eq, Ord, PartialEq, PartialOrd, serde::Serialize, serde::Deserialize,
100 : )]
101 : pub enum LsnKind {
102 : /// A timeline starting here
103 : BranchStart,
104 : /// A child timeline branches off from here
105 : BranchPoint,
106 : /// GC cutoff point
107 : GcCutOff,
108 : /// Last record LSN
109 : BranchEnd,
110 : /// An LSN lease is granted here.
111 : LeasePoint,
112 : /// A lease starts from here.
113 : LeaseStart,
114 : /// Last record LSN for the lease (should have the same LSN as the previous [`LsnKind::LeaseStart`]).
115 : LeaseEnd,
116 : }
117 :
118 : impl From<LsnKind> for SvgBranchKind {
119 0 : fn from(kind: LsnKind) -> Self {
120 0 : match kind {
121 0 : LsnKind::LeasePoint | LsnKind::LeaseStart | LsnKind::LeaseEnd => SvgBranchKind::Lease,
122 0 : _ => SvgBranchKind::Timeline,
123 : }
124 0 : }
125 : }
126 :
127 : /// Collects all LSNs relevant to the inputs. These are only helpful in the serialized form, as
128 : /// part of [`ModelInputs`] from the HTTP API, to explain the inputs.
129 64 : #[derive(Debug, serde::Serialize, serde::Deserialize)]
130 : pub struct TimelineInputs {
131 : pub timeline_id: TimelineId,
132 :
133 : pub ancestor_id: Option<TimelineId>,
134 :
135 : ancestor_lsn: Lsn,
136 : last_record: Lsn,
137 : latest_gc_cutoff: Lsn,
138 :
139 : /// Cutoff point based on GC settings
140 : next_pitr_cutoff: Lsn,
141 :
142 : /// Cutoff point calculated from the user-supplied 'max_retention_period'
143 : retention_param_cutoff: Option<Lsn>,
144 :
145 : /// Lease points on the timeline
146 : lease_points: Vec<Lsn>,
147 : }
148 :
149 : /// Gathers the inputs for the tenant sizing model.
150 : ///
151 : /// Tenant size does not consider the latest state, but only the state up to next_pitr_cutoff, which
152 : /// is updated on demand at the start of this calculation, separately from
153 : /// [`TimelineInputs::latest_gc_cutoff`].
154 : ///
155 : /// For timelines in general:
156 : ///
157 : /// ```text
158 : /// 0-----|---------|----|------------| · · · · · |·> lsn
159 : /// initdb_lsn branchpoints* next_pitr_cutoff latest
160 : /// ```
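///
/// For example (illustrative, matching the single-branch test at the bottom of this file), a
/// timeline with no child branches or leases, and with a PITR cutoff past its start, produces
/// three segments:
///
/// ```text
/// BranchStart -> GcCutOff -> BranchEnd
/// ```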
161 0 : pub(super) async fn gather_inputs(
162 0 : tenant: &Tenant,
163 0 : limit: &Arc<Semaphore>,
164 0 : max_retention_period: Option<u64>,
165 0 : logical_size_cache: &mut HashMap<(TimelineId, Lsn), u64>,
166 0 : cause: LogicalSizeCalculationCause,
167 0 : cancel: &CancellationToken,
168 0 : ctx: &RequestContext,
169 0 : ) -> Result<ModelInputs, CalculateSyntheticSizeError> {
170 0 : // refresh is needed to update [`timeline::GcCutoffs`]
171 0 : tenant.refresh_gc_info(cancel, ctx).await?;
172 :
173 : // Collect information about all the timelines
174 0 : let mut timelines = tenant.list_timelines();
175 0 :
176 0 : if timelines.is_empty() {
177 : // perhaps the tenant has just been created, and as such doesn't have any data yet
178 0 : return Ok(ModelInputs {
179 0 : segments: vec![],
180 0 : timeline_inputs: Vec::new(),
181 0 : });
182 0 : }
183 0 :
184 0 : // Filter out timelines that are not active
185 0 : //
186 0 : // There may be a race when a timeline is dropped,
187 0 : // but it is unlikely to cause any issues. In the worst case,
188 0 : // the calculation will error out.
189 0 : timelines.retain(|t| t.is_active());
190 0 : // Also filter out archived timelines.
191 0 : timelines.retain(|t| t.is_archived() != Some(true));
192 0 :
193 0 : // Build a map of branch points.
194 0 : let mut branchpoints: HashMap<TimelineId, HashSet<Lsn>> = HashMap::new();
195 0 : for timeline in timelines.iter() {
196 0 : if let Some(ancestor_id) = timeline.get_ancestor_timeline_id() {
197 0 : branchpoints
198 0 : .entry(ancestor_id)
199 0 : .or_default()
200 0 : .insert(timeline.get_ancestor_lsn());
201 0 : }
202 : }
203 :
204 : // These become the final result.
205 0 : let mut timeline_inputs = Vec::with_capacity(timelines.len());
206 0 : let mut segments: Vec<SegmentMeta> = Vec::new();
207 0 :
208 0 : //
209 0 : // Build Segments representing each timeline. As we do that, also remember
210 0 : // the branchpoints and branch startpoints in 'branchpoint_segments' and
211 0 : // 'branchstart_segments'
212 0 : //
213 0 :
214 0 : // BranchPoint segments of each timeline
215 0 : // (timeline, branchpoint LSN) -> segment_id
216 0 : let mut branchpoint_segments: HashMap<(TimelineId, Lsn), usize> = HashMap::new();
217 :
218 : // timeline, Branchpoint seg id, (ancestor, ancestor LSN)
219 : type BranchStartSegment = (TimelineId, usize, Option<(TimelineId, Lsn)>);
220 0 : let mut branchstart_segments: Vec<BranchStartSegment> = Vec::new();
221 :
222 0 : for timeline in timelines.iter() {
223 0 : let timeline_id = timeline.timeline_id;
224 0 : let last_record_lsn = timeline.get_last_record_lsn();
225 0 : let ancestor_lsn = timeline.get_ancestor_lsn();
226 0 :
227 0 : // there's a race between the update (which holds tenant.gc_lock) and this read, but it
228 0 : // might not be an issue, because this read is not used for Timeline::gc
229 0 : let gc_info = timeline.gc_info.read().unwrap();
230 0 :
231 0 : // similar to gc, but Timeline::get_latest_gc_cutoff_lsn() will not be updated before a
232 0 : // new gc run, which we have no control over. However, unlike `Timeline::gc`,
233 0 : // we don't consider `Timeline::disk_consistent_lsn` at all, because we are not
234 0 : // actually removing files.
235 0 : //
236 0 : // We only consider [`timeline::GcCutoffs::time`], and not [`timeline::GcCutoffs::space`], because from
237 0 : // a user's perspective they have only requested retention up to the time bound (pitr_cutoff), rather
238 0 : // than our internal space cutoff. This means that if someone drops a database and waits for their
239 0 : // PITR interval, they will see synthetic size decrease, even if we are still storing data inside
240 0 : // the space cutoff.
241 0 : let mut next_pitr_cutoff = gc_info.cutoffs.time;
242 :
243 : // If the caller provided a shorter retention period, use that instead of the GC cutoff.
244 0 : let retention_param_cutoff = if let Some(max_retention_period) = max_retention_period {
245 0 : let param_cutoff = Lsn(last_record_lsn.0.saturating_sub(max_retention_period));
246 0 : if next_pitr_cutoff < param_cutoff {
247 0 : next_pitr_cutoff = param_cutoff;
248 0 : }
249 0 : Some(param_cutoff)
250 : } else {
251 0 : None
252 : };
253 :
254 0 : let lease_points = gc_info
255 0 : .leases
256 0 : .keys()
257 0 : .filter(|&&lsn| lsn > ancestor_lsn)
258 0 : .copied()
259 0 : .collect::<Vec<_>>();
260 0 :
261 0 : // The next_pitr_cutoff in the parent branch is not of interest (right now at least), nor do we
262 0 : // want to query any logical size before initdb_lsn.
263 0 : let branch_start_lsn = cmp::max(ancestor_lsn, timeline.initdb_lsn);
264 0 :
265 0 : // Build "interesting LSNs" on this timeline
266 0 : let mut lsns: Vec<(Lsn, LsnKind)> = gc_info
267 0 : .retain_lsns
268 0 : .iter()
269 0 : .filter(|(lsn, _child_id, is_offloaded)| {
270 0 : lsn > &ancestor_lsn && *is_offloaded == MaybeOffloaded::No
271 0 : })
272 0 : .copied()
273 0 : // this assumes there are no other retain_lsns than the branchpoints
274 0 : .map(|(lsn, _child_id, _is_offloaded)| (lsn, LsnKind::BranchPoint))
275 0 : .collect::<Vec<_>>();
276 0 :
277 0 : lsns.extend(lease_points.iter().map(|&lsn| (lsn, LsnKind::LeasePoint)));
278 0 :
279 0 : drop(gc_info);
280 :
281 : // Add branch points we collected earlier, just in case there were any that were
282 : // not present in retain_lsns. We will remove any duplicates below.
283 0 : if let Some(this_branchpoints) = branchpoints.get(&timeline_id) {
284 0 : lsns.extend(
285 0 : this_branchpoints
286 0 : .iter()
287 0 : .map(|lsn| (*lsn, LsnKind::BranchPoint)),
288 0 : )
289 0 : }
290 :
291 : // Add a point for the PITR cutoff
292 0 : let branch_start_needed = next_pitr_cutoff <= branch_start_lsn;
293 0 : if !branch_start_needed {
294 0 : lsns.push((next_pitr_cutoff, LsnKind::GcCutOff));
295 0 : }
296 :
297 0 : lsns.sort_unstable();
298 0 : lsns.dedup();
299 0 :
300 0 : //
301 0 : // Create Segments for the interesting points.
302 0 : //
303 0 :
304 0 : // Timeline start point
305 0 : let ancestor = timeline
306 0 : .get_ancestor_timeline_id()
307 0 : .map(|ancestor_id| (ancestor_id, ancestor_lsn));
308 0 : branchstart_segments.push((timeline_id, segments.len(), ancestor));
309 0 : segments.push(SegmentMeta {
310 0 : segment: Segment {
311 0 : parent: None, // filled in later
312 0 : lsn: branch_start_lsn.0,
313 0 : size: None, // filled in later
314 0 : needed: branch_start_needed,
315 0 : },
316 0 : timeline_id: timeline.timeline_id,
317 0 : kind: LsnKind::BranchStart,
318 0 : });
319 0 :
320 0 : // GC cutoff point, and any branch points, i.e. points where
321 0 : // other timelines branch off from this timeline.
322 0 : let mut parent = segments.len() - 1;
323 0 : for (lsn, kind) in lsns {
324 0 : if kind == LsnKind::BranchPoint {
325 0 : branchpoint_segments.insert((timeline_id, lsn), segments.len());
326 0 : }
327 :
328 0 : segments.push(SegmentMeta {
329 0 : segment: Segment {
330 0 : parent: Some(parent),
331 0 : lsn: lsn.0,
332 0 : size: None,
333 0 : needed: lsn > next_pitr_cutoff,
334 0 : },
335 0 : timeline_id: timeline.timeline_id,
336 0 : kind,
337 0 : });
338 0 :
339 0 : parent = segments.len() - 1;
340 0 :
341 0 : if kind == LsnKind::LeasePoint {
342 0 : // `LeaseStart` and `LeaseEnd` are needed as well, to model the lease as a read-only branch that never
343 0 : // writes data (i.e. its LSN has not advanced from ancestor_lsn); therefore the three segments have the
344 0 : // same LSN value. Without the other two segments, the calculation code would not count the leased LSN
345 0 : // as a point to be retained.
346 0 : // We did not use `BranchStart` or `BranchEnd`, so we can differentiate branches and leases during debugging.
347 0 : //
348 0 : // Alt Design: rewrite the entire calculation code to be independent of timeline id. Both leases and
349 0 : // branch points can be given a synthetic id so we can unite them.
350 0 : let mut lease_parent = parent;
351 0 :
352 0 : // Start of a lease.
353 0 : segments.push(SegmentMeta {
354 0 : segment: Segment {
355 0 : parent: Some(lease_parent),
356 0 : lsn: lsn.0,
357 0 : size: None, // Filled in later, if necessary
358 0 : needed: lsn > next_pitr_cutoff, // only needed if the point is within retention.
359 0 : },
360 0 : timeline_id: timeline.timeline_id,
361 0 : kind: LsnKind::LeaseStart,
362 0 : });
363 0 : lease_parent += 1;
364 0 :
365 0 : // End of the lease.
366 0 : segments.push(SegmentMeta {
367 0 : segment: Segment {
368 0 : parent: Some(lease_parent),
369 0 : lsn: lsn.0,
370 0 : size: None, // Filled in later, if necessary
371 0 : needed: true, // everything at the lease LSN must be readable => is needed
372 0 : },
373 0 : timeline_id: timeline.timeline_id,
374 0 : kind: LsnKind::LeaseEnd,
375 0 : });
376 0 : }
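// Resulting shape for a lease at LSN `L` (illustrative): the main timeline chains on
// from the LeasePoint, while the lease hangs off it as a zero-length branch.
//
//     ... -> [LeasePoint @ L] -> [next segment on this timeline]
//                     \
//                      -> [LeaseStart @ L] -> [LeaseEnd @ L]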
377 : }
378 :
379 : // Current end of the timeline
380 0 : segments.push(SegmentMeta {
381 0 : segment: Segment {
382 0 : parent: Some(parent),
383 0 : lsn: last_record_lsn.0,
384 0 : size: None, // Filled in later, if necessary
385 0 : needed: true,
386 0 : },
387 0 : timeline_id: timeline.timeline_id,
388 0 : kind: LsnKind::BranchEnd,
389 0 : });
390 0 :
391 0 : timeline_inputs.push(TimelineInputs {
392 0 : timeline_id: timeline.timeline_id,
393 0 : ancestor_id: timeline.get_ancestor_timeline_id(),
394 0 : ancestor_lsn,
395 0 : last_record: last_record_lsn,
396 0 : // this is not used above, because it might not have been updated recently enough
397 0 : latest_gc_cutoff: *timeline.get_latest_gc_cutoff_lsn(),
398 0 : next_pitr_cutoff,
399 0 : retention_param_cutoff,
400 0 : lease_points,
401 0 : });
402 : }
403 :
404 : // We now have all segments from the timelines in 'segments'. The timelines
405 : // haven't been linked to each other yet, though. Do that.
406 0 : for (_timeline_id, seg_id, ancestor) in branchstart_segments {
407 : // Look up the branch point
408 0 : if let Some(ancestor) = ancestor {
409 0 : let parent_id = *branchpoint_segments.get(&ancestor).unwrap();
410 0 : segments[seg_id].segment.parent = Some(parent_id);
411 0 : }
412 : }
413 :
414 : // We left the 'size' field empty in all of the Segments so far.
415 : // Now find logical sizes for all of the points that might need or benefit from them.
416 0 : fill_logical_sizes(
417 0 : &timelines,
418 0 : &mut segments,
419 0 : limit,
420 0 : logical_size_cache,
421 0 : cause,
422 0 : ctx,
423 0 : )
424 0 : .await?;
425 :
426 0 : if tenant.cancel.is_cancelled() {
427 : // If we're shutting down, return an error rather than a sparse result that might include some
428 : // timelines from before we started shutting down
429 0 : return Err(CalculateSyntheticSizeError::Cancelled);
430 0 : }
431 0 :
432 0 : Ok(ModelInputs {
433 0 : segments,
434 0 : timeline_inputs,
435 0 : })
436 0 : }
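// End-to-end flow (a minimal sketch; the bindings `tenant`, `limit`, `logical_size_cache`,
// `cause`, `cancel` and `ctx` are assumed to be provided by the caller, e.g. the synthetic
// size calculation task):
//
//     let inputs = gather_inputs(&tenant, &limit, None, &mut logical_size_cache, cause, &cancel, &ctx).await?;
//     let synthetic_size = inputs.calculate();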
437 :
438 : /// Augment 'segments' with logical sizes
439 : ///
440 : /// This will leave segments' sizes as None if the Timeline associated with the segment is deleted concurrently
441 : /// (i.e. we cannot read its logical size at a particular LSN).
442 0 : async fn fill_logical_sizes(
443 0 : timelines: &[Arc<Timeline>],
444 0 : segments: &mut [SegmentMeta],
445 0 : limit: &Arc<Semaphore>,
446 0 : logical_size_cache: &mut HashMap<(TimelineId, Lsn), u64>,
447 0 : cause: LogicalSizeCalculationCause,
448 0 : ctx: &RequestContext,
449 0 : ) -> Result<(), CalculateSyntheticSizeError> {
450 0 : let timeline_hash: HashMap<TimelineId, Arc<Timeline>> = HashMap::from_iter(
451 0 : timelines
452 0 : .iter()
453 0 : .map(|timeline| (timeline.timeline_id, Arc::clone(timeline))),
454 0 : );
455 0 :
456 0 : // Record the used/inserted cache keys here, so that extras can be removed and we don't start leaking.
457 0 : // After the initial run the cache should be quite stable, but live timelines will eventually
458 0 : // require new LSNs to be inspected.
459 0 : let mut sizes_needed = HashMap::<(TimelineId, Lsn), Option<u64>>::new();
460 0 :
461 0 : // With a JoinSet, all of the tasks are de-scheduled on drop, which we can use to
462 0 : // our advantage with `?` error handling.
463 0 : let mut joinset = tokio::task::JoinSet::new();
464 :
465 : // For each point that would benefit from having a logical size available,
466 : // spawn a Task to fetch it, unless we have it cached already.
467 0 : for seg in segments.iter() {
468 0 : if !seg.size_needed() {
469 0 : continue;
470 0 : }
471 0 :
472 0 : let timeline_id = seg.timeline_id;
473 0 : let lsn = Lsn(seg.segment.lsn);
474 :
475 0 : if let Entry::Vacant(e) = sizes_needed.entry((timeline_id, lsn)) {
476 0 : let cached_size = logical_size_cache.get(&(timeline_id, lsn)).cloned();
477 0 : if cached_size.is_none() {
478 0 : let timeline = Arc::clone(timeline_hash.get(&timeline_id).unwrap());
479 0 : let parallel_size_calcs = Arc::clone(limit);
480 0 : let ctx = ctx.attached_child();
481 0 : joinset.spawn(
482 0 : calculate_logical_size(parallel_size_calcs, timeline, lsn, cause, ctx)
483 0 : .in_current_span(),
484 0 : );
485 0 : }
486 0 : e.insert(cached_size);
487 0 : }
488 : }
489 :
490 : // Perform the size lookups
491 0 : let mut have_any_error = None;
492 0 : while let Some(res) = joinset.join_next().await {
493 : // each of these comes with Result<anyhow::Result<_>, JoinError>
494 : // because of spawn + spawn_blocking
495 0 : match res {
496 0 : Err(join_error) if join_error.is_cancelled() => {
497 0 : unreachable!("we are not cancelling any of the futures, nor should be");
498 : }
499 0 : Err(join_error) => {
500 0 : // cannot really do anything, as this panic is likely a bug
501 0 : error!("task that calls spawn_ondemand_logical_size_calculation panicked: {join_error:#}");
502 :
503 0 : have_any_error = Some(CalculateSyntheticSizeError::Fatal(
504 0 : anyhow::anyhow!(join_error)
505 0 : .context("task that calls spawn_ondemand_logical_size_calculation"),
506 0 : ));
507 : }
508 0 : Ok(Err(recv_result_error)) => {
509 0 : // cannot really do anything, as this failure is likely a bug
510 0 : error!("failed to receive logical size query result: {recv_result_error:#}");
511 0 : have_any_error = Some(CalculateSyntheticSizeError::Fatal(
512 0 : anyhow::anyhow!(recv_result_error)
513 0 : .context("Receiving logical size query result"),
514 0 : ));
515 : }
516 0 : Ok(Ok(TimelineAtLsnSizeResult(timeline, lsn, Err(error)))) => {
517 0 : if matches!(error, CalculateLogicalSizeError::Cancelled) {
518 : // Skip this: it's okay if one timeline among many is shutting down while we
519 : // calculate inputs for the overall tenant.
520 0 : continue;
521 : } else {
522 0 : warn!(
523 0 : timeline_id=%timeline.timeline_id,
524 0 : "failed to calculate logical size at {lsn}: {error:#}"
525 : );
526 0 : have_any_error = Some(CalculateSyntheticSizeError::LogicalSize {
527 0 : timeline_id: timeline.timeline_id,
528 0 : lsn,
529 0 : error,
530 0 : });
531 : }
532 : }
533 0 : Ok(Ok(TimelineAtLsnSizeResult(timeline, lsn, Ok(size)))) => {
534 0 : debug!(timeline_id=%timeline.timeline_id, %lsn, size, "size calculated");
535 :
536 0 : logical_size_cache.insert((timeline.timeline_id, lsn), size);
537 0 : sizes_needed.insert((timeline.timeline_id, lsn), Some(size));
538 : }
539 : }
540 : }
541 :
542 : // prune any keys not needed anymore; we record every used key and added key.
543 0 : logical_size_cache.retain(|key, _| sizes_needed.contains_key(key));
544 :
545 0 : if let Some(error) = have_any_error {
546 : // We cannot complete this round, because we are missing data.
547 : // We have, however, cached everything we were able to request calculation for.
548 0 : return Err(error);
549 0 : }
550 :
551 : // Insert the looked up sizes to the Segments
552 0 : for seg in segments.iter_mut() {
553 0 : if !seg.size_needed() {
554 0 : continue;
555 0 : }
556 0 :
557 0 : let timeline_id = seg.timeline_id;
558 0 : let lsn = Lsn(seg.segment.lsn);
559 :
560 0 : if let Some(Some(size)) = sizes_needed.get(&(timeline_id, lsn)) {
561 0 : seg.segment.size = Some(*size);
562 0 : }
563 : }
564 0 : Ok(())
565 0 : }
566 :
567 : impl ModelInputs {
568 4 : pub fn calculate_model(&self) -> tenant_size_model::StorageModel {
569 4 : // Convert SegmentMetas into plain Segments
570 4 : StorageModel {
571 4 : segments: self
572 4 : .segments
573 4 : .iter()
574 28 : .map(|seg| seg.segment.clone())
575 4 : .collect(),
576 4 : }
577 4 : }
578 :
579 : // calculate total project size
580 2 : pub fn calculate(&self) -> u64 {
581 2 : let storage = self.calculate_model();
582 2 : let sizes = storage.calculate();
583 2 : sizes.total_size
584 2 : }
585 : }
586 :
587 : /// Newtype around the tuple that carries the result of the timeline-at-LSN logical size calculation.
588 : struct TimelineAtLsnSizeResult(
589 : Arc<crate::tenant::Timeline>,
590 : utils::lsn::Lsn,
591 : Result<u64, CalculateLogicalSizeError>,
592 : );
593 :
594 0 : #[instrument(skip_all, fields(timeline_id=%timeline.timeline_id, lsn=%lsn))]
595 : async fn calculate_logical_size(
596 : limit: Arc<tokio::sync::Semaphore>,
597 : timeline: Arc<crate::tenant::Timeline>,
598 : lsn: utils::lsn::Lsn,
599 : cause: LogicalSizeCalculationCause,
600 : ctx: RequestContext,
601 : ) -> Result<TimelineAtLsnSizeResult, RecvError> {
602 : let _permit = tokio::sync::Semaphore::acquire_owned(limit)
603 : .await
604 : .expect("global semaphore should not have been closed");
605 :
606 : let size_res = timeline
607 : .spawn_ondemand_logical_size_calculation(lsn, cause, ctx)
608 : .instrument(info_span!("spawn_ondemand_logical_size_calculation"))
609 : .await?;
610 : Ok(TimelineAtLsnSizeResult(timeline, lsn, size_res))
611 : }
612 :
613 : #[test]
614 2 : fn verify_size_for_multiple_branches() {
615 2 : // this is generated from integration test test_tenant_size_with_multiple_branches, but this way
616 2 : // it has the stable lsn's
617 2 : //
618 2 : // The timeline_inputs don't participate in the size calculation, and are here just to explain
619 2 : // the inputs.
620 2 : let doc = r#"
621 2 : {
622 2 : "segments": [
623 2 : {
624 2 : "segment": {
625 2 : "parent": 9,
626 2 : "lsn": 26033560,
627 2 : "size": null,
628 2 : "needed": false
629 2 : },
630 2 : "timeline_id": "20b129c9b50cff7213e6503a31b2a5ce",
631 2 : "kind": "BranchStart"
632 2 : },
633 2 : {
634 2 : "segment": {
635 2 : "parent": 0,
636 2 : "lsn": 35720400,
637 2 : "size": 25206784,
638 2 : "needed": false
639 2 : },
640 2 : "timeline_id": "20b129c9b50cff7213e6503a31b2a5ce",
641 2 : "kind": "GcCutOff"
642 2 : },
643 2 : {
644 2 : "segment": {
645 2 : "parent": 1,
646 2 : "lsn": 35851472,
647 2 : "size": null,
648 2 : "needed": true
649 2 : },
650 2 : "timeline_id": "20b129c9b50cff7213e6503a31b2a5ce",
651 2 : "kind": "BranchEnd"
652 2 : },
653 2 : {
654 2 : "segment": {
655 2 : "parent": 7,
656 2 : "lsn": 24566168,
657 2 : "size": null,
658 2 : "needed": false
659 2 : },
660 2 : "timeline_id": "454626700469f0a9914949b9d018e876",
661 2 : "kind": "BranchStart"
662 2 : },
663 2 : {
664 2 : "segment": {
665 2 : "parent": 3,
666 2 : "lsn": 25261936,
667 2 : "size": 26050560,
668 2 : "needed": false
669 2 : },
670 2 : "timeline_id": "454626700469f0a9914949b9d018e876",
671 2 : "kind": "GcCutOff"
672 2 : },
673 2 : {
674 2 : "segment": {
675 2 : "parent": 4,
676 2 : "lsn": 25393008,
677 2 : "size": null,
678 2 : "needed": true
679 2 : },
680 2 : "timeline_id": "454626700469f0a9914949b9d018e876",
681 2 : "kind": "BranchEnd"
682 2 : },
683 2 : {
684 2 : "segment": {
685 2 : "parent": null,
686 2 : "lsn": 23694408,
687 2 : "size": null,
688 2 : "needed": false
689 2 : },
690 2 : "timeline_id": "cb5e3cbe60a4afc00d01880e1a37047f",
691 2 : "kind": "BranchStart"
692 2 : },
693 2 : {
694 2 : "segment": {
695 2 : "parent": 6,
696 2 : "lsn": 24566168,
697 2 : "size": 25739264,
698 2 : "needed": false
699 2 : },
700 2 : "timeline_id": "cb5e3cbe60a4afc00d01880e1a37047f",
701 2 : "kind": "BranchPoint"
702 2 : },
703 2 : {
704 2 : "segment": {
705 2 : "parent": 7,
706 2 : "lsn": 25902488,
707 2 : "size": 26402816,
708 2 : "needed": false
709 2 : },
710 2 : "timeline_id": "cb5e3cbe60a4afc00d01880e1a37047f",
711 2 : "kind": "GcCutOff"
712 2 : },
713 2 : {
714 2 : "segment": {
715 2 : "parent": 8,
716 2 : "lsn": 26033560,
717 2 : "size": 26468352,
718 2 : "needed": true
719 2 : },
720 2 : "timeline_id": "cb5e3cbe60a4afc00d01880e1a37047f",
721 2 : "kind": "BranchPoint"
722 2 : },
723 2 : {
724 2 : "segment": {
725 2 : "parent": 9,
726 2 : "lsn": 26033560,
727 2 : "size": null,
728 2 : "needed": true
729 2 : },
730 2 : "timeline_id": "cb5e3cbe60a4afc00d01880e1a37047f",
731 2 : "kind": "BranchEnd"
732 2 : }
733 2 : ],
734 2 : "timeline_inputs": [
735 2 : {
736 2 : "timeline_id": "20b129c9b50cff7213e6503a31b2a5ce",
737 2 : "ancestor_lsn": "0/18D3D98",
738 2 : "last_record": "0/2230CD0",
739 2 : "latest_gc_cutoff": "0/1698C48",
740 2 : "next_pitr_cutoff": "0/2210CD0",
741 2 : "retention_param_cutoff": null,
742 2 : "lease_points": []
743 2 : },
744 2 : {
745 2 : "timeline_id": "454626700469f0a9914949b9d018e876",
746 2 : "ancestor_lsn": "0/176D998",
747 2 : "last_record": "0/1837770",
748 2 : "latest_gc_cutoff": "0/1698C48",
749 2 : "next_pitr_cutoff": "0/1817770",
750 2 : "retention_param_cutoff": null,
751 2 : "lease_points": []
752 2 : },
753 2 : {
754 2 : "timeline_id": "cb5e3cbe60a4afc00d01880e1a37047f",
755 2 : "ancestor_lsn": "0/0",
756 2 : "last_record": "0/18D3D98",
757 2 : "latest_gc_cutoff": "0/1698C48",
758 2 : "next_pitr_cutoff": "0/18B3D98",
759 2 : "retention_param_cutoff": null,
760 2 : "lease_points": []
761 2 : }
762 2 : ]
763 2 : }
764 2 : "#;
765 2 : let inputs: ModelInputs = serde_json::from_str(doc).unwrap();
766 2 :
767 2 : assert_eq!(inputs.calculate(), 37_851_408);
768 2 : }
769 :
770 : #[test]
771 2 : fn verify_size_for_one_branch() {
772 2 : let doc = r#"
773 2 : {
774 2 : "segments": [
775 2 : {
776 2 : "segment": {
777 2 : "parent": null,
778 2 : "lsn": 0,
779 2 : "size": null,
780 2 : "needed": false
781 2 : },
782 2 : "timeline_id": "f15ae0cf21cce2ba27e4d80c6709a6cd",
783 2 : "kind": "BranchStart"
784 2 : },
785 2 : {
786 2 : "segment": {
787 2 : "parent": 0,
788 2 : "lsn": 305547335776,
789 2 : "size": 220054675456,
790 2 : "needed": false
791 2 : },
792 2 : "timeline_id": "f15ae0cf21cce2ba27e4d80c6709a6cd",
793 2 : "kind": "GcCutOff"
794 2 : },
795 2 : {
796 2 : "segment": {
797 2 : "parent": 1,
798 2 : "lsn": 305614444640,
799 2 : "size": null,
800 2 : "needed": true
801 2 : },
802 2 : "timeline_id": "f15ae0cf21cce2ba27e4d80c6709a6cd",
803 2 : "kind": "BranchEnd"
804 2 : }
805 2 : ],
806 2 : "timeline_inputs": [
807 2 : {
808 2 : "timeline_id": "f15ae0cf21cce2ba27e4d80c6709a6cd",
809 2 : "ancestor_lsn": "0/0",
810 2 : "last_record": "47/280A5860",
811 2 : "latest_gc_cutoff": "47/240A5860",
812 2 : "next_pitr_cutoff": "47/240A5860",
813 2 : "retention_param_cutoff": "0/0",
814 2 : "lease_points": []
815 2 : }
816 2 : ]
817 2 : }"#;
818 2 :
819 2 : let model: ModelInputs = serde_json::from_str(doc).unwrap();
820 2 :
821 2 : let res = model.calculate_model().calculate();
822 2 :
823 2 : println!("calculated synthetic size: {}", res.total_size);
824 2 : println!("result: {:?}", serde_json::to_string(&res.segments));
825 :
826 : use utils::lsn::Lsn;
827 2 : let latest_gc_cutoff_lsn: Lsn = "47/240A5860".parse().unwrap();
828 2 : let last_lsn: Lsn = "47/280A5860".parse().unwrap();
829 2 : println!(
830 2 : "latest_gc_cutoff lsn 47/240A5860 is {}, last_lsn lsn 47/280A5860 is {}",
831 2 : u64::from(latest_gc_cutoff_lsn),
832 2 : u64::from(last_lsn)
833 2 : );
834 2 : assert_eq!(res.total_size, 220121784320);
835 2 : }
|