LCOV - code coverage report
Current view: top level - libs/pageserver_api/src - keyspace.rs (source / functions)
Test: 12c2fc96834f59604b8ade5b9add28f1dce41ec6.info
Test Date: 2024-07-03 15:33:13
Coverage: Lines: 97.1 % (939 of 967 hit), Functions: 97.1 % (68 of 70 hit)

            Line data    Source code
       1              : use postgres_ffi::BLCKSZ;
       2              : use std::ops::Range;
       3              : 
       4              : use crate::{
       5              :     key::Key,
       6              :     shard::{ShardCount, ShardIdentity},
       7              : };
       8              : use itertools::Itertools;
       9              : 
      10              : ///
      11              : /// Represents a set of Keys, in a compact form.
      12              : ///
      13              : #[derive(Clone, Debug, Default, PartialEq, Eq)]
      14              : pub struct KeySpace {
      15              :     /// Contiguous ranges of keys that belong to the key space. In key order,
      16              :     /// and with no overlap.
      17              :     pub ranges: Vec<Range<Key>>,
      18              : }
      19              : 
      20              : impl std::fmt::Display for KeySpace {
      21            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
      22            0 :         write!(f, "[")?;
      23            0 :         for range in &self.ranges {
      24            0 :             write!(f, "{}..{},", range.start, range.end)?;
      25              :         }
      26            0 :         write!(f, "]")
      27            0 :     }
      28              : }
      29              : 
      30              : /// A wrapper type for sparse keyspaces.
      31              : #[derive(Clone, Debug, Default, PartialEq, Eq)]
      32              : pub struct SparseKeySpace(pub KeySpace);
      33              : 
      34              : /// Represents a contiguous half-open range of the keyspace, masked according to a particular
      35              : /// ShardNumber's stripes: within this range of keys, only some "belong" to the current
      36              : /// shard.
      37              : ///
      38              : /// When we iterate over keys within this object, we will skip any keys that don't belong
      39              : /// to this shard.
      40              : ///
      41              : /// The start + end keys may not belong to the shard: these specify where layer files should
       42              : /// start + end, but we will never actually read/write those keys.
      43              : #[derive(Clone, Debug, PartialEq, Eq)]
      44              : pub struct ShardedRange<'a> {
      45              :     pub shard_identity: &'a ShardIdentity,
      46              :     pub range: Range<Key>,
      47              : }
      48              : 
      49              : // Calculate the size of a range within the blocks of the same relation, or spanning only the
       50              : // logical size slot (0xffffffff) of the previous relation.
      51      4349464 : fn contiguous_range_len(range: &Range<Key>) -> u32 {
      52      4349464 :     debug_assert!(is_contiguous_range(range));
      53      4349464 :     if range.start.field6 == 0xffffffff {
      54            8 :         range.end.field6 + 1
      55              :     } else {
      56      4349456 :         range.end.field6 - range.start.field6
      57              :     }
      58      4349464 : }
      59              : 
      60              : /// Return true if this key range includes only keys in the same relation's data blocks, or
       61              : /// spans just one relation plus the logical size (0xffffffff) block of the relation before it.
      62              : ///
      63              : /// Contiguous in this context means we know the keys are in use _somewhere_, but it might not
      64              : /// be on our shard.  Later in ShardedRange we do the extra work to figure out how much
      65              : /// of a given contiguous range is present on one shard.
      66              : ///
      67              : /// This matters, because:
      68              : /// - Within such ranges, keys are used contiguously.  Outside such ranges it is sparse.
      69              : /// - Within such ranges, we may calculate distances using simple subtraction of field6.
      70      8698878 : fn is_contiguous_range(range: &Range<Key>) -> bool {
      71      8698878 :     range.start.field1 == range.end.field1
      72      8698722 :         && range.start.field2 == range.end.field2
      73      8698694 :         && range.start.field3 == range.end.field3
      74      8698624 :         && range.start.field4 == range.end.field4
      75      8698622 :         && (range.start.field5 == range.end.field5
      76           24 :             || (range.start.field6 == 0xffffffff && range.start.field5 + 1 == range.end.field5))
      77      8698878 : }
      78              : 
      79              : impl<'a> ShardedRange<'a> {
      80         3610 :     pub fn new(range: Range<Key>, shard_identity: &'a ShardIdentity) -> Self {
      81         3610 :         Self {
      82         3610 :             shard_identity,
      83         3610 :             range,
      84         3610 :         }
      85         3610 :     }
      86              : 
      87              :     /// Break up this range into chunks, each of which has at least one local key in it if the
      88              :     /// total range has at least one local key.
      89         3596 :     pub fn fragment(self, target_nblocks: u32) -> Vec<(u32, Range<Key>)> {
      90         3596 :         // Optimization for single-key case (e.g. logical size keys)
      91         3596 :         if self.range.end == self.range.start.add(1) {
      92         1312 :             return vec![(
      93         1312 :                 if self.shard_identity.is_key_disposable(&self.range.start) {
      94            0 :                     0
      95              :                 } else {
      96         1312 :                     1
      97              :                 },
      98         1312 :                 self.range,
      99              :             )];
     100         2284 :         }
     101         2284 : 
     102         2284 :         if !is_contiguous_range(&self.range) {
     103              :             // Ranges that span relations are not fragmented.  We only get these ranges as a result
     104              :             // of operations that act on existing layers, so we trust that the existing range is
     105              :             // reasonably small.
     106            4 :             return vec![(u32::MAX, self.range)];
     107         2280 :         }
     108         2280 : 
     109         2280 :         let mut fragments: Vec<(u32, Range<Key>)> = Vec::new();
     110         2280 : 
     111         2280 :         let mut cursor = self.range.start;
     112         4854 :         while cursor < self.range.end {
     113         2574 :             let advance_by = self.distance_to_next_boundary(cursor);
     114         2574 :             let is_fragment_disposable = self.shard_identity.is_key_disposable(&cursor);
     115              : 
     116              :             // If the previous fragment is undersized, then we seek to consume enough
     117              :             // blocks to complete it.
     118         2574 :             let (want_blocks, merge_last_fragment) = match fragments.last_mut() {
     119          294 :                 Some(frag) if frag.0 < target_nblocks => (target_nblocks - frag.0, Some(frag)),
     120          272 :                 Some(frag) => {
      121          272 :                     // Prev fragment is complete, want the full number.
     122          272 :                     (
     123          272 :                         target_nblocks,
     124          272 :                         if is_fragment_disposable {
     125              :                             // If this current range will be empty (not shard-local data), we will merge into previous
     126            0 :                             Some(frag)
     127              :                         } else {
     128          272 :                             None
     129              :                         },
     130              :                     )
     131              :                 }
     132              :                 None => {
     133              :                     // First iteration, want the full number
     134         2280 :                     (target_nblocks, None)
     135              :                 }
     136              :             };
     137              : 
     138         2574 :             let advance_by = if is_fragment_disposable {
     139          936 :                 advance_by
     140              :             } else {
     141         1638 :                 std::cmp::min(advance_by, want_blocks)
     142              :             };
     143              : 
     144         2574 :             let next_cursor = cursor.add(advance_by);
     145              : 
     146         2574 :             let this_frag = (
     147         2574 :                 if is_fragment_disposable {
     148          936 :                     0
     149              :                 } else {
     150         1638 :                     advance_by
     151              :                 },
     152         2574 :                 cursor..next_cursor,
     153         2574 :             );
     154         2574 :             cursor = next_cursor;
     155              : 
     156         2574 :             if let Some(last_fragment) = merge_last_fragment {
     157           22 :                 // Previous fragment was short or this one is empty, merge into it
     158           22 :                 last_fragment.0 += this_frag.0;
     159           22 :                 last_fragment.1.end = this_frag.1.end;
     160         2552 :             } else {
     161         2552 :                 fragments.push(this_frag);
     162         2552 :             }
     163              :         }
     164              : 
     165         2280 :         fragments
     166         3596 :     }
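
A minimal usage sketch for `fragment` (hypothetical: written as an extra unit test that could sit in the `tests` module at the bottom of this file, reusing its `kr()` helper and imports; the shard parameters mirror the existing tests):

    #[test]
    fn fragment_usage_sketch() {
        let shard_identity = ShardIdentity::new(
            ShardNumber(0),
            ShardCount::new(4),
            ShardParameters::DEFAULT_STRIPE_SIZE,
        )
        .unwrap();

        // 64 contiguous blocks, split into fragments of at most 8 shard-local blocks each.
        let fragments = ShardedRange::new(kr(0..64), &shard_identity).fragment(8);

        // The fragments tile the original range exactly, in key order...
        assert_eq!(fragments.first().unwrap().1.start, Key::from_i128(0));
        assert_eq!(fragments.last().unwrap().1.end, Key::from_i128(64));
        // ...and the shard-local block counts can never exceed the raw key count.
        let local_blocks: u32 = fragments.iter().map(|(n, _)| *n).sum();
        assert!(local_blocks <= 64);
    }
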
     167              : 
     168              :     /// Estimate the physical pages that are within this range, on this shard.  This returns
     169              :     /// u32::MAX if the range spans relations: this return value should be interpreted as "large".
     170         2038 :     pub fn page_count(&self) -> u32 {
     171         2038 :         // Special cases for single keys like logical sizes
     172         2038 :         if self.range.end == self.range.start.add(1) {
     173           12 :             return if self.shard_identity.is_key_disposable(&self.range.start) {
     174            6 :                 0
     175              :             } else {
     176            6 :                 1
     177              :             };
     178         2026 :         }
     179         2026 : 
      180         2026 :         // We can only do an exact calculation for contiguous key ranges
     181         2026 :         if !is_contiguous_range(&self.range) {
     182            8 :             return u32::MAX;
     183         2018 :         }
     184         2018 : 
     185         2018 :         // Special case for single sharded tenants: our logical and physical sizes are the same
     186         2018 :         if self.shard_identity.count < ShardCount::new(2) {
     187         1048 :             return contiguous_range_len(&self.range);
     188          970 :         }
     189          970 : 
     190          970 :         // Normal path: step through stripes and part-stripes in the range, evaluate whether each one belongs
     191          970 :         // to Self, and add the stripe's block count to our total if so.
     192          970 :         let mut result: u64 = 0;
     193          970 :         let mut cursor = self.range.start;
     194         1962 :         while cursor < self.range.end {
     195              :             // Count up to the next stripe_size boundary or end of range
     196          992 :             let advance_by = self.distance_to_next_boundary(cursor);
     197          992 : 
      198          992 :             // If the blocks in this stripe belong to us, add them to our count
     199          992 :             if !self.shard_identity.is_key_disposable(&cursor) {
     200           56 :                 result += advance_by as u64;
     201          936 :             }
     202              : 
     203          992 :             cursor = cursor.add(advance_by);
     204              :         }
     205              : 
     206          970 :         if result > u32::MAX as u64 {
     207            0 :             u32::MAX
     208              :         } else {
     209          970 :             result as u32
     210              :         }
     211         2038 :     }
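
A small sketch relating `page_count` to `raw_size` (hypothetical test, same assumptions as the sketch above):

    #[test]
    fn page_count_usage_sketch() {
        let shard_identity = ShardIdentity::new(
            ShardNumber(0),
            ShardCount::new(4),
            ShardParameters::DEFAULT_STRIPE_SIZE,
        )
        .unwrap();

        let range = kr(0..1000);
        let sharded = ShardedRange::new(range.clone(), &shard_identity);

        // With 4 shards only the stripes owned by shard 0 count towards the physical
        // page estimate, so it can never exceed the raw (unsharded) size of the range.
        assert!(sharded.page_count() <= ShardedRange::raw_size(&range));
    }
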
     212              : 
      213              :     /// Distance from the cursor to the next potential fragment boundary: this is either
     214              :     /// a stripe boundary, or the end of the range.
     215         3566 :     fn distance_to_next_boundary(&self, cursor: Key) -> u32 {
     216         3566 :         let distance_to_range_end = contiguous_range_len(&(cursor..self.range.end));
     217         3566 : 
     218         3566 :         if self.shard_identity.count < ShardCount::new(2) {
     219              :             // Optimization: don't bother stepping through stripes if the tenant isn't sharded.
     220         1568 :             return distance_to_range_end;
     221         1998 :         }
     222         1998 : 
     223         1998 :         if cursor.field6 == 0xffffffff {
     224              :             // We are wrapping from one relation's logical size to the next relation's first data block
     225            8 :             return 1;
     226         1990 :         }
     227         1990 : 
     228         1990 :         let stripe_index = cursor.field6 / self.shard_identity.stripe_size.0;
     229         1990 :         let stripe_remainder = self.shard_identity.stripe_size.0
     230         1990 :             - (cursor.field6 - stripe_index * self.shard_identity.stripe_size.0);
     231         1990 : 
     232         1990 :         if cfg!(debug_assertions) {
     233              :             // We should never overflow field5 and field6 -- our callers check this earlier
     234              :             // and would have returned their u32::MAX cases if the input range violated this.
     235         1990 :             let next_cursor = cursor.add(stripe_remainder);
     236         1990 :             debug_assert!(
     237         1990 :                 next_cursor.field1 == cursor.field1
     238         1990 :                     && next_cursor.field2 == cursor.field2
     239         1990 :                     && next_cursor.field3 == cursor.field3
     240         1990 :                     && next_cursor.field4 == cursor.field4
     241         1990 :                     && next_cursor.field5 == cursor.field5
     242              :             )
     243            0 :         }
     244              : 
     245         1990 :         std::cmp::min(stripe_remainder, distance_to_range_end)
     246         3566 :     }
     247              : 
     248              :     /// Whereas `page_count` estimates the number of pages physically in this range on this shard,
     249              :     /// this function simply calculates the number of pages in the space, without accounting for those
     250              :     /// pages that would not actually be stored on this node.
     251              :     ///
     252              :     /// Don't use this function in code that works with physical entities like layer files.
     253      4345096 :     pub fn raw_size(range: &Range<Key>) -> u32 {
     254      4345096 :         if is_contiguous_range(range) {
     255      4344850 :             contiguous_range_len(range)
     256              :         } else {
     257          246 :             u32::MAX
     258              :         }
     259      4345096 :     }
     260              : }
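
`raw_size` itself is easy to demonstrate (hypothetical test sketch; the non-contiguous hex keys are the same pair used by `contiguous_range_check` below):

    #[test]
    fn raw_size_usage_sketch() {
        // 10 contiguous keys -> 10 pages, regardless of sharding.
        assert_eq!(ShardedRange::raw_size(&kr(0..10)), 10);

        // A range that crosses relation boundaries is not contiguous, so its size is
        // reported as "large" (u32::MAX) rather than computed exactly.
        let cross_rel = Key::from_hex("000000067f00000001000004df00fffffffe").unwrap()
            ..Key::from_hex("000000067f00000001000004df0100000003").unwrap();
        assert_eq!(ShardedRange::raw_size(&cross_rel), u32::MAX);
    }
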
     261              : 
     262              : impl KeySpace {
     263              :     /// Create a key space with a single range.
     264          132 :     pub fn single(key_range: Range<Key>) -> Self {
     265          132 :         Self {
     266          132 :             ranges: vec![key_range],
     267          132 :         }
     268          132 :     }
     269              : 
      270              :     /// Partition a key space into partitions of roughly 'target_size' bytes
      271              :     /// each.
     272              :     ///
     273          262 :     pub fn partition(&self, shard_identity: &ShardIdentity, target_size: u64) -> KeyPartitioning {
     274          262 :         // Assume that each value is 8k in size.
     275          262 :         let target_nblocks = (target_size / BLCKSZ as u64) as u32;
     276          262 : 
     277          262 :         let mut parts = Vec::new();
     278          262 :         let mut current_part = Vec::new();
     279          262 :         let mut current_part_size: usize = 0;
     280         1834 :         for range in &self.ranges {
     281              :             // While doing partitioning, wrap the range in ShardedRange so that our size calculations
     282              :             // will respect shard striping rather than assuming all keys within a range are present.
     283         1572 :             let range = ShardedRange::new(range.clone(), shard_identity);
     284              : 
     285              :             // Chunk up the range into parts that each contain up to target_size local blocks
     286         1578 :             for (frag_on_shard_size, frag_range) in range.fragment(target_nblocks) {
     287              :                 // If appending the next contiguous range in the keyspace to the current
     288              :                 // partition would cause it to be too large, and our current partition
     289              :                 // covers at least one block that is physically present in this shard,
     290              :                 // then start a new partition
     291         1578 :                 if current_part_size + frag_on_shard_size as usize > target_nblocks as usize
     292           26 :                     && current_part_size > 0
     293           26 :                 {
     294           26 :                     parts.push(KeySpace {
     295           26 :                         ranges: current_part,
     296           26 :                     });
     297           26 :                     current_part = Vec::new();
     298           26 :                     current_part_size = 0;
     299         1552 :                 }
     300         1578 :                 current_part.push(frag_range.start..frag_range.end);
     301         1578 :                 current_part_size += frag_on_shard_size as usize;
     302              :             }
     303              :         }
     304              : 
     305              :         // add last partition that wasn't full yet.
     306          262 :         if !current_part.is_empty() {
     307          262 :             parts.push(KeySpace {
     308          262 :                 ranges: current_part,
     309          262 :             });
     310          262 :         }
     311              : 
     312          262 :         KeyPartitioning { parts }
     313          262 :     }
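
A sketch of `partition` in use (hypothetical test; 8192 is the usual postgres block size, so a `target_size` of `16 * 8192` asks for roughly 16 shard-local blocks per partition):

    #[test]
    fn partition_usage_sketch() {
        let shard_identity = ShardIdentity::new(
            ShardNumber(0),
            ShardCount::new(4),
            ShardParameters::DEFAULT_STRIPE_SIZE,
        )
        .unwrap();

        let ks = KeySpace::single(kr(0..1000));
        let partitioning = ks.partition(&shard_identity, 16 * 8192);

        // Every fragment of the original keyspace lands in exactly one partition,
        // so the raw sizes of the parts add back up to the original 1000 blocks.
        let total: usize = partitioning
            .parts
            .iter()
            .map(|part| part.total_raw_size())
            .sum();
        assert_eq!(total, 1000);
    }
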
     314              : 
     315          618 :     pub fn is_empty(&self) -> bool {
     316          618 :         self.total_raw_size() == 0
     317          618 :     }
     318              : 
     319              :     /// Merge another keyspace into the current one.
     320              :     /// Note: the keyspaces must not overlap (enforced via assertions). To merge overlapping key ranges, use `KeySpaceRandomAccum`.
     321          414 :     pub fn merge(&mut self, other: &KeySpace) {
     322          414 :         let all_ranges = self
     323          414 :             .ranges
     324          414 :             .iter()
     325        70262 :             .merge_by(other.ranges.iter(), |lhs, rhs| lhs.start < rhs.start);
     326          414 : 
     327          414 :         let mut accum = KeySpaceAccum::new();
     328          414 :         let mut prev: Option<&Range<Key>> = None;
     329       107466 :         for range in all_ranges {
     330       107052 :             if let Some(prev) = prev {
     331       106844 :                 let overlap =
     332       106844 :                     std::cmp::max(range.start, prev.start) < std::cmp::min(range.end, prev.end);
     333       106844 :                 assert!(
     334       106844 :                     !overlap,
     335            0 :                     "Attempt to merge ovelapping keyspaces: {:?} overlaps {:?}",
     336              :                     prev, range
     337              :                 );
     338          208 :             }
     339              : 
     340       107052 :             accum.add_range(range.clone());
     341       107052 :             prev = Some(range);
     342              :         }
     343              : 
     344          414 :         self.ranges = accum.to_keyspace().ranges;
     345          414 :     }
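
A compact sketch of `merge` (hypothetical test): the two keyspaces must not overlap, and ranges that become adjacent are coalesced by the internal accumulator.

    #[test]
    fn merge_usage_sketch() {
        let mut ks = KeySpace {
            ranges: vec![kr(0..10), kr(30..40)],
        };
        ks.merge(&KeySpace {
            ranges: vec![kr(10..20)],
        });
        // 0..10 and 10..20 are adjacent, so they collapse into a single range.
        assert_eq!(ks.ranges, vec![kr(0..20), kr(30..40)]);
    }
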
     346              : 
     347              :     /// Remove all keys in `other` from `self`.
     348              :     /// This can involve splitting or removing of existing ranges.
     349              :     /// Returns the removed keyspace
     350          866 :     pub fn remove_overlapping_with(&mut self, other: &KeySpace) -> KeySpace {
     351          866 :         let (self_start, self_end) = match (self.start(), self.end()) {
     352          796 :             (Some(start), Some(end)) => (start, end),
     353              :             _ => {
     354              :                 // self is empty
     355           70 :                 return KeySpace::default();
     356              :             }
     357              :         };
     358              : 
     359              :         // Key spaces are sorted by definition, so skip ahead to the first
     360              :         // potentially intersecting range. Similarly, ignore ranges that start
     361              :         // after the current keyspace ends.
     362          796 :         let other_ranges = other
     363          796 :             .ranges
     364          796 :             .iter()
     365        10548 :             .skip_while(|range| self_start >= range.end)
     366        70362 :             .take_while(|range| self_end > range.start);
     367          796 : 
     368          796 :         let mut removed_accum = KeySpaceRandomAccum::new();
     369        71108 :         for range in other_ranges {
     370       202596 :             while let Some(overlap_at) = self.overlaps_at(range) {
     371       132284 :                 let overlapped = self.ranges[overlap_at].clone();
     372       132284 : 
     373       132284 :                 if overlapped.start < range.start && overlapped.end <= range.end {
     374           16 :                     // Higher part of the range is completely overlapped.
     375           16 :                     removed_accum.add_range(range.start..self.ranges[overlap_at].end);
     376           16 :                     self.ranges[overlap_at].end = range.start;
     377       132268 :                 }
     378       132284 :                 if overlapped.start >= range.start && overlapped.end > range.end {
     379           70 :                     // Lower part of the range is completely overlapped.
     380           70 :                     removed_accum.add_range(self.ranges[overlap_at].start..range.end);
     381           70 :                     self.ranges[overlap_at].start = range.end;
     382       132214 :                 }
     383       132284 :                 if overlapped.start < range.start && overlapped.end > range.end {
     384        69974 :                     // Middle part of the range is overlapped.
     385        69974 :                     removed_accum.add_range(range.clone());
     386        69974 :                     self.ranges[overlap_at].end = range.start;
     387        69974 :                     self.ranges
     388        69974 :                         .insert(overlap_at + 1, range.end..overlapped.end);
     389        69974 :                 }
     390       132284 :                 if overlapped.start >= range.start && overlapped.end <= range.end {
     391        62224 :                     // Whole range is overlapped
     392        62224 :                     removed_accum.add_range(self.ranges[overlap_at].clone());
     393        62224 :                     self.ranges.remove(overlap_at);
     394        70060 :                 }
     395              :             }
     396              :         }
     397              : 
     398          796 :         removed_accum.to_keyspace()
     399          866 :     }
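
A minimal sketch of `remove_overlapping_with` splitting a range down the middle (hypothetical test; the `test_remove_*` tests below exercise the remaining cases):

    #[test]
    fn remove_overlapping_usage_sketch() {
        let mut ks = KeySpace {
            ranges: vec![kr(0..10)],
        };
        let removed = ks.remove_overlapping_with(&KeySpace {
            ranges: vec![kr(3..6)],
        });
        // The overlapping middle section is returned, and the original range is split.
        assert_eq!(removed.ranges, vec![kr(3..6)]);
        assert_eq!(ks.ranges, vec![kr(0..3), kr(6..10)]);
    }
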
     400              : 
     401          886 :     pub fn start(&self) -> Option<Key> {
     402          886 :         self.ranges.first().map(|range| range.start)
     403          886 :     }
     404              : 
     405          866 :     pub fn end(&self) -> Option<Key> {
     406          866 :         self.ranges.last().map(|range| range.end)
     407          866 :     }
     408              : 
     409              :     /// The size of the keyspace in pages, before accounting for sharding
     410         1734 :     pub fn total_raw_size(&self) -> usize {
     411         1734 :         self.ranges
     412         1734 :             .iter()
     413        73145 :             .map(|range| ShardedRange::raw_size(range) as usize)
     414         1734 :             .sum()
     415         1734 :     }
     416              : 
     417       278593 :     fn overlaps_at(&self, range: &Range<Key>) -> Option<usize> {
     418      2076887 :         match self.ranges.binary_search_by_key(&range.end, |r| r.start) {
     419          114 :             Ok(0) => None,
     420         2587 :             Err(0) => None,
     421       111977 :             Ok(index) if self.ranges[index - 1].end > range.start => Some(index - 1),
     422       163915 :             Err(index) if self.ranges[index - 1].end > range.start => Some(index - 1),
     423       112686 :             _ => None,
     424              :         }
     425       278593 :     }
     426              : 
     427              :     ///
     428              :     /// Check if key space contains overlapping range
     429              :     ///
     430        75997 :     pub fn overlaps(&self, range: &Range<Key>) -> bool {
     431        75997 :         self.overlaps_at(range).is_some()
     432        75997 :     }
     433              : 
     434              :     /// Check if the keyspace contains a key
     435        74215 :     pub fn contains(&self, key: &Key) -> bool {
     436        74215 :         self.overlaps(&(*key..key.next()))
     437        74215 :     }
     438              : }
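
A short sketch of the query helpers `overlaps` and `contains` (hypothetical test; ranges are half-open, so the end key itself is not contained):

    #[test]
    fn overlaps_contains_usage_sketch() {
        let ks = KeySpace {
            ranges: vec![kr(10..20)],
        };

        assert!(ks.overlaps(&kr(15..25)));
        assert!(!ks.overlaps(&kr(20..30))); // half-open: key 20 is outside 10..20
        assert!(ks.contains(&Key::from_i128(19)));
        assert!(!ks.contains(&Key::from_i128(20)));
    }
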
     439              : 
     440              : ///
     441              : /// Represents a partitioning of the key space.
     442              : ///
     443              : /// The only kind of partitioning we do is to partition the key space into
     444              : /// partitions that are roughly equal in physical size (see KeySpace::partition).
     445              : /// But this data structure could represent any partitioning.
     446              : ///
     447              : #[derive(Clone, Debug, Default)]
     448              : pub struct KeyPartitioning {
     449              :     pub parts: Vec<KeySpace>,
     450              : }
     451              : 
     452              : /// Represents a partitioning of the sparse key space.
     453              : #[derive(Clone, Debug, Default)]
     454              : pub struct SparseKeyPartitioning {
     455              :     pub parts: Vec<SparseKeySpace>,
     456              : }
     457              : 
     458              : impl KeyPartitioning {
     459          768 :     pub fn new() -> Self {
     460          768 :         KeyPartitioning { parts: Vec::new() }
     461          768 :     }
     462              : 
     463              :     /// Convert a key partitioning to a sparse partition.
     464          384 :     pub fn into_sparse(self) -> SparseKeyPartitioning {
     465          384 :         SparseKeyPartitioning {
     466          384 :             parts: self.parts.into_iter().map(SparseKeySpace).collect(),
     467          384 :         }
     468          384 :     }
     469              : }
     470              : 
     471              : impl SparseKeyPartitioning {
      472              :     /// Note: use this function with caution. Attempting to handle a sparse keyspace in the same way as a dense keyspace will
     473              :     /// cause long/dead loops.
     474          364 :     pub fn into_dense(self) -> KeyPartitioning {
     475          364 :         KeyPartitioning {
     476          364 :             parts: self.parts.into_iter().map(|x| x.0).collect(),
     477          364 :         }
     478          364 :     }
     479              : }
     480              : 
     481              : ///
     482              : /// A helper object, to collect a set of keys and key ranges into a KeySpace
     483              : /// object. This takes care of merging adjacent keys and key ranges into
     484              : /// contiguous ranges.
     485              : ///
     486              : #[derive(Clone, Debug, Default)]
     487              : pub struct KeySpaceAccum {
     488              :     accum: Option<Range<Key>>,
     489              : 
     490              :     ranges: Vec<Range<Key>>,
     491              :     size: u64,
     492              : }
     493              : 
     494              : impl KeySpaceAccum {
     495        79899 :     pub fn new() -> Self {
     496        79899 :         Self {
     497        79899 :             accum: None,
     498        79899 :             ranges: Vec::new(),
     499        79899 :             size: 0,
     500        79899 :         }
     501        79899 :     }
     502              : 
     503              :     #[inline(always)]
     504      4081250 :     pub fn add_key(&mut self, key: Key) {
     505      4081250 :         self.add_range(singleton_range(key))
     506      4081250 :     }
     507              : 
     508              :     #[inline(always)]
     509      4271945 :     pub fn add_range(&mut self, range: Range<Key>) {
     510      4271945 :         self.size += ShardedRange::raw_size(&range) as u64;
     511      4271945 : 
     512      4271945 :         match self.accum.as_mut() {
     513      4178262 :             Some(accum) => {
     514      4178262 :                 if range.start == accum.end {
     515      4067084 :                     accum.end = range.end;
     516      4067084 :                 } else {
     517              :                     // TODO: to efficiently support small sharding stripe sizes, we should avoid starting
     518              :                     // a new range here if the skipped region was all keys that don't belong on this shard.
     519              :                     // (https://github.com/neondatabase/neon/issues/6247)
     520       111178 :                     assert!(range.start > accum.end);
     521       111178 :                     self.ranges.push(accum.clone());
     522       111178 :                     *accum = range;
     523              :                 }
     524              :             }
     525        93683 :             None => self.accum = Some(range),
     526              :         }
     527      4271945 :     }
     528              : 
     529        89059 :     pub fn to_keyspace(mut self) -> KeySpace {
     530        89059 :         if let Some(accum) = self.accum.take() {
     531        85571 :             self.ranges.push(accum);
     532        85571 :         }
     533        89059 :         KeySpace {
     534        89059 :             ranges: self.ranges,
     535        89059 :         }
     536        89059 :     }
     537              : 
     538          968 :     pub fn consume_keyspace(&mut self) -> KeySpace {
     539          968 :         std::mem::take(self).to_keyspace()
     540          968 :     }
     541              : 
     542              :     // The total number of keys in this object, ignoring any sharding effects that might cause some of
     543              :     // the keys to be omitted in storage on this shard.
     544         2248 :     pub fn raw_size(&self) -> u64 {
     545         2248 :         self.size
     546         2248 :     }
     547              : }
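
A usage sketch for `KeySpaceAccum` (hypothetical test): keys and ranges must arrive in key order, adjacent additions are coalesced, and `raw_size` tracks the running key count.

    #[test]
    fn keyspace_accum_usage_sketch() {
        let mut accum = KeySpaceAccum::new();
        accum.add_key(Key::from_i128(0));
        accum.add_key(Key::from_i128(1));
        accum.add_range(kr(2..10));
        accum.add_range(kr(20..30));

        assert_eq!(accum.raw_size(), 20);
        // The first ten keys were added piecewise but come out as one contiguous range.
        assert_eq!(accum.to_keyspace().ranges, vec![kr(0..10), kr(20..30)]);
    }
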
     548              : 
     549              : ///
     550              : /// A helper object, to collect a set of keys and key ranges into a KeySpace
     551              : /// object. Key ranges may be inserted in any order and can overlap.
     552              : ///
     553              : #[derive(Clone, Debug, Default)]
     554              : pub struct KeySpaceRandomAccum {
     555              :     ranges: Vec<Range<Key>>,
     556              : }
     557              : 
     558              : impl KeySpaceRandomAccum {
     559         2368 :     pub fn new() -> Self {
     560         2368 :         Self { ranges: Vec::new() }
     561         2368 :     }
     562              : 
     563        40508 :     pub fn add_key(&mut self, key: Key) {
     564        40508 :         self.add_range(singleton_range(key))
     565        40508 :     }
     566              : 
     567       236787 :     pub fn add_range(&mut self, range: Range<Key>) {
     568       236787 :         self.ranges.push(range);
     569       236787 :     }
     570              : 
     571        63885 :     pub fn add_keyspace(&mut self, keyspace: KeySpace) {
     572       127772 :         for range in keyspace.ranges {
     573        63887 :             self.add_range(range);
     574        63887 :         }
     575        63885 :     }
     576              : 
     577         1630 :     pub fn to_keyspace(mut self) -> KeySpace {
     578         1630 :         let mut ranges = Vec::new();
     579         1630 :         if !self.ranges.is_empty() {
     580       473174 :             self.ranges.sort_by_key(|r| r.start);
     581          936 :             let mut start = self.ranges.first().unwrap().start;
     582          936 :             let mut end = self.ranges.first().unwrap().end;
     583       237651 :             for r in self.ranges {
     584       236715 :                 assert!(r.start >= start);
     585       236715 :                 if r.start > end {
     586       235385 :                     ranges.push(start..end);
     587       235385 :                     start = r.start;
     588       235385 :                     end = r.end;
     589       235385 :                 } else if r.end > end {
     590          380 :                     end = r.end;
     591          950 :                 }
     592              :             }
     593          936 :             ranges.push(start..end);
     594          694 :         }
     595         1630 :         KeySpace { ranges }
     596         1630 :     }
     597              : 
     598          816 :     pub fn consume_keyspace(&mut self) -> KeySpace {
     599          816 :         let mut prev_accum = KeySpaceRandomAccum::new();
     600          816 :         std::mem::swap(self, &mut prev_accum);
     601          816 : 
     602          816 :         prev_accum.to_keyspace()
     603          816 :     }
     604              : }
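
The equivalent sketch for `KeySpaceRandomAccum` (hypothetical test): ranges may be added out of order and may overlap; `to_keyspace` sorts and merges them.

    #[test]
    fn keyspace_random_accum_usage_sketch() {
        let mut accum = KeySpaceRandomAccum::new();
        accum.add_range(kr(20..30));
        accum.add_range(kr(0..10));
        accum.add_range(kr(5..15)); // overlaps the previous range

        assert_eq!(accum.to_keyspace().ranges, vec![kr(0..15), kr(20..30)]);
    }
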
     605              : 
     606      4121758 : pub fn singleton_range(key: Key) -> Range<Key> {
     607      4121758 :     key..key.next()
     608      4121758 : }
     609              : 
     610              : #[cfg(test)]
     611              : mod tests {
     612              :     use rand::{RngCore, SeedableRng};
     613              : 
     614              :     use crate::{
     615              :         models::ShardParameters,
     616              :         shard::{ShardCount, ShardNumber},
     617              :     };
     618              : 
     619              :     use super::*;
     620              :     use std::fmt::Write;
     621              : 
     622              :     // Helper function to create a key range.
     623              :     //
     624              :     // Make the tests below less verbose.
     625           92 :     fn kr(irange: Range<i128>) -> Range<Key> {
     626           92 :         Key::from_i128(irange.start)..Key::from_i128(irange.end)
     627           92 :     }
     628              : 
     629              :     #[allow(dead_code)]
     630            0 :     fn dump_keyspace(ks: &KeySpace) {
     631            0 :         for r in ks.ranges.iter() {
     632            0 :             println!("  {}..{}", r.start.to_i128(), r.end.to_i128());
     633            0 :         }
     634            0 :     }
     635              : 
     636           22 :     fn assert_ks_eq(actual: &KeySpace, expected: Vec<Range<Key>>) {
     637           22 :         if actual.ranges != expected {
     638            0 :             let mut msg = String::new();
     639            0 : 
     640            0 :             writeln!(msg, "expected:").unwrap();
     641            0 :             for r in &expected {
     642            0 :                 writeln!(msg, "  {}..{}", r.start.to_i128(), r.end.to_i128()).unwrap();
     643            0 :             }
     644            0 :             writeln!(msg, "got:").unwrap();
     645            0 :             for r in &actual.ranges {
     646            0 :                 writeln!(msg, "  {}..{}", r.start.to_i128(), r.end.to_i128()).unwrap();
     647            0 :             }
     648            0 :             panic!("{}", msg);
     649           22 :         }
     650           22 :     }
     651              : 
     652              :     #[test]
     653            2 :     fn keyspace_consume() {
     654            2 :         let ranges = vec![kr(0..10), kr(20..35), kr(40..45)];
     655            2 : 
     656            2 :         let mut accum = KeySpaceAccum::new();
     657            8 :         for range in &ranges {
     658            6 :             accum.add_range(range.clone());
     659            6 :         }
     660              : 
     661            2 :         let expected_size: u64 = ranges
     662            2 :             .iter()
     663            6 :             .map(|r| ShardedRange::raw_size(r) as u64)
     664            2 :             .sum();
     665            2 :         assert_eq!(accum.raw_size(), expected_size);
     666              : 
     667            2 :         assert_ks_eq(&accum.consume_keyspace(), ranges.clone());
     668            2 :         assert_eq!(accum.raw_size(), 0);
     669              : 
     670            2 :         assert_ks_eq(&accum.consume_keyspace(), vec![]);
     671            2 :         assert_eq!(accum.raw_size(), 0);
     672              : 
     673            8 :         for range in &ranges {
     674            6 :             accum.add_range(range.clone());
     675            6 :         }
     676            2 :         assert_ks_eq(&accum.to_keyspace(), ranges);
     677            2 :     }
     678              : 
     679              :     #[test]
     680            2 :     fn keyspace_add_range() {
     681            2 :         // two separate ranges
     682            2 :         //
     683            2 :         // #####
     684            2 :         //         #####
     685            2 :         let mut ks = KeySpaceRandomAccum::default();
     686            2 :         ks.add_range(kr(0..10));
     687            2 :         ks.add_range(kr(20..30));
     688            2 :         assert_ks_eq(&ks.to_keyspace(), vec![kr(0..10), kr(20..30)]);
     689            2 : 
     690            2 :         // two separate ranges, added in reverse order
     691            2 :         //
     692            2 :         //         #####
     693            2 :         // #####
     694            2 :         let mut ks = KeySpaceRandomAccum::default();
     695            2 :         ks.add_range(kr(20..30));
     696            2 :         ks.add_range(kr(0..10));
     697            2 : 
     698            2 :         // add range that is adjacent to the end of an existing range
     699            2 :         //
     700            2 :         // #####
     701            2 :         //      #####
     702            2 :         ks.add_range(kr(0..10));
     703            2 :         ks.add_range(kr(10..30));
     704            2 :         assert_ks_eq(&ks.to_keyspace(), vec![kr(0..30)]);
     705            2 : 
     706            2 :         // add range that is adjacent to the start of an existing range
     707            2 :         //
     708            2 :         //      #####
     709            2 :         // #####
     710            2 :         let mut ks = KeySpaceRandomAccum::default();
     711            2 :         ks.add_range(kr(10..30));
     712            2 :         ks.add_range(kr(0..10));
     713            2 :         assert_ks_eq(&ks.to_keyspace(), vec![kr(0..30)]);
     714            2 : 
     715            2 :         // add range that overlaps with the end of an existing range
     716            2 :         //
     717            2 :         // #####
     718            2 :         //    #####
     719            2 :         let mut ks = KeySpaceRandomAccum::default();
     720            2 :         ks.add_range(kr(0..10));
     721            2 :         ks.add_range(kr(5..30));
     722            2 :         assert_ks_eq(&ks.to_keyspace(), vec![kr(0..30)]);
     723            2 : 
     724            2 :         // add range that overlaps with the start of an existing range
     725            2 :         //
     726            2 :         //    #####
     727            2 :         // #####
     728            2 :         let mut ks = KeySpaceRandomAccum::default();
     729            2 :         ks.add_range(kr(5..30));
     730            2 :         ks.add_range(kr(0..10));
     731            2 :         assert_ks_eq(&ks.to_keyspace(), vec![kr(0..30)]);
     732            2 : 
     733            2 :         // add range that is fully covered by an existing range
     734            2 :         //
     735            2 :         // #########
     736            2 :         //   #####
     737            2 :         let mut ks = KeySpaceRandomAccum::default();
     738            2 :         ks.add_range(kr(0..30));
     739            2 :         ks.add_range(kr(10..20));
     740            2 :         assert_ks_eq(&ks.to_keyspace(), vec![kr(0..30)]);
     741            2 : 
     742            2 :         // add range that extends an existing range from both ends
     743            2 :         //
     744            2 :         //   #####
     745            2 :         // #########
     746            2 :         let mut ks = KeySpaceRandomAccum::default();
     747            2 :         ks.add_range(kr(10..20));
     748            2 :         ks.add_range(kr(0..30));
     749            2 :         assert_ks_eq(&ks.to_keyspace(), vec![kr(0..30)]);
     750            2 : 
     751            2 :         // add a range that overlaps with two existing ranges, joining them
     752            2 :         //
     753            2 :         // #####   #####
     754            2 :         //    #######
     755            2 :         let mut ks = KeySpaceRandomAccum::default();
     756            2 :         ks.add_range(kr(0..10));
     757            2 :         ks.add_range(kr(20..30));
     758            2 :         ks.add_range(kr(5..25));
     759            2 :         assert_ks_eq(&ks.to_keyspace(), vec![kr(0..30)]);
     760            2 :     }
     761              : 
     762              :     #[test]
     763            2 :     fn keyspace_overlaps() {
     764            2 :         let mut ks = KeySpaceRandomAccum::default();
     765            2 :         ks.add_range(kr(10..20));
     766            2 :         ks.add_range(kr(30..40));
     767            2 :         let ks = ks.to_keyspace();
     768            2 : 
     769            2 :         //        #####      #####
     770            2 :         // xxxx
     771            2 :         assert!(!ks.overlaps(&kr(0..5)));
     772              : 
     773              :         //        #####      #####
     774              :         //   xxxx
     775            2 :         assert!(!ks.overlaps(&kr(5..9)));
     776              : 
     777              :         //        #####      #####
     778              :         //    xxxx
     779            2 :         assert!(!ks.overlaps(&kr(5..10)));
     780              : 
     781              :         //        #####      #####
     782              :         //     xxxx
     783            2 :         assert!(ks.overlaps(&kr(5..11)));
     784              : 
     785              :         //        #####      #####
     786              :         //        xxxx
     787            2 :         assert!(ks.overlaps(&kr(10..15)));
     788              : 
     789              :         //        #####      #####
     790              :         //         xxxx
     791            2 :         assert!(ks.overlaps(&kr(15..20)));
     792              : 
     793              :         //        #####      #####
     794              :         //           xxxx
     795            2 :         assert!(ks.overlaps(&kr(15..25)));
     796              : 
     797              :         //        #####      #####
     798              :         //              xxxx
     799            2 :         assert!(!ks.overlaps(&kr(22..28)));
     800              : 
     801              :         //        #####      #####
     802              :         //               xxxx
     803            2 :         assert!(!ks.overlaps(&kr(25..30)));
     804              : 
     805              :         //        #####      #####
     806              :         //                      xxxx
     807            2 :         assert!(ks.overlaps(&kr(35..35)));
     808              : 
     809              :         //        #####      #####
     810              :         //                        xxxx
     811            2 :         assert!(!ks.overlaps(&kr(40..45)));
     812              : 
     813              :         //        #####      #####
     814              :         //                        xxxx
     815            2 :         assert!(!ks.overlaps(&kr(45..50)));
     816              : 
     817              :         //        #####      #####
     818              :         //        xxxxxxxxxxx
     819            2 :         assert!(ks.overlaps(&kr(0..30))); // XXXXX This fails currently!
     820            2 :     }
     821              : 
     822              :     #[test]
     823            2 :     fn test_remove_full_overlapps() {
     824            2 :         let mut key_space1 = KeySpace {
     825            2 :             ranges: vec![
     826            2 :                 Key::from_i128(1)..Key::from_i128(4),
     827            2 :                 Key::from_i128(5)..Key::from_i128(8),
     828            2 :                 Key::from_i128(10)..Key::from_i128(12),
     829            2 :             ],
     830            2 :         };
     831            2 :         let key_space2 = KeySpace {
     832            2 :             ranges: vec![
     833            2 :                 Key::from_i128(2)..Key::from_i128(3),
     834            2 :                 Key::from_i128(6)..Key::from_i128(7),
     835            2 :                 Key::from_i128(11)..Key::from_i128(13),
     836            2 :             ],
     837            2 :         };
     838            2 :         let removed = key_space1.remove_overlapping_with(&key_space2);
     839            2 :         let removed_expected = KeySpace {
     840            2 :             ranges: vec![
     841            2 :                 Key::from_i128(2)..Key::from_i128(3),
     842            2 :                 Key::from_i128(6)..Key::from_i128(7),
     843            2 :                 Key::from_i128(11)..Key::from_i128(12),
     844            2 :             ],
     845            2 :         };
     846            2 :         assert_eq!(removed, removed_expected);
     847              : 
     848            2 :         assert_eq!(
     849            2 :             key_space1.ranges,
     850            2 :             vec![
     851            2 :                 Key::from_i128(1)..Key::from_i128(2),
     852            2 :                 Key::from_i128(3)..Key::from_i128(4),
     853            2 :                 Key::from_i128(5)..Key::from_i128(6),
     854            2 :                 Key::from_i128(7)..Key::from_i128(8),
     855            2 :                 Key::from_i128(10)..Key::from_i128(11)
     856            2 :             ]
     857            2 :         );
     858            2 :     }
     859              : 
     860              :     #[test]
     861            2 :     fn test_remove_partial_overlaps() {
      862            2 :         // Test partial overlaps
     863            2 :         let mut key_space1 = KeySpace {
     864            2 :             ranges: vec![
     865            2 :                 Key::from_i128(1)..Key::from_i128(5),
     866            2 :                 Key::from_i128(7)..Key::from_i128(10),
     867            2 :                 Key::from_i128(12)..Key::from_i128(15),
     868            2 :             ],
     869            2 :         };
     870            2 :         let key_space2 = KeySpace {
     871            2 :             ranges: vec![
     872            2 :                 Key::from_i128(3)..Key::from_i128(6),
     873            2 :                 Key::from_i128(8)..Key::from_i128(11),
     874            2 :                 Key::from_i128(14)..Key::from_i128(17),
     875            2 :             ],
     876            2 :         };
     877            2 : 
     878            2 :         let removed = key_space1.remove_overlapping_with(&key_space2);
     879            2 :         let removed_expected = KeySpace {
     880            2 :             ranges: vec![
     881            2 :                 Key::from_i128(3)..Key::from_i128(5),
     882            2 :                 Key::from_i128(8)..Key::from_i128(10),
     883            2 :                 Key::from_i128(14)..Key::from_i128(15),
     884            2 :             ],
     885            2 :         };
     886            2 :         assert_eq!(removed, removed_expected);
     887              : 
     888            2 :         assert_eq!(
     889            2 :             key_space1.ranges,
     890            2 :             vec![
     891            2 :                 Key::from_i128(1)..Key::from_i128(3),
     892            2 :                 Key::from_i128(7)..Key::from_i128(8),
     893            2 :                 Key::from_i128(12)..Key::from_i128(14),
     894            2 :             ]
     895            2 :         );
     896            2 :     }
     897              : 
     898              :     #[test]
     899            2 :     fn test_remove_no_overlaps() {
     900            2 :         let mut key_space1 = KeySpace {
     901            2 :             ranges: vec![
     902            2 :                 Key::from_i128(1)..Key::from_i128(5),
     903            2 :                 Key::from_i128(7)..Key::from_i128(10),
     904            2 :                 Key::from_i128(12)..Key::from_i128(15),
     905            2 :             ],
     906            2 :         };
     907            2 :         let key_space2 = KeySpace {
     908            2 :             ranges: vec![
     909            2 :                 Key::from_i128(6)..Key::from_i128(7),
     910            2 :                 Key::from_i128(11)..Key::from_i128(12),
     911            2 :                 Key::from_i128(15)..Key::from_i128(17),
     912            2 :             ],
     913            2 :         };
     914            2 : 
     915            2 :         let removed = key_space1.remove_overlapping_with(&key_space2);
     916            2 :         let removed_expected = KeySpace::default();
     917            2 :         assert_eq!(removed, removed_expected);
     918              : 
     919            2 :         assert_eq!(
     920            2 :             key_space1.ranges,
     921            2 :             vec![
     922            2 :                 Key::from_i128(1)..Key::from_i128(5),
     923            2 :                 Key::from_i128(7)..Key::from_i128(10),
     924            2 :                 Key::from_i128(12)..Key::from_i128(15),
     925            2 :             ]
     926            2 :         );
     927            2 :     }
     928              : 
     929              :     #[test]
     930            2 :     fn test_remove_one_range_overlaps_multiple() {
     931            2 :         let mut key_space1 = KeySpace {
     932            2 :             ranges: vec![
     933            2 :                 Key::from_i128(1)..Key::from_i128(3),
     934            2 :                 Key::from_i128(3)..Key::from_i128(6),
     935            2 :                 Key::from_i128(6)..Key::from_i128(10),
     936            2 :                 Key::from_i128(12)..Key::from_i128(15),
     937            2 :                 Key::from_i128(17)..Key::from_i128(20),
     938            2 :                 Key::from_i128(20)..Key::from_i128(30),
     939            2 :                 Key::from_i128(30)..Key::from_i128(40),
     940            2 :             ],
     941            2 :         };
     942            2 :         let key_space2 = KeySpace {
     943            2 :             ranges: vec![Key::from_i128(9)..Key::from_i128(19)],
     944            2 :         };
     945            2 : 
     946            2 :         let removed = key_space1.remove_overlapping_with(&key_space2);
     947            2 :         let removed_expected = KeySpace {
     948            2 :             ranges: vec![
     949            2 :                 Key::from_i128(9)..Key::from_i128(10),
     950            2 :                 Key::from_i128(12)..Key::from_i128(15),
     951            2 :                 Key::from_i128(17)..Key::from_i128(19),
     952            2 :             ],
     953            2 :         };
     954            2 :         assert_eq!(removed, removed_expected);
     955              : 
     956            2 :         assert_eq!(
     957            2 :             key_space1.ranges,
     958            2 :             vec![
     959            2 :                 Key::from_i128(1)..Key::from_i128(3),
     960            2 :                 Key::from_i128(3)..Key::from_i128(6),
     961            2 :                 Key::from_i128(6)..Key::from_i128(9),
     962            2 :                 Key::from_i128(19)..Key::from_i128(20),
     963            2 :                 Key::from_i128(20)..Key::from_i128(30),
     964            2 :                 Key::from_i128(30)..Key::from_i128(40),
     965            2 :             ]
     966            2 :         );
     967            2 :     }
     968              :     #[test]
     969            2 :     fn sharded_range_relation_gap() {
     970            2 :         let shard_identity = ShardIdentity::new(
     971            2 :             ShardNumber(0),
     972            2 :             ShardCount::new(4),
     973            2 :             ShardParameters::DEFAULT_STRIPE_SIZE,
     974            2 :         )
     975            2 :         .unwrap();
     976            2 : 
     977            2 :         let range = ShardedRange::new(
     978            2 :             Range {
     979            2 :                 start: Key::from_hex("000000067F00000005000040100300000000").unwrap(),
     980            2 :                 end: Key::from_hex("000000067F00000005000040130000004000").unwrap(),
     981            2 :             },
     982            2 :             &shard_identity,
     983            2 :         );
     984            2 : 
     985            2 :         // Key range spans relations, expect MAX
     986            2 :         assert_eq!(range.page_count(), u32::MAX);
     987            2 :     }
     988              : 
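An illustrative sketch (not part of the original suite) of why the test above expects u32::MAX: the start and end keys name different relations, so the range cannot be measured in blocks. It assumes Key's individual fields (field4, field5) are visible in this module, as field6 is elsewhere in this file.

    #[test]
    fn relation_gap_keys_sketch() {
        // Hypothetical illustrative test, not in the original suite.
        let start = Key::from_hex("000000067F00000005000040100300000000").unwrap();
        let end = Key::from_hex("000000067F00000005000040130000004000").unwrap();
        // The relation-identifying fields differ, which is why page_count() above
        // reports u32::MAX instead of a block count.
        assert_ne!((start.field4, start.field5), (end.field4, end.field5));
    }
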
     989              :     #[test]
     990            2 :     fn shard_identity_keyspaces_single_key() {
     991            2 :         let shard_identity = ShardIdentity::new(
     992            2 :             ShardNumber(1),
     993            2 :             ShardCount::new(4),
     994            2 :             ShardParameters::DEFAULT_STRIPE_SIZE,
     995            2 :         )
     996            2 :         .unwrap();
     997            2 : 
     998            2 :         let range = ShardedRange::new(
     999            2 :             Range {
    1000            2 :                 start: Key::from_hex("000000067f000000010000007000ffffffff").unwrap(),
    1001            2 :                 end: Key::from_hex("000000067f00000001000000700100000000").unwrap(),
    1002            2 :             },
    1003            2 :             &shard_identity,
    1004            2 :         );
    1005            2 :         // Single-key range on logical size key
    1006            2 :         assert_eq!(range.page_count(), 1);
    1007            2 :     }
    1008              : 
    1009              :     /// Test the helper that we use to identify ranges which go outside the data blocks of a single relation
    1010              :     #[test]
    1011            2 :     fn contiguous_range_check() {
    1012            2 :         assert!(!is_contiguous_range(
    1013            2 :             &(Key::from_hex("000000067f00000001000004df00fffffffe").unwrap()
    1014            2 :                 ..Key::from_hex("000000067f00000001000004df0100000003").unwrap())
    1015            2 :         ),);
    1016              : 
     1017              :         // The range goes all the way up to 0xffffffff, including it: this is
    1018              :         // not considered a rel block range because 0xffffffff stores logical sizes,
    1019              :         // not blocks.
    1020            2 :         assert!(!is_contiguous_range(
    1021            2 :             &(Key::from_hex("000000067f00000001000004df00fffffffe").unwrap()
    1022            2 :                 ..Key::from_hex("000000067f00000001000004df0100000000").unwrap())
    1023            2 :         ),);
    1024              : 
    1025              :         // Keys within the normal data region of a relation
    1026            2 :         assert!(is_contiguous_range(
    1027            2 :             &(Key::from_hex("000000067f00000001000004df0000000000").unwrap()
    1028            2 :                 ..Key::from_hex("000000067f00000001000004df0000000080").unwrap())
    1029            2 :         ),);
    1030              : 
    1031              :         // The logical size key of one forkno, then some blocks in the next
    1032            2 :         assert!(is_contiguous_range(
    1033            2 :             &(Key::from_hex("000000067f00000001000004df00ffffffff").unwrap()
    1034            2 :                 ..Key::from_hex("000000067f00000001000004df0100000080").unwrap())
    1035            2 :         ),);
    1036            2 :     }
    1037              : 
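An illustrative sketch (not part of the original suite) of the convention the checks above rely on: the last key field (field6) carries the block number, and 0xffffffff in that position marks a relation's logical-size entry rather than a data block. It assumes field6 is accessible here, as it is elsewhere in this file.

    #[test]
    fn logical_size_suffix_sketch() {
        // Hypothetical illustrative test, not in the original suite.
        let logical_size_key = Key::from_hex("000000067f00000001000004df00ffffffff").unwrap();
        // The last 8 hex digits encode field6; 0xffffffff is the logical-size slot.
        assert_eq!(logical_size_key.field6, 0xffffffff);
    }
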
    1038              :     #[test]
    1039            2 :     fn shard_identity_keyspaces_forkno_gap() {
    1040            2 :         let shard_identity = ShardIdentity::new(
    1041            2 :             ShardNumber(1),
    1042            2 :             ShardCount::new(4),
    1043            2 :             ShardParameters::DEFAULT_STRIPE_SIZE,
    1044            2 :         )
    1045            2 :         .unwrap();
    1046            2 : 
    1047            2 :         let range = ShardedRange::new(
    1048            2 :             Range {
    1049            2 :                 start: Key::from_hex("000000067f00000001000004df00fffffffe").unwrap(),
    1050            2 :                 end: Key::from_hex("000000067f00000001000004df0100000003").unwrap(),
    1051            2 :             },
    1052            2 :             &shard_identity,
    1053            2 :         );
    1054            2 : 
    1055            2 :         // Range spanning the end of one forkno and the start of the next: we do not attempt to
     1056            2 :         // calculate a valid size, because we have no way to know whether the keys between start
    1057            2 :         // and end are actually in use.
    1058            2 :         assert_eq!(range.page_count(), u32::MAX);
    1059            2 :     }
    1060              : 
    1061              :     #[test]
    1062            2 :     fn shard_identity_keyspaces_one_relation() {
    1063           10 :         for shard_number in 0..4 {
    1064            8 :             let shard_identity = ShardIdentity::new(
    1065            8 :                 ShardNumber(shard_number),
    1066            8 :                 ShardCount::new(4),
    1067            8 :                 ShardParameters::DEFAULT_STRIPE_SIZE,
    1068            8 :             )
    1069            8 :             .unwrap();
    1070            8 : 
    1071            8 :             let range = ShardedRange::new(
    1072            8 :                 Range {
    1073            8 :                     start: Key::from_hex("000000067f00000001000000ae0000000000").unwrap(),
    1074            8 :                     end: Key::from_hex("000000067f00000001000000ae0000000001").unwrap(),
    1075            8 :                 },
    1076            8 :                 &shard_identity,
    1077            8 :             );
    1078            8 : 
    1079            8 :             // Very simple case: range covering block zero of one relation, where that block maps to shard zero
    1080            8 :             if shard_number == 0 {
    1081            2 :                 assert_eq!(range.page_count(), 1);
    1082              :             } else {
    1083              :                 // Other shards should perceive the range's size as zero
    1084            6 :                 assert_eq!(range.page_count(), 0);
    1085              :             }
    1086              :         }
    1087            2 :     }
    1088              : 
    1089              :     /// Test helper: construct a ShardedRange and call fragment() on it, returning
    1090              :     /// the total page count in the range and the fragments.
    1091         2024 :     fn do_fragment(
    1092         2024 :         range_start: Key,
    1093         2024 :         range_end: Key,
    1094         2024 :         shard_identity: &ShardIdentity,
    1095         2024 :         target_nblocks: u32,
    1096         2024 :     ) -> (u32, Vec<(u32, Range<Key>)>) {
    1097         2024 :         let range = ShardedRange::new(
    1098         2024 :             Range {
    1099         2024 :                 start: range_start,
    1100         2024 :                 end: range_end,
    1101         2024 :             },
    1102         2024 :             shard_identity,
    1103         2024 :         );
    1104         2024 : 
    1105         2024 :         let page_count = range.page_count();
    1106         2024 :         let fragments = range.fragment(target_nblocks);
    1107         2024 : 
    1108         2024 :         // Invariant: we always get at least one fragment
    1109         2024 :         assert!(!fragments.is_empty());
    1110              : 
    1111              :         // Invariant: the first/last fragment start/end should equal the input start/end
    1112         2024 :         assert_eq!(fragments.first().unwrap().1.start, range_start);
    1113         2024 :         assert_eq!(fragments.last().unwrap().1.end, range_end);
    1114              : 
    1115         2024 :         if page_count > 0 {
    1116              :             // Invariant: every fragment must contain at least one shard-local page, if the
    1117              :             // total range contains at least one shard-local page
    1118         1374 :             let all_nonzero = fragments.iter().all(|f| f.0 > 0);
    1119         1108 :             if !all_nonzero {
    1120            0 :                 eprintln!("Found a zero-length fragment: {:?}", fragments);
    1121         1108 :             }
    1122         1108 :             assert!(all_nonzero);
    1123              :         } else {
    1124              :             // A range with no shard-local pages should always be returned as a single fragment
    1125          916 :             assert_eq!(fragments, vec![(0, range_start..range_end)]);
    1126              :         }
    1127              : 
    1128              :         // Invariant: fragments must be ordered and non-overlapping
    1129         2024 :         let mut last: Option<Range<Key>> = None;
    1130         4314 :         for frag in &fragments {
    1131         2290 :             if let Some(last) = last {
    1132          266 :                 assert!(frag.1.start >= last.end);
    1133          266 :                 assert!(frag.1.start > last.start);
    1134         2024 :             }
    1135         2290 :             last = Some(frag.1.clone())
    1136              :         }
    1137              : 
    1138              :         // Invariant: fragments respect target_nblocks
    1139         4314 :         for frag in &fragments {
    1140         2290 :             assert!(frag.0 == u32::MAX || frag.0 <= target_nblocks);
    1141              :         }
    1142              : 
    1143         2024 :         (page_count, fragments)
    1144         2024 :     }
    1145              : 
    1146              :     /// Really simple tests for fragment(), on a range that just contains a single stripe
    1147              :     /// for a single tenant.
    1148              :     #[test]
    1149            2 :     fn sharded_range_fragment_simple() {
    1150            2 :         let shard_identity = ShardIdentity::new(
    1151            2 :             ShardNumber(0),
    1152            2 :             ShardCount::new(4),
    1153            2 :             ShardParameters::DEFAULT_STRIPE_SIZE,
    1154            2 :         )
    1155            2 :         .unwrap();
    1156            2 : 
    1157            2 :         // A range which we happen to know covers exactly one stripe which belongs to this shard
    1158            2 :         let input_start = Key::from_hex("000000067f00000001000000ae0000000000").unwrap();
    1159            2 :         let input_end = Key::from_hex("000000067f00000001000000ae0000008000").unwrap();
    1160            2 : 
    1161            2 :         // Ask for stripe_size blocks, we get the whole stripe
    1162            2 :         assert_eq!(
    1163            2 :             do_fragment(input_start, input_end, &shard_identity, 32768),
    1164            2 :             (32768, vec![(32768, input_start..input_end)])
    1165            2 :         );
    1166              : 
    1167              :         // Ask for more, we still get the whole stripe
    1168            2 :         assert_eq!(
    1169            2 :             do_fragment(input_start, input_end, &shard_identity, 10000000),
    1170            2 :             (32768, vec![(32768, input_start..input_end)])
    1171            2 :         );
    1172              : 
    1173              :         // Ask for target_nblocks of half the stripe size, we get two halves
    1174            2 :         assert_eq!(
    1175            2 :             do_fragment(input_start, input_end, &shard_identity, 16384),
    1176            2 :             (
    1177            2 :                 32768,
    1178            2 :                 vec![
    1179            2 :                     (16384, input_start..input_start.add(16384)),
    1180            2 :                     (16384, input_start.add(16384)..input_end)
    1181            2 :                 ]
    1182            2 :             )
    1183            2 :         );
    1184            2 :     }
    1185              : 
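A small illustrative sketch (not part of the original suite) relating the hex suffixes above to the block counts: the end key is exactly 0x8000 (32768) blocks past the start key, which is why asking for half that as target_nblocks splits the stripe at input_start.add(16384).

    #[test]
    fn single_stripe_key_arithmetic_sketch() {
        // Hypothetical illustrative test, not in the original suite.
        let input_start = Key::from_hex("000000067f00000001000000ae0000000000").unwrap();
        let input_end = Key::from_hex("000000067f00000001000000ae0000008000").unwrap();
        // The two keys differ only in their block-number suffix, by one stripe's worth of blocks.
        assert_eq!(input_start.add(0x8000), input_end);
    }
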
    1186              :     #[test]
    1187            2 :     fn sharded_range_fragment_multi_stripe() {
    1188            2 :         let shard_identity = ShardIdentity::new(
    1189            2 :             ShardNumber(0),
    1190            2 :             ShardCount::new(4),
    1191            2 :             ShardParameters::DEFAULT_STRIPE_SIZE,
    1192            2 :         )
    1193            2 :         .unwrap();
    1194            2 : 
    1195            2 :         // A range which covers multiple stripes, exactly one of which belongs to the current shard.
    1196            2 :         let input_start = Key::from_hex("000000067f00000001000000ae0000000000").unwrap();
    1197            2 :         let input_end = Key::from_hex("000000067f00000001000000ae0000020000").unwrap();
    1198            2 :         // Ask for all the blocks, get a fragment that covers the whole range but reports
    1199            2 :         // its size to be just the blocks belonging to our shard.
    1200            2 :         assert_eq!(
    1201            2 :             do_fragment(input_start, input_end, &shard_identity, 131072),
    1202            2 :             (32768, vec![(32768, input_start..input_end)])
    1203            2 :         );
    1204              : 
    1205              :         // Ask for a sub-stripe quantity
    1206            2 :         assert_eq!(
    1207            2 :             do_fragment(input_start, input_end, &shard_identity, 16000),
    1208            2 :             (
    1209            2 :                 32768,
    1210            2 :                 vec![
    1211            2 :                     (16000, input_start..input_start.add(16000)),
    1212            2 :                     (16000, input_start.add(16000)..input_start.add(32000)),
    1213            2 :                     (768, input_start.add(32000)..input_end),
    1214            2 :                 ]
    1215            2 :             )
    1216            2 :         );
    1217              : 
     1218              :         // Try a range that starts one block into our owned stripe
    1219            2 :         assert_eq!(
    1220            2 :             do_fragment(input_start.add(1), input_end, &shard_identity, 131072),
    1221            2 :             (32767, vec![(32767, input_start.add(1)..input_end)])
    1222            2 :         );
    1223            2 :     }
    1224              : 
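An illustrative arithmetic sketch (not part of the original suite) for the expectations above, assuming the default stripe size of 0x8000 blocks and a shard count of 4:

    #[test]
    fn multi_stripe_arithmetic_sketch() {
        // Hypothetical illustrative test, not in the original suite.
        let stripe_size: u32 = 0x8000;
        let range_blocks: u32 = 0x20000; // keys spanned by input_start..input_end above
        // The range holds four stripes; exactly one of them belongs to shard 0.
        assert_eq!(range_blocks / stripe_size, 4);
        assert_eq!(stripe_size, 32768); // the shard-local page count reported above
        // Sub-stripe fragmentation: two full 16000-block fragments plus a 768-block remainder.
        assert_eq!(16000 + 16000 + 768, stripe_size);
    }
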
    1225              :     /// Test our calculations work correctly when we start a range from the logical size key of
    1226              :     /// a previous relation.
    1227              :     #[test]
    1228            2 :     fn sharded_range_fragment_starting_from_logical_size() {
    1229            2 :         let input_start = Key::from_hex("000000067f00000001000000ae00ffffffff").unwrap();
    1230            2 :         let input_end = Key::from_hex("000000067f00000001000000ae0100008000").unwrap();
    1231            2 : 
    1232            2 :         // Shard 0 owns the first stripe in the relation, and the preceding logical size is shard local too
    1233            2 :         let shard_identity = ShardIdentity::new(
    1234            2 :             ShardNumber(0),
    1235            2 :             ShardCount::new(4),
    1236            2 :             ShardParameters::DEFAULT_STRIPE_SIZE,
    1237            2 :         )
    1238            2 :         .unwrap();
    1239            2 :         assert_eq!(
    1240            2 :             do_fragment(input_start, input_end, &shard_identity, 0x10000),
    1241            2 :             (0x8001, vec![(0x8001, input_start..input_end)])
    1242            2 :         );
    1243              : 
    1244              :         // Shard 1 does not own the first stripe in the relation, but it does own the logical size (all shards
    1245              :         // store all logical sizes)
    1246            2 :         let shard_identity = ShardIdentity::new(
    1247            2 :             ShardNumber(1),
    1248            2 :             ShardCount::new(4),
    1249            2 :             ShardParameters::DEFAULT_STRIPE_SIZE,
    1250            2 :         )
    1251            2 :         .unwrap();
    1252            2 :         assert_eq!(
    1253            2 :             do_fragment(input_start, input_end, &shard_identity, 0x10000),
    1254            2 :             (0x1, vec![(0x1, input_start..input_end)])
    1255            2 :         );
    1256            2 :     }
    1257              : 
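An illustrative arithmetic sketch (not part of the original suite) behind the two expectations above, assuming a 0x8000-block stripe: shard 0 sees one full stripe of data blocks plus the preceding logical-size key, while shard 1 sees only the logical-size key.

    #[test]
    fn logical_size_page_count_arithmetic_sketch() {
        // Hypothetical illustrative test, not in the original suite.
        let stripe_blocks: u32 = 0x8000; // data blocks in the range owned by shard 0
        let logical_size_keys: u32 = 1; // every shard stores every relation's logical size
        assert_eq!(stripe_blocks + logical_size_keys, 0x8001); // shard 0's page_count above
        assert_eq!(logical_size_keys, 0x1); // shard 1's page_count above
    }
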
    1258              :     /// Test that ShardedRange behaves properly when used on un-sharded data
    1259              :     #[test]
    1260            2 :     fn sharded_range_fragment_unsharded() {
    1261            2 :         let shard_identity = ShardIdentity::unsharded();
    1262            2 : 
    1263            2 :         let input_start = Key::from_hex("000000067f00000001000000ae0000000000").unwrap();
    1264            2 :         let input_end = Key::from_hex("000000067f00000001000000ae0000010000").unwrap();
    1265            2 :         assert_eq!(
    1266            2 :             do_fragment(input_start, input_end, &shard_identity, 0x8000),
    1267            2 :             (
    1268            2 :                 0x10000,
    1269            2 :                 vec![
    1270            2 :                     (0x8000, input_start..input_start.add(0x8000)),
    1271            2 :                     (0x8000, input_start.add(0x8000)..input_start.add(0x10000))
    1272            2 :                 ]
    1273            2 :             )
    1274            2 :         );
    1275            2 :     }
    1276              : 
    1277              :     #[test]
    1278            2 :     fn sharded_range_fragment_cross_relation() {
    1279            2 :         let shard_identity = ShardIdentity::unsharded();
    1280            2 : 
    1281            2 :         // A range that spans relations: expect fragmentation to give up and return a u32::MAX size
    1282            2 :         let input_start = Key::from_hex("000000067f00000001000000ae0000000000").unwrap();
    1283            2 :         let input_end = Key::from_hex("000000068f00000001000000ae0000010000").unwrap();
    1284            2 :         assert_eq!(
    1285            2 :             do_fragment(input_start, input_end, &shard_identity, 0x8000),
    1286            2 :             (u32::MAX, vec![(u32::MAX, input_start..input_end),])
    1287            2 :         );
    1288              : 
    1289              :         // Same, but using a sharded identity
    1290            2 :         let shard_identity = ShardIdentity::new(
    1291            2 :             ShardNumber(0),
    1292            2 :             ShardCount::new(4),
    1293            2 :             ShardParameters::DEFAULT_STRIPE_SIZE,
    1294            2 :         )
    1295            2 :         .unwrap();
    1296            2 :         assert_eq!(
    1297            2 :             do_fragment(input_start, input_end, &shard_identity, 0x8000),
    1298            2 :             (u32::MAX, vec![(u32::MAX, input_start..input_end),])
    1299            2 :         );
    1300            2 :     }
    1301              : 
    1302              :     #[test]
    1303            2 :     fn sharded_range_fragment_tiny_nblocks() {
    1304            2 :         let shard_identity = ShardIdentity::unsharded();
    1305            2 : 
     1306            2 :         // A small range within a single relation, fragmented with a tiny target_nblocks
    1307            2 :         let input_start = Key::from_hex("000000067F00000001000004E10000000000").unwrap();
    1308            2 :         let input_end = Key::from_hex("000000067F00000001000004E10000000038").unwrap();
    1309            2 :         assert_eq!(
    1310            2 :             do_fragment(input_start, input_end, &shard_identity, 16),
    1311            2 :             (
    1312            2 :                 0x38,
    1313            2 :                 vec![
    1314            2 :                     (16, input_start..input_start.add(16)),
    1315            2 :                     (16, input_start.add(16)..input_start.add(32)),
    1316            2 :                     (16, input_start.add(32)..input_start.add(48)),
    1317            2 :                     (8, input_start.add(48)..input_end),
    1318            2 :                 ]
    1319            2 :             )
    1320            2 :         );
    1321            2 :     }
    1322              : 
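An illustrative check (not part of the original suite) that the fragment sizes above are simply the 0x38-block range split by the 16-block target:

    #[test]
    fn tiny_nblocks_arithmetic_sketch() {
        // Hypothetical illustrative test, not in the original suite.
        let range_blocks: u32 = 0x38; // 56 blocks between input_start and input_end above
        let target_nblocks: u32 = 16;
        // Three full fragments of 16 blocks, then an 8-block remainder.
        assert_eq!(range_blocks / target_nblocks, 3);
        assert_eq!(range_blocks % target_nblocks, 8);
        assert_eq!(3 * target_nblocks + 8, range_blocks);
    }
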
    1323              :     #[test]
    1324            2 :     fn sharded_range_fragment_fuzz() {
    1325            2 :         // Use a fixed seed: we don't want to explicitly pick values, but we do want
    1326            2 :         // the test to be reproducible.
    1327            2 :         let mut prng = rand::rngs::StdRng::seed_from_u64(0xdeadbeef);
    1328              : 
    1329         2002 :         for _i in 0..1000 {
    1330         2000 :             let shard_identity = if prng.next_u32() % 2 == 0 {
    1331         1038 :                 ShardIdentity::unsharded()
    1332              :             } else {
    1333          962 :                 let shard_count = prng.next_u32() % 127 + 1;
    1334          962 :                 ShardIdentity::new(
    1335          962 :                     ShardNumber((prng.next_u32() % shard_count) as u8),
    1336          962 :                     ShardCount::new(shard_count as u8),
    1337          962 :                     ShardParameters::DEFAULT_STRIPE_SIZE,
    1338          962 :                 )
    1339          962 :                 .unwrap()
    1340              :             };
    1341              : 
    1342         2000 :             let target_nblocks = prng.next_u32() % 65536 + 1;
    1343         2000 : 
    1344         2000 :             let start_offset = prng.next_u32() % 16384;
    1345         2000 : 
     1346         2000 :             // Try ranges between 1 and 8192 blocks in size
    1347         2000 :             let range_size = prng.next_u32() % 8192 + 1;
    1348         2000 : 
     1349         2000 :             // A random range within a single relation, starting at a random offset
    1350         2000 :             let input_start = Key::from_hex("000000067F00000001000004E10000000000")
    1351         2000 :                 .unwrap()
    1352         2000 :                 .add(start_offset);
    1353         2000 :             let input_end = input_start.add(range_size);
    1354         2000 : 
    1355         2000 :             // This test's main success conditions are the invariants baked into do_fragment
    1356         2000 :             let (_total_size, fragments) =
    1357         2000 :                 do_fragment(input_start, input_end, &shard_identity, target_nblocks);
    1358         2000 : 
    1359         2000 :             // Pick a random key within the range and check it appears in the output
    1360         2000 :             let example_key = input_start.add(prng.next_u32() % range_size);
    1361         2000 : 
    1362         2000 :             // Panic on unwrap if it isn't found
    1363         2000 :             let example_key_frag = fragments
    1364         2000 :                 .iter()
    1365         2146 :                 .find(|f| f.1.contains(&example_key))
    1366         2000 :                 .unwrap();
    1367         2000 : 
    1368         2000 :             // Check that the fragment containing our random key has a nonzero size if
    1369         2000 :             // that key is shard-local
    1370         2000 :             let example_key_local = !shard_identity.is_key_disposable(&example_key);
    1371         2000 :             if example_key_local {
    1372         1084 :                 assert!(example_key_frag.0 > 0);
    1373          916 :             }
    1374              :         }
    1375            2 :     }
    1376              : }
        

Generated by: LCOV version 2.1-beta