//! Tool for extracting content-dependent metadata about layers. Useful for scanning real project layer files and evaluating the effectiveness of different heuristics on them.
//!
//! Currently it only analyzes holes, which are regions within the layer range that the layer contains no updates for. In the future it might do more analysis (maybe key quantiles?) but it should never return sensitive data.

use anyhow::Result;
use camino::{Utf8Path, Utf8PathBuf};
use pageserver::context::{DownloadBehavior, RequestContext};
use pageserver::task_mgr::TaskKind;
use pageserver::tenant::{TENANTS_SEGMENT_NAME, TIMELINES_SEGMENT_NAME};
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::ops::Range;
use std::{fs, str};

use pageserver::page_cache::{self, PAGE_SZ};
use pageserver::repository::{Key, KEY_SIZE};
use pageserver::tenant::block_io::FileBlockReader;
use pageserver::tenant::disk_btree::{DiskBtreeReader, VisitDirection};
use pageserver::tenant::storage_layer::delta_layer::{Summary, DELTA_KEY_SIZE};
use pageserver::tenant::storage_layer::range_overlaps;
use pageserver::virtual_file::{self, VirtualFile};

use utils::{bin_ser::BeSer, lsn::Lsn};

use crate::AnalyzeLayerMapCmd;

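// A hole must span at least MIN_HOLE_LENGTH keys (roughly 128 MiB worth of pages) to be
// reported; at most DEFAULT_MAX_HOLES of the largest holes are kept per delta layer unless
// overridden on the command line.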
const MIN_HOLE_LENGTH: i128 = (128 * 1024 * 1024 / PAGE_SZ) as i128;
const DEFAULT_MAX_HOLES: usize = 10;

/// Wrapper for key range to provide reverse ordering by range length for BinaryHeap
#[derive(PartialEq, Eq)]
pub struct Hole(Range<Key>);

impl Ord for Hole {
    fn cmp(&self, other: &Self) -> Ordering {
        let other_len = other.0.end.to_i128() - other.0.start.to_i128();
        let self_len = self.0.end.to_i128() - self.0.start.to_i128();
        other_len.cmp(&self_len)
    }
}

impl PartialOrd for Hole {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

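/// Metadata parsed from a layer file name, plus (for delta layers) the largest holes found in
/// its on-disk key index.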
pub(crate) struct LayerFile {
    pub key_range: Range<Key>,
    pub lsn_range: Range<Lsn>,
    pub is_delta: bool,
    pub holes: Vec<Hole>,
}

impl LayerFile {
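    /// Returns true if this layer's key range overlaps `key_range` but one of its holes
    /// completely covers `key_range`, i.e. the layer contains no updates for that range.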
    fn skips(&self, key_range: &Range<Key>) -> bool {
        if !range_overlaps(&self.key_range, key_range) {
            return false;
        }
        let start = match self
            .holes
            .binary_search_by_key(&key_range.start, |hole| hole.0.start)
        {
            Ok(index) => index,
            Err(index) => {
                if index == 0 {
                    return false;
                }
                index - 1
            }
        };
        self.holes[start].0.end >= key_range.end
    }
}

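/// Parses a layer file name into a `LayerFile`. Image layer names carry a single LSN
/// (`<key_start>-<key_end>__<lsn>`), delta layer names an LSN range
/// (`<key_start>-<key_end>__<lsn_start>-<lsn_end>`); an optional `-vX-XXXXXX` generation
/// suffix is tolerated. Returns `None` if the name does not contain exactly one `__` separator.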
pub(crate) fn parse_filename(name: &str) -> Option<LayerFile> {
    let split: Vec<&str> = name.split("__").collect();
    if split.len() != 2 {
        return None;
    }
    let keys: Vec<&str> = split[0].split('-').collect();
    let lsn_and_opt_generation: Vec<&str> = split[1].split('v').collect();
    let lsns: Vec<&str> = lsn_and_opt_generation[0].split('-').collect();
    let the_lsns: [&str; 2];

    /*
     * Generations add a -vX-XXXXXX postfix, which causes issues when we try to
     * parse 'vX' as an LSN.
     */
    let is_delta = if lsns.len() == 1 || lsns[1].is_empty() {
        the_lsns = [lsns[0], lsns[0]];
        false
    } else {
        the_lsns = [lsns[0], lsns[1]];
        true
    };

    let key_range = Key::from_hex(keys[0]).unwrap()..Key::from_hex(keys[1]).unwrap();
    let lsn_range = Lsn::from_hex(the_lsns[0]).unwrap()..Lsn::from_hex(the_lsns[1]).unwrap();
    let holes = Vec::new();
    Some(LayerFile {
        key_range,
        lsn_range,
        is_delta,
        holes,
    })
}

// Finds the `max_holes` largest holes in a delta layer by walking its on-disk B-tree index,
// ignoring any that are shorter than MIN_HOLE_LENGTH.
async fn get_holes(path: &Utf8Path, max_holes: usize, ctx: &RequestContext) -> Result<Vec<Hole>> {
    let file = VirtualFile::open(path, ctx).await?;
    let file_id = page_cache::next_file_id();
    let block_reader = FileBlockReader::new(&file, file_id);
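    // Block 0 of a delta layer file holds the Summary, which records where the B-tree index
    // starts and where its root block is.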
    let summary_blk = block_reader.read_blk(0, ctx).await?;
    let actual_summary = Summary::des_prefix(summary_blk.as_ref())?;
    let tree_reader = DiskBtreeReader::<_, DELTA_KEY_SIZE>::new(
        actual_summary.index_start_blk,
        actual_summary.index_root_blk,
        block_reader,
    );
    // min-heap (reserve space for one more element added before eviction)
    let mut heap: BinaryHeap<Hole> = BinaryHeap::with_capacity(max_holes + 1);
    let mut prev_key: Option<Key> = None;
    tree_reader
        .visit(
            &[0u8; DELTA_KEY_SIZE],
            VisitDirection::Forwards,
            |key, _value| {
                let curr = Key::from_slice(&key[..KEY_SIZE]);
                if let Some(prev) = prev_key {
                    if curr.to_i128() - prev.to_i128() >= MIN_HOLE_LENGTH {
                        heap.push(Hole(prev..curr));
                        if heap.len() > max_holes {
                            heap.pop(); // remove smallest hole
                        }
                    }
                }
                prev_key = Some(curr.next());
                true
            },
            ctx,
        )
        .await?;
    let mut holes = heap.into_vec();
    holes.sort_by_key(|hole| hole.0.start);
    Ok(holes)
}

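/// Entry point for the layer map analysis command: walks every tenant and timeline under
/// `cmd.path` and prints per-timeline and total statistics about delta layers, image layers,
/// holes, and excess image layers.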
pub(crate) async fn main(cmd: &AnalyzeLayerMapCmd) -> Result<()> {
    let storage_path = &cmd.path;
    let max_holes = cmd.max_holes.unwrap_or(DEFAULT_MAX_HOLES);
    let ctx = RequestContext::new(TaskKind::DebugTool, DownloadBehavior::Error);

    // Initialize virtual_file (the file descriptor cache) and the page cache, which are needed to access the layers' persistent B-trees.
    pageserver::virtual_file::init(
        10,
        virtual_file::api::IoEngineKind::StdFs,
        pageserver_api::config::defaults::DEFAULT_IO_BUFFER_ALIGNMENT,
    );
    pageserver::page_cache::init(100);

    let mut total_delta_layers = 0usize;
    let mut total_image_layers = 0usize;
    let mut total_excess_layers = 0usize;
    for tenant in fs::read_dir(storage_path.join(TENANTS_SEGMENT_NAME))? {
        let tenant = tenant?;
        if !tenant.file_type()?.is_dir() {
            continue;
        }
        for timeline in fs::read_dir(tenant.path().join(TIMELINES_SEGMENT_NAME))? {
            let timeline = timeline?;
            if !timeline.file_type()?.is_dir() {
                continue;
            }
            // Collect a vec of layers (sorted below by end LSN) and count the delta layers.
            let mut layers = Vec::new();
            let mut n_deltas = 0usize;

            for layer in fs::read_dir(timeline.path())? {
                let layer = layer?;
                if let Some(mut layer_file) =
                    parse_filename(&layer.file_name().into_string().unwrap())
                {
                    if layer_file.is_delta {
                        let layer_path =
                            Utf8PathBuf::from_path_buf(layer.path()).expect("non-Unicode path");
                        layer_file.holes = get_holes(&layer_path, max_holes, &ctx).await?;
                        n_deltas += 1;
                    }
                    layers.push(layer_file);
                }
            }
            layers.sort_by_key(|layer| layer.lsn_range.end);

            // Count the number of holes and the number of excess layers.
            // An excess layer is an image layer that would only have been generated because
            // holes in the delta layers were not taken into account.
            let mut n_excess_layers = 0usize;
            let mut n_holes = 0usize;

            for i in 0..layers.len() {
                if !layers[i].is_delta {
                    let mut n_deltas_since_last_image = 0usize;
                    let mut n_skipped = 0usize;
                    let img_key_range = &layers[i].key_range;
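                    // Walk backwards through older layers that overlap this image layer's key
                    // range, counting delta layers until the previous image layer is reached.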
                    for j in (0..i).rev() {
                        if range_overlaps(img_key_range, &layers[j].key_range) {
                            if layers[j].is_delta {
                                n_deltas_since_last_image += 1;
                                if layers[j].skips(img_key_range) {
                                    n_skipped += 1;
                                }
                            } else {
                                // An image layer is always dense, even though it doesn't
                                // necessarily contain every possible key value in its range:
                                // no key that exists in the storage and falls within the image
                                // layer's range can be missing from the image layer.
                                break;
                            }
                        }
                    }
                    if n_deltas_since_last_image >= 3 && n_deltas_since_last_image - n_skipped < 3 {
                        // This is only an approximation: it doesn't take all image coverage
                        // into account. Moreover, the new layer map doesn't count total deltas,
                        // but the maximum stack of overlapping deltas.
                        n_excess_layers += 1;
                    }
                    n_holes += n_skipped;
                }
            }
            println!(
                "Tenant {} timeline {} delta layers {} image layers {} excess layers {} holes {}",
                tenant.file_name().into_string().unwrap(),
                timeline.file_name().into_string().unwrap(),
                n_deltas,
                layers.len() - n_deltas,
                n_excess_layers,
                n_holes
            );
            total_delta_layers += n_deltas;
            total_image_layers += layers.len() - n_deltas;
            total_excess_layers += n_excess_layers;
        }
    }
    println!(
        "Total delta layers {} image layers {} excess layers {}",
        total_delta_layers, total_image_layers, total_excess_layers
    );
    Ok(())
}