//! A helper tool to manage pageserver binary files.
//! Accepts a file as an argument, attempts to parse it in all supported ways,
//! and prints the interpreted contents.
//!
//! A separate `metadata` subcommand prints and updates the pageserver's metadata file.
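//!
//! Example invocations (the `pagectl` binary name is an assumption here; subcommand
//! names follow clap's default kebab-case derivation from the `Commands` variants below):
//!
//! ```text
//! pagectl metadata /path/to/timeline/metadata
//! pagectl print-layer-file /path/to/layer_file
//! ```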

mod draw_timeline_dir;
mod index_part;
mod key;
mod layer_map_analyzer;
mod layers;
mod page_trace;

use std::str::FromStr;
use std::time::{Duration, SystemTime};

use camino::{Utf8Path, Utf8PathBuf};
use clap::{Parser, Subcommand};
use index_part::IndexPartCmd;
use layers::LayerCmd;
use page_trace::PageTraceCmd;
use pageserver::context::{DownloadBehavior, RequestContext};
use pageserver::page_cache;
use pageserver::task_mgr::TaskKind;
use pageserver::tenant::dump_layerfile_from_path;
use pageserver::tenant::metadata::TimelineMetadata;
use pageserver::virtual_file::api::IoMode;
use pageserver::virtual_file::{self};
use pageserver_api::shard::TenantShardId;
use postgres_ffi::ControlFileData;
use remote_storage::{RemotePath, RemoteStorageConfig};
use tokio_util::sync::CancellationToken;
use utils::id::TimelineId;
use utils::logging::{self, LogFormat, TracingErrorLayerEnablement};
use utils::lsn::Lsn;
use utils::project_git_version;

project_git_version!(GIT_VERSION);

#[derive(Parser)]
#[command(
    version = GIT_VERSION,
    about = "Neon Pageserver binutils",
    long_about = "A utility for reading and managing pageserver (and related) binary files"
)]
#[command(propagate_version = true)]
struct CliOpts {
    #[command(subcommand)]
    command: Commands,
}

#[derive(Subcommand)]
enum Commands {
    Metadata(MetadataCmd),
    #[command(subcommand)]
    IndexPart(IndexPartCmd),
    PrintLayerFile(PrintLayerFileCmd),
    TimeTravelRemotePrefix(TimeTravelRemotePrefixCmd),
    DrawTimeline {},
    AnalyzeLayerMap(AnalyzeLayerMapCmd),
    #[command(subcommand)]
    Layer(LayerCmd),
    /// Debug-print a hex key found in logs
    Key(key::DescribeKeyCommand),
    PageTrace(PageTraceCmd),
}

/// Read and update the pageserver metadata file
#[derive(Parser)]
struct MetadataCmd {
    /// Input metadata file path
    metadata_path: Utf8PathBuf,
    /// Replace the disk consistent LSN
    disk_consistent_lsn: Option<Lsn>,
    /// Replace the previous record LSN
    prev_record_lsn: Option<Lsn>,
    /// Replace the latest gc cutoff
    latest_gc_cuttoff: Option<Lsn>,
}

#[derive(Parser)]
struct PrintLayerFileCmd {
    /// Pageserver data path
    path: Utf8PathBuf,
}

/// Roll back the time for the specified prefix using S3 history.
///
/// This command is fairly low level and powerful. Validation is deliberately light,
/// which makes it more flexible, but also potentially more dangerous.
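///
/// Example invocation (the `pagectl` binary name and the kebab-case subcommand name
/// are assumptions based on clap's derive defaults; the prefix and bucket are illustrative):
///
/// ```text
/// pagectl time-travel-remote-prefix \
///     'remote_storage = { bucket_name = "my-bucket", bucket_region = "us-east-2" }' \
///     pageserver/v1/tenants/3aa8fcc61f6d357410b7de754b1d9001/timelines/ \
///     2024-01-20T10:45:45Z
/// ```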
#[derive(Parser)]
struct TimeTravelRemotePrefixCmd {
    /// A configuration string for the remote_storage configuration.
    ///
    /// Example: `remote_storage = { bucket_name = "aws-storage-bucket-name", bucket_region = "us-east-2" }`
    config_toml_str: String,
    /// Remote prefix to time-travel recover. For safety reasons, we require it to contain
    /// a timeline or tenant ID in the prefix.
    prefix: String,
    /// Timestamp to travel to. Given in a format like `2024-01-20T10:45:45Z`. Assumes UTC and second accuracy.
    travel_to: String,
    /// Timestamp of the start of the operation. It must be later than any changes we want to roll back;
    /// a timestamp taken a few seconds before invoking the command works. Same format as `travel_to`.
    done_if_after: Option<String>,
}

#[derive(Parser)]
struct AnalyzeLayerMapCmd {
    /// Pageserver data path
    path: Utf8PathBuf,
    /// Max holes
    max_holes: Option<usize>,
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    logging::init(
        LogFormat::Plain,
        TracingErrorLayerEnablement::EnableWithRustLogFilter,
        logging::Output::Stdout,
    )?;

    logging::replace_panic_hook_with_tracing_panic_hook().forget();

    let cli = CliOpts::parse();

    match cli.command {
        Commands::Layer(cmd) => {
            layers::main(&cmd).await?;
        }
        Commands::Metadata(cmd) => {
            handle_metadata(&cmd)?;
        }
        Commands::IndexPart(cmd) => {
            index_part::main(&cmd).await?;
        }
        Commands::DrawTimeline {} => {
            draw_timeline_dir::main()?;
        }
        Commands::AnalyzeLayerMap(cmd) => {
            layer_map_analyzer::main(&cmd).await?;
        }
        Commands::PrintLayerFile(cmd) => {
            if let Err(e) = read_pg_control_file(&cmd.path) {
                println!(
                    "Failed to read input file as a pg control one: {e:#}\n\
                     Attempting to read it as a layer file"
                );
                print_layerfile(&cmd.path).await?;
            }
        }
        Commands::TimeTravelRemotePrefix(cmd) => {
            let timestamp = humantime::parse_rfc3339(&cmd.travel_to)
                .map_err(|_e| anyhow::anyhow!("Invalid time for travel_to: '{}'", cmd.travel_to))?;

            let done_if_after = if let Some(done_if_after) = &cmd.done_if_after {
                humantime::parse_rfc3339(done_if_after).map_err(|_e| {
                    anyhow::anyhow!("Invalid time for done_if_after: '{}'", done_if_after)
                })?
            } else {
                const SAFETY_MARGIN: Duration = Duration::from_secs(3);
                tokio::time::sleep(SAFETY_MARGIN).await;
                // Take the current time; sub-second precision is stripped off below.
                let done_if_after = SystemTime::now();
                tokio::time::sleep(SAFETY_MARGIN).await;
                done_if_after
            };

            let timestamp = strip_subsecond(timestamp);
            let done_if_after = strip_subsecond(done_if_after);

            let Some(prefix) = validate_prefix(&cmd.prefix) else {
                println!("specified prefix '{}' failed validation", cmd.prefix);
                return Ok(());
            };
            let config = RemoteStorageConfig::from_toml_str(&cmd.config_toml_str)?;
            let storage = remote_storage::GenericRemoteStorage::from_config(&config).await;
            let cancel = CancellationToken::new();
            // Complexity limit: as we are running this command locally, we should have a lot of memory available, and we do not
            // need to limit the number of versions we are going to delete.
            storage
                .unwrap()
                .time_travel_recover(Some(&prefix), timestamp, done_if_after, &cancel, None)
                .await?;
        }
        Commands::Key(dkc) => dkc.execute(),
        Commands::PageTrace(cmd) => page_trace::main(&cmd)?,
    };
    Ok(())
}

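/// Decodes the given file as a PostgreSQL control file and prints its contents,
/// along with the checkpoint LSN (raw and aligned).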
fn read_pg_control_file(control_file_path: &Utf8Path) -> anyhow::Result<()> {
    let control_file = ControlFileData::decode(&std::fs::read(control_file_path)?)?;
    println!("{control_file:?}");
    let control_file_initdb = Lsn(control_file.checkPoint);
    println!(
        "pg_initdb_lsn: {}, aligned: {}",
        control_file_initdb,
        control_file_initdb.align()
    );
    Ok(())
}

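/// Dumps the layer file at `path`, initializing just enough of the pageserver
/// runtime (virtual file engine and page cache) to be able to read it.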
async fn print_layerfile(path: &Utf8Path) -> anyhow::Result<()> {
    // Basic initialization of things that don't change after startup
    virtual_file::init(
        10,
        virtual_file::api::IoEngineKind::StdFs,
        IoMode::preferred(),
        virtual_file::SyncMode::Sync,
    );
    page_cache::init(100);
    let ctx =
        RequestContext::new(TaskKind::DebugTool, DownloadBehavior::Error).with_scope_debug_tools();
    dump_layerfile_from_path(path, true, &ctx).await
}

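/// Prints the current contents of the metadata file and, if any of the optional
/// LSN arguments were given, rewrites the file in place with the replaced values.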
fn handle_metadata(
    MetadataCmd {
        metadata_path: path,
        disk_consistent_lsn,
        prev_record_lsn,
        latest_gc_cuttoff,
    }: &MetadataCmd,
) -> Result<(), anyhow::Error> {
    let metadata_bytes = std::fs::read(path)?;
    let mut meta = TimelineMetadata::from_bytes(&metadata_bytes)?;
    println!("Current metadata:\n{meta:?}");
    let mut update_meta = false;
    // TODO: simplify this part
    if let Some(disk_consistent_lsn) = disk_consistent_lsn {
        meta = TimelineMetadata::new(
            *disk_consistent_lsn,
            meta.prev_record_lsn(),
            meta.ancestor_timeline(),
            meta.ancestor_lsn(),
            meta.latest_gc_cutoff_lsn(),
            meta.initdb_lsn(),
            meta.pg_version(),
        );
        update_meta = true;
    }
    if let Some(prev_record_lsn) = prev_record_lsn {
        meta = TimelineMetadata::new(
            meta.disk_consistent_lsn(),
            Some(*prev_record_lsn),
            meta.ancestor_timeline(),
            meta.ancestor_lsn(),
            meta.latest_gc_cutoff_lsn(),
            meta.initdb_lsn(),
            meta.pg_version(),
        );
        update_meta = true;
    }
    if let Some(latest_gc_cuttoff) = latest_gc_cuttoff {
        meta = TimelineMetadata::new(
            meta.disk_consistent_lsn(),
            meta.prev_record_lsn(),
            meta.ancestor_timeline(),
            meta.ancestor_lsn(),
            *latest_gc_cuttoff,
            meta.initdb_lsn(),
            meta.pg_version(),
        );
        update_meta = true;
    }

    if update_meta {
        let metadata_bytes = meta.to_bytes()?;
        std::fs::write(path, metadata_bytes)?;
    }

    Ok(())
}

/// Ensures that the given S3 prefix is sufficiently constrained.
/// The command is already very risky, and we don't want to expose something that
/// allows an unintentional, quite catastrophic time travel of an entire bucket,
/// which would be only one character change away (similar to `rm -r /home /username/foobar`).
fn validate_prefix(prefix: &str) -> Option<RemotePath> {
    if prefix.is_empty() {
        // Empty prefix means we want to specify the *whole* bucket
        return None;
    }
    let components = prefix.split('/').collect::<Vec<_>>();
    let (last, components) = {
        let last = components.last()?;
        if last.is_empty() {
            (
                components.iter().nth_back(1)?,
                &components[..(components.len() - 1)],
            )
        } else {
            (last, &components[..])
        }
    };
    'valid: {
        if let Ok(_timeline_id) = TimelineId::from_str(last) {
            // Ends in either a tenant or timeline ID
            break 'valid;
        }
        if *last == "timelines" {
            if let Some(before_last) = components.iter().nth_back(1) {
                if let Ok(_tenant_id) = TenantShardId::from_str(before_last) {
                    // Has a valid tenant id
                    break 'valid;
                }
            }
        }

        return None;
    }
    RemotePath::from_string(prefix).ok()
}

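/// Truncates a timestamp to whole-second precision by round-tripping it through
/// its RFC 3339 string representation.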
fn strip_subsecond(timestamp: SystemTime) -> SystemTime {
    let ts_str = humantime::format_rfc3339_seconds(timestamp).to_string();
    humantime::parse_rfc3339(&ts_str).expect("can't parse just created timestamp")
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_validate_prefix() {
        assert_eq!(validate_prefix(""), None);
        assert_eq!(validate_prefix("/"), None);
        #[track_caller]
        fn assert_valid(prefix: &str) {
            let remote_path = RemotePath::from_string(prefix).unwrap();
            assert_eq!(validate_prefix(prefix), Some(remote_path));
        }
        assert_valid("wal/3aa8fcc61f6d357410b7de754b1d9001/641e5342083b2235ee3deb8066819683/");
        // Path is not relative but absolute
        assert_eq!(
            validate_prefix(
                "/wal/3aa8fcc61f6d357410b7de754b1d9001/641e5342083b2235ee3deb8066819683/"
            ),
            None
        );
        assert_valid("wal/3aa8fcc61f6d357410b7de754b1d9001/");
        // Partial tenant IDs should be invalid, S3 will match all tenants with the specific ID prefix
        assert_eq!(validate_prefix("wal/3aa8fcc61f6d357410b7d"), None);
        assert_eq!(validate_prefix("wal"), None);
        assert_eq!(validate_prefix("/wal/"), None);
        assert_valid("pageserver/v1/tenants/3aa8fcc61f6d357410b7de754b1d9001");
        // Partial tenant ID
        assert_eq!(
            validate_prefix("pageserver/v1/tenants/3aa8fcc61f6d357410b"),
            None
        );
        assert_valid("pageserver/v1/tenants/3aa8fcc61f6d357410b7de754b1d9001/timelines");
        assert_valid("pageserver/v1/tenants/3aa8fcc61f6d357410b7de754b1d9001-0004/timelines");
        assert_valid("pageserver/v1/tenants/3aa8fcc61f6d357410b7de754b1d9001/timelines/");
        assert_valid(
            "pageserver/v1/tenants/3aa8fcc61f6d357410b7de754b1d9001/timelines/641e5342083b2235ee3deb8066819683",
        );
        assert_eq!(validate_prefix("pageserver/v1/tenants/"), None);
    }
}