//! A helper tool to manage pageserver binary files.
//! Accepts a file as an argument, attempts to parse it in all possible ways,
//! and prints its interpreted contents.
//!
//! A separate `metadata` subcommand allows printing and updating the pageserver's metadata file.

mod draw_timeline_dir;
mod index_part;
mod key;
mod layer_map_analyzer;
mod layers;

use std::{
    str::FromStr,
    time::{Duration, SystemTime},
};

use camino::{Utf8Path, Utf8PathBuf};
use clap::{Parser, Subcommand};
use index_part::IndexPartCmd;
use layers::LayerCmd;
use pageserver::{
    context::{DownloadBehavior, RequestContext},
    page_cache,
    task_mgr::TaskKind,
    tenant::{dump_layerfile_from_path, metadata::TimelineMetadata},
    virtual_file::{self, api::IoMode},
};
use pageserver_api::shard::TenantShardId;
use postgres_ffi::ControlFileData;
use remote_storage::{RemotePath, RemoteStorageConfig};
use tokio_util::sync::CancellationToken;
use utils::{
    id::TimelineId,
    logging::{self, LogFormat, TracingErrorLayerEnablement},
    lsn::Lsn,
    project_git_version,
};

project_git_version!(GIT_VERSION);

#[derive(Parser)]
#[command(
    version = GIT_VERSION,
    about = "Neon Pageserver binutils",
    long_about = "A management utility for reading pageserver (and related) binary files"
)]
#[command(propagate_version = true)]
struct CliOpts {
    #[command(subcommand)]
    command: Commands,
}

#[derive(Subcommand)]
enum Commands {
    Metadata(MetadataCmd),
    #[command(subcommand)]
    IndexPart(IndexPartCmd),
    PrintLayerFile(PrintLayerFileCmd),
    TimeTravelRemotePrefix(TimeTravelRemotePrefixCmd),
    DrawTimeline {},
    AnalyzeLayerMap(AnalyzeLayerMapCmd),
    #[command(subcommand)]
    Layer(LayerCmd),
    /// Debug print a hex key found in logs
    Key(key::DescribeKeyCommand),
}

/// Read and update pageserver metadata file
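///
/// Example invocation (illustrative only; the binary name `pagectl` and the LSN value
/// below are assumptions, not taken from this file):
/// `pagectl metadata /path/to/timeline/metadata 0/16960E8`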
#[derive(Parser)]
struct MetadataCmd {
    /// Input metadata file path
    metadata_path: Utf8PathBuf,
    /// Replace disk consistent LSN
    disk_consistent_lsn: Option<Lsn>,
    /// Replace previous record LSN
    prev_record_lsn: Option<Lsn>,
    /// Replace latest GC cutoff LSN
    latest_gc_cuttoff: Option<Lsn>,
}

#[derive(Parser)]
struct PrintLayerFileCmd {
    /// Pageserver data path
    path: Utf8PathBuf,
}

/// Roll back the specified prefix to an earlier point in time using S3 version history.
///
/// The command is fairly low level and powerful. Validation is only very light,
/// so it is potentially dangerous.
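///
/// Example invocation (illustrative only; the binary name `pagectl`, the bucket name, and the
/// concrete prefix are assumptions, not taken from this file):
/// `pagectl time-travel-remote-prefix 'remote_storage = { bucket_name = "my-bucket", bucket_region = "us-east-2" }' 'pageserver/v1/tenants/3aa8fcc61f6d357410b7de754b1d9001/timelines/' '2024-01-20T10:45:45Z'`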
#[derive(Parser)]
struct TimeTravelRemotePrefixCmd {
    /// A configuration string for the remote_storage configuration.
    ///
    /// Example: `remote_storage = { bucket_name = "aws-storage-bucket-name", bucket_region = "us-east-2" }`
    config_toml_str: String,
    /// Remote prefix to time travel recover. For safety reasons, it is required to contain
    /// a tenant or timeline ID.
    prefix: String,
    /// Timestamp to travel to, in a format like `2024-01-20T10:45:45Z`. Assumes UTC and second accuracy.
    travel_to: String,
    /// Timestamp of the start of the operation; must be after any changes we want to roll back.
    /// You can use a timestamp from a few seconds before invoking the command. Same format as `travel_to`.
    done_if_after: Option<String>,
}

#[derive(Parser)]
struct AnalyzeLayerMapCmd {
    /// Pageserver data path
    path: Utf8PathBuf,
    /// Max holes
    max_holes: Option<usize>,
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    logging::init(
        LogFormat::Plain,
        TracingErrorLayerEnablement::EnableWithRustLogFilter,
        logging::Output::Stdout,
    )?;

    logging::replace_panic_hook_with_tracing_panic_hook().forget();

    let cli = CliOpts::parse();

    match cli.command {
        Commands::Layer(cmd) => {
            layers::main(&cmd).await?;
        }
        Commands::Metadata(cmd) => {
            handle_metadata(&cmd)?;
        }
        Commands::IndexPart(cmd) => {
            index_part::main(&cmd).await?;
        }
        Commands::DrawTimeline {} => {
            draw_timeline_dir::main()?;
        }
        Commands::AnalyzeLayerMap(cmd) => {
            layer_map_analyzer::main(&cmd).await?;
        }
        Commands::PrintLayerFile(cmd) => {
            if let Err(e) = read_pg_control_file(&cmd.path) {
                println!(
                    "Failed to read input file as a pg control one: {e:#}\n\
                    Attempting to read it as layer file"
                );
                print_layerfile(&cmd.path).await?;
            }
        }
        Commands::TimeTravelRemotePrefix(cmd) => {
            let timestamp = humantime::parse_rfc3339(&cmd.travel_to)
                .map_err(|_e| anyhow::anyhow!("Invalid time for travel_to: '{}'", cmd.travel_to))?;

            let done_if_after = if let Some(done_if_after) = &cmd.done_if_after {
                humantime::parse_rfc3339(done_if_after).map_err(|_e| {
                    anyhow::anyhow!("Invalid time for done_if_after: '{}'", done_if_after)
                })?
            } else {
                const SAFETY_MARGIN: Duration = Duration::from_secs(3);
                tokio::time::sleep(SAFETY_MARGIN).await;
                // Convert to string representation and back to get rid of sub-second values
                let done_if_after = SystemTime::now();
                tokio::time::sleep(SAFETY_MARGIN).await;
                done_if_after
            };

            let timestamp = strip_subsecond(timestamp);
            let done_if_after = strip_subsecond(done_if_after);

            let Some(prefix) = validate_prefix(&cmd.prefix) else {
                println!("specified prefix '{}' failed validation", cmd.prefix);
                return Ok(());
            };
            let toml_document = toml_edit::DocumentMut::from_str(&cmd.config_toml_str)?;
            let toml_item = toml_document
                .get("remote_storage")
                .expect("need remote_storage");
            let config = RemoteStorageConfig::from_toml(toml_item)?;
            let storage = remote_storage::GenericRemoteStorage::from_config(&config).await;
            let cancel = CancellationToken::new();
            storage
                .unwrap()
                .time_travel_recover(Some(&prefix), timestamp, done_if_after, &cancel)
                .await?;
        }
        Commands::Key(dkc) => dkc.execute(),
    };
    Ok(())
}

fn read_pg_control_file(control_file_path: &Utf8Path) -> anyhow::Result<()> {
    let control_file = ControlFileData::decode(&std::fs::read(control_file_path)?)?;
    println!("{control_file:?}");
    let control_file_initdb = Lsn(control_file.checkPoint);
    println!(
        "pg_initdb_lsn: {}, aligned: {}",
        control_file_initdb,
        control_file_initdb.align()
    );
    Ok(())
}

async fn print_layerfile(path: &Utf8Path) -> anyhow::Result<()> {
    // Basic initialization of things that don't change after startup
    virtual_file::init(
        10,
        virtual_file::api::IoEngineKind::StdFs,
        IoMode::preferred(),
    );
    page_cache::init(100);
    let ctx = RequestContext::new(TaskKind::DebugTool, DownloadBehavior::Error);
    dump_layerfile_from_path(path, true, &ctx).await
}

fn handle_metadata(
    MetadataCmd {
        metadata_path: path,
        disk_consistent_lsn,
        prev_record_lsn,
        latest_gc_cuttoff,
    }: &MetadataCmd,
) -> Result<(), anyhow::Error> {
    let metadata_bytes = std::fs::read(path)?;
    let mut meta = TimelineMetadata::from_bytes(&metadata_bytes)?;
    println!("Current metadata:\n{meta:?}");
    let mut update_meta = false;
    // TODO: simplify this part
    if let Some(disk_consistent_lsn) = disk_consistent_lsn {
        meta = TimelineMetadata::new(
            *disk_consistent_lsn,
            meta.prev_record_lsn(),
            meta.ancestor_timeline(),
            meta.ancestor_lsn(),
            meta.latest_gc_cutoff_lsn(),
            meta.initdb_lsn(),
            meta.pg_version(),
        );
        update_meta = true;
    }
    if let Some(prev_record_lsn) = prev_record_lsn {
        meta = TimelineMetadata::new(
            meta.disk_consistent_lsn(),
            Some(*prev_record_lsn),
            meta.ancestor_timeline(),
            meta.ancestor_lsn(),
            meta.latest_gc_cutoff_lsn(),
            meta.initdb_lsn(),
            meta.pg_version(),
        );
        update_meta = true;
    }
    if let Some(latest_gc_cuttoff) = latest_gc_cuttoff {
        meta = TimelineMetadata::new(
            meta.disk_consistent_lsn(),
            meta.prev_record_lsn(),
            meta.ancestor_timeline(),
            meta.ancestor_lsn(),
            *latest_gc_cuttoff,
            meta.initdb_lsn(),
            meta.pg_version(),
        );
        update_meta = true;
    }

    if update_meta {
        let metadata_bytes = meta.to_bytes()?;
        std::fs::write(path, metadata_bytes)?;
    }

    Ok(())
}

/// Ensures that the given S3 prefix is sufficiently constrained.
/// The command is already very risky, and we don't want to expose something that allows
/// usually unintentional, catastrophic time travel of an entire bucket, which would be
/// only one character change away (similar to `rm -r /home /username/foobar`).
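///
/// For example, prefixes ending in a timeline ID, or in a tenant (shard) ID followed by
/// `timelines`, are accepted, while `pageserver/v1/tenants/` alone is rejected
/// (see the tests at the bottom of this file).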
fn validate_prefix(prefix: &str) -> Option<RemotePath> {
    if prefix.is_empty() {
        // Empty prefix means we want to specify the *whole* bucket
        return None;
    }
    let components = prefix.split('/').collect::<Vec<_>>();
    let (last, components) = {
        let last = components.last()?;
        if last.is_empty() {
            (
                components.iter().nth_back(1)?,
                &components[..(components.len() - 1)],
            )
        } else {
            (last, &components[..])
        }
    };
    'valid: {
        if let Ok(_timeline_id) = TimelineId::from_str(last) {
            // Ends in either a tenant or timeline ID
            break 'valid;
        }
        if *last == "timelines" {
            if let Some(before_last) = components.iter().nth_back(1) {
                if let Ok(_tenant_id) = TenantShardId::from_str(before_last) {
                    // Has a valid tenant id
                    break 'valid;
                }
            }
        }

        return None;
    }
    RemotePath::from_string(prefix).ok()
}

fn strip_subsecond(timestamp: SystemTime) -> SystemTime {
    let ts_str = humantime::format_rfc3339_seconds(timestamp).to_string();
    humantime::parse_rfc3339(&ts_str).expect("can't parse just created timestamp")
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_validate_prefix() {
        assert_eq!(validate_prefix(""), None);
        assert_eq!(validate_prefix("/"), None);
        #[track_caller]
        fn assert_valid(prefix: &str) {
            let remote_path = RemotePath::from_string(prefix).unwrap();
            assert_eq!(validate_prefix(prefix), Some(remote_path));
        }
        assert_valid("wal/3aa8fcc61f6d357410b7de754b1d9001/641e5342083b2235ee3deb8066819683/");
        // Path is not relative but absolute
        assert_eq!(
            validate_prefix(
                "/wal/3aa8fcc61f6d357410b7de754b1d9001/641e5342083b2235ee3deb8066819683/"
            ),
            None
        );
        assert_valid("wal/3aa8fcc61f6d357410b7de754b1d9001/");
        // Partial tenant IDs should be invalid, S3 will match all tenants with the specific ID prefix
        assert_eq!(validate_prefix("wal/3aa8fcc61f6d357410b7d"), None);
        assert_eq!(validate_prefix("wal"), None);
        assert_eq!(validate_prefix("/wal/"), None);
        assert_valid("pageserver/v1/tenants/3aa8fcc61f6d357410b7de754b1d9001");
        // Partial tenant ID
        assert_eq!(
            validate_prefix("pageserver/v1/tenants/3aa8fcc61f6d357410b"),
            None
        );
        assert_valid("pageserver/v1/tenants/3aa8fcc61f6d357410b7de754b1d9001/timelines");
        assert_valid("pageserver/v1/tenants/3aa8fcc61f6d357410b7de754b1d9001-0004/timelines");
        assert_valid("pageserver/v1/tenants/3aa8fcc61f6d357410b7de754b1d9001/timelines/");
        assert_valid("pageserver/v1/tenants/3aa8fcc61f6d357410b7de754b1d9001/timelines/641e5342083b2235ee3deb8066819683");
        assert_eq!(validate_prefix("pageserver/v1/tenants/"), None);
    }
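
    // A small additional sanity check (not part of the original test suite):
    // `strip_subsecond` rounds a timestamp down to whole-second precision by
    // round-tripping it through its RFC 3339 string representation.
    #[test]
    fn test_strip_subsecond() {
        use std::time::{Duration, UNIX_EPOCH};

        // Sub-second precision is dropped...
        let with_millis = UNIX_EPOCH + Duration::from_millis(1_500);
        assert_eq!(
            strip_subsecond(with_millis),
            UNIX_EPOCH + Duration::from_secs(1)
        );
        // ...while whole-second timestamps are left unchanged.
        let whole = UNIX_EPOCH + Duration::from_secs(42);
        assert_eq!(strip_subsecond(whole), whole);
    }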
}