use anyhow::{anyhow, bail, Context};
use camino::Utf8PathBuf;
use pageserver_api::controller_api::{MetadataHealthUpdateRequest, MetadataHealthUpdateResponse};
use pageserver_api::shard::TenantShardId;
use reqwest::{Method, Url};
use storage_controller_client::control_api;
use storage_scrubber::garbage::{find_garbage, purge_garbage, PurgeMode};
use storage_scrubber::pageserver_physical_gc::GcMode;
use storage_scrubber::scan_pageserver_metadata::scan_pageserver_metadata;
use storage_scrubber::scan_safekeeper_metadata::DatabaseOrList;
use storage_scrubber::tenant_snapshot::SnapshotDownloader;
use storage_scrubber::{find_large_objects, ControllerClientConfig};
use storage_scrubber::{
    init_logging, pageserver_physical_gc::pageserver_physical_gc,
    scan_safekeeper_metadata::scan_safekeeper_metadata, BucketConfig, ConsoleConfig, NodeKind,
    TraversingDepth,
};

use clap::{Parser, Subcommand};
use utils::id::TenantId;

use utils::{project_build_tag, project_git_version};

project_git_version!(GIT_VERSION);
project_build_tag!(BUILD_TAG);

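// Top-level CLI of the storage scrubber. The bucket/prefix to operate on is
// read from the environment (see `BucketConfig::from_env` in `main`).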
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
#[command(arg_required_else_help(true))]
struct Cli {
    #[command(subcommand)]
    command: Command,

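    /// Actually delete objects: without this flag, destructive commands such as
    /// `purge-garbage` run as a dry run.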
    #[arg(short, long, default_value_t = false)]
    delete: bool,

    #[arg(long)]
    /// URL of the storage controller, e.g. http://127.0.0.1:1234 when using `neon_local`
    controller_api: Option<Url>,

    #[arg(long)]
    /// JWT token for authenticating with the storage controller. Requires scope 'scrubber' or 'admin'.
    controller_jwt: Option<String>,

    /// If set, the scrubber exits with a non-zero error code on fatal errors.
    #[arg(long, default_value_t = false)]
    exit_code: bool,
}

#[derive(Subcommand, Debug)]
enum Command {
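    /// Compare remote storage against the console and record tenants/timelines
    /// that are missing or deleted there, writing the findings to `output_path`.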
    FindGarbage {
        #[arg(short, long)]
        node_kind: NodeKind,
        #[arg(short, long, default_value_t=TraversingDepth::Tenant)]
        depth: TraversingDepth,
        #[arg(short, long, default_value_t = String::from("garbage.json"))]
        output_path: String,
    },
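    /// Delete the objects recorded by a previous `find-garbage` run. Honors the
    /// top-level `--delete` flag: without it, this is a dry run.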
    PurgeGarbage {
        #[arg(short, long)]
        input_path: String,
        #[arg(short, long, default_value_t = PurgeMode::DeletedOnly)]
        mode: PurgeMode,
        #[arg(long = "min-age")]
        min_age: humantime::Duration,
    },
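    /// Validate remote metadata for a pageserver or safekeeper node. For
    /// safekeepers, timeline LSN info is read either from a debug-dump database
    /// or from an explicit JSON list.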
    #[command(verbatim_doc_comment)]
    ScanMetadata {
        #[arg(short, long)]
        node_kind: NodeKind,
        #[arg(short, long, default_value_t = false)]
        json: bool,
        #[arg(long = "tenant-id", num_args = 0..)]
        tenant_ids: Vec<TenantShardId>,
        #[arg(long = "post", default_value_t = false)]
        post_to_storcon: bool,
        #[arg(long, default_value = None)]
        /// For the safekeeper node_kind only: connection string of the db with the debug dump
        dump_db_connstr: Option<String>,
        /// For the safekeeper node_kind only: table in the db with the debug dump
        #[arg(long, default_value = None)]
        dump_db_table: Option<String>,
        /// For the safekeeper node_kind only: JSON list of timelines and their LSN info
        #[arg(long, default_value = None)]
        timeline_lsns: Option<String>,
    },
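    /// Download a tenant's remote data into a local directory.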
    TenantSnapshot {
        #[arg(long = "tenant-id")]
        tenant_id: TenantId,
        #[arg(long = "concurrency", short = 'j', default_value_t = 8)]
        concurrency: usize,
        #[arg(short, long)]
        output_path: Utf8PathBuf,
    },
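    /// Garbage-collect stale objects in pageserver remote storage. Full mode
    /// also erases ancestor shard layers and therefore requires the controller API.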
    PageserverPhysicalGc {
        #[arg(long = "tenant-id", num_args = 0..)]
        tenant_ids: Vec<TenantShardId>,
        #[arg(long = "min-age")]
        min_age: humantime::Duration,
        #[arg(short, long, default_value_t = GcMode::IndicesOnly)]
        mode: GcMode,
    },
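    /// List remote objects larger than `min_size` bytes, optionally ignoring delta layers.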
    FindLargeObjects {
        #[arg(long = "min-size")]
        min_size: u64,
        #[arg(short, long, default_value_t = false)]
        ignore_deltas: bool,
        #[arg(long = "concurrency", short = 'j', default_value_t = 64)]
        concurrency: usize,
    },
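    /// Scheduled maintenance: run pageserver physical GC, then scan pageserver metadata.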
    CronJob {
        // PageserverPhysicalGc
        #[arg(long = "min-age")]
        gc_min_age: humantime::Duration,
        #[arg(short, long, default_value_t = GcMode::IndicesOnly)]
        gc_mode: GcMode,
        // ScanMetadata
        #[arg(long = "post", default_value_t = false)]
        post_to_storcon: bool,
    },
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let cli = Cli::parse();

    let bucket_config = BucketConfig::from_env()?;

    let command_log_name = match &cli.command {
        Command::ScanMetadata { .. } => "scan",
        Command::FindGarbage { .. } => "find-garbage",
        Command::PurgeGarbage { .. } => "purge-garbage",
        Command::TenantSnapshot { .. } => "tenant-snapshot",
        Command::PageserverPhysicalGc { .. } => "pageserver-physical-gc",
        Command::FindLargeObjects { .. } => "find-large-objects",
        Command::CronJob { .. } => "cron-job",
    };
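    // The log file name encodes the binary name, subcommand, bucket and timestamp,
    // so successive runs do not overwrite each other.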
    let _guard = init_logging(&format!(
        "{}_{}_{}_{}.log",
        std::env::args().next().unwrap(),
        command_log_name,
        bucket_config.bucket_name().unwrap_or("nobucket"),
        chrono::Utc::now().format("%Y_%m_%d__%H_%M_%S")
    ));

    tracing::info!("version: {}, build_tag {}", GIT_VERSION, BUILD_TAG);

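    // The controller client is only built when `--controller-api` is given;
    // commands that require it (full GC, posting scan results) check for it below.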
    let controller_client = cli.controller_api.map(|controller_api| {
        ControllerClientConfig {
            controller_api,
            // Default to no key: this is a convenience when working in a development environment
            controller_jwt: cli.controller_jwt.unwrap_or("".to_owned()),
        }
        .build_client()
    });

    match cli.command {
        Command::ScanMetadata {
            json,
            tenant_ids,
            node_kind,
            post_to_storcon,
            dump_db_connstr,
            dump_db_table,
            timeline_lsns,
        } => {
            if let NodeKind::Safekeeper = node_kind {
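                // An explicit JSON list of timeline LSNs takes precedence over
                // the debug-dump database.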
                let db_or_list = match (timeline_lsns, dump_db_connstr) {
                    (Some(timeline_lsns), _) => {
                        let timeline_lsns = serde_json::from_str(&timeline_lsns)
                            .context("parsing timeline_lsns")?;
                        DatabaseOrList::List(timeline_lsns)
                    }
                    (None, Some(dump_db_connstr)) => {
                        let dump_db_table = dump_db_table
                            .ok_or_else(|| anyhow::anyhow!("dump_db_table not specified"))?;
                        let tenant_ids = tenant_ids.iter().map(|tshid| tshid.tenant_id).collect();
                        DatabaseOrList::Database {
                            tenant_ids,
                            connstr: dump_db_connstr,
                            table: dump_db_table,
                        }
                    }
                    (None, None) => anyhow::bail!(
                        "neither `timeline_lsns` nor `dump_db_connstr`/`dump_db_table` was specified"
                    ),
                };
                let summary = scan_safekeeper_metadata(bucket_config.clone(), db_or_list).await?;
                if json {
                    println!("{}", serde_json::to_string(&summary).unwrap())
                } else {
                    println!("{}", summary.summary_string());
                }
                if summary.is_fatal() {
                    bail!("Fatal scrub errors detected");
                }
                if summary.is_empty() {
                    // Strictly speaking an empty bucket is a valid bucket, but if someone ran the
                    // scrubber they were likely expecting to scan something, and if we see no
                    // timelines at all then it's likely due to a configuration issue such as a
                    // bad prefix.
                    bail!("No timelines found in {}", bucket_config.desc_str());
                }
                Ok(())
            } else {
                scan_pageserver_metadata_cmd(
                    bucket_config,
                    controller_client.as_ref(),
                    tenant_ids,
                    json,
                    post_to_storcon,
                    cli.exit_code,
                )
                .await
            }
        }
        Command::FindGarbage {
            node_kind,
            depth,
            output_path,
        } => {
            let console_config = ConsoleConfig::from_env()?;
            find_garbage(bucket_config, console_config, depth, node_kind, output_path).await
        }
        Command::PurgeGarbage {
            input_path,
            mode,
            min_age,
        } => purge_garbage(input_path, mode, min_age.into(), !cli.delete).await,
        Command::TenantSnapshot {
            tenant_id,
            output_path,
            concurrency,
        } => {
            let downloader =
                SnapshotDownloader::new(bucket_config, tenant_id, output_path, concurrency).await?;
            downloader.download().await
        }
        Command::PageserverPhysicalGc {
            tenant_ids,
            min_age,
            mode,
        } => {
            pageserver_physical_gc_cmd(
                &bucket_config,
                controller_client.as_ref(),
                tenant_ids,
                min_age,
                mode,
            )
            .await
        }
        Command::FindLargeObjects {
            min_size,
            ignore_deltas,
            concurrency,
        } => {
            let summary = find_large_objects::find_large_objects(
                bucket_config,
                min_size,
                ignore_deltas,
                concurrency,
            )
            .await?;
            println!("{}", serde_json::to_string(&summary).unwrap());
            Ok(())
        }
        Command::CronJob {
            gc_min_age,
            gc_mode,
            post_to_storcon,
        } => {
            run_cron_job(
                bucket_config,
                controller_client.as_ref(),
                gc_min_age,
                gc_mode,
                post_to_storcon,
                cli.exit_code,
            )
            .await
        }
    }
}

/// Runs the scrubber cron job:
/// 1. Run pageserver physical GC
/// 2. Scan pageserver metadata
pub async fn run_cron_job(
    bucket_config: BucketConfig,
    controller_client: Option<&control_api::Client>,
    gc_min_age: humantime::Duration,
    gc_mode: GcMode,
    post_to_storcon: bool,
    exit_code: bool,
) -> anyhow::Result<()> {
    tracing::info!(%gc_min_age, %gc_mode, "Running pageserver-physical-gc");
    pageserver_physical_gc_cmd(
        &bucket_config,
        controller_client,
        Vec::new(),
        gc_min_age,
        gc_mode,
    )
    .await?;
    tracing::info!(%post_to_storcon, node_kind = %NodeKind::Pageserver, "Running scan-metadata");
    scan_pageserver_metadata_cmd(
        bucket_config,
        controller_client,
        Vec::new(),
        true,
        post_to_storcon,
        exit_code,
    )
    .await?;

    Ok(())
}

pub async fn pageserver_physical_gc_cmd(
    bucket_config: &BucketConfig,
    controller_client: Option<&control_api::Client>,
    tenant_shard_ids: Vec<TenantShardId>,
    min_age: humantime::Duration,
    mode: GcMode,
) -> anyhow::Result<()> {
    match (controller_client, mode) {
        (Some(_), _) => {
            // Any mode may run when the controller API is set
        }
        (None, GcMode::Full) => {
            // The part of physical GC where we erase ancestor layers cannot be done safely without
            // confirming the most recent complete shard split with the controller. Refuse to run,
            // rather than doing it unsafely.
            return Err(anyhow!(
                "Full physical GC requires `--controller-api` and `--controller-jwt` to run"
            ));
        }
        (None, GcMode::DryRun | GcMode::IndicesOnly) => {
            // These GcModes do not require the controller to run.
        }
    }

    let summary = pageserver_physical_gc(
        bucket_config,
        controller_client,
        tenant_shard_ids,
        min_age.into(),
        mode,
    )
    .await?;
    println!("{}", serde_json::to_string(&summary).unwrap());
    Ok(())
}

pub async fn scan_pageserver_metadata_cmd(
    bucket_config: BucketConfig,
    controller_client: Option<&control_api::Client>,
    tenant_shard_ids: Vec<TenantShardId>,
    json: bool,
    post_to_storcon: bool,
    exit_code: bool,
) -> anyhow::Result<()> {
    if controller_client.is_none() && post_to_storcon {
        return Err(anyhow!("Posting pageserver scan health status to storage controller requires `--controller-api` and `--controller-jwt` to run"));
    }
    match scan_pageserver_metadata(bucket_config.clone(), tenant_shard_ids).await {
        Err(e) => {
            tracing::error!("Failed: {e}");
            Err(e)
        }
        Ok(summary) => {
            if json {
                println!("{}", serde_json::to_string(&summary).unwrap())
            } else {
                println!("{}", summary.summary_string());
            }

            if post_to_storcon {
                if let Some(client) = controller_client {
                    let body = summary.build_health_update_request();
                    client
                        .dispatch::<MetadataHealthUpdateRequest, MetadataHealthUpdateResponse>(
                            Method::POST,
                            "control/v1/metadata_health/update".to_string(),
                            Some(body),
                        )
                        .await?;
                }
            }

            if summary.is_fatal() {
                tracing::error!("Fatal scrub errors detected");
                if exit_code {
                    std::process::exit(1);
                }
            } else if summary.is_empty() {
                // Strictly speaking an empty bucket is a valid bucket, but if someone ran the
                // scrubber they were likely expecting to scan something, and if we see no
                // timelines at all then it's likely due to a configuration issue such as a
                // bad prefix.
                tracing::error!("No timelines found in {}", bucket_config.desc_str());
                if exit_code {
                    std::process::exit(1);
                }
            }

            Ok(())
        }
    }
}