Line data Source code
1 : use futures::StreamExt;
2 : use std::{
3 : collections::{HashMap, HashSet},
4 : str::FromStr,
5 : time::Duration,
6 : };
7 :
8 : use clap::{Parser, Subcommand};
9 : use pageserver_api::{
10 : controller_api::{
11 : AvailabilityZone, NodeAvailabilityWrapper, NodeDescribeResponse, NodeShardResponse,
12 : SafekeeperDescribeResponse, SafekeeperSchedulingPolicyRequest, ShardSchedulingPolicy,
13 : ShardsPreferredAzsRequest, ShardsPreferredAzsResponse, SkSchedulingPolicy,
14 : TenantCreateRequest, TenantDescribeResponse, TenantPolicyRequest,
15 : },
16 : models::{
17 : EvictionPolicy, EvictionPolicyLayerAccessThreshold, LocationConfigSecondary,
18 : ShardParameters, TenantConfig, TenantConfigPatchRequest, TenantConfigRequest,
19 : TenantShardSplitRequest, TenantShardSplitResponse,
20 : },
21 : shard::{ShardStripeSize, TenantShardId},
22 : };
23 : use pageserver_client::mgmt_api::{self};
24 : use reqwest::{Method, StatusCode, Url};
25 : use utils::id::{NodeId, TenantId, TimelineId};
26 :
27 : use pageserver_api::controller_api::{
28 : NodeConfigureRequest, NodeRegisterRequest, NodeSchedulingPolicy, PlacementPolicy,
29 : TenantShardMigrateRequest, TenantShardMigrateResponse,
30 : };
31 : use storage_controller_client::control_api::Client;
32 :
33 : #[derive(Subcommand, Debug)]
34 : enum Command {
35 : /// Register a pageserver with the storage controller. This shouldn't usually be necessary,
36 : /// since pageservers auto-register when they start up
37 : NodeRegister {
38 : #[arg(long)]
39 0 : node_id: NodeId,
40 :
41 : #[arg(long)]
42 0 : listen_pg_addr: String,
43 : #[arg(long)]
44 0 : listen_pg_port: u16,
45 :
46 : #[arg(long)]
47 0 : listen_http_addr: String,
48 : #[arg(long)]
49 0 : listen_http_port: u16,
50 : #[arg(long)]
51 0 : availability_zone_id: String,
52 : },
53 :
54 : /// Modify a node's configuration in the storage controller
55 : NodeConfigure {
56 : #[arg(long)]
57 0 : node_id: NodeId,
58 :
59 : /// Availability is usually auto-detected based on heartbeats. Set 'offline' here to
60 : /// manually mark a node offline
61 : #[arg(long)]
62 : availability: Option<NodeAvailabilityArg>,
63 : /// Scheduling policy controls whether tenant shards may be scheduled onto this node.
64 : #[arg(long)]
65 : scheduling: Option<NodeSchedulingPolicy>,
66 : },
67 : NodeDelete {
68 : #[arg(long)]
69 0 : node_id: NodeId,
70 : },
71 : /// Modify a tenant's policies in the storage controller
72 : TenantPolicy {
73 : #[arg(long)]
74 0 : tenant_id: TenantId,
75 : /// Placement policy controls whether a tenant is `detached`, has only a secondary location (`secondary`),
76 : /// or is in the normal attached state with N secondary locations (`attached:N`)
77 : #[arg(long)]
78 : placement: Option<PlacementPolicyArg>,
79 : /// Scheduling policy enables pausing the controller's scheduling activity involving this tenant. `active` is normal,
80 : /// `essential` disables optimization scheduling changes, `pause` disables all scheduling changes, and `stop` prevents
81 : /// all reconciliation activity including for scheduling changes already made. `pause` and `stop` can make a tenant
82 : /// unavailable, and are only for use in emergencies.
83 : #[arg(long)]
84 : scheduling: Option<ShardSchedulingPolicyArg>,
85 : },
86 : /// List nodes known to the storage controller
87 : Nodes {},
88 : /// List tenants known to the storage controller
89 : Tenants {
90 : /// If set, list only the tenants on the specified node
91 : node_id: Option<NodeId>,
92 : },
93 : /// Create a new tenant in the storage controller, and by extension on pageservers.
94 : TenantCreate {
95 : #[arg(long)]
96 0 : tenant_id: TenantId,
97 : },
98 : /// Delete a tenant in the storage controller, and by extension on pageservers.
99 : TenantDelete {
100 : #[arg(long)]
101 0 : tenant_id: TenantId,
102 : },
103 : /// Split an existing tenant into a higher number of shards than its current shard count.
104 : TenantShardSplit {
105 : #[arg(long)]
106 0 : tenant_id: TenantId,
107 : #[arg(long)]
108 0 : shard_count: u8,
109 : /// Optional, in 8kiB pages. e.g. set 2048 for 16MB stripes.
110 : #[arg(long)]
111 : stripe_size: Option<u32>,
112 : },
113 : /// Migrate the attached location for a tenant shard to a specific pageserver.
114 : TenantShardMigrate {
115 : #[arg(long)]
116 0 : tenant_shard_id: TenantShardId,
117 : #[arg(long)]
118 0 : node: NodeId,
119 : },
120 : /// Migrate the secondary location for a tenant shard to a specific pageserver.
121 : TenantShardMigrateSecondary {
122 : #[arg(long)]
123 0 : tenant_shard_id: TenantShardId,
124 : #[arg(long)]
125 0 : node: NodeId,
126 : },
127 : /// Cancel any ongoing reconciliation for this shard
128 : TenantShardCancelReconcile {
129 : #[arg(long)]
130 0 : tenant_shard_id: TenantShardId,
131 : },
132 : /// Set the pageserver tenant configuration of a tenant: this is the configuration structure
133 : /// that is passed through to pageservers, and does not affect storage controller behavior.
134 : /// Any previous tenant configs are overwritten.
135 : SetTenantConfig {
136 : #[arg(long)]
137 0 : tenant_id: TenantId,
138 : #[arg(long)]
139 0 : config: String,
140 : },
141 : /// Patch the pageserver tenant configuration of a tenant. Any fields with null values in the
142 : /// provided JSON are unset from the tenant config and all fields with non-null values are set.
143 : /// Unspecified fields are not changed.
144 : PatchTenantConfig {
145 : #[arg(long)]
146 0 : tenant_id: TenantId,
147 : #[arg(long)]
148 0 : config: String,
149 : },
150 : /// Print details about a particular tenant, including all its shards' states.
151 : TenantDescribe {
152 : #[arg(long)]
153 0 : tenant_id: TenantId,
154 : },
155 : /// For a tenant which hasn't been onboarded to the storage controller yet, add it in secondary
156 : /// mode so that it can warm up content on a pageserver.
157 : TenantWarmup {
158 : #[arg(long)]
159 0 : tenant_id: TenantId,
160 : },
161 : TenantSetPreferredAz {
162 : #[arg(long)]
163 0 : tenant_id: TenantId,
164 : #[arg(long)]
165 : preferred_az: Option<String>,
166 : },
167 : /// Uncleanly drop a tenant from the storage controller: this doesn't delete anything from pageservers. Appropriate
168 : /// if you e.g. used `tenant-warmup` by mistake on a tenant ID that doesn't really exist, or is in some other region.
169 : TenantDrop {
170 : #[arg(long)]
171 0 : tenant_id: TenantId,
172 : #[arg(long)]
173 0 : unclean: bool,
174 : },
175 : NodeDrop {
176 : #[arg(long)]
177 0 : node_id: NodeId,
178 : #[arg(long)]
179 0 : unclean: bool,
180 : },
181 : TenantSetTimeBasedEviction {
182 : #[arg(long)]
183 0 : tenant_id: TenantId,
184 : #[arg(long)]
185 0 : period: humantime::Duration,
186 : #[arg(long)]
187 0 : threshold: humantime::Duration,
188 : },
189 : // Migrate away from a set of specified pageservers by moving the primary attachments to pageservers
190 : // outside of the specified set.
191 : BulkMigrate {
192 : // Set of pageserver node ids to drain.
193 : #[arg(long)]
194 0 : nodes: Vec<NodeId>,
195 : // Optional: migration concurrency (default is 8)
196 : #[arg(long)]
197 : concurrency: Option<usize>,
198 : // Optional: maximum number of shards to migrate
199 : #[arg(long)]
200 : max_shards: Option<usize>,
201 : // Optional: when set to true, nothing is migrated, but the plan is printed to stdout
202 : #[arg(long)]
203 : dry_run: Option<bool>,
204 : },
205 : /// Start draining the specified pageserver.
206 : /// The drain is complete when the scheduling policy returns to active.
207 : StartDrain {
208 : #[arg(long)]
209 0 : node_id: NodeId,
210 : },
211 : /// Cancel draining the specified pageserver and wait for `timeout`
212 : /// for the operation to be canceled. May be retried.
213 : CancelDrain {
214 : #[arg(long)]
215 0 : node_id: NodeId,
216 : #[arg(long)]
217 0 : timeout: humantime::Duration,
218 : },
219 : /// Start filling the specified pageserver.
220 : /// The fill is complete when the scheduling policy returns to active.
221 : StartFill {
222 : #[arg(long)]
223 0 : node_id: NodeId,
224 : },
225 : /// Cancel filling the specified pageserver and wait for `timeout`
226 : /// for the operation to be canceled. May be retried.
227 : CancelFill {
228 : #[arg(long)]
229 0 : node_id: NodeId,
230 : #[arg(long)]
231 0 : timeout: humantime::Duration,
232 : },
233 : /// List safekeepers known to the storage controller
234 : Safekeepers {},
235 : /// Set the scheduling policy of the specified safekeeper
236 : SafekeeperScheduling {
237 : #[arg(long)]
238 0 : node_id: NodeId,
239 : #[arg(long)]
240 0 : scheduling_policy: SkSchedulingPolicyArg,
241 : },
242 : /// Downloads any missing heatmap layers for all shards of a given timeline
243 : DownloadHeatmapLayers {
244 : /// Tenant ID or tenant shard ID. When an unsharded tenant ID is specified,
245 : /// the operation is performed on all shards. When a sharded tenant ID is
246 : /// specified, the operation is only performed on the specified shard.
247 : #[arg(long)]
248 0 : tenant_shard_id: TenantShardId,
249 : #[arg(long)]
250 0 : timeline_id: TimelineId,
251 : /// Optional: Maximum download concurrency (default is 16)
252 : #[arg(long)]
253 : concurrency: Option<usize>,
254 : },
255 : }
256 :
257 : #[derive(Parser)]
258 : #[command(
259 : author,
260 : version,
261 : about,
262 : long_about = "CLI for Storage Controller Support/Debug"
263 : )]
264 : #[command(arg_required_else_help(true))]
265 : struct Cli {
266 : #[arg(long)]
267 : /// URL to storage controller. e.g. http://127.0.0.1:1234 when using `neon_local`
268 0 : api: Url,
269 :
270 : #[arg(long)]
271 : /// JWT token for authenticating with storage controller. Depending on the API used, this
272 : /// should have either `pageserverapi` or `admin` scopes: for convenience, you should mint
273 : /// a token with both scopes to use with this tool.
274 : jwt: Option<String>,
275 :
276 : #[command(subcommand)]
277 : command: Command,
278 : }
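// A minimal sketch of how clap's derive attributes above map an argument vector onto
// the Cli and Command types. The binary name and URL here are illustrative assumptions,
// not values taken from this file.
fn example_parse() -> anyhow::Result<()> {
    let cli = Cli::try_parse_from(["storcon_cli", "--api", "http://127.0.0.1:1234", "nodes"])?;
    assert!(matches!(cli.command, Command::Nodes {}));
    Ok(())
}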
279 :
280 : #[derive(Debug, Clone)]
281 : struct PlacementPolicyArg(PlacementPolicy);
282 :
283 : impl FromStr for PlacementPolicyArg {
284 : type Err = anyhow::Error;
285 :
286 0 : fn from_str(s: &str) -> Result<Self, Self::Err> {
287 0 : match s {
288 0 : "detached" => Ok(Self(PlacementPolicy::Detached)),
289 0 : "secondary" => Ok(Self(PlacementPolicy::Secondary)),
290 0 : _ if s.starts_with("attached:") => {
291 0 : let mut splitter = s.split(':');
292 0 : let _prefix = splitter.next().unwrap();
293 0 : match splitter.next().and_then(|s| s.parse::<usize>().ok()) {
294 0 : Some(n) => Ok(Self(PlacementPolicy::Attached(n))),
295 0 : None => Err(anyhow::anyhow!(
296 0 : "Invalid format '{s}', a valid example is 'attached:1'"
297 0 : )),
298 : }
299 : }
300 0 : _ => Err(anyhow::anyhow!(
301 0 : "Unknown placement policy '{s}', try detached,secondary,attached:<n>"
302 0 : )),
303 : }
304 0 : }
305 : }
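// A hedged sketch of the three accepted --placement forms parsed by the impl above;
// the concrete values are illustrative.
fn placement_examples() -> anyhow::Result<()> {
    assert!(matches!(
        PlacementPolicyArg::from_str("attached:1")?.0,
        PlacementPolicy::Attached(1)
    ));
    assert!(matches!(
        PlacementPolicyArg::from_str("secondary")?.0,
        PlacementPolicy::Secondary
    ));
    assert!(matches!(
        PlacementPolicyArg::from_str("detached")?.0,
        PlacementPolicy::Detached
    ));
    Ok(())
}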
306 :
307 : #[derive(Debug, Clone)]
308 : struct SkSchedulingPolicyArg(SkSchedulingPolicy);
309 :
310 : impl FromStr for SkSchedulingPolicyArg {
311 : type Err = anyhow::Error;
312 :
313 0 : fn from_str(s: &str) -> Result<Self, Self::Err> {
314 0 : SkSchedulingPolicy::from_str(s).map(Self)
315 0 : }
316 : }
317 :
318 : #[derive(Debug, Clone)]
319 : struct ShardSchedulingPolicyArg(ShardSchedulingPolicy);
320 :
321 : impl FromStr for ShardSchedulingPolicyArg {
322 : type Err = anyhow::Error;
323 :
324 0 : fn from_str(s: &str) -> Result<Self, Self::Err> {
325 0 : match s {
326 0 : "active" => Ok(Self(ShardSchedulingPolicy::Active)),
327 0 : "essential" => Ok(Self(ShardSchedulingPolicy::Essential)),
328 0 : "pause" => Ok(Self(ShardSchedulingPolicy::Pause)),
329 0 : "stop" => Ok(Self(ShardSchedulingPolicy::Stop)),
330 0 : _ => Err(anyhow::anyhow!(
331 0 : "Unknown scheduling policy '{s}', try active,essential,pause,stop"
332 0 : )),
333 : }
334 0 : }
335 : }
336 :
337 : #[derive(Debug, Clone)]
338 : struct NodeAvailabilityArg(NodeAvailabilityWrapper);
339 :
340 : impl FromStr for NodeAvailabilityArg {
341 : type Err = anyhow::Error;
342 :
343 0 : fn from_str(s: &str) -> Result<Self, Self::Err> {
344 0 : match s {
345 0 : "active" => Ok(Self(NodeAvailabilityWrapper::Active)),
346 0 : "offline" => Ok(Self(NodeAvailabilityWrapper::Offline)),
347 0 : _ => Err(anyhow::anyhow!("Unknown availability state '{s}'")),
348 : }
349 0 : }
350 : }
351 :
352 0 : async fn wait_for_scheduling_policy<F>(
353 0 : client: Client,
354 0 : node_id: NodeId,
355 0 : timeout: Duration,
356 0 : f: F,
357 0 : ) -> anyhow::Result<NodeSchedulingPolicy>
358 0 : where
359 0 : F: Fn(NodeSchedulingPolicy) -> bool,
360 0 : {
361 0 : let waiter = tokio::time::timeout(timeout, async move {
362 : loop {
363 0 : let node = client
364 0 : .dispatch::<(), NodeDescribeResponse>(
365 0 : Method::GET,
366 0 : format!("control/v1/node/{node_id}"),
367 0 : None,
368 0 : )
369 0 : .await?;
370 :
371 0 : if f(node.scheduling) {
372 0 : return Ok::<NodeSchedulingPolicy, mgmt_api::Error>(node.scheduling);
373 0 : }
374 : }
375 0 : });
376 0 :
377 0 : Ok(waiter.await??)
378 0 : }
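// A hedged usage sketch of the helper above, mirroring how the cancel-drain path calls
// it further down in this file; the 60-second timeout is an illustrative value.
async fn example_wait(client: Client, node_id: NodeId) -> anyhow::Result<()> {
    let final_policy =
        wait_for_scheduling_policy(client, node_id, Duration::from_secs(60), |sched| {
            matches!(sched, NodeSchedulingPolicy::Active)
        })
        .await?;
    println!("Node {node_id} settled on scheduling policy {final_policy:?}");
    Ok(())
}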
379 :
380 : #[tokio::main]
381 0 : async fn main() -> anyhow::Result<()> {
382 0 : let cli = Cli::parse();
383 0 :
384 0 : let storcon_client = Client::new(cli.api.clone(), cli.jwt.clone());
385 0 :
386 0 : let mut trimmed = cli.api.to_string();
387 0 : trimmed.pop();
388 0 : let vps_client = mgmt_api::Client::new(trimmed, cli.jwt.as_deref());
389 0 :
390 0 : match cli.command {
391 0 : Command::NodeRegister {
392 0 : node_id,
393 0 : listen_pg_addr,
394 0 : listen_pg_port,
395 0 : listen_http_addr,
396 0 : listen_http_port,
397 0 : availability_zone_id,
398 0 : } => {
399 0 : storcon_client
400 0 : .dispatch::<_, ()>(
401 0 : Method::POST,
402 0 : "control/v1/node".to_string(),
403 0 : Some(NodeRegisterRequest {
404 0 : node_id,
405 0 : listen_pg_addr,
406 0 : listen_pg_port,
407 0 : listen_http_addr,
408 0 : listen_http_port,
409 0 : availability_zone_id: AvailabilityZone(availability_zone_id),
410 0 : }),
411 0 : )
412 0 : .await?;
413 0 : }
414 0 : Command::TenantCreate { tenant_id } => {
415 0 : storcon_client
416 0 : .dispatch::<_, ()>(
417 0 : Method::POST,
418 0 : "v1/tenant".to_string(),
419 0 : Some(TenantCreateRequest {
420 0 : new_tenant_id: TenantShardId::unsharded(tenant_id),
421 0 : generation: None,
422 0 : shard_parameters: ShardParameters::default(),
423 0 : placement_policy: Some(PlacementPolicy::Attached(1)),
424 0 : config: TenantConfig::default(),
425 0 : }),
426 0 : )
427 0 : .await?;
428 0 : }
429 0 : Command::TenantDelete { tenant_id } => {
430 0 : let status = vps_client
431 0 : .tenant_delete(TenantShardId::unsharded(tenant_id))
432 0 : .await?;
433 0 : tracing::info!("Delete status: {}", status);
434 0 : }
435 0 : Command::Nodes {} => {
436 0 : let mut resp = storcon_client
437 0 : .dispatch::<(), Vec<NodeDescribeResponse>>(
438 0 : Method::GET,
439 0 : "control/v1/node".to_string(),
440 0 : None,
441 0 : )
442 0 : .await?;
443 0 :
444 0 : resp.sort_by(|a, b| a.listen_http_addr.cmp(&b.listen_http_addr));
445 0 :
446 0 : let mut table = comfy_table::Table::new();
447 0 : table.set_header(["Id", "Hostname", "AZ", "Scheduling", "Availability"]);
448 0 : for node in resp {
449 0 : table.add_row([
450 0 : format!("{}", node.id),
451 0 : node.listen_http_addr,
452 0 : node.availability_zone_id,
453 0 : format!("{:?}", node.scheduling),
454 0 : format!("{:?}", node.availability),
455 0 : ]);
456 0 : }
457 0 : println!("{table}");
458 0 : }
459 0 : Command::NodeConfigure {
460 0 : node_id,
461 0 : availability,
462 0 : scheduling,
463 0 : } => {
464 0 : let req = NodeConfigureRequest {
465 0 : node_id,
466 0 : availability: availability.map(|a| a.0),
467 0 : scheduling,
468 0 : };
469 0 : storcon_client
470 0 : .dispatch::<_, ()>(
471 0 : Method::PUT,
472 0 : format!("control/v1/node/{node_id}/config"),
473 0 : Some(req),
474 0 : )
475 0 : .await?;
476 0 : }
477 0 : Command::Tenants {
478 0 : node_id: Some(node_id),
479 0 : } => {
480 0 : let describe_response = storcon_client
481 0 : .dispatch::<(), NodeShardResponse>(
482 0 : Method::GET,
483 0 : format!("control/v1/node/{node_id}/shards"),
484 0 : None,
485 0 : )
486 0 : .await?;
487 0 : let shards = describe_response.shards;
488 0 : let mut table = comfy_table::Table::new();
489 0 : table.set_header([
490 0 : "Shard",
491 0 : "Intended Primary/Secondary",
492 0 : "Observed Primary/Secondary",
493 0 : ]);
494 0 : for shard in shards {
495 0 : table.add_row([
496 0 : format!("{}", shard.tenant_shard_id),
497 0 : match shard.is_intended_secondary {
498 0 : None => "".to_string(),
499 0 : Some(true) => "Secondary".to_string(),
500 0 : Some(false) => "Primary".to_string(),
501 0 : },
502 0 : match shard.is_observed_secondary {
503 0 : None => "".to_string(),
504 0 : Some(true) => "Secondary".to_string(),
505 0 : Some(false) => "Primary".to_string(),
506 0 : },
507 0 : ]);
508 0 : }
509 0 : println!("{table}");
510 0 : }
511 0 : Command::Tenants { node_id: None } => {
512 0 : // Set up output formatting
513 0 : let mut table = comfy_table::Table::new();
514 0 : table.set_header([
515 0 : "TenantId",
516 0 : "Preferred AZ",
517 0 : "ShardCount",
518 0 : "StripeSize",
519 0 : "Placement",
520 0 : "Scheduling",
521 0 : ]);
522 0 :
523 0 : // Pagination loop over listing API
524 0 : let mut start_after = None;
525 0 : const LIMIT: usize = 1000;
526 0 : loop {
527 0 : let path = match start_after {
528 0 : None => format!("control/v1/tenant?limit={LIMIT}"),
529 0 : Some(start_after) => {
530 0 : format!("control/v1/tenant?limit={LIMIT}&start_after={start_after}")
531 0 : }
532 0 : };
533 0 :
534 0 : let resp = storcon_client
535 0 : .dispatch::<(), Vec<TenantDescribeResponse>>(Method::GET, path, None)
536 0 : .await?;
537 0 :
538 0 : if resp.is_empty() {
539 0 : // End of data reached
540 0 : break;
541 0 : }
542 0 :
543 0 : // Give some visual feedback while we're building up the table (comfy_table doesn't have
544 0 : // streaming output)
545 0 : if resp.len() >= LIMIT {
546 0 : eprint!(".");
547 0 : }
548 0 :
549 0 : start_after = Some(resp.last().unwrap().tenant_id);
550 0 :
551 0 : for tenant in resp {
552 0 : let shard_zero = tenant.shards.into_iter().next().unwrap();
553 0 : table.add_row([
554 0 : format!("{}", tenant.tenant_id),
555 0 : shard_zero
556 0 : .preferred_az_id
557 0 : .as_ref()
558 0 : .cloned()
559 0 : .unwrap_or("".to_string()),
560 0 : format!("{}", shard_zero.tenant_shard_id.shard_count.literal()),
561 0 : format!("{:?}", tenant.stripe_size),
562 0 : format!("{:?}", tenant.policy),
563 0 : format!("{:?}", shard_zero.scheduling_policy),
564 0 : ]);
565 0 : }
566 0 : }
567 0 :
568 0 : // Terminate progress dots
569 0 : if table.row_count() > LIMIT {
570 0 : eprint!("");
571 0 : }
572 0 :
573 0 : println!("{table}");
574 0 : }
575 0 : Command::TenantPolicy {
576 0 : tenant_id,
577 0 : placement,
578 0 : scheduling,
579 0 : } => {
580 0 : let req = TenantPolicyRequest {
581 0 : scheduling: scheduling.map(|s| s.0),
582 0 : placement: placement.map(|p| p.0),
583 0 : };
584 0 : storcon_client
585 0 : .dispatch::<_, ()>(
586 0 : Method::PUT,
587 0 : format!("control/v1/tenant/{tenant_id}/policy"),
588 0 : Some(req),
589 0 : )
590 0 : .await?;
591 0 : }
592 0 : Command::TenantShardSplit {
593 0 : tenant_id,
594 0 : shard_count,
595 0 : stripe_size,
596 0 : } => {
597 0 : let req = TenantShardSplitRequest {
598 0 : new_shard_count: shard_count,
599 0 : new_stripe_size: stripe_size.map(ShardStripeSize),
600 0 : };
601 0 :
602 0 : let response = storcon_client
603 0 : .dispatch::<TenantShardSplitRequest, TenantShardSplitResponse>(
604 0 : Method::PUT,
605 0 : format!("control/v1/tenant/{tenant_id}/shard_split"),
606 0 : Some(req),
607 0 : )
608 0 : .await?;
609 0 : println!(
610 0 : "Split tenant {} into {} shards: {}",
611 0 : tenant_id,
612 0 : shard_count,
613 0 : response
614 0 : .new_shards
615 0 : .iter()
616 0 : .map(|s| format!("{:?}", s))
617 0 : .collect::<Vec<_>>()
618 0 : .join(",")
619 0 : );
620 0 : }
621 0 : Command::TenantShardMigrate {
622 0 : tenant_shard_id,
623 0 : node,
624 0 : } => {
625 0 : let req = TenantShardMigrateRequest {
626 0 : node_id: node,
627 0 : migration_config: None,
628 0 : };
629 0 :
630 0 : storcon_client
631 0 : .dispatch::<TenantShardMigrateRequest, TenantShardMigrateResponse>(
632 0 : Method::PUT,
633 0 : format!("control/v1/tenant/{tenant_shard_id}/migrate"),
634 0 : Some(req),
635 0 : )
636 0 : .await?;
637 0 : }
638 0 : Command::TenantShardMigrateSecondary {
639 0 : tenant_shard_id,
640 0 : node,
641 0 : } => {
642 0 : let req = TenantShardMigrateRequest {
643 0 : node_id: node,
644 0 : migration_config: None,
645 0 : };
646 0 :
647 0 : storcon_client
648 0 : .dispatch::<TenantShardMigrateRequest, TenantShardMigrateResponse>(
649 0 : Method::PUT,
650 0 : format!("control/v1/tenant/{tenant_shard_id}/migrate_secondary"),
651 0 : Some(req),
652 0 : )
653 0 : .await?;
654 0 : }
655 0 : Command::TenantShardCancelReconcile { tenant_shard_id } => {
656 0 : storcon_client
657 0 : .dispatch::<(), ()>(
658 0 : Method::PUT,
659 0 : format!("control/v1/tenant/{tenant_shard_id}/cancel_reconcile"),
660 0 : None,
661 0 : )
662 0 : .await?;
663 0 : }
664 0 : Command::SetTenantConfig { tenant_id, config } => {
665 0 : let tenant_conf = serde_json::from_str(&config)?;
666 0 :
667 0 : vps_client
668 0 : .set_tenant_config(&TenantConfigRequest {
669 0 : tenant_id,
670 0 : config: tenant_conf,
671 0 : })
672 0 : .await?;
673 0 : }
674 0 : Command::PatchTenantConfig { tenant_id, config } => {
675 0 : let tenant_conf = serde_json::from_str(&config)?;
676 0 :
677 0 : vps_client
678 0 : .patch_tenant_config(&TenantConfigPatchRequest {
679 0 : tenant_id,
680 0 : config: tenant_conf,
681 0 : })
682 0 : .await?;
683 0 : }
684 0 : Command::TenantDescribe { tenant_id } => {
685 0 : let TenantDescribeResponse {
686 0 : tenant_id,
687 0 : shards,
688 0 : stripe_size,
689 0 : policy,
690 0 : config,
691 0 : } = storcon_client
692 0 : .dispatch::<(), TenantDescribeResponse>(
693 0 : Method::GET,
694 0 : format!("control/v1/tenant/{tenant_id}"),
695 0 : None,
696 0 : )
697 0 : .await?;
698 0 :
699 0 : let nodes = storcon_client
700 0 : .dispatch::<(), Vec<NodeDescribeResponse>>(
701 0 : Method::GET,
702 0 : "control/v1/node".to_string(),
703 0 : None,
704 0 : )
705 0 : .await?;
706 0 : let nodes = nodes
707 0 : .into_iter()
708 0 : .map(|n| (n.id, n))
709 0 : .collect::<HashMap<_, _>>();
710 0 :
711 0 : println!("Tenant {tenant_id}");
712 0 : let mut table = comfy_table::Table::new();
713 0 : table.add_row(["Policy", &format!("{:?}", policy)]);
714 0 : table.add_row(["Stripe size", &format!("{:?}", stripe_size)]);
715 0 : table.add_row(["Config", &serde_json::to_string_pretty(&config).unwrap()]);
716 0 : println!("{table}");
717 0 : println!("Shards:");
718 0 : let mut table = comfy_table::Table::new();
719 0 : table.set_header([
720 0 : "Shard",
721 0 : "Attached",
722 0 : "Attached AZ",
723 0 : "Secondary",
724 0 : "Last error",
725 0 : "status",
726 0 : ]);
727 0 : for shard in shards {
728 0 : let secondary = shard
729 0 : .node_secondary
730 0 : .iter()
731 0 : .map(|n| format!("{}", n))
732 0 : .collect::<Vec<_>>()
733 0 : .join(",");
734 0 :
735 0 : let mut status_parts = Vec::new();
736 0 : if shard.is_reconciling {
737 0 : status_parts.push("reconciling");
738 0 : }
739 0 :
740 0 : if shard.is_pending_compute_notification {
741 0 : status_parts.push("pending_compute");
742 0 : }
743 0 :
744 0 : if shard.is_splitting {
745 0 : status_parts.push("splitting");
746 0 : }
747 0 : let status = status_parts.join(",");
748 0 :
749 0 : let attached_node = shard
750 0 : .node_attached
751 0 : .as_ref()
752 0 : .map(|id| nodes.get(id).expect("Shard references nonexistent node"));
753 0 :
754 0 : table.add_row([
755 0 : format!("{}", shard.tenant_shard_id),
756 0 : attached_node
757 0 : .map(|n| format!("{} ({})", n.listen_http_addr, n.id))
758 0 : .unwrap_or(String::new()),
759 0 : attached_node
760 0 : .map(|n| n.availability_zone_id.clone())
761 0 : .unwrap_or(String::new()),
762 0 : secondary,
763 0 : shard.last_error,
764 0 : status,
765 0 : ]);
766 0 : }
767 0 : println!("{table}");
768 0 : }
769 0 : Command::TenantSetPreferredAz {
770 0 : tenant_id,
771 0 : preferred_az,
772 0 : } => {
773 0 : // First learn about the tenant's shards
774 0 : let describe_response = storcon_client
775 0 : .dispatch::<(), TenantDescribeResponse>(
776 0 : Method::GET,
777 0 : format!("control/v1/tenant/{tenant_id}"),
778 0 : None,
779 0 : )
780 0 : .await?;
781 0 :
782 0 : // Learn about nodes to validate the AZ ID
783 0 : let nodes = storcon_client
784 0 : .dispatch::<(), Vec<NodeDescribeResponse>>(
785 0 : Method::GET,
786 0 : "control/v1/node".to_string(),
787 0 : None,
788 0 : )
789 0 : .await?;
790 0 :
791 0 : if let Some(preferred_az) = &preferred_az {
792 0 : let azs = nodes
793 0 : .into_iter()
794 0 : .map(|n| (n.availability_zone_id))
795 0 : .collect::<HashSet<_>>();
796 0 : if !azs.contains(preferred_az) {
797 0 : anyhow::bail!(
798 0 : "AZ {} not found on any node: known AZs are: {:?}",
799 0 : preferred_az,
800 0 : azs
801 0 : );
802 0 : }
803 0 : } else {
804 0 : // Make it obvious to the user that since they've omitted an AZ, we're clearing it
805 0 : eprintln!("Clearing preferred AZ for tenant {}", tenant_id);
806 0 : }
807 0 :
808 0 : // Construct a request that modifies all the tenant's shards
809 0 : let req = ShardsPreferredAzsRequest {
810 0 : preferred_az_ids: describe_response
811 0 : .shards
812 0 : .into_iter()
813 0 : .map(|s| {
814 0 : (
815 0 : s.tenant_shard_id,
816 0 : preferred_az.clone().map(AvailabilityZone),
817 0 : )
818 0 : })
819 0 : .collect(),
820 0 : };
821 0 : storcon_client
822 0 : .dispatch::<ShardsPreferredAzsRequest, ShardsPreferredAzsResponse>(
823 0 : Method::PUT,
824 0 : "control/v1/preferred_azs".to_string(),
825 0 : Some(req),
826 0 : )
827 0 : .await?;
828 0 : }
829 0 : Command::TenantWarmup { tenant_id } => {
830 0 : let describe_response = storcon_client
831 0 : .dispatch::<(), TenantDescribeResponse>(
832 0 : Method::GET,
833 0 : format!("control/v1/tenant/{tenant_id}"),
834 0 : None,
835 0 : )
836 0 : .await;
837 0 : match describe_response {
838 0 : Ok(describe) => {
839 0 : if matches!(describe.policy, PlacementPolicy::Secondary) {
840 0 : // Fine: it's already known to controller in secondary mode: calling
841 0 : // again to put it into secondary mode won't cause problems.
842 0 : } else {
843 0 : anyhow::bail!("Tenant already present with policy {:?}", describe.policy);
844 0 : }
845 0 : }
846 0 : Err(mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, _)) => {
847 0 : // Fine: this tenant isn't know to the storage controller yet.
848 0 : }
849 0 : Err(e) => {
850 0 : // Unexpected API error
851 0 : return Err(e.into());
852 0 : }
853 0 : }
854 0 :
855 0 : vps_client
856 0 : .location_config(
857 0 : TenantShardId::unsharded(tenant_id),
858 0 : pageserver_api::models::LocationConfig {
859 0 : mode: pageserver_api::models::LocationConfigMode::Secondary,
860 0 : generation: None,
861 0 : secondary_conf: Some(LocationConfigSecondary { warm: true }),
862 0 : shard_number: 0,
863 0 : shard_count: 0,
864 0 : shard_stripe_size: ShardParameters::DEFAULT_STRIPE_SIZE.0,
865 0 : tenant_conf: TenantConfig::default(),
866 0 : },
867 0 : None,
868 0 : true,
869 0 : )
870 0 : .await?;
871 0 :
872 0 : let describe_response = storcon_client
873 0 : .dispatch::<(), TenantDescribeResponse>(
874 0 : Method::GET,
875 0 : format!("control/v1/tenant/{tenant_id}"),
876 0 : None,
877 0 : )
878 0 : .await?;
879 0 :
880 0 : let secondary_ps_id = describe_response
881 0 : .shards
882 0 : .first()
883 0 : .unwrap()
884 0 : .node_secondary
885 0 : .first()
886 0 : .unwrap();
887 0 :
888 0 : println!("Tenant {tenant_id} warming up on pageserver {secondary_ps_id}");
889 0 : loop {
890 0 : let (status, progress) = vps_client
891 0 : .tenant_secondary_download(
892 0 : TenantShardId::unsharded(tenant_id),
893 0 : Some(Duration::from_secs(10)),
894 0 : )
895 0 : .await?;
896 0 : println!(
897 0 : "Progress: {}/{} layers, {}/{} bytes",
898 0 : progress.layers_downloaded,
899 0 : progress.layers_total,
900 0 : progress.bytes_downloaded,
901 0 : progress.bytes_total
902 0 : );
903 0 : match status {
904 0 : StatusCode::OK => {
905 0 : println!("Download complete");
906 0 : break;
907 0 : }
908 0 : StatusCode::ACCEPTED => {
909 0 : // Loop
910 0 : }
911 0 : _ => {
912 0 : anyhow::bail!("Unexpected download status: {status}");
913 0 : }
914 0 : }
915 0 : }
916 0 : }
917 0 : Command::TenantDrop { tenant_id, unclean } => {
918 0 : if !unclean {
919 0 : anyhow::bail!("This command is not a tenant deletion, and uncleanly drops all controller state for the tenant. If you know what you're doing, add `--unclean` to proceed.")
920 0 : }
921 0 : storcon_client
922 0 : .dispatch::<(), ()>(
923 0 : Method::POST,
924 0 : format!("debug/v1/tenant/{tenant_id}/drop"),
925 0 : None,
926 0 : )
927 0 : .await?;
928 0 : }
929 0 : Command::NodeDrop { node_id, unclean } => {
930 0 : if !unclean {
931 0 : anyhow::bail!("This command is not a clean node decommission, and uncleanly drops all controller state for the node, without checking if any tenants still refer to it. If you know what you're doing, add `--unclean` to proceed.")
932 0 : }
933 0 : storcon_client
934 0 : .dispatch::<(), ()>(Method::POST, format!("debug/v1/node/{node_id}/drop"), None)
935 0 : .await?;
936 0 : }
937 0 : Command::NodeDelete { node_id } => {
938 0 : storcon_client
939 0 : .dispatch::<(), ()>(Method::DELETE, format!("control/v1/node/{node_id}"), None)
940 0 : .await?;
941 0 : }
942 0 : Command::TenantSetTimeBasedEviction {
943 0 : tenant_id,
944 0 : period,
945 0 : threshold,
946 0 : } => {
947 0 : vps_client
948 0 : .set_tenant_config(&TenantConfigRequest {
949 0 : tenant_id,
950 0 : config: TenantConfig {
951 0 : eviction_policy: Some(EvictionPolicy::LayerAccessThreshold(
952 0 : EvictionPolicyLayerAccessThreshold {
953 0 : period: period.into(),
954 0 : threshold: threshold.into(),
955 0 : },
956 0 : )),
957 0 : heatmap_period: Some("300s".to_string()),
958 0 : ..Default::default()
959 0 : },
960 0 : })
961 0 : .await?;
962 0 : }
963 0 : Command::BulkMigrate {
964 0 : nodes,
965 0 : concurrency,
966 0 : max_shards,
967 0 : dry_run,
968 0 : } => {
969 0 : // Load the list of nodes, split them up into the drained and filled sets,
970 0 : // and validate that draining is possible.
971 0 : let node_descs = storcon_client
972 0 : .dispatch::<(), Vec<NodeDescribeResponse>>(
973 0 : Method::GET,
974 0 : "control/v1/node".to_string(),
975 0 : None,
976 0 : )
977 0 : .await?;
978 0 :
979 0 : let mut node_to_drain_descs = Vec::new();
980 0 : let mut node_to_fill_descs = Vec::new();
981 0 :
982 0 : for desc in node_descs {
983 0 : let to_drain = nodes.iter().any(|id| *id == desc.id);
984 0 : if to_drain {
985 0 : node_to_drain_descs.push(desc);
986 0 : } else {
987 0 : node_to_fill_descs.push(desc);
988 0 : }
989 0 : }
990 0 :
991 0 : if nodes.len() != node_to_drain_descs.len() {
992 0 : anyhow::bail!("Bulk migration requested away from node which doesn't exist.")
993 0 : }
994 0 :
995 0 : node_to_fill_descs.retain(|desc| {
996 0 : matches!(desc.availability, NodeAvailabilityWrapper::Active)
997 0 : && matches!(
998 0 : desc.scheduling,
999 0 : NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Filling
1000 0 : )
1001 0 : });
1002 0 :
1003 0 : if node_to_fill_descs.is_empty() {
1004 0 : anyhow::bail!("There are no nodes to migrate to")
1005 0 : }
1006 0 :
1007 0 : // Set the node scheduling policy to draining for the nodes which
1008 0 : // we plan to drain.
1009 0 : for node_desc in node_to_drain_descs.iter() {
1010 0 : let req = NodeConfigureRequest {
1011 0 : node_id: node_desc.id,
1012 0 : availability: None,
1013 0 : scheduling: Some(NodeSchedulingPolicy::Draining),
1014 0 : };
1015 0 :
1016 0 : storcon_client
1017 0 : .dispatch::<_, ()>(
1018 0 : Method::PUT,
1019 0 : format!("control/v1/node/{}/config", node_desc.id),
1020 0 : Some(req),
1021 0 : )
1022 0 : .await?;
1023 0 : }
1024 0 :
1025 0 : // Perform the migration: move each tenant shard scheduled on a node to
1026 0 : // be drained to a node which is being filled. A simple round robin
1027 0 : // strategy is used to pick the new node.
1028 0 : let tenants = storcon_client
1029 0 : .dispatch::<(), Vec<TenantDescribeResponse>>(
1030 0 : Method::GET,
1031 0 : "control/v1/tenant".to_string(),
1032 0 : None,
1033 0 : )
1034 0 : .await?;
1035 0 :
1036 0 : let mut selected_node_idx = 0;
1037 0 :
1038 0 : struct MigrationMove {
1039 0 : tenant_shard_id: TenantShardId,
1040 0 : from: NodeId,
1041 0 : to: NodeId,
1042 0 : }
1043 0 :
1044 0 : let mut moves: Vec<MigrationMove> = Vec::new();
1045 0 :
1046 0 : let shards = tenants
1047 0 : .into_iter()
1048 0 : .flat_map(|tenant| tenant.shards.into_iter());
1049 0 : for shard in shards {
1050 0 : if let Some(max_shards) = max_shards {
1051 0 : if moves.len() >= max_shards {
1052 0 : println!(
1053 0 : "Stop planning shard moves since the requested maximum was reached"
1054 0 : );
1055 0 : break;
1056 0 : }
1057 0 : }
1058 0 :
1059 0 : let should_migrate = {
1060 0 : if let Some(attached_to) = shard.node_attached {
1061 0 : node_to_drain_descs
1062 0 : .iter()
1063 0 : .map(|desc| desc.id)
1064 0 : .any(|id| id == attached_to)
1065 0 : } else {
1066 0 : false
1067 0 : }
1068 0 : };
1069 0 :
1070 0 : if !should_migrate {
1071 0 : continue;
1072 0 : }
1073 0 :
1074 0 : moves.push(MigrationMove {
1075 0 : tenant_shard_id: shard.tenant_shard_id,
1076 0 : from: shard
1077 0 : .node_attached
1078 0 : .expect("We only migrate attached tenant shards"),
1079 0 : to: node_to_fill_descs[selected_node_idx].id,
1080 0 : });
1081 0 : selected_node_idx = (selected_node_idx + 1) % node_to_fill_descs.len();
1082 0 : }
1083 0 :
1084 0 : let total_moves = moves.len();
1085 0 :
1086 0 : if dry_run == Some(true) {
1087 0 : println!("Dryrun requested. Planned {total_moves} moves:");
1088 0 : for mv in &moves {
1089 0 : println!("{}: {} -> {}", mv.tenant_shard_id, mv.from, mv.to)
1090 0 : }
1091 0 :
1092 0 : return Ok(());
1093 0 : }
1094 0 :
1095 0 : const DEFAULT_MIGRATE_CONCURRENCY: usize = 8;
1096 0 : let mut stream = futures::stream::iter(moves)
1097 0 : .map(|mv| {
1098 0 : let client = Client::new(cli.api.clone(), cli.jwt.clone());
1099 0 : async move {
1100 0 : client
1101 0 : .dispatch::<TenantShardMigrateRequest, TenantShardMigrateResponse>(
1102 0 : Method::PUT,
1103 0 : format!("control/v1/tenant/{}/migrate", mv.tenant_shard_id),
1104 0 : Some(TenantShardMigrateRequest {
1105 0 : node_id: mv.to,
1106 0 : migration_config: None,
1107 0 : }),
1108 0 : )
1109 0 : .await
1110 0 : .map_err(|e| (mv.tenant_shard_id, mv.from, mv.to, e))
1111 0 : }
1112 0 : })
1113 0 : .buffered(concurrency.unwrap_or(DEFAULT_MIGRATE_CONCURRENCY));
1114 0 :
1115 0 : let mut success = 0;
1116 0 : let mut failure = 0;
1117 0 :
1118 0 : while let Some(res) = stream.next().await {
1119 0 : match res {
1120 0 : Ok(_) => {
1121 0 : success += 1;
1122 0 : }
1123 0 : Err((tenant_shard_id, from, to, error)) => {
1124 0 : failure += 1;
1125 0 : println!(
1126 0 : "Failed to migrate {} from node {} to node {}: {}",
1127 0 : tenant_shard_id, from, to, error
1128 0 : );
1129 0 : }
1130 0 : }
1131 0 :
1132 0 : if (success + failure) % 20 == 0 {
1133 0 : println!(
1134 0 : "Processed {}/{} shards: {} succeeded, {} failed",
1135 0 : success + failure,
1136 0 : total_moves,
1137 0 : success,
1138 0 : failure
1139 0 : );
1140 0 : }
1141 0 : }
1142 0 :
1143 0 : println!(
1144 0 : "Processed {}/{} shards: {} succeeded, {} failed",
1145 0 : success + failure,
1146 0 : total_moves,
1147 0 : success,
1148 0 : failure
1149 0 : );
1150 0 : }
1151 0 : Command::StartDrain { node_id } => {
1152 0 : storcon_client
1153 0 : .dispatch::<(), ()>(
1154 0 : Method::PUT,
1155 0 : format!("control/v1/node/{node_id}/drain"),
1156 0 : None,
1157 0 : )
1158 0 : .await?;
1159 0 : println!("Drain started for {node_id}");
1160 0 : }
1161 0 : Command::CancelDrain { node_id, timeout } => {
1162 0 : storcon_client
1163 0 : .dispatch::<(), ()>(
1164 0 : Method::DELETE,
1165 0 : format!("control/v1/node/{node_id}/drain"),
1166 0 : None,
1167 0 : )
1168 0 : .await?;
1169 0 :
1170 0 : println!("Waiting for node {node_id} to quiesce on scheduling policy ...");
1171 0 :
1172 0 : let final_policy =
1173 0 : wait_for_scheduling_policy(storcon_client, node_id, *timeout, |sched| {
1174 0 : use NodeSchedulingPolicy::*;
1175 0 : matches!(sched, Active | PauseForRestart)
1176 0 : })
1177 0 : .await?;
1178 0 :
1179 0 : println!(
1180 0 : "Drain was cancelled for node {node_id}. Schedulling policy is now {final_policy:?}"
1181 0 : );
1182 0 : }
1183 0 : Command::StartFill { node_id } => {
1184 0 : storcon_client
1185 0 : .dispatch::<(), ()>(Method::PUT, format!("control/v1/node/{node_id}/fill"), None)
1186 0 : .await?;
1187 0 :
1188 0 : println!("Fill started for {node_id}");
1189 0 : }
1190 0 : Command::CancelFill { node_id, timeout } => {
1191 0 : storcon_client
1192 0 : .dispatch::<(), ()>(
1193 0 : Method::DELETE,
1194 0 : format!("control/v1/node/{node_id}/fill"),
1195 0 : None,
1196 0 : )
1197 0 : .await?;
1198 0 :
1199 0 : println!("Waiting for node {node_id} to quiesce on scheduling policy ...");
1200 0 :
1201 0 : let final_policy =
1202 0 : wait_for_scheduling_policy(storcon_client, node_id, *timeout, |sched| {
1203 0 : use NodeSchedulingPolicy::*;
1204 0 : matches!(sched, Active)
1205 0 : })
1206 0 : .await?;
1207 0 :
1208 0 : println!(
1209 0 : "Fill was cancelled for node {node_id}. Schedulling policy is now {final_policy:?}"
1210 0 : );
1211 0 : }
1212 0 : Command::Safekeepers {} => {
1213 0 : let mut resp = storcon_client
1214 0 : .dispatch::<(), Vec<SafekeeperDescribeResponse>>(
1215 0 : Method::GET,
1216 0 : "control/v1/safekeeper".to_string(),
1217 0 : None,
1218 0 : )
1219 0 : .await?;
1220 0 :
1221 0 : resp.sort_by(|a, b| a.id.cmp(&b.id));
1222 0 :
1223 0 : let mut table = comfy_table::Table::new();
1224 0 : table.set_header([
1225 0 : "Id",
1226 0 : "Version",
1227 0 : "Host",
1228 0 : "Port",
1229 0 : "Http Port",
1230 0 : "AZ Id",
1231 0 : "Scheduling",
1232 0 : ]);
1233 0 : for sk in resp {
1234 0 : table.add_row([
1235 0 : format!("{}", sk.id),
1236 0 : format!("{}", sk.version),
1237 0 : sk.host,
1238 0 : format!("{}", sk.port),
1239 0 : format!("{}", sk.http_port),
1240 0 : sk.availability_zone_id.clone(),
1241 0 : String::from(sk.scheduling_policy),
1242 0 : ]);
1243 0 : }
1244 0 : println!("{table}");
1245 0 : }
1246 0 : Command::SafekeeperScheduling {
1247 0 : node_id,
1248 0 : scheduling_policy,
1249 0 : } => {
1250 0 : let scheduling_policy = scheduling_policy.0;
1251 0 : storcon_client
1252 0 : .dispatch::<SafekeeperSchedulingPolicyRequest, ()>(
1253 0 : Method::POST,
1254 0 : format!("control/v1/safekeeper/{node_id}/scheduling_policy"),
1255 0 : Some(SafekeeperSchedulingPolicyRequest { scheduling_policy }),
1256 0 : )
1257 0 : .await?;
1258 0 : println!(
1259 0 : "Scheduling policy of {node_id} set to {}",
1260 0 : String::from(scheduling_policy)
1261 0 : );
1262 0 : }
1263 0 : Command::DownloadHeatmapLayers {
1264 0 : tenant_shard_id,
1265 0 : timeline_id,
1266 0 : concurrency,
1267 0 : } => {
1268 0 : let mut path = format!(
1269 0 : "/v1/tenant/{}/timeline/{}/download_heatmap_layers",
1270 0 : tenant_shard_id, timeline_id,
1271 0 : );
1272 0 :
1273 0 : if let Some(c) = concurrency {
1274 0 : path = format!("{path}?concurrency={c}");
1275 0 : }
1276 0 :
1277 0 : storcon_client
1278 0 : .dispatch::<(), ()>(Method::POST, path, None)
1279 0 : .await?;
1280 0 : }
1281 0 : }
1282 0 :
1283 0 : Ok(())
1284 0 : }