use std::collections::{HashMap, HashSet};
use std::path::PathBuf;
use std::str::FromStr;
use std::time::Duration;

use clap::{Parser, Subcommand};
use futures::StreamExt;
use pageserver_api::controller_api::{
    AvailabilityZone, MigrationConfig, NodeAvailabilityWrapper, NodeConfigureRequest,
    NodeDescribeResponse, NodeRegisterRequest, NodeSchedulingPolicy, NodeShardResponse,
    PlacementPolicy, SafekeeperDescribeResponse, SafekeeperSchedulingPolicyRequest,
    ShardSchedulingPolicy, ShardsPreferredAzsRequest, ShardsPreferredAzsResponse,
    SkSchedulingPolicy, TenantCreateRequest, TenantDescribeResponse, TenantPolicyRequest,
    TenantShardMigrateRequest, TenantShardMigrateResponse,
};
use pageserver_api::models::{
    EvictionPolicy, EvictionPolicyLayerAccessThreshold, ShardParameters, TenantConfig,
    TenantConfigPatchRequest, TenantConfigRequest, TenantShardSplitRequest,
    TenantShardSplitResponse,
};
use pageserver_api::shard::{ShardStripeSize, TenantShardId};
use pageserver_client::mgmt_api::{self};
use reqwest::{Certificate, Method, StatusCode, Url};
use storage_controller_client::control_api::Client;
use utils::id::{NodeId, TenantId, TimelineId};

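/// Subcommands supported by the storage controller CLI.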
#[derive(Subcommand, Debug)]
enum Command {
    /// Register a pageserver with the storage controller. This shouldn't usually be necessary,
    /// since pageservers auto-register when they start up
    NodeRegister {
        #[arg(long)]
        node_id: NodeId,

        #[arg(long)]
        listen_pg_addr: String,
        #[arg(long)]
        listen_pg_port: u16,
        #[arg(long)]
        listen_grpc_addr: Option<String>,
        #[arg(long)]
        listen_grpc_port: Option<u16>,

        #[arg(long)]
        listen_http_addr: String,
        #[arg(long)]
        listen_http_port: u16,
        #[arg(long)]
        listen_https_port: Option<u16>,

        #[arg(long)]
        availability_zone_id: String,
    },

    /// Modify a node's configuration in the storage controller
    NodeConfigure {
        #[arg(long)]
        node_id: NodeId,

        /// Availability is usually auto-detected based on heartbeats. Set 'offline' here to
        /// manually mark a node offline
        #[arg(long)]
        availability: Option<NodeAvailabilityArg>,
        /// Scheduling policy controls whether tenant shards may be scheduled onto this node.
        #[arg(long)]
        scheduling: Option<NodeSchedulingPolicy>,
    },
    /// Kept only as a fallback; it will be removed in the future.
    /// Use [`Command::NodeStartDelete`] instead, if possible.
    NodeDelete {
        #[arg(long)]
        node_id: NodeId,
    },
    /// Start deletion of the specified pageserver.
    NodeStartDelete {
        #[arg(long)]
        node_id: NodeId,
    },
    /// Cancel deletion of the specified pageserver and wait for `timeout`
    /// for the operation to be canceled. May be retried.
    NodeCancelDelete {
        #[arg(long)]
        node_id: NodeId,
        #[arg(long)]
        timeout: humantime::Duration,
    },
    /// Delete a node's tombstone from the storage controller.
    /// This is used when we want to allow the node to be re-registered.
    NodeDeleteTombstone {
        #[arg(long)]
        node_id: NodeId,
    },
    /// Modify a tenant's policies in the storage controller
    TenantPolicy {
        #[arg(long)]
        tenant_id: TenantId,
        /// Placement policy controls whether a tenant is `detached`, has only a secondary location (`secondary`),
        /// or is in the normal attached state with N secondary locations (`attached:N`)
        #[arg(long)]
        placement: Option<PlacementPolicyArg>,
        /// Scheduling policy enables pausing the controller's scheduling activity involving this tenant. `active` is normal,
        /// `essential` disables optimization scheduling changes, `pause` disables all scheduling changes, and `stop` prevents
        /// all reconciliation activity including for scheduling changes already made. `pause` and `stop` can make a tenant
        /// unavailable, and are only for use in emergencies.
        #[arg(long)]
        scheduling: Option<ShardSchedulingPolicyArg>,
    },
    /// List nodes known to the storage controller
    Nodes {},
    /// List soft deleted nodes known to the storage controller
    NodeTombstones {},
    /// List tenants known to the storage controller
    Tenants {
        /// If set, only list the tenants present on the specified node
        node_id: Option<NodeId>,
    },
    /// Create a new tenant in the storage controller, and by extension on pageservers.
    TenantCreate {
        #[arg(long)]
        tenant_id: TenantId,
    },
    /// Delete a tenant in the storage controller, and by extension on pageservers.
    TenantDelete {
        #[arg(long)]
        tenant_id: TenantId,
    },
    /// Split an existing tenant into a higher number of shards than its current shard count.
    TenantShardSplit {
        #[arg(long)]
        tenant_id: TenantId,
        #[arg(long)]
        shard_count: u8,
        /// Optional, in 8kiB pages. e.g. set 2048 for 16MB stripes.
        #[arg(long)]
        stripe_size: Option<u32>,
    },
    /// Migrate the attached location for a tenant shard to a specific pageserver.
    TenantShardMigrate {
        #[arg(long)]
        tenant_shard_id: TenantShardId,
        #[arg(long)]
        node: NodeId,
        #[arg(long, default_value_t = true, action = clap::ArgAction::Set)]
        prewarm: bool,
        #[arg(long, default_value_t = false, action = clap::ArgAction::Set)]
        override_scheduler: bool,
    },
    /// Watch the location of a tenant shard evolve, e.g. while expecting it to migrate
    TenantShardWatch {
        #[arg(long)]
        tenant_shard_id: TenantShardId,
    },
    /// Migrate the secondary location for a tenant shard to a specific pageserver.
    TenantShardMigrateSecondary {
        #[arg(long)]
        tenant_shard_id: TenantShardId,
        #[arg(long)]
        node: NodeId,
    },
    /// Cancel any ongoing reconciliation for this shard
    TenantShardCancelReconcile {
        #[arg(long)]
        tenant_shard_id: TenantShardId,
    },
    /// Set the pageserver tenant configuration of a tenant: this is the configuration structure
    /// that is passed through to pageservers, and does not affect storage controller behavior.
    /// Any previous tenant configs are overwritten.
    SetTenantConfig {
        #[arg(long)]
        tenant_id: TenantId,
        #[arg(long)]
        config: String,
    },
    /// Patch the pageserver tenant configuration of a tenant. Any fields with null values in the
    /// provided JSON are unset from the tenant config and all fields with non-null values are set.
    /// Unspecified fields are not changed.
    PatchTenantConfig {
        #[arg(long)]
        tenant_id: TenantId,
        #[arg(long)]
        config: String,
    },
    /// Print details about a particular tenant, including all its shards' states.
    TenantDescribe {
        #[arg(long)]
        tenant_id: TenantId,
    },
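    /// Set or clear the preferred availability zone for all of a tenant's shards.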
    TenantSetPreferredAz {
        #[arg(long)]
        tenant_id: TenantId,
        #[arg(long)]
        preferred_az: Option<String>,
    },
    /// Uncleanly drop a tenant from the storage controller: this doesn't delete anything from pageservers. Appropriate
    /// if you e.g. used `tenant-warmup` by mistake on a tenant ID that doesn't really exist, or is in some other region.
    TenantDrop {
        #[arg(long)]
        tenant_id: TenantId,
        #[arg(long)]
        unclean: bool,
    },
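    /// Uncleanly drop a node from the storage controller: this only removes controller state and
    /// does not check whether any tenants still refer to the node.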
    NodeDrop {
        #[arg(long)]
        node_id: NodeId,
        #[arg(long)]
        unclean: bool,
    },
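    /// Set a time-based eviction policy (and a 300 second heatmap period) in the tenant's
    /// pageserver configuration.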
    TenantSetTimeBasedEviction {
        #[arg(long)]
        tenant_id: TenantId,
        #[arg(long)]
        period: humantime::Duration,
        #[arg(long)]
        threshold: humantime::Duration,
    },
    // Migrate away from a set of specified pageservers by moving the primary attachments to pageservers
    // outside of the specified set.
    BulkMigrate {
        // Set of pageserver node ids to drain.
        #[arg(long)]
        nodes: Vec<NodeId>,
        // Optional: migration concurrency (default is 8)
        #[arg(long)]
        concurrency: Option<usize>,
        // Optional: maximum number of shards to migrate
        #[arg(long)]
        max_shards: Option<usize>,
        // Optional: when set to true, nothing is migrated, but the plan is printed to stdout
        #[arg(long)]
        dry_run: Option<bool>,
    },
    /// Start draining the specified pageserver.
    /// The drain is complete when the scheduling policy returns to active.
    StartDrain {
        #[arg(long)]
        node_id: NodeId,
    },
    /// Cancel draining the specified pageserver and wait for `timeout`
    /// for the operation to be canceled. May be retried.
    CancelDrain {
        #[arg(long)]
        node_id: NodeId,
        #[arg(long)]
        timeout: humantime::Duration,
    },
    /// Start filling the specified pageserver.
    /// The fill is complete when the scheduling policy returns to active.
    StartFill {
        #[arg(long)]
        node_id: NodeId,
    },
    /// Cancel filling the specified pageserver and wait for `timeout`
    /// for the operation to be canceled. May be retried.
    CancelFill {
        #[arg(long)]
        node_id: NodeId,
        #[arg(long)]
        timeout: humantime::Duration,
    },
    /// List safekeepers known to the storage controller
    Safekeepers {},
    /// Set the scheduling policy of the specified safekeeper
    SafekeeperScheduling {
        #[arg(long)]
        node_id: NodeId,
        #[arg(long)]
        scheduling_policy: SkSchedulingPolicyArg,
    },
    /// Download any missing heatmap layers for all shards of a given timeline
    DownloadHeatmapLayers {
        /// Tenant ID or tenant shard ID. When an unsharded tenant ID is specified,
        /// the operation is performed on all shards. When a sharded tenant ID is
        /// specified, the operation is only performed on the specified shard.
        #[arg(long)]
        tenant_shard_id: TenantShardId,
        #[arg(long)]
        timeline_id: TimelineId,
        /// Optional: Maximum download concurrency (default is 16)
        #[arg(long)]
        concurrency: Option<usize>,
    },
}

#[derive(Parser)]
#[command(
    author,
    version,
    about,
    long_about = "CLI for Storage Controller Support/Debug"
)]
#[command(arg_required_else_help(true))]
struct Cli {
    #[arg(long)]
    /// URL to storage controller. e.g. http://127.0.0.1:1234 when using `neon_local`
    api: Url,

    #[arg(long)]
    /// JWT token for authenticating with storage controller. Depending on the API used, this
    /// should have either `pageserverapi` or `admin` scopes: for convenience, you should mint
    /// a token with both scopes to use with this tool.
    jwt: Option<String>,

    #[arg(long)]
    /// Trusted root CA certificates to use in https APIs.
    ssl_ca_file: Option<PathBuf>,

    #[command(subcommand)]
    command: Command,
}

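/// Parses a `PlacementPolicy` from CLI strings such as `detached`, `secondary`, or `attached:1`.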
#[derive(Debug, Clone)]
struct PlacementPolicyArg(PlacementPolicy);

impl FromStr for PlacementPolicyArg {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "detached" => Ok(Self(PlacementPolicy::Detached)),
            "secondary" => Ok(Self(PlacementPolicy::Secondary)),
            _ if s.starts_with("attached:") => {
                let mut splitter = s.split(':');
                let _prefix = splitter.next().unwrap();
                match splitter.next().and_then(|s| s.parse::<usize>().ok()) {
                    Some(n) => Ok(Self(PlacementPolicy::Attached(n))),
                    None => Err(anyhow::anyhow!(
                        "Invalid format '{s}', a valid example is 'attached:1'"
                    )),
                }
            }
            _ => Err(anyhow::anyhow!(
                "Unknown placement policy '{s}', try detached,secondary,attached:<n>"
            )),
        }
    }
}

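/// Parses an `SkSchedulingPolicy` from its CLI string representation.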
#[derive(Debug, Clone)]
struct SkSchedulingPolicyArg(SkSchedulingPolicy);

impl FromStr for SkSchedulingPolicyArg {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        SkSchedulingPolicy::from_str(s).map(Self)
    }
}

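/// Parses a `ShardSchedulingPolicy` from `active`, `essential`, `pause`, or `stop`.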
#[derive(Debug, Clone)]
struct ShardSchedulingPolicyArg(ShardSchedulingPolicy);

impl FromStr for ShardSchedulingPolicyArg {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "active" => Ok(Self(ShardSchedulingPolicy::Active)),
            "essential" => Ok(Self(ShardSchedulingPolicy::Essential)),
            "pause" => Ok(Self(ShardSchedulingPolicy::Pause)),
            "stop" => Ok(Self(ShardSchedulingPolicy::Stop)),
            _ => Err(anyhow::anyhow!(
                "Unknown scheduling policy '{s}', try active,essential,pause,stop"
            )),
        }
    }
}

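/// Parses a `NodeAvailabilityWrapper` from `active` or `offline`.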
#[derive(Debug, Clone)]
struct NodeAvailabilityArg(NodeAvailabilityWrapper);

impl FromStr for NodeAvailabilityArg {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "active" => Ok(Self(NodeAvailabilityWrapper::Active)),
            "offline" => Ok(Self(NodeAvailabilityWrapper::Offline)),
            _ => Err(anyhow::anyhow!("Unknown availability state '{s}'")),
        }
    }
}

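/// Poll the node's scheduling policy until `f` returns true for it, or `timeout` elapses.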
async fn wait_for_scheduling_policy<F>(
    client: Client,
    node_id: NodeId,
    timeout: Duration,
    f: F,
) -> anyhow::Result<NodeSchedulingPolicy>
where
    F: Fn(NodeSchedulingPolicy) -> bool,
{
    let waiter = tokio::time::timeout(timeout, async move {
        loop {
            let node = client
                .dispatch::<(), NodeDescribeResponse>(
                    Method::GET,
                    format!("control/v1/node/{node_id}"),
                    None,
                )
                .await?;

            if f(node.scheduling) {
                return Ok::<NodeSchedulingPolicy, mgmt_api::Error>(node.scheduling);
            }
        }
    });

    Ok(waiter.await??)
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let cli = Cli::parse();

    let ssl_ca_certs = match &cli.ssl_ca_file {
        Some(ssl_ca_file) => {
            let buf = tokio::fs::read(ssl_ca_file).await?;
            Certificate::from_pem_bundle(&buf)?
        }
        None => Vec::new(),
    };

    let mut http_client = reqwest::Client::builder();
    for ssl_ca_cert in ssl_ca_certs {
        http_client = http_client.add_root_certificate(ssl_ca_cert);
    }
    let http_client = http_client.build()?;

    let storcon_client = Client::new(http_client.clone(), cli.api.clone(), cli.jwt.clone());

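    // `cli.api` serializes with a trailing '/': drop it so the management API client is handed a
    // base endpoint without a trailing slash.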
    let mut trimmed = cli.api.to_string();
    trimmed.pop();
    let vps_client = mgmt_api::Client::new(http_client.clone(), trimmed, cli.jwt.as_deref());

    match cli.command {
        Command::NodeRegister {
            node_id,
            listen_pg_addr,
            listen_pg_port,
            listen_grpc_addr,
            listen_grpc_port,
            listen_http_addr,
            listen_http_port,
            listen_https_port,
            availability_zone_id,
        } => {
            storcon_client
                .dispatch::<_, ()>(
                    Method::POST,
                    "control/v1/node".to_string(),
                    Some(NodeRegisterRequest {
                        node_id,
                        listen_pg_addr,
                        listen_pg_port,
                        listen_grpc_addr,
                        listen_grpc_port,
                        listen_http_addr,
                        listen_http_port,
                        listen_https_port,
                        availability_zone_id: AvailabilityZone(availability_zone_id),
                    }),
                )
                .await?;
        }
        Command::TenantCreate { tenant_id } => {
            storcon_client
                .dispatch::<_, ()>(
                    Method::POST,
                    "v1/tenant".to_string(),
                    Some(TenantCreateRequest {
                        new_tenant_id: TenantShardId::unsharded(tenant_id),
                        generation: None,
                        shard_parameters: ShardParameters::default(),
                        placement_policy: Some(PlacementPolicy::Attached(1)),
                        config: TenantConfig::default(),
                    }),
                )
                .await?;
        }
        Command::TenantDelete { tenant_id } => {
            let status = vps_client
                .tenant_delete(TenantShardId::unsharded(tenant_id))
                .await?;
            tracing::info!("Delete status: {}", status);
        }
        Command::Nodes {} => {
            let mut resp = storcon_client
                .dispatch::<(), Vec<NodeDescribeResponse>>(
                    Method::GET,
                    "control/v1/node".to_string(),
                    None,
                )
                .await?;

            resp.sort_by(|a, b| a.listen_http_addr.cmp(&b.listen_http_addr));

            let mut table = comfy_table::Table::new();
            table.set_header(["Id", "Hostname", "AZ", "Scheduling", "Availability"]);
            for node in resp {
                table.add_row([
                    format!("{}", node.id),
                    node.listen_http_addr,
                    node.availability_zone_id,
                    format!("{:?}", node.scheduling),
                    format!("{:?}", node.availability),
                ]);
            }
            println!("{table}");
        }
        Command::NodeConfigure {
            node_id,
            availability,
            scheduling,
        } => {
            let req = NodeConfigureRequest {
                node_id,
                availability: availability.map(|a| a.0),
                scheduling,
            };
            storcon_client
                .dispatch::<_, ()>(
                    Method::PUT,
                    format!("control/v1/node/{node_id}/config"),
                    Some(req),
                )
                .await?;
        }
        Command::Tenants {
            node_id: Some(node_id),
        } => {
            let describe_response = storcon_client
                .dispatch::<(), NodeShardResponse>(
                    Method::GET,
                    format!("control/v1/node/{node_id}/shards"),
                    None,
                )
                .await?;
            let shards = describe_response.shards;
            let mut table = comfy_table::Table::new();
            table.set_header([
                "Shard",
                "Intended Primary/Secondary",
                "Observed Primary/Secondary",
            ]);
            for shard in shards {
                table.add_row([
                    format!("{}", shard.tenant_shard_id),
                    match shard.is_intended_secondary {
                        None => "".to_string(),
                        Some(true) => "Secondary".to_string(),
                        Some(false) => "Primary".to_string(),
                    },
                    match shard.is_observed_secondary {
                        None => "".to_string(),
                        Some(true) => "Secondary".to_string(),
                        Some(false) => "Primary".to_string(),
                    },
                ]);
            }
            println!("{table}");
        }
        Command::Tenants { node_id: None } => {
            // Set up output formatting
            let mut table = comfy_table::Table::new();
            table.set_header([
                "TenantId",
                "Preferred AZ",
                "ShardCount",
                "StripeSize",
                "Placement",
                "Scheduling",
            ]);

            // Pagination loop over listing API
            let mut start_after = None;
            const LIMIT: usize = 1000;
            loop {
                let path = match start_after {
                    None => format!("control/v1/tenant?limit={LIMIT}"),
                    Some(start_after) => {
                        format!("control/v1/tenant?limit={LIMIT}&start_after={start_after}")
                    }
                };

                let resp = storcon_client
                    .dispatch::<(), Vec<TenantDescribeResponse>>(Method::GET, path, None)
                    .await?;

                if resp.is_empty() {
                    // End of data reached
                    break;
                }

                // Give some visual feedback while we're building up the table (comfy_table doesn't have
                // streaming output)
                if resp.len() >= LIMIT {
                    eprint!(".");
                }

                start_after = Some(resp.last().unwrap().tenant_id);

                for tenant in resp {
                    let shard_zero = tenant.shards.into_iter().next().unwrap();
                    table.add_row([
                        format!("{}", tenant.tenant_id),
                        shard_zero
                            .preferred_az_id
                            .as_ref()
                            .cloned()
                            .unwrap_or("".to_string()),
                        format!("{}", shard_zero.tenant_shard_id.shard_count.literal()),
                        format!("{:?}", tenant.stripe_size),
                        format!("{:?}", tenant.policy),
                        format!("{:?}", shard_zero.scheduling_policy),
                    ]);
                }
            }

            // Terminate progress dots
            if table.row_count() > LIMIT {
                eprintln!();
            }

            println!("{table}");
        }
        Command::TenantPolicy {
            tenant_id,
            placement,
            scheduling,
        } => {
            let req = TenantPolicyRequest {
                scheduling: scheduling.map(|s| s.0),
                placement: placement.map(|p| p.0),
            };
            storcon_client
                .dispatch::<_, ()>(
                    Method::PUT,
                    format!("control/v1/tenant/{tenant_id}/policy"),
                    Some(req),
                )
                .await?;
        }
        Command::TenantShardSplit {
            tenant_id,
            shard_count,
            stripe_size,
        } => {
            let req = TenantShardSplitRequest {
                new_shard_count: shard_count,
                new_stripe_size: stripe_size.map(ShardStripeSize),
            };

            let response = storcon_client
                .dispatch::<TenantShardSplitRequest, TenantShardSplitResponse>(
                    Method::PUT,
                    format!("control/v1/tenant/{tenant_id}/shard_split"),
                    Some(req),
                )
                .await?;
            println!(
                "Split tenant {} into {} shards: {}",
                tenant_id,
                shard_count,
                response
                    .new_shards
                    .iter()
                    .map(|s| format!("{s:?}"))
                    .collect::<Vec<_>>()
                    .join(",")
            );
        }
        Command::TenantShardMigrate {
            tenant_shard_id,
            node,
            prewarm,
            override_scheduler,
        } => {
            let migration_config = MigrationConfig {
                prewarm,
                override_scheduler,
                ..Default::default()
            };

            let req = TenantShardMigrateRequest {
                node_id: node,
                origin_node_id: None,
                migration_config,
            };

            match storcon_client
                .dispatch::<TenantShardMigrateRequest, TenantShardMigrateResponse>(
                    Method::PUT,
                    format!("control/v1/tenant/{tenant_shard_id}/migrate"),
                    Some(req),
                )
                .await
            {
                Err(mgmt_api::Error::ApiError(StatusCode::PRECONDITION_FAILED, msg)) => {
                    anyhow::bail!(
                        "Migration to {node} rejected, may require `--override-scheduler` ({})",
                        msg
                    );
                }
                Err(e) => return Err(e.into()),
                Ok(_) => {}
            }

            watch_tenant_shard(storcon_client, tenant_shard_id, Some(node)).await?;
        }
        Command::TenantShardWatch { tenant_shard_id } => {
            watch_tenant_shard(storcon_client, tenant_shard_id, None).await?;
        }
        Command::TenantShardMigrateSecondary {
            tenant_shard_id,
            node,
        } => {
            let req = TenantShardMigrateRequest {
                node_id: node,
                origin_node_id: None,
                migration_config: MigrationConfig::default(),
            };

            storcon_client
                .dispatch::<TenantShardMigrateRequest, TenantShardMigrateResponse>(
                    Method::PUT,
                    format!("control/v1/tenant/{tenant_shard_id}/migrate_secondary"),
                    Some(req),
                )
                .await?;
        }
        Command::TenantShardCancelReconcile { tenant_shard_id } => {
            storcon_client
                .dispatch::<(), ()>(
                    Method::PUT,
                    format!("control/v1/tenant/{tenant_shard_id}/cancel_reconcile"),
                    None,
                )
                .await?;
        }
        Command::SetTenantConfig { tenant_id, config } => {
            let tenant_conf = serde_json::from_str(&config)?;

            vps_client
                .set_tenant_config(&TenantConfigRequest {
                    tenant_id,
                    config: tenant_conf,
                })
                .await?;
        }
        Command::PatchTenantConfig { tenant_id, config } => {
            let tenant_conf = serde_json::from_str(&config)?;

            vps_client
                .patch_tenant_config(&TenantConfigPatchRequest {
                    tenant_id,
                    config: tenant_conf,
                })
                .await?;
        }
        Command::TenantDescribe { tenant_id } => {
            let TenantDescribeResponse {
                tenant_id,
                shards,
                stripe_size,
                policy,
                config,
            } = storcon_client
                .dispatch::<(), TenantDescribeResponse>(
                    Method::GET,
                    format!("control/v1/tenant/{tenant_id}"),
                    None,
                )
                .await?;

            let nodes = storcon_client
                .dispatch::<(), Vec<NodeDescribeResponse>>(
                    Method::GET,
                    "control/v1/node".to_string(),
                    None,
                )
                .await?;
            let nodes = nodes
                .into_iter()
                .map(|n| (n.id, n))
                .collect::<HashMap<_, _>>();

            println!("Tenant {tenant_id}");
            let mut table = comfy_table::Table::new();
            table.add_row(["Policy", &format!("{policy:?}")]);
            table.add_row(["Stripe size", &format!("{stripe_size:?}")]);
            table.add_row(["Config", &serde_json::to_string_pretty(&config).unwrap()]);
            println!("{table}");
            println!("Shards:");
            let mut table = comfy_table::Table::new();
            table.set_header([
                "Shard",
                "Attached",
                "Attached AZ",
                "Secondary",
                "Last error",
                "status",
            ]);
            for shard in shards {
                let secondary = shard
                    .node_secondary
                    .iter()
                    .map(|n| format!("{n}"))
                    .collect::<Vec<_>>()
                    .join(",");

                let mut status_parts = Vec::new();
                if shard.is_reconciling {
                    status_parts.push("reconciling");
                }

                if shard.is_pending_compute_notification {
                    status_parts.push("pending_compute");
                }

                if shard.is_splitting {
                    status_parts.push("splitting");
                }
                let status = status_parts.join(",");

                let attached_node = shard
                    .node_attached
                    .as_ref()
                    .map(|id| nodes.get(id).expect("Shard references nonexistent node"));

                table.add_row([
                    format!("{}", shard.tenant_shard_id),
                    attached_node
                        .map(|n| format!("{} ({})", n.listen_http_addr, n.id))
                        .unwrap_or(String::new()),
                    attached_node
                        .map(|n| n.availability_zone_id.clone())
                        .unwrap_or(String::new()),
                    secondary,
                    shard.last_error,
                    status,
                ]);
            }
            println!("{table}");
        }
        Command::TenantSetPreferredAz {
            tenant_id,
            preferred_az,
        } => {
            // First learn about the tenant's shards
            let describe_response = storcon_client
                .dispatch::<(), TenantDescribeResponse>(
                    Method::GET,
                    format!("control/v1/tenant/{tenant_id}"),
                    None,
                )
                .await?;

            // Learn about nodes to validate the AZ ID
            let nodes = storcon_client
                .dispatch::<(), Vec<NodeDescribeResponse>>(
                    Method::GET,
                    "control/v1/node".to_string(),
                    None,
                )
                .await?;

            if let Some(preferred_az) = &preferred_az {
                let azs = nodes
                    .into_iter()
                    .map(|n| (n.availability_zone_id))
                    .collect::<HashSet<_>>();
                if !azs.contains(preferred_az) {
                    anyhow::bail!(
                        "AZ {} not found on any node: known AZs are: {:?}",
                        preferred_az,
                        azs
                    );
                }
            } else {
                // Make it obvious to the user that since they've omitted an AZ, we're clearing it
                eprintln!("Clearing preferred AZ for tenant {tenant_id}");
            }

            // Construct a request that modifies all the tenant's shards
            let req = ShardsPreferredAzsRequest {
                preferred_az_ids: describe_response
                    .shards
                    .into_iter()
                    .map(|s| {
                        (
                            s.tenant_shard_id,
                            preferred_az.clone().map(AvailabilityZone),
                        )
                    })
                    .collect(),
            };
            storcon_client
                .dispatch::<ShardsPreferredAzsRequest, ShardsPreferredAzsResponse>(
                    Method::PUT,
                    "control/v1/preferred_azs".to_string(),
                    Some(req),
                )
                .await?;
        }
        Command::TenantDrop { tenant_id, unclean } => {
            if !unclean {
                anyhow::bail!(
                    "This command is not a tenant deletion, and uncleanly drops all controller state for the tenant. If you know what you're doing, add `--unclean` to proceed."
                )
            }
            storcon_client
                .dispatch::<(), ()>(
                    Method::POST,
                    format!("debug/v1/tenant/{tenant_id}/drop"),
                    None,
                )
                .await?;
        }
        Command::NodeDrop { node_id, unclean } => {
            if !unclean {
                anyhow::bail!(
                    "This command is not a clean node decommission, and uncleanly drops all controller state for the node, without checking if any tenants still refer to it. If you know what you're doing, add `--unclean` to proceed."
                )
            }
            storcon_client
                .dispatch::<(), ()>(Method::POST, format!("debug/v1/node/{node_id}/drop"), None)
                .await?;
        }
        Command::NodeDelete { node_id } => {
            eprintln!("Warning: This command is obsolete and will be removed in a future version");
            eprintln!("Use `NodeStartDelete` instead, if possible");
            storcon_client
                .dispatch::<(), ()>(Method::DELETE, format!("control/v1/node/{node_id}"), None)
                .await?;
        }
        Command::NodeStartDelete { node_id } => {
            storcon_client
                .dispatch::<(), ()>(
                    Method::PUT,
                    format!("control/v1/node/{node_id}/delete"),
                    None,
                )
                .await?;
            println!("Delete started for {node_id}");
        }
        Command::NodeCancelDelete { node_id, timeout } => {
            storcon_client
                .dispatch::<(), ()>(
                    Method::DELETE,
                    format!("control/v1/node/{node_id}/delete"),
                    None,
                )
                .await?;

            println!("Waiting for node {node_id} to quiesce on scheduling policy ...");

            let final_policy =
                wait_for_scheduling_policy(storcon_client, node_id, *timeout, |sched| {
                    !matches!(sched, NodeSchedulingPolicy::Deleting)
                })
                .await?;

            println!(
                "Delete was cancelled for node {node_id}. Scheduling policy is now {final_policy:?}"
            );
        }
        Command::NodeDeleteTombstone { node_id } => {
            storcon_client
                .dispatch::<(), ()>(
                    Method::DELETE,
                    format!("debug/v1/tombstone/{node_id}"),
                    None,
                )
                .await?;
        }
        Command::NodeTombstones {} => {
            let mut resp = storcon_client
                .dispatch::<(), Vec<NodeDescribeResponse>>(
                    Method::GET,
                    "debug/v1/tombstone".to_string(),
                    None,
                )
                .await?;

            resp.sort_by(|a, b| a.listen_http_addr.cmp(&b.listen_http_addr));

            let mut table = comfy_table::Table::new();
            table.set_header(["Id", "Hostname", "AZ", "Scheduling", "Availability"]);
            for node in resp {
                table.add_row([
                    format!("{}", node.id),
                    node.listen_http_addr,
                    node.availability_zone_id,
                    format!("{:?}", node.scheduling),
                    format!("{:?}", node.availability),
                ]);
            }
            println!("{table}");
        }
        Command::TenantSetTimeBasedEviction {
            tenant_id,
            period,
            threshold,
        } => {
            vps_client
                .set_tenant_config(&TenantConfigRequest {
                    tenant_id,
                    config: TenantConfig {
                        eviction_policy: Some(EvictionPolicy::LayerAccessThreshold(
                            EvictionPolicyLayerAccessThreshold {
                                period: period.into(),
                                threshold: threshold.into(),
                            },
                        )),
                        heatmap_period: Some(Duration::from_secs(300)),
                        ..Default::default()
                    },
                })
                .await?;
        }
        Command::BulkMigrate {
            nodes,
            concurrency,
            max_shards,
            dry_run,
        } => {
            // Load the list of nodes, split them up into the drained and filled sets,
            // and validate that draining is possible.
            let node_descs = storcon_client
                .dispatch::<(), Vec<NodeDescribeResponse>>(
                    Method::GET,
                    "control/v1/node".to_string(),
                    None,
                )
                .await?;

            let mut node_to_drain_descs = Vec::new();
            let mut node_to_fill_descs = Vec::new();

            for desc in node_descs {
                let to_drain = nodes.contains(&desc.id);
                if to_drain {
                    node_to_drain_descs.push(desc);
                } else {
                    node_to_fill_descs.push(desc);
                }
            }

            if nodes.len() != node_to_drain_descs.len() {
                anyhow::bail!("Bulk migration requested away from node which doesn't exist.")
            }

            node_to_fill_descs.retain(|desc| {
                matches!(desc.availability, NodeAvailabilityWrapper::Active)
                    && matches!(
                        desc.scheduling,
                        NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Filling
                    )
            });

            if node_to_fill_descs.is_empty() {
                anyhow::bail!("There are no nodes to migrate to")
            }

            // Set the node scheduling policy to draining for the nodes which
            // we plan to drain.
            for node_desc in node_to_drain_descs.iter() {
                let req = NodeConfigureRequest {
                    node_id: node_desc.id,
                    availability: None,
                    scheduling: Some(NodeSchedulingPolicy::Draining),
                };

                storcon_client
                    .dispatch::<_, ()>(
                        Method::PUT,
                        format!("control/v1/node/{}/config", node_desc.id),
                        Some(req),
                    )
                    .await?;
            }

            // Perform the migration: move each tenant shard scheduled on a node to
            // be drained to a node which is being filled. A simple round robin
            // strategy is used to pick the new node.
            let tenants = storcon_client
                .dispatch::<(), Vec<TenantDescribeResponse>>(
                    Method::GET,
                    "control/v1/tenant".to_string(),
                    None,
                )
                .await?;

            let mut selected_node_idx = 0;

            struct MigrationMove {
                tenant_shard_id: TenantShardId,
                from: NodeId,
                to: NodeId,
            }

            let mut moves: Vec<MigrationMove> = Vec::new();

            let shards = tenants
                .into_iter()
                .flat_map(|tenant| tenant.shards.into_iter());
            for shard in shards {
                if let Some(max_shards) = max_shards {
                    if moves.len() >= max_shards {
                        println!(
                            "Stop planning shard moves since the requested maximum was reached"
                        );
                        break;
                    }
                }

                let should_migrate = {
                    if let Some(attached_to) = shard.node_attached {
                        node_to_drain_descs
                            .iter()
                            .map(|desc| desc.id)
                            .any(|id| id == attached_to)
                    } else {
                        false
                    }
                };

                if !should_migrate {
                    continue;
                }

                moves.push(MigrationMove {
                    tenant_shard_id: shard.tenant_shard_id,
                    from: shard
                        .node_attached
                        .expect("We only migrate attached tenant shards"),
                    to: node_to_fill_descs[selected_node_idx].id,
                });
                selected_node_idx = (selected_node_idx + 1) % node_to_fill_descs.len();
            }

            let total_moves = moves.len();

            if dry_run == Some(true) {
                println!("Dryrun requested. Planned {total_moves} moves:");
                for mv in &moves {
                    println!("{}: {} -> {}", mv.tenant_shard_id, mv.from, mv.to)
                }

                return Ok(());
            }

            const DEFAULT_MIGRATE_CONCURRENCY: usize = 8;
            let mut stream = futures::stream::iter(moves)
                .map(|mv| {
                    let client = Client::new(http_client.clone(), cli.api.clone(), cli.jwt.clone());
                    async move {
                        client
                            .dispatch::<TenantShardMigrateRequest, TenantShardMigrateResponse>(
                                Method::PUT,
                                format!("control/v1/tenant/{}/migrate", mv.tenant_shard_id),
                                Some(TenantShardMigrateRequest {
                                    node_id: mv.to,
                                    origin_node_id: Some(mv.from),
                                    migration_config: MigrationConfig::default(),
                                }),
                            )
                            .await
                            .map_err(|e| (mv.tenant_shard_id, mv.from, mv.to, e))
                    }
                })
                .buffered(concurrency.unwrap_or(DEFAULT_MIGRATE_CONCURRENCY));

            let mut success = 0;
            let mut failure = 0;

            while let Some(res) = stream.next().await {
                match res {
                    Ok(_) => {
                        success += 1;
                    }
                    Err((tenant_shard_id, from, to, error)) => {
                        failure += 1;
                        println!(
                            "Failed to migrate {tenant_shard_id} from node {from} to node {to}: {error}"
                        );
                    }
                }

                if (success + failure) % 20 == 0 {
                    println!(
                        "Processed {}/{} shards: {} succeeded, {} failed",
                        success + failure,
                        total_moves,
                        success,
                        failure
                    );
                }
            }

            println!(
                "Processed {}/{} shards: {} succeeded, {} failed",
                success + failure,
                total_moves,
                success,
                failure
            );
        }
        Command::StartDrain { node_id } => {
            storcon_client
                .dispatch::<(), ()>(
                    Method::PUT,
                    format!("control/v1/node/{node_id}/drain"),
                    None,
                )
                .await?;
            println!("Drain started for {node_id}");
        }
        Command::CancelDrain { node_id, timeout } => {
            storcon_client
                .dispatch::<(), ()>(
                    Method::DELETE,
                    format!("control/v1/node/{node_id}/drain"),
                    None,
                )
                .await?;

            println!("Waiting for node {node_id} to quiesce on scheduling policy ...");

            let final_policy =
                wait_for_scheduling_policy(storcon_client, node_id, *timeout, |sched| {
                    use NodeSchedulingPolicy::*;
                    matches!(sched, Active | PauseForRestart)
                })
                .await?;

            println!(
                "Drain was cancelled for node {node_id}. Scheduling policy is now {final_policy:?}"
            );
        }
        Command::StartFill { node_id } => {
            storcon_client
                .dispatch::<(), ()>(Method::PUT, format!("control/v1/node/{node_id}/fill"), None)
                .await?;

            println!("Fill started for {node_id}");
        }
        Command::CancelFill { node_id, timeout } => {
            storcon_client
                .dispatch::<(), ()>(
                    Method::DELETE,
                    format!("control/v1/node/{node_id}/fill"),
                    None,
                )
                .await?;

            println!("Waiting for node {node_id} to quiesce on scheduling policy ...");

            let final_policy =
                wait_for_scheduling_policy(storcon_client, node_id, *timeout, |sched| {
                    use NodeSchedulingPolicy::*;
                    matches!(sched, Active)
                })
                .await?;

            println!(
                "Fill was cancelled for node {node_id}. Scheduling policy is now {final_policy:?}"
            );
        }
        Command::Safekeepers {} => {
            let mut resp = storcon_client
                .dispatch::<(), Vec<SafekeeperDescribeResponse>>(
                    Method::GET,
                    "control/v1/safekeeper".to_string(),
                    None,
                )
                .await?;

            resp.sort_by(|a, b| a.id.cmp(&b.id));

            let mut table = comfy_table::Table::new();
            table.set_header([
                "Id",
                "Version",
                "Host",
                "Port",
                "Http Port",
                "AZ Id",
                "Scheduling",
            ]);
            for sk in resp {
                table.add_row([
                    format!("{}", sk.id),
                    format!("{}", sk.version),
                    sk.host,
                    format!("{}", sk.port),
                    format!("{}", sk.http_port),
                    sk.availability_zone_id.clone(),
                    String::from(sk.scheduling_policy),
                ]);
            }
            println!("{table}");
        }
        Command::SafekeeperScheduling {
            node_id,
            scheduling_policy,
        } => {
            let scheduling_policy = scheduling_policy.0;
            storcon_client
                .dispatch::<SafekeeperSchedulingPolicyRequest, ()>(
                    Method::POST,
                    format!("control/v1/safekeeper/{node_id}/scheduling_policy"),
                    Some(SafekeeperSchedulingPolicyRequest { scheduling_policy }),
                )
                .await?;
            println!(
                "Scheduling policy of {node_id} set to {}",
                String::from(scheduling_policy)
            );
        }
        Command::DownloadHeatmapLayers {
            tenant_shard_id,
            timeline_id,
            concurrency,
        } => {
            let mut path = format!(
                "/v1/tenant/{tenant_shard_id}/timeline/{timeline_id}/download_heatmap_layers",
            );

            if let Some(c) = concurrency {
                path = format!("{path}?concurrency={c}");
            }

            storcon_client
                .dispatch::<(), ()>(Method::POST, path, None)
                .await?;
        }
    }

    Ok(())
}

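/// How often `watch_tenant_shard` polls the shard's state.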
static WATCH_INTERVAL: Duration = Duration::from_secs(5);

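/// Print a one-line summary of the shard's attached and secondary locations every
/// `WATCH_INTERVAL`. If `until_migrated_to` is set, return once the shard is attached to that
/// node and reconciliation is idle.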
async fn watch_tenant_shard(
    storcon_client: Client,
    tenant_shard_id: TenantShardId,
    until_migrated_to: Option<NodeId>,
) -> anyhow::Result<()> {
    if let Some(until_migrated_to) = until_migrated_to {
        println!(
            "Waiting for tenant shard {tenant_shard_id} to be migrated to node {until_migrated_to}"
        );
    }

    loop {
        let desc = storcon_client
            .dispatch::<(), TenantDescribeResponse>(
                Method::GET,
                format!("control/v1/tenant/{}", tenant_shard_id.tenant_id),
                None,
            )
            .await?;

        // Output the current state of the tenant shard
        let shard = desc
            .shards
            .iter()
            .find(|s| s.tenant_shard_id == tenant_shard_id)
            .ok_or(anyhow::anyhow!("Tenant shard not found"))?;
        let summary = format!(
            "attached: {} secondary: {} {}",
            shard
                .node_attached
                .map(|n| format!("{n}"))
                .unwrap_or("none".to_string()),
            shard
                .node_secondary
                .iter()
                .map(|n| n.to_string())
                .collect::<Vec<_>>()
                .join(","),
            if shard.is_reconciling {
                "(reconciler active)"
            } else {
                "(reconciler idle)"
            }
        );
        println!("{summary}");

        // Maybe drop out if we finished migration
        if let Some(until_migrated_to) = until_migrated_to {
            if shard.node_attached == Some(until_migrated_to) && !shard.is_reconciling {
                println!("Tenant shard {tenant_shard_id} is now on node {until_migrated_to}");
                break;
            }
        }

        tokio::time::sleep(WATCH_INTERVAL).await;
    }
    Ok(())
}