LCOV - code coverage report
Current view: top level - control_plane/storcon_cli/src - main.rs
Test: 1e20c4f2b28aa592527961bb32170ebbd2c9172f.info            Lines:     0.0 % (0 of 1064)
Test Date: 2025-07-16 12:29:03                                 Functions: 0.0 % (0 of 43)

            Line data    Source code
       1              : use std::collections::{HashMap, HashSet};
       2              : use std::path::PathBuf;
       3              : use std::str::FromStr;
       4              : use std::time::Duration;
       5              : 
       6              : use clap::{Parser, Subcommand};
       7              : use futures::StreamExt;
       8              : use pageserver_api::controller_api::{
       9              :     AvailabilityZone, MigrationConfig, NodeAvailabilityWrapper, NodeConfigureRequest,
      10              :     NodeDescribeResponse, NodeRegisterRequest, NodeSchedulingPolicy, NodeShardResponse,
      11              :     PlacementPolicy, SafekeeperDescribeResponse, SafekeeperSchedulingPolicyRequest,
      12              :     ShardSchedulingPolicy, ShardsPreferredAzsRequest, ShardsPreferredAzsResponse,
      13              :     SkSchedulingPolicy, TenantCreateRequest, TenantDescribeResponse, TenantPolicyRequest,
      14              :     TenantShardMigrateRequest, TenantShardMigrateResponse, TimelineSafekeeperMigrateRequest,
      15              : };
      16              : use pageserver_api::models::{
      17              :     EvictionPolicy, EvictionPolicyLayerAccessThreshold, ShardParameters, TenantConfig,
      18              :     TenantConfigPatchRequest, TenantConfigRequest, TenantShardSplitRequest,
      19              :     TenantShardSplitResponse,
      20              : };
      21              : use pageserver_api::shard::{ShardStripeSize, TenantShardId};
      22              : use pageserver_client::mgmt_api::{self};
      23              : use reqwest::{Certificate, Method, StatusCode, Url};
      24              : use safekeeper_api::models::TimelineLocateResponse;
      25              : use storage_controller_client::control_api::Client;
      26              : use utils::id::{NodeId, TenantId, TimelineId};
      27              : 
      28              : #[derive(Subcommand, Debug)]
      29              : enum Command {
      30              :     /// Register a pageserver with the storage controller.  This shouldn't usually be necessary,
      31              :     /// since pageservers auto-register when they start up
      32              :     NodeRegister {
      33              :         #[arg(long)]
      34              :         node_id: NodeId,
      35              : 
      36              :         #[arg(long)]
      37              :         listen_pg_addr: String,
      38              :         #[arg(long)]
      39              :         listen_pg_port: u16,
      40              :         #[arg(long)]
      41              :         listen_grpc_addr: Option<String>,
      42              :         #[arg(long)]
      43              :         listen_grpc_port: Option<u16>,
      44              : 
      45              :         #[arg(long)]
      46              :         listen_http_addr: String,
      47              :         #[arg(long)]
      48              :         listen_http_port: u16,
      49              :         #[arg(long)]
      50              :         listen_https_port: Option<u16>,
      51              : 
      52              :         #[arg(long)]
      53              :         availability_zone_id: String,
      54              :     },
      55              : 
      56              :     /// Modify a node's configuration in the storage controller
      57              :     NodeConfigure {
      58              :         #[arg(long)]
      59              :         node_id: NodeId,
      60              : 
      61              :         /// Availability is usually auto-detected based on heartbeats.  Set 'offline' here to
      62              :         /// manually mark a node offline
      63              :         #[arg(long)]
      64              :         availability: Option<NodeAvailabilityArg>,
      65              :         /// Scheduling policy controls whether tenant shards may be scheduled onto this node.
      66              :         #[arg(long)]
      67              :         scheduling: Option<NodeSchedulingPolicy>,
      68              :     },
       69              :     /// Exists as a backup option and will be removed in the future.
      70              :     /// Use [`Command::NodeStartDelete`] instead, if possible.
      71              :     NodeDelete {
      72              :         #[arg(long)]
      73              :         node_id: NodeId,
      74              :     },
      75              :     /// Start deletion of the specified pageserver.
      76              :     NodeStartDelete {
      77              :         #[arg(long)]
      78              :         node_id: NodeId,
      79              :     },
      80              :     /// Cancel deletion of the specified pageserver and wait for `timeout`
      81              :     /// for the operation to be canceled. May be retried.
      82              :     NodeCancelDelete {
      83              :         #[arg(long)]
      84              :         node_id: NodeId,
      85              :         #[arg(long)]
      86              :         timeout: humantime::Duration,
      87              :     },
       88              :     /// Delete a node's tombstone from the storage controller.
      89              :     /// This is used when we want to allow the node to be re-registered.
      90              :     NodeDeleteTombstone {
      91              :         #[arg(long)]
      92              :         node_id: NodeId,
      93              :     },
      94              :     /// Modify a tenant's policies in the storage controller
      95              :     TenantPolicy {
      96              :         #[arg(long)]
      97              :         tenant_id: TenantId,
      98              :         /// Placement policy controls whether a tenant is `detached`, has only a secondary location (`secondary`),
      99              :         /// or is in the normal attached state with N secondary locations (`attached:N`)
     100              :         #[arg(long)]
     101              :         placement: Option<PlacementPolicyArg>,
     102              :         /// Scheduling policy enables pausing the controller's scheduling activity involving this tenant.  `active` is normal,
      103              :         /// `essential` disables optimization-driven scheduling changes, `pause` disables all scheduling changes, and `stop` prevents
     104              :         /// all reconciliation activity including for scheduling changes already made.  `pause` and `stop` can make a tenant
     105              :         /// unavailable, and are only for use in emergencies.
     106              :         #[arg(long)]
     107              :         scheduling: Option<ShardSchedulingPolicyArg>,
     108              :     },
     109              :     /// List nodes known to the storage controller
     110              :     Nodes {},
     111              :     /// List soft deleted nodes known to the storage controller
     112              :     NodeTombstones {},
     113              :     /// List tenants known to the storage controller
     114              :     Tenants {
      115              :         /// If set, list only the tenants located on the specified node
     116              :         node_id: Option<NodeId>,
     117              :     },
     118              :     /// Create a new tenant in the storage controller, and by extension on pageservers.
     119              :     TenantCreate {
     120              :         #[arg(long)]
     121              :         tenant_id: TenantId,
     122              :     },
     123              :     /// Delete a tenant in the storage controller, and by extension on pageservers.
     124              :     TenantDelete {
     125              :         #[arg(long)]
     126              :         tenant_id: TenantId,
     127              :     },
     128              :     /// Split an existing tenant into a higher number of shards than its current shard count.
     129              :     TenantShardSplit {
     130              :         #[arg(long)]
     131              :         tenant_id: TenantId,
     132              :         #[arg(long)]
     133              :         shard_count: u8,
      134              :         /// Optional stripe size, in 8 KiB pages, e.g. 2048 for 16 MiB stripes.
     135              :         #[arg(long)]
     136              :         stripe_size: Option<u32>,
     137              :     },
     138              :     /// Migrate the attached location for a tenant shard to a specific pageserver.
     139              :     TenantShardMigrate {
     140              :         #[arg(long)]
     141              :         tenant_shard_id: TenantShardId,
     142              :         #[arg(long)]
     143              :         node: NodeId,
     144              :         #[arg(long, default_value_t = true, action = clap::ArgAction::Set)]
     145              :         prewarm: bool,
     146              :         #[arg(long, default_value_t = false, action = clap::ArgAction::Set)]
     147              :         override_scheduler: bool,
     148              :     },
     149              :     /// Watch the location of a tenant shard evolve, e.g. while expecting it to migrate
     150              :     TenantShardWatch {
     151              :         #[arg(long)]
     152              :         tenant_shard_id: TenantShardId,
     153              :     },
     154              :     /// Migrate the secondary location for a tenant shard to a specific pageserver.
     155              :     TenantShardMigrateSecondary {
     156              :         #[arg(long)]
     157              :         tenant_shard_id: TenantShardId,
     158              :         #[arg(long)]
     159              :         node: NodeId,
     160              :     },
     161              :     /// Cancel any ongoing reconciliation for this shard
     162              :     TenantShardCancelReconcile {
     163              :         #[arg(long)]
     164              :         tenant_shard_id: TenantShardId,
     165              :     },
     166              :     /// Set the pageserver tenant configuration of a tenant: this is the configuration structure
     167              :     /// that is passed through to pageservers, and does not affect storage controller behavior.
     168              :     /// Any previous tenant configs are overwritten.
     169              :     SetTenantConfig {
     170              :         #[arg(long)]
     171              :         tenant_id: TenantId,
     172              :         #[arg(long)]
     173              :         config: String,
     174              :     },
     175              :     /// Patch the pageserver tenant configuration of a tenant. Any fields with null values in the
     176              :     /// provided JSON are unset from the tenant config and all fields with non-null values are set.
     177              :     /// Unspecified fields are not changed.
     178              :     PatchTenantConfig {
     179              :         #[arg(long)]
     180              :         tenant_id: TenantId,
     181              :         #[arg(long)]
     182              :         config: String,
     183              :     },
     184              :     /// Print details about a particular tenant, including all its shards' states.
     185              :     TenantDescribe {
     186              :         #[arg(long)]
     187              :         tenant_id: TenantId,
     188              :     },
     189              :     TenantSetPreferredAz {
     190              :         #[arg(long)]
     191              :         tenant_id: TenantId,
     192              :         #[arg(long)]
     193              :         preferred_az: Option<String>,
     194              :     },
     195              :     /// Uncleanly drop a tenant from the storage controller: this doesn't delete anything from pageservers. Appropriate
     196              :     /// if you e.g. used `tenant-warmup` by mistake on a tenant ID that doesn't really exist, or is in some other region.
     197              :     TenantDrop {
     198              :         #[arg(long)]
     199              :         tenant_id: TenantId,
     200              :         #[arg(long)]
     201              :         unclean: bool,
     202              :     },
     203              :     NodeDrop {
     204              :         #[arg(long)]
     205              :         node_id: NodeId,
     206              :         #[arg(long)]
     207              :         unclean: bool,
     208              :     },
     209              :     TenantSetTimeBasedEviction {
     210              :         #[arg(long)]
     211              :         tenant_id: TenantId,
     212              :         #[arg(long)]
     213              :         period: humantime::Duration,
     214              :         #[arg(long)]
     215              :         threshold: humantime::Duration,
     216              :     },
     217              :     // Migrate away from a set of specified pageservers by moving the primary attachments to pageservers
     218              :     // outside of the specified set.
     219              :     BulkMigrate {
     220              :         // Set of pageserver node ids to drain.
     221              :         #[arg(long)]
     222              :         nodes: Vec<NodeId>,
     223              :         // Optional: migration concurrency (default is 8)
     224              :         #[arg(long)]
     225              :         concurrency: Option<usize>,
     226              :         // Optional: maximum number of shards to migrate
     227              :         #[arg(long)]
     228              :         max_shards: Option<usize>,
     229              :         // Optional: when set to true, nothing is migrated, but the plan is printed to stdout
     230              :         #[arg(long)]
     231              :         dry_run: Option<bool>,
     232              :     },
     233              :     /// Start draining the specified pageserver.
      234              :     /// The drain is complete when the scheduling policy returns to active.
     235              :     StartDrain {
     236              :         #[arg(long)]
     237              :         node_id: NodeId,
     238              :     },
     239              :     /// Cancel draining the specified pageserver and wait for `timeout`
     240              :     /// for the operation to be canceled. May be retried.
     241              :     CancelDrain {
     242              :         #[arg(long)]
     243              :         node_id: NodeId,
     244              :         #[arg(long)]
     245              :         timeout: humantime::Duration,
     246              :     },
     247              :     /// Start filling the specified pageserver.
      248              :     /// The fill is complete when the scheduling policy returns to active.
     249              :     StartFill {
     250              :         #[arg(long)]
     251              :         node_id: NodeId,
     252              :     },
     253              :     /// Cancel filling the specified pageserver and wait for `timeout`
     254              :     /// for the operation to be canceled. May be retried.
     255              :     CancelFill {
     256              :         #[arg(long)]
     257              :         node_id: NodeId,
     258              :         #[arg(long)]
     259              :         timeout: humantime::Duration,
     260              :     },
     261              :     /// List safekeepers known to the storage controller
     262              :     Safekeepers {},
     263              :     /// Set the scheduling policy of the specified safekeeper
     264              :     SafekeeperScheduling {
     265              :         #[arg(long)]
     266              :         node_id: NodeId,
     267              :         #[arg(long)]
     268              :         scheduling_policy: SkSchedulingPolicyArg,
     269              :     },
      270              :     /// Download any missing heatmap layers for all shards of a given timeline
     271              :     DownloadHeatmapLayers {
     272              :         /// Tenant ID or tenant shard ID. When an unsharded tenant ID is specified,
     273              :         /// the operation is performed on all shards. When a sharded tenant ID is
     274              :         /// specified, the operation is only performed on the specified shard.
     275              :         #[arg(long)]
     276              :         tenant_shard_id: TenantShardId,
     277              :         #[arg(long)]
     278              :         timeline_id: TimelineId,
     279              :         /// Optional: Maximum download concurrency (default is 16)
     280              :         #[arg(long)]
     281              :         concurrency: Option<usize>,
     282              :     },
     283              :     /// Locate safekeepers for a timeline from the storcon DB.
     284              :     TimelineLocate {
     285              :         #[arg(long)]
     286              :         tenant_id: TenantId,
     287              :         #[arg(long)]
     288              :         timeline_id: TimelineId,
     289              :     },
     290              :     /// Migrate a timeline to a new set of safekeepers
     291              :     TimelineSafekeeperMigrate {
     292              :         #[arg(long)]
     293              :         tenant_id: TenantId,
     294              :         #[arg(long)]
     295              :         timeline_id: TimelineId,
     296              :         /// Example: --new-sk-set 1,2,3
     297              :         #[arg(long, required = true, value_delimiter = ',')]
     298              :         new_sk_set: Vec<NodeId>,
     299              :     },
     300              : }
     301              : 
     302              : #[derive(Parser)]
     303              : #[command(
     304              :     author,
     305              :     version,
     306              :     about,
     307              :     long_about = "CLI for Storage Controller Support/Debug"
     308              : )]
     309              : #[command(arg_required_else_help(true))]
     310              : struct Cli {
     311              :     #[arg(long)]
     312              :     /// URL to storage controller.  e.g. http://127.0.0.1:1234 when using `neon_local`
     313              :     api: Url,
     314              : 
     315              :     #[arg(long)]
     316              :     /// JWT token for authenticating with storage controller.  Depending on the API used, this
     317              :     /// should have either `pageserverapi` or `admin` scopes: for convenience, you should mint
     318              :     /// a token with both scopes to use with this tool.
     319              :     jwt: Option<String>,
     320              : 
     321              :     #[arg(long)]
      322              :     /// Trusted root CA certificates to use when calling HTTPS APIs.
     323              :     ssl_ca_file: Option<PathBuf>,
     324              : 
     325              :     #[command(subcommand)]
     326              :     command: Command,
     327              : }
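
// Illustrative invocations, as a rough sketch: these assume the binary is built as
// `storcon_cli` and rely on clap's default kebab-case names for the subcommands and
// flags declared above; the URL, tenant ID and values below are placeholders.
//
//   # Manually mark a node offline (availability is otherwise detected via heartbeats)
//   storcon_cli --api http://127.0.0.1:1234 node-configure --node-id 1 --availability offline
//
//   # Put a tenant in the normal attached state with one secondary location
//   storcon_cli --api http://127.0.0.1:1234 tenant-policy --tenant-id <TENANT_ID> --placement attached:1
//
//   # Split a tenant into 8 shards with 16 MiB stripes (2048 pages x 8 KiB)
//   storcon_cli --api http://127.0.0.1:1234 tenant-shard-split --tenant-id <TENANT_ID> --shard-count 8 --stripe-size 2048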
     328              : 
     329              : #[derive(Debug, Clone)]
     330              : struct PlacementPolicyArg(PlacementPolicy);
     331              : 
     332              : impl FromStr for PlacementPolicyArg {
     333              :     type Err = anyhow::Error;
     334              : 
     335            0 :     fn from_str(s: &str) -> Result<Self, Self::Err> {
     336            0 :         match s {
     337            0 :             "detached" => Ok(Self(PlacementPolicy::Detached)),
     338            0 :             "secondary" => Ok(Self(PlacementPolicy::Secondary)),
     339            0 :             _ if s.starts_with("attached:") => {
     340            0 :                 let mut splitter = s.split(':');
     341            0 :                 let _prefix = splitter.next().unwrap();
     342            0 :                 match splitter.next().and_then(|s| s.parse::<usize>().ok()) {
     343            0 :                     Some(n) => Ok(Self(PlacementPolicy::Attached(n))),
     344            0 :                     None => Err(anyhow::anyhow!(
     345            0 :                         "Invalid format '{s}', a valid example is 'attached:1'"
     346            0 :                     )),
     347              :                 }
     348              :             }
     349            0 :             _ => Err(anyhow::anyhow!(
     350            0 :                 "Unknown placement policy '{s}', try detached,secondary,attached:<n>"
     351            0 :             )),
     352              :         }
     353            0 :     }
     354              : }
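
// A minimal test sketch of how `PlacementPolicyArg` parses the `--placement` values
// documented on `Command::TenantPolicy`; the module and test names are arbitrary and
// only items already defined in this file are used.
#[cfg(test)]
mod placement_policy_arg_tests {
    use super::*;

    #[test]
    fn parses_documented_placement_values() {
        // Plain states map to the corresponding policies.
        assert!(matches!(
            "detached".parse::<PlacementPolicyArg>(),
            Ok(PlacementPolicyArg(PlacementPolicy::Detached))
        ));
        // `attached:N` carries the number of secondary locations.
        assert!(matches!(
            "attached:2".parse::<PlacementPolicyArg>(),
            Ok(PlacementPolicyArg(PlacementPolicy::Attached(2)))
        ));
        // A missing or non-numeric count, or an unknown keyword, is rejected.
        assert!("attached:".parse::<PlacementPolicyArg>().is_err());
        assert!("banana".parse::<PlacementPolicyArg>().is_err());
    }
}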
     355              : 
     356              : #[derive(Debug, Clone)]
     357              : struct SkSchedulingPolicyArg(SkSchedulingPolicy);
     358              : 
     359              : impl FromStr for SkSchedulingPolicyArg {
     360              :     type Err = anyhow::Error;
     361              : 
     362            0 :     fn from_str(s: &str) -> Result<Self, Self::Err> {
     363            0 :         SkSchedulingPolicy::from_str(s).map(Self)
     364            0 :     }
     365              : }
     366              : 
     367              : #[derive(Debug, Clone)]
     368              : struct ShardSchedulingPolicyArg(ShardSchedulingPolicy);
     369              : 
     370              : impl FromStr for ShardSchedulingPolicyArg {
     371              :     type Err = anyhow::Error;
     372              : 
     373            0 :     fn from_str(s: &str) -> Result<Self, Self::Err> {
     374            0 :         match s {
     375            0 :             "active" => Ok(Self(ShardSchedulingPolicy::Active)),
     376            0 :             "essential" => Ok(Self(ShardSchedulingPolicy::Essential)),
     377            0 :             "pause" => Ok(Self(ShardSchedulingPolicy::Pause)),
     378            0 :             "stop" => Ok(Self(ShardSchedulingPolicy::Stop)),
     379            0 :             _ => Err(anyhow::anyhow!(
     380            0 :                 "Unknown scheduling policy '{s}', try active,essential,pause,stop"
     381            0 :             )),
     382              :         }
     383            0 :     }
     384              : }
     385              : 
     386              : #[derive(Debug, Clone)]
     387              : struct NodeAvailabilityArg(NodeAvailabilityWrapper);
     388              : 
     389              : impl FromStr for NodeAvailabilityArg {
     390              :     type Err = anyhow::Error;
     391              : 
     392            0 :     fn from_str(s: &str) -> Result<Self, Self::Err> {
     393            0 :         match s {
     394            0 :             "active" => Ok(Self(NodeAvailabilityWrapper::Active)),
     395            0 :             "offline" => Ok(Self(NodeAvailabilityWrapper::Offline)),
     396            0 :             _ => Err(anyhow::anyhow!("Unknown availability state '{s}'")),
     397              :         }
     398            0 :     }
     399              : }
     400              : 
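/// Poll the storage controller's node describe endpoint until the predicate `f` accepts the
/// node's current scheduling policy, or until `timeout` elapses. Note that the loop below
/// issues requests back-to-back, with no delay between polls.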
     401            0 : async fn wait_for_scheduling_policy<F>(
     402            0 :     client: Client,
     403            0 :     node_id: NodeId,
     404            0 :     timeout: Duration,
     405            0 :     f: F,
     406            0 : ) -> anyhow::Result<NodeSchedulingPolicy>
     407            0 : where
     408            0 :     F: Fn(NodeSchedulingPolicy) -> bool,
     409            0 : {
     410            0 :     let waiter = tokio::time::timeout(timeout, async move {
     411              :         loop {
     412            0 :             let node = client
     413            0 :                 .dispatch::<(), NodeDescribeResponse>(
     414            0 :                     Method::GET,
     415            0 :                     format!("control/v1/node/{node_id}"),
     416            0 :                     None,
     417            0 :                 )
     418            0 :                 .await?;
     419              : 
     420            0 :             if f(node.scheduling) {
     421            0 :                 return Ok::<NodeSchedulingPolicy, mgmt_api::Error>(node.scheduling);
     422            0 :             }
     423              :         }
     424            0 :     });
     425              : 
     426            0 :     Ok(waiter.await??)
     427            0 : }
     428              : 
     429              : #[tokio::main]
     430            0 : async fn main() -> anyhow::Result<()> {
     431            0 :     let cli = Cli::parse();
     432              : 
     433            0 :     let ssl_ca_certs = match &cli.ssl_ca_file {
     434            0 :         Some(ssl_ca_file) => {
     435            0 :             let buf = tokio::fs::read(ssl_ca_file).await?;
     436            0 :             Certificate::from_pem_bundle(&buf)?
     437              :         }
     438            0 :         None => Vec::new(),
     439              :     };
     440              : 
     441            0 :     let mut http_client = reqwest::Client::builder();
     442            0 :     for ssl_ca_cert in ssl_ca_certs {
     443            0 :         http_client = http_client.add_root_certificate(ssl_ca_cert);
     444            0 :     }
     445            0 :     let http_client = http_client.build()?;
     446              : 
     447            0 :     let storcon_client = Client::new(http_client.clone(), cli.api.clone(), cli.jwt.clone());
     448              : 
     449            0 :     let mut trimmed = cli.api.to_string();
     450            0 :     trimmed.pop();
     451            0 :     let vps_client = mgmt_api::Client::new(http_client.clone(), trimmed, cli.jwt.as_deref());
     452              : 
     453            0 :     match cli.command {
     454            0 :         Command::NodeRegister {
     455            0 :             node_id,
     456            0 :             listen_pg_addr,
     457            0 :             listen_pg_port,
     458            0 :             listen_grpc_addr,
     459            0 :             listen_grpc_port,
     460            0 :             listen_http_addr,
     461            0 :             listen_http_port,
     462            0 :             listen_https_port,
     463            0 :             availability_zone_id,
     464            0 :         } => {
     465            0 :             storcon_client
     466            0 :                 .dispatch::<_, ()>(
     467            0 :                     Method::POST,
     468            0 :                     "control/v1/node".to_string(),
     469            0 :                     Some(NodeRegisterRequest {
     470            0 :                         node_id,
     471            0 :                         listen_pg_addr,
     472            0 :                         listen_pg_port,
     473            0 :                         listen_grpc_addr,
     474            0 :                         listen_grpc_port,
     475            0 :                         listen_http_addr,
     476            0 :                         listen_http_port,
     477            0 :                         listen_https_port,
     478            0 :                         availability_zone_id: AvailabilityZone(availability_zone_id),
     479            0 :                         node_ip_addr: None,
     480            0 :                     }),
     481            0 :                 )
     482            0 :                 .await?;
     483            0 :         }
     484            0 :         Command::TenantCreate { tenant_id } => {
     485            0 :             storcon_client
     486            0 :                 .dispatch::<_, ()>(
     487            0 :                     Method::POST,
     488            0 :                     "v1/tenant".to_string(),
     489            0 :                     Some(TenantCreateRequest {
     490            0 :                         new_tenant_id: TenantShardId::unsharded(tenant_id),
     491            0 :                         generation: None,
     492            0 :                         shard_parameters: ShardParameters::default(),
     493            0 :                         placement_policy: Some(PlacementPolicy::Attached(1)),
     494            0 :                         config: TenantConfig::default(),
     495            0 :                     }),
     496            0 :                 )
     497            0 :                 .await?;
     498            0 :         }
     499            0 :         Command::TenantDelete { tenant_id } => {
     500            0 :             let status = vps_client
     501            0 :                 .tenant_delete(TenantShardId::unsharded(tenant_id))
     502            0 :                 .await?;
     503            0 :             tracing::info!("Delete status: {}", status);
     504            0 :         }
     505            0 :         Command::Nodes {} => {
     506            0 :             let mut resp = storcon_client
     507            0 :                 .dispatch::<(), Vec<NodeDescribeResponse>>(
     508            0 :                     Method::GET,
     509            0 :                     "control/v1/node".to_string(),
     510            0 :                     None,
     511            0 :                 )
     512            0 :                 .await?;
     513            0 : 
     514            0 :             resp.sort_by(|a, b| a.listen_http_addr.cmp(&b.listen_http_addr));
     515            0 : 
     516            0 :             let mut table = comfy_table::Table::new();
     517            0 :             table.set_header(["Id", "Hostname", "AZ", "Scheduling", "Availability"]);
     518            0 :             for node in resp {
     519            0 :                 table.add_row([
     520            0 :                     format!("{}", node.id),
     521            0 :                     node.listen_http_addr,
     522            0 :                     node.availability_zone_id,
     523            0 :                     format!("{:?}", node.scheduling),
     524            0 :                     format!("{:?}", node.availability),
     525            0 :                 ]);
     526            0 :             }
     527            0 :             println!("{table}");
     528            0 :         }
     529            0 :         Command::NodeConfigure {
     530            0 :             node_id,
     531            0 :             availability,
     532            0 :             scheduling,
     533            0 :         } => {
     534            0 :             let req = NodeConfigureRequest {
     535            0 :                 node_id,
     536            0 :                 availability: availability.map(|a| a.0),
     537            0 :                 scheduling,
     538            0 :             };
     539            0 :             storcon_client
     540            0 :                 .dispatch::<_, ()>(
     541            0 :                     Method::PUT,
     542            0 :                     format!("control/v1/node/{node_id}/config"),
     543            0 :                     Some(req),
     544            0 :                 )
     545            0 :                 .await?;
     546            0 :         }
     547            0 :         Command::Tenants {
     548            0 :             node_id: Some(node_id),
     549            0 :         } => {
     550            0 :             let describe_response = storcon_client
     551            0 :                 .dispatch::<(), NodeShardResponse>(
     552            0 :                     Method::GET,
     553            0 :                     format!("control/v1/node/{node_id}/shards"),
     554            0 :                     None,
     555            0 :                 )
     556            0 :                 .await?;
     557            0 :             let shards = describe_response.shards;
     558            0 :             let mut table = comfy_table::Table::new();
     559            0 :             table.set_header([
     560            0 :                 "Shard",
     561            0 :                 "Intended Primary/Secondary",
     562            0 :                 "Observed Primary/Secondary",
     563            0 :             ]);
     564            0 :             for shard in shards {
     565            0 :                 table.add_row([
     566            0 :                     format!("{}", shard.tenant_shard_id),
     567            0 :                     match shard.is_intended_secondary {
     568            0 :                         None => "".to_string(),
     569            0 :                         Some(true) => "Secondary".to_string(),
     570            0 :                         Some(false) => "Primary".to_string(),
     571            0 :                     },
     572            0 :                     match shard.is_observed_secondary {
     573            0 :                         None => "".to_string(),
     574            0 :                         Some(true) => "Secondary".to_string(),
     575            0 :                         Some(false) => "Primary".to_string(),
     576            0 :                     },
     577            0 :                 ]);
     578            0 :             }
     579            0 :             println!("{table}");
     580            0 :         }
     581            0 :         Command::Tenants { node_id: None } => {
     582            0 :             // Set up output formatting
     583            0 :             let mut table = comfy_table::Table::new();
     584            0 :             table.set_header([
     585            0 :                 "TenantId",
     586            0 :                 "Preferred AZ",
     587            0 :                 "ShardCount",
     588            0 :                 "StripeSize",
     589            0 :                 "Placement",
     590            0 :                 "Scheduling",
     591            0 :             ]);
     592            0 : 
     593            0 :             // Pagination loop over listing API
     594            0 :             let mut start_after = None;
     595            0 :             const LIMIT: usize = 1000;
     596            0 :             loop {
     597            0 :                 let path = match start_after {
     598            0 :                     None => format!("control/v1/tenant?limit={LIMIT}"),
     599            0 :                     Some(start_after) => {
     600            0 :                         format!("control/v1/tenant?limit={LIMIT}&start_after={start_after}")
     601            0 :                     }
     602            0 :                 };
     603            0 : 
     604            0 :                 let resp = storcon_client
     605            0 :                     .dispatch::<(), Vec<TenantDescribeResponse>>(Method::GET, path, None)
     606            0 :                     .await?;
     607            0 : 
     608            0 :                 if resp.is_empty() {
     609            0 :                     // End of data reached
     610            0 :                     break;
     611            0 :                 }
     612            0 : 
     613            0 :                 // Give some visual feedback while we're building up the table (comfy_table doesn't have
     614            0 :                 // streaming output)
     615            0 :                 if resp.len() >= LIMIT {
     616            0 :                     eprint!(".");
     617            0 :                 }
     618            0 : 
     619            0 :                 start_after = Some(resp.last().unwrap().tenant_id);
     620            0 : 
     621            0 :                 for tenant in resp {
     622            0 :                     let shard_zero = tenant.shards.into_iter().next().unwrap();
     623            0 :                     table.add_row([
     624            0 :                         format!("{}", tenant.tenant_id),
     625            0 :                         shard_zero
     626            0 :                             .preferred_az_id
     627            0 :                             .as_ref()
     628            0 :                             .cloned()
     629            0 :                             .unwrap_or("".to_string()),
     630            0 :                         format!("{}", shard_zero.tenant_shard_id.shard_count.literal()),
     631            0 :                         format!("{:?}", tenant.stripe_size),
     632            0 :                         format!("{:?}", tenant.policy),
     633            0 :                         format!("{:?}", shard_zero.scheduling_policy),
     634            0 :                     ]);
     635            0 :                 }
     636            0 :             }
     637            0 : 
     638            0 :             // Terminate progress dots
     639            0 :             if table.row_count() > LIMIT {
      640            0 :                 eprintln!();
     641            0 :             }
     642            0 : 
     643            0 :             println!("{table}");
     644            0 :         }
     645            0 :         Command::TenantPolicy {
     646            0 :             tenant_id,
     647            0 :             placement,
     648            0 :             scheduling,
     649            0 :         } => {
     650            0 :             let req = TenantPolicyRequest {
     651            0 :                 scheduling: scheduling.map(|s| s.0),
     652            0 :                 placement: placement.map(|p| p.0),
     653            0 :             };
     654            0 :             storcon_client
     655            0 :                 .dispatch::<_, ()>(
     656            0 :                     Method::PUT,
     657            0 :                     format!("control/v1/tenant/{tenant_id}/policy"),
     658            0 :                     Some(req),
     659            0 :                 )
     660            0 :                 .await?;
     661            0 :         }
     662            0 :         Command::TenantShardSplit {
     663            0 :             tenant_id,
     664            0 :             shard_count,
     665            0 :             stripe_size,
     666            0 :         } => {
     667            0 :             let req = TenantShardSplitRequest {
     668            0 :                 new_shard_count: shard_count,
     669            0 :                 new_stripe_size: stripe_size.map(ShardStripeSize),
     670            0 :             };
     671            0 : 
     672            0 :             let response = storcon_client
     673            0 :                 .dispatch::<TenantShardSplitRequest, TenantShardSplitResponse>(
     674            0 :                     Method::PUT,
     675            0 :                     format!("control/v1/tenant/{tenant_id}/shard_split"),
     676            0 :                     Some(req),
     677            0 :                 )
     678            0 :                 .await?;
     679            0 :             println!(
     680            0 :                 "Split tenant {} into {} shards: {}",
     681            0 :                 tenant_id,
     682            0 :                 shard_count,
     683            0 :                 response
     684            0 :                     .new_shards
     685            0 :                     .iter()
     686            0 :                     .map(|s| format!("{s:?}"))
     687            0 :                     .collect::<Vec<_>>()
     688            0 :                     .join(",")
     689            0 :             );
     690            0 :         }
     691            0 :         Command::TenantShardMigrate {
     692            0 :             tenant_shard_id,
     693            0 :             node,
     694            0 :             prewarm,
     695            0 :             override_scheduler,
     696            0 :         } => {
     697            0 :             let migration_config = MigrationConfig {
     698            0 :                 prewarm,
     699            0 :                 override_scheduler,
     700            0 :                 ..Default::default()
     701            0 :             };
     702            0 : 
     703            0 :             let req = TenantShardMigrateRequest {
     704            0 :                 node_id: node,
     705            0 :                 origin_node_id: None,
     706            0 :                 migration_config,
     707            0 :             };
     708            0 : 
     709            0 :             match storcon_client
     710            0 :                 .dispatch::<TenantShardMigrateRequest, TenantShardMigrateResponse>(
     711            0 :                     Method::PUT,
     712            0 :                     format!("control/v1/tenant/{tenant_shard_id}/migrate"),
     713            0 :                     Some(req),
     714            0 :                 )
     715            0 :                 .await
     716            0 :             {
     717            0 :                 Err(mgmt_api::Error::ApiError(StatusCode::PRECONDITION_FAILED, msg)) => {
     718            0 :                     anyhow::bail!(
      719            0 :                         "Migration to {node} rejected, may require `--override-scheduler` ({})",
     720            0 :                         msg
     721            0 :                     );
     722            0 :                 }
     723            0 :                 Err(e) => return Err(e.into()),
     724            0 :                 Ok(_) => {}
     725            0 :             }
     726            0 : 
     727            0 :             watch_tenant_shard(storcon_client, tenant_shard_id, Some(node)).await?;
     728            0 :         }
     729            0 :         Command::TenantShardWatch { tenant_shard_id } => {
     730            0 :             watch_tenant_shard(storcon_client, tenant_shard_id, None).await?;
     731            0 :         }
     732            0 :         Command::TenantShardMigrateSecondary {
     733            0 :             tenant_shard_id,
     734            0 :             node,
     735            0 :         } => {
     736            0 :             let req = TenantShardMigrateRequest {
     737            0 :                 node_id: node,
     738            0 :                 origin_node_id: None,
     739            0 :                 migration_config: MigrationConfig::default(),
     740            0 :             };
     741            0 : 
     742            0 :             storcon_client
     743            0 :                 .dispatch::<TenantShardMigrateRequest, TenantShardMigrateResponse>(
     744            0 :                     Method::PUT,
     745            0 :                     format!("control/v1/tenant/{tenant_shard_id}/migrate_secondary"),
     746            0 :                     Some(req),
     747            0 :                 )
     748            0 :                 .await?;
     749            0 :         }
     750            0 :         Command::TenantShardCancelReconcile { tenant_shard_id } => {
     751            0 :             storcon_client
     752            0 :                 .dispatch::<(), ()>(
     753            0 :                     Method::PUT,
     754            0 :                     format!("control/v1/tenant/{tenant_shard_id}/cancel_reconcile"),
     755            0 :                     None,
     756            0 :                 )
     757            0 :                 .await?;
     758            0 :         }
     759            0 :         Command::SetTenantConfig { tenant_id, config } => {
     760            0 :             let tenant_conf = serde_json::from_str(&config)?;
     761            0 : 
     762            0 :             vps_client
     763            0 :                 .set_tenant_config(&TenantConfigRequest {
     764            0 :                     tenant_id,
     765            0 :                     config: tenant_conf,
     766            0 :                 })
     767            0 :                 .await?;
     768            0 :         }
     769            0 :         Command::PatchTenantConfig { tenant_id, config } => {
     770            0 :             let tenant_conf = serde_json::from_str(&config)?;
     771            0 : 
     772            0 :             vps_client
     773            0 :                 .patch_tenant_config(&TenantConfigPatchRequest {
     774            0 :                     tenant_id,
     775            0 :                     config: tenant_conf,
     776            0 :                 })
     777            0 :                 .await?;
     778            0 :         }
     779            0 :         Command::TenantDescribe { tenant_id } => {
     780            0 :             let TenantDescribeResponse {
     781            0 :                 tenant_id,
     782            0 :                 shards,
     783            0 :                 stripe_size,
     784            0 :                 policy,
     785            0 :                 config,
     786            0 :             } = storcon_client
     787            0 :                 .dispatch::<(), TenantDescribeResponse>(
     788            0 :                     Method::GET,
     789            0 :                     format!("control/v1/tenant/{tenant_id}"),
     790            0 :                     None,
     791            0 :                 )
     792            0 :                 .await?;
     793            0 : 
     794            0 :             let nodes = storcon_client
     795            0 :                 .dispatch::<(), Vec<NodeDescribeResponse>>(
     796            0 :                     Method::GET,
     797            0 :                     "control/v1/node".to_string(),
     798            0 :                     None,
     799            0 :                 )
     800            0 :                 .await?;
     801            0 :             let nodes = nodes
     802            0 :                 .into_iter()
     803            0 :                 .map(|n| (n.id, n))
     804            0 :                 .collect::<HashMap<_, _>>();
     805            0 : 
     806            0 :             println!("Tenant {tenant_id}");
     807            0 :             let mut table = comfy_table::Table::new();
     808            0 :             table.add_row(["Policy", &format!("{policy:?}")]);
     809            0 :             table.add_row(["Stripe size", &format!("{stripe_size:?}")]);
     810            0 :             table.add_row(["Config", &serde_json::to_string_pretty(&config).unwrap()]);
     811            0 :             println!("{table}");
     812            0 :             println!("Shards:");
     813            0 :             let mut table = comfy_table::Table::new();
     814            0 :             table.set_header([
     815            0 :                 "Shard",
     816            0 :                 "Attached",
     817            0 :                 "Attached AZ",
     818            0 :                 "Secondary",
     819            0 :                 "Last error",
     820            0 :                 "status",
     821            0 :             ]);
     822            0 :             for shard in shards {
     823            0 :                 let secondary = shard
     824            0 :                     .node_secondary
     825            0 :                     .iter()
     826            0 :                     .map(|n| format!("{n}"))
     827            0 :                     .collect::<Vec<_>>()
     828            0 :                     .join(",");
     829            0 : 
     830            0 :                 let mut status_parts = Vec::new();
     831            0 :                 if shard.is_reconciling {
     832            0 :                     status_parts.push("reconciling");
     833            0 :                 }
     834            0 : 
     835            0 :                 if shard.is_pending_compute_notification {
     836            0 :                     status_parts.push("pending_compute");
     837            0 :                 }
     838            0 : 
     839            0 :                 if shard.is_splitting {
     840            0 :                     status_parts.push("splitting");
     841            0 :                 }
     842            0 :                 let status = status_parts.join(",");
     843            0 : 
     844            0 :                 let attached_node = shard
     845            0 :                     .node_attached
     846            0 :                     .as_ref()
     847            0 :                     .map(|id| nodes.get(id).expect("Shard references nonexistent node"));
     848            0 : 
     849            0 :                 table.add_row([
     850            0 :                     format!("{}", shard.tenant_shard_id),
     851            0 :                     attached_node
     852            0 :                         .map(|n| format!("{} ({})", n.listen_http_addr, n.id))
     853            0 :                         .unwrap_or(String::new()),
     854            0 :                     attached_node
     855            0 :                         .map(|n| n.availability_zone_id.clone())
     856            0 :                         .unwrap_or(String::new()),
     857            0 :                     secondary,
     858            0 :                     shard.last_error,
     859            0 :                     status,
     860            0 :                 ]);
     861            0 :             }
     862            0 :             println!("{table}");
     863            0 :         }
     864            0 :         Command::TenantSetPreferredAz {
     865            0 :             tenant_id,
     866            0 :             preferred_az,
     867            0 :         } => {
     868            0 :             // First learn about the tenant's shards
     869            0 :             let describe_response = storcon_client
     870            0 :                 .dispatch::<(), TenantDescribeResponse>(
     871            0 :                     Method::GET,
     872            0 :                     format!("control/v1/tenant/{tenant_id}"),
     873            0 :                     None,
     874            0 :                 )
     875            0 :                 .await?;
     876            0 : 
     877            0 :             // Learn about nodes to validate the AZ ID
     878            0 :             let nodes = storcon_client
     879            0 :                 .dispatch::<(), Vec<NodeDescribeResponse>>(
     880            0 :                     Method::GET,
     881            0 :                     "control/v1/node".to_string(),
     882            0 :                     None,
     883            0 :                 )
     884            0 :                 .await?;
     885            0 : 
     886            0 :             if let Some(preferred_az) = &preferred_az {
     887            0 :                 let azs = nodes
     888            0 :                     .into_iter()
     889            0 :                     .map(|n| (n.availability_zone_id))
     890            0 :                     .collect::<HashSet<_>>();
     891            0 :                 if !azs.contains(preferred_az) {
     892            0 :                     anyhow::bail!(
     893            0 :                         "AZ {} not found on any node: known AZs are: {:?}",
     894            0 :                         preferred_az,
     895            0 :                         azs
     896            0 :                     );
     897            0 :                 }
     898            0 :             } else {
     899            0 :                 // Make it obvious to the user that since they've omitted an AZ, we're clearing it
      900            0 :                 // Make it obvious to the user that, since they omitted an AZ, we are clearing it
     901            0 :             }
     902            0 : 
     903            0 :             // Construct a request that modifies all the tenant's shards
     904            0 :             let req = ShardsPreferredAzsRequest {
     905            0 :                 preferred_az_ids: describe_response
     906            0 :                     .shards
     907            0 :                     .into_iter()
     908            0 :                     .map(|s| {
     909            0 :                         (
     910            0 :                             s.tenant_shard_id,
     911            0 :                             preferred_az.clone().map(AvailabilityZone),
     912            0 :                         )
     913            0 :                     })
     914            0 :                     .collect(),
     915            0 :             };
     916            0 :             storcon_client
     917            0 :                 .dispatch::<ShardsPreferredAzsRequest, ShardsPreferredAzsResponse>(
     918            0 :                     Method::PUT,
     919            0 :                     "control/v1/preferred_azs".to_string(),
     920            0 :                     Some(req),
     921            0 :                 )
     922            0 :                 .await?;
     923            0 :         }
     924            0 :         Command::TenantDrop { tenant_id, unclean } => {
     925            0 :             if !unclean {
     926            0 :                 anyhow::bail!(
     927            0 :                     "This command is not a tenant deletion, and uncleanly drops all controller state for the tenant.  If you know what you're doing, add `--unclean` to proceed."
     928            0 :                 )
     929            0 :             }
     930            0 :             storcon_client
     931            0 :                 .dispatch::<(), ()>(
     932            0 :                     Method::POST,
     933            0 :                     format!("debug/v1/tenant/{tenant_id}/drop"),
     934            0 :                     None,
     935            0 :                 )
     936            0 :                 .await?;
     937            0 :         }
     938            0 :         Command::NodeDrop { node_id, unclean } => {
     939            0 :             if !unclean {
     940            0 :                 anyhow::bail!(
     941            0 :                     "This command is not a clean node decommission, and uncleanly drops all controller state for the node, without checking if any tenants still refer to it.  If you know what you're doing, add `--unclean` to proceed."
     942            0 :                 )
     943            0 :             }
     944            0 :             storcon_client
     945            0 :                 .dispatch::<(), ()>(Method::POST, format!("debug/v1/node/{node_id}/drop"), None)
     946            0 :                 .await?;
     947            0 :         }
     948            0 :         Command::NodeDelete { node_id } => {
     949            0 :             eprintln!("Warning: This command is obsolete and will be removed in a future version");
     950            0 :             eprintln!("Use `NodeStartDelete` instead, if possible");
     951            0 :             storcon_client
     952            0 :                 .dispatch::<(), ()>(Method::DELETE, format!("control/v1/node/{node_id}"), None)
     953            0 :                 .await?;
     954            0 :         }
     955            0 :         Command::NodeStartDelete { node_id } => {
     956            0 :             storcon_client
     957            0 :                 .dispatch::<(), ()>(
     958            0 :                     Method::PUT,
     959            0 :                     format!("control/v1/node/{node_id}/delete"),
     960            0 :                     None,
     961            0 :                 )
     962            0 :                 .await?;
     963            0 :             println!("Delete started for {node_id}");
     964            0 :         }
     965            0 :         Command::NodeCancelDelete { node_id, timeout } => {
     966            0 :             storcon_client
     967            0 :                 .dispatch::<(), ()>(
     968            0 :                     Method::DELETE,
     969            0 :                     format!("control/v1/node/{node_id}/delete"),
     970            0 :                     None,
     971            0 :                 )
     972            0 :                 .await?;
     973            0 : 
     974            0 :             println!("Waiting for node {node_id} to quiesce on scheduling policy ...");
     975            0 : 
     976            0 :             let final_policy =
     977            0 :                 wait_for_scheduling_policy(storcon_client, node_id, *timeout, |sched| {
     978            0 :                     !matches!(sched, NodeSchedulingPolicy::Deleting)
     979            0 :                 })
     980            0 :                 .await?;
     981            0 : 
     982            0 :             println!(
     983            0 :                 "Delete was cancelled for node {node_id}. Scheduling policy is now {final_policy:?}"
     984            0 :             );
     985            0 :         }
     986            0 :         Command::NodeDeleteTombstone { node_id } => {
     987            0 :             storcon_client
     988            0 :                 .dispatch::<(), ()>(
     989            0 :                     Method::DELETE,
     990            0 :                     format!("debug/v1/tombstone/{node_id}"),
     991            0 :                     None,
     992            0 :                 )
     993            0 :                 .await?;
     994            0 :         }
     995            0 :         Command::NodeTombstones {} => {
     996            0 :             let mut resp = storcon_client
     997            0 :                 .dispatch::<(), Vec<NodeDescribeResponse>>(
     998            0 :                     Method::GET,
     999            0 :                     "debug/v1/tombstone".to_string(),
    1000            0 :                     None,
    1001            0 :                 )
    1002            0 :                 .await?;
    1003            0 : 
    1004            0 :             resp.sort_by(|a, b| a.listen_http_addr.cmp(&b.listen_http_addr));
    1005            0 : 
    1006            0 :             let mut table = comfy_table::Table::new();
    1007            0 :             table.set_header(["Id", "Hostname", "AZ", "Scheduling", "Availability"]);
    1008            0 :             for node in resp {
    1009            0 :                 table.add_row([
    1010            0 :                     format!("{}", node.id),
    1011            0 :                     node.listen_http_addr,
    1012            0 :                     node.availability_zone_id,
    1013            0 :                     format!("{:?}", node.scheduling),
    1014            0 :                     format!("{:?}", node.availability),
    1015            0 :                 ]);
    1016            0 :             }
    1017            0 :             println!("{table}");
    1018            0 :         }
    1019            0 :         Command::TenantSetTimeBasedEviction {
    1020            0 :             tenant_id,
    1021            0 :             period,
    1022            0 :             threshold,
    1023            0 :         } => {
    1024            0 :             vps_client
    1025            0 :                 .set_tenant_config(&TenantConfigRequest {
    1026            0 :                     tenant_id,
    1027            0 :                     config: TenantConfig {
    1028            0 :                         eviction_policy: Some(EvictionPolicy::LayerAccessThreshold(
    1029            0 :                             EvictionPolicyLayerAccessThreshold {
    1030            0 :                                 period: period.into(),
    1031            0 :                                 threshold: threshold.into(),
    1032            0 :                             },
    1033            0 :                         )),
    1034            0 :                         heatmap_period: Some(Duration::from_secs(300)),
    1035            0 :                         ..Default::default()
    1036            0 :                     },
    1037            0 :                 })
    1038            0 :                 .await?;
    1039            0 :         }
    1040            0 :         Command::BulkMigrate {
    1041            0 :             nodes,
    1042            0 :             concurrency,
    1043            0 :             max_shards,
    1044            0 :             dry_run,
    1045            0 :         } => {
    1046            0 :             // Load the list of nodes, split them up into the drained and filled sets,
    1047            0 :             // and validate that draining is possible.
    1048            0 :             let node_descs = storcon_client
    1049            0 :                 .dispatch::<(), Vec<NodeDescribeResponse>>(
    1050            0 :                     Method::GET,
    1051            0 :                     "control/v1/node".to_string(),
    1052            0 :                     None,
    1053            0 :                 )
    1054            0 :                 .await?;
    1055            0 : 
    1056            0 :             let mut node_to_drain_descs = Vec::new();
    1057            0 :             let mut node_to_fill_descs = Vec::new();
    1058            0 : 
    1059            0 :             for desc in node_descs {
    1060            0 :                 let to_drain = nodes.contains(&desc.id);
    1061            0 :                 if to_drain {
    1062            0 :                     node_to_drain_descs.push(desc);
    1063            0 :                 } else {
    1064            0 :                     node_to_fill_descs.push(desc);
    1065            0 :                 }
    1066            0 :             }
    1067            0 : 
    1068            0 :             if nodes.len() != node_to_drain_descs.len() {
    1069            0 :                 anyhow::bail!("Bulk migration requested away from a node which doesn't exist.")
    1070            0 :             }
    1071            0 : 
    1072            0 :             node_to_fill_descs.retain(|desc| {
    1073            0 :                 matches!(desc.availability, NodeAvailabilityWrapper::Active)
    1074            0 :                     && matches!(
    1075            0 :                         desc.scheduling,
    1076            0 :                         NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Filling
    1077            0 :                     )
    1078            0 :             });
    1079            0 : 
    1080            0 :             if node_to_fill_descs.is_empty() {
    1081            0 :                 anyhow::bail!("There are no nodes to migrate to")
    1082            0 :             }
    1083            0 : 
    1084            0 :             // Set the node scheduling policy to draining for the nodes which
    1085            0 :             // we plan to drain.
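                      :             // (Presumably this also stops the scheduler from placing new
                      :             // shards on those nodes while the moves below are issued.)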
    1086            0 :             for node_desc in node_to_drain_descs.iter() {
    1087            0 :                 let req = NodeConfigureRequest {
    1088            0 :                     node_id: node_desc.id,
    1089            0 :                     availability: None,
    1090            0 :                     scheduling: Some(NodeSchedulingPolicy::Draining),
    1091            0 :                 };
    1092            0 : 
    1093            0 :                 storcon_client
    1094            0 :                     .dispatch::<_, ()>(
    1095            0 :                         Method::PUT,
    1096            0 :                         format!("control/v1/node/{}/config", node_desc.id),
    1097            0 :                         Some(req),
    1098            0 :                     )
    1099            0 :                     .await?;
    1100            0 :             }
    1101            0 : 
    1102            0 :             // Perform the migration: move each tenant shard scheduled on a node to
    1103            0 :             // be drained to a node which is being filled. A simple round robin
    1104            0 :             // strategy is used to pick the new node.
    1105            0 :             let tenants = storcon_client
    1106            0 :                 .dispatch::<(), Vec<TenantDescribeResponse>>(
    1107            0 :                     Method::GET,
    1108            0 :                     "control/v1/tenant".to_string(),
    1109            0 :                     None,
    1110            0 :                 )
    1111            0 :                 .await?;
    1112            0 : 
    1113            0 :             let mut selected_node_idx = 0;
    1114            0 : 
    1115            0 :             struct MigrationMove {
    1116            0 :                 tenant_shard_id: TenantShardId,
    1117            0 :                 from: NodeId,
    1118            0 :                 to: NodeId,
    1119            0 :             }
    1120            0 : 
    1121            0 :             let mut moves: Vec<MigrationMove> = Vec::new();
    1122            0 : 
    1123            0 :             let shards = tenants
    1124            0 :                 .into_iter()
    1125            0 :                 .flat_map(|tenant| tenant.shards.into_iter());
    1126            0 :             for shard in shards {
    1127            0 :                 if let Some(max_shards) = max_shards {
    1128            0 :                     if moves.len() >= max_shards {
    1129            0 :                         println!(
    1130            0 :                             "Stopping shard move planning: the requested maximum was reached"
    1131            0 :                         );
    1132            0 :                         break;
    1133            0 :                     }
    1134            0 :                 }
    1135            0 : 
    1136            0 :                 let should_migrate = {
    1137            0 :                     if let Some(attached_to) = shard.node_attached {
    1138            0 :                         node_to_drain_descs
    1139            0 :                             .iter()
    1140            0 :                             .map(|desc| desc.id)
    1141            0 :                             .any(|id| id == attached_to)
    1142            0 :                     } else {
    1143            0 :                         false
    1144            0 :                     }
    1145            0 :                 };
    1146            0 : 
    1147            0 :                 if !should_migrate {
    1148            0 :                     continue;
    1149            0 :                 }
    1150            0 : 
    1151            0 :                 moves.push(MigrationMove {
    1152            0 :                     tenant_shard_id: shard.tenant_shard_id,
    1153            0 :                     from: shard
    1154            0 :                         .node_attached
    1155            0 :                         .expect("We only migrate attached tenant shards"),
    1156            0 :                     to: node_to_fill_descs[selected_node_idx].id,
    1157            0 :                 });
    1158            0 :                 selected_node_idx = (selected_node_idx + 1) % node_to_fill_descs.len();
    1159            0 :             }
    1160            0 : 
    1161            0 :             let total_moves = moves.len();
    1162            0 : 
    1163            0 :             if dry_run == Some(true) {
    1164            0 :                 println!("Dry run requested. Planned {total_moves} moves:");
    1165            0 :                 for mv in &moves {
    1166            0 :                     println!("{}: {} -> {}", mv.tenant_shard_id, mv.from, mv.to)
    1167            0 :                 }
    1168            0 : 
    1169            0 :                 return Ok(());
    1170            0 :             }
    1171            0 : 
    1172            0 :             const DEFAULT_MIGRATE_CONCURRENCY: usize = 8;
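                      :             // `buffered(n)` keeps at most n migration futures running at
                      :             // once and yields results in the order the moves were planned,
                      :             // bounding the number of in-flight requests to the controller.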
    1173            0 :             let mut stream = futures::stream::iter(moves)
    1174            0 :                 .map(|mv| {
    1175            0 :                     let client = Client::new(http_client.clone(), cli.api.clone(), cli.jwt.clone());
    1176            0 :                     async move {
    1177            0 :                         client
    1178            0 :                             .dispatch::<TenantShardMigrateRequest, TenantShardMigrateResponse>(
    1179            0 :                                 Method::PUT,
    1180            0 :                                 format!("control/v1/tenant/{}/migrate", mv.tenant_shard_id),
    1181            0 :                                 Some(TenantShardMigrateRequest {
    1182            0 :                                     node_id: mv.to,
    1183            0 :                                     origin_node_id: Some(mv.from),
    1184            0 :                                     migration_config: MigrationConfig::default(),
    1185            0 :                                 }),
    1186            0 :                             )
    1187            0 :                             .await
    1188            0 :                             .map_err(|e| (mv.tenant_shard_id, mv.from, mv.to, e))
    1189            0 :                     }
    1190            0 :                 })
    1191            0 :                 .buffered(concurrency.unwrap_or(DEFAULT_MIGRATE_CONCURRENCY));
    1192            0 : 
    1193            0 :             let mut success = 0;
    1194            0 :             let mut failure = 0;
    1195            0 : 
    1196            0 :             while let Some(res) = stream.next().await {
    1197            0 :                 match res {
    1198            0 :                     Ok(_) => {
    1199            0 :                         success += 1;
    1200            0 :                     }
    1201            0 :                     Err((tenant_shard_id, from, to, error)) => {
    1202            0 :                         failure += 1;
    1203            0 :                         println!(
    1204            0 :                             "Failed to migrate {tenant_shard_id} from node {from} to node {to}: {error}"
    1205            0 :                         );
    1206            0 :                     }
    1207            0 :                 }
    1208            0 : 
    1209            0 :                 if (success + failure) % 20 == 0 {
    1210            0 :                     println!(
    1211            0 :                         "Processed {}/{} shards: {} succeeded, {} failed",
    1212            0 :                         success + failure,
    1213            0 :                         total_moves,
    1214            0 :                         success,
    1215            0 :                         failure
    1216            0 :                     );
    1217            0 :                 }
    1218            0 :             }
    1219            0 : 
    1220            0 :             println!(
    1221            0 :                 "Processed {}/{} shards: {} succeeded, {} failed",
    1222            0 :                 success + failure,
    1223            0 :                 total_moves,
    1224            0 :                 success,
    1225            0 :                 failure
    1226            0 :             );
    1227            0 :         }
    1228            0 :         Command::StartDrain { node_id } => {
    1229            0 :             storcon_client
    1230            0 :                 .dispatch::<(), ()>(
    1231            0 :                     Method::PUT,
    1232            0 :                     format!("control/v1/node/{node_id}/drain"),
    1233            0 :                     None,
    1234            0 :                 )
    1235            0 :                 .await?;
    1236            0 :             println!("Drain started for {node_id}");
    1237            0 :         }
    1238            0 :         Command::CancelDrain { node_id, timeout } => {
    1239            0 :             storcon_client
    1240            0 :                 .dispatch::<(), ()>(
    1241            0 :                     Method::DELETE,
    1242            0 :                     format!("control/v1/node/{node_id}/drain"),
    1243            0 :                     None,
    1244            0 :                 )
    1245            0 :                 .await?;
    1246            0 : 
    1247            0 :             println!("Waiting for node {node_id} to quiesce on scheduling policy ...");
    1248            0 : 
    1249            0 :             let final_policy =
    1250            0 :                 wait_for_scheduling_policy(storcon_client, node_id, *timeout, |sched| {
    1251            0 :                     use NodeSchedulingPolicy::*;
    1252            0 :                     matches!(sched, Active | PauseForRestart)
    1253            0 :                 })
    1254            0 :                 .await?;
    1255            0 : 
    1256            0 :             println!(
    1257            0 :                 "Drain was cancelled for node {node_id}. Scheduling policy is now {final_policy:?}"
    1258            0 :             );
    1259            0 :         }
    1260            0 :         Command::StartFill { node_id } => {
    1261            0 :             storcon_client
    1262            0 :                 .dispatch::<(), ()>(Method::PUT, format!("control/v1/node/{node_id}/fill"), None)
    1263            0 :                 .await?;
    1264            0 : 
    1265            0 :             println!("Fill started for {node_id}");
    1266            0 :         }
    1267            0 :         Command::CancelFill { node_id, timeout } => {
    1268            0 :             storcon_client
    1269            0 :                 .dispatch::<(), ()>(
    1270            0 :                     Method::DELETE,
    1271            0 :                     format!("control/v1/node/{node_id}/fill"),
    1272            0 :                     None,
    1273            0 :                 )
    1274            0 :                 .await?;
    1275            0 : 
    1276            0 :             println!("Waiting for node {node_id} to quiesce on scheduling policy ...");
    1277            0 : 
    1278            0 :             let final_policy =
    1279            0 :                 wait_for_scheduling_policy(storcon_client, node_id, *timeout, |sched| {
    1280            0 :                     use NodeSchedulingPolicy::*;
    1281            0 :                     matches!(sched, Active)
    1282            0 :                 })
    1283            0 :                 .await?;
    1284            0 : 
    1285            0 :             println!(
    1286            0 :                 "Fill was cancelled for node {node_id}. Scheduling policy is now {final_policy:?}"
    1287            0 :             );
    1288            0 :         }
    1289            0 :         Command::Safekeepers {} => {
    1290            0 :             let mut resp = storcon_client
    1291            0 :                 .dispatch::<(), Vec<SafekeeperDescribeResponse>>(
    1292            0 :                     Method::GET,
    1293            0 :                     "control/v1/safekeeper".to_string(),
    1294            0 :                     None,
    1295            0 :                 )
    1296            0 :                 .await?;
    1297            0 : 
    1298            0 :             resp.sort_by(|a, b| a.id.cmp(&b.id));
    1299            0 : 
    1300            0 :             let mut table = comfy_table::Table::new();
    1301            0 :             table.set_header([
    1302            0 :                 "Id",
    1303            0 :                 "Version",
    1304            0 :                 "Host",
    1305            0 :                 "Port",
    1306            0 :                 "Http Port",
    1307            0 :                 "AZ Id",
    1308            0 :                 "Scheduling",
    1309            0 :             ]);
    1310            0 :             for sk in resp {
    1311            0 :                 table.add_row([
    1312            0 :                     format!("{}", sk.id),
    1313            0 :                     format!("{}", sk.version),
    1314            0 :                     sk.host,
    1315            0 :                     format!("{}", sk.port),
    1316            0 :                     format!("{}", sk.http_port),
    1317            0 :                     sk.availability_zone_id.clone(),
    1318            0 :                     String::from(sk.scheduling_policy),
    1319            0 :                 ]);
    1320            0 :             }
    1321            0 :             println!("{table}");
    1322            0 :         }
    1323            0 :         Command::SafekeeperScheduling {
    1324            0 :             node_id,
    1325            0 :             scheduling_policy,
    1326            0 :         } => {
    1327            0 :             let scheduling_policy = scheduling_policy.0;
    1328            0 :             storcon_client
    1329            0 :                 .dispatch::<SafekeeperSchedulingPolicyRequest, ()>(
    1330            0 :                     Method::POST,
    1331            0 :                     format!("control/v1/safekeeper/{node_id}/scheduling_policy"),
    1332            0 :                     Some(SafekeeperSchedulingPolicyRequest { scheduling_policy }),
    1333            0 :                 )
    1334            0 :                 .await?;
    1335            0 :             println!(
    1336            0 :                 "Scheduling policy of {node_id} set to {}",
    1337            0 :                 String::from(scheduling_policy)
    1338            0 :             );
    1339            0 :         }
    1340            0 :         Command::DownloadHeatmapLayers {
    1341            0 :             tenant_shard_id,
    1342            0 :             timeline_id,
    1343            0 :             concurrency,
    1344            0 :         } => {
    1345            0 :             let mut path = format!(
    1346            0 :                 "v1/tenant/{tenant_shard_id}/timeline/{timeline_id}/download_heatmap_layers",
    1347            0 :             );
    1348            0 : 
    1349            0 :             if let Some(c) = concurrency {
    1350            0 :                 path = format!("{path}?concurrency={c}");
    1351            0 :             }
    1352            0 : 
    1353            0 :             storcon_client
    1354            0 :                 .dispatch::<(), ()>(Method::POST, path, None)
    1355            0 :                 .await?;
    1356            0 :         }
    1357            0 :         Command::TimelineLocate {
    1358            0 :             tenant_id,
    1359            0 :             timeline_id,
    1360            0 :         } => {
    1361            0 :             let path = format!("debug/v1/tenant/{tenant_id}/timeline/{timeline_id}/locate");
    1362            0 : 
    1363            0 :             let resp = storcon_client
    1364            0 :                 .dispatch::<(), TimelineLocateResponse>(Method::GET, path, None)
    1365            0 :                 .await?;
    1366            0 : 
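                      :             // Unwrap the NodeId newtypes into plain integers for printing.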
    1367            0 :             let sk_set = resp.sk_set.iter().map(|id| id.0 as i64).collect::<Vec<_>>();
    1368            0 :             let new_sk_set = resp
    1369            0 :                 .new_sk_set
    1370            0 :                 .as_ref()
    1371            0 :                 .map(|ids| ids.iter().map(|id| id.0 as i64).collect::<Vec<_>>());
    1372            0 : 
    1373            0 :             println!("generation = {}", resp.generation);
    1374            0 :             println!("sk_set = {sk_set:?}");
    1375            0 :             println!("new_sk_set = {new_sk_set:?}");
    1376            0 :         }
    1377            0 :         Command::TimelineSafekeeperMigrate {
    1378            0 :             tenant_id,
    1379            0 :             timeline_id,
    1380            0 :             new_sk_set,
    1381            0 :         } => {
    1382            0 :             let path = format!("v1/tenant/{tenant_id}/timeline/{timeline_id}/safekeeper_migrate");
    1383            0 : 
    1384            0 :             storcon_client
    1385            0 :                 .dispatch::<_, ()>(
    1386            0 :                     Method::POST,
    1387            0 :                     path,
    1388            0 :                     Some(TimelineSafekeeperMigrateRequest { new_sk_set }),
    1389            0 :                 )
    1390            0 :                 .await?;
    1391            0 :         }
    1392            0 :     }
    1393            0 : 
    1394            0 :     Ok(())
    1395            0 : }
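                      : 
                      : // Editorial sketch: `wait_for_scheduling_policy`, used by the
                      : // NodeCancelDelete, CancelDrain and CancelFill arms above, is defined
                      : // elsewhere in this file. A minimal version might look like the
                      : // following; the `control/v1/node/{node_id}` endpoint, the one-second
                      : // poll interval, and the exact signature are assumptions of this
                      : // sketch, not the real helper.
                      : #[allow(dead_code)]
                      : async fn wait_for_scheduling_policy_sketch(
                      :     storcon_client: Client,
                      :     node_id: NodeId,
                      :     timeout: Duration,
                      :     is_settled: impl Fn(&NodeSchedulingPolicy) -> bool,
                      : ) -> anyhow::Result<NodeSchedulingPolicy> {
                      :     let deadline = std::time::Instant::now() + timeout;
                      :     loop {
                      :         // Ask the storage controller for the node's current state.
                      :         let node = storcon_client
                      :             .dispatch::<(), NodeDescribeResponse>(
                      :                 Method::GET,
                      :                 format!("control/v1/node/{node_id}"),
                      :                 None,
                      :             )
                      :             .await?;
                      :         if is_settled(&node.scheduling) {
                      :             return Ok(node.scheduling);
                      :         }
                      :         if std::time::Instant::now() >= deadline {
                      :             anyhow::bail!("Timed out waiting for node {node_id} to settle");
                      :         }
                      :         tokio::time::sleep(Duration::from_secs(1)).await;
                      :     }
                      : }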
    1396              : 
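                      : /// How often `watch_tenant_shard` polls the storage controller.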
    1397              : static WATCH_INTERVAL: Duration = Duration::from_secs(5);
    1398              : 
    1399            0 : async fn watch_tenant_shard(
    1400            0 :     storcon_client: Client,
    1401            0 :     tenant_shard_id: TenantShardId,
    1402            0 :     until_migrated_to: Option<NodeId>,
    1403            0 : ) -> anyhow::Result<()> {
    1404            0 :     if let Some(until_migrated_to) = until_migrated_to {
    1405            0 :         println!(
    1406            0 :             "Waiting for tenant shard {tenant_shard_id} to be migrated to node {until_migrated_to}"
    1407            0 :         );
    1408            0 :     }
    1409              : 
    1410              :     loop {
    1411            0 :         let desc = storcon_client
    1412            0 :             .dispatch::<(), TenantDescribeResponse>(
    1413            0 :                 Method::GET,
    1414            0 :                 format!("control/v1/tenant/{}", tenant_shard_id.tenant_id),
    1415            0 :                 None,
    1416            0 :             )
    1417            0 :             .await?;
    1418              : 
    1419              :         // Output the current state of the tenant shard
    1420            0 :         let shard = desc
    1421            0 :             .shards
    1422            0 :             .iter()
    1423            0 :             .find(|s| s.tenant_shard_id == tenant_shard_id)
    1424            0 :             .ok_or(anyhow::anyhow!("Tenant shard not found"))?;
    1425            0 :         let summary = format!(
    1426            0 :             "attached: {} secondary: {} {}",
    1427            0 :             shard
    1428            0 :                 .node_attached
    1429            0 :                 .map(|n| format!("{n}"))
    1430            0 :                 .unwrap_or("none".to_string()),
    1431            0 :             shard
    1432            0 :                 .node_secondary
    1433            0 :                 .iter()
    1434            0 :                 .map(|n| n.to_string())
    1435            0 :                 .collect::<Vec<_>>()
    1436            0 :                 .join(","),
    1437            0 :             if shard.is_reconciling {
    1438            0 :                 "(reconciler active)"
    1439              :             } else {
    1440            0 :                 "(reconciler idle)"
    1441              :             }
    1442              :         );
    1443            0 :         println!("{summary}");
    1444              : 
    1445              :         // Stop watching once the shard is attached to the target node and the reconciler is idle
    1446            0 :         if let Some(until_migrated_to) = until_migrated_to {
    1447            0 :             if shard.node_attached == Some(until_migrated_to) && !shard.is_reconciling {
    1448            0 :                 println!("Tenant shard {tenant_shard_id} is now on node {until_migrated_to}");
    1449            0 :                 break;
    1450            0 :             }
    1451            0 :         }
    1452              : 
    1453            0 :         tokio::time::sleep(WATCH_INTERVAL).await;
    1454              :     }
    1455            0 :     Ok(())
    1456            0 : }
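                      : 
                      : // Hypothetical usage of the helper above (the real call site is
                      : // presumably in the command dispatch earlier in this file):
                      : //
                      : //     watch_tenant_shard(storcon_client, tenant_shard_id, Some(node_id)).await?;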
        

Generated by: LCOV version 2.1-beta