LCOV - differential code coverage report
Current view:    top level - compute_tools/src/bin - compute_ctl.rs (source / functions)
Current:         cd44433dd675caa99df17a61b18949c8387e2242.info
Current Date:    2024-01-09 02:06:09
Baseline:        66c52a629a0f4a503e193045e0df4c77139e344b.info
Baseline Date:   2024-01-08 15:34:46

                 Coverage    Total    Hit    UBC    CBC
Lines:             83.8 %      396    332     64    332
Functions:         70.0 %       30     21      9     21

           TLA  Line data    Source code
       1                 : //!
       2                 : //! Postgres wrapper (`compute_ctl`) is intended to be run as a Docker entrypoint or as a `systemd`
       3                 : //! `ExecStart` option. It will handle all the `Neon` specifics during compute node
       4                 : //! initialization:
       5                 : //! - `compute_ctl` accepts cluster (compute node) specification as a JSON file.
       6                 : //! - Every start is a fresh start, so the data directory is removed and
       7                 : //!   initialized again on each run.
       8                 : //! - If remote_extension_config is provided, it will be used to fetch the extension list
       9                 : //!   and download `shared_preload_libraries` from the remote storage.
      10                 : //! - Next it will put configuration files into the `PGDATA` directory.
      11                 : //! - Sync safekeepers and get commit LSN.
      12                 : //! - Get `basebackup` from the pageserver using the LSN returned in the previous step.
      13                 : //! - Try to start `postgres` and wait until it is ready to accept connections.
      14                 : //! - Check and alter/drop/create roles and databases.
      15                 : //! - Hang waiting on the `postmaster` process to exit.
      16                 : //!
      17                 : //! Also `compute_ctl` spawns two separate service threads:
      18                 : //! - `compute-monitor` checks the last Postgres activity timestamp and saves it
      19                 : //!   into the shared `ComputeNode`;
      20                 : //! - `http-endpoint` runs a Hyper HTTP API server, which serves readiness and
      21                 : //!   last-activity requests.
      22                 : //!
      23                 : //! If `AUTOSCALING` environment variable is set, `compute_ctl` will start the
      24                 : //! `vm-monitor` located in [`neon/libs/vm_monitor`]. For VM compute nodes,
      25                 : //! `vm-monitor` communicates with the VM autoscaling system. It coordinates
      26                 : //! downscaling and requests immediate upscaling under resource pressure.
      27                 : //!
      28                 : //! Usage example:
      29                 : //! ```sh
      30                 : //! compute_ctl -D /var/db/postgres/compute \
      31                 : //!             -C 'postgresql://cloud_admin@localhost/postgres' \
      32                 : //!             -S /var/db/postgres/specs/current.json \
      33                 : //!             -b /usr/local/bin/postgres \
      34                 : //!             -r http://pg-ext-s3-gateway \
      35                 : //!             --pgbouncer-connstr 'host=localhost port=6432 dbname=pgbouncer user=cloud_admin sslmode=disable' \
      36                 : //!             --pgbouncer-ini-path /etc/pgbouncer.ini
      37                 : //! ```
      38                 : //!
      39                 : use std::collections::HashMap;
      40                 : use std::fs::File;
      41                 : use std::path::Path;
      42                 : use std::process::exit;
      43                 : use std::sync::atomic::Ordering;
      44                 : use std::sync::{mpsc, Arc, Condvar, Mutex, RwLock};
      45                 : use std::{thread, time::Duration};
      46                 : 
      47                 : use anyhow::{Context, Result};
      48                 : use chrono::Utc;
      49                 : use clap::Arg;
      50                 : use nix::sys::signal::{kill, Signal};
      51                 : use signal_hook::consts::{SIGQUIT, SIGTERM};
      52                 : use signal_hook::{consts::SIGINT, iterator::Signals};
      53                 : use tracing::{error, info};
      54                 : use url::Url;
      55                 : 
      56                 : use compute_api::responses::ComputeStatus;
      57                 : 
      58                 : use compute_tools::compute::{ComputeNode, ComputeState, ParsedSpec, PG_PID, SYNC_SAFEKEEPERS_PID};
      59                 : use compute_tools::configurator::launch_configurator;
      60                 : use compute_tools::extension_server::get_pg_version;
      61                 : use compute_tools::http::api::launch_http_server;
      62                 : use compute_tools::logger::*;
      63                 : use compute_tools::monitor::launch_monitor;
      64                 : use compute_tools::params::*;
      65                 : use compute_tools::spec::*;
      66                 : 
      67                 : // This is an arbitrary build tag, fine as a default / for testing purposes
      68                 : // in case the BUILD_TAG environment variable is not set.
      69                 : const BUILD_TAG_DEFAULT: &str = "latest";
      70                 : 
      71 CBC         544 : fn main() -> Result<()> {
      72             544 :     init_tracing_and_logging(DEFAULT_LOG_LEVEL)?;
      73                 : 
      74             544 :     let mut signals = Signals::new([SIGINT, SIGTERM, SIGQUIT])?;
      75             544 :     thread::spawn(move || {
      76             544 :         for sig in signals.forever() {
      77               3 :             handle_exit_signal(sig);
      78               3 :         }
      79             544 :     });
      80             544 : 
      81             544 :     let build_tag = option_env!("BUILD_TAG")
      82             544 :         .unwrap_or(BUILD_TAG_DEFAULT)
      83             544 :         .to_string();
      84             544 :     info!("build_tag: {build_tag}");
      85                 : 
      86             544 :     let matches = cli().get_matches();
      87             544 :     let pgbin_default = String::from("postgres");
      88             544 :     let pgbin = matches.get_one::<String>("pgbin").unwrap_or(&pgbin_default);
      89             544 : 
      90             544 :     let ext_remote_storage = matches
      91             544 :         .get_one::<String>("remote-ext-config")
      92             544 :         // Compatibility hack: if the control plane specified any remote-ext-config,
      93             544 :         // use the default value for the extension storage proxy gateway.
      94             544 :         // Remove this once the control plane is updated to pass the gateway URL.
      95             544 :         .map(|conf| {
      96               1 :             if conf.starts_with("http") {
      97               1 :                 conf.trim_end_matches('/')
      98                 :             } else {
      99 UBC           0 :                 "http://pg-ext-s3-gateway"
     100                 :             }
     101 CBC         544 :         });
     102             544 : 
     103             544 :     let http_port = *matches
     104             544 :         .get_one::<u16>("http-port")
     105             544 :         .expect("http-port is required");
     106             544 :     let pgdata = matches
     107             544 :         .get_one::<String>("pgdata")
     108             544 :         .expect("PGDATA path is required");
     109             544 :     let connstr = matches
     110             544 :         .get_one::<String>("connstr")
     111             544 :         .expect("Postgres connection string is required");
     112             544 :     let spec_json = matches.get_one::<String>("spec");
     113             544 :     let spec_path = matches.get_one::<String>("spec-path");
     114             544 : 
     115             544 :     let pgbouncer_connstr = matches.get_one::<String>("pgbouncer-connstr");
     116             544 :     let pgbouncer_ini_path = matches.get_one::<String>("pgbouncer-ini-path");
     117             544 : 
     118             544 :     // Extract OpenTelemetry context for the startup actions from the
     119             544 :     // TRACEPARENT and TRACESTATE env variables, and attach it to the current
     120             544 :     // tracing context.
     121             544 :     //
     122             544 :     // This is used to propagate the context for the 'start_compute' operation
     123             544 :     // from the neon control plane. This allows linking together the wider
     124             544 :     // 'start_compute' operation that creates the compute container, with the
     125             544 :     // startup actions here within the container.
     126             544 :     //
     127             544 :     // There is no standard for passing context in env variables, but a lot of
     128             544 :     // tools use TRACEPARENT/TRACESTATE, so we use that convention too. See
     129             544 :     // https://github.com/open-telemetry/opentelemetry-specification/issues/740
     130             544 :     //
     131             544 :     // Switch to the startup context here, and exit it once the startup has
     132             544 :     // completed and Postgres is up and running.
     133             544 :     //
     134             544 :     // If this pod is pre-created without binding it to any particular endpoint
     135             544 :     // yet, this isn't the right place to enter the startup context. In that
     136             544 :     // case, the control plane should pass the tracing context as part of the
     137             544 :     // /configure API call.
     138             544 :     //
     139             544 :     // NOTE: This is supposed to only cover the *startup* actions. Once
     140             544 :     // postgres is configured and up-and-running, we exit this span. Any other
     141             544 :     // actions that are performed on incoming HTTP requests, for example, are
     142             544 :     // performed in separate spans.
     143             544 :     //
     144             544 :     // XXX: If the pod is restarted, we perform the startup actions in the same
     145             544 :     // context as the original startup actions, which probably doesn't make
     146             544 :     // sense.
     147             544 :     let mut startup_tracing_carrier: HashMap<String, String> = HashMap::new();
     148             544 :     if let Ok(val) = std::env::var("TRACEPARENT") {
     149 UBC           0 :         startup_tracing_carrier.insert("traceparent".to_string(), val);
     150 CBC         544 :     }
     151             544 :     if let Ok(val) = std::env::var("TRACESTATE") {
     152 UBC           0 :         startup_tracing_carrier.insert("tracestate".to_string(), val);
     153 CBC         544 :     }
     154             544 :     let startup_context_guard = if !startup_tracing_carrier.is_empty() {
     155                 :         use opentelemetry::propagation::TextMapPropagator;
     156                 :         use opentelemetry::sdk::propagation::TraceContextPropagator;
     157 UBC           0 :         let guard = TraceContextPropagator::new()
     158               0 :             .extract(&startup_tracing_carrier)
     159               0 :             .attach();
     160               0 :         info!("startup tracing context attached");
     161               0 :         Some(guard)
     162                 :     } else {
     163 CBC         544 :         None
     164                 :     };
     165                 : 
     166             544 :     let compute_id = matches.get_one::<String>("compute-id");
     167             544 :     let control_plane_uri = matches.get_one::<String>("control-plane-uri");
     168             544 : 
     169             544 :     let spec;
     170             544 :     let mut live_config_allowed = false;
     171             544 :     match spec_json {
     172                 :         // First, try to get cluster spec from the cli argument
     173 UBC           0 :         Some(json) => {
     174               0 :             info!("got spec from cli argument {}", json);
     175               0 :             spec = Some(serde_json::from_str(json)?);
     176                 :         }
     177                 :         None => {
     178                 :             // Second, try to read it from the file if path is provided
     179 CBC         544 :             if let Some(sp) = spec_path {
     180             544 :                 let path = Path::new(sp);
     181             544 :                 let file = File::open(path)?;
     182             544 :                 spec = Some(serde_json::from_reader(file)?);
     183             544 :                 live_config_allowed = true;
     184 UBC           0 :             } else if let Some(id) = compute_id {
     185               0 :                 if let Some(cp_base) = control_plane_uri {
     186               0 :                     live_config_allowed = true;
     187               0 :                     spec = match get_spec_from_control_plane(cp_base, id) {
     188               0 :                         Ok(s) => s,
     189               0 :                         Err(e) => {
     190               0 :                             error!("cannot get response from control plane: {}", e);
     191               0 :                             panic!("neither spec nor confirmation that compute is in the Empty state was received");
     192                 :                         }
     193                 :                     };
     194                 :                 } else {
     195               0 :                     panic!("must specify both --control-plane-uri and --compute-id or none");
     196                 :                 }
     197                 :             } else {
     198               0 :                 panic!(
     199               0 :                     "compute spec should be provided by one of the following ways: \
     200               0 :                     --spec OR --spec-path OR --control-plane-uri and --compute-id"
     201               0 :                 );
     202                 :             }
     203                 :         }
     204                 :     };
     205                 : 
     206 CBC         544 :     let mut new_state = ComputeState::new();
     207                 :     let spec_set;
     208                 : 
     209             544 :     if let Some(spec) = spec {
     210             544 :         let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow::anyhow!(msg))?;
     211             544 :         info!("new pspec.spec: {:?}", pspec.spec);
     212             544 :         new_state.pspec = Some(pspec);
     213             544 :         spec_set = true;
     214 UBC           0 :     } else {
     215               0 :         spec_set = false;
     216               0 :     }
     217 CBC         544 :     let compute_node = ComputeNode {
     218             544 :         connstr: Url::parse(connstr).context("cannot parse connstr as a URL")?,
     219             544 :         pgdata: pgdata.to_string(),
     220             544 :         pgbin: pgbin.to_string(),
     221             544 :         pgversion: get_pg_version(pgbin),
     222             544 :         live_config_allowed,
     223             544 :         state: Mutex::new(new_state),
     224             544 :         state_changed: Condvar::new(),
     225             544 :         ext_remote_storage: ext_remote_storage.map(|s| s.to_string()),
     226             544 :         ext_download_progress: RwLock::new(HashMap::new()),
     227             544 :         build_tag,
     228             544 :         pgbouncer_connstr: pgbouncer_connstr.map(|s| s.to_string()),
     229             544 :         pgbouncer_ini_path: pgbouncer_ini_path.map(|s| s.to_string()),
     230             544 :     };
     231             544 :     let compute = Arc::new(compute_node);
     232             544 : 
     233             544 :     // If this is a pooled VM, prewarm before starting the HTTP server and becoming
     234             544 :     // available for binding. Prewarming helps postgres start quicker later,
     235             544 :     // because QEMU will already have its memory allocated from the host, and
     236             544 :     // the necessary binaries will already be cached.
     237             544 :     if !spec_set {
     238 UBC           0 :         compute.prewarm_postgres()?;
     239 CBC         544 :     }
     240                 : 
     241                 :     // Launch the HTTP service first, so that we can serve control-plane
     242                 :     // requests while configuration is still in progress.
     243             544 :     let _http_handle =
     244             544 :         launch_http_server(http_port, &compute).expect("cannot launch http endpoint thread");
     245             544 : 
     246             544 :     let extension_server_port: u16 = http_port;
     247             544 : 
     248             544 :     if !spec_set {
     249                 :         // No spec provided, hang waiting for it.
     250 UBC           0 :         info!("no compute spec provided, waiting");
     251                 : 
     252               0 :         let mut state = compute.state.lock().unwrap();
     253               0 :         while state.status != ComputeStatus::ConfigurationPending {
     254               0 :             state = compute.state_changed.wait(state).unwrap();
     255               0 : 
     256               0 :             if state.status == ComputeStatus::ConfigurationPending {
     257               0 :                 info!("got spec, continue configuration");
     258                 :                 // Spec is already set by the http server handler.
     259               0 :                 break;
     260               0 :             }
     261                 :         }
     262 CBC         544 :     }
     263                 : 
     264                 :     // We got all we need, update the state.
     265             544 :     let mut state = compute.state.lock().unwrap();
     266             544 : 
     267             544 :     // Record for how long we slept waiting for the spec.
     268             544 :     state.metrics.wait_for_spec_ms = Utc::now()
     269             544 :         .signed_duration_since(state.start_time)
     270             544 :         .to_std()
     271             544 :         .unwrap()
     272             544 :         .as_millis() as u64;
     273             544 :     // Reset start time to the actual start of the configuration, so that
     274             544 :     // total startup time was properly measured at the end.
     275             544 :     state.start_time = Utc::now();
     276             544 : 
     277             544 :     state.status = ComputeStatus::Init;
     278             544 :     compute.state_changed.notify_all();
     279             544 :     drop(state);
     280             544 : 
     281             544 :     // Launch remaining service threads
     282             544 :     let _monitor_handle = launch_monitor(&compute);
     283             544 :     let _configurator_handle = launch_configurator(&compute);
     284             544 : 
     285             544 :     // Start Postgres
     286             544 :     let mut delay_exit = false;
     287             544 :     let mut exit_code = None;
     288             544 :     let pg = match compute.start_compute(extension_server_port) {
     289             537 :         Ok(pg) => Some(pg),
     290               7 :         Err(err) => {
     291               7 :             error!("could not start the compute node: {:?}", err);
     292               7 :             let mut state = compute.state.lock().unwrap();
     293               7 :             state.error = Some(format!("{:?}", err));
     294               7 :             state.status = ComputeStatus::Failed;
     295               7 :             // Notify others that Postgres failed to start. When configuring an
     296               7 :             // empty compute, it's likely that the API handler is still waiting for a
     297               7 :             // compute state change. With this we notify it that the compute is in the
     298               7 :             // Failed state, so the control plane learns about it earlier and records a
     299               7 :             // proper error instead of a timeout.
     300               7 :             compute.state_changed.notify_all();
     301               7 :             drop(state); // unlock
     302               7 :             delay_exit = true;
     303               7 :             None
     304                 :         }
     305                 :     };
     306                 : 
     307                 :     // Start the vm-monitor if directed to. The vm-monitor only runs on linux
     308                 :     // because it requires cgroups.
     309                 :     cfg_if::cfg_if! {
     310                 :         if #[cfg(target_os = "linux")] {
     311                 :             use std::env;
     312                 :             use tokio_util::sync::CancellationToken;
     313             544 :             let vm_monitor_addr = matches
     314             544 :                 .get_one::<String>("vm-monitor-addr")
     315             544 :                 .expect("--vm-monitor-addr should always be set because it has a default arg");
     316             544 :             let file_cache_connstr = matches.get_one::<String>("filecache-connstr");
     317             544 :             let cgroup = matches.get_one::<String>("cgroup");
     318                 : 
     319                 :             // Only make a runtime if we need to.
     320                 :             // Note: it seems like you can make a runtime in an inner scope and,
     321                 :             // if you start a task in it, it won't be dropped. However, make it
     322                 :             // in the outermost scope just to be safe.
     323             544 :             let rt = if env::var_os("AUTOSCALING").is_some() {
     324 UBC           0 :                 Some(
     325               0 :                     tokio::runtime::Builder::new_multi_thread()
     326               0 :                         .worker_threads(4)
     327               0 :                         .enable_all()
     328               0 :                         .build()
     329               0 :                         .expect("failed to create tokio runtime for monitor")
     330               0 :                 )
     331                 :             } else {
     332 CBC         544 :                 None
     333                 :             };
     334                 : 
     335                 :             // This token is used internally by the monitor to clean up all threads
     336             544 :             let token = CancellationToken::new();
     337             544 : 
     338             544 :             let vm_monitor = &rt.as_ref().map(|rt| {
     339 UBC           0 :                 rt.spawn(vm_monitor::start(
     340               0 :                     Box::leak(Box::new(vm_monitor::Args {
     341               0 :                         cgroup: cgroup.cloned(),
     342               0 :                         pgconnstr: file_cache_connstr.cloned(),
     343               0 :                         addr: vm_monitor_addr.clone(),
     344               0 :                     })),
     345               0 :                     token.clone(),
     346               0 :                 ))
     347 CBC         544 :             });
     348                 :         }
     349                 :     }
     350                 : 
     351                 :     // Wait for the child Postgres process forever. In this state Ctrl+C will
     352                 :     // propagate to Postgres and it will be shut down as well.
     353             544 :     if let Some(mut pg) = pg {
     354                 :         // Startup is finished, exit the startup tracing span
     355             537 :         drop(startup_context_guard);
     356             537 : 
     357             537 :         let ecode = pg
     358             537 :             .wait()
     359             537 :             .expect("failed to start waiting on Postgres process");
     360             537 :         PG_PID.store(0, Ordering::SeqCst);
     361             537 :         info!("Postgres exited with code {}, shutting down", ecode);
     362             537 :         exit_code = ecode.code()
     363               7 :     }
     364                 : 
     365                 :     // Terminate the vm_monitor so it releases the file watcher on
     366                 :     // /sys/fs/cgroup/neon-postgres.
     367                 :     // Note: the vm-monitor only runs on linux because it requires cgroups.
     368                 :     cfg_if::cfg_if! {
     369                 :         if #[cfg(target_os = "linux")] {
     370             544 :             if let Some(handle) = vm_monitor {
     371 UBC           0 :                 // Kills all threads spawned by the monitor
     372               0 :                 token.cancel();
     373               0 :                 // Kills the actual task running the monitor
     374               0 :                 handle.abort();
     375               0 : 
     376               0 :                 // If handle is some, rt must have been used to produce it, and
     377               0 :                 // hence is also some
     378               0 :                 rt.unwrap().shutdown_timeout(Duration::from_secs(2));
     379 CBC         544 :             }
     380                 :         }
     381                 :     }
     382                 : 
     383                 :     // Maybe sync safekeepers again, to speed up next startup
     384             544 :     let compute_state = compute.state.lock().unwrap().clone();
     385             544 :     let pspec = compute_state.pspec.as_ref().expect("spec must be set");
     386             544 :     if matches!(pspec.spec.mode, compute_api::spec::ComputeMode::Primary) {
     387             494 :         info!("syncing safekeepers on shutdown");
     388             494 :         let storage_auth_token = pspec.storage_auth_token.clone();
     389             494 :         let lsn = compute.sync_safekeepers(storage_auth_token)?;
     390             490 :         info!("synced safekeepers at lsn {lsn}");
     391              50 :     }
     392                 : 
     393             540 :     if let Err(err) = compute.check_for_core_dumps() {
     394 UBC           0 :         error!("error while checking for core dumps: {err:?}");
     395 CBC         540 :     }
     396                 : 
     397                 :     // If launch failed, keep serving HTTP requests for a while, so the cloud
     398                 :     // control plane can get the actual error.
     399             540 :     if delay_exit {
     400               7 :         info!("giving control plane 30s to collect the error before shutdown");
     401               7 :         thread::sleep(Duration::from_secs(30));
     402             533 :     }
     403                 : 
     404                 :     // Shutdown trace pipeline gracefully, so that it has a chance to send any
     405                 :     // pending traces before we exit. Shutting down OTEL tracing provider may
     406                 :     // hang for quite some time, see, for example:
     407                 :     // - https://github.com/open-telemetry/opentelemetry-rust/issues/868
     408                 :     // - and our problems with staging https://github.com/neondatabase/cloud/issues/3707#issuecomment-1493983636
     409                 :     //
     410                 :     // Yet, we want computes to shut down fast enough, as we may need a new one
     411                 :     // for the same timeline ASAP. So wait no longer than 2s for the shutdown to
     412                 :     // complete, then just error out and exit the main thread.
     413             540 :     info!("shutting down tracing");
     414             540 :     let (sender, receiver) = mpsc::channel();
     415             540 :     let _ = thread::spawn(move || {
     416             540 :         tracing_utils::shutdown_tracing();
     417             540 :         sender.send(()).ok()
     418             540 :     });
     419             540 :     let shutdown_res = receiver.recv_timeout(Duration::from_millis(2000));
     420             540 :     if shutdown_res.is_err() {
     421 UBC           0 :         error!("timed out while shutting down tracing, exiting anyway");
     422 CBC         540 :     }
     423                 : 
     424             540 :     info!("shutting down");
     425             540 :     exit(exit_code.unwrap_or(1))
     426               4 : }
     427                 : 
     428             545 : fn cli() -> clap::Command {
     429             545 :     // Env variable is set by `cargo`
     430             545 :     let version = option_env!("CARGO_PKG_VERSION").unwrap_or("unknown");
     431             545 :     clap::Command::new("compute_ctl")
     432             545 :         .version(version)
     433             545 :         .arg(
     434             545 :             Arg::new("http-port")
     435             545 :                 .long("http-port")
     436             545 :                 .value_name("HTTP_PORT")
     437             545 :                 .default_value("3080")
     438             545 :                 .value_parser(clap::value_parser!(u16))
     439             545 :                 .required(false),
     440             545 :         )
     441             545 :         .arg(
     442             545 :             Arg::new("connstr")
     443             545 :                 .short('C')
     444             545 :                 .long("connstr")
     445             545 :                 .value_name("DATABASE_URL")
     446             545 :                 .required(true),
     447             545 :         )
     448             545 :         .arg(
     449             545 :             Arg::new("pgdata")
     450             545 :                 .short('D')
     451             545 :                 .long("pgdata")
     452             545 :                 .value_name("DATADIR")
     453             545 :                 .required(true),
     454             545 :         )
     455             545 :         .arg(
     456             545 :             Arg::new("pgbin")
     457             545 :                 .short('b')
     458             545 :                 .long("pgbin")
     459             545 :                 .default_value("postgres")
     460             545 :                 .value_name("POSTGRES_PATH"),
     461             545 :         )
     462             545 :         .arg(
     463             545 :             Arg::new("spec")
     464             545 :                 .short('s')
     465             545 :                 .long("spec")
     466             545 :                 .value_name("SPEC_JSON"),
     467             545 :         )
     468             545 :         .arg(
     469             545 :             Arg::new("spec-path")
     470             545 :                 .short('S')
     471             545 :                 .long("spec-path")
     472             545 :                 .value_name("SPEC_PATH"),
     473             545 :         )
     474             545 :         .arg(
     475             545 :             Arg::new("compute-id")
     476             545 :                 .short('i')
     477             545 :                 .long("compute-id")
     478             545 :                 .value_name("COMPUTE_ID"),
     479             545 :         )
     480             545 :         .arg(
     481             545 :             Arg::new("control-plane-uri")
     482             545 :                 .short('p')
     483             545 :                 .long("control-plane-uri")
     484             545 :                 .value_name("CONTROL_PLANE_API_BASE_URI"),
     485             545 :         )
     486             545 :         .arg(
     487             545 :             Arg::new("remote-ext-config")
     488             545 :                 .short('r')
     489             545 :                 .long("remote-ext-config")
     490             545 :                 .value_name("REMOTE_EXT_CONFIG"),
     491             545 :         )
     492             545 :         // TODO(fprasx): we currently have default arguments because the cloud PR
     493             545 :         // to pass them in hasn't been merged yet. We should get rid of them once
     494             545 :         // the PR is merged.
     495             545 :         .arg(
     496             545 :             Arg::new("vm-monitor-addr")
     497             545 :                 .long("vm-monitor-addr")
     498             545 :                 .default_value("0.0.0.0:10301")
     499             545 :                 .value_name("VM_MONITOR_ADDR"),
     500             545 :         )
     501             545 :         .arg(
     502             545 :             Arg::new("cgroup")
     503             545 :                 .long("cgroup")
     504             545 :                 .default_value("neon-postgres")
     505             545 :                 .value_name("CGROUP"),
     506             545 :         )
     507             545 :         .arg(
     508             545 :             Arg::new("filecache-connstr")
     509             545 :                 .long("filecache-connstr")
     510             545 :                 .default_value(
     511             545 :                     "host=localhost port=5432 dbname=postgres user=cloud_admin sslmode=disable",
     512             545 :                 )
     513             545 :                 .value_name("FILECACHE_CONNSTR"),
     514             545 :         )
     515             545 :         .arg(
     516             545 :             Arg::new("pgbouncer-connstr")
     517             545 :                 .long("pgbouncer-connstr")
     518             545 :                 .default_value(
     519             545 :                     "host=localhost port=6432 dbname=pgbouncer user=cloud_admin sslmode=disable",
     520             545 :                 )
     521             545 :                 .value_name("PGBOUNCER_CONNSTR"),
     522             545 :         )
     523             545 :         .arg(
     524             545 :             Arg::new("pgbouncer-ini-path")
     525             545 :                 .long("pgbouncer-ini-path")
     526             545 :                 // Note: this doesn't match the current path for pgbouncer.ini.
     527             545 :                 // Until we fix it, we need to pass the path explicitly,
     528             545 :                 // or this will effectively be a no-op.
     529             545 :                 .default_value("/etc/pgbouncer.ini")
     530             545 :                 .value_name("PGBOUNCER_INI_PATH"),
     531             545 :         )
     532             545 : }
     533                 : 
     534                 : /// When compute_ctl is killed, also send a termination signal to sync-safekeepers
     535                 : /// to prevent leakage. TODO: it would be better to convert compute_ctl to async and
     536                 : /// wait for termination, which would then be easy.
     537               3 : fn handle_exit_signal(sig: i32) {
     538               3 :     info!("received {sig} termination signal");
     539               3 :     let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
     540               3 :     if ss_pid != 0 {
     541               3 :         let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
     542               3 :         kill(ss_pid, Signal::SIGTERM).ok();
     543               3 :     }
     544               3 :     let pg_pid = PG_PID.load(Ordering::SeqCst);
     545               3 :     if pg_pid != 0 {
     546 UBC           0 :         let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
     547               0 :         kill(pg_pid, Signal::SIGTERM).ok();
     548 CBC           3 :     }
     549               3 :     exit(1);
     550                 : }
     551                 : 
     552               1 : #[test]
     553               1 : fn verify_cli() {
     554               1 :     cli().debug_assert()
     555               1 : }
        

Generated by: LCOV version 2.1-beta