//!
//! Postgres wrapper (`compute_ctl`) is intended to be run as a Docker entrypoint or as a `systemd`
//! `ExecStart` option. It will handle all the `Neon` specifics during compute node
//! initialization:
//! - `compute_ctl` accepts the cluster (compute node) specification as a JSON file.
//! - Every start is a fresh start, so the data directory is removed and
//!   initialized again on each run.
//! - If `remote_extension_config` is provided, it will be used to fetch the extension
//!   list and download `shared_preload_libraries` from remote storage.
//! - Next it will put configuration files into the `PGDATA` directory.
//! - Sync safekeepers and get the commit LSN.
//! - Get `basebackup` from the pageserver, using the LSN returned in the previous step.
//! - Try to start `postgres` and wait until it is ready to accept connections.
//! - Check and alter/drop/create roles and databases.
//! - Hang waiting for the `postmaster` process to exit.
//!
//! `compute_ctl` also spawns two separate service threads:
//! - `compute-monitor` checks the last Postgres activity timestamp and saves it
//!   into the shared `ComputeNode`;
//! - `http-endpoint` runs a Hyper HTTP API server, which serves readiness and
//!   last-activity requests.
//!
//! If the `AUTOSCALING` environment variable is set, `compute_ctl` will start the
//! `vm-monitor` located in [`neon/libs/vm_monitor`]. For VM compute nodes,
//! `vm-monitor` communicates with the VM autoscaling system. It coordinates
//! downscaling and requests immediate upscaling under resource pressure.
//!
//! Usage example:
//! ```sh
//! compute_ctl -D /var/db/postgres/compute \
//!             -C 'postgresql://cloud_admin@localhost/postgres' \
//!             -S /var/db/postgres/specs/current.json \
//!             -b /usr/local/bin/postgres \
//!             -r http://pg-ext-s3-gateway \
//! ```
use std::collections::HashMap;
use std::fs::File;
use std::path::Path;
use std::process::exit;
use std::str::FromStr;
use std::sync::atomic::Ordering;
use std::sync::{mpsc, Arc, Condvar, Mutex, RwLock};
use std::{thread, time::Duration};

use anyhow::{Context, Result};
use chrono::Utc;
use clap::Arg;
use compute_tools::disk_quota::set_disk_quota;
use compute_tools::lsn_lease::launch_lsn_lease_bg_task_for_static;
use signal_hook::consts::{SIGQUIT, SIGTERM};
use signal_hook::{consts::SIGINT, iterator::Signals};
use tracing::{error, info, warn};
use url::Url;

use compute_api::responses::ComputeStatus;
use compute_api::spec::ComputeSpec;

use compute_tools::compute::{
    forward_termination_signal, ComputeNode, ComputeState, ParsedSpec, PG_PID,
};
use compute_tools::configurator::launch_configurator;
use compute_tools::extension_server::get_pg_version_string;
use compute_tools::http::api::launch_http_server;
use compute_tools::logger::*;
use compute_tools::monitor::launch_monitor;
use compute_tools::params::*;
use compute_tools::spec::*;
use compute_tools::swap::resize_swap;
use rlimit::{setrlimit, Resource};
use utils::failpoint_support;

// This is an arbitrary build tag. Fine as a default / for testing purposes,
// in case the BUILD_TAG environment variable is not set.
const BUILD_TAG_DEFAULT: &str = "latest";

fn main() -> Result<()> {
    let scenario = failpoint_support::init();

    let (build_tag, clap_args) = init()?;

    // enable core dumping for all child processes
    setrlimit(Resource::CORE, rlimit::INFINITY, rlimit::INFINITY)?;

    let (pg_handle, start_pg_result) = {
        // Enter startup tracing context
        let _startup_context_guard = startup_context_from_env();

        let cli_args = process_cli(&clap_args)?;

        let cli_spec = try_spec_from_cli(&clap_args, &cli_args)?;

        let wait_spec_result = wait_spec(build_tag, cli_args, cli_spec)?;

        start_postgres(&clap_args, wait_spec_result)?

        // Startup is finished, exit the startup tracing span
    };

    // PostgreSQL is now running, if startup was successful. Wait until it exits.
    let wait_pg_result = wait_postgres(pg_handle)?;

    let delay_exit = cleanup_after_postgres_exit(start_pg_result)?;

    maybe_delay_exit(delay_exit);

    scenario.teardown();

    deinit_and_exit(wait_pg_result);
}

fn init() -> Result<(String, clap::ArgMatches)> {
    init_tracing_and_logging(DEFAULT_LOG_LEVEL)?;

    opentelemetry::global::set_error_handler(|err| {
        tracing::info!("OpenTelemetry error: {err}");
    })
    .expect("global error handler lock poisoned");

    let mut signals = Signals::new([SIGINT, SIGTERM, SIGQUIT])?;
    thread::spawn(move || {
        for sig in signals.forever() {
            handle_exit_signal(sig);
        }
    });

    let build_tag = option_env!("BUILD_TAG")
        .unwrap_or(BUILD_TAG_DEFAULT)
        .to_string();
    info!("build_tag: {build_tag}");

    Ok((build_tag, cli().get_matches()))
}

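// A minimal sketch (illustrative only, not part of the real startup flow) of
// the build-tag fallback in init() above: option_env!("BUILD_TAG") is resolved
// at compile time, so this test just mirrors the runtime shape of unwrap_or.
#[test]
fn build_tag_falls_back_to_default() {
    let from_env: Option<&str> = None; // stand-in for option_env!("BUILD_TAG")
    assert_eq!(from_env.unwrap_or(BUILD_TAG_DEFAULT), "latest");
}
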
fn process_cli(matches: &clap::ArgMatches) -> Result<ProcessCliResult> {
    let pgbin_default = "postgres";
    let pgbin = matches
        .get_one::<String>("pgbin")
        .map(|s| s.as_str())
        .unwrap_or(pgbin_default);

    let ext_remote_storage = matches
        .get_one::<String>("remote-ext-config")
        // Compatibility hack: if the control plane specified any remote-ext-config,
        // use the default value for the extension storage proxy gateway.
        // Remove this once the control plane is updated to pass the gateway URL.
        .map(|conf| {
            if conf.starts_with("http") {
                conf.trim_end_matches('/')
            } else {
                "http://pg-ext-s3-gateway"
            }
        });

    let http_port = *matches
        .get_one::<u16>("http-port")
        .expect("http-port is required");
    let pgdata = matches
        .get_one::<String>("pgdata")
        .expect("PGDATA path is required");
    let connstr = matches
        .get_one::<String>("connstr")
        .expect("Postgres connection string is required");
    let spec_json = matches.get_one::<String>("spec");
    let spec_path = matches.get_one::<String>("spec-path");
    let resize_swap_on_bind = matches.get_flag("resize-swap-on-bind");
    let set_disk_quota_for_fs = matches.get_one::<String>("set-disk-quota-for-fs");

    Ok(ProcessCliResult {
        connstr,
        pgdata,
        pgbin,
        ext_remote_storage,
        http_port,
        spec_json,
        spec_path,
        resize_swap_on_bind,
        set_disk_quota_for_fs,
    })
}

struct ProcessCliResult<'clap> {
    connstr: &'clap str,
    pgdata: &'clap str,
    pgbin: &'clap str,
    ext_remote_storage: Option<&'clap str>,
    http_port: u16,
    spec_json: Option<&'clap String>,
    spec_path: Option<&'clap String>,
    resize_swap_on_bind: bool,
    set_disk_quota_for_fs: Option<&'clap String>,
}

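// A small sketch of the remote-ext-config normalization performed in
// process_cli() above. The free function here is illustrative (the real code
// inlines this logic in a closure), but its behavior mirrors it exactly.
#[cfg(test)]
fn normalize_remote_ext_config(conf: &str) -> &str {
    if conf.starts_with("http") {
        conf.trim_end_matches('/')
    } else {
        "http://pg-ext-s3-gateway"
    }
}

#[test]
fn remote_ext_config_normalization() {
    // Trailing slashes are trimmed from real URLs...
    assert_eq!(
        normalize_remote_ext_config("http://pg-ext-s3-gateway/"),
        "http://pg-ext-s3-gateway"
    );
    // ...and anything that is not a URL falls back to the default gateway.
    assert_eq!(
        normalize_remote_ext_config("legacy-config"),
        "http://pg-ext-s3-gateway"
    );
}
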
fn startup_context_from_env() -> Option<opentelemetry::ContextGuard> {
    // Extract OpenTelemetry context for the startup actions from the
    // TRACEPARENT and TRACESTATE env variables, and attach it to the current
    // tracing context.
    //
    // This is used to propagate the context for the 'start_compute' operation
    // from the neon control plane. This allows linking together the wider
    // 'start_compute' operation that creates the compute container, with the
    // startup actions here within the container.
    //
    // There is no standard for passing context in env variables, but a lot of
    // tools use TRACEPARENT/TRACESTATE, so we use that convention too. See
    // https://github.com/open-telemetry/opentelemetry-specification/issues/740
    //
    // Switch to the startup context here, and exit it once the startup has
    // completed and Postgres is up and running.
    //
    // If this pod is pre-created without binding it to any particular endpoint
    // yet, this isn't the right place to enter the startup context. In that
    // case, the control plane should pass the tracing context as part of the
    // /configure API call.
    //
    // NOTE: This is supposed to only cover the *startup* actions. Once
    // postgres is configured and up-and-running, we exit this span. Any other
    // actions that are performed on incoming HTTP requests, for example, are
    // performed in separate spans.
    //
    // XXX: If the pod is restarted, we perform the startup actions in the same
    // context as the original startup actions, which probably doesn't make
    // sense.
    let mut startup_tracing_carrier: HashMap<String, String> = HashMap::new();
    if let Ok(val) = std::env::var("TRACEPARENT") {
        startup_tracing_carrier.insert("traceparent".to_string(), val);
    }
    if let Ok(val) = std::env::var("TRACESTATE") {
        startup_tracing_carrier.insert("tracestate".to_string(), val);
    }
    if !startup_tracing_carrier.is_empty() {
        use opentelemetry::propagation::TextMapPropagator;
        use opentelemetry_sdk::propagation::TraceContextPropagator;
        let guard = TraceContextPropagator::new()
            .extract(&startup_tracing_carrier)
            .attach();
        info!("startup tracing context attached");
        Some(guard)
    } else {
        None
    }
}

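// A hedged sketch of the W3C trace context value expected in TRACEPARENT by
// the propagator above (the concrete ids are illustrative): version "00", a
// 16-byte trace-id, an 8-byte parent span-id, and trace flags, hex-encoded
// and dash-separated.
#[test]
fn traceparent_env_value_shape() {
    let example = "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01";
    let parts: Vec<&str> = example.split('-').collect();
    assert_eq!(parts.len(), 4);
    assert_eq!(parts[0], "00"); // version
    assert_eq!(parts[1].len(), 32); // trace-id, 16 bytes hex-encoded
    assert_eq!(parts[2].len(), 16); // parent span-id, 8 bytes hex-encoded
}
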
fn try_spec_from_cli(
    matches: &clap::ArgMatches,
    ProcessCliResult {
        spec_json,
        spec_path,
        ..
    }: &ProcessCliResult,
) -> Result<CliSpecParams> {
    let compute_id = matches.get_one::<String>("compute-id");
    let control_plane_uri = matches.get_one::<String>("control-plane-uri");

    // First, try to get the cluster spec from the CLI argument.
    if let Some(spec_json) = spec_json {
        info!("got spec from cli argument {}", spec_json);
        return Ok(CliSpecParams {
            spec: Some(serde_json::from_str(spec_json)?),
            live_config_allowed: false,
        });
    }

    // Second, try to read it from the file if a path is provided.
    if let Some(spec_path) = spec_path {
        let file = File::open(Path::new(spec_path))?;
        return Ok(CliSpecParams {
            spec: Some(serde_json::from_reader(file)?),
            live_config_allowed: true,
        });
    }

    let Some(compute_id) = compute_id else {
        panic!(
            "compute spec should be provided in one of the following ways: \
                --spec OR --spec-path OR --control-plane-uri and --compute-id"
        );
    };
    let Some(control_plane_uri) = control_plane_uri else {
        panic!("must specify both --control-plane-uri and --compute-id or none");
    };

    match get_spec_from_control_plane(control_plane_uri, compute_id) {
        Ok(spec) => Ok(CliSpecParams {
            spec,
            live_config_allowed: true,
        }),
        Err(e) => {
            error!(
                "cannot get response from control plane: {}\n\
                neither spec nor confirmation that compute is in the Empty state was received",
                e
            );
            Err(e)
        }
    }
}

struct CliSpecParams {
    /// The [`ComputeSpec`], if one was provided via the CLI argument or a file.
    spec: Option<ComputeSpec>,
    live_config_allowed: bool,
}

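// A hedged sketch of the first spec-loading path above: the inline --spec
// value must be valid JSON, parsed with serde_json before anything else runs.
// Parsing into serde_json::Value keeps the example independent of the full
// ComputeSpec schema; the field name used here is illustrative.
#[test]
fn inline_spec_must_be_valid_json() {
    let inline = r#"{ "format_version": 1.0 }"#;
    let parsed: serde_json::Value = serde_json::from_str(inline).expect("valid JSON");
    assert!(parsed.get("format_version").is_some());

    // A malformed document fails the same way try_spec_from_cli() would.
    assert!(serde_json::from_str::<serde_json::Value>("{ not json").is_err());
}
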
fn wait_spec(
    build_tag: String,
    ProcessCliResult {
        connstr,
        pgdata,
        pgbin,
        ext_remote_storage,
        resize_swap_on_bind,
        set_disk_quota_for_fs,
        http_port,
        ..
    }: ProcessCliResult,
    CliSpecParams {
        spec,
        live_config_allowed,
    }: CliSpecParams,
) -> Result<WaitSpecResult> {
    let mut new_state = ComputeState::new();
    let spec_set;

    if let Some(spec) = spec {
        let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow::anyhow!(msg))?;
        info!("new pspec.spec: {:?}", pspec.spec);
        new_state.pspec = Some(pspec);
        spec_set = true;
    } else {
        spec_set = false;
    }
    let connstr = Url::parse(connstr).context("cannot parse connstr as a URL")?;
    let conn_conf = postgres::config::Config::from_str(connstr.as_str())
        .context("cannot build postgres config from connstr")?;
    let tokio_conn_conf = tokio_postgres::config::Config::from_str(connstr.as_str())
        .context("cannot build tokio postgres config from connstr")?;
    let compute_node = ComputeNode {
        connstr,
        conn_conf,
        tokio_conn_conf,
        pgdata: pgdata.to_string(),
        pgbin: pgbin.to_string(),
        pgversion: get_pg_version_string(pgbin),
        http_port,
        live_config_allowed,
        state: Mutex::new(new_state),
        state_changed: Condvar::new(),
        ext_remote_storage: ext_remote_storage.map(|s| s.to_string()),
        ext_download_progress: RwLock::new(HashMap::new()),
        build_tag,
    };
    let compute = Arc::new(compute_node);

    // If this is a pooled VM, prewarm before starting the HTTP server and becoming
    // available for binding. Prewarming helps Postgres start quicker later,
    // because QEMU will already have its memory allocated from the host, and
    // the necessary binaries will already be cached.
    if !spec_set {
        compute.prewarm_postgres()?;
    }

    // Launch the http service first, so that we can serve control-plane requests
    // while configuration is still in progress.
    let _http_handle =
        launch_http_server(http_port, &compute).expect("cannot launch http endpoint thread");

    if !spec_set {
        // No spec provided, hang waiting for it.
        info!("no compute spec provided, waiting");

        let mut state = compute.state.lock().unwrap();
        while state.status != ComputeStatus::ConfigurationPending {
            state = compute.state_changed.wait(state).unwrap();

            if state.status == ComputeStatus::ConfigurationPending {
                info!("got spec, continue configuration");
                // Spec is already set by the http server handler.
                break;
            }
        }

        // Record how long we slept waiting for the spec.
        let now = Utc::now();
        state.metrics.wait_for_spec_ms = now
            .signed_duration_since(state.start_time)
            .to_std()
            .unwrap()
            .as_millis() as u64;

        // Reset the start time, so that the total startup time calculated later
        // does not include the time that we waited for the spec.
        state.start_time = now;
    }

    launch_lsn_lease_bg_task_for_static(&compute);

    Ok(WaitSpecResult {
        compute,
        resize_swap_on_bind,
        set_disk_quota_for_fs: set_disk_quota_for_fs.cloned(),
    })
}

struct WaitSpecResult {
    compute: Arc<ComputeNode>,
    resize_swap_on_bind: bool,
    set_disk_quota_for_fs: Option<String>,
}

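// A self-contained sketch of the Mutex + Condvar wait pattern used in
// wait_spec() above, with an illustrative boolean in place of ComputeStatus:
// the waiter re-checks the condition in a loop because condvar wakeups can be
// spurious.
#[test]
fn condvar_wait_pattern() {
    use std::sync::{Arc, Condvar, Mutex};

    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let notifier = Arc::clone(&pair);
    std::thread::spawn(move || {
        let (lock, cvar) = &*notifier;
        *lock.lock().unwrap() = true; // stand-in for "spec arrived"
        cvar.notify_all();
    });

    let (lock, cvar) = &*pair;
    let mut ready = lock.lock().unwrap();
    while !*ready {
        ready = cvar.wait(ready).unwrap();
    }
    assert!(*ready);
}
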
fn start_postgres(
    // Need to allow unused because `matches` is only used if target_os = "linux".
    #[allow(unused_variables)] matches: &clap::ArgMatches,
    WaitSpecResult {
        compute,
        resize_swap_on_bind,
        set_disk_quota_for_fs,
    }: WaitSpecResult,
) -> Result<(Option<PostgresHandle>, StartPostgresResult)> {
    // We got all we need, update the state.
    let mut state = compute.state.lock().unwrap();
    state.set_status(ComputeStatus::Init, &compute.state_changed);

    info!(
        "running compute with features: {:?}",
        state.pspec.as_ref().unwrap().spec.features
    );
    // Before we release the mutex, fetch some parameters for later.
    let &ComputeSpec {
        swap_size_bytes,
        disk_quota_bytes,
        #[cfg(target_os = "linux")]
        disable_lfc_resizing,
        ..
    } = &state.pspec.as_ref().unwrap().spec;
    drop(state);

    // Launch the remaining service threads.
    let _monitor_handle = launch_monitor(&compute);
    let _configurator_handle = launch_configurator(&compute);

    let mut prestartup_failed = false;
    let mut delay_exit = false;

    // Resize swap to the desired size if the compute spec says so.
    if let (Some(size_bytes), true) = (swap_size_bytes, resize_swap_on_bind) {
        // To avoid 'swapoff' hitting postgres startup, we need to run resize-swap to completion
        // *before* starting postgres.
        //
        // In theory, we could do this asynchronously if SkipSwapon was enabled for VMs, but this
        // carries a risk of introducing hard-to-debug issues - e.g. if postgres sometimes gets
        // OOM-killed during startup because swap wasn't available yet.
        match resize_swap(size_bytes) {
            Ok(()) => {
                let size_mib = size_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
                info!(%size_bytes, %size_mib, "resized swap");
            }
            Err(err) => {
                let err = err.context("failed to resize swap");
                error!("{err:#}");

                // Mark compute startup as failed; don't try to start postgres, and report this
                // error to the control plane when it next asks.
                prestartup_failed = true;
                compute.set_failed_status(err);
                delay_exit = true;
            }
        }
    }

    // Set the disk quota if the compute spec says so.
    if let (Some(disk_quota_bytes), Some(disk_quota_fs_mountpoint)) =
        (disk_quota_bytes, set_disk_quota_for_fs)
    {
        match set_disk_quota(disk_quota_bytes, &disk_quota_fs_mountpoint) {
            Ok(()) => {
                let size_mib = disk_quota_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
                info!(%disk_quota_bytes, %size_mib, "set disk quota");
            }
            Err(err) => {
                let err = err.context("failed to set disk quota");
                error!("{err:#}");

                // Mark compute startup as failed; don't try to start postgres, and report this
                // error to the control plane when it next asks.
                prestartup_failed = true;
                compute.set_failed_status(err);
                delay_exit = true;
            }
        }
    }

    // Start Postgres.
    let mut pg = None;
    if !prestartup_failed {
        pg = match compute.start_compute() {
            Ok(pg) => Some(pg),
            Err(err) => {
                error!("could not start the compute node: {:#}", err);
                compute.set_failed_status(err);
                delay_exit = true;
                None
            }
        };
    } else {
        warn!("skipping postgres startup because pre-startup step failed");
    }

    // Start the vm-monitor if directed to. The vm-monitor only runs on Linux
    // because it requires cgroups.
    cfg_if::cfg_if! {
        if #[cfg(target_os = "linux")] {
            use std::env;
            use tokio_util::sync::CancellationToken;
            let vm_monitor_addr = matches
                .get_one::<String>("vm-monitor-addr")
                .expect("--vm-monitor-addr should always be set because it has a default arg");
            let file_cache_connstr = matches.get_one::<String>("filecache-connstr");
            let cgroup = matches.get_one::<String>("cgroup");

            // Only create a runtime if we need to. Note: a runtime created in an
            // inner scope appears to keep running tasks spawned on it even after
            // the scope ends, but create it in the outermost scope anyway, just
            // to be safe.
            let rt = if env::var_os("AUTOSCALING").is_some() {
                Some(
                    tokio::runtime::Builder::new_multi_thread()
                        .worker_threads(4)
                        .enable_all()
                        .build()
                        .expect("failed to create tokio runtime for monitor")
                )
            } else {
                None
            };

            // This token is used internally by the monitor to clean up all threads.
            let token = CancellationToken::new();

            // Don't pass the postgres connection string to the vm-monitor if we
            // don't want it to resize the LFC.
            let pgconnstr = if disable_lfc_resizing.unwrap_or(false) {
                None
            } else {
                file_cache_connstr.cloned()
            };

            let vm_monitor = rt.as_ref().map(|rt| {
                rt.spawn(vm_monitor::start(
                    Box::leak(Box::new(vm_monitor::Args {
                        cgroup: cgroup.cloned(),
                        pgconnstr,
                        addr: vm_monitor_addr.clone(),
                    })),
                    token.clone(),
                ))
            });
        }
    }

    Ok((
        pg,
        StartPostgresResult {
            delay_exit,
            compute,
            #[cfg(target_os = "linux")]
            rt,
            #[cfg(target_os = "linux")]
            token,
            #[cfg(target_os = "linux")]
            vm_monitor,
        },
    ))
}

type PostgresHandle = (std::process::Child, std::thread::JoinHandle<()>);

struct StartPostgresResult {
    delay_exit: bool,
    // passed through from WaitSpecResult
    compute: Arc<ComputeNode>,

    #[cfg(target_os = "linux")]
    rt: Option<tokio::runtime::Runtime>,
    #[cfg(target_os = "linux")]
    token: tokio_util::sync::CancellationToken,
    #[cfg(target_os = "linux")]
    vm_monitor: Option<tokio::task::JoinHandle<Result<()>>>,
}

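// A self-contained sketch of the Box::leak pattern used in start_postgres()
// above to hand the vm-monitor a `&'static` argument struct without a static
// initializer: leak a one-off heap allocation whose lifetime must span the
// rest of the process. The struct here is illustrative, not vm_monitor::Args.
#[test]
fn box_leak_yields_static_reference() {
    struct MonitorArgs {
        addr: String,
    }
    let leaked: &'static MonitorArgs = Box::leak(Box::new(MonitorArgs {
        addr: "0.0.0.0:10301".to_string(),
    }));
    assert_eq!(leaked.addr, "0.0.0.0:10301");
}
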
fn wait_postgres(pg: Option<PostgresHandle>) -> Result<WaitPostgresResult> {
    // Wait for the child Postgres process forever. In this state Ctrl+C will
    // propagate to Postgres and it will be shut down as well.
    let mut exit_code = None;
    if let Some((mut pg, logs_handle)) = pg {
        let ecode = pg
            .wait()
            .expect("failed to start waiting on Postgres process");
        PG_PID.store(0, Ordering::SeqCst);

        // Process has exited, so we can join the logs thread.
        let _ = logs_handle
            .join()
            .map_err(|e| tracing::error!("log thread panicked: {:?}", e));

        info!("Postgres exited with code {}, shutting down", ecode);
        exit_code = ecode.code()
    }

    Ok(WaitPostgresResult { exit_code })
}

struct WaitPostgresResult {
    exit_code: Option<i32>,
}

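// A hedged sketch of the wait-then-join order in wait_postgres(): wait on the
// child process first, then join the thread that was draining its output.
// `true` is a stand-in child process (this assumes a Unix-like environment,
// as the rest of this binary does), and the logs thread body is illustrative.
#[test]
fn wait_child_then_join_logs_thread() {
    let mut child = std::process::Command::new("true")
        .spawn()
        .expect("failed to spawn stand-in child process");
    let logs_handle = std::thread::spawn(|| {
        // The real logs thread forwards the child's stdout/stderr.
    });
    let ecode = child.wait().expect("failed to wait on child");
    let _ = logs_handle.join();
    assert!(ecode.success());
}
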
fn cleanup_after_postgres_exit(
    StartPostgresResult {
        mut delay_exit,
        compute,
        #[cfg(target_os = "linux")]
        vm_monitor,
        #[cfg(target_os = "linux")]
        token,
        #[cfg(target_os = "linux")]
        rt,
    }: StartPostgresResult,
) -> Result<bool> {
    // Terminate the vm_monitor so it releases the file watcher on
    // /sys/fs/cgroup/neon-postgres.
    // Note: the vm-monitor only runs on linux because it requires cgroups.
    cfg_if::cfg_if! {
        if #[cfg(target_os = "linux")] {
            if let Some(handle) = vm_monitor {
                // Kills all threads spawned by the monitor
                token.cancel();
                // Kills the actual task running the monitor
                handle.abort();

                // If handle is some, rt must have been used to produce it, and
                // hence is also some
                rt.unwrap().shutdown_timeout(Duration::from_secs(2));
            }
        }
    }

    // Maybe sync safekeepers again, to speed up the next startup.
    let compute_state = compute.state.lock().unwrap().clone();
    let pspec = compute_state.pspec.as_ref().expect("spec must be set");
    if matches!(pspec.spec.mode, compute_api::spec::ComputeMode::Primary) {
        info!("syncing safekeepers on shutdown");
        let storage_auth_token = pspec.storage_auth_token.clone();
        let lsn = compute.sync_safekeepers(storage_auth_token)?;
        info!("synced safekeepers at lsn {lsn}");
    }

    let mut state = compute.state.lock().unwrap();
    if state.status == ComputeStatus::TerminationPending {
        state.status = ComputeStatus::Terminated;
        compute.state_changed.notify_all();
        // We were asked to terminate gracefully; don't exit, to avoid a restart.
        delay_exit = true
    }
    drop(state);

    if let Err(err) = compute.check_for_core_dumps() {
        error!("error while checking for core dumps: {err:?}");
    }

    Ok(delay_exit)
}

fn maybe_delay_exit(delay_exit: bool) {
    // If launch failed, keep serving HTTP requests for a while, so the cloud
    // control plane can get the actual error.
    if delay_exit {
        info!("giving control plane 30s to collect the error before shutdown");
        thread::sleep(Duration::from_secs(30));
    }
}

fn deinit_and_exit(WaitPostgresResult { exit_code }: WaitPostgresResult) -> ! {
    // Shut down the trace pipeline gracefully, so that it has a chance to send
    // any pending traces before we exit. Shutting down the OTEL tracing provider
    // may hang for quite some time, see, for example:
    // - https://github.com/open-telemetry/opentelemetry-rust/issues/868
    // - and our problems with staging https://github.com/neondatabase/cloud/issues/3707#issuecomment-1493983636
    //
    // Yet, we want computes to shut down fast enough, as we may need a new one
    // for the same timeline ASAP. So wait no longer than 2s for the shutdown to
    // complete, then just error out and exit the main thread.
    info!("shutting down tracing");
    let (sender, receiver) = mpsc::channel();
    let _ = thread::spawn(move || {
        tracing_utils::shutdown_tracing();
        sender.send(()).ok()
    });
    let shutdown_res = receiver.recv_timeout(Duration::from_millis(2000));
    if shutdown_res.is_err() {
        error!("timed out while shutting down tracing, exiting anyway");
    }

    info!("shutting down");
    exit(exit_code.unwrap_or(1))
}

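// A minimal sketch of the bounded-shutdown pattern in deinit_and_exit(): run
// a potentially hanging call on a helper thread and wait on a channel with a
// timeout, so the main thread can give up and exit. The sleeping closure is a
// stand-in for tracing_utils::shutdown_tracing().
#[test]
fn bounded_wait_on_blocking_call() {
    use std::sync::mpsc;
    use std::time::Duration;

    let (sender, receiver) = mpsc::channel();
    let _ = std::thread::spawn(move || {
        std::thread::sleep(Duration::from_millis(10)); // "slow" shutdown
        sender.send(()).ok()
    });
    // Succeeds within the deadline here; in the real code a timeout is logged
    // and the process exits anyway.
    assert!(receiver.recv_timeout(Duration::from_millis(2000)).is_ok());
}
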
fn cli() -> clap::Command {
    // Env variable is set by `cargo`
    let version = option_env!("CARGO_PKG_VERSION").unwrap_or("unknown");
    clap::Command::new("compute_ctl")
        .version(version)
        .arg(
            Arg::new("http-port")
                .long("http-port")
                .value_name("HTTP_PORT")
                .default_value("3080")
                .value_parser(clap::value_parser!(u16))
                .required(false),
        )
        .arg(
            Arg::new("connstr")
                .short('C')
                .long("connstr")
                .value_name("DATABASE_URL")
                .required(true),
        )
        .arg(
            Arg::new("pgdata")
                .short('D')
                .long("pgdata")
                .value_name("DATADIR")
                .required(true),
        )
        .arg(
            Arg::new("pgbin")
                .short('b')
                .long("pgbin")
                .default_value("postgres")
                .value_name("POSTGRES_PATH"),
        )
        .arg(
            Arg::new("spec")
                .short('s')
                .long("spec")
                .value_name("SPEC_JSON"),
        )
        .arg(
            Arg::new("spec-path")
                .short('S')
                .long("spec-path")
                .value_name("SPEC_PATH"),
        )
        .arg(
            Arg::new("compute-id")
                .short('i')
                .long("compute-id")
                .value_name("COMPUTE_ID"),
        )
        .arg(
            Arg::new("control-plane-uri")
                .short('p')
                .long("control-plane-uri")
                .value_name("CONTROL_PLANE_API_BASE_URI"),
        )
        .arg(
            Arg::new("remote-ext-config")
                .short('r')
                .long("remote-ext-config")
                .value_name("REMOTE_EXT_CONFIG"),
        )
        // TODO(fprasx): we currently have default arguments because the cloud PR
        // to pass them in hasn't been merged yet. We should get rid of them once
        // the PR is merged.
        .arg(
            Arg::new("vm-monitor-addr")
                .long("vm-monitor-addr")
                .default_value("0.0.0.0:10301")
                .value_name("VM_MONITOR_ADDR"),
        )
        .arg(
            Arg::new("cgroup")
                .long("cgroup")
                .default_value("neon-postgres")
                .value_name("CGROUP"),
        )
        .arg(
            Arg::new("filecache-connstr")
                .long("filecache-connstr")
                .default_value(
                    "host=localhost port=5432 dbname=postgres user=cloud_admin sslmode=disable application_name=vm-monitor",
                )
                .value_name("FILECACHE_CONNSTR"),
        )
        .arg(
            Arg::new("resize-swap-on-bind")
                .long("resize-swap-on-bind")
                .action(clap::ArgAction::SetTrue),
        )
        .arg(
            Arg::new("set-disk-quota-for-fs")
                .long("set-disk-quota-for-fs")
                .value_name("SET_DISK_QUOTA_FOR_FS"),
        )
}

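// A hedged sketch of how the command definition above parses a minimal
// invocation; the argument values are illustrative. It complements
// verify_cli() below by checking the declared defaults for a couple of
// options that are not passed.
#[test]
fn cli_parses_minimal_invocation() {
    let matches = cli()
        .try_get_matches_from([
            "compute_ctl",
            "-D",
            "/var/db/postgres/compute",
            "-C",
            "postgresql://cloud_admin@localhost/postgres",
        ])
        .expect("required args are provided, so parsing should succeed");
    // Defaults declared above kick in for the options we did not pass.
    assert_eq!(matches.get_one::<u16>("http-port"), Some(&3080));
    assert_eq!(
        matches.get_one::<String>("pgbin").map(String::as_str),
        Some("postgres")
    );
    assert!(!matches.get_flag("resize-swap-on-bind"));
}
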
/// When compute_ctl is killed, also send a termination signal to
/// sync-safekeepers, to prevent leaking the child process. TODO: it would be
/// better to convert compute_ctl to async and simply await termination.
fn handle_exit_signal(sig: i32) {
    info!("received {sig} termination signal");
    forward_termination_signal();
    exit(1);
}

#[test]
fn verify_cli() {
    cli().debug_assert()
}