LCOV - code coverage report
Current view: top level - compute_tools/src - compute.rs (source / functions)
Test: 6df3fc19ec669bcfbbf9aba41d1338898d24eaa0.info
Test Date: 2025-03-12 18:28:53

             Coverage   Total   Hit
Lines:          0.0 %    1040     0
Functions:      0.0 %      88     0

            Line data    Source code
       1              : use std::collections::HashMap;
       2              : use std::os::unix::fs::{PermissionsExt, symlink};
       3              : use std::path::Path;
       4              : use std::process::{Command, Stdio};
       5              : use std::str::FromStr;
       6              : use std::sync::atomic::{AtomicU32, Ordering};
       7              : use std::sync::{Arc, Condvar, Mutex, RwLock};
       8              : use std::time::{Duration, Instant};
       9              : use std::{env, fs};
      10              : 
      11              : use anyhow::{Context, Result};
      12              : use chrono::{DateTime, Utc};
      13              : use compute_api::privilege::Privilege;
      14              : use compute_api::responses::{ComputeCtlConfig, ComputeMetrics, ComputeStatus};
      15              : use compute_api::spec::{
      16              :     ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, ExtVersion, PgIdent,
      17              : };
      18              : use futures::StreamExt;
      19              : use futures::future::join_all;
      20              : use futures::stream::FuturesUnordered;
      21              : use nix::sys::signal::{Signal, kill};
      22              : use nix::unistd::Pid;
      23              : use postgres;
      24              : use postgres::NoTls;
      25              : use postgres::error::SqlState;
      26              : use remote_storage::{DownloadError, RemotePath};
      27              : use tokio::spawn;
      28              : use tracing::{Instrument, debug, error, info, instrument, warn};
      29              : use utils::id::{TenantId, TimelineId};
      30              : use utils::lsn::Lsn;
      31              : use utils::measured_stream::MeasuredReader;
      32              : 
      33              : use crate::configurator::launch_configurator;
      34              : use crate::disk_quota::set_disk_quota;
      35              : use crate::installed_extensions::get_installed_extensions;
      36              : use crate::logger::startup_context_from_env;
      37              : use crate::lsn_lease::launch_lsn_lease_bg_task_for_static;
      38              : use crate::monitor::launch_monitor;
      39              : use crate::pg_helpers::*;
      40              : use crate::rsyslog::configure_audit_rsyslog;
      41              : use crate::spec::*;
      42              : use crate::swap::resize_swap;
      43              : use crate::sync_sk::{check_if_synced, ping_safekeeper};
      44              : use crate::{config, extension_server, local_proxy};
      45              : 
      46              : pub static SYNC_SAFEKEEPERS_PID: AtomicU32 = AtomicU32::new(0);
      47              : pub static PG_PID: AtomicU32 = AtomicU32::new(0);
      48              : 
      49              : /// Static configuration params that don't change after startup. These mostly
      50              : /// come from the CLI args, or are derived from them.
      51              : pub struct ComputeNodeParams {
      52              :     /// The ID of the compute
      53              :     pub compute_id: String,
      54              :     // Url type maintains proper escaping
      55              :     pub connstr: url::Url,
      56              : 
      57              :     pub resize_swap_on_bind: bool,
      58              :     pub set_disk_quota_for_fs: Option<String>,
      59              : 
      60              :     // VM monitor parameters
      61              :     #[cfg(target_os = "linux")]
      62              :     pub filecache_connstr: String,
      63              :     #[cfg(target_os = "linux")]
      64              :     pub cgroup: String,
      65              :     #[cfg(target_os = "linux")]
      66              :     pub vm_monitor_addr: String,
      67              : 
      68              :     pub pgdata: String,
      69              :     pub pgbin: String,
      70              :     pub pgversion: String,
      71              :     pub build_tag: String,
      72              : 
      73              :     /// The port that the compute's external HTTP server listens on
      74              :     pub external_http_port: u16,
      75              :     /// The port that the compute's internal HTTP server listens on
      76              :     pub internal_http_port: u16,
      77              : 
       78              :     /// The address of the extension storage proxy gateway.
      79              :     pub ext_remote_storage: Option<String>,
      80              : 
       81              :     /// We should only allow live (re)configuration of the compute node if
       82              :     /// it uses the 'pull model', i.e. it can go to the control plane and fetch
       83              :     /// the latest configuration. Otherwise, there could be a case:
      84              :     /// - we start compute with some spec provided as argument
      85              :     /// - we push new spec and it does reconfiguration
      86              :     /// - but then something happens and compute pod / VM is destroyed,
      87              :     ///   so k8s controller starts it again with the **old** spec
      88              :     ///
      89              :     /// and the same for empty computes:
      90              :     /// - we started compute without any spec
      91              :     /// - we push spec and it does configuration
      92              :     /// - but then it is restarted without any spec again
      93              :     pub live_config_allowed: bool,
      94              : }
      95              : 
      96              : /// Compute node info shared across several `compute_ctl` threads.
      97              : pub struct ComputeNode {
      98              :     pub params: ComputeNodeParams,
      99              : 
     100              :     // We connect to Postgres from many different places, so build configs once
      101              :     // and reuse them where needed. These are derived from 'params.connstr'.
     102              :     pub conn_conf: postgres::config::Config,
     103              :     pub tokio_conn_conf: tokio_postgres::config::Config,
     104              : 
     105              :     /// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
      106              :     /// To allow the HTTP API server to serve status requests while configuration
      107              :     /// is in progress, the lock should be held only for short periods of time to
      108              :     /// do reads/writes, not for the whole configuration process.
     109              :     pub state: Mutex<ComputeState>,
     110              :     /// `Condvar` to allow notifying waiters about state changes.
     111              :     pub state_changed: Condvar,
     112              : 
      113              :     // key: ext_archive_name, value: (download start time, download completed?)
     114              :     pub ext_download_progress: RwLock<HashMap<String, (DateTime<Utc>, bool)>>,
     115              : }
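
The `state` / `state_changed` pair above is the standard `Mutex` + `Condvar` pattern: writers update the state under the lock and notify, while readers hold the lock only long enough to check it and otherwise sleep on the condvar (this is what `wait_spec()` and `set_status()` do below). A minimal stand-alone sketch of that pattern, using a plain `String` status as a stand-in for `ComputeStatus` so it compiles without the crate's types:

    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;

    fn main() {
        let state = Arc::new((Mutex::new(String::from("init")), Condvar::new()));

        // Writer: change the status under the lock, then wake all waiters.
        let writer = {
            let state = Arc::clone(&state);
            thread::spawn(move || {
                let (lock, cvar) = &*state;
                *lock.lock().unwrap() = String::from("running");
                cvar.notify_all();
            })
        };

        // Reader: hold the lock only while checking; wait() releases it while sleeping.
        let (lock, cvar) = &*state;
        let mut status = lock.lock().unwrap();
        while *status != "running" {
            status = cvar.wait(status).unwrap();
        }
        println!("observed status: {status}");

        writer.join().unwrap();
    }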
     116              : 
      117              : // Store some metrics about download sizes that might impact startup time.
     118              : #[derive(Clone, Debug)]
     119              : pub struct RemoteExtensionMetrics {
     120              :     num_ext_downloaded: u64,
     121              :     largest_ext_size: u64,
     122              :     total_ext_download_size: u64,
     123              : }
     124              : 
     125              : #[derive(Clone, Debug)]
     126              : pub struct ComputeState {
     127              :     pub start_time: DateTime<Utc>,
     128              :     pub status: ComputeStatus,
      129              :     /// Timestamp of the last Postgres activity. It could be `None` if the
      130              :     /// compute hasn't been used since start.
     131              :     pub last_active: Option<DateTime<Utc>>,
     132              :     pub error: Option<String>,
     133              : 
      134              :     /// Compute spec. This can be received from the CLI or, more likely,
     135              :     /// passed by the control plane with a /configure HTTP request.
     136              :     pub pspec: Option<ParsedSpec>,
     137              : 
     138              :     pub compute_ctl_config: ComputeCtlConfig,
     139              : 
     140              :     /// If the spec is passed by a /configure request, 'startup_span' is the
     141              :     /// /configure request's tracing span. The main thread enters it when it
     142              :     /// processes the compute startup, so that the compute startup is considered
     143              :     /// to be part of the /configure request for tracing purposes.
     144              :     ///
      145              :     /// If the request handling thread/task called start_compute() directly,
     146              :     /// it would automatically be a child of the request handling span, and we
     147              :     /// wouldn't need this. But because we use the main thread to perform the
     148              :     /// startup, and the /configure task just waits for it to finish, we need to
     149              :     /// set up the span relationship ourselves.
     150              :     pub startup_span: Option<tracing::span::Span>,
     151              : 
     152              :     pub metrics: ComputeMetrics,
     153              : }
     154              : 
     155              : impl ComputeState {
     156            0 :     pub fn new() -> Self {
     157            0 :         Self {
     158            0 :             start_time: Utc::now(),
     159            0 :             status: ComputeStatus::Empty,
     160            0 :             last_active: None,
     161            0 :             error: None,
     162            0 :             pspec: None,
     163            0 :             compute_ctl_config: ComputeCtlConfig::default(),
     164            0 :             startup_span: None,
     165            0 :             metrics: ComputeMetrics::default(),
     166            0 :         }
     167            0 :     }
     168              : 
     169            0 :     pub fn set_status(&mut self, status: ComputeStatus, state_changed: &Condvar) {
     170            0 :         let prev = self.status;
     171            0 :         info!("Changing compute status from {} to {}", prev, status);
     172            0 :         self.status = status;
     173            0 :         state_changed.notify_all();
     174            0 :     }
     175              : 
     176            0 :     pub fn set_failed_status(&mut self, err: anyhow::Error, state_changed: &Condvar) {
     177            0 :         self.error = Some(format!("{err:?}"));
     178            0 :         self.set_status(ComputeStatus::Failed, state_changed);
     179            0 :     }
     180              : }
     181              : 
     182              : impl Default for ComputeState {
     183            0 :     fn default() -> Self {
     184            0 :         Self::new()
     185            0 :     }
     186              : }
     187              : 
     188              : #[derive(Clone, Debug)]
     189              : pub struct ParsedSpec {
     190              :     pub spec: ComputeSpec,
     191              :     pub tenant_id: TenantId,
     192              :     pub timeline_id: TimelineId,
     193              :     pub pageserver_connstr: String,
     194              :     pub safekeeper_connstrings: Vec<String>,
     195              :     pub storage_auth_token: Option<String>,
     196              : }
     197              : 
     198              : impl TryFrom<ComputeSpec> for ParsedSpec {
     199              :     type Error = String;
     200            0 :     fn try_from(spec: ComputeSpec) -> Result<Self, String> {
     201              :         // Extract the options from the spec file that are needed to connect to
     202              :         // the storage system.
     203              :         //
     204              :         // For backwards-compatibility, the top-level fields in the spec file
     205              :         // may be empty. In that case, we need to dig them from the GUCs in the
     206              :         // cluster.settings field.
     207            0 :         let pageserver_connstr = spec
     208            0 :             .pageserver_connstring
     209            0 :             .clone()
     210            0 :             .or_else(|| spec.cluster.settings.find("neon.pageserver_connstring"))
     211            0 :             .ok_or("pageserver connstr should be provided")?;
     212            0 :         let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() {
     213            0 :             if matches!(spec.mode, ComputeMode::Primary) {
     214            0 :                 spec.cluster
     215            0 :                     .settings
     216            0 :                     .find("neon.safekeepers")
     217            0 :                     .ok_or("safekeeper connstrings should be provided")?
     218            0 :                     .split(',')
     219            0 :                     .map(|str| str.to_string())
     220            0 :                     .collect()
     221              :             } else {
     222            0 :                 vec![]
     223              :             }
     224              :         } else {
     225            0 :             spec.safekeeper_connstrings.clone()
     226              :         };
     227            0 :         let storage_auth_token = spec.storage_auth_token.clone();
     228            0 :         let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
     229            0 :             tenant_id
     230              :         } else {
     231            0 :             spec.cluster
     232            0 :                 .settings
     233            0 :                 .find("neon.tenant_id")
     234            0 :                 .ok_or("tenant id should be provided")
     235            0 :                 .map(|s| TenantId::from_str(&s))?
     236            0 :                 .or(Err("invalid tenant id"))?
     237              :         };
     238            0 :         let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
     239            0 :             timeline_id
     240              :         } else {
     241            0 :             spec.cluster
     242            0 :                 .settings
     243            0 :                 .find("neon.timeline_id")
     244            0 :                 .ok_or("timeline id should be provided")
     245            0 :                 .map(|s| TimelineId::from_str(&s))?
     246            0 :                 .or(Err("invalid timeline id"))?
     247              :         };
     248              : 
     249            0 :         Ok(ParsedSpec {
     250            0 :             spec,
     251            0 :             pageserver_connstr,
     252            0 :             safekeeper_connstrings,
     253            0 :             storage_auth_token,
     254            0 :             tenant_id,
     255            0 :             timeline_id,
     256            0 :         })
     257            0 :     }
     258              : }
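
For illustration, the "top-level field first, then the matching GUC from `cluster.settings`" fallback used above can be reduced to a small helper. This is only a sketch, with a `HashMap<String, String>` standing in for the settings lookup and made-up safekeeper addresses:

    use std::collections::HashMap;

    // Prefer the explicit field; otherwise fall back to the GUC-style setting.
    fn resolve_safekeepers(
        explicit: &[String],
        settings: &HashMap<String, String>,
    ) -> Result<Vec<String>, String> {
        if !explicit.is_empty() {
            return Ok(explicit.to_vec());
        }
        settings
            .get("neon.safekeepers")
            .map(|s| s.split(',').map(str::to_string).collect())
            .ok_or_else(|| "safekeeper connstrings should be provided".to_string())
    }

    fn main() {
        let mut settings = HashMap::new();
        settings.insert("neon.safekeepers".to_string(), "sk-1:5454,sk-2:5454".to_string());
        let sks = resolve_safekeepers(&[], &settings).unwrap();
        assert_eq!(sks, vec!["sk-1:5454".to_string(), "sk-2:5454".to_string()]);
    }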
     259              : 
     260              : /// If we are a VM, returns a [`Command`] that will run in the `neon-postgres`
     261              : /// cgroup. Otherwise returns the default `Command::new(cmd)`
     262              : ///
     263              : /// This function should be used to start postgres, as it will start it in the
     264              : /// neon-postgres cgroup if we are a VM. This allows autoscaling to control
     265              : /// postgres' resource usage. The cgroup will exist in VMs because vm-builder
     266              : /// creates it during the sysinit phase of its inittab.
     267            0 : fn maybe_cgexec(cmd: &str) -> Command {
     268            0 :     // The cplane sets this env var for autoscaling computes.
      269            0 :     // Use `var_os` so we don't have to worry about the variable being valid
      270            0 :     // unicode. That should never be a concern, but just in case.
     271            0 :     if env::var_os("AUTOSCALING").is_some() {
     272            0 :         let mut command = Command::new("cgexec");
     273            0 :         command.args(["-g", "memory:neon-postgres"]);
     274            0 :         command.arg(cmd);
     275            0 :         command
     276              :     } else {
     277            0 :         Command::new(cmd)
     278              :     }
     279            0 : }
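
For context, the returned value is used like any other `Command`: the caller keeps appending postgres-specific arguments and spawns it, and only the choice of executable differs between the cgroup and non-cgroup cases. A self-contained sketch of that usage, re-stating the wrapper so it compiles on its own and wrapping `echo` instead of postgres:

    use std::env;
    use std::process::Command;

    fn maybe_wrapped(cmd: &str) -> Command {
        if env::var_os("AUTOSCALING").is_some() {
            // Run the command inside the neon-postgres memory cgroup via cgexec.
            let mut command = Command::new("cgexec");
            command.args(["-g", "memory:neon-postgres"]);
            command.arg(cmd);
            command
        } else {
            Command::new(cmd)
        }
    }

    fn main() -> std::io::Result<()> {
        // The caller treats the result as a normal Command and adds its own args.
        let status = maybe_wrapped("echo").arg("hello from the wrapper").status()?;
        println!("exit status: {status}");
        Ok(())
    }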
     280              : 
     281              : struct PostgresHandle {
     282              :     postgres: std::process::Child,
     283              :     log_collector: tokio::task::JoinHandle<Result<()>>,
     284              : }
     285              : 
     286              : impl PostgresHandle {
     287              :     /// Return PID of the postgres (postmaster) process
     288            0 :     fn pid(&self) -> Pid {
     289            0 :         Pid::from_raw(self.postgres.id() as i32)
     290            0 :     }
     291              : }
     292              : 
     293              : struct StartVmMonitorResult {
     294              :     #[cfg(target_os = "linux")]
     295              :     token: tokio_util::sync::CancellationToken,
     296              :     #[cfg(target_os = "linux")]
     297              :     vm_monitor: Option<tokio::task::JoinHandle<Result<()>>>,
     298              : }
     299              : 
     300              : impl ComputeNode {
     301            0 :     pub fn new(
     302            0 :         params: ComputeNodeParams,
     303            0 :         cli_spec: Option<ComputeSpec>,
     304            0 :         compute_ctl_config: ComputeCtlConfig,
     305            0 :     ) -> Result<Self> {
     306            0 :         let connstr = params.connstr.as_str();
     307            0 :         let conn_conf = postgres::config::Config::from_str(connstr)
     308            0 :             .context("cannot build postgres config from connstr")?;
     309            0 :         let tokio_conn_conf = tokio_postgres::config::Config::from_str(connstr)
     310            0 :             .context("cannot build tokio postgres config from connstr")?;
     311              : 
     312            0 :         let mut new_state = ComputeState::new();
     313            0 :         if let Some(cli_spec) = cli_spec {
     314            0 :             let pspec = ParsedSpec::try_from(cli_spec).map_err(|msg| anyhow::anyhow!(msg))?;
     315            0 :             new_state.pspec = Some(pspec);
     316            0 :         }
     317            0 :         new_state.compute_ctl_config = compute_ctl_config;
     318            0 : 
     319            0 :         Ok(ComputeNode {
     320            0 :             params,
     321            0 :             conn_conf,
     322            0 :             tokio_conn_conf,
     323            0 :             state: Mutex::new(new_state),
     324            0 :             state_changed: Condvar::new(),
     325            0 :             ext_download_progress: RwLock::new(HashMap::new()),
     326            0 :         })
     327            0 :     }
     328              : 
     329              :     /// Top-level control flow of compute_ctl. Returns a process exit code we should
     330              :     /// exit with.
     331            0 :     pub fn run(self) -> Result<Option<i32>> {
     332            0 :         let this = Arc::new(self);
     333            0 : 
     334            0 :         let cli_spec = this.state.lock().unwrap().pspec.clone();
     335            0 : 
     336            0 :         // If this is a pooled VM, prewarm before starting HTTP server and becoming
     337            0 :         // available for binding. Prewarming helps Postgres start quicker later,
     338            0 :         // because QEMU will already have its memory allocated from the host, and
     339            0 :         // the necessary binaries will already be cached.
     340            0 :         if cli_spec.is_none() {
     341            0 :             this.prewarm_postgres()?;
     342            0 :         }
     343              : 
     344              :         // Launch the external HTTP server first, so that we can serve control plane
     345              :         // requests while configuration is still in progress.
     346            0 :         crate::http::server::Server::External {
     347            0 :             port: this.params.external_http_port,
     348            0 :             jwks: this.state.lock().unwrap().compute_ctl_config.jwks.clone(),
     349            0 :             compute_id: this.params.compute_id.clone(),
     350            0 :         }
     351            0 :         .launch(&this);
     352            0 : 
     353            0 :         // The internal HTTP server could be launched later, but there isn't much
     354            0 :         // sense in waiting.
     355            0 :         crate::http::server::Server::Internal {
     356            0 :             port: this.params.internal_http_port,
     357            0 :         }
     358            0 :         .launch(&this);
     359              : 
     360              :         // If we got a spec from the CLI already, use that. Otherwise wait for the
     361              :         // control plane to pass it to us with a /configure HTTP request
     362            0 :         let pspec = if let Some(cli_spec) = cli_spec {
     363            0 :             cli_spec
     364              :         } else {
     365            0 :             this.wait_spec()?
     366              :         };
     367              : 
     368            0 :         launch_lsn_lease_bg_task_for_static(&this);
     369            0 : 
     370            0 :         // We have a spec, start the compute
     371            0 :         let mut delay_exit = false;
     372            0 :         let mut vm_monitor = None;
     373            0 :         let mut pg_process: Option<PostgresHandle> = None;
     374            0 : 
     375            0 :         match this.start_compute(&mut pg_process) {
     376            0 :             Ok(()) => {
     377            0 :                 // Success! Launch remaining services (just vm-monitor currently)
     378            0 :                 vm_monitor =
     379            0 :                     Some(this.start_vm_monitor(pspec.spec.disable_lfc_resizing.unwrap_or(false)));
     380            0 :             }
     381            0 :             Err(err) => {
     382            0 :                 // Something went wrong with the startup. Log it and expose the error to
     383            0 :                 // HTTP status requests.
     384            0 :                 error!("could not start the compute node: {:#}", err);
     385            0 :                 this.set_failed_status(err);
     386            0 :                 delay_exit = true;
     387              : 
     388              :                 // If the error happened after starting PostgreSQL, kill it
     389            0 :                 if let Some(ref pg_process) = pg_process {
     390            0 :                     kill(pg_process.pid(), Signal::SIGQUIT).ok();
     391            0 :                 }
     392              :             }
     393              :         }
     394              : 
     395              :         // If startup was successful, or it failed in the late stages,
     396              :         // PostgreSQL is now running. Wait until it exits.
     397            0 :         let exit_code = if let Some(pg_handle) = pg_process {
     398            0 :             let exit_status = this.wait_postgres(pg_handle);
     399            0 :             info!("Postgres exited with code {}, shutting down", exit_status);
     400            0 :             exit_status.code()
     401              :         } else {
     402            0 :             None
     403              :         };
     404              : 
     405              :         // Terminate the vm_monitor so it releases the file watcher on
     406              :         // /sys/fs/cgroup/neon-postgres.
     407              :         // Note: the vm-monitor only runs on linux because it requires cgroups.
     408            0 :         if let Some(vm_monitor) = vm_monitor {
     409              :             cfg_if::cfg_if! {
     410              :                 if #[cfg(target_os = "linux")] {
     411              :                     // Kills all threads spawned by the monitor
     412            0 :                     vm_monitor.token.cancel();
     413            0 :                     if let Some(handle) = vm_monitor.vm_monitor {
     414            0 :                         // Kills the actual task running the monitor
     415            0 :                         handle.abort();
     416            0 :                     }
     417              :                 } else {
     418              :                     _ = vm_monitor; // appease unused lint on macOS
     419              :                 }
     420              :             }
     421            0 :         }
     422              : 
     423              :         // Reap the postgres process
     424            0 :         delay_exit |= this.cleanup_after_postgres_exit()?;
     425              : 
     426              :         // If launch failed, keep serving HTTP requests for a while, so the cloud
     427              :         // control plane can get the actual error.
     428            0 :         if delay_exit {
     429            0 :             info!("giving control plane 30s to collect the error before shutdown");
     430            0 :             std::thread::sleep(Duration::from_secs(30));
     431            0 :         }
     432            0 :         Ok(exit_code)
     433            0 :     }
     434              : 
     435            0 :     pub fn wait_spec(&self) -> Result<ParsedSpec> {
     436            0 :         info!("no compute spec provided, waiting");
     437            0 :         let mut state = self.state.lock().unwrap();
     438            0 :         while state.status != ComputeStatus::ConfigurationPending {
     439            0 :             state = self.state_changed.wait(state).unwrap();
     440            0 :         }
     441              : 
     442            0 :         info!("got spec, continue configuration");
     443            0 :         let spec = state.pspec.as_ref().unwrap().clone();
     444            0 : 
     445            0 :         // Record for how long we slept waiting for the spec.
     446            0 :         let now = Utc::now();
     447            0 :         state.metrics.wait_for_spec_ms = now
     448            0 :             .signed_duration_since(state.start_time)
     449            0 :             .to_std()
     450            0 :             .unwrap()
     451            0 :             .as_millis() as u64;
     452            0 : 
     453            0 :         // Reset start time, so that the total startup time that is calculated later will
     454            0 :         // not include the time that we waited for the spec.
     455            0 :         state.start_time = now;
     456            0 : 
     457            0 :         Ok(spec)
     458            0 :     }
     459              : 
     460              :     /// Start compute.
     461              :     ///
     462              :     /// Prerequisites:
     463              :     /// - the compute spec has been placed in self.state.pspec
     464              :     ///
     465              :     /// On success:
     466              :     /// - status is set to ComputeStatus::Running
     467              :     /// - self.running_postgres is set
      468              :     /// - `pg_handle` is set to the started Postgres process
     469              :     /// On error:
     470              :     /// - status is left in ComputeStatus::Init. The caller is responsible for setting it to Failed
     471              :     /// - if Postgres was started before the fatal error happened, self.running_postgres is
      472              :     /// - if Postgres was started before the fatal error happened, `pg_handle` is
      473              :     ///   set. The caller is responsible for killing it.
     474              :     /// Note that this is in the critical path of a compute cold start. Keep this fast.
     475              :     /// Try to do things concurrently, to hide the latencies.
     476            0 :     fn start_compute(self: &Arc<Self>, pg_handle: &mut Option<PostgresHandle>) -> Result<()> {
     477            0 :         let compute_state: ComputeState;
     478            0 : 
     479            0 :         let start_compute_span;
     480            0 :         let _this_entered;
     481            0 :         {
     482            0 :             let mut state_guard = self.state.lock().unwrap();
     483              : 
     484              :             // Create a tracing span for the startup operation.
     485              :             //
     486              :             // We could otherwise just annotate the function with #[instrument], but if
     487              :             // we're being configured from a /configure HTTP request, we want the
     488              :             // startup to be considered part of the /configure request.
     489              :             //
     490              :             // Similarly, if a trace ID was passed in env variables, attach it to the span.
     491            0 :             start_compute_span = {
     492              :                 // Temporarily enter the parent span, so that the new span becomes its child.
     493            0 :                 if let Some(p) = state_guard.startup_span.take() {
     494            0 :                     let _parent_entered = p.entered();
     495            0 :                     tracing::info_span!("start_compute")
     496            0 :                 } else if let Some(otel_context) = startup_context_from_env() {
     497              :                     use tracing_opentelemetry::OpenTelemetrySpanExt;
     498            0 :                     let span = tracing::info_span!("start_compute");
     499            0 :                     span.set_parent(otel_context);
     500            0 :                     span
     501              :                 } else {
     502            0 :                     tracing::info_span!("start_compute")
     503              :                 }
     504              :             };
     505            0 :             _this_entered = start_compute_span.enter();
     506            0 : 
     507            0 :             state_guard.set_status(ComputeStatus::Init, &self.state_changed);
     508            0 :             compute_state = state_guard.clone()
     509            0 :         }
     510            0 : 
     511            0 :         let pspec = compute_state.pspec.as_ref().expect("spec must be set");
     512            0 :         info!(
     513            0 :             "starting compute for project {}, operation {}, tenant {}, timeline {}, features {:?}, spec.remote_extensions {:?}",
     514            0 :             pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None"),
     515            0 :             pspec.spec.operation_uuid.as_deref().unwrap_or("None"),
     516              :             pspec.tenant_id,
     517              :             pspec.timeline_id,
     518              :             pspec.spec.features,
     519              :             pspec.spec.remote_extensions,
     520              :         );
     521              : 
     522              :         ////// PRE-STARTUP PHASE: things that need to be finished before we start the Postgres process
     523              : 
     524              :         // Collect all the tasks that must finish here
     525            0 :         let mut pre_tasks = tokio::task::JoinSet::new();
     526            0 : 
     527            0 :         // If there are any remote extensions in shared_preload_libraries, start downloading them
     528            0 :         if pspec.spec.remote_extensions.is_some() {
     529            0 :             let (this, spec) = (self.clone(), pspec.spec.clone());
     530            0 :             pre_tasks.spawn(async move {
     531            0 :                 this.download_preload_extensions(&spec)
     532            0 :                     .in_current_span()
     533            0 :                     .await
     534            0 :             });
     535            0 :         }
     536              : 
     537              :         // Prepare pgdata directory. This downloads the basebackup, among other things.
     538            0 :         {
     539            0 :             let (this, cs) = (self.clone(), compute_state.clone());
     540            0 :             pre_tasks.spawn_blocking_child(move || this.prepare_pgdata(&cs));
     541            0 :         }
     542              : 
     543              :         // Resize swap to the desired size if the compute spec says so
     544            0 :         if let (Some(size_bytes), true) =
     545            0 :             (pspec.spec.swap_size_bytes, self.params.resize_swap_on_bind)
     546            0 :         {
     547            0 :             pre_tasks.spawn_blocking_child(move || {
     548            0 :                 // To avoid 'swapoff' hitting postgres startup, we need to run resize-swap to completion
     549            0 :                 // *before* starting postgres.
     550            0 :                 //
     551            0 :                 // In theory, we could do this asynchronously if SkipSwapon was enabled for VMs, but this
     552            0 :                 // carries a risk of introducing hard-to-debug issues - e.g. if postgres sometimes gets
     553            0 :                 // OOM-killed during startup because swap wasn't available yet.
     554            0 :                 resize_swap(size_bytes).context("failed to resize swap")?;
     555            0 :                 let size_mib = size_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
     556            0 :                 info!(%size_bytes, %size_mib, "resized swap");
     557              : 
     558            0 :                 Ok::<(), anyhow::Error>(())
     559            0 :             });
     560            0 :         }
     561              : 
     562              :         // Set disk quota if the compute spec says so
     563            0 :         if let (Some(disk_quota_bytes), Some(disk_quota_fs_mountpoint)) = (
     564            0 :             pspec.spec.disk_quota_bytes,
     565            0 :             self.params.set_disk_quota_for_fs.as_ref(),
     566            0 :         ) {
     567            0 :             let disk_quota_fs_mountpoint = disk_quota_fs_mountpoint.clone();
     568            0 :             pre_tasks.spawn_blocking_child(move || {
     569            0 :                 set_disk_quota(disk_quota_bytes, &disk_quota_fs_mountpoint)
     570            0 :                     .context("failed to set disk quota")?;
     571            0 :                 let size_mib = disk_quota_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
     572            0 :                 info!(%disk_quota_bytes, %size_mib, "set disk quota");
     573              : 
     574            0 :                 Ok::<(), anyhow::Error>(())
     575            0 :             });
     576            0 :         }
     577              : 
     578              :         // tune pgbouncer
     579            0 :         if let Some(pgbouncer_settings) = &pspec.spec.pgbouncer_settings {
     580            0 :             info!("tuning pgbouncer");
     581              : 
     582              :             // Spawn a background task to do the tuning,
     583              :             // so that we don't block the main thread that starts Postgres.
     584            0 :             let pgbouncer_settings = pgbouncer_settings.clone();
     585            0 :             let _handle = tokio::spawn(async move {
     586            0 :                 let res = tune_pgbouncer(pgbouncer_settings).await;
     587            0 :                 if let Err(err) = res {
     588            0 :                     error!("error while tuning pgbouncer: {err:?}");
     589              :                     // Continue with the startup anyway
     590            0 :                 }
     591            0 :             });
     592            0 :         }
     593              : 
     594              :         // configure local_proxy
     595            0 :         if let Some(local_proxy) = &pspec.spec.local_proxy_config {
     596            0 :             info!("configuring local_proxy");
     597              : 
     598              :             // Spawn a background task to do the configuration,
     599              :             // so that we don't block the main thread that starts Postgres.
     600            0 :             let local_proxy = local_proxy.clone();
     601            0 :             let _handle = tokio::spawn(async move {
     602            0 :                 if let Err(err) = local_proxy::configure(&local_proxy) {
     603            0 :                     error!("error while configuring local_proxy: {err:?}");
     604              :                     // Continue with the startup anyway
     605            0 :                 }
     606            0 :             });
     607            0 :         }
     608              : 
     609              :         // Configure and start rsyslog if necessary
     610            0 :         if let ComputeAudit::Hipaa = pspec.spec.audit_log_level {
     611            0 :             let remote_endpoint = std::env::var("AUDIT_LOGGING_ENDPOINT").unwrap_or("".to_string());
     612            0 :             if remote_endpoint.is_empty() {
     613            0 :                 anyhow::bail!("AUDIT_LOGGING_ENDPOINT is empty");
     614            0 :             }
     615            0 : 
     616            0 :             let log_directory_path = Path::new(&self.params.pgdata).join("log");
     617            0 :             // TODO: make this more robust
      618            0 :             // currently rsyslog starts once, and there is no monitoring or restart if it fails
     619            0 :             configure_audit_rsyslog(
     620            0 :                 log_directory_path.to_str().unwrap(),
     621            0 :                 "hipaa",
     622            0 :                 &remote_endpoint,
     623            0 :             )?;
     624            0 :         }
     625              : 
     626              :         // Launch remaining service threads
     627            0 :         let _monitor_handle = launch_monitor(self);
     628            0 :         let _configurator_handle = launch_configurator(self);
     629            0 : 
     630            0 :         // Wait for all the pre-tasks to finish before starting postgres
     631            0 :         let rt = tokio::runtime::Handle::current();
     632            0 :         while let Some(res) = rt.block_on(pre_tasks.join_next()) {
     633            0 :             res??;
     634              :         }
     635              : 
     636              :         ////// START POSTGRES
     637            0 :         let start_time = Utc::now();
     638            0 :         let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
     639            0 :         let postmaster_pid = pg_process.pid();
     640            0 :         *pg_handle = Some(pg_process);
     641            0 : 
     642            0 :         // If this is a primary endpoint, perform some post-startup configuration before
     643            0 :         // opening it up for the world.
     644            0 :         let config_time = Utc::now();
     645            0 :         if pspec.spec.mode == ComputeMode::Primary {
     646            0 :             self.configure_as_primary(&compute_state)?;
     647              : 
     648            0 :             let conf = self.get_tokio_conn_conf(None);
     649            0 :             tokio::task::spawn(async {
     650            0 :                 let res = get_installed_extensions(conf).await;
     651            0 :                 match res {
     652            0 :                     Ok(extensions) => {
     653            0 :                         info!(
     654            0 :                             "[NEON_EXT_STAT] {}",
     655            0 :                             serde_json::to_string(&extensions)
     656            0 :                                 .expect("failed to serialize extensions list")
     657              :                         );
     658              :                     }
     659            0 :                     Err(err) => error!("could not get installed extensions: {err:?}"),
     660              :                 }
     661            0 :             });
     662            0 :         }
     663              : 
     664              :         // All done!
     665            0 :         let startup_end_time = Utc::now();
     666            0 :         let metrics = {
     667            0 :             let mut state = self.state.lock().unwrap();
     668            0 :             state.metrics.start_postgres_ms = config_time
     669            0 :                 .signed_duration_since(start_time)
     670            0 :                 .to_std()
     671            0 :                 .unwrap()
     672            0 :                 .as_millis() as u64;
     673            0 :             state.metrics.config_ms = startup_end_time
     674            0 :                 .signed_duration_since(config_time)
     675            0 :                 .to_std()
     676            0 :                 .unwrap()
     677            0 :                 .as_millis() as u64;
     678            0 :             state.metrics.total_startup_ms = startup_end_time
     679            0 :                 .signed_duration_since(compute_state.start_time)
     680            0 :                 .to_std()
     681            0 :                 .unwrap()
     682            0 :                 .as_millis() as u64;
     683            0 :             state.metrics.clone()
     684            0 :         };
     685            0 :         self.set_status(ComputeStatus::Running);
     686            0 : 
     687            0 :         // Log metrics so that we can search for slow operations in logs
     688            0 :         info!(?metrics, postmaster_pid = %postmaster_pid, "compute start finished");
     689              : 
     690            0 :         Ok(())
     691            0 :     }
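
The startup metrics recorded above all use the same conversion: take `Utc::now()` checkpoints and turn the signed `chrono` duration between them into milliseconds. A small sketch of just that conversion; `to_std()` fails if the duration is negative (e.g. the clock moved backwards), which is why the code above unwraps it:

    use chrono::Utc;

    fn main() {
        let start = Utc::now();
        std::thread::sleep(std::time::Duration::from_millis(25));
        let end = Utc::now();

        // signed_duration_since() yields a chrono duration, which may be negative;
        // to_std() converts it to std::time::Duration and errors out if it is.
        let elapsed_ms = end
            .signed_duration_since(start)
            .to_std()
            .unwrap()
            .as_millis() as u64;
        println!("elapsed: {elapsed_ms} ms");
    }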
     692              : 
     693              :     #[instrument(skip_all)]
     694              :     async fn download_preload_extensions(&self, spec: &ComputeSpec) -> Result<()> {
     695              :         let remote_extensions = if let Some(remote_extensions) = &spec.remote_extensions {
     696              :             remote_extensions
     697              :         } else {
     698              :             return Ok(());
     699              :         };
     700              : 
     701              :         // First, create control files for all available extensions
     702              :         extension_server::create_control_files(remote_extensions, &self.params.pgbin);
     703              : 
     704              :         let library_load_start_time = Utc::now();
     705              :         let remote_ext_metrics = self.prepare_preload_libraries(spec).await?;
     706              : 
     707              :         let library_load_time = Utc::now()
     708              :             .signed_duration_since(library_load_start_time)
     709              :             .to_std()
     710              :             .unwrap()
     711              :             .as_millis() as u64;
     712              :         let mut state = self.state.lock().unwrap();
     713              :         state.metrics.load_ext_ms = library_load_time;
     714              :         state.metrics.num_ext_downloaded = remote_ext_metrics.num_ext_downloaded;
     715              :         state.metrics.largest_ext_size = remote_ext_metrics.largest_ext_size;
     716              :         state.metrics.total_ext_download_size = remote_ext_metrics.total_ext_download_size;
     717              :         info!(
     718              :             "Loading shared_preload_libraries took {:?}ms",
     719              :             library_load_time
     720              :         );
     721              :         info!("{:?}", remote_ext_metrics);
     722              : 
     723              :         Ok(())
     724              :     }
     725              : 
     726              :     /// Start the vm-monitor if directed to. The vm-monitor only runs on linux
     727              :     /// because it requires cgroups.
     728            0 :     fn start_vm_monitor(&self, disable_lfc_resizing: bool) -> StartVmMonitorResult {
     729              :         cfg_if::cfg_if! {
     730              :             if #[cfg(target_os = "linux")] {
     731              :                 use std::env;
     732              :                 use tokio_util::sync::CancellationToken;
     733              : 
     734              :                 // This token is used internally by the monitor to clean up all threads
     735            0 :                 let token = CancellationToken::new();
     736              : 
     737              :                 // don't pass postgres connection string to vm-monitor if we don't want it to resize LFC
     738            0 :                 let pgconnstr = if disable_lfc_resizing {
     739            0 :                     None
     740              :                 } else {
     741            0 :                     Some(self.params.filecache_connstr.clone())
     742              :                 };
     743              : 
     744            0 :                 let vm_monitor = if env::var_os("AUTOSCALING").is_some() {
     745            0 :                     let vm_monitor = tokio::spawn(vm_monitor::start(
     746            0 :                         Box::leak(Box::new(vm_monitor::Args {
     747            0 :                             cgroup: Some(self.params.cgroup.clone()),
     748            0 :                             pgconnstr,
     749            0 :                             addr: self.params.vm_monitor_addr.clone(),
     750            0 :                         })),
     751            0 :                         token.clone(),
     752            0 :                     ));
     753            0 :                     Some(vm_monitor)
     754              :                 } else {
     755            0 :                     None
     756              :                 };
     757            0 :                 StartVmMonitorResult { token, vm_monitor }
     758            0 :             } else {
     759            0 :                 _ = disable_lfc_resizing; // appease unused lint on macOS
     760            0 :                 StartVmMonitorResult { }
     761            0 :             }
     762            0 :         }
     763            0 :     }
     764              : 
     765            0 :     fn cleanup_after_postgres_exit(&self) -> Result<bool> {
     766            0 :         // Maybe sync safekeepers again, to speed up next startup
     767            0 :         let compute_state = self.state.lock().unwrap().clone();
     768            0 :         let pspec = compute_state.pspec.as_ref().expect("spec must be set");
     769            0 :         if matches!(pspec.spec.mode, compute_api::spec::ComputeMode::Primary) {
     770            0 :             info!("syncing safekeepers on shutdown");
     771            0 :             let storage_auth_token = pspec.storage_auth_token.clone();
     772            0 :             let lsn = self.sync_safekeepers(storage_auth_token)?;
     773            0 :             info!("synced safekeepers at lsn {lsn}");
     774            0 :         }
     775              : 
     776            0 :         let mut delay_exit = false;
     777            0 :         let mut state = self.state.lock().unwrap();
     778            0 :         if state.status == ComputeStatus::TerminationPending {
     779            0 :             state.status = ComputeStatus::Terminated;
     780            0 :             self.state_changed.notify_all();
     781            0 :             // we were asked to terminate gracefully, don't exit to avoid restart
     782            0 :             delay_exit = true
     783            0 :         }
     784            0 :         drop(state);
     785              : 
     786            0 :         if let Err(err) = self.check_for_core_dumps() {
     787            0 :             error!("error while checking for core dumps: {err:?}");
     788            0 :         }
     789              : 
     790            0 :         Ok(delay_exit)
     791            0 :     }
     792              : 
     793              :     /// Check that compute node has corresponding feature enabled.
     794            0 :     pub fn has_feature(&self, feature: ComputeFeature) -> bool {
     795            0 :         let state = self.state.lock().unwrap();
     796              : 
     797            0 :         if let Some(s) = state.pspec.as_ref() {
     798            0 :             s.spec.features.contains(&feature)
     799              :         } else {
     800            0 :             false
     801              :         }
     802            0 :     }
     803              : 
     804            0 :     pub fn set_status(&self, status: ComputeStatus) {
     805            0 :         let mut state = self.state.lock().unwrap();
     806            0 :         state.set_status(status, &self.state_changed);
     807            0 :     }
     808              : 
     809            0 :     pub fn set_failed_status(&self, err: anyhow::Error) {
     810            0 :         let mut state = self.state.lock().unwrap();
     811            0 :         state.set_failed_status(err, &self.state_changed);
     812            0 :     }
     813              : 
     814            0 :     pub fn get_status(&self) -> ComputeStatus {
     815            0 :         self.state.lock().unwrap().status
     816            0 :     }
     817              : 
     818            0 :     pub fn get_timeline_id(&self) -> Option<TimelineId> {
     819            0 :         self.state
     820            0 :             .lock()
     821            0 :             .unwrap()
     822            0 :             .pspec
     823            0 :             .as_ref()
     824            0 :             .map(|s| s.timeline_id)
     825            0 :     }
     826              : 
     827              :     // Remove the `pgdata` directory and create it again with the right permissions.
     828            0 :     fn create_pgdata(&self) -> Result<()> {
     829            0 :         // Ignore removal error, likely it is a 'No such file or directory (os error 2)'.
      830            0 :         // If it is something different, create_dir() will error out anyway.
     831            0 :         let pgdata = &self.params.pgdata;
     832            0 :         let _ok = fs::remove_dir_all(pgdata);
     833            0 :         fs::create_dir(pgdata)?;
     834            0 :         fs::set_permissions(pgdata, fs::Permissions::from_mode(0o700))?;
     835              : 
     836            0 :         Ok(())
     837            0 :     }
     838              : 
     839              :     // Get a basebackup from the pageserver over a libpq connection using `connstr` and
     840              :     // unarchive it into the `pgdata` directory, overriding all of its previous content.
     841              :     #[instrument(skip_all, fields(%lsn))]
     842              :     fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
     843              :         let spec = compute_state.pspec.as_ref().expect("spec must be set");
     844              :         let start_time = Instant::now();
     845              : 
     846              :         let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
     847              :         let mut config = postgres::Config::from_str(shard0_connstr)?;
     848              : 
     849              :         // Use the storage auth token from the config file, if given.
     850              :         // Note: this overrides any password set in the connection string.
     851              :         if let Some(storage_auth_token) = &spec.storage_auth_token {
     852              :             info!("Got storage auth token from spec file");
     853              :             config.password(storage_auth_token);
     854              :         } else {
     855              :             info!("Storage auth token not set");
     856              :         }
     857              : 
     858              :         // Connect to pageserver
     859              :         let mut client = config.connect(NoTls)?;
     860              :         let pageserver_connect_micros = start_time.elapsed().as_micros() as u64;
     861              : 
     862              :         let basebackup_cmd = match lsn {
     863              :             Lsn(0) => {
     864              :                 if spec.spec.mode != ComputeMode::Primary {
     865              :                     format!(
     866              :                         "basebackup {} {} --gzip --replica",
     867              :                         spec.tenant_id, spec.timeline_id
     868              :                     )
     869              :                 } else {
     870              :                     format!("basebackup {} {} --gzip", spec.tenant_id, spec.timeline_id)
     871              :                 }
     872              :             }
     873              :             _ => {
     874              :                 if spec.spec.mode != ComputeMode::Primary {
     875              :                     format!(
     876              :                         "basebackup {} {} {} --gzip --replica",
     877              :                         spec.tenant_id, spec.timeline_id, lsn
     878              :                     )
     879              :                 } else {
     880              :                     format!(
     881              :                         "basebackup {} {} {} --gzip",
     882              :                         spec.tenant_id, spec.timeline_id, lsn
     883              :                     )
     884              :                 }
     885              :             }
     886              :         };
     887              : 
     888              :         let copyreader = client.copy_out(basebackup_cmd.as_str())?;
     889              :         let mut measured_reader = MeasuredReader::new(copyreader);
     890              :         let mut bufreader = std::io::BufReader::new(&mut measured_reader);
     891              : 
     892              :         // Read the archive directly from the `CopyOutReader`
     893              :         //
     894              :         // Set `ignore_zeros` so that unpack() reads all the Copy data and
     895              :         // doesn't stop at the end-of-archive marker. Otherwise, if the server
     896              :         // sends an Error after finishing the tarball, we will not notice it.
     897              :         let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut bufreader));
     898              :         ar.set_ignore_zeros(true);
     899              :         ar.unpack(&self.params.pgdata)?;
     900              : 
     901              :         // Report metrics
     902              :         let mut state = self.state.lock().unwrap();
     903              :         state.metrics.pageserver_connect_micros = pageserver_connect_micros;
     904              :         state.metrics.basebackup_bytes = measured_reader.get_byte_count() as u64;
     905              :         state.metrics.basebackup_ms = start_time.elapsed().as_millis() as u64;
     906              :         Ok(())
     907              :     }
     908              : 
     909              :     // Gets the basebackup in a retry loop
     910              :     #[instrument(skip_all, fields(%lsn))]
     911              :     pub fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
     912              :         let mut retry_period_ms = 500.0;
     913              :         let mut attempts = 0;
     914              :         const DEFAULT_ATTEMPTS: u16 = 10;
     915              :         #[cfg(feature = "testing")]
     916              :         let max_attempts = if let Ok(v) = env::var("NEON_COMPUTE_TESTING_BASEBACKUP_RETRIES") {
     917              :             u16::from_str(&v).unwrap()
     918              :         } else {
     919              :             DEFAULT_ATTEMPTS
     920              :         };
     921              :         #[cfg(not(feature = "testing"))]
     922              :         let max_attempts = DEFAULT_ATTEMPTS;
     923              :         loop {
     924              :             let result = self.try_get_basebackup(compute_state, lsn);
     925              :             match result {
     926              :                 Ok(_) => {
     927              :                     return result;
     928              :                 }
     929              :                 Err(ref e) if attempts < max_attempts => {
     930              :                     warn!(
     931              :                         "Failed to get basebackup: {} (attempt {}/{})",
     932              :                         e, attempts, max_attempts
     933              :                     );
     934              :                     std::thread::sleep(std::time::Duration::from_millis(retry_period_ms as u64));
     935              :                     retry_period_ms *= 1.5;
     936              :                 }
     937              :                 Err(_) => {
     938              :                     return result;
     939              :                 }
     940              :             }
     941              :             attempts += 1;
     942              :         }
     943              :     }
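    // A rough sketch of the retry schedule above, assuming every attempt fails:
    // the sleep starts at 500 ms and grows by a factor of 1.5 per attempt, i.e.
    // roughly 500, 750, 1125, 1687, 2531, ... ms, so the default 10 retries add
    // up to about a minute of total sleep time before the last error is returned.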
     944              : 
     945            0 :     pub async fn check_safekeepers_synced_async(
     946            0 :         &self,
     947            0 :         compute_state: &ComputeState,
     948            0 :     ) -> Result<Option<Lsn>> {
     949            0 :         // Construct a connection config for each safekeeper
     950            0 :         let pspec: ParsedSpec = compute_state
     951            0 :             .pspec
     952            0 :             .as_ref()
     953            0 :             .expect("spec must be set")
     954            0 :             .clone();
     955            0 :         let sk_connstrs: Vec<String> = pspec.safekeeper_connstrings.clone();
     956            0 :         let sk_configs = sk_connstrs.into_iter().map(|connstr| {
     957            0 :             // Format connstr
     958            0 :             let id = connstr.clone();
     959            0 :             let connstr = format!("postgresql://no_user@{}", connstr);
     960            0 :             let options = format!(
     961            0 :                 "-c timeline_id={} tenant_id={}",
     962            0 :                 pspec.timeline_id, pspec.tenant_id
     963            0 :             );
     964            0 : 
     965            0 :             // Construct client
     966            0 :             let mut config = tokio_postgres::Config::from_str(&connstr).unwrap();
     967            0 :             config.options(&options);
     968            0 :             if let Some(storage_auth_token) = pspec.storage_auth_token.clone() {
     969            0 :                 config.password(storage_auth_token);
     970            0 :             }
     971              : 
     972            0 :             (id, config)
     973            0 :         });
     974            0 : 
     975            0 :         // Create task set to query all safekeepers
     976            0 :         let mut tasks = FuturesUnordered::new();
     977            0 :         let quorum = sk_configs.len() / 2 + 1;
     978            0 :         for (id, config) in sk_configs {
     979            0 :             let timeout = tokio::time::Duration::from_millis(100);
     980            0 :             let task = tokio::time::timeout(timeout, ping_safekeeper(id, config));
     981            0 :             tasks.push(tokio::spawn(task));
     982            0 :         }
     983              : 
     984              :         // Get a quorum of responses or errors
     985            0 :         let mut responses = Vec::new();
     986            0 :         let mut join_errors = Vec::new();
     987            0 :         let mut task_errors = Vec::new();
     988            0 :         let mut timeout_errors = Vec::new();
     989            0 :         while let Some(response) = tasks.next().await {
     990            0 :             match response {
     991            0 :                 Ok(Ok(Ok(r))) => responses.push(r),
     992            0 :                 Ok(Ok(Err(e))) => task_errors.push(e),
     993            0 :                 Ok(Err(e)) => timeout_errors.push(e),
     994            0 :                 Err(e) => join_errors.push(e),
     995              :             };
     996            0 :             if responses.len() >= quorum {
     997            0 :                 break;
     998            0 :             }
     999            0 :             if join_errors.len() + task_errors.len() + timeout_errors.len() >= quorum {
    1000            0 :                 break;
    1001            0 :             }
    1002              :         }
    1003              : 
    1004              :         // In case of error, log and fail the check, but don't crash.
    1005              :         // We're playing it safe because these errors could be transient
    1006              :         // and we don't yet retry. Also being careful here allows us to
    1007              :         // be backwards compatible with safekeepers that don't have the
    1008              :         // TIMELINE_STATUS API yet.
    1009            0 :         if responses.len() < quorum {
    1010            0 :             error!(
    1011            0 :                 "failed sync safekeepers check {:?} {:?} {:?}",
    1012              :                 join_errors, task_errors, timeout_errors
    1013              :             );
    1014            0 :             return Ok(None);
    1015            0 :         }
    1016            0 : 
    1017            0 :         Ok(check_if_synced(responses))
    1018            0 :     }
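    // Worked example of the quorum arithmetic in check_safekeepers_synced_async:
    // with, say, 3 safekeepers, quorum = 3 / 2 + 1 = 2, so the check returns a
    // result as soon as 2 pings succeed, and falls back to Ok(None) (forcing a
    // full sync) once 2 of them fail, time out, or panic.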
    1019              : 
    1020              :     // Fast path for sync_safekeepers. If they're already synced we get the lsn
    1021              :     // in one roundtrip. If not, we should do a full sync_safekeepers.
    1022              :     #[instrument(skip_all)]
    1023              :     pub fn check_safekeepers_synced(&self, compute_state: &ComputeState) -> Result<Option<Lsn>> {
    1024              :         let start_time = Utc::now();
    1025              : 
    1026              :         let rt = tokio::runtime::Handle::current();
    1027              :         let result = rt.block_on(self.check_safekeepers_synced_async(compute_state));
    1028              : 
    1029              :         // Record runtime
    1030              :         self.state.lock().unwrap().metrics.sync_sk_check_ms = Utc::now()
    1031              :             .signed_duration_since(start_time)
    1032              :             .to_std()
    1033              :             .unwrap()
    1034              :             .as_millis() as u64;
    1035              :         result
    1036              :     }
    1037              : 
    1038              :     // Run `postgres` in a special mode with `--sync-safekeepers` argument
    1039              :     // and return the reported LSN back to the caller.
    1040              :     #[instrument(skip_all)]
    1041              :     pub fn sync_safekeepers(&self, storage_auth_token: Option<String>) -> Result<Lsn> {
    1042              :         let start_time = Utc::now();
    1043              : 
    1044              :         let mut sync_handle = maybe_cgexec(&self.params.pgbin)
    1045              :             .args(["--sync-safekeepers"])
    1046              :             .env("PGDATA", &self.params.pgdata) // we cannot use -D in this mode
    1047              :             .envs(if let Some(storage_auth_token) = &storage_auth_token {
    1048              :                 vec![("NEON_AUTH_TOKEN", storage_auth_token)]
    1049              :             } else {
    1050              :                 vec![]
    1051              :             })
    1052              :             .stdout(Stdio::piped())
    1053              :             .stderr(Stdio::piped())
    1054              :             .spawn()
    1055              :             .expect("postgres --sync-safekeepers failed to start");
    1056              :         SYNC_SAFEKEEPERS_PID.store(sync_handle.id(), Ordering::SeqCst);
    1057              : 
    1058              :         // `postgres --sync-safekeepers` will print all log output to stderr and
    1059              :         // final LSN to stdout. So we leave stdout to collect LSN, while stderr logs
    1060              :         // will be collected in a child thread.
    1061              :         let stderr = sync_handle
    1062              :             .stderr
    1063              :             .take()
    1064              :             .expect("stderr should be captured");
    1065              :         let logs_handle = handle_postgres_logs(stderr);
    1066              : 
    1067              :         let sync_output = sync_handle
    1068              :             .wait_with_output()
    1069              :             .expect("postgres --sync-safekeepers failed");
    1070              :         SYNC_SAFEKEEPERS_PID.store(0, Ordering::SeqCst);
    1071              : 
    1072              :         // Process has exited, so we can join the logs thread.
    1073              :         let _ = tokio::runtime::Handle::current()
    1074              :             .block_on(logs_handle)
    1075            0 :             .map_err(|e| tracing::error!("log task panicked: {:?}", e));
    1076              : 
    1077              :         if !sync_output.status.success() {
    1078              :             anyhow::bail!(
    1079              :                 "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
    1080              :                 sync_output.status,
    1081              :                 String::from_utf8(sync_output.stdout)
    1082              :                     .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
    1083              :             );
    1084              :         }
    1085              : 
    1086              :         self.state.lock().unwrap().metrics.sync_safekeepers_ms = Utc::now()
    1087              :             .signed_duration_since(start_time)
    1088              :             .to_std()
    1089              :             .unwrap()
    1090              :             .as_millis() as u64;
    1091              : 
    1092              :         let lsn = Lsn::from_str(String::from_utf8(sync_output.stdout)?.trim())?;
    1093              : 
    1094              :         Ok(lsn)
    1095              :     }
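    // Sketch of the expected output of `postgres --sync-safekeepers` on success:
    // all log lines go to stderr, while stdout carries only the final LSN, e.g.
    // something like "0/169AD58" (an illustrative value), which is trimmed and
    // parsed with Lsn::from_str above.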
    1096              : 
    1097              :     /// Do all the preparations like PGDATA directory creation, configuration,
    1098              :     /// safekeepers sync, basebackup, etc.
    1099              :     #[instrument(skip_all)]
    1100              :     pub fn prepare_pgdata(&self, compute_state: &ComputeState) -> Result<()> {
    1101              :         let pspec = compute_state.pspec.as_ref().expect("spec must be set");
    1102              :         let spec = &pspec.spec;
    1103              :         let pgdata_path = Path::new(&self.params.pgdata);
    1104              : 
    1105              :         // Remove/create an empty pgdata directory and put configuration there.
    1106              :         self.create_pgdata()?;
    1107              :         config::write_postgres_conf(
    1108              :             &pgdata_path.join("postgresql.conf"),
    1109              :             &pspec.spec,
    1110              :             self.params.internal_http_port,
    1111              :         )?;
    1112              : 
    1113              :         // Syncing safekeepers is only safe with primary nodes: if a primary
    1114              :         // is already connected it will be kicked out, so a secondary (standby)
    1115              :         // cannot sync safekeepers.
    1116              :         let lsn = match spec.mode {
    1117              :             ComputeMode::Primary => {
    1118              :                 info!("checking if safekeepers are synced");
    1119              :                 let lsn = if let Ok(Some(lsn)) = self.check_safekeepers_synced(compute_state) {
    1120              :                     lsn
    1121              :                 } else {
    1122              :                     info!("starting safekeepers syncing");
    1123              :                     self.sync_safekeepers(pspec.storage_auth_token.clone())
    1124            0 :                         .with_context(|| "failed to sync safekeepers")?
    1125              :                 };
    1126              :                 info!("safekeepers synced at LSN {}", lsn);
    1127              :                 lsn
    1128              :             }
    1129              :             ComputeMode::Static(lsn) => {
    1130              :                 info!("Starting read-only node at static LSN {}", lsn);
    1131              :                 lsn
    1132              :             }
    1133              :             ComputeMode::Replica => {
    1134              :                 info!("Initializing standby from latest Pageserver LSN");
    1135              :                 Lsn(0)
    1136              :             }
    1137              :         };
    1138              : 
    1139              :         info!(
    1140              :             "getting basebackup@{} from pageserver {}",
    1141              :             lsn, &pspec.pageserver_connstr
    1142              :         );
    1143            0 :         self.get_basebackup(compute_state, lsn).with_context(|| {
    1144            0 :             format!(
    1145            0 :                 "failed to get basebackup@{} from pageserver {}",
    1146            0 :                 lsn, &pspec.pageserver_connstr
    1147            0 :             )
    1148            0 :         })?;
    1149              : 
    1150              :         // Update pg_hba.conf received with basebackup.
    1151              :         update_pg_hba(pgdata_path)?;
    1152              : 
    1153              :         // Place pg_dynshmem under /dev/shm. This allows us to use
    1154              :         // 'dynamic_shared_memory_type = mmap' so that the files are placed in
    1155              :         // /dev/shm, similar to how 'dynamic_shared_memory_type = posix' works.
    1156              :         //
    1157              :         // Why on earth don't we just stick to the 'posix' default, you might
    1158              :         // ask.  It turns out that making large allocations with 'posix' doesn't
    1159              :         // work very well with autoscaling. The behavior we want is that:
    1160              :         //
    1161              :         // 1. You can make large DSM allocations, larger than the current RAM
    1162              :         //    size of the VM, without errors
    1163              :         //
    1164              :         // 2. If the allocated memory is really used, the VM is scaled up
    1165              :         //    automatically to accommodate that
    1166              :         //
    1167              :         // We try to make that possible by having swap in the VM. But with the
    1168              :         // default 'posix' DSM implementation, we fail step 1, even when there's
    1169              :         // plenty of swap available. PostgreSQL uses posix_fallocate() to create
    1170              :         // the shmem segment, which is really just a file in /dev/shm in Linux,
    1171              :         // but posix_fallocate() on tmpfs returns ENOMEM if the size is larger
    1172              :         // than available RAM.
    1173              :         //
    1174              :         // Using 'dynamic_shared_memory_type = mmap' works around that, because
    1175              :         // the Postgres 'mmap' DSM implementation doesn't use
    1176              :         // posix_fallocate(). Instead, it uses repeated calls to write(2) to
    1177              :         // fill the file with zeros. It's weird that that differs between
    1178              :         // 'posix' and 'mmap', but we take advantage of it. When the file is
    1179              :         // filled slowly with write(2), the kernel allows it to grow larger, as
    1180              :         // long as there's swap available.
    1181              :         //
     1182              :         // In short, using 'dynamic_shared_memory_type = mmap' allows a single
     1183              :         // DSM segment to be larger than the currently available RAM. But we
     1184              :         // don't want to back it with a real file, which the kernel would try
     1185              :         // to flush to disk, so we symlink pg_dynshmem to /dev/shm.
    1186              :         //
    1187              :         // We don't set 'dynamic_shared_memory_type = mmap' here, we let the
    1188              :         // control plane control that option. If 'mmap' is not used, this
    1189              :         // symlink doesn't affect anything.
    1190              :         //
    1191              :         // See https://github.com/neondatabase/autoscaling/issues/800
    1192              :         std::fs::remove_dir(pgdata_path.join("pg_dynshmem"))?;
    1193              :         symlink("/dev/shm/", pgdata_path.join("pg_dynshmem"))?;
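        // Minimal sketch of the GUC the comment above refers to; it is set by the
        // control plane, not by compute_ctl, and is shown here only for illustration:
        //
        //   # postgresql.conf (managed by the control plane)
        //   dynamic_shared_memory_type = mmap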
    1194              : 
    1195              :         match spec.mode {
    1196              :             ComputeMode::Primary => {}
    1197              :             ComputeMode::Replica | ComputeMode::Static(..) => {
    1198              :                 add_standby_signal(pgdata_path)?;
    1199              :             }
    1200              :         }
    1201              : 
    1202              :         Ok(())
    1203              :     }
    1204              : 
    1205              :     /// Start and stop a postgres process to warm up the VM for startup.
    1206            0 :     pub fn prewarm_postgres(&self) -> Result<()> {
    1207            0 :         info!("prewarming");
    1208              : 
    1209              :         // Create pgdata
    1210            0 :         let pgdata = &format!("{}.warmup", self.params.pgdata);
    1211            0 :         create_pgdata(pgdata)?;
    1212              : 
    1213              :         // Run initdb to completion
    1214            0 :         info!("running initdb");
    1215            0 :         let initdb_bin = Path::new(&self.params.pgbin)
    1216            0 :             .parent()
    1217            0 :             .unwrap()
    1218            0 :             .join("initdb");
    1219            0 :         Command::new(initdb_bin)
    1220            0 :             .args(["--pgdata", pgdata])
    1221            0 :             .output()
    1222            0 :             .expect("cannot start initdb process");
    1223              : 
    1224              :         // Write conf
    1225              :         use std::io::Write;
    1226            0 :         let conf_path = Path::new(pgdata).join("postgresql.conf");
    1227            0 :         let mut file = std::fs::File::create(conf_path)?;
    1228            0 :         writeln!(file, "shared_buffers=65536")?;
    1229            0 :         writeln!(file, "port=51055")?; // Nobody should be connecting
    1230            0 :         writeln!(file, "shared_preload_libraries = 'neon'")?;
    1231              : 
    1232              :         // Start postgres
    1233            0 :         info!("starting postgres");
    1234            0 :         let mut pg = maybe_cgexec(&self.params.pgbin)
    1235            0 :             .args(["-D", pgdata])
    1236            0 :             .spawn()
    1237            0 :             .expect("cannot start postgres process");
    1238            0 : 
    1239            0 :         // Stop it when it's ready
    1240            0 :         info!("waiting for postgres");
    1241            0 :         wait_for_postgres(&mut pg, Path::new(pgdata))?;
     1242              :         // SIGQUIT orders postgres to exit immediately. We don't want to SIGKILL
     1243              :         // it, to avoid leaving orphaned processes prowling around while the
     1244              :         // datadir is wiped.
    1245            0 :         let pm_pid = Pid::from_raw(pg.id() as i32);
    1246            0 :         kill(pm_pid, Signal::SIGQUIT)?;
    1247            0 :         info!("sent SIGQUIT signal");
    1248            0 :         pg.wait()?;
    1249            0 :         info!("done prewarming");
    1250              : 
    1251              :         // clean up
    1252            0 :         let _ok = fs::remove_dir_all(pgdata);
    1253            0 :         Ok(())
    1254            0 :     }
    1255              : 
    1256              :     /// Start Postgres as a child process and wait for it to start accepting
    1257              :     /// connections.
    1258              :     ///
    1259              :     /// Returns a handle to the child process and a handle to the logs thread.
    1260              :     #[instrument(skip_all)]
    1261              :     pub fn start_postgres(&self, storage_auth_token: Option<String>) -> Result<PostgresHandle> {
    1262              :         let pgdata_path = Path::new(&self.params.pgdata);
    1263              : 
    1264              :         // Run postgres as a child process.
    1265              :         let mut pg = maybe_cgexec(&self.params.pgbin)
    1266              :             .args(["-D", &self.params.pgdata])
    1267              :             .envs(if let Some(storage_auth_token) = &storage_auth_token {
    1268              :                 vec![("NEON_AUTH_TOKEN", storage_auth_token)]
    1269              :             } else {
    1270              :                 vec![]
    1271              :             })
    1272              :             .stderr(Stdio::piped())
    1273              :             .spawn()
    1274              :             .expect("cannot start postgres process");
    1275              :         PG_PID.store(pg.id(), Ordering::SeqCst);
    1276              : 
    1277              :         // Start a task to collect logs from stderr.
    1278              :         let stderr = pg.stderr.take().expect("stderr should be captured");
    1279              :         let logs_handle = handle_postgres_logs(stderr);
    1280              : 
    1281              :         wait_for_postgres(&mut pg, pgdata_path)?;
    1282              : 
    1283              :         Ok(PostgresHandle {
    1284              :             postgres: pg,
    1285              :             log_collector: logs_handle,
    1286              :         })
    1287              :     }
    1288              : 
    1289              :     /// Wait for the child Postgres process forever. In this state Ctrl+C will
    1290              :     /// propagate to Postgres and it will be shut down as well.
    1291            0 :     fn wait_postgres(&self, mut pg_handle: PostgresHandle) -> std::process::ExitStatus {
    1292            0 :         info!(postmaster_pid = %pg_handle.postgres.id(), "Waiting for Postgres to exit");
    1293              : 
    1294            0 :         let ecode = pg_handle
    1295            0 :             .postgres
    1296            0 :             .wait()
    1297            0 :             .expect("failed to start waiting on Postgres process");
    1298            0 :         PG_PID.store(0, Ordering::SeqCst);
    1299            0 : 
    1300            0 :         // Process has exited. Wait for the log collecting task to finish.
    1301            0 :         let _ = tokio::runtime::Handle::current()
    1302            0 :             .block_on(pg_handle.log_collector)
    1303            0 :             .map_err(|e| tracing::error!("log task panicked: {:?}", e));
    1304            0 : 
    1305            0 :         ecode
    1306            0 :     }
    1307              : 
     1308              :     /// Do post-configuration of the already started Postgres. This function spawns a background task to
    1309              :     /// configure the database after applying the compute spec. Currently, it upgrades the neon extension
    1310              :     /// version. In the future, it may upgrade all 3rd-party extensions.
    1311              :     #[instrument(skip_all)]
    1312              :     pub fn post_apply_config(&self) -> Result<()> {
    1313              :         let conf = self.get_tokio_conn_conf(Some("compute_ctl:post_apply_config"));
    1314            0 :         tokio::spawn(async move {
    1315            0 :             let res = async {
    1316            0 :                 let (mut client, connection) = conf.connect(NoTls).await?;
    1317            0 :                 tokio::spawn(async move {
    1318            0 :                     if let Err(e) = connection.await {
    1319            0 :                         eprintln!("connection error: {}", e);
    1320            0 :                     }
    1321            0 :                 });
    1322            0 : 
    1323            0 :                 handle_neon_extension_upgrade(&mut client)
    1324            0 :                     .await
    1325            0 :                     .context("handle_neon_extension_upgrade")?;
    1326            0 :                 Ok::<_, anyhow::Error>(())
    1327            0 :             }
    1328            0 :             .await;
    1329            0 :             if let Err(err) = res {
    1330            0 :                 error!("error while post_apply_config: {err:#}");
    1331            0 :             }
    1332            0 :         });
    1333              :         Ok(())
    1334              :     }
    1335              : 
    1336            0 :     pub fn get_conn_conf(&self, application_name: Option<&str>) -> postgres::Config {
    1337            0 :         let mut conf = self.conn_conf.clone();
    1338            0 :         if let Some(application_name) = application_name {
    1339            0 :             conf.application_name(application_name);
    1340            0 :         }
    1341            0 :         conf
    1342            0 :     }
    1343              : 
    1344            0 :     pub fn get_tokio_conn_conf(&self, application_name: Option<&str>) -> tokio_postgres::Config {
    1345            0 :         let mut conf = self.tokio_conn_conf.clone();
    1346            0 :         if let Some(application_name) = application_name {
    1347            0 :             conf.application_name(application_name);
    1348            0 :         }
    1349            0 :         conf
    1350            0 :     }
    1351              : 
    1352            0 :     pub async fn get_maintenance_client(
    1353            0 :         conf: &tokio_postgres::Config,
    1354            0 :     ) -> Result<tokio_postgres::Client> {
    1355            0 :         let mut conf = conf.clone();
    1356            0 :         conf.application_name("compute_ctl:apply_config");
    1357              : 
    1358            0 :         let (client, conn) = match conf.connect(NoTls).await {
    1359              :             // If connection fails, it may be the old node with `zenith_admin` superuser.
    1360              :             //
    1361              :             // In this case we need to connect with old `zenith_admin` name
     1362              :             // In this case we need to connect with the old `zenith_admin` name
     1363              :             // and create a new user. We cannot simply rename the connected user,
     1364              :             // but we can create a new one and grant it all privileges.
    1365              :                 Some(&SqlState::INVALID_PASSWORD)
    1366              :                 | Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => {
    1367              :                     // Connect with zenith_admin if cloud_admin could not authenticate
    1368            0 :                     info!(
    1369            0 :                         "cannot connect to postgres: {}, retrying with `zenith_admin` username",
    1370              :                         e
    1371              :                     );
    1372            0 :                     let mut zenith_admin_conf = postgres::config::Config::from(conf.clone());
    1373            0 :                     zenith_admin_conf.application_name("compute_ctl:apply_config");
    1374            0 :                     zenith_admin_conf.user("zenith_admin");
    1375              : 
    1376            0 :                     let mut client =
    1377            0 :                         zenith_admin_conf.connect(NoTls)
    1378            0 :                             .context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
    1379              : 
    1380              :                     // Disable forwarding so that users don't get a cloud_admin role
    1381            0 :                     let mut func = || {
    1382            0 :                         client.simple_query("SET neon.forward_ddl = false")?;
    1383            0 :                         client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
    1384            0 :                         client.simple_query("GRANT zenith_admin TO cloud_admin")?;
    1385            0 :                         Ok::<_, anyhow::Error>(())
    1386            0 :                     };
    1387            0 :                     func().context("apply_config setup cloud_admin")?;
    1388              : 
    1389            0 :                     drop(client);
    1390            0 : 
    1391            0 :                     // Reconnect with connstring with expected name
    1392            0 :                     conf.connect(NoTls).await?
    1393              :                 }
    1394            0 :                 _ => return Err(e.into()),
    1395              :             },
    1396            0 :             Ok((client, conn)) => (client, conn),
    1397              :         };
    1398              : 
    1399            0 :         spawn(async move {
    1400            0 :             if let Err(e) = conn.await {
    1401            0 :                 error!("maintenance client connection error: {}", e);
    1402            0 :             }
    1403            0 :         });
    1404            0 : 
    1405            0 :         // Disable DDL forwarding because control plane already knows about the roles/databases
    1406            0 :         // we're about to modify.
    1407            0 :         client
    1408            0 :             .simple_query("SET neon.forward_ddl = false")
    1409            0 :             .await
    1410            0 :             .context("apply_config SET neon.forward_ddl = false")?;
    1411              : 
    1412            0 :         Ok(client)
    1413            0 :     }
    1414              : 
    1415              :     /// Do initial configuration of the already started Postgres.
    1416              :     #[instrument(skip_all)]
    1417              :     pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
    1418              :         let conf = self.get_tokio_conn_conf(Some("compute_ctl:apply_config"));
    1419              : 
    1420              :         let conf = Arc::new(conf);
    1421              :         let spec = Arc::new(
    1422              :             compute_state
    1423              :                 .pspec
    1424              :                 .as_ref()
    1425              :                 .expect("spec must be set")
    1426              :                 .spec
    1427              :                 .clone(),
    1428              :         );
    1429              : 
    1430              :         let max_concurrent_connections = self.max_service_connections(compute_state, &spec);
    1431              : 
    1432              :         // Merge-apply spec & changes to PostgreSQL state.
    1433              :         self.apply_spec_sql(spec.clone(), conf.clone(), max_concurrent_connections)?;
    1434              : 
    1435              :         if let Some(local_proxy) = &spec.clone().local_proxy_config {
    1436              :             info!("configuring local_proxy");
    1437              :             local_proxy::configure(local_proxy).context("apply_config local_proxy")?;
    1438              :         }
    1439              : 
    1440              :         // Run migrations separately to not hold up cold starts
    1441            0 :         tokio::spawn(async move {
    1442            0 :             let mut conf = conf.as_ref().clone();
    1443            0 :             conf.application_name("compute_ctl:migrations");
    1444            0 : 
    1445            0 :             match conf.connect(NoTls).await {
    1446            0 :                 Ok((mut client, connection)) => {
    1447            0 :                     tokio::spawn(async move {
    1448            0 :                         if let Err(e) = connection.await {
    1449            0 :                             eprintln!("connection error: {}", e);
    1450            0 :                         }
    1451            0 :                     });
    1452            0 :                     if let Err(e) = handle_migrations(&mut client).await {
    1453            0 :                         error!("Failed to run migrations: {}", e);
    1454            0 :                     }
    1455              :                 }
    1456            0 :                 Err(e) => {
    1457            0 :                     error!(
    1458            0 :                         "Failed to connect to the compute for running migrations: {}",
    1459              :                         e
    1460              :                     );
    1461              :                 }
    1462              :             };
    1463            0 :         });
    1464              : 
    1465              :         Ok::<(), anyhow::Error>(())
    1466              :     }
    1467              : 
     1468              :     // This is a wrapper around `pg_ctl reload`. Note that right now we don't
     1469              :     // use `pg_ctl` for start / stop.
    1470              :     #[instrument(skip_all)]
    1471              :     fn pg_reload_conf(&self) -> Result<()> {
    1472              :         let pgctl_bin = Path::new(&self.params.pgbin)
    1473              :             .parent()
    1474              :             .unwrap()
    1475              :             .join("pg_ctl");
    1476              :         Command::new(pgctl_bin)
    1477              :             .args(["reload", "-D", &self.params.pgdata])
    1478              :             .output()
    1479              :             .expect("cannot run pg_ctl process");
    1480              :         Ok(())
    1481              :     }
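    // The command assembled above is equivalent to running (with <pgdata> standing
    // in for the actual data directory path):
    //
    //   pg_ctl reload -D <pgdata>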
    1482              : 
     1483              :     /// Similar to `apply_config()`, but performs a slightly different sequence of operations,
    1484              :     /// as it's used to reconfigure a previously started and configured Postgres node.
    1485              :     #[instrument(skip_all)]
    1486              :     pub fn reconfigure(&self) -> Result<()> {
    1487              :         let spec = self.state.lock().unwrap().pspec.clone().unwrap().spec;
    1488              : 
    1489              :         if let Some(ref pgbouncer_settings) = spec.pgbouncer_settings {
    1490              :             info!("tuning pgbouncer");
    1491              : 
    1492              :             // Spawn a background task to do the tuning,
    1493              :             // so that we don't block the main thread that starts Postgres.
    1494              :             let pgbouncer_settings = pgbouncer_settings.clone();
    1495            0 :             tokio::spawn(async move {
    1496            0 :                 let res = tune_pgbouncer(pgbouncer_settings).await;
    1497            0 :                 if let Err(err) = res {
    1498            0 :                     error!("error while tuning pgbouncer: {err:?}");
    1499            0 :                 }
    1500            0 :             });
    1501              :         }
    1502              : 
    1503              :         if let Some(ref local_proxy) = spec.local_proxy_config {
    1504              :             info!("configuring local_proxy");
    1505              : 
    1506              :             // Spawn a background task to do the configuration,
    1507              :             // so that we don't block the main thread that starts Postgres.
    1508              :             let local_proxy = local_proxy.clone();
    1509            0 :             tokio::spawn(async move {
    1510            0 :                 if let Err(err) = local_proxy::configure(&local_proxy) {
    1511            0 :                     error!("error while configuring local_proxy: {err:?}");
    1512            0 :                 }
    1513            0 :             });
    1514              :         }
    1515              : 
    1516              :         // Write new config
    1517              :         let pgdata_path = Path::new(&self.params.pgdata);
    1518              :         let postgresql_conf_path = pgdata_path.join("postgresql.conf");
    1519              :         config::write_postgres_conf(&postgresql_conf_path, &spec, self.params.internal_http_port)?;
    1520              : 
    1521              :         if !spec.skip_pg_catalog_updates {
    1522              :             let max_concurrent_connections = spec.reconfigure_concurrency;
    1523              :             // Temporarily reset max_cluster_size in config
    1524              :             // to avoid the possibility of hitting the limit, while we are reconfiguring:
    1525              :             // creating new extensions, roles, etc.
    1526            0 :             config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
    1527            0 :                 self.pg_reload_conf()?;
    1528              : 
    1529            0 :                 if spec.mode == ComputeMode::Primary {
    1530            0 :                     let mut conf =
    1531            0 :                         tokio_postgres::Config::from_str(self.params.connstr.as_str()).unwrap();
    1532            0 :                     conf.application_name("apply_config");
    1533            0 :                     let conf = Arc::new(conf);
    1534            0 : 
    1535            0 :                     let spec = Arc::new(spec.clone());
    1536            0 : 
    1537            0 :                     self.apply_spec_sql(spec, conf, max_concurrent_connections)?;
    1538            0 :                 }
    1539              : 
    1540            0 :                 Ok(())
    1541            0 :             })?;
    1542              :         }
    1543              : 
    1544              :         self.pg_reload_conf()?;
    1545              : 
    1546              :         let unknown_op = "unknown".to_string();
    1547              :         let op_id = spec.operation_uuid.as_ref().unwrap_or(&unknown_op);
    1548              :         info!(
    1549              :             "finished reconfiguration of compute node for operation {}",
    1550              :             op_id
    1551              :         );
    1552              : 
    1553              :         Ok(())
    1554              :     }
    1555              : 
    1556              :     #[instrument(skip_all)]
    1557              :     pub fn configure_as_primary(&self, compute_state: &ComputeState) -> Result<()> {
    1558              :         let pspec = compute_state.pspec.as_ref().expect("spec must be set");
    1559              : 
    1560              :         assert!(pspec.spec.mode == ComputeMode::Primary);
    1561              :         if !pspec.spec.skip_pg_catalog_updates {
    1562              :             let pgdata_path = Path::new(&self.params.pgdata);
    1563              :             // temporarily reset max_cluster_size in config
    1564              :             // to avoid the possibility of hitting the limit, while we are applying config:
    1565              :             // creating new extensions, roles, etc...
    1566            0 :             config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
    1567            0 :                 self.pg_reload_conf()?;
    1568              : 
    1569            0 :                 self.apply_config(compute_state)?;
    1570              : 
    1571            0 :                 Ok(())
    1572            0 :             })?;
    1573              : 
    1574              :             let postgresql_conf_path = pgdata_path.join("postgresql.conf");
    1575              :             if config::line_in_file(
    1576              :                 &postgresql_conf_path,
    1577              :                 "neon.disable_logical_replication_subscribers=false",
    1578              :             )? {
    1579              :                 info!(
    1580              :                     "updated postgresql.conf to set neon.disable_logical_replication_subscribers=false"
    1581              :                 );
    1582              :             }
    1583              :             self.pg_reload_conf()?;
    1584              :         }
    1585              :         self.post_apply_config()?;
    1586              : 
    1587              :         Ok(())
    1588              :     }
    1589              : 
    1590              :     /// Update the `last_active` in the shared state, but ensure that it's a more recent one.
    1591            0 :     pub fn update_last_active(&self, last_active: Option<DateTime<Utc>>) {
    1592            0 :         let mut state = self.state.lock().unwrap();
    1593            0 :         // NB: `Some(<DateTime>)` is always greater than `None`.
    1594            0 :         if last_active > state.last_active {
    1595            0 :             state.last_active = last_active;
    1596            0 :             debug!("set the last compute activity time to: {:?}", last_active);
    1597            0 :         }
    1598            0 :     }
    1599              : 
    1600              :     // Look for core dumps and collect backtraces.
    1601              :     //
     1602              :     // EKS worker nodes have the following core dump settings:
     1603              :     //   /proc/sys/kernel/core_pattern -> core
     1604              :     //   /proc/sys/kernel/core_uses_pid -> 1
     1605              :     //   ulimit -c -> unlimited
     1606              :     // which results in core dumps being written to the postgres data directory as core.<pid>.
     1607              :     //
     1608              :     // Use that as the default location and pattern, except on macOS, where core dumps
     1609              :     //     are written to the /cores/ directory by default.
    1610              :     //
    1611              :     // With default Linux settings, the core dump file is called just "core", so check for
    1612              :     // that too.
    1613            0 :     pub fn check_for_core_dumps(&self) -> Result<()> {
    1614            0 :         let core_dump_dir = match std::env::consts::OS {
    1615            0 :             "macos" => Path::new("/cores/"),
    1616            0 :             _ => Path::new(&self.params.pgdata),
    1617              :         };
    1618              : 
    1619              :         // Collect core dump paths if any
    1620            0 :         info!("checking for core dumps in {}", core_dump_dir.display());
    1621            0 :         let files = fs::read_dir(core_dump_dir)?;
    1622            0 :         let cores = files.filter_map(|entry| {
    1623            0 :             let entry = entry.ok()?;
    1624              : 
    1625            0 :             let is_core_dump = match entry.file_name().to_str()? {
    1626            0 :                 n if n.starts_with("core.") => true,
    1627            0 :                 "core" => true,
    1628            0 :                 _ => false,
    1629              :             };
    1630            0 :             if is_core_dump {
    1631            0 :                 Some(entry.path())
    1632              :             } else {
    1633            0 :                 None
    1634              :             }
    1635            0 :         });
    1636              : 
    1637              :         // Print backtrace for each core dump
    1638            0 :         for core_path in cores {
    1639            0 :             warn!(
    1640            0 :                 "core dump found: {}, collecting backtrace",
    1641            0 :                 core_path.display()
    1642              :             );
    1643              : 
    1644              :             // Try first with gdb
    1645            0 :             let backtrace = Command::new("gdb")
    1646            0 :                 .args(["--batch", "-q", "-ex", "bt", &self.params.pgbin])
    1647            0 :                 .arg(&core_path)
    1648            0 :                 .output();
    1649              : 
    1650              :             // Try lldb if no gdb is found -- that is handy for local testing on macOS
    1651            0 :             let backtrace = match backtrace {
    1652            0 :                 Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
    1653            0 :                     warn!("cannot find gdb, trying lldb");
    1654            0 :                     Command::new("lldb")
    1655            0 :                         .arg("-c")
    1656            0 :                         .arg(&core_path)
    1657            0 :                         .args(["--batch", "-o", "bt all", "-o", "quit"])
    1658            0 :                         .output()
    1659              :                 }
    1660            0 :                 _ => backtrace,
    1661            0 :             }?;
    1662              : 
    1663            0 :             warn!(
    1664            0 :                 "core dump backtrace: {}",
    1665            0 :                 String::from_utf8_lossy(&backtrace.stdout)
    1666              :             );
    1667            0 :             warn!(
    1668            0 :                 "debugger stderr: {}",
    1669            0 :                 String::from_utf8_lossy(&backtrace.stderr)
    1670              :             );
    1671              :         }
    1672              : 
    1673            0 :         Ok(())
    1674            0 :     }
    1675              : 
    1676              :     /// Select `pg_stat_statements` data and return it as a stringified JSON
    1677            0 :     pub async fn collect_insights(&self) -> String {
    1678            0 :         let mut result_rows: Vec<String> = Vec::new();
    1679            0 :         let conf = self.get_tokio_conn_conf(Some("compute_ctl:collect_insights"));
    1680            0 :         let connect_result = conf.connect(NoTls).await;
    1681            0 :         let (client, connection) = connect_result.unwrap();
    1682            0 :         tokio::spawn(async move {
    1683            0 :             if let Err(e) = connection.await {
    1684            0 :                 eprintln!("connection error: {}", e);
    1685            0 :             }
    1686            0 :         });
    1687            0 :         let result = client
    1688            0 :             .simple_query(
    1689            0 :                 "SELECT
    1690            0 :     row_to_json(pg_stat_statements)
    1691            0 : FROM
    1692            0 :     pg_stat_statements
    1693            0 : WHERE
    1694            0 :     userid != 'cloud_admin'::regrole::oid
    1695            0 : ORDER BY
    1696            0 :     (mean_exec_time + mean_plan_time) DESC
    1697            0 : LIMIT 100",
    1698            0 :             )
    1699            0 :             .await;
    1700              : 
    1701            0 :         if let Ok(raw_rows) = result {
    1702            0 :             for message in raw_rows.iter() {
    1703            0 :                 if let postgres::SimpleQueryMessage::Row(row) = message {
    1704            0 :                     if let Some(json) = row.get(0) {
    1705            0 :                         result_rows.push(json.to_string());
    1706            0 :                     }
    1707            0 :                 }
    1708              :             }
    1709              : 
    1710            0 :             format!("{{\"pg_stat_statements\": [{}]}}", result_rows.join(","))
    1711              :         } else {
    1712            0 :             "{{\"pg_stat_statements\": []}}".to_string()
    1713              :         }
    1714            0 :     }
    1715              : 
    1716              :     // download an archive, unzip and place files in correct locations
    1717            0 :     pub async fn download_extension(
    1718            0 :         &self,
    1719            0 :         real_ext_name: String,
    1720            0 :         ext_path: RemotePath,
    1721            0 :     ) -> Result<u64, DownloadError> {
    1722            0 :         let ext_remote_storage =
    1723            0 :             self.params
    1724            0 :                 .ext_remote_storage
    1725            0 :                 .as_ref()
    1726            0 :                 .ok_or(DownloadError::BadInput(anyhow::anyhow!(
    1727            0 :                     "Remote extensions storage is not configured",
    1728            0 :                 )))?;
    1729              : 
    1730            0 :         let ext_archive_name = ext_path.object_name().expect("bad path");
    1731            0 : 
    1732            0 :         let mut first_try = false;
    1733            0 :         if !self
    1734            0 :             .ext_download_progress
    1735            0 :             .read()
    1736            0 :             .expect("lock err")
    1737            0 :             .contains_key(ext_archive_name)
    1738            0 :         {
    1739            0 :             self.ext_download_progress
    1740            0 :                 .write()
    1741            0 :                 .expect("lock err")
    1742            0 :                 .insert(ext_archive_name.to_string(), (Utc::now(), false));
    1743            0 :             first_try = true;
    1744            0 :         }
    1745            0 :         let (download_start, download_completed) =
    1746            0 :             self.ext_download_progress.read().expect("lock err")[ext_archive_name];
    1747            0 :         let start_time_delta = Utc::now()
    1748            0 :             .signed_duration_since(download_start)
    1749            0 :             .to_std()
    1750            0 :             .unwrap()
    1751            0 :             .as_millis() as u64;
    1752              : 
    1753              :         // how long to wait for extension download if it was started by another process
    1754              :         const HANG_TIMEOUT: u64 = 3000; // milliseconds
    1755              : 
    1756            0 :         if download_completed {
    1757            0 :             info!("extension already downloaded, skipping re-download");
    1758            0 :             return Ok(0);
    1759            0 :         } else if start_time_delta < HANG_TIMEOUT && !first_try {
    1760            0 :             info!(
     1761            0 :                 "download {ext_archive_name} already started by another process, hanging until completion or timeout"
    1762              :             );
    1763            0 :             let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500));
    1764              :             loop {
    1765            0 :                 info!("waiting for download");
    1766            0 :                 interval.tick().await;
    1767            0 :                 let (_, download_completed_now) =
    1768            0 :                     self.ext_download_progress.read().expect("lock")[ext_archive_name];
    1769            0 :                 if download_completed_now {
    1770            0 :                     info!("download finished by whoever else downloaded it");
    1771            0 :                     return Ok(0);
    1772            0 :                 }
    1773              :             }
    1774              :             // NOTE: the above loop will get terminated
    1775              :             // based on the timeout of the download function
    1776            0 :         }
    1777            0 : 
    1778            0 :         // if extension hasn't been downloaded before or the previous
    1779            0 :         // attempt to download was at least HANG_TIMEOUT ms ago
    1780            0 :         // then we try to download it here
    1781            0 :         info!("downloading new extension {ext_archive_name}");
    1782              : 
    1783            0 :         let download_size = extension_server::download_extension(
    1784            0 :             &real_ext_name,
    1785            0 :             &ext_path,
    1786            0 :             ext_remote_storage,
    1787            0 :             &self.params.pgbin,
    1788            0 :         )
    1789            0 :         .await
    1790            0 :         .map_err(DownloadError::Other);
    1791            0 : 
    1792            0 :         if download_size.is_ok() {
    1793            0 :             self.ext_download_progress
    1794            0 :                 .write()
    1795            0 :                 .expect("bad lock")
    1796            0 :                 .insert(ext_archive_name.to_string(), (download_start, true));
    1797            0 :         }
    1798              : 
    1799            0 :         download_size
    1800            0 :     }
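
Aside: a minimal sketch of the wait-on-peer pattern used above, assuming a shared
RwLock<HashMap<name, (start_time, completed)>> like ext_download_progress. The
function name wait_for_peer and the ProgressMap alias are illustrative only; the
real method additionally relies on an outer download timeout around this polling loop.

    use std::collections::HashMap;
    use std::sync::{Arc, RwLock};
    use std::time::Duration;

    use chrono::{DateTime, Utc};

    type ProgressMap = Arc<RwLock<HashMap<String, (DateTime<Utc>, bool)>>>;

    /// Poll the shared progress map until a peer marks the archive as downloaded.
    async fn wait_for_peer(progress: ProgressMap, archive: &str) -> bool {
        let mut interval = tokio::time::interval(Duration::from_millis(500));
        loop {
            interval.tick().await;
            let status = progress
                .read()
                .expect("lock err")
                .get(archive)
                .map(|&(_, done)| done);
            match status {
                Some(true) => return true,   // a peer finished the download
                Some(false) => continue,     // still in progress; keep polling
                None => return false,        // nobody started it; caller downloads itself
            }
        }
    }
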
    1801              : 
    1802            0 :     pub async fn set_role_grants(
    1803            0 :         &self,
    1804            0 :         db_name: &PgIdent,
    1805            0 :         schema_name: &PgIdent,
    1806            0 :         privileges: &[Privilege],
    1807            0 :         role_name: &PgIdent,
    1808            0 :     ) -> Result<()> {
    1809              :         use tokio_postgres::NoTls;
    1810              : 
    1811            0 :         let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:set_role_grants"));
    1812            0 :         conf.dbname(db_name);
    1813              : 
    1814            0 :         let (db_client, conn) = conf
    1815            0 :             .connect(NoTls)
    1816            0 :             .await
    1817            0 :             .context("Failed to connect to the database")?;
    1818            0 :         tokio::spawn(conn);
    1819            0 : 
    1820            0 :         // TODO: support other types of grants apart from schemas?
    1821            0 :         let query = format!(
    1822            0 :             "GRANT {} ON SCHEMA {} TO {}",
    1823            0 :             privileges
    1824            0 :                 .iter()
    1825            0 :                 // Privilege names are not quoted: they are part of the command
    1826            0 :                 // itself and come from a fixed set, so they are already sanitized.
    1827            0 :                 .map(|p| p.as_str())
    1828            0 :                 .collect::<Vec<&'static str>>()
    1829            0 :                 .join(", "),
    1830            0 :             // quote the schema and role name as identifiers to sanitize them.
    1831            0 :             schema_name.pg_quote(),
    1832            0 :             role_name.pg_quote(),
    1833            0 :         );
    1834            0 :         db_client
    1835            0 :             .simple_query(&query)
    1836            0 :             .await
    1837            0 :             .with_context(|| format!("Failed to execute query: {}", query))?;
    1838              : 
    1839            0 :         Ok(())
    1840            0 :     }
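
Aside: an illustrative sketch of the quoting rule applied in set_role_grants.
Privilege keywords are emitted verbatim (they come from a fixed enum), while
schema and role names are quoted as identifiers. quote_ident below is a
stand-in for the pg_quote() helper from pg_helpers, not the real implementation.

    /// Quote a Postgres identifier: double any embedded quotes and wrap the name.
    fn quote_ident(ident: &str) -> String {
        format!("\"{}\"", ident.replace('"', "\"\""))
    }

    /// Assemble a GRANT ... ON SCHEMA ... TO ... statement.
    fn grant_query(privileges: &[&str], schema: &str, role: &str) -> String {
        format!(
            "GRANT {} ON SCHEMA {} TO {}",
            privileges.join(", "),
            quote_ident(schema),
            quote_ident(role),
        )
    }

    // grant_query(&["USAGE", "CREATE"], "public", "readonly_role")
    // => GRANT USAGE, CREATE ON SCHEMA "public" TO "readonly_role"
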
    1841              : 
    1842            0 :     pub async fn install_extension(
    1843            0 :         &self,
    1844            0 :         ext_name: &PgIdent,
    1845            0 :         db_name: &PgIdent,
    1846            0 :         ext_version: ExtVersion,
    1847            0 :     ) -> Result<ExtVersion> {
    1848              :         use tokio_postgres::NoTls;
    1849              : 
    1850            0 :         let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:install_extension"));
    1851            0 :         conf.dbname(db_name);
    1852              : 
    1853            0 :         let (db_client, conn) = conf
    1854            0 :             .connect(NoTls)
    1855            0 :             .await
    1856            0 :             .context("Failed to connect to the database")?;
    1857            0 :         tokio::spawn(conn);
    1858            0 : 
    1859            0 :         let version_query = "SELECT extversion FROM pg_extension WHERE extname = $1";
    1860            0 :         let version: Option<ExtVersion> = db_client
    1861            0 :             .query_opt(version_query, &[&ext_name])
    1862            0 :             .await
    1863            0 :             .with_context(|| format!("Failed to execute query: {}", version_query))?
    1864            0 :             .map(|row| row.get(0));
    1865            0 : 
    1866            0 :         // sanitize the inputs as postgres idents.
    1867            0 :         let ext_name: String = ext_name.pg_quote();
    1868            0 :         let quoted_version: String = ext_version.pg_quote();
    1869              : 
    1870            0 :         if let Some(installed_version) = version {
    1871            0 :             if installed_version == ext_version {
    1872            0 :                 return Ok(installed_version);
    1873            0 :             }
    1874            0 :             let query = format!("ALTER EXTENSION {ext_name} UPDATE TO {quoted_version}");
    1875            0 :             db_client
    1876            0 :                 .simple_query(&query)
    1877            0 :                 .await
    1878            0 :                 .with_context(|| format!("Failed to execute query: {}", query))?;
    1879              :         } else {
    1880            0 :             let query =
    1881            0 :                 format!("CREATE EXTENSION IF NOT EXISTS {ext_name} WITH VERSION {quoted_version}");
    1882            0 :             db_client
    1883            0 :                 .simple_query(&query)
    1884            0 :                 .await
    1885            0 :                 .with_context(|| format!("Failed to execute query: {}", query))?;
    1886              :         }
    1887              : 
    1888            0 :         Ok(ext_version)
    1889            0 :     }
    1890              : 
    1891            0 :     pub async fn prepare_preload_libraries(
    1892            0 :         &self,
    1893            0 :         spec: &ComputeSpec,
    1894            0 :     ) -> Result<RemoteExtensionMetrics> {
    1895            0 :         if self.params.ext_remote_storage.is_none() {
    1896            0 :             return Ok(RemoteExtensionMetrics {
    1897            0 :                 num_ext_downloaded: 0,
    1898            0 :                 largest_ext_size: 0,
    1899            0 :                 total_ext_download_size: 0,
    1900            0 :             });
    1901            0 :         }
    1902            0 :         let remote_extensions = spec
    1903            0 :             .remote_extensions
    1904            0 :             .as_ref()
    1905            0 :             .ok_or(anyhow::anyhow!("Remote extensions are not configured"))?;
    1906              : 
    1907            0 :         info!("parse shared_preload_libraries from spec.cluster.settings");
    1908            0 :         let mut libs_vec = Vec::new();
    1909            0 :         if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
    1910            0 :             libs_vec = libs
    1911            0 :                 .split(&[',', '\'', ' '])
    1912            0 :                 .filter(|s| *s != "neon" && !s.is_empty())
    1913            0 :                 .map(str::to_string)
    1914            0 :                 .collect();
    1915            0 :         }
    1916            0 :         info!("parse shared_preload_libraries from provided postgresql.conf");
    1917              : 
    1918              :         // This form is used by neon_local and the python tests.
    1919            0 :         if let Some(conf) = &spec.cluster.postgresql_conf {
    1920            0 :             let conf_lines = conf.split('\n').collect::<Vec<&str>>();
    1921            0 :             let mut shared_preload_libraries_line = "";
    1922            0 :             for line in conf_lines {
    1923            0 :                 if line.starts_with("shared_preload_libraries") {
    1924            0 :                     shared_preload_libraries_line = line;
    1925            0 :                 }
    1926              :             }
    1927            0 :             let mut preload_libs_vec = Vec::new();
    1928            0 :             if let Some(libs) = shared_preload_libraries_line.split("='").nth(1) {
    1929            0 :                 preload_libs_vec = libs
    1930            0 :                     .split(&[',', '\'', ' '])
    1931            0 :                     .filter(|s| *s != "neon" && !s.is_empty())
    1932            0 :                     .map(str::to_string)
    1933            0 :                     .collect();
    1934            0 :             }
    1935            0 :             libs_vec.extend(preload_libs_vec);
    1936            0 :         }
    1937              : 
    1938              :         // Don't try to download libraries that are not in the index.
    1939              :         // Assume that they are already present locally.
    1940            0 :         libs_vec.retain(|lib| remote_extensions.library_index.contains_key(lib));
    1941            0 : 
    1942            0 :         info!("Downloading shared preload libraries: {:?}", &libs_vec);
    1943              : 
    1944            0 :         let mut download_tasks = Vec::new();
    1945            0 :         for library in &libs_vec {
    1946            0 :             let (ext_name, ext_path) = remote_extensions.get_ext(
    1947            0 :                 library,
    1948            0 :                 true,
    1949            0 :                 &self.params.build_tag,
    1950            0 :                 &self.params.pgversion,
    1951            0 :             )?;
    1952            0 :             download_tasks.push(self.download_extension(ext_name, ext_path));
    1953              :         }
    1954            0 :         let results = join_all(download_tasks).await;
    1955              : 
    1956            0 :         let mut remote_ext_metrics = RemoteExtensionMetrics {
    1957            0 :             num_ext_downloaded: 0,
    1958            0 :             largest_ext_size: 0,
    1959            0 :             total_ext_download_size: 0,
    1960            0 :         };
    1961            0 :         for result in results {
    1962            0 :             let download_size = match result {
    1963            0 :                 Ok(res) => {
    1964            0 :                     remote_ext_metrics.num_ext_downloaded += 1;
    1965            0 :                     res
    1966              :                 }
    1967            0 :                 Err(err) => {
    1968            0 :                     // if we failed to download an extension, we don't want to fail the whole
    1969            0 :                     // process, but we do want to log the error
    1970            0 :                     error!("Failed to download extension: {}", err);
    1971            0 :                     0
    1972              :                 }
    1973              :             };
    1974              : 
    1975            0 :             remote_ext_metrics.largest_ext_size =
    1976            0 :                 std::cmp::max(remote_ext_metrics.largest_ext_size, download_size);
    1977            0 :             remote_ext_metrics.total_ext_download_size += download_size;
    1978              :         }
    1979            0 :         Ok(remote_ext_metrics)
    1980            0 :     }
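
Aside: a small sketch of the shared_preload_libraries parsing done in
prepare_preload_libraries, for a postgresql.conf line such as
shared_preload_libraries='neon,pg_stat_statements'. The function name
parse_preload_libs is hypothetical; the split on [',', '\'', ' '] strips quotes
and whitespace, and "neon" is filtered out because it is always preloaded.

    fn parse_preload_libs(conf_line: &str) -> Vec<String> {
        conf_line
            .split("='")
            .nth(1)
            .map(|libs| {
                libs.split(&[',', '\'', ' '])
                    .filter(|s| *s != "neon" && !s.is_empty())
                    .map(str::to_string)
                    .collect()
            })
            .unwrap_or_default()
    }

    // parse_preload_libs("shared_preload_libraries='neon,pg_stat_statements'")
    // => ["pg_stat_statements"]
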
    1981              : 
    1982              :     /// Waits until the current thread receives a state-changed notification
    1983              :     /// and the pageserver connection string has changed.
    1984              :     ///
    1985              :     /// The operation will time out after a specified duration.
    1986            0 :     pub fn wait_timeout_while_pageserver_connstr_unchanged(&self, duration: Duration) {
    1987            0 :         let state = self.state.lock().unwrap();
    1988            0 :         let old_pageserver_connstr = state
    1989            0 :             .pspec
    1990            0 :             .as_ref()
    1991            0 :             .expect("spec must be set")
    1992            0 :             .pageserver_connstr
    1993            0 :             .clone();
    1994            0 :         let mut unchanged = true;
    1995            0 :         let _ = self
    1996            0 :             .state_changed
    1997            0 :             .wait_timeout_while(state, duration, |s| {
    1998            0 :                 let pageserver_connstr = &s
    1999            0 :                     .pspec
    2000            0 :                     .as_ref()
    2001            0 :                     .expect("spec must be set")
    2002            0 :                     .pageserver_connstr;
    2003            0 :                 unchanged = pageserver_connstr == &old_pageserver_connstr;
    2004            0 :                 unchanged
    2005            0 :             })
    2006            0 :             .unwrap();
    2007            0 :         if !unchanged {
    2008            0 :             info!("Pageserver config changed");
    2009            0 :         }
    2010            0 :     }
    2011              : }
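
Aside: a minimal sketch of the Condvar pattern used by
wait_timeout_while_pageserver_connstr_unchanged, with made-up names (Shared,
wait_for_change). wait_timeout_while re-evaluates the predicate on every
notification and returns once the predicate is false or the timeout elapses.

    use std::sync::{Condvar, Mutex};
    use std::time::Duration;

    struct Shared {
        value: Mutex<String>,
        changed: Condvar,
    }

    /// Block until `value` differs from `old` or `timeout` elapses;
    /// returns true if the value actually changed.
    fn wait_for_change(shared: &Shared, old: &str, timeout: Duration) -> bool {
        let guard = shared.value.lock().unwrap();
        let (guard, _result) = shared
            .changed
            .wait_timeout_while(guard, timeout, |v| v.as_str() == old)
            .unwrap();
        guard.as_str() != old
    }
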
    2012              : 
    2013            0 : pub fn forward_termination_signal() {
    2014            0 :     let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
    2015            0 :     if ss_pid != 0 {
    2016            0 :         let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
    2017            0 :         kill(ss_pid, Signal::SIGTERM).ok();
    2018            0 :     }
    2019            0 :     let pg_pid = PG_PID.load(Ordering::SeqCst);
    2020            0 :     if pg_pid != 0 {
    2021            0 :         let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
    2022            0 :         // Use 'fast' shutdown (SIGINT) because it also creates a shutdown checkpoint, which is important for
    2023            0 :         // ROs to get a list of running xacts faster instead of going through the CLOG.
    2024            0 :         // See https://www.postgresql.org/docs/current/server-shutdown.html for the list of modes and signals.
    2025            0 :         kill(pg_pid, Signal::SIGINT).ok();
    2026            0 :     }
    2027            0 : }
    2028              : 
    2029              : // Helper trait that wraps JoinSet::spawn_blocking(f) and propagates the
    2030              : // current tracing span to the spawned thread.
    2031              : trait JoinSetExt<T> {
    2032              :     fn spawn_blocking_child<F>(&mut self, f: F) -> tokio::task::AbortHandle
    2033              :     where
    2034              :         F: FnOnce() -> T + Send + 'static,
    2035              :         T: Send;
    2036              : }
    2037              : 
    2038              : impl<T: 'static> JoinSetExt<T> for tokio::task::JoinSet<T> {
    2039            0 :     fn spawn_blocking_child<F>(&mut self, f: F) -> tokio::task::AbortHandle
    2040            0 :     where
    2041            0 :         F: FnOnce() -> T + Send + 'static,
    2042            0 :         T: Send,
    2043            0 :     {
    2044            0 :         let sp = tracing::Span::current();
    2045            0 :         self.spawn_blocking(move || {
    2046            0 :             let _e = sp.enter();
    2047            0 :             f()
    2048            0 :         })
    2049            0 :     }
    2050              : }
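
Aside: a hypothetical usage sketch for spawn_blocking_child (the trait is private
to this module, so this only applies within compute.rs). The function name
run_blocking_steps is made up; the point is that the spawn captures the current
tracing span, so logs from the blocking thread stay correlated with the caller.

    async fn run_blocking_steps() -> anyhow::Result<()> {
        let mut tasks = tokio::task::JoinSet::new();

        {
            // spawn_blocking_child snapshots tracing::Span::current(), so enter
            // the parent span only around the spawn call itself.
            let _guard = tracing::info_span!("startup_steps").entered();
            tasks.spawn_blocking_child(|| {
                tracing::info!("running blocking step"); // logged inside "startup_steps"
                Ok::<(), anyhow::Error>(())
            });
        }

        while let Some(res) = tasks.join_next().await {
            res??;
        }
        Ok(())
    }
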
        

Generated by: LCOV version 2.1-beta