LCOV - code coverage report
Current view: top level - compute_tools/src - compute.rs (source / functions)
Test: 1d5975439f3c9882b18414799141ebf9a3922c58.info
Test Date: 2025-07-31 15:59:03
Coverage: Lines: 5.7 % (81 of 1420 hit) | Functions: 5.0 % (6 of 120 hit)

            Line data    Source code
       1              : use anyhow::{Context, Result};
       2              : use chrono::{DateTime, Utc};
       3              : use compute_api::privilege::Privilege;
       4              : use compute_api::responses::{
       5              :     ComputeConfig, ComputeCtlConfig, ComputeMetrics, ComputeStatus, LfcOffloadState,
       6              :     LfcPrewarmState, PromoteState, TlsConfig,
       7              : };
       8              : use compute_api::spec::{
       9              :     ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, ExtVersion, GenericOption,
      10              :     PageserverConnectionInfo, PageserverProtocol, PgIdent, Role,
      11              : };
      12              : use futures::StreamExt;
      13              : use futures::future::join_all;
      14              : use futures::stream::FuturesUnordered;
      15              : use itertools::Itertools;
      16              : use nix::sys::signal::{Signal, kill};
      17              : use nix::unistd::Pid;
      18              : use once_cell::sync::Lazy;
      19              : use pageserver_page_api::{self as page_api, BaseBackupCompression};
      20              : use postgres;
      21              : use postgres::NoTls;
      22              : use postgres::error::SqlState;
      23              : use remote_storage::{DownloadError, RemotePath};
      24              : use std::collections::{HashMap, HashSet};
      25              : use std::ffi::OsString;
      26              : use std::os::unix::fs::{PermissionsExt, symlink};
      27              : use std::path::Path;
      28              : use std::process::{Command, Stdio};
      29              : use std::str::FromStr;
      30              : use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
      31              : use std::sync::{Arc, Condvar, Mutex, RwLock};
      32              : use std::time::{Duration, Instant};
      33              : use std::{env, fs};
      34              : use tokio::{spawn, sync::watch, task::JoinHandle, time};
      35              : use tokio_util::sync::CancellationToken;
      36              : use tracing::{Instrument, debug, error, info, instrument, warn};
      37              : use url::Url;
      38              : use utils::backoff::{
      39              :     DEFAULT_BASE_BACKOFF_SECONDS, DEFAULT_MAX_BACKOFF_SECONDS, exponential_backoff_duration,
      40              : };
      41              : use utils::id::{TenantId, TimelineId};
      42              : use utils::lsn::Lsn;
      43              : use utils::measured_stream::MeasuredReader;
      44              : use utils::pid_file;
      45              : use utils::shard::{ShardIndex, ShardNumber, ShardStripeSize};
      46              : 
      47              : use crate::configurator::launch_configurator;
      48              : use crate::disk_quota::set_disk_quota;
      49              : use crate::hadron_metrics::COMPUTE_ATTACHED;
      50              : use crate::installed_extensions::get_installed_extensions;
      51              : use crate::logger::{self, startup_context_from_env};
      52              : use crate::lsn_lease::launch_lsn_lease_bg_task_for_static;
      53              : use crate::metrics::COMPUTE_CTL_UP;
      54              : use crate::monitor::launch_monitor;
      55              : use crate::pg_helpers::*;
      56              : use crate::pgbouncer::*;
      57              : use crate::rsyslog::{
      58              :     PostgresLogsRsyslogConfig, configure_audit_rsyslog, configure_postgres_logs_export,
      59              :     launch_pgaudit_gc,
      60              : };
      61              : use crate::spec::*;
      62              : use crate::swap::resize_swap;
      63              : use crate::sync_sk::{check_if_synced, ping_safekeeper};
      64              : use crate::tls::watch_cert_for_changes;
      65              : use crate::{config, extension_server, local_proxy};
      66              : 
      67              : pub static SYNC_SAFEKEEPERS_PID: AtomicU32 = AtomicU32::new(0);
      68              : pub static PG_PID: AtomicU32 = AtomicU32::new(0);
      69              : // This is an arbitrary build tag. Fine as a default / for testing purposes
       70              : // in case the environment variable is not set.
      71              : const BUILD_TAG_DEFAULT: &str = "latest";
      72              : /// Build tag/version of the compute node binaries/image. It's tricky and ugly
      73              : /// to pass it everywhere as a part of `ComputeNodeParams`, so we use a
      74              : /// global static variable.
      75            0 : pub static BUILD_TAG: Lazy<String> = Lazy::new(|| {
      76            0 :     option_env!("BUILD_TAG")
      77            0 :         .unwrap_or(BUILD_TAG_DEFAULT)
      78            0 :         .to_string()
      79            0 : });
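                       : 
                       : // Reading the tag elsewhere is just a deref of the lazy static, e.g. (a usage
                       : // sketch, not a fixed call site): info!("running build tag {}", *BUILD_TAG);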
      80              : const DEFAULT_INSTALLED_EXTENSIONS_COLLECTION_INTERVAL: u64 = 3600;
      81              : 
      82              : /// Static configuration params that don't change after startup. These mostly
      83              : /// come from the CLI args, or are derived from them.
      84              : #[derive(Clone, Debug)]
      85              : pub struct ComputeNodeParams {
      86              :     /// The ID of the compute
      87              :     pub compute_id: String,
      88              : 
       89              :     /// The `Url` type maintains proper escaping.
      90              :     pub connstr: url::Url,
      91              : 
       92              :     /// The name of the 'weak' superuser role, which we give to the users.
       93              :     /// It follows the allow-list approach, i.e., we take a standard role
       94              :     /// and grant it extra permissions with explicit GRANTs here and there
       95              :     /// and via core patches (see the sketch after this struct).
      96              :     pub privileged_role_name: String,
      97              : 
      98              :     pub resize_swap_on_bind: bool,
      99              :     pub set_disk_quota_for_fs: Option<String>,
     100              : 
     101              :     // VM monitor parameters
     102              :     #[cfg(target_os = "linux")]
     103              :     pub filecache_connstr: String,
     104              :     #[cfg(target_os = "linux")]
     105              :     pub cgroup: String,
     106              :     #[cfg(target_os = "linux")]
     107              :     pub vm_monitor_addr: String,
     108              : 
     109              :     pub pgdata: String,
     110              :     pub pgbin: String,
     111              :     pub pgversion: String,
     112              : 
     113              :     /// The port that the compute's external HTTP server listens on
     114              :     pub external_http_port: u16,
     115              :     /// The port that the compute's internal HTTP server listens on
     116              :     pub internal_http_port: u16,
     117              : 
      118              :     /// The address of the extension storage proxy gateway.
     119              :     pub remote_ext_base_url: Option<Url>,
     120              : 
     121              :     /// Interval for installed extensions collection
     122              :     pub installed_extensions_collection_interval: Arc<AtomicU64>,
     123              :     /// Hadron instance ID of the compute node.
     124              :     pub instance_id: Option<String>,
     125              :     /// Timeout of PG compute startup in the Init state.
     126              :     pub pg_init_timeout: Option<Duration>,
     127              :     // Path to the `pg_isready` binary.
     128              :     pub pg_isready_bin: String,
     129              :     pub lakebase_mode: bool,
     130              : 
     131              :     pub build_tag: String,
     132              :     pub control_plane_uri: Option<String>,
     133              :     pub config_path_test_only: Option<OsString>,
     134              : }
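                       : 
                       : // For illustration of the allow-list approach mentioned on `privileged_role_name`
                       : // (hypothetical role and database names; a sketch rather than the exact grants
                       : // the code performs):
                       : //
                       : //     GRANT ALL PRIVILEGES ON DATABASE mydb TO neon_superuser;
                       : //     GRANT pg_read_all_data TO neon_superuser;
                       : //
                       : // i.e., explicit GRANTs on top of a standard role instead of a true SUPERUSER.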
     135              : 
     136              : type TaskHandle = Mutex<Option<JoinHandle<()>>>;
     137              : 
     138              : /// Compute node info shared across several `compute_ctl` threads.
     139              : pub struct ComputeNode {
     140              :     pub params: ComputeNodeParams,
     141              : 
     142              :     // We connect to Postgres from many different places, so build configs once
      143              :     // and reuse them where needed. These are derived from 'params.connstr'.
     144              :     pub conn_conf: postgres::config::Config,
     145              :     pub tokio_conn_conf: tokio_postgres::config::Config,
     146              : 
      147              :     /// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
      148              :     /// To allow the HTTP API server to serve status requests while configuration
      149              :     /// is in progress, the lock should be held only for short periods of time to
      150              :     /// do a read/write, not for the whole configuration process; see the sketch after this struct.
     151              :     pub state: Mutex<ComputeState>,
     152              :     /// `Condvar` to allow notifying waiters about state changes.
     153              :     pub state_changed: Condvar,
     154              : 
      155              :     // Key: ext_archive_name; value: (download start time, download_completed flag).
     156              :     pub ext_download_progress: RwLock<HashMap<String, (DateTime<Utc>, bool)>>,
     157              :     pub compute_ctl_config: ComputeCtlConfig,
     158              : 
     159              :     /// Handle to the extension stats collection task
     160              :     extension_stats_task: TaskHandle,
     161              :     lfc_offload_task: TaskHandle,
     162              : }
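                       : 
                       : // A minimal sketch (hypothetical helper, not part of the real API) of the
                       : // short-lock-hold pattern described on `state` above: take the lock, do a
                       : // quick read or write, notify waiters via `state_changed`, and drop the
                       : // guard before any long-running work.
                       : #[allow(dead_code)]
                       : fn example_update_status(node: &ComputeNode, status: ComputeStatus) {
                       :     let mut state = node.state.lock().unwrap(); // held only for this short scope
                       :     state.set_status(status, &node.state_changed); // wakes threads blocked in `Condvar::wait`
                       : } // guard dropped here; configuration work continues without the lock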
     163              : 
     164              : // store some metrics about download size that might impact startup time
     165              : #[derive(Clone, Debug)]
     166              : pub struct RemoteExtensionMetrics {
     167              :     num_ext_downloaded: u64,
     168              :     largest_ext_size: u64,
     169              :     total_ext_download_size: u64,
     170              : }
     171              : 
     172              : #[derive(Clone, Debug)]
     173              : pub struct ComputeState {
     174              :     pub start_time: DateTime<Utc>,
     175              :     pub pg_start_time: Option<DateTime<Utc>>,
     176              :     pub status: ComputeStatus,
     177              :     /// Timestamp of the last Postgres activity. It could be `None` if
      178              :     /// the compute hasn't been used since start.
     179              :     pub last_active: Option<DateTime<Utc>>,
     180              :     pub error: Option<String>,
     181              : 
     182              :     /// Compute spec. This can be received from the CLI or - more likely -
     183              :     /// passed by the control plane with a /configure HTTP request.
     184              :     pub pspec: Option<ParsedSpec>,
     185              : 
     186              :     /// If the spec is passed by a /configure request, 'startup_span' is the
     187              :     /// /configure request's tracing span. The main thread enters it when it
     188              :     /// processes the compute startup, so that the compute startup is considered
     189              :     /// to be part of the /configure request for tracing purposes.
     190              :     ///
     191              :     /// If the request handling thread/task called startup_compute() directly,
     192              :     /// it would automatically be a child of the request handling span, and we
     193              :     /// wouldn't need this. But because we use the main thread to perform the
     194              :     /// startup, and the /configure task just waits for it to finish, we need to
     195              :     /// set up the span relationship ourselves.
     196              :     pub startup_span: Option<tracing::span::Span>,
     197              : 
     198              :     pub lfc_prewarm_state: LfcPrewarmState,
     199              :     pub lfc_prewarm_token: CancellationToken,
     200              :     pub lfc_offload_state: LfcOffloadState,
     201              : 
     202              :     /// WAL flush LSN that is set after terminating Postgres and syncing safekeepers if
      203              :     /// mode == ComputeMode::Primary. `None` otherwise.
     204              :     pub terminate_flush_lsn: Option<Lsn>,
     205              :     pub promote_state: Option<watch::Receiver<PromoteState>>,
     206              : 
     207              :     pub metrics: ComputeMetrics,
     208              : }
     209              : 
     210              : impl ComputeState {
     211            0 :     pub fn new() -> Self {
     212            0 :         Self {
     213            0 :             start_time: Utc::now(),
     214            0 :             pg_start_time: None,
     215            0 :             status: ComputeStatus::Empty,
     216            0 :             last_active: None,
     217            0 :             error: None,
     218            0 :             pspec: None,
     219            0 :             startup_span: None,
     220            0 :             metrics: ComputeMetrics::default(),
     221            0 :             lfc_prewarm_state: LfcPrewarmState::default(),
     222            0 :             lfc_offload_state: LfcOffloadState::default(),
     223            0 :             terminate_flush_lsn: None,
     224            0 :             promote_state: None,
     225            0 :             lfc_prewarm_token: CancellationToken::new(),
     226            0 :         }
     227            0 :     }
     228              : 
     229            0 :     pub fn set_status(&mut self, status: ComputeStatus, state_changed: &Condvar) {
     230            0 :         let prev = self.status;
     231            0 :         info!("Changing compute status from {} to {}", prev, status);
     232            0 :         self.status = status;
     233            0 :         state_changed.notify_all();
     234              : 
     235            0 :         COMPUTE_CTL_UP.reset();
     236            0 :         COMPUTE_CTL_UP
     237            0 :             .with_label_values(&[&BUILD_TAG, status.to_string().as_str()])
     238            0 :             .set(1);
     239            0 :     }
     240              : 
     241            0 :     pub fn set_failed_status(&mut self, err: anyhow::Error, state_changed: &Condvar) {
     242            0 :         self.error = Some(format!("{err:?}"));
     243            0 :         self.set_status(ComputeStatus::Failed, state_changed);
     244            0 :     }
     245              : }
     246              : 
     247              : impl Default for ComputeState {
     248            0 :     fn default() -> Self {
     249            0 :         Self::new()
     250            0 :     }
     251              : }
     252              : 
     253              : #[derive(Clone, Debug)]
     254              : pub struct ParsedSpec {
     255              :     pub spec: ComputeSpec,
     256              :     pub tenant_id: TenantId,
     257              :     pub timeline_id: TimelineId,
     258              :     pub pageserver_conninfo: PageserverConnectionInfo,
     259              :     pub safekeeper_connstrings: Vec<String>,
     260              :     pub storage_auth_token: Option<String>,
      261              :     /// K8s DNS name and port.
     262              :     pub endpoint_storage_addr: Option<String>,
     263              :     pub endpoint_storage_token: Option<String>,
     264              : }
     265              : 
     266              : impl ParsedSpec {
     267            1 :     pub fn validate(&self) -> Result<(), String> {
      268              :         // Only Primary nodes use safekeeper_connstrings, and at the moment
      269              :         // this method only validates that part of the spec.
     270            1 :         if self.spec.mode != ComputeMode::Primary {
     271            0 :             return Ok(());
     272            1 :         }
     273              : 
     274              :         // While it seems like a good idea to check for an odd number of entries in
     275              :         // the safekeepers connection string, changes to the list of safekeepers might
      276              :         // involve appending a new server to a list of 3, in which case a list of 4
     277              :         // entries is okay in production.
     278              :         //
     279              :         // Still we want unique entries, and at least one entry in the vector
     280            1 :         if self.safekeeper_connstrings.is_empty() {
     281            0 :             return Err(String::from("safekeeper_connstrings is empty"));
     282            1 :         }
     283              : 
     284              :         // check for uniqueness of the connection strings in the set
     285            1 :         let mut connstrings = self.safekeeper_connstrings.clone();
     286              : 
     287            1 :         connstrings.sort();
     288            1 :         let mut previous = &connstrings[0];
     289              : 
     290            2 :         for current in connstrings.iter().skip(1) {
     291              :             // duplicate entry?
     292            2 :             if current == previous {
     293            1 :                 return Err(format!(
     294            1 :                     "duplicate entry in safekeeper_connstrings: {current}!",
     295            1 :                 ));
     296            1 :             }
     297              : 
     298            1 :             previous = current;
     299              :         }
     300              : 
     301            0 :         Ok(())
     302            1 :     }
     303              : }
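                       : 
                       : // The sort-and-scan duplicate check above could equivalently use
                       : // `Itertools::all_unique` (itertools is already imported); a hypothetical sketch:
                       : #[allow(dead_code)]
                       : fn safekeeper_connstrings_are_unique(connstrings: &[String]) -> bool {
                       :     connstrings.iter().all_unique()
                       : }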
     304              : 
     305              : impl TryFrom<ComputeSpec> for ParsedSpec {
     306              :     type Error = anyhow::Error;
     307            1 :     fn try_from(spec: ComputeSpec) -> Result<Self, anyhow::Error> {
     308              :         // Extract the options from the spec file that are needed to connect to
     309              :         // the storage system.
     310              :         //
     311              :         // In compute specs generated by old control plane versions, the spec file might
     312              :         // be missing the `pageserver_connection_info` field. In that case, we need to dig
     313              :         // the pageserver connection info from the `pageserver_connstr` field instead, or
     314              :         // if that's missing too, from the GUC in the cluster.settings field.
     315            1 :         let mut pageserver_conninfo = spec.pageserver_connection_info.clone();
     316            1 :         if pageserver_conninfo.is_none() {
     317            1 :             if let Some(pageserver_connstr_field) = &spec.pageserver_connstring {
     318            0 :                 pageserver_conninfo = Some(PageserverConnectionInfo::from_connstr(
     319            0 :                     pageserver_connstr_field,
     320            0 :                     spec.shard_stripe_size,
     321            0 :                 )?);
     322            1 :             }
     323            0 :         }
     324            1 :         if pageserver_conninfo.is_none() {
     325            1 :             if let Some(guc) = spec.cluster.settings.find("neon.pageserver_connstring") {
     326            1 :                 let stripe_size = if let Some(guc) = spec.cluster.settings.find("neon.stripe_size")
     327              :                 {
     328            0 :                     Some(ShardStripeSize(u32::from_str(&guc)?))
     329              :                 } else {
     330            1 :                     None
     331              :                 };
     332            1 :                 pageserver_conninfo =
     333            1 :                     Some(PageserverConnectionInfo::from_connstr(&guc, stripe_size)?);
     334            0 :             }
     335            0 :         }
     336            1 :         let pageserver_conninfo = pageserver_conninfo.ok_or(anyhow::anyhow!(
     337            1 :             "pageserver connection information should be provided"
     338            0 :         ))?;
     339              : 
     340              :         // Similarly for safekeeper connection strings
     341            1 :         let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() {
     342            1 :             if matches!(spec.mode, ComputeMode::Primary) {
     343            1 :                 spec.cluster
     344            1 :                     .settings
     345            1 :                     .find("neon.safekeepers")
     346            1 :                     .ok_or(anyhow::anyhow!("safekeeper connstrings should be provided"))?
     347            1 :                     .split(',')
     348            4 :                     .map(|str| str.to_string())
     349            1 :                     .collect()
     350              :             } else {
     351            0 :                 vec![]
     352              :             }
     353              :         } else {
     354            0 :             spec.safekeeper_connstrings.clone()
     355              :         };
     356              : 
     357            1 :         let storage_auth_token = spec.storage_auth_token.clone();
     358            1 :         let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
     359            0 :             tenant_id
     360              :         } else {
     361            1 :             let guc = spec
     362            1 :                 .cluster
     363            1 :                 .settings
     364            1 :                 .find("neon.tenant_id")
     365            1 :                 .ok_or(anyhow::anyhow!("tenant id should be provided"))?;
     366            1 :             TenantId::from_str(&guc).context("invalid tenant id")?
     367              :         };
     368            1 :         let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
     369            0 :             timeline_id
     370              :         } else {
     371            1 :             let guc = spec
     372            1 :                 .cluster
     373            1 :                 .settings
     374            1 :                 .find("neon.timeline_id")
     375            1 :                 .ok_or(anyhow::anyhow!("timeline id should be provided"))?;
     376            1 :             TimelineId::from_str(&guc).context(anyhow::anyhow!("invalid timeline id"))?
     377              :         };
     378              : 
     379            1 :         let endpoint_storage_addr: Option<String> = spec
     380            1 :             .endpoint_storage_addr
     381            1 :             .clone()
     382            1 :             .or_else(|| spec.cluster.settings.find("neon.endpoint_storage_addr"));
     383            1 :         let endpoint_storage_token = spec
     384            1 :             .endpoint_storage_token
     385            1 :             .clone()
     386            1 :             .or_else(|| spec.cluster.settings.find("neon.endpoint_storage_token"));
     387              : 
     388            1 :         let res = ParsedSpec {
     389            1 :             spec,
     390            1 :             pageserver_conninfo,
     391            1 :             safekeeper_connstrings,
     392            1 :             storage_auth_token,
     393            1 :             tenant_id,
     394            1 :             timeline_id,
     395            1 :             endpoint_storage_addr,
     396            1 :             endpoint_storage_token,
     397            1 :         };
     398              : 
     399              :         // Now check validity of the parsed specification
     400            1 :         res.validate().map_err(anyhow::Error::msg)?;
     401            0 :         Ok(res)
     402            1 :     }
     403              : }
     404              : 
     405              : /// If we are a VM, returns a [`Command`] that will run in the `neon-postgres`
     406              : /// cgroup. Otherwise returns the default `Command::new(cmd)`
     407              : ///
     408              : /// This function should be used to start postgres, as it will start it in the
     409              : /// neon-postgres cgroup if we are a VM. This allows autoscaling to control
     410              : /// postgres' resource usage. The cgroup will exist in VMs because vm-builder
     411              : /// creates it during the sysinit phase of its inittab.
     412            0 : fn maybe_cgexec(cmd: &str) -> Command {
     413              :     // The cplane sets this env var for autoscaling computes.
      414              :     // Use `var_os` so we don't have to worry about the variable being valid
      415              :     // Unicode. That should never be a concern... but just in case.
     416            0 :     if env::var_os("AUTOSCALING").is_some() {
     417            0 :         let mut command = Command::new("cgexec");
     418            0 :         command.args(["-g", "memory:neon-postgres"]);
     419            0 :         command.arg(cmd);
     420            0 :         command
     421              :     } else {
     422            0 :         Command::new(cmd)
     423              :     }
     424            0 : }
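                       : 
                       : // A minimal usage sketch for `maybe_cgexec` (hypothetical binary and data paths;
                       : // the real startup code assembles its arguments elsewhere):
                       : #[allow(dead_code)]
                       : fn example_spawn_postgres() -> std::io::Result<std::process::Child> {
                       :     let mut pg = maybe_cgexec("/usr/local/bin/postgres");
                       :     pg.args(["-D", "/var/db/pgdata"]).stdin(Stdio::null());
                       :     // When AUTOSCALING is set, this runs postgres inside the neon-postgres cgroup.
                       :     pg.spawn()
                       : }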
     425              : 
     426              : struct PostgresHandle {
     427              :     postgres: std::process::Child,
     428              :     log_collector: JoinHandle<Result<()>>,
     429              : }
     430              : 
     431              : impl PostgresHandle {
     432              :     /// Return PID of the postgres (postmaster) process
     433            0 :     fn pid(&self) -> Pid {
     434            0 :         Pid::from_raw(self.postgres.id() as i32)
     435            0 :     }
     436              : }
     437              : 
     438              : struct StartVmMonitorResult {
     439              :     #[cfg(target_os = "linux")]
     440              :     token: tokio_util::sync::CancellationToken,
     441              :     #[cfg(target_os = "linux")]
     442              :     vm_monitor: Option<JoinHandle<Result<()>>>,
     443              : }
     444              : 
     445              : // BEGIN_HADRON
     446              : /// This function creates roles that are used by Databricks.
      447              : /// These roles do not need to be bootstrapped at PG Compute provisioning time.
      448              : /// The auth methods for these roles are configured in databricks_pg_hba.conf in the universe repository.
     449            0 : pub(crate) fn create_databricks_roles() -> Vec<String> {
     450            0 :     let roles = vec![
     451              :         // Role for prometheus_stats_exporter
     452            0 :         Role {
     453            0 :             name: "databricks_monitor".to_string(),
     454            0 :             // This uses "local" connection and auth method for that is "trust", so no password is needed.
     455            0 :             encrypted_password: None,
     456            0 :             options: Some(vec![GenericOption {
     457            0 :                 name: "IN ROLE pg_monitor".to_string(),
     458            0 :                 value: None,
     459            0 :                 vartype: "string".to_string(),
     460            0 :             }]),
     461            0 :         },
     462              :         // Role for brickstore control plane
     463            0 :         Role {
     464            0 :             name: "databricks_control_plane".to_string(),
     465            0 :             // Certificate user does not need password.
     466            0 :             encrypted_password: None,
     467            0 :             options: Some(vec![GenericOption {
     468            0 :                 name: "SUPERUSER".to_string(),
     469            0 :                 value: None,
     470            0 :                 vartype: "string".to_string(),
     471            0 :             }]),
     472            0 :         },
     473              :         // Role for brickstore httpgateway.
     474            0 :         Role {
     475            0 :             name: "databricks_gateway".to_string(),
     476            0 :             // Certificate user does not need password.
     477            0 :             encrypted_password: None,
     478            0 :             options: None,
     479            0 :         },
     480              :     ];
     481              : 
     482            0 :     roles
     483            0 :         .into_iter()
     484            0 :         .map(|role| {
     485            0 :             let query = format!(
     486            0 :                 r#"
     487            0 :                 DO $$
     488            0 :                     BEGIN
     489            0 :                         IF NOT EXISTS (
     490            0 :                             SELECT FROM pg_catalog.pg_roles WHERE rolname = '{}')
     491            0 :                         THEN
     492            0 :                             CREATE ROLE {} {};
     493            0 :                         END IF;
     494            0 :                     END
     495            0 :                 $$;"#,
     496              :                 role.name,
     497            0 :                 role.name.pg_quote(),
     498            0 :                 role.to_pg_options(),
     499              :             );
     500            0 :             query
     501            0 :         })
     502            0 :         .collect()
     503            0 : }
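                       : 
                       : // A sketch of applying the generated statements with the blocking `postgres`
                       : // client (hypothetical call site, shown only to illustrate the idempotency):
                       : #[allow(dead_code)]
                       : fn example_apply_databricks_roles(client: &mut postgres::Client) -> Result<()> {
                       :     for query in create_databricks_roles() {
                       :         // Each statement is an idempotent DO $$ ... $$ block, so re-running is safe.
                       :         client.batch_execute(&query)?;
                       :     }
                       :     Ok(())
                       : }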
     504              : 
     505              : /// Databricks-specific environment variables to be passed to the `postgres` sub-process.
     506              : pub struct DatabricksEnvVars {
     507              :     /// The Databricks "endpoint ID" of the compute instance. Used by `postgres` to check
     508              :     /// the token scopes of internal auth tokens.
     509              :     pub endpoint_id: String,
     510              :     /// Hostname of the Databricks workspace URL this compute instance belongs to.
     511              :     /// Used by postgres to verify Databricks PAT tokens.
     512              :     pub workspace_host: String,
     513              : 
     514              :     pub lakebase_mode: bool,
     515              : }
     516              : 
     517              : impl DatabricksEnvVars {
     518            0 :     pub fn new(
     519            0 :         compute_spec: &ComputeSpec,
     520            0 :         compute_id: Option<&String>,
     521            0 :         instance_id: Option<String>,
     522            0 :         lakebase_mode: bool,
     523            0 :     ) -> Self {
     524            0 :         let endpoint_id = if let Some(instance_id) = instance_id {
     525              :             // Use instance_id as endpoint_id if it is set. This code path is for PuPr model.
      526              :             // Use instance_id as endpoint_id if it is set. This code path is for the PuPr model.
     527              :         } else {
     528              :             // Use compute_id as endpoint_id if instance_id is not set. The code path is for PrPr model.
      529              :             // Use compute_id as endpoint_id if instance_id is not set. This code path is for the PrPr model.
      530              :             // compute_id is a string of the format "{endpoint_id}/{compute_idx}".
     531              :             // Panics if compute_id is not set or not in the expected format.
     532            0 :             compute_id.unwrap().split('/').next().unwrap().to_string()
     533              :         };
     534            0 :         let workspace_host = compute_spec
     535            0 :             .databricks_settings
     536            0 :             .as_ref()
     537            0 :             .map(|s| s.databricks_workspace_host.clone())
     538            0 :             .unwrap_or("".to_string());
     539            0 :         Self {
     540            0 :             endpoint_id,
     541            0 :             workspace_host,
     542            0 :             lakebase_mode,
     543            0 :         }
     544            0 :     }
     545              : 
     546              :     /// Constants for the names of Databricks-specific postgres environment variables.
     547              :     const DATABRICKS_ENDPOINT_ID_ENVVAR: &'static str = "DATABRICKS_ENDPOINT_ID";
     548              :     const DATABRICKS_WORKSPACE_HOST_ENVVAR: &'static str = "DATABRICKS_WORKSPACE_HOST";
     549              : 
     550              :     /// Convert DatabricksEnvVars to a list of string pairs that can be passed as env vars. Consumes `self`.
     551            0 :     pub fn to_env_var_list(self) -> Vec<(String, String)> {
     552            0 :         if !self.lakebase_mode {
      553              :             // In the Neon environment, we don't need to pass the env vars down to postgres.
     554            0 :             return vec![];
     555            0 :         }
     556            0 :         vec![
     557            0 :             (
     558            0 :                 Self::DATABRICKS_ENDPOINT_ID_ENVVAR.to_string(),
     559            0 :                 self.endpoint_id.clone(),
     560            0 :             ),
     561            0 :             (
     562            0 :                 Self::DATABRICKS_WORKSPACE_HOST_ENVVAR.to_string(),
     563            0 :                 self.workspace_host.clone(),
     564            0 :             ),
     565              :         ]
     566            0 :     }
     567              : }
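                       : 
                       : // A minimal sketch of consuming the env-var list when building the postgres
                       : // command (hypothetical call site; `Command::envs` accepts any iterator of
                       : // key/value pairs):
                       : #[allow(dead_code)]
                       : fn example_apply_databricks_env(cmd: &mut Command, vars: DatabricksEnvVars) {
                       :     cmd.envs(vars.to_env_var_list());
                       : }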
     568              : 
     569              : impl ComputeNode {
     570            0 :     pub fn new(params: ComputeNodeParams, config: ComputeConfig) -> Result<Self> {
     571            0 :         let connstr = params.connstr.as_str();
     572            0 :         let mut conn_conf = postgres::config::Config::from_str(connstr)
     573            0 :             .context("cannot build postgres config from connstr")?;
     574            0 :         let mut tokio_conn_conf = tokio_postgres::config::Config::from_str(connstr)
     575            0 :             .context("cannot build tokio postgres config from connstr")?;
     576              : 
     577              :         // Users can set some configuration parameters per database with
     578              :         //   ALTER DATABASE ... SET ...
     579              :         //
     580              :         // There are at least these parameters:
     581              :         //
     582              :         //   - role=some_other_role
     583              :         //   - default_transaction_read_only=on
     584              :         //   - statement_timeout=1, i.e., 1ms, which will cause most of the queries to fail
     585              :         //   - search_path=non_public_schema, this should be actually safe because
     586              :         //     we don't call any functions in user databases, but better to always reset
     587              :         //     it to public.
     588              :         //
     589              :         // that can affect `compute_ctl` and prevent it from properly configuring the database schema.
     590              :         // Unset them via connection string options before connecting to the database.
     591              :         // N.B. keep it in sync with `ZENITH_OPTIONS` in `get_maintenance_client()`.
     592              :         const EXTRA_OPTIONS: &str = "-c role=cloud_admin -c default_transaction_read_only=off -c search_path='' -c statement_timeout=0 -c pgaudit.log=none";
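                       :         // For example (hypothetical database name), a user-level setting like
                       :         //   ALTER DATABASE mydb SET statement_timeout = 1;
                       :         // would make nearly every configuration query fail; `-c statement_timeout=0`
                       :         // above overrides it for compute_ctl's own connections.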
     593            0 :         let options = match conn_conf.get_options() {
     594              :             // Allow the control plane to override any options set by the
     595              :             // compute
     596            0 :             Some(options) => format!("{EXTRA_OPTIONS} {options}"),
     597            0 :             None => EXTRA_OPTIONS.to_string(),
     598              :         };
     599            0 :         conn_conf.options(&options);
     600            0 :         tokio_conn_conf.options(&options);
     601              : 
     602            0 :         let mut new_state = ComputeState::new();
     603            0 :         if let Some(spec) = config.spec {
     604            0 :             let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow::anyhow!(msg))?;
     605            0 :             if params.lakebase_mode {
     606            0 :                 ComputeNode::set_spec(&params, &mut new_state, pspec);
     607            0 :             } else {
     608            0 :                 new_state.pspec = Some(pspec);
     609            0 :             }
     610            0 :         }
     611              : 
     612            0 :         Ok(ComputeNode {
     613            0 :             params,
     614            0 :             conn_conf,
     615            0 :             tokio_conn_conf,
     616            0 :             state: Mutex::new(new_state),
     617            0 :             state_changed: Condvar::new(),
     618            0 :             ext_download_progress: RwLock::new(HashMap::new()),
     619            0 :             compute_ctl_config: config.compute_ctl_config,
     620            0 :             extension_stats_task: Mutex::new(None),
     621            0 :             lfc_offload_task: Mutex::new(None),
     622            0 :         })
     623            0 :     }
     624              : 
     625              :     /// Top-level control flow of compute_ctl. Returns a process exit code we should
     626              :     /// exit with.
     627            0 :     pub fn run(self) -> Result<Option<i32>> {
     628            0 :         let this = Arc::new(self);
     629              : 
     630            0 :         let cli_spec = this.state.lock().unwrap().pspec.clone();
     631              : 
     632              :         // If this is a pooled VM, prewarm before starting HTTP server and becoming
     633              :         // available for binding. Prewarming helps Postgres start quicker later,
     634              :         // because QEMU will already have its memory allocated from the host, and
     635              :         // the necessary binaries will already be cached.
     636            0 :         if cli_spec.is_none() {
     637            0 :             this.prewarm_postgres_vm_memory()?;
     638            0 :         }
     639              : 
     640              :         // Set the up metric with Empty status before starting the HTTP server.
     641              :         // That way on the first metric scrape, an external observer will see us
     642              :         // as 'up' and 'empty' (unless the compute was started with a spec or
     643              :         // already configured by control plane).
     644            0 :         COMPUTE_CTL_UP
     645            0 :             .with_label_values(&[&BUILD_TAG, ComputeStatus::Empty.to_string().as_str()])
     646            0 :             .set(1);
     647              : 
     648              :         // Launch the external HTTP server first, so that we can serve control plane
     649              :         // requests while configuration is still in progress.
     650            0 :         crate::http::server::Server::External {
     651            0 :             port: this.params.external_http_port,
     652            0 :             config: this.compute_ctl_config.clone(),
     653            0 :             compute_id: this.params.compute_id.clone(),
     654            0 :             instance_id: this.params.instance_id.clone(),
     655            0 :         }
     656            0 :         .launch(&this);
     657              : 
     658              :         // The internal HTTP server could be launched later, but there isn't much
     659              :         // sense in waiting.
     660            0 :         crate::http::server::Server::Internal {
     661            0 :             port: this.params.internal_http_port,
     662            0 :         }
     663            0 :         .launch(&this);
     664              : 
     665              :         // If we got a spec from the CLI already, use that. Otherwise wait for the
     666              :         // control plane to pass it to us with a /configure HTTP request
     667            0 :         let pspec = if let Some(cli_spec) = cli_spec {
     668            0 :             cli_spec
     669              :         } else {
     670            0 :             this.wait_spec()?
     671              :         };
     672              : 
     673            0 :         launch_lsn_lease_bg_task_for_static(&this);
     674              : 
     675              :         // We have a spec, start the compute
     676            0 :         let mut delay_exit = false;
     677            0 :         let mut vm_monitor = None;
     678            0 :         let mut pg_process: Option<PostgresHandle> = None;
     679              : 
     680            0 :         match this.start_compute(&mut pg_process) {
     681            0 :             Ok(()) => {
     682            0 :                 // Success! Launch remaining services (just vm-monitor currently)
     683            0 :                 vm_monitor =
     684            0 :                     Some(this.start_vm_monitor(pspec.spec.disable_lfc_resizing.unwrap_or(false)));
     685            0 :             }
     686            0 :             Err(err) => {
     687              :                 // Something went wrong with the startup. Log it and expose the error to
     688              :                 // HTTP status requests.
     689            0 :                 error!("could not start the compute node: {:#}", err);
     690            0 :                 this.set_failed_status(err);
     691            0 :                 delay_exit = true;
     692              : 
     693              :                 // If the error happened after starting PostgreSQL, kill it
     694            0 :                 if let Some(ref pg_process) = pg_process {
     695            0 :                     kill(pg_process.pid(), Signal::SIGQUIT).ok();
     696            0 :                 }
     697              :             }
     698              :         }
     699              : 
     700              :         // If startup was successful, or it failed in the late stages,
     701              :         // PostgreSQL is now running. Wait until it exits.
     702            0 :         let exit_code = if let Some(pg_handle) = pg_process {
     703            0 :             let exit_status = this.wait_postgres(pg_handle);
     704            0 :             info!("Postgres exited with code {}, shutting down", exit_status);
     705            0 :             exit_status.code()
     706              :         } else {
     707            0 :             None
     708              :         };
     709              : 
     710            0 :         this.terminate_extension_stats_task();
     711            0 :         this.terminate_lfc_offload_task();
     712              : 
     713              :         // Terminate the vm_monitor so it releases the file watcher on
     714              :         // /sys/fs/cgroup/neon-postgres.
     715              :         // Note: the vm-monitor only runs on linux because it requires cgroups.
     716            0 :         if let Some(vm_monitor) = vm_monitor {
     717              :             cfg_if::cfg_if! {
     718              :                 if #[cfg(target_os = "linux")] {
     719              :                     // Kills all threads spawned by the monitor
     720            0 :                     vm_monitor.token.cancel();
     721            0 :                     if let Some(handle) = vm_monitor.vm_monitor {
     722            0 :                         // Kills the actual task running the monitor
     723            0 :                         handle.abort();
     724            0 :                     }
     725              :                 } else {
     726              :                     _ = vm_monitor; // appease unused lint on macOS
     727              :                 }
     728              :             }
     729            0 :         }
     730              : 
     731              :         // Reap the postgres process
     732            0 :         delay_exit |= this.cleanup_after_postgres_exit()?;
     733              : 
      734              :         // /terminate returns an LSN. If we don't sleep at all, the connection will break and we
      735              :         // won't get the result. If we sleep too much, tests will take significantly longer
      736              :         // and the GitHub Actions run will error out.
     737            0 :         let sleep_duration = if delay_exit {
     738            0 :             Duration::from_secs(30)
     739              :         } else {
     740            0 :             Duration::from_millis(300)
     741              :         };
     742              : 
     743              :         // If launch failed, keep serving HTTP requests for a while, so the cloud
     744              :         // control plane can get the actual error.
     745            0 :         if delay_exit {
     746            0 :             info!("giving control plane 30s to collect the error before shutdown");
     747            0 :         }
     748            0 :         std::thread::sleep(sleep_duration);
     749            0 :         Ok(exit_code)
     750            0 :     }
     751              : 
     752            0 :     pub fn wait_spec(&self) -> Result<ParsedSpec> {
     753            0 :         info!("no compute spec provided, waiting");
     754            0 :         let mut state = self.state.lock().unwrap();
     755            0 :         while state.status != ComputeStatus::ConfigurationPending {
     756            0 :             state = self.state_changed.wait(state).unwrap();
     757            0 :         }
     758              : 
     759            0 :         info!("got spec, continue configuration");
     760            0 :         let spec = state.pspec.as_ref().unwrap().clone();
     761              : 
      762              :         // Record how long we slept waiting for the spec.
     763            0 :         let now = Utc::now();
     764            0 :         state.metrics.wait_for_spec_ms = now
     765            0 :             .signed_duration_since(state.start_time)
     766            0 :             .to_std()
     767            0 :             .unwrap()
     768            0 :             .as_millis() as u64;
     769              : 
     770              :         // Reset start time, so that the total startup time that is calculated later will
     771              :         // not include the time that we waited for the spec.
     772            0 :         state.start_time = now;
     773              : 
     774            0 :         Ok(spec)
     775            0 :     }
     776              : 
     777              :     /// Start compute.
     778              :     ///
     779              :     /// Prerequisites:
     780              :     /// - the compute spec has been placed in self.state.pspec
     781              :     ///
     782              :     /// On success:
     783              :     /// - status is set to ComputeStatus::Running
     784              :     /// - self.running_postgres is set
     785              :     ///
     786              :     /// On error:
     787              :     /// - status is left in ComputeStatus::Init. The caller is responsible for setting it to Failed
     788              :     /// - if Postgres was started before the fatal error happened, self.running_postgres is
     789              :     ///   set. The caller is responsible for killing it.
     790              :     ///
     791              :     /// Note that this is in the critical path of a compute cold start. Keep this fast.
     792              :     /// Try to do things concurrently, to hide the latencies.
     793            0 :     fn start_compute(self: &Arc<Self>, pg_handle: &mut Option<PostgresHandle>) -> Result<()> {
     794              :         let compute_state: ComputeState;
     795              : 
     796              :         let start_compute_span;
     797              :         let _this_entered;
     798              :         {
     799            0 :             let mut state_guard = self.state.lock().unwrap();
     800              : 
     801              :             // Create a tracing span for the startup operation.
     802              :             //
     803              :             // We could otherwise just annotate the function with #[instrument], but if
     804              :             // we're being configured from a /configure HTTP request, we want the
     805              :             // startup to be considered part of the /configure request.
     806              :             //
     807              :             // Similarly, if a trace ID was passed in env variables, attach it to the span.
     808            0 :             start_compute_span = {
     809              :                 // Temporarily enter the parent span, so that the new span becomes its child.
     810            0 :                 if let Some(p) = state_guard.startup_span.take() {
     811            0 :                     let _parent_entered = p.entered();
     812            0 :                     tracing::info_span!("start_compute")
     813            0 :                 } else if let Some(otel_context) = startup_context_from_env() {
     814              :                     use tracing_opentelemetry::OpenTelemetrySpanExt;
     815            0 :                     let span = tracing::info_span!("start_compute");
     816            0 :                     span.set_parent(otel_context);
     817            0 :                     span
     818              :                 } else {
     819            0 :                     tracing::info_span!("start_compute")
     820              :                 }
     821              :             };
     822            0 :             _this_entered = start_compute_span.enter();
     823              : 
     824              :             // Hadron: Record postgres start time (used to enforce pg_init_timeout).
     825            0 :             state_guard.pg_start_time.replace(Utc::now());
     826              : 
     827            0 :             state_guard.set_status(ComputeStatus::Init, &self.state_changed);
     828            0 :             compute_state = state_guard.clone()
     829              :         }
     830              : 
     831            0 :         let pspec = compute_state.pspec.as_ref().expect("spec must be set");
     832            0 :         info!(
     833            0 :             "starting compute for project {}, operation {}, tenant {}, timeline {}, project {}, branch {}, endpoint {}, features {:?}, spec.remote_extensions {:?}",
     834            0 :             pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None"),
     835            0 :             pspec.spec.operation_uuid.as_deref().unwrap_or("None"),
     836              :             pspec.tenant_id,
     837              :             pspec.timeline_id,
     838            0 :             pspec.spec.project_id.as_deref().unwrap_or("None"),
     839            0 :             pspec.spec.branch_id.as_deref().unwrap_or("None"),
     840            0 :             pspec.spec.endpoint_id.as_deref().unwrap_or("None"),
     841              :             pspec.spec.features,
     842              :             pspec.spec.remote_extensions,
     843              :         );
     844              : 
     845              :         ////// PRE-STARTUP PHASE: things that need to be finished before we start the Postgres process
     846              : 
     847              :         // Collect all the tasks that must finish here
     848            0 :         let mut pre_tasks = tokio::task::JoinSet::new();
     849              : 
     850              :         // Make sure TLS certificates are properly loaded and in the right place.
     851            0 :         if self.compute_ctl_config.tls.is_some() {
     852            0 :             let this = self.clone();
     853            0 :             pre_tasks.spawn(async move {
     854            0 :                 this.watch_cert_for_changes().await;
     855              : 
     856            0 :                 Ok::<(), anyhow::Error>(())
     857            0 :             });
     858            0 :         }
     859              : 
     860            0 :         let tls_config = self.tls_config(&pspec.spec);
     861              : 
     862              :         // If there are any remote extensions in shared_preload_libraries, start downloading them
     863            0 :         if pspec.spec.remote_extensions.is_some() {
     864            0 :             let (this, spec) = (self.clone(), pspec.spec.clone());
     865            0 :             pre_tasks.spawn(async move {
     866            0 :                 this.download_preload_extensions(&spec)
     867            0 :                     .in_current_span()
     868            0 :                     .await
     869            0 :             });
     870            0 :         }
     871              : 
     872              :         // Prepare pgdata directory. This downloads the basebackup, among other things.
     873              :         {
     874            0 :             let (this, cs) = (self.clone(), compute_state.clone());
     875            0 :             pre_tasks.spawn_blocking_child(move || this.prepare_pgdata(&cs));
     876              :         }
     877              : 
     878              :         // Resize swap to the desired size if the compute spec says so
     879            0 :         if let (Some(size_bytes), true) =
     880            0 :             (pspec.spec.swap_size_bytes, self.params.resize_swap_on_bind)
     881              :         {
     882            0 :             pre_tasks.spawn_blocking_child(move || {
     883              :                 // To avoid 'swapoff' hitting postgres startup, we need to run resize-swap to completion
     884              :                 // *before* starting postgres.
     885              :                 //
     886              :                 // In theory, we could do this asynchronously if SkipSwapon was enabled for VMs, but this
     887              :                 // carries a risk of introducing hard-to-debug issues - e.g. if postgres sometimes gets
     888              :                 // OOM-killed during startup because swap wasn't available yet.
     889            0 :                 resize_swap(size_bytes).context("failed to resize swap")?;
     890            0 :                 let size_mib = size_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
     891            0 :                 info!(%size_bytes, %size_mib, "resized swap");
     892              : 
     893            0 :                 Ok::<(), anyhow::Error>(())
     894            0 :             });
     895            0 :         }
     896              : 
     897              :         // Set disk quota if the compute spec says so
     898            0 :         if let (Some(disk_quota_bytes), Some(disk_quota_fs_mountpoint)) = (
     899            0 :             pspec.spec.disk_quota_bytes,
     900            0 :             self.params.set_disk_quota_for_fs.as_ref(),
     901              :         ) {
     902            0 :             let disk_quota_fs_mountpoint = disk_quota_fs_mountpoint.clone();
     903            0 :             pre_tasks.spawn_blocking_child(move || {
     904            0 :                 set_disk_quota(disk_quota_bytes, &disk_quota_fs_mountpoint)
     905            0 :                     .context("failed to set disk quota")?;
      906            0 :                 let size_mib = disk_quota_bytes as f32 / (1 << 20) as f32; // for more readable display.
     907            0 :                 info!(%disk_quota_bytes, %size_mib, "set disk quota");
     908              : 
     909            0 :                 Ok::<(), anyhow::Error>(())
     910            0 :             });
     911            0 :         }
     912              : 
     913              :         // tune pgbouncer
     914            0 :         if let Some(pgbouncer_settings) = &pspec.spec.pgbouncer_settings {
     915            0 :             info!("tuning pgbouncer");
     916              : 
     917            0 :             let pgbouncer_settings = pgbouncer_settings.clone();
     918            0 :             let tls_config = tls_config.clone();
     919              : 
     920              :             // Spawn a background task to do the tuning,
     921              :             // so that we don't block the main thread that starts Postgres.
     922            0 :             let _handle = tokio::spawn(async move {
     923            0 :                 let res = tune_pgbouncer(pgbouncer_settings, tls_config).await;
     924            0 :                 if let Err(err) = res {
     925            0 :                     error!("error while tuning pgbouncer: {err:?}");
     926              :                     // Continue with the startup anyway
     927            0 :                 }
     928            0 :             });
     929            0 :         }
     930              : 
     931              :         // configure local_proxy
     932            0 :         if let Some(local_proxy) = &pspec.spec.local_proxy_config {
     933            0 :             info!("configuring local_proxy");
     934              : 
     935              :             // Spawn a background task to do the configuration,
     936              :             // so that we don't block the main thread that starts Postgres.
     937              : 
     938            0 :             let mut local_proxy = local_proxy.clone();
     939            0 :             local_proxy.tls = tls_config.clone();
     940              : 
     941            0 :             let _handle = tokio::spawn(async move {
     942            0 :                 if let Err(err) = local_proxy::configure(&local_proxy) {
     943            0 :                     error!("error while configuring local_proxy: {err:?}");
     944              :                     // Continue with the startup anyway
     945            0 :                 }
     946            0 :             });
     947            0 :         }
     948              : 
     949              :         // Configure and start rsyslog for compliance audit logging
     950            0 :         match pspec.spec.audit_log_level {
     951              :             ComputeAudit::Hipaa | ComputeAudit::Extended | ComputeAudit::Full => {
     952            0 :                 let remote_tls_endpoint =
     953            0 :                     std::env::var("AUDIT_LOGGING_TLS_ENDPOINT").unwrap_or("".to_string());
     954            0 :                 let remote_plain_endpoint =
     955            0 :                     std::env::var("AUDIT_LOGGING_ENDPOINT").unwrap_or("".to_string());
     956              : 
     957            0 :                 if remote_plain_endpoint.is_empty() && remote_tls_endpoint.is_empty() {
     958            0 :                     anyhow::bail!(
     959            0 :                         "AUDIT_LOGGING_ENDPOINT and AUDIT_LOGGING_TLS_ENDPOINT are both empty"
     960              :                     );
     961            0 :                 }
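                       :                 // Example values (hypothetical; the exact format is whatever
                       :                 // configure_audit_rsyslog() expects, typically host:port):
                       :                 //   AUDIT_LOGGING_ENDPOINT=audit-collector.internal:514
                       :                 //   AUDIT_LOGGING_TLS_ENDPOINT=audit-collector.internal:6514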
     962              : 
     963            0 :                 let log_directory_path = Path::new(&self.params.pgdata).join("log");
     964            0 :                 let log_directory_path = log_directory_path.to_string_lossy().to_string();
     965              : 
      966              :                 // Add project_id and endpoint_id to identify the logs.
      967              :                 //
      968              :                 // These IDs are passed down from the control plane.
     969            0 :                 let endpoint_id = pspec.spec.endpoint_id.as_deref().unwrap_or("");
     970            0 :                 let project_id = pspec.spec.project_id.as_deref().unwrap_or("");
     971              : 
     972            0 :                 configure_audit_rsyslog(
     973            0 :                     log_directory_path.clone(),
     974            0 :                     endpoint_id,
     975            0 :                     project_id,
     976            0 :                     &remote_plain_endpoint,
     977            0 :                     &remote_tls_endpoint,
     978            0 :                 )?;
     979              : 
     980              :                 // Launch a background task to clean up the audit logs
     981            0 :                 launch_pgaudit_gc(log_directory_path);
     982              :             }
     983            0 :             _ => {}
     984              :         }
     985              : 
     986              :         // Configure and start rsyslog for Postgres logs export
     987            0 :         let conf = PostgresLogsRsyslogConfig::new(pspec.spec.logs_export_host.as_deref());
     988            0 :         configure_postgres_logs_export(conf)?;
     989              : 
     990              :         // Launch remaining service threads
     991            0 :         let _monitor_handle = launch_monitor(self);
     992            0 :         let _configurator_handle = launch_configurator(self);
     993              : 
     994              :         // Wait for all the pre-tasks to finish before starting postgres
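                       :         // `res??` unwraps twice: the outer `?` propagates a JoinError (task
                       :         // panic or cancellation), the inner one the task's own anyhow::Error.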
     995            0 :         let rt = tokio::runtime::Handle::current();
     996            0 :         while let Some(res) = rt.block_on(pre_tasks.join_next()) {
     997            0 :             res??;
     998              :         }
     999              : 
    1000              :         ////// START POSTGRES
    1001            0 :         let start_time = Utc::now();
    1002            0 :         let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
    1003            0 :         let postmaster_pid = pg_process.pid();
    1004            0 :         *pg_handle = Some(pg_process);
    1005              : 
    1006              :         // If this is a primary endpoint, perform some post-startup configuration before
    1007              :         // opening it up for the world.
    1008            0 :         let config_time = Utc::now();
    1009            0 :         if pspec.spec.mode == ComputeMode::Primary {
    1010            0 :             self.configure_as_primary(&compute_state)?;
    1011              : 
    1012            0 :             let conf = self.get_tokio_conn_conf(None);
    1013            0 :             tokio::task::spawn(async {
    1014            0 :                 let _ = installed_extensions(conf).await;
    1015            0 :             });
    1016            0 :         }
    1017              : 
    1018              :         // All done!
    1019            0 :         let startup_end_time = Utc::now();
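                       :         // start_postgres_ms covers spawning Postgres and waiting for it to
                       :         // accept connections, config_ms covers the primary configuration
                       :         // above, and total_startup_ms is measured from compute_state.start_time,
                       :         // so it also includes all the pre-startup tasks.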
    1020            0 :         let metrics = {
    1021            0 :             let mut state = self.state.lock().unwrap();
    1022            0 :             state.metrics.start_postgres_ms = config_time
    1023            0 :                 .signed_duration_since(start_time)
    1024            0 :                 .to_std()
    1025            0 :                 .unwrap()
    1026            0 :                 .as_millis() as u64;
    1027            0 :             state.metrics.config_ms = startup_end_time
    1028            0 :                 .signed_duration_since(config_time)
    1029            0 :                 .to_std()
    1030            0 :                 .unwrap()
    1031            0 :                 .as_millis() as u64;
    1032            0 :             state.metrics.total_startup_ms = startup_end_time
    1033            0 :                 .signed_duration_since(compute_state.start_time)
    1034            0 :                 .to_std()
    1035            0 :                 .unwrap()
    1036            0 :                 .as_millis() as u64;
    1037            0 :             state.metrics.clone()
    1038              :         };
    1039            0 :         self.set_status(ComputeStatus::Running);
    1040              : 
    1041              :         // Log metrics so that we can search for slow operations in logs
    1042            0 :         info!(?metrics, postmaster_pid = %postmaster_pid, "compute start finished");
    1043              : 
    1044            0 :         self.spawn_extension_stats_task();
    1045              : 
    1046            0 :         if pspec.spec.autoprewarm {
    1047            0 :             info!("autoprewarming on startup as requested");
    1048            0 :             self.prewarm_lfc(None);
    1049            0 :         }
    1050            0 :         if let Some(seconds) = pspec.spec.offload_lfc_interval_seconds {
    1051            0 :             self.spawn_lfc_offload_task(Duration::from_secs(seconds.into()));
    1052            0 :         };
    1053            0 :         Ok(())
    1054            0 :     }
    1055              : 
    1056              :     #[instrument(skip_all)]
    1057              :     async fn download_preload_extensions(&self, spec: &ComputeSpec) -> Result<()> {
    1058              :         let remote_extensions = if let Some(remote_extensions) = &spec.remote_extensions {
    1059              :             remote_extensions
    1060              :         } else {
    1061              :             return Ok(());
    1062              :         };
    1063              : 
    1064              :         // First, create control files for all available extensions
    1065              :         extension_server::create_control_files(remote_extensions, &self.params.pgbin);
    1066              : 
    1067              :         let library_load_start_time = Utc::now();
    1068              :         let remote_ext_metrics = self.prepare_preload_libraries(spec).await?;
    1069              : 
    1070              :         let library_load_time = Utc::now()
    1071              :             .signed_duration_since(library_load_start_time)
    1072              :             .to_std()
    1073              :             .unwrap()
    1074              :             .as_millis() as u64;
    1075              :         let mut state = self.state.lock().unwrap();
    1076              :         state.metrics.load_ext_ms = library_load_time;
    1077              :         state.metrics.num_ext_downloaded = remote_ext_metrics.num_ext_downloaded;
    1078              :         state.metrics.largest_ext_size = remote_ext_metrics.largest_ext_size;
    1079              :         state.metrics.total_ext_download_size = remote_ext_metrics.total_ext_download_size;
    1080              :         info!(
    1081              :             "Loading shared_preload_libraries took {:?}ms",
    1082              :             library_load_time
    1083              :         );
    1084              :         info!("{:?}", remote_ext_metrics);
    1085              : 
    1086              :         Ok(())
    1087              :     }
    1088              : 
     1089              :     /// Start the vm-monitor if directed to. The vm-monitor only runs on Linux
    1090              :     /// because it requires cgroups.
    1091            0 :     fn start_vm_monitor(&self, disable_lfc_resizing: bool) -> StartVmMonitorResult {
    1092              :         cfg_if::cfg_if! {
    1093              :             if #[cfg(target_os = "linux")] {
    1094              :                 use std::env;
    1095              :                 use tokio_util::sync::CancellationToken;
    1096              : 
    1097              :                 // This token is used internally by the monitor to clean up all threads
    1098            0 :                 let token = CancellationToken::new();
    1099              : 
    1100              :                 // don't pass postgres connection string to vm-monitor if we don't want it to resize LFC
    1101            0 :                 let pgconnstr = if disable_lfc_resizing {
    1102            0 :                     None
    1103              :                 } else {
    1104            0 :                     Some(self.params.filecache_connstr.clone())
    1105              :                 };
    1106              : 
    1107            0 :                 let vm_monitor = if env::var_os("AUTOSCALING").is_some() {
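                       :                     // The Args are Box::leak'ed to get the 'static reference the
                       :                     // monitor needs; it runs for the rest of the process lifetime,
                       :                     // so the leak is deliberate.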
    1108            0 :                     let vm_monitor = tokio::spawn(vm_monitor::start(
    1109            0 :                         Box::leak(Box::new(vm_monitor::Args {
    1110            0 :                             cgroup: Some(self.params.cgroup.clone()),
    1111            0 :                             pgconnstr,
    1112            0 :                             addr: self.params.vm_monitor_addr.clone(),
    1113            0 :                         })),
    1114            0 :                         token.clone(),
    1115              :                     ));
    1116            0 :                     Some(vm_monitor)
    1117              :                 } else {
    1118            0 :                     None
    1119              :                 };
    1120            0 :                 StartVmMonitorResult { token, vm_monitor }
    1121              :             } else {
    1122              :                 _ = disable_lfc_resizing; // appease unused lint on macOS
    1123              :                 StartVmMonitorResult { }
    1124              :             }
    1125              :         }
    1126            0 :     }
    1127              : 
    1128            0 :     fn cleanup_after_postgres_exit(&self) -> Result<bool> {
    1129              :         // Maybe sync safekeepers again, to speed up next startup
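                       :         // (If all safekeepers agree on the flushed WAL at shutdown, the next
                       :         // start can likely take the check_safekeepers_synced() fast path
                       :         // instead of a full `postgres --sync-safekeepers` run.)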
    1130            0 :         let compute_state = self.state.lock().unwrap().clone();
    1131            0 :         let pspec = compute_state.pspec.as_ref().expect("spec must be set");
    1132            0 :         let lsn = if matches!(pspec.spec.mode, compute_api::spec::ComputeMode::Primary) {
    1133            0 :             info!("syncing safekeepers on shutdown");
    1134            0 :             let storage_auth_token = pspec.storage_auth_token.clone();
    1135            0 :             let lsn = self.sync_safekeepers(storage_auth_token)?;
    1136            0 :             info!(%lsn, "synced safekeepers");
    1137            0 :             Some(lsn)
    1138              :         } else {
    1139            0 :             info!("not primary, not syncing safekeepers");
    1140            0 :             None
    1141              :         };
    1142              : 
    1143            0 :         let mut state = self.state.lock().unwrap();
    1144            0 :         state.terminate_flush_lsn = lsn;
    1145              : 
    1146            0 :         let delay_exit = state.status == ComputeStatus::TerminationPendingFast;
    1147            0 :         if state.status == ComputeStatus::TerminationPendingFast
    1148            0 :             || state.status == ComputeStatus::TerminationPendingImmediate
    1149              :         {
    1150            0 :             info!(
    1151            0 :                 "Changing compute status from {} to {}",
    1152            0 :                 state.status,
    1153              :                 ComputeStatus::Terminated
    1154              :             );
    1155            0 :             state.status = ComputeStatus::Terminated;
    1156            0 :             self.state_changed.notify_all();
    1157            0 :         }
    1158            0 :         drop(state);
    1159              : 
    1160            0 :         if let Err(err) = self.check_for_core_dumps() {
    1161            0 :             error!("error while checking for core dumps: {err:?}");
    1162            0 :         }
    1163              : 
    1164            0 :         Ok(delay_exit)
    1165            0 :     }
    1166              : 
    1167              :     /// Check that compute node has corresponding feature enabled.
    1168            0 :     pub fn has_feature(&self, feature: ComputeFeature) -> bool {
    1169            0 :         let state = self.state.lock().unwrap();
    1170              : 
    1171            0 :         if let Some(s) = state.pspec.as_ref() {
    1172            0 :             s.spec.features.contains(&feature)
    1173              :         } else {
    1174            0 :             false
    1175              :         }
    1176            0 :     }
    1177              : 
    1178            0 :     pub fn set_status(&self, status: ComputeStatus) {
    1179            0 :         let mut state = self.state.lock().unwrap();
    1180            0 :         state.set_status(status, &self.state_changed);
    1181            0 :     }
    1182              : 
    1183            0 :     pub fn set_failed_status(&self, err: anyhow::Error) {
    1184            0 :         let mut state = self.state.lock().unwrap();
    1185            0 :         state.set_failed_status(err, &self.state_changed);
    1186            0 :     }
    1187              : 
    1188            0 :     pub fn get_status(&self) -> ComputeStatus {
    1189            0 :         self.state.lock().unwrap().status
    1190            0 :     }
    1191              : 
    1192            0 :     pub fn get_timeline_id(&self) -> Option<TimelineId> {
    1193            0 :         self.state
    1194            0 :             .lock()
    1195            0 :             .unwrap()
    1196            0 :             .pspec
    1197            0 :             .as_ref()
    1198            0 :             .map(|s| s.timeline_id)
    1199            0 :     }
    1200              : 
     1201              :     // Remove the `pgdata` directory and create it again with the right permissions.
    1202            0 :     fn create_pgdata(&self) -> Result<()> {
     1203              :         // Ignore the removal error; most likely it is a 'No such file or directory (os error 2)'.
     1204              :         // If it is something else, create_dir() will error out anyway.
    1205            0 :         let pgdata = &self.params.pgdata;
    1206            0 :         let _ok = fs::remove_dir_all(pgdata);
    1207            0 :         if self.params.lakebase_mode {
     1208            0 :             // Ignore the creation error if the directory already exists (e.g. because it was mounted ahead of time).
     1209            0 :             // If it is something else, Postgres startup will error out anyway.
    1210            0 :             let _ok = fs::create_dir(pgdata);
    1211            0 :         } else {
    1212            0 :             fs::create_dir(pgdata)?;
    1213              :         }
    1214              : 
    1215            0 :         fs::set_permissions(pgdata, fs::Permissions::from_mode(0o700))?;
    1216              : 
    1217            0 :         Ok(())
    1218            0 :     }
    1219              : 
    1220              :     /// Fetches a basebackup from the Pageserver using the compute state's Pageserver connstring and
     1221              :     /// unarchives it into the `pgdata` directory, replacing any existing contents.
    1222              :     #[instrument(skip_all, fields(%lsn))]
    1223              :     fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
    1224              :         let spec = compute_state.pspec.as_ref().expect("spec must be set");
    1225              : 
    1226              :         let started = Instant::now();
    1227              :         let (connected, size) = match spec.pageserver_conninfo.prefer_protocol {
    1228              :             PageserverProtocol::Grpc => self.try_get_basebackup_grpc(spec, lsn)?,
    1229              :             PageserverProtocol::Libpq => self.try_get_basebackup_libpq(spec, lsn)?,
    1230              :         };
    1231              : 
    1232              :         self.fix_zenith_signal_neon_signal()?;
    1233              : 
    1234              :         let mut state = self.state.lock().unwrap();
    1235              :         state.metrics.pageserver_connect_micros =
    1236              :             connected.duration_since(started).as_micros() as u64;
    1237              :         state.metrics.basebackup_bytes = size as u64;
    1238              :         state.metrics.basebackup_ms = started.elapsed().as_millis() as u64;
    1239              : 
    1240              :         Ok(())
    1241              :     }
    1242              : 
     1243              :     /// Move the Zenith signal file to the Neon signal file location.
     1244              :     /// This keeps the compute compatible with older pageservers that don't yet
    1245              :     /// know about the Zenith->Neon rename.
    1246            0 :     fn fix_zenith_signal_neon_signal(&self) -> Result<()> {
    1247            0 :         let datadir = Path::new(&self.params.pgdata);
    1248              : 
    1249            0 :         let neonsig = datadir.join("neon.signal");
    1250              : 
    1251            0 :         if neonsig.is_file() {
    1252            0 :             return Ok(());
    1253            0 :         }
    1254              : 
    1255            0 :         let zenithsig = datadir.join("zenith.signal");
    1256              : 
    1257            0 :         if zenithsig.is_file() {
    1258            0 :             fs::copy(zenithsig, neonsig)?;
    1259            0 :         }
    1260              : 
    1261            0 :         Ok(())
    1262            0 :     }
    1263              : 
    1264              :     /// Fetches a basebackup via gRPC. The connstring must use grpc://. Returns the timestamp when
    1265              :     /// the connection was established, and the (compressed) size of the basebackup.
    1266            0 :     fn try_get_basebackup_grpc(&self, spec: &ParsedSpec, lsn: Lsn) -> Result<(Instant, usize)> {
    1267            0 :         let shard0_index = ShardIndex {
    1268            0 :             shard_number: ShardNumber(0),
    1269            0 :             shard_count: spec.pageserver_conninfo.shard_count,
    1270            0 :         };
    1271            0 :         let shard0_url = spec
    1272            0 :             .pageserver_conninfo
    1273            0 :             .shard_url(ShardNumber(0), PageserverProtocol::Grpc)?
    1274            0 :             .to_owned();
    1275            0 :         let (reader, connected) = tokio::runtime::Handle::current().block_on(async move {
    1276            0 :             let mut client = page_api::Client::connect(
    1277            0 :                 shard0_url,
    1278            0 :                 spec.tenant_id,
    1279            0 :                 spec.timeline_id,
    1280            0 :                 shard0_index,
    1281            0 :                 spec.storage_auth_token.clone(),
    1282            0 :                 None, // NB: base backups use payload compression
    1283            0 :             )
    1284            0 :             .await?;
    1285            0 :             let connected = Instant::now();
    1286            0 :             let reader = client
    1287            0 :                 .get_base_backup(page_api::GetBaseBackupRequest {
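                       :                     // Lsn(0) means "unspecified": omit the LSN so the pageserver
                       :                     // serves the basebackup at its latest LSN.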
    1288            0 :                     lsn: (lsn != Lsn(0)).then_some(lsn),
    1289            0 :                     compression: BaseBackupCompression::Gzip,
    1290            0 :                     replica: spec.spec.mode != ComputeMode::Primary,
    1291            0 :                     full: false,
    1292            0 :                 })
    1293            0 :                 .await?;
    1294            0 :             anyhow::Ok((reader, connected))
    1295            0 :         })?;
    1296              : 
    1297            0 :         let mut reader = MeasuredReader::new(tokio_util::io::SyncIoBridge::new(reader));
    1298              : 
    1299              :         // Set `ignore_zeros` so that unpack() reads the entire stream and doesn't just stop at the
    1300              :         // end-of-archive marker. If the server errors, the tar::Builder drop handler will write an
    1301              :         // end-of-archive marker before the error is emitted, and we would not see the error.
    1302            0 :         let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut reader));
    1303            0 :         ar.set_ignore_zeros(true);
    1304            0 :         ar.unpack(&self.params.pgdata)?;
    1305              : 
    1306            0 :         Ok((connected, reader.get_byte_count()))
    1307            0 :     }
    1308              : 
    1309              :     /// Fetches a basebackup via libpq. The connstring must use postgresql://. Returns the timestamp
    1310              :     /// when the connection was established, and the (compressed) size of the basebackup.
    1311            0 :     fn try_get_basebackup_libpq(&self, spec: &ParsedSpec, lsn: Lsn) -> Result<(Instant, usize)> {
    1312            0 :         let shard0_connstr = spec
    1313            0 :             .pageserver_conninfo
    1314            0 :             .shard_url(ShardNumber(0), PageserverProtocol::Libpq)?;
    1315            0 :         let mut config = postgres::Config::from_str(shard0_connstr)?;
    1316              : 
    1317              :         // Use the storage auth token from the config file, if given.
    1318              :         // Note: this overrides any password set in the connection string.
    1319            0 :         if let Some(storage_auth_token) = &spec.storage_auth_token {
    1320            0 :             info!("Got storage auth token from spec file");
    1321            0 :             config.password(storage_auth_token);
    1322              :         } else {
    1323            0 :             info!("Storage auth token not set");
    1324              :         }
    1325              : 
    1326            0 :         config.application_name("compute_ctl");
    1327            0 :         config.options(&format!(
    1328            0 :             "-c neon.compute_mode={}",
    1329            0 :             spec.spec.mode.to_type_str()
    1330            0 :         ));
    1331              : 
    1332              :         // Connect to pageserver
    1333            0 :         let mut client = config.connect(NoTls)?;
    1334            0 :         let connected = Instant::now();
    1335              : 
    1336            0 :         let basebackup_cmd = match lsn {
    1337              :             Lsn(0) => {
    1338            0 :                 if spec.spec.mode != ComputeMode::Primary {
    1339            0 :                     format!(
    1340            0 :                         "basebackup {} {} --gzip --replica",
    1341              :                         spec.tenant_id, spec.timeline_id
    1342              :                     )
    1343              :                 } else {
    1344            0 :                     format!("basebackup {} {} --gzip", spec.tenant_id, spec.timeline_id)
    1345              :                 }
    1346              :             }
    1347              :             _ => {
    1348            0 :                 if spec.spec.mode != ComputeMode::Primary {
    1349            0 :                     format!(
    1350            0 :                         "basebackup {} {} {} --gzip --replica",
    1351              :                         spec.tenant_id, spec.timeline_id, lsn
    1352              :                     )
    1353              :                 } else {
    1354            0 :                     format!(
    1355            0 :                         "basebackup {} {} {} --gzip",
    1356              :                         spec.tenant_id, spec.timeline_id, lsn
    1357              :                     )
    1358              :                 }
    1359              :             }
    1360              :         };
    1361              : 
    1362            0 :         let copyreader = client.copy_out(basebackup_cmd.as_str())?;
    1363            0 :         let mut measured_reader = MeasuredReader::new(copyreader);
    1364            0 :         let mut bufreader = std::io::BufReader::new(&mut measured_reader);
    1365              : 
     1366              :         // Read the archive directly from the `CopyOutReader`.
     1367              :         //
     1368              :         // Set `ignore_zeros` so that unpack() reads all the Copy data and
     1369              :         // doesn't stop at the end-of-archive marker. The tar::Builder drop
     1370              :         // handler writes an end-of-archive marker before emitting any error,
     1371              :         // so if the server sent an Error after finishing the tarball we would
     1372              :         // otherwise not notice it.
    1373            0 :         let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut bufreader));
    1374            0 :         ar.set_ignore_zeros(true);
    1375            0 :         ar.unpack(&self.params.pgdata)?;
    1376              : 
    1377            0 :         Ok((connected, measured_reader.get_byte_count()))
    1378            0 :     }
    1379              : 
    1380              :     // Gets the basebackup in a retry loop
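                       :     //
                       :     // The retry delay starts at 500 ms and grows by 1.5x per attempt
                       :     // (500, 750, 1125, ... ms), adding up to roughly a minute across the
                       :     // default 10 attempts.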
    1381              :     #[instrument(skip_all, fields(%lsn))]
    1382              :     pub fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
    1383              :         let mut retry_period_ms = 500.0;
    1384              :         let mut attempts = 0;
    1385              :         const DEFAULT_ATTEMPTS: u16 = 10;
    1386              :         #[cfg(feature = "testing")]
    1387              :         let max_attempts = if let Ok(v) = env::var("NEON_COMPUTE_TESTING_BASEBACKUP_RETRIES") {
    1388              :             u16::from_str(&v).unwrap()
    1389              :         } else {
    1390              :             DEFAULT_ATTEMPTS
    1391              :         };
    1392              :         #[cfg(not(feature = "testing"))]
    1393              :         let max_attempts = DEFAULT_ATTEMPTS;
    1394              :         loop {
    1395              :             let result = self.try_get_basebackup(compute_state, lsn);
    1396              :             match result {
    1397              :                 Ok(_) => {
    1398              :                     return result;
    1399              :                 }
    1400              :                 Err(ref e) if attempts < max_attempts => {
    1401              :                     warn!("Failed to get basebackup: {e:?} (attempt {attempts}/{max_attempts})");
    1402              :                     std::thread::sleep(std::time::Duration::from_millis(retry_period_ms as u64));
    1403              :                     retry_period_ms *= 1.5;
    1404              :                 }
    1405              :                 Err(_) => {
    1406              :                     return result;
    1407              :                 }
    1408              :             }
    1409              :             attempts += 1;
    1410              :         }
    1411              :     }
    1412              : 
    1413            0 :     pub async fn check_safekeepers_synced_async(
    1414            0 :         &self,
    1415            0 :         compute_state: &ComputeState,
    1416            0 :     ) -> Result<Option<Lsn>> {
    1417              :         // Construct a connection config for each safekeeper
    1418            0 :         let pspec: ParsedSpec = compute_state
    1419            0 :             .pspec
    1420            0 :             .as_ref()
    1421            0 :             .expect("spec must be set")
    1422            0 :             .clone();
    1423            0 :         let sk_connstrs: Vec<String> = pspec.safekeeper_connstrings.clone();
    1424            0 :         let sk_configs = sk_connstrs.into_iter().map(|connstr| {
    1425              :             // Format connstr
    1426            0 :             let id = connstr.clone();
    1427            0 :             let connstr = format!("postgresql://no_user@{connstr}");
    1428            0 :             let options = format!(
    1429            0 :                 "-c timeline_id={} tenant_id={}",
    1430              :                 pspec.timeline_id, pspec.tenant_id
    1431              :             );
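                       :             // e.g. "postgresql://no_user@sk-1.example:5454" with options
                       :             // "-c timeline_id=<timeline> tenant_id=<tenant>" (host and port
                       :             // are illustrative).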
    1432              : 
    1433              :             // Construct client
    1434            0 :             let mut config = tokio_postgres::Config::from_str(&connstr).unwrap();
    1435            0 :             config.options(&options);
    1436            0 :             if let Some(storage_auth_token) = pspec.storage_auth_token.clone() {
    1437            0 :                 config.password(storage_auth_token);
    1438            0 :             }
    1439              : 
    1440            0 :             (id, config)
    1441            0 :         });
    1442              : 
    1443              :         // Create task set to query all safekeepers
    1444            0 :         let mut tasks = FuturesUnordered::new();
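                       :         // Majority quorum, e.g. 2 of 3 safekeepers, or 3 of 5.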
    1445            0 :         let quorum = sk_configs.len() / 2 + 1;
    1446            0 :         for (id, config) in sk_configs {
    1447            0 :             let timeout = tokio::time::Duration::from_millis(100);
    1448            0 :             let task = tokio::time::timeout(timeout, ping_safekeeper(id, config));
    1449            0 :             tasks.push(tokio::spawn(task));
    1450            0 :         }
    1451              : 
    1452              :         // Get a quorum of responses or errors
    1453            0 :         let mut responses = Vec::new();
    1454            0 :         let mut join_errors = Vec::new();
    1455            0 :         let mut task_errors = Vec::new();
    1456            0 :         let mut timeout_errors = Vec::new();
    1457            0 :         while let Some(response) = tasks.next().await {
    1458            0 :             match response {
    1459            0 :                 Ok(Ok(Ok(r))) => responses.push(r),
    1460            0 :                 Ok(Ok(Err(e))) => task_errors.push(e),
    1461            0 :                 Ok(Err(e)) => timeout_errors.push(e),
    1462            0 :                 Err(e) => join_errors.push(e),
    1463              :             };
    1464            0 :             if responses.len() >= quorum {
    1465            0 :                 break;
    1466            0 :             }
    1467            0 :             if join_errors.len() + task_errors.len() + timeout_errors.len() >= quorum {
    1468            0 :                 break;
    1469            0 :             }
    1470              :         }
    1471              : 
    1472              :         // In case of error, log and fail the check, but don't crash.
    1473              :         // We're playing it safe because these errors could be transient
    1474              :         // and we don't yet retry.
    1475            0 :         if responses.len() < quorum {
    1476            0 :             error!(
    1477            0 :                 "failed sync safekeepers check {:?} {:?} {:?}",
    1478              :                 join_errors, task_errors, timeout_errors
    1479              :             );
    1480            0 :             return Ok(None);
    1481            0 :         }
    1482              : 
    1483            0 :         Ok(check_if_synced(responses))
    1484            0 :     }
    1485              : 
     1486              :     // Fast path for sync_safekeepers. If they're already synced, we get the LSN
     1487              :     // in one round trip. If not, we must do a full sync_safekeepers.
    1488              :     #[instrument(skip_all)]
    1489              :     pub fn check_safekeepers_synced(&self, compute_state: &ComputeState) -> Result<Option<Lsn>> {
    1490              :         let start_time = Utc::now();
    1491              : 
    1492              :         let rt = tokio::runtime::Handle::current();
    1493              :         let result = rt.block_on(self.check_safekeepers_synced_async(compute_state));
    1494              : 
    1495              :         // Record runtime
    1496              :         self.state.lock().unwrap().metrics.sync_sk_check_ms = Utc::now()
    1497              :             .signed_duration_since(start_time)
    1498              :             .to_std()
    1499              :             .unwrap()
    1500              :             .as_millis() as u64;
    1501              :         result
    1502              :     }
    1503              : 
     1504              :     // Run `postgres` in a special mode with the `--sync-safekeepers` argument
     1505              :     // and return the reported LSN to the caller.
    1506              :     #[instrument(skip_all)]
    1507              :     pub fn sync_safekeepers(&self, storage_auth_token: Option<String>) -> Result<Lsn> {
    1508              :         let start_time = Utc::now();
    1509              : 
    1510              :         let mut sync_handle = maybe_cgexec(&self.params.pgbin)
    1511              :             .args(["--sync-safekeepers"])
    1512              :             .env("PGDATA", &self.params.pgdata) // we cannot use -D in this mode
    1513              :             .envs(if let Some(storage_auth_token) = &storage_auth_token {
    1514              :                 vec![("NEON_AUTH_TOKEN", storage_auth_token)]
    1515              :             } else {
    1516              :                 vec![]
    1517              :             })
    1518              :             .stdout(Stdio::piped())
    1519              :             .stderr(Stdio::piped())
    1520              :             .spawn()
    1521              :             .expect("postgres --sync-safekeepers failed to start");
    1522              :         SYNC_SAFEKEEPERS_PID.store(sync_handle.id(), Ordering::SeqCst);
    1523              : 
     1524              :         // `postgres --sync-safekeepers` prints all log output to stderr and the
     1525              :         // final LSN to stdout, so we keep stdout for collecting the LSN while
     1526              :         // stderr is drained by a child task.
    1527              :         let stderr = sync_handle
    1528              :             .stderr
    1529              :             .take()
    1530              :             .expect("stderr should be captured");
    1531              :         let logs_handle = handle_postgres_logs(stderr);
    1532              : 
    1533              :         let sync_output = sync_handle
    1534              :             .wait_with_output()
    1535              :             .expect("postgres --sync-safekeepers failed");
    1536              :         SYNC_SAFEKEEPERS_PID.store(0, Ordering::SeqCst);
    1537              : 
     1538              :         // Process has exited, so we can join the logs task.
    1539              :         let _ = tokio::runtime::Handle::current()
    1540              :             .block_on(logs_handle)
    1541            0 :             .map_err(|e| tracing::error!("log task panicked: {:?}", e));
    1542              : 
    1543              :         if !sync_output.status.success() {
    1544              :             anyhow::bail!(
    1545              :                 "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
    1546              :                 sync_output.status,
    1547              :                 String::from_utf8(sync_output.stdout)
    1548              :                     .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
    1549              :             );
    1550              :         }
    1551              : 
    1552              :         self.state.lock().unwrap().metrics.sync_safekeepers_ms = Utc::now()
    1553              :             .signed_duration_since(start_time)
    1554              :             .to_std()
    1555              :             .unwrap()
    1556              :             .as_millis() as u64;
    1557              : 
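                       :         // stdout should contain nothing but the final LSN, e.g. "0/229C1A0".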
    1558              :         let lsn = Lsn::from_str(String::from_utf8(sync_output.stdout)?.trim())?;
    1559              : 
    1560              :         Ok(lsn)
    1561              :     }
    1562              : 
    1563            0 :     fn sync_safekeepers_with_retries(&self, storage_auth_token: Option<String>) -> Result<Lsn> {
    1564            0 :         let max_retries = 5;
    1565            0 :         let mut attempts = 0;
    1566              :         loop {
    1567            0 :             let result = self.sync_safekeepers(storage_auth_token.clone());
    1568            0 :             match &result {
    1569              :                 Ok(_) => {
    1570            0 :                     if attempts > 0 {
    1571            0 :                         tracing::info!("sync_safekeepers succeeded after {attempts} retries");
    1572            0 :                     }
    1573            0 :                     return result;
    1574              :                 }
    1575            0 :                 Err(e) if attempts < max_retries => {
    1576            0 :                     tracing::info!(
    1577            0 :                         "sync_safekeepers failed, will retry (attempt {attempts}): {e:#}"
    1578              :                     );
    1579              :                 }
    1580            0 :                 Err(err) => {
    1581            0 :                     tracing::warn!(
    1582            0 :                         "sync_safekeepers still failed after {attempts} retries, giving up: {err:?}"
    1583              :                     );
    1584            0 :                     return result;
    1585              :                 }
    1586              :             }
    1587              :             // sleep and retry
    1588            0 :             let backoff = exponential_backoff_duration(
    1589            0 :                 attempts,
    1590              :                 DEFAULT_BASE_BACKOFF_SECONDS,
    1591              :                 DEFAULT_MAX_BACKOFF_SECONDS,
    1592              :             );
    1593            0 :             std::thread::sleep(backoff);
    1594            0 :             attempts += 1;
    1595              :         }
    1596            0 :     }
    1597              : 
    1598              :     /// Do all the preparations like PGDATA directory creation, configuration,
    1599              :     /// safekeepers sync, basebackup, etc.
    1600              :     #[instrument(skip_all)]
    1601              :     pub fn prepare_pgdata(&self, compute_state: &ComputeState) -> Result<()> {
    1602              :         let pspec = compute_state.pspec.as_ref().expect("spec must be set");
    1603              :         let spec = &pspec.spec;
    1604              :         let pgdata_path = Path::new(&self.params.pgdata);
    1605              : 
    1606              :         let tls_config = self.tls_config(&pspec.spec);
    1607              :         let databricks_settings = spec.databricks_settings.as_ref();
    1608              :         let postgres_port = self.params.connstr.port();
    1609              : 
    1610              :         // Remove/create an empty pgdata directory and put configuration there.
    1611              :         self.create_pgdata()?;
    1612              :         config::write_postgres_conf(
    1613              :             pgdata_path,
    1614              :             &self.params,
    1615              :             &pspec.spec,
    1616              :             postgres_port,
    1617              :             self.params.internal_http_port,
    1618              :             tls_config,
    1619              :             databricks_settings,
    1620              :             self.params.lakebase_mode,
    1621              :         )?;
    1622              : 
    1623              :         // Syncing safekeepers is only safe with primary nodes: if a primary
    1624              :         // is already connected it will be kicked out, so a secondary (standby)
    1625              :         // cannot sync safekeepers.
    1626              :         let lsn = match spec.mode {
    1627              :             ComputeMode::Primary => {
    1628              :                 info!("checking if safekeepers are synced");
    1629              :                 let lsn = if let Ok(Some(lsn)) = self.check_safekeepers_synced(compute_state) {
    1630              :                     lsn
    1631              :                 } else {
    1632              :                     info!("starting safekeepers syncing");
    1633              :                     self.sync_safekeepers_with_retries(pspec.storage_auth_token.clone())
    1634              :                         .with_context(|| "failed to sync safekeepers")?
    1635              :                 };
    1636              :                 info!("safekeepers synced at LSN {}", lsn);
    1637              :                 lsn
    1638              :             }
    1639              :             ComputeMode::Static(lsn) => {
    1640              :                 info!("Starting read-only node at static LSN {}", lsn);
    1641              :                 lsn
    1642              :             }
    1643              :             ComputeMode::Replica => {
    1644              :                 info!("Initializing standby from latest Pageserver LSN");
    1645              :                 Lsn(0)
    1646              :             }
    1647              :         };
    1648              : 
    1649              :         self.get_basebackup(compute_state, lsn)
    1650            0 :             .with_context(|| format!("failed to get basebackup@{lsn}"))?;
    1651              : 
    1652              :         if let Some(settings) = databricks_settings {
    1653              :             copy_tls_certificates(
    1654              :                 &settings.pg_compute_tls_settings.key_file,
    1655              :                 &settings.pg_compute_tls_settings.cert_file,
    1656              :                 pgdata_path,
    1657              :             )?;
    1658              : 
    1659              :             // Update pg_hba.conf received with basebackup including additional databricks settings.
    1660              :             update_pg_hba(pgdata_path, Some(&settings.databricks_pg_hba))?;
    1661              :             update_pg_ident(pgdata_path, Some(&settings.databricks_pg_ident))?;
    1662              :         } else {
    1663              :             // Update pg_hba.conf received with basebackup.
    1664              :             update_pg_hba(pgdata_path, None)?;
    1665              :         }
    1666              : 
    1675              :         // Place pg_dynshmem under /dev/shm. This allows us to use
    1676              :         // 'dynamic_shared_memory_type = mmap' so that the files are placed in
    1677              :         // /dev/shm, similar to how 'dynamic_shared_memory_type = posix' works.
    1678              :         //
    1679              :         // Why on earth don't we just stick to the 'posix' default, you might
    1680              :         // ask.  It turns out that making large allocations with 'posix' doesn't
    1681              :         // work very well with autoscaling. The behavior we want is that:
    1682              :         //
    1683              :         // 1. You can make large DSM allocations, larger than the current RAM
    1684              :         //    size of the VM, without errors
    1685              :         //
    1686              :         // 2. If the allocated memory is really used, the VM is scaled up
    1687              :         //    automatically to accommodate that
    1688              :         //
    1689              :         // We try to make that possible by having swap in the VM. But with the
    1690              :         // default 'posix' DSM implementation, we fail step 1, even when there's
    1691              :         // plenty of swap available. PostgreSQL uses posix_fallocate() to create
    1692              :         // the shmem segment, which is really just a file in /dev/shm in Linux,
    1693              :         // but posix_fallocate() on tmpfs returns ENOMEM if the size is larger
    1694              :         // than available RAM.
    1695              :         //
    1696              :         // Using 'dynamic_shared_memory_type = mmap' works around that, because
    1697              :         // the Postgres 'mmap' DSM implementation doesn't use
    1698              :         // posix_fallocate(). Instead, it uses repeated calls to write(2) to
    1699              :         // fill the file with zeros. It's weird that that differs between
    1700              :         // 'posix' and 'mmap', but we take advantage of it. When the file is
    1701              :         // filled slowly with write(2), the kernel allows it to grow larger, as
    1702              :         // long as there's swap available.
    1703              :         //
     1704              :         // In short, using 'dynamic_shared_memory_type = mmap' allows a single
     1705              :         // DSM segment to be larger than the currently available RAM. But we
     1706              :         // don't want to back it with a real file, which the kernel would try
     1707              :         // to flush to disk, so we symlink pg_dynshmem to /dev/shm.
    1708              :         //
    1709              :         // We don't set 'dynamic_shared_memory_type = mmap' here, we let the
    1710              :         // control plane control that option. If 'mmap' is not used, this
    1711              :         // symlink doesn't affect anything.
    1712              :         //
    1713              :         // See https://github.com/neondatabase/autoscaling/issues/800
    1714              :         std::fs::remove_dir_all(pgdata_path.join("pg_dynshmem"))?;
    1715              :         symlink("/dev/shm/", pgdata_path.join("pg_dynshmem"))?;
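                       :         // From here on, $PGDATA/pg_dynshmem points into /dev/shm/, so any DSM
                       :         // files Postgres creates in it live on tmpfs.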
    1716              : 
    1717              :         match spec.mode {
    1718              :             ComputeMode::Primary => {}
    1719              :             ComputeMode::Replica | ComputeMode::Static(..) => {
    1720              :                 add_standby_signal(pgdata_path)?;
    1721              :             }
    1722              :         }
    1723              : 
    1724              :         Ok(())
    1725              :     }
    1726              : 
    1727              :     /// Start and stop a postgres process to warm up the VM for startup.
    1728            0 :     pub fn prewarm_postgres_vm_memory(&self) -> Result<()> {
    1729            0 :         if self.params.lakebase_mode {
     1730              :             // We are running in Hadron mode. This prewarming step is disabled for now, as it could run
     1731              :             // into dblet port conflicts and it doesn't add much value with our current infra.
    1732            0 :             info!("Skipping postgres prewarming in Hadron mode");
    1733            0 :             return Ok(());
    1734            0 :         }
    1735            0 :         info!("prewarming VM memory");
    1736              : 
    1737              :         // Create pgdata
    1738            0 :         let pgdata = &format!("{}.warmup", self.params.pgdata);
    1739            0 :         create_pgdata(pgdata)?;
    1740              : 
    1741              :         // Run initdb to completion
    1742            0 :         info!("running initdb");
    1743            0 :         let initdb_bin = Path::new(&self.params.pgbin)
    1744            0 :             .parent()
    1745            0 :             .unwrap()
    1746            0 :             .join("initdb");
    1747            0 :         Command::new(initdb_bin)
    1748            0 :             .args(["--pgdata", pgdata])
    1749            0 :             .output()
    1750            0 :             .expect("cannot start initdb process");
    1751              : 
    1752              :         // Write conf
    1753              :         use std::io::Write;
    1754            0 :         let conf_path = Path::new(pgdata).join("postgresql.conf");
    1755            0 :         let mut file = std::fs::File::create(conf_path)?;
    1756            0 :         writeln!(file, "shared_buffers=65536")?;
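                       :         // With no unit suffix, shared_buffers is measured in 8 kB pages,
                       :         // so 65536 means 512 MB.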
    1757            0 :         writeln!(file, "port=51055")?; // Nobody should be connecting
    1758            0 :         writeln!(file, "shared_preload_libraries = 'neon'")?;
    1759              : 
    1760              :         // Start postgres
    1761            0 :         info!("starting postgres");
    1762            0 :         let mut pg = maybe_cgexec(&self.params.pgbin)
    1763            0 :             .args(["-D", pgdata])
    1764            0 :             .spawn()
    1765            0 :             .expect("cannot start postgres process");
    1766              : 
    1767              :         // Stop it when it's ready
    1768            0 :         info!("waiting for postgres");
    1769            0 :         wait_for_postgres(&mut pg, Path::new(pgdata))?;
    1770              :         // SIGQUIT orders postgres to exit immediately. We don't want to SIGKILL
    1771              :         // it to avoid orphaned processes prowling around while datadir is
    1772              :         // wiped.
    1773            0 :         let pm_pid = Pid::from_raw(pg.id() as i32);
    1774            0 :         kill(pm_pid, Signal::SIGQUIT)?;
    1775            0 :         info!("sent SIGQUIT signal");
    1776            0 :         pg.wait()?;
    1777            0 :         info!("done prewarming vm memory");
    1778              : 
    1779              :         // clean up
    1780            0 :         let _ok = fs::remove_dir_all(pgdata);
    1781            0 :         Ok(())
    1782            0 :     }
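                      : 
                      :     // A minimal sketch of the SIGQUIT-then-wait shutdown used above, in
                      :     // isolation. The helper name is hypothetical; it only illustrates that
                      :     // SIGQUIT triggers Postgres' "immediate shutdown" mode, and that wait()
                      :     // then reaps the child so no zombie process is left behind.
                      :     fn stop_postgres_immediately_sketch(pg: &mut std::process::Child) -> Result<()> {
                      :         // Unlike SIGKILL, SIGQUIT lets the postmaster kill and reap its backends.
                      :         kill(Pid::from_raw(pg.id() as i32), Signal::SIGQUIT)?;
                      :         pg.wait()?; // reap the child process
                      :         Ok(())
                      :     }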
    1783              : 
    1784              :     /// Start Postgres as a child process and wait for it to start accepting
    1785              :     /// connections.
    1786              :     ///
    1787              :     /// Returns a handle to the child process and a handle to the logs thread.
    1788              :     #[instrument(skip_all)]
    1789              :     pub fn start_postgres(&self, storage_auth_token: Option<String>) -> Result<PostgresHandle> {
    1790              :         let pgdata_path = Path::new(&self.params.pgdata);
    1791              : 
    1792              :         let env_vars: Vec<(String, String)> = if self.params.lakebase_mode {
    1793              :             let databricks_env_vars = {
    1794              :                 let state = self.state.lock().unwrap();
    1795              :                 let spec = &state.pspec.as_ref().unwrap().spec;
    1796              :                 DatabricksEnvVars::new(
    1797              :                     spec,
    1798              :                     Some(&self.params.compute_id),
    1799              :                     self.params.instance_id.clone(),
    1800              :                     self.params.lakebase_mode,
    1801              :                 )
    1802              :             };
    1803              : 
    1804              :             info!(
    1805              :                 "Starting Postgres for databricks endpoint id: {}",
    1806              :                 &databricks_env_vars.endpoint_id
    1807              :             );
    1808              : 
    1809              :             let mut env_vars = databricks_env_vars.to_env_var_list();
    1810            0 :             env_vars.extend(storage_auth_token.map(|t| ("NEON_AUTH_TOKEN".to_string(), t)));
    1811              :             env_vars
    1812              :         } else if let Some(storage_auth_token) = &storage_auth_token {
    1813              :             vec![("NEON_AUTH_TOKEN".to_owned(), storage_auth_token.to_owned())]
    1814              :         } else {
    1815              :             vec![]
    1816              :         };
    1817              : 
    1818              :         // Run postgres as a child process.
    1819              :         let mut pg = maybe_cgexec(&self.params.pgbin)
    1820              :             .args(["-D", &self.params.pgdata])
    1821              :             .envs(env_vars)
    1822              :             .stderr(Stdio::piped())
    1823              :             .spawn()
    1824              :             .expect("cannot start postgres process");
    1825              :         PG_PID.store(pg.id(), Ordering::SeqCst);
    1826              : 
    1827              :         // Start a task to collect logs from stderr.
    1828              :         let stderr = pg.stderr.take().expect("stderr should be captured");
    1829              :         let logs_handle = handle_postgres_logs(stderr);
    1830              : 
    1831              :         wait_for_postgres(&mut pg, pgdata_path)?;
    1832              : 
    1833              :         Ok(PostgresHandle {
    1834              :             postgres: pg,
    1835              :             log_collector: logs_handle,
    1836              :         })
    1837              :     }
    1838              : 
    1839              :     /// Wait for the child Postgres process to exit, however long that takes.
    1840              :     /// While waiting, Ctrl+C propagates to Postgres, so it is shut down as well.
    1841            0 :     fn wait_postgres(&self, mut pg_handle: PostgresHandle) -> std::process::ExitStatus {
    1842            0 :         info!(postmaster_pid = %pg_handle.postgres.id(), "Waiting for Postgres to exit");
    1843              : 
    1844            0 :         let ecode = pg_handle
    1845            0 :             .postgres
    1846            0 :             .wait()
    1847            0 :             .expect("failed to start waiting on Postgres process");
    1848            0 :         PG_PID.store(0, Ordering::SeqCst);
    1849              : 
    1850              :         // Process has exited. Wait for the log collecting task to finish.
    1851            0 :         let _ = tokio::runtime::Handle::current()
    1852            0 :             .block_on(pg_handle.log_collector)
    1853            0 :             .map_err(|e| tracing::error!("log task panicked: {:?}", e));
    1854              : 
    1855            0 :         ecode
    1856            0 :     }
    1857              : 
    1858              :     /// Do post configuration of the already started Postgres. This function spawns a background task to
    1859              :     /// configure the database after applying the compute spec. Currently, it upgrades the neon extension
    1860              :     /// version. In the future, it may upgrade all 3rd-party extensions.
    1861              :     #[instrument(skip_all)]
    1862              :     pub fn post_apply_config(&self) -> Result<()> {
    1863              :         let conf = self.get_tokio_conn_conf(Some("compute_ctl:post_apply_config"));
    1864            0 :         tokio::spawn(async move {
    1865            0 :             let res = async {
    1866            0 :                 let (mut client, connection) = conf.connect(NoTls).await?;
    1867            0 :                 tokio::spawn(async move {
    1868            0 :                     if let Err(e) = connection.await {
    1869            0 :                         eprintln!("connection error: {e}");
    1870            0 :                     }
    1871            0 :                 });
    1872              : 
    1873            0 :                 handle_neon_extension_upgrade(&mut client)
    1874            0 :                     .await
    1875            0 :                     .context("handle_neon_extension_upgrade")?;
    1876            0 :                 Ok::<_, anyhow::Error>(())
    1877            0 :             }
    1878            0 :             .await;
    1879            0 :             if let Err(err) = res {
    1880            0 :                 error!("error while post_apply_config: {err:#}");
    1881            0 :             }
    1882            0 :         });
    1883              :         Ok(())
    1884              :     }
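                      : 
                      :     // A minimal sketch of the connect-then-drive pattern used above (the
                      :     // function name and connstr are illustrative): tokio_postgres returns a
                      :     // (client, connection) pair, and the connection future must be polled on
                      :     // its own task for the client to make any progress.
                      :     async fn connect_then_drive_sketch() -> Result<()> {
                      :         let (client, connection) =
                      :             tokio_postgres::connect("host=localhost user=cloud_admin", NoTls).await?;
                      :         // If this task is dropped, the client stops working.
                      :         tokio::spawn(async move {
                      :             if let Err(e) = connection.await {
                      :                 eprintln!("connection error: {e}");
                      :             }
                      :         });
                      :         client.simple_query("SELECT 1").await?;
                      :         Ok(())
                      :     }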
    1885              : 
    1886            0 :     pub fn get_conn_conf(&self, application_name: Option<&str>) -> postgres::Config {
    1887            0 :         let mut conf = self.conn_conf.clone();
    1888            0 :         if let Some(application_name) = application_name {
    1889            0 :             conf.application_name(application_name);
    1890            0 :         }
    1891            0 :         conf
    1892            0 :     }
    1893              : 
    1894            0 :     pub fn get_tokio_conn_conf(&self, application_name: Option<&str>) -> tokio_postgres::Config {
    1895            0 :         let mut conf = self.tokio_conn_conf.clone();
    1896            0 :         if let Some(application_name) = application_name {
    1897            0 :             conf.application_name(application_name);
    1898            0 :         }
    1899            0 :         conf
    1900            0 :     }
    1901              : 
    1902            0 :     pub async fn get_maintenance_client(
    1903            0 :         conf: &tokio_postgres::Config,
    1904            0 :     ) -> Result<tokio_postgres::Client> {
    1905            0 :         let mut conf = conf.clone();
    1906            0 :         conf.application_name("compute_ctl:apply_config");
    1907              : 
    1908            0 :         let (client, conn) = match conf.connect(NoTls).await {
    1909              :             // If connection fails, it may be the old node with `zenith_admin` superuser.
    1910              :             //
    1911              :             // In this case we need to connect with old `zenith_admin` name
    1912              :             // and create new user. We cannot simply rename connected user,
    1913              :             // but we can create a new one and grant it all privileges.
    1914            0 :             Err(e) => match e.code() {
    1915              :                 Some(&SqlState::INVALID_PASSWORD)
    1916              :                 | Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => {
    1917              :                     // Connect with `zenith_admin` if `cloud_admin` could not authenticate
    1918            0 :                     info!(
    1919            0 :                         "cannot connect to Postgres: {}, retrying with 'zenith_admin' username",
    1920              :                         e
    1921              :                     );
    1922            0 :                     let mut zenith_admin_conf = postgres::config::Config::from(conf.clone());
    1923            0 :                     zenith_admin_conf.application_name("compute_ctl:apply_config");
    1924            0 :                     zenith_admin_conf.user("zenith_admin");
    1925              : 
    1926              :                     // It doesn't matter what were the options before, here we just want
    1927              :                     // to connect and create a new superuser role.
    1928              :                     const ZENITH_OPTIONS: &str = "-c role=zenith_admin -c default_transaction_read_only=off -c search_path='' -c statement_timeout=0";
    1929            0 :                     zenith_admin_conf.options(ZENITH_OPTIONS);
    1930              : 
    1931            0 :                     let mut client =
    1932            0 :                         zenith_admin_conf.connect(NoTls)
    1933            0 :                             .context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
    1934              : 
    1935              :                     // Disable forwarding so that users don't get a cloud_admin role
    1936            0 :                     let mut func = || {
    1937            0 :                         client.simple_query("SET neon.forward_ddl = false")?;
    1938            0 :                         client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
    1939            0 :                         client.simple_query("GRANT zenith_admin TO cloud_admin")?;
    1940            0 :                         Ok::<_, anyhow::Error>(())
    1941            0 :                     };
    1942            0 :                     func().context("apply_config setup cloud_admin")?;
    1943              : 
    1944            0 :                     drop(client);
    1945              : 
    1946              :                     // Reconnect with connstring with expected name
    1947            0 :                     conf.connect(NoTls).await?
    1948              :                 }
    1949            0 :                 _ => return Err(e.into()),
    1950              :             },
    1951            0 :             Ok((client, conn)) => (client, conn),
    1952              :         };
    1953              : 
    1954            0 :         spawn(async move {
    1955            0 :             if let Err(e) = conn.await {
    1956            0 :                 error!("maintenance client connection error: {}", e);
    1957            0 :             }
    1958            0 :         });
    1959              : 
    1960              :         // Disable DDL forwarding because control plane already knows about the roles/databases
    1961              :         // we're about to modify.
    1962            0 :         client
    1963            0 :             .simple_query("SET neon.forward_ddl = false")
    1964            0 :             .await
    1965            0 :             .context("apply_config SET neon.forward_ddl = false")?;
    1966              : 
    1967            0 :         Ok(client)
    1968            0 :     }
    1969              : 
    1970              :     /// Do initial configuration of the already started Postgres.
    1971              :     #[instrument(skip_all)]
    1972              :     pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
    1973              :         let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:apply_config"));
    1974              : 
    1975              :         if self.params.lakebase_mode {
    1976              :             // Set a 2-minute statement_timeout for the session applying config. The individual SQL statements
    1977              :             // used in apply_spec_sql() should not take long (they are just creating users and installing
    1978              :             // extensions). If any of them are stuck for an extended period of time it usually indicates a
    1979              :             // pageserver connectivity problem and we should bail out.
    1980              :             conf.options("-c statement_timeout=2min");
    1981              :         }
    1982              : 
    1983              :         let conf = Arc::new(conf);
    1984              :         let spec = Arc::new(
    1985              :             compute_state
    1986              :                 .pspec
    1987              :                 .as_ref()
    1988              :                 .expect("spec must be set")
    1989              :                 .spec
    1990              :                 .clone(),
    1991              :         );
    1992              : 
    1993              :         let mut tls_config = None::<TlsConfig>;
    1994              :         if spec.features.contains(&ComputeFeature::TlsExperimental) {
    1995              :             tls_config = self.compute_ctl_config.tls.clone();
    1996              :         }
    1997              : 
    1998              :         self.update_installed_extensions_collection_interval(&spec);
    1999              : 
    2000              :         let max_concurrent_connections = self.max_service_connections(compute_state, &spec);
    2001              : 
    2002              :         // Merge-apply spec & changes to PostgreSQL state.
    2003              :         self.apply_spec_sql(spec.clone(), conf.clone(), max_concurrent_connections)?;
    2004              : 
    2005              :         if let Some(local_proxy) = &spec.clone().local_proxy_config {
    2006              :             let mut local_proxy = local_proxy.clone();
    2007              :             local_proxy.tls = tls_config.clone();
    2008              : 
    2009              :             info!("configuring local_proxy");
    2010              :             local_proxy::configure(&local_proxy).context("apply_config local_proxy")?;
    2011              :         }
    2012              : 
    2013              :         // Run migrations separately to not hold up cold starts
    2014              :         let lakebase_mode = self.params.lakebase_mode;
    2015              :         let params = self.params.clone();
    2016            0 :         tokio::spawn(async move {
    2017            0 :             let mut conf = conf.as_ref().clone();
    2018            0 :             conf.application_name("compute_ctl:migrations");
    2019              : 
    2020            0 :             match conf.connect(NoTls).await {
    2021            0 :                 Ok((mut client, connection)) => {
    2022            0 :                     tokio::spawn(async move {
    2023            0 :                         if let Err(e) = connection.await {
    2024            0 :                             eprintln!("connection error: {e}");
    2025            0 :                         }
    2026            0 :                     });
    2027            0 :                     if let Err(e) = handle_migrations(params, &mut client, lakebase_mode).await {
    2028            0 :                         error!("Failed to run migrations: {}", e);
    2029            0 :                     }
    2030              :                 }
    2031            0 :                 Err(e) => {
    2032            0 :                     error!(
    2033            0 :                         "Failed to connect to the compute for running migrations: {}",
    2034              :                         e
    2035              :                     );
    2036              :                 }
    2037              :             };
    2038            0 :         });
    2039              : 
    2040              :         Ok::<(), anyhow::Error>(())
    2041              :     }
    2042              : 
    2043              :     // Signal the configurator to refresh the configuration by pulling a new spec from the HCC.
    2044              :     // Note that this merely triggers a notification on a condition variable the configurator thread
    2045              :     // waits on. The configurator thread (in configurator.rs) pulls the new spec from the HCC and
    2046              :     // applies it.
    2047            0 :     pub async fn signal_refresh_configuration(&self) -> Result<()> {
    2048            0 :         let states_allowing_configuration_refresh = [
    2049            0 :             ComputeStatus::Running,
    2050            0 :             ComputeStatus::Failed,
    2051            0 :             ComputeStatus::RefreshConfigurationPending,
    2052            0 :         ];
    2053              : 
    2054            0 :         let mut state = self.state.lock().expect("state lock poisoned");
    2055            0 :         if states_allowing_configuration_refresh.contains(&state.status) {
    2056            0 :             state.status = ComputeStatus::RefreshConfigurationPending;
    2057            0 :             self.state_changed.notify_all();
    2058            0 :             Ok(())
    2059            0 :         } else if state.status == ComputeStatus::Init {
    2060              :             // If the compute is in Init state, we can't refresh the configuration immediately,
    2061              :             // but we should be able to do that soon.
    2062            0 :             Ok(())
    2063              :         } else {
    2064            0 :             Err(anyhow::anyhow!(
    2065            0 :                 "Cannot refresh compute configuration in state {:?}",
    2066            0 :                 state.status
    2067            0 :             ))
    2068              :         }
    2069            0 :     }
    2070              : 
    2071              :     // A thin wrapper around `pg_ctl reload`; note that we currently don't use
    2072              :     // `pg_ctl` for start / stop.
    2073              :     #[instrument(skip_all)]
    2074              :     fn pg_reload_conf(&self) -> Result<()> {
    2075              :         let pgctl_bin = Path::new(&self.params.pgbin)
    2076              :             .parent()
    2077              :             .unwrap()
    2078              :             .join("pg_ctl");
    2079              :         Command::new(pgctl_bin)
    2080              :             .args(["reload", "-D", &self.params.pgdata])
    2081              :             .output()
    2082              :             .expect("cannot run pg_ctl process");
    2083              :         Ok(())
    2084              :     }
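                      : 
                      :     // For illustration, the above is equivalent to running (path hypothetical):
                      :     //
                      :     //     pg_ctl reload -D /path/to/pgdata
                      :     //
                      :     // which sends SIGHUP to the postmaster so it re-reads postgresql.conf.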
    2085              : 
    2086              :     /// Similar to `apply_config()`, but performs a slightly different sequence of operations,
    2087              :     /// as it's used to reconfigure a previously started and configured Postgres node.
    2088              :     #[instrument(skip_all)]
    2089              :     pub fn reconfigure(&self) -> Result<()> {
    2090              :         let spec = self.state.lock().unwrap().pspec.clone().unwrap().spec;
    2091              : 
    2092              :         let tls_config = self.tls_config(&spec);
    2093              : 
    2094              :         self.update_installed_extensions_collection_interval(&spec);
    2095              : 
    2096              :         if let Some(ref pgbouncer_settings) = spec.pgbouncer_settings {
    2097              :             info!("tuning pgbouncer");
    2098              : 
    2099              :             let pgbouncer_settings = pgbouncer_settings.clone();
    2100              :             let tls_config = tls_config.clone();
    2101              : 
    2102              :             // Spawn a background task to do the tuning,
    2103              :             // so that we don't block the main thread that starts Postgres.
    2104            0 :             tokio::spawn(async move {
    2105            0 :                 let res = tune_pgbouncer(pgbouncer_settings, tls_config).await;
    2106            0 :                 if let Err(err) = res {
    2107            0 :                     error!("error while tuning pgbouncer: {err:?}");
    2108            0 :                 }
    2109            0 :             });
    2110              :         }
    2111              : 
    2112              :         if let Some(ref local_proxy) = spec.local_proxy_config {
    2113              :             info!("configuring local_proxy");
    2114              : 
    2115              :             // Spawn a background task to do the configuration,
    2116              :             // so that we don't block the main thread that starts Postgres.
    2117              :             let mut local_proxy = local_proxy.clone();
    2118              :             local_proxy.tls = tls_config.clone();
    2119            0 :             tokio::spawn(async move {
    2120            0 :                 if let Err(err) = local_proxy::configure(&local_proxy) {
    2121            0 :                     error!("error while configuring local_proxy: {err:?}");
    2122            0 :                 }
    2123            0 :             });
    2124              :         }
    2125              : 
    2126              :         // Reconfigure rsyslog for Postgres logs export
    2127              :         let conf = PostgresLogsRsyslogConfig::new(spec.logs_export_host.as_deref());
    2128              :         configure_postgres_logs_export(conf)?;
    2129              : 
    2130              :         // Write new config
    2131              :         let pgdata_path = Path::new(&self.params.pgdata);
    2132              :         let postgres_port = self.params.connstr.port();
    2133              :         config::write_postgres_conf(
    2134              :             pgdata_path,
    2135              :             &self.params,
    2136              :             &spec,
    2137              :             postgres_port,
    2138              :             self.params.internal_http_port,
    2139              :             tls_config,
    2140              :             spec.databricks_settings.as_ref(),
    2141              :             self.params.lakebase_mode,
    2142              :         )?;
    2143              : 
    2144              :         self.pg_reload_conf()?;
    2145              : 
    2146              :         if !spec.skip_pg_catalog_updates {
    2147              :             let max_concurrent_connections = spec.reconfigure_concurrency;
    2148              :             // Temporarily reset max_cluster_size in config
    2149              :             // to avoid the possibility of hitting the limit while we are reconfiguring:
    2150              :             // creating new extensions, roles, etc.
    2151            0 :             config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
    2152            0 :                 self.pg_reload_conf()?;
    2153              : 
    2154            0 :                 if spec.mode == ComputeMode::Primary {
    2155            0 :                     let conf = self.get_tokio_conn_conf(Some("compute_ctl:reconfigure"));
    2156            0 :                     let conf = Arc::new(conf);
    2157              : 
    2158            0 :                     let spec = Arc::new(spec.clone());
    2159              : 
    2160            0 :                     self.apply_spec_sql(spec, conf, max_concurrent_connections)?;
    2161            0 :                 }
    2162              : 
    2163            0 :                 Ok(())
    2164            0 :             })?;
    2165              :             self.pg_reload_conf()?;
    2166              :         }
    2167              : 
    2168              :         let unknown_op = "unknown".to_string();
    2169              :         let op_id = spec.operation_uuid.as_ref().unwrap_or(&unknown_op);
    2170              :         info!(
    2171              :             "finished reconfiguration of compute node for operation {}",
    2172              :             op_id
    2173              :         );
    2174              : 
    2175              :         Ok(())
    2176              :     }
    2177              : 
    2178              :     #[instrument(skip_all)]
    2179              :     pub fn configure_as_primary(&self, compute_state: &ComputeState) -> Result<()> {
    2180              :         let pspec = compute_state.pspec.as_ref().expect("spec must be set");
    2181              : 
    2182              :         assert!(pspec.spec.mode == ComputeMode::Primary);
    2183              :         if !pspec.spec.skip_pg_catalog_updates {
    2184              :             let pgdata_path = Path::new(&self.params.pgdata);
    2185              :             // temporarily reset max_cluster_size in config
    2186              :             // to avoid the possibility of hitting the limit while we are applying config:
    2187              :             // creating new extensions, roles, etc...
    2188            0 :             config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
    2189            0 :                 self.pg_reload_conf()?;
    2190              : 
    2191            0 :                 self.apply_config(compute_state)?;
    2192              : 
    2193            0 :                 Ok(())
    2194            0 :             })?;
    2195              : 
    2196              :             let postgresql_conf_path = pgdata_path.join("postgresql.conf");
    2197              :             if config::line_in_file(
    2198              :                 &postgresql_conf_path,
    2199              :                 "neon.disable_logical_replication_subscribers=false",
    2200              :             )? {
    2201              :                 info!(
    2202              :                     "updated postgresql.conf to set neon.disable_logical_replication_subscribers=false"
    2203              :                 );
    2204              :             }
    2205              :             self.pg_reload_conf()?;
    2206              :         }
    2207              :         self.post_apply_config()?;
    2208              : 
    2209              :         Ok(())
    2210              :     }
    2211              : 
    2212            0 :     pub async fn watch_cert_for_changes(self: Arc<Self>) {
    2213              :         // update status on cert renewal
    2214            0 :         if let Some(tls_config) = &self.compute_ctl_config.tls {
    2215            0 :             let tls_config = tls_config.clone();
    2216              : 
    2217              :             // wait until the cert exists.
    2218            0 :             let mut cert_watch = watch_cert_for_changes(tls_config.cert_path.clone()).await;
    2219              : 
    2220            0 :             tokio::task::spawn_blocking(move || {
    2221            0 :                 let handle = tokio::runtime::Handle::current();
    2222              :                 'cert_update: loop {
    2223              :                     // let postgres/pgbouncer/local_proxy know the new cert/key exists.
    2224              :                     // we need to wait until it's configurable first.
    2225              : 
    2226            0 :                     let mut state = self.state.lock().unwrap();
    2227              :                     'status_update: loop {
    2228            0 :                         match state.status {
    2229              :                             // let's update the state to config pending
    2230              :                             ComputeStatus::ConfigurationPending | ComputeStatus::Running => {
    2231            0 :                                 state.set_status(
    2232            0 :                                     ComputeStatus::ConfigurationPending,
    2233            0 :                                     &self.state_changed,
    2234            0 :                                 );
    2235            0 :                                 break 'status_update;
    2236              :                             }
    2237              : 
    2238              :                             // exit loop
    2239              :                             ComputeStatus::Failed
    2240              :                             | ComputeStatus::TerminationPendingFast
    2241              :                             | ComputeStatus::TerminationPendingImmediate
    2242            0 :                             | ComputeStatus::Terminated => break 'cert_update,
    2243              : 
    2244              :                             // wait
    2245              :                             ComputeStatus::Init
    2246              :                             | ComputeStatus::Configuration
    2247              :                             | ComputeStatus::RefreshConfiguration
    2248              :                             | ComputeStatus::RefreshConfigurationPending
    2249            0 :                             | ComputeStatus::Empty => {
    2250            0 :                                 state = self.state_changed.wait(state).unwrap();
    2251            0 :                             }
    2252              :                         }
    2253              :                     }
    2254            0 :                     drop(state);
    2255              : 
    2256              :                     // wait for a new certificate update
    2257            0 :                     if handle.block_on(cert_watch.changed()).is_err() {
    2258            0 :                         break;
    2259            0 :                     }
    2260              :                 }
    2261            0 :             });
    2262            0 :         }
    2263            0 :     }
    2264              : 
    2265            0 :     pub fn tls_config(&self, spec: &ComputeSpec) -> &Option<TlsConfig> {
    2266            0 :         if spec.features.contains(&ComputeFeature::TlsExperimental) {
    2267            0 :             &self.compute_ctl_config.tls
    2268              :         } else {
    2269            0 :             &None::<TlsConfig>
    2270              :         }
    2271            0 :     }
    2272              : 
    2273              :     /// Update `last_active` in the shared state, but only if the new value is more recent.
    2274            0 :     pub fn update_last_active(&self, last_active: Option<DateTime<Utc>>) {
    2275            0 :         let mut state = self.state.lock().unwrap();
    2276              :         // NB: `Some(<DateTime>)` is always greater than `None`.
    2277            0 :         if last_active > state.last_active {
    2278            0 :             state.last_active = last_active;
    2279            0 :             debug!("set the last compute activity time to: {:?}", last_active);
    2280            0 :         }
    2281            0 :     }
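                      : 
                      :     // The Option ordering relied on above: `None` sorts below any `Some`, and
                      :     // `Some` values compare by contents (chrono's DateTime is Ord), e.g.:
                      :     //
                      :     //     assert!(Some(2) > None::<i32>);
                      :     //     assert!(Some(2) > Some(1));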
    2282              : 
    2283              :     // Look for core dumps and collect backtraces.
    2284              :     //
    2285              :     // EKS worker nodes have the following core dump settings:
    2286              :     //   /proc/sys/kernel/core_pattern -> core
    2287              :     //   /proc/sys/kernel/core_uses_pid -> 1
    2288              :     //   ulimit -c -> unlimited
    2289              :     // which results in core dumps being written to postgres data directory as core.<pid>.
    2290              :     //
    2291              :     // Use that as the default location and pattern, except on macOS, where core
    2292              :     // dumps are written to the /cores/ directory by default.
    2293              :     //
    2294              :     // With default Linux settings, the core dump file is called just "core", so check for
    2295              :     // that too.
    2296            0 :     pub fn check_for_core_dumps(&self) -> Result<()> {
    2297            0 :         let core_dump_dir = match std::env::consts::OS {
    2298            0 :             "macos" => Path::new("/cores/"),
    2299              :             // BEGIN HADRON
    2300              :             // NB: Read core dump files from a fixed location outside of
    2301              :             // the data directory since `compute_ctl` wipes the data directory
    2302              :             // across container restarts.
    2303              :             _ => {
    2304            0 :                 if self.params.lakebase_mode {
    2305            0 :                     Path::new("/databricks/logs/brickstore")
    2306              :                 } else {
    2307            0 :                     Path::new(&self.params.pgdata)
    2308              :                 }
    2309              :             } // END HADRON
    2310              :         };
    2311              : 
    2312              :         // Collect core dump paths if any
    2313            0 :         info!("checking for core dumps in {}", core_dump_dir.display());
    2314            0 :         let files = fs::read_dir(core_dump_dir)?;
    2315            0 :         let cores = files.filter_map(|entry| {
    2316            0 :             let entry = entry.ok()?;
    2317              : 
    2318            0 :             let is_core_dump = match entry.file_name().to_str()? {
    2319            0 :                 n if n.starts_with("core.") => true,
    2320            0 :                 "core" => true,
    2321            0 :                 _ => false,
    2322              :             };
    2323            0 :             if is_core_dump {
    2324            0 :                 Some(entry.path())
    2325              :             } else {
    2326            0 :                 None
    2327              :             }
    2328            0 :         });
    2329              : 
    2330              :         // Print backtrace for each core dump
    2331            0 :         for core_path in cores {
    2332            0 :             warn!(
    2333            0 :                 "core dump found: {}, collecting backtrace",
    2334            0 :                 core_path.display()
    2335              :             );
    2336              : 
    2337              :             // Try first with gdb
    2338            0 :             let backtrace = Command::new("gdb")
    2339            0 :                 .args(["--batch", "-q", "-ex", "bt", &self.params.pgbin])
    2340            0 :                 .arg(&core_path)
    2341            0 :                 .output();
    2342              : 
    2343              :             // Try lldb if no gdb is found -- that is handy for local testing on macOS
    2344            0 :             let backtrace = match backtrace {
    2345            0 :                 Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
    2346            0 :                     warn!("cannot find gdb, trying lldb");
    2347            0 :                     Command::new("lldb")
    2348            0 :                         .arg("-c")
    2349            0 :                         .arg(&core_path)
    2350            0 :                         .args(["--batch", "-o", "bt all", "-o", "quit"])
    2351            0 :                         .output()
    2352              :                 }
    2353            0 :                 _ => backtrace,
    2354            0 :             }?;
    2355              : 
    2356            0 :             warn!(
    2357            0 :                 "core dump backtrace: {}",
    2358            0 :                 String::from_utf8_lossy(&backtrace.stdout)
    2359              :             );
    2360            0 :             warn!(
    2361            0 :                 "debugger stderr: {}",
    2362            0 :                 String::from_utf8_lossy(&backtrace.stderr)
    2363              :             );
    2364              :         }
    2365              : 
    2366            0 :         Ok(())
    2367            0 :     }
    2368              : 
    2369              :     /// Select `pg_stat_statements` data and return it as stringified JSON
    2370            0 :     pub async fn collect_insights(&self) -> String {
    2371            0 :         let mut result_rows: Vec<String> = Vec::new();
    2372            0 :         let conf = self.get_tokio_conn_conf(Some("compute_ctl:collect_insights"));
    2373            0 :         let connect_result = conf.connect(NoTls).await;
    2374            0 :         let (client, connection) = connect_result.unwrap();
    2375            0 :         tokio::spawn(async move {
    2376            0 :             if let Err(e) = connection.await {
    2377            0 :                 eprintln!("connection error: {e}");
    2378            0 :             }
    2379            0 :         });
    2380            0 :         let result = client
    2381            0 :             .simple_query(
    2382            0 :                 "SELECT
    2383            0 :     pg_catalog.row_to_json(pss)
    2384            0 : FROM
    2385            0 :     public.pg_stat_statements pss
    2386            0 : WHERE
    2387            0 :     pss.userid != 'cloud_admin'::pg_catalog.regrole::pg_catalog.oid
    2388            0 : ORDER BY
    2389            0 :     (pss.mean_exec_time + pss.mean_plan_time) DESC
    2390            0 : LIMIT 100",
    2391            0 :             )
    2392            0 :             .await;
    2393              : 
    2394            0 :         if let Ok(raw_rows) = result {
    2395            0 :             for message in raw_rows.iter() {
    2396            0 :                 if let postgres::SimpleQueryMessage::Row(row) = message {
    2397            0 :                     if let Some(json) = row.get(0) {
    2398            0 :                         result_rows.push(json.to_string());
    2399            0 :                     }
    2400            0 :                 }
    2401              :             }
    2402              : 
    2403            0 :             format!("{{\"pg_stat_statements\": [{}]}}", result_rows.join(","))
    2404              :         } else {
    2405            0 :             "{\"pg_stat_statements\": []}".to_string()
    2406              :         }
    2407            0 :     }
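                      : 
                      :     // For reference, the JSON shape produced above (values illustrative only):
                      :     //
                      :     //     {"pg_stat_statements": [{"userid": 16385, "query": "SELECT ...", "calls": 42}]}
                      :     //
                      :     // i.e. one object whose only key holds the row_to_json() rows as an array.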
    2408              : 
    2409              :     // Download an archive, unzip it, and place the files in the correct locations
    2410            0 :     pub async fn download_extension(
    2411            0 :         &self,
    2412            0 :         real_ext_name: String,
    2413            0 :         ext_path: RemotePath,
    2414            0 :     ) -> Result<u64, DownloadError> {
    2415            0 :         let remote_ext_base_url =
    2416            0 :             self.params
    2417            0 :                 .remote_ext_base_url
    2418            0 :                 .as_ref()
    2419            0 :                 .ok_or(DownloadError::BadInput(anyhow::anyhow!(
    2420            0 :                     "Remote extensions storage is not configured",
    2421            0 :                 )))?;
    2422              : 
    2423            0 :         let ext_archive_name = ext_path.object_name().expect("bad path");
    2424              : 
    2425            0 :         let mut first_try = false;
    2426            0 :         if !self
    2427            0 :             .ext_download_progress
    2428            0 :             .read()
    2429            0 :             .expect("lock err")
    2430            0 :             .contains_key(ext_archive_name)
    2431            0 :         {
    2432            0 :             self.ext_download_progress
    2433            0 :                 .write()
    2434            0 :                 .expect("lock err")
    2435            0 :                 .insert(ext_archive_name.to_string(), (Utc::now(), false));
    2436            0 :             first_try = true;
    2437            0 :         }
    2438            0 :         let (download_start, download_completed) =
    2439            0 :             self.ext_download_progress.read().expect("lock err")[ext_archive_name];
    2440            0 :         let start_time_delta = Utc::now()
    2441            0 :             .signed_duration_since(download_start)
    2442            0 :             .to_std()
    2443            0 :             .unwrap()
    2444            0 :             .as_millis() as u64;
    2445              : 
    2446              :         // how long to wait for extension download if it was started by another process
    2447              :         const HANG_TIMEOUT: u64 = 3000; // milliseconds
    2448            0 :                 "download {ext_archive_name} already started by another process, hanging until completion or timeout"
    2449            0 :         if download_completed {
    2450            0 :             info!("extension already downloaded, skipping re-download");
    2451            0 :             return Ok(0);
    2452            0 :         } else if start_time_delta < HANG_TIMEOUT && !first_try {
    2453            0 :             info!(
    2454            0 :                 "download {ext_archive_name} already started by another process, hanging untill completion or timeout"
    2455              :             );
    2456            0 :             let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500));
    2457              :             loop {
    2458            0 :                 info!("waiting for download");
    2459            0 :                 interval.tick().await;
    2460            0 :                 let (_, download_completed_now) =
    2461            0 :                     self.ext_download_progress.read().expect("lock")[ext_archive_name];
    2462            0 :                 if download_completed_now {
    2463            0 :                     info!("download finished by whoever else downloaded it");
    2464            0 :                     return Ok(0);
    2465            0 :                 }
    2466              :             }
    2467              :             // NOTE: the above loop will get terminated
    2468              :             // based on the timeout of the download function
    2469            0 :         }
    2470              : 
    2471              :         // if extension hasn't been downloaded before or the previous
    2472              :         // attempt to download was at least HANG_TIMEOUT ms ago
    2473              :         // then we try to download it here
    2474            0 :         info!("downloading new extension {ext_archive_name}");
    2475              : 
    2476            0 :         let download_size = extension_server::download_extension(
    2477            0 :             &real_ext_name,
    2478            0 :             &ext_path,
    2479            0 :             remote_ext_base_url,
    2480            0 :             &self.params.pgbin,
    2481            0 :         )
    2482            0 :         .await
    2483            0 :         .map_err(DownloadError::Other);
    2484              : 
    2485            0 :         if download_size.is_ok() {
    2486            0 :             self.ext_download_progress
    2487            0 :                 .write()
    2488            0 :                 .expect("bad lock")
    2489            0 :                 .insert(ext_archive_name.to_string(), (download_start, true));
    2490            0 :         }
    2491              : 
    2492            0 :         download_size
    2493            0 :     }
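                      : 
                      :     // To summarize the de-duplication above (a sketch of the three cases):
                      :     //   - download already completed         -> return Ok(0) immediately
                      :     //   - another attempt started < 3s ago  -> poll every 500ms until it completes
                      :     //   - first attempt, or a stale attempt -> download here and record completion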
    2494              : 
    2495            0 :     pub async fn set_role_grants(
    2496            0 :         &self,
    2497            0 :         db_name: &PgIdent,
    2498            0 :         schema_name: &PgIdent,
    2499            0 :         privileges: &[Privilege],
    2500            0 :         role_name: &PgIdent,
    2501            0 :     ) -> Result<()> {
    2502              :         use tokio_postgres::NoTls;
    2503              : 
    2504            0 :         let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:set_role_grants"));
    2505            0 :         conf.dbname(db_name);
    2506              : 
    2507            0 :         let (db_client, conn) = conf
    2508            0 :             .connect(NoTls)
    2509            0 :             .await
    2510            0 :             .context("Failed to connect to the database")?;
    2511            0 :         tokio::spawn(conn);
    2512              : 
    2513              :         // TODO: support other types of grants apart from schemas?
    2514              : 
    2515              :         // Check the existing role grants first, to gracefully handle read replicas.
    2516            0 :         let select = "SELECT privilege_type
    2517            0 :             FROM pg_catalog.pg_namespace
    2518            0 :                 JOIN LATERAL (SELECT * FROM aclexplode(nspacl) AS x) AS acl ON true
    2519            0 :                 JOIN pg_catalog.pg_user users ON acl.grantee = users.usesysid
    2520            0 :             WHERE users.usename OPERATOR(pg_catalog.=) $1::pg_catalog.name
    2521            0 :                 AND nspname OPERATOR(pg_catalog.=) $2::pg_catalog.name";
    2522            0 :         let rows = db_client
    2523            0 :             .query(select, &[role_name, schema_name])
    2524            0 :             .await
    2525            0 :             .with_context(|| format!("Failed to execute query: {select}"))?;
    2526              : 
    2527            0 :         let already_granted: HashSet<String> = rows.into_iter().map(|row| row.get(0)).collect();
    2528              : 
    2529            0 :         let grants = privileges
    2530            0 :             .iter()
    2531            0 :             .filter(|p| !already_granted.contains(p.as_str()))
    2532              :             // The privilege names are not quoted, as they are part of the command
    2533              :             // itself and have already been sanitized.
    2534            0 :             .map(|p| p.as_str())
    2535            0 :             .join(", ");
    2536              : 
    2537            0 :         if !grants.is_empty() {
    2538              :             // quote the schema and role name as identifiers to sanitize them.
    2539            0 :             let schema_name = schema_name.pg_quote();
    2540            0 :             let role_name = role_name.pg_quote();
    2541              : 
    2542            0 :             let query = format!("GRANT {grants} ON SCHEMA {schema_name} TO {role_name}",);
    2543            0 :             db_client
    2544            0 :                 .simple_query(&query)
    2545            0 :                 .await
    2546            0 :                 .with_context(|| format!("Failed to execute query: {query}"))?;
    2547            0 :         }
    2548              : 
    2549            0 :         Ok(())
    2550            0 :     }
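                      : 
                      :     // For example (identifiers illustrative): with privileges [CREATE, USAGE],
                      :     // schema "app", role "reader", and no existing grants, the statement built
                      :     // above would be:
                      :     //
                      :     //     GRANT CREATE, USAGE ON SCHEMA "app" TO "reader"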
    2551              : 
    2552            0 :     pub async fn install_extension(
    2553            0 :         &self,
    2554            0 :         ext_name: &PgIdent,
    2555            0 :         db_name: &PgIdent,
    2556            0 :         ext_version: ExtVersion,
    2557            0 :     ) -> Result<ExtVersion> {
    2558              :         use tokio_postgres::NoTls;
    2559              : 
    2560            0 :         let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:install_extension"));
    2561            0 :         conf.dbname(db_name);
    2562              : 
    2563            0 :         let (db_client, conn) = conf
    2564            0 :             .connect(NoTls)
    2565            0 :             .await
    2566            0 :             .context("Failed to connect to the database")?;
    2567            0 :         tokio::spawn(conn);
    2568              : 
    2569            0 :         let version_query = "SELECT extversion FROM pg_extension WHERE extname = $1";
    2570            0 :         let version: Option<ExtVersion> = db_client
    2571            0 :             .query_opt(version_query, &[&ext_name])
    2572            0 :             .await
    2573            0 :             .with_context(|| format!("Failed to execute query: {version_query}"))?
    2574            0 :             .map(|row| row.get(0));
    2575              : 
    2576              :         // sanitize the inputs as postgres idents.
    2577            0 :         let ext_name: String = ext_name.pg_quote();
    2578            0 :         let quoted_version: String = ext_version.pg_quote();
    2579              : 
    2580            0 :         if let Some(installed_version) = version {
    2581            0 :             if installed_version == ext_version {
    2582            0 :                 return Ok(installed_version);
    2583            0 :             }
    2584            0 :             let query = format!("ALTER EXTENSION {ext_name} UPDATE TO {quoted_version}");
    2585            0 :             db_client
    2586            0 :                 .simple_query(&query)
    2587            0 :                 .await
    2588            0 :                 .with_context(|| format!("Failed to execute query: {query}"))?;
    2589              :         } else {
    2590            0 :             let query = format!(
    2591            0 :                 "CREATE EXTENSION IF NOT EXISTS {ext_name} WITH SCHEMA public VERSION {quoted_version}"
    2592              :             );
    2593            0 :             db_client
    2594            0 :                 .simple_query(&query)
    2595            0 :                 .await
    2596            0 :                 .with_context(|| format!("Failed to execute query: {query}"))?;
    2597              :         }
    2598              : 
    2599            0 :         Ok(ext_version)
    2600            0 :     }
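                      : 
                      :     // Illustrative statements produced above (extension name and version are
                      :     // hypothetical):
                      :     //
                      :     //     ALTER EXTENSION "pg_trgm" UPDATE TO "1.6"
                      :     //     CREATE EXTENSION IF NOT EXISTS "pg_trgm" WITH SCHEMA public VERSION "1.6"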
    2601              : 
    2602            0 :     pub async fn prepare_preload_libraries(
    2603            0 :         &self,
    2604            0 :         spec: &ComputeSpec,
    2605            0 :     ) -> Result<RemoteExtensionMetrics> {
    2606            0 :         if self.params.remote_ext_base_url.is_none() {
    2607            0 :             return Ok(RemoteExtensionMetrics {
    2608            0 :                 num_ext_downloaded: 0,
    2609            0 :                 largest_ext_size: 0,
    2610            0 :                 total_ext_download_size: 0,
    2611            0 :             });
    2612            0 :         }
    2613            0 :         let remote_extensions = spec
    2614            0 :             .remote_extensions
    2615            0 :             .as_ref()
    2616            0 :             .ok_or(anyhow::anyhow!("Remote extensions are not configured"))?;
    2617              : 
    2618            0 :         info!("parse shared_preload_libraries from spec.cluster.settings");
    2619            0 :         let mut libs_vec = Vec::new();
    2620            0 :         if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
    2621            0 :             libs_vec = libs
    2622            0 :                 .split(&[',', '\'', ' '])
    2623            0 :                 .filter(|s| *s != "neon" && *s != "databricks_auth" && !s.is_empty())
    2624            0 :                 .map(str::to_string)
    2625            0 :                 .collect();
    2626            0 :         }
    2627            0 :         info!("parse shared_preload_libraries from provided postgresql.conf");
    2628              : 
    2629              :         // This path is used by neon_local and the Python tests
    2630            0 :         if let Some(conf) = &spec.cluster.postgresql_conf {
    2631            0 :             let conf_lines = conf.split('\n').collect::<Vec<&str>>();
    2632            0 :             let mut shared_preload_libraries_line = "";
    2633            0 :             for line in conf_lines {
    2634            0 :                 if line.starts_with("shared_preload_libraries") {
    2635            0 :                     shared_preload_libraries_line = line;
    2636            0 :                 }
    2637              :             }
    2638            0 :             let mut preload_libs_vec = Vec::new();
    2639            0 :             if let Some(libs) = shared_preload_libraries_line.split("='").nth(1) {
    2640            0 :                 preload_libs_vec = libs
    2641            0 :                     .split(&[',', '\'', ' '])
    2642            0 :                     .filter(|s| *s != "neon" && *s != "databricks_auth" && !s.is_empty())
    2643            0 :                     .map(str::to_string)
    2644            0 :                     .collect();
    2645            0 :             }
    2646            0 :             libs_vec.extend(preload_libs_vec);
    2647            0 :         }
    2648              : 
    2649              :         // Don't try to download libraries that are not in the index.
    2650              :         // Assume that they are already present locally.
    2651            0 :         libs_vec.retain(|lib| remote_extensions.library_index.contains_key(lib));
    2652              : 
    2653            0 :         info!("Downloading to shared preload libraries: {:?}", &libs_vec);
    2654              : 
    2655            0 :         let mut download_tasks = Vec::new();
    2656            0 :         for library in &libs_vec {
    2657            0 :             let (ext_name, ext_path) =
    2658            0 :                 remote_extensions.get_ext(library, true, &BUILD_TAG, &self.params.pgversion)?;
    2659            0 :             download_tasks.push(self.download_extension(ext_name, ext_path));
    2660              :         }
    2661            0 :         let results = join_all(download_tasks).await;
    2662              : 
    2663            0 :         let mut remote_ext_metrics = RemoteExtensionMetrics {
    2664            0 :             num_ext_downloaded: 0,
    2665            0 :             largest_ext_size: 0,
    2666            0 :             total_ext_download_size: 0,
    2667            0 :         };
    2668            0 :         for result in results {
    2669            0 :             let download_size = match result {
    2670            0 :                 Ok(res) => {
    2671            0 :                     remote_ext_metrics.num_ext_downloaded += 1;
    2672            0 :                     res
    2673              :                 }
    2674            0 :                 Err(err) => {
    2675              :                     // if we failed to download an extension, we don't want to fail the whole
    2676              :                     // process, but we do want to log the error
    2677            0 :                     error!("Failed to download extension: {}", err);
    2678            0 :                     0
    2679              :                 }
    2680              :             };
    2681              : 
    2682            0 :             remote_ext_metrics.largest_ext_size =
    2683            0 :                 std::cmp::max(remote_ext_metrics.largest_ext_size, download_size);
    2684            0 :             remote_ext_metrics.total_ext_download_size += download_size;
    2685              :         }
    2686            0 :         Ok(remote_ext_metrics)
    2687            0 :     }
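                      :     // A hedged worked example of the metrics fold above: three downloads returning
                      :     // Ok(5_000_000), Err(_), and Ok(2_000_000) yield num_ext_downloaded = 2,
                      :     // largest_ext_size = 5_000_000, and total_ext_download_size = 7_000_000;
                      :     // a failed download contributes 0 bytes and does not abort the loop.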
    2688              : 
    2689              :     /// Waits until the current thread receives a state change notification and
    2690              :     /// the pageserver connection info has changed.
    2691              :     ///
    2692              :     /// The wait times out after the specified duration.
    2693            0 :     pub fn wait_timeout_while_pageserver_connstr_unchanged(&self, duration: Duration) {
    2694            0 :         let state = self.state.lock().unwrap();
    2695            0 :         let old_pageserver_conninfo = state
    2696            0 :             .pspec
    2697            0 :             .as_ref()
    2698            0 :             .expect("spec must be set")
    2699            0 :             .pageserver_conninfo
    2700            0 :             .clone();
    2701            0 :         let mut unchanged = true;
    2702            0 :         let _ = self
    2703            0 :             .state_changed
    2704            0 :             .wait_timeout_while(state, duration, |s| {
    2705            0 :                 let pageserver_conninfo = &s
    2706            0 :                     .pspec
    2707            0 :                     .as_ref()
    2708            0 :                     .expect("spec must be set")
    2709            0 :                     .pageserver_conninfo;
    2710            0 :                 unchanged = pageserver_conninfo == &old_pageserver_conninfo;
    2711            0 :                 unchanged
    2712            0 :             })
    2713            0 :             .unwrap();
    2714            0 :         if !unchanged {
    2715            0 :             info!("Pageserver config changed");
    2716            0 :         }
    2717            0 :     }
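                      :     // A hedged usage sketch (the caller loop is illustrative, not code from this
                      :     // file):
                      :     //
                      :     //     loop {
                      :     //         compute.wait_timeout_while_pageserver_connstr_unchanged(Duration::from_secs(30));
                      :     //         // Either the timeout elapsed or pageserver_conninfo changed;
                      :     //         // re-read the spec and reconfigure pageserver connections if needed.
                      :     //     }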
    2718              : 
    2719            0 :     pub fn spawn_extension_stats_task(&self) {
    2720            0 :         self.terminate_extension_stats_task();
    2721              : 
    2722            0 :         let conf = self.tokio_conn_conf.clone();
    2723            0 :         let atomic_interval = self.params.installed_extensions_collection_interval.clone();
    2724            0 :         let mut installed_extensions_collection_interval =
    2725            0 :             2 * atomic_interval.load(std::sync::atomic::Ordering::SeqCst);
    2726            0 :         info!(
    2727            0 :             "[NEON_EXT_SPAWN] Spawning background installed extensions worker with interval: {}s",
    2728              :             installed_extensions_collection_interval
    2729              :         );
    2730            0 :         let handle = tokio::spawn(async move {
    2731              :             loop {
    2732            0 :                 info!(
    2733            0 :                     "[NEON_EXT_INT_SLEEP]: Interval: {}",
    2734              :                     installed_extensions_collection_interval
    2735              :                 );
    2736              :                 // Sleep at the start of the loop to ensure that two collections don't happen at the same time.
    2737              :                 // The first collection happens during compute startup.
    2738            0 :                 tokio::time::sleep(tokio::time::Duration::from_secs(
    2739            0 :                     installed_extensions_collection_interval,
    2740            0 :                 ))
    2741            0 :                 .await;
    2742            0 :                 let _ = installed_extensions(conf.clone()).await;
    2743              :                 // Re-read the configured interval from the shared atomic (updated on spec changes) and grow ours if it increased
    2744            0 :                 installed_extensions_collection_interval = std::cmp::max(
    2745            0 :                     installed_extensions_collection_interval,
    2746            0 :                     2 * atomic_interval.load(std::sync::atomic::Ordering::SeqCst),
    2747            0 :                 );
    2748              :             }
    2749              :         });
    2750              : 
    2751              :         // Store the new task handle
    2752            0 :         *self.extension_stats_task.lock().unwrap() = Some(handle);
    2753            0 :     }
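                      :     // Respawning is safe: a hedged sketch of the lifecycle, assuming `compute`
                      :     // is an Arc<ComputeNode>:
                      :     //
                      :     //     compute.spawn_extension_stats_task(); // aborts any previous collector, starts a new one
                      :     //     compute.spawn_extension_stats_task(); // idempotent on reconfiguration
                      :     //
                      :     // so at most one collector task runs per compute at any time.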
    2754              : 
    2755            0 :     fn terminate_extension_stats_task(&self) {
    2756            0 :         if let Some(h) = self.extension_stats_task.lock().unwrap().take() {
    2757            0 :             h.abort()
    2758            0 :         }
    2759            0 :     }
    2760              : 
    2761            0 :     pub fn spawn_lfc_offload_task(self: &Arc<Self>, interval: Duration) {
    2762            0 :         self.terminate_lfc_offload_task();
    2763            0 :         let secs = interval.as_secs();
    2764            0 :         let this = self.clone();
    2765              : 
    2766            0 :         info!("spawning LFC offload worker with {secs}s interval");
    2767            0 :         let handle = spawn(async move {
    2768            0 :             let mut interval = time::interval(interval);
    2769            0 :             interval.tick().await; // returns immediately
    2770              :             loop {
    2771            0 :                 interval.tick().await;
    2772              : 
    2773            0 :                 let prewarm_state = this.state.lock().unwrap().lfc_prewarm_state.clone();
    2774              :                 // Do not offload the LFC state if we are currently prewarming or if prewarming
    2775              :                 // failed. Doing so could overwrite the LFC state in endpoint storage with an
    2776              :                 // incomplete one. Imagine this situation:
    2777              :                 // 1. The endpoint starts with `autoprewarm: true`
    2778              :                 // 2. Before prewarming completes, we upload the new, incomplete state
    2779              :                 // 3. The compute gets interrupted and restarts
    2780              :                 // 4. We start again and prewarm with the state from step 2 instead of the previous complete state
    2781            0 :                 if matches!(
    2782            0 :                     prewarm_state,
    2783              :                     LfcPrewarmState::Completed { .. }
    2784              :                         | LfcPrewarmState::NotPrewarmed
    2785              :                         | LfcPrewarmState::Skipped
    2786              :                 ) {
    2787            0 :                     this.offload_lfc_async().await;
    2788            0 :                 }
    2789              :             }
    2790              :         });
    2791            0 :         *self.lfc_offload_task.lock().unwrap() = Some(handle);
    2792            0 :     }
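                      :     // tokio's time::interval completes its first tick immediately, which is why
                      :     // the task above ticks once before entering the loop: the first offload then
                      :     // happens a full `interval` after spawn. A minimal standalone sketch of the
                      :     // same pattern:
                      :     //
                      :     //     let mut iv = time::interval(Duration::from_secs(60));
                      :     //     iv.tick().await;     // completes immediately
                      :     //     loop {
                      :     //         iv.tick().await; // fires every 60s thereafter
                      :     //         offload().await; // hypothetical worker
                      :     //     }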
    2793              : 
    2794            0 :     fn terminate_lfc_offload_task(&self) {
    2795            0 :         if let Some(h) = self.lfc_offload_task.lock().unwrap().take() {
    2796            0 :             h.abort()
    2797            0 :         }
    2798            0 :     }
    2799              : 
    2800            0 :     fn update_installed_extensions_collection_interval(&self, spec: &ComputeSpec) {
    2801              :         // Update the interval for collecting installed-extensions statistics.
    2802              :         // If the value is -1, the endpoint never suspends, so fall back to the default collection interval.
    2803              :         // If the value is 0, it means "use the default suspend timeout", so keep the default interval as well.
    2804            0 :         if spec.suspend_timeout_seconds == -1 || spec.suspend_timeout_seconds == 0 {
    2805            0 :             self.params.installed_extensions_collection_interval.store(
    2806            0 :                 DEFAULT_INSTALLED_EXTENSIONS_COLLECTION_INTERVAL,
    2807            0 :                 std::sync::atomic::Ordering::SeqCst,
    2808            0 :             );
    2809            0 :         } else {
    2810            0 :             self.params.installed_extensions_collection_interval.store(
    2811            0 :                 spec.suspend_timeout_seconds as u64,
    2812            0 :                 std::sync::atomic::Ordering::SeqCst,
    2813            0 :             );
    2814            0 :         }
    2815            0 :     }
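                      :     // Summarizing the branches above, the stored interval is:
                      :     //   suspend_timeout_seconds == -1  ->  DEFAULT_INSTALLED_EXTENSIONS_COLLECTION_INTERVAL
                      :     //   suspend_timeout_seconds ==  0  ->  DEFAULT_INSTALLED_EXTENSIONS_COLLECTION_INTERVAL
                      :     //   suspend_timeout_seconds ==  s  ->  s seconds (stored as u64)
                      :     // The extension stats task then sleeps for twice the stored value between collections.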
    2816              : 
    2817              :     /// Set the compute spec and update related metrics.
    2818              :     /// This is the central place where pspec is updated.
    2819            0 :     pub fn set_spec(params: &ComputeNodeParams, state: &mut ComputeState, pspec: ParsedSpec) {
    2820            0 :         state.pspec = Some(pspec);
    2821            0 :         ComputeNode::update_attached_metric(params, state);
    2822            0 :         let _ = logger::update_ids(&params.instance_id, &Some(params.compute_id.clone()));
    2823            0 :     }
    2824              : 
    2825            0 :     pub fn update_attached_metric(params: &ComputeNodeParams, state: &mut ComputeState) {
    2826              :         // Update the pg_cctl_attached gauge when all identifiers are available.
    2827            0 :         if let Some(instance_id) = &params.instance_id {
    2828            0 :             if let Some(pspec) = &state.pspec {
    2829            0 :                 // Clear all values in the metric
    2830            0 :                 COMPUTE_ATTACHED.reset();
    2831            0 : 
    2832            0 :                 // Set new metric value
    2833            0 :                 COMPUTE_ATTACHED
    2834            0 :                     .with_label_values(&[
    2835            0 :                         &params.compute_id,
    2836            0 :                         instance_id,
    2837            0 :                         &pspec.tenant_id.to_string(),
    2838            0 :                         &pspec.timeline_id.to_string(),
    2839            0 :                     ])
    2840            0 :                     .set(1);
    2841            0 :             }
    2842            0 :         }
    2843            0 :     }
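                      :     // COMPUTE_ATTACHED is reset before being set so that at most one label set
                      :     // (compute_id, instance_id, tenant_id, timeline_id) reports 1 at a time;
                      :     // without the reset, attaching to a new timeline would leave the old series at 1.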
    2844              : }
    2845              : 
    2846            0 : pub async fn installed_extensions(conf: tokio_postgres::Config) -> Result<()> {
    2847            0 :     let res = get_installed_extensions(conf).await;
    2848            0 :     match res {
    2849            0 :         Ok(extensions) => {
    2850            0 :             info!(
    2851            0 :                 "[NEON_EXT_STAT] {}",
    2852            0 :                 serde_json::to_string(&extensions).expect("failed to serialize extensions list")
    2853              :             );
    2854              :         }
    2855            0 :         Err(err) => error!("could not get installed extensions: {err}"),
    2856              :     }
    2857            0 :     Ok(())
    2858            0 : }
    2859              : 
    2860            0 : pub fn forward_termination_signal(dev_mode: bool) {
    2861            0 :     let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
    2862            0 :     if ss_pid != 0 {
    2863            0 :         let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
    2864            0 :         kill(ss_pid, Signal::SIGTERM).ok();
    2865            0 :     }
    2866              : 
    2867            0 :     if !dev_mode {
    2868              :         // Terminate pgbouncer with SIGKILL
    2869            0 :         match pid_file::read(PGBOUNCER_PIDFILE.into()) {
    2870            0 :             Ok(pid_file::PidFileRead::LockedByOtherProcess(pid)) => {
    2871            0 :                 info!("sending SIGKILL to pgbouncer process pid: {}", pid);
    2872            0 :                 if let Err(e) = kill(pid, Signal::SIGKILL) {
    2873            0 :                     error!("failed to terminate pgbouncer: {}", e);
    2874            0 :                 }
    2875              :             }
    2876              :             // pgbouncer does not lock the pid file, so we read and kill the process directly
    2877              :             Ok(pid_file::PidFileRead::NotHeldByAnyProcess(_)) => {
    2878            0 :                 if let Ok(pid_str) = std::fs::read_to_string(PGBOUNCER_PIDFILE) {
    2879            0 :                     if let Ok(pid) = pid_str.trim().parse::<i32>() {
    2880            0 :                         info!(
    2881            0 :                             "sending SIGKILL to pgbouncer process pid: {} (from unlocked pid file)",
    2882              :                             pid
    2883              :                         );
    2884            0 :                         if let Err(e) = kill(Pid::from_raw(pid), Signal::SIGKILL) {
    2885            0 :                             error!("failed to terminate pgbouncer: {}", e);
    2886            0 :                         }
    2887            0 :                     }
    2888              :                 } else {
    2889            0 :                     info!("pgbouncer pid file exists but process not running");
    2890              :                 }
    2891              :             }
    2892              :             Ok(pid_file::PidFileRead::NotExist) => {
    2893            0 :                 info!("pgbouncer pid file not found, process may not be running");
    2894              :             }
    2895            0 :             Err(e) => {
    2896            0 :                 error!("error reading pgbouncer pid file: {}", e);
    2897              :             }
    2898              :         }
    2899              : 
    2900              :         // Terminate local_proxy
    2901            0 :         match pid_file::read("/etc/local_proxy/pid".into()) {
    2902            0 :             Ok(pid_file::PidFileRead::LockedByOtherProcess(pid)) => {
    2903            0 :                 info!("sending SIGTERM to local_proxy process pid: {}", pid);
    2904            0 :                 if let Err(e) = kill(pid, Signal::SIGTERM) {
    2905            0 :                     error!("failed to terminate local_proxy: {}", e);
    2906            0 :                 }
    2907              :             }
    2908              :             Ok(pid_file::PidFileRead::NotHeldByAnyProcess(_)) => {
    2909            0 :                 info!("local_proxy PID file exists but process not running");
    2910              :             }
    2911              :             Ok(pid_file::PidFileRead::NotExist) => {
    2912            0 :                 info!("local_proxy PID file not found, process may not be running");
    2913              :             }
    2914            0 :             Err(e) => {
    2915            0 :                 error!("error reading local_proxy PID file: {}", e);
    2916              :             }
    2917              :         }
    2918              :     } else {
    2919            0 :         info!("Skipping pgbouncer and local_proxy termination because in dev mode");
    2920              :     }
    2921              : 
    2922            0 :     let pg_pid = PG_PID.load(Ordering::SeqCst);
    2923            0 :     if pg_pid != 0 {
    2924            0 :         let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
    2925            0 :         // Use 'fast' shutdown (SIGINT) because it also creates a shutdown checkpoint, which is important for
    2926            0 :         // read-only replicas (ROs) to get a list of running xacts faster instead of going through the CLOG.
    2927            0 :         // See https://www.postgresql.org/docs/current/server-shutdown.html for the list of modes and signals.
    2928            0 :         kill(pg_pid, Signal::SIGINT).ok();
    2929            0 :     }
    2930            0 : }
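                      : // For reference, the Postgres shutdown modes and their signals (see the docs
                      : // linked above); this function uses 'fast':
                      : //   smart     -> SIGTERM  (wait for sessions to finish, then shut down)
                      : //   fast      -> SIGINT   (abort transactions, write a shutdown checkpoint)
                      : //   immediate -> SIGQUIT  (no checkpoint; crash recovery on next startup)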
    2931              : 
    2932              : // Helper trait like JoinSet::spawn_blocking(f), but one that propagates the
    2933              : // current tracing span to the spawned thread.
    2934              : trait JoinSetExt<T> {
    2935              :     fn spawn_blocking_child<F>(&mut self, f: F) -> tokio::task::AbortHandle
    2936              :     where
    2937              :         F: FnOnce() -> T + Send + 'static,
    2938              :         T: Send;
    2939              : }
    2940              : 
    2941              : impl<T: 'static> JoinSetExt<T> for tokio::task::JoinSet<T> {
    2942            0 :     fn spawn_blocking_child<F>(&mut self, f: F) -> tokio::task::AbortHandle
    2943            0 :     where
    2944            0 :         F: FnOnce() -> T + Send + 'static,
    2945            0 :         T: Send,
    2946              :     {
    2947            0 :         let sp = tracing::Span::current();
    2948            0 :         self.spawn_blocking(move || {
    2949            0 :             let _e = sp.enter();
    2950            0 :             f()
    2951            0 :         })
    2952            0 :     }
    2953              : }
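                      : // A hedged usage sketch of spawn_blocking_child (the span name is illustrative):
                      : //
                      : //     let mut tasks = tokio::task::JoinSet::new();
                      : //     let span = tracing::info_span!("sync_safekeepers");
                      : //     let _guard = span.enter();
                      : //     tasks.spawn_blocking_child(|| {
                      : //         // Runs on the blocking pool, but this event is still recorded
                      : //         // inside the "sync_safekeepers" span captured at spawn time.
                      : //         tracing::info!("inside blocking task");
                      : //     });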
    2954              : 
    2955              : #[cfg(test)]
    2956              : mod tests {
    2957              :     use std::fs::File;
    2958              : 
    2959              :     use super::*;
    2960              : 
    2961              :     #[test]
    2962            1 :     fn duplicate_safekeeper_connstring() {
    2963            1 :         let file = File::open("tests/cluster_spec.json").unwrap();
    2964            1 :         let spec: ComputeSpec = serde_json::from_reader(file).unwrap();
    2965              : 
    2966            1 :         match ParsedSpec::try_from(spec.clone()) {
    2967            0 :             Ok(_p) => panic!("Failed to detect duplicate entry"),
    2968            1 :             Err(e) => assert!(
    2969            1 :                 e.to_string()
    2970            1 :                     .starts_with("duplicate entry in safekeeper_connstrings:")
    2971              :             ),
    2972              :         };
    2973            1 :     }
    2974              : }
        

Generated by: LCOV version 2.1-beta