LCOV - code coverage report
Current view: top level - compute_tools/src - compute.rs (source / functions)
Test: 07bee600374ccd486c69370d0972d9035964fe68.info
Test Date: 2025-02-20 13:11:02
Coverage: Lines: 0.0 % (0 of 1068), Functions: 0.0 % (0 of 94)

            Line data    Source code
       1              : use std::collections::{HashMap, HashSet};
       2              : use std::env;
       3              : use std::fs;
       4              : use std::iter::once;
       5              : use std::os::unix::fs::{symlink, PermissionsExt};
       6              : use std::path::Path;
       7              : use std::process::{Command, Stdio};
       8              : use std::str::FromStr;
       9              : use std::sync::atomic::AtomicU32;
      10              : use std::sync::atomic::Ordering;
      11              : use std::sync::{Arc, Condvar, Mutex, RwLock};
      12              : use std::time::Duration;
      13              : use std::time::Instant;
      14              : 
      15              : use anyhow::{Context, Result};
      16              : use chrono::{DateTime, Utc};
      17              : use compute_api::spec::{Database, PgIdent, Role};
      18              : use futures::future::join_all;
      19              : use futures::stream::FuturesUnordered;
      20              : use futures::StreamExt;
      21              : use nix::unistd::Pid;
      22              : use postgres;
      23              : use postgres::error::SqlState;
      24              : use postgres::NoTls;
      25              : use tracing::{debug, error, info, instrument, warn};
      26              : use utils::id::{TenantId, TimelineId};
      27              : use utils::lsn::Lsn;
      28              : 
      29              : use compute_api::privilege::Privilege;
      30              : use compute_api::responses::{ComputeMetrics, ComputeStatus};
      31              : use compute_api::spec::{ComputeFeature, ComputeMode, ComputeSpec, ExtVersion};
      32              : use utils::measured_stream::MeasuredReader;
      33              : 
      34              : use nix::sys::signal::{kill, Signal};
      35              : use remote_storage::{DownloadError, RemotePath};
      36              : use tokio::spawn;
      37              : 
      38              : use crate::installed_extensions::get_installed_extensions;
      39              : use crate::local_proxy;
      40              : use crate::pg_helpers::*;
      41              : use crate::spec::*;
      42              : use crate::spec_apply::ApplySpecPhase::{
      43              :     CreateAndAlterDatabases, CreateAndAlterRoles, CreateAvailabilityCheck, CreateSchemaNeon,
      44              :     CreateSuperUser, DropInvalidDatabases, DropRoles, FinalizeDropLogicalSubscriptions,
      45              :     HandleNeonExtension, HandleOtherExtensions, RenameAndDeleteDatabases, RenameRoles,
      46              :     RunInEachDatabase,
      47              : };
      48              : use crate::spec_apply::PerDatabasePhase;
      49              : use crate::spec_apply::PerDatabasePhase::{
      50              :     ChangeSchemaPerms, DeleteDBRoleReferences, DropLogicalSubscriptions, HandleAnonExtension,
      51              : };
      52              : use crate::spec_apply::{apply_operations, MutableApplyContext, DB};
      53              : use crate::sync_sk::{check_if_synced, ping_safekeeper};
      54              : use crate::{config, extension_server};
      55              : 
      56              : pub static SYNC_SAFEKEEPERS_PID: AtomicU32 = AtomicU32::new(0);
      57              : pub static PG_PID: AtomicU32 = AtomicU32::new(0);
      58              : 
      59              : /// Compute node info shared across several `compute_ctl` threads.
      60              : pub struct ComputeNode {
      61              :     /// The ID of the compute
      62              :     pub compute_id: String,
      63              :     // Url type maintains proper escaping
      64              :     pub connstr: url::Url,
      65              :     // We connect to Postgres from many different places, so build configs once
      66              :     // and reuse them where needed.
      67              :     pub conn_conf: postgres::config::Config,
      68              :     pub tokio_conn_conf: tokio_postgres::config::Config,
      69              :     pub pgdata: String,
      70              :     pub pgbin: String,
      71              :     pub pgversion: String,
      72              :     /// We should only allow live (re)configuration of the compute node if
      73              :     /// it uses the 'pull model', i.e. it can go to the control plane and fetch
      74              :     /// the latest configuration. Otherwise, there could be a case:
      75              :     /// - we start compute with some spec provided as argument
      76              :     /// - we push a new spec and it triggers reconfiguration
      77              :     /// - but then something happens and the compute pod / VM is destroyed,
      78              :     ///   so the k8s controller starts it again with the **old** spec
      79              :     ///
      80              :     /// and the same happens for empty computes:
      81              :     /// - we start compute without any spec
      82              :     /// - we push a spec and it triggers configuration
      83              :     /// - but then it is restarted without any spec again
      84              :     pub live_config_allowed: bool,
      85              :     /// The port that the compute's external HTTP server listens on
      86              :     pub external_http_port: u16,
      87              :     /// The port that the compute's internal HTTP server listens on
      88              :     pub internal_http_port: u16,
      89              :     /// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
      90              :     /// To allow the HTTP API server to serve status requests while
      91              :     /// configuration is in progress, the lock should be held only for short
      92              :     /// periods of time to do reads/writes, not for the whole configuration process.
      93              :     pub state: Mutex<ComputeState>,
      94              :     /// `Condvar` to allow notifying waiters about state changes.
      95              :     pub state_changed: Condvar,
      96              :     /// The address of the extension storage proxy gateway.
      97              :     pub ext_remote_storage: Option<String>,
      98              :     // key: ext_archive_name, value: (download start time, download_completed?)
      99              :     pub ext_download_progress: RwLock<HashMap<String, (DateTime<Utc>, bool)>>,
     100              :     pub build_tag: String,
     101              : }
     102              : 
     103              : // Store some metrics about download sizes that might impact startup time.
     104              : #[derive(Clone, Debug)]
     105              : pub struct RemoteExtensionMetrics {
     106              :     num_ext_downloaded: u64,
     107              :     largest_ext_size: u64,
     108              :     total_ext_download_size: u64,
     109              : }
     110              : 
     111              : #[derive(Clone, Debug)]
     112              : pub struct ComputeState {
     113              :     pub start_time: DateTime<Utc>,
     114              :     pub status: ComputeStatus,
     115              :     /// Timestamp of the last Postgres activity. It could be `None` if
     116              :     /// the compute hasn't been used since start.
     117              :     pub last_active: Option<DateTime<Utc>>,
     118              :     pub error: Option<String>,
     119              :     pub pspec: Option<ParsedSpec>,
     120              :     pub metrics: ComputeMetrics,
     121              : }
     122              : 
     123              : impl ComputeState {
     124            0 :     pub fn new() -> Self {
     125            0 :         Self {
     126            0 :             start_time: Utc::now(),
     127            0 :             status: ComputeStatus::Empty,
     128            0 :             last_active: None,
     129            0 :             error: None,
     130            0 :             pspec: None,
     131            0 :             metrics: ComputeMetrics::default(),
     132            0 :         }
     133            0 :     }
     134              : 
     135            0 :     pub fn set_status(&mut self, status: ComputeStatus, state_changed: &Condvar) {
     136            0 :         let prev = self.status;
     137            0 :         info!("Changing compute status from {} to {}", prev, status);
     138            0 :         self.status = status;
     139            0 :         state_changed.notify_all();
     140            0 :     }
     141              : 
     142            0 :     pub fn set_failed_status(&mut self, err: anyhow::Error, state_changed: &Condvar) {
     143            0 :         self.error = Some(format!("{err:?}"));
     144            0 :         self.set_status(ComputeStatus::Failed, state_changed);
     145            0 :     }
     146              : }
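                      : // The `state` mutex and `state_changed` condvar on `ComputeNode` enable a
                      : // simple wait-for-status pattern. A minimal sketch, assuming a hypothetical
                      : // waiter that blocks until the compute leaves the `Empty` status:
                      : //
                      : //     fn wait_until_not_empty(compute: &ComputeNode) -> ComputeStatus {
                      : //         let mut state = compute.state.lock().unwrap();
                      : //         while state.status == ComputeStatus::Empty {
                      : //             state = compute.state_changed.wait(state).unwrap();
                      : //         }
                      : //         state.status
                      : //     }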
     147              : 
     148              : impl Default for ComputeState {
     149            0 :     fn default() -> Self {
     150            0 :         Self::new()
     151            0 :     }
     152              : }
     153              : 
     154              : #[derive(Clone, Debug)]
     155              : pub struct ParsedSpec {
     156              :     pub spec: ComputeSpec,
     157              :     pub tenant_id: TenantId,
     158              :     pub timeline_id: TimelineId,
     159              :     pub pageserver_connstr: String,
     160              :     pub safekeeper_connstrings: Vec<String>,
     161              :     pub storage_auth_token: Option<String>,
     162              : }
     163              : 
     164              : impl TryFrom<ComputeSpec> for ParsedSpec {
     165              :     type Error = String;
     166            0 :     fn try_from(spec: ComputeSpec) -> Result<Self, String> {
     167              :         // Extract the options from the spec file that are needed to connect to
     168              :         // the storage system.
     169              :         //
     170              :         // For backwards-compatibility, the top-level fields in the spec file
     171              :         // may be empty. In that case, we need to dig them from the GUCs in the
     172              :         // cluster.settings field.
     173            0 :         let pageserver_connstr = spec
     174            0 :             .pageserver_connstring
     175            0 :             .clone()
     176            0 :             .or_else(|| spec.cluster.settings.find("neon.pageserver_connstring"))
     177            0 :             .ok_or("pageserver connstr should be provided")?;
     178            0 :         let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() {
     179            0 :             if matches!(spec.mode, ComputeMode::Primary) {
     180            0 :                 spec.cluster
     181            0 :                     .settings
     182            0 :                     .find("neon.safekeepers")
     183            0 :                     .ok_or("safekeeper connstrings should be provided")?
     184            0 :                     .split(',')
     185            0 :                     .map(|str| str.to_string())
     186            0 :                     .collect()
     187              :             } else {
     188            0 :                 vec![]
     189              :             }
     190              :         } else {
     191            0 :             spec.safekeeper_connstrings.clone()
     192              :         };
     193            0 :         let storage_auth_token = spec.storage_auth_token.clone();
     194            0 :         let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
     195            0 :             tenant_id
     196              :         } else {
     197            0 :             spec.cluster
     198            0 :                 .settings
     199            0 :                 .find("neon.tenant_id")
     200            0 :                 .ok_or("tenant id should be provided")
     201            0 :                 .map(|s| TenantId::from_str(&s))?
     202            0 :                 .or(Err("invalid tenant id"))?
     203              :         };
     204            0 :         let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
     205            0 :             timeline_id
     206              :         } else {
     207            0 :             spec.cluster
     208            0 :                 .settings
     209            0 :                 .find("neon.timeline_id")
     210            0 :                 .ok_or("timeline id should be provided")
     211            0 :                 .map(|s| TimelineId::from_str(&s))?
     212            0 :                 .or(Err("invalid timeline id"))?
     213              :         };
     214              : 
     215            0 :         Ok(ParsedSpec {
     216            0 :             spec,
     217            0 :             pageserver_connstr,
     218            0 :             safekeeper_connstrings,
     219            0 :             storage_auth_token,
     220            0 :             tenant_id,
     221            0 :             timeline_id,
     222            0 :         })
     223            0 :     }
     224              : }
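                      : // A minimal usage sketch: a `ComputeSpec` (deserialized elsewhere) is turned
                      : // into a `ParsedSpec` via this `TryFrom` impl, e.g.:
                      : //
                      : //     let pspec = ParsedSpec::try_from(spec)
                      : //         .map_err(|msg| anyhow::anyhow!(msg))?;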
     225              : 
     226              : /// If we are a VM, returns a [`Command`] that will run in the `neon-postgres`
     227              : /// cgroup. Otherwise returns the default `Command::new(cmd)`
     228              : ///
     229              : /// This function should be used to start postgres, as it will start it in the
     230              : /// neon-postgres cgroup if we are a VM. This allows autoscaling to control
     231              : /// postgres' resource usage. The cgroup will exist in VMs because vm-builder
     232              : /// creates it during the sysinit phase of its inittab.
     233            0 : fn maybe_cgexec(cmd: &str) -> Command {
     234            0 :     // The control plane sets this env var for autoscaling computes.
     235            0 :     // Use `var_os` so we don't have to worry about the variable being valid
     236            0 :     // Unicode. That should never be a concern, but just in case.
     237            0 :     if env::var_os("AUTOSCALING").is_some() {
     238            0 :         let mut command = Command::new("cgexec");
     239            0 :         command.args(["-g", "memory:neon-postgres"]);
     240            0 :         command.arg(cmd);
     241            0 :         command
     242              :     } else {
     243            0 :         Command::new(cmd)
     244              :     }
     245            0 : }
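                      : // A short usage sketch (the paths are hypothetical): with `AUTOSCALING` set,
                      : // this builds the equivalent of `cgexec -g memory:neon-postgres <cmd> ...`:
                      : //
                      : //     let mut cmd = maybe_cgexec("/usr/local/bin/postgres");
                      : //     cmd.args(["-D", "/var/db/pgdata"]);
                      : //     let child = cmd.spawn()?;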
     246              : 
     247            0 : pub(crate) fn construct_superuser_query(spec: &ComputeSpec) -> String {
     248            0 :     let roles = spec
     249            0 :         .cluster
     250            0 :         .roles
     251            0 :         .iter()
     252            0 :         .map(|r| escape_literal(&r.name))
     253            0 :         .collect::<Vec<_>>();
     254            0 : 
     255            0 :     let dbs = spec
     256            0 :         .cluster
     257            0 :         .databases
     258            0 :         .iter()
     259            0 :         .map(|db| escape_literal(&db.name))
     260            0 :         .collect::<Vec<_>>();
     261              : 
     262            0 :     let roles_decl = if roles.is_empty() {
     263            0 :         String::from("roles text[] := NULL;")
     264              :     } else {
     265            0 :         format!(
     266            0 :             r#"
     267            0 :                roles text[] := ARRAY(SELECT rolname
     268            0 :                                      FROM pg_catalog.pg_roles
     269            0 :                                      WHERE rolname IN ({}));"#,
     270            0 :             roles.join(", ")
     271            0 :         )
     272              :     };
     273              : 
     274            0 :     let database_decl = if dbs.is_empty() {
     275            0 :         String::from("dbs text[] := NULL;")
     276              :     } else {
     277            0 :         format!(
     278            0 :             r#"
     279            0 :                dbs text[] := ARRAY(SELECT datname
     280            0 :                                    FROM pg_catalog.pg_database
     281            0 :                                    WHERE datname IN ({}));"#,
     282            0 :             dbs.join(", ")
     283            0 :         )
     284              :     };
     285              : 
     286              :     // ALL PRIVILEGES grants CREATE, CONNECT, and TEMPORARY on all databases
     287              :     // (see https://www.postgresql.org/docs/current/ddl-priv.html)
     288            0 :     let query = format!(
     289            0 :         r#"
     290            0 :             DO $$
     291            0 :                 DECLARE
     292            0 :                     r text;
     293            0 :                     {}
     294            0 :                     {}
     295            0 :                 BEGIN
     296            0 :                     IF NOT EXISTS (
     297            0 :                         SELECT FROM pg_catalog.pg_roles WHERE rolname = 'neon_superuser')
     298            0 :                     THEN
     299            0 :                         CREATE ROLE neon_superuser CREATEDB CREATEROLE NOLOGIN REPLICATION BYPASSRLS IN ROLE pg_read_all_data, pg_write_all_data;
     300            0 :                         IF array_length(roles, 1) IS NOT NULL THEN
     301            0 :                             EXECUTE format('GRANT neon_superuser TO %s',
     302            0 :                                            array_to_string(ARRAY(SELECT quote_ident(x) FROM unnest(roles) as x), ', '));
     303            0 :                             FOREACH r IN ARRAY roles LOOP
     304            0 :                                 EXECUTE format('ALTER ROLE %s CREATEROLE CREATEDB', quote_ident(r));
     305            0 :                             END LOOP;
     306            0 :                         END IF;
     307            0 :                         IF array_length(dbs, 1) IS NOT NULL THEN
     308            0 :                             EXECUTE format('GRANT ALL PRIVILEGES ON DATABASE %s TO neon_superuser',
     309            0 :                                            array_to_string(ARRAY(SELECT quote_ident(x) FROM unnest(dbs) as x), ', '));
     310            0 :                         END IF;
     311            0 :                     END IF;
     312            0 :                 END
     313            0 :             $$;"#,
     314            0 :         roles_decl, database_decl,
     315            0 :     );
     316            0 : 
     317            0 :     query
     318            0 : }
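                      : // A sketch of how the generated DO block might be applied (the `client` setup
                      : // is assumed to exist elsewhere):
                      : //
                      : //     let query = construct_superuser_query(&spec);
                      : //     client.simple_query(&query)?;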
     319              : 
     320              : impl ComputeNode {
     321              :     /// Check that compute node has corresponding feature enabled.
     322            0 :     pub fn has_feature(&self, feature: ComputeFeature) -> bool {
     323            0 :         let state = self.state.lock().unwrap();
     324              : 
     325            0 :         if let Some(s) = state.pspec.as_ref() {
     326            0 :             s.spec.features.contains(&feature)
     327              :         } else {
     328            0 :             false
     329              :         }
     330            0 :     }
     331              : 
     332            0 :     pub fn set_status(&self, status: ComputeStatus) {
     333            0 :         let mut state = self.state.lock().unwrap();
     334            0 :         state.set_status(status, &self.state_changed);
     335            0 :     }
     336              : 
     337            0 :     pub fn set_failed_status(&self, err: anyhow::Error) {
     338            0 :         let mut state = self.state.lock().unwrap();
     339            0 :         state.set_failed_status(err, &self.state_changed);
     340            0 :     }
     341              : 
     342            0 :     pub fn get_status(&self) -> ComputeStatus {
     343            0 :         self.state.lock().unwrap().status
     344            0 :     }
     345              : 
     346            0 :     pub fn get_timeline_id(&self) -> Option<TimelineId> {
     347            0 :         self.state
     348            0 :             .lock()
     349            0 :             .unwrap()
     350            0 :             .pspec
     351            0 :             .as_ref()
     352            0 :             .map(|s| s.timeline_id)
     353            0 :     }
     354              : 
     355              :     // Remove the `pgdata` directory and create it again with the right permissions.
     356            0 :     fn create_pgdata(&self) -> Result<()> {
     357            0 :         // Ignore removal error, likely it is a 'No such file or directory (os error 2)'.
     358            0 :         // If it is something different then create_dir() will error out anyway.
     359            0 :         let _ok = fs::remove_dir_all(&self.pgdata);
     360            0 :         fs::create_dir(&self.pgdata)?;
     361            0 :         fs::set_permissions(&self.pgdata, fs::Permissions::from_mode(0o700))?;
     362              : 
     363            0 :         Ok(())
     364            0 :     }
     365              : 
     366              :     // Get a basebackup from the pageserver over libpq (connstr from the spec) and
     367              :     // unpack it into the `pgdata` directory, overwriting all previous content.
     368              :     #[instrument(skip_all, fields(%lsn))]
     369              :     fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
     370              :         let spec = compute_state.pspec.as_ref().expect("spec must be set");
     371              :         let start_time = Instant::now();
     372              : 
     373              :         let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
     374              :         let mut config = postgres::Config::from_str(shard0_connstr)?;
     375              : 
     376              :         // Use the storage auth token from the config file, if given.
     377              :         // Note: this overrides any password set in the connection string.
     378              :         if let Some(storage_auth_token) = &spec.storage_auth_token {
     379              :             info!("Got storage auth token from spec file");
     380              :             config.password(storage_auth_token);
     381              :         } else {
     382              :             info!("Storage auth token not set");
     383              :         }
     384              : 
     385              :         // Connect to pageserver
     386              :         let mut client = config.connect(NoTls)?;
     387              :         let pageserver_connect_micros = start_time.elapsed().as_micros() as u64;
     388              : 
     389              :         let basebackup_cmd = match lsn {
     390              :             Lsn(0) => {
     391              :                 if spec.spec.mode != ComputeMode::Primary {
     392              :                     format!(
     393              :                         "basebackup {} {} --gzip --replica",
     394              :                         spec.tenant_id, spec.timeline_id
     395              :                     )
     396              :                 } else {
     397              :                     format!("basebackup {} {} --gzip", spec.tenant_id, spec.timeline_id)
     398              :                 }
     399              :             }
     400              :             _ => {
     401              :                 if spec.spec.mode != ComputeMode::Primary {
     402              :                     format!(
     403              :                         "basebackup {} {} {} --gzip --replica",
     404              :                         spec.tenant_id, spec.timeline_id, lsn
     405              :                     )
     406              :                 } else {
     407              :                     format!(
     408              :                         "basebackup {} {} {} --gzip",
     409              :                         spec.tenant_id, spec.timeline_id, lsn
     410              :                     )
     411              :                 }
     412              :             }
     413              :         };
     414              : 
     415              :         let copyreader = client.copy_out(basebackup_cmd.as_str())?;
     416              :         let mut measured_reader = MeasuredReader::new(copyreader);
     417              :         let mut bufreader = std::io::BufReader::new(&mut measured_reader);
     418              : 
     419              :         // Read the archive directly from the `CopyOutReader`
     420              :         //
     421              :         // Set `ignore_zeros` so that unpack() reads all the Copy data and
     422              :         // doesn't stop at the end-of-archive marker. Otherwise, if the server
     423              :         // sends an Error after finishing the tarball, we will not notice it.
     424              :         let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut bufreader));
     425              :         ar.set_ignore_zeros(true);
     426              :         ar.unpack(&self.pgdata)?;
     427              : 
     428              :         // Report metrics
     429              :         let mut state = self.state.lock().unwrap();
     430              :         state.metrics.pageserver_connect_micros = pageserver_connect_micros;
     431              :         state.metrics.basebackup_bytes = measured_reader.get_byte_count() as u64;
     432              :         state.metrics.basebackup_ms = start_time.elapsed().as_millis() as u64;
     433              :         Ok(())
     434              :     }
     435              : 
     436              :     // Gets the basebackup in a retry loop
     437              :     #[instrument(skip_all, fields(%lsn))]
     438              :     pub fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
     439              :         let mut retry_period_ms = 500.0;
     440              :         let mut attempts = 0;
     441              :         const DEFAULT_ATTEMPTS: u16 = 10;
     442              :         #[cfg(feature = "testing")]
     443              :         let max_attempts = if let Ok(v) = env::var("NEON_COMPUTE_TESTING_BASEBACKUP_RETRIES") {
     444              :             u16::from_str(&v).unwrap()
     445              :         } else {
     446              :             DEFAULT_ATTEMPTS
     447              :         };
     448              :         #[cfg(not(feature = "testing"))]
     449              :         let max_attempts = DEFAULT_ATTEMPTS;
     450              :         loop {
     451              :             let result = self.try_get_basebackup(compute_state, lsn);
     452              :             match result {
     453              :                 Ok(_) => {
     454              :                     return result;
     455              :                 }
     456              :                 Err(ref e) if attempts < max_attempts => {
     457              :                     warn!(
     458              :                         "Failed to get basebackup: {} (attempt {}/{})",
     459              :                         e, attempts, max_attempts
     460              :                     );
     461              :                     std::thread::sleep(std::time::Duration::from_millis(retry_period_ms as u64));
     462              :                     retry_period_ms *= 1.5;
     463              :                 }
     464              :                 Err(_) => {
     465              :                     return result;
     466              :                 }
     467              :             }
     468              :             attempts += 1;
     469              :         }
     470              :     }
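                      :     // With the defaults above, the retry delays grow geometrically:
                      :     // 500 ms, 750 ms, 1125 ms, ... (multiplied by 1.5 after each failed
                      :     // attempt), with up to `max_attempts` retries (10 by default).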
     471              : 
     472            0 :     pub async fn check_safekeepers_synced_async(
     473            0 :         &self,
     474            0 :         compute_state: &ComputeState,
     475            0 :     ) -> Result<Option<Lsn>> {
     476            0 :         // Construct a connection config for each safekeeper
     477            0 :         let pspec: ParsedSpec = compute_state
     478            0 :             .pspec
     479            0 :             .as_ref()
     480            0 :             .expect("spec must be set")
     481            0 :             .clone();
     482            0 :         let sk_connstrs: Vec<String> = pspec.safekeeper_connstrings.clone();
     483            0 :         let sk_configs = sk_connstrs.into_iter().map(|connstr| {
     484            0 :             // Format connstr
     485            0 :             let id = connstr.clone();
     486            0 :             let connstr = format!("postgresql://no_user@{}", connstr);
     487            0 :             let options = format!(
     488            0 :                 "-c timeline_id={} tenant_id={}",
     489            0 :                 pspec.timeline_id, pspec.tenant_id
     490            0 :             );
     491            0 : 
     492            0 :             // Construct client
     493            0 :             let mut config = tokio_postgres::Config::from_str(&connstr).unwrap();
     494            0 :             config.options(&options);
     495            0 :             if let Some(storage_auth_token) = pspec.storage_auth_token.clone() {
     496            0 :                 config.password(storage_auth_token);
     497            0 :             }
     498              : 
     499            0 :             (id, config)
     500            0 :         });
     501            0 : 
     502            0 :         // Create task set to query all safekeepers
     503            0 :         let mut tasks = FuturesUnordered::new();
     504            0 :         let quorum = sk_configs.len() / 2 + 1;
     505            0 :         for (id, config) in sk_configs {
     506            0 :             let timeout = tokio::time::Duration::from_millis(100);
     507            0 :             let task = tokio::time::timeout(timeout, ping_safekeeper(id, config));
     508            0 :             tasks.push(tokio::spawn(task));
     509            0 :         }
     510              : 
     511              :         // Get a quorum of responses or errors
     512            0 :         let mut responses = Vec::new();
     513            0 :         let mut join_errors = Vec::new();
     514            0 :         let mut task_errors = Vec::new();
     515            0 :         let mut timeout_errors = Vec::new();
     516            0 :         while let Some(response) = tasks.next().await {
     517            0 :             match response {
     518            0 :                 Ok(Ok(Ok(r))) => responses.push(r),
     519            0 :                 Ok(Ok(Err(e))) => task_errors.push(e),
     520            0 :                 Ok(Err(e)) => timeout_errors.push(e),
     521            0 :                 Err(e) => join_errors.push(e),
     522              :             };
     523            0 :             if responses.len() >= quorum {
     524            0 :                 break;
     525            0 :             }
     526            0 :             if join_errors.len() + task_errors.len() + timeout_errors.len() >= quorum {
     527            0 :                 break;
     528            0 :             }
     529              :         }
     530              : 
     531              :         // In case of error, log and fail the check, but don't crash.
     532              :         // We're playing it safe because these errors could be transient
     533              :         // and we don't yet retry. Also being careful here allows us to
     534              :         // be backwards compatible with safekeepers that don't have the
     535              :         // TIMELINE_STATUS API yet.
     536            0 :         if responses.len() < quorum {
     537            0 :             error!(
     538            0 :                 "failed sync safekeepers check {:?} {:?} {:?}",
     539              :                 join_errors, task_errors, timeout_errors
     540              :             );
     541            0 :             return Ok(None);
     542            0 :         }
     543            0 : 
     544            0 :         Ok(check_if_synced(responses))
     545            0 :     }
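                      :     // The quorum above is a strict majority: with 3 safekeepers,
                      :     // quorum = 3 / 2 + 1 = 2, so the check needs two successful responses
                      :     // and stops early once two of the ping / timeout / join attempts fail.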
     546              : 
     547              :     // Fast path for sync_safekeepers. If they're already synced we get the LSN
     548              :     // in one roundtrip. If not, we should do a full sync_safekeepers.
     549            0 :     pub fn check_safekeepers_synced(&self, compute_state: &ComputeState) -> Result<Option<Lsn>> {
     550            0 :         let start_time = Utc::now();
     551            0 : 
     552            0 :         let rt = tokio::runtime::Handle::current();
     553            0 :         let result = rt.block_on(self.check_safekeepers_synced_async(compute_state));
     554            0 : 
     555            0 :         // Record runtime
     556            0 :         self.state.lock().unwrap().metrics.sync_sk_check_ms = Utc::now()
     557            0 :             .signed_duration_since(start_time)
     558            0 :             .to_std()
     559            0 :             .unwrap()
     560            0 :             .as_millis() as u64;
     561            0 :         result
     562            0 :     }
     563              : 
     564              :     // Run `postgres` in a special mode with `--sync-safekeepers` argument
     565              :     // and return the reported LSN back to the caller.
     566              :     #[instrument(skip_all)]
     567              :     pub fn sync_safekeepers(&self, storage_auth_token: Option<String>) -> Result<Lsn> {
     568              :         let start_time = Utc::now();
     569              : 
     570              :         let mut sync_handle = maybe_cgexec(&self.pgbin)
     571              :             .args(["--sync-safekeepers"])
     572              :             .env("PGDATA", &self.pgdata) // we cannot use -D in this mode
     573              :             .envs(if let Some(storage_auth_token) = &storage_auth_token {
     574              :                 vec![("NEON_AUTH_TOKEN", storage_auth_token)]
     575              :             } else {
     576              :                 vec![]
     577              :             })
     578              :             .stdout(Stdio::piped())
     579              :             .stderr(Stdio::piped())
     580              :             .spawn()
     581              :             .expect("postgres --sync-safekeepers failed to start");
     582              :         SYNC_SAFEKEEPERS_PID.store(sync_handle.id(), Ordering::SeqCst);
     583              : 
     584              :         // `postgres --sync-safekeepers` will print all log output to stderr and
     585              :         // final LSN to stdout. So we leave stdout to collect LSN, while stderr logs
     586              :         // will be collected in a child thread.
     587              :         let stderr = sync_handle
     588              :             .stderr
     589              :             .take()
     590              :             .expect("stderr should be captured");
     591              :         let logs_handle = handle_postgres_logs(stderr);
     592              : 
     593              :         let sync_output = sync_handle
     594              :             .wait_with_output()
     595              :             .expect("postgres --sync-safekeepers failed");
     596              :         SYNC_SAFEKEEPERS_PID.store(0, Ordering::SeqCst);
     597              : 
     598              :         // Process has exited, so we can join the logs thread.
     599              :         let _ = tokio::runtime::Handle::current()
     600              :             .block_on(logs_handle)
     601            0 :             .map_err(|e| tracing::error!("log task panicked: {:?}", e));
     602              : 
     603              :         if !sync_output.status.success() {
     604              :             anyhow::bail!(
     605              :                 "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
     606              :                 sync_output.status,
     607              :                 String::from_utf8(sync_output.stdout)
     608              :                     .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
     609              :             );
     610              :         }
     611              : 
     612              :         self.state.lock().unwrap().metrics.sync_safekeepers_ms = Utc::now()
     613              :             .signed_duration_since(start_time)
     614              :             .to_std()
     615              :             .unwrap()
     616              :             .as_millis() as u64;
     617              : 
     618              :         let lsn = Lsn::from_str(String::from_utf8(sync_output.stdout)?.trim())?;
     619              : 
     620              :         Ok(lsn)
     621              :     }
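                      :     // Note: on success, `postgres --sync-safekeepers` prints the final LSN as
                      :     // a single line on stdout in the Postgres LSN text format (e.g. a value
                      :     // like `0/16B3748`), which is what `Lsn::from_str` parses above.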
     622              : 
     623              :     /// Do all the preparations like PGDATA directory creation, configuration,
     624              :     /// safekeepers sync, basebackup, etc.
     625              :     #[instrument(skip_all)]
     626              :     pub fn prepare_pgdata(&self, compute_state: &ComputeState) -> Result<()> {
     627              :         let pspec = compute_state.pspec.as_ref().expect("spec must be set");
     628              :         let spec = &pspec.spec;
     629              :         let pgdata_path = Path::new(&self.pgdata);
     630              : 
     631              :         // Remove/create an empty pgdata directory and put configuration there.
     632              :         self.create_pgdata()?;
     633              :         config::write_postgres_conf(
     634              :             &pgdata_path.join("postgresql.conf"),
     635              :             &pspec.spec,
     636              :             self.internal_http_port,
     637              :         )?;
     638              : 
     639              :         // Syncing safekeepers is only safe with primary nodes: if a primary
     640              :         // is already connected it will be kicked out, so a secondary (standby)
     641              :         // cannot sync safekeepers.
     642              :         let lsn = match spec.mode {
     643              :             ComputeMode::Primary => {
     644              :                 info!("checking if safekeepers are synced");
     645              :                 let lsn = if let Ok(Some(lsn)) = self.check_safekeepers_synced(compute_state) {
     646              :                     lsn
     647              :                 } else {
     648              :                     info!("starting safekeepers syncing");
     649              :                     self.sync_safekeepers(pspec.storage_auth_token.clone())
     650            0 :                         .with_context(|| "failed to sync safekeepers")?
     651              :                 };
     652              :                 info!("safekeepers synced at LSN {}", lsn);
     653              :                 lsn
     654              :             }
     655              :             ComputeMode::Static(lsn) => {
     656              :                 info!("Starting read-only node at static LSN {}", lsn);
     657              :                 lsn
     658              :             }
     659              :             ComputeMode::Replica => {
     660              :                 info!("Initializing standby from latest Pageserver LSN");
     661              :                 Lsn(0)
     662              :             }
     663              :         };
     664              : 
     665              :         info!(
     666              :             "getting basebackup@{} from pageserver {}",
     667              :             lsn, &pspec.pageserver_connstr
     668              :         );
     669            0 :         self.get_basebackup(compute_state, lsn).with_context(|| {
     670            0 :             format!(
     671            0 :                 "failed to get basebackup@{} from pageserver {}",
     672            0 :                 lsn, &pspec.pageserver_connstr
     673            0 :             )
     674            0 :         })?;
     675              : 
     676              :         // Update pg_hba.conf received with basebackup.
     677              :         update_pg_hba(pgdata_path)?;
     678              : 
     679              :         // Place pg_dynshmem under /dev/shm. This allows us to use
     680              :         // 'dynamic_shared_memory_type = mmap' so that the files are placed in
     681              :         // /dev/shm, similar to how 'dynamic_shared_memory_type = posix' works.
     682              :         //
     683              :         // Why on earth don't we just stick to the 'posix' default, you might
     684              :         // ask.  It turns out that making large allocations with 'posix' doesn't
     685              :         // work very well with autoscaling. The behavior we want is that:
     686              :         //
     687              :         // 1. You can make large DSM allocations, larger than the current RAM
     688              :         //    size of the VM, without errors
     689              :         //
     690              :         // 2. If the allocated memory is really used, the VM is scaled up
     691              :         //    automatically to accommodate that
     692              :         //
     693              :         // We try to make that possible by having swap in the VM. But with the
     694              :         // default 'posix' DSM implementation, we fail step 1, even when there's
     695              :         // plenty of swap available. PostgreSQL uses posix_fallocate() to create
     696              :         // the shmem segment, which is really just a file in /dev/shm in Linux,
     697              :         // but posix_fallocate() on tmpfs returns ENOMEM if the size is larger
     698              :         // than available RAM.
     699              :         //
     700              :         // Using 'dynamic_shared_memory_type = mmap' works around that, because
     701              :         // the Postgres 'mmap' DSM implementation doesn't use
     702              :         // posix_fallocate(). Instead, it uses repeated calls to write(2) to
     703              :         // fill the file with zeros. It's weird that that differs between
     704              :         // 'posix' and 'mmap', but we take advantage of it. When the file is
     705              :         // filled slowly with write(2), the kernel allows it to grow larger, as
     706              :         // long as there's swap available.
     707              :         //
     708              :         // In short, using 'dynamic_shared_memory_type = mmap' allows one DSM
     709              :         // segment to be larger than the currently available RAM. But we don't
     710              :         // want to store it in a real file, which the kernel would try to
     711              :         // flush to disk, so we symlink pg_dynshmem to /dev/shm.
     712              :         //
     713              :         // We don't set 'dynamic_shared_memory_type = mmap' here, we let the
     714              :         // control plane control that option. If 'mmap' is not used, this
     715              :         // symlink doesn't affect anything.
     716              :         //
     717              :         // See https://github.com/neondatabase/autoscaling/issues/800
     718              :         std::fs::remove_dir(pgdata_path.join("pg_dynshmem"))?;
     719              :         symlink("/dev/shm/", pgdata_path.join("pg_dynshmem"))?;
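                      :         // If the control plane opts in, the corresponding (hypothetical for
                      :         // any given deployment) postgresql.conf setting would be:
                      :         //
                      :         //     dynamic_shared_memory_type = mmap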
     720              : 
     721              :         match spec.mode {
     722              :             ComputeMode::Primary => {}
     723              :             ComputeMode::Replica | ComputeMode::Static(..) => {
     724              :                 add_standby_signal(pgdata_path)?;
     725              :             }
     726              :         }
     727              : 
     728              :         Ok(())
     729              :     }
     730              : 
     731              :     /// Start and stop a postgres process to warm up the VM for startup.
     732            0 :     pub fn prewarm_postgres(&self) -> Result<()> {
     733            0 :         info!("prewarming");
     734              : 
     735              :         // Create pgdata
     736            0 :         let pgdata = &format!("{}.warmup", self.pgdata);
     737            0 :         create_pgdata(pgdata)?;
     738              : 
     739              :         // Run initdb to completion
     740            0 :         info!("running initdb");
     741            0 :         let initdb_bin = Path::new(&self.pgbin).parent().unwrap().join("initdb");
     742            0 :         Command::new(initdb_bin)
     743            0 :             .args(["--pgdata", pgdata])
     744            0 :             .output()
     745            0 :             .expect("cannot start initdb process");
     746              : 
     747              :         // Write conf
     748              :         use std::io::Write;
     749            0 :         let conf_path = Path::new(pgdata).join("postgresql.conf");
     750            0 :         let mut file = std::fs::File::create(conf_path)?;
     751            0 :         writeln!(file, "shared_buffers=65536")?;
     752            0 :         writeln!(file, "port=51055")?; // Nobody should be connecting
     753            0 :         writeln!(file, "shared_preload_libraries = 'neon'")?;
     754              : 
     755              :         // Start postgres
     756            0 :         info!("starting postgres");
     757            0 :         let mut pg = maybe_cgexec(&self.pgbin)
     758            0 :             .args(["-D", pgdata])
     759            0 :             .spawn()
     760            0 :             .expect("cannot start postgres process");
     761            0 : 
     762            0 :         // Stop it when it's ready
     763            0 :         info!("waiting for postgres");
     764            0 :         wait_for_postgres(&mut pg, Path::new(pgdata))?;
     765              :         // SIGQUIT orders postgres to exit immediately. We don't want to SIGKILL
     766              :         // it, to avoid orphaned processes prowling around while the datadir is
     767              :         // wiped.
     768            0 :         let pm_pid = Pid::from_raw(pg.id() as i32);
     769            0 :         kill(pm_pid, Signal::SIGQUIT)?;
     770            0 :         info!("sent SIGQUIT signal");
     771            0 :         pg.wait()?;
     772            0 :         info!("done prewarming");
     773              : 
     774              :         // clean up
     775            0 :         let _ok = fs::remove_dir_all(pgdata);
     776            0 :         Ok(())
     777            0 :     }
     778              : 
     779              :     /// Start Postgres as a child process and wait until it is ready to accept
     780              :     /// connections.
     781              :     /// Returns a handle to the child process and a handle to the logs task.
     782              :     #[instrument(skip_all)]
     783              :     pub fn start_postgres(
     784              :         &self,
     785              :         storage_auth_token: Option<String>,
     786              :     ) -> Result<(std::process::Child, tokio::task::JoinHandle<Result<()>>)> {
     787              :         let pgdata_path = Path::new(&self.pgdata);
     788              : 
     789              :         // Run postgres as a child process.
     790              :         let mut pg = maybe_cgexec(&self.pgbin)
     791              :             .args(["-D", &self.pgdata])
     792              :             .envs(if let Some(storage_auth_token) = &storage_auth_token {
     793              :                 vec![("NEON_AUTH_TOKEN", storage_auth_token)]
     794              :             } else {
     795              :                 vec![]
     796              :             })
     797              :             .stderr(Stdio::piped())
     798              :             .spawn()
     799              :             .expect("cannot start postgres process");
     800              :         PG_PID.store(pg.id(), Ordering::SeqCst);
     801              : 
     802              :         // Start a task to collect logs from stderr.
     803              :         let stderr = pg.stderr.take().expect("stderr should be captured");
     804              :         let logs_handle = handle_postgres_logs(stderr);
     805              : 
     806              :         wait_for_postgres(&mut pg, pgdata_path)?;
     807              : 
     808              :         Ok((pg, logs_handle))
     809              :     }
     810              : 
     811              :     /// Perform post-configuration of the already started Postgres. This function spawns a background task
     812              :     /// to configure the database after applying the compute spec. Currently, it upgrades the `neon` extension
     813              :     /// version. In the future, it may upgrade all 3rd-party extensions.
     814              :     #[instrument(skip_all)]
     815              :     pub fn post_apply_config(&self) -> Result<()> {
     816              :         let conf = self.get_tokio_conn_conf(Some("compute_ctl:post_apply_config"));
     817            0 :         tokio::spawn(async move {
     818            0 :             let res = async {
     819            0 :                 let (mut client, connection) = conf.connect(NoTls).await?;
     820            0 :                 tokio::spawn(async move {
     821            0 :                     if let Err(e) = connection.await {
     822            0 :                         eprintln!("connection error: {}", e);
     823            0 :                     }
     824            0 :                 });
     825            0 : 
     826            0 :                 handle_neon_extension_upgrade(&mut client)
     827            0 :                     .await
     828            0 :                     .context("handle_neon_extension_upgrade")?;
     829            0 :                 Ok::<_, anyhow::Error>(())
     830            0 :             }
     831            0 :             .await;
     832            0 :             if let Err(err) = res {
     833            0 :                 error!("error while post_apply_config: {err:#}");
     834            0 :             }
     835            0 :         });
     836              :         Ok(())
     837              :     }
     838              : 
     839            0 :     pub fn get_conn_conf(&self, application_name: Option<&str>) -> postgres::Config {
     840            0 :         let mut conf = self.conn_conf.clone();
     841            0 :         if let Some(application_name) = application_name {
     842            0 :             conf.application_name(application_name);
     843            0 :         }
     844            0 :         conf
     845            0 :     }
     846              : 
     847            0 :     pub fn get_tokio_conn_conf(&self, application_name: Option<&str>) -> tokio_postgres::Config {
     848            0 :         let mut conf = self.tokio_conn_conf.clone();
     849            0 :         if let Some(application_name) = application_name {
     850            0 :             conf.application_name(application_name);
     851            0 :         }
     852            0 :         conf
     853            0 :     }
     854              : 
     855            0 :     pub async fn get_maintenance_client(
     856            0 :         conf: &tokio_postgres::Config,
     857            0 :     ) -> Result<tokio_postgres::Client> {
     858            0 :         let mut conf = conf.clone();
     859            0 :         conf.application_name("compute_ctl:apply_config");
     860              : 
     861            0 :         let (client, conn) = match conf.connect(NoTls).await {
     862              :             // If connection fails, it may be the old node with `zenith_admin` superuser.
     863              :             //
     864              :             // In this case we need to connect with the old `zenith_admin` name
     865              :             // and create a new user. We cannot simply rename the connected user,
     866              :             // but we can create a new one and grant it all privileges.
     867            0 :             Err(e) => match e.code() {
     868              :                 Some(&SqlState::INVALID_PASSWORD)
     869              :                 | Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => {
     870              :                     // Connect with zenith_admin if cloud_admin could not authenticate
     871            0 :                     info!(
     872            0 :                         "cannot connect to postgres: {}, retrying with `zenith_admin` username",
     873              :                         e
     874              :                     );
     875            0 :                     let mut zenith_admin_conf = postgres::config::Config::from(conf.clone());
     876            0 :                     zenith_admin_conf.application_name("compute_ctl:apply_config");
     877            0 :                     zenith_admin_conf.user("zenith_admin");
     878              : 
     879            0 :                     let mut client =
     880            0 :                         zenith_admin_conf.connect(NoTls)
     881            0 :                             .context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
     882              : 
     883              :                     // Disable forwarding so that users don't get a cloud_admin role
     884            0 :                     let mut func = || {
     885            0 :                         client.simple_query("SET neon.forward_ddl = false")?;
     886            0 :                         client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
     887            0 :                         client.simple_query("GRANT zenith_admin TO cloud_admin")?;
     888            0 :                         Ok::<_, anyhow::Error>(())
     889            0 :                     };
     890            0 :                     func().context("apply_config setup cloud_admin")?;
     891              : 
     892            0 :                     drop(client);
     893            0 : 
     894            0 :                     // Reconnect with connstring with expected name
     895            0 :                     conf.connect(NoTls).await?
     896              :                 }
     897            0 :                 _ => return Err(e.into()),
     898              :             },
     899            0 :             Ok((client, conn)) => (client, conn),
     900              :         };
     901              : 
     902            0 :         spawn(async move {
     903            0 :             if let Err(e) = conn.await {
     904            0 :                 error!("maintenance client connection error: {}", e);
     905            0 :             }
     906            0 :         });
     907            0 : 
     908            0 :         // Disable DDL forwarding because control plane already knows about the roles/databases
     909            0 :         // we're about to modify.
     910            0 :         client
     911            0 :             .simple_query("SET neon.forward_ddl = false")
     912            0 :             .await
     913            0 :             .context("apply_config SET neon.forward_ddl = false")?;
     914              : 
     915            0 :         Ok(client)
     916            0 :     }
     917              : 
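    // Editor's sketch (not part of the original file): minimal use of
    // `get_maintenance_client`, assuming a reachable Postgres and valid
    // `cloud_admin` credentials. `run_readiness_probe` is a hypothetical name.
    async fn run_readiness_probe(conf: &tokio_postgres::Config) -> Result<()> {
        let client = Self::get_maintenance_client(conf).await?;
        // DDL forwarding is already disabled on this client, so queries here
        // are not echoed back to the control plane.
        client.simple_query("SELECT 1").await?;
        Ok(())
    }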
     918              :     /// Apply the spec to the running PostgreSQL instance.
      919              :     /// The caller can decide to run with multiple clients in parallel, or
      920              :     /// in single-client mode. Either way, the commands executed are the same;
      921              :     /// only commands run in different databases are parallelized.
     922              :     #[instrument(skip_all)]
     923              :     pub fn apply_spec_sql(
     924              :         &self,
     925              :         spec: Arc<ComputeSpec>,
     926              :         conf: Arc<tokio_postgres::Config>,
     927              :         concurrency: usize,
     928              :     ) -> Result<()> {
     929              :         info!("Applying config with max {} concurrency", concurrency);
     930              :         debug!("Config: {:?}", spec);
     931              : 
     932              :         let rt = tokio::runtime::Handle::current();
     933            0 :         rt.block_on(async {
      934              :             // Proceed with post-startup configuration. Note that the order of operations is important.
     935            0 :             let client = Self::get_maintenance_client(&conf).await?;
     936            0 :             let spec = spec.clone();
     937              : 
     938            0 :             let databases = get_existing_dbs_async(&client).await?;
     939            0 :             let roles = get_existing_roles_async(&client)
     940            0 :                 .await?
     941            0 :                 .into_iter()
     942            0 :                 .map(|role| (role.name.clone(), role))
     943            0 :                 .collect::<HashMap<String, Role>>();
     944            0 : 
     945            0 :             // Check if we need to drop subscriptions before starting the endpoint.
     946            0 :             //
      947            0 :             // It is important to do this operation exactly once when the endpoint starts on a new branch.
      948            0 :             // Otherwise, we may drop subscriptions that were newly created on this branch rather than inherited ones.
      949            0 :             //
      950            0 :             // We cannot rely only on the spec.drop_subscriptions_before_start flag,
      951            0 :             // because if the compute restarts inside the VM for some reason,
      952            0 :             // it will start again with the same spec and flag value.
      953            0 :             //
      954            0 :             // To handle this, we record the fact that the operation ran
      955            0 :             // in the neon.drop_subscriptions_done table.
      956            0 :             // If the table does not exist, we assume that the operation was never performed, so we must do it.
      957            0 :             // If the table exists, we check whether the operation was performed on the current timeline.
     958            0 :             //
     959            0 :             let mut drop_subscriptions_done = false;
     960            0 : 
     961            0 :             if spec.drop_subscriptions_before_start {
     962            0 :                 let timeline_id = self.get_timeline_id().context("timeline_id must be set")?;
     963            0 :                 let query = format!("select 1 from neon.drop_subscriptions_done where timeline_id = '{}'", timeline_id);
     964            0 : 
     965            0 :                 info!("Checking if drop subscription operation was already performed for timeline_id: {}", timeline_id);
     966              : 
      967              :             drop_subscriptions_done = match
     968            0 :                     client.simple_query(&query).await {
     969            0 :                     Ok(result) => {
     970            0 :                         matches!(&result[0], postgres::SimpleQueryMessage::Row(_))
     971              :                     },
     972            0 :                     Err(e) =>
     973            0 :                     {
     974            0 :                         match e.code() {
     975            0 :                             Some(&SqlState::UNDEFINED_TABLE) => false,
     976              :                             _ => {
     977              :                                 // We don't expect any other error here, except for the schema/table not existing
     978            0 :                                 error!("Error checking if drop subscription operation was already performed: {}", e);
     979            0 :                                 return Err(e.into());
     980              :                             }
     981              :                         }
     982              :                     }
     983              :                 }
     984            0 :             };
     985              : 
     986              : 
     987            0 :             let jwks_roles = Arc::new(
     988            0 :                 spec.as_ref()
     989            0 :                     .local_proxy_config
     990            0 :                     .iter()
     991            0 :                     .flat_map(|it| &it.jwks)
     992            0 :                     .flatten()
     993            0 :                     .flat_map(|setting| &setting.role_names)
     994            0 :                     .cloned()
     995            0 :                     .collect::<HashSet<_>>(),
     996            0 :             );
     997            0 : 
     998            0 :             let ctx = Arc::new(tokio::sync::RwLock::new(MutableApplyContext {
     999            0 :                 roles,
    1000            0 :                 dbs: databases,
    1001            0 :             }));
    1002            0 : 
     1003            0 :             // Apply the special pre-drop-database phase.
     1004            0 :             // NOTE: we use the code of the RunInEachDatabase phase for parallelism
     1005            0 :             // and connection management, but we don't really run it in *each* database,
     1006            0 :             // only in the databases we're about to drop.
    1007            0 :             info!("Applying PerDatabase (pre-dropdb) phase");
    1008            0 :             let concurrency_token = Arc::new(tokio::sync::Semaphore::new(concurrency));
    1009            0 : 
    1010            0 :             // Run the phase for each database that we're about to drop.
    1011            0 :             let db_processes = spec
    1012            0 :                 .delta_operations
    1013            0 :                 .iter()
    1014            0 :                 .flatten()
    1015            0 :                 .filter_map(move |op| {
    1016            0 :                     if op.action.as_str() == "delete_db" {
    1017            0 :                         Some(op.name.clone())
    1018              :                     } else {
    1019            0 :                         None
    1020              :                     }
    1021            0 :                 })
    1022            0 :                 .map(|dbname| {
    1023            0 :                     let spec = spec.clone();
    1024            0 :                     let ctx = ctx.clone();
    1025            0 :                     let jwks_roles = jwks_roles.clone();
    1026            0 :                     let mut conf = conf.as_ref().clone();
    1027            0 :                     let concurrency_token = concurrency_token.clone();
     1028            0 :                     // We only need the dbname field for this phase, so set the other fields to dummy values
    1029            0 :                     let db = DB::UserDB(Database {
    1030            0 :                         name: dbname.clone(),
    1031            0 :                         owner: "cloud_admin".to_string(),
    1032            0 :                         options: None,
    1033            0 :                         restrict_conn: false,
    1034            0 :                         invalid: false,
    1035            0 :                     });
    1036            0 : 
    1037            0 :                     debug!("Applying per-database phases for Database {:?}", &db);
    1038              : 
    1039            0 :                     match &db {
    1040            0 :                         DB::SystemDB => {}
    1041            0 :                         DB::UserDB(db) => {
    1042            0 :                             conf.dbname(db.name.as_str());
    1043            0 :                         }
    1044              :                     }
    1045              : 
    1046            0 :                     let conf = Arc::new(conf);
    1047            0 :                     let fut = Self::apply_spec_sql_db(
    1048            0 :                         spec.clone(),
    1049            0 :                         conf,
    1050            0 :                         ctx.clone(),
    1051            0 :                         jwks_roles.clone(),
    1052            0 :                         concurrency_token.clone(),
    1053            0 :                         db,
    1054            0 :                         [DropLogicalSubscriptions].to_vec(),
    1055            0 :                     );
    1056            0 : 
    1057            0 :                     Ok(spawn(fut))
    1058            0 :                 })
    1059            0 :                 .collect::<Vec<Result<_, anyhow::Error>>>();
    1060              : 
    1061            0 :             for process in db_processes.into_iter() {
    1062            0 :                 let handle = process?;
    1063            0 :                 if let Err(e) = handle.await? {
    1064              :                     // Handle the error case where the database does not exist
    1065              :                     // We do not check whether the DB exists or not in the deletion phase,
    1066              :                     // so we shouldn't be strict about it in pre-deletion cleanup as well.
    1067            0 :                     if e.to_string().contains("does not exist") {
    1068            0 :                         warn!("Error dropping subscription: {}", e);
    1069              :                     } else {
    1070            0 :                         return Err(e);
    1071              :                     }
    1072            0 :                 };
    1073              :             }
    1074              : 
    1075            0 :             for phase in [
    1076            0 :                 CreateSuperUser,
    1077            0 :                 DropInvalidDatabases,
    1078            0 :                 RenameRoles,
    1079            0 :                 CreateAndAlterRoles,
    1080            0 :                 RenameAndDeleteDatabases,
    1081            0 :                 CreateAndAlterDatabases,
    1082            0 :                 CreateSchemaNeon,
    1083              :             ] {
    1084            0 :                 info!("Applying phase {:?}", &phase);
    1085            0 :                 apply_operations(
    1086            0 :                     spec.clone(),
    1087            0 :                     ctx.clone(),
    1088            0 :                     jwks_roles.clone(),
    1089            0 :                     phase,
    1090            0 :                     || async { Ok(&client) },
    1091            0 :                 )
    1092            0 :                 .await?;
    1093              :             }
    1094              : 
     1095            0 :             info!("Applying the main RunInEachDatabase phase");
    1096            0 :             let concurrency_token = Arc::new(tokio::sync::Semaphore::new(concurrency));
    1097            0 : 
    1098            0 :             let db_processes = spec
    1099            0 :                 .cluster
    1100            0 :                 .databases
    1101            0 :                 .iter()
    1102            0 :                 .map(|db| DB::new(db.clone()))
     1103            0 :                 // include the system database as well
    1104            0 :                 .chain(once(DB::SystemDB))
    1105            0 :                 .map(|db| {
    1106            0 :                     let spec = spec.clone();
    1107            0 :                     let ctx = ctx.clone();
    1108            0 :                     let jwks_roles = jwks_roles.clone();
    1109            0 :                     let mut conf = conf.as_ref().clone();
    1110            0 :                     let concurrency_token = concurrency_token.clone();
    1111            0 :                     let db = db.clone();
    1112            0 : 
    1113            0 :                     debug!("Applying per-database phases for Database {:?}", &db);
    1114              : 
    1115            0 :                     match &db {
    1116            0 :                         DB::SystemDB => {}
    1117            0 :                         DB::UserDB(db) => {
    1118            0 :                             conf.dbname(db.name.as_str());
    1119            0 :                         }
    1120              :                     }
    1121              : 
    1122            0 :                     let conf = Arc::new(conf);
    1123            0 :                     let mut phases = vec![
    1124            0 :                         DeleteDBRoleReferences,
    1125            0 :                         ChangeSchemaPerms,
    1126            0 :                         HandleAnonExtension,
    1127            0 :                     ];
    1128            0 : 
    1129            0 :                     if spec.drop_subscriptions_before_start && !drop_subscriptions_done {
    1130            0 :                         info!("Adding DropLogicalSubscriptions phase because drop_subscriptions_before_start is set");
    1131            0 :                         phases.push(DropLogicalSubscriptions);
    1132            0 :                     }
    1133              : 
    1134            0 :                     let fut = Self::apply_spec_sql_db(
    1135            0 :                         spec.clone(),
    1136            0 :                         conf,
    1137            0 :                         ctx.clone(),
    1138            0 :                         jwks_roles.clone(),
    1139            0 :                         concurrency_token.clone(),
    1140            0 :                         db,
    1141            0 :                         phases,
    1142            0 :                     );
    1143            0 : 
    1144            0 :                     Ok(spawn(fut))
    1145            0 :                 })
    1146            0 :                 .collect::<Vec<Result<_, anyhow::Error>>>();
    1147              : 
    1148            0 :             for process in db_processes.into_iter() {
    1149            0 :                 let handle = process?;
    1150            0 :                 handle.await??;
    1151              :             }
    1152              : 
    1153            0 :             let mut phases = vec![
    1154            0 :                 HandleOtherExtensions,
    1155            0 :                 HandleNeonExtension, // This step depends on CreateSchemaNeon
    1156            0 :                 CreateAvailabilityCheck,
    1157            0 :                 DropRoles,
    1158            0 :             ];
    1159            0 : 
    1160            0 :             // This step depends on CreateSchemaNeon
    1161            0 :             if spec.drop_subscriptions_before_start && !drop_subscriptions_done {
    1162            0 :                 info!("Adding FinalizeDropLogicalSubscriptions phase because drop_subscriptions_before_start is set");
    1163            0 :                 phases.push(FinalizeDropLogicalSubscriptions);
    1164            0 :             }
    1165              : 
    1166            0 :             for phase in phases {
    1167            0 :                 debug!("Applying phase {:?}", &phase);
    1168            0 :                 apply_operations(
    1169            0 :                     spec.clone(),
    1170            0 :                     ctx.clone(),
    1171            0 :                     jwks_roles.clone(),
    1172            0 :                     phase,
    1173            0 :                     || async { Ok(&client) },
    1174            0 :                 )
    1175            0 :                 .await?;
    1176              :             }
    1177              : 
    1178            0 :             Ok::<(), anyhow::Error>(())
    1179            0 :         })?;
    1180              : 
    1181              :         Ok(())
    1182              :     }
    1183              : 
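    // Editor's sketch: the run-exactly-once check from `apply_spec_sql`,
    // factored into a hypothetical helper. It assumes the marker table
    // `neon.drop_subscriptions_done(timeline_id)` used above; an
    // UNDEFINED_TABLE error means the marker was never written, so the
    // operation still has to run.
    async fn was_drop_subscriptions_done(
        client: &tokio_postgres::Client,
        timeline_id: &str,
    ) -> Result<bool> {
        let query = format!(
            "select 1 from neon.drop_subscriptions_done where timeline_id = '{}'",
            timeline_id
        );
        match client.simple_query(&query).await {
            // A returned row means the marker exists for this timeline.
            Ok(rows) => Ok(matches!(
                rows.first(),
                Some(postgres::SimpleQueryMessage::Row(_))
            )),
            // Missing table: the operation was never performed.
            Err(e) if e.code() == Some(&SqlState::UNDEFINED_TABLE) => Ok(false),
            Err(e) => Err(e.into()),
        }
    }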
    1184              :     /// Apply SQL migrations of the RunInEachDatabase phase.
    1185              :     ///
     1186              :     /// May skip connecting to databases that don't have any scheduled
     1187              :     /// operations. The function is concurrency-controlled with the provided
     1188              :     /// semaphore. The caller has to make sure the semaphore isn't exhausted.
    1189            0 :     async fn apply_spec_sql_db(
    1190            0 :         spec: Arc<ComputeSpec>,
    1191            0 :         conf: Arc<tokio_postgres::Config>,
    1192            0 :         ctx: Arc<tokio::sync::RwLock<MutableApplyContext>>,
    1193            0 :         jwks_roles: Arc<HashSet<String>>,
    1194            0 :         concurrency_token: Arc<tokio::sync::Semaphore>,
    1195            0 :         db: DB,
    1196            0 :         subphases: Vec<PerDatabasePhase>,
    1197            0 :     ) -> Result<()> {
    1198            0 :         let _permit = concurrency_token.acquire().await?;
    1199              : 
    1200            0 :         let mut client_conn = None;
    1201              : 
    1202            0 :         for subphase in subphases {
    1203            0 :             apply_operations(
    1204            0 :                 spec.clone(),
    1205            0 :                 ctx.clone(),
    1206            0 :                 jwks_roles.clone(),
    1207            0 :                 RunInEachDatabase {
    1208            0 :                     db: db.clone(),
    1209            0 :                     subphase,
    1210            0 :                 },
     1211            0 :                 // Only connect if apply_operations actually wants a connection.
     1212            0 :                 // It's quite possible this database doesn't need any queries,
     1213            0 :                 // so by not connecting we save the time and effort of setting up
     1214            0 :                 // a connection to it.
    1215            0 :                 || async {
    1216            0 :                     if client_conn.is_none() {
    1217            0 :                         let db_client = Self::get_maintenance_client(&conf).await?;
    1218            0 :                         client_conn.replace(db_client);
    1219            0 :                     }
    1220            0 :                     let client = client_conn.as_ref().unwrap();
    1221            0 :                     Ok(client)
    1222            0 :                 },
    1223            0 :             )
    1224            0 :             .await?;
    1225              :         }
    1226              : 
    1227            0 :         drop(client_conn);
    1228            0 : 
    1229            0 :         Ok::<(), anyhow::Error>(())
    1230            0 :     }
    1231              : 
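    // Editor's sketch: the lazy-connect idea used in `apply_spec_sql_db`,
    // shown in isolation. The connection is only established on the first
    // request and then reused; if no subphase ever asks for a client, we
    // never dial the database at all. `lazy_client_demo` is a hypothetical name.
    async fn lazy_client_demo(conf: &tokio_postgres::Config) -> Result<()> {
        let mut cached: Option<tokio_postgres::Client> = None;
        for _ in 0..3 {
            if cached.is_none() {
                // First use: pay the connection cost now.
                cached.replace(Self::get_maintenance_client(conf).await?);
            }
            let client = cached.as_ref().unwrap();
            client.simple_query("SELECT 1").await?;
        }
        Ok(())
    }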
    1232              :     /// Choose how many concurrent connections to use for applying the spec changes.
    1233            0 :     pub fn max_service_connections(
    1234            0 :         &self,
    1235            0 :         compute_state: &ComputeState,
    1236            0 :         spec: &ComputeSpec,
    1237            0 :     ) -> usize {
     1238            0 :         // If the cluster is in the Init state we don't have to deal with user connections,
     1239            0 :         // and can thus use all `max_connections` connection slots. However, that's generally not
     1240            0 :         // very efficient, so we still limit it to a smaller number.
    1241            0 :         if compute_state.status == ComputeStatus::Init {
     1242              :             // If the settings contain 'max_connections', use that as the limit
    1243            0 :             if let Some(config) = spec.cluster.settings.find("max_connections") {
    1244            0 :                 config.parse::<usize>().ok()
    1245              :             } else {
    1246              :                 // Otherwise, try to find the setting in the postgresql_conf string
    1247            0 :                 spec.cluster
    1248            0 :                     .postgresql_conf
    1249            0 :                     .iter()
    1250            0 :                     .flat_map(|conf| conf.split("\n"))
    1251            0 :                     .filter_map(|line| {
    1252            0 :                         if !line.contains("max_connections") {
    1253            0 :                             return None;
    1254            0 :                         }
    1255              : 
    1256            0 :                         let (key, value) = line.split_once("=")?;
    1257            0 :                         let key = key
    1258            0 :                             .trim_start_matches(char::is_whitespace)
    1259            0 :                             .trim_end_matches(char::is_whitespace);
    1260            0 : 
    1261            0 :                         let value = value
    1262            0 :                             .trim_start_matches(char::is_whitespace)
    1263            0 :                             .trim_end_matches(char::is_whitespace);
    1264            0 : 
    1265            0 :                         if key != "max_connections" {
    1266            0 :                             return None;
    1267            0 :                         }
    1268            0 : 
    1269            0 :                         value.parse::<usize>().ok()
    1270            0 :                     })
    1271            0 :                     .next()
    1272              :             }
    1273              :             // If max_connections is present, use at most 1/3rd of that.
    1274              :             // When max_connections is lower than 30, try to use at least 10 connections, but
    1275              :             // never more than max_connections.
    1276            0 :             .map(|limit| match limit {
    1277            0 :                 0..10 => limit,
    1278            0 :                 10..30 => 10,
    1279            0 :                 30.. => limit / 3,
    1280            0 :             })
    1281            0 :             // If we didn't find max_connections, default to 10 concurrent connections.
    1282            0 :             .unwrap_or(10)
    1283              :         } else {
    1284              :             // state == Running
    1285              :             // Because the cluster is already in the Running state, we should assume users are
    1286              :             // already connected to the cluster, and high concurrency could negatively
    1287              :             // impact user connectivity. Therefore, we can limit concurrency to the number of
    1288              :             // reserved superuser connections, which users wouldn't be able to use anyway.
    1289            0 :             spec.cluster
    1290            0 :                 .settings
    1291            0 :                 .find("superuser_reserved_connections")
    1292            0 :                 .iter()
    1293            0 :                 .filter_map(|val| val.parse::<usize>().ok())
    1294            0 :                 .map(|val| if val > 1 { val - 1 } else { 1 })
    1295            0 :                 .last()
    1296            0 :                 .unwrap_or(3)
    1297              :         }
    1298            0 :     }
    1299              : 
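    // Editor's sketch: the connection-budget rule from `max_service_connections`
    // as a standalone pure function, convenient for unit tests. The name is
    // hypothetical; the arms mirror the match above.
    fn concurrency_for_limit(max_connections: usize) -> usize {
        match max_connections {
            0..10 => max_connections,    // tiny limits: use everything we have
            10..30 => 10,                // at least 10, but never more than the limit
            30.. => max_connections / 3, // large limits: cap at a third
        }
    }
    // e.g. concurrency_for_limit(8) == 8, concurrency_for_limit(20) == 10,
    //      concurrency_for_limit(300) == 100.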
     1300              :     /// Do the initial configuration of an already-started Postgres instance.
    1301              :     #[instrument(skip_all)]
    1302              :     pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
    1303              :         let conf = self.get_tokio_conn_conf(Some("compute_ctl:apply_config"));
    1304              : 
    1305              :         let conf = Arc::new(conf);
    1306              :         let spec = Arc::new(
    1307              :             compute_state
    1308              :                 .pspec
    1309              :                 .as_ref()
    1310              :                 .expect("spec must be set")
    1311              :                 .spec
    1312              :                 .clone(),
    1313              :         );
    1314              : 
    1315              :         let max_concurrent_connections = self.max_service_connections(compute_state, &spec);
    1316              : 
     1317              :         // Merge-apply the spec and its delta operations onto the PostgreSQL state.
    1318              :         self.apply_spec_sql(spec.clone(), conf.clone(), max_concurrent_connections)?;
    1319              : 
    1320              :         if let Some(ref local_proxy) = &spec.clone().local_proxy_config {
    1321              :             info!("configuring local_proxy");
    1322              :             local_proxy::configure(local_proxy).context("apply_config local_proxy")?;
    1323              :         }
    1324              : 
     1325              :         // Run migrations separately so they don't hold up cold starts
    1326            0 :         tokio::spawn(async move {
    1327            0 :             let mut conf = conf.as_ref().clone();
    1328            0 :             conf.application_name("compute_ctl:migrations");
    1329            0 : 
    1330            0 :             match conf.connect(NoTls).await {
    1331            0 :                 Ok((mut client, connection)) => {
    1332            0 :                     tokio::spawn(async move {
    1333            0 :                         if let Err(e) = connection.await {
     1334            0 :                             error!("connection error: {}", e);
    1335            0 :                         }
    1336            0 :                     });
    1337            0 :                     if let Err(e) = handle_migrations(&mut client).await {
    1338            0 :                         error!("Failed to run migrations: {}", e);
    1339            0 :                     }
    1340              :                 }
    1341            0 :                 Err(e) => {
    1342            0 :                     error!(
    1343            0 :                         "Failed to connect to the compute for running migrations: {}",
    1344              :                         e
    1345              :                     );
    1346              :                 }
    1347              :             };
    1348            0 :         });
    1349              : 
    1350              :         Ok::<(), anyhow::Error>(())
    1351              :     }
    1352              : 
     1353              :     // Wrapper around `pg_ctl reload`. Note that we use `pg_ctl` only for
     1354              :     // reload here; we don't use it for start / stop.
    1355              :     #[instrument(skip_all)]
    1356              :     fn pg_reload_conf(&self) -> Result<()> {
    1357              :         let pgctl_bin = Path::new(&self.pgbin).parent().unwrap().join("pg_ctl");
    1358              :         Command::new(pgctl_bin)
    1359              :             .args(["reload", "-D", &self.pgdata])
    1360              :             .output()
    1361              :             .expect("cannot run pg_ctl process");
    1362              :         Ok(())
    1363              :     }
    1364              : 
     1365              :     /// Similar to `apply_config()`, but performs a slightly different sequence of operations,
    1366              :     /// as it's used to reconfigure a previously started and configured Postgres node.
    1367              :     #[instrument(skip_all)]
    1368              :     pub fn reconfigure(&self) -> Result<()> {
    1369              :         let spec = self.state.lock().unwrap().pspec.clone().unwrap().spec;
    1370              : 
    1371              :         if let Some(ref pgbouncer_settings) = spec.pgbouncer_settings {
    1372              :             info!("tuning pgbouncer");
    1373              : 
    1374              :             // Spawn a background task to do the tuning,
    1375              :             // so that we don't block the main thread that starts Postgres.
    1376              :             let pgbouncer_settings = pgbouncer_settings.clone();
    1377            0 :             tokio::spawn(async move {
    1378            0 :                 let res = tune_pgbouncer(pgbouncer_settings).await;
    1379            0 :                 if let Err(err) = res {
    1380            0 :                     error!("error while tuning pgbouncer: {err:?}");
    1381            0 :                 }
    1382            0 :             });
    1383              :         }
    1384              : 
    1385              :         if let Some(ref local_proxy) = spec.local_proxy_config {
    1386              :             info!("configuring local_proxy");
    1387              : 
    1388              :             // Spawn a background task to do the configuration,
    1389              :             // so that we don't block the main thread that starts Postgres.
    1390              :             let local_proxy = local_proxy.clone();
    1391            0 :             tokio::spawn(async move {
    1392            0 :                 if let Err(err) = local_proxy::configure(&local_proxy) {
    1393            0 :                     error!("error while configuring local_proxy: {err:?}");
    1394            0 :                 }
    1395            0 :             });
    1396              :         }
    1397              : 
    1398              :         // Write new config
    1399              :         let pgdata_path = Path::new(&self.pgdata);
    1400              :         let postgresql_conf_path = pgdata_path.join("postgresql.conf");
    1401              :         config::write_postgres_conf(&postgresql_conf_path, &spec, self.internal_http_port)?;
    1402              : 
    1403              :         if !spec.skip_pg_catalog_updates {
    1404              :             let max_concurrent_connections = spec.reconfigure_concurrency;
    1405              :             // Temporarily reset max_cluster_size in config
     1406              :             // to avoid the possibility of hitting the limit while we are reconfiguring:
    1407              :             // creating new extensions, roles, etc.
    1408            0 :             config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
    1409            0 :                 self.pg_reload_conf()?;
    1410              : 
    1411            0 :                 if spec.mode == ComputeMode::Primary {
    1412            0 :                     let mut conf = tokio_postgres::Config::from_str(self.connstr.as_str()).unwrap();
    1413            0 :                     conf.application_name("apply_config");
    1414            0 :                     let conf = Arc::new(conf);
    1415            0 : 
    1416            0 :                     let spec = Arc::new(spec.clone());
    1417            0 : 
    1418            0 :                     self.apply_spec_sql(spec, conf, max_concurrent_connections)?;
    1419            0 :                 }
    1420              : 
    1421            0 :                 Ok(())
    1422            0 :             })?;
    1423              :         }
    1424              : 
    1425              :         self.pg_reload_conf()?;
    1426              : 
    1427              :         let unknown_op = "unknown".to_string();
    1428              :         let op_id = spec.operation_uuid.as_ref().unwrap_or(&unknown_op);
    1429              :         info!(
    1430              :             "finished reconfiguration of compute node for operation {}",
    1431              :             op_id
    1432              :         );
    1433              : 
    1434              :         Ok(())
    1435              :     }
    1436              : 
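    // Editor's sketch: building a client config from a connection string, as
    // `reconfigure` does above. `conf_from_connstr` is a hypothetical helper,
    // and the URL in the usage note is a placeholder.
    fn conf_from_connstr(connstr: &str) -> Result<tokio_postgres::Config> {
        let mut conf = tokio_postgres::Config::from_str(connstr)?;
        // Label the session so it is identifiable in pg_stat_activity.
        conf.application_name("apply_config");
        Ok(conf)
    }
    // Usage: conf_from_connstr("postgresql://cloud_admin@localhost:5432/postgres")?;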
    1437              :     #[instrument(skip_all)]
    1438              :     pub fn start_compute(
    1439              :         &self,
    1440              :     ) -> Result<(std::process::Child, tokio::task::JoinHandle<Result<()>>)> {
    1441              :         let compute_state = self.state.lock().unwrap().clone();
    1442              :         let pspec = compute_state.pspec.as_ref().expect("spec must be set");
    1443              :         info!(
    1444              :             "starting compute for project {}, operation {}, tenant {}, timeline {}",
    1445              :             pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None"),
    1446              :             pspec.spec.operation_uuid.as_deref().unwrap_or("None"),
    1447              :             pspec.tenant_id,
    1448              :             pspec.timeline_id,
    1449              :         );
    1450              : 
    1451              :         // tune pgbouncer
    1452              :         if let Some(pgbouncer_settings) = &pspec.spec.pgbouncer_settings {
    1453              :             info!("tuning pgbouncer");
    1454              : 
    1455              :             // Spawn a background task to do the tuning,
    1456              :             // so that we don't block the main thread that starts Postgres.
    1457              :             let pgbouncer_settings = pgbouncer_settings.clone();
    1458            0 :             let _handle = tokio::spawn(async move {
    1459            0 :                 let res = tune_pgbouncer(pgbouncer_settings).await;
    1460            0 :                 if let Err(err) = res {
    1461            0 :                     error!("error while tuning pgbouncer: {err:?}");
    1462            0 :                 }
    1463            0 :             });
    1464              :         }
    1465              : 
    1466              :         if let Some(local_proxy) = &pspec.spec.local_proxy_config {
    1467              :             info!("configuring local_proxy");
    1468              : 
    1469              :             // Spawn a background task to do the configuration,
    1470              :             // so that we don't block the main thread that starts Postgres.
    1471              :             let local_proxy = local_proxy.clone();
    1472            0 :             let _handle = tokio::spawn(async move {
    1473            0 :                 if let Err(err) = local_proxy::configure(&local_proxy) {
    1474            0 :                     error!("error while configuring local_proxy: {err:?}");
    1475            0 :                 }
    1476            0 :             });
    1477              :         }
    1478              : 
    1479              :         info!(
    1480              :             "start_compute spec.remote_extensions {:?}",
    1481              :             pspec.spec.remote_extensions
    1482              :         );
    1483              : 
    1484              :         // This part is sync, because we need to download
     1485              :         // remote shared_preload_libraries before postgres starts (if there are any)
    1486              :         if let Some(remote_extensions) = &pspec.spec.remote_extensions {
     1487              :             // First, create control files for all available extensions
    1488              :             extension_server::create_control_files(remote_extensions, &self.pgbin);
    1489              : 
    1490              :             let library_load_start_time = Utc::now();
    1491              :             let rt = tokio::runtime::Handle::current();
    1492              :             let remote_ext_metrics = rt.block_on(self.prepare_preload_libraries(&pspec.spec))?;
    1493              : 
    1494              :             let library_load_time = Utc::now()
    1495              :                 .signed_duration_since(library_load_start_time)
    1496              :                 .to_std()
    1497              :                 .unwrap()
    1498              :                 .as_millis() as u64;
    1499              :             let mut state = self.state.lock().unwrap();
    1500              :             state.metrics.load_ext_ms = library_load_time;
    1501              :             state.metrics.num_ext_downloaded = remote_ext_metrics.num_ext_downloaded;
    1502              :             state.metrics.largest_ext_size = remote_ext_metrics.largest_ext_size;
    1503              :             state.metrics.total_ext_download_size = remote_ext_metrics.total_ext_download_size;
    1504              :             info!(
    1505              :                 "Loading shared_preload_libraries took {:?}ms",
    1506              :                 library_load_time
    1507              :             );
    1508              :             info!("{:?}", remote_ext_metrics);
    1509              :         }
    1510              : 
    1511              :         self.prepare_pgdata(&compute_state)?;
    1512              : 
    1513              :         let start_time = Utc::now();
    1514              :         let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
    1515              : 
    1516              :         let config_time = Utc::now();
    1517              :         if pspec.spec.mode == ComputeMode::Primary {
    1518              :             if !pspec.spec.skip_pg_catalog_updates {
    1519              :                 let pgdata_path = Path::new(&self.pgdata);
     1520              :                 // Temporarily reset max_cluster_size in config
     1521              :                 // to avoid the possibility of hitting the limit while we are applying config:
     1522              :                 // creating new extensions, roles, etc.
    1523              :                 config::with_compute_ctl_tmp_override(
    1524              :                     pgdata_path,
    1525              :                     "neon.max_cluster_size=-1",
    1526            0 :                     || {
    1527            0 :                         self.pg_reload_conf()?;
    1528              : 
    1529            0 :                         self.apply_config(&compute_state)?;
    1530              : 
    1531            0 :                         Ok(())
    1532            0 :                     },
    1533              :                 )?;
    1534              : 
    1535              :                 let postgresql_conf_path = pgdata_path.join("postgresql.conf");
    1536              :                 if config::line_in_file(
    1537              :                     &postgresql_conf_path,
    1538              :                     "neon.disable_logical_replication_subscribers=false",
    1539              :                 )? {
    1540              :                     info!("updated postgresql.conf to set neon.disable_logical_replication_subscribers=false");
    1541              :                 }
    1542              :                 self.pg_reload_conf()?;
    1543              :             }
    1544              :             self.post_apply_config()?;
    1545              : 
    1546              :             let conf = self.get_conn_conf(None);
    1547            0 :             tokio::task::spawn_blocking(|| {
    1548            0 :                 let res = get_installed_extensions(conf);
    1549            0 :                 match res {
    1550            0 :                     Ok(extensions) => {
    1551            0 :                         info!(
    1552            0 :                             "[NEON_EXT_STAT] {}",
    1553            0 :                             serde_json::to_string(&extensions)
    1554            0 :                                 .expect("failed to serialize extensions list")
    1555              :                         );
    1556              :                     }
    1557            0 :                     Err(err) => error!("could not get installed extensions: {err:?}"),
    1558              :                 }
    1559            0 :             });
    1560              :         }
    1561              : 
    1562              :         let startup_end_time = Utc::now();
    1563              :         {
    1564              :             let mut state = self.state.lock().unwrap();
    1565              :             state.metrics.start_postgres_ms = config_time
    1566              :                 .signed_duration_since(start_time)
    1567              :                 .to_std()
    1568              :                 .unwrap()
    1569              :                 .as_millis() as u64;
    1570              :             state.metrics.config_ms = startup_end_time
    1571              :                 .signed_duration_since(config_time)
    1572              :                 .to_std()
    1573              :                 .unwrap()
    1574              :                 .as_millis() as u64;
    1575              :             state.metrics.total_startup_ms = startup_end_time
    1576              :                 .signed_duration_since(compute_state.start_time)
    1577              :                 .to_std()
    1578              :                 .unwrap()
    1579              :                 .as_millis() as u64;
    1580              :         }
    1581              :         self.set_status(ComputeStatus::Running);
    1582              : 
    1583              :         info!(
    1584              :             "finished configuration of compute for project {}",
    1585              :             pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None")
    1586              :         );
    1587              : 
    1588              :         // Log metrics so that we can search for slow operations in logs
    1589              :         let metrics = {
    1590              :             let state = self.state.lock().unwrap();
    1591              :             state.metrics.clone()
    1592              :         };
    1593              :         info!(?metrics, "compute start finished");
    1594              : 
    1595              :         Ok(pg_process)
    1596              :     }
    1597              : 
     1598              :     /// Update `last_active` in the shared state, but only if the new value is more recent.
    1599            0 :     pub fn update_last_active(&self, last_active: Option<DateTime<Utc>>) {
    1600            0 :         let mut state = self.state.lock().unwrap();
    1601            0 :         // NB: `Some(<DateTime>)` is always greater than `None`.
    1602            0 :         if last_active > state.last_active {
    1603            0 :             state.last_active = last_active;
    1604            0 :             debug!("set the last compute activity time to: {:?}", last_active);
    1605            0 :         }
    1606            0 :     }
    1607              : 
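    // Editor's note, as code: the comparison above relies on `Option`'s derived
    // ordering, where `None` sorts below any `Some(_)`. A tiny standalone
    // illustration (hypothetical, not part of the original file):
    fn option_ordering_demo() {
        let never_active: Option<DateTime<Utc>> = None;
        let just_now = Some(Utc::now());
        // `Some(<DateTime>)` is always greater than `None`, so a fresh
        // activity timestamp always replaces an unset one.
        assert!(just_now > never_active);
    }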
    1608              :     // Look for core dumps and collect backtraces.
    1609              :     //
     1610              :     // EKS worker nodes have the following core dump settings:
     1611              :     //   /proc/sys/kernel/core_pattern -> core
     1612              :     //   /proc/sys/kernel/core_uses_pid -> 1
     1613              :     //   ulimit -c -> unlimited
     1614              :     // which results in core dumps being written to the postgres data directory as core.<pid>.
     1615              :     //
     1616              :     // Use that as the default location and pattern, except on macOS, where core dumps are
     1617              :     // written to the /cores/ directory by default.
    1618              :     //
    1619              :     // With default Linux settings, the core dump file is called just "core", so check for
    1620              :     // that too.
    1621            0 :     pub fn check_for_core_dumps(&self) -> Result<()> {
    1622            0 :         let core_dump_dir = match std::env::consts::OS {
    1623            0 :             "macos" => Path::new("/cores/"),
    1624            0 :             _ => Path::new(&self.pgdata),
    1625              :         };
    1626              : 
    1627              :         // Collect core dump paths if any
    1628            0 :         info!("checking for core dumps in {}", core_dump_dir.display());
    1629            0 :         let files = fs::read_dir(core_dump_dir)?;
    1630            0 :         let cores = files.filter_map(|entry| {
    1631            0 :             let entry = entry.ok()?;
    1632              : 
    1633            0 :             let is_core_dump = match entry.file_name().to_str()? {
    1634            0 :                 n if n.starts_with("core.") => true,
    1635            0 :                 "core" => true,
    1636            0 :                 _ => false,
    1637              :             };
    1638            0 :             if is_core_dump {
    1639            0 :                 Some(entry.path())
    1640              :             } else {
    1641            0 :                 None
    1642              :             }
    1643            0 :         });
    1644              : 
    1645              :         // Print backtrace for each core dump
    1646            0 :         for core_path in cores {
    1647            0 :             warn!(
    1648            0 :                 "core dump found: {}, collecting backtrace",
    1649            0 :                 core_path.display()
    1650              :             );
    1651              : 
    1652              :             // Try first with gdb
    1653            0 :             let backtrace = Command::new("gdb")
    1654            0 :                 .args(["--batch", "-q", "-ex", "bt", &self.pgbin])
    1655            0 :                 .arg(&core_path)
    1656            0 :                 .output();
    1657              : 
    1658              :             // Try lldb if no gdb is found -- that is handy for local testing on macOS
    1659            0 :             let backtrace = match backtrace {
    1660            0 :                 Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
    1661            0 :                     warn!("cannot find gdb, trying lldb");
    1662            0 :                     Command::new("lldb")
    1663            0 :                         .arg("-c")
    1664            0 :                         .arg(&core_path)
    1665            0 :                         .args(["--batch", "-o", "bt all", "-o", "quit"])
    1666            0 :                         .output()
    1667              :                 }
    1668            0 :                 _ => backtrace,
    1669            0 :             }?;
    1670              : 
    1671            0 :             warn!(
    1672            0 :                 "core dump backtrace: {}",
    1673            0 :                 String::from_utf8_lossy(&backtrace.stdout)
    1674              :             );
    1675            0 :             warn!(
    1676            0 :                 "debugger stderr: {}",
    1677            0 :                 String::from_utf8_lossy(&backtrace.stderr)
    1678              :             );
    1679              :         }
    1680              : 
    1681            0 :         Ok(())
    1682            0 :     }
    1683              : 
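    // Editor's sketch: the filename test from `check_for_core_dumps`, pulled
    // out as a pure function. It accepts both the `core_uses_pid` form
    // ("core.<pid>") and the plain "core" default. The helper name is hypothetical.
    fn is_core_dump_name(name: &str) -> bool {
        name == "core" || name.starts_with("core.")
    }
    // e.g. is_core_dump_name("core.12345") == true,
    //      is_core_dump_name("corefile")   == false.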
     1684              :     /// Select `pg_stat_statements` data and return it as stringified JSON.
    1685            0 :     pub async fn collect_insights(&self) -> String {
    1686            0 :         let mut result_rows: Vec<String> = Vec::new();
    1687            0 :         let conf = self.get_tokio_conn_conf(Some("compute_ctl:collect_insights"));
    1688            0 :         let connect_result = conf.connect(NoTls).await;
    1689            0 :         let (client, connection) = connect_result.unwrap();
    1690            0 :         tokio::spawn(async move {
    1691            0 :             if let Err(e) = connection.await {
     1692            0 :                 error!("connection error: {}", e);
    1693            0 :             }
    1694            0 :         });
    1695            0 :         let result = client
    1696            0 :             .simple_query(
    1697            0 :                 "SELECT
    1698            0 :     row_to_json(pg_stat_statements)
    1699            0 : FROM
    1700            0 :     pg_stat_statements
    1701            0 : WHERE
    1702            0 :     userid != 'cloud_admin'::regrole::oid
    1703            0 : ORDER BY
    1704            0 :     (mean_exec_time + mean_plan_time) DESC
    1705            0 : LIMIT 100",
    1706            0 :             )
    1707            0 :             .await;
    1708              : 
    1709            0 :         if let Ok(raw_rows) = result {
    1710            0 :             for message in raw_rows.iter() {
    1711            0 :                 if let postgres::SimpleQueryMessage::Row(row) = message {
    1712            0 :                     if let Some(json) = row.get(0) {
    1713            0 :                         result_rows.push(json.to_string());
    1714            0 :                     }
    1715            0 :                 }
    1716              :             }
    1717              : 
    1718            0 :             format!("{{\"pg_stat_statements\": [{}]}}", result_rows.join(","))
    1719              :         } else {
     1720            0 :             // Plain string literal (not a format string), so braces must not be doubled.
     1720            0 :             "{\"pg_stat_statements\": []}".to_string()
    1721              :         }
    1722            0 :     }
    1723              : 
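    // Editor's sketch: an alternative assembly of the insights payload using
    // serde_json (already a dependency of this file) instead of manual string
    // concatenation. Each input string is one pre-serialized JSON row from
    // pg_stat_statements; unparsable rows are simply skipped. Hypothetical helper.
    fn insights_payload(rows: &[String]) -> String {
        let values: Vec<serde_json::Value> = rows
            .iter()
            .filter_map(|r| serde_json::from_str(r).ok())
            .collect();
        serde_json::json!({ "pg_stat_statements": values }).to_string()
    }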
     1724              :     // Download an archive, unzip it, and place the files in the correct locations.
    1725            0 :     pub async fn download_extension(
    1726            0 :         &self,
    1727            0 :         real_ext_name: String,
    1728            0 :         ext_path: RemotePath,
    1729            0 :     ) -> Result<u64, DownloadError> {
    1730            0 :         let ext_remote_storage =
    1731            0 :             self.ext_remote_storage
    1732            0 :                 .as_ref()
    1733            0 :                 .ok_or(DownloadError::BadInput(anyhow::anyhow!(
    1734            0 :                     "Remote extensions storage is not configured",
    1735            0 :                 )))?;
    1736              : 
    1737            0 :         let ext_archive_name = ext_path.object_name().expect("bad path");
    1738            0 : 
    1739            0 :         let mut first_try = false;
    1740            0 :         if !self
    1741            0 :             .ext_download_progress
    1742            0 :             .read()
    1743            0 :             .expect("lock err")
    1744            0 :             .contains_key(ext_archive_name)
    1745            0 :         {
    1746            0 :             self.ext_download_progress
    1747            0 :                 .write()
    1748            0 :                 .expect("lock err")
    1749            0 :                 .insert(ext_archive_name.to_string(), (Utc::now(), false));
    1750            0 :             first_try = true;
    1751            0 :         }
    1752            0 :         let (download_start, download_completed) =
    1753            0 :             self.ext_download_progress.read().expect("lock err")[ext_archive_name];
    1754            0 :         let start_time_delta = Utc::now()
    1755            0 :             .signed_duration_since(download_start)
    1756            0 :             .to_std()
    1757            0 :             .unwrap()
    1758            0 :             .as_millis() as u64;
    1759              : 
    1760              :         // how long to wait for extension download if it was started by another process
    1761              :         const HANG_TIMEOUT: u64 = 3000; // milliseconds
    1762              : 
    1763            0 :         if download_completed {
    1764            0 :             info!("extension already downloaded, skipping re-download");
    1765            0 :             return Ok(0);
    1766            0 :         } else if start_time_delta < HANG_TIMEOUT && !first_try {
     1767            0 :             info!("download {ext_archive_name} already started by another process, waiting until completion or timeout");
    1768            0 :             let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500));
    1769              :             loop {
    1770            0 :                 info!("waiting for download");
     1771            0 :                     info!("download finished by another process");
    1772            0 :                 let (_, download_completed_now) =
    1773            0 :                     self.ext_download_progress.read().expect("lock")[ext_archive_name];
    1774            0 :                 if download_completed_now {
    1775            0 :                     info!("download finished by whoever else downloaded it");
    1776            0 :                     return Ok(0);
    1777            0 :                 }
    1778              :             }
    1779              :             // NOTE: the above loop will get terminated
    1780              :             // based on the timeout of the download function
    1781            0 :         }
    1782            0 : 
     1783            0 :         // If the extension hasn't been downloaded before, or the previous
     1784            0 :         // download attempt started at least HANG_TIMEOUT ms ago,
     1785            0 :         // then we try to download it here.
    1786            0 :         info!("downloading new extension {ext_archive_name}");
    1787              : 
    1788            0 :         let download_size = extension_server::download_extension(
    1789            0 :             &real_ext_name,
    1790            0 :             &ext_path,
    1791            0 :             ext_remote_storage,
    1792            0 :             &self.pgbin,
    1793            0 :         )
    1794            0 :         .await
    1795            0 :         .map_err(DownloadError::Other);
    1796            0 : 
    1797            0 :         if download_size.is_ok() {
    1798            0 :             self.ext_download_progress
    1799            0 :                 .write()
    1800            0 :                 .expect("bad lock")
    1801            0 :                 .insert(ext_archive_name.to_string(), (download_start, true));
    1802            0 :         }
    1803              : 
    1804            0 :         download_size
    1805            0 :     }
    1806              : 
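The block above implements a "single downloader" pattern: the first caller claims the
archive in ext_download_progress, later callers either return early (already downloaded)
or poll until the first caller finishes. The standalone sketch below isolates that
pattern; the Progress type, Instant-based timing, and method names are illustrative
assumptions, not this crate's API.

    use std::collections::HashMap;
    use std::sync::RwLock;
    use std::time::{Duration, Instant};

    struct Progress {
        // archive name -> (download start time, completed?)
        map: RwLock<HashMap<String, (Instant, bool)>>,
    }

    impl Progress {
        /// Returns true if the caller should download the archive itself,
        /// false if another task has already completed the download.
        async fn claim_or_wait(&self, name: &str, hang_timeout: Duration) -> bool {
            // The first caller inserts an in-progress marker and becomes the downloader.
            let first_try = {
                let mut map = self.map.write().expect("lock err");
                if map.contains_key(name) {
                    false
                } else {
                    map.insert(name.to_string(), (Instant::now(), false));
                    true
                }
            };
            let (started, completed) = self.map.read().expect("lock err")[name];
            if completed {
                return false; // already downloaded, nothing to do
            }
            if !first_try && started.elapsed() < hang_timeout {
                // Someone else is downloading; poll until they finish.
                // (Like the loop above, this relies on an outer timeout to bail out.)
                let mut interval = tokio::time::interval(Duration::from_millis(500));
                loop {
                    interval.tick().await;
                    if self.map.read().expect("lock err")[name].1 {
                        return false;
                    }
                }
            }
            // First attempt, or the previous one looks stale: download it ourselves.
            true
        }
    }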
    1807            0 :     pub async fn set_role_grants(
    1808            0 :         &self,
    1809            0 :         db_name: &PgIdent,
    1810            0 :         schema_name: &PgIdent,
    1811            0 :         privileges: &[Privilege],
    1812            0 :         role_name: &PgIdent,
    1813            0 :     ) -> Result<()> {
    1814              :         use tokio_postgres::NoTls;
    1815              : 
    1816            0 :         let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:set_role_grants"));
    1817            0 :         conf.dbname(db_name);
    1818              : 
    1819            0 :         let (db_client, conn) = conf
    1820            0 :             .connect(NoTls)
    1821            0 :             .await
    1822            0 :             .context("Failed to connect to the database")?;
    1823            0 :         tokio::spawn(conn);
    1824            0 : 
    1825            0 :         // TODO: support other types of grants apart from schemas?
    1826            0 :         let query = format!(
    1827            0 :             "GRANT {} ON SCHEMA {} TO {}",
    1828            0 :             privileges
    1829            0 :                 .iter()
    1830            0 :                 // Privilege names are part of the command, so they must not
    1831            0 :                 // be quoted; they are already sanitized, which makes this safe.
    1832            0 :                 .map(|p| p.as_str())
    1833            0 :                 .collect::<Vec<&'static str>>()
    1834            0 :                 .join(", "),
    1835            0 :             // quote the schema and role name as identifiers to sanitize them.
    1836            0 :             schema_name.pg_quote(),
    1837            0 :             role_name.pg_quote(),
    1838            0 :         );
    1839            0 :         db_client
    1840            0 :             .simple_query(&query)
    1841            0 :             .await
    1842            0 :             .with_context(|| format!("Failed to execute query: {}", query))?;
    1843              : 
    1844            0 :         Ok(())
    1845            0 :     }
    1846              : 
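The GRANT statement above is assembled from unquoted privilege keywords and identifiers
quoted via pg_quote. A minimal sketch of that assembly, with a hand-rolled quote_ident
standing in for pg_quote (illustrative only, not the crate's implementation):

    // quote_ident doubles embedded double quotes and wraps the identifier,
    // per PostgreSQL quoting rules; pg_quote in this crate plays the same role.
    fn quote_ident(ident: &str) -> String {
        format!("\"{}\"", ident.replace('"', "\"\""))
    }

    fn main() {
        let query = format!(
            "GRANT {} ON SCHEMA {} TO {}",
            "USAGE, CREATE",        // privilege keywords: part of the command, not quoted
            quote_ident("app"),     // schema name: quoted as an identifier
            quote_ident("reader"),  // role name: quoted as an identifier
        );
        assert_eq!(query, r#"GRANT USAGE, CREATE ON SCHEMA "app" TO "reader""#);
    }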
    1847            0 :     pub async fn install_extension(
    1848            0 :         &self,
    1849            0 :         ext_name: &PgIdent,
    1850            0 :         db_name: &PgIdent,
    1851            0 :         ext_version: ExtVersion,
    1852            0 :     ) -> Result<ExtVersion> {
    1853              :         use tokio_postgres::NoTls;
    1854              : 
    1855            0 :         let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:install_extension"));
    1856            0 :         conf.dbname(db_name);
    1857              : 
    1858            0 :         let (db_client, conn) = conf
    1859            0 :             .connect(NoTls)
    1860            0 :             .await
    1861            0 :             .context("Failed to connect to the database")?;
    1862            0 :         tokio::spawn(conn);
    1863            0 : 
    1864            0 :         let version_query = "SELECT extversion FROM pg_extension WHERE extname = $1";
    1865            0 :         let version: Option<ExtVersion> = db_client
    1866            0 :             .query_opt(version_query, &[&ext_name])
    1867            0 :             .await
    1868            0 :             .with_context(|| format!("Failed to execute query: {}", version_query))?
    1869            0 :             .map(|row| row.get(0));
    1870            0 : 
    1871            0 :         // sanitize the inputs as postgres idents.
    1872            0 :         let ext_name: String = ext_name.pg_quote();
    1873            0 :         let quoted_version: String = ext_version.pg_quote();
    1874              : 
    1875            0 :         if let Some(installed_version) = version {
    1876            0 :             if installed_version == ext_version {
    1877            0 :                 return Ok(installed_version);
    1878            0 :             }
    1879            0 :             let query = format!("ALTER EXTENSION {ext_name} UPDATE TO {quoted_version}");
    1880            0 :             db_client
    1881            0 :                 .simple_query(&query)
    1882            0 :                 .await
    1883            0 :                 .with_context(|| format!("Failed to execute query: {}", query))?;
    1884              :         } else {
    1885            0 :             let query =
    1886            0 :                 format!("CREATE EXTENSION IF NOT EXISTS {ext_name} WITH VERSION {quoted_version}");
    1887            0 :             db_client
    1888            0 :                 .simple_query(&query)
    1889            0 :                 .await
    1890            0 :                 .with_context(|| format!("Failed to execute query: {}", query))?;
    1891              :         }
    1892              : 
    1893            0 :         Ok(ext_version)
    1894            0 :     }
    1895              : 
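install_extension is effectively idempotent: an extension already at the requested
version is a no-op, an installed extension at another version is upgraded with ALTER
EXTENSION ... UPDATE TO, and a missing extension is created with CREATE EXTENSION IF NOT
EXISTS. The decision logic in isolation (a sketch; the real code quotes the extension
name and version with pg_quote, which is elided here):

    fn upgrade_query(ext: &str, installed: Option<&str>, requested: &str) -> Option<String> {
        match installed {
            Some(v) if v == requested => None, // already at the requested version
            Some(_) => Some(format!("ALTER EXTENSION {ext} UPDATE TO {requested}")),
            None => Some(format!(
                "CREATE EXTENSION IF NOT EXISTS {ext} WITH VERSION {requested}"
            )),
        }
    }

    fn main() {
        assert_eq!(upgrade_query("vector", Some("0.7.0"), "0.7.0"), None);
        assert_eq!(
            upgrade_query("vector", None, "0.7.0").as_deref(),
            Some("CREATE EXTENSION IF NOT EXISTS vector WITH VERSION 0.7.0")
        );
    }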
    1896            0 :     pub async fn prepare_preload_libraries(
    1897            0 :         &self,
    1898            0 :         spec: &ComputeSpec,
    1899            0 :     ) -> Result<RemoteExtensionMetrics> {
    1900            0 :         if self.ext_remote_storage.is_none() {
    1901            0 :             return Ok(RemoteExtensionMetrics {
    1902            0 :                 num_ext_downloaded: 0,
    1903            0 :                 largest_ext_size: 0,
    1904            0 :                 total_ext_download_size: 0,
    1905            0 :             });
    1906            0 :         }
    1907            0 :         let remote_extensions = spec
    1908            0 :             .remote_extensions
    1909            0 :             .as_ref()
    1910            0 :             .ok_or(anyhow::anyhow!("Remote extensions are not configured"))?;
    1911              : 
    1912            0 :         info!("parse shared_preload_libraries from spec.cluster.settings");
    1913            0 :         let mut libs_vec = Vec::new();
    1914            0 :         if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
    1915            0 :             libs_vec = libs
    1916            0 :                 .split(&[',', '\'', ' '])
    1917            0 :                 .filter(|s| *s != "neon" && !s.is_empty())
    1918            0 :                 .map(str::to_string)
    1919            0 :                 .collect();
    1920            0 :         }
    1921            0 :         info!("parse shared_preload_libraries from provided postgresql.conf");
    1922              : 
    1923              :         // The raw postgresql.conf is used by neon_local and the Python tests.
    1924            0 :         if let Some(conf) = &spec.cluster.postgresql_conf {
    1925            0 :             let conf_lines = conf.split('\n').collect::<Vec<&str>>();
    1926            0 :             let mut shared_preload_libraries_line = "";
    1927            0 :             for line in conf_lines {
    1928            0 :                 if line.starts_with("shared_preload_libraries") {
    1929            0 :                     shared_preload_libraries_line = line;
    1930            0 :                 }
    1931              :             }
    1932            0 :             let mut preload_libs_vec = Vec::new();
    1933            0 :             if let Some(libs) = shared_preload_libraries_line.split("='").nth(1) {
    1934            0 :                 preload_libs_vec = libs
    1935            0 :                     .split(&[',', '\'', ' '])
    1936            0 :                     .filter(|s| *s != "neon" && !s.is_empty())
    1937            0 :                     .map(str::to_string)
    1938            0 :                     .collect();
    1939            0 :             }
    1940            0 :             libs_vec.extend(preload_libs_vec);
    1941            0 :         }
    1942              : 
    1943              :         // Don't try to download libraries that are not in the index.
    1944              :         // Assume that they are already present locally.
    1945            0 :         libs_vec.retain(|lib| remote_extensions.library_index.contains_key(lib));
    1946            0 : 
    1947            0 :         info!("Downloading shared preload libraries: {:?}", &libs_vec);
    1948              : 
    1949            0 :         let mut download_tasks = Vec::new();
    1950            0 :         for library in &libs_vec {
    1951            0 :             let (ext_name, ext_path) =
    1952            0 :                 remote_extensions.get_ext(library, true, &self.build_tag, &self.pgversion)?;
    1953            0 :             download_tasks.push(self.download_extension(ext_name, ext_path));
    1954              :         }
    1955            0 :         let results = join_all(download_tasks).await;
    1956              : 
    1957            0 :         let mut remote_ext_metrics = RemoteExtensionMetrics {
    1958            0 :             num_ext_downloaded: 0,
    1959            0 :             largest_ext_size: 0,
    1960            0 :             total_ext_download_size: 0,
    1961            0 :         };
    1962            0 :         for result in results {
    1963            0 :             let download_size = match result {
    1964            0 :                 Ok(res) => {
    1965            0 :                     remote_ext_metrics.num_ext_downloaded += 1;
    1966            0 :                     res
    1967              :                 }
    1968            0 :                 Err(err) => {
    1969            0 :                     // if we failed to download an extension, we don't want to fail the whole
    1970            0 :                     // process, but we do want to log the error
    1971            0 :                     error!("Failed to download extension: {}", err);
    1972            0 :                     0
    1973              :                 }
    1974              :             };
    1975              : 
    1976            0 :             remote_ext_metrics.largest_ext_size =
    1977            0 :                 std::cmp::max(remote_ext_metrics.largest_ext_size, download_size);
    1978            0 :             remote_ext_metrics.total_ext_download_size += download_size;
    1979              :         }
    1980            0 :         Ok(remote_ext_metrics)
    1981            0 :     }
    1982              : 
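Both parsing branches above split on commas, quotes, and spaces, then drop the neon
library (which is handled separately) and any empty tokens. A standalone sketch of the
postgresql.conf branch:

    fn parse_preload_libs(line: &str) -> Vec<String> {
        line.split("='")
            .nth(1)                   // value after shared_preload_libraries='
            .unwrap_or("")
            .split(&[',', '\'', ' '])
            .filter(|s| *s != "neon" && !s.is_empty())
            .map(str::to_string)
            .collect()
    }

    fn main() {
        let line = "shared_preload_libraries='neon,pg_stat_statements, timescaledb'";
        assert_eq!(parse_preload_libs(line), vec!["pg_stat_statements", "timescaledb"]);
    }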
    1983              :     /// Waits until the current thread receives a state-change notification and
    1984              :     /// the pageserver connection string has changed.
    1985              :     ///
    1986              :     /// The operation will time out after a specified duration.
    1987            0 :     pub fn wait_timeout_while_pageserver_connstr_unchanged(&self, duration: Duration) {
    1988            0 :         let state = self.state.lock().unwrap();
    1989            0 :         let old_pageserver_connstr = state
    1990            0 :             .pspec
    1991            0 :             .as_ref()
    1992            0 :             .expect("spec must be set")
    1993            0 :             .pageserver_connstr
    1994            0 :             .clone();
    1995            0 :         let mut unchanged = true;
    1996            0 :         let _ = self
    1997            0 :             .state_changed
    1998            0 :             .wait_timeout_while(state, duration, |s| {
    1999            0 :                 let pageserver_connstr = &s
    2000            0 :                     .pspec
    2001            0 :                     .as_ref()
    2002            0 :                     .expect("spec must be set")
    2003            0 :                     .pageserver_connstr;
    2004            0 :                 unchanged = pageserver_connstr == &old_pageserver_connstr;
    2005            0 :                 unchanged
    2006            0 :             })
    2007            0 :             .unwrap();
    2008            0 :         if !unchanged {
    2009            0 :             info!("Pageserver config changed");
    2010            0 :         }
    2011            0 :     }
    2012              : }
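The method above uses the classic Mutex + Condvar wait_timeout_while idiom: capture the
old value, wait while the predicate still sees it, and record in a flag whether the
predicate ever observed a change (the flag is what the method trusts, not the timeout
result). A reduced, standalone sketch of the same idiom, with hypothetical names:

    use std::sync::{Arc, Condvar, Mutex};
    use std::time::Duration;

    // Wait until the watched string changes or `timeout` elapses;
    // returns true if a change was observed.
    fn wait_for_change(state: &Mutex<String>, changed: &Condvar, timeout: Duration) -> bool {
        let guard = state.lock().unwrap();
        let old = guard.clone();
        let mut unchanged = true;
        let _ = changed
            .wait_timeout_while(guard, timeout, |s| {
                unchanged = *s == old;
                unchanged // keep waiting while the value is unchanged
            })
            .unwrap();
        !unchanged
    }

    fn main() {
        let state = Arc::new(Mutex::new("pageserver-1:6400".to_string()));
        let cond = Arc::new(Condvar::new());
        let (s, c) = (Arc::clone(&state), Arc::clone(&cond));
        std::thread::spawn(move || {
            std::thread::sleep(Duration::from_millis(50));
            *s.lock().unwrap() = "pageserver-2:6400".to_string();
            c.notify_all();
        });
        assert!(wait_for_change(&state, &cond, Duration::from_secs(5)));
    }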
    2013              : 
    2014            0 : pub fn forward_termination_signal() {
    2015            0 :     let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
    2016            0 :     if ss_pid != 0 {
    2017            0 :         let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
    2018            0 :         kill(ss_pid, Signal::SIGTERM).ok();
    2019            0 :     }
    2020            0 :     let pg_pid = PG_PID.load(Ordering::SeqCst);
    2021            0 :     if pg_pid != 0 {
    2022            0 :         let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
    2023            0 :         // Use 'fast' shutdown (SIGINT) because it also creates a shutdown checkpoint, which is important for
    2024            0 :         // read-only replicas (ROs) to get a list of running xacts faster instead of going through the CLOG.
    2025            0 :         // See https://www.postgresql.org/docs/current/server-shutdown.html for the list of modes and signals.
    2026            0 :         kill(pg_pid, Signal::SIGINT).ok();
    2027            0 :     }
    2028            0 : }
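forward_termination_signal relies on each child process having recorded its PID in a
global atomic (SYNC_SAFEKEEPERS_PID, PG_PID above). A minimal sketch of that
registration-and-forwarding pattern, with illustrative names and a stand-in child
command:

    use std::process::Command;
    use std::sync::atomic::{AtomicU32, Ordering};

    use nix::sys::signal::{kill, Signal};
    use nix::unistd::Pid;

    // PID 0 means "no child registered yet".
    static CHILD_PID: AtomicU32 = AtomicU32::new(0);

    fn spawn_and_register() -> std::io::Result<std::process::Child> {
        let child = Command::new("sleep").arg("60").spawn()?;
        // Record the PID so a signal handler can reach the child later.
        CHILD_PID.store(child.id(), Ordering::SeqCst);
        Ok(child)
    }

    fn forward_sigterm() {
        let pid = CHILD_PID.load(Ordering::SeqCst);
        if pid != 0 {
            // .ok(): ignore ESRCH if the child has already exited.
            kill(Pid::from_raw(pid as i32), Signal::SIGTERM).ok();
        }
    }

    fn main() -> std::io::Result<()> {
        let mut child = spawn_and_register()?;
        forward_sigterm();
        child.wait()?; // reap the terminated child
        Ok(())
    }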
        

Generated by: LCOV version 2.1-beta