LCOV - code coverage report
Current view: top level - compute_tools/src - compute.rs (source / functions)
Test: b4ae4c4857f9ef3e144e982a35ee23bc84c71983.info
Test Date: 2024-10-22 22:13:45
Coverage: Lines: 0.0 % (0 of 667) | Functions: 0.0 % (0 of 75)

            Line data    Source code
       1              : use std::collections::HashMap;
       2              : use std::env;
       3              : use std::fs;
       4              : use std::io::BufRead;
       5              : use std::os::unix::fs::{symlink, PermissionsExt};
       6              : use std::path::Path;
       7              : use std::process::{Command, Stdio};
       8              : use std::str::FromStr;
       9              : use std::sync::atomic::AtomicU32;
      10              : use std::sync::atomic::Ordering;
      11              : use std::sync::{Condvar, Mutex, RwLock};
      12              : use std::thread;
      13              : use std::time::Duration;
      14              : use std::time::Instant;
      15              : 
      16              : use anyhow::{Context, Result};
      17              : use chrono::{DateTime, Utc};
      18              : use compute_api::spec::PgIdent;
      19              : use futures::future::join_all;
      20              : use futures::stream::FuturesUnordered;
      21              : use futures::StreamExt;
      22              : use nix::unistd::Pid;
      23              : use postgres::error::SqlState;
      24              : use postgres::{Client, NoTls};
      25              : use tracing::{debug, error, info, instrument, warn};
      26              : use utils::id::{TenantId, TimelineId};
      27              : use utils::lsn::Lsn;
      28              : 
      29              : use compute_api::privilege::Privilege;
      30              : use compute_api::responses::{ComputeMetrics, ComputeStatus};
      31              : use compute_api::spec::{ComputeFeature, ComputeMode, ComputeSpec, ExtVersion};
      32              : use utils::measured_stream::MeasuredReader;
      33              : 
      34              : use nix::sys::signal::{kill, Signal};
      35              : 
      36              : use remote_storage::{DownloadError, RemotePath};
      37              : 
      38              : use crate::checker::create_availability_check_data;
      39              : use crate::installed_extensions::get_installed_extensions_sync;
      40              : use crate::local_proxy;
      41              : use crate::logger::inlinify;
      42              : use crate::pg_helpers::*;
      43              : use crate::spec::*;
      44              : use crate::sync_sk::{check_if_synced, ping_safekeeper};
      45              : use crate::{config, extension_server};
      46              : 
      47              : pub static SYNC_SAFEKEEPERS_PID: AtomicU32 = AtomicU32::new(0);
      48              : pub static PG_PID: AtomicU32 = AtomicU32::new(0);
      49              : 
      50              : /// Compute node info shared across several `compute_ctl` threads.
      51              : pub struct ComputeNode {
      52              :     // Url type maintains proper escaping
      53              :     pub connstr: url::Url,
      54              :     pub pgdata: String,
      55              :     pub pgbin: String,
      56              :     pub pgversion: String,
       57              :     /// We should only allow live re- and reconfiguration of the compute
       58              :     /// node if it uses the 'pull model', i.e. it can go to the control
       59              :     /// plane and fetch the latest configuration. Otherwise, the following could happen:
       60              :     /// - we start compute with some spec provided as an argument
       61              :     /// - we push a new spec and it triggers reconfiguration
       62              :     /// - but then something happens and the compute pod / VM is destroyed,
       63              :     ///   so the k8s controller starts it again with the **old** spec
       64              :     ///
       65              :     /// The same applies to empty computes:
       66              :     /// - we start compute without any spec
       67              :     /// - we push a spec and it triggers configuration
       68              :     /// - but then it is restarted without any spec again
      69              :     pub live_config_allowed: bool,
      70              :     /// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
       71              :     /// To allow the HTTP API server to serve status requests while configuration
       72              :     /// is in progress, the lock should be held only for short read/write
       73              :     /// operations, not for the whole configuration process.
      74              :     pub state: Mutex<ComputeState>,
      75              :     /// `Condvar` to allow notifying waiters about state changes.
      76              :     pub state_changed: Condvar,
       77              :     /// The address of the extension storage proxy gateway.
      78              :     pub ext_remote_storage: Option<String>,
       79              :     // key: ext_archive_name, value: (download start time, download_completed flag)
      80              :     pub ext_download_progress: RwLock<HashMap<String, (DateTime<Utc>, bool)>>,
      81              :     pub build_tag: String,
      82              : }
      83              : 
       84              : // Store some metrics about download sizes that might impact startup time.
      85              : #[derive(Clone, Debug)]
      86              : pub struct RemoteExtensionMetrics {
      87              :     num_ext_downloaded: u64,
      88              :     largest_ext_size: u64,
      89              :     total_ext_download_size: u64,
      90              : }
      91              : 
      92              : #[derive(Clone, Debug)]
      93              : pub struct ComputeState {
      94              :     pub start_time: DateTime<Utc>,
      95              :     pub status: ComputeStatus,
       96              :     /// Timestamp of the last Postgres activity. It could be `None` if
       97              :     /// the compute hasn't been used since start.
      98              :     pub last_active: Option<DateTime<Utc>>,
      99              :     pub error: Option<String>,
     100              :     pub pspec: Option<ParsedSpec>,
     101              :     pub metrics: ComputeMetrics,
     102              : }
     103              : 
     104              : impl ComputeState {
     105            0 :     pub fn new() -> Self {
     106            0 :         Self {
     107            0 :             start_time: Utc::now(),
     108            0 :             status: ComputeStatus::Empty,
     109            0 :             last_active: None,
     110            0 :             error: None,
     111            0 :             pspec: None,
     112            0 :             metrics: ComputeMetrics::default(),
     113            0 :         }
     114            0 :     }
     115              : 
     116            0 :     pub fn set_status(&mut self, status: ComputeStatus, state_changed: &Condvar) {
     117            0 :         let prev = self.status;
     118            0 :         info!("Changing compute status from {} to {}", prev, status);
     119            0 :         self.status = status;
     120            0 :         state_changed.notify_all();
     121            0 :     }
     122              : 
     123            0 :     pub fn set_failed_status(&mut self, err: anyhow::Error, state_changed: &Condvar) {
     124            0 :         self.error = Some(format!("{err:?}"));
     125            0 :         self.set_status(ComputeStatus::Failed, state_changed);
     126            0 :     }
     127              : }
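                      : 
                      : // Illustrative sketch (not part of the original file), assuming a
                      : // `compute: &ComputeNode`: a waiter can block on `state_changed`
                      : // until `set_status` moves the status, e.g. away from `Empty`:
                      : //
                      : //     let mut state = compute.state.lock().unwrap();
                      : //     while state.status == ComputeStatus::Empty {
                      : //         state = compute.state_changed.wait(state).unwrap();
                      : //     }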
     128              : 
     129              : impl Default for ComputeState {
     130            0 :     fn default() -> Self {
     131            0 :         Self::new()
     132            0 :     }
     133              : }
     134              : 
     135              : #[derive(Clone, Debug)]
     136              : pub struct ParsedSpec {
     137              :     pub spec: ComputeSpec,
     138              :     pub tenant_id: TenantId,
     139              :     pub timeline_id: TimelineId,
     140              :     pub pageserver_connstr: String,
     141              :     pub safekeeper_connstrings: Vec<String>,
     142              :     pub storage_auth_token: Option<String>,
     143              : }
     144              : 
     145              : impl TryFrom<ComputeSpec> for ParsedSpec {
     146              :     type Error = String;
     147            0 :     fn try_from(spec: ComputeSpec) -> Result<Self, String> {
     148              :         // Extract the options from the spec file that are needed to connect to
     149              :         // the storage system.
     150              :         //
     151              :         // For backwards-compatibility, the top-level fields in the spec file
     152              :         // may be empty. In that case, we need to dig them from the GUCs in the
     153              :         // cluster.settings field.
     154            0 :         let pageserver_connstr = spec
     155            0 :             .pageserver_connstring
     156            0 :             .clone()
     157            0 :             .or_else(|| spec.cluster.settings.find("neon.pageserver_connstring"))
     158            0 :             .ok_or("pageserver connstr should be provided")?;
     159            0 :         let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() {
     160            0 :             if matches!(spec.mode, ComputeMode::Primary) {
     161            0 :                 spec.cluster
     162            0 :                     .settings
     163            0 :                     .find("neon.safekeepers")
     164            0 :                     .ok_or("safekeeper connstrings should be provided")?
     165            0 :                     .split(',')
     166            0 :                     .map(|str| str.to_string())
     167            0 :                     .collect()
     168              :             } else {
     169            0 :                 vec![]
     170              :             }
     171              :         } else {
     172            0 :             spec.safekeeper_connstrings.clone()
     173              :         };
     174            0 :         let storage_auth_token = spec.storage_auth_token.clone();
     175            0 :         let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
     176            0 :             tenant_id
     177              :         } else {
     178            0 :             spec.cluster
     179            0 :                 .settings
     180            0 :                 .find("neon.tenant_id")
     181            0 :                 .ok_or("tenant id should be provided")
     182            0 :                 .map(|s| TenantId::from_str(&s))?
     183            0 :                 .or(Err("invalid tenant id"))?
     184              :         };
     185            0 :         let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
     186            0 :             timeline_id
     187              :         } else {
     188            0 :             spec.cluster
     189            0 :                 .settings
     190            0 :                 .find("neon.timeline_id")
     191            0 :                 .ok_or("timeline id should be provided")
     192            0 :                 .map(|s| TimelineId::from_str(&s))?
     193            0 :                 .or(Err("invalid timeline id"))?
     194              :         };
     195              : 
     196            0 :         Ok(ParsedSpec {
     197            0 :             spec,
     198            0 :             pageserver_connstr,
     199            0 :             safekeeper_connstrings,
     200            0 :             storage_auth_token,
     201            0 :             tenant_id,
     202            0 :             timeline_id,
     203            0 :         })
     204            0 :     }
     205              : }
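                      : 
                      : // Hypothetical usage sketch: a `ParsedSpec` is normally built from a
                      : // `ComputeSpec` via the `TryFrom` impl above, e.g.:
                      : //
                      : //     let pspec = ParsedSpec::try_from(spec)
                      : //         .map_err(|msg| anyhow::anyhow!(msg))?;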
     206              : 
     207              : /// If we are a VM, returns a [`Command`] that will run in the `neon-postgres`
     208              : /// cgroup. Otherwise returns the default `Command::new(cmd)`
     209              : ///
     210              : /// This function should be used to start postgres, as it will start it in the
     211              : /// neon-postgres cgroup if we are a VM. This allows autoscaling to control
     212              : /// postgres' resource usage. The cgroup will exist in VMs because vm-builder
     213              : /// creates it during the sysinit phase of its inittab.
     214            0 : fn maybe_cgexec(cmd: &str) -> Command {
      215            0 :     // The control plane sets this env var for autoscaling computes.
      216            0 :     // Use `var_os` so we don't have to worry about the variable being valid
      217            0 :     // unicode. That should never be a concern, but just in case.
     218            0 :     if env::var_os("AUTOSCALING").is_some() {
     219            0 :         let mut command = Command::new("cgexec");
     220            0 :         command.args(["-g", "memory:neon-postgres"]);
     221            0 :         command.arg(cmd);
     222            0 :         command
     223              :     } else {
     224            0 :         Command::new(cmd)
     225              :     }
     226            0 : }
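                      : 
                      : // For illustration: with AUTOSCALING set, the command built above is
                      : // equivalent to running
                      : //
                      : //     cgexec -g memory:neon-postgres <cmd> [args...]
                      : //
                      : // so any arguments added later (e.g. `.args(["-D", pgdata])`) land
                      : // after the wrapped binary name.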
     227              : 
      228              : /// Create the special `neon_superuser` role, a slightly nerfed version of a real
      229              : /// superuser that we give to customers.
     230            0 : #[instrument(skip_all)]
     231              : fn create_neon_superuser(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
     232              :     let roles = spec
     233              :         .cluster
     234              :         .roles
     235              :         .iter()
     236            0 :         .map(|r| escape_literal(&r.name))
     237              :         .collect::<Vec<_>>();
     238              : 
     239              :     let dbs = spec
     240              :         .cluster
     241              :         .databases
     242              :         .iter()
     243            0 :         .map(|db| escape_literal(&db.name))
     244              :         .collect::<Vec<_>>();
     245              : 
     246              :     let roles_decl = if roles.is_empty() {
     247              :         String::from("roles text[] := NULL;")
     248              :     } else {
     249              :         format!(
     250              :             r#"
     251              :                roles text[] := ARRAY(SELECT rolname
     252              :                                      FROM pg_catalog.pg_roles
     253              :                                      WHERE rolname IN ({}));"#,
     254              :             roles.join(", ")
     255              :         )
     256              :     };
     257              : 
     258              :     let database_decl = if dbs.is_empty() {
     259              :         String::from("dbs text[] := NULL;")
     260              :     } else {
     261              :         format!(
     262              :             r#"
     263              :                dbs text[] := ARRAY(SELECT datname
     264              :                                    FROM pg_catalog.pg_database
     265              :                                    WHERE datname IN ({}));"#,
     266              :             dbs.join(", ")
     267              :         )
     268              :     };
     269              : 
     270              :     // ALL PRIVILEGES grants CREATE, CONNECT, and TEMPORARY on all databases
     271              :     // (see https://www.postgresql.org/docs/current/ddl-priv.html)
     272              :     let query = format!(
     273              :         r#"
     274              :             DO $$
     275              :                 DECLARE
     276              :                     r text;
     277              :                     {}
     278              :                     {}
     279              :                 BEGIN
     280              :                     IF NOT EXISTS (
     281              :                         SELECT FROM pg_catalog.pg_roles WHERE rolname = 'neon_superuser')
     282              :                     THEN
     283              :                         CREATE ROLE neon_superuser CREATEDB CREATEROLE NOLOGIN REPLICATION BYPASSRLS IN ROLE pg_read_all_data, pg_write_all_data;
     284              :                         IF array_length(roles, 1) IS NOT NULL THEN
     285              :                             EXECUTE format('GRANT neon_superuser TO %s',
     286              :                                            array_to_string(ARRAY(SELECT quote_ident(x) FROM unnest(roles) as x), ', '));
     287              :                             FOREACH r IN ARRAY roles LOOP
     288              :                                 EXECUTE format('ALTER ROLE %s CREATEROLE CREATEDB', quote_ident(r));
     289              :                             END LOOP;
     290              :                         END IF;
     291              :                         IF array_length(dbs, 1) IS NOT NULL THEN
     292              :                             EXECUTE format('GRANT ALL PRIVILEGES ON DATABASE %s TO neon_superuser',
     293              :                                            array_to_string(ARRAY(SELECT quote_ident(x) FROM unnest(dbs) as x), ', '));
     294              :                         END IF;
     295              :                     END IF;
     296              :                 END
     297              :             $$;"#,
     298              :         roles_decl, database_decl,
     299              :     );
      300              :     info!("Creating neon_superuser with query: {}", inlinify(&query));
     301              :     client
     302              :         .simple_query(&query)
     303            0 :         .map_err(|e| anyhow::anyhow!(e).context(query))?;
     304              :     Ok(())
     305              : }
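                      : 
                      : // For illustration: with one existing role "alice" and one database
                      : // "db1", the DO block above effectively executes:
                      : //
                      : //     CREATE ROLE neon_superuser CREATEDB CREATEROLE NOLOGIN REPLICATION
                      : //         BYPASSRLS IN ROLE pg_read_all_data, pg_write_all_data;
                      : //     GRANT neon_superuser TO alice;
                      : //     ALTER ROLE alice CREATEROLE CREATEDB;
                      : //     GRANT ALL PRIVILEGES ON DATABASE db1 TO neon_superuser;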
     306              : 
     307              : impl ComputeNode {
     308              :     /// Check that compute node has corresponding feature enabled.
     309            0 :     pub fn has_feature(&self, feature: ComputeFeature) -> bool {
     310            0 :         let state = self.state.lock().unwrap();
     311              : 
     312            0 :         if let Some(s) = state.pspec.as_ref() {
     313            0 :             s.spec.features.contains(&feature)
     314              :         } else {
     315            0 :             false
     316              :         }
     317            0 :     }
     318              : 
     319            0 :     pub fn set_status(&self, status: ComputeStatus) {
     320            0 :         let mut state = self.state.lock().unwrap();
     321            0 :         state.set_status(status, &self.state_changed);
     322            0 :     }
     323              : 
     324            0 :     pub fn set_failed_status(&self, err: anyhow::Error) {
     325            0 :         let mut state = self.state.lock().unwrap();
     326            0 :         state.set_failed_status(err, &self.state_changed);
     327            0 :     }
     328              : 
     329            0 :     pub fn get_status(&self) -> ComputeStatus {
     330            0 :         self.state.lock().unwrap().status
     331            0 :     }
     332              : 
      333              :     // Remove the `pgdata` directory and create it again with the right permissions.
     334            0 :     fn create_pgdata(&self) -> Result<()> {
     335            0 :         // Ignore removal error, likely it is a 'No such file or directory (os error 2)'.
     336            0 :         // If it is something different then create_dir() will error out anyway.
     337            0 :         let _ok = fs::remove_dir_all(&self.pgdata);
     338            0 :         fs::create_dir(&self.pgdata)?;
     339            0 :         fs::set_permissions(&self.pgdata, fs::Permissions::from_mode(0o700))?;
     340              : 
     341            0 :         Ok(())
     342            0 :     }
     343              : 
      344              :     // Get a basebackup from the pageserver over a libpq connection and
      345              :     // unpack it into the `pgdata` directory, overwriting all its previous content.
     346            0 :     #[instrument(skip_all, fields(%lsn))]
     347              :     fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
     348              :         let spec = compute_state.pspec.as_ref().expect("spec must be set");
     349              :         let start_time = Instant::now();
     350              : 
     351              :         let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
     352              :         let mut config = postgres::Config::from_str(shard0_connstr)?;
     353              : 
     354              :         // Use the storage auth token from the config file, if given.
     355              :         // Note: this overrides any password set in the connection string.
     356              :         if let Some(storage_auth_token) = &spec.storage_auth_token {
     357              :             info!("Got storage auth token from spec file");
     358              :             config.password(storage_auth_token);
     359              :         } else {
     360              :             info!("Storage auth token not set");
     361              :         }
     362              : 
     363              :         // Connect to pageserver
     364              :         let mut client = config.connect(NoTls)?;
     365              :         let pageserver_connect_micros = start_time.elapsed().as_micros() as u64;
     366              : 
     367              :         let basebackup_cmd = match lsn {
     368              :             // HACK We don't use compression on first start (Lsn(0)) because there's no API for it
     369              :             Lsn(0) => format!("basebackup {} {}", spec.tenant_id, spec.timeline_id),
     370              :             _ => format!(
     371              :                 "basebackup {} {} {} --gzip",
     372              :                 spec.tenant_id, spec.timeline_id, lsn
     373              :             ),
     374              :         };
     375              : 
     376              :         let copyreader = client.copy_out(basebackup_cmd.as_str())?;
     377              :         let mut measured_reader = MeasuredReader::new(copyreader);
     378              : 
     379              :         // Check the magic number to see if it's a gzip or not. Even though
     380              :         // we might explicitly ask for gzip, an old pageserver with no implementation
     381              :         // of gzip compression might send us uncompressed data. After some time
     382              :         // passes we can assume all pageservers know how to compress and we can
     383              :         // delete this check.
     384              :         //
     385              :         // If the data is not gzip, it will be tar. It will not be mistakenly
     386              :         // recognized as gzip because tar starts with an ascii encoding of a filename,
     387              :         // and 0x1f and 0x8b are unlikely first characters for any filename. Moreover,
     388              :         // we send the "global" directory first from the pageserver, so it definitely
     389              :         // won't be recognized as gzip.
     390              :         let mut bufreader = std::io::BufReader::new(&mut measured_reader);
     391              :         let gzip = {
     392              :             let peek = bufreader.fill_buf().unwrap();
     393              :             peek[0] == 0x1f && peek[1] == 0x8b
     394              :         };
     395              : 
     396              :         // Read the archive directly from the `CopyOutReader`
     397              :         //
     398              :         // Set `ignore_zeros` so that unpack() reads all the Copy data and
     399              :         // doesn't stop at the end-of-archive marker. Otherwise, if the server
     400              :         // sends an Error after finishing the tarball, we will not notice it.
     401              :         if gzip {
     402              :             let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut bufreader));
     403              :             ar.set_ignore_zeros(true);
     404              :             ar.unpack(&self.pgdata)?;
     405              :         } else {
     406              :             let mut ar = tar::Archive::new(&mut bufreader);
     407              :             ar.set_ignore_zeros(true);
     408              :             ar.unpack(&self.pgdata)?;
     409              :         };
     410              : 
     411              :         // Report metrics
     412              :         let mut state = self.state.lock().unwrap();
     413              :         state.metrics.pageserver_connect_micros = pageserver_connect_micros;
     414              :         state.metrics.basebackup_bytes = measured_reader.get_byte_count() as u64;
     415              :         state.metrics.basebackup_ms = start_time.elapsed().as_millis() as u64;
     416              :         Ok(())
     417              :     }
     418              : 
     419              :     // Gets the basebackup in a retry loop
     420            0 :     #[instrument(skip_all, fields(%lsn))]
     421              :     pub fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
     422              :         let mut retry_period_ms = 500.0;
     423              :         let mut attempts = 0;
     424              :         const DEFAULT_ATTEMPTS: u16 = 10;
     425              :         #[cfg(feature = "testing")]
     426              :         let max_attempts = if let Ok(v) = env::var("NEON_COMPUTE_TESTING_BASEBACKUP_RETRIES") {
     427              :             u16::from_str(&v).unwrap()
     428              :         } else {
     429              :             DEFAULT_ATTEMPTS
     430              :         };
     431              :         #[cfg(not(feature = "testing"))]
     432              :         let max_attempts = DEFAULT_ATTEMPTS;
     433              :         loop {
     434              :             let result = self.try_get_basebackup(compute_state, lsn);
     435              :             match result {
     436              :                 Ok(_) => {
     437              :                     return result;
     438              :                 }
     439              :                 Err(ref e) if attempts < max_attempts => {
     440              :                     warn!(
     441              :                         "Failed to get basebackup: {} (attempt {}/{})",
     442              :                         e, attempts, max_attempts
     443              :                     );
     444              :                     std::thread::sleep(std::time::Duration::from_millis(retry_period_ms as u64));
     445              :                     retry_period_ms *= 1.5;
     446              :                 }
     447              :                 Err(_) => {
     448              :                     return result;
     449              :                 }
     450              :             }
     451              :             attempts += 1;
     452              :         }
     453              :     }
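                      : 
                      :     // For reference: the retry delay above grows geometrically,
                      :     // 500 ms * 1.5^n (500, 750, 1125, ... ms), so with the default
                      :     // 10 attempts the total worst-case sleep time is about
                      :     // 500 * (1.5^10 - 1) / 0.5 ≈ 57 s.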
     454              : 
     455            0 :     pub async fn check_safekeepers_synced_async(
     456            0 :         &self,
     457            0 :         compute_state: &ComputeState,
     458            0 :     ) -> Result<Option<Lsn>> {
     459            0 :         // Construct a connection config for each safekeeper
     460            0 :         let pspec: ParsedSpec = compute_state
     461            0 :             .pspec
     462            0 :             .as_ref()
     463            0 :             .expect("spec must be set")
     464            0 :             .clone();
     465            0 :         let sk_connstrs: Vec<String> = pspec.safekeeper_connstrings.clone();
     466            0 :         let sk_configs = sk_connstrs.into_iter().map(|connstr| {
     467            0 :             // Format connstr
     468            0 :             let id = connstr.clone();
     469            0 :             let connstr = format!("postgresql://no_user@{}", connstr);
     470            0 :             let options = format!(
     471            0 :                 "-c timeline_id={} tenant_id={}",
     472            0 :                 pspec.timeline_id, pspec.tenant_id
     473            0 :             );
     474            0 : 
     475            0 :             // Construct client
     476            0 :             let mut config = tokio_postgres::Config::from_str(&connstr).unwrap();
     477            0 :             config.options(&options);
     478            0 :             if let Some(storage_auth_token) = pspec.storage_auth_token.clone() {
     479            0 :                 config.password(storage_auth_token);
     480            0 :             }
     481              : 
     482            0 :             (id, config)
     483            0 :         });
     484            0 : 
     485            0 :         // Create task set to query all safekeepers
     486            0 :         let mut tasks = FuturesUnordered::new();
     487            0 :         let quorum = sk_configs.len() / 2 + 1;
     488            0 :         for (id, config) in sk_configs {
     489            0 :             let timeout = tokio::time::Duration::from_millis(100);
     490            0 :             let task = tokio::time::timeout(timeout, ping_safekeeper(id, config));
     491            0 :             tasks.push(tokio::spawn(task));
     492            0 :         }
     493              : 
     494              :         // Get a quorum of responses or errors
     495            0 :         let mut responses = Vec::new();
     496            0 :         let mut join_errors = Vec::new();
     497            0 :         let mut task_errors = Vec::new();
     498            0 :         let mut timeout_errors = Vec::new();
     499            0 :         while let Some(response) = tasks.next().await {
     500            0 :             match response {
     501            0 :                 Ok(Ok(Ok(r))) => responses.push(r),
     502            0 :                 Ok(Ok(Err(e))) => task_errors.push(e),
     503            0 :                 Ok(Err(e)) => timeout_errors.push(e),
     504            0 :                 Err(e) => join_errors.push(e),
     505              :             };
     506            0 :             if responses.len() >= quorum {
     507            0 :                 break;
     508            0 :             }
     509            0 :             if join_errors.len() + task_errors.len() + timeout_errors.len() >= quorum {
     510            0 :                 break;
     511            0 :             }
     512              :         }
     513              : 
     514              :         // In case of error, log and fail the check, but don't crash.
     515              :         // We're playing it safe because these errors could be transient
     516              :         // and we don't yet retry. Also being careful here allows us to
     517              :         // be backwards compatible with safekeepers that don't have the
     518              :         // TIMELINE_STATUS API yet.
     519            0 :         if responses.len() < quorum {
     520            0 :             error!(
     521            0 :                 "failed sync safekeepers check {:?} {:?} {:?}",
     522              :                 join_errors, task_errors, timeout_errors
     523              :             );
     524            0 :             return Ok(None);
     525            0 :         }
     526            0 : 
     527            0 :         Ok(check_if_synced(responses))
     528            0 :     }
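                      : 
                      :     // Quorum arithmetic for reference: with 3 safekeepers, quorum is
                      :     // 3 / 2 + 1 = 2, so the check above succeeds once 2 pings return a
                      :     // result and gives up once 2 of them error or time out (each ping
                      :     // is capped at 100 ms).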
     529              : 
      530              :     // Fast path for sync_safekeepers. If they're already synced we get the LSN
      531              :     // in one round trip. If not, we should do a full sync_safekeepers.
     532            0 :     pub fn check_safekeepers_synced(&self, compute_state: &ComputeState) -> Result<Option<Lsn>> {
     533            0 :         let start_time = Utc::now();
     534            0 : 
     535            0 :         // Run actual work with new tokio runtime
     536            0 :         let rt = tokio::runtime::Builder::new_current_thread()
     537            0 :             .enable_all()
     538            0 :             .build()
     539            0 :             .expect("failed to create rt");
     540            0 :         let result = rt.block_on(self.check_safekeepers_synced_async(compute_state));
     541            0 : 
     542            0 :         // Record runtime
     543            0 :         self.state.lock().unwrap().metrics.sync_sk_check_ms = Utc::now()
     544            0 :             .signed_duration_since(start_time)
     545            0 :             .to_std()
     546            0 :             .unwrap()
     547            0 :             .as_millis() as u64;
     548            0 :         result
     549            0 :     }
     550              : 
      551              :     // Run `postgres` in a special mode with the `--sync-safekeepers` argument
     552              :     // and return the reported LSN back to the caller.
     553            0 :     #[instrument(skip_all)]
     554              :     pub fn sync_safekeepers(&self, storage_auth_token: Option<String>) -> Result<Lsn> {
     555              :         let start_time = Utc::now();
     556              : 
     557              :         let mut sync_handle = maybe_cgexec(&self.pgbin)
     558              :             .args(["--sync-safekeepers"])
     559              :             .env("PGDATA", &self.pgdata) // we cannot use -D in this mode
     560              :             .envs(if let Some(storage_auth_token) = &storage_auth_token {
     561              :                 vec![("NEON_AUTH_TOKEN", storage_auth_token)]
     562              :             } else {
     563              :                 vec![]
     564              :             })
     565              :             .stdout(Stdio::piped())
     566              :             .stderr(Stdio::piped())
     567              :             .spawn()
     568              :             .expect("postgres --sync-safekeepers failed to start");
     569              :         SYNC_SAFEKEEPERS_PID.store(sync_handle.id(), Ordering::SeqCst);
     570              : 
      571              :         // `postgres --sync-safekeepers` will print all log output to stderr and
      572              :         // the final LSN to stdout. So we use stdout to collect the LSN, while
      573              :         // stderr logs are collected in a child thread.
     574              :         let stderr = sync_handle
     575              :             .stderr
     576              :             .take()
     577              :             .expect("stderr should be captured");
     578              :         let logs_handle = handle_postgres_logs(stderr);
     579              : 
     580              :         let sync_output = sync_handle
     581              :             .wait_with_output()
     582              :             .expect("postgres --sync-safekeepers failed");
     583              :         SYNC_SAFEKEEPERS_PID.store(0, Ordering::SeqCst);
     584              : 
     585              :         // Process has exited, so we can join the logs thread.
     586              :         let _ = logs_handle
     587              :             .join()
     588            0 :             .map_err(|e| tracing::error!("log thread panicked: {:?}", e));
     589              : 
     590              :         if !sync_output.status.success() {
     591              :             anyhow::bail!(
     592              :                 "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
     593              :                 sync_output.status,
     594              :                 String::from_utf8(sync_output.stdout)
     595              :                     .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
     596              :             );
     597              :         }
     598              : 
     599              :         self.state.lock().unwrap().metrics.sync_safekeepers_ms = Utc::now()
     600              :             .signed_duration_since(start_time)
     601              :             .to_std()
     602              :             .unwrap()
     603              :             .as_millis() as u64;
     604              : 
     605              :         let lsn = Lsn::from_str(String::from_utf8(sync_output.stdout)?.trim())?;
     606              : 
     607              :         Ok(lsn)
     608              :     }
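                      : 
                      :     // Illustrative only: stdout of `postgres --sync-safekeepers` holds a
                      :     // single LSN in the usual "hi/lo" hex form (e.g. "0/1696128"), which
                      :     // `Lsn::from_str` parses after trimming whitespace.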
     609              : 
     610              :     /// Do all the preparations like PGDATA directory creation, configuration,
     611              :     /// safekeepers sync, basebackup, etc.
     612            0 :     #[instrument(skip_all)]
     613              :     pub fn prepare_pgdata(
     614              :         &self,
     615              :         compute_state: &ComputeState,
     616              :         extension_server_port: u16,
     617              :     ) -> Result<()> {
     618              :         let pspec = compute_state.pspec.as_ref().expect("spec must be set");
     619              :         let spec = &pspec.spec;
     620              :         let pgdata_path = Path::new(&self.pgdata);
     621              : 
     622              :         // Remove/create an empty pgdata directory and put configuration there.
     623              :         self.create_pgdata()?;
     624              :         config::write_postgres_conf(
     625              :             &pgdata_path.join("postgresql.conf"),
     626              :             &pspec.spec,
     627              :             Some(extension_server_port),
     628              :         )?;
     629              : 
     630              :         // Syncing safekeepers is only safe with primary nodes: if a primary
     631              :         // is already connected it will be kicked out, so a secondary (standby)
     632              :         // cannot sync safekeepers.
     633              :         let lsn = match spec.mode {
     634              :             ComputeMode::Primary => {
     635              :                 info!("checking if safekeepers are synced");
     636              :                 let lsn = if let Ok(Some(lsn)) = self.check_safekeepers_synced(compute_state) {
     637              :                     lsn
     638              :                 } else {
     639              :                     info!("starting safekeepers syncing");
     640              :                     self.sync_safekeepers(pspec.storage_auth_token.clone())
     641            0 :                         .with_context(|| "failed to sync safekeepers")?
     642              :                 };
     643              :                 info!("safekeepers synced at LSN {}", lsn);
     644              :                 lsn
     645              :             }
     646              :             ComputeMode::Static(lsn) => {
     647              :                 info!("Starting read-only node at static LSN {}", lsn);
     648              :                 lsn
     649              :             }
     650              :             ComputeMode::Replica => {
     651              :                 info!("Initializing standby from latest Pageserver LSN");
     652              :                 Lsn(0)
     653              :             }
     654              :         };
     655              : 
     656              :         info!(
     657              :             "getting basebackup@{} from pageserver {}",
     658              :             lsn, &pspec.pageserver_connstr
     659              :         );
     660            0 :         self.get_basebackup(compute_state, lsn).with_context(|| {
     661            0 :             format!(
     662            0 :                 "failed to get basebackup@{} from pageserver {}",
     663            0 :                 lsn, &pspec.pageserver_connstr
     664            0 :             )
     665            0 :         })?;
     666              : 
     667              :         // Update pg_hba.conf received with basebackup.
     668              :         update_pg_hba(pgdata_path)?;
     669              : 
     670              :         // Place pg_dynshmem under /dev/shm. This allows us to use
     671              :         // 'dynamic_shared_memory_type = mmap' so that the files are placed in
     672              :         // /dev/shm, similar to how 'dynamic_shared_memory_type = posix' works.
     673              :         //
     674              :         // Why on earth don't we just stick to the 'posix' default, you might
     675              :         // ask.  It turns out that making large allocations with 'posix' doesn't
     676              :         // work very well with autoscaling. The behavior we want is that:
     677              :         //
     678              :         // 1. You can make large DSM allocations, larger than the current RAM
     679              :         //    size of the VM, without errors
     680              :         //
     681              :         // 2. If the allocated memory is really used, the VM is scaled up
     682              :         //    automatically to accommodate that
     683              :         //
     684              :         // We try to make that possible by having swap in the VM. But with the
     685              :         // default 'posix' DSM implementation, we fail step 1, even when there's
     686              :         // plenty of swap available. PostgreSQL uses posix_fallocate() to create
     687              :         // the shmem segment, which is really just a file in /dev/shm in Linux,
     688              :         // but posix_fallocate() on tmpfs returns ENOMEM if the size is larger
     689              :         // than available RAM.
     690              :         //
     691              :         // Using 'dynamic_shared_memory_type = mmap' works around that, because
     692              :         // the Postgres 'mmap' DSM implementation doesn't use
     693              :         // posix_fallocate(). Instead, it uses repeated calls to write(2) to
      694              :         // fill the file with zeros. It's odd that this differs between
      695              :         // 'posix' and 'mmap', but we take advantage of it. When the file is
     696              :         // filled slowly with write(2), the kernel allows it to grow larger, as
     697              :         // long as there's swap available.
     698              :         //
      699              :         // In short, using 'dynamic_shared_memory_type = mmap' allows a DSM
      700              :         // segment to be larger than the currently available RAM. But because
      701              :         // we don't want to store it in a real file, which the kernel would
      702              :         // try to flush to disk, we symlink pg_dynshmem to /dev/shm.
     703              :         //
     704              :         // We don't set 'dynamic_shared_memory_type = mmap' here, we let the
     705              :         // control plane control that option. If 'mmap' is not used, this
     706              :         // symlink doesn't affect anything.
     707              :         //
     708              :         // See https://github.com/neondatabase/autoscaling/issues/800
     709              :         std::fs::remove_dir(pgdata_path.join("pg_dynshmem"))?;
     710              :         symlink("/dev/shm/", pgdata_path.join("pg_dynshmem"))?;
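                      : 
                      :         // With the symlink in place, if the control plane then sets
                      :         //
                      :         //     dynamic_shared_memory_type = mmap
                      :         //
                      :         // Postgres creates its DSM files under
                      :         // <pgdata>/pg_dynshmem -> /dev/shm/, i.e. backed by tmpfs.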
     711              : 
     712              :         match spec.mode {
     713              :             ComputeMode::Primary => {}
     714              :             ComputeMode::Replica | ComputeMode::Static(..) => {
     715              :                 add_standby_signal(pgdata_path)?;
     716              :             }
     717              :         }
     718              : 
     719              :         Ok(())
     720              :     }
     721              : 
     722              :     /// Start and stop a postgres process to warm up the VM for startup.
     723            0 :     pub fn prewarm_postgres(&self) -> Result<()> {
     724            0 :         info!("prewarming");
     725              : 
     726              :         // Create pgdata
     727            0 :         let pgdata = &format!("{}.warmup", self.pgdata);
     728            0 :         create_pgdata(pgdata)?;
     729              : 
     730              :         // Run initdb to completion
     731            0 :         info!("running initdb");
     732            0 :         let initdb_bin = Path::new(&self.pgbin).parent().unwrap().join("initdb");
     733            0 :         Command::new(initdb_bin)
     734            0 :             .args(["--pgdata", pgdata])
     735            0 :             .output()
     736            0 :             .expect("cannot start initdb process");
     737              : 
     738              :         // Write conf
     739              :         use std::io::Write;
     740            0 :         let conf_path = Path::new(pgdata).join("postgresql.conf");
     741            0 :         let mut file = std::fs::File::create(conf_path)?;
     742            0 :         writeln!(file, "shared_buffers=65536")?;
     743            0 :         writeln!(file, "port=51055")?; // Nobody should be connecting
     744            0 :         writeln!(file, "shared_preload_libraries = 'neon'")?;
     745              : 
     746              :         // Start postgres
     747            0 :         info!("starting postgres");
     748            0 :         let mut pg = maybe_cgexec(&self.pgbin)
     749            0 :             .args(["-D", pgdata])
     750            0 :             .spawn()
     751            0 :             .expect("cannot start postgres process");
     752            0 : 
     753            0 :         // Stop it when it's ready
     754            0 :         info!("waiting for postgres");
     755            0 :         wait_for_postgres(&mut pg, Path::new(pgdata))?;
     756              :         // SIGQUIT orders postgres to exit immediately. We don't want to SIGKILL
     757              :         // it to avoid orphaned processes prowling around while datadir is
     758              :         // wiped.
     759            0 :         let pm_pid = Pid::from_raw(pg.id() as i32);
     760            0 :         kill(pm_pid, Signal::SIGQUIT)?;
     761            0 :         info!("sent SIGQUIT signal");
     762            0 :         pg.wait()?;
     763            0 :         info!("done prewarming");
     764              : 
     765              :         // clean up
     766            0 :         let _ok = fs::remove_dir_all(pgdata);
     767            0 :         Ok(())
     768            0 :     }
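                      : 
                      :     // For reference (PostgreSQL shutdown modes): SIGTERM = "smart",
                      :     // SIGINT = "fast", SIGQUIT = "immediate", so the kill() above is the
                      :     // signal-level equivalent of
                      :     //
                      :     //     pg_ctl stop -m immediate -D <pgdata>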
     769              : 
      770              :     /// Start Postgres as a child process and wait until it is ready to
      771              :     /// accept connections.
      772              :     /// Returns a handle to the child process and a handle to the logs thread.
     773            0 :     #[instrument(skip_all)]
     774              :     pub fn start_postgres(
     775              :         &self,
     776              :         storage_auth_token: Option<String>,
     777              :     ) -> Result<(std::process::Child, std::thread::JoinHandle<()>)> {
     778              :         let pgdata_path = Path::new(&self.pgdata);
     779              : 
     780              :         // Run postgres as a child process.
     781              :         let mut pg = maybe_cgexec(&self.pgbin)
     782              :             .args(["-D", &self.pgdata])
     783              :             .envs(if let Some(storage_auth_token) = &storage_auth_token {
     784              :                 vec![("NEON_AUTH_TOKEN", storage_auth_token)]
     785              :             } else {
     786              :                 vec![]
     787              :             })
     788              :             .stderr(Stdio::piped())
     789              :             .spawn()
     790              :             .expect("cannot start postgres process");
     791              :         PG_PID.store(pg.id(), Ordering::SeqCst);
     792              : 
     793              :         // Start a thread to collect logs from stderr.
     794              :         let stderr = pg.stderr.take().expect("stderr should be captured");
     795              :         let logs_handle = handle_postgres_logs(stderr);
     796              : 
     797              :         wait_for_postgres(&mut pg, pgdata_path)?;
     798              : 
     799              :         Ok((pg, logs_handle))
     800              :     }
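                      : 
                      :     // Hypothetical caller sketch (assuming `compute: &ComputeNode` and
                      :     // `token: Option<String>`): the returned handles are typically
                      :     // joined once the postmaster exits, e.g.:
                      :     //
                      :     //     let (mut pg, logs) = compute.start_postgres(token)?;
                      :     //     let ecode = pg.wait()?; // blocks until postmaster exit
                      :     //     let _ = logs.join();    // then join the logs thread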
     801              : 
      802              :     /// Do post-configuration of the already started Postgres. This function spawns a background thread to
     803              :     /// configure the database after applying the compute spec. Currently, it upgrades the neon extension
     804              :     /// version. In the future, it may upgrade all 3rd-party extensions.
     805            0 :     #[instrument(skip_all)]
     806              :     pub fn post_apply_config(&self) -> Result<()> {
     807              :         let connstr = self.connstr.clone();
     808            0 :         thread::spawn(move || {
     809            0 :             let func = || {
     810            0 :                 let mut client = Client::connect(connstr.as_str(), NoTls)?;
     811            0 :                 handle_neon_extension_upgrade(&mut client)
     812            0 :                     .context("handle_neon_extension_upgrade")?;
     813            0 :                 Ok::<_, anyhow::Error>(())
     814            0 :             };
     815            0 :             if let Err(err) = func() {
     816            0 :                 error!("error while post_apply_config: {err:#}");
     817            0 :             }
     818            0 :         });
     819              :         Ok(())
     820              :     }
     821              : 
     822              :     /// Do initial configuration of the already started Postgres.
     823            0 :     #[instrument(skip_all)]
     824              :     pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
      825              :         // If the connection fails,
      826              :         // it may be an old node with the `zenith_admin` superuser.
      827              :         //
      828              :         // In this case we need to connect with the old `zenith_admin` name
      829              :         // and create a new user. We cannot simply rename the connected user,
      830              :         // but we can create a new one and grant it all privileges.
     831              :         let mut connstr = self.connstr.clone();
     832              :         connstr
     833              :             .query_pairs_mut()
     834              :             .append_pair("application_name", "apply_config");
     835              : 
     836              :         let mut client = match Client::connect(connstr.as_str(), NoTls) {
     837              :             Err(e) => match e.code() {
     838              :                 Some(&SqlState::INVALID_PASSWORD)
     839              :                 | Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => {
     840              :                     // connect with zenith_admin if cloud_admin could not authenticate
     841              :                     info!(
     842              :                         "cannot connect to postgres: {}, retrying with `zenith_admin` username",
     843              :                         e
     844              :                     );
     845              :                     let mut zenith_admin_connstr = connstr.clone();
     846              : 
     847              :                     zenith_admin_connstr
     848              :                         .set_username("zenith_admin")
     849            0 :                         .map_err(|_| anyhow::anyhow!("invalid connstr"))?;
     850              : 
     851              :                     let mut client =
     852              :                         Client::connect(zenith_admin_connstr.as_str(), NoTls)
     853              :                             .context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
     854              :                     // Disable forwarding so that users don't get a cloud_admin role
     855              : 
     856            0 :                     let mut func = || {
     857            0 :                         client.simple_query("SET neon.forward_ddl = false")?;
     858            0 :                         client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
     859            0 :                         client.simple_query("GRANT zenith_admin TO cloud_admin")?;
     860            0 :                         Ok::<_, anyhow::Error>(())
     861            0 :                     };
     862              :                     func().context("apply_config setup cloud_admin")?;
     863              : 
     864              :                     drop(client);
     865              : 
     866              :                     // reconnect with connstring with expected name
     867              :                     Client::connect(connstr.as_str(), NoTls)?
     868              :                 }
     869              :                 _ => return Err(e.into()),
     870              :             },
     871              :             Ok(client) => client,
     872              :         };
     873              : 
     874              :         // Disable DDL forwarding because control plane already knows about these roles/databases.
     875              :         client
     876              :             .simple_query("SET neon.forward_ddl = false")
     877              :             .context("apply_config SET neon.forward_ddl = false")?;
     878              : 
     879              :         // Proceed with post-startup configuration. Note, that order of operations is important.
     880              :         let spec = &compute_state.pspec.as_ref().expect("spec must be set").spec;
     881              :         create_neon_superuser(spec, &mut client).context("apply_config create_neon_superuser")?;
     882              :         cleanup_instance(&mut client).context("apply_config cleanup_instance")?;
     883              :         handle_roles(spec, &mut client).context("apply_config handle_roles")?;
     884              :         handle_databases(spec, &mut client).context("apply_config handle_databases")?;
     885              :         handle_role_deletions(spec, connstr.as_str(), &mut client)
     886              :             .context("apply_config handle_role_deletions")?;
     887              :         handle_grants(
     888              :             spec,
     889              :             &mut client,
     890              :             connstr.as_str(),
     891              :             self.has_feature(ComputeFeature::AnonExtension),
     892              :         )
     893              :         .context("apply_config handle_grants")?;
     894              :         handle_extensions(spec, &mut client).context("apply_config handle_extensions")?;
     895              :         handle_extension_neon(&mut client).context("apply_config handle_extension_neon")?;
     896              :         create_availability_check_data(&mut client)
     897              :             .context("apply_config create_availability_check_data")?;
     898              : 
     899              :         // 'Close' connection
     900              :         drop(client);
     901              : 
     902              :         if let Some(ref local_proxy) = spec.local_proxy_config {
     903              :             info!("configuring local_proxy");
     904              :             local_proxy::configure(local_proxy).context("apply_config local_proxy")?;
     905              :         }
     906              : 
      907              :         // Run migrations in a separate thread so they don't hold up cold starts
     908            0 :         thread::spawn(move || {
     909            0 :             let mut connstr = connstr.clone();
     910            0 :             connstr
     911            0 :                 .query_pairs_mut()
     912            0 :                 .append_pair("application_name", "migrations");
     913              : 
     914            0 :             let mut client = Client::connect(connstr.as_str(), NoTls)?;
     915            0 :             handle_migrations(&mut client).context("apply_config handle_migrations")
     916            0 :         });
     917              :         Ok(())
     918              :     }
     919              : 
      920              :     // A thin wrapper around `pg_ctl reload`. Note that right now we don't use
      921              :     // `pg_ctl` for start / stop.
     922            0 :     #[instrument(skip_all)]
     923              :     fn pg_reload_conf(&self) -> Result<()> {
     924              :         let pgctl_bin = Path::new(&self.pgbin).parent().unwrap().join("pg_ctl");
     925              :         Command::new(pgctl_bin)
     926              :             .args(["reload", "-D", &self.pgdata])
     927              :             .output()
     928              :             .expect("cannot run pg_ctl process");
     929              :         Ok(())
     930              :     }
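                       :     // The call above is equivalent to running, from a shell (paths shown as
                       :     //   placeholders):
                       :     //
                       :     //   $PG_BIN_DIR/pg_ctl reload -D $PGDATA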
     931              : 
      932              :     /// Similar to `apply_config()`, but performs a slightly different sequence of operations,
     933              :     /// as it's used to reconfigure a previously started and configured Postgres node.
     934            0 :     #[instrument(skip_all)]
     935              :     pub fn reconfigure(&self) -> Result<()> {
     936              :         let spec = self.state.lock().unwrap().pspec.clone().unwrap().spec;
     937              : 
     938              :         if let Some(ref pgbouncer_settings) = spec.pgbouncer_settings {
     939              :             info!("tuning pgbouncer");
     940              : 
     941              :             let rt = tokio::runtime::Builder::new_current_thread()
     942              :                 .enable_all()
     943              :                 .build()
     944              :                 .expect("failed to create rt");
     945              : 
     946              :             // Spawn a thread to do the tuning,
     947              :             // so that we don't block the main thread that starts Postgres.
     948              :             let pgbouncer_settings = pgbouncer_settings.clone();
     949            0 :             let _handle = thread::spawn(move || {
     950            0 :                 let res = rt.block_on(tune_pgbouncer(pgbouncer_settings));
     951            0 :                 if let Err(err) = res {
     952            0 :                     error!("error while tuning pgbouncer: {err:?}");
     953            0 :                 }
     954            0 :             });
     955              :         }
     956              : 
     957              :         if let Some(ref local_proxy) = spec.local_proxy_config {
     958              :             info!("configuring local_proxy");
     959              : 
     960              :             // Spawn a thread to do the configuration,
     961              :             // so that we don't block the main thread that starts Postgres.
     962              :             let local_proxy = local_proxy.clone();
     963            0 :             let _handle = Some(thread::spawn(move || {
     964            0 :                 if let Err(err) = local_proxy::configure(&local_proxy) {
     965            0 :                     error!("error while configuring local_proxy: {err:?}");
     966            0 :                 }
     967            0 :             }));
     968              :         }
     969              : 
     970              :         // Write new config
     971              :         let pgdata_path = Path::new(&self.pgdata);
     972              :         let postgresql_conf_path = pgdata_path.join("postgresql.conf");
     973              :         config::write_postgres_conf(&postgresql_conf_path, &spec, None)?;
      974              :         // Temporarily reset max_cluster_size in the config to avoid hitting the limit
      975              :         // while we are reconfiguring: creating new extensions, roles, etc.
      976              :         // (a sketch of this temporary-override helper follows this function)
     977            0 :         config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
     978            0 :             self.pg_reload_conf()?;
     979              : 
     980            0 :             let mut client = Client::connect(self.connstr.as_str(), NoTls)?;
     981              : 
     982              :             // Proceed with post-startup configuration. Note, that order of operations is important.
     983              :             // Disable DDL forwarding because control plane already knows about these roles/databases.
     984            0 :             if spec.mode == ComputeMode::Primary {
     985            0 :                 client.simple_query("SET neon.forward_ddl = false")?;
     986            0 :                 cleanup_instance(&mut client)?;
     987            0 :                 handle_roles(&spec, &mut client)?;
     988            0 :                 handle_databases(&spec, &mut client)?;
     989            0 :                 handle_role_deletions(&spec, self.connstr.as_str(), &mut client)?;
     990            0 :                 handle_grants(
     991            0 :                     &spec,
     992            0 :                     &mut client,
     993            0 :                     self.connstr.as_str(),
     994            0 :                     self.has_feature(ComputeFeature::AnonExtension),
     995            0 :                 )?;
     996            0 :                 handle_extensions(&spec, &mut client)?;
     997            0 :                 handle_extension_neon(&mut client)?;
     998              :                 // We can skip handle_migrations here because a new migration can only appear
     999              :                 // if we have a new version of the compute_ctl binary, which can only happen
     1000              :                 // if the compute got restarted, in which case we'll end up in apply_config
     1001              :                 // instead of reconfigure.
    1002            0 :             }
    1003              : 
    1004              :             // 'Close' connection
    1005            0 :             drop(client);
    1006            0 : 
    1007            0 :             Ok(())
    1008            0 :         })?;
    1009              : 
    1010              :         self.pg_reload_conf()?;
    1011              : 
    1012              :         let unknown_op = "unknown".to_string();
    1013              :         let op_id = spec.operation_uuid.as_ref().unwrap_or(&unknown_op);
    1014              :         info!(
    1015              :             "finished reconfiguration of compute node for operation {}",
    1016              :             op_id
    1017              :         );
    1018              : 
    1019              :         Ok(())
    1020              :     }
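                       : 
                       :     // `config::with_compute_ctl_tmp_override`, used above, lives in `crate::config`.
                       :     // A minimal sketch of the pattern it implements might look like the following
                       :     // (the file name and details here are assumptions for illustration, not the
                       :     // actual code):
                       :     //
                       :     //   fn with_tmp_override<F>(pgdata: &Path, setting: &str, f: F) -> Result<()>
                       :     //   where
                       :     //       F: FnOnce() -> Result<()>,
                       :     //   {
                       :     //       // Write the override to a config file that Postgres includes, run
                       :     //       // the closure, then always remove the override again.
                       :     //       let path = pgdata.join("compute_ctl_temp_override.conf");
                       :     //       std::fs::write(&path, setting)?;
                       :     //       let res = f();
                       :     //       std::fs::remove_file(&path)?;
                       :     //       res
                       :     //   }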
    1021              : 
    1022            0 :     #[instrument(skip_all)]
    1023              :     pub fn start_compute(
    1024              :         &self,
    1025              :         extension_server_port: u16,
    1026              :     ) -> Result<(std::process::Child, std::thread::JoinHandle<()>)> {
    1027              :         let compute_state = self.state.lock().unwrap().clone();
    1028              :         let pspec = compute_state.pspec.as_ref().expect("spec must be set");
    1029              :         info!(
    1030              :             "starting compute for project {}, operation {}, tenant {}, timeline {}",
    1031              :             pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None"),
    1032              :             pspec.spec.operation_uuid.as_deref().unwrap_or("None"),
    1033              :             pspec.tenant_id,
    1034              :             pspec.timeline_id,
    1035              :         );
    1036              : 
    1037              :         // tune pgbouncer
    1038              :         if let Some(pgbouncer_settings) = &pspec.spec.pgbouncer_settings {
    1039              :             info!("tuning pgbouncer");
    1040              : 
    1041              :             let rt = tokio::runtime::Builder::new_current_thread()
    1042              :                 .enable_all()
    1043              :                 .build()
    1044              :                 .expect("failed to create rt");
    1045              : 
    1046              :             // Spawn a thread to do the tuning,
    1047              :             // so that we don't block the main thread that starts Postgres.
    1048              :             let pgbouncer_settings = pgbouncer_settings.clone();
    1049            0 :             let _handle = thread::spawn(move || {
    1050            0 :                 let res = rt.block_on(tune_pgbouncer(pgbouncer_settings));
    1051            0 :                 if let Err(err) = res {
    1052            0 :                     error!("error while tuning pgbouncer: {err:?}");
    1053            0 :                 }
    1054            0 :             });
    1055              :         }
    1056              : 
    1057              :         if let Some(local_proxy) = &pspec.spec.local_proxy_config {
    1058              :             info!("configuring local_proxy");
    1059              : 
    1060              :             // Spawn a thread to do the configuration,
    1061              :             // so that we don't block the main thread that starts Postgres.
    1062              :             let local_proxy = local_proxy.clone();
    1063            0 :             let _handle = thread::spawn(move || {
    1064            0 :                 if let Err(err) = local_proxy::configure(&local_proxy) {
    1065            0 :                     error!("error while configuring local_proxy: {err:?}");
    1066            0 :                 }
    1067            0 :             });
    1068              :         }
    1069              : 
    1070              :         info!(
    1071              :             "start_compute spec.remote_extensions {:?}",
    1072              :             pspec.spec.remote_extensions
    1073              :         );
    1074              : 
     1075              :         // This part is synchronous, because we need to download any
     1076              :         // remote shared_preload_libraries before Postgres starts
    1077              :         if let Some(remote_extensions) = &pspec.spec.remote_extensions {
     1078              :             // First, create control files for all available extensions
    1079              :             extension_server::create_control_files(remote_extensions, &self.pgbin);
    1080              : 
    1081              :             let library_load_start_time = Utc::now();
    1082              :             let remote_ext_metrics = self.prepare_preload_libraries(&pspec.spec)?;
    1083              : 
    1084              :             let library_load_time = Utc::now()
    1085              :                 .signed_duration_since(library_load_start_time)
    1086              :                 .to_std()
    1087              :                 .unwrap()
    1088              :                 .as_millis() as u64;
    1089              :             let mut state = self.state.lock().unwrap();
    1090              :             state.metrics.load_ext_ms = library_load_time;
    1091              :             state.metrics.num_ext_downloaded = remote_ext_metrics.num_ext_downloaded;
    1092              :             state.metrics.largest_ext_size = remote_ext_metrics.largest_ext_size;
    1093              :             state.metrics.total_ext_download_size = remote_ext_metrics.total_ext_download_size;
    1094              :             info!(
    1095              :                 "Loading shared_preload_libraries took {:?}ms",
    1096              :                 library_load_time
    1097              :             );
    1098              :             info!("{:?}", remote_ext_metrics);
    1099              :         }
    1100              : 
    1101              :         self.prepare_pgdata(&compute_state, extension_server_port)?;
    1102              : 
    1103              :         let start_time = Utc::now();
    1104              :         let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
    1105              : 
    1106              :         let config_time = Utc::now();
    1107              :         if pspec.spec.mode == ComputeMode::Primary {
    1108              :             if !pspec.spec.skip_pg_catalog_updates {
    1109              :                 let pgdata_path = Path::new(&self.pgdata);
     1110              :                 // Temporarily reset max_cluster_size in the config to avoid hitting
     1111              :                 // the limit while we are applying the config:
     1112              :                 // creating new extensions, roles, etc.
    1113              :                 config::with_compute_ctl_tmp_override(
    1114              :                     pgdata_path,
    1115              :                     "neon.max_cluster_size=-1",
    1116            0 :                     || {
    1117            0 :                         self.pg_reload_conf()?;
    1118              : 
    1119            0 :                         self.apply_config(&compute_state)?;
    1120              : 
    1121            0 :                         Ok(())
    1122            0 :                     },
    1123              :                 )?;
    1124              :                 self.pg_reload_conf()?;
    1125              :             }
    1126              :             self.post_apply_config()?;
    1127              : 
    1128              :             let connstr = self.connstr.clone();
    1129            0 :             thread::spawn(move || {
    1130            0 :                 get_installed_extensions_sync(connstr).context("get_installed_extensions")
    1131            0 :             });
    1132              :         }
    1133              : 
    1134              :         let startup_end_time = Utc::now();
    1135              :         {
    1136              :             let mut state = self.state.lock().unwrap();
    1137              :             state.metrics.start_postgres_ms = config_time
    1138              :                 .signed_duration_since(start_time)
    1139              :                 .to_std()
    1140              :                 .unwrap()
    1141              :                 .as_millis() as u64;
    1142              :             state.metrics.config_ms = startup_end_time
    1143              :                 .signed_duration_since(config_time)
    1144              :                 .to_std()
    1145              :                 .unwrap()
    1146              :                 .as_millis() as u64;
    1147              :             state.metrics.total_startup_ms = startup_end_time
    1148              :                 .signed_duration_since(compute_state.start_time)
    1149              :                 .to_std()
    1150              :                 .unwrap()
    1151              :                 .as_millis() as u64;
    1152              :         }
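                       :         // To summarize the phases measured above:
                       :         //   start_postgres_ms = config_time - start_time (starting Postgres itself),
                       :         //   config_ms         = startup_end_time - config_time (applying the config),
                       :         //   total_startup_ms  = startup_end_time - compute_state.start_time.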
    1153              :         self.set_status(ComputeStatus::Running);
    1154              : 
    1155              :         info!(
    1156              :             "finished configuration of compute for project {}",
    1157              :             pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None")
    1158              :         );
    1159              : 
    1160              :         // Log metrics so that we can search for slow operations in logs
    1161              :         let metrics = {
    1162              :             let state = self.state.lock().unwrap();
    1163              :             state.metrics.clone()
    1164              :         };
    1165              :         info!(?metrics, "compute start finished");
    1166              : 
    1167              :         Ok(pg_process)
    1168              :     }
    1169              : 
     1170              :     /// Update `last_active` in the shared state, but only if the new value is more recent.
    1171            0 :     pub fn update_last_active(&self, last_active: Option<DateTime<Utc>>) {
    1172            0 :         let mut state = self.state.lock().unwrap();
    1173            0 :         // NB: `Some(<DateTime>)` is always greater than `None`.
    1174            0 :         if last_active > state.last_active {
    1175            0 :             state.last_active = last_active;
    1176            0 :             debug!("set the last compute activity time to: {:?}", last_active);
    1177            0 :         }
    1178            0 :     }
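                       :     // For reference, `Option`'s derived ordering treats `None` as the smallest
                       :     // value, so the comparison above also covers the initial `None` state:
                       :     //
                       :     //   assert!(Some(Utc::now()) > None::<DateTime<Utc>>);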
    1179              : 
    1180              :     // Look for core dumps and collect backtraces.
    1181              :     //
     1182              :     // EKS worker nodes have the following core dump settings:
    1183              :     //   /proc/sys/kernel/core_pattern -> core
    1184              :     //   /proc/sys/kernel/core_uses_pid -> 1
    1185              :     //   ulimit -c -> unlimited
     1186              :     // which results in core dumps being written to the Postgres data directory as core.<pid>.
     1187              :     //
     1188              :     // Use that as the default location and pattern, except on macOS, where core
     1189              :     // dumps are written to the /cores/ directory by default.
    1190              :     //
    1191              :     // With default Linux settings, the core dump file is called just "core", so check for
    1192              :     // that too.
    1193            0 :     pub fn check_for_core_dumps(&self) -> Result<()> {
    1194            0 :         let core_dump_dir = match std::env::consts::OS {
    1195            0 :             "macos" => Path::new("/cores/"),
    1196            0 :             _ => Path::new(&self.pgdata),
    1197              :         };
    1198              : 
    1199              :         // Collect core dump paths if any
    1200            0 :         info!("checking for core dumps in {}", core_dump_dir.display());
    1201            0 :         let files = fs::read_dir(core_dump_dir)?;
    1202            0 :         let cores = files.filter_map(|entry| {
    1203            0 :             let entry = entry.ok()?;
    1204              : 
    1205            0 :             let is_core_dump = match entry.file_name().to_str()? {
    1206            0 :                 n if n.starts_with("core.") => true,
    1207            0 :                 "core" => true,
    1208            0 :                 _ => false,
    1209              :             };
    1210            0 :             if is_core_dump {
    1211            0 :                 Some(entry.path())
    1212              :             } else {
    1213            0 :                 None
    1214              :             }
    1215            0 :         });
    1216              : 
    1217              :         // Print backtrace for each core dump
    1218            0 :         for core_path in cores {
    1219            0 :             warn!(
    1220            0 :                 "core dump found: {}, collecting backtrace",
    1221            0 :                 core_path.display()
    1222              :             );
    1223              : 
    1224              :             // Try first with gdb
    1225            0 :             let backtrace = Command::new("gdb")
    1226            0 :                 .args(["--batch", "-q", "-ex", "bt", &self.pgbin])
    1227            0 :                 .arg(&core_path)
    1228            0 :                 .output();
    1229              : 
    1230              :             // Try lldb if no gdb is found -- that is handy for local testing on macOS
    1231            0 :             let backtrace = match backtrace {
    1232            0 :                 Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
    1233            0 :                     warn!("cannot find gdb, trying lldb");
    1234            0 :                     Command::new("lldb")
    1235            0 :                         .arg("-c")
    1236            0 :                         .arg(&core_path)
    1237            0 :                         .args(["--batch", "-o", "bt all", "-o", "quit"])
    1238            0 :                         .output()
    1239              :                 }
    1240            0 :                 _ => backtrace,
    1241            0 :             }?;
    1242              : 
    1243            0 :             warn!(
    1244            0 :                 "core dump backtrace: {}",
    1245            0 :                 String::from_utf8_lossy(&backtrace.stdout)
    1246              :             );
    1247            0 :             warn!(
    1248            0 :                 "debugger stderr: {}",
    1249            0 :                 String::from_utf8_lossy(&backtrace.stderr)
    1250              :             );
    1251              :         }
    1252              : 
    1253            0 :         Ok(())
    1254            0 :     }
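                       :     // The equivalent manual debugger invocations used above are roughly:
                       :     //
                       :     //   gdb --batch -q -ex bt <pgbin> <core_path>
                       :     //   lldb -c <core_path> --batch -o "bt all" -o quit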
    1255              : 
     1256              :     /// Select `pg_stat_statements` data and return it as stringified JSON.
    1257            0 :     pub async fn collect_insights(&self) -> String {
    1258            0 :         let mut result_rows: Vec<String> = Vec::new();
    1259            0 :         let connect_result = tokio_postgres::connect(self.connstr.as_str(), NoTls).await;
    1260            0 :         let (client, connection) = connect_result.unwrap();
    1261            0 :         tokio::spawn(async move {
    1262            0 :             if let Err(e) = connection.await {
    1263            0 :                 eprintln!("connection error: {}", e);
    1264            0 :             }
    1265            0 :         });
    1266            0 :         let result = client
    1267            0 :             .simple_query(
    1268            0 :                 "SELECT
    1269            0 :     row_to_json(pg_stat_statements)
    1270            0 : FROM
    1271            0 :     pg_stat_statements
    1272            0 : WHERE
    1273            0 :     userid != 'cloud_admin'::regrole::oid
    1274            0 : ORDER BY
    1275            0 :     (mean_exec_time + mean_plan_time) DESC
    1276            0 : LIMIT 100",
    1277            0 :             )
    1278            0 :             .await;
    1279              : 
    1280            0 :         if let Ok(raw_rows) = result {
    1281            0 :             for message in raw_rows.iter() {
    1282            0 :                 if let postgres::SimpleQueryMessage::Row(row) = message {
    1283            0 :                     if let Some(json) = row.get(0) {
    1284            0 :                         result_rows.push(json.to_string());
    1285            0 :                     }
    1286            0 :                 }
    1287              :             }
    1288              : 
    1289            0 :             format!("{{\"pg_stat_statements\": [{}]}}", result_rows.join(","))
    1290              :         } else {
     1291            0 :             "{\"pg_stat_statements\": []}".to_string()
    1292              :         }
    1293            0 :     }
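                       :     // The returned string has the following shape (row contents illustrative):
                       :     //
                       :     //   {"pg_stat_statements": [{"userid": 16385, "query": "SELECT ...", ...}, ...]}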
    1294              : 
     1295              :     // Download an archive, unzip it, and place the files in the correct locations.
    1296            0 :     pub async fn download_extension(
    1297            0 :         &self,
    1298            0 :         real_ext_name: String,
    1299            0 :         ext_path: RemotePath,
    1300            0 :     ) -> Result<u64, DownloadError> {
    1301            0 :         let ext_remote_storage =
    1302            0 :             self.ext_remote_storage
    1303            0 :                 .as_ref()
    1304            0 :                 .ok_or(DownloadError::BadInput(anyhow::anyhow!(
    1305            0 :                     "Remote extensions storage is not configured",
    1306            0 :                 )))?;
    1307              : 
    1308            0 :         let ext_archive_name = ext_path.object_name().expect("bad path");
    1309            0 : 
    1310            0 :         let mut first_try = false;
    1311            0 :         if !self
    1312            0 :             .ext_download_progress
    1313            0 :             .read()
    1314            0 :             .expect("lock err")
    1315            0 :             .contains_key(ext_archive_name)
    1316            0 :         {
    1317            0 :             self.ext_download_progress
    1318            0 :                 .write()
    1319            0 :                 .expect("lock err")
    1320            0 :                 .insert(ext_archive_name.to_string(), (Utc::now(), false));
    1321            0 :             first_try = true;
    1322            0 :         }
    1323            0 :         let (download_start, download_completed) =
    1324            0 :             self.ext_download_progress.read().expect("lock err")[ext_archive_name];
    1325            0 :         let start_time_delta = Utc::now()
    1326            0 :             .signed_duration_since(download_start)
    1327            0 :             .to_std()
    1328            0 :             .unwrap()
    1329            0 :             .as_millis() as u64;
    1330              : 
    1331              :         // how long to wait for extension download if it was started by another process
    1332              :         const HANG_TIMEOUT: u64 = 3000; // milliseconds
    1333              : 
    1334            0 :         if download_completed {
    1335            0 :             info!("extension already downloaded, skipping re-download");
    1336            0 :             return Ok(0);
    1337            0 :         } else if start_time_delta < HANG_TIMEOUT && !first_try {
     1338            0 :             info!("download {ext_archive_name} already started by another process, waiting until completion or timeout");
    1339            0 :             let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500));
    1340              :             loop {
    1341            0 :                 info!("waiting for download");
    1342            0 :                 interval.tick().await;
    1343            0 :                 let (_, download_completed_now) =
    1344            0 :                     self.ext_download_progress.read().expect("lock")[ext_archive_name];
    1345            0 :                 if download_completed_now {
    1346            0 :                     info!("download finished by whoever else downloaded it");
    1347            0 :                     return Ok(0);
    1348            0 :                 }
    1349              :             }
     1350              :             // NOTE: the loop above is terminated
     1351              :             // by the timeout applied to the download function
    1352            0 :         }
    1353            0 : 
     1354            0 :         // If the extension hasn't been downloaded before, or the previous
     1355            0 :         // download attempt started at least HANG_TIMEOUT ms ago,
     1356            0 :         // then we try to download it here.
    1357            0 :         info!("downloading new extension {ext_archive_name}");
    1358              : 
    1359            0 :         let download_size = extension_server::download_extension(
    1360            0 :             &real_ext_name,
    1361            0 :             &ext_path,
    1362            0 :             ext_remote_storage,
    1363            0 :             &self.pgbin,
    1364            0 :         )
    1365            0 :         .await
    1366            0 :         .map_err(DownloadError::Other);
    1367            0 : 
    1368            0 :         if download_size.is_ok() {
    1369            0 :             self.ext_download_progress
    1370            0 :                 .write()
    1371            0 :                 .expect("bad lock")
    1372            0 :                 .insert(ext_archive_name.to_string(), (download_start, true));
    1373            0 :         }
    1374              : 
    1375            0 :         download_size
    1376            0 :     }
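                       :     // To summarize the coordination protocol above, per `ext_archive_name`:
                       :     //   - the first caller records (download_start, completed = false) and downloads;
                       :     //   - callers arriving within HANG_TIMEOUT poll every 500 ms until completed;
                       :     //   - callers arriving after HANG_TIMEOUT retry the download themselves.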
    1377              : 
    1378            0 :     pub async fn set_role_grants(
    1379            0 :         &self,
    1380            0 :         db_name: &PgIdent,
    1381            0 :         schema_name: &PgIdent,
    1382            0 :         privileges: &[Privilege],
    1383            0 :         role_name: &PgIdent,
    1384            0 :     ) -> Result<()> {
    1385              :         use tokio_postgres::config::Config;
    1386              :         use tokio_postgres::NoTls;
    1387              : 
    1388            0 :         let mut conf = Config::from_str(self.connstr.as_str()).unwrap();
    1389            0 :         conf.dbname(db_name);
    1390              : 
    1391            0 :         let (db_client, conn) = conf
    1392            0 :             .connect(NoTls)
    1393            0 :             .await
    1394            0 :             .context("Failed to connect to the database")?;
    1395            0 :         tokio::spawn(conn);
    1396            0 : 
    1397            0 :         // TODO: support other types of grants apart from schemas?
    1398            0 :         let query = format!(
    1399            0 :             "GRANT {} ON SCHEMA {} TO {}",
    1400            0 :             privileges
    1401            0 :                 .iter()
     1402            0 :                 // Should not be quoted, as it's part of the command;
     1403            0 :                 // it's already sanitized, so it's OK.
    1404            0 :                 .map(|p| p.as_str())
    1405            0 :                 .collect::<Vec<&'static str>>()
    1406            0 :                 .join(", "),
    1407            0 :             // quote the schema and role name as identifiers to sanitize them.
    1408            0 :             schema_name.pg_quote(),
    1409            0 :             role_name.pg_quote(),
    1410            0 :         );
    1411            0 :         db_client
    1412            0 :             .simple_query(&query)
    1413            0 :             .await
    1414            0 :             .with_context(|| format!("Failed to execute query: {}", query))?;
    1415              : 
    1416            0 :         Ok(())
    1417            0 :     }
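                       :     // With hypothetical inputs (privileges USAGE and CREATE, schema "app",
                       :     // role "reader"), the generated statement would be:
                       :     //
                       :     //   GRANT USAGE, CREATE ON SCHEMA "app" TO "reader"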
    1418              : 
    1419            0 :     pub async fn install_extension(
    1420            0 :         &self,
    1421            0 :         ext_name: &PgIdent,
    1422            0 :         db_name: &PgIdent,
    1423            0 :         ext_version: ExtVersion,
    1424            0 :     ) -> Result<ExtVersion> {
    1425              :         use tokio_postgres::config::Config;
    1426              :         use tokio_postgres::NoTls;
    1427              : 
    1428            0 :         let mut conf = Config::from_str(self.connstr.as_str()).unwrap();
    1429            0 :         conf.dbname(db_name);
    1430              : 
    1431            0 :         let (db_client, conn) = conf
    1432            0 :             .connect(NoTls)
    1433            0 :             .await
    1434            0 :             .context("Failed to connect to the database")?;
    1435            0 :         tokio::spawn(conn);
    1436            0 : 
    1437            0 :         let version_query = "SELECT extversion FROM pg_extension WHERE extname = $1";
    1438            0 :         let version: Option<ExtVersion> = db_client
    1439            0 :             .query_opt(version_query, &[&ext_name])
    1440            0 :             .await
    1441            0 :             .with_context(|| format!("Failed to execute query: {}", version_query))?
    1442            0 :             .map(|row| row.get(0));
    1443            0 : 
     1444            0 :         // Sanitize the inputs as Postgres identifiers.
    1445            0 :         let ext_name: String = ext_name.pg_quote();
    1446            0 :         let quoted_version: String = ext_version.pg_quote();
    1447              : 
    1448            0 :         if let Some(installed_version) = version {
    1449            0 :             if installed_version == ext_version {
    1450            0 :                 return Ok(installed_version);
    1451            0 :             }
    1452            0 :             let query = format!("ALTER EXTENSION {ext_name} UPDATE TO {quoted_version}");
    1453            0 :             db_client
    1454            0 :                 .simple_query(&query)
    1455            0 :                 .await
    1456            0 :                 .with_context(|| format!("Failed to execute query: {}", query))?;
    1457              :         } else {
    1458            0 :             let query =
    1459            0 :                 format!("CREATE EXTENSION IF NOT EXISTS {ext_name} WITH VERSION {quoted_version}");
    1460            0 :             db_client
    1461            0 :                 .simple_query(&query)
    1462            0 :                 .await
    1463            0 :                 .with_context(|| format!("Failed to execute query: {}", query))?;
    1464              :         }
    1465              : 
    1466            0 :         Ok(ext_version)
    1467            0 :     }
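                       :     // Depending on whether the extension is already installed, this issues one
                       :     // of the following (extension name and version here are hypothetical):
                       :     //
                       :     //   ALTER EXTENSION "vector" UPDATE TO "0.5.1"
                       :     //   CREATE EXTENSION IF NOT EXISTS "vector" WITH VERSION "0.5.1"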
    1468              : 
    1469              :     #[tokio::main]
    1470            0 :     pub async fn prepare_preload_libraries(
    1471            0 :         &self,
    1472            0 :         spec: &ComputeSpec,
    1473            0 :     ) -> Result<RemoteExtensionMetrics> {
    1474            0 :         if self.ext_remote_storage.is_none() {
    1475            0 :             return Ok(RemoteExtensionMetrics {
    1476            0 :                 num_ext_downloaded: 0,
    1477            0 :                 largest_ext_size: 0,
    1478            0 :                 total_ext_download_size: 0,
    1479            0 :             });
    1480            0 :         }
    1481            0 :         let remote_extensions = spec
    1482            0 :             .remote_extensions
    1483            0 :             .as_ref()
    1484            0 :             .ok_or(anyhow::anyhow!("Remote extensions are not configured"))?;
    1485            0 : 
    1486            0 :         info!("parse shared_preload_libraries from spec.cluster.settings");
    1487            0 :         let mut libs_vec = Vec::new();
    1488            0 :         if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
    1489            0 :             libs_vec = libs
    1490            0 :                 .split(&[',', '\'', ' '])
    1491            0 :                 .filter(|s| *s != "neon" && !s.is_empty())
    1492            0 :                 .map(str::to_string)
    1493            0 :                 .collect();
    1494            0 :         }
    1495            0 :         info!("parse shared_preload_libraries from provided postgresql.conf");
    1496            0 : 
     1497            0 :         // This path is used in neon_local and in Python tests.
    1498            0 :         if let Some(conf) = &spec.cluster.postgresql_conf {
    1499            0 :             let conf_lines = conf.split('\n').collect::<Vec<&str>>();
    1500            0 :             let mut shared_preload_libraries_line = "";
    1501            0 :             for line in conf_lines {
    1502            0 :                 if line.starts_with("shared_preload_libraries") {
    1503            0 :                     shared_preload_libraries_line = line;
    1504            0 :                 }
    1505            0 :             }
    1506            0 :             let mut preload_libs_vec = Vec::new();
    1507            0 :             if let Some(libs) = shared_preload_libraries_line.split("='").nth(1) {
    1508            0 :                 preload_libs_vec = libs
    1509            0 :                     .split(&[',', '\'', ' '])
    1510            0 :                     .filter(|s| *s != "neon" && !s.is_empty())
    1511            0 :                     .map(str::to_string)
    1512            0 :                     .collect();
    1513            0 :             }
    1514            0 :             libs_vec.extend(preload_libs_vec);
    1515            0 :         }
    1516            0 : 
    1517            0 :         // Don't try to download libraries that are not in the index.
    1518            0 :         // Assume that they are already present locally.
    1519            0 :         libs_vec.retain(|lib| remote_extensions.library_index.contains_key(lib));
    1520            0 : 
     1521            0 :         info!("Downloading shared_preload_libraries: {:?}", &libs_vec);
    1522            0 : 
    1523            0 :         let mut download_tasks = Vec::new();
    1524            0 :         for library in &libs_vec {
    1525            0 :             let (ext_name, ext_path) =
    1526            0 :                 remote_extensions.get_ext(library, true, &self.build_tag, &self.pgversion)?;
    1527            0 :             download_tasks.push(self.download_extension(ext_name, ext_path));
    1528            0 :         }
    1529            0 :         let results = join_all(download_tasks).await;
    1530            0 : 
    1531            0 :         let mut remote_ext_metrics = RemoteExtensionMetrics {
    1532            0 :             num_ext_downloaded: 0,
    1533            0 :             largest_ext_size: 0,
    1534            0 :             total_ext_download_size: 0,
    1535            0 :         };
    1536            0 :         for result in results {
    1537            0 :             let download_size = match result {
    1538            0 :                 Ok(res) => {
    1539            0 :                     remote_ext_metrics.num_ext_downloaded += 1;
    1540            0 :                     res
    1541            0 :                 }
    1542            0 :                 Err(err) => {
    1543            0 :                     // if we failed to download an extension, we don't want to fail the whole
    1544            0 :                     // process, but we do want to log the error
    1545            0 :                     error!("Failed to download extension: {}", err);
    1546            0 :                     0
    1547            0 :                 }
    1548            0 :             };
    1549            0 : 
    1550            0 :             remote_ext_metrics.largest_ext_size =
    1551            0 :                 std::cmp::max(remote_ext_metrics.largest_ext_size, download_size);
    1552            0 :             remote_ext_metrics.total_ext_download_size += download_size;
    1553            0 :         }
    1554            0 :         Ok(remote_ext_metrics)
    1555            0 :     }
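                       :     // Parsing example: a settings line such as
                       :     //
                       :     //   shared_preload_libraries='neon,pg_stat_statements, timescaledb'
                       :     //
                       :     // yields ["pg_stat_statements", "timescaledb"]: the value is split on
                       :     // [',', '\'', ' '], and then "neon" and empty tokens are dropped.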
    1556              : 
     1557              :     /// Waits until the current thread receives a state-change notification and
     1558              :     /// the pageserver connection string has changed.
    1559              :     ///
    1560              :     /// The operation will time out after a specified duration.
    1561            0 :     pub fn wait_timeout_while_pageserver_connstr_unchanged(&self, duration: Duration) {
    1562            0 :         let state = self.state.lock().unwrap();
    1563            0 :         let old_pageserver_connstr = state
    1564            0 :             .pspec
    1565            0 :             .as_ref()
    1566            0 :             .expect("spec must be set")
    1567            0 :             .pageserver_connstr
    1568            0 :             .clone();
    1569            0 :         let mut unchanged = true;
    1570            0 :         let _ = self
    1571            0 :             .state_changed
    1572            0 :             .wait_timeout_while(state, duration, |s| {
    1573            0 :                 let pageserver_connstr = &s
    1574            0 :                     .pspec
    1575            0 :                     .as_ref()
    1576            0 :                     .expect("spec must be set")
    1577            0 :                     .pageserver_connstr;
    1578            0 :                 unchanged = pageserver_connstr == &old_pageserver_connstr;
    1579            0 :                 unchanged
    1580            0 :             })
    1581            0 :             .unwrap();
    1582            0 :         if !unchanged {
    1583            0 :             info!("Pageserver config changed");
    1584            0 :         }
    1585            0 :     }
    1586              : }
    1587              : 
    1588            0 : pub fn forward_termination_signal() {
    1589            0 :     let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
    1590            0 :     if ss_pid != 0 {
    1591            0 :         let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
    1592            0 :         kill(ss_pid, Signal::SIGTERM).ok();
    1593            0 :     }
    1594            0 :     let pg_pid = PG_PID.load(Ordering::SeqCst);
    1595            0 :     if pg_pid != 0 {
    1596            0 :         let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
     1597            0 :         // Use 'fast' shutdown (SIGINT) because it also creates a shutdown checkpoint, which is important for
     1598            0 :         // read-only replicas (ROs) to get a list of running xacts faster instead of going through the CLOG.
    1599            0 :         // See https://www.postgresql.org/docs/current/server-shutdown.html for the list of modes and signals.
    1600            0 :         kill(pg_pid, Signal::SIGINT).ok();
    1601            0 :     }
    1602            0 : }
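                       : 
                       : // For reference, the Postgres shutdown modes map to signals as follows:
                       : //   smart = SIGTERM, fast = SIGINT, immediate = SIGQUIT.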
        

Generated by: LCOV version 2.1-beta