use std::collections::{HashMap, HashSet};
use std::env;
use std::fs;
use std::iter::once;
use std::os::unix::fs::{symlink, PermissionsExt};
use std::path::Path;
use std::process::{Command, Stdio};
use std::str::FromStr;
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering;
use std::sync::{Arc, Condvar, Mutex, RwLock};
use std::thread;
use std::time::Duration;
use std::time::Instant;

use anyhow::{Context, Result};
use chrono::{DateTime, Utc};
use compute_api::spec::{PgIdent, Role};
use futures::future::join_all;
use futures::stream::FuturesUnordered;
use futures::StreamExt;
use nix::unistd::Pid;
use postgres;
use postgres::error::SqlState;
use postgres::NoTls;
use tracing::{debug, error, info, instrument, warn};
use utils::id::{TenantId, TimelineId};
use utils::lsn::Lsn;

use compute_api::privilege::Privilege;
use compute_api::responses::{ComputeMetrics, ComputeStatus};
use compute_api::spec::{ComputeFeature, ComputeMode, ComputeSpec, ExtVersion};
use utils::measured_stream::MeasuredReader;

use nix::sys::signal::{kill, Signal};
use remote_storage::{DownloadError, RemotePath};
use tokio::spawn;

use crate::installed_extensions::get_installed_extensions;
use crate::local_proxy;
use crate::pg_helpers::*;
use crate::spec::*;
use crate::spec_apply::ApplySpecPhase::{
    CreateAndAlterDatabases, CreateAndAlterRoles, CreateAvailabilityCheck, CreateSuperUser,
    DropInvalidDatabases, DropRoles, HandleNeonExtension, HandleOtherExtensions,
    RenameAndDeleteDatabases, RenameRoles, RunInEachDatabase,
};
use crate::spec_apply::PerDatabasePhase::{
    ChangeSchemaPerms, DeleteDBRoleReferences, HandleAnonExtension,
};
use crate::spec_apply::{apply_operations, MutableApplyContext, DB};
use crate::sync_sk::{check_if_synced, ping_safekeeper};
use crate::{config, extension_server};

pub static SYNC_SAFEKEEPERS_PID: AtomicU32 = AtomicU32::new(0);
pub static PG_PID: AtomicU32 = AtomicU32::new(0);

/// Compute node info shared across several `compute_ctl` threads.
pub struct ComputeNode {
    // Url type maintains proper escaping
    pub connstr: url::Url,
    // We connect to Postgres from many different places, so build configs once
    // and reuse them where needed.
    pub conn_conf: postgres::config::Config,
    pub tokio_conn_conf: tokio_postgres::config::Config,
    pub pgdata: String,
    pub pgbin: String,
    pub pgversion: String,
    /// We should only allow live re- / configuration of the compute node if
    /// it uses the 'pull model', i.e. it can go to the control plane and fetch
    /// the latest configuration. Otherwise, there could be a case:
    /// - we start compute with some spec provided as an argument
    /// - we push a new spec and it does reconfiguration
    /// - but then something happens and the compute pod / VM is destroyed,
    ///   so the k8s controller starts it again with the **old** spec
    ///
    /// and the same for empty computes:
    /// - we started compute without any spec
    /// - we push a spec and it does configuration
    /// - but then it is restarted without any spec again
    pub live_config_allowed: bool,
    /// The port that the compute's HTTP server listens on
    pub http_port: u16,
    /// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
    /// To allow the HTTP API server to serve status requests while
    /// configuration is in progress, the lock should be held only for short
    /// periods of time to do reads/writes, not for the whole configuration
    /// process.
    pub state: Mutex<ComputeState>,
    /// `Condvar` to allow notifying waiters about state changes.
    pub state_changed: Condvar,
    /// The address of the extension storage proxy gateway.
    pub ext_remote_storage: Option<String>,
    // key: ext_archive_name, value: (download start time, download_completed?)
    pub ext_download_progress: RwLock<HashMap<String, (DateTime<Utc>, bool)>>,
    pub build_tag: String,
}

// Store some metrics about download sizes that might impact startup time.
#[derive(Clone, Debug)]
pub struct RemoteExtensionMetrics {
    num_ext_downloaded: u64,
    largest_ext_size: u64,
    total_ext_download_size: u64,
}

#[derive(Clone, Debug)]
pub struct ComputeState {
    pub start_time: DateTime<Utc>,
    pub status: ComputeStatus,
    /// Timestamp of the last Postgres activity. It could be `None` if
    /// compute wasn't used since start.
    pub last_active: Option<DateTime<Utc>>,
    pub error: Option<String>,
    pub pspec: Option<ParsedSpec>,
    pub metrics: ComputeMetrics,
}

impl ComputeState {
    pub fn new() -> Self {
        Self {
            start_time: Utc::now(),
            status: ComputeStatus::Empty,
            last_active: None,
            error: None,
            pspec: None,
            metrics: ComputeMetrics::default(),
        }
    }

    pub fn set_status(&mut self, status: ComputeStatus, state_changed: &Condvar) {
        let prev = self.status;
        info!("Changing compute status from {} to {}", prev, status);
        self.status = status;
        state_changed.notify_all();
    }

    pub fn set_failed_status(&mut self, err: anyhow::Error, state_changed: &Condvar) {
        self.error = Some(format!("{err:?}"));
        self.set_status(ComputeStatus::Failed, state_changed);
    }
}

impl Default for ComputeState {
    fn default() -> Self {
        Self::new()
    }
}

#[derive(Clone, Debug)]
pub struct ParsedSpec {
    pub spec: ComputeSpec,
    pub tenant_id: TenantId,
    pub timeline_id: TimelineId,
    pub pageserver_connstr: String,
    pub safekeeper_connstrings: Vec<String>,
    pub storage_auth_token: Option<String>,
}

impl TryFrom<ComputeSpec> for ParsedSpec {
    type Error = String;
    fn try_from(spec: ComputeSpec) -> Result<Self, String> {
        // Extract the options from the spec file that are needed to connect to
        // the storage system.
        //
        // For backwards-compatibility, the top-level fields in the spec file
        // may be empty. In that case, we need to dig them from the GUCs in the
        // cluster.settings field.
        let pageserver_connstr = spec
            .pageserver_connstring
            .clone()
            .or_else(|| spec.cluster.settings.find("neon.pageserver_connstring"))
            .ok_or("pageserver connstr should be provided")?;
        let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() {
            if matches!(spec.mode, ComputeMode::Primary) {
                spec.cluster
                    .settings
                    .find("neon.safekeepers")
                    .ok_or("safekeeper connstrings should be provided")?
                    .split(',')
                    .map(|str| str.to_string())
                    .collect()
            } else {
                vec![]
            }
        } else {
            spec.safekeeper_connstrings.clone()
        };
        let storage_auth_token = spec.storage_auth_token.clone();
        let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
            tenant_id
        } else {
            spec.cluster
                .settings
                .find("neon.tenant_id")
                .ok_or("tenant id should be provided")
                .map(|s| TenantId::from_str(&s))?
                .or(Err("invalid tenant id"))?
        };
        let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
            timeline_id
        } else {
            spec.cluster
                .settings
                .find("neon.timeline_id")
                .ok_or("timeline id should be provided")
                .map(|s| TimelineId::from_str(&s))?
                .or(Err("invalid timeline id"))?
        };

        Ok(ParsedSpec {
            spec,
            pageserver_connstr,
            safekeeper_connstrings,
            storage_auth_token,
            tenant_id,
            timeline_id,
        })
    }
}

/// If we are a VM, returns a [`Command`] that will run in the `neon-postgres`
/// cgroup. Otherwise returns the default `Command::new(cmd)`.
///
/// This function should be used to start postgres, as it will start it in the
/// neon-postgres cgroup if we are a VM. This allows autoscaling to control
/// postgres' resource usage. The cgroup will exist in VMs because vm-builder
/// creates it during the sysinit phase of its inittab.
fn maybe_cgexec(cmd: &str) -> Command {
    // The cplane sets this env var for autoscaling computes.
    // Use `var_os` so we don't have to worry about the variable being valid
    // unicode. It should never be a concern . . . but just in case.
    if env::var_os("AUTOSCALING").is_some() {
        let mut command = Command::new("cgexec");
        command.args(["-g", "memory:neon-postgres"]);
        command.arg(cmd);
        command
    } else {
        Command::new(cmd)
    }
}
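
// A minimal sketch (hypothetical test, not part of the original file) of the
// branch behavior above: with AUTOSCALING set, the spawned program is cgexec,
// i.e. roughly `cgexec -g memory:neon-postgres postgres ...`; without it, the
// command is the binary itself.
#[cfg(test)]
mod maybe_cgexec_example {
    use super::*;

    #[test]
    fn wraps_in_cgexec_when_autoscaling_is_set() {
        // NB: mutates process-wide env; fine for a single illustrative test.
        env::set_var("AUTOSCALING", "1");
        assert_eq!(
            maybe_cgexec("postgres").get_program(),
            std::ffi::OsStr::new("cgexec")
        );
        env::remove_var("AUTOSCALING");
    }
}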

pub(crate) fn construct_superuser_query(spec: &ComputeSpec) -> String {
    let roles = spec
        .cluster
        .roles
        .iter()
        .map(|r| escape_literal(&r.name))
        .collect::<Vec<_>>();

    let dbs = spec
        .cluster
        .databases
        .iter()
        .map(|db| escape_literal(&db.name))
        .collect::<Vec<_>>();

    let roles_decl = if roles.is_empty() {
        String::from("roles text[] := NULL;")
    } else {
        format!(
            r#"
            roles text[] := ARRAY(SELECT rolname
                                  FROM pg_catalog.pg_roles
                                  WHERE rolname IN ({}));"#,
            roles.join(", ")
        )
    };

    let database_decl = if dbs.is_empty() {
        String::from("dbs text[] := NULL;")
    } else {
        format!(
            r#"
            dbs text[] := ARRAY(SELECT datname
                                FROM pg_catalog.pg_database
                                WHERE datname IN ({}));"#,
            dbs.join(", ")
        )
    };

    // ALL PRIVILEGES grants CREATE, CONNECT, and TEMPORARY on all databases
    // (see https://www.postgresql.org/docs/current/ddl-priv.html)
    let query = format!(
        r#"
            DO $$
                DECLARE
                    r text;
                    {}
                    {}
                BEGIN
                    IF NOT EXISTS (
                        SELECT FROM pg_catalog.pg_roles WHERE rolname = 'neon_superuser')
                    THEN
                        CREATE ROLE neon_superuser CREATEDB CREATEROLE NOLOGIN REPLICATION BYPASSRLS IN ROLE pg_read_all_data, pg_write_all_data;
                        IF array_length(roles, 1) IS NOT NULL THEN
                            EXECUTE format('GRANT neon_superuser TO %s',
                                array_to_string(ARRAY(SELECT quote_ident(x) FROM unnest(roles) as x), ', '));
                            FOREACH r IN ARRAY roles LOOP
                                EXECUTE format('ALTER ROLE %s CREATEROLE CREATEDB', quote_ident(r));
                            END LOOP;
                        END IF;
                        IF array_length(dbs, 1) IS NOT NULL THEN
                            EXECUTE format('GRANT ALL PRIVILEGES ON DATABASE %s TO neon_superuser',
                                array_to_string(ARRAY(SELECT quote_ident(x) FROM unnest(dbs) as x), ', '));
                        END IF;
                    END IF;
                END
            $$;"#,
        roles_decl, database_decl,
    );

    query
}
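
// A sketch of what the DO-block above expands to for a spec with one role
// 'alice' and one database 'db1' (hypothetical names, body abridged):
//
//     DO $$
//         DECLARE
//             r text;
//             roles text[] := ARRAY(SELECT rolname FROM pg_catalog.pg_roles
//                                   WHERE rolname IN ('alice'));
//             dbs text[] := ARRAY(SELECT datname FROM pg_catalog.pg_database
//                                 WHERE datname IN ('db1'));
//         BEGIN
//             -- create neon_superuser if missing, GRANT it to "alice",
//             -- and GRANT ALL PRIVILEGES ON DATABASE "db1" TO neon_superuser
//         END
//     $$;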

impl ComputeNode {
    /// Check that the compute node has the corresponding feature enabled.
    pub fn has_feature(&self, feature: ComputeFeature) -> bool {
        let state = self.state.lock().unwrap();

        if let Some(s) = state.pspec.as_ref() {
            s.spec.features.contains(&feature)
        } else {
            false
        }
    }

    pub fn set_status(&self, status: ComputeStatus) {
        let mut state = self.state.lock().unwrap();
        state.set_status(status, &self.state_changed);
    }

    pub fn set_failed_status(&self, err: anyhow::Error) {
        let mut state = self.state.lock().unwrap();
        state.set_failed_status(err, &self.state_changed);
    }

    pub fn get_status(&self) -> ComputeStatus {
        self.state.lock().unwrap().status
    }

    // Remove the `pgdata` directory and create it again with the right permissions.
    fn create_pgdata(&self) -> Result<()> {
        // Ignore removal error, likely it is a 'No such file or directory (os error 2)'.
        // If it is something different then create_dir() will error out anyway.
        let _ok = fs::remove_dir_all(&self.pgdata);
        fs::create_dir(&self.pgdata)?;
        fs::set_permissions(&self.pgdata, fs::Permissions::from_mode(0o700))?;

        Ok(())
    }

    // Get a basebackup from the pageserver over a libpq connection (using
    // `connstr`) and unarchive it into the `pgdata` directory, overwriting all
    // its previous content.
    #[instrument(skip_all, fields(%lsn))]
    fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
        let spec = compute_state.pspec.as_ref().expect("spec must be set");
        let start_time = Instant::now();

        let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
        let mut config = postgres::Config::from_str(shard0_connstr)?;

        // Use the storage auth token from the config file, if given.
        // Note: this overrides any password set in the connection string.
        if let Some(storage_auth_token) = &spec.storage_auth_token {
            info!("Got storage auth token from spec file");
            config.password(storage_auth_token);
        } else {
            info!("Storage auth token not set");
        }

        // Connect to pageserver
        let mut client = config.connect(NoTls)?;
        let pageserver_connect_micros = start_time.elapsed().as_micros() as u64;

        let basebackup_cmd = match lsn {
            Lsn(0) => {
                if spec.spec.mode != ComputeMode::Primary {
                    format!(
                        "basebackup {} {} --gzip --replica",
                        spec.tenant_id, spec.timeline_id
                    )
                } else {
                    format!("basebackup {} {} --gzip", spec.tenant_id, spec.timeline_id)
                }
            }
            _ => {
                if spec.spec.mode != ComputeMode::Primary {
                    format!(
                        "basebackup {} {} {} --gzip --replica",
                        spec.tenant_id, spec.timeline_id, lsn
                    )
                } else {
                    format!(
                        "basebackup {} {} {} --gzip",
                        spec.tenant_id, spec.timeline_id, lsn
                    )
                }
            }
        };

        let copyreader = client.copy_out(basebackup_cmd.as_str())?;
        let mut measured_reader = MeasuredReader::new(copyreader);
        let mut bufreader = std::io::BufReader::new(&mut measured_reader);

        // Read the archive directly from the `CopyOutReader`
        //
        // Set `ignore_zeros` so that unpack() reads all the Copy data and
        // doesn't stop at the end-of-archive marker. Otherwise, if the server
        // sends an Error after finishing the tarball, we will not notice it.
        let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut bufreader));
        ar.set_ignore_zeros(true);
        ar.unpack(&self.pgdata)?;

        // Report metrics
        let mut state = self.state.lock().unwrap();
        state.metrics.pageserver_connect_micros = pageserver_connect_micros;
        state.metrics.basebackup_bytes = measured_reader.get_byte_count() as u64;
        state.metrics.basebackup_ms = start_time.elapsed().as_millis() as u64;
        Ok(())
    }

    // Gets the basebackup in a retry loop
    #[instrument(skip_all, fields(%lsn))]
    pub fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
        let mut retry_period_ms = 500.0;
        let mut attempts = 0;
        const DEFAULT_ATTEMPTS: u16 = 10;
        #[cfg(feature = "testing")]
        let max_attempts = if let Ok(v) = env::var("NEON_COMPUTE_TESTING_BASEBACKUP_RETRIES") {
            u16::from_str(&v).unwrap()
        } else {
            DEFAULT_ATTEMPTS
        };
        #[cfg(not(feature = "testing"))]
        let max_attempts = DEFAULT_ATTEMPTS;
        loop {
            let result = self.try_get_basebackup(compute_state, lsn);
            match result {
                Ok(_) => {
                    return result;
                }
                Err(ref e) if attempts < max_attempts => {
                    warn!(
                        "Failed to get basebackup: {} (attempt {}/{})",
                        e, attempts, max_attempts
                    );
                    std::thread::sleep(std::time::Duration::from_millis(retry_period_ms as u64));
                    retry_period_ms *= 1.5;
                }
                Err(_) => {
                    return result;
                }
            };
            attempts += 1;
        }
    }
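
    /// Illustration only (not called anywhere): the sleep before retry number
    /// `attempt` in the loop above, given the 500 ms base period and the 1.5
    /// growth factor; attempts 0, 1, 2 wait roughly 500, 750 and 1125 ms.
    #[allow(dead_code)]
    fn example_basebackup_backoff_ms(attempt: u32) -> u64 {
        (500.0 * 1.5_f64.powi(attempt as i32)) as u64
    }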

    pub async fn check_safekeepers_synced_async(
        &self,
        compute_state: &ComputeState,
    ) -> Result<Option<Lsn>> {
        // Construct a connection config for each safekeeper
        let pspec: ParsedSpec = compute_state
            .pspec
            .as_ref()
            .expect("spec must be set")
            .clone();
        let sk_connstrs: Vec<String> = pspec.safekeeper_connstrings.clone();
        let sk_configs = sk_connstrs.into_iter().map(|connstr| {
            // Format connstr
            let id = connstr.clone();
            let connstr = format!("postgresql://no_user@{}", connstr);
            let options = format!(
                "-c timeline_id={} tenant_id={}",
                pspec.timeline_id, pspec.tenant_id
            );

            // Construct client
            let mut config = tokio_postgres::Config::from_str(&connstr).unwrap();
            config.options(&options);
            if let Some(storage_auth_token) = pspec.storage_auth_token.clone() {
                config.password(storage_auth_token);
            }

            (id, config)
        });

        // Create a task set to query all safekeepers
        let mut tasks = FuturesUnordered::new();
        let quorum = sk_configs.len() / 2 + 1;
        for (id, config) in sk_configs {
            let timeout = tokio::time::Duration::from_millis(100);
            let task = tokio::time::timeout(timeout, ping_safekeeper(id, config));
            tasks.push(tokio::spawn(task));
        }

        // Get a quorum of responses or errors
        let mut responses = Vec::new();
        let mut join_errors = Vec::new();
        let mut task_errors = Vec::new();
        let mut timeout_errors = Vec::new();
        while let Some(response) = tasks.next().await {
            match response {
                Ok(Ok(Ok(r))) => responses.push(r),
                Ok(Ok(Err(e))) => task_errors.push(e),
                Ok(Err(e)) => timeout_errors.push(e),
                Err(e) => join_errors.push(e),
            };
            if responses.len() >= quorum {
                break;
            }
            if join_errors.len() + task_errors.len() + timeout_errors.len() >= quorum {
                break;
            }
        }

        // In case of error, log and fail the check, but don't crash.
        // We're playing it safe because these errors could be transient
        // and we don't yet retry. Also being careful here allows us to
        // be backwards compatible with safekeepers that don't have the
        // TIMELINE_STATUS API yet.
        if responses.len() < quorum {
            error!(
                "failed sync safekeepers check {:?} {:?} {:?}",
                join_errors, task_errors, timeout_errors
            );
            return Ok(None);
        }

        Ok(check_if_synced(responses))
    }
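
    /// Illustration only (not called anywhere): the majority size used by the
    /// check above, e.g. 1 safekeeper -> 1, 3 -> 2, 5 -> 3. The loop also
    /// stops early once errors alone reach this count, since a successful
    /// quorum is then impossible.
    #[allow(dead_code)]
    fn example_safekeeper_quorum(n_safekeepers: usize) -> usize {
        n_safekeepers / 2 + 1
    }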

    // Fast path for sync_safekeepers. If they're already synced we get the LSN
    // in one roundtrip. If not, we should do a full sync_safekeepers.
    pub fn check_safekeepers_synced(&self, compute_state: &ComputeState) -> Result<Option<Lsn>> {
        let start_time = Utc::now();

        // Run actual work with new tokio runtime
        let rt = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .expect("failed to create rt");
        let result = rt.block_on(self.check_safekeepers_synced_async(compute_state));

        // Record runtime
        self.state.lock().unwrap().metrics.sync_sk_check_ms = Utc::now()
            .signed_duration_since(start_time)
            .to_std()
            .unwrap()
            .as_millis() as u64;
        result
    }

    // Run `postgres` in a special mode with the `--sync-safekeepers` argument
    // and return the reported LSN back to the caller.
    #[instrument(skip_all)]
    pub fn sync_safekeepers(&self, storage_auth_token: Option<String>) -> Result<Lsn> {
        let start_time = Utc::now();

        let mut sync_handle = maybe_cgexec(&self.pgbin)
            .args(["--sync-safekeepers"])
            .env("PGDATA", &self.pgdata) // we cannot use -D in this mode
            .envs(if let Some(storage_auth_token) = &storage_auth_token {
                vec![("NEON_AUTH_TOKEN", storage_auth_token)]
            } else {
                vec![]
            })
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
            .expect("postgres --sync-safekeepers failed to start");
        SYNC_SAFEKEEPERS_PID.store(sync_handle.id(), Ordering::SeqCst);

        // `postgres --sync-safekeepers` will print all log output to stderr and
        // the final LSN to stdout. So we leave stdout to collect the LSN, while
        // stderr logs will be collected in a child thread.
        let stderr = sync_handle
            .stderr
            .take()
            .expect("stderr should be captured");
        let logs_handle = handle_postgres_logs(stderr);

        let sync_output = sync_handle
            .wait_with_output()
            .expect("postgres --sync-safekeepers failed");
        SYNC_SAFEKEEPERS_PID.store(0, Ordering::SeqCst);

        // Process has exited, so we can join the logs thread.
        let _ = logs_handle
            .join()
            .map_err(|e| tracing::error!("log thread panicked: {:?}", e));

        if !sync_output.status.success() {
            anyhow::bail!(
                "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
                sync_output.status,
                String::from_utf8(sync_output.stdout)
                    .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
            );
        }

        self.state.lock().unwrap().metrics.sync_safekeepers_ms = Utc::now()
            .signed_duration_since(start_time)
            .to_std()
            .unwrap()
            .as_millis() as u64;

        let lsn = Lsn::from_str(String::from_utf8(sync_output.stdout)?.trim())?;

        Ok(lsn)
    }
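
    // The stdout parsed above is a bare LSN in the usual Postgres "hi/lo"
    // hex notation, e.g. (hypothetical value):
    //
    //     assert_eq!(Lsn::from_str("0/15E2F50").unwrap(), Lsn(0x15E2F50));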

    /// Do all the preparations like PGDATA directory creation, configuration,
    /// safekeepers sync, basebackup, etc.
    #[instrument(skip_all)]
    pub fn prepare_pgdata(&self, compute_state: &ComputeState) -> Result<()> {
        let pspec = compute_state.pspec.as_ref().expect("spec must be set");
        let spec = &pspec.spec;
        let pgdata_path = Path::new(&self.pgdata);

        // Remove/create an empty pgdata directory and put configuration there.
        self.create_pgdata()?;
        config::write_postgres_conf(
            &pgdata_path.join("postgresql.conf"),
            &pspec.spec,
            self.http_port,
        )?;

        // Syncing safekeepers is only safe with primary nodes: if a primary
        // is already connected it will be kicked out, so a secondary (standby)
        // cannot sync safekeepers.
        let lsn = match spec.mode {
            ComputeMode::Primary => {
                info!("checking if safekeepers are synced");
                let lsn = if let Ok(Some(lsn)) = self.check_safekeepers_synced(compute_state) {
                    lsn
                } else {
                    info!("starting safekeepers syncing");
                    self.sync_safekeepers(pspec.storage_auth_token.clone())
                        .with_context(|| "failed to sync safekeepers")?
                };
                info!("safekeepers synced at LSN {}", lsn);
                lsn
            }
            ComputeMode::Static(lsn) => {
                info!("Starting read-only node at static LSN {}", lsn);
                lsn
            }
            ComputeMode::Replica => {
                info!("Initializing standby from latest Pageserver LSN");
                Lsn(0)
            }
        };

        info!(
            "getting basebackup@{} from pageserver {}",
            lsn, &pspec.pageserver_connstr
        );
        self.get_basebackup(compute_state, lsn).with_context(|| {
            format!(
                "failed to get basebackup@{} from pageserver {}",
                lsn, &pspec.pageserver_connstr
            )
        })?;

        // Update pg_hba.conf received with basebackup.
        update_pg_hba(pgdata_path)?;

        // Place pg_dynshmem under /dev/shm. This allows us to use
        // 'dynamic_shared_memory_type = mmap' so that the files are placed in
        // /dev/shm, similar to how 'dynamic_shared_memory_type = posix' works.
        //
        // Why on earth don't we just stick to the 'posix' default, you might
        // ask. It turns out that making large allocations with 'posix' doesn't
        // work very well with autoscaling. The behavior we want is that:
        //
        // 1. You can make large DSM allocations, larger than the current RAM
        //    size of the VM, without errors
        //
        // 2. If the allocated memory is really used, the VM is scaled up
        //    automatically to accommodate that
        //
        // We try to make that possible by having swap in the VM. But with the
        // default 'posix' DSM implementation, we fail step 1, even when there's
        // plenty of swap available. PostgreSQL uses posix_fallocate() to create
        // the shmem segment, which is really just a file in /dev/shm in Linux,
        // but posix_fallocate() on tmpfs returns ENOMEM if the size is larger
        // than available RAM.
        //
        // Using 'dynamic_shared_memory_type = mmap' works around that, because
        // the Postgres 'mmap' DSM implementation doesn't use
        // posix_fallocate(). Instead, it uses repeated calls to write(2) to
        // fill the file with zeros. It's weird that that differs between
        // 'posix' and 'mmap', but we take advantage of it. When the file is
        // filled slowly with write(2), the kernel allows it to grow larger, as
        // long as there's swap available.
        //
        // In short, using 'dynamic_shared_memory_type = mmap' allows one DSM
        // segment to be larger than currently available RAM. And because we
        // don't want to store it in a real file, which the kernel would try to
        // flush to disk, we symlink pg_dynshmem to /dev/shm.
        //
        // We don't set 'dynamic_shared_memory_type = mmap' here, we let the
        // control plane control that option. If 'mmap' is not used, this
        // symlink doesn't affect anything.
        //
        // See https://github.com/neondatabase/autoscaling/issues/800
        std::fs::remove_dir(pgdata_path.join("pg_dynshmem"))?;
        symlink("/dev/shm/", pgdata_path.join("pg_dynshmem"))?;

        match spec.mode {
            ComputeMode::Primary => {}
            ComputeMode::Replica | ComputeMode::Static(..) => {
                add_standby_signal(pgdata_path)?;
            }
        }

        Ok(())
    }
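
    // Net effect of the pg_dynshmem handling above, as a layout sketch
    // (paths illustrative):
    //
    //     $PGDATA/pg_dynshmem -> /dev/shm/
    //
    // With 'dynamic_shared_memory_type = mmap' the DSM files then live on
    // tmpfs, where the slow write(2)-based growth can exceed current RAM as
    // long as swap is available.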

    /// Start and stop a postgres process to warm up the VM for startup.
    pub fn prewarm_postgres(&self) -> Result<()> {
        info!("prewarming");

        // Create pgdata
        let pgdata = &format!("{}.warmup", self.pgdata);
        create_pgdata(pgdata)?;

        // Run initdb to completion
        info!("running initdb");
        let initdb_bin = Path::new(&self.pgbin).parent().unwrap().join("initdb");
        Command::new(initdb_bin)
            .args(["--pgdata", pgdata])
            .output()
            .expect("cannot start initdb process");

        // Write conf
        use std::io::Write;
        let conf_path = Path::new(pgdata).join("postgresql.conf");
        let mut file = std::fs::File::create(conf_path)?;
        writeln!(file, "shared_buffers=65536")?;
        writeln!(file, "port=51055")?; // Nobody should be connecting
        writeln!(file, "shared_preload_libraries = 'neon'")?;

        // Start postgres
        info!("starting postgres");
        let mut pg = maybe_cgexec(&self.pgbin)
            .args(["-D", pgdata])
            .spawn()
            .expect("cannot start postgres process");

        // Stop it when it's ready
        info!("waiting for postgres");
        wait_for_postgres(&mut pg, Path::new(pgdata))?;
        // SIGQUIT orders postgres to exit immediately. We don't want to SIGKILL
        // it, to avoid orphaned processes prowling around while the datadir is
        // wiped.
        let pm_pid = Pid::from_raw(pg.id() as i32);
        kill(pm_pid, Signal::SIGQUIT)?;
        info!("sent SIGQUIT signal");
        pg.wait()?;
        info!("done prewarming");

        // clean up
        let _ok = fs::remove_dir_all(pgdata);
        Ok(())
    }

    /// Start Postgres as a child process and manage DBs/roles.
    /// After that this will hang waiting on the postmaster process to exit.
    /// Returns a handle to the child process and a handle to the logs thread.
    #[instrument(skip_all)]
    pub fn start_postgres(
        &self,
        storage_auth_token: Option<String>,
    ) -> Result<(std::process::Child, std::thread::JoinHandle<()>)> {
        let pgdata_path = Path::new(&self.pgdata);

        // Run postgres as a child process.
        let mut pg = maybe_cgexec(&self.pgbin)
            .args(["-D", &self.pgdata])
            .envs(if let Some(storage_auth_token) = &storage_auth_token {
                vec![("NEON_AUTH_TOKEN", storage_auth_token)]
            } else {
                vec![]
            })
            .stderr(Stdio::piped())
            .spawn()
            .expect("cannot start postgres process");
        PG_PID.store(pg.id(), Ordering::SeqCst);

        // Start a thread to collect logs from stderr.
        let stderr = pg.stderr.take().expect("stderr should be captured");
        let logs_handle = handle_postgres_logs(stderr);

        wait_for_postgres(&mut pg, pgdata_path)?;

        Ok((pg, logs_handle))
    }

    /// Do post-configuration of the already started Postgres. This function spawns a background thread to
    /// configure the database after applying the compute spec. Currently, it upgrades the neon extension
    /// version. In the future, it may upgrade all 3rd-party extensions.
    #[instrument(skip_all)]
    pub fn post_apply_config(&self) -> Result<()> {
        let conf = self.get_conn_conf(Some("compute_ctl:post_apply_config"));
        thread::spawn(move || {
            let func = || {
                let mut client = conf.connect(NoTls)?;
                handle_neon_extension_upgrade(&mut client)
                    .context("handle_neon_extension_upgrade")?;
                Ok::<_, anyhow::Error>(())
            };
            if let Err(err) = func() {
                error!("error while post_apply_config: {err:#}");
            }
        });
        Ok(())
    }

    pub fn get_conn_conf(&self, application_name: Option<&str>) -> postgres::Config {
        let mut conf = self.conn_conf.clone();
        if let Some(application_name) = application_name {
            conf.application_name(application_name);
        }
        conf
    }

    pub fn get_tokio_conn_conf(&self, application_name: Option<&str>) -> tokio_postgres::Config {
        let mut conf = self.tokio_conn_conf.clone();
        if let Some(application_name) = application_name {
            conf.application_name(application_name);
        }
        conf
    }

    async fn get_maintenance_client(
        conf: &tokio_postgres::Config,
    ) -> Result<tokio_postgres::Client> {
        let mut conf = conf.clone();
        conf.application_name("compute_ctl:apply_config");

        let (client, conn) = match conf.connect(NoTls).await {
            // If connection fails, it may be the old node with the `zenith_admin` superuser.
            //
            // In this case we need to connect with the old `zenith_admin` name
            // and create a new user. We cannot simply rename the connected user,
            // but we can create a new one and grant it all privileges.
            Err(e) => match e.code() {
                Some(&SqlState::INVALID_PASSWORD)
                | Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => {
                    // Connect with zenith_admin if cloud_admin could not authenticate
                    info!(
                        "cannot connect to postgres: {}, retrying with `zenith_admin` username",
                        e
                    );
                    let mut zenith_admin_conf = postgres::config::Config::from(conf.clone());
                    zenith_admin_conf.application_name("compute_ctl:apply_config");
                    zenith_admin_conf.user("zenith_admin");

                    let mut client =
                        zenith_admin_conf.connect(NoTls)
                            .context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;

                    // Disable forwarding so that users don't get a cloud_admin role
                    let mut func = || {
                        client.simple_query("SET neon.forward_ddl = false")?;
                        client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
                        client.simple_query("GRANT zenith_admin TO cloud_admin")?;
                        Ok::<_, anyhow::Error>(())
                    };
                    func().context("apply_config setup cloud_admin")?;

                    drop(client);

                    // Reconnect with a connstring with the expected name
                    conf.connect(NoTls).await?
                }
                _ => return Err(e.into()),
            },
            Ok((client, conn)) => (client, conn),
        };

        spawn(async move {
            if let Err(e) = conn.await {
                error!("maintenance client connection error: {}", e);
            }
        });

        // Disable DDL forwarding because the control plane already knows about
        // the roles/databases we're about to modify.
        client
            .simple_query("SET neon.forward_ddl = false")
            .await
            .context("apply_config SET neon.forward_ddl = false")?;

        Ok(client)
    }

    /// Apply the spec to the running PostgreSQL instance.
    /// The caller can decide to run with multiple clients in parallel, or
    /// in single mode. Either way, the commands executed will be the same, and
    /// only commands run in different databases are parallelized.
    #[instrument(skip_all)]
    pub fn apply_spec_sql(
        &self,
        spec: Arc<ComputeSpec>,
        conf: Arc<tokio_postgres::Config>,
        concurrency: usize,
    ) -> Result<()> {
        let rt = tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .build()?;

        info!("Applying config with max {} concurrency", concurrency);
        debug!("Config: {:?}", spec);

        rt.block_on(async {
            // Proceed with post-startup configuration. Note that the order of operations is important.
            let client = Self::get_maintenance_client(&conf).await?;
            let spec = spec.clone();

            let databases = get_existing_dbs_async(&client).await?;
            let roles = get_existing_roles_async(&client)
                .await?
                .into_iter()
                .map(|role| (role.name.clone(), role))
                .collect::<HashMap<String, Role>>();

            let jwks_roles = Arc::new(
                spec.as_ref()
                    .local_proxy_config
                    .iter()
                    .flat_map(|it| &it.jwks)
                    .flatten()
                    .flat_map(|setting| &setting.role_names)
                    .cloned()
                    .collect::<HashSet<_>>(),
            );

            let ctx = Arc::new(tokio::sync::RwLock::new(MutableApplyContext {
                roles,
                dbs: databases,
            }));

            for phase in [
                CreateSuperUser,
                DropInvalidDatabases,
                RenameRoles,
                CreateAndAlterRoles,
                RenameAndDeleteDatabases,
                CreateAndAlterDatabases,
            ] {
                info!("Applying phase {:?}", &phase);
                apply_operations(
                    spec.clone(),
                    ctx.clone(),
                    jwks_roles.clone(),
                    phase,
                    || async { Ok(&client) },
                )
                .await?;
            }

            info!("Applying RunInEachDatabase phase");
            let concurrency_token = Arc::new(tokio::sync::Semaphore::new(concurrency));

            let db_processes = spec
                .cluster
                .databases
                .iter()
                .map(|db| DB::new(db.clone()))
                // include the special system DB as well
                .chain(once(DB::SystemDB))
                .map(|db| {
                    let spec = spec.clone();
                    let ctx = ctx.clone();
                    let jwks_roles = jwks_roles.clone();
                    let mut conf = conf.as_ref().clone();
                    let concurrency_token = concurrency_token.clone();
                    let db = db.clone();

                    debug!("Applying per-database phases for Database {:?}", &db);

                    match &db {
                        DB::SystemDB => {}
                        DB::UserDB(db) => {
                            conf.dbname(db.name.as_str());
                        }
                    }

                    let conf = Arc::new(conf);
                    let fut = Self::apply_spec_sql_db(
                        spec.clone(),
                        conf,
                        ctx.clone(),
                        jwks_roles.clone(),
                        concurrency_token.clone(),
                        db,
                    );

                    Ok(spawn(fut))
                })
                .collect::<Vec<Result<_, anyhow::Error>>>();

            for process in db_processes.into_iter() {
                let handle = process?;
                handle.await??;
            }

            for phase in vec![
                HandleOtherExtensions,
                HandleNeonExtension,
                CreateAvailabilityCheck,
                DropRoles,
            ] {
                debug!("Applying phase {:?}", &phase);
                apply_operations(
                    spec.clone(),
                    ctx.clone(),
                    jwks_roles.clone(),
                    phase,
                    || async { Ok(&client) },
                )
                .await?;
            }

            Ok::<(), anyhow::Error>(())
        })?;

        Ok(())
    }
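
    // The fan-out above in miniature: one Tokio task per database, all gated
    // by a semaphore so at most `concurrency` maintenance connections exist at
    // once (standalone sketch, not the actual call sites):
    //
    //     let gate = Arc::new(tokio::sync::Semaphore::new(concurrency));
    //     let _permit = gate.acquire().await?; // held for the DB task's duration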

    /// Apply SQL migrations of the RunInEachDatabase phase.
    ///
    /// May opt to not connect to databases that don't have any scheduled
    /// operations. The function is concurrency-controlled with the provided
    /// semaphore. The caller has to make sure the semaphore isn't exhausted.
    async fn apply_spec_sql_db(
        spec: Arc<ComputeSpec>,
        conf: Arc<tokio_postgres::Config>,
        ctx: Arc<tokio::sync::RwLock<MutableApplyContext>>,
        jwks_roles: Arc<HashSet<String>>,
        concurrency_token: Arc<tokio::sync::Semaphore>,
        db: DB,
    ) -> Result<()> {
        let _permit = concurrency_token.acquire().await?;

        let mut client_conn = None;

        for subphase in [
            DeleteDBRoleReferences,
            ChangeSchemaPerms,
            HandleAnonExtension,
        ] {
            apply_operations(
                spec.clone(),
                ctx.clone(),
                jwks_roles.clone(),
                RunInEachDatabase {
                    db: db.clone(),
                    subphase,
                },
                // Only connect if apply_operation actually wants a connection.
                // It's quite possible this database doesn't need any queries,
                // so by not connecting we save the time and effort of connecting
                // to that database.
                || async {
                    if client_conn.is_none() {
                        let db_client = Self::get_maintenance_client(&conf).await?;
                        client_conn.replace(db_client);
                    }
                    let client = client_conn.as_ref().unwrap();
                    Ok(client)
                },
            )
            .await?;
        }

        drop(client_conn);

        Ok::<(), anyhow::Error>(())
    }

    /// Choose how many concurrent connections to use for applying the spec changes.
    pub fn max_service_connections(
        &self,
        compute_state: &ComputeState,
        spec: &ComputeSpec,
    ) -> usize {
        // If the cluster is in Init state we don't have to deal with user connections,
        // and can thus use all `max_connections` connection slots. However, that's generally not
        // very efficient, so we generally still limit it to a smaller number.
        if compute_state.status == ComputeStatus::Init {
            // If the settings contain 'max_connections', use that as the template
            if let Some(config) = spec.cluster.settings.find("max_connections") {
                config.parse::<usize>().ok()
            } else {
                // Otherwise, try to find the setting in the postgresql_conf string
                spec.cluster
                    .postgresql_conf
                    .iter()
                    .flat_map(|conf| conf.split("\n"))
                    .filter_map(|line| {
                        if !line.contains("max_connections") {
                            return None;
                        }

                        let (key, value) = line.split_once("=")?;
                        let key = key
                            .trim_start_matches(char::is_whitespace)
                            .trim_end_matches(char::is_whitespace);

                        let value = value
                            .trim_start_matches(char::is_whitespace)
                            .trim_end_matches(char::is_whitespace);

                        if key != "max_connections" {
                            return None;
                        }

                        value.parse::<usize>().ok()
                    })
                    .next()
            }
            // If max_connections is present, use at most 1/3rd of that.
            // When max_connections is lower than 30, try to use at least 10 connections, but
            // never more than max_connections.
            .map(|limit| match limit {
                0..10 => limit,
                10..30 => 10,
                30.. => limit / 3,
            })
            // If we didn't find max_connections, default to 10 concurrent connections.
            .unwrap_or(10)
        } else {
            // state == Running
            // Because the cluster is already in the Running state, we should assume users are
            // already connected to the cluster, and high concurrency could negatively
            // impact user connectivity. Therefore, we can limit concurrency to the number of
            // reserved superuser connections, which users wouldn't be able to use anyway.
            spec.cluster
                .settings
                .find("superuser_reserved_connections")
                .iter()
                .filter_map(|val| val.parse::<usize>().ok())
                .map(|val| if val > 1 { val - 1 } else { 1 })
                .last()
                .unwrap_or(3)
        }
    }
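
    /// Illustration only (not called anywhere): the Init-state mapping above
    /// from a discovered max_connections to the connection budget, e.g.
    /// 5 -> 5, 20 -> 10, 100 -> 33 (and 10 when nothing is configured).
    #[allow(dead_code)]
    fn example_init_connection_budget(max_connections: usize) -> usize {
        match max_connections {
            0..10 => max_connections,
            10..30 => 10,
            30.. => max_connections / 3,
        }
    }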

    /// Do initial configuration of the already started Postgres.
    #[instrument(skip_all)]
    pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
        let conf = self.get_tokio_conn_conf(Some("compute_ctl:apply_config"));

        let conf = Arc::new(conf);
        let spec = Arc::new(
            compute_state
                .pspec
                .as_ref()
                .expect("spec must be set")
                .spec
                .clone(),
        );

        let max_concurrent_connections = self.max_service_connections(compute_state, &spec);

        // Merge-apply spec & changes to PostgreSQL state.
        self.apply_spec_sql(spec.clone(), conf.clone(), max_concurrent_connections)?;

        if let Some(ref local_proxy) = &spec.clone().local_proxy_config {
            info!("configuring local_proxy");
            local_proxy::configure(local_proxy).context("apply_config local_proxy")?;
        }

        // Run migrations separately to not hold up cold starts
        thread::spawn(move || {
            let conf = conf.as_ref().clone();
            let mut conf = postgres::config::Config::from(conf);
            conf.application_name("compute_ctl:migrations");

            match conf.connect(NoTls) {
                Ok(mut client) => {
                    if let Err(e) = handle_migrations(&mut client) {
                        error!("Failed to run migrations: {}", e);
                    }
                }
                Err(e) => {
                    error!(
                        "Failed to connect to the compute for running migrations: {}",
                        e
                    );
                }
            };
        });

        Ok::<(), anyhow::Error>(())
    }

    // A wrapper around `pg_ctl reload`; note that right now we don't use
    // `pg_ctl` for start / stop.
    #[instrument(skip_all)]
    fn pg_reload_conf(&self) -> Result<()> {
        let pgctl_bin = Path::new(&self.pgbin).parent().unwrap().join("pg_ctl");
        Command::new(pgctl_bin)
            .args(["reload", "-D", &self.pgdata])
            .output()
            .expect("cannot run pg_ctl process");
        Ok(())
    }

    /// Similar to `apply_config()`, but does a slightly different sequence of operations,
    /// as it's used to reconfigure a previously started and configured Postgres node.
    #[instrument(skip_all)]
    pub fn reconfigure(&self) -> Result<()> {
        let spec = self.state.lock().unwrap().pspec.clone().unwrap().spec;

        if let Some(ref pgbouncer_settings) = spec.pgbouncer_settings {
            info!("tuning pgbouncer");

            let rt = tokio::runtime::Builder::new_current_thread()
                .enable_all()
                .build()
                .expect("failed to create rt");

            // Spawn a thread to do the tuning,
            // so that we don't block the main thread that starts Postgres.
            let pgbouncer_settings = pgbouncer_settings.clone();
            let _handle = thread::spawn(move || {
                let res = rt.block_on(tune_pgbouncer(pgbouncer_settings));
                if let Err(err) = res {
                    error!("error while tuning pgbouncer: {err:?}");
                }
            });
        }

        if let Some(ref local_proxy) = spec.local_proxy_config {
            info!("configuring local_proxy");

            // Spawn a thread to do the configuration,
            // so that we don't block the main thread that starts Postgres.
            let local_proxy = local_proxy.clone();
            let _handle = Some(thread::spawn(move || {
                if let Err(err) = local_proxy::configure(&local_proxy) {
                    error!("error while configuring local_proxy: {err:?}");
                }
            }));
        }

        // Write new config
        let pgdata_path = Path::new(&self.pgdata);
        let postgresql_conf_path = pgdata_path.join("postgresql.conf");
        config::write_postgres_conf(&postgresql_conf_path, &spec, self.http_port)?;

        let max_concurrent_connections = spec.reconfigure_concurrency;

        // Temporarily reset max_cluster_size in config
        // to avoid the possibility of hitting the limit while we are reconfiguring:
        // creating new extensions, roles, etc.
        config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
            self.pg_reload_conf()?;

            if spec.mode == ComputeMode::Primary {
                let mut conf = tokio_postgres::Config::from_str(self.connstr.as_str()).unwrap();
                conf.application_name("apply_config");
                let conf = Arc::new(conf);

                let spec = Arc::new(spec.clone());

                self.apply_spec_sql(spec, conf, max_concurrent_connections)?;
            }

            Ok(())
        })?;

        self.pg_reload_conf()?;

        let unknown_op = "unknown".to_string();
        let op_id = spec.operation_uuid.as_ref().unwrap_or(&unknown_op);
        info!(
            "finished reconfiguration of compute node for operation {}",
            op_id
        );

        Ok(())
    }

    #[instrument(skip_all)]
    pub fn start_compute(&self) -> Result<(std::process::Child, std::thread::JoinHandle<()>)> {
        let compute_state = self.state.lock().unwrap().clone();
        let pspec = compute_state.pspec.as_ref().expect("spec must be set");
        info!(
            "starting compute for project {}, operation {}, tenant {}, timeline {}",
            pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None"),
            pspec.spec.operation_uuid.as_deref().unwrap_or("None"),
            pspec.tenant_id,
            pspec.timeline_id,
        );

        // tune pgbouncer
        if let Some(pgbouncer_settings) = &pspec.spec.pgbouncer_settings {
            info!("tuning pgbouncer");

            let rt = tokio::runtime::Builder::new_current_thread()
                .enable_all()
                .build()
                .expect("failed to create rt");

            // Spawn a thread to do the tuning,
            // so that we don't block the main thread that starts Postgres.
            let pgbouncer_settings = pgbouncer_settings.clone();
            let _handle = thread::spawn(move || {
                let res = rt.block_on(tune_pgbouncer(pgbouncer_settings));
                if let Err(err) = res {
                    error!("error while tuning pgbouncer: {err:?}");
                }
            });
        }

        if let Some(local_proxy) = &pspec.spec.local_proxy_config {
            info!("configuring local_proxy");

            // Spawn a thread to do the configuration,
            // so that we don't block the main thread that starts Postgres.
            let local_proxy = local_proxy.clone();
            let _handle = thread::spawn(move || {
                if let Err(err) = local_proxy::configure(&local_proxy) {
                    error!("error while configuring local_proxy: {err:?}");
                }
            });
        }

        info!(
            "start_compute spec.remote_extensions {:?}",
            pspec.spec.remote_extensions
        );

        // This part is sync, because we need to download
        // remote shared_preload_libraries before postgres start (if any)
        if let Some(remote_extensions) = &pspec.spec.remote_extensions {
            // First, create control files for all available extensions
            extension_server::create_control_files(remote_extensions, &self.pgbin);

            let library_load_start_time = Utc::now();
            let remote_ext_metrics = self.prepare_preload_libraries(&pspec.spec)?;

            let library_load_time = Utc::now()
                .signed_duration_since(library_load_start_time)
                .to_std()
                .unwrap()
                .as_millis() as u64;
            let mut state = self.state.lock().unwrap();
            state.metrics.load_ext_ms = library_load_time;
            state.metrics.num_ext_downloaded = remote_ext_metrics.num_ext_downloaded;
            state.metrics.largest_ext_size = remote_ext_metrics.largest_ext_size;
            state.metrics.total_ext_download_size = remote_ext_metrics.total_ext_download_size;
            info!(
                "Loading shared_preload_libraries took {:?}ms",
                library_load_time
            );
            info!("{:?}", remote_ext_metrics);
        }

        self.prepare_pgdata(&compute_state)?;

        let start_time = Utc::now();
        let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;

        let config_time = Utc::now();
        if pspec.spec.mode == ComputeMode::Primary {
            if !pspec.spec.skip_pg_catalog_updates {
                let pgdata_path = Path::new(&self.pgdata);
                // Temporarily reset max_cluster_size in config
                // to avoid the possibility of hitting the limit while we are applying config:
                // creating new extensions, roles, etc...
                config::with_compute_ctl_tmp_override(
                    pgdata_path,
                    "neon.max_cluster_size=-1",
                    || {
                        self.pg_reload_conf()?;

                        self.apply_config(&compute_state)?;

                        Ok(())
                    },
                )?;
                self.pg_reload_conf()?;
            }
            self.post_apply_config()?;

            let conf = self.get_conn_conf(None);
            thread::spawn(move || {
                let res = get_installed_extensions(conf);
                match res {
                    Ok(extensions) => {
                        info!(
                            "[NEON_EXT_STAT] {}",
                            serde_json::to_string(&extensions)
                                .expect("failed to serialize extensions list")
                        );
                    }
                    Err(err) => error!("could not get installed extensions: {err:?}"),
                }
            });
        }

        let startup_end_time = Utc::now();
        {
            let mut state = self.state.lock().unwrap();
            state.metrics.start_postgres_ms = config_time
                .signed_duration_since(start_time)
                .to_std()
                .unwrap()
                .as_millis() as u64;
            state.metrics.config_ms = startup_end_time
                .signed_duration_since(config_time)
                .to_std()
                .unwrap()
                .as_millis() as u64;
            state.metrics.total_startup_ms = startup_end_time
                .signed_duration_since(compute_state.start_time)
                .to_std()
                .unwrap()
                .as_millis() as u64;
        }
        self.set_status(ComputeStatus::Running);

        info!(
            "finished configuration of compute for project {}",
            pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None")
        );

        // Log metrics so that we can search for slow operations in logs
        let metrics = {
            let state = self.state.lock().unwrap();
            state.metrics.clone()
        };
        info!(?metrics, "compute start finished");

        Ok(pg_process)
    }

    /// Update `last_active` in the shared state, but only if the new value is more recent.
    pub fn update_last_active(&self, last_active: Option<DateTime<Utc>>) {
        let mut state = self.state.lock().unwrap();
        // NB: `Some(<DateTime>)` is always greater than `None`.
        if last_active > state.last_active {
            state.last_active = last_active;
            debug!("set the last compute activity time to: {:?}", last_active);
        }
    }
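
    // The comparison above relies on Option's derived ordering, where `None`
    // sorts before any `Some`: e.g. `None < Some(Utc::now())` holds, and two
    // `Some`s compare by the inner timestamps, so stale reports are ignored.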
1454 :
1455 : // Look for core dumps and collect backtraces.
1456 : //
1457 : // EKS worker nodes have following core dump settings:
1458 : // /proc/sys/kernel/core_pattern -> core
1459 : // /proc/sys/kernel/core_uses_pid -> 1
1460 : // ulimit -c -> unlimited
1461 : // which results in core dumps being written to postgres data directory as core.<pid>.
1462 : //
1463 : // Use that as a default location and pattern, except macos where core dumps are written
1464 : // to /cores/ directory by default.
1465 : //
1466 : // With default Linux settings, the core dump file is called just "core", so check for
1467 : // that too.
1468 0 : pub fn check_for_core_dumps(&self) -> Result<()> {
1469 0 : let core_dump_dir = match std::env::consts::OS {
1470 0 : "macos" => Path::new("/cores/"),
1471 0 : _ => Path::new(&self.pgdata),
1472 : };
1473 :
1474 : // Collect core dump paths if any
1475 0 : info!("checking for core dumps in {}", core_dump_dir.display());
1476 0 : let files = fs::read_dir(core_dump_dir)?;
1477 0 : let cores = files.filter_map(|entry| {
1478 0 : let entry = entry.ok()?;
1479 :
1480 0 : let is_core_dump = match entry.file_name().to_str()? {
1481 0 : n if n.starts_with("core.") => true,
1482 0 : "core" => true,
1483 0 : _ => false,
1484 : };
1485 0 : if is_core_dump {
1486 0 : Some(entry.path())
1487 : } else {
1488 0 : None
1489 : }
1490 0 : });
1491 :
1492 : // Print backtrace for each core dump
1493 0 : for core_path in cores {
1494 0 : warn!(
1495 0 : "core dump found: {}, collecting backtrace",
1496 0 : core_path.display()
1497 : );
1498 :
1499 : // Try first with gdb
1500 0 : let backtrace = Command::new("gdb")
1501 0 : .args(["--batch", "-q", "-ex", "bt", &self.pgbin])
1502 0 : .arg(&core_path)
1503 0 : .output();
1504 :
1505 : // Try lldb if no gdb is found -- that is handy for local testing on macOS
1506 0 : let backtrace = match backtrace {
1507 0 : Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
1508 0 : warn!("cannot find gdb, trying lldb");
1509 0 : Command::new("lldb")
1510 0 : .arg("-c")
1511 0 : .arg(&core_path)
1512 0 : .args(["--batch", "-o", "bt all", "-o", "quit"])
1513 0 : .output()
1514 : }
1515 0 : _ => backtrace,
1516 0 : }?;
1517 :
1518 0 : warn!(
1519 0 : "core dump backtrace: {}",
1520 0 : String::from_utf8_lossy(&backtrace.stdout)
1521 : );
1522 0 : warn!(
1523 0 : "debugger stderr: {}",
1524 0 : String::from_utf8_lossy(&backtrace.stderr)
1525 : );
1526 : }
1527 :
1528 0 : Ok(())
1529 0 : }
1530 :
1531 : /// Select `pg_stat_statements` data and return it as a stringified JSON
1532 0 : pub async fn collect_insights(&self) -> String {
1533 0 : let mut result_rows: Vec<String> = Vec::new();
1534 0 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:collect_insights"));
1535 0 : let connect_result = conf.connect(NoTls).await;
1536 0 : let (client, connection) = connect_result.unwrap();
1537 0 : tokio::spawn(async move {
1538 0 : if let Err(e) = connection.await {
1539 0 : eprintln!("connection error: {}", e);
1540 0 : }
1541 0 : });
1542 0 : let result = client
1543 0 : .simple_query(
1544 0 : "SELECT
1545 0 : row_to_json(pg_stat_statements)
1546 0 : FROM
1547 0 : pg_stat_statements
1548 0 : WHERE
1549 0 : userid != 'cloud_admin'::regrole::oid
1550 0 : ORDER BY
1551 0 : (mean_exec_time + mean_plan_time) DESC
1552 0 : LIMIT 100",
1553 0 : )
1554 0 : .await;
1555 :
1556 0 : if let Ok(raw_rows) = result {
1557 0 : for message in raw_rows.iter() {
1558 0 : if let postgres::SimpleQueryMessage::Row(row) = message {
1559 0 : if let Some(json) = row.get(0) {
1560 0 : result_rows.push(json.to_string());
1561 0 : }
1562 0 : }
1563 : }
1564 :
1565 0 : format!("{{\"pg_stat_statements\": [{}]}}", result_rows.join(","))
1566 : } else {
1567 0 : "{{\"pg_stat_statements\": []}}".to_string()
1568 : }
1569 0 : }
1570 :
1571 : // download an archive, unzip and place files in correct locations
1572 0 : pub async fn download_extension(
1573 0 : &self,
1574 0 : real_ext_name: String,
1575 0 : ext_path: RemotePath,
1576 0 : ) -> Result<u64, DownloadError> {
1577 0 : let ext_remote_storage =
1578 0 : self.ext_remote_storage
1579 0 : .as_ref()
1580 0 : .ok_or(DownloadError::BadInput(anyhow::anyhow!(
1581 0 : "Remote extensions storage is not configured",
1582 0 : )))?;
1583 :
1584 0 : let ext_archive_name = ext_path.object_name().expect("bad path");
1585 0 :
1586 0 : let mut first_try = false;
1587 0 : if !self
1588 0 : .ext_download_progress
1589 0 : .read()
1590 0 : .expect("lock err")
1591 0 : .contains_key(ext_archive_name)
1592 0 : {
1593 0 : self.ext_download_progress
1594 0 : .write()
1595 0 : .expect("lock err")
1596 0 : .insert(ext_archive_name.to_string(), (Utc::now(), false));
1597 0 : first_try = true;
1598 0 : }
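  : // The progress map provides simple in-process coordination: the first
  : // caller inserts (start_time, false) and performs the download, while
  : // concurrent callers poll the entry until `completed` flips to true.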
1599 0 : let (download_start, download_completed) =
1600 0 : self.ext_download_progress.read().expect("lock err")[ext_archive_name];
1601 0 : let start_time_delta = Utc::now()
1602 0 : .signed_duration_since(download_start)
1603 0 : .to_std()
1604 0 : .unwrap()
1605 0 : .as_millis() as u64;
1606 :
1607 : // how long to wait for extension download if it was started by another process
1608 : const HANG_TIMEOUT: u64 = 3000; // milliseconds
1609 :
1610 0 : if download_completed {
1611 0 : info!("extension already downloaded, skipping re-download");
1612 0 : return Ok(0);
1613 0 : } else if start_time_delta < HANG_TIMEOUT && !first_try {
1614 0 : info!("download {ext_archive_name} already started by another process, waiting until completion or timeout");
1615 0 : let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500));
1616 : loop {
1617 0 : info!("waiting for download");
1618 0 : interval.tick().await;
1619 0 : let (_, download_completed_now) =
1620 0 : self.ext_download_progress.read().expect("lock")[ext_archive_name];
1621 0 : if download_completed_now {
1622 0 : info!("download finished by another process");
1623 0 : return Ok(0);
1624 0 : }
1625 : }
1626 : // NOTE: the above loop never exits on its own; it relies on
1627 : // the timeout of the enclosing download function to terminate it.
1628 0 : }
1629 0 :
1630 0 : // If the extension hasn't been downloaded before, or the previous
1631 0 : // attempt started at least HANG_TIMEOUT ms ago, download it here.
1633 0 : info!("downloading new extension {ext_archive_name}");
1634 :
1635 0 : let download_size = extension_server::download_extension(
1636 0 : &real_ext_name,
1637 0 : &ext_path,
1638 0 : ext_remote_storage,
1639 0 : &self.pgbin,
1640 0 : )
1641 0 : .await
1642 0 : .map_err(DownloadError::Other);
1643 0 :
1644 0 : if download_size.is_ok() {
1645 0 : self.ext_download_progress
1646 0 : .write()
1647 0 : .expect("bad lock")
1648 0 : .insert(ext_archive_name.to_string(), (download_start, true));
1649 0 : }
1650 :
1651 0 : download_size
1652 0 : }
1653 :
1654 0 : pub async fn set_role_grants(
1655 0 : &self,
1656 0 : db_name: &PgIdent,
1657 0 : schema_name: &PgIdent,
1658 0 : privileges: &[Privilege],
1659 0 : role_name: &PgIdent,
1660 0 : ) -> Result<()> {
1661 : use tokio_postgres::NoTls;
1662 :
1663 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:set_role_grants"));
1664 0 : conf.dbname(db_name);
1665 :
1666 0 : let (db_client, conn) = conf
1667 0 : .connect(NoTls)
1668 0 : .await
1669 0 : .context("Failed to connect to the database")?;
1670 0 : tokio::spawn(conn);
1671 0 :
1672 0 : // TODO: support other types of grants apart from schemas?
1673 0 : let query = format!(
1674 0 : "GRANT {} ON SCHEMA {} TO {}",
1675 0 : privileges
1676 0 : .iter()
1677 0 : // should not be quoted as it's part of the command.
1678 0 : // is already sanitized so it's ok
1679 0 : .map(|p| p.as_str())
1680 0 : .collect::<Vec<&'static str>>()
1681 0 : .join(", "),
1682 0 : // quote the schema and role name as identifiers to sanitize them.
1683 0 : schema_name.pg_quote(),
1684 0 : role_name.pg_quote(),
1685 0 : );
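  : // With illustrative inputs, the generated statement looks like:
  : //   GRANT USAGE, CREATE ON SCHEMA "app" TO "app_user"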
1686 0 : db_client
1687 0 : .simple_query(&query)
1688 0 : .await
1689 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
1690 :
1691 0 : Ok(())
1692 0 : }
1693 :
1694 0 : pub async fn install_extension(
1695 0 : &self,
1696 0 : ext_name: &PgIdent,
1697 0 : db_name: &PgIdent,
1698 0 : ext_version: ExtVersion,
1699 0 : ) -> Result<ExtVersion> {
1700 : use tokio_postgres::NoTls;
1701 :
1702 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:install_extension"));
1703 0 : conf.dbname(db_name);
1704 :
1705 0 : let (db_client, conn) = conf
1706 0 : .connect(NoTls)
1707 0 : .await
1708 0 : .context("Failed to connect to the database")?;
1709 0 : tokio::spawn(conn);
1710 0 :
1711 0 : let version_query = "SELECT extversion FROM pg_extension WHERE extname = $1";
1712 0 : let version: Option<ExtVersion> = db_client
1713 0 : .query_opt(version_query, &[&ext_name])
1714 0 : .await
1715 0 : .with_context(|| format!("Failed to execute query: {}", version_query))?
1716 0 : .map(|row| row.get(0));
1717 0 :
1718 0 : // sanitize the inputs as postgres idents.
1719 0 : let ext_name: String = ext_name.pg_quote();
1720 0 : let quoted_version: String = ext_version.pg_quote();
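  : // pg_quote wraps the value in double quotes, escaping any embedded
  : // quotes, e.g. (illustrative) pg_trgm -> "pg_trgm", 1.6 -> "1.6".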
1721 :
1722 0 : if let Some(installed_version) = version {
1723 0 : if installed_version == ext_version {
1724 0 : return Ok(installed_version);
1725 0 : }
1726 0 : let query = format!("ALTER EXTENSION {ext_name} UPDATE TO {quoted_version}");
1727 0 : db_client
1728 0 : .simple_query(&query)
1729 0 : .await
1730 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
1731 : } else {
1732 0 : let query =
1733 0 : format!("CREATE EXTENSION IF NOT EXISTS {ext_name} WITH VERSION {quoted_version}");
1734 0 : db_client
1735 0 : .simple_query(&query)
1736 0 : .await
1737 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
1738 : }
1739 :
1740 0 : Ok(ext_version)
1741 0 : }
1742 :
1743 : #[tokio::main]
1744 0 : pub async fn prepare_preload_libraries(
1745 0 : &self,
1746 0 : spec: &ComputeSpec,
1747 0 : ) -> Result<RemoteExtensionMetrics> {
1748 0 : if self.ext_remote_storage.is_none() {
1749 0 : return Ok(RemoteExtensionMetrics {
1750 0 : num_ext_downloaded: 0,
1751 0 : largest_ext_size: 0,
1752 0 : total_ext_download_size: 0,
1753 0 : });
1754 0 : }
1755 0 : let remote_extensions = spec
1756 0 : .remote_extensions
1757 0 : .as_ref()
1758 0 : .ok_or(anyhow::anyhow!("Remote extensions are not configured"))?;
1759 0 :
1760 0 : info!("parse shared_preload_libraries from spec.cluster.settings");
1761 0 : let mut libs_vec = Vec::new();
1762 0 : if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
1763 0 : libs_vec = libs
1764 0 : .split(&[',', '\'', ' '])
1765 0 : .filter(|s| *s != "neon" && !s.is_empty())
1766 0 : .map(str::to_string)
1767 0 : .collect();
1768 0 : }
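  : // e.g. a setting value of 'neon,pg_stat_statements' parses to
  : // ["pg_stat_statements"]; "neon" itself is filtered out above.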
1769 0 : info!("parse shared_preload_libraries from provided postgresql.conf");
1770 0 :
1771 0 : // This path is used by neon_local and the Python tests.
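  : // Expects a line like (illustrative):
  : //   shared_preload_libraries='pg_stat_statements,timescaledb'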
1772 0 : if let Some(conf) = &spec.cluster.postgresql_conf {
1773 0 : let conf_lines = conf.split('\n').collect::<Vec<&str>>();
1774 0 : let mut shared_preload_libraries_line = "";
1775 0 : for line in conf_lines {
1776 0 : if line.starts_with("shared_preload_libraries") {
1777 0 : shared_preload_libraries_line = line;
1778 0 : }
1779 0 : }
1780 0 : let mut preload_libs_vec = Vec::new();
1781 0 : if let Some(libs) = shared_preload_libraries_line.split("='").nth(1) {
1782 0 : preload_libs_vec = libs
1783 0 : .split(&[',', '\'', ' '])
1784 0 : .filter(|s| *s != "neon" && !s.is_empty())
1785 0 : .map(str::to_string)
1786 0 : .collect();
1787 0 : }
1788 0 : libs_vec.extend(preload_libs_vec);
1789 0 : }
1790 0 :
1791 0 : // Don't try to download libraries that are not in the index.
1792 0 : // Assume that they are already present locally.
1793 0 : libs_vec.retain(|lib| remote_extensions.library_index.contains_key(lib));
1794 0 :
1795 0 : info!("Downloading shared preload libraries: {:?}", &libs_vec);
1796 0 :
1797 0 : let mut download_tasks = Vec::new();
1798 0 : for library in &libs_vec {
1799 0 : let (ext_name, ext_path) =
1800 0 : remote_extensions.get_ext(library, true, &self.build_tag, &self.pgversion)?;
1801 0 : download_tasks.push(self.download_extension(ext_name, ext_path));
1802 0 : }
1803 0 : let results = join_all(download_tasks).await;
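  : // All downloads run concurrently; a failure to download one extension
  : // is logged and tolerated below instead of failing the whole process.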
1804 0 :
1805 0 : let mut remote_ext_metrics = RemoteExtensionMetrics {
1806 0 : num_ext_downloaded: 0,
1807 0 : largest_ext_size: 0,
1808 0 : total_ext_download_size: 0,
1809 0 : };
1810 0 : for result in results {
1811 0 : let download_size = match result {
1812 0 : Ok(res) => {
1813 0 : remote_ext_metrics.num_ext_downloaded += 1;
1814 0 : res
1815 0 : }
1816 0 : Err(err) => {
1817 0 : // if we failed to download an extension, we don't want to fail the whole
1818 0 : // process, but we do want to log the error
1819 0 : error!("Failed to download extension: {}", err);
1820 0 : 0
1821 0 : }
1822 0 : };
1823 0 :
1824 0 : remote_ext_metrics.largest_ext_size =
1825 0 : std::cmp::max(remote_ext_metrics.largest_ext_size, download_size);
1826 0 : remote_ext_metrics.total_ext_download_size += download_size;
1827 0 : }
1828 0 : Ok(remote_ext_metrics)
1829 0 : }
1830 :
1831 : /// Waits until the current thread receives a state change notification and
1832 : /// the pageserver connection string has changed.
1833 : ///
1834 : /// The operation will time out after a specified duration.
1835 0 : pub fn wait_timeout_while_pageserver_connstr_unchanged(&self, duration: Duration) {
1836 0 : let state = self.state.lock().unwrap();
1837 0 : let old_pageserver_connstr = state
1838 0 : .pspec
1839 0 : .as_ref()
1840 0 : .expect("spec must be set")
1841 0 : .pageserver_connstr
1842 0 : .clone();
1843 0 : let mut unchanged = true;
1844 0 : let _ = self
1845 0 : .state_changed
1846 0 : .wait_timeout_while(state, duration, |s| {
1847 0 : let pageserver_connstr = &s
1848 0 : .pspec
1849 0 : .as_ref()
1850 0 : .expect("spec must be set")
1851 0 : .pageserver_connstr;
1852 0 : unchanged = pageserver_connstr == &old_pageserver_connstr;
1853 0 : unchanged
1854 0 : })
1855 0 : .unwrap();
1856 0 : if !unchanged {
1857 0 : info!("Pageserver config changed");
1858 0 : }
1859 0 : }
1860 : }
1861 :
1862 0 : pub fn forward_termination_signal() {
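  : // Terminate the sync-safekeepers process first (if one is running),
  : // then Postgres itself.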
1863 0 : let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
1864 0 : if ss_pid != 0 {
1865 0 : let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
1866 0 : kill(ss_pid, Signal::SIGTERM).ok();
1867 0 : }
1868 0 : let pg_pid = PG_PID.load(Ordering::SeqCst);
1869 0 : if pg_pid != 0 {
1870 0 : let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
1871 0 : // Use 'fast' shutdown (SIGINT) because it also creates a shutdown checkpoint, which is important for
1872 0 : // ROs to get a list of running xacts faster instead of going through the CLOG.
1873 0 : // See https://www.postgresql.org/docs/current/server-shutdown.html for the list of modes and signals.
1874 0 : kill(pg_pid, Signal::SIGINT).ok();
1875 0 : }
1876 0 : }
|