Line data Source code
1 : use std::collections::{HashMap, HashSet};
2 : use std::env;
3 : use std::fs;
4 : use std::iter::once;
5 : use std::os::unix::fs::{symlink, PermissionsExt};
6 : use std::path::Path;
7 : use std::process::{Command, Stdio};
8 : use std::str::FromStr;
9 : use std::sync::atomic::AtomicU32;
10 : use std::sync::atomic::Ordering;
11 : use std::sync::{Arc, Condvar, Mutex, RwLock};
12 : use std::thread;
13 : use std::time::Duration;
14 : use std::time::Instant;
15 :
16 : use anyhow::{Context, Result};
17 : use chrono::{DateTime, Utc};
18 : use compute_api::spec::{Database, PgIdent, Role};
19 : use futures::future::join_all;
20 : use futures::stream::FuturesUnordered;
21 : use futures::StreamExt;
22 : use nix::unistd::Pid;
23 : use postgres;
24 : use postgres::error::SqlState;
25 : use postgres::NoTls;
26 : use tracing::{debug, error, info, instrument, warn};
27 : use utils::id::{TenantId, TimelineId};
28 : use utils::lsn::Lsn;
29 :
30 : use compute_api::privilege::Privilege;
31 : use compute_api::responses::{ComputeMetrics, ComputeStatus};
32 : use compute_api::spec::{ComputeFeature, ComputeMode, ComputeSpec, ExtVersion};
33 : use utils::measured_stream::MeasuredReader;
34 :
35 : use nix::sys::signal::{kill, Signal};
36 : use remote_storage::{DownloadError, RemotePath};
37 : use tokio::spawn;
38 :
39 : use crate::installed_extensions::get_installed_extensions;
40 : use crate::local_proxy;
41 : use crate::pg_helpers::*;
42 : use crate::spec::*;
43 : use crate::spec_apply::ApplySpecPhase::{
44 : CreateAndAlterDatabases, CreateAndAlterRoles, CreateAvailabilityCheck, CreateSuperUser,
45 : DropInvalidDatabases, DropRoles, HandleNeonExtension, HandleOtherExtensions,
46 : RenameAndDeleteDatabases, RenameRoles, RunInEachDatabase,
47 : };
48 : use crate::spec_apply::PerDatabasePhase;
49 : use crate::spec_apply::PerDatabasePhase::{
50 : ChangeSchemaPerms, DeleteDBRoleReferences, DropSubscriptionsForDeletedDatabases,
51 : HandleAnonExtension,
52 : };
53 : use crate::spec_apply::{apply_operations, MutableApplyContext, DB};
54 : use crate::sync_sk::{check_if_synced, ping_safekeeper};
55 : use crate::{config, extension_server};
56 :
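// PIDs of the child processes spawned by this compute_ctl: the
// `postgres --sync-safekeepers` helper and the main Postgres process.
// They are updated when the corresponding child is spawned (see
// `sync_safekeepers()` and `start_postgres()` below).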
57 : pub static SYNC_SAFEKEEPERS_PID: AtomicU32 = AtomicU32::new(0);
58 : pub static PG_PID: AtomicU32 = AtomicU32::new(0);
59 :
60 : /// Compute node info shared across several `compute_ctl` threads.
61 : pub struct ComputeNode {
62 : // Url type maintains proper escaping
63 : pub connstr: url::Url,
64 : // We connect to Postgres from many different places, so build configs once
65 : // and reuse them where needed.
66 : pub conn_conf: postgres::config::Config,
67 : pub tokio_conn_conf: tokio_postgres::config::Config,
68 : pub pgdata: String,
69 : pub pgbin: String,
70 : pub pgversion: String,
71 : /// We should only allow live re-/configuration of the compute node if
72 : /// it uses the 'pull model', i.e. it can go to the control plane and fetch
73 : /// the latest configuration. Otherwise, there could be a case:
74 : /// - we start compute with some spec provided as argument
75 : /// - we push new spec and it does reconfiguration
76 : /// - but then something happens and compute pod / VM is destroyed,
77 : /// so k8s controller starts it again with the **old** spec
78 : ///
79 : /// and the same for empty computes:
80 : /// - we started compute without any spec
81 : /// - we push spec and it does configuration
82 : /// - but then it is restarted without any spec again
83 : pub live_config_allowed: bool,
84 : /// The port that the compute's HTTP server listens on
85 : pub http_port: u16,
86 : /// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
87 : /// To allow the HTTP API server to serve status requests while configuration
88 : /// is in progress, the lock should be held only for short periods of time to do
89 : /// reads/writes, not for the whole configuration process.
90 : pub state: Mutex<ComputeState>,
91 : /// `Condvar` to allow notifying waiters about state changes.
92 : pub state_changed: Condvar,
93 : /// The address of the extension storage proxy gateway.
94 : pub ext_remote_storage: Option<String>,
95 : // key: ext_archive_name, value: (download start time, download_completed flag)
96 : pub ext_download_progress: RwLock<HashMap<String, (DateTime<Utc>, bool)>>,
97 : pub build_tag: String,
98 : }
99 :
100 : // store some metrics about download size that might impact startup time
101 : #[derive(Clone, Debug)]
102 : pub struct RemoteExtensionMetrics {
103 : num_ext_downloaded: u64,
104 : largest_ext_size: u64,
105 : total_ext_download_size: u64,
106 : }
107 :
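/// Mutable runtime state of the compute. It is kept behind the
/// `ComputeNode::state` mutex and cloned where a consistent snapshot is needed.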
108 : #[derive(Clone, Debug)]
109 : pub struct ComputeState {
110 : pub start_time: DateTime<Utc>,
111 : pub status: ComputeStatus,
112 : /// Timestamp of the last Postgres activity. It could be `None` if
113 : /// the compute hasn't been used since start.
114 : pub last_active: Option<DateTime<Utc>>,
115 : pub error: Option<String>,
116 : pub pspec: Option<ParsedSpec>,
117 : pub metrics: ComputeMetrics,
118 : }
119 :
120 : impl ComputeState {
121 0 : pub fn new() -> Self {
122 0 : Self {
123 0 : start_time: Utc::now(),
124 0 : status: ComputeStatus::Empty,
125 0 : last_active: None,
126 0 : error: None,
127 0 : pspec: None,
128 0 : metrics: ComputeMetrics::default(),
129 0 : }
130 0 : }
131 :
132 0 : pub fn set_status(&mut self, status: ComputeStatus, state_changed: &Condvar) {
133 0 : let prev = self.status;
134 0 : info!("Changing compute status from {} to {}", prev, status);
135 0 : self.status = status;
136 0 : state_changed.notify_all();
137 0 : }
138 :
139 0 : pub fn set_failed_status(&mut self, err: anyhow::Error, state_changed: &Condvar) {
140 0 : self.error = Some(format!("{err:?}"));
141 0 : self.set_status(ComputeStatus::Failed, state_changed);
142 0 : }
143 : }
144 :
145 : impl Default for ComputeState {
146 0 : fn default() -> Self {
147 0 : Self::new()
148 0 : }
149 : }
150 :
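/// A `ComputeSpec` parsed and validated into the fields `compute_ctl` needs to
/// reach storage: pageserver and safekeeper connection strings, tenant and
/// timeline IDs, and an optional auth token. Each is taken from the top-level
/// spec fields or, for backwards compatibility, dug out of the GUCs in
/// `cluster.settings` (see the `TryFrom<ComputeSpec>` impl below).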
151 : #[derive(Clone, Debug)]
152 : pub struct ParsedSpec {
153 : pub spec: ComputeSpec,
154 : pub tenant_id: TenantId,
155 : pub timeline_id: TimelineId,
156 : pub pageserver_connstr: String,
157 : pub safekeeper_connstrings: Vec<String>,
158 : pub storage_auth_token: Option<String>,
159 : }
160 :
161 : impl TryFrom<ComputeSpec> for ParsedSpec {
162 : type Error = String;
163 0 : fn try_from(spec: ComputeSpec) -> Result<Self, String> {
164 : // Extract the options from the spec file that are needed to connect to
165 : // the storage system.
166 : //
167 : // For backwards-compatibility, the top-level fields in the spec file
168 : // may be empty. In that case, we need to dig them from the GUCs in the
169 : // cluster.settings field.
170 0 : let pageserver_connstr = spec
171 0 : .pageserver_connstring
172 0 : .clone()
173 0 : .or_else(|| spec.cluster.settings.find("neon.pageserver_connstring"))
174 0 : .ok_or("pageserver connstr should be provided")?;
175 0 : let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() {
176 0 : if matches!(spec.mode, ComputeMode::Primary) {
177 0 : spec.cluster
178 0 : .settings
179 0 : .find("neon.safekeepers")
180 0 : .ok_or("safekeeper connstrings should be provided")?
181 0 : .split(',')
182 0 : .map(|str| str.to_string())
183 0 : .collect()
184 : } else {
185 0 : vec![]
186 : }
187 : } else {
188 0 : spec.safekeeper_connstrings.clone()
189 : };
190 0 : let storage_auth_token = spec.storage_auth_token.clone();
191 0 : let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
192 0 : tenant_id
193 : } else {
194 0 : spec.cluster
195 0 : .settings
196 0 : .find("neon.tenant_id")
197 0 : .ok_or("tenant id should be provided")
198 0 : .map(|s| TenantId::from_str(&s))?
199 0 : .or(Err("invalid tenant id"))?
200 : };
201 0 : let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
202 0 : timeline_id
203 : } else {
204 0 : spec.cluster
205 0 : .settings
206 0 : .find("neon.timeline_id")
207 0 : .ok_or("timeline id should be provided")
208 0 : .map(|s| TimelineId::from_str(&s))?
209 0 : .or(Err("invalid timeline id"))?
210 : };
211 :
212 0 : Ok(ParsedSpec {
213 0 : spec,
214 0 : pageserver_connstr,
215 0 : safekeeper_connstrings,
216 0 : storage_auth_token,
217 0 : tenant_id,
218 0 : timeline_id,
219 0 : })
220 0 : }
221 : }
222 :
223 : /// If we are a VM, returns a [`Command`] that will run in the `neon-postgres`
224 : /// cgroup. Otherwise returns the default `Command::new(cmd)`
225 : ///
226 : /// This function should be used to start postgres, as it will start it in the
227 : /// neon-postgres cgroup if we are a VM. This allows autoscaling to control
228 : /// postgres' resource usage. The cgroup will exist in VMs because vm-builder
229 : /// creates it during the sysinit phase of its inittab.
230 0 : fn maybe_cgexec(cmd: &str) -> Command {
231 0 : // The cplane sets this env var for autoscaling computes.
232 0 : // Use `var_os` so we don't have to worry about the variable being valid
233 0 : // unicode. Should never be a concern, but just in case.
234 0 : if env::var_os("AUTOSCALING").is_some() {
235 0 : let mut command = Command::new("cgexec");
236 0 : command.args(["-g", "memory:neon-postgres"]);
237 0 : command.arg(cmd);
238 0 : command
239 : } else {
240 0 : Command::new(cmd)
241 : }
242 0 : }
243 :
244 0 : pub(crate) fn construct_superuser_query(spec: &ComputeSpec) -> String {
245 0 : let roles = spec
246 0 : .cluster
247 0 : .roles
248 0 : .iter()
249 0 : .map(|r| escape_literal(&r.name))
250 0 : .collect::<Vec<_>>();
251 0 :
252 0 : let dbs = spec
253 0 : .cluster
254 0 : .databases
255 0 : .iter()
256 0 : .map(|db| escape_literal(&db.name))
257 0 : .collect::<Vec<_>>();
258 :
259 0 : let roles_decl = if roles.is_empty() {
260 0 : String::from("roles text[] := NULL;")
261 : } else {
262 0 : format!(
263 0 : r#"
264 0 : roles text[] := ARRAY(SELECT rolname
265 0 : FROM pg_catalog.pg_roles
266 0 : WHERE rolname IN ({}));"#,
267 0 : roles.join(", ")
268 0 : )
269 : };
270 :
271 0 : let database_decl = if dbs.is_empty() {
272 0 : String::from("dbs text[] := NULL;")
273 : } else {
274 0 : format!(
275 0 : r#"
276 0 : dbs text[] := ARRAY(SELECT datname
277 0 : FROM pg_catalog.pg_database
278 0 : WHERE datname IN ({}));"#,
279 0 : dbs.join(", ")
280 0 : )
281 : };
282 :
283 : // ALL PRIVILEGES grants CREATE, CONNECT, and TEMPORARY on all databases
284 : // (see https://www.postgresql.org/docs/current/ddl-priv.html)
285 0 : let query = format!(
286 0 : r#"
287 0 : DO $$
288 0 : DECLARE
289 0 : r text;
290 0 : {}
291 0 : {}
292 0 : BEGIN
293 0 : IF NOT EXISTS (
294 0 : SELECT FROM pg_catalog.pg_roles WHERE rolname = 'neon_superuser')
295 0 : THEN
296 0 : CREATE ROLE neon_superuser CREATEDB CREATEROLE NOLOGIN REPLICATION BYPASSRLS IN ROLE pg_read_all_data, pg_write_all_data;
297 0 : IF array_length(roles, 1) IS NOT NULL THEN
298 0 : EXECUTE format('GRANT neon_superuser TO %s',
299 0 : array_to_string(ARRAY(SELECT quote_ident(x) FROM unnest(roles) as x), ', '));
300 0 : FOREACH r IN ARRAY roles LOOP
301 0 : EXECUTE format('ALTER ROLE %s CREATEROLE CREATEDB', quote_ident(r));
302 0 : END LOOP;
303 0 : END IF;
304 0 : IF array_length(dbs, 1) IS NOT NULL THEN
305 0 : EXECUTE format('GRANT ALL PRIVILEGES ON DATABASE %s TO neon_superuser',
306 0 : array_to_string(ARRAY(SELECT quote_ident(x) FROM unnest(dbs) as x), ', '));
307 0 : END IF;
308 0 : END IF;
309 0 : END
310 0 : $$;"#,
311 0 : roles_decl, database_decl,
312 0 : );
313 0 :
314 0 : query
315 0 : }
316 :
317 : impl ComputeNode {
318 : /// Check that compute node has corresponding feature enabled.
319 0 : pub fn has_feature(&self, feature: ComputeFeature) -> bool {
320 0 : let state = self.state.lock().unwrap();
321 :
322 0 : if let Some(s) = state.pspec.as_ref() {
323 0 : s.spec.features.contains(&feature)
324 : } else {
325 0 : false
326 : }
327 0 : }
328 :
329 0 : pub fn set_status(&self, status: ComputeStatus) {
330 0 : let mut state = self.state.lock().unwrap();
331 0 : state.set_status(status, &self.state_changed);
332 0 : }
333 :
334 0 : pub fn set_failed_status(&self, err: anyhow::Error) {
335 0 : let mut state = self.state.lock().unwrap();
336 0 : state.set_failed_status(err, &self.state_changed);
337 0 : }
338 :
339 0 : pub fn get_status(&self) -> ComputeStatus {
340 0 : self.state.lock().unwrap().status
341 0 : }
342 :
343 : // Remove the `pgdata` directory and create it again with the right permissions.
344 0 : fn create_pgdata(&self) -> Result<()> {
345 0 : // Ignore the removal error; most likely it is a 'No such file or directory (os error 2)'.
346 0 : // If it is something different, create_dir() will error out anyway.
347 0 : let _ok = fs::remove_dir_all(&self.pgdata);
348 0 : fs::create_dir(&self.pgdata)?;
349 0 : fs::set_permissions(&self.pgdata, fs::Permissions::from_mode(0o700))?;
350 :
351 0 : Ok(())
352 0 : }
353 :
354 : // Get a basebackup from the pageserver over a libpq connection and
355 : // unpack it into the `pgdata` directory, overwriting all its previous content.
356 : #[instrument(skip_all, fields(%lsn))]
357 : fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
358 : let spec = compute_state.pspec.as_ref().expect("spec must be set");
359 : let start_time = Instant::now();
360 :
361 : let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
362 : let mut config = postgres::Config::from_str(shard0_connstr)?;
363 :
364 : // Use the storage auth token from the config file, if given.
365 : // Note: this overrides any password set in the connection string.
366 : if let Some(storage_auth_token) = &spec.storage_auth_token {
367 : info!("Got storage auth token from spec file");
368 : config.password(storage_auth_token);
369 : } else {
370 : info!("Storage auth token not set");
371 : }
372 :
373 : // Connect to pageserver
374 : let mut client = config.connect(NoTls)?;
375 : let pageserver_connect_micros = start_time.elapsed().as_micros() as u64;
376 :
377 : let basebackup_cmd = match lsn {
378 : Lsn(0) => {
379 : if spec.spec.mode != ComputeMode::Primary {
380 : format!(
381 : "basebackup {} {} --gzip --replica",
382 : spec.tenant_id, spec.timeline_id
383 : )
384 : } else {
385 : format!("basebackup {} {} --gzip", spec.tenant_id, spec.timeline_id)
386 : }
387 : }
388 : _ => {
389 : if spec.spec.mode != ComputeMode::Primary {
390 : format!(
391 : "basebackup {} {} {} --gzip --replica",
392 : spec.tenant_id, spec.timeline_id, lsn
393 : )
394 : } else {
395 : format!(
396 : "basebackup {} {} {} --gzip",
397 : spec.tenant_id, spec.timeline_id, lsn
398 : )
399 : }
400 : }
401 : };
402 :
403 : let copyreader = client.copy_out(basebackup_cmd.as_str())?;
404 : let mut measured_reader = MeasuredReader::new(copyreader);
405 : let mut bufreader = std::io::BufReader::new(&mut measured_reader);
406 :
407 : // Read the archive directly from the `CopyOutReader`
408 : //
409 : // Set `ignore_zeros` so that unpack() reads all the Copy data and
410 : // doesn't stop at the end-of-archive marker. Otherwise, if the server
411 : // sends an Error after finishing the tarball, we will not notice it.
412 : let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut bufreader));
413 : ar.set_ignore_zeros(true);
414 : ar.unpack(&self.pgdata)?;
415 :
416 : // Report metrics
417 : let mut state = self.state.lock().unwrap();
418 : state.metrics.pageserver_connect_micros = pageserver_connect_micros;
419 : state.metrics.basebackup_bytes = measured_reader.get_byte_count() as u64;
420 : state.metrics.basebackup_ms = start_time.elapsed().as_millis() as u64;
421 : Ok(())
422 : }
423 :
424 : // Gets the basebackup in a retry loop
425 : #[instrument(skip_all, fields(%lsn))]
426 : pub fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
427 : let mut retry_period_ms = 500.0;
428 : let mut attempts = 0;
429 : const DEFAULT_ATTEMPTS: u16 = 10;
430 : #[cfg(feature = "testing")]
431 : let max_attempts = if let Ok(v) = env::var("NEON_COMPUTE_TESTING_BASEBACKUP_RETRIES") {
432 : u16::from_str(&v).unwrap()
433 : } else {
434 : DEFAULT_ATTEMPTS
435 : };
436 : #[cfg(not(feature = "testing"))]
437 : let max_attempts = DEFAULT_ATTEMPTS;
438 : loop {
439 : let result = self.try_get_basebackup(compute_state, lsn);
440 : match result {
441 : Ok(_) => {
442 : return result;
443 : }
444 : Err(ref e) if attempts < max_attempts => {
445 : warn!(
446 : "Failed to get basebackup: {} (attempt {}/{})",
447 : e, attempts, max_attempts
448 : );
449 : std::thread::sleep(std::time::Duration::from_millis(retry_period_ms as u64));
450 : retry_period_ms *= 1.5;
451 : }
452 : Err(_) => {
453 : return result;
454 : }
455 : }
456 : attempts += 1;
457 : }
458 : }
459 :
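/// Ping all safekeepers concurrently and wait for a quorum of responses.
/// If a quorum cannot be reached (or reports errors), return `Ok(None)` so
/// that the caller falls back to a full `postgres --sync-safekeepers` run.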
460 0 : pub async fn check_safekeepers_synced_async(
461 0 : &self,
462 0 : compute_state: &ComputeState,
463 0 : ) -> Result<Option<Lsn>> {
464 0 : // Construct a connection config for each safekeeper
465 0 : let pspec: ParsedSpec = compute_state
466 0 : .pspec
467 0 : .as_ref()
468 0 : .expect("spec must be set")
469 0 : .clone();
470 0 : let sk_connstrs: Vec<String> = pspec.safekeeper_connstrings.clone();
471 0 : let sk_configs = sk_connstrs.into_iter().map(|connstr| {
472 0 : // Format connstr
473 0 : let id = connstr.clone();
474 0 : let connstr = format!("postgresql://no_user@{}", connstr);
475 0 : let options = format!(
476 0 : "-c timeline_id={} tenant_id={}",
477 0 : pspec.timeline_id, pspec.tenant_id
478 0 : );
479 0 :
480 0 : // Construct client
481 0 : let mut config = tokio_postgres::Config::from_str(&connstr).unwrap();
482 0 : config.options(&options);
483 0 : if let Some(storage_auth_token) = pspec.storage_auth_token.clone() {
484 0 : config.password(storage_auth_token);
485 0 : }
486 :
487 0 : (id, config)
488 0 : });
489 0 :
490 0 : // Create task set to query all safekeepers
491 0 : let mut tasks = FuturesUnordered::new();
492 0 : let quorum = sk_configs.len() / 2 + 1;
493 0 : for (id, config) in sk_configs {
494 0 : let timeout = tokio::time::Duration::from_millis(100);
495 0 : let task = tokio::time::timeout(timeout, ping_safekeeper(id, config));
496 0 : tasks.push(tokio::spawn(task));
497 0 : }
498 :
499 : // Get a quorum of responses or errors
500 0 : let mut responses = Vec::new();
501 0 : let mut join_errors = Vec::new();
502 0 : let mut task_errors = Vec::new();
503 0 : let mut timeout_errors = Vec::new();
504 0 : while let Some(response) = tasks.next().await {
505 0 : match response {
506 0 : Ok(Ok(Ok(r))) => responses.push(r),
507 0 : Ok(Ok(Err(e))) => task_errors.push(e),
508 0 : Ok(Err(e)) => timeout_errors.push(e),
509 0 : Err(e) => join_errors.push(e),
510 : };
511 0 : if responses.len() >= quorum {
512 0 : break;
513 0 : }
514 0 : if join_errors.len() + task_errors.len() + timeout_errors.len() >= quorum {
515 0 : break;
516 0 : }
517 : }
518 :
519 : // In case of error, log and fail the check, but don't crash.
520 : // We're playing it safe because these errors could be transient
521 : // and we don't yet retry. Also being careful here allows us to
522 : // be backwards compatible with safekeepers that don't have the
523 : // TIMELINE_STATUS API yet.
524 0 : if responses.len() < quorum {
525 0 : error!(
526 0 : "failed sync safekeepers check {:?} {:?} {:?}",
527 : join_errors, task_errors, timeout_errors
528 : );
529 0 : return Ok(None);
530 0 : }
531 0 :
532 0 : Ok(check_if_synced(responses))
533 0 : }
534 :
535 : // Fast path for sync_safekeepers. If they're already synced we get the lsn
536 : // in one roundtrip. If not, we should do a full sync_safekeepers.
537 0 : pub fn check_safekeepers_synced(&self, compute_state: &ComputeState) -> Result<Option<Lsn>> {
538 0 : let start_time = Utc::now();
539 0 :
540 0 : // Run actual work with new tokio runtime
541 0 : let rt = tokio::runtime::Builder::new_current_thread()
542 0 : .enable_all()
543 0 : .build()
544 0 : .expect("failed to create rt");
545 0 : let result = rt.block_on(self.check_safekeepers_synced_async(compute_state));
546 0 :
547 0 : // Record runtime
548 0 : self.state.lock().unwrap().metrics.sync_sk_check_ms = Utc::now()
549 0 : .signed_duration_since(start_time)
550 0 : .to_std()
551 0 : .unwrap()
552 0 : .as_millis() as u64;
553 0 : result
554 0 : }
555 :
556 : // Run `postgres` in a special mode with `--sync-safekeepers` argument
557 : // and return the reported LSN back to the caller.
558 : #[instrument(skip_all)]
559 : pub fn sync_safekeepers(&self, storage_auth_token: Option<String>) -> Result<Lsn> {
560 : let start_time = Utc::now();
561 :
562 : let mut sync_handle = maybe_cgexec(&self.pgbin)
563 : .args(["--sync-safekeepers"])
564 : .env("PGDATA", &self.pgdata) // we cannot use -D in this mode
565 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
566 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
567 : } else {
568 : vec![]
569 : })
570 : .stdout(Stdio::piped())
571 : .stderr(Stdio::piped())
572 : .spawn()
573 : .expect("postgres --sync-safekeepers failed to start");
574 : SYNC_SAFEKEEPERS_PID.store(sync_handle.id(), Ordering::SeqCst);
575 :
576 : // `postgres --sync-safekeepers` will print all log output to stderr and
577 : // final LSN to stdout. So we leave stdout to collect LSN, while stderr logs
578 : // will be collected in a child thread.
579 : let stderr = sync_handle
580 : .stderr
581 : .take()
582 : .expect("stderr should be captured");
583 : let logs_handle = handle_postgres_logs(stderr);
584 :
585 : let sync_output = sync_handle
586 : .wait_with_output()
587 : .expect("postgres --sync-safekeepers failed");
588 : SYNC_SAFEKEEPERS_PID.store(0, Ordering::SeqCst);
589 :
590 : // Process has exited, so we can join the logs thread.
591 : let _ = logs_handle
592 : .join()
593 0 : .map_err(|e| tracing::error!("log thread panicked: {:?}", e));
594 :
595 : if !sync_output.status.success() {
596 : anyhow::bail!(
597 : "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
598 : sync_output.status,
599 : String::from_utf8(sync_output.stdout)
600 : .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
601 : );
602 : }
603 :
604 : self.state.lock().unwrap().metrics.sync_safekeepers_ms = Utc::now()
605 : .signed_duration_since(start_time)
606 : .to_std()
607 : .unwrap()
608 : .as_millis() as u64;
609 :
610 : let lsn = Lsn::from_str(String::from_utf8(sync_output.stdout)?.trim())?;
611 :
612 : Ok(lsn)
613 : }
614 :
615 : /// Do all the preparations like PGDATA directory creation, configuration,
616 : /// safekeepers sync, basebackup, etc.
617 : #[instrument(skip_all)]
618 : pub fn prepare_pgdata(&self, compute_state: &ComputeState) -> Result<()> {
619 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
620 : let spec = &pspec.spec;
621 : let pgdata_path = Path::new(&self.pgdata);
622 :
623 : // Remove/create an empty pgdata directory and put configuration there.
624 : self.create_pgdata()?;
625 : config::write_postgres_conf(
626 : &pgdata_path.join("postgresql.conf"),
627 : &pspec.spec,
628 : self.http_port,
629 : )?;
630 :
631 : // Syncing safekeepers is only safe with primary nodes: if a primary
632 : // is already connected it will be kicked out, so a secondary (standby)
633 : // cannot sync safekeepers.
634 : let lsn = match spec.mode {
635 : ComputeMode::Primary => {
636 : info!("checking if safekeepers are synced");
637 : let lsn = if let Ok(Some(lsn)) = self.check_safekeepers_synced(compute_state) {
638 : lsn
639 : } else {
640 : info!("starting safekeepers syncing");
641 : self.sync_safekeepers(pspec.storage_auth_token.clone())
642 0 : .with_context(|| "failed to sync safekeepers")?
643 : };
644 : info!("safekeepers synced at LSN {}", lsn);
645 : lsn
646 : }
647 : ComputeMode::Static(lsn) => {
648 : info!("Starting read-only node at static LSN {}", lsn);
649 : lsn
650 : }
651 : ComputeMode::Replica => {
652 : info!("Initializing standby from latest Pageserver LSN");
653 : Lsn(0)
654 : }
655 : };
656 :
657 : info!(
658 : "getting basebackup@{} from pageserver {}",
659 : lsn, &pspec.pageserver_connstr
660 : );
661 0 : self.get_basebackup(compute_state, lsn).with_context(|| {
662 0 : format!(
663 0 : "failed to get basebackup@{} from pageserver {}",
664 0 : lsn, &pspec.pageserver_connstr
665 0 : )
666 0 : })?;
667 :
668 : // Update pg_hba.conf received with basebackup.
669 : update_pg_hba(pgdata_path)?;
670 :
671 : // Place pg_dynshmem under /dev/shm. This allows us to use
672 : // 'dynamic_shared_memory_type = mmap' so that the files are placed in
673 : // /dev/shm, similar to how 'dynamic_shared_memory_type = posix' works.
674 : //
675 : // Why on earth don't we just stick to the 'posix' default, you might
676 : // ask. It turns out that making large allocations with 'posix' doesn't
677 : // work very well with autoscaling. The behavior we want is that:
678 : //
679 : // 1. You can make large DSM allocations, larger than the current RAM
680 : // size of the VM, without errors
681 : //
682 : // 2. If the allocated memory is really used, the VM is scaled up
683 : // automatically to accommodate that
684 : //
685 : // We try to make that possible by having swap in the VM. But with the
686 : // default 'posix' DSM implementation, we fail step 1, even when there's
687 : // plenty of swap available. PostgreSQL uses posix_fallocate() to create
688 : // the shmem segment, which is really just a file in /dev/shm in Linux,
689 : // but posix_fallocate() on tmpfs returns ENOMEM if the size is larger
690 : // than available RAM.
691 : //
692 : // Using 'dynamic_shared_memory_type = mmap' works around that, because
693 : // the Postgres 'mmap' DSM implementation doesn't use
694 : // posix_fallocate(). Instead, it uses repeated calls to write(2) to
695 : // fill the file with zeros. It's weird that that differs between
696 : // 'posix' and 'mmap', but we take advantage of it. When the file is
697 : // filled slowly with write(2), the kernel allows it to grow larger, as
698 : // long as there's swap available.
699 : //
700 : // In short, using 'dynamic_shared_memory_type = mmap' allows a DSM
701 : // segment to be larger than the currently available RAM. But we don't
702 : // want to store it in a real file, which the kernel would try to
703 : // flush to disk, so we symlink pg_dynshmem to /dev/shm.
704 : //
705 : // We don't set 'dynamic_shared_memory_type = mmap' here, we let the
706 : // control plane control that option. If 'mmap' is not used, this
707 : // symlink doesn't affect anything.
708 : //
709 : // See https://github.com/neondatabase/autoscaling/issues/800
710 : std::fs::remove_dir(pgdata_path.join("pg_dynshmem"))?;
711 : symlink("/dev/shm/", pgdata_path.join("pg_dynshmem"))?;
712 :
713 : match spec.mode {
714 : ComputeMode::Primary => {}
715 : ComputeMode::Replica | ComputeMode::Static(..) => {
716 : add_standby_signal(pgdata_path)?;
717 : }
718 : }
719 :
720 : Ok(())
721 : }
722 :
723 : /// Start and stop a postgres process to warm up the VM for startup.
724 0 : pub fn prewarm_postgres(&self) -> Result<()> {
725 0 : info!("prewarming");
726 :
727 : // Create pgdata
728 0 : let pgdata = &format!("{}.warmup", self.pgdata);
729 0 : create_pgdata(pgdata)?;
730 :
731 : // Run initdb to completion
732 0 : info!("running initdb");
733 0 : let initdb_bin = Path::new(&self.pgbin).parent().unwrap().join("initdb");
734 0 : Command::new(initdb_bin)
735 0 : .args(["--pgdata", pgdata])
736 0 : .output()
737 0 : .expect("cannot start initdb process");
738 :
739 : // Write conf
740 : use std::io::Write;
741 0 : let conf_path = Path::new(pgdata).join("postgresql.conf");
742 0 : let mut file = std::fs::File::create(conf_path)?;
743 0 : writeln!(file, "shared_buffers=65536")?;
744 0 : writeln!(file, "port=51055")?; // Nobody should be connecting
745 0 : writeln!(file, "shared_preload_libraries = 'neon'")?;
746 :
747 : // Start postgres
748 0 : info!("starting postgres");
749 0 : let mut pg = maybe_cgexec(&self.pgbin)
750 0 : .args(["-D", pgdata])
751 0 : .spawn()
752 0 : .expect("cannot start postgres process");
753 0 :
754 0 : // Stop it when it's ready
755 0 : info!("waiting for postgres");
756 0 : wait_for_postgres(&mut pg, Path::new(pgdata))?;
757 : // SIGQUIT orders postgres to exit immediately. We don't want to SIGKILL
758 : // it to avoid orphaned processes prowling around while datadir is
759 : // wiped.
760 0 : let pm_pid = Pid::from_raw(pg.id() as i32);
761 0 : kill(pm_pid, Signal::SIGQUIT)?;
762 0 : info!("sent SIGQUIT signal");
763 0 : pg.wait()?;
764 0 : info!("done prewarming");
765 :
766 : // clean up
767 0 : let _ok = fs::remove_dir_all(pgdata);
768 0 : Ok(())
769 0 : }
770 :
771 : /// Start Postgres as a child process and manage DBs/roles.
772 : /// After that this will hang waiting on the postmaster process to exit.
773 : /// Returns a handle to the child process and a handle to the logs thread.
774 : #[instrument(skip_all)]
775 : pub fn start_postgres(
776 : &self,
777 : storage_auth_token: Option<String>,
778 : ) -> Result<(std::process::Child, std::thread::JoinHandle<()>)> {
779 : let pgdata_path = Path::new(&self.pgdata);
780 :
781 : // Run postgres as a child process.
782 : let mut pg = maybe_cgexec(&self.pgbin)
783 : .args(["-D", &self.pgdata])
784 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
785 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
786 : } else {
787 : vec![]
788 : })
789 : .stderr(Stdio::piped())
790 : .spawn()
791 : .expect("cannot start postgres process");
792 : PG_PID.store(pg.id(), Ordering::SeqCst);
793 :
794 : // Start a thread to collect logs from stderr.
795 : let stderr = pg.stderr.take().expect("stderr should be captured");
796 : let logs_handle = handle_postgres_logs(stderr);
797 :
798 : wait_for_postgres(&mut pg, pgdata_path)?;
799 :
800 : Ok((pg, logs_handle))
801 : }
802 :
803 : /// Do post configuration of the already started Postgres. This function spawns a background thread to
804 : /// configure the database after applying the compute spec. Currently, it upgrades the neon extension
805 : /// version. In the future, it may upgrade all 3rd-party extensions.
806 : #[instrument(skip_all)]
807 : pub fn post_apply_config(&self) -> Result<()> {
808 : let conf = self.get_conn_conf(Some("compute_ctl:post_apply_config"));
809 0 : thread::spawn(move || {
810 0 : let func = || {
811 0 : let mut client = conf.connect(NoTls)?;
812 0 : handle_neon_extension_upgrade(&mut client)
813 0 : .context("handle_neon_extension_upgrade")?;
814 0 : Ok::<_, anyhow::Error>(())
815 0 : };
816 0 : if let Err(err) = func() {
817 0 : error!("error while post_apply_config: {err:#}");
818 0 : }
819 0 : });
820 : Ok(())
821 : }
822 :
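/// Clone the pre-built blocking `postgres` connection config, optionally
/// setting a custom `application_name`.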
823 0 : pub fn get_conn_conf(&self, application_name: Option<&str>) -> postgres::Config {
824 0 : let mut conf = self.conn_conf.clone();
825 0 : if let Some(application_name) = application_name {
826 0 : conf.application_name(application_name);
827 0 : }
828 0 : conf
829 0 : }
830 :
831 0 : pub fn get_tokio_conn_conf(&self, application_name: Option<&str>) -> tokio_postgres::Config {
832 0 : let mut conf = self.tokio_conn_conf.clone();
833 0 : if let Some(application_name) = application_name {
834 0 : conf.application_name(application_name);
835 0 : }
836 0 : conf
837 0 : }
838 :
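/// Open a maintenance connection for applying config changes. If the
/// `cloud_admin` credentials are rejected, fall back to the legacy
/// `zenith_admin` role, re-create `cloud_admin`, and reconnect.
/// DDL forwarding is disabled on the returned client.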
839 0 : pub async fn get_maintenance_client(
840 0 : conf: &tokio_postgres::Config,
841 0 : ) -> Result<tokio_postgres::Client> {
842 0 : let mut conf = conf.clone();
843 0 : conf.application_name("compute_ctl:apply_config");
844 :
845 0 : let (client, conn) = match conf.connect(NoTls).await {
846 : // If connection fails, it may be the old node with `zenith_admin` superuser.
847 : //
848 : // In this case we need to connect with the old `zenith_admin` name
849 : // and create a new user. We cannot simply rename the connected user,
850 : // but we can create a new one and grant it all privileges.
851 0 : Err(e) => match e.code() {
852 : Some(&SqlState::INVALID_PASSWORD)
853 : | Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => {
854 : // Connect with zenith_admin if cloud_admin could not authenticate
855 0 : info!(
856 0 : "cannot connect to postgres: {}, retrying with `zenith_admin` username",
857 : e
858 : );
859 0 : let mut zenith_admin_conf = postgres::config::Config::from(conf.clone());
860 0 : zenith_admin_conf.application_name("compute_ctl:apply_config");
861 0 : zenith_admin_conf.user("zenith_admin");
862 :
863 0 : let mut client =
864 0 : zenith_admin_conf.connect(NoTls)
865 0 : .context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
866 :
867 : // Disable forwarding so that users don't get a cloud_admin role
868 0 : let mut func = || {
869 0 : client.simple_query("SET neon.forward_ddl = false")?;
870 0 : client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
871 0 : client.simple_query("GRANT zenith_admin TO cloud_admin")?;
872 0 : Ok::<_, anyhow::Error>(())
873 0 : };
874 0 : func().context("apply_config setup cloud_admin")?;
875 :
876 0 : drop(client);
877 0 :
878 0 : // Reconnect with connstring with expected name
879 0 : conf.connect(NoTls).await?
880 : }
881 0 : _ => return Err(e.into()),
882 : },
883 0 : Ok((client, conn)) => (client, conn),
884 : };
885 :
886 0 : spawn(async move {
887 0 : if let Err(e) = conn.await {
888 0 : error!("maintenance client connection error: {}", e);
889 0 : }
890 0 : });
891 0 :
892 0 : // Disable DDL forwarding because control plane already knows about the roles/databases
893 0 : // we're about to modify.
894 0 : client
895 0 : .simple_query("SET neon.forward_ddl = false")
896 0 : .await
897 0 : .context("apply_config SET neon.forward_ddl = false")?;
898 :
899 0 : Ok(client)
900 0 : }
901 :
902 : /// Apply the spec to the running PostgreSQL instance.
903 : /// The caller can decide to run with multiple clients in parallel, or
904 : /// single mode. Either way, the commands executed will be the same, and
905 : /// only commands run in different databases are parallelized.
906 : #[instrument(skip_all)]
907 : pub fn apply_spec_sql(
908 : &self,
909 : spec: Arc<ComputeSpec>,
910 : conf: Arc<tokio_postgres::Config>,
911 : concurrency: usize,
912 : ) -> Result<()> {
913 : let rt = tokio::runtime::Builder::new_multi_thread()
914 : .enable_all()
915 : .build()?;
916 :
917 : info!("Applying config with max {} concurrency", concurrency);
918 : debug!("Config: {:?}", spec);
919 :
920 0 : rt.block_on(async {
921 : // Proceed with post-startup configuration. Note, that order of operations is important.
922 0 : let client = Self::get_maintenance_client(&conf).await?;
923 0 : let spec = spec.clone();
924 :
925 0 : let databases = get_existing_dbs_async(&client).await?;
926 0 : let roles = get_existing_roles_async(&client)
927 0 : .await?
928 0 : .into_iter()
929 0 : .map(|role| (role.name.clone(), role))
930 0 : .collect::<HashMap<String, Role>>();
931 0 :
932 0 : let jwks_roles = Arc::new(
933 0 : spec.as_ref()
934 0 : .local_proxy_config
935 0 : .iter()
936 0 : .flat_map(|it| &it.jwks)
937 0 : .flatten()
938 0 : .flat_map(|setting| &setting.role_names)
939 0 : .cloned()
940 0 : .collect::<HashSet<_>>(),
941 0 : );
942 0 :
943 0 : let ctx = Arc::new(tokio::sync::RwLock::new(MutableApplyContext {
944 0 : roles,
945 0 : dbs: databases,
946 0 : }));
947 0 :
948 0 : // Apply the special pre-drop-database phase.
949 0 : // NOTE: we use the code of the RunInEachDatabase phase for parallelism
950 0 : // and connection management, but we don't really run it in *each* database,
951 0 : // only in the databases we're about to drop.
952 0 : info!("Applying PerDatabase (pre-dropdb) phase");
953 0 : let concurrency_token = Arc::new(tokio::sync::Semaphore::new(concurrency));
954 0 :
955 0 : // Run the phase for each database that we're about to drop.
956 0 : let db_processes = spec
957 0 : .delta_operations
958 0 : .iter()
959 0 : .flatten()
960 0 : .filter_map(move |op| {
961 0 : if op.action.as_str() == "delete_db" {
962 0 : Some(op.name.clone())
963 : } else {
964 0 : None
965 : }
966 0 : })
967 0 : .map(|dbname| {
968 0 : let spec = spec.clone();
969 0 : let ctx = ctx.clone();
970 0 : let jwks_roles = jwks_roles.clone();
971 0 : let mut conf = conf.as_ref().clone();
972 0 : let concurrency_token = concurrency_token.clone();
973 0 : // We only need dbname field for this phase, so set other fields to dummy values
974 0 : let db = DB::UserDB(Database {
975 0 : name: dbname.clone(),
976 0 : owner: "cloud_admin".to_string(),
977 0 : options: None,
978 0 : restrict_conn: false,
979 0 : invalid: false,
980 0 : });
981 0 :
982 0 : debug!("Applying per-database phases for Database {:?}", &db);
983 :
984 0 : match &db {
985 0 : DB::SystemDB => {}
986 0 : DB::UserDB(db) => {
987 0 : conf.dbname(db.name.as_str());
988 0 : }
989 : }
990 :
991 0 : let conf = Arc::new(conf);
992 0 : let fut = Self::apply_spec_sql_db(
993 0 : spec.clone(),
994 0 : conf,
995 0 : ctx.clone(),
996 0 : jwks_roles.clone(),
997 0 : concurrency_token.clone(),
998 0 : db,
999 0 : [DropSubscriptionsForDeletedDatabases].to_vec(),
1000 0 : );
1001 0 :
1002 0 : Ok(spawn(fut))
1003 0 : })
1004 0 : .collect::<Vec<Result<_, anyhow::Error>>>();
1005 :
1006 0 : for process in db_processes.into_iter() {
1007 0 : let handle = process?;
1008 0 : if let Err(e) = handle.await? {
1009 : // Handle the error case where the database does not exist
1010 : // We do not check whether the DB exists or not in the deletion phase,
1011 : // so we shouldn't be strict about it in pre-deletion cleanup as well.
1012 0 : if e.to_string().contains("does not exist") {
1013 0 : warn!("Error dropping subscription: {}", e);
1014 : } else {
1015 0 : return Err(e);
1016 : }
1017 0 : };
1018 : }
1019 :
1020 0 : for phase in [
1021 0 : CreateSuperUser,
1022 0 : DropInvalidDatabases,
1023 0 : RenameRoles,
1024 0 : CreateAndAlterRoles,
1025 0 : RenameAndDeleteDatabases,
1026 0 : CreateAndAlterDatabases,
1027 : ] {
1028 0 : info!("Applying phase {:?}", &phase);
1029 0 : apply_operations(
1030 0 : spec.clone(),
1031 0 : ctx.clone(),
1032 0 : jwks_roles.clone(),
1033 0 : phase,
1034 0 : || async { Ok(&client) },
1035 0 : )
1036 0 : .await?;
1037 : }
1038 :
1039 0 : info!("Applying RunInEachDatabase2 phase");
1040 0 : let concurrency_token = Arc::new(tokio::sync::Semaphore::new(concurrency));
1041 0 :
1042 0 : let db_processes = spec
1043 0 : .cluster
1044 0 : .databases
1045 0 : .iter()
1046 0 : .map(|db| DB::new(db.clone()))
1047 0 : // include the system database as well
1048 0 : .chain(once(DB::SystemDB))
1049 0 : .map(|db| {
1050 0 : let spec = spec.clone();
1051 0 : let ctx = ctx.clone();
1052 0 : let jwks_roles = jwks_roles.clone();
1053 0 : let mut conf = conf.as_ref().clone();
1054 0 : let concurrency_token = concurrency_token.clone();
1055 0 : let db = db.clone();
1056 0 :
1057 0 : debug!("Applying per-database phases for Database {:?}", &db);
1058 :
1059 0 : match &db {
1060 0 : DB::SystemDB => {}
1061 0 : DB::UserDB(db) => {
1062 0 : conf.dbname(db.name.as_str());
1063 0 : }
1064 : }
1065 :
1066 0 : let conf = Arc::new(conf);
1067 0 : let fut = Self::apply_spec_sql_db(
1068 0 : spec.clone(),
1069 0 : conf,
1070 0 : ctx.clone(),
1071 0 : jwks_roles.clone(),
1072 0 : concurrency_token.clone(),
1073 0 : db,
1074 0 : [
1075 0 : DeleteDBRoleReferences,
1076 0 : ChangeSchemaPerms,
1077 0 : HandleAnonExtension,
1078 0 : ]
1079 0 : .to_vec(),
1080 0 : );
1081 0 :
1082 0 : Ok(spawn(fut))
1083 0 : })
1084 0 : .collect::<Vec<Result<_, anyhow::Error>>>();
1085 :
1086 0 : for process in db_processes.into_iter() {
1087 0 : let handle = process?;
1088 0 : handle.await??;
1089 : }
1090 :
1091 0 : for phase in vec![
1092 0 : HandleOtherExtensions,
1093 0 : HandleNeonExtension,
1094 0 : CreateAvailabilityCheck,
1095 0 : DropRoles,
1096 0 : ] {
1097 0 : debug!("Applying phase {:?}", &phase);
1098 0 : apply_operations(
1099 0 : spec.clone(),
1100 0 : ctx.clone(),
1101 0 : jwks_roles.clone(),
1102 0 : phase,
1103 0 : || async { Ok(&client) },
1104 0 : )
1105 0 : .await?;
1106 : }
1107 :
1108 0 : Ok::<(), anyhow::Error>(())
1109 0 : })?;
1110 :
1111 : Ok(())
1112 : }
1113 :
1114 : /// Apply SQL migrations of the RunInEachDatabase phase.
1115 : ///
1116 : /// May opt to not connect to databases that don't have any scheduled
1117 : /// operations. The function is concurrency-controlled with the provided
1118 : /// semaphore. The caller has to make sure the semaphore isn't exhausted.
1119 0 : async fn apply_spec_sql_db(
1120 0 : spec: Arc<ComputeSpec>,
1121 0 : conf: Arc<tokio_postgres::Config>,
1122 0 : ctx: Arc<tokio::sync::RwLock<MutableApplyContext>>,
1123 0 : jwks_roles: Arc<HashSet<String>>,
1124 0 : concurrency_token: Arc<tokio::sync::Semaphore>,
1125 0 : db: DB,
1126 0 : subphases: Vec<PerDatabasePhase>,
1127 0 : ) -> Result<()> {
1128 0 : let _permit = concurrency_token.acquire().await?;
1129 :
1130 0 : let mut client_conn = None;
1131 :
1132 0 : for subphase in subphases {
1133 0 : apply_operations(
1134 0 : spec.clone(),
1135 0 : ctx.clone(),
1136 0 : jwks_roles.clone(),
1137 0 : RunInEachDatabase {
1138 0 : db: db.clone(),
1139 0 : subphase,
1140 0 : },
1141 0 : // Only connect if apply_operation actually wants a connection.
1142 0 : // It's quite possible this database doesn't need any queries,
1143 0 : // so by not connecting we save time and effort connecting to
1144 0 : // that database.
1145 0 : || async {
1146 0 : if client_conn.is_none() {
1147 0 : let db_client = Self::get_maintenance_client(&conf).await?;
1148 0 : client_conn.replace(db_client);
1149 0 : }
1150 0 : let client = client_conn.as_ref().unwrap();
1151 0 : Ok(client)
1152 0 : },
1153 0 : )
1154 0 : .await?;
1155 : }
1156 :
1157 0 : drop(client_conn);
1158 0 :
1159 0 : Ok::<(), anyhow::Error>(())
1160 0 : }
1161 :
1162 : /// Choose how many concurrent connections to use for applying the spec changes.
1163 0 : pub fn max_service_connections(
1164 0 : &self,
1165 0 : compute_state: &ComputeState,
1166 0 : spec: &ComputeSpec,
1167 0 : ) -> usize {
1168 0 : // If the cluster is in Init state we don't have to deal with user connections,
1169 0 : // and can thus use all `max_connections` connection slots. However, that's generally not
1170 0 : // very efficient, so we still limit it to a smaller number.
1171 0 : if compute_state.status == ComputeStatus::Init {
1172 : // If the settings contain 'max_connections', use that as template
1173 0 : if let Some(config) = spec.cluster.settings.find("max_connections") {
1174 0 : config.parse::<usize>().ok()
1175 : } else {
1176 : // Otherwise, try to find the setting in the postgresql_conf string
1177 0 : spec.cluster
1178 0 : .postgresql_conf
1179 0 : .iter()
1180 0 : .flat_map(|conf| conf.split("\n"))
1181 0 : .filter_map(|line| {
1182 0 : if !line.contains("max_connections") {
1183 0 : return None;
1184 0 : }
1185 :
1186 0 : let (key, value) = line.split_once("=")?;
1187 0 : let key = key
1188 0 : .trim_start_matches(char::is_whitespace)
1189 0 : .trim_end_matches(char::is_whitespace);
1190 0 :
1191 0 : let value = value
1192 0 : .trim_start_matches(char::is_whitespace)
1193 0 : .trim_end_matches(char::is_whitespace);
1194 0 :
1195 0 : if key != "max_connections" {
1196 0 : return None;
1197 0 : }
1198 0 :
1199 0 : value.parse::<usize>().ok()
1200 0 : })
1201 0 : .next()
1202 : }
1203 : // If max_connections is present, use at most 1/3rd of that.
1204 : // When max_connections is lower than 30, try to use at least 10 connections, but
1205 : // never more than max_connections.
1206 0 : .map(|limit| match limit {
1207 0 : 0..10 => limit,
1208 0 : 10..30 => 10,
1209 0 : 30.. => limit / 3,
1210 0 : })
1211 0 : // If we didn't find max_connections, default to 10 concurrent connections.
1212 0 : .unwrap_or(10)
1213 : } else {
1214 : // state == Running
1215 : // Because the cluster is already in the Running state, we should assume users are
1216 : // already connected to the cluster, and high concurrency could negatively
1217 : // impact user connectivity. Therefore, we can limit concurrency to the number of
1218 : // reserved superuser connections, which users wouldn't be able to use anyway.
1219 0 : spec.cluster
1220 0 : .settings
1221 0 : .find("superuser_reserved_connections")
1222 0 : .iter()
1223 0 : .filter_map(|val| val.parse::<usize>().ok())
1224 0 : .map(|val| if val > 1 { val - 1 } else { 1 })
1225 0 : .last()
1226 0 : .unwrap_or(3)
1227 : }
1228 0 : }
1229 :
1230 : /// Do initial configuration of the already started Postgres.
1231 : #[instrument(skip_all)]
1232 : pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
1233 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:apply_config"));
1234 :
1235 : let conf = Arc::new(conf);
1236 : let spec = Arc::new(
1237 : compute_state
1238 : .pspec
1239 : .as_ref()
1240 : .expect("spec must be set")
1241 : .spec
1242 : .clone(),
1243 : );
1244 :
1245 : let max_concurrent_connections = self.max_service_connections(compute_state, &spec);
1246 :
1247 : // Merge-apply spec & changes to PostgreSQL state.
1248 : self.apply_spec_sql(spec.clone(), conf.clone(), max_concurrent_connections)?;
1249 :
1250 : if let Some(ref local_proxy) = &spec.clone().local_proxy_config {
1251 : info!("configuring local_proxy");
1252 : local_proxy::configure(local_proxy).context("apply_config local_proxy")?;
1253 : }
1254 :
1255 : // Run migrations separately to not hold up cold starts
1256 0 : thread::spawn(move || {
1257 0 : let conf = conf.as_ref().clone();
1258 0 : let mut conf = postgres::config::Config::from(conf);
1259 0 : conf.application_name("compute_ctl:migrations");
1260 0 :
1261 0 : match conf.connect(NoTls) {
1262 0 : Ok(mut client) => {
1263 0 : if let Err(e) = handle_migrations(&mut client) {
1264 0 : error!("Failed to run migrations: {}", e);
1265 0 : }
1266 : }
1267 0 : Err(e) => {
1268 0 : error!(
1269 0 : "Failed to connect to the compute for running migrations: {}",
1270 : e
1271 : );
1272 : }
1273 : };
1274 0 : });
1275 :
1276 : Ok::<(), anyhow::Error>(())
1277 : }
1278 :
1279 : // This is a wrapper around `pg_ctl reload`; note that right now we don't use
1280 : // `pg_ctl` for start / stop.
1281 : #[instrument(skip_all)]
1282 : fn pg_reload_conf(&self) -> Result<()> {
1283 : let pgctl_bin = Path::new(&self.pgbin).parent().unwrap().join("pg_ctl");
1284 : Command::new(pgctl_bin)
1285 : .args(["reload", "-D", &self.pgdata])
1286 : .output()
1287 : .expect("cannot run pg_ctl process");
1288 : Ok(())
1289 : }
1290 :
1291 : /// Similar to `apply_config()`, but does a slightly different sequence of operations,
1292 : /// as it's used to reconfigure a previously started and configured Postgres node.
1293 : #[instrument(skip_all)]
1294 : pub fn reconfigure(&self) -> Result<()> {
1295 : let spec = self.state.lock().unwrap().pspec.clone().unwrap().spec;
1296 :
1297 : if let Some(ref pgbouncer_settings) = spec.pgbouncer_settings {
1298 : info!("tuning pgbouncer");
1299 :
1300 : let rt = tokio::runtime::Builder::new_current_thread()
1301 : .enable_all()
1302 : .build()
1303 : .expect("failed to create rt");
1304 :
1305 : // Spawn a thread to do the tuning,
1306 : // so that we don't block the main thread that starts Postgres.
1307 : let pgbouncer_settings = pgbouncer_settings.clone();
1308 0 : let _handle = thread::spawn(move || {
1309 0 : let res = rt.block_on(tune_pgbouncer(pgbouncer_settings));
1310 0 : if let Err(err) = res {
1311 0 : error!("error while tuning pgbouncer: {err:?}");
1312 0 : }
1313 0 : });
1314 : }
1315 :
1316 : if let Some(ref local_proxy) = spec.local_proxy_config {
1317 : info!("configuring local_proxy");
1318 :
1319 : // Spawn a thread to do the configuration,
1320 : // so that we don't block the main thread that starts Postgres.
1321 : let local_proxy = local_proxy.clone();
1322 0 : let _handle = Some(thread::spawn(move || {
1323 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
1324 0 : error!("error while configuring local_proxy: {err:?}");
1325 0 : }
1326 0 : }));
1327 : }
1328 :
1329 : // Write new config
1330 : let pgdata_path = Path::new(&self.pgdata);
1331 : let postgresql_conf_path = pgdata_path.join("postgresql.conf");
1332 : config::write_postgres_conf(&postgresql_conf_path, &spec, self.http_port)?;
1333 :
1334 : let max_concurrent_connections = spec.reconfigure_concurrency;
1335 :
1336 : // Temporarily reset max_cluster_size in config
1337 : // to avoid the possibility of hitting the limit, while we are reconfiguring:
1338 : // creating new extensions, roles, etc.
1339 0 : config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
1340 0 : self.pg_reload_conf()?;
1341 :
1342 0 : if spec.mode == ComputeMode::Primary {
1343 0 : let mut conf = tokio_postgres::Config::from_str(self.connstr.as_str()).unwrap();
1344 0 : conf.application_name("apply_config");
1345 0 : let conf = Arc::new(conf);
1346 0 :
1347 0 : let spec = Arc::new(spec.clone());
1348 0 :
1349 0 : self.apply_spec_sql(spec, conf, max_concurrent_connections)?;
1350 0 : }
1351 :
1352 0 : Ok(())
1353 0 : })?;
1354 :
1355 : self.pg_reload_conf()?;
1356 :
1357 : let unknown_op = "unknown".to_string();
1358 : let op_id = spec.operation_uuid.as_ref().unwrap_or(&unknown_op);
1359 : info!(
1360 : "finished reconfiguration of compute node for operation {}",
1361 : op_id
1362 : );
1363 :
1364 : Ok(())
1365 : }
1366 :
1367 : #[instrument(skip_all)]
1368 : pub fn start_compute(&self) -> Result<(std::process::Child, std::thread::JoinHandle<()>)> {
1369 : let compute_state = self.state.lock().unwrap().clone();
1370 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
1371 : info!(
1372 : "starting compute for project {}, operation {}, tenant {}, timeline {}",
1373 : pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None"),
1374 : pspec.spec.operation_uuid.as_deref().unwrap_or("None"),
1375 : pspec.tenant_id,
1376 : pspec.timeline_id,
1377 : );
1378 :
1379 : // tune pgbouncer
1380 : if let Some(pgbouncer_settings) = &pspec.spec.pgbouncer_settings {
1381 : info!("tuning pgbouncer");
1382 :
1383 : let rt = tokio::runtime::Builder::new_current_thread()
1384 : .enable_all()
1385 : .build()
1386 : .expect("failed to create rt");
1387 :
1388 : // Spawn a thread to do the tuning,
1389 : // so that we don't block the main thread that starts Postgres.
1390 : let pgbouncer_settings = pgbouncer_settings.clone();
1391 0 : let _handle = thread::spawn(move || {
1392 0 : let res = rt.block_on(tune_pgbouncer(pgbouncer_settings));
1393 0 : if let Err(err) = res {
1394 0 : error!("error while tuning pgbouncer: {err:?}");
1395 0 : }
1396 0 : });
1397 : }
1398 :
1399 : if let Some(local_proxy) = &pspec.spec.local_proxy_config {
1400 : info!("configuring local_proxy");
1401 :
1402 : // Spawn a thread to do the configuration,
1403 : // so that we don't block the main thread that starts Postgres.
1404 : let local_proxy = local_proxy.clone();
1405 0 : let _handle = thread::spawn(move || {
1406 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
1407 0 : error!("error while configuring local_proxy: {err:?}");
1408 0 : }
1409 0 : });
1410 : }
1411 :
1412 : info!(
1413 : "start_compute spec.remote_extensions {:?}",
1414 : pspec.spec.remote_extensions
1415 : );
1416 :
1417 : // This part is sync, because we need to download
1418 : // remote shared_preload_libraries before postgres starts (if any).
1419 : if let Some(remote_extensions) = &pspec.spec.remote_extensions {
1420 : // First, create control files for all available extensions
1421 : extension_server::create_control_files(remote_extensions, &self.pgbin);
1422 :
1423 : let library_load_start_time = Utc::now();
1424 : let remote_ext_metrics = self.prepare_preload_libraries(&pspec.spec)?;
1425 :
1426 : let library_load_time = Utc::now()
1427 : .signed_duration_since(library_load_start_time)
1428 : .to_std()
1429 : .unwrap()
1430 : .as_millis() as u64;
1431 : let mut state = self.state.lock().unwrap();
1432 : state.metrics.load_ext_ms = library_load_time;
1433 : state.metrics.num_ext_downloaded = remote_ext_metrics.num_ext_downloaded;
1434 : state.metrics.largest_ext_size = remote_ext_metrics.largest_ext_size;
1435 : state.metrics.total_ext_download_size = remote_ext_metrics.total_ext_download_size;
1436 : info!(
1437 : "Loading shared_preload_libraries took {:?}ms",
1438 : library_load_time
1439 : );
1440 : info!("{:?}", remote_ext_metrics);
1441 : }
1442 :
1443 : self.prepare_pgdata(&compute_state)?;
1444 :
1445 : let start_time = Utc::now();
1446 : let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
1447 :
1448 : let config_time = Utc::now();
1449 : if pspec.spec.mode == ComputeMode::Primary {
1450 : if !pspec.spec.skip_pg_catalog_updates {
1451 : let pgdata_path = Path::new(&self.pgdata);
1452 : // temporarily reset max_cluster_size in config
1453 : // to avoid the possibility of hitting the limit, while we are applying config:
1454 : // creating new extensions, roles, etc...
1455 : config::with_compute_ctl_tmp_override(
1456 : pgdata_path,
1457 : "neon.max_cluster_size=-1",
1458 0 : || {
1459 0 : self.pg_reload_conf()?;
1460 :
1461 0 : self.apply_config(&compute_state)?;
1462 :
1463 0 : Ok(())
1464 0 : },
1465 : )?;
1466 : self.pg_reload_conf()?;
1467 : }
1468 : self.post_apply_config()?;
1469 :
1470 : let conf = self.get_conn_conf(None);
1471 0 : thread::spawn(move || {
1472 0 : let res = get_installed_extensions(conf);
1473 0 : match res {
1474 0 : Ok(extensions) => {
1475 0 : info!(
1476 0 : "[NEON_EXT_STAT] {}",
1477 0 : serde_json::to_string(&extensions)
1478 0 : .expect("failed to serialize extensions list")
1479 : );
1480 : }
1481 0 : Err(err) => error!("could not get installed extensions: {err:?}"),
1482 : }
1483 0 : });
1484 : }
1485 :
1486 : let startup_end_time = Utc::now();
1487 : {
1488 : let mut state = self.state.lock().unwrap();
1489 : state.metrics.start_postgres_ms = config_time
1490 : .signed_duration_since(start_time)
1491 : .to_std()
1492 : .unwrap()
1493 : .as_millis() as u64;
1494 : state.metrics.config_ms = startup_end_time
1495 : .signed_duration_since(config_time)
1496 : .to_std()
1497 : .unwrap()
1498 : .as_millis() as u64;
1499 : state.metrics.total_startup_ms = startup_end_time
1500 : .signed_duration_since(compute_state.start_time)
1501 : .to_std()
1502 : .unwrap()
1503 : .as_millis() as u64;
1504 : }
1505 : self.set_status(ComputeStatus::Running);
1506 :
1507 : info!(
1508 : "finished configuration of compute for project {}",
1509 : pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None")
1510 : );
1511 :
1512 : // Log metrics so that we can search for slow operations in logs
1513 : let metrics = {
1514 : let state = self.state.lock().unwrap();
1515 : state.metrics.clone()
1516 : };
1517 : info!(?metrics, "compute start finished");
1518 :
1519 : Ok(pg_process)
1520 : }
1521 :
1522 : /// Update the `last_active` in the shared state, but ensure that it's a more recent one.
1523 0 : pub fn update_last_active(&self, last_active: Option<DateTime<Utc>>) {
1524 0 : let mut state = self.state.lock().unwrap();
1525 0 : // NB: `Some(<DateTime>)` is always greater than `None`.
1526 0 : if last_active > state.last_active {
1527 0 : state.last_active = last_active;
1528 0 : debug!("set the last compute activity time to: {:?}", last_active);
1529 0 : }
1530 0 : }
1531 :
1532 : // Look for core dumps and collect backtraces.
1533 : //
1534 : // EKS worker nodes have the following core dump settings:
1535 : // /proc/sys/kernel/core_pattern -> core
1536 : // /proc/sys/kernel/core_uses_pid -> 1
1537 : // ulimit -c -> unlimited
1538 : // which results in core dumps being written to the postgres data directory as core.<pid>.
1539 : //
1540 : // Use that as the default location and pattern, except on macOS, where core dumps are written
1541 : // to the /cores/ directory by default.
1542 : //
1543 : // With default Linux settings, the core dump file is called just "core", so check for
1544 : // that too.
1545 0 : pub fn check_for_core_dumps(&self) -> Result<()> {
1546 0 : let core_dump_dir = match std::env::consts::OS {
1547 0 : "macos" => Path::new("/cores/"),
1548 0 : _ => Path::new(&self.pgdata),
1549 : };
1550 :
1551 : // Collect core dump paths if any
1552 0 : info!("checking for core dumps in {}", core_dump_dir.display());
1553 0 : let files = fs::read_dir(core_dump_dir)?;
1554 0 : let cores = files.filter_map(|entry| {
1555 0 : let entry = entry.ok()?;
1556 :
1557 0 : let is_core_dump = match entry.file_name().to_str()? {
1558 0 : n if n.starts_with("core.") => true,
1559 0 : "core" => true,
1560 0 : _ => false,
1561 : };
1562 0 : if is_core_dump {
1563 0 : Some(entry.path())
1564 : } else {
1565 0 : None
1566 : }
1567 0 : });
1568 :
1569 : // Print backtrace for each core dump
1570 0 : for core_path in cores {
1571 0 : warn!(
1572 0 : "core dump found: {}, collecting backtrace",
1573 0 : core_path.display()
1574 : );
1575 :
1576 : // Try first with gdb
1577 0 : let backtrace = Command::new("gdb")
1578 0 : .args(["--batch", "-q", "-ex", "bt", &self.pgbin])
1579 0 : .arg(&core_path)
1580 0 : .output();
1581 :
1582 : // Try lldb if no gdb is found -- that is handy for local testing on macOS
1583 0 : let backtrace = match backtrace {
1584 0 : Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
1585 0 : warn!("cannot find gdb, trying lldb");
1586 0 : Command::new("lldb")
1587 0 : .arg("-c")
1588 0 : .arg(&core_path)
1589 0 : .args(["--batch", "-o", "bt all", "-o", "quit"])
1590 0 : .output()
1591 : }
1592 0 : _ => backtrace,
1593 0 : }?;
1594 :
1595 0 : warn!(
1596 0 : "core dump backtrace: {}",
1597 0 : String::from_utf8_lossy(&backtrace.stdout)
1598 : );
1599 0 : warn!(
1600 0 : "debugger stderr: {}",
1601 0 : String::from_utf8_lossy(&backtrace.stderr)
1602 : );
1603 : }
1604 :
1605 0 : Ok(())
1606 0 : }
1607 :
1608 : /// Select `pg_stat_statements` data and return it as a stringified JSON
1609 0 : pub async fn collect_insights(&self) -> String {
1610 0 : let mut result_rows: Vec<String> = Vec::new();
1611 0 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:collect_insights"));
1612 0 : let connect_result = conf.connect(NoTls).await;
1613 0 : let (client, connection) = connect_result.unwrap();
1614 0 : tokio::spawn(async move {
1615 0 : if let Err(e) = connection.await {
1616 0 : eprintln!("connection error: {}", e);
1617 0 : }
1618 0 : });
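     : // Grab the top 100 statements by combined mean execution and planning time,
     : // excluding queries issued by the internal cloud_admin role, returning each row as JSON.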
1619 0 : let result = client
1620 0 : .simple_query(
1621 0 : "SELECT
1622 0 : row_to_json(pg_stat_statements)
1623 0 : FROM
1624 0 : pg_stat_statements
1625 0 : WHERE
1626 0 : userid != 'cloud_admin'::regrole::oid
1627 0 : ORDER BY
1628 0 : (mean_exec_time + mean_plan_time) DESC
1629 0 : LIMIT 100",
1630 0 : )
1631 0 : .await;
1632 :
1633 0 : if let Ok(raw_rows) = result {
1634 0 : for message in raw_rows.iter() {
1635 0 : if let postgres::SimpleQueryMessage::Row(row) = message {
1636 0 : if let Some(json) = row.get(0) {
1637 0 : result_rows.push(json.to_string());
1638 0 : }
1639 0 : }
1640 : }
1641 :
1642 0 : format!("{{\"pg_stat_statements\": [{}]}}", result_rows.join(","))
1643 : } else {
1644 0 : "{{\"pg_stat_statements\": []}}".to_string()
1645 : }
1646 0 : }
1647 :
1648 : // Download an extension archive, unzip it, and place the files in the correct locations.
1649 0 : pub async fn download_extension(
1650 0 : &self,
1651 0 : real_ext_name: String,
1652 0 : ext_path: RemotePath,
1653 0 : ) -> Result<u64, DownloadError> {
1654 0 : let ext_remote_storage =
1655 0 : self.ext_remote_storage
1656 0 : .as_ref()
1657 0 : .ok_or(DownloadError::BadInput(anyhow::anyhow!(
1658 0 : "Remote extensions storage is not configured",
1659 0 : )))?;
1660 :
1661 0 : let ext_archive_name = ext_path.object_name().expect("bad path");
1662 0 :
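     : // ext_download_progress maps an archive name to (download start time, completed flag).
     : // It is shared between concurrent requests, so only one of them actually downloads
     : // the archive while the others can wait for it below.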
1663 0 : let mut first_try = false;
1664 0 : if !self
1665 0 : .ext_download_progress
1666 0 : .read()
1667 0 : .expect("lock err")
1668 0 : .contains_key(ext_archive_name)
1669 0 : {
1670 0 : self.ext_download_progress
1671 0 : .write()
1672 0 : .expect("lock err")
1673 0 : .insert(ext_archive_name.to_string(), (Utc::now(), false));
1674 0 : first_try = true;
1675 0 : }
1676 0 : let (download_start, download_completed) =
1677 0 : self.ext_download_progress.read().expect("lock err")[ext_archive_name];
1678 0 : let start_time_delta = Utc::now()
1679 0 : .signed_duration_since(download_start)
1680 0 : .to_std()
1681 0 : .unwrap()
1682 0 : .as_millis() as u64;
1683 :
1684 : // how long to wait for extension download if it was started by another process
1685 : const HANG_TIMEOUT: u64 = 3000; // milliseconds
1686 :
1687 0 : if download_completed {
1688 0 : info!("extension already downloaded, skipping re-download");
1689 0 : return Ok(0);
1690 0 : } else if start_time_delta < HANG_TIMEOUT && !first_try {
1691 0 : info!("download {ext_archive_name} already started by another process, hanging untill completion or timeout");
1692 0 : let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500));
1693 : loop {
1694 0 : info!("waiting for download");
1695 0 : interval.tick().await;
1696 0 : let (_, download_completed_now) =
1697 0 : self.ext_download_progress.read().expect("lock")[ext_archive_name];
1698 0 : if download_completed_now {
1699 0 : info!("download finished by whoever else downloaded it");
1700 0 : return Ok(0);
1701 0 : }
1702 : }
1703 : // NOTE: the above loop is terminated by the timeout
1704 : // of the download function itself.
1705 0 : }
1706 0 :
1707 0 : // If the extension hasn't been downloaded before, or the previous
1708 0 : // download attempt started at least HANG_TIMEOUT ms ago,
1709 0 : // then try to download it here.
1710 0 : info!("downloading new extension {ext_archive_name}");
1711 :
1712 0 : let download_size = extension_server::download_extension(
1713 0 : &real_ext_name,
1714 0 : &ext_path,
1715 0 : ext_remote_storage,
1716 0 : &self.pgbin,
1717 0 : )
1718 0 : .await
1719 0 : .map_err(DownloadError::Other);
1720 0 :
1721 0 : if download_size.is_ok() {
1722 0 : self.ext_download_progress
1723 0 : .write()
1724 0 : .expect("bad lock")
1725 0 : .insert(ext_archive_name.to_string(), (download_start, true));
1726 0 : }
1727 :
1728 0 : download_size
1729 0 : }
1730 :
1731 0 : pub async fn set_role_grants(
1732 0 : &self,
1733 0 : db_name: &PgIdent,
1734 0 : schema_name: &PgIdent,
1735 0 : privileges: &[Privilege],
1736 0 : role_name: &PgIdent,
1737 0 : ) -> Result<()> {
1738 : use tokio_postgres::NoTls;
1739 :
1740 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:set_role_grants"));
1741 0 : conf.dbname(db_name);
1742 :
1743 0 : let (db_client, conn) = conf
1744 0 : .connect(NoTls)
1745 0 : .await
1746 0 : .context("Failed to connect to the database")?;
1747 0 : tokio::spawn(conn);
1748 0 :
1749 0 : // TODO: support other types of grants apart from schemas?
1750 0 : let query = format!(
1751 0 : "GRANT {} ON SCHEMA {} TO {}",
1752 0 : privileges
1753 0 : .iter()
1754 0 : // Privilege names are not quoted: they are part of the command
1755 0 : // and come from a fixed set of keywords, so they are already safe.
1756 0 : .map(|p| p.as_str())
1757 0 : .collect::<Vec<&'static str>>()
1758 0 : .join(", "),
1759 0 : // quote the schema and role name as identifiers to sanitize them.
1760 0 : schema_name.pg_quote(),
1761 0 : role_name.pg_quote(),
1762 0 : );
1763 0 : db_client
1764 0 : .simple_query(&query)
1765 0 : .await
1766 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
1767 :
1768 0 : Ok(())
1769 0 : }
1770 :
1771 0 : pub async fn install_extension(
1772 0 : &self,
1773 0 : ext_name: &PgIdent,
1774 0 : db_name: &PgIdent,
1775 0 : ext_version: ExtVersion,
1776 0 : ) -> Result<ExtVersion> {
1777 : use tokio_postgres::NoTls;
1778 :
1779 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:install_extension"));
1780 0 : conf.dbname(db_name);
1781 :
1782 0 : let (db_client, conn) = conf
1783 0 : .connect(NoTls)
1784 0 : .await
1785 0 : .context("Failed to connect to the database")?;
1786 0 : tokio::spawn(conn);
1787 0 :
1788 0 : let version_query = "SELECT extversion FROM pg_extension WHERE extname = $1";
1789 0 : let version: Option<ExtVersion> = db_client
1790 0 : .query_opt(version_query, &[&ext_name])
1791 0 : .await
1792 0 : .with_context(|| format!("Failed to execute query: {}", version_query))?
1793 0 : .map(|row| row.get(0));
1794 0 :
1795 0 : // Sanitize the inputs by quoting them as Postgres identifiers.
1796 0 : let ext_name: String = ext_name.pg_quote();
1797 0 : let quoted_version: String = ext_version.pg_quote();
1798 :
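     : // If the extension is already installed at the requested version this is a no-op;
     : // otherwise ALTER it to that version, or CREATE it if it is not installed at all.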
1799 0 : if let Some(installed_version) = version {
1800 0 : if installed_version == ext_version {
1801 0 : return Ok(installed_version);
1802 0 : }
1803 0 : let query = format!("ALTER EXTENSION {ext_name} UPDATE TO {quoted_version}");
1804 0 : db_client
1805 0 : .simple_query(&query)
1806 0 : .await
1807 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
1808 : } else {
1809 0 : let query =
1810 0 : format!("CREATE EXTENSION IF NOT EXISTS {ext_name} WITH VERSION {quoted_version}");
1811 0 : db_client
1812 0 : .simple_query(&query)
1813 0 : .await
1814 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
1815 : }
1816 :
1817 0 : Ok(ext_version)
1818 0 : }
1819 :
1820 : #[tokio::main]
1821 0 : pub async fn prepare_preload_libraries(
1822 0 : &self,
1823 0 : spec: &ComputeSpec,
1824 0 : ) -> Result<RemoteExtensionMetrics> {
1825 0 : if self.ext_remote_storage.is_none() {
1826 0 : return Ok(RemoteExtensionMetrics {
1827 0 : num_ext_downloaded: 0,
1828 0 : largest_ext_size: 0,
1829 0 : total_ext_download_size: 0,
1830 0 : });
1831 0 : }
1832 0 : let remote_extensions = spec
1833 0 : .remote_extensions
1834 0 : .as_ref()
1835 0 : .ok_or(anyhow::anyhow!("Remote extensions are not configured"))?;
1836 0 :
1837 0 : info!("parse shared_preload_libraries from spec.cluster.settings");
1838 0 : let mut libs_vec = Vec::new();
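     : // E.g. a value of 'neon,pg_stat_statements' yields ["pg_stat_statements"]: the quotes
     : // and separators are stripped, and empty entries and the "neon" entry are dropped.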
1839 0 : if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
1840 0 : libs_vec = libs
1841 0 : .split(&[',', '\'', ' '])
1842 0 : .filter(|s| *s != "neon" && !s.is_empty())
1843 0 : .map(str::to_string)
1844 0 : .collect();
1845 0 : }
1846 0 : info!("parse shared_preload_libraries from provided postgresql.conf");
1847 0 :
1848 0 : // This path is used by neon_local and the Python tests.
1849 0 : if let Some(conf) = &spec.cluster.postgresql_conf {
1850 0 : let conf_lines = conf.split('\n').collect::<Vec<&str>>();
1851 0 : let mut shared_preload_libraries_line = "";
1852 0 : for line in conf_lines {
1853 0 : if line.starts_with("shared_preload_libraries") {
1854 0 : shared_preload_libraries_line = line;
1855 0 : }
1856 0 : }
1857 0 : let mut preload_libs_vec = Vec::new();
1858 0 : if let Some(libs) = shared_preload_libraries_line.split("='").nth(1) {
1859 0 : preload_libs_vec = libs
1860 0 : .split(&[',', '\'', ' '])
1861 0 : .filter(|s| *s != "neon" && !s.is_empty())
1862 0 : .map(str::to_string)
1863 0 : .collect();
1864 0 : }
1865 0 : libs_vec.extend(preload_libs_vec);
1866 0 : }
1867 0 :
1868 0 : // Don't try to download libraries that are not in the index.
1869 0 : // Assume that they are already present locally.
1870 0 : libs_vec.retain(|lib| remote_extensions.library_index.contains_key(lib));
1871 0 :
1872 0 : info!("Downloading to shared preload libraries: {:?}", &libs_vec);
1873 0 :
1874 0 : let mut download_tasks = Vec::new();
1875 0 : for library in &libs_vec {
1876 0 : let (ext_name, ext_path) =
1877 0 : remote_extensions.get_ext(library, true, &self.build_tag, &self.pgversion)?;
1878 0 : download_tasks.push(self.download_extension(ext_name, ext_path));
1879 0 : }
1880 0 : let results = join_all(download_tasks).await;
1881 0 :
1882 0 : let mut remote_ext_metrics = RemoteExtensionMetrics {
1883 0 : num_ext_downloaded: 0,
1884 0 : largest_ext_size: 0,
1885 0 : total_ext_download_size: 0,
1886 0 : };
1887 0 : for result in results {
1888 0 : let download_size = match result {
1889 0 : Ok(res) => {
1890 0 : remote_ext_metrics.num_ext_downloaded += 1;
1891 0 : res
1892 0 : }
1893 0 : Err(err) => {
1894 0 : // if we failed to download an extension, we don't want to fail the whole
1895 0 : // process, but we do want to log the error
1896 0 : error!("Failed to download extension: {}", err);
1897 0 : 0
1898 0 : }
1899 0 : };
1900 0 :
1901 0 : remote_ext_metrics.largest_ext_size =
1902 0 : std::cmp::max(remote_ext_metrics.largest_ext_size, download_size);
1903 0 : remote_ext_metrics.total_ext_download_size += download_size;
1904 0 : }
1905 0 : Ok(remote_ext_metrics)
1906 0 : }
1907 :
1908 : /// Waits until the current thread receives a state-change notification and
1909 : /// the pageserver connection string has changed.
1910 : ///
1911 : /// The operation will time out after a specified duration.
1912 0 : pub fn wait_timeout_while_pageserver_connstr_unchanged(&self, duration: Duration) {
1913 0 : let state = self.state.lock().unwrap();
1914 0 : let old_pageserver_connstr = state
1915 0 : .pspec
1916 0 : .as_ref()
1917 0 : .expect("spec must be set")
1918 0 : .pageserver_connstr
1919 0 : .clone();
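     : // Condvar::wait_timeout_while keeps waiting while the closure returns true, i.e. while
     : // the connection string is unchanged; it returns on a change notification or after `duration`.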
1920 0 : let mut unchanged = true;
1921 0 : let _ = self
1922 0 : .state_changed
1923 0 : .wait_timeout_while(state, duration, |s| {
1924 0 : let pageserver_connstr = &s
1925 0 : .pspec
1926 0 : .as_ref()
1927 0 : .expect("spec must be set")
1928 0 : .pageserver_connstr;
1929 0 : unchanged = pageserver_connstr == &old_pageserver_connstr;
1930 0 : unchanged
1931 0 : })
1932 0 : .unwrap();
1933 0 : if !unchanged {
1934 0 : info!("Pageserver config changed");
1935 0 : }
1936 0 : }
1937 : }
1938 :
1939 0 : pub fn forward_termination_signal() {
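     : // Terminate the sync-safekeepers child process first (SIGTERM), then Postgres itself
     : // with a 'fast' shutdown (SIGINT), if they are running.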
1940 0 : let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
1941 0 : if ss_pid != 0 {
1942 0 : let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
1943 0 : kill(ss_pid, Signal::SIGTERM).ok();
1944 0 : }
1945 0 : let pg_pid = PG_PID.load(Ordering::SeqCst);
1946 0 : if pg_pid != 0 {
1947 0 : let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
1948 0 : // Use 'fast' shutdown (SIGINT) because it also creates a shutdown checkpoint, which is important for
1949 0 : // read-only replicas (ROs) to get a list of running xacts faster instead of going through the CLOG.
1950 0 : // See https://www.postgresql.org/docs/current/server-shutdown.html for the list of modes and signals.
1951 0 : kill(pg_pid, Signal::SIGINT).ok();
1952 0 : }
1953 0 : }
|