Line data Source code
1 : use std::collections::HashMap;
2 : use std::env;
3 : use std::fs;
4 : use std::os::unix::fs::{symlink, PermissionsExt};
5 : use std::path::Path;
6 : use std::process::{Command, Stdio};
7 : use std::str::FromStr;
8 : use std::sync::atomic::AtomicU32;
9 : use std::sync::atomic::Ordering;
10 : use std::sync::{Condvar, Mutex, RwLock};
11 : use std::thread;
12 : use std::time::Duration;
13 : use std::time::Instant;
14 :
15 : use anyhow::{Context, Result};
16 : use chrono::{DateTime, Utc};
17 : use compute_api::spec::PgIdent;
18 : use futures::future::join_all;
19 : use futures::stream::FuturesUnordered;
20 : use futures::StreamExt;
21 : use nix::unistd::Pid;
22 : use postgres::error::SqlState;
23 : use postgres::{Client, NoTls};
24 : use tracing::{debug, error, info, instrument, warn};
25 : use utils::id::{TenantId, TimelineId};
26 : use utils::lsn::Lsn;
27 :
28 : use compute_api::privilege::Privilege;
29 : use compute_api::responses::{ComputeMetrics, ComputeStatus};
30 : use compute_api::spec::{ComputeFeature, ComputeMode, ComputeSpec, ExtVersion};
31 : use utils::measured_stream::MeasuredReader;
32 :
33 : use nix::sys::signal::{kill, Signal};
34 :
35 : use remote_storage::{DownloadError, RemotePath};
36 :
37 : use crate::checker::create_availability_check_data;
38 : use crate::installed_extensions::get_installed_extensions_sync;
39 : use crate::local_proxy;
40 : use crate::logger::inlinify;
41 : use crate::pg_helpers::*;
42 : use crate::spec::*;
43 : use crate::sync_sk::{check_if_synced, ping_safekeeper};
44 : use crate::{config, extension_server};
45 :
46 : pub static SYNC_SAFEKEEPERS_PID: AtomicU32 = AtomicU32::new(0);
47 : pub static PG_PID: AtomicU32 = AtomicU32::new(0);
48 :
49 : /// Compute node info shared across several `compute_ctl` threads.
50 : pub struct ComputeNode {
51 : // Url type maintains proper escaping
52 : pub connstr: url::Url,
53 : pub pgdata: String,
54 : pub pgbin: String,
55 : pub pgversion: String,
56 : /// We should only allow live re- / configuration of the compute node if
57 : /// it uses 'pull model', i.e. it can go to control-plane and fetch
58 : /// the latest configuration. Otherwise, there could be a case:
59 : /// - we start compute with some spec provided as argument
60 : /// - we push new spec and it does reconfiguration
61 : /// - but then something happens and compute pod / VM is destroyed,
62 : /// so k8s controller starts it again with the **old** spec
63 : ///
64 : /// and the same for empty computes:
65 : /// - we started compute without any spec
66 : /// - we push spec and it does configuration
67 : /// - but then it is restarted without any spec again
68 : pub live_config_allowed: bool,
69 : /// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
70 : /// To allow HTTP API server to serving status requests, while configuration
71 : /// is in progress, lock should be held only for short periods of time to do
72 : /// read/write, not the whole configuration process.
73 : pub state: Mutex<ComputeState>,
74 : /// `Condvar` to allow notifying waiters about state changes.
75 : pub state_changed: Condvar,
76 : /// the address of extension storage proxy gateway
77 : pub ext_remote_storage: Option<String>,
78 : // key: ext_archive_name, value: started download time, download_completed?
79 : pub ext_download_progress: RwLock<HashMap<String, (DateTime<Utc>, bool)>>,
80 : pub build_tag: String,
81 : }
82 :
83 : // store some metrics about download size that might impact startup time
84 : #[derive(Clone, Debug)]
85 : pub struct RemoteExtensionMetrics {
86 : num_ext_downloaded: u64,
87 : largest_ext_size: u64,
88 : total_ext_download_size: u64,
89 : }
90 :
91 : #[derive(Clone, Debug)]
92 : pub struct ComputeState {
93 : pub start_time: DateTime<Utc>,
94 : pub status: ComputeStatus,
95 : /// Timestamp of the last Postgres activity. It could be `None` if
96 : /// compute wasn't used since start.
97 : pub last_active: Option<DateTime<Utc>>,
98 : pub error: Option<String>,
99 : pub pspec: Option<ParsedSpec>,
100 : pub metrics: ComputeMetrics,
101 : }
102 :
103 : impl ComputeState {
104 0 : pub fn new() -> Self {
105 0 : Self {
106 0 : start_time: Utc::now(),
107 0 : status: ComputeStatus::Empty,
108 0 : last_active: None,
109 0 : error: None,
110 0 : pspec: None,
111 0 : metrics: ComputeMetrics::default(),
112 0 : }
113 0 : }
114 :
115 0 : pub fn set_status(&mut self, status: ComputeStatus, state_changed: &Condvar) {
116 0 : let prev = self.status;
117 0 : info!("Changing compute status from {} to {}", prev, status);
118 0 : self.status = status;
119 0 : state_changed.notify_all();
120 0 : }
121 :
122 0 : pub fn set_failed_status(&mut self, err: anyhow::Error, state_changed: &Condvar) {
123 0 : self.error = Some(format!("{err:?}"));
124 0 : self.set_status(ComputeStatus::Failed, state_changed);
125 0 : }
126 : }
127 :
128 : impl Default for ComputeState {
129 0 : fn default() -> Self {
130 0 : Self::new()
131 0 : }
132 : }
133 :
134 : #[derive(Clone, Debug)]
135 : pub struct ParsedSpec {
136 : pub spec: ComputeSpec,
137 : pub tenant_id: TenantId,
138 : pub timeline_id: TimelineId,
139 : pub pageserver_connstr: String,
140 : pub safekeeper_connstrings: Vec<String>,
141 : pub storage_auth_token: Option<String>,
142 : }
143 :
144 : impl TryFrom<ComputeSpec> for ParsedSpec {
145 : type Error = String;
146 0 : fn try_from(spec: ComputeSpec) -> Result<Self, String> {
147 : // Extract the options from the spec file that are needed to connect to
148 : // the storage system.
149 : //
150 : // For backwards-compatibility, the top-level fields in the spec file
151 : // may be empty. In that case, we need to dig them from the GUCs in the
152 : // cluster.settings field.
153 0 : let pageserver_connstr = spec
154 0 : .pageserver_connstring
155 0 : .clone()
156 0 : .or_else(|| spec.cluster.settings.find("neon.pageserver_connstring"))
157 0 : .ok_or("pageserver connstr should be provided")?;
158 0 : let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() {
159 0 : if matches!(spec.mode, ComputeMode::Primary) {
160 0 : spec.cluster
161 0 : .settings
162 0 : .find("neon.safekeepers")
163 0 : .ok_or("safekeeper connstrings should be provided")?
164 0 : .split(',')
165 0 : .map(|str| str.to_string())
166 0 : .collect()
167 : } else {
168 0 : vec![]
169 : }
170 : } else {
171 0 : spec.safekeeper_connstrings.clone()
172 : };
173 0 : let storage_auth_token = spec.storage_auth_token.clone();
174 0 : let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
175 0 : tenant_id
176 : } else {
177 0 : spec.cluster
178 0 : .settings
179 0 : .find("neon.tenant_id")
180 0 : .ok_or("tenant id should be provided")
181 0 : .map(|s| TenantId::from_str(&s))?
182 0 : .or(Err("invalid tenant id"))?
183 : };
184 0 : let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
185 0 : timeline_id
186 : } else {
187 0 : spec.cluster
188 0 : .settings
189 0 : .find("neon.timeline_id")
190 0 : .ok_or("timeline id should be provided")
191 0 : .map(|s| TimelineId::from_str(&s))?
192 0 : .or(Err("invalid timeline id"))?
193 : };
194 :
195 0 : Ok(ParsedSpec {
196 0 : spec,
197 0 : pageserver_connstr,
198 0 : safekeeper_connstrings,
199 0 : storage_auth_token,
200 0 : tenant_id,
201 0 : timeline_id,
202 0 : })
203 0 : }
204 : }
205 :
206 : /// If we are a VM, returns a [`Command`] that will run in the `neon-postgres`
207 : /// cgroup. Otherwise returns the default `Command::new(cmd)`
208 : ///
209 : /// This function should be used to start postgres, as it will start it in the
210 : /// neon-postgres cgroup if we are a VM. This allows autoscaling to control
211 : /// postgres' resource usage. The cgroup will exist in VMs because vm-builder
212 : /// creates it during the sysinit phase of its inittab.
213 0 : fn maybe_cgexec(cmd: &str) -> Command {
214 0 : // The cplane sets this env var for autoscaling computes.
215 0 : // use `var_os` so we don't have to worry about the variable being valid
216 0 : // unicode. Should never be an concern . . . but just in case
217 0 : if env::var_os("AUTOSCALING").is_some() {
218 0 : let mut command = Command::new("cgexec");
219 0 : command.args(["-g", "memory:neon-postgres"]);
220 0 : command.arg(cmd);
221 0 : command
222 : } else {
223 0 : Command::new(cmd)
224 : }
225 0 : }
226 :
227 : /// Create special neon_superuser role, that's a slightly nerfed version of a real superuser
228 : /// that we give to customers
229 0 : #[instrument(skip_all)]
230 : fn create_neon_superuser(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
231 : let roles = spec
232 : .cluster
233 : .roles
234 : .iter()
235 0 : .map(|r| escape_literal(&r.name))
236 : .collect::<Vec<_>>();
237 :
238 : let dbs = spec
239 : .cluster
240 : .databases
241 : .iter()
242 0 : .map(|db| escape_literal(&db.name))
243 : .collect::<Vec<_>>();
244 :
245 : let roles_decl = if roles.is_empty() {
246 : String::from("roles text[] := NULL;")
247 : } else {
248 : format!(
249 : r#"
250 : roles text[] := ARRAY(SELECT rolname
251 : FROM pg_catalog.pg_roles
252 : WHERE rolname IN ({}));"#,
253 : roles.join(", ")
254 : )
255 : };
256 :
257 : let database_decl = if dbs.is_empty() {
258 : String::from("dbs text[] := NULL;")
259 : } else {
260 : format!(
261 : r#"
262 : dbs text[] := ARRAY(SELECT datname
263 : FROM pg_catalog.pg_database
264 : WHERE datname IN ({}));"#,
265 : dbs.join(", ")
266 : )
267 : };
268 :
269 : // ALL PRIVILEGES grants CREATE, CONNECT, and TEMPORARY on all databases
270 : // (see https://www.postgresql.org/docs/current/ddl-priv.html)
271 : let query = format!(
272 : r#"
273 : DO $$
274 : DECLARE
275 : r text;
276 : {}
277 : {}
278 : BEGIN
279 : IF NOT EXISTS (
280 : SELECT FROM pg_catalog.pg_roles WHERE rolname = 'neon_superuser')
281 : THEN
282 : CREATE ROLE neon_superuser CREATEDB CREATEROLE NOLOGIN REPLICATION BYPASSRLS IN ROLE pg_read_all_data, pg_write_all_data;
283 : IF array_length(roles, 1) IS NOT NULL THEN
284 : EXECUTE format('GRANT neon_superuser TO %s',
285 : array_to_string(ARRAY(SELECT quote_ident(x) FROM unnest(roles) as x), ', '));
286 : FOREACH r IN ARRAY roles LOOP
287 : EXECUTE format('ALTER ROLE %s CREATEROLE CREATEDB', quote_ident(r));
288 : END LOOP;
289 : END IF;
290 : IF array_length(dbs, 1) IS NOT NULL THEN
291 : EXECUTE format('GRANT ALL PRIVILEGES ON DATABASE %s TO neon_superuser',
292 : array_to_string(ARRAY(SELECT quote_ident(x) FROM unnest(dbs) as x), ', '));
293 : END IF;
294 : END IF;
295 : END
296 : $$;"#,
297 : roles_decl, database_decl,
298 : );
299 : info!("Neon superuser created: {}", inlinify(&query));
300 : client
301 : .simple_query(&query)
302 0 : .map_err(|e| anyhow::anyhow!(e).context(query))?;
303 : Ok(())
304 : }
305 :
306 : impl ComputeNode {
307 : /// Check that compute node has corresponding feature enabled.
308 0 : pub fn has_feature(&self, feature: ComputeFeature) -> bool {
309 0 : let state = self.state.lock().unwrap();
310 :
311 0 : if let Some(s) = state.pspec.as_ref() {
312 0 : s.spec.features.contains(&feature)
313 : } else {
314 0 : false
315 : }
316 0 : }
317 :
318 0 : pub fn set_status(&self, status: ComputeStatus) {
319 0 : let mut state = self.state.lock().unwrap();
320 0 : state.set_status(status, &self.state_changed);
321 0 : }
322 :
323 0 : pub fn set_failed_status(&self, err: anyhow::Error) {
324 0 : let mut state = self.state.lock().unwrap();
325 0 : state.set_failed_status(err, &self.state_changed);
326 0 : }
327 :
328 0 : pub fn get_status(&self) -> ComputeStatus {
329 0 : self.state.lock().unwrap().status
330 0 : }
331 :
332 : // Remove `pgdata` directory and create it again with right permissions.
333 0 : fn create_pgdata(&self) -> Result<()> {
334 0 : // Ignore removal error, likely it is a 'No such file or directory (os error 2)'.
335 0 : // If it is something different then create_dir() will error out anyway.
336 0 : let _ok = fs::remove_dir_all(&self.pgdata);
337 0 : fs::create_dir(&self.pgdata)?;
338 0 : fs::set_permissions(&self.pgdata, fs::Permissions::from_mode(0o700))?;
339 :
340 0 : Ok(())
341 0 : }
342 :
343 : // Get basebackup from the libpq connection to pageserver using `connstr` and
344 : // unarchive it to `pgdata` directory overriding all its previous content.
345 0 : #[instrument(skip_all, fields(%lsn))]
346 : fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
347 : let spec = compute_state.pspec.as_ref().expect("spec must be set");
348 : let start_time = Instant::now();
349 :
350 : let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
351 : let mut config = postgres::Config::from_str(shard0_connstr)?;
352 :
353 : // Use the storage auth token from the config file, if given.
354 : // Note: this overrides any password set in the connection string.
355 : if let Some(storage_auth_token) = &spec.storage_auth_token {
356 : info!("Got storage auth token from spec file");
357 : config.password(storage_auth_token);
358 : } else {
359 : info!("Storage auth token not set");
360 : }
361 :
362 : // Connect to pageserver
363 : let mut client = config.connect(NoTls)?;
364 : let pageserver_connect_micros = start_time.elapsed().as_micros() as u64;
365 :
366 : let basebackup_cmd = match lsn {
367 : Lsn(0) => {
368 : if spec.spec.mode != ComputeMode::Primary {
369 : format!(
370 : "basebackup {} {} --gzip --replica",
371 : spec.tenant_id, spec.timeline_id
372 : )
373 : } else {
374 : format!("basebackup {} {} --gzip", spec.tenant_id, spec.timeline_id)
375 : }
376 : }
377 : _ => {
378 : if spec.spec.mode != ComputeMode::Primary {
379 : format!(
380 : "basebackup {} {} {} --gzip --replica",
381 : spec.tenant_id, spec.timeline_id, lsn
382 : )
383 : } else {
384 : format!(
385 : "basebackup {} {} {} --gzip",
386 : spec.tenant_id, spec.timeline_id, lsn
387 : )
388 : }
389 : }
390 : };
391 :
392 : let copyreader = client.copy_out(basebackup_cmd.as_str())?;
393 : let mut measured_reader = MeasuredReader::new(copyreader);
394 : let mut bufreader = std::io::BufReader::new(&mut measured_reader);
395 :
396 : // Read the archive directly from the `CopyOutReader`
397 : //
398 : // Set `ignore_zeros` so that unpack() reads all the Copy data and
399 : // doesn't stop at the end-of-archive marker. Otherwise, if the server
400 : // sends an Error after finishing the tarball, we will not notice it.
401 : let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut bufreader));
402 : ar.set_ignore_zeros(true);
403 : ar.unpack(&self.pgdata)?;
404 :
405 : // Report metrics
406 : let mut state = self.state.lock().unwrap();
407 : state.metrics.pageserver_connect_micros = pageserver_connect_micros;
408 : state.metrics.basebackup_bytes = measured_reader.get_byte_count() as u64;
409 : state.metrics.basebackup_ms = start_time.elapsed().as_millis() as u64;
410 : Ok(())
411 : }
412 :
413 : // Gets the basebackup in a retry loop
414 0 : #[instrument(skip_all, fields(%lsn))]
415 : pub fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
416 : let mut retry_period_ms = 500.0;
417 : let mut attempts = 0;
418 : const DEFAULT_ATTEMPTS: u16 = 10;
419 : #[cfg(feature = "testing")]
420 : let max_attempts = if let Ok(v) = env::var("NEON_COMPUTE_TESTING_BASEBACKUP_RETRIES") {
421 : u16::from_str(&v).unwrap()
422 : } else {
423 : DEFAULT_ATTEMPTS
424 : };
425 : #[cfg(not(feature = "testing"))]
426 : let max_attempts = DEFAULT_ATTEMPTS;
427 : loop {
428 : let result = self.try_get_basebackup(compute_state, lsn);
429 : match result {
430 : Ok(_) => {
431 : return result;
432 : }
433 : Err(ref e) if attempts < max_attempts => {
434 : warn!(
435 : "Failed to get basebackup: {} (attempt {}/{})",
436 : e, attempts, max_attempts
437 : );
438 : std::thread::sleep(std::time::Duration::from_millis(retry_period_ms as u64));
439 : retry_period_ms *= 1.5;
440 : }
441 : Err(_) => {
442 : return result;
443 : }
444 : }
445 : attempts += 1;
446 : }
447 : }
448 :
449 0 : pub async fn check_safekeepers_synced_async(
450 0 : &self,
451 0 : compute_state: &ComputeState,
452 0 : ) -> Result<Option<Lsn>> {
453 0 : // Construct a connection config for each safekeeper
454 0 : let pspec: ParsedSpec = compute_state
455 0 : .pspec
456 0 : .as_ref()
457 0 : .expect("spec must be set")
458 0 : .clone();
459 0 : let sk_connstrs: Vec<String> = pspec.safekeeper_connstrings.clone();
460 0 : let sk_configs = sk_connstrs.into_iter().map(|connstr| {
461 0 : // Format connstr
462 0 : let id = connstr.clone();
463 0 : let connstr = format!("postgresql://no_user@{}", connstr);
464 0 : let options = format!(
465 0 : "-c timeline_id={} tenant_id={}",
466 0 : pspec.timeline_id, pspec.tenant_id
467 0 : );
468 0 :
469 0 : // Construct client
470 0 : let mut config = tokio_postgres::Config::from_str(&connstr).unwrap();
471 0 : config.options(&options);
472 0 : if let Some(storage_auth_token) = pspec.storage_auth_token.clone() {
473 0 : config.password(storage_auth_token);
474 0 : }
475 :
476 0 : (id, config)
477 0 : });
478 0 :
479 0 : // Create task set to query all safekeepers
480 0 : let mut tasks = FuturesUnordered::new();
481 0 : let quorum = sk_configs.len() / 2 + 1;
482 0 : for (id, config) in sk_configs {
483 0 : let timeout = tokio::time::Duration::from_millis(100);
484 0 : let task = tokio::time::timeout(timeout, ping_safekeeper(id, config));
485 0 : tasks.push(tokio::spawn(task));
486 0 : }
487 :
488 : // Get a quorum of responses or errors
489 0 : let mut responses = Vec::new();
490 0 : let mut join_errors = Vec::new();
491 0 : let mut task_errors = Vec::new();
492 0 : let mut timeout_errors = Vec::new();
493 0 : while let Some(response) = tasks.next().await {
494 0 : match response {
495 0 : Ok(Ok(Ok(r))) => responses.push(r),
496 0 : Ok(Ok(Err(e))) => task_errors.push(e),
497 0 : Ok(Err(e)) => timeout_errors.push(e),
498 0 : Err(e) => join_errors.push(e),
499 : };
500 0 : if responses.len() >= quorum {
501 0 : break;
502 0 : }
503 0 : if join_errors.len() + task_errors.len() + timeout_errors.len() >= quorum {
504 0 : break;
505 0 : }
506 : }
507 :
508 : // In case of error, log and fail the check, but don't crash.
509 : // We're playing it safe because these errors could be transient
510 : // and we don't yet retry. Also being careful here allows us to
511 : // be backwards compatible with safekeepers that don't have the
512 : // TIMELINE_STATUS API yet.
513 0 : if responses.len() < quorum {
514 0 : error!(
515 0 : "failed sync safekeepers check {:?} {:?} {:?}",
516 : join_errors, task_errors, timeout_errors
517 : );
518 0 : return Ok(None);
519 0 : }
520 0 :
521 0 : Ok(check_if_synced(responses))
522 0 : }
523 :
524 : // Fast path for sync_safekeepers. If they're already synced we get the lsn
525 : // in one roundtrip. If not, we should do a full sync_safekeepers.
526 0 : pub fn check_safekeepers_synced(&self, compute_state: &ComputeState) -> Result<Option<Lsn>> {
527 0 : let start_time = Utc::now();
528 0 :
529 0 : // Run actual work with new tokio runtime
530 0 : let rt = tokio::runtime::Builder::new_current_thread()
531 0 : .enable_all()
532 0 : .build()
533 0 : .expect("failed to create rt");
534 0 : let result = rt.block_on(self.check_safekeepers_synced_async(compute_state));
535 0 :
536 0 : // Record runtime
537 0 : self.state.lock().unwrap().metrics.sync_sk_check_ms = Utc::now()
538 0 : .signed_duration_since(start_time)
539 0 : .to_std()
540 0 : .unwrap()
541 0 : .as_millis() as u64;
542 0 : result
543 0 : }
544 :
545 : // Run `postgres` in a special mode with `--sync-safekeepers` argument
546 : // and return the reported LSN back to the caller.
547 0 : #[instrument(skip_all)]
548 : pub fn sync_safekeepers(&self, storage_auth_token: Option<String>) -> Result<Lsn> {
549 : let start_time = Utc::now();
550 :
551 : let mut sync_handle = maybe_cgexec(&self.pgbin)
552 : .args(["--sync-safekeepers"])
553 : .env("PGDATA", &self.pgdata) // we cannot use -D in this mode
554 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
555 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
556 : } else {
557 : vec![]
558 : })
559 : .stdout(Stdio::piped())
560 : .stderr(Stdio::piped())
561 : .spawn()
562 : .expect("postgres --sync-safekeepers failed to start");
563 : SYNC_SAFEKEEPERS_PID.store(sync_handle.id(), Ordering::SeqCst);
564 :
565 : // `postgres --sync-safekeepers` will print all log output to stderr and
566 : // final LSN to stdout. So we leave stdout to collect LSN, while stderr logs
567 : // will be collected in a child thread.
568 : let stderr = sync_handle
569 : .stderr
570 : .take()
571 : .expect("stderr should be captured");
572 : let logs_handle = handle_postgres_logs(stderr);
573 :
574 : let sync_output = sync_handle
575 : .wait_with_output()
576 : .expect("postgres --sync-safekeepers failed");
577 : SYNC_SAFEKEEPERS_PID.store(0, Ordering::SeqCst);
578 :
579 : // Process has exited, so we can join the logs thread.
580 : let _ = logs_handle
581 : .join()
582 0 : .map_err(|e| tracing::error!("log thread panicked: {:?}", e));
583 :
584 : if !sync_output.status.success() {
585 : anyhow::bail!(
586 : "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
587 : sync_output.status,
588 : String::from_utf8(sync_output.stdout)
589 : .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
590 : );
591 : }
592 :
593 : self.state.lock().unwrap().metrics.sync_safekeepers_ms = Utc::now()
594 : .signed_duration_since(start_time)
595 : .to_std()
596 : .unwrap()
597 : .as_millis() as u64;
598 :
599 : let lsn = Lsn::from_str(String::from_utf8(sync_output.stdout)?.trim())?;
600 :
601 : Ok(lsn)
602 : }
603 :
604 : /// Do all the preparations like PGDATA directory creation, configuration,
605 : /// safekeepers sync, basebackup, etc.
606 0 : #[instrument(skip_all)]
607 : pub fn prepare_pgdata(
608 : &self,
609 : compute_state: &ComputeState,
610 : extension_server_port: u16,
611 : ) -> Result<()> {
612 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
613 : let spec = &pspec.spec;
614 : let pgdata_path = Path::new(&self.pgdata);
615 :
616 : // Remove/create an empty pgdata directory and put configuration there.
617 : self.create_pgdata()?;
618 : config::write_postgres_conf(
619 : &pgdata_path.join("postgresql.conf"),
620 : &pspec.spec,
621 : Some(extension_server_port),
622 : )?;
623 :
624 : // Syncing safekeepers is only safe with primary nodes: if a primary
625 : // is already connected it will be kicked out, so a secondary (standby)
626 : // cannot sync safekeepers.
627 : let lsn = match spec.mode {
628 : ComputeMode::Primary => {
629 : info!("checking if safekeepers are synced");
630 : let lsn = if let Ok(Some(lsn)) = self.check_safekeepers_synced(compute_state) {
631 : lsn
632 : } else {
633 : info!("starting safekeepers syncing");
634 : self.sync_safekeepers(pspec.storage_auth_token.clone())
635 0 : .with_context(|| "failed to sync safekeepers")?
636 : };
637 : info!("safekeepers synced at LSN {}", lsn);
638 : lsn
639 : }
640 : ComputeMode::Static(lsn) => {
641 : info!("Starting read-only node at static LSN {}", lsn);
642 : lsn
643 : }
644 : ComputeMode::Replica => {
645 : info!("Initializing standby from latest Pageserver LSN");
646 : Lsn(0)
647 : }
648 : };
649 :
650 : info!(
651 : "getting basebackup@{} from pageserver {}",
652 : lsn, &pspec.pageserver_connstr
653 : );
654 0 : self.get_basebackup(compute_state, lsn).with_context(|| {
655 0 : format!(
656 0 : "failed to get basebackup@{} from pageserver {}",
657 0 : lsn, &pspec.pageserver_connstr
658 0 : )
659 0 : })?;
660 :
661 : // Update pg_hba.conf received with basebackup.
662 : update_pg_hba(pgdata_path)?;
663 :
664 : // Place pg_dynshmem under /dev/shm. This allows us to use
665 : // 'dynamic_shared_memory_type = mmap' so that the files are placed in
666 : // /dev/shm, similar to how 'dynamic_shared_memory_type = posix' works.
667 : //
668 : // Why on earth don't we just stick to the 'posix' default, you might
669 : // ask. It turns out that making large allocations with 'posix' doesn't
670 : // work very well with autoscaling. The behavior we want is that:
671 : //
672 : // 1. You can make large DSM allocations, larger than the current RAM
673 : // size of the VM, without errors
674 : //
675 : // 2. If the allocated memory is really used, the VM is scaled up
676 : // automatically to accommodate that
677 : //
678 : // We try to make that possible by having swap in the VM. But with the
679 : // default 'posix' DSM implementation, we fail step 1, even when there's
680 : // plenty of swap available. PostgreSQL uses posix_fallocate() to create
681 : // the shmem segment, which is really just a file in /dev/shm in Linux,
682 : // but posix_fallocate() on tmpfs returns ENOMEM if the size is larger
683 : // than available RAM.
684 : //
685 : // Using 'dynamic_shared_memory_type = mmap' works around that, because
686 : // the Postgres 'mmap' DSM implementation doesn't use
687 : // posix_fallocate(). Instead, it uses repeated calls to write(2) to
688 : // fill the file with zeros. It's weird that that differs between
689 : // 'posix' and 'mmap', but we take advantage of it. When the file is
690 : // filled slowly with write(2), the kernel allows it to grow larger, as
691 : // long as there's swap available.
692 : //
693 : // In short, using 'dynamic_shared_memory_type = mmap' allows us one DSM
694 : // segment to be larger than currently available RAM. But because we
695 : // don't want to store it on a real file, which the kernel would try to
696 : // flush to disk, so symlink pg_dynshm to /dev/shm.
697 : //
698 : // We don't set 'dynamic_shared_memory_type = mmap' here, we let the
699 : // control plane control that option. If 'mmap' is not used, this
700 : // symlink doesn't affect anything.
701 : //
702 : // See https://github.com/neondatabase/autoscaling/issues/800
703 : std::fs::remove_dir(pgdata_path.join("pg_dynshmem"))?;
704 : symlink("/dev/shm/", pgdata_path.join("pg_dynshmem"))?;
705 :
706 : match spec.mode {
707 : ComputeMode::Primary => {}
708 : ComputeMode::Replica | ComputeMode::Static(..) => {
709 : add_standby_signal(pgdata_path)?;
710 : }
711 : }
712 :
713 : Ok(())
714 : }
715 :
716 : /// Start and stop a postgres process to warm up the VM for startup.
717 0 : pub fn prewarm_postgres(&self) -> Result<()> {
718 0 : info!("prewarming");
719 :
720 : // Create pgdata
721 0 : let pgdata = &format!("{}.warmup", self.pgdata);
722 0 : create_pgdata(pgdata)?;
723 :
724 : // Run initdb to completion
725 0 : info!("running initdb");
726 0 : let initdb_bin = Path::new(&self.pgbin).parent().unwrap().join("initdb");
727 0 : Command::new(initdb_bin)
728 0 : .args(["--pgdata", pgdata])
729 0 : .output()
730 0 : .expect("cannot start initdb process");
731 :
732 : // Write conf
733 : use std::io::Write;
734 0 : let conf_path = Path::new(pgdata).join("postgresql.conf");
735 0 : let mut file = std::fs::File::create(conf_path)?;
736 0 : writeln!(file, "shared_buffers=65536")?;
737 0 : writeln!(file, "port=51055")?; // Nobody should be connecting
738 0 : writeln!(file, "shared_preload_libraries = 'neon'")?;
739 :
740 : // Start postgres
741 0 : info!("starting postgres");
742 0 : let mut pg = maybe_cgexec(&self.pgbin)
743 0 : .args(["-D", pgdata])
744 0 : .spawn()
745 0 : .expect("cannot start postgres process");
746 0 :
747 0 : // Stop it when it's ready
748 0 : info!("waiting for postgres");
749 0 : wait_for_postgres(&mut pg, Path::new(pgdata))?;
750 : // SIGQUIT orders postgres to exit immediately. We don't want to SIGKILL
751 : // it to avoid orphaned processes prowling around while datadir is
752 : // wiped.
753 0 : let pm_pid = Pid::from_raw(pg.id() as i32);
754 0 : kill(pm_pid, Signal::SIGQUIT)?;
755 0 : info!("sent SIGQUIT signal");
756 0 : pg.wait()?;
757 0 : info!("done prewarming");
758 :
759 : // clean up
760 0 : let _ok = fs::remove_dir_all(pgdata);
761 0 : Ok(())
762 0 : }
763 :
764 : /// Start Postgres as a child process and manage DBs/roles.
765 : /// After that this will hang waiting on the postmaster process to exit.
766 : /// Returns a handle to the child process and a handle to the logs thread.
767 0 : #[instrument(skip_all)]
768 : pub fn start_postgres(
769 : &self,
770 : storage_auth_token: Option<String>,
771 : ) -> Result<(std::process::Child, std::thread::JoinHandle<()>)> {
772 : let pgdata_path = Path::new(&self.pgdata);
773 :
774 : // Run postgres as a child process.
775 : let mut pg = maybe_cgexec(&self.pgbin)
776 : .args(["-D", &self.pgdata])
777 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
778 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
779 : } else {
780 : vec![]
781 : })
782 : .stderr(Stdio::piped())
783 : .spawn()
784 : .expect("cannot start postgres process");
785 : PG_PID.store(pg.id(), Ordering::SeqCst);
786 :
787 : // Start a thread to collect logs from stderr.
788 : let stderr = pg.stderr.take().expect("stderr should be captured");
789 : let logs_handle = handle_postgres_logs(stderr);
790 :
791 : wait_for_postgres(&mut pg, pgdata_path)?;
792 :
793 : Ok((pg, logs_handle))
794 : }
795 :
796 : /// Do post configuration of the already started Postgres. This function spawns a background thread to
797 : /// configure the database after applying the compute spec. Currently, it upgrades the neon extension
798 : /// version. In the future, it may upgrade all 3rd-party extensions.
799 0 : #[instrument(skip_all)]
800 : pub fn post_apply_config(&self) -> Result<()> {
801 : let connstr = self.connstr.clone();
802 0 : thread::spawn(move || {
803 0 : let func = || {
804 0 : let mut client = Client::connect(connstr.as_str(), NoTls)?;
805 0 : handle_neon_extension_upgrade(&mut client)
806 0 : .context("handle_neon_extension_upgrade")?;
807 0 : Ok::<_, anyhow::Error>(())
808 0 : };
809 0 : if let Err(err) = func() {
810 0 : error!("error while post_apply_config: {err:#}");
811 0 : }
812 0 : });
813 : Ok(())
814 : }
815 :
816 : /// Do initial configuration of the already started Postgres.
817 0 : #[instrument(skip_all)]
818 : pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
819 : // If connection fails,
820 : // it may be the old node with `zenith_admin` superuser.
821 : //
822 : // In this case we need to connect with old `zenith_admin` name
823 : // and create new user. We cannot simply rename connected user,
824 : // but we can create a new one and grant it all privileges.
825 : let mut connstr = self.connstr.clone();
826 : connstr
827 : .query_pairs_mut()
828 : .append_pair("application_name", "apply_config");
829 :
830 : let mut client = match Client::connect(connstr.as_str(), NoTls) {
831 : Err(e) => match e.code() {
832 : Some(&SqlState::INVALID_PASSWORD)
833 : | Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => {
834 : // connect with zenith_admin if cloud_admin could not authenticate
835 : info!(
836 : "cannot connect to postgres: {}, retrying with `zenith_admin` username",
837 : e
838 : );
839 : let mut zenith_admin_connstr = connstr.clone();
840 :
841 : zenith_admin_connstr
842 : .set_username("zenith_admin")
843 0 : .map_err(|_| anyhow::anyhow!("invalid connstr"))?;
844 :
845 : let mut client =
846 : Client::connect(zenith_admin_connstr.as_str(), NoTls)
847 : .context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
848 : // Disable forwarding so that users don't get a cloud_admin role
849 :
850 0 : let mut func = || {
851 0 : client.simple_query("SET neon.forward_ddl = false")?;
852 0 : client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
853 0 : client.simple_query("GRANT zenith_admin TO cloud_admin")?;
854 0 : Ok::<_, anyhow::Error>(())
855 0 : };
856 : func().context("apply_config setup cloud_admin")?;
857 :
858 : drop(client);
859 :
860 : // reconnect with connstring with expected name
861 : Client::connect(connstr.as_str(), NoTls)?
862 : }
863 : _ => return Err(e.into()),
864 : },
865 : Ok(client) => client,
866 : };
867 :
868 : // Disable DDL forwarding because control plane already knows about these roles/databases.
869 : client
870 : .simple_query("SET neon.forward_ddl = false")
871 : .context("apply_config SET neon.forward_ddl = false")?;
872 :
873 : // Proceed with post-startup configuration. Note, that order of operations is important.
874 : let spec = &compute_state.pspec.as_ref().expect("spec must be set").spec;
875 : create_neon_superuser(spec, &mut client).context("apply_config create_neon_superuser")?;
876 : cleanup_instance(&mut client).context("apply_config cleanup_instance")?;
877 : handle_roles(spec, &mut client).context("apply_config handle_roles")?;
878 : handle_databases(spec, &mut client).context("apply_config handle_databases")?;
879 : handle_role_deletions(spec, connstr.as_str(), &mut client)
880 : .context("apply_config handle_role_deletions")?;
881 : handle_grants(
882 : spec,
883 : &mut client,
884 : connstr.as_str(),
885 : self.has_feature(ComputeFeature::AnonExtension),
886 : )
887 : .context("apply_config handle_grants")?;
888 : handle_extensions(spec, &mut client).context("apply_config handle_extensions")?;
889 : handle_extension_neon(&mut client).context("apply_config handle_extension_neon")?;
890 : create_availability_check_data(&mut client)
891 : .context("apply_config create_availability_check_data")?;
892 :
893 : // 'Close' connection
894 : drop(client);
895 :
896 : if let Some(ref local_proxy) = spec.local_proxy_config {
897 : info!("configuring local_proxy");
898 : local_proxy::configure(local_proxy).context("apply_config local_proxy")?;
899 : }
900 :
901 : // Run migrations separately to not hold up cold starts
902 0 : thread::spawn(move || {
903 0 : let mut connstr = connstr.clone();
904 0 : connstr
905 0 : .query_pairs_mut()
906 0 : .append_pair("application_name", "migrations");
907 0 :
908 0 : match Client::connect(connstr.as_str(), NoTls) {
909 0 : Ok(mut client) => {
910 0 : if let Err(e) =
911 0 : handle_migrations(&mut client).context("apply_config handle_migrations")
912 : {
913 0 : error!("Failed to run migrations: {}", e);
914 0 : }
915 : }
916 0 : Err(e) => {
917 0 : error!(
918 0 : "Failed to connect to the compute for running migrations: {}",
919 : e
920 : );
921 : }
922 : };
923 0 : });
924 : Ok(())
925 : }
926 :
927 : // Wrapped this around `pg_ctl reload`, but right now we don't use
928 : // `pg_ctl` for start / stop.
929 0 : #[instrument(skip_all)]
930 : fn pg_reload_conf(&self) -> Result<()> {
931 : let pgctl_bin = Path::new(&self.pgbin).parent().unwrap().join("pg_ctl");
932 : Command::new(pgctl_bin)
933 : .args(["reload", "-D", &self.pgdata])
934 : .output()
935 : .expect("cannot run pg_ctl process");
936 : Ok(())
937 : }
938 :
939 : /// Similar to `apply_config()`, but does a bit different sequence of operations,
940 : /// as it's used to reconfigure a previously started and configured Postgres node.
941 0 : #[instrument(skip_all)]
942 : pub fn reconfigure(&self) -> Result<()> {
943 : let spec = self.state.lock().unwrap().pspec.clone().unwrap().spec;
944 :
945 : if let Some(ref pgbouncer_settings) = spec.pgbouncer_settings {
946 : info!("tuning pgbouncer");
947 :
948 : let rt = tokio::runtime::Builder::new_current_thread()
949 : .enable_all()
950 : .build()
951 : .expect("failed to create rt");
952 :
953 : // Spawn a thread to do the tuning,
954 : // so that we don't block the main thread that starts Postgres.
955 : let pgbouncer_settings = pgbouncer_settings.clone();
956 0 : let _handle = thread::spawn(move || {
957 0 : let res = rt.block_on(tune_pgbouncer(pgbouncer_settings));
958 0 : if let Err(err) = res {
959 0 : error!("error while tuning pgbouncer: {err:?}");
960 0 : }
961 0 : });
962 : }
963 :
964 : if let Some(ref local_proxy) = spec.local_proxy_config {
965 : info!("configuring local_proxy");
966 :
967 : // Spawn a thread to do the configuration,
968 : // so that we don't block the main thread that starts Postgres.
969 : let local_proxy = local_proxy.clone();
970 0 : let _handle = Some(thread::spawn(move || {
971 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
972 0 : error!("error while configuring local_proxy: {err:?}");
973 0 : }
974 0 : }));
975 : }
976 :
977 : // Write new config
978 : let pgdata_path = Path::new(&self.pgdata);
979 : let postgresql_conf_path = pgdata_path.join("postgresql.conf");
980 : config::write_postgres_conf(&postgresql_conf_path, &spec, None)?;
981 : // temporarily reset max_cluster_size in config
982 : // to avoid the possibility of hitting the limit, while we are reconfiguring:
983 : // creating new extensions, roles, etc...
984 0 : config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
985 0 : self.pg_reload_conf()?;
986 :
987 0 : let mut client = Client::connect(self.connstr.as_str(), NoTls)?;
988 :
989 : // Proceed with post-startup configuration. Note, that order of operations is important.
990 : // Disable DDL forwarding because control plane already knows about these roles/databases.
991 0 : if spec.mode == ComputeMode::Primary {
992 0 : client.simple_query("SET neon.forward_ddl = false")?;
993 0 : cleanup_instance(&mut client)?;
994 0 : handle_roles(&spec, &mut client)?;
995 0 : handle_databases(&spec, &mut client)?;
996 0 : handle_role_deletions(&spec, self.connstr.as_str(), &mut client)?;
997 0 : handle_grants(
998 0 : &spec,
999 0 : &mut client,
1000 0 : self.connstr.as_str(),
1001 0 : self.has_feature(ComputeFeature::AnonExtension),
1002 0 : )?;
1003 0 : handle_extensions(&spec, &mut client)?;
1004 0 : handle_extension_neon(&mut client)?;
1005 : // We can skip handle_migrations here because a new migration can only appear
1006 : // if we have a new version of the compute_ctl binary, which can only happen
1007 : // if compute got restarted, in which case we'll end up inside of apply_config
1008 : // instead of reconfigure.
1009 0 : }
1010 :
1011 : // 'Close' connection
1012 0 : drop(client);
1013 0 :
1014 0 : Ok(())
1015 0 : })?;
1016 :
1017 : self.pg_reload_conf()?;
1018 :
1019 : let unknown_op = "unknown".to_string();
1020 : let op_id = spec.operation_uuid.as_ref().unwrap_or(&unknown_op);
1021 : info!(
1022 : "finished reconfiguration of compute node for operation {}",
1023 : op_id
1024 : );
1025 :
1026 : Ok(())
1027 : }
1028 :
1029 0 : #[instrument(skip_all)]
1030 : pub fn start_compute(
1031 : &self,
1032 : extension_server_port: u16,
1033 : ) -> Result<(std::process::Child, std::thread::JoinHandle<()>)> {
1034 : let compute_state = self.state.lock().unwrap().clone();
1035 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
1036 : info!(
1037 : "starting compute for project {}, operation {}, tenant {}, timeline {}",
1038 : pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None"),
1039 : pspec.spec.operation_uuid.as_deref().unwrap_or("None"),
1040 : pspec.tenant_id,
1041 : pspec.timeline_id,
1042 : );
1043 :
1044 : // tune pgbouncer
1045 : if let Some(pgbouncer_settings) = &pspec.spec.pgbouncer_settings {
1046 : info!("tuning pgbouncer");
1047 :
1048 : let rt = tokio::runtime::Builder::new_current_thread()
1049 : .enable_all()
1050 : .build()
1051 : .expect("failed to create rt");
1052 :
1053 : // Spawn a thread to do the tuning,
1054 : // so that we don't block the main thread that starts Postgres.
1055 : let pgbouncer_settings = pgbouncer_settings.clone();
1056 0 : let _handle = thread::spawn(move || {
1057 0 : let res = rt.block_on(tune_pgbouncer(pgbouncer_settings));
1058 0 : if let Err(err) = res {
1059 0 : error!("error while tuning pgbouncer: {err:?}");
1060 0 : }
1061 0 : });
1062 : }
1063 :
1064 : if let Some(local_proxy) = &pspec.spec.local_proxy_config {
1065 : info!("configuring local_proxy");
1066 :
1067 : // Spawn a thread to do the configuration,
1068 : // so that we don't block the main thread that starts Postgres.
1069 : let local_proxy = local_proxy.clone();
1070 0 : let _handle = thread::spawn(move || {
1071 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
1072 0 : error!("error while configuring local_proxy: {err:?}");
1073 0 : }
1074 0 : });
1075 : }
1076 :
1077 : info!(
1078 : "start_compute spec.remote_extensions {:?}",
1079 : pspec.spec.remote_extensions
1080 : );
1081 :
1082 : // This part is sync, because we need to download
1083 : // remote shared_preload_libraries before postgres start (if any)
1084 : if let Some(remote_extensions) = &pspec.spec.remote_extensions {
1085 : // First, create control files for all availale extensions
1086 : extension_server::create_control_files(remote_extensions, &self.pgbin);
1087 :
1088 : let library_load_start_time = Utc::now();
1089 : let remote_ext_metrics = self.prepare_preload_libraries(&pspec.spec)?;
1090 :
1091 : let library_load_time = Utc::now()
1092 : .signed_duration_since(library_load_start_time)
1093 : .to_std()
1094 : .unwrap()
1095 : .as_millis() as u64;
1096 : let mut state = self.state.lock().unwrap();
1097 : state.metrics.load_ext_ms = library_load_time;
1098 : state.metrics.num_ext_downloaded = remote_ext_metrics.num_ext_downloaded;
1099 : state.metrics.largest_ext_size = remote_ext_metrics.largest_ext_size;
1100 : state.metrics.total_ext_download_size = remote_ext_metrics.total_ext_download_size;
1101 : info!(
1102 : "Loading shared_preload_libraries took {:?}ms",
1103 : library_load_time
1104 : );
1105 : info!("{:?}", remote_ext_metrics);
1106 : }
1107 :
1108 : self.prepare_pgdata(&compute_state, extension_server_port)?;
1109 :
1110 : let start_time = Utc::now();
1111 : let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
1112 :
1113 : let config_time = Utc::now();
1114 : if pspec.spec.mode == ComputeMode::Primary {
1115 : if !pspec.spec.skip_pg_catalog_updates {
1116 : let pgdata_path = Path::new(&self.pgdata);
1117 : // temporarily reset max_cluster_size in config
1118 : // to avoid the possibility of hitting the limit, while we are applying config:
1119 : // creating new extensions, roles, etc...
1120 : config::with_compute_ctl_tmp_override(
1121 : pgdata_path,
1122 : "neon.max_cluster_size=-1",
1123 0 : || {
1124 0 : self.pg_reload_conf()?;
1125 :
1126 0 : self.apply_config(&compute_state)?;
1127 :
1128 0 : Ok(())
1129 0 : },
1130 : )?;
1131 : self.pg_reload_conf()?;
1132 : }
1133 : self.post_apply_config()?;
1134 :
1135 : let connstr = self.connstr.clone();
1136 0 : thread::spawn(move || {
1137 0 : get_installed_extensions_sync(connstr).context("get_installed_extensions")
1138 0 : });
1139 : }
1140 :
1141 : let startup_end_time = Utc::now();
1142 : {
1143 : let mut state = self.state.lock().unwrap();
1144 : state.metrics.start_postgres_ms = config_time
1145 : .signed_duration_since(start_time)
1146 : .to_std()
1147 : .unwrap()
1148 : .as_millis() as u64;
1149 : state.metrics.config_ms = startup_end_time
1150 : .signed_duration_since(config_time)
1151 : .to_std()
1152 : .unwrap()
1153 : .as_millis() as u64;
1154 : state.metrics.total_startup_ms = startup_end_time
1155 : .signed_duration_since(compute_state.start_time)
1156 : .to_std()
1157 : .unwrap()
1158 : .as_millis() as u64;
1159 : }
1160 : self.set_status(ComputeStatus::Running);
1161 :
1162 : info!(
1163 : "finished configuration of compute for project {}",
1164 : pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None")
1165 : );
1166 :
1167 : // Log metrics so that we can search for slow operations in logs
1168 : let metrics = {
1169 : let state = self.state.lock().unwrap();
1170 : state.metrics.clone()
1171 : };
1172 : info!(?metrics, "compute start finished");
1173 :
1174 : Ok(pg_process)
1175 : }
1176 :
1177 : /// Update the `last_active` in the shared state, but ensure that it's a more recent one.
1178 0 : pub fn update_last_active(&self, last_active: Option<DateTime<Utc>>) {
1179 0 : let mut state = self.state.lock().unwrap();
1180 0 : // NB: `Some(<DateTime>)` is always greater than `None`.
1181 0 : if last_active > state.last_active {
1182 0 : state.last_active = last_active;
1183 0 : debug!("set the last compute activity time to: {:?}", last_active);
1184 0 : }
1185 0 : }
1186 :
1187 : // Look for core dumps and collect backtraces.
1188 : //
1189 : // EKS worker nodes have following core dump settings:
1190 : // /proc/sys/kernel/core_pattern -> core
1191 : // /proc/sys/kernel/core_uses_pid -> 1
1192 : // ulimit -c -> unlimited
1193 : // which results in core dumps being written to postgres data directory as core.<pid>.
1194 : //
1195 : // Use that as a default location and pattern, except macos where core dumps are written
1196 : // to /cores/ directory by default.
1197 : //
1198 : // With default Linux settings, the core dump file is called just "core", so check for
1199 : // that too.
1200 0 : pub fn check_for_core_dumps(&self) -> Result<()> {
1201 0 : let core_dump_dir = match std::env::consts::OS {
1202 0 : "macos" => Path::new("/cores/"),
1203 0 : _ => Path::new(&self.pgdata),
1204 : };
1205 :
1206 : // Collect core dump paths if any
1207 0 : info!("checking for core dumps in {}", core_dump_dir.display());
1208 0 : let files = fs::read_dir(core_dump_dir)?;
1209 0 : let cores = files.filter_map(|entry| {
1210 0 : let entry = entry.ok()?;
1211 :
1212 0 : let is_core_dump = match entry.file_name().to_str()? {
1213 0 : n if n.starts_with("core.") => true,
1214 0 : "core" => true,
1215 0 : _ => false,
1216 : };
1217 0 : if is_core_dump {
1218 0 : Some(entry.path())
1219 : } else {
1220 0 : None
1221 : }
1222 0 : });
1223 :
1224 : // Print backtrace for each core dump
1225 0 : for core_path in cores {
1226 0 : warn!(
1227 0 : "core dump found: {}, collecting backtrace",
1228 0 : core_path.display()
1229 : );
1230 :
1231 : // Try first with gdb
1232 0 : let backtrace = Command::new("gdb")
1233 0 : .args(["--batch", "-q", "-ex", "bt", &self.pgbin])
1234 0 : .arg(&core_path)
1235 0 : .output();
1236 :
1237 : // Try lldb if no gdb is found -- that is handy for local testing on macOS
1238 0 : let backtrace = match backtrace {
1239 0 : Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
1240 0 : warn!("cannot find gdb, trying lldb");
1241 0 : Command::new("lldb")
1242 0 : .arg("-c")
1243 0 : .arg(&core_path)
1244 0 : .args(["--batch", "-o", "bt all", "-o", "quit"])
1245 0 : .output()
1246 : }
1247 0 : _ => backtrace,
1248 0 : }?;
1249 :
1250 0 : warn!(
1251 0 : "core dump backtrace: {}",
1252 0 : String::from_utf8_lossy(&backtrace.stdout)
1253 : );
1254 0 : warn!(
1255 0 : "debugger stderr: {}",
1256 0 : String::from_utf8_lossy(&backtrace.stderr)
1257 : );
1258 : }
1259 :
1260 0 : Ok(())
1261 0 : }
1262 :
1263 : /// Select `pg_stat_statements` data and return it as a stringified JSON
1264 0 : pub async fn collect_insights(&self) -> String {
1265 0 : let mut result_rows: Vec<String> = Vec::new();
1266 0 : let connect_result = tokio_postgres::connect(self.connstr.as_str(), NoTls).await;
1267 0 : let (client, connection) = connect_result.unwrap();
1268 0 : tokio::spawn(async move {
1269 0 : if let Err(e) = connection.await {
1270 0 : eprintln!("connection error: {}", e);
1271 0 : }
1272 0 : });
1273 0 : let result = client
1274 0 : .simple_query(
1275 0 : "SELECT
1276 0 : row_to_json(pg_stat_statements)
1277 0 : FROM
1278 0 : pg_stat_statements
1279 0 : WHERE
1280 0 : userid != 'cloud_admin'::regrole::oid
1281 0 : ORDER BY
1282 0 : (mean_exec_time + mean_plan_time) DESC
1283 0 : LIMIT 100",
1284 0 : )
1285 0 : .await;
1286 :
1287 0 : if let Ok(raw_rows) = result {
1288 0 : for message in raw_rows.iter() {
1289 0 : if let postgres::SimpleQueryMessage::Row(row) = message {
1290 0 : if let Some(json) = row.get(0) {
1291 0 : result_rows.push(json.to_string());
1292 0 : }
1293 0 : }
1294 : }
1295 :
1296 0 : format!("{{\"pg_stat_statements\": [{}]}}", result_rows.join(","))
1297 : } else {
1298 0 : "{{\"pg_stat_statements\": []}}".to_string()
1299 : }
1300 0 : }
1301 :
1302 : // download an archive, unzip and place files in correct locations
1303 0 : pub async fn download_extension(
1304 0 : &self,
1305 0 : real_ext_name: String,
1306 0 : ext_path: RemotePath,
1307 0 : ) -> Result<u64, DownloadError> {
1308 0 : let ext_remote_storage =
1309 0 : self.ext_remote_storage
1310 0 : .as_ref()
1311 0 : .ok_or(DownloadError::BadInput(anyhow::anyhow!(
1312 0 : "Remote extensions storage is not configured",
1313 0 : )))?;
1314 :
1315 0 : let ext_archive_name = ext_path.object_name().expect("bad path");
1316 0 :
1317 0 : let mut first_try = false;
1318 0 : if !self
1319 0 : .ext_download_progress
1320 0 : .read()
1321 0 : .expect("lock err")
1322 0 : .contains_key(ext_archive_name)
1323 0 : {
1324 0 : self.ext_download_progress
1325 0 : .write()
1326 0 : .expect("lock err")
1327 0 : .insert(ext_archive_name.to_string(), (Utc::now(), false));
1328 0 : first_try = true;
1329 0 : }
1330 0 : let (download_start, download_completed) =
1331 0 : self.ext_download_progress.read().expect("lock err")[ext_archive_name];
1332 0 : let start_time_delta = Utc::now()
1333 0 : .signed_duration_since(download_start)
1334 0 : .to_std()
1335 0 : .unwrap()
1336 0 : .as_millis() as u64;
1337 :
1338 : // how long to wait for extension download if it was started by another process
1339 : const HANG_TIMEOUT: u64 = 3000; // milliseconds
1340 :
1341 0 : if download_completed {
1342 0 : info!("extension already downloaded, skipping re-download");
1343 0 : return Ok(0);
1344 0 : } else if start_time_delta < HANG_TIMEOUT && !first_try {
1345 0 : info!("download {ext_archive_name} already started by another process, hanging untill completion or timeout");
1346 0 : let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500));
1347 : loop {
1348 0 : info!("waiting for download");
1349 0 : interval.tick().await;
1350 0 : let (_, download_completed_now) =
1351 0 : self.ext_download_progress.read().expect("lock")[ext_archive_name];
1352 0 : if download_completed_now {
1353 0 : info!("download finished by whoever else downloaded it");
1354 0 : return Ok(0);
1355 0 : }
1356 : }
1357 : // NOTE: the above loop will get terminated
1358 : // based on the timeout of the download function
1359 0 : }
1360 0 :
1361 0 : // if extension hasn't been downloaded before or the previous
1362 0 : // attempt to download was at least HANG_TIMEOUT ms ago
1363 0 : // then we try to download it here
1364 0 : info!("downloading new extension {ext_archive_name}");
1365 :
1366 0 : let download_size = extension_server::download_extension(
1367 0 : &real_ext_name,
1368 0 : &ext_path,
1369 0 : ext_remote_storage,
1370 0 : &self.pgbin,
1371 0 : )
1372 0 : .await
1373 0 : .map_err(DownloadError::Other);
1374 0 :
1375 0 : if download_size.is_ok() {
1376 0 : self.ext_download_progress
1377 0 : .write()
1378 0 : .expect("bad lock")
1379 0 : .insert(ext_archive_name.to_string(), (download_start, true));
1380 0 : }
1381 :
1382 0 : download_size
1383 0 : }
1384 :
1385 0 : pub async fn set_role_grants(
1386 0 : &self,
1387 0 : db_name: &PgIdent,
1388 0 : schema_name: &PgIdent,
1389 0 : privileges: &[Privilege],
1390 0 : role_name: &PgIdent,
1391 0 : ) -> Result<()> {
1392 : use tokio_postgres::config::Config;
1393 : use tokio_postgres::NoTls;
1394 :
1395 0 : let mut conf = Config::from_str(self.connstr.as_str()).unwrap();
1396 0 : conf.dbname(db_name);
1397 :
1398 0 : let (db_client, conn) = conf
1399 0 : .connect(NoTls)
1400 0 : .await
1401 0 : .context("Failed to connect to the database")?;
1402 0 : tokio::spawn(conn);
1403 0 :
1404 0 : // TODO: support other types of grants apart from schemas?
1405 0 : let query = format!(
1406 0 : "GRANT {} ON SCHEMA {} TO {}",
1407 0 : privileges
1408 0 : .iter()
1409 0 : // should not be quoted as it's part of the command.
1410 0 : // is already sanitized so it's ok
1411 0 : .map(|p| p.as_str())
1412 0 : .collect::<Vec<&'static str>>()
1413 0 : .join(", "),
1414 0 : // quote the schema and role name as identifiers to sanitize them.
1415 0 : schema_name.pg_quote(),
1416 0 : role_name.pg_quote(),
1417 0 : );
1418 0 : db_client
1419 0 : .simple_query(&query)
1420 0 : .await
1421 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
1422 :
1423 0 : Ok(())
1424 0 : }
1425 :
1426 0 : pub async fn install_extension(
1427 0 : &self,
1428 0 : ext_name: &PgIdent,
1429 0 : db_name: &PgIdent,
1430 0 : ext_version: ExtVersion,
1431 0 : ) -> Result<ExtVersion> {
1432 : use tokio_postgres::config::Config;
1433 : use tokio_postgres::NoTls;
1434 :
1435 0 : let mut conf = Config::from_str(self.connstr.as_str()).unwrap();
1436 0 : conf.dbname(db_name);
1437 :
1438 0 : let (db_client, conn) = conf
1439 0 : .connect(NoTls)
1440 0 : .await
1441 0 : .context("Failed to connect to the database")?;
1442 0 : tokio::spawn(conn);
1443 0 :
1444 0 : let version_query = "SELECT extversion FROM pg_extension WHERE extname = $1";
1445 0 : let version: Option<ExtVersion> = db_client
1446 0 : .query_opt(version_query, &[&ext_name])
1447 0 : .await
1448 0 : .with_context(|| format!("Failed to execute query: {}", version_query))?
1449 0 : .map(|row| row.get(0));
1450 0 :
1451 0 : // sanitize the inputs as postgres idents.
1452 0 : let ext_name: String = ext_name.pg_quote();
1453 0 : let quoted_version: String = ext_version.pg_quote();
1454 :
1455 0 : if let Some(installed_version) = version {
1456 0 : if installed_version == ext_version {
1457 0 : return Ok(installed_version);
1458 0 : }
1459 0 : let query = format!("ALTER EXTENSION {ext_name} UPDATE TO {quoted_version}");
1460 0 : db_client
1461 0 : .simple_query(&query)
1462 0 : .await
1463 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
1464 : } else {
1465 0 : let query =
1466 0 : format!("CREATE EXTENSION IF NOT EXISTS {ext_name} WITH VERSION {quoted_version}");
1467 0 : db_client
1468 0 : .simple_query(&query)
1469 0 : .await
1470 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
1471 : }
1472 :
1473 0 : Ok(ext_version)
1474 0 : }
1475 :
1476 : #[tokio::main]
1477 0 : pub async fn prepare_preload_libraries(
1478 0 : &self,
1479 0 : spec: &ComputeSpec,
1480 0 : ) -> Result<RemoteExtensionMetrics> {
1481 0 : if self.ext_remote_storage.is_none() {
1482 0 : return Ok(RemoteExtensionMetrics {
1483 0 : num_ext_downloaded: 0,
1484 0 : largest_ext_size: 0,
1485 0 : total_ext_download_size: 0,
1486 0 : });
1487 0 : }
1488 0 : let remote_extensions = spec
1489 0 : .remote_extensions
1490 0 : .as_ref()
1491 0 : .ok_or(anyhow::anyhow!("Remote extensions are not configured"))?;
1492 0 :
1493 0 : info!("parse shared_preload_libraries from spec.cluster.settings");
1494 0 : let mut libs_vec = Vec::new();
1495 0 : if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
1496 0 : libs_vec = libs
1497 0 : .split(&[',', '\'', ' '])
1498 0 : .filter(|s| *s != "neon" && !s.is_empty())
1499 0 : .map(str::to_string)
1500 0 : .collect();
1501 0 : }
1502 0 : info!("parse shared_preload_libraries from provided postgresql.conf");
1503 0 :
1504 0 : // that is used in neon_local and python tests
1505 0 : if let Some(conf) = &spec.cluster.postgresql_conf {
1506 0 : let conf_lines = conf.split('\n').collect::<Vec<&str>>();
1507 0 : let mut shared_preload_libraries_line = "";
1508 0 : for line in conf_lines {
1509 0 : if line.starts_with("shared_preload_libraries") {
1510 0 : shared_preload_libraries_line = line;
1511 0 : }
1512 0 : }
1513 0 : let mut preload_libs_vec = Vec::new();
1514 0 : if let Some(libs) = shared_preload_libraries_line.split("='").nth(1) {
1515 0 : preload_libs_vec = libs
1516 0 : .split(&[',', '\'', ' '])
1517 0 : .filter(|s| *s != "neon" && !s.is_empty())
1518 0 : .map(str::to_string)
1519 0 : .collect();
1520 0 : }
1521 0 : libs_vec.extend(preload_libs_vec);
1522 0 : }
1523 0 :
1524 0 : // Don't try to download libraries that are not in the index.
1525 0 : // Assume that they are already present locally.
1526 0 : libs_vec.retain(|lib| remote_extensions.library_index.contains_key(lib));
1527 0 :
1528 0 : info!("Downloading to shared preload libraries: {:?}", &libs_vec);
1529 0 :
1530 0 : let mut download_tasks = Vec::new();
1531 0 : for library in &libs_vec {
1532 0 : let (ext_name, ext_path) =
1533 0 : remote_extensions.get_ext(library, true, &self.build_tag, &self.pgversion)?;
1534 0 : download_tasks.push(self.download_extension(ext_name, ext_path));
1535 0 : }
1536 0 : let results = join_all(download_tasks).await;
1537 0 :
1538 0 : let mut remote_ext_metrics = RemoteExtensionMetrics {
1539 0 : num_ext_downloaded: 0,
1540 0 : largest_ext_size: 0,
1541 0 : total_ext_download_size: 0,
1542 0 : };
1543 0 : for result in results {
1544 0 : let download_size = match result {
1545 0 : Ok(res) => {
1546 0 : remote_ext_metrics.num_ext_downloaded += 1;
1547 0 : res
1548 0 : }
1549 0 : Err(err) => {
1550 0 : // if we failed to download an extension, we don't want to fail the whole
1551 0 : // process, but we do want to log the error
1552 0 : error!("Failed to download extension: {}", err);
1553 0 : 0
1554 0 : }
1555 0 : };
1556 0 :
1557 0 : remote_ext_metrics.largest_ext_size =
1558 0 : std::cmp::max(remote_ext_metrics.largest_ext_size, download_size);
1559 0 : remote_ext_metrics.total_ext_download_size += download_size;
1560 0 : }
1561 0 : Ok(remote_ext_metrics)
1562 0 : }
1563 :
1564 : /// Waits until current thread receives a state changed notification and
1565 : /// the pageserver connection strings has changed.
1566 : ///
1567 : /// The operation will time out after a specified duration.
1568 0 : pub fn wait_timeout_while_pageserver_connstr_unchanged(&self, duration: Duration) {
1569 0 : let state = self.state.lock().unwrap();
1570 0 : let old_pageserver_connstr = state
1571 0 : .pspec
1572 0 : .as_ref()
1573 0 : .expect("spec must be set")
1574 0 : .pageserver_connstr
1575 0 : .clone();
1576 0 : let mut unchanged = true;
1577 0 : let _ = self
1578 0 : .state_changed
1579 0 : .wait_timeout_while(state, duration, |s| {
1580 0 : let pageserver_connstr = &s
1581 0 : .pspec
1582 0 : .as_ref()
1583 0 : .expect("spec must be set")
1584 0 : .pageserver_connstr;
1585 0 : unchanged = pageserver_connstr == &old_pageserver_connstr;
1586 0 : unchanged
1587 0 : })
1588 0 : .unwrap();
1589 0 : if !unchanged {
1590 0 : info!("Pageserver config changed");
1591 0 : }
1592 0 : }
1593 : }
1594 :
1595 0 : pub fn forward_termination_signal() {
1596 0 : let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
1597 0 : if ss_pid != 0 {
1598 0 : let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
1599 0 : kill(ss_pid, Signal::SIGTERM).ok();
1600 0 : }
1601 0 : let pg_pid = PG_PID.load(Ordering::SeqCst);
1602 0 : if pg_pid != 0 {
1603 0 : let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
1604 0 : // Use 'fast' shutdown (SIGINT) because it also creates a shutdown checkpoint, which is important for
1605 0 : // ROs to get a list of running xacts faster instead of going through the CLOG.
1606 0 : // See https://www.postgresql.org/docs/current/server-shutdown.html for the list of modes and signals.
1607 0 : kill(pg_pid, Signal::SIGINT).ok();
1608 0 : }
1609 0 : }
|