Line data Source code
1 : use std::collections::HashMap;
2 : use std::os::unix::fs::{PermissionsExt, symlink};
3 : use std::path::Path;
4 : use std::process::{Command, Stdio};
5 : use std::str::FromStr;
6 : use std::sync::atomic::{AtomicU32, Ordering};
7 : use std::sync::{Arc, Condvar, Mutex, RwLock};
8 : use std::time::{Duration, Instant};
9 : use std::{env, fs};
10 :
11 : use anyhow::{Context, Result};
12 : use chrono::{DateTime, Utc};
13 : use compute_api::privilege::Privilege;
14 : use compute_api::responses::{ComputeMetrics, ComputeStatus};
15 : use compute_api::spec::{ComputeFeature, ComputeMode, ComputeSpec, ExtVersion, PgIdent};
16 : use futures::StreamExt;
17 : use futures::future::join_all;
18 : use futures::stream::FuturesUnordered;
19 : use nix::sys::signal::{Signal, kill};
20 : use nix::unistd::Pid;
21 : use postgres;
22 : use postgres::NoTls;
23 : use postgres::error::SqlState;
24 : use remote_storage::{DownloadError, RemotePath};
25 : use tokio::spawn;
26 : use tracing::{debug, error, info, instrument, warn};
27 : use utils::id::{TenantId, TimelineId};
28 : use utils::lsn::Lsn;
29 : use utils::measured_stream::MeasuredReader;
30 :
31 : use crate::installed_extensions::get_installed_extensions;
32 : use crate::pg_helpers::*;
33 : use crate::spec::*;
34 : use crate::sync_sk::{check_if_synced, ping_safekeeper};
35 : use crate::{config, extension_server, local_proxy};
36 :
37 : pub static SYNC_SAFEKEEPERS_PID: AtomicU32 = AtomicU32::new(0);
38 : pub static PG_PID: AtomicU32 = AtomicU32::new(0);
39 :
40 : /// Compute node info shared across several `compute_ctl` threads.
41 : pub struct ComputeNode {
42 : /// The ID of the compute
43 : pub compute_id: String,
44 : // Url type maintains proper escaping
45 : pub connstr: url::Url,
46 : // We connect to Postgres from many different places, so build configs once
47 : // and reuse them where needed.
48 : pub conn_conf: postgres::config::Config,
49 : pub tokio_conn_conf: tokio_postgres::config::Config,
50 : pub pgdata: String,
51 : pub pgbin: String,
52 : pub pgversion: String,
53 : /// We should only allow live re-configuration of the compute node if
54 : /// it uses the 'pull model', i.e. it can go to the control plane and
55 : /// fetch the latest configuration. Otherwise, there could be a case:
56 : /// - we start compute with some spec provided as argument
57 : /// - we push new spec and it does reconfiguration
58 : /// - but then something happens and compute pod / VM is destroyed,
59 : /// so k8s controller starts it again with the **old** spec
60 : ///
61 : /// and the same for empty computes:
62 : /// - we started compute without any spec
63 : /// - we push spec and it does configuration
64 : /// - but then it is restarted without any spec again
65 : pub live_config_allowed: bool,
66 : /// The port that the compute's external HTTP server listens on
67 : pub external_http_port: u16,
68 : /// The port that the compute's internal HTTP server listens on
69 : pub internal_http_port: u16,
70 : /// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
71 : /// To allow the HTTP API server to serve status requests while configuration
72 : /// is in progress, the lock should be held only for short periods of time to
73 : /// do reads/writes, not for the whole configuration process.
74 : pub state: Mutex<ComputeState>,
75 : /// `Condvar` to allow notifying waiters about state changes.
76 : pub state_changed: Condvar,
77 : /// The address of the extension storage proxy gateway
78 : pub ext_remote_storage: Option<String>,
79 : // key: ext_archive_name, value: (download start time, download_completed flag)
80 : pub ext_download_progress: RwLock<HashMap<String, (DateTime<Utc>, bool)>>,
81 : pub build_tag: String,
82 : }
83 :
84 : // store some metrics about download size that might impact startup time
85 : #[derive(Clone, Debug)]
86 : pub struct RemoteExtensionMetrics {
87 : num_ext_downloaded: u64,
88 : largest_ext_size: u64,
89 : total_ext_download_size: u64,
90 : }
91 :
92 : #[derive(Clone, Debug)]
93 : pub struct ComputeState {
94 : pub start_time: DateTime<Utc>,
95 : pub status: ComputeStatus,
96 : /// Timestamp of the last Postgres activity. It could be `None` if the
97 : /// compute wasn't used since start.
98 : pub last_active: Option<DateTime<Utc>>,
99 : pub error: Option<String>,
100 :
101 : /// Compute spec. This can be received from the CLI or, more likely,
102 : /// passed by the control plane with a /configure HTTP request.
103 : pub pspec: Option<ParsedSpec>,
104 :
105 : /// If the spec is passed by a /configure request, 'startup_span' is the
106 : /// /configure request's tracing span. The main thread enters it when it
107 : /// processes the compute startup, so that the compute startup is considered
108 : /// to be part of the /configure request for tracing purposes.
109 : ///
110 : /// If the request handling thread/task called startup_compute() directly,
111 : /// it would automatically be a child of the request handling span, and we
112 : /// wouldn't need this. But because we use the main thread to perform the
113 : /// startup, and the /configure task just waits for it to finish, we need to
114 : /// set up the span relationship ourselves.
115 : pub startup_span: Option<tracing::span::Span>,
116 :
117 : pub metrics: ComputeMetrics,
118 : }
119 :
120 : impl ComputeState {
121 0 : pub fn new() -> Self {
122 0 : Self {
123 0 : start_time: Utc::now(),
124 0 : status: ComputeStatus::Empty,
125 0 : last_active: None,
126 0 : error: None,
127 0 : pspec: None,
128 0 : startup_span: None,
129 0 : metrics: ComputeMetrics::default(),
130 0 : }
131 0 : }
132 :
133 0 : pub fn set_status(&mut self, status: ComputeStatus, state_changed: &Condvar) {
134 0 : let prev = self.status;
135 0 : info!("Changing compute status from {} to {}", prev, status);
136 0 : self.status = status;
137 0 : state_changed.notify_all();
138 0 : }
139 :
140 0 : pub fn set_failed_status(&mut self, err: anyhow::Error, state_changed: &Condvar) {
141 0 : self.error = Some(format!("{err:?}"));
142 0 : self.set_status(ComputeStatus::Failed, state_changed);
143 0 : }
144 : }
145 :
146 : impl Default for ComputeState {
147 0 : fn default() -> Self {
148 0 : Self::new()
149 0 : }
150 : }
151 :
152 : #[derive(Clone, Debug)]
153 : pub struct ParsedSpec {
154 : pub spec: ComputeSpec,
155 : pub tenant_id: TenantId,
156 : pub timeline_id: TimelineId,
157 : pub pageserver_connstr: String,
158 : pub safekeeper_connstrings: Vec<String>,
159 : pub storage_auth_token: Option<String>,
160 : }
161 :
162 : impl TryFrom<ComputeSpec> for ParsedSpec {
163 : type Error = String;
164 0 : fn try_from(spec: ComputeSpec) -> Result<Self, String> {
165 : // Extract the options from the spec file that are needed to connect to
166 : // the storage system.
167 : //
168 : // For backwards-compatibility, the top-level fields in the spec file
169 : // may be empty. In that case, we need to dig them from the GUCs in the
170 : // cluster.settings field.
171 0 : let pageserver_connstr = spec
172 0 : .pageserver_connstring
173 0 : .clone()
174 0 : .or_else(|| spec.cluster.settings.find("neon.pageserver_connstring"))
175 0 : .ok_or("pageserver connstr should be provided")?;
176 0 : let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() {
177 0 : if matches!(spec.mode, ComputeMode::Primary) {
178 0 : spec.cluster
179 0 : .settings
180 0 : .find("neon.safekeepers")
181 0 : .ok_or("safekeeper connstrings should be provided")?
182 0 : .split(',')
183 0 : .map(|str| str.to_string())
184 0 : .collect()
185 : } else {
186 0 : vec![]
187 : }
188 : } else {
189 0 : spec.safekeeper_connstrings.clone()
190 : };
191 0 : let storage_auth_token = spec.storage_auth_token.clone();
192 0 : let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
193 0 : tenant_id
194 : } else {
195 0 : spec.cluster
196 0 : .settings
197 0 : .find("neon.tenant_id")
198 0 : .ok_or("tenant id should be provided")
199 0 : .map(|s| TenantId::from_str(&s))?
200 0 : .or(Err("invalid tenant id"))?
201 : };
202 0 : let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
203 0 : timeline_id
204 : } else {
205 0 : spec.cluster
206 0 : .settings
207 0 : .find("neon.timeline_id")
208 0 : .ok_or("timeline id should be provided")
209 0 : .map(|s| TimelineId::from_str(&s))?
210 0 : .or(Err("invalid timeline id"))?
211 : };
212 :
213 0 : Ok(ParsedSpec {
214 0 : spec,
215 0 : pageserver_connstr,
216 0 : safekeeper_connstrings,
217 0 : storage_auth_token,
218 0 : tenant_id,
219 0 : timeline_id,
220 0 : })
221 0 : }
222 : }
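 :
 : // A minimal usage sketch for the conversion above, assuming the spec JSON
 : // (hypothetical `spec_json` variable) deserializes into a `ComputeSpec`
 : // via serde:
 : //
 : //   let spec: ComputeSpec = serde_json::from_str(&spec_json)?;
 : //   let pspec = ParsedSpec::try_from(spec).map_err(anyhow::Error::msg)?;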
223 :
224 : /// If we are a VM, returns a [`Command`] that will run in the `neon-postgres`
225 : /// cgroup. Otherwise returns the default `Command::new(cmd)`
226 : ///
227 : /// This function should be used to start postgres, as it will start it in the
228 : /// neon-postgres cgroup if we are a VM. This allows autoscaling to control
229 : /// postgres' resource usage. The cgroup will exist in VMs because vm-builder
230 : /// creates it during the sysinit phase of its inittab.
231 0 : fn maybe_cgexec(cmd: &str) -> Command {
232 0 : // The cplane sets this env var for autoscaling computes.
233 0 : // Use `var_os` so we don't have to worry about the variable being valid
234 0 : // Unicode. That should never be a concern, but just in case.
235 0 : if env::var_os("AUTOSCALING").is_some() {
236 0 : let mut command = Command::new("cgexec");
237 0 : command.args(["-g", "memory:neon-postgres"]);
238 0 : command.arg(cmd);
239 0 : command
240 : } else {
241 0 : Command::new(cmd)
242 : }
243 0 : }
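 :
 : // For example, on an autoscaling compute (AUTOSCALING set) the command
 : // built above is equivalent to the shell invocation:
 : //
 : //   cgexec -g memory:neon-postgres postgres <args...>
 : //
 : // while without the env var it degenerates to a plain `postgres <args...>`.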
244 :
245 0 : pub(crate) fn construct_superuser_query(spec: &ComputeSpec) -> String {
246 0 : let roles = spec
247 0 : .cluster
248 0 : .roles
249 0 : .iter()
250 0 : .map(|r| escape_literal(&r.name))
251 0 : .collect::<Vec<_>>();
252 0 :
253 0 : let dbs = spec
254 0 : .cluster
255 0 : .databases
256 0 : .iter()
257 0 : .map(|db| escape_literal(&db.name))
258 0 : .collect::<Vec<_>>();
259 :
260 0 : let roles_decl = if roles.is_empty() {
261 0 : String::from("roles text[] := NULL;")
262 : } else {
263 0 : format!(
264 0 : r#"
265 0 : roles text[] := ARRAY(SELECT rolname
266 0 : FROM pg_catalog.pg_roles
267 0 : WHERE rolname IN ({}));"#,
268 0 : roles.join(", ")
269 0 : )
270 : };
271 :
272 0 : let database_decl = if dbs.is_empty() {
273 0 : String::from("dbs text[] := NULL;")
274 : } else {
275 0 : format!(
276 0 : r#"
277 0 : dbs text[] := ARRAY(SELECT datname
278 0 : FROM pg_catalog.pg_database
279 0 : WHERE datname IN ({}));"#,
280 0 : dbs.join(", ")
281 0 : )
282 : };
283 :
284 : // ALL PRIVILEGES grants CREATE, CONNECT, and TEMPORARY on all databases
285 : // (see https://www.postgresql.org/docs/current/ddl-priv.html)
286 0 : let query = format!(
287 0 : r#"
288 0 : DO $$
289 0 : DECLARE
290 0 : r text;
291 0 : {}
292 0 : {}
293 0 : BEGIN
294 0 : IF NOT EXISTS (
295 0 : SELECT FROM pg_catalog.pg_roles WHERE rolname = 'neon_superuser')
296 0 : THEN
297 0 : CREATE ROLE neon_superuser CREATEDB CREATEROLE NOLOGIN REPLICATION BYPASSRLS IN ROLE pg_read_all_data, pg_write_all_data;
298 0 : IF array_length(roles, 1) IS NOT NULL THEN
299 0 : EXECUTE format('GRANT neon_superuser TO %s',
300 0 : array_to_string(ARRAY(SELECT quote_ident(x) FROM unnest(roles) as x), ', '));
301 0 : FOREACH r IN ARRAY roles LOOP
302 0 : EXECUTE format('ALTER ROLE %s CREATEROLE CREATEDB', quote_ident(r));
303 0 : END LOOP;
304 0 : END IF;
305 0 : IF array_length(dbs, 1) IS NOT NULL THEN
306 0 : EXECUTE format('GRANT ALL PRIVILEGES ON DATABASE %s TO neon_superuser',
307 0 : array_to_string(ARRAY(SELECT quote_ident(x) FROM unnest(dbs) as x), ', '));
308 0 : END IF;
309 0 : END IF;
310 0 : END
311 0 : $$;"#,
312 0 : roles_decl, database_decl,
313 0 : );
314 0 :
315 0 : query
316 0 : }
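 :
 : // As an illustration (not part of any real spec), with a role named
 : // 'alice' and a database named 'db1' the two declarations above expand to:
 : //
 : //   roles text[] := ARRAY(SELECT rolname
 : //       FROM pg_catalog.pg_roles
 : //       WHERE rolname IN ('alice'));
 : //   dbs text[] := ARRAY(SELECT datname
 : //       FROM pg_catalog.pg_database
 : //       WHERE datname IN ('db1'));
 : //
 : // and the DO block then grants neon_superuser (plus CREATEROLE/CREATEDB)
 : // to "alice" and ALL PRIVILEGES ON DATABASE "db1" to neon_superuser.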
317 :
318 : impl ComputeNode {
319 : /// Check whether the compute node has the corresponding feature enabled.
320 0 : pub fn has_feature(&self, feature: ComputeFeature) -> bool {
321 0 : let state = self.state.lock().unwrap();
322 :
323 0 : if let Some(s) = state.pspec.as_ref() {
324 0 : s.spec.features.contains(&feature)
325 : } else {
326 0 : false
327 : }
328 0 : }
329 :
330 0 : pub fn set_status(&self, status: ComputeStatus) {
331 0 : let mut state = self.state.lock().unwrap();
332 0 : state.set_status(status, &self.state_changed);
333 0 : }
334 :
335 0 : pub fn set_failed_status(&self, err: anyhow::Error) {
336 0 : let mut state = self.state.lock().unwrap();
337 0 : state.set_failed_status(err, &self.state_changed);
338 0 : }
339 :
340 0 : pub fn get_status(&self) -> ComputeStatus {
341 0 : self.state.lock().unwrap().status
342 0 : }
343 :
344 0 : pub fn get_timeline_id(&self) -> Option<TimelineId> {
345 0 : self.state
346 0 : .lock()
347 0 : .unwrap()
348 0 : .pspec
349 0 : .as_ref()
350 0 : .map(|s| s.timeline_id)
351 0 : }
352 :
353 : // Remove the `pgdata` directory and create it again with the right permissions.
354 0 : fn create_pgdata(&self) -> Result<()> {
355 0 : // Ignore removal error, likely it is a 'No such file or directory (os error 2)'.
356 0 : // If it is something different then create_dir() will error out anyway.
357 0 : let _ok = fs::remove_dir_all(&self.pgdata);
358 0 : fs::create_dir(&self.pgdata)?;
359 0 : fs::set_permissions(&self.pgdata, fs::Permissions::from_mode(0o700))?;
360 :
361 0 : Ok(())
362 0 : }
363 :
364 : // Get a basebackup from the pageserver over a libpq connection and
365 : // unpack it into the `pgdata` directory, overwriting all its previous content.
366 : #[instrument(skip_all, fields(%lsn))]
367 : fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
368 : let spec = compute_state.pspec.as_ref().expect("spec must be set");
369 : let start_time = Instant::now();
370 :
371 : let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
372 : let mut config = postgres::Config::from_str(shard0_connstr)?;
373 :
374 : // Use the storage auth token from the config file, if given.
375 : // Note: this overrides any password set in the connection string.
376 : if let Some(storage_auth_token) = &spec.storage_auth_token {
377 : info!("Got storage auth token from spec file");
378 : config.password(storage_auth_token);
379 : } else {
380 : info!("Storage auth token not set");
381 : }
382 :
383 : // Connect to pageserver
384 : let mut client = config.connect(NoTls)?;
385 : let pageserver_connect_micros = start_time.elapsed().as_micros() as u64;
386 :
387 : let basebackup_cmd = match lsn {
388 : Lsn(0) => {
389 : if spec.spec.mode != ComputeMode::Primary {
390 : format!(
391 : "basebackup {} {} --gzip --replica",
392 : spec.tenant_id, spec.timeline_id
393 : )
394 : } else {
395 : format!("basebackup {} {} --gzip", spec.tenant_id, spec.timeline_id)
396 : }
397 : }
398 : _ => {
399 : if spec.spec.mode != ComputeMode::Primary {
400 : format!(
401 : "basebackup {} {} {} --gzip --replica",
402 : spec.tenant_id, spec.timeline_id, lsn
403 : )
404 : } else {
405 : format!(
406 : "basebackup {} {} {} --gzip",
407 : spec.tenant_id, spec.timeline_id, lsn
408 : )
409 : }
410 : }
411 : };
412 :
413 : let copyreader = client.copy_out(basebackup_cmd.as_str())?;
414 : let mut measured_reader = MeasuredReader::new(copyreader);
415 : let mut bufreader = std::io::BufReader::new(&mut measured_reader);
416 :
417 : // Read the archive directly from the `CopyOutReader`
418 : //
419 : // Set `ignore_zeros` so that unpack() reads all the Copy data and
420 : // doesn't stop at the end-of-archive marker. Otherwise, if the server
421 : // sends an Error after finishing the tarball, we will not notice it.
422 : let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut bufreader));
423 : ar.set_ignore_zeros(true);
424 : ar.unpack(&self.pgdata)?;
425 :
426 : // Report metrics
427 : let mut state = self.state.lock().unwrap();
428 : state.metrics.pageserver_connect_micros = pageserver_connect_micros;
429 : state.metrics.basebackup_bytes = measured_reader.get_byte_count() as u64;
430 : state.metrics.basebackup_ms = start_time.elapsed().as_millis() as u64;
431 : Ok(())
432 : }
433 :
434 : // Gets the basebackup in a retry loop
435 : #[instrument(skip_all, fields(%lsn))]
436 : pub fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
437 : let mut retry_period_ms = 500.0;
438 : let mut attempts = 0;
439 : const DEFAULT_ATTEMPTS: u16 = 10;
440 : #[cfg(feature = "testing")]
441 : let max_attempts = if let Ok(v) = env::var("NEON_COMPUTE_TESTING_BASEBACKUP_RETRIES") {
442 : u16::from_str(&v).unwrap()
443 : } else {
444 : DEFAULT_ATTEMPTS
445 : };
446 : #[cfg(not(feature = "testing"))]
447 : let max_attempts = DEFAULT_ATTEMPTS;
448 : loop {
449 : let result = self.try_get_basebackup(compute_state, lsn);
450 : match result {
451 : Ok(_) => {
452 : return result;
453 : }
454 : Err(ref e) if attempts < max_attempts => {
455 : warn!(
456 : "Failed to get basebackup: {} (attempt {}/{})",
457 : e, attempts, max_attempts
458 : );
459 : std::thread::sleep(std::time::Duration::from_millis(retry_period_ms as u64));
460 : retry_period_ms *= 1.5;
461 : }
462 : Err(_) => {
463 : return result;
464 : }
465 : }
466 : attempts += 1;
467 : }
468 : }
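 :
 : // Note on timing: with the default 10 attempts and the 1.5x backoff above,
 : // the sleeps are roughly 500ms, 750ms, 1125ms, ..., adding up to about a
 : // minute of waiting before the final error is returned.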
469 :
470 0 : pub async fn check_safekeepers_synced_async(
471 0 : &self,
472 0 : compute_state: &ComputeState,
473 0 : ) -> Result<Option<Lsn>> {
474 0 : // Construct a connection config for each safekeeper
475 0 : let pspec: ParsedSpec = compute_state
476 0 : .pspec
477 0 : .as_ref()
478 0 : .expect("spec must be set")
479 0 : .clone();
480 0 : let sk_connstrs: Vec<String> = pspec.safekeeper_connstrings.clone();
481 0 : let sk_configs = sk_connstrs.into_iter().map(|connstr| {
482 0 : // Format connstr
483 0 : let id = connstr.clone();
484 0 : let connstr = format!("postgresql://no_user@{}", connstr);
485 0 : let options = format!(
486 0 : "-c timeline_id={} tenant_id={}",
487 0 : pspec.timeline_id, pspec.tenant_id
488 0 : );
489 0 :
490 0 : // Construct client
491 0 : let mut config = tokio_postgres::Config::from_str(&connstr).unwrap();
492 0 : config.options(&options);
493 0 : if let Some(storage_auth_token) = pspec.storage_auth_token.clone() {
494 0 : config.password(storage_auth_token);
495 0 : }
496 :
497 0 : (id, config)
498 0 : });
499 0 :
500 0 : // Create task set to query all safekeepers
501 0 : let mut tasks = FuturesUnordered::new();
502 0 : let quorum = sk_configs.len() / 2 + 1;
503 0 : for (id, config) in sk_configs {
504 0 : let timeout = tokio::time::Duration::from_millis(100);
505 0 : let task = tokio::time::timeout(timeout, ping_safekeeper(id, config));
506 0 : tasks.push(tokio::spawn(task));
507 0 : }
508 :
509 : // Get a quorum of responses or errors
510 0 : let mut responses = Vec::new();
511 0 : let mut join_errors = Vec::new();
512 0 : let mut task_errors = Vec::new();
513 0 : let mut timeout_errors = Vec::new();
514 0 : while let Some(response) = tasks.next().await {
515 0 : match response {
516 0 : Ok(Ok(Ok(r))) => responses.push(r),
517 0 : Ok(Ok(Err(e))) => task_errors.push(e),
518 0 : Ok(Err(e)) => timeout_errors.push(e),
519 0 : Err(e) => join_errors.push(e),
520 : };
521 0 : if responses.len() >= quorum {
522 0 : break;
523 0 : }
524 0 : if join_errors.len() + task_errors.len() + timeout_errors.len() >= quorum {
525 0 : break;
526 0 : }
527 : }
528 :
529 : // In case of error, log and fail the check, but don't crash.
530 : // We're playing it safe because these errors could be transient
531 : // and we don't yet retry. Also being careful here allows us to
532 : // be backwards compatible with safekeepers that don't have the
533 : // TIMELINE_STATUS API yet.
534 0 : if responses.len() < quorum {
535 0 : error!(
536 0 : "failed sync safekeepers check {:?} {:?} {:?}",
537 : join_errors, task_errors, timeout_errors
538 : );
539 0 : return Ok(None);
540 0 : }
541 0 :
542 0 : Ok(check_if_synced(responses))
543 0 : }
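 :
 : // Quorum here is a simple integer-division majority: with 3 safekeepers
 : // quorum = 3 / 2 + 1 = 2, and with 5 safekeepers quorum = 3. The loop
 : // above stops as soon as either successes or failures reach that count.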
544 :
545 : // Fast path for sync_safekeepers. If they're already synced we get the LSN
546 : // in one roundtrip. If not, we should do a full sync_safekeepers.
547 : #[instrument(skip_all)]
548 : pub fn check_safekeepers_synced(&self, compute_state: &ComputeState) -> Result<Option<Lsn>> {
549 : let start_time = Utc::now();
550 :
551 : let rt = tokio::runtime::Handle::current();
552 : let result = rt.block_on(self.check_safekeepers_synced_async(compute_state));
553 :
554 : // Record runtime
555 : self.state.lock().unwrap().metrics.sync_sk_check_ms = Utc::now()
556 : .signed_duration_since(start_time)
557 : .to_std()
558 : .unwrap()
559 : .as_millis() as u64;
560 : result
561 : }
562 :
563 : // Run `postgres` in a special mode with `--sync-safekeepers` argument
564 : // and return the reported LSN back to the caller.
565 : #[instrument(skip_all)]
566 : pub fn sync_safekeepers(&self, storage_auth_token: Option<String>) -> Result<Lsn> {
567 : let start_time = Utc::now();
568 :
569 : let mut sync_handle = maybe_cgexec(&self.pgbin)
570 : .args(["--sync-safekeepers"])
571 : .env("PGDATA", &self.pgdata) // we cannot use -D in this mode
572 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
573 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
574 : } else {
575 : vec![]
576 : })
577 : .stdout(Stdio::piped())
578 : .stderr(Stdio::piped())
579 : .spawn()
580 : .expect("postgres --sync-safekeepers failed to start");
581 : SYNC_SAFEKEEPERS_PID.store(sync_handle.id(), Ordering::SeqCst);
582 :
583 : // `postgres --sync-safekeepers` will print all log output to stderr and
584 : // final LSN to stdout. So we leave stdout to collect LSN, while stderr logs
585 : // will be collected in a child thread.
586 : let stderr = sync_handle
587 : .stderr
588 : .take()
589 : .expect("stderr should be captured");
590 : let logs_handle = handle_postgres_logs(stderr);
591 :
592 : let sync_output = sync_handle
593 : .wait_with_output()
594 : .expect("postgres --sync-safekeepers failed");
595 : SYNC_SAFEKEEPERS_PID.store(0, Ordering::SeqCst);
596 :
597 : // Process has exited, so we can join the logs thread.
598 : let _ = tokio::runtime::Handle::current()
599 : .block_on(logs_handle)
600 0 : .map_err(|e| tracing::error!("log task panicked: {:?}", e));
601 :
602 : if !sync_output.status.success() {
603 : anyhow::bail!(
604 : "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
605 : sync_output.status,
606 : String::from_utf8(sync_output.stdout)
607 : .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
608 : );
609 : }
610 :
611 : self.state.lock().unwrap().metrics.sync_safekeepers_ms = Utc::now()
612 : .signed_duration_since(start_time)
613 : .to_std()
614 : .unwrap()
615 : .as_millis() as u64;
616 :
617 : let lsn = Lsn::from_str(String::from_utf8(sync_output.stdout)?.trim())?;
618 :
619 : Ok(lsn)
620 : }
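 :
 : // The child process spawned above is effectively (sketch; the token env
 : // var is only set when a storage auth token is present):
 : //
 : //   PGDATA=<pgdata> NEON_AUTH_TOKEN=<token> postgres --sync-safekeepers
 : //
 : // wrapped in cgexec on autoscaling computes (see `maybe_cgexec`).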
621 :
622 : /// Do all the preparations like PGDATA directory creation, configuration,
623 : /// safekeepers sync, basebackup, etc.
624 : #[instrument(skip_all)]
625 : pub fn prepare_pgdata(&self, compute_state: &ComputeState) -> Result<()> {
626 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
627 : let spec = &pspec.spec;
628 : let pgdata_path = Path::new(&self.pgdata);
629 :
630 : // Remove/create an empty pgdata directory and put configuration there.
631 : self.create_pgdata()?;
632 : config::write_postgres_conf(
633 : &pgdata_path.join("postgresql.conf"),
634 : &pspec.spec,
635 : self.internal_http_port,
636 : )?;
637 :
638 : // Syncing safekeepers is only safe with primary nodes: if a primary
639 : // is already connected it will be kicked out, so a secondary (standby)
640 : // cannot sync safekeepers.
641 : let lsn = match spec.mode {
642 : ComputeMode::Primary => {
643 : info!("checking if safekeepers are synced");
644 : let lsn = if let Ok(Some(lsn)) = self.check_safekeepers_synced(compute_state) {
645 : lsn
646 : } else {
647 : info!("starting safekeepers syncing");
648 : self.sync_safekeepers(pspec.storage_auth_token.clone())
649 0 : .with_context(|| "failed to sync safekeepers")?
650 : };
651 : info!("safekeepers synced at LSN {}", lsn);
652 : lsn
653 : }
654 : ComputeMode::Static(lsn) => {
655 : info!("Starting read-only node at static LSN {}", lsn);
656 : lsn
657 : }
658 : ComputeMode::Replica => {
659 : info!("Initializing standby from latest Pageserver LSN");
660 : Lsn(0)
661 : }
662 : };
663 :
664 : info!(
665 : "getting basebackup@{} from pageserver {}",
666 : lsn, &pspec.pageserver_connstr
667 : );
668 0 : self.get_basebackup(compute_state, lsn).with_context(|| {
669 0 : format!(
670 0 : "failed to get basebackup@{} from pageserver {}",
671 0 : lsn, &pspec.pageserver_connstr
672 0 : )
673 0 : })?;
674 :
675 : // Update pg_hba.conf received with basebackup.
676 : update_pg_hba(pgdata_path)?;
677 :
678 : // Place pg_dynshmem under /dev/shm. This allows us to use
679 : // 'dynamic_shared_memory_type = mmap' so that the files are placed in
680 : // /dev/shm, similar to how 'dynamic_shared_memory_type = posix' works.
681 : //
682 : // Why on earth don't we just stick to the 'posix' default, you might
683 : // ask. It turns out that making large allocations with 'posix' doesn't
684 : // work very well with autoscaling. The behavior we want is that:
685 : //
686 : // 1. You can make large DSM allocations, larger than the current RAM
687 : // size of the VM, without errors
688 : //
689 : // 2. If the allocated memory is really used, the VM is scaled up
690 : // automatically to accommodate that
691 : //
692 : // We try to make that possible by having swap in the VM. But with the
693 : // default 'posix' DSM implementation, we fail step 1, even when there's
694 : // plenty of swap available. PostgreSQL uses posix_fallocate() to create
695 : // the shmem segment, which is really just a file in /dev/shm in Linux,
696 : // but posix_fallocate() on tmpfs returns ENOMEM if the size is larger
697 : // than available RAM.
698 : //
699 : // Using 'dynamic_shared_memory_type = mmap' works around that, because
700 : // the Postgres 'mmap' DSM implementation doesn't use
701 : // posix_fallocate(). Instead, it uses repeated calls to write(2) to
702 : // fill the file with zeros. It's weird that that differs between
703 : // 'posix' and 'mmap', but we take advantage of it. When the file is
704 : // filled slowly with write(2), the kernel allows it to grow larger, as
705 : // long as there's swap available.
706 : //
707 : // In short, using 'dynamic_shared_memory_type = mmap' allows a DSM
708 : // segment to be larger than currently available RAM. But we don't want
709 : // to store it in a real file, which the kernel would try to flush to
710 : // disk, so we symlink pg_dynshmem to /dev/shm.
711 : //
712 : // We don't set 'dynamic_shared_memory_type = mmap' here, we let the
713 : // control plane control that option. If 'mmap' is not used, this
714 : // symlink doesn't affect anything.
715 : //
716 : // See https://github.com/neondatabase/autoscaling/issues/800
717 : std::fs::remove_dir(pgdata_path.join("pg_dynshmem"))?;
718 : symlink("/dev/shm/", pgdata_path.join("pg_dynshmem"))?;
719 :
720 : match spec.mode {
721 : ComputeMode::Primary => {}
722 : ComputeMode::Replica | ComputeMode::Static(..) => {
723 : add_standby_signal(pgdata_path)?;
724 : }
725 : }
726 :
727 : Ok(())
728 : }
729 :
730 : /// Start and stop a postgres process to warm up the VM for startup.
731 0 : pub fn prewarm_postgres(&self) -> Result<()> {
732 0 : info!("prewarming");
733 :
734 : // Create pgdata
735 0 : let pgdata = &format!("{}.warmup", self.pgdata);
736 0 : create_pgdata(pgdata)?;
737 :
738 : // Run initdb to completion
739 0 : info!("running initdb");
740 0 : let initdb_bin = Path::new(&self.pgbin).parent().unwrap().join("initdb");
741 0 : Command::new(initdb_bin)
742 0 : .args(["--pgdata", pgdata])
743 0 : .output()
744 0 : .expect("cannot start initdb process");
745 :
746 : // Write conf
747 : use std::io::Write;
748 0 : let conf_path = Path::new(pgdata).join("postgresql.conf");
749 0 : let mut file = std::fs::File::create(conf_path)?;
750 0 : writeln!(file, "shared_buffers=65536")?;
751 0 : writeln!(file, "port=51055")?; // Nobody should be connecting
752 0 : writeln!(file, "shared_preload_libraries = 'neon'")?;
753 :
754 : // Start postgres
755 0 : info!("starting postgres");
756 0 : let mut pg = maybe_cgexec(&self.pgbin)
757 0 : .args(["-D", pgdata])
758 0 : .spawn()
759 0 : .expect("cannot start postgres process");
760 0 :
761 0 : // Stop it when it's ready
762 0 : info!("waiting for postgres");
763 0 : wait_for_postgres(&mut pg, Path::new(pgdata))?;
764 : // SIGQUIT orders postgres to exit immediately. We use it rather than
765 : // SIGKILL to avoid orphaned processes prowling around while the datadir
766 : // is wiped.
767 0 : let pm_pid = Pid::from_raw(pg.id() as i32);
768 0 : kill(pm_pid, Signal::SIGQUIT)?;
769 0 : info!("sent SIGQUIT signal");
770 0 : pg.wait()?;
771 0 : info!("done prewarming");
772 :
773 : // clean up
774 0 : let _ok = fs::remove_dir_all(pgdata);
775 0 : Ok(())
776 0 : }
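 :
 : // The warmup above is roughly this shell sequence (sketch):
 : //
 : //   initdb --pgdata "$PGDATA.warmup"
 : //   postgres -D "$PGDATA.warmup" &   # with the minimal conf written above
 : //   # ...wait until it accepts connections...
 : //   kill -QUIT $!                    # PostgreSQL "immediate shutdown" mode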
777 :
778 : /// Start Postgres as a child process and wait for it to start accepting
779 : /// connections.
780 : ///
781 : /// Returns a handle to the child process and a handle to the logs thread.
782 : #[instrument(skip_all)]
783 : pub fn start_postgres(
784 : &self,
785 : storage_auth_token: Option<String>,
786 : ) -> Result<(std::process::Child, tokio::task::JoinHandle<Result<()>>)> {
787 : let pgdata_path = Path::new(&self.pgdata);
788 :
789 : // Run postgres as a child process.
790 : let mut pg = maybe_cgexec(&self.pgbin)
791 : .args(["-D", &self.pgdata])
792 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
793 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
794 : } else {
795 : vec![]
796 : })
797 : .stderr(Stdio::piped())
798 : .spawn()
799 : .expect("cannot start postgres process");
800 : PG_PID.store(pg.id(), Ordering::SeqCst);
801 :
802 : // Start a task to collect logs from stderr.
803 : let stderr = pg.stderr.take().expect("stderr should be captured");
804 : let logs_handle = handle_postgres_logs(stderr);
805 :
806 : wait_for_postgres(&mut pg, pgdata_path)?;
807 :
808 : Ok((pg, logs_handle))
809 : }
810 :
811 : /// Do post-configuration of the already started Postgres. This function spawns a background task to
812 : /// configure the database after applying the compute spec. Currently, it upgrades the neon extension
813 : /// version. In the future, it may upgrade all 3rd-party extensions.
814 : #[instrument(skip_all)]
815 : pub fn post_apply_config(&self) -> Result<()> {
816 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:post_apply_config"));
817 0 : tokio::spawn(async move {
818 0 : let res = async {
819 0 : let (mut client, connection) = conf.connect(NoTls).await?;
820 0 : tokio::spawn(async move {
821 0 : if let Err(e) = connection.await {
822 0 : eprintln!("connection error: {}", e);
823 0 : }
824 0 : });
825 0 :
826 0 : handle_neon_extension_upgrade(&mut client)
827 0 : .await
828 0 : .context("handle_neon_extension_upgrade")?;
829 0 : Ok::<_, anyhow::Error>(())
830 0 : }
831 0 : .await;
832 0 : if let Err(err) = res {
833 0 : error!("error while post_apply_config: {err:#}");
834 0 : }
835 0 : });
836 : Ok(())
837 : }
838 :
839 0 : pub fn get_conn_conf(&self, application_name: Option<&str>) -> postgres::Config {
840 0 : let mut conf = self.conn_conf.clone();
841 0 : if let Some(application_name) = application_name {
842 0 : conf.application_name(application_name);
843 0 : }
844 0 : conf
845 0 : }
846 :
847 0 : pub fn get_tokio_conn_conf(&self, application_name: Option<&str>) -> tokio_postgres::Config {
848 0 : let mut conf = self.tokio_conn_conf.clone();
849 0 : if let Some(application_name) = application_name {
850 0 : conf.application_name(application_name);
851 0 : }
852 0 : conf
853 0 : }
854 :
855 0 : pub async fn get_maintenance_client(
856 0 : conf: &tokio_postgres::Config,
857 0 : ) -> Result<tokio_postgres::Client> {
858 0 : let mut conf = conf.clone();
859 0 : conf.application_name("compute_ctl:apply_config");
860 :
861 0 : let (client, conn) = match conf.connect(NoTls).await {
862 : // If connection fails, it may be the old node with `zenith_admin` superuser.
863 : //
864 : // In this case we need to connect with the old `zenith_admin` name
865 : // and create a new user. We cannot simply rename the connected user,
866 : // but we can create a new one and grant it all privileges.
867 0 : Err(e) => match e.code() {
868 : Some(&SqlState::INVALID_PASSWORD)
869 : | Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => {
870 : // Connect with zenith_admin if cloud_admin could not authenticate
871 0 : info!(
872 0 : "cannot connect to postgres: {}, retrying with `zenith_admin` username",
873 : e
874 : );
875 0 : let mut zenith_admin_conf = postgres::config::Config::from(conf.clone());
876 0 : zenith_admin_conf.application_name("compute_ctl:apply_config");
877 0 : zenith_admin_conf.user("zenith_admin");
878 :
879 0 : let mut client =
880 0 : zenith_admin_conf.connect(NoTls)
881 0 : .context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
882 :
883 : // Disable forwarding so that users don't get a cloud_admin role
884 0 : let mut func = || {
885 0 : client.simple_query("SET neon.forward_ddl = false")?;
886 0 : client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
887 0 : client.simple_query("GRANT zenith_admin TO cloud_admin")?;
888 0 : Ok::<_, anyhow::Error>(())
889 0 : };
890 0 : func().context("apply_config setup cloud_admin")?;
891 :
892 0 : drop(client);
893 0 :
894 0 : // Reconnect with connstring with expected name
895 0 : conf.connect(NoTls).await?
896 : }
897 0 : _ => return Err(e.into()),
898 : },
899 0 : Ok((client, conn)) => (client, conn),
900 : };
901 :
902 0 : spawn(async move {
903 0 : if let Err(e) = conn.await {
904 0 : error!("maintenance client connection error: {}", e);
905 0 : }
906 0 : });
907 0 :
908 0 : // Disable DDL forwarding because control plane already knows about the roles/databases
909 0 : // we're about to modify.
910 0 : client
911 0 : .simple_query("SET neon.forward_ddl = false")
912 0 : .await
913 0 : .context("apply_config SET neon.forward_ddl = false")?;
914 :
915 0 : Ok(client)
916 0 : }
917 :
918 : /// Do initial configuration of the already started Postgres.
919 : #[instrument(skip_all)]
920 : pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
921 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:apply_config"));
922 :
923 : let conf = Arc::new(conf);
924 : let spec = Arc::new(
925 : compute_state
926 : .pspec
927 : .as_ref()
928 : .expect("spec must be set")
929 : .spec
930 : .clone(),
931 : );
932 :
933 : let max_concurrent_connections = self.max_service_connections(compute_state, &spec);
934 :
935 : // Merge-apply spec & changes to PostgreSQL state.
936 : self.apply_spec_sql(spec.clone(), conf.clone(), max_concurrent_connections)?;
937 :
938 : if let Some(local_proxy) = &spec.clone().local_proxy_config {
939 : info!("configuring local_proxy");
940 : local_proxy::configure(local_proxy).context("apply_config local_proxy")?;
941 : }
942 :
943 : // Run migrations separately to not hold up cold starts
944 0 : tokio::spawn(async move {
945 0 : let mut conf = conf.as_ref().clone();
946 0 : conf.application_name("compute_ctl:migrations");
947 0 :
948 0 : match conf.connect(NoTls).await {
949 0 : Ok((mut client, connection)) => {
950 0 : tokio::spawn(async move {
951 0 : if let Err(e) = connection.await {
952 0 : eprintln!("connection error: {}", e);
953 0 : }
954 0 : });
955 0 : if let Err(e) = handle_migrations(&mut client).await {
956 0 : error!("Failed to run migrations: {}", e);
957 0 : }
958 : }
959 0 : Err(e) => {
960 0 : error!(
961 0 : "Failed to connect to the compute for running migrations: {}",
962 : e
963 : );
964 : }
965 : };
966 0 : });
967 :
968 : Ok::<(), anyhow::Error>(())
969 : }
970 :
971 : // A wrapper around `pg_ctl reload`. Note that right now we don't use
972 : // `pg_ctl` for start / stop.
973 : #[instrument(skip_all)]
974 : fn pg_reload_conf(&self) -> Result<()> {
975 : let pgctl_bin = Path::new(&self.pgbin).parent().unwrap().join("pg_ctl");
976 : Command::new(pgctl_bin)
977 : .args(["reload", "-D", &self.pgdata])
978 : .output()
979 : .expect("cannot run pg_ctl process");
980 : Ok(())
981 : }
982 :
983 : /// Similar to `apply_config()`, but performs a slightly different sequence of operations,
984 : /// as it's used to reconfigure a previously started and configured Postgres node.
985 : #[instrument(skip_all)]
986 : pub fn reconfigure(&self) -> Result<()> {
987 : let spec = self.state.lock().unwrap().pspec.clone().unwrap().spec;
988 :
989 : if let Some(ref pgbouncer_settings) = spec.pgbouncer_settings {
990 : info!("tuning pgbouncer");
991 :
992 : // Spawn a background task to do the tuning,
993 : // so that we don't block the main thread that starts Postgres.
994 : let pgbouncer_settings = pgbouncer_settings.clone();
995 0 : tokio::spawn(async move {
996 0 : let res = tune_pgbouncer(pgbouncer_settings).await;
997 0 : if let Err(err) = res {
998 0 : error!("error while tuning pgbouncer: {err:?}");
999 0 : }
1000 0 : });
1001 : }
1002 :
1003 : if let Some(ref local_proxy) = spec.local_proxy_config {
1004 : info!("configuring local_proxy");
1005 :
1006 : // Spawn a background task to do the configuration,
1007 : // so that we don't block the main thread that starts Postgres.
1008 : let local_proxy = local_proxy.clone();
1009 0 : tokio::spawn(async move {
1010 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
1011 0 : error!("error while configuring local_proxy: {err:?}");
1012 0 : }
1013 0 : });
1014 : }
1015 :
1016 : // Write new config
1017 : let pgdata_path = Path::new(&self.pgdata);
1018 : let postgresql_conf_path = pgdata_path.join("postgresql.conf");
1019 : config::write_postgres_conf(&postgresql_conf_path, &spec, self.internal_http_port)?;
1020 :
1021 : if !spec.skip_pg_catalog_updates {
1022 : let max_concurrent_connections = spec.reconfigure_concurrency;
1023 : // Temporarily reset max_cluster_size in config
1024 : // to avoid the possibility of hitting the limit, while we are reconfiguring:
1025 : // creating new extensions, roles, etc.
1026 0 : config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
1027 0 : self.pg_reload_conf()?;
1028 :
1029 0 : if spec.mode == ComputeMode::Primary {
1030 0 : let mut conf = tokio_postgres::Config::from_str(self.connstr.as_str()).unwrap();
1031 0 : conf.application_name("apply_config");
1032 0 : let conf = Arc::new(conf);
1033 0 :
1034 0 : let spec = Arc::new(spec.clone());
1035 0 :
1036 0 : self.apply_spec_sql(spec, conf, max_concurrent_connections)?;
1037 0 : }
1038 :
1039 0 : Ok(())
1040 0 : })?;
1041 : }
1042 :
1043 : self.pg_reload_conf()?;
1044 :
1045 : let unknown_op = "unknown".to_string();
1046 : let op_id = spec.operation_uuid.as_ref().unwrap_or(&unknown_op);
1047 : info!(
1048 : "finished reconfiguration of compute node for operation {}",
1049 : op_id
1050 : );
1051 :
1052 : Ok(())
1053 : }
1054 :
1055 : #[instrument(skip_all)]
1056 : pub fn start_compute(
1057 : &self,
1058 : ) -> Result<(std::process::Child, tokio::task::JoinHandle<Result<()>>)> {
1059 : let compute_state = self.state.lock().unwrap().clone();
1060 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
1061 : info!(
1062 : "starting compute for project {}, operation {}, tenant {}, timeline {}",
1063 : pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None"),
1064 : pspec.spec.operation_uuid.as_deref().unwrap_or("None"),
1065 : pspec.tenant_id,
1066 : pspec.timeline_id,
1067 : );
1068 :
1069 : // tune pgbouncer
1070 : if let Some(pgbouncer_settings) = &pspec.spec.pgbouncer_settings {
1071 : info!("tuning pgbouncer");
1072 :
1073 : // Spawn a background task to do the tuning,
1074 : // so that we don't block the main thread that starts Postgres.
1075 : let pgbouncer_settings = pgbouncer_settings.clone();
1076 0 : let _handle = tokio::spawn(async move {
1077 0 : let res = tune_pgbouncer(pgbouncer_settings).await;
1078 0 : if let Err(err) = res {
1079 0 : error!("error while tuning pgbouncer: {err:?}");
1080 0 : }
1081 0 : });
1082 : }
1083 :
1084 : if let Some(local_proxy) = &pspec.spec.local_proxy_config {
1085 : info!("configuring local_proxy");
1086 :
1087 : // Spawn a background task to do the configuration,
1088 : // so that we don't block the main thread that starts Postgres.
1089 : let local_proxy = local_proxy.clone();
1090 0 : let _handle = tokio::spawn(async move {
1091 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
1092 0 : error!("error while configuring local_proxy: {err:?}");
1093 0 : }
1094 0 : });
1095 : }
1096 :
1097 : info!(
1098 : "start_compute spec.remote_extensions {:?}",
1099 : pspec.spec.remote_extensions
1100 : );
1101 :
1102 : // This part is sync, because we need to download any remote
1103 : // shared_preload_libraries before postgres starts
1104 : if let Some(remote_extensions) = &pspec.spec.remote_extensions {
1105 : // First, create control files for all available extensions
1106 : extension_server::create_control_files(remote_extensions, &self.pgbin);
1107 :
1108 : let library_load_start_time = Utc::now();
1109 : let rt = tokio::runtime::Handle::current();
1110 : let remote_ext_metrics = rt.block_on(self.prepare_preload_libraries(&pspec.spec))?;
1111 :
1112 : let library_load_time = Utc::now()
1113 : .signed_duration_since(library_load_start_time)
1114 : .to_std()
1115 : .unwrap()
1116 : .as_millis() as u64;
1117 : let mut state = self.state.lock().unwrap();
1118 : state.metrics.load_ext_ms = library_load_time;
1119 : state.metrics.num_ext_downloaded = remote_ext_metrics.num_ext_downloaded;
1120 : state.metrics.largest_ext_size = remote_ext_metrics.largest_ext_size;
1121 : state.metrics.total_ext_download_size = remote_ext_metrics.total_ext_download_size;
1122 : info!(
1123 : "Loading shared_preload_libraries took {:?}ms",
1124 : library_load_time
1125 : );
1126 : info!("{:?}", remote_ext_metrics);
1127 : }
1128 :
1129 : self.prepare_pgdata(&compute_state)?;
1130 :
1131 : let start_time = Utc::now();
1132 : let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
1133 :
1134 : let config_time = Utc::now();
1135 : if pspec.spec.mode == ComputeMode::Primary {
1136 : if !pspec.spec.skip_pg_catalog_updates {
1137 : let pgdata_path = Path::new(&self.pgdata);
1138 : // temporarily reset max_cluster_size in config
1139 : // to avoid the possibility of hitting the limit, while we are applying config:
1140 : // creating new extensions, roles, etc...
1141 : config::with_compute_ctl_tmp_override(
1142 : pgdata_path,
1143 : "neon.max_cluster_size=-1",
1144 0 : || {
1145 0 : self.pg_reload_conf()?;
1146 :
1147 0 : self.apply_config(&compute_state)?;
1148 :
1149 0 : Ok(())
1150 0 : },
1151 : )?;
1152 :
1153 : let postgresql_conf_path = pgdata_path.join("postgresql.conf");
1154 : if config::line_in_file(
1155 : &postgresql_conf_path,
1156 : "neon.disable_logical_replication_subscribers=false",
1157 : )? {
1158 : info!(
1159 : "updated postgresql.conf to set neon.disable_logical_replication_subscribers=false"
1160 : );
1161 : }
1162 : self.pg_reload_conf()?;
1163 : }
1164 : self.post_apply_config()?;
1165 :
1166 : let conf = self.get_conn_conf(None);
1167 0 : tokio::task::spawn_blocking(|| {
1168 0 : let res = get_installed_extensions(conf);
1169 0 : match res {
1170 0 : Ok(extensions) => {
1171 0 : info!(
1172 0 : "[NEON_EXT_STAT] {}",
1173 0 : serde_json::to_string(&extensions)
1174 0 : .expect("failed to serialize extensions list")
1175 : );
1176 : }
1177 0 : Err(err) => error!("could not get installed extensions: {err:?}"),
1178 : }
1179 0 : });
1180 : }
1181 :
1182 : let startup_end_time = Utc::now();
1183 : {
1184 : let mut state = self.state.lock().unwrap();
1185 : state.metrics.start_postgres_ms = config_time
1186 : .signed_duration_since(start_time)
1187 : .to_std()
1188 : .unwrap()
1189 : .as_millis() as u64;
1190 : state.metrics.config_ms = startup_end_time
1191 : .signed_duration_since(config_time)
1192 : .to_std()
1193 : .unwrap()
1194 : .as_millis() as u64;
1195 : state.metrics.total_startup_ms = startup_end_time
1196 : .signed_duration_since(compute_state.start_time)
1197 : .to_std()
1198 : .unwrap()
1199 : .as_millis() as u64;
1200 : }
1201 : self.set_status(ComputeStatus::Running);
1202 :
1203 : info!(
1204 : "finished configuration of compute for project {}",
1205 : pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None")
1206 : );
1207 :
1208 : // Log metrics so that we can search for slow operations in logs
1209 : let metrics = {
1210 : let state = self.state.lock().unwrap();
1211 : state.metrics.clone()
1212 : };
1213 : info!(?metrics, "compute start finished");
1214 :
1215 : Ok(pg_process)
1216 : }
1217 :
1218 : /// Update `last_active` in the shared state, but only if the new value is more recent.
1219 0 : pub fn update_last_active(&self, last_active: Option<DateTime<Utc>>) {
1220 0 : let mut state = self.state.lock().unwrap();
1221 0 : // NB: `Some(<DateTime>)` is always greater than `None`.
1222 0 : if last_active > state.last_active {
1223 0 : state.last_active = last_active;
1224 0 : debug!("set the last compute activity time to: {:?}", last_active);
1225 0 : }
1226 0 : }
1227 :
1228 : // Look for core dumps and collect backtraces.
1229 : //
1230 : // EKS worker nodes have the following core dump settings:
1231 : // /proc/sys/kernel/core_pattern -> core
1232 : // /proc/sys/kernel/core_uses_pid -> 1
1233 : // ulimit -c -> unlimited
1234 : // which results in core dumps being written to postgres data directory as core.<pid>.
1235 : //
1236 : // Use that as the default location and pattern, except on macOS, where
1237 : // core dumps are written to the /cores/ directory by default.
1238 : //
1239 : // With default Linux settings, the core dump file is called just "core", so check for
1240 : // that too.
1241 0 : pub fn check_for_core_dumps(&self) -> Result<()> {
1242 0 : let core_dump_dir = match std::env::consts::OS {
1243 0 : "macos" => Path::new("/cores/"),
1244 0 : _ => Path::new(&self.pgdata),
1245 : };
1246 :
1247 : // Collect core dump paths if any
1248 0 : info!("checking for core dumps in {}", core_dump_dir.display());
1249 0 : let files = fs::read_dir(core_dump_dir)?;
1250 0 : let cores = files.filter_map(|entry| {
1251 0 : let entry = entry.ok()?;
1252 :
1253 0 : let is_core_dump = match entry.file_name().to_str()? {
1254 0 : n if n.starts_with("core.") => true,
1255 0 : "core" => true,
1256 0 : _ => false,
1257 : };
1258 0 : if is_core_dump {
1259 0 : Some(entry.path())
1260 : } else {
1261 0 : None
1262 : }
1263 0 : });
1264 :
1265 : // Print backtrace for each core dump
1266 0 : for core_path in cores {
1267 0 : warn!(
1268 0 : "core dump found: {}, collecting backtrace",
1269 0 : core_path.display()
1270 : );
1271 :
1272 : // Try first with gdb
1273 0 : let backtrace = Command::new("gdb")
1274 0 : .args(["--batch", "-q", "-ex", "bt", &self.pgbin])
1275 0 : .arg(&core_path)
1276 0 : .output();
1277 :
1278 : // Try lldb if no gdb is found -- that is handy for local testing on macOS
1279 0 : let backtrace = match backtrace {
1280 0 : Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
1281 0 : warn!("cannot find gdb, trying lldb");
1282 0 : Command::new("lldb")
1283 0 : .arg("-c")
1284 0 : .arg(&core_path)
1285 0 : .args(["--batch", "-o", "bt all", "-o", "quit"])
1286 0 : .output()
1287 : }
1288 0 : _ => backtrace,
1289 0 : }?;
1290 :
1291 0 : warn!(
1292 0 : "core dump backtrace: {}",
1293 0 : String::from_utf8_lossy(&backtrace.stdout)
1294 : );
1295 0 : warn!(
1296 0 : "debugger stderr: {}",
1297 0 : String::from_utf8_lossy(&backtrace.stderr)
1298 : );
1299 : }
1300 :
1301 0 : Ok(())
1302 0 : }
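 :
 : // The two debugger invocations above correspond to (with a hypothetical
 : // core file name):
 : //
 : //   gdb --batch -q -ex bt <pgbin> core.1234
 : //   lldb -c core.1234 --batch -o "bt all" -o quit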
1303 :
1304 : /// Select `pg_stat_statements` data and return it as stringified JSON
1305 0 : pub async fn collect_insights(&self) -> String {
1306 0 : let mut result_rows: Vec<String> = Vec::new();
1307 0 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:collect_insights"));
1308 0 : let connect_result = conf.connect(NoTls).await;
1309 0 : let (client, connection) = connect_result.unwrap();
1310 0 : tokio::spawn(async move {
1311 0 : if let Err(e) = connection.await {
1312 0 : eprintln!("connection error: {}", e);
1313 0 : }
1314 0 : });
1315 0 : let result = client
1316 0 : .simple_query(
1317 0 : "SELECT
1318 0 : row_to_json(pg_stat_statements)
1319 0 : FROM
1320 0 : pg_stat_statements
1321 0 : WHERE
1322 0 : userid != 'cloud_admin'::regrole::oid
1323 0 : ORDER BY
1324 0 : (mean_exec_time + mean_plan_time) DESC
1325 0 : LIMIT 100",
1326 0 : )
1327 0 : .await;
1328 :
1329 0 : if let Ok(raw_rows) = result {
1330 0 : for message in raw_rows.iter() {
1331 0 : if let postgres::SimpleQueryMessage::Row(row) = message {
1332 0 : if let Some(json) = row.get(0) {
1333 0 : result_rows.push(json.to_string());
1334 0 : }
1335 0 : }
1336 : }
1337 :
1338 0 : format!("{{\"pg_stat_statements\": [{}]}}", result_rows.join(","))
1339 : } else {
1340 0 : "{{\"pg_stat_statements\": []}}".to_string()
1341 : }
1342 0 : }
1343 :
1344 : // Download an archive, unpack it, and place the files in the correct locations
1345 0 : pub async fn download_extension(
1346 0 : &self,
1347 0 : real_ext_name: String,
1348 0 : ext_path: RemotePath,
1349 0 : ) -> Result<u64, DownloadError> {
1350 0 : let ext_remote_storage =
1351 0 : self.ext_remote_storage
1352 0 : .as_ref()
1353 0 : .ok_or(DownloadError::BadInput(anyhow::anyhow!(
1354 0 : "Remote extensions storage is not configured",
1355 0 : )))?;
1356 :
1357 0 : let ext_archive_name = ext_path.object_name().expect("bad path");
1358 0 :
1359 0 : let mut first_try = false;
1360 0 : if !self
1361 0 : .ext_download_progress
1362 0 : .read()
1363 0 : .expect("lock err")
1364 0 : .contains_key(ext_archive_name)
1365 0 : {
1366 0 : self.ext_download_progress
1367 0 : .write()
1368 0 : .expect("lock err")
1369 0 : .insert(ext_archive_name.to_string(), (Utc::now(), false));
1370 0 : first_try = true;
1371 0 : }
1372 0 : let (download_start, download_completed) =
1373 0 : self.ext_download_progress.read().expect("lock err")[ext_archive_name];
1374 0 : let start_time_delta = Utc::now()
1375 0 : .signed_duration_since(download_start)
1376 0 : .to_std()
1377 0 : .unwrap()
1378 0 : .as_millis() as u64;
1379 :
1380 : // how long to wait for extension download if it was started by another process
1381 : const HANG_TIMEOUT: u64 = 3000; // milliseconds
1382 :
1383 0 : if download_completed {
1384 0 : info!("extension already downloaded, skipping re-download");
1385 0 : return Ok(0);
1386 0 : } else if start_time_delta < HANG_TIMEOUT && !first_try {
1387 0 : info!(
1388 0 : "download {ext_archive_name} already started by another process, hanging untill completion or timeout"
1389 : );
1390 0 : let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500));
1391 : loop {
1392 0 : info!("waiting for download");
1393 0 : interval.tick().await;
1394 0 : let (_, download_completed_now) =
1395 0 : self.ext_download_progress.read().expect("lock")[ext_archive_name];
1396 0 : if download_completed_now {
1397 0 : info!("download finished by whoever else downloaded it");
1398 0 : return Ok(0);
1399 0 : }
1400 : }
1401 : // NOTE: the above loop will get terminated
1402 : // based on the timeout of the download function
1403 0 : }
1404 0 :
1405 0 : // If the extension hasn't been downloaded before, or the previous
1406 0 : // download attempt started at least HANG_TIMEOUT ms ago,
1407 0 : // then we try to download it here
1408 0 : info!("downloading new extension {ext_archive_name}");
1409 :
1410 0 : let download_size = extension_server::download_extension(
1411 0 : &real_ext_name,
1412 0 : &ext_path,
1413 0 : ext_remote_storage,
1414 0 : &self.pgbin,
1415 0 : )
1416 0 : .await
1417 0 : .map_err(DownloadError::Other);
1418 0 :
1419 0 : if download_size.is_ok() {
1420 0 : self.ext_download_progress
1421 0 : .write()
1422 0 : .expect("bad lock")
1423 0 : .insert(ext_archive_name.to_string(), (download_start, true));
1424 0 : }
1425 :
1426 0 : download_size
1427 0 : }
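 :
 : // To summarize the coordination above: the first request records
 : // (start_time, false) in `ext_download_progress` and performs the download;
 : // a request arriving within HANG_TIMEOUT ms polls every 500ms until the
 : // entry flips to (start_time, true); a request arriving later than that
 : // assumes the previous attempt is stuck and re-downloads.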
1428 :
1429 0 : pub async fn set_role_grants(
1430 0 : &self,
1431 0 : db_name: &PgIdent,
1432 0 : schema_name: &PgIdent,
1433 0 : privileges: &[Privilege],
1434 0 : role_name: &PgIdent,
1435 0 : ) -> Result<()> {
1436 : use tokio_postgres::NoTls;
1437 :
1438 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:set_role_grants"));
1439 0 : conf.dbname(db_name);
1440 :
1441 0 : let (db_client, conn) = conf
1442 0 : .connect(NoTls)
1443 0 : .await
1444 0 : .context("Failed to connect to the database")?;
1445 0 : tokio::spawn(conn);
1446 0 :
1447 0 : // TODO: support other types of grants apart from schemas?
1448 0 : let query = format!(
1449 0 : "GRANT {} ON SCHEMA {} TO {}",
1450 0 : privileges
1451 0 : .iter()
1452 0 : // Should not be quoted, as it's part of the command;
1453 0 : // it is already sanitized, so it's OK.
1454 0 : .map(|p| p.as_str())
1455 0 : .collect::<Vec<&'static str>>()
1456 0 : .join(", "),
1457 0 : // quote the schema and role name as identifiers to sanitize them.
1458 0 : schema_name.pg_quote(),
1459 0 : role_name.pg_quote(),
1460 0 : );
1461 0 : db_client
1462 0 : .simple_query(&query)
1463 0 : .await
1464 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
1465 :
1466 0 : Ok(())
1467 0 : }
1468 :
1469 0 : pub async fn install_extension(
1470 0 : &self,
1471 0 : ext_name: &PgIdent,
1472 0 : db_name: &PgIdent,
1473 0 : ext_version: ExtVersion,
1474 0 : ) -> Result<ExtVersion> {
1475 : use tokio_postgres::NoTls;
1476 :
1477 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:install_extension"));
1478 0 : conf.dbname(db_name);
1479 :
1480 0 : let (db_client, conn) = conf
1481 0 : .connect(NoTls)
1482 0 : .await
1483 0 : .context("Failed to connect to the database")?;
1484 0 : tokio::spawn(conn);
1485 0 :
1486 0 : let version_query = "SELECT extversion FROM pg_extension WHERE extname = $1";
1487 0 : let version: Option<ExtVersion> = db_client
1488 0 : .query_opt(version_query, &[&ext_name])
1489 0 : .await
1490 0 : .with_context(|| format!("Failed to execute query: {}", version_query))?
1491 0 : .map(|row| row.get(0));
1492 0 :
1493 0 : // sanitize the inputs as postgres idents.
1494 0 : let ext_name: String = ext_name.pg_quote();
1495 0 : let quoted_version: String = ext_version.pg_quote();
1496 :
1497 0 : if let Some(installed_version) = version {
1498 0 : if installed_version == ext_version {
1499 0 : return Ok(installed_version);
1500 0 : }
1501 0 : let query = format!("ALTER EXTENSION {ext_name} UPDATE TO {quoted_version}");
1502 0 : db_client
1503 0 : .simple_query(&query)
1504 0 : .await
1505 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
1506 : } else {
1507 0 : let query =
1508 0 : format!("CREATE EXTENSION IF NOT EXISTS {ext_name} WITH VERSION {quoted_version}");
1509 0 : db_client
1510 0 : .simple_query(&query)
1511 0 : .await
1512 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
1513 : }
1514 :
1515 0 : Ok(ext_version)
1516 0 : }
1517 :
1518 0 : pub async fn prepare_preload_libraries(
1519 0 : &self,
1520 0 : spec: &ComputeSpec,
1521 0 : ) -> Result<RemoteExtensionMetrics> {
1522 0 : if self.ext_remote_storage.is_none() {
1523 0 : return Ok(RemoteExtensionMetrics {
1524 0 : num_ext_downloaded: 0,
1525 0 : largest_ext_size: 0,
1526 0 : total_ext_download_size: 0,
1527 0 : });
1528 0 : }
1529 0 : let remote_extensions = spec
1530 0 : .remote_extensions
1531 0 : .as_ref()
1532 0 : .ok_or(anyhow::anyhow!("Remote extensions are not configured"))?;
1533 :
1534 0 : info!("parse shared_preload_libraries from spec.cluster.settings");
1535 0 : let mut libs_vec = Vec::new();
1536 0 : if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
1537 0 : libs_vec = libs
1538 0 : .split(&[',', '\'', ' '])
1539 0 : .filter(|s| *s != "neon" && !s.is_empty())
1540 0 : .map(str::to_string)
1541 0 : .collect();
1542 0 : }
1543 0 : info!("parse shared_preload_libraries from provided postgresql.conf");
1544 :
1545 : // that is used in neon_local and python tests
1546 0 : if let Some(conf) = &spec.cluster.postgresql_conf {
1547 0 : let conf_lines = conf.split('\n').collect::<Vec<&str>>();
1548 0 : let mut shared_preload_libraries_line = "";
1549 0 : for line in conf_lines {
1550 0 : if line.starts_with("shared_preload_libraries") {
1551 0 : shared_preload_libraries_line = line;
1552 0 : }
1553 : }
1554 0 : let mut preload_libs_vec = Vec::new();
1555 0 : if let Some(libs) = shared_preload_libraries_line.split("='").nth(1) {
1556 0 : preload_libs_vec = libs
1557 0 : .split(&[',', '\'', ' '])
1558 0 : .filter(|s| *s != "neon" && !s.is_empty())
1559 0 : .map(str::to_string)
1560 0 : .collect();
1561 0 : }
1562 0 : libs_vec.extend(preload_libs_vec);
1563 0 : }
1564 :
1565 : // Don't try to download libraries that are not in the index.
1566 : // Assume that they are already present locally.
1567 0 : libs_vec.retain(|lib| remote_extensions.library_index.contains_key(lib));
1568 0 :
1569 0 : info!("Downloading to shared preload libraries: {:?}", &libs_vec);
1570 :
1571 0 : let mut download_tasks = Vec::new();
1572 0 : for library in &libs_vec {
1573 0 : let (ext_name, ext_path) =
1574 0 : remote_extensions.get_ext(library, true, &self.build_tag, &self.pgversion)?;
1575 0 : download_tasks.push(self.download_extension(ext_name, ext_path));
1576 : }
1577 0 : let results = join_all(download_tasks).await;
1578 :
1579 0 : let mut remote_ext_metrics = RemoteExtensionMetrics {
1580 0 : num_ext_downloaded: 0,
1581 0 : largest_ext_size: 0,
1582 0 : total_ext_download_size: 0,
1583 0 : };
1584 0 : for result in results {
1585 0 : let download_size = match result {
1586 0 : Ok(res) => {
1587 0 : remote_ext_metrics.num_ext_downloaded += 1;
1588 0 : res
1589 : }
1590 0 : Err(err) => {
1591 0 : // if we failed to download an extension, we don't want to fail the whole
1592 0 : // process, but we do want to log the error
1593 0 : error!("Failed to download extension: {}", err);
1594 0 : 0
1595 : }
1596 : };
1597 :
1598 0 : remote_ext_metrics.largest_ext_size =
1599 0 : std::cmp::max(remote_ext_metrics.largest_ext_size, download_size);
1600 0 : remote_ext_metrics.total_ext_download_size += download_size;
1601 : }
1602 0 : Ok(remote_ext_metrics)
1603 0 : }
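 :
 : // For example (hypothetical setting), shared_preload_libraries =
 : // 'neon,pg_stat_statements' yields libs_vec = ["pg_stat_statements"]:
 : // splitting on [',', '\'', ' '] strips the quotes, empty fragments are
 : // dropped, and "neon" is excluded since it is not downloaded remotely.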
1604 :
1605 : /// Waits until the current thread receives a state-changed notification and
1606 : /// the pageserver connection string has changed.
1607 : ///
1608 : /// The operation will time out after a specified duration.
1609 0 : pub fn wait_timeout_while_pageserver_connstr_unchanged(&self, duration: Duration) {
1610 0 : let state = self.state.lock().unwrap();
1611 0 : let old_pageserver_connstr = state
1612 0 : .pspec
1613 0 : .as_ref()
1614 0 : .expect("spec must be set")
1615 0 : .pageserver_connstr
1616 0 : .clone();
1617 0 : let mut unchanged = true;
1618 0 : let _ = self
1619 0 : .state_changed
1620 0 : .wait_timeout_while(state, duration, |s| {
1621 0 : let pageserver_connstr = &s
1622 0 : .pspec
1623 0 : .as_ref()
1624 0 : .expect("spec must be set")
1625 0 : .pageserver_connstr;
1626 0 : unchanged = pageserver_connstr == &old_pageserver_connstr;
1627 0 : unchanged
1628 0 : })
1629 0 : .unwrap();
1630 0 : if !unchanged {
1631 0 : info!("Pageserver config changed");
1632 0 : }
1633 0 : }
1634 : }
1635 :
1636 0 : pub fn forward_termination_signal() {
1637 0 : let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
1638 0 : if ss_pid != 0 {
1639 0 : let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
1640 0 : kill(ss_pid, Signal::SIGTERM).ok();
1641 0 : }
1642 0 : let pg_pid = PG_PID.load(Ordering::SeqCst);
1643 0 : if pg_pid != 0 {
1644 0 : let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
1645 0 : // Use 'fast' shutdown (SIGINT) because it also creates a shutdown checkpoint, which is important for
1646 0 : // ROs to get a list of running xacts faster instead of going through the CLOG.
1647 0 : // See https://www.postgresql.org/docs/current/server-shutdown.html for the list of modes and signals.
1648 0 : kill(pg_pid, Signal::SIGINT).ok();
1649 0 : }
1650 0 : }