Line data Source code
1 : use anyhow::{Context, Result};
2 : use chrono::{DateTime, Utc};
3 : use compute_api::privilege::Privilege;
4 : use compute_api::responses::{
5 : ComputeConfig, ComputeCtlConfig, ComputeMetrics, ComputeStatus, LfcOffloadState,
6 : LfcPrewarmState, PromoteState, TlsConfig,
7 : };
8 : use compute_api::spec::{
9 : ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, ExtVersion, PageserverProtocol, PgIdent,
10 : };
11 : use futures::StreamExt;
12 : use futures::future::join_all;
13 : use futures::stream::FuturesUnordered;
14 : use itertools::Itertools;
15 : use nix::sys::signal::{Signal, kill};
16 : use nix::unistd::Pid;
17 : use once_cell::sync::Lazy;
18 : use pageserver_page_api::{self as page_api, BaseBackupCompression};
19 : use postgres;
20 : use postgres::NoTls;
21 : use postgres::error::SqlState;
22 : use remote_storage::{DownloadError, RemotePath};
23 : use std::collections::{HashMap, HashSet};
24 : use std::os::unix::fs::{PermissionsExt, symlink};
25 : use std::path::Path;
26 : use std::process::{Command, Stdio};
27 : use std::str::FromStr;
28 : use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
29 : use std::sync::{Arc, Condvar, Mutex, RwLock};
30 : use std::time::{Duration, Instant};
31 : use std::{env, fs};
32 : use tokio::{spawn, sync::watch, task::JoinHandle, time};
33 : use tracing::{Instrument, debug, error, info, instrument, warn};
34 : use url::Url;
35 : use utils::id::{TenantId, TimelineId};
36 : use utils::lsn::Lsn;
37 : use utils::measured_stream::MeasuredReader;
38 : use utils::pid_file;
39 : use utils::shard::{ShardCount, ShardIndex, ShardNumber};
40 :
41 : use crate::configurator::launch_configurator;
42 : use crate::disk_quota::set_disk_quota;
43 : use crate::installed_extensions::get_installed_extensions;
44 : use crate::logger::startup_context_from_env;
45 : use crate::lsn_lease::launch_lsn_lease_bg_task_for_static;
46 : use crate::metrics::COMPUTE_CTL_UP;
47 : use crate::monitor::launch_monitor;
48 : use crate::pg_helpers::*;
49 : use crate::pgbouncer::*;
50 : use crate::rsyslog::{
51 : PostgresLogsRsyslogConfig, configure_audit_rsyslog, configure_postgres_logs_export,
52 : launch_pgaudit_gc,
53 : };
54 : use crate::spec::*;
55 : use crate::swap::resize_swap;
56 : use crate::sync_sk::{check_if_synced, ping_safekeeper};
57 : use crate::tls::watch_cert_for_changes;
58 : use crate::{config, extension_server, local_proxy};
59 :
60 : pub static SYNC_SAFEKEEPERS_PID: AtomicU32 = AtomicU32::new(0);
61 : pub static PG_PID: AtomicU32 = AtomicU32::new(0);
62 : // This is an arbitrary build tag. It is fine as a default and for testing
63 : // purposes in case the BUILD_TAG environment variable is not set.
64 : const BUILD_TAG_DEFAULT: &str = "latest";
65 : /// Build tag/version of the compute node binaries/image. It's tricky and ugly
66 : /// to pass it everywhere as a part of `ComputeNodeParams`, so we use a
67 : /// global static variable.
68 0 : pub static BUILD_TAG: Lazy<String> = Lazy::new(|| {
69 0 : option_env!("BUILD_TAG")
70 0 : .unwrap_or(BUILD_TAG_DEFAULT)
71 0 : .to_string()
72 0 : });
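// The unit is presumably seconds: 3600 = one hour between collection runs.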
73 : const DEFAULT_INSTALLED_EXTENSIONS_COLLECTION_INTERVAL: u64 = 3600;
74 :
75 : /// Static configuration params that don't change after startup. These mostly
76 : /// come from the CLI args, or are derived from them.
77 : #[derive(Clone, Debug)]
78 : pub struct ComputeNodeParams {
79 : /// The ID of the compute
80 : pub compute_id: String,
81 :
82 : /// The `Url` type maintains proper escaping.
83 : pub connstr: url::Url,
84 :
85 : /// The name of the 'weak' superuser role, which we grant to our users.
86 : /// It follows the allow-list approach, i.e., we take a standard role
87 : /// and grant it extra permissions with explicit GRANTs here and there,
88 : /// and in core patches.
89 : pub privileged_role_name: String,
90 :
91 : pub resize_swap_on_bind: bool,
92 : pub set_disk_quota_for_fs: Option<String>,
93 :
94 : // VM monitor parameters
95 : #[cfg(target_os = "linux")]
96 : pub filecache_connstr: String,
97 : #[cfg(target_os = "linux")]
98 : pub cgroup: String,
99 : #[cfg(target_os = "linux")]
100 : pub vm_monitor_addr: String,
101 :
102 : pub pgdata: String,
103 : pub pgbin: String,
104 : pub pgversion: String,
105 :
106 : /// The port that the compute's external HTTP server listens on
107 : pub external_http_port: u16,
108 : /// The port that the compute's internal HTTP server listens on
109 : pub internal_http_port: u16,
110 :
111 : /// The address of the extension storage proxy gateway.
112 : pub remote_ext_base_url: Option<Url>,
113 :
114 : /// Interval for installed extensions collection
115 : pub installed_extensions_collection_interval: Arc<AtomicU64>,
116 : /// Hadron instance ID of the compute node.
117 : pub instance_id: Option<String>,
118 : /// Timeout of PG compute startup in the Init state.
119 : pub pg_init_timeout: Option<Duration>,
120 : // Path to the `pg_isready` binary.
121 : pub pg_isready_bin: String,
122 : pub lakebase_mode: bool,
123 : }
124 :
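/// Handle to a spawned background task, kept so that the task can be
/// terminated later (see the `terminate_*` helpers further down).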
125 : type TaskHandle = Mutex<Option<JoinHandle<()>>>;
126 :
127 : /// Compute node info shared across several `compute_ctl` threads.
128 : pub struct ComputeNode {
129 : pub params: ComputeNodeParams,
130 :
131 : // We connect to Postgres from many different places, so build configs once
132 : // and reuse them where needed. These are derived from 'params.connstr'.
133 : pub conn_conf: postgres::config::Config,
134 : pub tokio_conn_conf: tokio_postgres::config::Config,
135 :
136 : /// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
137 : /// To allow the HTTP API server to serve status requests while configuration
138 : /// is in progress, the lock should be held only for the short periods of time
139 : /// needed to read/write the state, not for the whole configuration process.
140 : pub state: Mutex<ComputeState>,
141 : /// `Condvar` to allow notifying waiters about state changes.
142 : pub state_changed: Condvar,
143 :
144 : /// Key: `ext_archive_name`; value: (download start time, download completed?).
145 : pub ext_download_progress: RwLock<HashMap<String, (DateTime<Utc>, bool)>>,
146 : pub compute_ctl_config: ComputeCtlConfig,
147 :
148 : /// Handle to the extension stats collection task
149 : extension_stats_task: TaskHandle,
150 : lfc_offload_task: TaskHandle,
151 : }
152 :
153 : // Store some metrics about download sizes that might impact startup time.
154 : #[derive(Clone, Debug)]
155 : pub struct RemoteExtensionMetrics {
156 : num_ext_downloaded: u64,
157 : largest_ext_size: u64,
158 : total_ext_download_size: u64,
159 : }
160 :
161 : #[derive(Clone, Debug)]
162 : pub struct ComputeState {
163 : pub start_time: DateTime<Utc>,
164 : pub pg_start_time: Option<DateTime<Utc>>,
165 : pub status: ComputeStatus,
166 : /// Timestamp of the last Postgres activity. It could be `None` if
167 : /// the compute hasn't been used since start.
168 : pub last_active: Option<DateTime<Utc>>,
169 : pub error: Option<String>,
170 :
171 : /// Compute spec. This can be received from the CLI or - more likely -
172 : /// passed by the control plane with a /configure HTTP request.
173 : pub pspec: Option<ParsedSpec>,
174 :
175 : /// If the spec is passed by a /configure request, 'startup_span' is the
176 : /// /configure request's tracing span. The main thread enters it when it
177 : /// processes the compute startup, so that the compute startup is considered
178 : /// to be part of the /configure request for tracing purposes.
179 : ///
180 : /// If the request handling thread/task called startup_compute() directly,
181 : /// it would automatically be a child of the request handling span, and we
182 : /// wouldn't need this. But because we use the main thread to perform the
183 : /// startup, and the /configure task just waits for it to finish, we need to
184 : /// set up the span relationship ourselves.
185 : pub startup_span: Option<tracing::span::Span>,
186 :
187 : pub lfc_prewarm_state: LfcPrewarmState,
188 : pub lfc_offload_state: LfcOffloadState,
189 :
190 : /// WAL flush LSN that is set after terminating Postgres and syncing safekeepers if
191 : /// `mode == ComputeMode::Primary`; `None` otherwise.
192 : pub terminate_flush_lsn: Option<Lsn>,
193 : pub promote_state: Option<watch::Receiver<PromoteState>>,
194 :
195 : pub metrics: ComputeMetrics,
196 : }
197 :
198 : impl ComputeState {
199 0 : pub fn new() -> Self {
200 0 : Self {
201 0 : start_time: Utc::now(),
202 0 : pg_start_time: None,
203 0 : status: ComputeStatus::Empty,
204 0 : last_active: None,
205 0 : error: None,
206 0 : pspec: None,
207 0 : startup_span: None,
208 0 : metrics: ComputeMetrics::default(),
209 0 : lfc_prewarm_state: LfcPrewarmState::default(),
210 0 : lfc_offload_state: LfcOffloadState::default(),
211 0 : terminate_flush_lsn: None,
212 0 : promote_state: None,
213 0 : }
214 0 : }
215 :
216 0 : pub fn set_status(&mut self, status: ComputeStatus, state_changed: &Condvar) {
217 0 : let prev = self.status;
218 0 : info!("Changing compute status from {} to {}", prev, status);
219 0 : self.status = status;
220 0 : state_changed.notify_all();
221 :
222 0 : COMPUTE_CTL_UP.reset();
223 0 : COMPUTE_CTL_UP
224 0 : .with_label_values(&[&BUILD_TAG, status.to_string().as_str()])
225 0 : .set(1);
226 0 : }
227 :
228 0 : pub fn set_failed_status(&mut self, err: anyhow::Error, state_changed: &Condvar) {
229 0 : self.error = Some(format!("{err:?}"));
230 0 : self.set_status(ComputeStatus::Failed, state_changed);
231 0 : }
232 : }
233 :
234 : impl Default for ComputeState {
235 0 : fn default() -> Self {
236 0 : Self::new()
237 0 : }
238 : }
239 :
240 : #[derive(Clone, Debug)]
241 : pub struct ParsedSpec {
242 : pub spec: ComputeSpec,
243 : pub tenant_id: TenantId,
244 : pub timeline_id: TimelineId,
245 : pub pageserver_connstr: String,
246 : pub safekeeper_connstrings: Vec<String>,
247 : pub storage_auth_token: Option<String>,
248 : /// k8s dns name and port
249 : pub endpoint_storage_addr: Option<String>,
250 : pub endpoint_storage_token: Option<String>,
251 : }
252 :
253 : impl ParsedSpec {
254 1 : pub fn validate(&self) -> Result<(), String> {
255 : // Only Primary nodes use safekeeper_connstrings, and at the moment
256 : // this method only validates that part of the spec.
257 1 : if self.spec.mode != ComputeMode::Primary {
258 0 : return Ok(());
259 1 : }
260 :
261 : // While it seems like a good idea to check for an odd number of entries in
262 : // the safekeepers connection string, changes to the list of safekeepers might
263 : // involve appending a new server to a list of 3, in which case a list of 4
264 : // entries is okay in production.
265 : //
266 : // Still we want unique entries, and at least one entry in the vector
267 1 : if self.safekeeper_connstrings.is_empty() {
268 0 : return Err(String::from("safekeeper_connstrings is empty"));
269 1 : }
270 :
271 : // check for uniqueness of the connection strings in the set
272 1 : let mut connstrings = self.safekeeper_connstrings.clone();
273 :
274 1 : connstrings.sort();
275 1 : let mut previous = &connstrings[0];
276 :
277 2 : for current in connstrings.iter().skip(1) {
278 : // duplicate entry?
279 2 : if current == previous {
280 1 : return Err(format!(
281 1 : "duplicate entry in safekeeper_connstrings: {current}!",
282 1 : ));
283 1 : }
284 :
285 1 : previous = current;
286 : }
287 :
288 0 : Ok(())
289 1 : }
290 : }
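// A minimal sketch of how the duplicate check above behaves (hypothetical
// values, not part of the real test suite):
//
//     let mut pspec = /* a ParsedSpec with mode == ComputeMode::Primary */;
//     pspec.safekeeper_connstrings = vec!["sk-1:5454".into(), "sk-1:5454".into()];
//     assert!(pspec.validate().is_err()); // duplicate entry is rejected
//     pspec.safekeeper_connstrings = vec!["sk-1:5454".into(), "sk-2:5454".into()];
//     assert!(pspec.validate().is_ok()); // distinct entries pass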
291 :
292 : impl TryFrom<ComputeSpec> for ParsedSpec {
293 : type Error = String;
294 1 : fn try_from(spec: ComputeSpec) -> Result<Self, String> {
295 : // Extract the options from the spec file that are needed to connect to
296 : // the storage system.
297 : //
298 : // For backwards-compatibility, the top-level fields in the spec file
299 : // may be empty. In that case, we need to dig them out of the GUCs in the
300 : // cluster.settings field.
301 1 : let pageserver_connstr = spec
302 1 : .pageserver_connstring
303 1 : .clone()
304 1 : .or_else(|| spec.cluster.settings.find("neon.pageserver_connstring"))
305 1 : .ok_or("pageserver connstr should be provided")?;
306 1 : let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() {
307 1 : if matches!(spec.mode, ComputeMode::Primary) {
308 1 : spec.cluster
309 1 : .settings
310 1 : .find("neon.safekeepers")
311 1 : .ok_or("safekeeper connstrings should be provided")?
312 1 : .split(',')
313 4 : .map(|str| str.to_string())
314 1 : .collect()
315 : } else {
316 0 : vec![]
317 : }
318 : } else {
319 0 : spec.safekeeper_connstrings.clone()
320 : };
321 :
322 1 : let storage_auth_token = spec.storage_auth_token.clone();
323 1 : let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
324 0 : tenant_id
325 : } else {
326 1 : spec.cluster
327 1 : .settings
328 1 : .find("neon.tenant_id")
329 1 : .ok_or("tenant id should be provided")
330 1 : .map(|s| TenantId::from_str(&s))?
331 1 : .or(Err("invalid tenant id"))?
332 : };
333 1 : let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
334 0 : timeline_id
335 : } else {
336 1 : spec.cluster
337 1 : .settings
338 1 : .find("neon.timeline_id")
339 1 : .ok_or("timeline id should be provided")
340 1 : .map(|s| TimelineId::from_str(&s))?
341 1 : .or(Err("invalid timeline id"))?
342 : };
343 :
344 1 : let endpoint_storage_addr: Option<String> = spec
345 1 : .endpoint_storage_addr
346 1 : .clone()
347 1 : .or_else(|| spec.cluster.settings.find("neon.endpoint_storage_addr"));
348 1 : let endpoint_storage_token = spec
349 1 : .endpoint_storage_token
350 1 : .clone()
351 1 : .or_else(|| spec.cluster.settings.find("neon.endpoint_storage_token"));
352 :
353 1 : let res = ParsedSpec {
354 1 : spec,
355 1 : pageserver_connstr,
356 1 : safekeeper_connstrings,
357 1 : storage_auth_token,
358 1 : tenant_id,
359 1 : timeline_id,
360 1 : endpoint_storage_addr,
361 1 : endpoint_storage_token,
362 1 : };
363 :
364 : // Now check validity of the parsed specification
365 1 : res.validate()?;
366 0 : Ok(res)
367 1 : }
368 : }
369 :
370 : /// If we are a VM, returns a [`Command`] that will run in the `neon-postgres`
371 : /// cgroup. Otherwise returns the default `Command::new(cmd)`
372 : ///
373 : /// This function should be used to start postgres, as it will start it in the
374 : /// neon-postgres cgroup if we are a VM. This allows autoscaling to control
375 : /// postgres' resource usage. The cgroup will exist in VMs because vm-builder
376 : /// creates it during the sysinit phase of its inittab.
377 0 : fn maybe_cgexec(cmd: &str) -> Command {
378 : // The cplane sets this env var for autoscaling computes.
379 : // use `var_os` so we don't have to worry about the variable being valid
380 : // unicode. Should never be an concern . . . but just in case
381 0 : if env::var_os("AUTOSCALING").is_some() {
382 0 : let mut command = Command::new("cgexec");
383 0 : command.args(["-g", "memory:neon-postgres"]);
384 0 : command.arg(cmd);
385 0 : command
386 : } else {
387 0 : Command::new(cmd)
388 : }
389 0 : }
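// Usage sketch (hypothetical paths): both branches return a `Command`, so the
// caller configures and spawns it the same way either way, e.g.:
//
//     let mut cmd = maybe_cgexec("/usr/local/bin/postgres");
//     cmd.args(["-D", "/var/db/postgres/pgdata"]);
//     let child = cmd.spawn()?;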
390 :
391 : struct PostgresHandle {
392 : postgres: std::process::Child,
393 : log_collector: JoinHandle<Result<()>>,
394 : }
395 :
396 : impl PostgresHandle {
397 : /// Return PID of the postgres (postmaster) process
398 0 : fn pid(&self) -> Pid {
399 0 : Pid::from_raw(self.postgres.id() as i32)
400 0 : }
401 : }
402 :
403 : struct StartVmMonitorResult {
404 : #[cfg(target_os = "linux")]
405 : token: tokio_util::sync::CancellationToken,
406 : #[cfg(target_os = "linux")]
407 : vm_monitor: Option<JoinHandle<Result<()>>>,
408 : }
409 :
410 : impl ComputeNode {
411 0 : pub fn new(params: ComputeNodeParams, config: ComputeConfig) -> Result<Self> {
412 0 : let connstr = params.connstr.as_str();
413 0 : let mut conn_conf = postgres::config::Config::from_str(connstr)
414 0 : .context("cannot build postgres config from connstr")?;
415 0 : let mut tokio_conn_conf = tokio_postgres::config::Config::from_str(connstr)
416 0 : .context("cannot build tokio postgres config from connstr")?;
417 :
418 : // Users can set some configuration parameters per database with
419 : // ALTER DATABASE ... SET ...
420 : //
421 : // There are at least these parameters:
422 : //
423 : // - role=some_other_role
424 : // - default_transaction_read_only=on
425 : // - statement_timeout=1, i.e., 1ms, which will cause most of the queries to fail
426 : // - search_path=non_public_schema, this should actually be safe because
427 : // we don't call any functions in user databases, but better to always reset
428 : // it to public.
429 : //
430 : // that can affect `compute_ctl` and prevent it from properly configuring the database schema.
431 : // Unset them via connection string options before connecting to the database.
432 : // N.B. keep it in sync with `ZENITH_OPTIONS` in `get_maintenance_client()`.
433 : const EXTRA_OPTIONS: &str = "-c role=cloud_admin -c default_transaction_read_only=off -c search_path=public -c statement_timeout=0 -c pgaudit.log=none";
434 0 : let options = match conn_conf.get_options() {
435 : // Allow the control plane to override any options set by the
436 : // compute
437 0 : Some(options) => format!("{EXTRA_OPTIONS} {options}"),
438 0 : None => EXTRA_OPTIONS.to_string(),
439 : };
440 0 : conn_conf.options(&options);
441 0 : tokio_conn_conf.options(&options);
442 :
443 0 : let mut new_state = ComputeState::new();
444 0 : if let Some(spec) = config.spec {
445 0 : let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow::anyhow!(msg))?;
446 0 : new_state.pspec = Some(pspec);
447 0 : }
448 :
449 0 : Ok(ComputeNode {
450 0 : params,
451 0 : conn_conf,
452 0 : tokio_conn_conf,
453 0 : state: Mutex::new(new_state),
454 0 : state_changed: Condvar::new(),
455 0 : ext_download_progress: RwLock::new(HashMap::new()),
456 0 : compute_ctl_config: config.compute_ctl_config,
457 0 : extension_stats_task: Mutex::new(None),
458 0 : lfc_offload_task: Mutex::new(None),
459 0 : })
460 0 : }
461 :
462 : /// Top-level control flow of compute_ctl. Returns a process exit code we should
463 : /// exit with.
464 0 : pub fn run(self) -> Result<Option<i32>> {
465 0 : let this = Arc::new(self);
466 :
467 0 : let cli_spec = this.state.lock().unwrap().pspec.clone();
468 :
469 : // If this is a pooled VM, prewarm before starting HTTP server and becoming
470 : // available for binding. Prewarming helps Postgres start quicker later,
471 : // because QEMU will already have its memory allocated from the host, and
472 : // the necessary binaries will already be cached.
473 0 : if cli_spec.is_none() {
474 0 : this.prewarm_postgres_vm_memory()?;
475 0 : }
476 :
477 : // Set the up metric with Empty status before starting the HTTP server.
478 : // That way on the first metric scrape, an external observer will see us
479 : // as 'up' and 'empty' (unless the compute was started with a spec or
480 : // already configured by control plane).
481 0 : COMPUTE_CTL_UP
482 0 : .with_label_values(&[&BUILD_TAG, ComputeStatus::Empty.to_string().as_str()])
483 0 : .set(1);
484 :
485 : // Launch the external HTTP server first, so that we can serve control plane
486 : // requests while configuration is still in progress.
487 0 : crate::http::server::Server::External {
488 0 : port: this.params.external_http_port,
489 0 : config: this.compute_ctl_config.clone(),
490 0 : compute_id: this.params.compute_id.clone(),
491 0 : instance_id: this.params.instance_id.clone(),
492 0 : }
493 0 : .launch(&this);
494 :
495 : // The internal HTTP server could be launched later, but there isn't much
496 : // sense in waiting.
497 0 : crate::http::server::Server::Internal {
498 0 : port: this.params.internal_http_port,
499 0 : }
500 0 : .launch(&this);
501 :
502 : // If we got a spec from the CLI already, use that. Otherwise wait for the
503 : // control plane to pass it to us with a /configure HTTP request
504 0 : let pspec = if let Some(cli_spec) = cli_spec {
505 0 : cli_spec
506 : } else {
507 0 : this.wait_spec()?
508 : };
509 :
510 0 : launch_lsn_lease_bg_task_for_static(&this);
511 :
512 : // We have a spec, start the compute
513 0 : let mut delay_exit = false;
514 0 : let mut vm_monitor = None;
515 0 : let mut pg_process: Option<PostgresHandle> = None;
516 :
517 0 : match this.start_compute(&mut pg_process) {
518 0 : Ok(()) => {
519 0 : // Success! Launch remaining services (just vm-monitor currently)
520 0 : vm_monitor =
521 0 : Some(this.start_vm_monitor(pspec.spec.disable_lfc_resizing.unwrap_or(false)));
522 0 : }
523 0 : Err(err) => {
524 : // Something went wrong with the startup. Log it and expose the error to
525 : // HTTP status requests.
526 0 : error!("could not start the compute node: {:#}", err);
527 0 : this.set_failed_status(err);
528 0 : delay_exit = true;
529 :
530 : // If the error happened after starting PostgreSQL, kill it
531 0 : if let Some(ref pg_process) = pg_process {
532 0 : kill(pg_process.pid(), Signal::SIGQUIT).ok();
533 0 : }
534 : }
535 : }
536 :
537 : // If startup was successful, or it failed in the late stages,
538 : // PostgreSQL is now running. Wait until it exits.
539 0 : let exit_code = if let Some(pg_handle) = pg_process {
540 0 : let exit_status = this.wait_postgres(pg_handle);
541 0 : info!("Postgres exited with code {}, shutting down", exit_status);
542 0 : exit_status.code()
543 : } else {
544 0 : None
545 : };
546 :
547 0 : this.terminate_extension_stats_task();
548 0 : this.terminate_lfc_offload_task();
549 :
550 : // Terminate the vm_monitor so it releases the file watcher on
551 : // /sys/fs/cgroup/neon-postgres.
552 : // Note: the vm-monitor only runs on linux because it requires cgroups.
553 0 : if let Some(vm_monitor) = vm_monitor {
554 : cfg_if::cfg_if! {
555 : if #[cfg(target_os = "linux")] {
556 : // Kills all threads spawned by the monitor
557 0 : vm_monitor.token.cancel();
558 0 : if let Some(handle) = vm_monitor.vm_monitor {
559 0 : // Kills the actual task running the monitor
560 0 : handle.abort();
561 0 : }
562 : } else {
563 : _ = vm_monitor; // appease unused lint on macOS
564 : }
565 : }
566 0 : }
567 :
568 : // Reap the postgres process
569 0 : delay_exit |= this.cleanup_after_postgres_exit()?;
570 :
571 : // /terminate returns an LSN. If we don't sleep at all, the connection will
572 : // break and we won't get the result. If we sleep too much, tests will take
573 : // significantly longer and the GitHub Actions run will error out.
574 0 : let sleep_duration = if delay_exit {
575 0 : Duration::from_secs(30)
576 : } else {
577 0 : Duration::from_millis(300)
578 : };
579 :
580 : // If launch failed, keep serving HTTP requests for a while, so the cloud
581 : // control plane can get the actual error.
582 0 : if delay_exit {
583 0 : info!("giving control plane 30s to collect the error before shutdown");
584 0 : }
585 0 : std::thread::sleep(sleep_duration);
586 0 : Ok(exit_code)
587 0 : }
588 :
589 0 : pub fn wait_spec(&self) -> Result<ParsedSpec> {
590 0 : info!("no compute spec provided, waiting");
591 0 : let mut state = self.state.lock().unwrap();
592 0 : while state.status != ComputeStatus::ConfigurationPending {
593 0 : state = self.state_changed.wait(state).unwrap();
594 0 : }
595 :
596 0 : info!("got spec, continue configuration");
597 0 : let spec = state.pspec.as_ref().unwrap().clone();
598 :
599 : // Record for how long we slept waiting for the spec.
600 0 : let now = Utc::now();
601 0 : state.metrics.wait_for_spec_ms = now
602 0 : .signed_duration_since(state.start_time)
603 0 : .to_std()
604 0 : .unwrap()
605 0 : .as_millis() as u64;
606 :
607 : // Reset start time, so that the total startup time that is calculated later will
608 : // not include the time that we waited for the spec.
609 0 : state.start_time = now;
610 :
611 0 : Ok(spec)
612 0 : }
613 :
614 : /// Start compute.
615 : ///
616 : /// Prerequisites:
617 : /// - the compute spec has been placed in self.state.pspec
618 : ///
619 : /// On success:
620 : /// - status is set to ComputeStatus::Running
621 : /// - self.running_postgres is set
622 : ///
623 : /// On error:
624 : /// - status is left in ComputeStatus::Init. The caller is responsible for setting it to Failed
625 : /// - if Postgres was started before the fatal error happened, self.running_postgres is
626 : /// set. The caller is responsible for killing it.
627 : ///
628 : /// Note that this is in the critical path of a compute cold start. Keep this fast.
629 : /// Try to do things concurrently, to hide the latencies.
630 0 : fn start_compute(self: &Arc<Self>, pg_handle: &mut Option<PostgresHandle>) -> Result<()> {
631 : let compute_state: ComputeState;
632 :
633 : let start_compute_span;
634 : let _this_entered;
635 : {
636 0 : let mut state_guard = self.state.lock().unwrap();
637 :
638 : // Create a tracing span for the startup operation.
639 : //
640 : // We could otherwise just annotate the function with #[instrument], but if
641 : // we're being configured from a /configure HTTP request, we want the
642 : // startup to be considered part of the /configure request.
643 : //
644 : // Similarly, if a trace ID was passed in env variables, attach it to the span.
645 0 : start_compute_span = {
646 : // Temporarily enter the parent span, so that the new span becomes its child.
647 0 : if let Some(p) = state_guard.startup_span.take() {
648 0 : let _parent_entered = p.entered();
649 0 : tracing::info_span!("start_compute")
650 0 : } else if let Some(otel_context) = startup_context_from_env() {
651 : use tracing_opentelemetry::OpenTelemetrySpanExt;
652 0 : let span = tracing::info_span!("start_compute");
653 0 : span.set_parent(otel_context);
654 0 : span
655 : } else {
656 0 : tracing::info_span!("start_compute")
657 : }
658 : };
659 0 : _this_entered = start_compute_span.enter();
660 :
661 : // Hadron: Record postgres start time (used to enforce pg_init_timeout).
662 0 : state_guard.pg_start_time.replace(Utc::now());
663 :
664 0 : state_guard.set_status(ComputeStatus::Init, &self.state_changed);
665 0 : compute_state = state_guard.clone()
666 : }
667 :
668 0 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
669 0 : info!(
670 0 : "starting compute for project {}, operation {}, tenant {}, timeline {}, project {}, branch {}, endpoint {}, features {:?}, spec.remote_extensions {:?}",
671 0 : pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None"),
672 0 : pspec.spec.operation_uuid.as_deref().unwrap_or("None"),
673 : pspec.tenant_id,
674 : pspec.timeline_id,
675 0 : pspec.spec.project_id.as_deref().unwrap_or("None"),
676 0 : pspec.spec.branch_id.as_deref().unwrap_or("None"),
677 0 : pspec.spec.endpoint_id.as_deref().unwrap_or("None"),
678 : pspec.spec.features,
679 : pspec.spec.remote_extensions,
680 : );
681 :
682 : ////// PRE-STARTUP PHASE: things that need to be finished before we start the Postgres process
683 :
684 : // Collect all the tasks that must finish here
685 0 : let mut pre_tasks = tokio::task::JoinSet::new();
686 :
687 : // Make sure TLS certificates are properly loaded and in the right place.
688 0 : if self.compute_ctl_config.tls.is_some() {
689 0 : let this = self.clone();
690 0 : pre_tasks.spawn(async move {
691 0 : this.watch_cert_for_changes().await;
692 :
693 0 : Ok::<(), anyhow::Error>(())
694 0 : });
695 0 : }
696 :
697 0 : let tls_config = self.tls_config(&pspec.spec);
698 :
699 : // If there are any remote extensions in shared_preload_libraries, start downloading them
700 0 : if pspec.spec.remote_extensions.is_some() {
701 0 : let (this, spec) = (self.clone(), pspec.spec.clone());
702 0 : pre_tasks.spawn(async move {
703 0 : this.download_preload_extensions(&spec)
704 0 : .in_current_span()
705 0 : .await
706 0 : });
707 0 : }
708 :
709 : // Prepare pgdata directory. This downloads the basebackup, among other things.
710 : {
711 0 : let (this, cs) = (self.clone(), compute_state.clone());
712 0 : pre_tasks.spawn_blocking_child(move || this.prepare_pgdata(&cs));
713 : }
714 :
715 : // Resize swap to the desired size if the compute spec says so
716 0 : if let (Some(size_bytes), true) =
717 0 : (pspec.spec.swap_size_bytes, self.params.resize_swap_on_bind)
718 : {
719 0 : pre_tasks.spawn_blocking_child(move || {
720 : // To avoid 'swapoff' hitting postgres startup, we need to run resize-swap to completion
721 : // *before* starting postgres.
722 : //
723 : // In theory, we could do this asynchronously if SkipSwapon was enabled for VMs, but this
724 : // carries a risk of introducing hard-to-debug issues - e.g. if postgres sometimes gets
725 : // OOM-killed during startup because swap wasn't available yet.
726 0 : resize_swap(size_bytes).context("failed to resize swap")?;
727 0 : let size_mib = size_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
728 0 : info!(%size_bytes, %size_mib, "resized swap");
729 :
730 0 : Ok::<(), anyhow::Error>(())
731 0 : });
732 0 : }
733 :
734 : // Set disk quota if the compute spec says so
735 0 : if let (Some(disk_quota_bytes), Some(disk_quota_fs_mountpoint)) = (
736 0 : pspec.spec.disk_quota_bytes,
737 0 : self.params.set_disk_quota_for_fs.as_ref(),
738 : ) {
739 0 : let disk_quota_fs_mountpoint = disk_quota_fs_mountpoint.clone();
740 0 : pre_tasks.spawn_blocking_child(move || {
741 0 : set_disk_quota(disk_quota_bytes, &disk_quota_fs_mountpoint)
742 0 : .context("failed to set disk quota")?;
743 0 : let size_mib = disk_quota_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
744 0 : info!(%disk_quota_bytes, %size_mib, "set disk quota");
745 :
746 0 : Ok::<(), anyhow::Error>(())
747 0 : });
748 0 : }
749 :
750 : // tune pgbouncer
751 0 : if let Some(pgbouncer_settings) = &pspec.spec.pgbouncer_settings {
752 0 : info!("tuning pgbouncer");
753 :
754 0 : let pgbouncer_settings = pgbouncer_settings.clone();
755 0 : let tls_config = tls_config.clone();
756 :
757 : // Spawn a background task to do the tuning,
758 : // so that we don't block the main thread that starts Postgres.
759 0 : let _handle = tokio::spawn(async move {
760 0 : let res = tune_pgbouncer(pgbouncer_settings, tls_config).await;
761 0 : if let Err(err) = res {
762 0 : error!("error while tuning pgbouncer: {err:?}");
763 : // Continue with the startup anyway
764 0 : }
765 0 : });
766 0 : }
767 :
768 : // configure local_proxy
769 0 : if let Some(local_proxy) = &pspec.spec.local_proxy_config {
770 0 : info!("configuring local_proxy");
771 :
772 : // Spawn a background task to do the configuration,
773 : // so that we don't block the main thread that starts Postgres.
774 :
775 0 : let mut local_proxy = local_proxy.clone();
776 0 : local_proxy.tls = tls_config.clone();
777 :
778 0 : let _handle = tokio::spawn(async move {
779 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
780 0 : error!("error while configuring local_proxy: {err:?}");
781 : // Continue with the startup anyway
782 0 : }
783 0 : });
784 0 : }
785 :
786 : // Configure and start rsyslog for compliance audit logging
787 0 : match pspec.spec.audit_log_level {
788 : ComputeAudit::Hipaa | ComputeAudit::Extended | ComputeAudit::Full => {
789 0 : let remote_tls_endpoint =
790 0 : std::env::var("AUDIT_LOGGING_TLS_ENDPOINT").unwrap_or("".to_string());
791 0 : let remote_plain_endpoint =
792 0 : std::env::var("AUDIT_LOGGING_ENDPOINT").unwrap_or("".to_string());
793 :
794 0 : if remote_plain_endpoint.is_empty() && remote_tls_endpoint.is_empty() {
795 0 : anyhow::bail!(
796 0 : "AUDIT_LOGGING_ENDPOINT and AUDIT_LOGGING_TLS_ENDPOINT are both empty"
797 : );
798 0 : }
799 :
800 0 : let log_directory_path = Path::new(&self.params.pgdata).join("log");
801 0 : let log_directory_path = log_directory_path.to_string_lossy().to_string();
802 :
803 : // Add project_id and endpoint_id to identify the logs.
804 : //
805 : // These IDs are passed down from the control plane.
806 0 : let endpoint_id = pspec.spec.endpoint_id.as_deref().unwrap_or("");
807 0 : let project_id = pspec.spec.project_id.as_deref().unwrap_or("");
808 :
809 0 : configure_audit_rsyslog(
810 0 : log_directory_path.clone(),
811 0 : endpoint_id,
812 0 : project_id,
813 0 : &remote_plain_endpoint,
814 0 : &remote_tls_endpoint,
815 0 : )?;
816 :
817 : // Launch a background task to clean up the audit logs
818 0 : launch_pgaudit_gc(log_directory_path);
819 : }
820 0 : _ => {}
821 : }
822 :
823 : // Configure and start rsyslog for Postgres logs export
824 0 : let conf = PostgresLogsRsyslogConfig::new(pspec.spec.logs_export_host.as_deref());
825 0 : configure_postgres_logs_export(conf)?;
826 :
827 : // Launch remaining service threads
828 0 : let _monitor_handle = launch_monitor(self);
829 0 : let _configurator_handle = launch_configurator(self);
830 :
831 : // Wait for all the pre-tasks to finish before starting postgres
832 0 : let rt = tokio::runtime::Handle::current();
833 0 : while let Some(res) = rt.block_on(pre_tasks.join_next()) {
834 0 : res??;
835 : }
836 :
837 : ////// START POSTGRES
838 0 : let start_time = Utc::now();
839 0 : let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
840 0 : let postmaster_pid = pg_process.pid();
841 0 : *pg_handle = Some(pg_process);
842 :
843 : // If this is a primary endpoint, perform some post-startup configuration before
844 : // opening it up for the world.
845 0 : let config_time = Utc::now();
846 0 : if pspec.spec.mode == ComputeMode::Primary {
847 0 : self.configure_as_primary(&compute_state)?;
848 :
849 0 : let conf = self.get_tokio_conn_conf(None);
850 0 : tokio::task::spawn(async {
851 0 : let _ = installed_extensions(conf).await;
852 0 : });
853 0 : }
854 :
855 : // All done!
856 0 : let startup_end_time = Utc::now();
857 0 : let metrics = {
858 0 : let mut state = self.state.lock().unwrap();
859 0 : state.metrics.start_postgres_ms = config_time
860 0 : .signed_duration_since(start_time)
861 0 : .to_std()
862 0 : .unwrap()
863 0 : .as_millis() as u64;
864 0 : state.metrics.config_ms = startup_end_time
865 0 : .signed_duration_since(config_time)
866 0 : .to_std()
867 0 : .unwrap()
868 0 : .as_millis() as u64;
869 0 : state.metrics.total_startup_ms = startup_end_time
870 0 : .signed_duration_since(compute_state.start_time)
871 0 : .to_std()
872 0 : .unwrap()
873 0 : .as_millis() as u64;
874 0 : state.metrics.clone()
875 : };
876 0 : self.set_status(ComputeStatus::Running);
877 :
878 : // Log metrics so that we can search for slow operations in logs
879 0 : info!(?metrics, postmaster_pid = %postmaster_pid, "compute start finished");
880 :
881 0 : self.spawn_extension_stats_task();
882 :
883 0 : if pspec.spec.autoprewarm {
884 0 : info!("autoprewarming on startup as requested");
885 0 : self.prewarm_lfc(None);
886 0 : }
887 0 : if let Some(seconds) = pspec.spec.offload_lfc_interval_seconds {
888 0 : self.spawn_lfc_offload_task(Duration::from_secs(seconds.into()));
889 0 : };
890 0 : Ok(())
891 0 : }
892 :
893 : #[instrument(skip_all)]
894 : async fn download_preload_extensions(&self, spec: &ComputeSpec) -> Result<()> {
895 : let remote_extensions = if let Some(remote_extensions) = &spec.remote_extensions {
896 : remote_extensions
897 : } else {
898 : return Ok(());
899 : };
900 :
901 : // First, create control files for all available extensions
902 : extension_server::create_control_files(remote_extensions, &self.params.pgbin);
903 :
904 : let library_load_start_time = Utc::now();
905 : let remote_ext_metrics = self.prepare_preload_libraries(spec).await?;
906 :
907 : let library_load_time = Utc::now()
908 : .signed_duration_since(library_load_start_time)
909 : .to_std()
910 : .unwrap()
911 : .as_millis() as u64;
912 : let mut state = self.state.lock().unwrap();
913 : state.metrics.load_ext_ms = library_load_time;
914 : state.metrics.num_ext_downloaded = remote_ext_metrics.num_ext_downloaded;
915 : state.metrics.largest_ext_size = remote_ext_metrics.largest_ext_size;
916 : state.metrics.total_ext_download_size = remote_ext_metrics.total_ext_download_size;
917 : info!(
918 : "Loading shared_preload_libraries took {:?}ms",
919 : library_load_time
920 : );
921 : info!("{:?}", remote_ext_metrics);
922 :
923 : Ok(())
924 : }
925 :
926 : /// Start the vm-monitor if directed to. The vm-monitor only runs on linux
927 : /// because it requires cgroups.
928 0 : fn start_vm_monitor(&self, disable_lfc_resizing: bool) -> StartVmMonitorResult {
929 : cfg_if::cfg_if! {
930 : if #[cfg(target_os = "linux")] {
931 : use std::env;
932 : use tokio_util::sync::CancellationToken;
933 :
934 : // This token is used internally by the monitor to clean up all threads
935 0 : let token = CancellationToken::new();
936 :
937 : // don't pass postgres connection string to vm-monitor if we don't want it to resize LFC
938 0 : let pgconnstr = if disable_lfc_resizing {
939 0 : None
940 : } else {
941 0 : Some(self.params.filecache_connstr.clone())
942 : };
943 :
944 0 : let vm_monitor = if env::var_os("AUTOSCALING").is_some() {
945 0 : let vm_monitor = tokio::spawn(vm_monitor::start(
946 0 : Box::leak(Box::new(vm_monitor::Args {
947 0 : cgroup: Some(self.params.cgroup.clone()),
948 0 : pgconnstr,
949 0 : addr: self.params.vm_monitor_addr.clone(),
950 0 : })),
951 0 : token.clone(),
952 : ));
953 0 : Some(vm_monitor)
954 : } else {
955 0 : None
956 : };
957 0 : StartVmMonitorResult { token, vm_monitor }
958 : } else {
959 : _ = disable_lfc_resizing; // appease unused lint on macOS
960 : StartVmMonitorResult { }
961 : }
962 : }
963 0 : }
964 :
965 0 : fn cleanup_after_postgres_exit(&self) -> Result<bool> {
966 : // Maybe sync safekeepers again, to speed up next startup
967 0 : let compute_state = self.state.lock().unwrap().clone();
968 0 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
969 0 : let lsn = if matches!(pspec.spec.mode, compute_api::spec::ComputeMode::Primary) {
970 0 : info!("syncing safekeepers on shutdown");
971 0 : let storage_auth_token = pspec.storage_auth_token.clone();
972 0 : let lsn = self.sync_safekeepers(storage_auth_token)?;
973 0 : info!(%lsn, "synced safekeepers");
974 0 : Some(lsn)
975 : } else {
976 0 : info!("not primary, not syncing safekeepers");
977 0 : None
978 : };
979 :
980 0 : let mut state = self.state.lock().unwrap();
981 0 : state.terminate_flush_lsn = lsn;
982 :
983 0 : let delay_exit = state.status == ComputeStatus::TerminationPendingFast;
984 0 : if state.status == ComputeStatus::TerminationPendingFast
985 0 : || state.status == ComputeStatus::TerminationPendingImmediate
986 : {
987 0 : info!(
988 0 : "Changing compute status from {} to {}",
989 0 : state.status,
990 : ComputeStatus::Terminated
991 : );
992 0 : state.status = ComputeStatus::Terminated;
993 0 : self.state_changed.notify_all();
994 0 : }
995 0 : drop(state);
996 :
997 0 : if let Err(err) = self.check_for_core_dumps() {
998 0 : error!("error while checking for core dumps: {err:?}");
999 0 : }
1000 :
1001 0 : Ok(delay_exit)
1002 0 : }
1003 :
1004 : /// Check whether the compute node has the corresponding feature enabled.
1005 0 : pub fn has_feature(&self, feature: ComputeFeature) -> bool {
1006 0 : let state = self.state.lock().unwrap();
1007 :
1008 0 : if let Some(s) = state.pspec.as_ref() {
1009 0 : s.spec.features.contains(&feature)
1010 : } else {
1011 0 : false
1012 : }
1013 0 : }
1014 :
1015 0 : pub fn set_status(&self, status: ComputeStatus) {
1016 0 : let mut state = self.state.lock().unwrap();
1017 0 : state.set_status(status, &self.state_changed);
1018 0 : }
1019 :
1020 0 : pub fn set_failed_status(&self, err: anyhow::Error) {
1021 0 : let mut state = self.state.lock().unwrap();
1022 0 : state.set_failed_status(err, &self.state_changed);
1023 0 : }
1024 :
1025 0 : pub fn get_status(&self) -> ComputeStatus {
1026 0 : self.state.lock().unwrap().status
1027 0 : }
1028 :
1029 0 : pub fn get_timeline_id(&self) -> Option<TimelineId> {
1030 0 : self.state
1031 0 : .lock()
1032 0 : .unwrap()
1033 0 : .pspec
1034 0 : .as_ref()
1035 0 : .map(|s| s.timeline_id)
1036 0 : }
1037 :
1038 : // Remove the `pgdata` directory and create it again with the right permissions.
1039 0 : fn create_pgdata(&self) -> Result<()> {
1040 : // Ignore the removal error; most likely it is a 'No such file or directory (os error 2)'.
1041 : // If it is something else, create_dir() will error out anyway.
1042 0 : let pgdata = &self.params.pgdata;
1043 0 : let _ok = fs::remove_dir_all(pgdata);
1044 0 : fs::create_dir(pgdata)?;
1045 0 : fs::set_permissions(pgdata, fs::Permissions::from_mode(0o700))?;
1046 :
1047 0 : Ok(())
1048 0 : }
1049 :
1050 : /// Fetches a basebackup from the Pageserver using the compute state's Pageserver connstring and
1051 : /// unarchives it to the `pgdata` directory, replacing any existing contents.
1052 : #[instrument(skip_all, fields(%lsn))]
1053 : fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
1054 : let spec = compute_state.pspec.as_ref().expect("spec must be set");
1055 :
1056 : let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
1057 : let started = Instant::now();
1058 :
1059 : let (connected, size) = match PageserverProtocol::from_connstring(shard0_connstr)? {
1060 : PageserverProtocol::Libpq => self.try_get_basebackup_libpq(spec, lsn)?,
1061 : PageserverProtocol::Grpc => self.try_get_basebackup_grpc(spec, lsn)?,
1062 : };
1063 :
1064 : self.fix_zenith_signal_neon_signal()?;
1065 :
1066 : let mut state = self.state.lock().unwrap();
1067 : state.metrics.pageserver_connect_micros =
1068 : connected.duration_since(started).as_micros() as u64;
1069 : state.metrics.basebackup_bytes = size as u64;
1070 : state.metrics.basebackup_ms = started.elapsed().as_millis() as u64;
1071 :
1072 : Ok(())
1073 : }
1074 :
1075 : /// Move the Zenith signal file to Neon signal file location.
1076 : /// This makes Compute compatible with older PageServers that don't yet
1077 : /// know about the Zenith->Neon rename.
1078 0 : fn fix_zenith_signal_neon_signal(&self) -> Result<()> {
1079 0 : let datadir = Path::new(&self.params.pgdata);
1080 :
1081 0 : let neonsig = datadir.join("neon.signal");
1082 :
1083 0 : if neonsig.is_file() {
1084 0 : return Ok(());
1085 0 : }
1086 :
1087 0 : let zenithsig = datadir.join("zenith.signal");
1088 :
1089 0 : if zenithsig.is_file() {
1090 0 : fs::copy(zenithsig, neonsig)?;
1091 0 : }
1092 :
1093 0 : Ok(())
1094 0 : }
1095 :
1096 : /// Fetches a basebackup via gRPC. The connstring must use grpc://. Returns the timestamp when
1097 : /// the connection was established, and the (compressed) size of the basebackup.
1098 0 : fn try_get_basebackup_grpc(&self, spec: &ParsedSpec, lsn: Lsn) -> Result<(Instant, usize)> {
1099 0 : let shard0_connstr = spec
1100 0 : .pageserver_connstr
1101 0 : .split(',')
1102 0 : .next()
1103 0 : .unwrap()
1104 0 : .to_string();
1105 0 : let shard_index = match spec.pageserver_connstr.split(',').count() as u8 {
1106 0 : 0 | 1 => ShardIndex::unsharded(),
1107 0 : count => ShardIndex::new(ShardNumber(0), ShardCount(count)),
1108 : };
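// E.g. a connstring listing four shard endpoints yields shard 0 of
// ShardCount(4), while a single endpoint is treated as unsharded.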
1109 :
1110 0 : let (reader, connected) = tokio::runtime::Handle::current().block_on(async move {
1111 0 : let mut client = page_api::Client::connect(
1112 0 : shard0_connstr,
1113 0 : spec.tenant_id,
1114 0 : spec.timeline_id,
1115 0 : shard_index,
1116 0 : spec.storage_auth_token.clone(),
1117 0 : None, // NB: base backups use payload compression
1118 0 : )
1119 0 : .await?;
1120 0 : let connected = Instant::now();
1121 0 : let reader = client
1122 0 : .get_base_backup(page_api::GetBaseBackupRequest {
1123 0 : lsn: (lsn != Lsn(0)).then_some(lsn),
1124 0 : compression: BaseBackupCompression::Gzip,
1125 0 : replica: spec.spec.mode != ComputeMode::Primary,
1126 0 : full: false,
1127 0 : })
1128 0 : .await?;
1129 0 : anyhow::Ok((reader, connected))
1130 0 : })?;
1131 :
1132 0 : let mut reader = MeasuredReader::new(tokio_util::io::SyncIoBridge::new(reader));
1133 :
1134 : // Set `ignore_zeros` so that unpack() reads the entire stream and doesn't just stop at the
1135 : // end-of-archive marker. If the server errors, the tar::Builder drop handler will write an
1136 : // end-of-archive marker before the error is emitted, and we would not see the error.
1137 0 : let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut reader));
1138 0 : ar.set_ignore_zeros(true);
1139 0 : ar.unpack(&self.params.pgdata)?;
1140 :
1141 0 : Ok((connected, reader.get_byte_count()))
1142 0 : }
1143 :
1144 : /// Fetches a basebackup via libpq. The connstring must use postgresql://. Returns the timestamp
1145 : /// when the connection was established, and the (compressed) size of the basebackup.
1146 0 : fn try_get_basebackup_libpq(&self, spec: &ParsedSpec, lsn: Lsn) -> Result<(Instant, usize)> {
1147 0 : let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
1148 0 : let mut config = postgres::Config::from_str(shard0_connstr)?;
1149 :
1150 : // Use the storage auth token from the config file, if given.
1151 : // Note: this overrides any password set in the connection string.
1152 0 : if let Some(storage_auth_token) = &spec.storage_auth_token {
1153 0 : info!("Got storage auth token from spec file");
1154 0 : config.password(storage_auth_token);
1155 : } else {
1156 0 : info!("Storage auth token not set");
1157 : }
1158 :
1159 0 : config.application_name("compute_ctl");
1160 0 : config.options(&format!(
1161 0 : "-c neon.compute_mode={}",
1162 0 : spec.spec.mode.to_type_str()
1163 0 : ));
1164 :
1165 : // Connect to pageserver
1166 0 : let mut client = config.connect(NoTls)?;
1167 0 : let connected = Instant::now();
1168 :
1169 0 : let basebackup_cmd = match lsn {
1170 : Lsn(0) => {
1171 0 : if spec.spec.mode != ComputeMode::Primary {
1172 0 : format!(
1173 0 : "basebackup {} {} --gzip --replica",
1174 : spec.tenant_id, spec.timeline_id
1175 : )
1176 : } else {
1177 0 : format!("basebackup {} {} --gzip", spec.tenant_id, spec.timeline_id)
1178 : }
1179 : }
1180 : _ => {
1181 0 : if spec.spec.mode != ComputeMode::Primary {
1182 0 : format!(
1183 0 : "basebackup {} {} {} --gzip --replica",
1184 : spec.tenant_id, spec.timeline_id, lsn
1185 : )
1186 : } else {
1187 0 : format!(
1188 0 : "basebackup {} {} {} --gzip",
1189 : spec.tenant_id, spec.timeline_id, lsn
1190 : )
1191 : }
1192 : }
1193 : };
1194 :
1195 0 : let copyreader = client.copy_out(basebackup_cmd.as_str())?;
1196 0 : let mut measured_reader = MeasuredReader::new(copyreader);
1197 0 : let mut bufreader = std::io::BufReader::new(&mut measured_reader);
1198 :
1199 : // Read the archive directly from the `CopyOutReader`
1200 : //
1201 : // Set `ignore_zeros` so that unpack() reads all the Copy data and
1202 : // doesn't stop at the end-of-archive marker. Otherwise, if the server
1203 : // sends an Error after finishing the tarball, we will not notice it.
1204 : // The tar::Builder drop handler will write an end-of-archive marker
1205 : // before emitting the error, and we would not see it otherwise.
1206 0 : let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut bufreader));
1207 0 : ar.set_ignore_zeros(true);
1208 0 : ar.unpack(&self.params.pgdata)?;
1209 :
1210 0 : Ok((connected, measured_reader.get_byte_count()))
1211 0 : }
1212 :
1213 : // Gets the basebackup in a retry loop
1214 : #[instrument(skip_all, fields(%lsn))]
1215 : pub fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
1216 : let mut retry_period_ms = 500.0;
1217 : let mut attempts = 0;
1218 : const DEFAULT_ATTEMPTS: u16 = 10;
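// With the 500 ms initial delay and the 1.5x growth below, the (up to) ten
// retry sleeps add up to roughly 57 seconds before we give up.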
1219 : #[cfg(feature = "testing")]
1220 : let max_attempts = if let Ok(v) = env::var("NEON_COMPUTE_TESTING_BASEBACKUP_RETRIES") {
1221 : u16::from_str(&v).unwrap()
1222 : } else {
1223 : DEFAULT_ATTEMPTS
1224 : };
1225 : #[cfg(not(feature = "testing"))]
1226 : let max_attempts = DEFAULT_ATTEMPTS;
1227 : loop {
1228 : let result = self.try_get_basebackup(compute_state, lsn);
1229 : match result {
1230 : Ok(_) => {
1231 : return result;
1232 : }
1233 : Err(ref e) if attempts < max_attempts => {
1234 : warn!(
1235 : "Failed to get basebackup: {} (attempt {}/{})",
1236 : e, attempts, max_attempts
1237 : );
1238 : std::thread::sleep(std::time::Duration::from_millis(retry_period_ms as u64));
1239 : retry_period_ms *= 1.5;
1240 : }
1241 : Err(_) => {
1242 : return result;
1243 : }
1244 : }
1245 : attempts += 1;
1246 : }
1247 : }
1248 :
1249 0 : pub async fn check_safekeepers_synced_async(
1250 0 : &self,
1251 0 : compute_state: &ComputeState,
1252 0 : ) -> Result<Option<Lsn>> {
1253 : // Construct a connection config for each safekeeper
1254 0 : let pspec: ParsedSpec = compute_state
1255 0 : .pspec
1256 0 : .as_ref()
1257 0 : .expect("spec must be set")
1258 0 : .clone();
1259 0 : let sk_connstrs: Vec<String> = pspec.safekeeper_connstrings.clone();
1260 0 : let sk_configs = sk_connstrs.into_iter().map(|connstr| {
1261 : // Format connstr
1262 0 : let id = connstr.clone();
1263 0 : let connstr = format!("postgresql://no_user@{connstr}");
1264 0 : let options = format!(
1265 0 : "-c timeline_id={} tenant_id={}",
1266 : pspec.timeline_id, pspec.tenant_id
1267 : );
1268 :
1269 : // Construct client
1270 0 : let mut config = tokio_postgres::Config::from_str(&connstr).unwrap();
1271 0 : config.options(&options);
1272 0 : if let Some(storage_auth_token) = pspec.storage_auth_token.clone() {
1273 0 : config.password(storage_auth_token);
1274 0 : }
1275 :
1276 0 : (id, config)
1277 0 : });
1278 :
1279 : // Create task set to query all safekeepers
1280 0 : let mut tasks = FuturesUnordered::new();
1281 0 : let quorum = sk_configs.len() / 2 + 1;
1282 0 : for (id, config) in sk_configs {
1283 0 : let timeout = tokio::time::Duration::from_millis(100);
1284 0 : let task = tokio::time::timeout(timeout, ping_safekeeper(id, config));
1285 0 : tasks.push(tokio::spawn(task));
1286 0 : }
1287 :
1288 : // Get a quorum of responses or errors
1289 0 : let mut responses = Vec::new();
1290 0 : let mut join_errors = Vec::new();
1291 0 : let mut task_errors = Vec::new();
1292 0 : let mut timeout_errors = Vec::new();
1293 0 : while let Some(response) = tasks.next().await {
1294 0 : match response {
1295 0 : Ok(Ok(Ok(r))) => responses.push(r),
1296 0 : Ok(Ok(Err(e))) => task_errors.push(e),
1297 0 : Ok(Err(e)) => timeout_errors.push(e),
1298 0 : Err(e) => join_errors.push(e),
1299 : };
1300 0 : if responses.len() >= quorum {
1301 0 : break;
1302 0 : }
1303 0 : if join_errors.len() + task_errors.len() + timeout_errors.len() >= quorum {
1304 0 : break;
1305 0 : }
1306 : }
1307 :
1308 : // In case of error, log and fail the check, but don't crash.
1309 : // We're playing it safe because these errors could be transient
1310 : // and we don't yet retry.
1311 0 : if responses.len() < quorum {
1312 0 : error!(
1313 0 : "failed sync safekeepers check {:?} {:?} {:?}",
1314 : join_errors, task_errors, timeout_errors
1315 : );
1316 0 : return Ok(None);
1317 0 : }
1318 :
1319 0 : Ok(check_if_synced(responses))
1320 0 : }
1321 :
1322 : // Fast path for sync_safekeepers. If they're already synced we get the LSN
1323 : // in one roundtrip. If not, we should do a full sync_safekeepers.
1324 : #[instrument(skip_all)]
1325 : pub fn check_safekeepers_synced(&self, compute_state: &ComputeState) -> Result<Option<Lsn>> {
1326 : let start_time = Utc::now();
1327 :
1328 : let rt = tokio::runtime::Handle::current();
1329 : let result = rt.block_on(self.check_safekeepers_synced_async(compute_state));
1330 :
1331 : // Record runtime
1332 : self.state.lock().unwrap().metrics.sync_sk_check_ms = Utc::now()
1333 : .signed_duration_since(start_time)
1334 : .to_std()
1335 : .unwrap()
1336 : .as_millis() as u64;
1337 : result
1338 : }
1339 :
1340 : // Run `postgres` in a special mode with `--sync-safekeepers` argument
1341 : // and return the reported LSN back to the caller.
1342 : #[instrument(skip_all)]
1343 : pub fn sync_safekeepers(&self, storage_auth_token: Option<String>) -> Result<Lsn> {
1344 : let start_time = Utc::now();
1345 :
1346 : let mut sync_handle = maybe_cgexec(&self.params.pgbin)
1347 : .args(["--sync-safekeepers"])
1348 : .env("PGDATA", &self.params.pgdata) // we cannot use -D in this mode
1349 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
1350 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
1351 : } else {
1352 : vec![]
1353 : })
1354 : .stdout(Stdio::piped())
1355 : .stderr(Stdio::piped())
1356 : .spawn()
1357 : .expect("postgres --sync-safekeepers failed to start");
1358 : SYNC_SAFEKEEPERS_PID.store(sync_handle.id(), Ordering::SeqCst);
1359 :
1360 : // `postgres --sync-safekeepers` will print all log output to stderr and
1361 : // final LSN to stdout. So we leave stdout to collect LSN, while stderr logs
1362 : // will be collected in a child thread.
1363 : let stderr = sync_handle
1364 : .stderr
1365 : .take()
1366 : .expect("stderr should be captured");
1367 : let logs_handle = handle_postgres_logs(stderr);
1368 :
1369 : let sync_output = sync_handle
1370 : .wait_with_output()
1371 : .expect("postgres --sync-safekeepers failed");
1372 : SYNC_SAFEKEEPERS_PID.store(0, Ordering::SeqCst);
1373 :
1374 : // Process has exited, so we can join the logs thread.
1375 : let _ = tokio::runtime::Handle::current()
1376 : .block_on(logs_handle)
1377 0 : .map_err(|e| tracing::error!("log task panicked: {:?}", e));
1378 :
1379 : if !sync_output.status.success() {
1380 : anyhow::bail!(
1381 : "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
1382 : sync_output.status,
1383 : String::from_utf8(sync_output.stdout)
1384 : .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
1385 : );
1386 : }
1387 :
1388 : self.state.lock().unwrap().metrics.sync_safekeepers_ms = Utc::now()
1389 : .signed_duration_since(start_time)
1390 : .to_std()
1391 : .unwrap()
1392 : .as_millis() as u64;
1393 :
1394 : let lsn = Lsn::from_str(String::from_utf8(sync_output.stdout)?.trim())?;
1395 :
1396 : Ok(lsn)
1397 : }
1398 :
1399 : /// Do all the preparations like PGDATA directory creation, configuration,
1400 : /// safekeepers sync, basebackup, etc.
1401 : #[instrument(skip_all)]
1402 : pub fn prepare_pgdata(&self, compute_state: &ComputeState) -> Result<()> {
1403 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
1404 : let spec = &pspec.spec;
1405 : let pgdata_path = Path::new(&self.params.pgdata);
1406 :
1407 : let tls_config = self.tls_config(&pspec.spec);
1408 :
1409 : // Remove/create an empty pgdata directory and put configuration there.
1410 : self.create_pgdata()?;
1411 : config::write_postgres_conf(
1412 : pgdata_path,
1413 : &self.params,
1414 : &pspec.spec,
1415 : self.params.internal_http_port,
1416 : tls_config,
1417 : )?;
1418 :
1419 : // Syncing safekeepers is only safe with primary nodes: if a primary
1420 : // is already connected it will be kicked out, so a secondary (standby)
1421 : // cannot sync safekeepers.
1422 : let lsn = match spec.mode {
1423 : ComputeMode::Primary => {
1424 : info!("checking if safekeepers are synced");
1425 : let lsn = if let Ok(Some(lsn)) = self.check_safekeepers_synced(compute_state) {
1426 : lsn
1427 : } else {
1428 : info!("starting safekeepers syncing");
1429 : self.sync_safekeepers(pspec.storage_auth_token.clone())
1430 : .with_context(|| "failed to sync safekeepers")?
1431 : };
1432 : info!("safekeepers synced at LSN {}", lsn);
1433 : lsn
1434 : }
1435 : ComputeMode::Static(lsn) => {
1436 : info!("Starting read-only node at static LSN {}", lsn);
1437 : lsn
1438 : }
1439 : ComputeMode::Replica => {
1440 : info!("Initializing standby from latest Pageserver LSN");
1441 : Lsn(0)
1442 : }
1443 : };
1444 :
1445 : info!(
1446 : "getting basebackup@{} from pageserver {}",
1447 : lsn, &pspec.pageserver_connstr
1448 : );
1449 0 : self.get_basebackup(compute_state, lsn).with_context(|| {
1450 0 : format!(
1451 0 : "failed to get basebackup@{} from pageserver {}",
1452 0 : lsn, &pspec.pageserver_connstr
1453 : )
1454 0 : })?;
1455 :
1456 : // Update pg_hba.conf received with basebackup.
1457 : update_pg_hba(pgdata_path, None)?;
1458 :
1459 : // Place pg_dynshmem under /dev/shm. This allows us to use
1460 : // 'dynamic_shared_memory_type = mmap' so that the files are placed in
1461 : // /dev/shm, similar to how 'dynamic_shared_memory_type = posix' works.
1462 : //
1463 : // Why on earth don't we just stick to the 'posix' default, you might
1464 : // ask. It turns out that making large allocations with 'posix' doesn't
1465 : // work very well with autoscaling. The behavior we want is that:
1466 : //
1467 : // 1. You can make large DSM allocations, larger than the current RAM
1468 : // size of the VM, without errors
1469 : //
1470 : // 2. If the allocated memory is really used, the VM is scaled up
1471 : // automatically to accommodate that
1472 : //
1473 : // We try to make that possible by having swap in the VM. But with the
1474 : // default 'posix' DSM implementation, we fail step 1, even when there's
1475 : // plenty of swap available. PostgreSQL uses posix_fallocate() to create
1476 : // the shmem segment, which is really just a file in /dev/shm in Linux,
1477 : // but posix_fallocate() on tmpfs returns ENOMEM if the size is larger
1478 : // than available RAM.
1479 : //
1480 : // Using 'dynamic_shared_memory_type = mmap' works around that, because
1481 : // the Postgres 'mmap' DSM implementation doesn't use
1482 : // posix_fallocate(). Instead, it uses repeated calls to write(2) to
1483 : // fill the file with zeros. It's weird that that differs between
1484 : // 'posix' and 'mmap', but we take advantage of it. When the file is
1485 : // filled slowly with write(2), the kernel allows it to grow larger, as
1486 : // long as there's swap available.
1487 : //
1488 : // In short, using 'dynamic_shared_memory_type = mmap' allows a single DSM
1489 : // segment to be larger than the currently available RAM. But we don't want
1490 : // to store it in a real file, which the kernel would try to flush to disk,
1491 : // so we symlink pg_dynshmem to /dev/shm.
1492 : //
1493 : // We don't set 'dynamic_shared_memory_type = mmap' here; we let the
1494 : // control plane control that option. If 'mmap' is not used, this
1495 : // symlink doesn't affect anything.
1496 : //
1497 : // See https://github.com/neondatabase/autoscaling/issues/800
1498 : std::fs::remove_dir(pgdata_path.join("pg_dynshmem"))?;
1499 : symlink("/dev/shm/", pgdata_path.join("pg_dynshmem"))?;
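 : // To illustrate the difference (a hypothetical sketch, not executed here):
 : //
 : //     // On tmpfs, preallocating beyond available RAM fails up front:
 : //     nix::fcntl::posix_fallocate(fd, 0, huge_len); // -> Err(Errno::ENOMEM)
 : //
 : // whereas filling the same file with repeated small write(2) calls keeps
 : // succeeding as long as swap can absorb the pages, which is exactly the
 : // behavior the 'mmap' DSM implementation relies on.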
1500 :
1501 : match spec.mode {
1502 : ComputeMode::Primary => {}
1503 : ComputeMode::Replica | ComputeMode::Static(..) => {
1504 : add_standby_signal(pgdata_path)?;
1505 : }
1506 : }
1507 :
1508 : Ok(())
1509 : }
1510 :
1511 : /// Start and stop a postgres process to warm up the VM for startup.
1512 0 : pub fn prewarm_postgres_vm_memory(&self) -> Result<()> {
1513 0 : info!("prewarming VM memory");
1514 :
1515 : // Create pgdata
1516 0 : let pgdata = &format!("{}.warmup", self.params.pgdata);
1517 0 : create_pgdata(pgdata)?;
1518 :
1519 : // Run initdb to completion
1520 0 : info!("running initdb");
1521 0 : let initdb_bin = Path::new(&self.params.pgbin)
1522 0 : .parent()
1523 0 : .unwrap()
1524 0 : .join("initdb");
1525 0 : Command::new(initdb_bin)
1526 0 : .args(["--pgdata", pgdata])
1527 0 : .output()
1528 0 : .expect("cannot start initdb process");
1529 :
1530 : // Write conf
1531 : use std::io::Write;
1532 0 : let conf_path = Path::new(pgdata).join("postgresql.conf");
1533 0 : let mut file = std::fs::File::create(conf_path)?;
1534 0 : writeln!(file, "shared_buffers=65536")?;
1535 0 : writeln!(file, "port=51055")?; // Nobody should be connecting
1536 0 : writeln!(file, "shared_preload_libraries = 'neon'")?;
1537 :
1538 : // Start postgres
1539 0 : info!("starting postgres");
1540 0 : let mut pg = maybe_cgexec(&self.params.pgbin)
1541 0 : .args(["-D", pgdata])
1542 0 : .spawn()
1543 0 : .expect("cannot start postgres process");
1544 :
1545 : // Stop it when it's ready
1546 0 : info!("waiting for postgres");
1547 0 : wait_for_postgres(&mut pg, Path::new(pgdata))?;
1548 : // SIGQUIT orders postgres to exit immediately. We avoid SIGKILL so that
1549 : // no orphaned processes are left prowling around while the datadir is
1550 : // wiped.
1551 0 : let pm_pid = Pid::from_raw(pg.id() as i32);
1552 0 : kill(pm_pid, Signal::SIGQUIT)?;
1553 0 : info!("sent SIGQUIT signal");
1554 0 : pg.wait()?;
1555 0 : info!("done prewarming vm memory");
1556 :
1557 : // clean up
1558 0 : let _ok = fs::remove_dir_all(pgdata);
1559 0 : Ok(())
1560 0 : }
1561 :
1562 : /// Start Postgres as a child process and wait for it to start accepting
1563 : /// connections.
1564 : ///
1565 : /// Returns a handle to the child process and a handle to the logs thread.
1566 : #[instrument(skip_all)]
1567 : pub fn start_postgres(&self, storage_auth_token: Option<String>) -> Result<PostgresHandle> {
1568 : let pgdata_path = Path::new(&self.params.pgdata);
1569 :
1570 : // Run postgres as a child process.
1571 : let mut pg = maybe_cgexec(&self.params.pgbin)
1572 : .args(["-D", &self.params.pgdata])
1573 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
1574 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
1575 : } else {
1576 : vec![]
1577 : })
1578 : .stderr(Stdio::piped())
1579 : .spawn()
1580 : .expect("cannot start postgres process");
1581 : PG_PID.store(pg.id(), Ordering::SeqCst);
1582 :
1583 : // Start a task to collect logs from stderr.
1584 : let stderr = pg.stderr.take().expect("stderr should be captured");
1585 : let logs_handle = handle_postgres_logs(stderr);
1586 :
1587 : wait_for_postgres(&mut pg, pgdata_path)?;
1588 :
1589 : Ok(PostgresHandle {
1590 : postgres: pg,
1591 : log_collector: logs_handle,
1592 : })
1593 : }
1594 :
1595 : /// Wait for the child Postgres process forever. In this state Ctrl+C will
1596 : /// propagate to Postgres and it will be shut down as well.
1597 0 : fn wait_postgres(&self, mut pg_handle: PostgresHandle) -> std::process::ExitStatus {
1598 0 : info!(postmaster_pid = %pg_handle.postgres.id(), "Waiting for Postgres to exit");
1599 :
1600 0 : let ecode = pg_handle
1601 0 : .postgres
1602 0 : .wait()
1603 0 : .expect("failed to start waiting on Postgres process");
1604 0 : PG_PID.store(0, Ordering::SeqCst);
1605 :
1606 : // Process has exited. Wait for the log collecting task to finish.
1607 0 : let _ = tokio::runtime::Handle::current()
1608 0 : .block_on(pg_handle.log_collector)
1609 0 : .map_err(|e| tracing::error!("log task panicked: {:?}", e));
1610 :
1611 0 : ecode
1612 0 : }
1613 :
1614 : /// Do post-configuration of an already started Postgres. This function spawns a background task to
1615 : /// configure the database after applying the compute spec. Currently, it upgrades the neon extension
1616 : /// version. In the future, it may upgrade all 3rd-party extensions.
1617 : #[instrument(skip_all)]
1618 : pub fn post_apply_config(&self) -> Result<()> {
1619 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:post_apply_config"));
1620 0 : tokio::spawn(async move {
1621 0 : let res = async {
1622 0 : let (mut client, connection) = conf.connect(NoTls).await?;
1623 0 : tokio::spawn(async move {
1624 0 : if let Err(e) = connection.await {
1625 0 : eprintln!("connection error: {e}");
1626 0 : }
1627 0 : });
1628 :
1629 0 : handle_neon_extension_upgrade(&mut client)
1630 0 : .await
1631 0 : .context("handle_neon_extension_upgrade")?;
1632 0 : Ok::<_, anyhow::Error>(())
1633 0 : }
1634 0 : .await;
1635 0 : if let Err(err) = res {
1636 0 : error!("error while post_apply_config: {err:#}");
1637 0 : }
1638 0 : });
1639 : Ok(())
1640 : }
1641 :
1642 0 : pub fn get_conn_conf(&self, application_name: Option<&str>) -> postgres::Config {
1643 0 : let mut conf = self.conn_conf.clone();
1644 0 : if let Some(application_name) = application_name {
1645 0 : conf.application_name(application_name);
1646 0 : }
1647 0 : conf
1648 0 : }
1649 :
1650 0 : pub fn get_tokio_conn_conf(&self, application_name: Option<&str>) -> tokio_postgres::Config {
1651 0 : let mut conf = self.tokio_conn_conf.clone();
1652 0 : if let Some(application_name) = application_name {
1653 0 : conf.application_name(application_name);
1654 0 : }
1655 0 : conf
1656 0 : }
1657 :
1658 0 : pub async fn get_maintenance_client(
1659 0 : conf: &tokio_postgres::Config,
1660 0 : ) -> Result<tokio_postgres::Client> {
1661 0 : let mut conf = conf.clone();
1662 0 : conf.application_name("compute_ctl:apply_config");
1663 :
1664 0 : let (client, conn) = match conf.connect(NoTls).await {
1665 : // If the connection fails, it may be an old node with the `zenith_admin` superuser.
1666 : //
1667 : // In this case we need to connect with the old `zenith_admin` name
1668 : // and create a new user. We cannot simply rename the connected user,
1669 : // but we can create a new one and grant it all privileges.
1670 0 : Err(e) => match e.code() {
1671 : Some(&SqlState::INVALID_PASSWORD)
1672 : | Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => {
1673 : // Connect with `zenith_admin` if `cloud_admin` could not authenticate
1674 0 : info!(
1675 0 : "cannot connect to Postgres: {}, retrying with 'zenith_admin' username",
1676 : e
1677 : );
1678 0 : let mut zenith_admin_conf = postgres::config::Config::from(conf.clone());
1679 0 : zenith_admin_conf.application_name("compute_ctl:apply_config");
1680 0 : zenith_admin_conf.user("zenith_admin");
1681 :
1682 : // It doesn't matter what the options were before; here we just want
1683 : // to connect and create a new superuser role.
1684 : const ZENITH_OPTIONS: &str = "-c role=zenith_admin -c default_transaction_read_only=off -c search_path=public -c statement_timeout=0";
1685 0 : zenith_admin_conf.options(ZENITH_OPTIONS);
1686 :
1687 0 : let mut client =
1688 0 : zenith_admin_conf.connect(NoTls)
1689 0 : .context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
1690 :
1691 : // Disable forwarding so that users don't get a cloud_admin role
1692 0 : let mut func = || {
1693 0 : client.simple_query("SET neon.forward_ddl = false")?;
1694 0 : client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
1695 0 : client.simple_query("GRANT zenith_admin TO cloud_admin")?;
1696 0 : Ok::<_, anyhow::Error>(())
1697 0 : };
1698 0 : func().context("apply_config setup cloud_admin")?;
1699 :
1700 0 : drop(client);
1701 :
1702 : // Reconnect with the connection string carrying the expected username
1703 0 : conf.connect(NoTls).await?
1704 : }
1705 0 : _ => return Err(e.into()),
1706 : },
1707 0 : Ok((client, conn)) => (client, conn),
1708 : };
1709 :
1710 0 : spawn(async move {
1711 0 : if let Err(e) = conn.await {
1712 0 : error!("maintenance client connection error: {}", e);
1713 0 : }
1714 0 : });
1715 :
1716 : // Disable DDL forwarding because the control plane already knows about the roles/databases
1717 : // we're about to modify.
1718 0 : client
1719 0 : .simple_query("SET neon.forward_ddl = false")
1720 0 : .await
1721 0 : .context("apply_config SET neon.forward_ddl = false")?;
1722 :
1723 0 : Ok(client)
1724 0 : }
1725 :
1726 : /// Do initial configuration of the already started Postgres.
1727 : #[instrument(skip_all)]
1728 : pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
1729 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:apply_config"));
1730 :
1731 : let conf = Arc::new(conf);
1732 : let spec = Arc::new(
1733 : compute_state
1734 : .pspec
1735 : .as_ref()
1736 : .expect("spec must be set")
1737 : .spec
1738 : .clone(),
1739 : );
1740 :
1741 : let mut tls_config = None::<TlsConfig>;
1742 : if spec.features.contains(&ComputeFeature::TlsExperimental) {
1743 : tls_config = self.compute_ctl_config.tls.clone();
1744 : }
1745 :
1746 : self.update_installed_extensions_collection_interval(&spec);
1747 :
1748 : let max_concurrent_connections = self.max_service_connections(compute_state, &spec);
1749 :
1750 : // Merge-apply spec & changes to PostgreSQL state.
1751 : self.apply_spec_sql(spec.clone(), conf.clone(), max_concurrent_connections)?;
1752 :
1753 : if let Some(local_proxy) = &spec.clone().local_proxy_config {
1754 : let mut local_proxy = local_proxy.clone();
1755 : local_proxy.tls = tls_config.clone();
1756 :
1757 : info!("configuring local_proxy");
1758 : local_proxy::configure(&local_proxy).context("apply_config local_proxy")?;
1759 : }
1760 :
1761 : // Run migrations separately to not hold up cold starts
1762 : let lakebase_mode = self.params.lakebase_mode;
1763 : let params = self.params.clone();
1764 0 : tokio::spawn(async move {
1765 0 : let mut conf = conf.as_ref().clone();
1766 0 : conf.application_name("compute_ctl:migrations");
1767 :
1768 0 : match conf.connect(NoTls).await {
1769 0 : Ok((mut client, connection)) => {
1770 0 : tokio::spawn(async move {
1771 0 : if let Err(e) = connection.await {
1772 0 : eprintln!("connection error: {e}");
1773 0 : }
1774 0 : });
1775 0 : if let Err(e) = handle_migrations(params, &mut client, lakebase_mode).await {
1776 0 : error!("Failed to run migrations: {}", e);
1777 0 : }
1778 : }
1779 0 : Err(e) => {
1780 0 : error!(
1781 0 : "Failed to connect to the compute for running migrations: {}",
1782 : e
1783 : );
1784 : }
1785 : };
1786 0 : });
1787 :
1788 : Ok::<(), anyhow::Error>(())
1789 : }
1790 :
1791 : // Signal the configurator to refresh the configuration by pulling a new spec from the HCC.
1792 : // Note that this merely triggers a notification on a condition variable the configurator thread
1793 : // waits on. The configurator thread (in configurator.rs) pulls the new spec from the HCC and
1794 : // applies it.
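 : // The waiter side follows the usual Condvar pattern; a minimal sketch,
 : // assuming a hypothetical `needs_refresh` predicate in configurator.rs:
 : //
 : //     let mut state = compute.state.lock().unwrap();
 : //     while !needs_refresh(&state) {
 : //         state = compute.state_changed.wait(state).unwrap();
 : //     }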
1795 0 : pub async fn signal_refresh_configuration(&self) -> Result<()> {
1796 0 : let states_allowing_configuration_refresh = [
1797 0 : ComputeStatus::Running,
1798 0 : ComputeStatus::Failed,
1799 0 : // ComputeStatus::RefreshConfigurationPending,
1800 0 : ];
1801 :
1802 0 : let state = self.state.lock().expect("state lock poisoned");
1803 0 : if states_allowing_configuration_refresh.contains(&state.status) {
1804 : // state.status = ComputeStatus::RefreshConfigurationPending;
1805 0 : self.state_changed.notify_all();
1806 0 : Ok(())
1807 0 : } else if state.status == ComputeStatus::Init {
1808 : // If the compute is in Init state, we can't refresh the configuration immediately,
1809 : // but we should be able to do that soon.
1810 0 : Ok(())
1811 : } else {
1812 0 : Err(anyhow::anyhow!(
1813 0 : "Cannot refresh compute configuration in state {:?}",
1814 0 : state.status
1815 0 : ))
1816 : }
1817 0 : }
1818 :
1819 : // This wraps `pg_ctl reload`; note that right now we don't use
1820 : // `pg_ctl` for start / stop, only for reloading the config.
1821 : #[instrument(skip_all)]
1822 : fn pg_reload_conf(&self) -> Result<()> {
1823 : let pgctl_bin = Path::new(&self.params.pgbin)
1824 : .parent()
1825 : .unwrap()
1826 : .join("pg_ctl");
1827 : Command::new(pgctl_bin)
1828 : .args(["reload", "-D", &self.params.pgdata])
1829 : .output()
1830 : .expect("cannot run pg_ctl process");
1831 : Ok(())
1832 : }
1833 :
1834 : /// Similar to `apply_config()`, but performs a slightly different sequence of operations,
1835 : /// as it's used to reconfigure a previously started and configured Postgres node.
1836 : #[instrument(skip_all)]
1837 : pub fn reconfigure(&self) -> Result<()> {
1838 : let spec = self.state.lock().unwrap().pspec.clone().unwrap().spec;
1839 :
1840 : let tls_config = self.tls_config(&spec);
1841 :
1842 : self.update_installed_extensions_collection_interval(&spec);
1843 :
1844 : if let Some(ref pgbouncer_settings) = spec.pgbouncer_settings {
1845 : info!("tuning pgbouncer");
1846 :
1847 : let pgbouncer_settings = pgbouncer_settings.clone();
1848 : let tls_config = tls_config.clone();
1849 :
1850 : // Spawn a background task to do the tuning,
1851 : // so that we don't block the main thread that starts Postgres.
1852 0 : tokio::spawn(async move {
1853 0 : let res = tune_pgbouncer(pgbouncer_settings, tls_config).await;
1854 0 : if let Err(err) = res {
1855 0 : error!("error while tuning pgbouncer: {err:?}");
1856 0 : }
1857 0 : });
1858 : }
1859 :
1860 : if let Some(ref local_proxy) = spec.local_proxy_config {
1861 : info!("configuring local_proxy");
1862 :
1863 : // Spawn a background task to do the configuration,
1864 : // so that we don't block the main thread that starts Postgres.
1865 : let mut local_proxy = local_proxy.clone();
1866 : local_proxy.tls = tls_config.clone();
1867 0 : tokio::spawn(async move {
1868 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
1869 0 : error!("error while configuring local_proxy: {err:?}");
1870 0 : }
1871 0 : });
1872 : }
1873 :
1874 : // Reconfigure rsyslog for Postgres logs export
1875 : let conf = PostgresLogsRsyslogConfig::new(spec.logs_export_host.as_deref());
1876 : configure_postgres_logs_export(conf)?;
1877 :
1878 : // Write new config
1879 : let pgdata_path = Path::new(&self.params.pgdata);
1880 : config::write_postgres_conf(
1881 : pgdata_path,
1882 : &self.params,
1883 : &spec,
1884 : self.params.internal_http_port,
1885 : tls_config,
1886 : )?;
1887 :
1888 : self.pg_reload_conf()?;
1889 :
1890 : if !spec.skip_pg_catalog_updates {
1891 : let max_concurrent_connections = spec.reconfigure_concurrency;
1892 : // Temporarily reset max_cluster_size in config
1893 : // to avoid the possibility of hitting the limit, while we are reconfiguring:
1894 : // creating new extensions, roles, etc.
1895 0 : config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
1896 0 : self.pg_reload_conf()?;
1897 :
1898 0 : if spec.mode == ComputeMode::Primary {
1899 0 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:reconfigure"));
1900 0 : let conf = Arc::new(conf);
1901 :
1902 0 : let spec = Arc::new(spec.clone());
1903 :
1904 0 : self.apply_spec_sql(spec, conf, max_concurrent_connections)?;
1905 0 : }
1906 :
1907 0 : Ok(())
1908 0 : })?;
1909 : self.pg_reload_conf()?;
1910 : }
1911 :
1912 : let unknown_op = "unknown".to_string();
1913 : let op_id = spec.operation_uuid.as_ref().unwrap_or(&unknown_op);
1914 : info!(
1915 : "finished reconfiguration of compute node for operation {}",
1916 : op_id
1917 : );
1918 :
1919 : Ok(())
1920 : }
1921 :
1922 : #[instrument(skip_all)]
1923 : pub fn configure_as_primary(&self, compute_state: &ComputeState) -> Result<()> {
1924 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
1925 :
1926 : assert!(pspec.spec.mode == ComputeMode::Primary);
1927 : if !pspec.spec.skip_pg_catalog_updates {
1928 : let pgdata_path = Path::new(&self.params.pgdata);
1929 : // Temporarily reset max_cluster_size in config
1930 : // to avoid the possibility of hitting the limit while we are applying config:
1931 : // creating new extensions, roles, etc.
1932 0 : config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
1933 0 : self.pg_reload_conf()?;
1934 :
1935 0 : self.apply_config(compute_state)?;
1936 :
1937 0 : Ok(())
1938 0 : })?;
1939 :
1940 : let postgresql_conf_path = pgdata_path.join("postgresql.conf");
1941 : if config::line_in_file(
1942 : &postgresql_conf_path,
1943 : "neon.disable_logical_replication_subscribers=false",
1944 : )? {
1945 : info!(
1946 : "updated postgresql.conf to set neon.disable_logical_replication_subscribers=false"
1947 : );
1948 : }
1949 : self.pg_reload_conf()?;
1950 : }
1951 : self.post_apply_config()?;
1952 :
1953 : Ok(())
1954 : }
1955 :
1956 0 : pub async fn watch_cert_for_changes(self: Arc<Self>) {
1957 : // update status on cert renewal
1958 0 : if let Some(tls_config) = &self.compute_ctl_config.tls {
1959 0 : let tls_config = tls_config.clone();
1960 :
1961 : // wait until the cert exists.
1962 0 : let mut cert_watch = watch_cert_for_changes(tls_config.cert_path.clone()).await;
1963 :
1964 0 : tokio::task::spawn_blocking(move || {
1965 0 : let handle = tokio::runtime::Handle::current();
1966 : 'cert_update: loop {
1967 : // Let postgres/pgbouncer/local_proxy know the new cert/key exists.
1968 : // We need to wait until the compute is in a configurable state first.
1969 :
1970 0 : let mut state = self.state.lock().unwrap();
1971 : 'status_update: loop {
1972 0 : match state.status {
1973 : // let's update the state to config pending
1974 : ComputeStatus::ConfigurationPending | ComputeStatus::Running => {
1975 0 : state.set_status(
1976 0 : ComputeStatus::ConfigurationPending,
1977 0 : &self.state_changed,
1978 0 : );
1979 0 : break 'status_update;
1980 : }
1981 :
1982 : // exit loop
1983 : ComputeStatus::Failed
1984 : | ComputeStatus::TerminationPendingFast
1985 : | ComputeStatus::TerminationPendingImmediate
1986 0 : | ComputeStatus::Terminated => break 'cert_update,
1987 :
1988 : // wait
1989 : ComputeStatus::Init
1990 : | ComputeStatus::Configuration
1991 0 : | ComputeStatus::Empty => {
1992 0 : state = self.state_changed.wait(state).unwrap();
1993 0 : }
1994 : }
1995 : }
1996 0 : drop(state);
1997 :
1998 : // wait for a new certificate update
1999 0 : if handle.block_on(cert_watch.changed()).is_err() {
2000 0 : break;
2001 0 : }
2002 : }
2003 0 : });
2004 0 : }
2005 0 : }
2006 :
2007 0 : pub fn tls_config(&self, spec: &ComputeSpec) -> &Option<TlsConfig> {
2008 0 : if spec.features.contains(&ComputeFeature::TlsExperimental) {
2009 0 : &self.compute_ctl_config.tls
2010 : } else {
2011 0 : &None::<TlsConfig>
2012 : }
2013 0 : }
2014 :
2015 : /// Update `last_active` in the shared state, but only if the new value is more recent.
2016 0 : pub fn update_last_active(&self, last_active: Option<DateTime<Utc>>) {
2017 0 : let mut state = self.state.lock().unwrap();
2018 : // NB: `Some(<DateTime>)` is always greater than `None`.
2019 0 : if last_active > state.last_active {
2020 0 : state.last_active = last_active;
2021 0 : debug!("set the last compute activity time to: {:?}", last_active);
2022 0 : }
2023 0 : }
2024 :
2025 : // Look for core dumps and collect backtraces.
2026 : //
2027 : // EKS worker nodes have the following core dump settings:
2028 : // /proc/sys/kernel/core_pattern -> core
2029 : // /proc/sys/kernel/core_uses_pid -> 1
2030 : // ulimit -c -> unlimited
2031 : // which results in core dumps being written to postgres data directory as core.<pid>.
2032 : //
2033 : // Use that as the default location and pattern, except on macOS, where core
2034 : // dumps are written to the /cores/ directory by default.
2035 : //
2036 : // With default Linux settings, the core dump file is called just "core", so check for
2037 : // that too.
2038 0 : pub fn check_for_core_dumps(&self) -> Result<()> {
2039 0 : let core_dump_dir = match std::env::consts::OS {
2040 0 : "macos" => Path::new("/cores/"),
2041 0 : _ => Path::new(&self.params.pgdata),
2042 : };
2043 :
2044 : // Collect core dump paths if any
2045 0 : info!("checking for core dumps in {}", core_dump_dir.display());
2046 0 : let files = fs::read_dir(core_dump_dir)?;
2047 0 : let cores = files.filter_map(|entry| {
2048 0 : let entry = entry.ok()?;
2049 :
2050 0 : let is_core_dump = match entry.file_name().to_str()? {
2051 0 : n if n.starts_with("core.") => true,
2052 0 : "core" => true,
2053 0 : _ => false,
2054 : };
2055 0 : if is_core_dump {
2056 0 : Some(entry.path())
2057 : } else {
2058 0 : None
2059 : }
2060 0 : });
2061 :
2062 : // Print backtrace for each core dump
2063 0 : for core_path in cores {
2064 0 : warn!(
2065 0 : "core dump found: {}, collecting backtrace",
2066 0 : core_path.display()
2067 : );
2068 :
2069 : // Try gdb first
2070 0 : let backtrace = Command::new("gdb")
2071 0 : .args(["--batch", "-q", "-ex", "bt", &self.params.pgbin])
2072 0 : .arg(&core_path)
2073 0 : .output();
2074 :
2075 : // Try lldb if no gdb is found -- that is handy for local testing on macOS
2076 0 : let backtrace = match backtrace {
2077 0 : Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
2078 0 : warn!("cannot find gdb, trying lldb");
2079 0 : Command::new("lldb")
2080 0 : .arg("-c")
2081 0 : .arg(&core_path)
2082 0 : .args(["--batch", "-o", "bt all", "-o", "quit"])
2083 0 : .output()
2084 : }
2085 0 : _ => backtrace,
2086 0 : }?;
2087 :
2088 0 : warn!(
2089 0 : "core dump backtrace: {}",
2090 0 : String::from_utf8_lossy(&backtrace.stdout)
2091 : );
2092 0 : warn!(
2093 0 : "debugger stderr: {}",
2094 0 : String::from_utf8_lossy(&backtrace.stderr)
2095 : );
2096 : }
2097 :
2098 0 : Ok(())
2099 0 : }
2100 :
2101 : /// Select `pg_stat_statements` data and return it as stringified JSON
2102 0 : pub async fn collect_insights(&self) -> String {
2103 0 : let mut result_rows: Vec<String> = Vec::new();
2104 0 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:collect_insights"));
2105 0 : let connect_result = conf.connect(NoTls).await;
2106 0 : let (client, connection) = connect_result.unwrap();
2107 0 : tokio::spawn(async move {
2108 0 : if let Err(e) = connection.await {
2109 0 : eprintln!("connection error: {e}");
2110 0 : }
2111 0 : });
2112 0 : let result = client
2113 0 : .simple_query(
2114 0 : "SELECT
2115 0 : row_to_json(pg_stat_statements)
2116 0 : FROM
2117 0 : pg_stat_statements
2118 0 : WHERE
2119 0 : userid != 'cloud_admin'::regrole::oid
2120 0 : ORDER BY
2121 0 : (mean_exec_time + mean_plan_time) DESC
2122 0 : LIMIT 100",
2123 0 : )
2124 0 : .await;
2125 :
2126 0 : if let Ok(raw_rows) = result {
2127 0 : for message in raw_rows.iter() {
2128 0 : if let postgres::SimpleQueryMessage::Row(row) = message {
2129 0 : if let Some(json) = row.get(0) {
2130 0 : result_rows.push(json.to_string());
2131 0 : }
2132 0 : }
2133 : }
2134 :
2135 0 : format!("{{\"pg_stat_statements\": [{}]}}", result_rows.join(","))
2136 : } else {
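 : // Note: `{{` / `}}` are only unescaped inside format!-style macros, so a
 : // plain string literal needs single braces to produce valid JSON.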
2137 0 : "{\"pg_stat_statements\": []}".to_string()
2138 : }
2139 0 : }
2140 :
2141 : // Download an archive, unzip it, and place the files in the correct locations
2142 0 : pub async fn download_extension(
2143 0 : &self,
2144 0 : real_ext_name: String,
2145 0 : ext_path: RemotePath,
2146 0 : ) -> Result<u64, DownloadError> {
2147 0 : let remote_ext_base_url =
2148 0 : self.params
2149 0 : .remote_ext_base_url
2150 0 : .as_ref()
2151 0 : .ok_or(DownloadError::BadInput(anyhow::anyhow!(
2152 0 : "Remote extensions storage is not configured",
2153 0 : )))?;
2154 :
2155 0 : let ext_archive_name = ext_path.object_name().expect("bad path");
2156 :
2157 0 : let mut first_try = false;
2158 0 : if !self
2159 0 : .ext_download_progress
2160 0 : .read()
2161 0 : .expect("lock err")
2162 0 : .contains_key(ext_archive_name)
2163 0 : {
2164 0 : self.ext_download_progress
2165 0 : .write()
2166 0 : .expect("lock err")
2167 0 : .insert(ext_archive_name.to_string(), (Utc::now(), false));
2168 0 : first_try = true;
2169 0 : }
2170 0 : let (download_start, download_completed) =
2171 0 : self.ext_download_progress.read().expect("lock err")[ext_archive_name];
2172 0 : let start_time_delta = Utc::now()
2173 0 : .signed_duration_since(download_start)
2174 0 : .to_std()
2175 0 : .unwrap()
2176 0 : .as_millis() as u64;
2177 :
2178 : // how long to wait for extension download if it was started by another process
2179 : const HANG_TIMEOUT: u64 = 3000; // milliseconds
2180 :
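 : // The progress map tracks each archive as (download_start, completed):
 : // absent -> (now, false) when a download starts -> (start, true) once it
 : // finishes. A caller that finds (start, false) younger than HANG_TIMEOUT
 : // polls for completion below instead of starting a second download.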
2181 0 : if download_completed {
2182 0 : info!("extension already downloaded, skipping re-download");
2183 0 : return Ok(0);
2184 0 : } else if start_time_delta < HANG_TIMEOUT && !first_try {
2185 0 : info!(
2186 0 : "download {ext_archive_name} already started by another process, waiting until completion or timeout"
2187 : );
2188 0 : let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500));
2189 : loop {
2190 0 : info!("waiting for download");
2191 0 : interval.tick().await;
2192 0 : let (_, download_completed_now) =
2193 0 : self.ext_download_progress.read().expect("lock")[ext_archive_name];
2194 0 : if download_completed_now {
2195 0 : info!("download finished by whoever else downloaded it");
2196 0 : return Ok(0);
2197 0 : }
2198 : }
2199 : // NOTE: the above loop will get terminated
2200 : // based on the timeout of the download function
2201 0 : }
2202 :
2203 : // If the extension hasn't been downloaded before, or the previous
2204 : // download attempt started at least HANG_TIMEOUT ms ago,
2205 : // then we try to download it here.
2206 0 : info!("downloading new extension {ext_archive_name}");
2207 :
2208 0 : let download_size = extension_server::download_extension(
2209 0 : &real_ext_name,
2210 0 : &ext_path,
2211 0 : remote_ext_base_url,
2212 0 : &self.params.pgbin,
2213 0 : )
2214 0 : .await
2215 0 : .map_err(DownloadError::Other);
2216 :
2217 0 : if download_size.is_ok() {
2218 0 : self.ext_download_progress
2219 0 : .write()
2220 0 : .expect("bad lock")
2221 0 : .insert(ext_archive_name.to_string(), (download_start, true));
2222 0 : }
2223 :
2224 0 : download_size
2225 0 : }
2226 :
2227 0 : pub async fn set_role_grants(
2228 0 : &self,
2229 0 : db_name: &PgIdent,
2230 0 : schema_name: &PgIdent,
2231 0 : privileges: &[Privilege],
2232 0 : role_name: &PgIdent,
2233 0 : ) -> Result<()> {
2234 : use tokio_postgres::NoTls;
2235 :
2236 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:set_role_grants"));
2237 0 : conf.dbname(db_name);
2238 :
2239 0 : let (db_client, conn) = conf
2240 0 : .connect(NoTls)
2241 0 : .await
2242 0 : .context("Failed to connect to the database")?;
2243 0 : tokio::spawn(conn);
2244 :
2245 : // TODO: support other types of grants apart from schemas?
2246 :
2247 : // check the role grants first - to gracefully handle read-replicas.
2248 0 : let select = "SELECT privilege_type
2249 0 : FROM pg_namespace
2250 0 : JOIN LATERAL (SELECT * FROM aclexplode(nspacl) AS x) acl ON true
2251 0 : JOIN pg_user users ON acl.grantee = users.usesysid
2252 0 : WHERE users.usename = $1
2253 0 : AND nspname = $2";
2254 0 : let rows = db_client
2255 0 : .query(select, &[role_name, schema_name])
2256 0 : .await
2257 0 : .with_context(|| format!("Failed to execute query: {select}"))?;
2258 :
2259 0 : let already_granted: HashSet<String> = rows.into_iter().map(|row| row.get(0)).collect();
2260 :
2261 0 : let grants = privileges
2262 0 : .iter()
2263 0 : .filter(|p| !already_granted.contains(p.as_str()))
2264 : // These should not be quoted, as they are part of the command;
2265 : // the privilege names are already sanitized, so this is safe.
2266 0 : .map(|p| p.as_str())
2267 0 : .join(", ");
2268 :
2269 0 : if !grants.is_empty() {
2270 : // quote the schema and role name as identifiers to sanitize them.
2271 0 : let schema_name = schema_name.pg_quote();
2272 0 : let role_name = role_name.pg_quote();
2273 :
2274 0 : let query = format!("GRANT {grants} ON SCHEMA {schema_name} TO {role_name}",);
2275 0 : db_client
2276 0 : .simple_query(&query)
2277 0 : .await
2278 0 : .with_context(|| format!("Failed to execute query: {query}"))?;
2279 0 : }
2280 :
2281 0 : Ok(())
2282 0 : }
2283 :
2284 0 : pub async fn install_extension(
2285 0 : &self,
2286 0 : ext_name: &PgIdent,
2287 0 : db_name: &PgIdent,
2288 0 : ext_version: ExtVersion,
2289 0 : ) -> Result<ExtVersion> {
2290 : use tokio_postgres::NoTls;
2291 :
2292 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:install_extension"));
2293 0 : conf.dbname(db_name);
2294 :
2295 0 : let (db_client, conn) = conf
2296 0 : .connect(NoTls)
2297 0 : .await
2298 0 : .context("Failed to connect to the database")?;
2299 0 : tokio::spawn(conn);
2300 :
2301 0 : let version_query = "SELECT extversion FROM pg_extension WHERE extname = $1";
2302 0 : let version: Option<ExtVersion> = db_client
2303 0 : .query_opt(version_query, &[&ext_name])
2304 0 : .await
2305 0 : .with_context(|| format!("Failed to execute query: {version_query}"))?
2306 0 : .map(|row| row.get(0));
2307 :
2308 : // sanitize the inputs as postgres idents.
2309 0 : let ext_name: String = ext_name.pg_quote();
2310 0 : let quoted_version: String = ext_version.pg_quote();
2311 :
2312 0 : if let Some(installed_version) = version {
2313 0 : if installed_version == ext_version {
2314 0 : return Ok(installed_version);
2315 0 : }
2316 0 : let query = format!("ALTER EXTENSION {ext_name} UPDATE TO {quoted_version}");
2317 0 : db_client
2318 0 : .simple_query(&query)
2319 0 : .await
2320 0 : .with_context(|| format!("Failed to execute query: {query}"))?;
2321 : } else {
2322 0 : let query =
2323 0 : format!("CREATE EXTENSION IF NOT EXISTS {ext_name} WITH VERSION {quoted_version}");
2324 0 : db_client
2325 0 : .simple_query(&query)
2326 0 : .await
2327 0 : .with_context(|| format!("Failed to execute query: {query}"))?;
2328 : }
2329 :
2330 0 : Ok(ext_version)
2331 0 : }
2332 :
2333 0 : pub async fn prepare_preload_libraries(
2334 0 : &self,
2335 0 : spec: &ComputeSpec,
2336 0 : ) -> Result<RemoteExtensionMetrics> {
2337 0 : if self.params.remote_ext_base_url.is_none() {
2338 0 : return Ok(RemoteExtensionMetrics {
2339 0 : num_ext_downloaded: 0,
2340 0 : largest_ext_size: 0,
2341 0 : total_ext_download_size: 0,
2342 0 : });
2343 0 : }
2344 0 : let remote_extensions = spec
2345 0 : .remote_extensions
2346 0 : .as_ref()
2347 0 : .ok_or(anyhow::anyhow!("Remote extensions are not configured"))?;
2348 :
2349 0 : info!("parse shared_preload_libraries from spec.cluster.settings");
2350 0 : let mut libs_vec = Vec::new();
2351 0 : if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
2352 0 : libs_vec = libs
2353 0 : .split(&[',', '\'', ' '])
2354 0 : .filter(|s| *s != "neon" && !s.is_empty())
2355 0 : .map(str::to_string)
2356 0 : .collect();
2357 0 : }
2358 0 : info!("parse shared_preload_libraries from provided postgresql.conf");
2359 :
2360 : // that is used in neon_local and python tests
2361 0 : if let Some(conf) = &spec.cluster.postgresql_conf {
2362 0 : let conf_lines = conf.split('\n').collect::<Vec<&str>>();
2363 0 : let mut shared_preload_libraries_line = "";
2364 0 : for line in conf_lines {
2365 0 : if line.starts_with("shared_preload_libraries") {
2366 0 : shared_preload_libraries_line = line;
2367 0 : }
2368 : }
2369 0 : let mut preload_libs_vec = Vec::new();
2370 0 : if let Some(libs) = shared_preload_libraries_line.split("='").nth(1) {
2371 0 : preload_libs_vec = libs
2372 0 : .split(&[',', '\'', ' '])
2373 0 : .filter(|s| *s != "neon" && !s.is_empty())
2374 0 : .map(str::to_string)
2375 0 : .collect();
2376 0 : }
2377 0 : libs_vec.extend(preload_libs_vec);
2378 0 : }
2379 :
2380 : // Don't try to download libraries that are not in the index.
2381 : // Assume that they are already present locally.
2382 0 : libs_vec.retain(|lib| remote_extensions.library_index.contains_key(lib));
2383 :
2384 0 : info!("Downloading to shared preload libraries: {:?}", &libs_vec);
2385 :
2386 0 : let mut download_tasks = Vec::new();
2387 0 : for library in &libs_vec {
2388 0 : let (ext_name, ext_path) =
2389 0 : remote_extensions.get_ext(library, true, &BUILD_TAG, &self.params.pgversion)?;
2390 0 : download_tasks.push(self.download_extension(ext_name, ext_path));
2391 : }
2392 0 : let results = join_all(download_tasks).await;
2393 :
2394 0 : let mut remote_ext_metrics = RemoteExtensionMetrics {
2395 0 : num_ext_downloaded: 0,
2396 0 : largest_ext_size: 0,
2397 0 : total_ext_download_size: 0,
2398 0 : };
2399 0 : for result in results {
2400 0 : let download_size = match result {
2401 0 : Ok(res) => {
2402 0 : remote_ext_metrics.num_ext_downloaded += 1;
2403 0 : res
2404 : }
2405 0 : Err(err) => {
2406 : // if we failed to download an extension, we don't want to fail the whole
2407 : // process, but we do want to log the error
2408 0 : error!("Failed to download extension: {}", err);
2409 0 : 0
2410 : }
2411 : };
2412 :
2413 0 : remote_ext_metrics.largest_ext_size =
2414 0 : std::cmp::max(remote_ext_metrics.largest_ext_size, download_size);
2415 0 : remote_ext_metrics.total_ext_download_size += download_size;
2416 : }
2417 0 : Ok(remote_ext_metrics)
2418 0 : }
2419 :
2420 : /// Waits until the current thread receives a state-change notification and
2421 : /// the pageserver connection string has changed.
2422 : ///
2423 : /// The operation will time out after the specified duration.
2424 0 : pub fn wait_timeout_while_pageserver_connstr_unchanged(&self, duration: Duration) {
2425 0 : let state = self.state.lock().unwrap();
2426 0 : let old_pageserver_connstr = state
2427 0 : .pspec
2428 0 : .as_ref()
2429 0 : .expect("spec must be set")
2430 0 : .pageserver_connstr
2431 0 : .clone();
2432 0 : let mut unchanged = true;
2433 0 : let _ = self
2434 0 : .state_changed
2435 0 : .wait_timeout_while(state, duration, |s| {
2436 0 : let pageserver_connstr = &s
2437 0 : .pspec
2438 0 : .as_ref()
2439 0 : .expect("spec must be set")
2440 0 : .pageserver_connstr;
2441 0 : unchanged = pageserver_connstr == &old_pageserver_connstr;
2442 0 : unchanged
2443 0 : })
2444 0 : .unwrap();
2445 0 : if !unchanged {
2446 0 : info!("Pageserver config changed");
2447 0 : }
2448 0 : }
2449 :
2450 0 : pub fn spawn_extension_stats_task(&self) {
2451 0 : self.terminate_extension_stats_task();
2452 :
2453 0 : let conf = self.tokio_conn_conf.clone();
2454 0 : let atomic_interval = self.params.installed_extensions_collection_interval.clone();
2455 0 : let mut installed_extensions_collection_interval =
2456 0 : 2 * atomic_interval.load(std::sync::atomic::Ordering::SeqCst);
2457 0 : info!(
2458 0 : "[NEON_EXT_SPAWN] Spawning background installed extensions worker with interval: {}",
2459 : installed_extensions_collection_interval
2460 : );
2461 0 : let handle = tokio::spawn(async move {
2462 : loop {
2463 0 : info!(
2464 0 : "[NEON_EXT_INT_SLEEP]: Interval: {}",
2465 : installed_extensions_collection_interval
2466 : );
2467 : // Sleep at the start of the loop to ensure that two collections don't happen at the same time.
2468 : // The first collection happens during compute startup.
2469 0 : tokio::time::sleep(tokio::time::Duration::from_secs(
2470 0 : installed_extensions_collection_interval,
2471 0 : ))
2472 0 : .await;
2473 0 : let _ = installed_extensions(conf.clone()).await;
2474 : // Re-read the configured interval (an atomic updated when a new spec is applied) and grow ours if necessary
2475 0 : installed_extensions_collection_interval = std::cmp::max(
2476 0 : installed_extensions_collection_interval,
2477 0 : 2 * atomic_interval.load(std::sync::atomic::Ordering::SeqCst),
2478 0 : );
2479 : }
2480 : });
2481 :
2482 : // Store the new task handle
2483 0 : *self.extension_stats_task.lock().unwrap() = Some(handle);
2484 0 : }
2485 :
2486 0 : fn terminate_extension_stats_task(&self) {
2487 0 : if let Some(h) = self.extension_stats_task.lock().unwrap().take() {
2488 0 : h.abort()
2489 0 : }
2490 0 : }
2491 :
2492 0 : pub fn spawn_lfc_offload_task(self: &Arc<Self>, interval: Duration) {
2493 0 : self.terminate_lfc_offload_task();
2494 0 : let secs = interval.as_secs();
2495 0 : let this = self.clone();
2496 :
2497 0 : info!("spawning LFC offload worker with {secs}s interval");
2498 0 : let handle = spawn(async move {
2499 0 : let mut interval = time::interval(interval);
2500 0 : interval.tick().await; // returns immediately
2501 : loop {
2502 0 : interval.tick().await;
2503 :
2504 0 : let prewarm_state = this.state.lock().unwrap().lfc_prewarm_state.clone();
2505 : // Do not offload LFC state if we are currently prewarming or any issue occurred.
2506 : // If we did, we might overwrite the LFC state in endpoint storage with an
2507 : // incomplete one. Imagine a situation:
2508 : // 1. Endpoint started with `autoprewarm: true`
2509 : // 2. While prewarming is not completed, we upload the new incomplete state
2510 : // 3. Compute gets interrupted and restarts
2511 : // 4. We start again and try to prewarm with the state from 2. instead of the previous complete state
2512 0 : if matches!(
2513 0 : prewarm_state,
2514 : LfcPrewarmState::Completed
2515 : | LfcPrewarmState::NotPrewarmed
2516 : | LfcPrewarmState::Skipped
2517 : ) {
2518 0 : this.offload_lfc_async().await;
2519 0 : }
2520 : }
2521 : });
2522 0 : *self.lfc_offload_task.lock().unwrap() = Some(handle);
2523 0 : }
2524 :
2525 0 : fn terminate_lfc_offload_task(&self) {
2526 0 : if let Some(h) = self.lfc_offload_task.lock().unwrap().take() {
2527 0 : h.abort()
2528 0 : }
2529 0 : }
2530 :
2531 0 : fn update_installed_extensions_collection_interval(&self, spec: &ComputeSpec) {
2532 : // Update the interval for collecting installed-extensions statistics.
2533 : // If the value is -1, we never suspend, so use the default collection interval.
2534 : // If the value is 0, it means "default", so we also keep using the default interval.
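 : // For example: -1 -> default (3600s), 0 -> default (3600s), 300 -> 300s.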
2535 0 : if spec.suspend_timeout_seconds == -1 || spec.suspend_timeout_seconds == 0 {
2536 0 : self.params.installed_extensions_collection_interval.store(
2537 0 : DEFAULT_INSTALLED_EXTENSIONS_COLLECTION_INTERVAL,
2538 0 : std::sync::atomic::Ordering::SeqCst,
2539 0 : );
2540 0 : } else {
2541 0 : self.params.installed_extensions_collection_interval.store(
2542 0 : spec.suspend_timeout_seconds as u64,
2543 0 : std::sync::atomic::Ordering::SeqCst,
2544 0 : );
2545 0 : }
2546 0 : }
2547 : }
2548 :
2549 0 : pub async fn installed_extensions(conf: tokio_postgres::Config) -> Result<()> {
2550 0 : let res = get_installed_extensions(conf).await;
2551 0 : match res {
2552 0 : Ok(extensions) => {
2553 0 : info!(
2554 0 : "[NEON_EXT_STAT] {}",
2555 0 : serde_json::to_string(&extensions).expect("failed to serialize extensions list")
2556 : );
2557 : }
2558 0 : Err(err) => error!("could not get installed extensions: {err}"),
2559 : }
2560 0 : Ok(())
2561 0 : }
2562 :
2563 0 : pub fn forward_termination_signal(dev_mode: bool) {
2564 0 : let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
2565 0 : if ss_pid != 0 {
2566 0 : let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
2567 0 : kill(ss_pid, Signal::SIGTERM).ok();
2568 0 : }
2569 :
2570 0 : if !dev_mode {
2571 : // Terminate pgbouncer with SIGKILL
2572 0 : match pid_file::read(PGBOUNCER_PIDFILE.into()) {
2573 0 : Ok(pid_file::PidFileRead::LockedByOtherProcess(pid)) => {
2574 0 : info!("sending SIGKILL to pgbouncer process pid: {}", pid);
2575 0 : if let Err(e) = kill(pid, Signal::SIGKILL) {
2576 0 : error!("failed to terminate pgbouncer: {}", e);
2577 0 : }
2578 : }
2579 : // pgbouncer does not lock the pid file, so we read and kill the process directly
2580 : Ok(pid_file::PidFileRead::NotHeldByAnyProcess(_)) => {
2581 0 : if let Ok(pid_str) = std::fs::read_to_string(PGBOUNCER_PIDFILE) {
2582 0 : if let Ok(pid) = pid_str.trim().parse::<i32>() {
2583 0 : info!(
2584 0 : "sending SIGKILL to pgbouncer process pid: {} (from unlocked pid file)",
2585 : pid
2586 : );
2587 0 : if let Err(e) = kill(Pid::from_raw(pid), Signal::SIGKILL) {
2588 0 : error!("failed to terminate pgbouncer: {}", e);
2589 0 : }
2590 0 : }
2591 : } else {
2592 0 : info!("pgbouncer pid file exists but process not running");
2593 : }
2594 : }
2595 : Ok(pid_file::PidFileRead::NotExist) => {
2596 0 : info!("pgbouncer pid file not found, process may not be running");
2597 : }
2598 0 : Err(e) => {
2599 0 : error!("error reading pgbouncer pid file: {}", e);
2600 : }
2601 : }
2602 :
2603 : // Terminate local_proxy
2604 0 : match pid_file::read("/etc/local_proxy/pid".into()) {
2605 0 : Ok(pid_file::PidFileRead::LockedByOtherProcess(pid)) => {
2606 0 : info!("sending SIGTERM to local_proxy process pid: {}", pid);
2607 0 : if let Err(e) = kill(pid, Signal::SIGTERM) {
2608 0 : error!("failed to terminate local_proxy: {}", e);
2609 0 : }
2610 : }
2611 : Ok(pid_file::PidFileRead::NotHeldByAnyProcess(_)) => {
2612 0 : info!("local_proxy PID file exists but process not running");
2613 : }
2614 : Ok(pid_file::PidFileRead::NotExist) => {
2615 0 : info!("local_proxy PID file not found, process may not be running");
2616 : }
2617 0 : Err(e) => {
2618 0 : error!("error reading local_proxy PID file: {}", e);
2619 : }
2620 : }
2621 : } else {
2622 0 : info!("Skipping pgbouncer and local_proxy termination because we are in dev mode");
2623 : }
2624 :
2625 0 : let pg_pid = PG_PID.load(Ordering::SeqCst);
2626 0 : if pg_pid != 0 {
2627 0 : let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
2628 0 : // Use 'fast' shutdown (SIGINT) because it also creates a shutdown checkpoint, which is important for
2629 0 : // ROs to get a list of running xacts faster instead of going through the CLOG.
2630 0 : // See https://www.postgresql.org/docs/current/server-shutdown.html for the list of modes and signals.
2631 0 : kill(pg_pid, Signal::SIGINT).ok();
2632 0 : }
2633 0 : }
2634 :
2635 : // Helper trait to call JoinSet::spawn_blocking(f) while propagating the current
2636 : // tracing span to the spawned thread.
2637 : trait JoinSetExt<T> {
2638 : fn spawn_blocking_child<F>(&mut self, f: F) -> tokio::task::AbortHandle
2639 : where
2640 : F: FnOnce() -> T + Send + 'static,
2641 : T: Send;
2642 : }
2643 :
2644 : impl<T: 'static> JoinSetExt<T> for tokio::task::JoinSet<T> {
2645 0 : fn spawn_blocking_child<F>(&mut self, f: F) -> tokio::task::AbortHandle
2646 0 : where
2647 0 : F: FnOnce() -> T + Send + 'static,
2648 0 : T: Send,
2649 : {
2650 0 : let sp = tracing::Span::current();
2651 0 : self.spawn_blocking(move || {
2652 0 : let _e = sp.enter();
2653 0 : f()
2654 0 : })
2655 0 : }
2656 : }
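 : // A hypothetical usage sketch: from async code with an active tracing span,
 : //
 : //     let mut js = tokio::task::JoinSet::new();
 : //     js.spawn_blocking_child(|| heavy_sync_work()); // heavy_sync_work is a placeholder
 : //
 : // events emitted inside the closure are then recorded under the caller's span.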
2657 :
2658 : #[cfg(test)]
2659 : mod tests {
2660 : use std::fs::File;
2661 :
2662 : use super::*;
2663 :
2664 : #[test]
2665 1 : fn duplicate_safekeeper_connstring() {
2666 1 : let file = File::open("tests/cluster_spec.json").unwrap();
2667 1 : let spec: ComputeSpec = serde_json::from_reader(file).unwrap();
2668 :
2669 1 : match ParsedSpec::try_from(spec.clone()) {
2670 0 : Ok(_p) => panic!("Failed to detect duplicate entry"),
2671 1 : Err(e) => assert!(e.starts_with("duplicate entry in safekeeper_connstrings:")),
2672 : };
2673 1 : }
2674 : }
|