Line data Source code
1 : use anyhow::{Context, Result};
2 : use chrono::{DateTime, Utc};
3 : use compute_api::privilege::Privilege;
4 : use compute_api::responses::{
5 : ComputeConfig, ComputeCtlConfig, ComputeMetrics, ComputeStatus, LfcOffloadState,
6 : LfcPrewarmState, PromoteState, TlsConfig,
7 : };
8 : use compute_api::spec::{
9 : ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, ExtVersion, PageserverProtocol, PgIdent,
10 : };
11 : use futures::StreamExt;
12 : use futures::future::join_all;
13 : use futures::stream::FuturesUnordered;
14 : use itertools::Itertools;
15 : use nix::sys::signal::{Signal, kill};
16 : use nix::unistd::Pid;
17 : use once_cell::sync::Lazy;
18 : use pageserver_page_api::{self as page_api, BaseBackupCompression};
19 : use postgres;
20 : use postgres::NoTls;
21 : use postgres::error::SqlState;
22 : use remote_storage::{DownloadError, RemotePath};
23 : use std::collections::{HashMap, HashSet};
24 : use std::os::unix::fs::{PermissionsExt, symlink};
25 : use std::path::Path;
26 : use std::process::{Command, Stdio};
27 : use std::str::FromStr;
28 : use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
29 : use std::sync::{Arc, Condvar, Mutex, RwLock};
30 : use std::time::{Duration, Instant};
31 : use std::{env, fs};
32 : use tokio::{spawn, sync::watch, task::JoinHandle, time};
33 : use tracing::{Instrument, debug, error, info, instrument, warn};
34 : use url::Url;
35 : use utils::id::{TenantId, TimelineId};
36 : use utils::lsn::Lsn;
37 : use utils::measured_stream::MeasuredReader;
38 : use utils::pid_file;
39 : use utils::shard::{ShardCount, ShardIndex, ShardNumber};
40 :
41 : use crate::configurator::launch_configurator;
42 : use crate::disk_quota::set_disk_quota;
43 : use crate::installed_extensions::get_installed_extensions;
44 : use crate::logger::startup_context_from_env;
45 : use crate::lsn_lease::launch_lsn_lease_bg_task_for_static;
46 : use crate::metrics::COMPUTE_CTL_UP;
47 : use crate::monitor::launch_monitor;
48 : use crate::pg_helpers::*;
49 : use crate::pgbouncer::*;
50 : use crate::rsyslog::{
51 : PostgresLogsRsyslogConfig, configure_audit_rsyslog, configure_postgres_logs_export,
52 : launch_pgaudit_gc,
53 : };
54 : use crate::spec::*;
55 : use crate::swap::resize_swap;
56 : use crate::sync_sk::{check_if_synced, ping_safekeeper};
57 : use crate::tls::watch_cert_for_changes;
58 : use crate::{config, extension_server, local_proxy};
59 :
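   : // PIDs of the `postgres --sync-safekeepers` and postmaster child processes.
   : // A value of 0 means the corresponding process is not currently running.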
60 : pub static SYNC_SAFEKEEPERS_PID: AtomicU32 = AtomicU32::new(0);
61 : pub static PG_PID: AtomicU32 = AtomicU32::new(0);
62 : // This is an arbitrary build tag. It is fine as a default / for testing purposes
63 : // in case the environment variable is not set.
64 : const BUILD_TAG_DEFAULT: &str = "latest";
65 : /// Build tag/version of the compute node binaries/image. It's tricky and ugly
66 : /// to pass it everywhere as a part of `ComputeNodeParams`, so we use a
67 : /// global static variable.
68 0 : pub static BUILD_TAG: Lazy<String> = Lazy::new(|| {
69 0 : option_env!("BUILD_TAG")
70 0 : .unwrap_or(BUILD_TAG_DEFAULT)
71 0 : .to_string()
72 0 : });
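   : /// Default interval, in seconds, between installed-extensions collection runs (one hour).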
73 : const DEFAULT_INSTALLED_EXTENSIONS_COLLECTION_INTERVAL: u64 = 3600;
74 :
75 : /// Static configuration params that don't change after startup. These mostly
76 : /// come from the CLI args, or are derived from them.
77 : #[derive(Clone, Debug)]
78 : pub struct ComputeNodeParams {
79 : /// The ID of the compute
80 : pub compute_id: String,
81 :
82 : /// The `Url` type maintains proper escaping
83 : pub connstr: url::Url,
84 :
85 : /// The name of the 'weak' superuser role, which we give to our users.
86 : /// It follows the allow-list approach, i.e., we take a standard role
87 : /// and grant it extra permissions with explicit GRANTs here and there,
88 : /// and via core patches.
89 : pub privileged_role_name: String,
90 :
91 : pub resize_swap_on_bind: bool,
92 : pub set_disk_quota_for_fs: Option<String>,
93 :
94 : // VM monitor parameters
95 : #[cfg(target_os = "linux")]
96 : pub filecache_connstr: String,
97 : #[cfg(target_os = "linux")]
98 : pub cgroup: String,
99 : #[cfg(target_os = "linux")]
100 : pub vm_monitor_addr: String,
101 :
102 : pub pgdata: String,
103 : pub pgbin: String,
104 : pub pgversion: String,
105 :
106 : /// The port that the compute's external HTTP server listens on
107 : pub external_http_port: u16,
108 : /// The port that the compute's internal HTTP server listens on
109 : pub internal_http_port: u16,
110 :
111 : /// the address of extension storage proxy gateway
112 : pub remote_ext_base_url: Option<Url>,
113 :
114 : /// Interval for installed extensions collection
115 : pub installed_extensions_collection_interval: Arc<AtomicU64>,
116 : }
117 :
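   : /// Handle to a spawned background task, kept so the task can be terminated on shutdown.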
118 : type TaskHandle = Mutex<Option<JoinHandle<()>>>;
119 :
120 : /// Compute node info shared across several `compute_ctl` threads.
121 : pub struct ComputeNode {
122 : pub params: ComputeNodeParams,
123 :
124 : // We connect to Postgres from many different places, so build configs once
125 : // and reuse them where needed. These are derived from 'params.connstr'
126 : pub conn_conf: postgres::config::Config,
127 : pub tokio_conn_conf: tokio_postgres::config::Config,
128 :
129 : /// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
130 : /// To allow the HTTP API server to serve status requests while configuration
131 : /// is in progress, the lock should be held only for short periods of time to do
132 : /// a read or write, not for the whole configuration process.
133 : pub state: Mutex<ComputeState>,
134 : /// `Condvar` to allow notifying waiters about state changes.
135 : pub state_changed: Condvar,
136 :
137 : // key: ext_archive_name, value: (download start time, download completed?)
138 : pub ext_download_progress: RwLock<HashMap<String, (DateTime<Utc>, bool)>>,
139 : pub compute_ctl_config: ComputeCtlConfig,
140 :
141 : /// Handle to the extension stats collection task
142 : extension_stats_task: TaskHandle,
143 : lfc_offload_task: TaskHandle,
144 : }
145 :
146 : // Store some metrics about download sizes that might impact startup time
147 : #[derive(Clone, Debug)]
148 : pub struct RemoteExtensionMetrics {
149 : num_ext_downloaded: u64,
150 : largest_ext_size: u64,
151 : total_ext_download_size: u64,
152 : }
153 :
154 : #[derive(Clone, Debug)]
155 : pub struct ComputeState {
156 : pub start_time: DateTime<Utc>,
157 : pub status: ComputeStatus,
158 : /// Timestamp of the last Postgres activity. It could be `None` if the
159 : /// compute hasn't been used since start.
160 : pub last_active: Option<DateTime<Utc>>,
161 : pub error: Option<String>,
162 :
163 : /// Compute spec. This can be received from the CLI or - more likely -
164 : /// passed by the control plane with a /configure HTTP request.
165 : pub pspec: Option<ParsedSpec>,
166 :
167 : /// If the spec is passed by a /configure request, 'startup_span' is the
168 : /// /configure request's tracing span. The main thread enters it when it
169 : /// processes the compute startup, so that the compute startup is considered
170 : /// to be part of the /configure request for tracing purposes.
171 : ///
172 : /// If the request handling thread/task called startup_compute() directly,
173 : /// it would automatically be a child of the request handling span, and we
174 : /// wouldn't need this. But because we use the main thread to perform the
175 : /// startup, and the /configure task just waits for it to finish, we need to
176 : /// set up the span relationship ourselves.
177 : pub startup_span: Option<tracing::span::Span>,
178 :
179 : pub lfc_prewarm_state: LfcPrewarmState,
180 : pub lfc_offload_state: LfcOffloadState,
181 :
182 : /// WAL flush LSN that is set after terminating Postgres and syncing safekeepers if
183 : /// mode == ComputeMode::Primary. None otherwise
184 : pub terminate_flush_lsn: Option<Lsn>,
185 : pub promote_state: Option<watch::Receiver<PromoteState>>,
186 :
187 : pub metrics: ComputeMetrics,
188 : }
189 :
190 : impl ComputeState {
191 0 : pub fn new() -> Self {
192 0 : Self {
193 0 : start_time: Utc::now(),
194 0 : status: ComputeStatus::Empty,
195 0 : last_active: None,
196 0 : error: None,
197 0 : pspec: None,
198 0 : startup_span: None,
199 0 : metrics: ComputeMetrics::default(),
200 0 : lfc_prewarm_state: LfcPrewarmState::default(),
201 0 : lfc_offload_state: LfcOffloadState::default(),
202 0 : terminate_flush_lsn: None,
203 0 : promote_state: None,
204 0 : }
205 0 : }
206 :
207 0 : pub fn set_status(&mut self, status: ComputeStatus, state_changed: &Condvar) {
208 0 : let prev = self.status;
209 0 : info!("Changing compute status from {} to {}", prev, status);
210 0 : self.status = status;
211 0 : state_changed.notify_all();
212 :
213 0 : COMPUTE_CTL_UP.reset();
214 0 : COMPUTE_CTL_UP
215 0 : .with_label_values(&[&BUILD_TAG, status.to_string().as_str()])
216 0 : .set(1);
217 0 : }
218 :
219 0 : pub fn set_failed_status(&mut self, err: anyhow::Error, state_changed: &Condvar) {
220 0 : self.error = Some(format!("{err:?}"));
221 0 : self.set_status(ComputeStatus::Failed, state_changed);
222 0 : }
223 : }
224 :
225 : impl Default for ComputeState {
226 0 : fn default() -> Self {
227 0 : Self::new()
228 0 : }
229 : }
230 :
231 : #[derive(Clone, Debug)]
232 : pub struct ParsedSpec {
233 : pub spec: ComputeSpec,
234 : pub tenant_id: TenantId,
235 : pub timeline_id: TimelineId,
236 : pub pageserver_connstr: String,
237 : pub safekeeper_connstrings: Vec<String>,
238 : pub storage_auth_token: Option<String>,
239 : /// Kubernetes DNS name and port
240 : pub endpoint_storage_addr: Option<String>,
241 : pub endpoint_storage_token: Option<String>,
242 : }
243 :
244 : impl ParsedSpec {
245 1 : pub fn validate(&self) -> Result<(), String> {
246 : // Only Primary nodes use safekeeper_connstrings, and at the moment
247 : // this method only validates that part of the spec.
248 1 : if self.spec.mode != ComputeMode::Primary {
249 0 : return Ok(());
250 1 : }
251 :
252 : // While it seems like a good idea to check for an odd number of entries in
253 : // the safekeepers connection string, changes to the list of safekeepers might
254 : // involve appending a new server to a list of 3, in which case a list of 4
255 : // entries is okay in production.
256 : //
257 : // Still, we want unique entries, and at least one entry in the vector
258 1 : if self.safekeeper_connstrings.is_empty() {
259 0 : return Err(String::from("safekeeper_connstrings is empty"));
260 1 : }
261 :
262 : // check for uniqueness of the connection strings in the set
263 1 : let mut connstrings = self.safekeeper_connstrings.clone();
264 :
265 1 : connstrings.sort();
266 1 : let mut previous = &connstrings[0];
267 :
268 2 : for current in connstrings.iter().skip(1) {
269 : // duplicate entry?
270 2 : if current == previous {
271 1 : return Err(format!(
272 1 : "duplicate entry in safekeeper_connstrings: {current}!",
273 1 : ));
274 1 : }
275 :
276 1 : previous = current;
277 : }
278 :
279 0 : Ok(())
280 1 : }
281 : }
282 :
283 : impl TryFrom<ComputeSpec> for ParsedSpec {
284 : type Error = String;
285 1 : fn try_from(spec: ComputeSpec) -> Result<Self, String> {
286 : // Extract the options from the spec file that are needed to connect to
287 : // the storage system.
288 : //
289 : // For backwards-compatibility, the top-level fields in the spec file
290 : // may be empty. In that case, we need to dig them from the GUCs in the
291 : // cluster.settings field.
292 1 : let pageserver_connstr = spec
293 1 : .pageserver_connstring
294 1 : .clone()
295 1 : .or_else(|| spec.cluster.settings.find("neon.pageserver_connstring"))
296 1 : .ok_or("pageserver connstr should be provided")?;
297 1 : let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() {
298 1 : if matches!(spec.mode, ComputeMode::Primary) {
299 1 : spec.cluster
300 1 : .settings
301 1 : .find("neon.safekeepers")
302 1 : .ok_or("safekeeper connstrings should be provided")?
303 1 : .split(',')
304 4 : .map(|str| str.to_string())
305 1 : .collect()
306 : } else {
307 0 : vec![]
308 : }
309 : } else {
310 0 : spec.safekeeper_connstrings.clone()
311 : };
312 :
313 1 : let storage_auth_token = spec.storage_auth_token.clone();
314 1 : let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
315 0 : tenant_id
316 : } else {
317 1 : spec.cluster
318 1 : .settings
319 1 : .find("neon.tenant_id")
320 1 : .ok_or("tenant id should be provided")
321 1 : .map(|s| TenantId::from_str(&s))?
322 1 : .or(Err("invalid tenant id"))?
323 : };
324 1 : let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
325 0 : timeline_id
326 : } else {
327 1 : spec.cluster
328 1 : .settings
329 1 : .find("neon.timeline_id")
330 1 : .ok_or("timeline id should be provided")
331 1 : .map(|s| TimelineId::from_str(&s))?
332 1 : .or(Err("invalid timeline id"))?
333 : };
334 :
335 1 : let endpoint_storage_addr: Option<String> = spec
336 1 : .endpoint_storage_addr
337 1 : .clone()
338 1 : .or_else(|| spec.cluster.settings.find("neon.endpoint_storage_addr"));
339 1 : let endpoint_storage_token = spec
340 1 : .endpoint_storage_token
341 1 : .clone()
342 1 : .or_else(|| spec.cluster.settings.find("neon.endpoint_storage_token"));
343 :
344 1 : let res = ParsedSpec {
345 1 : spec,
346 1 : pageserver_connstr,
347 1 : safekeeper_connstrings,
348 1 : storage_auth_token,
349 1 : tenant_id,
350 1 : timeline_id,
351 1 : endpoint_storage_addr,
352 1 : endpoint_storage_token,
353 1 : };
354 :
355 : // Now check validity of the parsed specification
356 1 : res.validate()?;
357 0 : Ok(res)
358 1 : }
359 : }
360 :
361 : /// If we are a VM, returns a [`Command`] that will run in the `neon-postgres`
362 : /// cgroup. Otherwise returns the default `Command::new(cmd)`
363 : ///
364 : /// This function should be used to start postgres, as it will start it in the
365 : /// neon-postgres cgroup if we are a VM. This allows autoscaling to control
366 : /// postgres' resource usage. The cgroup will exist in VMs because vm-builder
367 : /// creates it during the sysinit phase of its inittab.
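   : ///
   : /// A minimal usage sketch (illustrative only; the paths are hypothetical):
   : ///
   : /// ```ignore
   : /// let mut cmd = maybe_cgexec("/usr/local/bin/postgres");
   : /// cmd.args(["-D", "/var/lib/postgres/pgdata"]);
   : /// let child = cmd.spawn()?;
   : /// ```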
368 0 : fn maybe_cgexec(cmd: &str) -> Command {
369 : // The cplane sets this env var for autoscaling computes.
370 : // use `var_os` so we don't have to worry about the variable being valid
371 : // unicode. Should never be an concern . . . but just in case
372 0 : if env::var_os("AUTOSCALING").is_some() {
373 0 : let mut command = Command::new("cgexec");
374 0 : command.args(["-g", "memory:neon-postgres"]);
375 0 : command.arg(cmd);
376 0 : command
377 : } else {
378 0 : Command::new(cmd)
379 : }
380 0 : }
381 :
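   : /// A running Postgres (postmaster) child process, together with the task that
   : /// collects its stderr log output.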
382 : struct PostgresHandle {
383 : postgres: std::process::Child,
384 : log_collector: JoinHandle<Result<()>>,
385 : }
386 :
387 : impl PostgresHandle {
388 : /// Return PID of the postgres (postmaster) process
389 0 : fn pid(&self) -> Pid {
390 0 : Pid::from_raw(self.postgres.id() as i32)
391 0 : }
392 : }
393 :
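   : /// Cancellation token and join handle for the vm-monitor task. Both fields exist
   : /// only on Linux, because the vm-monitor requires cgroups.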
394 : struct StartVmMonitorResult {
395 : #[cfg(target_os = "linux")]
396 : token: tokio_util::sync::CancellationToken,
397 : #[cfg(target_os = "linux")]
398 : vm_monitor: Option<JoinHandle<Result<()>>>,
399 : }
400 :
401 : impl ComputeNode {
402 0 : pub fn new(params: ComputeNodeParams, config: ComputeConfig) -> Result<Self> {
403 0 : let connstr = params.connstr.as_str();
404 0 : let mut conn_conf = postgres::config::Config::from_str(connstr)
405 0 : .context("cannot build postgres config from connstr")?;
406 0 : let mut tokio_conn_conf = tokio_postgres::config::Config::from_str(connstr)
407 0 : .context("cannot build tokio postgres config from connstr")?;
408 :
409 : // Users can set some configuration parameters per database with
410 : // ALTER DATABASE ... SET ...
411 : //
412 : // There are at least these parameters:
413 : //
414 : // - role=some_other_role
415 : // - default_transaction_read_only=on
416 : // - statement_timeout=1, i.e., 1ms, which will cause most of the queries to fail
417 : // - search_path=non_public_schema, this should actually be safe because
418 : // we don't call any functions in user databases, but it's better to always
419 : // reset it to public.
420 : //
421 : // that can affect `compute_ctl` and prevent it from properly configuring the database schema.
422 : // Unset them via connection string options before connecting to the database.
423 : // N.B. keep it in sync with `ZENITH_OPTIONS` in `get_maintenance_client()`.
424 : const EXTRA_OPTIONS: &str = "-c role=cloud_admin -c default_transaction_read_only=off -c search_path=public -c statement_timeout=0 -c pgaudit.log=none";
425 0 : let options = match conn_conf.get_options() {
426 : // Allow the control plane to override any options set by the
427 : // compute
428 0 : Some(options) => format!("{EXTRA_OPTIONS} {options}"),
429 0 : None => EXTRA_OPTIONS.to_string(),
430 : };
431 0 : conn_conf.options(&options);
432 0 : tokio_conn_conf.options(&options);
433 :
434 0 : let mut new_state = ComputeState::new();
435 0 : if let Some(spec) = config.spec {
436 0 : let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow::anyhow!(msg))?;
437 0 : new_state.pspec = Some(pspec);
438 0 : }
439 :
440 0 : Ok(ComputeNode {
441 0 : params,
442 0 : conn_conf,
443 0 : tokio_conn_conf,
444 0 : state: Mutex::new(new_state),
445 0 : state_changed: Condvar::new(),
446 0 : ext_download_progress: RwLock::new(HashMap::new()),
447 0 : compute_ctl_config: config.compute_ctl_config,
448 0 : extension_stats_task: Mutex::new(None),
449 0 : lfc_offload_task: Mutex::new(None),
450 0 : })
451 0 : }
452 :
453 : /// Top-level control flow of compute_ctl. Returns a process exit code we should
454 : /// exit with.
455 0 : pub fn run(self) -> Result<Option<i32>> {
456 0 : let this = Arc::new(self);
457 :
458 0 : let cli_spec = this.state.lock().unwrap().pspec.clone();
459 :
460 : // If this is a pooled VM, prewarm before starting HTTP server and becoming
461 : // available for binding. Prewarming helps Postgres start quicker later,
462 : // because QEMU will already have its memory allocated from the host, and
463 : // the necessary binaries will already be cached.
464 0 : if cli_spec.is_none() {
465 0 : this.prewarm_postgres_vm_memory()?;
466 0 : }
467 :
468 : // Set the up metric with Empty status before starting the HTTP server.
469 : // That way on the first metric scrape, an external observer will see us
470 : // as 'up' and 'empty' (unless the compute was started with a spec or
471 : // already configured by control plane).
472 0 : COMPUTE_CTL_UP
473 0 : .with_label_values(&[&BUILD_TAG, ComputeStatus::Empty.to_string().as_str()])
474 0 : .set(1);
475 :
476 : // Launch the external HTTP server first, so that we can serve control plane
477 : // requests while configuration is still in progress.
478 0 : crate::http::server::Server::External {
479 0 : port: this.params.external_http_port,
480 0 : config: this.compute_ctl_config.clone(),
481 0 : compute_id: this.params.compute_id.clone(),
482 0 : }
483 0 : .launch(&this);
484 :
485 : // The internal HTTP server could be launched later, but there isn't much
486 : // sense in waiting.
487 0 : crate::http::server::Server::Internal {
488 0 : port: this.params.internal_http_port,
489 0 : }
490 0 : .launch(&this);
491 :
492 : // If we got a spec from the CLI already, use that. Otherwise wait for the
493 : // control plane to pass it to us with a /configure HTTP request
494 0 : let pspec = if let Some(cli_spec) = cli_spec {
495 0 : cli_spec
496 : } else {
497 0 : this.wait_spec()?
498 : };
499 :
500 0 : launch_lsn_lease_bg_task_for_static(&this);
501 :
502 : // We have a spec, start the compute
503 0 : let mut delay_exit = false;
504 0 : let mut vm_monitor = None;
505 0 : let mut pg_process: Option<PostgresHandle> = None;
506 :
507 0 : match this.start_compute(&mut pg_process) {
508 0 : Ok(()) => {
509 0 : // Success! Launch remaining services (just vm-monitor currently)
510 0 : vm_monitor =
511 0 : Some(this.start_vm_monitor(pspec.spec.disable_lfc_resizing.unwrap_or(false)));
512 0 : }
513 0 : Err(err) => {
514 : // Something went wrong with the startup. Log it and expose the error to
515 : // HTTP status requests.
516 0 : error!("could not start the compute node: {:#}", err);
517 0 : this.set_failed_status(err);
518 0 : delay_exit = true;
519 :
520 : // If the error happened after starting PostgreSQL, kill it
521 0 : if let Some(ref pg_process) = pg_process {
522 0 : kill(pg_process.pid(), Signal::SIGQUIT).ok();
523 0 : }
524 : }
525 : }
526 :
527 : // If startup was successful, or it failed in the late stages,
528 : // PostgreSQL is now running. Wait until it exits.
529 0 : let exit_code = if let Some(pg_handle) = pg_process {
530 0 : let exit_status = this.wait_postgres(pg_handle);
531 0 : info!("Postgres exited with code {}, shutting down", exit_status);
532 0 : exit_status.code()
533 : } else {
534 0 : None
535 : };
536 :
537 0 : this.terminate_extension_stats_task();
538 0 : this.terminate_lfc_offload_task();
539 :
540 : // Terminate the vm_monitor so it releases the file watcher on
541 : // /sys/fs/cgroup/neon-postgres.
542 : // Note: the vm-monitor only runs on linux because it requires cgroups.
543 0 : if let Some(vm_monitor) = vm_monitor {
544 : cfg_if::cfg_if! {
545 : if #[cfg(target_os = "linux")] {
546 : // Kills all threads spawned by the monitor
547 0 : vm_monitor.token.cancel();
548 0 : if let Some(handle) = vm_monitor.vm_monitor {
549 0 : // Kills the actual task running the monitor
550 0 : handle.abort();
551 0 : }
552 : } else {
553 : _ = vm_monitor; // appease unused lint on macOS
554 : }
555 : }
556 0 : }
557 :
558 : // Reap the postgres process
559 0 : delay_exit |= this.cleanup_after_postgres_exit()?;
560 :
561 : // /terminate returns an LSN. If we don't sleep at all, the connection will break and we
562 : // won't get the result. If we sleep too much, tests will take significantly longer
563 : // and the GitHub Actions run will error out.
564 0 : let sleep_duration = if delay_exit {
565 0 : Duration::from_secs(30)
566 : } else {
567 0 : Duration::from_millis(300)
568 : };
569 :
570 : // If launch failed, keep serving HTTP requests for a while, so the cloud
571 : // control plane can get the actual error.
572 0 : if delay_exit {
573 0 : info!("giving control plane 30s to collect the error before shutdown");
574 0 : }
575 0 : std::thread::sleep(sleep_duration);
576 0 : Ok(exit_code)
577 0 : }
578 :
579 0 : pub fn wait_spec(&self) -> Result<ParsedSpec> {
580 0 : info!("no compute spec provided, waiting");
581 0 : let mut state = self.state.lock().unwrap();
582 0 : while state.status != ComputeStatus::ConfigurationPending {
583 0 : state = self.state_changed.wait(state).unwrap();
584 0 : }
585 :
586 0 : info!("got spec, continue configuration");
587 0 : let spec = state.pspec.as_ref().unwrap().clone();
588 :
589 : // Record for how long we slept waiting for the spec.
590 0 : let now = Utc::now();
591 0 : state.metrics.wait_for_spec_ms = now
592 0 : .signed_duration_since(state.start_time)
593 0 : .to_std()
594 0 : .unwrap()
595 0 : .as_millis() as u64;
596 :
597 : // Reset start time, so that the total startup time that is calculated later will
598 : // not include the time that we waited for the spec.
599 0 : state.start_time = now;
600 :
601 0 : Ok(spec)
602 0 : }
603 :
604 : /// Start compute.
605 : ///
606 : /// Prerequisites:
607 : /// - the compute spec has been placed in self.state.pspec
608 : ///
609 : /// On success:
610 : /// - status is set to ComputeStatus::Running
611 : /// - self.running_postgres is set
612 : ///
613 : /// On error:
614 : /// - status is left in ComputeStatus::Init. The caller is responsible for setting it to Failed
615 : /// - if Postgres was started before the fatal error happened, self.running_postgres is
616 : /// set. The caller is responsible for killing it.
617 : ///
618 : /// Note that this is in the critical path of a compute cold start. Keep this fast.
619 : /// Try to do things concurrently, to hide the latencies.
620 0 : fn start_compute(self: &Arc<Self>, pg_handle: &mut Option<PostgresHandle>) -> Result<()> {
621 : let compute_state: ComputeState;
622 :
623 : let start_compute_span;
624 : let _this_entered;
625 : {
626 0 : let mut state_guard = self.state.lock().unwrap();
627 :
628 : // Create a tracing span for the startup operation.
629 : //
630 : // We could otherwise just annotate the function with #[instrument], but if
631 : // we're being configured from a /configure HTTP request, we want the
632 : // startup to be considered part of the /configure request.
633 : //
634 : // Similarly, if a trace ID was passed in env variables, attach it to the span.
635 0 : start_compute_span = {
636 : // Temporarily enter the parent span, so that the new span becomes its child.
637 0 : if let Some(p) = state_guard.startup_span.take() {
638 0 : let _parent_entered = p.entered();
639 0 : tracing::info_span!("start_compute")
640 0 : } else if let Some(otel_context) = startup_context_from_env() {
641 : use tracing_opentelemetry::OpenTelemetrySpanExt;
642 0 : let span = tracing::info_span!("start_compute");
643 0 : span.set_parent(otel_context);
644 0 : span
645 : } else {
646 0 : tracing::info_span!("start_compute")
647 : }
648 : };
649 0 : _this_entered = start_compute_span.enter();
650 :
651 0 : state_guard.set_status(ComputeStatus::Init, &self.state_changed);
652 0 : compute_state = state_guard.clone()
653 : }
654 :
655 0 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
656 0 : info!(
657 0 : "starting compute for project {}, operation {}, tenant {}, timeline {}, project {}, branch {}, endpoint {}, features {:?}, spec.remote_extensions {:?}",
658 0 : pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None"),
659 0 : pspec.spec.operation_uuid.as_deref().unwrap_or("None"),
660 : pspec.tenant_id,
661 : pspec.timeline_id,
662 0 : pspec.spec.project_id.as_deref().unwrap_or("None"),
663 0 : pspec.spec.branch_id.as_deref().unwrap_or("None"),
664 0 : pspec.spec.endpoint_id.as_deref().unwrap_or("None"),
665 : pspec.spec.features,
666 : pspec.spec.remote_extensions,
667 : );
668 :
669 : ////// PRE-STARTUP PHASE: things that need to be finished before we start the Postgres process
670 :
671 : // Collect all the tasks that must finish here
672 0 : let mut pre_tasks = tokio::task::JoinSet::new();
673 :
674 : // Make sure TLS certificates are properly loaded and in the right place.
675 0 : if self.compute_ctl_config.tls.is_some() {
676 0 : let this = self.clone();
677 0 : pre_tasks.spawn(async move {
678 0 : this.watch_cert_for_changes().await;
679 :
680 0 : Ok::<(), anyhow::Error>(())
681 0 : });
682 0 : }
683 :
684 0 : let tls_config = self.tls_config(&pspec.spec);
685 :
686 : // If there are any remote extensions in shared_preload_libraries, start downloading them
687 0 : if pspec.spec.remote_extensions.is_some() {
688 0 : let (this, spec) = (self.clone(), pspec.spec.clone());
689 0 : pre_tasks.spawn(async move {
690 0 : this.download_preload_extensions(&spec)
691 0 : .in_current_span()
692 0 : .await
693 0 : });
694 0 : }
695 :
696 : // Prepare pgdata directory. This downloads the basebackup, among other things.
697 : {
698 0 : let (this, cs) = (self.clone(), compute_state.clone());
699 0 : pre_tasks.spawn_blocking_child(move || this.prepare_pgdata(&cs));
700 : }
701 :
702 : // Resize swap to the desired size if the compute spec says so
703 0 : if let (Some(size_bytes), true) =
704 0 : (pspec.spec.swap_size_bytes, self.params.resize_swap_on_bind)
705 : {
706 0 : pre_tasks.spawn_blocking_child(move || {
707 : // To avoid 'swapoff' hitting postgres startup, we need to run resize-swap to completion
708 : // *before* starting postgres.
709 : //
710 : // In theory, we could do this asynchronously if SkipSwapon was enabled for VMs, but this
711 : // carries a risk of introducing hard-to-debug issues - e.g. if postgres sometimes gets
712 : // OOM-killed during startup because swap wasn't available yet.
713 0 : resize_swap(size_bytes).context("failed to resize swap")?;
714 0 : let size_mib = size_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
715 0 : info!(%size_bytes, %size_mib, "resized swap");
716 :
717 0 : Ok::<(), anyhow::Error>(())
718 0 : });
719 0 : }
720 :
721 : // Set disk quota if the compute spec says so
722 0 : if let (Some(disk_quota_bytes), Some(disk_quota_fs_mountpoint)) = (
723 0 : pspec.spec.disk_quota_bytes,
724 0 : self.params.set_disk_quota_for_fs.as_ref(),
725 : ) {
726 0 : let disk_quota_fs_mountpoint = disk_quota_fs_mountpoint.clone();
727 0 : pre_tasks.spawn_blocking_child(move || {
728 0 : set_disk_quota(disk_quota_bytes, &disk_quota_fs_mountpoint)
729 0 : .context("failed to set disk quota")?;
730 0 : let size_mib = disk_quota_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
731 0 : info!(%disk_quota_bytes, %size_mib, "set disk quota");
732 :
733 0 : Ok::<(), anyhow::Error>(())
734 0 : });
735 0 : }
736 :
737 : // tune pgbouncer
738 0 : if let Some(pgbouncer_settings) = &pspec.spec.pgbouncer_settings {
739 0 : info!("tuning pgbouncer");
740 :
741 0 : let pgbouncer_settings = pgbouncer_settings.clone();
742 0 : let tls_config = tls_config.clone();
743 :
744 : // Spawn a background task to do the tuning,
745 : // so that we don't block the main thread that starts Postgres.
746 0 : let _handle = tokio::spawn(async move {
747 0 : let res = tune_pgbouncer(pgbouncer_settings, tls_config).await;
748 0 : if let Err(err) = res {
749 0 : error!("error while tuning pgbouncer: {err:?}");
750 : // Continue with the startup anyway
751 0 : }
752 0 : });
753 0 : }
754 :
755 : // configure local_proxy
756 0 : if let Some(local_proxy) = &pspec.spec.local_proxy_config {
757 0 : info!("configuring local_proxy");
758 :
759 : // Spawn a background task to do the configuration,
760 : // so that we don't block the main thread that starts Postgres.
761 :
762 0 : let mut local_proxy = local_proxy.clone();
763 0 : local_proxy.tls = tls_config.clone();
764 :
765 0 : let _handle = tokio::spawn(async move {
766 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
767 0 : error!("error while configuring local_proxy: {err:?}");
768 : // Continue with the startup anyway
769 0 : }
770 0 : });
771 0 : }
772 :
773 : // Configure and start rsyslog for compliance audit logging
774 0 : match pspec.spec.audit_log_level {
775 : ComputeAudit::Hipaa | ComputeAudit::Extended | ComputeAudit::Full => {
776 0 : let remote_tls_endpoint =
777 0 : std::env::var("AUDIT_LOGGING_TLS_ENDPOINT").unwrap_or("".to_string());
778 0 : let remote_plain_endpoint =
779 0 : std::env::var("AUDIT_LOGGING_ENDPOINT").unwrap_or("".to_string());
780 :
781 0 : if remote_plain_endpoint.is_empty() && remote_tls_endpoint.is_empty() {
782 0 : anyhow::bail!(
783 0 : "AUDIT_LOGGING_ENDPOINT and AUDIT_LOGGING_TLS_ENDPOINT are both empty"
784 : );
785 0 : }
786 :
787 0 : let log_directory_path = Path::new(&self.params.pgdata).join("log");
788 0 : let log_directory_path = log_directory_path.to_string_lossy().to_string();
789 :
790 : // Add project_id and endpoint_id to identify the logs.
791 : //
792 : // These IDs are passed from the control plane.
793 0 : let endpoint_id = pspec.spec.endpoint_id.as_deref().unwrap_or("");
794 0 : let project_id = pspec.spec.project_id.as_deref().unwrap_or("");
795 :
796 0 : configure_audit_rsyslog(
797 0 : log_directory_path.clone(),
798 0 : endpoint_id,
799 0 : project_id,
800 0 : &remote_plain_endpoint,
801 0 : &remote_tls_endpoint,
802 0 : )?;
803 :
804 : // Launch a background task to clean up the audit logs
805 0 : launch_pgaudit_gc(log_directory_path);
806 : }
807 0 : _ => {}
808 : }
809 :
810 : // Configure and start rsyslog for Postgres logs export
811 0 : let conf = PostgresLogsRsyslogConfig::new(pspec.spec.logs_export_host.as_deref());
812 0 : configure_postgres_logs_export(conf)?;
813 :
814 : // Launch remaining service threads
815 0 : let _monitor_handle = launch_monitor(self);
816 0 : let _configurator_handle = launch_configurator(self);
817 :
818 : // Wait for all the pre-tasks to finish before starting postgres
819 0 : let rt = tokio::runtime::Handle::current();
820 0 : while let Some(res) = rt.block_on(pre_tasks.join_next()) {
821 0 : res??;
822 : }
823 :
824 : ////// START POSTGRES
825 0 : let start_time = Utc::now();
826 0 : let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
827 0 : let postmaster_pid = pg_process.pid();
828 0 : *pg_handle = Some(pg_process);
829 :
830 : // If this is a primary endpoint, perform some post-startup configuration before
831 : // opening it up for the world.
832 0 : let config_time = Utc::now();
833 0 : if pspec.spec.mode == ComputeMode::Primary {
834 0 : self.configure_as_primary(&compute_state)?;
835 :
836 0 : let conf = self.get_tokio_conn_conf(None);
837 0 : tokio::task::spawn(async {
838 0 : let _ = installed_extensions(conf).await;
839 0 : });
840 0 : }
841 :
842 : // All done!
843 0 : let startup_end_time = Utc::now();
844 0 : let metrics = {
845 0 : let mut state = self.state.lock().unwrap();
846 0 : state.metrics.start_postgres_ms = config_time
847 0 : .signed_duration_since(start_time)
848 0 : .to_std()
849 0 : .unwrap()
850 0 : .as_millis() as u64;
851 0 : state.metrics.config_ms = startup_end_time
852 0 : .signed_duration_since(config_time)
853 0 : .to_std()
854 0 : .unwrap()
855 0 : .as_millis() as u64;
856 0 : state.metrics.total_startup_ms = startup_end_time
857 0 : .signed_duration_since(compute_state.start_time)
858 0 : .to_std()
859 0 : .unwrap()
860 0 : .as_millis() as u64;
861 0 : state.metrics.clone()
862 : };
863 0 : self.set_status(ComputeStatus::Running);
864 :
865 : // Log metrics so that we can search for slow operations in logs
866 0 : info!(?metrics, postmaster_pid = %postmaster_pid, "compute start finished");
867 :
868 0 : self.spawn_extension_stats_task();
869 :
870 0 : if pspec.spec.autoprewarm {
871 0 : info!("autoprewarming on startup as requested");
872 0 : self.prewarm_lfc(None);
873 0 : }
874 0 : if let Some(seconds) = pspec.spec.offload_lfc_interval_seconds {
875 0 : self.spawn_lfc_offload_task(Duration::from_secs(seconds.into()));
876 0 : };
877 0 : Ok(())
878 0 : }
879 :
880 : #[instrument(skip_all)]
881 : async fn download_preload_extensions(&self, spec: &ComputeSpec) -> Result<()> {
882 : let remote_extensions = if let Some(remote_extensions) = &spec.remote_extensions {
883 : remote_extensions
884 : } else {
885 : return Ok(());
886 : };
887 :
888 : // First, create control files for all available extensions
889 : extension_server::create_control_files(remote_extensions, &self.params.pgbin);
890 :
891 : let library_load_start_time = Utc::now();
892 : let remote_ext_metrics = self.prepare_preload_libraries(spec).await?;
893 :
894 : let library_load_time = Utc::now()
895 : .signed_duration_since(library_load_start_time)
896 : .to_std()
897 : .unwrap()
898 : .as_millis() as u64;
899 : let mut state = self.state.lock().unwrap();
900 : state.metrics.load_ext_ms = library_load_time;
901 : state.metrics.num_ext_downloaded = remote_ext_metrics.num_ext_downloaded;
902 : state.metrics.largest_ext_size = remote_ext_metrics.largest_ext_size;
903 : state.metrics.total_ext_download_size = remote_ext_metrics.total_ext_download_size;
904 : info!(
905 : "Loading shared_preload_libraries took {:?}ms",
906 : library_load_time
907 : );
908 : info!("{:?}", remote_ext_metrics);
909 :
910 : Ok(())
911 : }
912 :
913 : /// Start the vm-monitor if directed to. The vm-monitor only runs on linux
914 : /// because it requires cgroups.
915 0 : fn start_vm_monitor(&self, disable_lfc_resizing: bool) -> StartVmMonitorResult {
916 : cfg_if::cfg_if! {
917 : if #[cfg(target_os = "linux")] {
918 : use std::env;
919 : use tokio_util::sync::CancellationToken;
920 :
921 : // This token is used internally by the monitor to clean up all threads
922 0 : let token = CancellationToken::new();
923 :
924 : // don't pass postgres connection string to vm-monitor if we don't want it to resize LFC
925 0 : let pgconnstr = if disable_lfc_resizing {
926 0 : None
927 : } else {
928 0 : Some(self.params.filecache_connstr.clone())
929 : };
930 :
931 0 : let vm_monitor = if env::var_os("AUTOSCALING").is_some() {
932 0 : let vm_monitor = tokio::spawn(vm_monitor::start(
933 0 : Box::leak(Box::new(vm_monitor::Args {
934 0 : cgroup: Some(self.params.cgroup.clone()),
935 0 : pgconnstr,
936 0 : addr: self.params.vm_monitor_addr.clone(),
937 0 : })),
938 0 : token.clone(),
939 : ));
940 0 : Some(vm_monitor)
941 : } else {
942 0 : None
943 : };
944 0 : StartVmMonitorResult { token, vm_monitor }
945 : } else {
946 : _ = disable_lfc_resizing; // appease unused lint on macOS
947 : StartVmMonitorResult { }
948 : }
949 : }
950 0 : }
951 :
952 0 : fn cleanup_after_postgres_exit(&self) -> Result<bool> {
953 : // Maybe sync safekeepers again, to speed up next startup
954 0 : let compute_state = self.state.lock().unwrap().clone();
955 0 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
956 0 : let lsn = if matches!(pspec.spec.mode, compute_api::spec::ComputeMode::Primary) {
957 0 : info!("syncing safekeepers on shutdown");
958 0 : let storage_auth_token = pspec.storage_auth_token.clone();
959 0 : let lsn = self.sync_safekeepers(storage_auth_token)?;
960 0 : info!(%lsn, "synced safekeepers");
961 0 : Some(lsn)
962 : } else {
963 0 : info!("not primary, not syncing safekeepers");
964 0 : None
965 : };
966 :
967 0 : let mut state = self.state.lock().unwrap();
968 0 : state.terminate_flush_lsn = lsn;
969 :
970 0 : let delay_exit = state.status == ComputeStatus::TerminationPendingFast;
971 0 : if state.status == ComputeStatus::TerminationPendingFast
972 0 : || state.status == ComputeStatus::TerminationPendingImmediate
973 : {
974 0 : info!(
975 0 : "Changing compute status from {} to {}",
976 0 : state.status,
977 : ComputeStatus::Terminated
978 : );
979 0 : state.status = ComputeStatus::Terminated;
980 0 : self.state_changed.notify_all();
981 0 : }
982 0 : drop(state);
983 :
984 0 : if let Err(err) = self.check_for_core_dumps() {
985 0 : error!("error while checking for core dumps: {err:?}");
986 0 : }
987 :
988 0 : Ok(delay_exit)
989 0 : }
990 :
991 : /// Check that compute node has corresponding feature enabled.
992 0 : pub fn has_feature(&self, feature: ComputeFeature) -> bool {
993 0 : let state = self.state.lock().unwrap();
994 :
995 0 : if let Some(s) = state.pspec.as_ref() {
996 0 : s.spec.features.contains(&feature)
997 : } else {
998 0 : false
999 : }
1000 0 : }
1001 :
1002 0 : pub fn set_status(&self, status: ComputeStatus) {
1003 0 : let mut state = self.state.lock().unwrap();
1004 0 : state.set_status(status, &self.state_changed);
1005 0 : }
1006 :
1007 0 : pub fn set_failed_status(&self, err: anyhow::Error) {
1008 0 : let mut state = self.state.lock().unwrap();
1009 0 : state.set_failed_status(err, &self.state_changed);
1010 0 : }
1011 :
1012 0 : pub fn get_status(&self) -> ComputeStatus {
1013 0 : self.state.lock().unwrap().status
1014 0 : }
1015 :
1016 0 : pub fn get_timeline_id(&self) -> Option<TimelineId> {
1017 0 : self.state
1018 0 : .lock()
1019 0 : .unwrap()
1020 0 : .pspec
1021 0 : .as_ref()
1022 0 : .map(|s| s.timeline_id)
1023 0 : }
1024 :
1025 : // Remove the `pgdata` directory and create it again with the right permissions.
1026 0 : fn create_pgdata(&self) -> Result<()> {
1027 : // Ignore removal error, likely it is a 'No such file or directory (os error 2)'.
1028 : // If it is something different then create_dir() will error out anyway.
1029 0 : let pgdata = &self.params.pgdata;
1030 0 : let _ok = fs::remove_dir_all(pgdata);
1031 0 : fs::create_dir(pgdata)?;
1032 0 : fs::set_permissions(pgdata, fs::Permissions::from_mode(0o700))?;
1033 :
1034 0 : Ok(())
1035 0 : }
1036 :
1037 : /// Fetches a basebackup from the Pageserver using the compute state's Pageserver connstring and
1038 : /// unarchives it to `pgdata` directory, replacing any existing contents.
1039 : #[instrument(skip_all, fields(%lsn))]
1040 : fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
1041 : let spec = compute_state.pspec.as_ref().expect("spec must be set");
1042 :
1043 : let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
1044 : let started = Instant::now();
1045 :
1046 : let (connected, size) = match PageserverProtocol::from_connstring(shard0_connstr)? {
1047 : PageserverProtocol::Libpq => self.try_get_basebackup_libpq(spec, lsn)?,
1048 : PageserverProtocol::Grpc => self.try_get_basebackup_grpc(spec, lsn)?,
1049 : };
1050 :
1051 : self.fix_zenith_signal_neon_signal()?;
1052 :
1053 : let mut state = self.state.lock().unwrap();
1054 : state.metrics.pageserver_connect_micros =
1055 : connected.duration_since(started).as_micros() as u64;
1056 : state.metrics.basebackup_bytes = size as u64;
1057 : state.metrics.basebackup_ms = started.elapsed().as_millis() as u64;
1058 :
1059 : Ok(())
1060 : }
1061 :
1062 : /// Copy the Zenith signal file to the Neon signal file location, if needed.
1063 : /// This makes Compute compatible with older PageServers that don't yet
1064 : /// know about the Zenith->Neon rename.
1065 0 : fn fix_zenith_signal_neon_signal(&self) -> Result<()> {
1066 0 : let datadir = Path::new(&self.params.pgdata);
1067 :
1068 0 : let neonsig = datadir.join("neon.signal");
1069 :
1070 0 : if neonsig.is_file() {
1071 0 : return Ok(());
1072 0 : }
1073 :
1074 0 : let zenithsig = datadir.join("zenith.signal");
1075 :
1076 0 : if zenithsig.is_file() {
1077 0 : fs::copy(zenithsig, neonsig)?;
1078 0 : }
1079 :
1080 0 : Ok(())
1081 0 : }
1082 :
1083 : /// Fetches a basebackup via gRPC. The connstring must use grpc://. Returns the timestamp when
1084 : /// the connection was established, and the (compressed) size of the basebackup.
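   : ///
   : /// For example, a connstring such as `grpc://pageserver-0:51051` (hypothetical host
   : /// and port) selects this code path.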
1085 0 : fn try_get_basebackup_grpc(&self, spec: &ParsedSpec, lsn: Lsn) -> Result<(Instant, usize)> {
1086 0 : let shard0_connstr = spec
1087 0 : .pageserver_connstr
1088 0 : .split(',')
1089 0 : .next()
1090 0 : .unwrap()
1091 0 : .to_string();
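   : // Derive the shard index from the number of comma-separated pageserver
   : // connstrings: unsharded for a single entry, otherwise shard 0 of N.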
1092 0 : let shard_index = match spec.pageserver_connstr.split(',').count() as u8 {
1093 0 : 0 | 1 => ShardIndex::unsharded(),
1094 0 : count => ShardIndex::new(ShardNumber(0), ShardCount(count)),
1095 : };
1096 :
1097 0 : let (reader, connected) = tokio::runtime::Handle::current().block_on(async move {
1098 0 : let mut client = page_api::Client::connect(
1099 0 : shard0_connstr,
1100 0 : spec.tenant_id,
1101 0 : spec.timeline_id,
1102 0 : shard_index,
1103 0 : spec.storage_auth_token.clone(),
1104 0 : None, // NB: base backups use payload compression
1105 0 : )
1106 0 : .await?;
1107 0 : let connected = Instant::now();
1108 0 : let reader = client
1109 0 : .get_base_backup(page_api::GetBaseBackupRequest {
1110 0 : lsn: (lsn != Lsn(0)).then_some(lsn),
1111 0 : compression: BaseBackupCompression::Gzip,
1112 0 : replica: spec.spec.mode != ComputeMode::Primary,
1113 0 : full: false,
1114 0 : })
1115 0 : .await?;
1116 0 : anyhow::Ok((reader, connected))
1117 0 : })?;
1118 :
1119 0 : let mut reader = MeasuredReader::new(tokio_util::io::SyncIoBridge::new(reader));
1120 :
1121 : // Set `ignore_zeros` so that unpack() reads the entire stream and doesn't just stop at the
1122 : // end-of-archive marker. If the server errors, the tar::Builder drop handler will write an
1123 : // end-of-archive marker before the error is emitted, and we would not see the error.
1124 0 : let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut reader));
1125 0 : ar.set_ignore_zeros(true);
1126 0 : ar.unpack(&self.params.pgdata)?;
1127 :
1128 0 : Ok((connected, reader.get_byte_count()))
1129 0 : }
1130 :
1131 : /// Fetches a basebackup via libpq. The connstring must use postgresql://. Returns the timestamp
1132 : /// when the connection was established, and the (compressed) size of the basebackup.
1133 0 : fn try_get_basebackup_libpq(&self, spec: &ParsedSpec, lsn: Lsn) -> Result<(Instant, usize)> {
1134 0 : let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
1135 0 : let mut config = postgres::Config::from_str(shard0_connstr)?;
1136 :
1137 : // Use the storage auth token from the config file, if given.
1138 : // Note: this overrides any password set in the connection string.
1139 0 : if let Some(storage_auth_token) = &spec.storage_auth_token {
1140 0 : info!("Got storage auth token from spec file");
1141 0 : config.password(storage_auth_token);
1142 : } else {
1143 0 : info!("Storage auth token not set");
1144 : }
1145 :
1146 0 : config.application_name("compute_ctl");
1147 0 : config.options(&format!(
1148 0 : "-c neon.compute_mode={}",
1149 0 : spec.spec.mode.to_type_str()
1150 0 : ));
1151 :
1152 : // Connect to pageserver
1153 0 : let mut client = config.connect(NoTls)?;
1154 0 : let connected = Instant::now();
1155 :
1156 0 : let basebackup_cmd = match lsn {
1157 : Lsn(0) => {
1158 0 : if spec.spec.mode != ComputeMode::Primary {
1159 0 : format!(
1160 0 : "basebackup {} {} --gzip --replica",
1161 : spec.tenant_id, spec.timeline_id
1162 : )
1163 : } else {
1164 0 : format!("basebackup {} {} --gzip", spec.tenant_id, spec.timeline_id)
1165 : }
1166 : }
1167 : _ => {
1168 0 : if spec.spec.mode != ComputeMode::Primary {
1169 0 : format!(
1170 0 : "basebackup {} {} {} --gzip --replica",
1171 : spec.tenant_id, spec.timeline_id, lsn
1172 : )
1173 : } else {
1174 0 : format!(
1175 0 : "basebackup {} {} {} --gzip",
1176 : spec.tenant_id, spec.timeline_id, lsn
1177 : )
1178 : }
1179 : }
1180 : };
1181 :
1182 0 : let copyreader = client.copy_out(basebackup_cmd.as_str())?;
1183 0 : let mut measured_reader = MeasuredReader::new(copyreader);
1184 0 : let mut bufreader = std::io::BufReader::new(&mut measured_reader);
1185 :
1186 : // Read the archive directly from the `CopyOutReader`
1187 : //
1188 : // Set `ignore_zeros` so that unpack() reads all the Copy data and
1189 : // doesn't stop at the end-of-archive marker. Otherwise, if the server
1190 : // sends an Error after finishing the tarball, we will not notice it.
1191 : // The tar::Builder drop handler will write an end-of-archive marker
1192 : // before emitting the error, and we would not see it otherwise.
1193 0 : let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut bufreader));
1194 0 : ar.set_ignore_zeros(true);
1195 0 : ar.unpack(&self.params.pgdata)?;
1196 :
1197 0 : Ok((connected, measured_reader.get_byte_count()))
1198 0 : }
1199 :
1200 : // Gets the basebackup in a retry loop
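   : // The retry delay starts at 500 ms and grows by a factor of 1.5 after each failed
   : // attempt, with up to 10 attempts by default.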
1201 : #[instrument(skip_all, fields(%lsn))]
1202 : pub fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
1203 : let mut retry_period_ms = 500.0;
1204 : let mut attempts = 0;
1205 : const DEFAULT_ATTEMPTS: u16 = 10;
1206 : #[cfg(feature = "testing")]
1207 : let max_attempts = if let Ok(v) = env::var("NEON_COMPUTE_TESTING_BASEBACKUP_RETRIES") {
1208 : u16::from_str(&v).unwrap()
1209 : } else {
1210 : DEFAULT_ATTEMPTS
1211 : };
1212 : #[cfg(not(feature = "testing"))]
1213 : let max_attempts = DEFAULT_ATTEMPTS;
1214 : loop {
1215 : let result = self.try_get_basebackup(compute_state, lsn);
1216 : match result {
1217 : Ok(_) => {
1218 : return result;
1219 : }
1220 : Err(ref e) if attempts < max_attempts => {
1221 : warn!(
1222 : "Failed to get basebackup: {} (attempt {}/{})",
1223 : e, attempts, max_attempts
1224 : );
1225 : std::thread::sleep(std::time::Duration::from_millis(retry_period_ms as u64));
1226 : retry_period_ms *= 1.5;
1227 : }
1228 : Err(_) => {
1229 : return result;
1230 : }
1231 : }
1232 : attempts += 1;
1233 : }
1234 : }
1235 :
1236 0 : pub async fn check_safekeepers_synced_async(
1237 0 : &self,
1238 0 : compute_state: &ComputeState,
1239 0 : ) -> Result<Option<Lsn>> {
1240 : // Construct a connection config for each safekeeper
1241 0 : let pspec: ParsedSpec = compute_state
1242 0 : .pspec
1243 0 : .as_ref()
1244 0 : .expect("spec must be set")
1245 0 : .clone();
1246 0 : let sk_connstrs: Vec<String> = pspec.safekeeper_connstrings.clone();
1247 0 : let sk_configs = sk_connstrs.into_iter().map(|connstr| {
1248 : // Format connstr
1249 0 : let id = connstr.clone();
1250 0 : let connstr = format!("postgresql://no_user@{connstr}");
1251 0 : let options = format!(
1252 0 : "-c timeline_id={} tenant_id={}",
1253 : pspec.timeline_id, pspec.tenant_id
1254 : );
1255 :
1256 : // Construct client
1257 0 : let mut config = tokio_postgres::Config::from_str(&connstr).unwrap();
1258 0 : config.options(&options);
1259 0 : if let Some(storage_auth_token) = pspec.storage_auth_token.clone() {
1260 0 : config.password(storage_auth_token);
1261 0 : }
1262 :
1263 0 : (id, config)
1264 0 : });
1265 :
1266 : // Create task set to query all safekeepers
1267 0 : let mut tasks = FuturesUnordered::new();
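   : // Require a majority quorum of responses, e.g. 2 of 3 safekeepers, or 3 of 4.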
1268 0 : let quorum = sk_configs.len() / 2 + 1;
1269 0 : for (id, config) in sk_configs {
1270 0 : let timeout = tokio::time::Duration::from_millis(100);
1271 0 : let task = tokio::time::timeout(timeout, ping_safekeeper(id, config));
1272 0 : tasks.push(tokio::spawn(task));
1273 0 : }
1274 :
1275 : // Get a quorum of responses or errors
1276 0 : let mut responses = Vec::new();
1277 0 : let mut join_errors = Vec::new();
1278 0 : let mut task_errors = Vec::new();
1279 0 : let mut timeout_errors = Vec::new();
1280 0 : while let Some(response) = tasks.next().await {
1281 0 : match response {
1282 0 : Ok(Ok(Ok(r))) => responses.push(r),
1283 0 : Ok(Ok(Err(e))) => task_errors.push(e),
1284 0 : Ok(Err(e)) => timeout_errors.push(e),
1285 0 : Err(e) => join_errors.push(e),
1286 : };
1287 0 : if responses.len() >= quorum {
1288 0 : break;
1289 0 : }
1290 0 : if join_errors.len() + task_errors.len() + timeout_errors.len() >= quorum {
1291 0 : break;
1292 0 : }
1293 : }
1294 :
1295 : // In case of error, log and fail the check, but don't crash.
1296 : // We're playing it safe because these errors could be transient
1297 : // and we don't yet retry.
1298 0 : if responses.len() < quorum {
1299 0 : error!(
1300 0 : "failed sync safekeepers check {:?} {:?} {:?}",
1301 : join_errors, task_errors, timeout_errors
1302 : );
1303 0 : return Ok(None);
1304 0 : }
1305 :
1306 0 : Ok(check_if_synced(responses))
1307 0 : }
1308 :
1309 : // Fast path for sync_safekeepers. If they're already synced we get the lsn
1310 : // in one roundtrip. If not, we should do a full sync_safekeepers.
1311 : #[instrument(skip_all)]
1312 : pub fn check_safekeepers_synced(&self, compute_state: &ComputeState) -> Result<Option<Lsn>> {
1313 : let start_time = Utc::now();
1314 :
1315 : let rt = tokio::runtime::Handle::current();
1316 : let result = rt.block_on(self.check_safekeepers_synced_async(compute_state));
1317 :
1318 : // Record runtime
1319 : self.state.lock().unwrap().metrics.sync_sk_check_ms = Utc::now()
1320 : .signed_duration_since(start_time)
1321 : .to_std()
1322 : .unwrap()
1323 : .as_millis() as u64;
1324 : result
1325 : }
1326 :
1327 : // Run `postgres` in a special mode with `--sync-safekeepers` argument
1328 : // and return the reported LSN back to the caller.
1329 : #[instrument(skip_all)]
1330 : pub fn sync_safekeepers(&self, storage_auth_token: Option<String>) -> Result<Lsn> {
1331 : let start_time = Utc::now();
1332 :
1333 : let mut sync_handle = maybe_cgexec(&self.params.pgbin)
1334 : .args(["--sync-safekeepers"])
1335 : .env("PGDATA", &self.params.pgdata) // we cannot use -D in this mode
1336 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
1337 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
1338 : } else {
1339 : vec![]
1340 : })
1341 : .stdout(Stdio::piped())
1342 : .stderr(Stdio::piped())
1343 : .spawn()
1344 : .expect("postgres --sync-safekeepers failed to start");
1345 : SYNC_SAFEKEEPERS_PID.store(sync_handle.id(), Ordering::SeqCst);
1346 :
1347 : // `postgres --sync-safekeepers` will print all log output to stderr and
1348 : // final LSN to stdout. So we leave stdout to collect LSN, while stderr logs
1349 : // will be collected in a child thread.
1350 : let stderr = sync_handle
1351 : .stderr
1352 : .take()
1353 : .expect("stderr should be captured");
1354 : let logs_handle = handle_postgres_logs(stderr);
1355 :
1356 : let sync_output = sync_handle
1357 : .wait_with_output()
1358 : .expect("postgres --sync-safekeepers failed");
1359 : SYNC_SAFEKEEPERS_PID.store(0, Ordering::SeqCst);
1360 :
1361 : // Process has exited, so we can join the logs thread.
1362 : let _ = tokio::runtime::Handle::current()
1363 : .block_on(logs_handle)
1364 0 : .map_err(|e| tracing::error!("log task panicked: {:?}", e));
1365 :
1366 : if !sync_output.status.success() {
1367 : anyhow::bail!(
1368 : "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
1369 : sync_output.status,
1370 : String::from_utf8(sync_output.stdout)
1371 : .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
1372 : );
1373 : }
1374 :
1375 : self.state.lock().unwrap().metrics.sync_safekeepers_ms = Utc::now()
1376 : .signed_duration_since(start_time)
1377 : .to_std()
1378 : .unwrap()
1379 : .as_millis() as u64;
1380 :
1381 : let lsn = Lsn::from_str(String::from_utf8(sync_output.stdout)?.trim())?;
1382 :
1383 : Ok(lsn)
1384 : }
1385 :
1386 : /// Do all the preparations like PGDATA directory creation, configuration,
1387 : /// safekeepers sync, basebackup, etc.
1388 : #[instrument(skip_all)]
1389 : pub fn prepare_pgdata(&self, compute_state: &ComputeState) -> Result<()> {
1390 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
1391 : let spec = &pspec.spec;
1392 : let pgdata_path = Path::new(&self.params.pgdata);
1393 :
1394 : let tls_config = self.tls_config(&pspec.spec);
1395 :
1396 : // Remove/create an empty pgdata directory and put configuration there.
1397 : self.create_pgdata()?;
1398 : config::write_postgres_conf(
1399 : pgdata_path,
1400 : &self.params,
1401 : &pspec.spec,
1402 : self.params.internal_http_port,
1403 : tls_config,
1404 : )?;
1405 :
1406 : // Syncing safekeepers is only safe with primary nodes: if a primary
1407 : // is already connected it will be kicked out, so a secondary (standby)
1408 : // cannot sync safekeepers.
1409 : let lsn = match spec.mode {
1410 : ComputeMode::Primary => {
1411 : info!("checking if safekeepers are synced");
1412 : let lsn = if let Ok(Some(lsn)) = self.check_safekeepers_synced(compute_state) {
1413 : lsn
1414 : } else {
1415 : info!("starting safekeepers syncing");
1416 : self.sync_safekeepers(pspec.storage_auth_token.clone())
1417 : .with_context(|| "failed to sync safekeepers")?
1418 : };
1419 : info!("safekeepers synced at LSN {}", lsn);
1420 : lsn
1421 : }
1422 : ComputeMode::Static(lsn) => {
1423 : info!("Starting read-only node at static LSN {}", lsn);
1424 : lsn
1425 : }
1426 : ComputeMode::Replica => {
1427 : info!("Initializing standby from latest Pageserver LSN");
1428 : Lsn(0)
1429 : }
1430 : };
1431 :
1432 : info!(
1433 : "getting basebackup@{} from pageserver {}",
1434 : lsn, &pspec.pageserver_connstr
1435 : );
1436 0 : self.get_basebackup(compute_state, lsn).with_context(|| {
1437 0 : format!(
1438 0 : "failed to get basebackup@{} from pageserver {}",
1439 0 : lsn, &pspec.pageserver_connstr
1440 : )
1441 0 : })?;
1442 :
1443 : // Update pg_hba.conf received with basebackup.
1444 : update_pg_hba(pgdata_path)?;
1445 :
1446 : // Place pg_dynshmem under /dev/shm. This allows us to use
1447 : // 'dynamic_shared_memory_type = mmap' so that the files are placed in
1448 : // /dev/shm, similar to how 'dynamic_shared_memory_type = posix' works.
1449 : //
1450 : // Why on earth don't we just stick to the 'posix' default, you might
1451 : // ask. It turns out that making large allocations with 'posix' doesn't
1452 : // work very well with autoscaling. The behavior we want is that:
1453 : //
1454 : // 1. You can make large DSM allocations, larger than the current RAM
1455 : // size of the VM, without errors
1456 : //
1457 : // 2. If the allocated memory is really used, the VM is scaled up
1458 : // automatically to accommodate that
1459 : //
1460 : // We try to make that possible by having swap in the VM. But with the
1461 : // default 'posix' DSM implementation, we fail step 1, even when there's
1462 : // plenty of swap available. PostgreSQL uses posix_fallocate() to create
1463 : // the shmem segment, which is really just a file in /dev/shm in Linux,
1464 : // but posix_fallocate() on tmpfs returns ENOMEM if the size is larger
1465 : // than available RAM.
1466 : //
1467 : // Using 'dynamic_shared_memory_type = mmap' works around that, because
1468 : // the Postgres 'mmap' DSM implementation doesn't use
1469 : // posix_fallocate(). Instead, it uses repeated calls to write(2) to
1470 : // fill the file with zeros. It's weird that that differs between
1471 : // 'posix' and 'mmap', but we take advantage of it. When the file is
1472 : // filled slowly with write(2), the kernel allows it to grow larger, as
1473 : // long as there's swap available.
1474 : //
1475 : // In short, using 'dynamic_shared_memory_type = mmap' allows a DSM
1476 : // segment to be larger than the currently available RAM. But because we
1477 : // don't want to store it in a real file, which the kernel would try to
1478 : // flush to disk, we symlink pg_dynshmem to /dev/shm.
1479 : //
1480 : // We don't set 'dynamic_shared_memory_type = mmap' here, we let the
1481 : // control plane control that option. If 'mmap' is not used, this
1482 : // symlink doesn't affect anything.
1483 : //
1484 : // See https://github.com/neondatabase/autoscaling/issues/800
1485 : std::fs::remove_dir(pgdata_path.join("pg_dynshmem"))?;
1486 : symlink("/dev/shm/", pgdata_path.join("pg_dynshmem"))?;
1487 :
1488 : match spec.mode {
1489 : ComputeMode::Primary => {}
1490 : ComputeMode::Replica | ComputeMode::Static(..) => {
1491 : add_standby_signal(pgdata_path)?;
1492 : }
1493 : }
1494 :
1495 : Ok(())
1496 : }
1497 :
1498 : /// Start and stop a postgres process to warm up the VM for startup.
1499 0 : pub fn prewarm_postgres_vm_memory(&self) -> Result<()> {
1500 0 : info!("prewarming VM memory");
1501 :
1502 : // Create pgdata
1503 0 : let pgdata = &format!("{}.warmup", self.params.pgdata);
1504 0 : create_pgdata(pgdata)?;
1505 :
1506 : // Run initdb to completion
1507 0 : info!("running initdb");
1508 0 : let initdb_bin = Path::new(&self.params.pgbin)
1509 0 : .parent()
1510 0 : .unwrap()
1511 0 : .join("initdb");
1512 0 : Command::new(initdb_bin)
1513 0 : .args(["--pgdata", pgdata])
1514 0 : .output()
1515 0 : .expect("cannot start initdb process");
1516 :
1517 : // Write conf
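 : // shared_buffers is given in 8 kB pages here, so 65536 is roughly 512 MiB
 : // of buffer cache for the throwaway instance to touch while prewarming.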
1518 : use std::io::Write;
1519 0 : let conf_path = Path::new(pgdata).join("postgresql.conf");
1520 0 : let mut file = std::fs::File::create(conf_path)?;
1521 0 : writeln!(file, "shared_buffers=65536")?;
1522 0 : writeln!(file, "port=51055")?; // Nobody should be connecting
1523 0 : writeln!(file, "shared_preload_libraries = 'neon'")?;
1524 :
1525 : // Start postgres
1526 0 : info!("starting postgres");
1527 0 : let mut pg = maybe_cgexec(&self.params.pgbin)
1528 0 : .args(["-D", pgdata])
1529 0 : .spawn()
1530 0 : .expect("cannot start postgres process");
1531 :
1532 : // Stop it when it's ready
1533 0 : info!("waiting for postgres");
1534 0 : wait_for_postgres(&mut pg, Path::new(pgdata))?;
1535 : // SIGQUIT orders postgres to exit immediately. We avoid SIGKILL so that
1536 : // no orphaned child processes are left prowling around while the datadir
1537 : // is wiped.
1538 0 : let pm_pid = Pid::from_raw(pg.id() as i32);
1539 0 : kill(pm_pid, Signal::SIGQUIT)?;
1540 0 : info!("sent SIGQUIT signal");
1541 0 : pg.wait()?;
1542 0 : info!("done prewarming vm memory");
1543 :
1544 : // clean up
1545 0 : let _ok = fs::remove_dir_all(pgdata);
1546 0 : Ok(())
1547 0 : }
1548 :
1549 : /// Start Postgres as a child process and wait for it to start accepting
1550 : /// connections.
1551 : ///
1552 : /// Returns a handle to the child process and a handle to the logs thread.
1553 : #[instrument(skip_all)]
1554 : pub fn start_postgres(&self, storage_auth_token: Option<String>) -> Result<PostgresHandle> {
1555 : let pgdata_path = Path::new(&self.params.pgdata);
1556 :
1557 : // Run postgres as a child process.
1558 : let mut pg = maybe_cgexec(&self.params.pgbin)
1559 : .args(["-D", &self.params.pgdata])
1560 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
1561 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
1562 : } else {
1563 : vec![]
1564 : })
1565 : .stderr(Stdio::piped())
1566 : .spawn()
1567 : .expect("cannot start postgres process");
1568 : PG_PID.store(pg.id(), Ordering::SeqCst);
1569 :
1570 : // Start a task to collect logs from stderr.
1571 : let stderr = pg.stderr.take().expect("stderr should be captured");
1572 : let logs_handle = handle_postgres_logs(stderr);
1573 :
1574 : wait_for_postgres(&mut pg, pgdata_path)?;
1575 :
1576 : Ok(PostgresHandle {
1577 : postgres: pg,
1578 : log_collector: logs_handle,
1579 : })
1580 : }
1581 :
1582 : /// Wait for the child Postgres process forever. In this state Ctrl+C will
1583 : /// propagate to Postgres and it will be shut down as well.
1584 0 : fn wait_postgres(&self, mut pg_handle: PostgresHandle) -> std::process::ExitStatus {
1585 0 : info!(postmaster_pid = %pg_handle.postgres.id(), "Waiting for Postgres to exit");
1586 :
1587 0 : let ecode = pg_handle
1588 0 : .postgres
1589 0 : .wait()
1590 0 : .expect("failed to start waiting on Postgres process");
1591 0 : PG_PID.store(0, Ordering::SeqCst);
1592 :
1593 : // Process has exited. Wait for the log collecting task to finish.
1594 0 : let _ = tokio::runtime::Handle::current()
1595 0 : .block_on(pg_handle.log_collector)
1596 0 : .map_err(|e| tracing::error!("log task panicked: {:?}", e));
1597 :
1598 0 : ecode
1599 0 : }
1600 :
1601 : /// Do post-configuration of the already started Postgres. This function spawns a background task to
1602 : /// configure the database after applying the compute spec. Currently, it upgrades the neon extension
1603 : /// version. In the future, it may upgrade all 3rd-party extensions.
1604 : #[instrument(skip_all)]
1605 : pub fn post_apply_config(&self) -> Result<()> {
1606 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:post_apply_config"));
1607 0 : tokio::spawn(async move {
1608 0 : let res = async {
1609 0 : let (mut client, connection) = conf.connect(NoTls).await?;
1610 0 : tokio::spawn(async move {
1611 0 : if let Err(e) = connection.await {
1612 0 : eprintln!("connection error: {e}");
1613 0 : }
1614 0 : });
1615 :
1616 0 : handle_neon_extension_upgrade(&mut client)
1617 0 : .await
1618 0 : .context("handle_neon_extension_upgrade")?;
1619 0 : Ok::<_, anyhow::Error>(())
1620 0 : }
1621 0 : .await;
1622 0 : if let Err(err) = res {
1623 0 : error!("error while post_apply_config: {err:#}");
1624 0 : }
1625 0 : });
1626 : Ok(())
1627 : }
1628 :
1629 0 : pub fn get_conn_conf(&self, application_name: Option<&str>) -> postgres::Config {
1630 0 : let mut conf = self.conn_conf.clone();
1631 0 : if let Some(application_name) = application_name {
1632 0 : conf.application_name(application_name);
1633 0 : }
1634 0 : conf
1635 0 : }
1636 :
1637 0 : pub fn get_tokio_conn_conf(&self, application_name: Option<&str>) -> tokio_postgres::Config {
1638 0 : let mut conf = self.tokio_conn_conf.clone();
1639 0 : if let Some(application_name) = application_name {
1640 0 : conf.application_name(application_name);
1641 0 : }
1642 0 : conf
1643 0 : }
1644 :
1645 0 : pub async fn get_maintenance_client(
1646 0 : conf: &tokio_postgres::Config,
1647 0 : ) -> Result<tokio_postgres::Client> {
1648 0 : let mut conf = conf.clone();
1649 0 : conf.application_name("compute_ctl:apply_config");
1650 :
1651 0 : let (client, conn) = match conf.connect(NoTls).await {
1652 : // If connection fails, it may be the old node with `zenith_admin` superuser.
1653 : //
1654 : // In this case we need to connect with the old `zenith_admin` name
1655 : // and create a new user. We cannot simply rename the connected user,
1656 : // but we can create a new one and grant it all privileges.
1657 0 : Err(e) => match e.code() {
1658 : Some(&SqlState::INVALID_PASSWORD)
1659 : | Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => {
1660 : // Connect with `zenith_admin` if `cloud_admin` could not authenticate
1661 0 : info!(
1662 0 : "cannot connect to Postgres: {}, retrying with 'zenith_admin' username",
1663 : e
1664 : );
1665 0 : let mut zenith_admin_conf = postgres::config::Config::from(conf.clone());
1666 0 : zenith_admin_conf.application_name("compute_ctl:apply_config");
1667 0 : zenith_admin_conf.user("zenith_admin");
1668 :
1669 : // It doesn't matter what the options were before; here we just want
1670 : // to connect and create a new superuser role.
1671 : const ZENITH_OPTIONS: &str = "-c role=zenith_admin -c default_transaction_read_only=off -c search_path=public -c statement_timeout=0";
1672 0 : zenith_admin_conf.options(ZENITH_OPTIONS);
1673 :
1674 0 : let mut client =
1675 0 : zenith_admin_conf.connect(NoTls)
1676 0 : .context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
1677 :
1678 : // Disable forwarding so that users don't get a cloud_admin role
1679 0 : let mut func = || {
1680 0 : client.simple_query("SET neon.forward_ddl = false")?;
1681 0 : client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
1682 0 : client.simple_query("GRANT zenith_admin TO cloud_admin")?;
1683 0 : Ok::<_, anyhow::Error>(())
1684 0 : };
1685 0 : func().context("apply_config setup cloud_admin")?;
1686 :
1687 0 : drop(client);
1688 :
1689 : // Reconnect with the connection string that has the expected user name
1690 0 : conf.connect(NoTls).await?
1691 : }
1692 0 : _ => return Err(e.into()),
1693 : },
1694 0 : Ok((client, conn)) => (client, conn),
1695 : };
1696 :
1697 0 : spawn(async move {
1698 0 : if let Err(e) = conn.await {
1699 0 : error!("maintenance client connection error: {}", e);
1700 0 : }
1701 0 : });
1702 :
1703 : // Disable DDL forwarding because control plane already knows about the roles/databases
1704 : // we're about to modify.
1705 0 : client
1706 0 : .simple_query("SET neon.forward_ddl = false")
1707 0 : .await
1708 0 : .context("apply_config SET neon.forward_ddl = false")?;
1709 :
1710 0 : Ok(client)
1711 0 : }
1712 :
1713 : /// Do initial configuration of the already started Postgres.
1714 : #[instrument(skip_all)]
1715 : pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
1716 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:apply_config"));
1717 :
1718 : let conf = Arc::new(conf);
1719 : let spec = Arc::new(
1720 : compute_state
1721 : .pspec
1722 : .as_ref()
1723 : .expect("spec must be set")
1724 : .spec
1725 : .clone(),
1726 : );
1727 :
1728 : let mut tls_config = None::<TlsConfig>;
1729 : if spec.features.contains(&ComputeFeature::TlsExperimental) {
1730 : tls_config = self.compute_ctl_config.tls.clone();
1731 : }
1732 :
1733 : self.update_installed_extensions_collection_interval(&spec);
1734 :
1735 : let max_concurrent_connections = self.max_service_connections(compute_state, &spec);
1736 :
1737 : // Merge-apply spec & changes to PostgreSQL state.
1738 : self.apply_spec_sql(spec.clone(), conf.clone(), max_concurrent_connections)?;
1739 :
1740 : if let Some(local_proxy) = &spec.clone().local_proxy_config {
1741 : let mut local_proxy = local_proxy.clone();
1742 : local_proxy.tls = tls_config.clone();
1743 :
1744 : info!("configuring local_proxy");
1745 : local_proxy::configure(&local_proxy).context("apply_config local_proxy")?;
1746 : }
1747 :
1748 : // Run migrations separately to not hold up cold starts
1749 : let params = self.params.clone();
1750 0 : tokio::spawn(async move {
1751 0 : let mut conf = conf.as_ref().clone();
1752 0 : conf.application_name("compute_ctl:migrations");
1753 :
1754 0 : match conf.connect(NoTls).await {
1755 0 : Ok((mut client, connection)) => {
1756 0 : tokio::spawn(async move {
1757 0 : if let Err(e) = connection.await {
1758 0 : eprintln!("connection error: {e}");
1759 0 : }
1760 0 : });
1761 0 : if let Err(e) = handle_migrations(params, &mut client).await {
1762 0 : error!("Failed to run migrations: {}", e);
1763 0 : }
1764 : }
1765 0 : Err(e) => {
1766 0 : error!(
1767 0 : "Failed to connect to the compute for running migrations: {}",
1768 : e
1769 : );
1770 : }
1771 : };
1772 0 : });
1773 :
1774 : Ok::<(), anyhow::Error>(())
1775 : }
1776 :
1777 : // A thin wrapper around `pg_ctl reload`. Note that right now we don't use
1778 : // `pg_ctl` for start / stop.
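 : // Under the hood `pg_ctl reload` sends SIGHUP to the postmaster, which
 : // makes it re-read its configuration files without a restart.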
1779 : #[instrument(skip_all)]
1780 : fn pg_reload_conf(&self) -> Result<()> {
1781 : let pgctl_bin = Path::new(&self.params.pgbin)
1782 : .parent()
1783 : .unwrap()
1784 : .join("pg_ctl");
1785 : Command::new(pgctl_bin)
1786 : .args(["reload", "-D", &self.params.pgdata])
1787 : .output()
1788 : .expect("cannot run pg_ctl process");
1789 : Ok(())
1790 : }
1791 :
1792 : /// Similar to `apply_config()`, but performs a slightly different sequence of operations,
1793 : /// as it's used to reconfigure a previously started and configured Postgres node.
1794 : #[instrument(skip_all)]
1795 : pub fn reconfigure(&self) -> Result<()> {
1796 : let spec = self.state.lock().unwrap().pspec.clone().unwrap().spec;
1797 :
1798 : let tls_config = self.tls_config(&spec);
1799 :
1800 : self.update_installed_extensions_collection_interval(&spec);
1801 :
1802 : if let Some(ref pgbouncer_settings) = spec.pgbouncer_settings {
1803 : info!("tuning pgbouncer");
1804 :
1805 : let pgbouncer_settings = pgbouncer_settings.clone();
1806 : let tls_config = tls_config.clone();
1807 :
1808 : // Spawn a background task to do the tuning,
1809 : // so that we don't block the main thread that starts Postgres.
1810 0 : tokio::spawn(async move {
1811 0 : let res = tune_pgbouncer(pgbouncer_settings, tls_config).await;
1812 0 : if let Err(err) = res {
1813 0 : error!("error while tuning pgbouncer: {err:?}");
1814 0 : }
1815 0 : });
1816 : }
1817 :
1818 : if let Some(ref local_proxy) = spec.local_proxy_config {
1819 : info!("configuring local_proxy");
1820 :
1821 : // Spawn a background task to do the configuration,
1822 : // so that we don't block the main thread that starts Postgres.
1823 : let mut local_proxy = local_proxy.clone();
1824 : local_proxy.tls = tls_config.clone();
1825 0 : tokio::spawn(async move {
1826 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
1827 0 : error!("error while configuring local_proxy: {err:?}");
1828 0 : }
1829 0 : });
1830 : }
1831 :
1832 : // Reconfigure rsyslog for Postgres logs export
1833 : let conf = PostgresLogsRsyslogConfig::new(spec.logs_export_host.as_deref());
1834 : configure_postgres_logs_export(conf)?;
1835 :
1836 : // Write new config
1837 : let pgdata_path = Path::new(&self.params.pgdata);
1838 : config::write_postgres_conf(
1839 : pgdata_path,
1840 : &self.params,
1841 : &spec,
1842 : self.params.internal_http_port,
1843 : tls_config,
1844 : )?;
1845 :
1846 : self.pg_reload_conf()?;
1847 :
1848 : if !spec.skip_pg_catalog_updates {
1849 : let max_concurrent_connections = spec.reconfigure_concurrency;
1850 : // Temporarily reset max_cluster_size in config
1851 : // to avoid the possibility of hitting the limit, while we are reconfiguring:
1852 : // creating new extensions, roles, etc.
1853 0 : config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
1854 0 : self.pg_reload_conf()?;
1855 :
1856 0 : if spec.mode == ComputeMode::Primary {
1857 0 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:reconfigure"));
1858 0 : let conf = Arc::new(conf);
1859 :
1860 0 : let spec = Arc::new(spec.clone());
1861 :
1862 0 : self.apply_spec_sql(spec, conf, max_concurrent_connections)?;
1863 0 : }
1864 :
1865 0 : Ok(())
1866 0 : })?;
1867 : self.pg_reload_conf()?;
1868 : }
1869 :
1870 : let unknown_op = "unknown".to_string();
1871 : let op_id = spec.operation_uuid.as_ref().unwrap_or(&unknown_op);
1872 : info!(
1873 : "finished reconfiguration of compute node for operation {}",
1874 : op_id
1875 : );
1876 :
1877 : Ok(())
1878 : }
1879 :
1880 : #[instrument(skip_all)]
1881 : pub fn configure_as_primary(&self, compute_state: &ComputeState) -> Result<()> {
1882 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
1883 :
1884 : assert!(pspec.spec.mode == ComputeMode::Primary);
1885 : if !pspec.spec.skip_pg_catalog_updates {
1886 : let pgdata_path = Path::new(&self.params.pgdata);
1887 : // temporarily reset max_cluster_size in config
1888 : // to avoid the possibility of hitting the limit, while we are applying config:
1889 : // creating new extensions, roles, etc...
1890 0 : config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
1891 0 : self.pg_reload_conf()?;
1892 :
1893 0 : self.apply_config(compute_state)?;
1894 :
1895 0 : Ok(())
1896 0 : })?;
1897 :
1898 : let postgresql_conf_path = pgdata_path.join("postgresql.conf");
1899 : if config::line_in_file(
1900 : &postgresql_conf_path,
1901 : "neon.disable_logical_replication_subscribers=false",
1902 : )? {
1903 : info!(
1904 : "updated postgresql.conf to set neon.disable_logical_replication_subscribers=false"
1905 : );
1906 : }
1907 : self.pg_reload_conf()?;
1908 : }
1909 : self.post_apply_config()?;
1910 :
1911 : Ok(())
1912 : }
1913 :
1914 0 : pub async fn watch_cert_for_changes(self: Arc<Self>) {
1915 : // update status on cert renewal
1916 0 : if let Some(tls_config) = &self.compute_ctl_config.tls {
1917 0 : let tls_config = tls_config.clone();
1918 :
1919 : // wait until the cert exists.
1920 0 : let mut cert_watch = watch_cert_for_changes(tls_config.cert_path.clone()).await;
1921 :
1922 0 : tokio::task::spawn_blocking(move || {
1923 0 : let handle = tokio::runtime::Handle::current();
1924 : 'cert_update: loop {
1925 : // Let postgres/pgbouncer/local_proxy know that the new cert/key exists.
1926 : // We need to wait until the compute is in a configurable state first.
1927 :
1928 0 : let mut state = self.state.lock().unwrap();
1929 : 'status_update: loop {
1930 0 : match state.status {
1931 : // let's update the state to config pending
1932 : ComputeStatus::ConfigurationPending | ComputeStatus::Running => {
1933 0 : state.set_status(
1934 0 : ComputeStatus::ConfigurationPending,
1935 0 : &self.state_changed,
1936 0 : );
1937 0 : break 'status_update;
1938 : }
1939 :
1940 : // exit loop
1941 : ComputeStatus::Failed
1942 : | ComputeStatus::TerminationPendingFast
1943 : | ComputeStatus::TerminationPendingImmediate
1944 0 : | ComputeStatus::Terminated => break 'cert_update,
1945 :
1946 : // wait
1947 : ComputeStatus::Init
1948 : | ComputeStatus::Configuration
1949 0 : | ComputeStatus::Empty => {
1950 0 : state = self.state_changed.wait(state).unwrap();
1951 0 : }
1952 : }
1953 : }
1954 0 : drop(state);
1955 :
1956 : // wait for a new certificate update
1957 0 : if handle.block_on(cert_watch.changed()).is_err() {
1958 0 : break;
1959 0 : }
1960 : }
1961 0 : });
1962 0 : }
1963 0 : }
1964 :
1965 0 : pub fn tls_config(&self, spec: &ComputeSpec) -> &Option<TlsConfig> {
1966 0 : if spec.features.contains(&ComputeFeature::TlsExperimental) {
1967 0 : &self.compute_ctl_config.tls
1968 : } else {
1969 0 : &None::<TlsConfig>
1970 : }
1971 0 : }
1972 :
1973 : /// Update the `last_active` in the shared state, but ensure that it's a more recent one.
1974 0 : pub fn update_last_active(&self, last_active: Option<DateTime<Utc>>) {
1975 0 : let mut state = self.state.lock().unwrap();
1976 : // NB: `Some(<DateTime>)` is always greater than `None`.
1977 0 : if last_active > state.last_active {
1978 0 : state.last_active = last_active;
1979 0 : debug!("set the last compute activity time to: {:?}", last_active);
1980 0 : }
1981 0 : }
1982 :
1983 : // Look for core dumps and collect backtraces.
1984 : //
1985 : // EKS worker nodes have following core dump settings:
1986 : // /proc/sys/kernel/core_pattern -> core
1987 : // /proc/sys/kernel/core_uses_pid -> 1
1988 : // ulimit -c -> unlimited
1989 : // which results in core dumps being written to postgres data directory as core.<pid>.
1990 : //
1991 : // Use that as a default location and pattern, except macos where core dumps are written
1992 : // to /cores/ directory by default.
1993 : //
1994 : // With default Linux settings, the core dump file is called just "core", so check for
1995 : // that too.
1996 0 : pub fn check_for_core_dumps(&self) -> Result<()> {
1997 0 : let core_dump_dir = match std::env::consts::OS {
1998 0 : "macos" => Path::new("/cores/"),
1999 0 : _ => Path::new(&self.params.pgdata),
2000 : };
2001 :
2002 : // Collect core dump paths if any
2003 0 : info!("checking for core dumps in {}", core_dump_dir.display());
2004 0 : let files = fs::read_dir(core_dump_dir)?;
2005 0 : let cores = files.filter_map(|entry| {
2006 0 : let entry = entry.ok()?;
2007 :
2008 0 : let is_core_dump = match entry.file_name().to_str()? {
2009 0 : n if n.starts_with("core.") => true,
2010 0 : "core" => true,
2011 0 : _ => false,
2012 : };
2013 0 : if is_core_dump {
2014 0 : Some(entry.path())
2015 : } else {
2016 0 : None
2017 : }
2018 0 : });
2019 :
2020 : // Print backtrace for each core dump
2021 0 : for core_path in cores {
2022 0 : warn!(
2023 0 : "core dump found: {}, collecting backtrace",
2024 0 : core_path.display()
2025 : );
2026 :
2027 : // Try first with gdb
2028 0 : let backtrace = Command::new("gdb")
2029 0 : .args(["--batch", "-q", "-ex", "bt", &self.params.pgbin])
2030 0 : .arg(&core_path)
2031 0 : .output();
2032 :
2033 : // Try lldb if no gdb is found -- that is handy for local testing on macOS
2034 0 : let backtrace = match backtrace {
2035 0 : Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
2036 0 : warn!("cannot find gdb, trying lldb");
2037 0 : Command::new("lldb")
2038 0 : .arg("-c")
2039 0 : .arg(&core_path)
2040 0 : .args(["--batch", "-o", "bt all", "-o", "quit"])
2041 0 : .output()
2042 : }
2043 0 : _ => backtrace,
2044 0 : }?;
2045 :
2046 0 : warn!(
2047 0 : "core dump backtrace: {}",
2048 0 : String::from_utf8_lossy(&backtrace.stdout)
2049 : );
2050 0 : warn!(
2051 0 : "debugger stderr: {}",
2052 0 : String::from_utf8_lossy(&backtrace.stderr)
2053 : );
2054 : }
2055 :
2056 0 : Ok(())
2057 0 : }
2058 :
2059 : /// Select `pg_stat_statements` data and return it as stringified JSON
2060 0 : pub async fn collect_insights(&self) -> String {
2061 0 : let mut result_rows: Vec<String> = Vec::new();
2062 0 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:collect_insights"));
2063 0 : let connect_result = conf.connect(NoTls).await;
2064 0 : let (client, connection) = connect_result.unwrap();
2065 0 : tokio::spawn(async move {
2066 0 : if let Err(e) = connection.await {
2067 0 : eprintln!("connection error: {e}");
2068 0 : }
2069 0 : });
2070 0 : let result = client
2071 0 : .simple_query(
2072 0 : "SELECT
2073 0 : row_to_json(pg_stat_statements)
2074 0 : FROM
2075 0 : pg_stat_statements
2076 0 : WHERE
2077 0 : userid != 'cloud_admin'::regrole::oid
2078 0 : ORDER BY
2079 0 : (mean_exec_time + mean_plan_time) DESC
2080 0 : LIMIT 100",
2081 0 : )
2082 0 : .await;
2083 :
2084 0 : if let Ok(raw_rows) = result {
2085 0 : for message in raw_rows.iter() {
2086 0 : if let postgres::SimpleQueryMessage::Row(row) = message {
2087 0 : if let Some(json) = row.get(0) {
2088 0 : result_rows.push(json.to_string());
2089 0 : }
2090 0 : }
2091 : }
2092 :
2093 0 : format!("{{\"pg_stat_statements\": [{}]}}", result_rows.join(","))
2094 : } else {
2095 0 : "{{\"pg_stat_statements\": []}}".to_string()
2096 : }
2097 0 : }
2098 :
2099 : // Download an archive, unzip it, and place the files in the correct locations.
2100 0 : pub async fn download_extension(
2101 0 : &self,
2102 0 : real_ext_name: String,
2103 0 : ext_path: RemotePath,
2104 0 : ) -> Result<u64, DownloadError> {
2105 0 : let remote_ext_base_url =
2106 0 : self.params
2107 0 : .remote_ext_base_url
2108 0 : .as_ref()
2109 0 : .ok_or(DownloadError::BadInput(anyhow::anyhow!(
2110 0 : "Remote extensions storage is not configured",
2111 0 : )))?;
2112 :
2113 0 : let ext_archive_name = ext_path.object_name().expect("bad path");
2114 :
2115 0 : let mut first_try = false;
2116 0 : if !self
2117 0 : .ext_download_progress
2118 0 : .read()
2119 0 : .expect("lock err")
2120 0 : .contains_key(ext_archive_name)
2121 0 : {
2122 0 : self.ext_download_progress
2123 0 : .write()
2124 0 : .expect("lock err")
2125 0 : .insert(ext_archive_name.to_string(), (Utc::now(), false));
2126 0 : first_try = true;
2127 0 : }
2128 0 : let (download_start, download_completed) =
2129 0 : self.ext_download_progress.read().expect("lock err")[ext_archive_name];
2130 0 : let start_time_delta = Utc::now()
2131 0 : .signed_duration_since(download_start)
2132 0 : .to_std()
2133 0 : .unwrap()
2134 0 : .as_millis() as u64;
2135 :
2136 : // how long to wait for extension download if it was started by another process
2137 : const HANG_TIMEOUT: u64 = 3000; // milliseconds
2138 :
2139 0 : if download_completed {
2140 0 : info!("extension already downloaded, skipping re-download");
2141 0 : return Ok(0);
2142 0 : } else if start_time_delta < HANG_TIMEOUT && !first_try {
2143 0 : info!(
2144 0 : "download {ext_archive_name} already started by another process, hanging untill completion or timeout"
2145 : );
2146 0 : let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500));
2147 : loop {
2148 0 : info!("waiting for download");
2149 0 : interval.tick().await;
2150 0 : let (_, download_completed_now) =
2151 0 : self.ext_download_progress.read().expect("lock")[ext_archive_name];
2152 0 : if download_completed_now {
2153 0 : info!("download finished by whoever else downloaded it");
2154 0 : return Ok(0);
2155 0 : }
2156 : }
2157 : // NOTE: the above loop will get terminated
2158 : // based on the timeout of the download function
2159 0 : }
2160 :
2161 : // If the extension hasn't been downloaded before, or the previous
2162 : // download attempt started at least HANG_TIMEOUT ms ago,
2163 : // then we try to download it here.
2164 0 : info!("downloading new extension {ext_archive_name}");
2165 :
2166 0 : let download_size = extension_server::download_extension(
2167 0 : &real_ext_name,
2168 0 : &ext_path,
2169 0 : remote_ext_base_url,
2170 0 : &self.params.pgbin,
2171 0 : )
2172 0 : .await
2173 0 : .map_err(DownloadError::Other);
2174 :
2175 0 : if download_size.is_ok() {
2176 0 : self.ext_download_progress
2177 0 : .write()
2178 0 : .expect("bad lock")
2179 0 : .insert(ext_archive_name.to_string(), (download_start, true));
2180 0 : }
2181 :
2182 0 : download_size
2183 0 : }
2184 :
2185 0 : pub async fn set_role_grants(
2186 0 : &self,
2187 0 : db_name: &PgIdent,
2188 0 : schema_name: &PgIdent,
2189 0 : privileges: &[Privilege],
2190 0 : role_name: &PgIdent,
2191 0 : ) -> Result<()> {
2192 : use tokio_postgres::NoTls;
2193 :
2194 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:set_role_grants"));
2195 0 : conf.dbname(db_name);
2196 :
2197 0 : let (db_client, conn) = conf
2198 0 : .connect(NoTls)
2199 0 : .await
2200 0 : .context("Failed to connect to the database")?;
2201 0 : tokio::spawn(conn);
2202 :
2203 : // TODO: support other types of grants apart from schemas?
2204 :
2205 : // Check the role grants first, to gracefully handle read replicas.
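 : // aclexplode() expands the schema's ACL array into one row per
 : // (grantor, grantee, privilege), which we join against pg_user to see what
 : // this role has already been granted on the schema.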
2206 0 : let select = "SELECT privilege_type
2207 0 : FROM pg_namespace
2208 0 : JOIN LATERAL (SELECT * FROM aclexplode(nspacl) AS x) acl ON true
2209 0 : JOIN pg_user users ON acl.grantee = users.usesysid
2210 0 : WHERE users.usename = $1
2211 0 : AND nspname = $2";
2212 0 : let rows = db_client
2213 0 : .query(select, &[role_name, schema_name])
2214 0 : .await
2215 0 : .with_context(|| format!("Failed to execute query: {select}"))?;
2216 :
2217 0 : let already_granted: HashSet<String> = rows.into_iter().map(|row| row.get(0)).collect();
2218 :
2219 0 : let grants = privileges
2220 0 : .iter()
2221 0 : .filter(|p| !already_granted.contains(p.as_str()))
2222 : // The privilege names should not be quoted, as they are part of the command
2223 : // and are already sanitized.
2224 0 : .map(|p| p.as_str())
2225 0 : .join(", ");
2226 :
2227 0 : if !grants.is_empty() {
2228 : // quote the schema and role name as identifiers to sanitize them.
2229 0 : let schema_name = schema_name.pg_quote();
2230 0 : let role_name = role_name.pg_quote();
2231 :
2232 0 : let query = format!("GRANT {grants} ON SCHEMA {schema_name} TO {role_name}",);
2233 0 : db_client
2234 0 : .simple_query(&query)
2235 0 : .await
2236 0 : .with_context(|| format!("Failed to execute query: {query}"))?;
2237 0 : }
2238 :
2239 0 : Ok(())
2240 0 : }
2241 :
2242 0 : pub async fn install_extension(
2243 0 : &self,
2244 0 : ext_name: &PgIdent,
2245 0 : db_name: &PgIdent,
2246 0 : ext_version: ExtVersion,
2247 0 : ) -> Result<ExtVersion> {
2248 : use tokio_postgres::NoTls;
2249 :
2250 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:install_extension"));
2251 0 : conf.dbname(db_name);
2252 :
2253 0 : let (db_client, conn) = conf
2254 0 : .connect(NoTls)
2255 0 : .await
2256 0 : .context("Failed to connect to the database")?;
2257 0 : tokio::spawn(conn);
2258 :
2259 0 : let version_query = "SELECT extversion FROM pg_extension WHERE extname = $1";
2260 0 : let version: Option<ExtVersion> = db_client
2261 0 : .query_opt(version_query, &[&ext_name])
2262 0 : .await
2263 0 : .with_context(|| format!("Failed to execute query: {version_query}"))?
2264 0 : .map(|row| row.get(0));
2265 :
2266 : // sanitize the inputs as postgres idents.
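 : // (pg_quote is expected to produce safely quoted values here, so they can be
 : // interpolated into the DDL strings below.)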
2267 0 : let ext_name: String = ext_name.pg_quote();
2268 0 : let quoted_version: String = ext_version.pg_quote();
2269 :
2270 0 : if let Some(installed_version) = version {
2271 0 : if installed_version == ext_version {
2272 0 : return Ok(installed_version);
2273 0 : }
2274 0 : let query = format!("ALTER EXTENSION {ext_name} UPDATE TO {quoted_version}");
2275 0 : db_client
2276 0 : .simple_query(&query)
2277 0 : .await
2278 0 : .with_context(|| format!("Failed to execute query: {query}"))?;
2279 : } else {
2280 0 : let query =
2281 0 : format!("CREATE EXTENSION IF NOT EXISTS {ext_name} WITH VERSION {quoted_version}");
2282 0 : db_client
2283 0 : .simple_query(&query)
2284 0 : .await
2285 0 : .with_context(|| format!("Failed to execute query: {query}"))?;
2286 : }
2287 :
2288 0 : Ok(ext_version)
2289 0 : }
2290 :
2291 0 : pub async fn prepare_preload_libraries(
2292 0 : &self,
2293 0 : spec: &ComputeSpec,
2294 0 : ) -> Result<RemoteExtensionMetrics> {
2295 0 : if self.params.remote_ext_base_url.is_none() {
2296 0 : return Ok(RemoteExtensionMetrics {
2297 0 : num_ext_downloaded: 0,
2298 0 : largest_ext_size: 0,
2299 0 : total_ext_download_size: 0,
2300 0 : });
2301 0 : }
2302 0 : let remote_extensions = spec
2303 0 : .remote_extensions
2304 0 : .as_ref()
2305 0 : .ok_or(anyhow::anyhow!("Remote extensions are not configured"))?;
2306 :
2307 0 : info!("parse shared_preload_libraries from spec.cluster.settings");
2308 0 : let mut libs_vec = Vec::new();
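 : // For example, shared_preload_libraries='neon,pg_stat_statements' ends up as
 : // ["pg_stat_statements"]: the split drops quotes, commas and spaces, and the
 : // filter removes "neon" and empty fragments.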
2309 0 : if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
2310 0 : libs_vec = libs
2311 0 : .split(&[',', '\'', ' '])
2312 0 : .filter(|s| *s != "neon" && !s.is_empty())
2313 0 : .map(str::to_string)
2314 0 : .collect();
2315 0 : }
2316 0 : info!("parse shared_preload_libraries from provided postgresql.conf");
2317 :
2318 : // This is used in neon_local and Python tests.
2319 0 : if let Some(conf) = &spec.cluster.postgresql_conf {
2320 0 : let conf_lines = conf.split('\n').collect::<Vec<&str>>();
2321 0 : let mut shared_preload_libraries_line = "";
2322 0 : for line in conf_lines {
2323 0 : if line.starts_with("shared_preload_libraries") {
2324 0 : shared_preload_libraries_line = line;
2325 0 : }
2326 : }
2327 0 : let mut preload_libs_vec = Vec::new();
2328 0 : if let Some(libs) = shared_preload_libraries_line.split("='").nth(1) {
2329 0 : preload_libs_vec = libs
2330 0 : .split(&[',', '\'', ' '])
2331 0 : .filter(|s| *s != "neon" && !s.is_empty())
2332 0 : .map(str::to_string)
2333 0 : .collect();
2334 0 : }
2335 0 : libs_vec.extend(preload_libs_vec);
2336 0 : }
2337 :
2338 : // Don't try to download libraries that are not in the index.
2339 : // Assume that they are already present locally.
2340 0 : libs_vec.retain(|lib| remote_extensions.library_index.contains_key(lib));
2341 :
2342 0 : info!("Downloading to shared preload libraries: {:?}", &libs_vec);
2343 :
2344 0 : let mut download_tasks = Vec::new();
2345 0 : for library in &libs_vec {
2346 0 : let (ext_name, ext_path) =
2347 0 : remote_extensions.get_ext(library, true, &BUILD_TAG, &self.params.pgversion)?;
2348 0 : download_tasks.push(self.download_extension(ext_name, ext_path));
2349 : }
2350 0 : let results = join_all(download_tasks).await;
2351 :
2352 0 : let mut remote_ext_metrics = RemoteExtensionMetrics {
2353 0 : num_ext_downloaded: 0,
2354 0 : largest_ext_size: 0,
2355 0 : total_ext_download_size: 0,
2356 0 : };
2357 0 : for result in results {
2358 0 : let download_size = match result {
2359 0 : Ok(res) => {
2360 0 : remote_ext_metrics.num_ext_downloaded += 1;
2361 0 : res
2362 : }
2363 0 : Err(err) => {
2364 : // if we failed to download an extension, we don't want to fail the whole
2365 : // process, but we do want to log the error
2366 0 : error!("Failed to download extension: {}", err);
2367 0 : 0
2368 : }
2369 : };
2370 :
2371 0 : remote_ext_metrics.largest_ext_size =
2372 0 : std::cmp::max(remote_ext_metrics.largest_ext_size, download_size);
2373 0 : remote_ext_metrics.total_ext_download_size += download_size;
2374 : }
2375 0 : Ok(remote_ext_metrics)
2376 0 : }
2377 :
2378 : /// Waits until current thread receives a state changed notification and
2379 : /// the pageserver connection strings has changed.
2380 : ///
2381 : /// The operation will time out after a specified duration.
2382 0 : pub fn wait_timeout_while_pageserver_connstr_unchanged(&self, duration: Duration) {
2383 0 : let state = self.state.lock().unwrap();
2384 0 : let old_pageserver_connstr = state
2385 0 : .pspec
2386 0 : .as_ref()
2387 0 : .expect("spec must be set")
2388 0 : .pageserver_connstr
2389 0 : .clone();
2390 0 : let mut unchanged = true;
2391 0 : let _ = self
2392 0 : .state_changed
2393 0 : .wait_timeout_while(state, duration, |s| {
2394 0 : let pageserver_connstr = &s
2395 0 : .pspec
2396 0 : .as_ref()
2397 0 : .expect("spec must be set")
2398 0 : .pageserver_connstr;
2399 0 : unchanged = pageserver_connstr == &old_pageserver_connstr;
2400 0 : unchanged
2401 0 : })
2402 0 : .unwrap();
2403 0 : if !unchanged {
2404 0 : info!("Pageserver config changed");
2405 0 : }
2406 0 : }
2407 :
2408 0 : pub fn spawn_extension_stats_task(&self) {
2409 0 : self.terminate_extension_stats_task();
2410 :
2411 0 : let conf = self.tokio_conn_conf.clone();
2412 0 : let atomic_interval = self.params.installed_extensions_collection_interval.clone();
2413 0 : let mut installed_extensions_collection_interval =
2414 0 : 2 * atomic_interval.load(std::sync::atomic::Ordering::SeqCst);
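 : // The effective sleep interval is twice the stored value; the same doubling
 : // is applied again at the bottom of the loop if the stored value grows.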
2415 0 : info!(
2416 0 : "[NEON_EXT_SPAWN] Spawning background installed extensions worker with Timeout: {}",
2417 : installed_extensions_collection_interval
2418 : );
2419 0 : let handle = tokio::spawn(async move {
2420 : loop {
2421 0 : info!(
2422 0 : "[NEON_EXT_INT_SLEEP]: Interval: {}",
2423 : installed_extensions_collection_interval
2424 : );
2425 : // Sleep at the start of the loop to ensure that two collections don't happen at the same time.
2426 : // The first collection happens during compute startup.
2427 0 : tokio::time::sleep(tokio::time::Duration::from_secs(
2428 0 : installed_extensions_collection_interval,
2429 0 : ))
2430 0 : .await;
2431 0 : let _ = installed_extensions(conf.clone()).await;
2432 : // Re-read the configured value and extend the local interval if it has grown.
2433 0 : installed_extensions_collection_interval = std::cmp::max(
2434 0 : installed_extensions_collection_interval,
2435 0 : 2 * atomic_interval.load(std::sync::atomic::Ordering::SeqCst),
2436 0 : );
2437 : }
2438 : });
2439 :
2440 : // Store the new task handle
2441 0 : *self.extension_stats_task.lock().unwrap() = Some(handle);
2442 0 : }
2443 :
2444 0 : fn terminate_extension_stats_task(&self) {
2445 0 : if let Some(h) = self.extension_stats_task.lock().unwrap().take() {
2446 0 : h.abort()
2447 0 : }
2448 0 : }
2449 :
2450 0 : pub fn spawn_lfc_offload_task(self: &Arc<Self>, interval: Duration) {
2451 0 : self.terminate_lfc_offload_task();
2452 0 : let secs = interval.as_secs();
2453 0 : info!("spawning lfc offload worker with {secs}s interval");
2454 0 : let this = self.clone();
2455 0 : let handle = spawn(async move {
2456 0 : let mut interval = time::interval(interval);
2457 0 : interval.tick().await; // returns immediately
2458 : loop {
2459 0 : interval.tick().await;
2460 0 : this.offload_lfc_async().await;
2461 : }
2462 : });
2463 0 : *self.lfc_offload_task.lock().unwrap() = Some(handle);
2464 0 : }
2465 :
2466 0 : fn terminate_lfc_offload_task(&self) {
2467 0 : if let Some(h) = self.lfc_offload_task.lock().unwrap().take() {
2468 0 : h.abort()
2469 0 : }
2470 0 : }
2471 :
2472 0 : fn update_installed_extensions_collection_interval(&self, spec: &ComputeSpec) {
2473 : // Update the interval for collecting installed-extensions statistics.
2474 : // If the value is -1, the compute never suspends, so use the default collection interval.
2475 : // If the value is 0, it means 'use the default', so we also keep the default interval.
2476 0 : if spec.suspend_timeout_seconds == -1 || spec.suspend_timeout_seconds == 0 {
2477 0 : self.params.installed_extensions_collection_interval.store(
2478 0 : DEFAULT_INSTALLED_EXTENSIONS_COLLECTION_INTERVAL,
2479 0 : std::sync::atomic::Ordering::SeqCst,
2480 0 : );
2481 0 : } else {
2482 0 : self.params.installed_extensions_collection_interval.store(
2483 0 : spec.suspend_timeout_seconds as u64,
2484 0 : std::sync::atomic::Ordering::SeqCst,
2485 0 : );
2486 0 : }
2487 0 : }
2488 : }
2489 :
2490 0 : pub async fn installed_extensions(conf: tokio_postgres::Config) -> Result<()> {
2491 0 : let res = get_installed_extensions(conf).await;
2492 0 : match res {
2493 0 : Ok(extensions) => {
2494 0 : info!(
2495 0 : "[NEON_EXT_STAT] {}",
2496 0 : serde_json::to_string(&extensions).expect("failed to serialize extensions list")
2497 : );
2498 : }
2499 0 : Err(err) => error!("could not get installed extensions: {err}"),
2500 : }
2501 0 : Ok(())
2502 0 : }
2503 :
2504 0 : pub fn forward_termination_signal(dev_mode: bool) {
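 : // First, terminate a running sync-safekeepers child (if any) with SIGTERM.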
2505 0 : let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
2506 0 : if ss_pid != 0 {
2507 0 : let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
2508 0 : kill(ss_pid, Signal::SIGTERM).ok();
2509 0 : }
2510 :
2511 0 : if !dev_mode {
2512 : // Terminate pgbouncer with SIGKILL
2513 0 : match pid_file::read(PGBOUNCER_PIDFILE.into()) {
2514 0 : Ok(pid_file::PidFileRead::LockedByOtherProcess(pid)) => {
2515 0 : info!("sending SIGKILL to pgbouncer process pid: {}", pid);
2516 0 : if let Err(e) = kill(pid, Signal::SIGKILL) {
2517 0 : error!("failed to terminate pgbouncer: {}", e);
2518 0 : }
2519 : }
2520 : // pgbouncer does not lock the pid file, so we read and kill the process directly
2521 : Ok(pid_file::PidFileRead::NotHeldByAnyProcess(_)) => {
2522 0 : if let Ok(pid_str) = std::fs::read_to_string(PGBOUNCER_PIDFILE) {
2523 0 : if let Ok(pid) = pid_str.trim().parse::<i32>() {
2524 0 : info!(
2525 0 : "sending SIGKILL to pgbouncer process pid: {} (from unlocked pid file)",
2526 : pid
2527 : );
2528 0 : if let Err(e) = kill(Pid::from_raw(pid), Signal::SIGKILL) {
2529 0 : error!("failed to terminate pgbouncer: {}", e);
2530 0 : }
2531 0 : }
2532 : } else {
2533 0 : info!("pgbouncer pid file exists but process not running");
2534 : }
2535 : }
2536 : Ok(pid_file::PidFileRead::NotExist) => {
2537 0 : info!("pgbouncer pid file not found, process may not be running");
2538 : }
2539 0 : Err(e) => {
2540 0 : error!("error reading pgbouncer pid file: {}", e);
2541 : }
2542 : }
2543 :
2544 : // Terminate local_proxy
2545 0 : match pid_file::read("/etc/local_proxy/pid".into()) {
2546 0 : Ok(pid_file::PidFileRead::LockedByOtherProcess(pid)) => {
2547 0 : info!("sending SIGTERM to local_proxy process pid: {}", pid);
2548 0 : if let Err(e) = kill(pid, Signal::SIGTERM) {
2549 0 : error!("failed to terminate local_proxy: {}", e);
2550 0 : }
2551 : }
2552 : Ok(pid_file::PidFileRead::NotHeldByAnyProcess(_)) => {
2553 0 : info!("local_proxy PID file exists but process not running");
2554 : }
2555 : Ok(pid_file::PidFileRead::NotExist) => {
2556 0 : info!("local_proxy PID file not found, process may not be running");
2557 : }
2558 0 : Err(e) => {
2559 0 : error!("error reading local_proxy PID file: {}", e);
2560 : }
2561 : }
2562 : } else {
2563 0 : info!("Skipping pgbouncer and local_proxy termination because in dev mode");
2564 : }
2565 :
2566 0 : let pg_pid = PG_PID.load(Ordering::SeqCst);
2567 0 : if pg_pid != 0 {
2568 0 : let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
2569 0 : // Use 'fast' shutdown (SIGINT) because it also creates a shutdown checkpoint, which is important for
2570 0 : // ROs to get a list of running xacts faster instead of going through the CLOG.
2571 0 : // See https://www.postgresql.org/docs/current/server-shutdown.html for the list of modes and signals.
2572 0 : kill(pg_pid, Signal::SIGINT).ok();
2573 0 : }
2574 0 : }
2575 :
2576 : // Helper trait to call JoinSet::spawn_blocking(f), but propagate the current
2577 : // tracing span to the spawned thread.
2578 : trait JoinSetExt<T> {
2579 : fn spawn_blocking_child<F>(&mut self, f: F) -> tokio::task::AbortHandle
2580 : where
2581 : F: FnOnce() -> T + Send + 'static,
2582 : T: Send;
2583 : }
2584 :
2585 : impl<T: 'static> JoinSetExt<T> for tokio::task::JoinSet<T> {
2586 0 : fn spawn_blocking_child<F>(&mut self, f: F) -> tokio::task::AbortHandle
2587 0 : where
2588 0 : F: FnOnce() -> T + Send + 'static,
2589 0 : T: Send,
2590 : {
2591 0 : let sp = tracing::Span::current();
2592 0 : self.spawn_blocking(move || {
2593 0 : let _e = sp.enter();
2594 0 : f()
2595 0 : })
2596 0 : }
2597 : }
2598 :
2599 : #[cfg(test)]
2600 : mod tests {
2601 : use std::fs::File;
2602 :
2603 : use super::*;
2604 :
2605 : #[test]
2606 1 : fn duplicate_safekeeper_connstring() {
2607 1 : let file = File::open("tests/cluster_spec.json").unwrap();
2608 1 : let spec: ComputeSpec = serde_json::from_reader(file).unwrap();
2609 :
2610 1 : match ParsedSpec::try_from(spec.clone()) {
2611 0 : Ok(_p) => panic!("Failed to detect duplicate entry"),
2612 1 : Err(e) => assert!(e.starts_with("duplicate entry in safekeeper_connstrings:")),
2613 : };
2614 1 : }
2615 : }