Line data Source code
1 : use anyhow::{Context, Result};
2 : use chrono::{DateTime, Utc};
3 : use compute_api::privilege::Privilege;
4 : use compute_api::responses::{
5 : ComputeConfig, ComputeCtlConfig, ComputeMetrics, ComputeStatus, LfcOffloadState,
6 : LfcPrewarmState, PromoteState, TlsConfig,
7 : };
8 : use compute_api::spec::{
9 : ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, ExtVersion, PageserverProtocol, PgIdent,
10 : };
11 : use futures::StreamExt;
12 : use futures::future::join_all;
13 : use futures::stream::FuturesUnordered;
14 : use itertools::Itertools;
15 : use nix::sys::signal::{Signal, kill};
16 : use nix::unistd::Pid;
17 : use once_cell::sync::Lazy;
18 : use pageserver_page_api::{self as page_api, BaseBackupCompression};
19 : use postgres;
20 : use postgres::NoTls;
21 : use postgres::error::SqlState;
22 : use remote_storage::{DownloadError, RemotePath};
23 : use std::collections::{HashMap, HashSet};
24 : use std::os::unix::fs::{PermissionsExt, symlink};
25 : use std::path::Path;
26 : use std::process::{Command, Stdio};
27 : use std::str::FromStr;
28 : use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
29 : use std::sync::{Arc, Condvar, Mutex, RwLock};
30 : use std::time::{Duration, Instant};
31 : use std::{env, fs};
32 : use tokio::{spawn, sync::watch, task::JoinHandle, time};
33 : use tracing::{Instrument, debug, error, info, instrument, warn};
34 : use url::Url;
35 : use utils::id::{TenantId, TimelineId};
36 : use utils::lsn::Lsn;
37 : use utils::measured_stream::MeasuredReader;
38 : use utils::pid_file;
39 : use utils::shard::{ShardCount, ShardIndex, ShardNumber};
40 :
41 : use crate::configurator::launch_configurator;
42 : use crate::disk_quota::set_disk_quota;
43 : use crate::installed_extensions::get_installed_extensions;
44 : use crate::logger::startup_context_from_env;
45 : use crate::lsn_lease::launch_lsn_lease_bg_task_for_static;
46 : use crate::metrics::COMPUTE_CTL_UP;
47 : use crate::monitor::launch_monitor;
48 : use crate::pg_helpers::*;
49 : use crate::pgbouncer::*;
50 : use crate::rsyslog::{
51 : PostgresLogsRsyslogConfig, configure_audit_rsyslog, configure_postgres_logs_export,
52 : launch_pgaudit_gc,
53 : };
54 : use crate::spec::*;
55 : use crate::swap::resize_swap;
56 : use crate::sync_sk::{check_if_synced, ping_safekeeper};
57 : use crate::tls::watch_cert_for_changes;
58 : use crate::{config, extension_server, local_proxy};
59 :
60 : pub static SYNC_SAFEKEEPERS_PID: AtomicU32 = AtomicU32::new(0);
61 : pub static PG_PID: AtomicU32 = AtomicU32::new(0);
62 : // This is an arbitrary build tag. Fine as a default / for testing purposes
63 : // in case the BUILD_TAG environment variable is not set.
64 : const BUILD_TAG_DEFAULT: &str = "latest";
65 : /// Build tag/version of the compute node binaries/image. It's tricky and ugly
66 : /// to pass it everywhere as a part of `ComputeNodeParams`, so we use a
67 : /// global static variable.
68 0 : pub static BUILD_TAG: Lazy<String> = Lazy::new(|| {
69 0 : option_env!("BUILD_TAG")
70 0 : .unwrap_or(BUILD_TAG_DEFAULT)
71 0 : .to_string()
72 0 : });
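// A minimal sketch of the compile-time pattern above, for illustration only:
// `option_env!` is expanded when compute_ctl is *built*, so BUILD_TAG must be
// present in the build environment (e.g. during the image build), not at
// runtime:
//
//     const TAG: Option<&str> = option_env!("BUILD_TAG"); // None if unset at build time
//     let tag: &str = TAG.unwrap_or(BUILD_TAG_DEFAULT);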
73 : const DEFAULT_INSTALLED_EXTENSIONS_COLLECTION_INTERVAL: u64 = 3600;
74 :
75 : /// Static configuration params that don't change after startup. These mostly
76 : /// come from the CLI args, or are derived from them.
77 : pub struct ComputeNodeParams {
78 : /// The ID of the compute
79 : pub compute_id: String,
80 : // Url type maintains proper escaping
81 : pub connstr: url::Url,
82 :
83 : pub resize_swap_on_bind: bool,
84 : pub set_disk_quota_for_fs: Option<String>,
85 :
86 : // VM monitor parameters
87 : #[cfg(target_os = "linux")]
88 : pub filecache_connstr: String,
89 : #[cfg(target_os = "linux")]
90 : pub cgroup: String,
91 : #[cfg(target_os = "linux")]
92 : pub vm_monitor_addr: String,
93 :
94 : pub pgdata: String,
95 : pub pgbin: String,
96 : pub pgversion: String,
97 :
98 : /// The port that the compute's external HTTP server listens on
99 : pub external_http_port: u16,
100 : /// The port that the compute's internal HTTP server listens on
101 : pub internal_http_port: u16,
102 :
103 : /// The address of the extension storage proxy gateway
104 : pub remote_ext_base_url: Option<Url>,
105 :
106 : /// Interval for installed extensions collection
107 : pub installed_extensions_collection_interval: Arc<AtomicU64>,
108 : }
109 :
110 : type TaskHandle = Mutex<Option<JoinHandle<()>>>;
111 :
112 : /// Compute node info shared across several `compute_ctl` threads.
113 : pub struct ComputeNode {
114 : pub params: ComputeNodeParams,
115 :
116 : // We connect to Postgres from many different places, so build configs once
117 : // and reuse them where needed. These are derived from 'params.connstr'
118 : pub conn_conf: postgres::config::Config,
119 : pub tokio_conn_conf: tokio_postgres::config::Config,
120 :
121 : /// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
122 : /// To allow the HTTP API server to serve status requests while configuration
123 : /// is in progress, the lock should be held only briefly for each read or
124 : /// write, not for the whole configuration process (see the sketch after this struct).
125 : pub state: Mutex<ComputeState>,
126 : /// `Condvar` to allow notifying waiters about state changes.
127 : pub state_changed: Condvar,
128 :
129 : // key: ext_archive_name, value: (download start time, download_completed flag)
130 : pub ext_download_progress: RwLock<HashMap<String, (DateTime<Utc>, bool)>>,
131 : pub compute_ctl_config: ComputeCtlConfig,
132 :
133 : /// Handle to the extension stats collection task
134 : extension_stats_task: TaskHandle,
135 : lfc_offload_task: TaskHandle,
136 : }
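// A minimal sketch of the lock/Condvar protocol described above (the helper
// is hypothetical; `wait_spec()` below uses the same loop):
//
//     fn wait_for_status(node: &ComputeNode, target: ComputeStatus) {
//         let mut state = node.state.lock().unwrap();
//         while state.status != target {
//             // wait() releases the lock while blocked, so the HTTP server can
//             // still serve status requests; the loop re-checks the condition
//             // to handle spurious wakeups.
//             state = node.state_changed.wait(state).unwrap();
//         }
//     }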
137 :
138 : // store some metrics about download size that might impact startup time
139 : #[derive(Clone, Debug)]
140 : pub struct RemoteExtensionMetrics {
141 : num_ext_downloaded: u64,
142 : largest_ext_size: u64,
143 : total_ext_download_size: u64,
144 : }
145 :
146 : #[derive(Clone, Debug)]
147 : pub struct ComputeState {
148 : pub start_time: DateTime<Utc>,
149 : pub status: ComputeStatus,
150 : /// Timestamp of the last Postgres activity. It could be `None` if
151 : /// compute wasn't used since start.
152 : pub last_active: Option<DateTime<Utc>>,
153 : pub error: Option<String>,
154 :
155 : /// Compute spec. This can be received from the CLI or - more likely -
156 : /// passed by the control plane with a /configure HTTP request.
157 : pub pspec: Option<ParsedSpec>,
158 :
159 : /// If the spec is passed by a /configure request, 'startup_span' is the
160 : /// /configure request's tracing span. The main thread enters it when it
161 : /// processes the compute startup, so that the compute startup is considered
162 : /// to be part of the /configure request for tracing purposes.
163 : ///
164 : /// If the request handling thread/task called startup_compute() directly,
165 : /// it would automatically be a child of the request handling span, and we
166 : /// wouldn't need this. But because we use the main thread to perform the
167 : /// startup, and the /configure task just waits for it to finish, we need to
168 : /// set up the span relationship ourselves.
169 : pub startup_span: Option<tracing::span::Span>,
170 :
171 : pub lfc_prewarm_state: LfcPrewarmState,
172 : pub lfc_offload_state: LfcOffloadState,
173 :
174 : /// WAL flush LSN that is set after terminating Postgres and syncing safekeepers if
175 : /// mode == ComputeMode::Primary, `None` otherwise.
176 : pub terminate_flush_lsn: Option<Lsn>,
177 : pub promote_state: Option<watch::Receiver<PromoteState>>,
178 :
179 : pub metrics: ComputeMetrics,
180 : }
181 :
182 : impl ComputeState {
183 0 : pub fn new() -> Self {
184 0 : Self {
185 0 : start_time: Utc::now(),
186 0 : status: ComputeStatus::Empty,
187 0 : last_active: None,
188 0 : error: None,
189 0 : pspec: None,
190 0 : startup_span: None,
191 0 : metrics: ComputeMetrics::default(),
192 0 : lfc_prewarm_state: LfcPrewarmState::default(),
193 0 : lfc_offload_state: LfcOffloadState::default(),
194 0 : terminate_flush_lsn: None,
195 0 : promote_state: None,
196 0 : }
197 0 : }
198 :
199 0 : pub fn set_status(&mut self, status: ComputeStatus, state_changed: &Condvar) {
200 0 : let prev = self.status;
201 0 : info!("Changing compute status from {} to {}", prev, status);
202 0 : self.status = status;
203 0 : state_changed.notify_all();
204 :
205 0 : COMPUTE_CTL_UP.reset();
206 0 : COMPUTE_CTL_UP
207 0 : .with_label_values(&[&BUILD_TAG, status.to_string().as_str()])
208 0 : .set(1);
209 0 : }
210 :
211 0 : pub fn set_failed_status(&mut self, err: anyhow::Error, state_changed: &Condvar) {
212 0 : self.error = Some(format!("{err:?}"));
213 0 : self.set_status(ComputeStatus::Failed, state_changed);
214 0 : }
215 : }
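// Labelled-gauge sketch: `reset()` clears all label combinations first, so
// exactly one (build_tag, status) pair reports 1 at any time. A hypothetical
// equivalent using the `prometheus` crate directly:
//
//     let up = prometheus::IntGaugeVec::new(
//         prometheus::opts!("compute_ctl_up", "Whether compute_ctl is up"),
//         &["build_tag", "status"],
//     )?;
//     up.reset(); // drop the gauge for the previous status
//     up.with_label_values(&["latest", "running"]).set(1);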
216 :
217 : impl Default for ComputeState {
218 0 : fn default() -> Self {
219 0 : Self::new()
220 0 : }
221 : }
222 :
223 : #[derive(Clone, Debug)]
224 : pub struct ParsedSpec {
225 : pub spec: ComputeSpec,
226 : pub tenant_id: TenantId,
227 : pub timeline_id: TimelineId,
228 : pub pageserver_connstr: String,
229 : pub safekeeper_connstrings: Vec<String>,
230 : pub storage_auth_token: Option<String>,
231 : /// K8s DNS name and port
232 : pub endpoint_storage_addr: Option<String>,
233 : pub endpoint_storage_token: Option<String>,
234 : }
235 :
236 : impl ParsedSpec {
237 1 : pub fn validate(&self) -> Result<(), String> {
238 : // Only Primary nodes use safekeeper_connstrings, and at the moment
239 : // this method only validates that part of the spec.
240 1 : if self.spec.mode != ComputeMode::Primary {
241 0 : return Ok(());
242 1 : }
243 :
244 : // While it seems like a good idea to check for an odd number of entries in
245 : // the safekeepers connection string, changes to the list of safekeepers might
246 : // involve appending a new server to a list of 3, in which case a list of 4
247 : // entries is okay in production.
248 : //
249 : // Still, we want unique entries, and at least one entry in the vector
250 1 : if self.safekeeper_connstrings.is_empty() {
251 0 : return Err(String::from("safekeeper_connstrings is empty"));
252 1 : }
253 :
254 : // check for uniqueness of the connection strings in the set
255 1 : let mut connstrings = self.safekeeper_connstrings.clone();
256 :
257 1 : connstrings.sort();
258 1 : let mut previous = &connstrings[0];
259 :
260 2 : for current in connstrings.iter().skip(1) {
261 : // duplicate entry?
262 2 : if current == previous {
263 1 : return Err(format!(
264 1 : "duplicate entry in safekeeper_connstrings: {current}!",
265 1 : ));
266 1 : }
267 :
268 1 : previous = current;
269 : }
270 :
271 0 : Ok(())
272 1 : }
273 : }
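// An equivalent duplicate check, as a sketch, using the `itertools` crate
// imported above (`duplicates()` yields elements occurring more than once,
// without sorting or mutating the input):
//
//     fn first_duplicate(connstrings: &[String]) -> Option<&String> {
//         connstrings.iter().duplicates().next()
//     }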
274 :
275 : impl TryFrom<ComputeSpec> for ParsedSpec {
276 : type Error = String;
277 1 : fn try_from(spec: ComputeSpec) -> Result<Self, String> {
278 : // Extract the options from the spec file that are needed to connect to
279 : // the storage system.
280 : //
281 : // For backwards-compatibility, the top-level fields in the spec file
282 : // may be empty. In that case, we need to dig them from the GUCs in the
283 : // cluster.settings field.
284 1 : let pageserver_connstr = spec
285 1 : .pageserver_connstring
286 1 : .clone()
287 1 : .or_else(|| spec.cluster.settings.find("neon.pageserver_connstring"))
288 1 : .ok_or("pageserver connstr should be provided")?;
289 1 : let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() {
290 1 : if matches!(spec.mode, ComputeMode::Primary) {
291 1 : spec.cluster
292 1 : .settings
293 1 : .find("neon.safekeepers")
294 1 : .ok_or("safekeeper connstrings should be provided")?
295 1 : .split(',')
296 4 : .map(|str| str.to_string())
297 1 : .collect()
298 : } else {
299 0 : vec![]
300 : }
301 : } else {
302 0 : spec.safekeeper_connstrings.clone()
303 : };
304 :
305 1 : let storage_auth_token = spec.storage_auth_token.clone();
306 1 : let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
307 0 : tenant_id
308 : } else {
309 1 : spec.cluster
310 1 : .settings
311 1 : .find("neon.tenant_id")
312 1 : .ok_or("tenant id should be provided")
313 1 : .map(|s| TenantId::from_str(&s))?
314 1 : .or(Err("invalid tenant id"))?
315 : };
316 1 : let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
317 0 : timeline_id
318 : } else {
319 1 : spec.cluster
320 1 : .settings
321 1 : .find("neon.timeline_id")
322 1 : .ok_or("timeline id should be provided")
323 1 : .map(|s| TimelineId::from_str(&s))?
324 1 : .or(Err("invalid timeline id"))?
325 : };
326 :
327 1 : let endpoint_storage_addr: Option<String> = spec
328 1 : .endpoint_storage_addr
329 1 : .clone()
330 1 : .or_else(|| spec.cluster.settings.find("neon.endpoint_storage_addr"));
331 1 : let endpoint_storage_token = spec
332 1 : .endpoint_storage_token
333 1 : .clone()
334 1 : .or_else(|| spec.cluster.settings.find("neon.endpoint_storage_token"));
335 :
336 1 : let res = ParsedSpec {
337 1 : spec,
338 1 : pageserver_connstr,
339 1 : safekeeper_connstrings,
340 1 : storage_auth_token,
341 1 : tenant_id,
342 1 : timeline_id,
343 1 : endpoint_storage_addr,
344 1 : endpoint_storage_token,
345 1 : };
346 :
347 : // Now check validity of the parsed specification
348 1 : res.validate()?;
349 0 : Ok(res)
350 1 : }
351 : }
352 :
353 : /// If we are a VM, returns a [`Command`] that will run in the `neon-postgres`
354 : /// cgroup. Otherwise returns the default `Command::new(cmd)`
355 : ///
356 : /// This function should be used to start postgres, as it will start it in the
357 : /// neon-postgres cgroup if we are a VM. This allows autoscaling to control
358 : /// postgres' resource usage. The cgroup will exist in VMs because vm-builder
359 : /// creates it during the sysinit phase of its inittab.
360 0 : fn maybe_cgexec(cmd: &str) -> Command {
361 : // The cplane sets this env var for autoscaling computes.
362 : // Use `var_os` so we don't have to worry about the variable being valid
363 : // unicode. This should never be a concern... but just in case.
364 0 : if env::var_os("AUTOSCALING").is_some() {
365 0 : let mut command = Command::new("cgexec");
366 0 : command.args(["-g", "memory:neon-postgres"]);
367 0 : command.arg(cmd);
368 0 : command
369 : } else {
370 0 : Command::new(cmd)
371 : }
372 0 : }
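// Usage sketch: with AUTOSCALING set, the returned command expands to
// `cgexec -g memory:neon-postgres <cmd> ...`. Callers below use it like this
// (paths are placeholders):
//
//     let mut pg = maybe_cgexec("/usr/local/bin/postgres")
//         .args(["-D", "/var/db/postgres/pgdata"])
//         .spawn()?;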
373 :
374 : struct PostgresHandle {
375 : postgres: std::process::Child,
376 : log_collector: JoinHandle<Result<()>>,
377 : }
378 :
379 : impl PostgresHandle {
380 : /// Return PID of the postgres (postmaster) process
381 0 : fn pid(&self) -> Pid {
382 0 : Pid::from_raw(self.postgres.id() as i32)
383 0 : }
384 : }
385 :
386 : struct StartVmMonitorResult {
387 : #[cfg(target_os = "linux")]
388 : token: tokio_util::sync::CancellationToken,
389 : #[cfg(target_os = "linux")]
390 : vm_monitor: Option<JoinHandle<Result<()>>>,
391 : }
392 :
393 : impl ComputeNode {
394 0 : pub fn new(params: ComputeNodeParams, config: ComputeConfig) -> Result<Self> {
395 0 : let connstr = params.connstr.as_str();
396 0 : let mut conn_conf = postgres::config::Config::from_str(connstr)
397 0 : .context("cannot build postgres config from connstr")?;
398 0 : let mut tokio_conn_conf = tokio_postgres::config::Config::from_str(connstr)
399 0 : .context("cannot build tokio postgres config from connstr")?;
400 :
401 : // Users can set some configuration parameters per database with
402 : // ALTER DATABASE ... SET ...
403 : //
404 : // There are at least these parameters:
405 : //
406 : // - role=some_other_role
407 : // - default_transaction_read_only=on
408 : // - statement_timeout=1, i.e., 1ms, which will cause most of the queries to fail
409 : // - search_path=non_public_schema, this should be actually safe because
410 : // we don't call any functions in user databases, but better to always reset
411 : // it to public.
412 : //
413 : // that can affect `compute_ctl` and prevent it from properly configuring the database schema.
414 : // Unset them via connection string options before connecting to the database.
415 : // N.B. keep it in sync with `ZENITH_OPTIONS` in `get_maintenance_client()`.
416 : const EXTRA_OPTIONS: &str = "-c role=cloud_admin -c default_transaction_read_only=off -c search_path=public -c statement_timeout=0 -c pgaudit.log=none";
417 0 : let options = match conn_conf.get_options() {
418 : // Allow the control plane to override any options set by the
419 : // compute
420 0 : Some(options) => format!("{EXTRA_OPTIONS} {options}"),
421 0 : None => EXTRA_OPTIONS.to_string(),
422 : };
423 0 : conn_conf.options(&options);
424 0 : tokio_conn_conf.options(&options);
425 :
426 0 : let mut new_state = ComputeState::new();
427 0 : if let Some(spec) = config.spec {
428 0 : let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow::anyhow!(msg))?;
429 0 : new_state.pspec = Some(pspec);
430 0 : }
431 :
432 0 : Ok(ComputeNode {
433 0 : params,
434 0 : conn_conf,
435 0 : tokio_conn_conf,
436 0 : state: Mutex::new(new_state),
437 0 : state_changed: Condvar::new(),
438 0 : ext_download_progress: RwLock::new(HashMap::new()),
439 0 : compute_ctl_config: config.compute_ctl_config,
440 0 : extension_stats_task: Mutex::new(None),
441 0 : lfc_offload_task: Mutex::new(None),
442 0 : })
443 0 : }
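// Sketch of why EXTRA_OPTIONS is effective: GUCs passed as `-c` options at
// connection time override `ALTER DATABASE ... SET ...` defaults for that
// session. A hypothetical minimal version for a single setting:
//
//     let mut cfg = tokio_postgres::Config::from_str(connstr)?;
//     cfg.options("-c statement_timeout=0"); // beats the per-database default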
444 :
445 : /// Top-level control flow of compute_ctl. Returns a process exit code we should
446 : /// exit with.
447 0 : pub fn run(self) -> Result<Option<i32>> {
448 0 : let this = Arc::new(self);
449 :
450 0 : let cli_spec = this.state.lock().unwrap().pspec.clone();
451 :
452 : // If this is a pooled VM, prewarm before starting HTTP server and becoming
453 : // available for binding. Prewarming helps Postgres start quicker later,
454 : // because QEMU will already have its memory allocated from the host, and
455 : // the necessary binaries will already be cached.
456 0 : if cli_spec.is_none() {
457 0 : this.prewarm_postgres_vm_memory()?;
458 0 : }
459 :
460 : // Set the up metric with Empty status before starting the HTTP server.
461 : // That way on the first metric scrape, an external observer will see us
462 : // as 'up' and 'empty' (unless the compute was started with a spec or
463 : // already configured by control plane).
464 0 : COMPUTE_CTL_UP
465 0 : .with_label_values(&[&BUILD_TAG, ComputeStatus::Empty.to_string().as_str()])
466 0 : .set(1);
467 :
468 : // Launch the external HTTP server first, so that we can serve control plane
469 : // requests while configuration is still in progress.
470 0 : crate::http::server::Server::External {
471 0 : port: this.params.external_http_port,
472 0 : config: this.compute_ctl_config.clone(),
473 0 : compute_id: this.params.compute_id.clone(),
474 0 : }
475 0 : .launch(&this);
476 :
477 : // The internal HTTP server could be launched later, but there isn't much
478 : // sense in waiting.
479 0 : crate::http::server::Server::Internal {
480 0 : port: this.params.internal_http_port,
481 0 : }
482 0 : .launch(&this);
483 :
484 : // If we got a spec from the CLI already, use that. Otherwise wait for the
485 : // control plane to pass it to us with a /configure HTTP request
486 0 : let pspec = if let Some(cli_spec) = cli_spec {
487 0 : cli_spec
488 : } else {
489 0 : this.wait_spec()?
490 : };
491 :
492 0 : launch_lsn_lease_bg_task_for_static(&this);
493 :
494 : // We have a spec, start the compute
495 0 : let mut delay_exit = false;
496 0 : let mut vm_monitor = None;
497 0 : let mut pg_process: Option<PostgresHandle> = None;
498 :
499 0 : match this.start_compute(&mut pg_process) {
500 0 : Ok(()) => {
501 0 : // Success! Launch remaining services (just vm-monitor currently)
502 0 : vm_monitor =
503 0 : Some(this.start_vm_monitor(pspec.spec.disable_lfc_resizing.unwrap_or(false)));
504 0 : }
505 0 : Err(err) => {
506 : // Something went wrong with the startup. Log it and expose the error to
507 : // HTTP status requests.
508 0 : error!("could not start the compute node: {:#}", err);
509 0 : this.set_failed_status(err);
510 0 : delay_exit = true;
511 :
512 : // If the error happened after starting PostgreSQL, kill it
513 0 : if let Some(ref pg_process) = pg_process {
514 0 : kill(pg_process.pid(), Signal::SIGQUIT).ok();
515 0 : }
516 : }
517 : }
518 :
519 : // If startup was successful, or it failed in the late stages,
520 : // PostgreSQL is now running. Wait until it exits.
521 0 : let exit_code = if let Some(pg_handle) = pg_process {
522 0 : let exit_status = this.wait_postgres(pg_handle);
523 0 : info!("Postgres exited with code {}, shutting down", exit_status);
524 0 : exit_status.code()
525 : } else {
526 0 : None
527 : };
528 :
529 0 : this.terminate_extension_stats_task();
530 0 : this.terminate_lfc_offload_task();
531 :
532 : // Terminate the vm_monitor so it releases the file watcher on
533 : // /sys/fs/cgroup/neon-postgres.
534 : // Note: the vm-monitor only runs on linux because it requires cgroups.
535 0 : if let Some(vm_monitor) = vm_monitor {
536 : cfg_if::cfg_if! {
537 : if #[cfg(target_os = "linux")] {
538 : // Kills all threads spawned by the monitor
539 0 : vm_monitor.token.cancel();
540 0 : if let Some(handle) = vm_monitor.vm_monitor {
541 0 : // Kills the actual task running the monitor
542 0 : handle.abort();
543 0 : }
544 : } else {
545 : _ = vm_monitor; // appease unused lint on macOS
546 : }
547 : }
548 0 : }
549 :
550 : // Reap the postgres process
551 0 : delay_exit |= this.cleanup_after_postgres_exit()?;
552 :
553 : // /terminate returns an LSN. If we don't sleep at all, the connection will break
554 : // and we won't get the result. If we sleep too much, tests will take significantly
555 : // longer and the GitHub Actions run will error out.
556 0 : let sleep_duration = if delay_exit {
557 0 : Duration::from_secs(30)
558 : } else {
559 0 : Duration::from_millis(300)
560 : };
561 :
562 : // If launch failed, keep serving HTTP requests for a while, so the cloud
563 : // control plane can get the actual error.
564 0 : if delay_exit {
565 0 : info!("giving control plane 30s to collect the error before shutdown");
566 0 : }
567 0 : std::thread::sleep(sleep_duration);
568 0 : Ok(exit_code)
569 0 : }
570 :
571 0 : pub fn wait_spec(&self) -> Result<ParsedSpec> {
572 0 : info!("no compute spec provided, waiting");
573 0 : let mut state = self.state.lock().unwrap();
574 0 : while state.status != ComputeStatus::ConfigurationPending {
575 0 : state = self.state_changed.wait(state).unwrap();
576 0 : }
577 :
578 0 : info!("got spec, continue configuration");
579 0 : let spec = state.pspec.as_ref().unwrap().clone();
580 :
581 : // Record for how long we slept waiting for the spec.
582 0 : let now = Utc::now();
583 0 : state.metrics.wait_for_spec_ms = now
584 0 : .signed_duration_since(state.start_time)
585 0 : .to_std()
586 0 : .unwrap()
587 0 : .as_millis() as u64;
588 :
589 : // Reset start time, so that the total startup time that is calculated later will
590 : // not include the time that we waited for the spec.
591 0 : state.start_time = now;
592 :
593 0 : Ok(spec)
594 0 : }
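// Timing sketch: wait_for_spec_ms above (and the start/config metrics below)
// all use the same chrono pattern for elapsed wall-clock milliseconds. As a
// hypothetical helper:
//
//     fn elapsed_ms(since: DateTime<Utc>) -> u64 {
//         Utc::now()
//             .signed_duration_since(since)
//             .to_std()
//             .unwrap()
//             .as_millis() as u64
//     }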
595 :
596 : /// Start compute.
597 : ///
598 : /// Prerequisites:
599 : /// - the compute spec has been placed in self.state.pspec
600 : ///
601 : /// On success:
602 : /// - status is set to ComputeStatus::Running
603 : /// - self.running_postgres is set
604 : ///
605 : /// On error:
606 : /// - status is left in ComputeStatus::Init. The caller is responsible for setting it to Failed
607 : /// - if Postgres was started before the fatal error happened, self.running_postgres is
608 : /// set. The caller is responsible for killing it.
609 : ///
610 : /// Note that this is in the critical path of a compute cold start. Keep this fast.
611 : /// Try to do things concurrently, to hide the latencies.
612 0 : fn start_compute(self: &Arc<Self>, pg_handle: &mut Option<PostgresHandle>) -> Result<()> {
613 : let compute_state: ComputeState;
614 :
615 : let start_compute_span;
616 : let _this_entered;
617 : {
618 0 : let mut state_guard = self.state.lock().unwrap();
619 :
620 : // Create a tracing span for the startup operation.
621 : //
622 : // We could otherwise just annotate the function with #[instrument], but if
623 : // we're being configured from a /configure HTTP request, we want the
624 : // startup to be considered part of the /configure request.
625 : //
626 : // Similarly, if a trace ID was passed in env variables, attach it to the span.
627 0 : start_compute_span = {
628 : // Temporarily enter the parent span, so that the new span becomes its child.
629 0 : if let Some(p) = state_guard.startup_span.take() {
630 0 : let _parent_entered = p.entered();
631 0 : tracing::info_span!("start_compute")
632 0 : } else if let Some(otel_context) = startup_context_from_env() {
633 : use tracing_opentelemetry::OpenTelemetrySpanExt;
634 0 : let span = tracing::info_span!("start_compute");
635 0 : span.set_parent(otel_context);
636 0 : span
637 : } else {
638 0 : tracing::info_span!("start_compute")
639 : }
640 : };
641 0 : _this_entered = start_compute_span.enter();
642 :
643 0 : state_guard.set_status(ComputeStatus::Init, &self.state_changed);
644 0 : compute_state = state_guard.clone()
645 : }
646 :
647 0 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
648 0 : info!(
649 0 : "starting compute for project {}, operation {}, tenant {}, timeline {}, project {}, branch {}, endpoint {}, features {:?}, spec.remote_extensions {:?}",
650 0 : pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None"),
651 0 : pspec.spec.operation_uuid.as_deref().unwrap_or("None"),
652 : pspec.tenant_id,
653 : pspec.timeline_id,
654 0 : pspec.spec.project_id.as_deref().unwrap_or("None"),
655 0 : pspec.spec.branch_id.as_deref().unwrap_or("None"),
656 0 : pspec.spec.endpoint_id.as_deref().unwrap_or("None"),
657 : pspec.spec.features,
658 : pspec.spec.remote_extensions,
659 : );
660 :
661 : ////// PRE-STARTUP PHASE: things that need to be finished before we start the Postgres process
662 :
663 : // Collect all the tasks that must finish here
664 0 : let mut pre_tasks = tokio::task::JoinSet::new();
665 :
666 : // Make sure TLS certificates are properly loaded and in the right place.
667 0 : if self.compute_ctl_config.tls.is_some() {
668 0 : let this = self.clone();
669 0 : pre_tasks.spawn(async move {
670 0 : this.watch_cert_for_changes().await;
671 :
672 0 : Ok::<(), anyhow::Error>(())
673 0 : });
674 0 : }
675 :
676 0 : let tls_config = self.tls_config(&pspec.spec);
677 :
678 : // If there are any remote extensions in shared_preload_libraries, start downloading them
679 0 : if pspec.spec.remote_extensions.is_some() {
680 0 : let (this, spec) = (self.clone(), pspec.spec.clone());
681 0 : pre_tasks.spawn(async move {
682 0 : this.download_preload_extensions(&spec)
683 0 : .in_current_span()
684 0 : .await
685 0 : });
686 0 : }
687 :
688 : // Prepare pgdata directory. This downloads the basebackup, among other things.
689 : {
690 0 : let (this, cs) = (self.clone(), compute_state.clone());
691 0 : pre_tasks.spawn_blocking_child(move || this.prepare_pgdata(&cs));
692 : }
693 :
694 : // Resize swap to the desired size if the compute spec says so
695 0 : if let (Some(size_bytes), true) =
696 0 : (pspec.spec.swap_size_bytes, self.params.resize_swap_on_bind)
697 : {
698 0 : pre_tasks.spawn_blocking_child(move || {
699 : // To avoid 'swapoff' hitting postgres startup, we need to run resize-swap to completion
700 : // *before* starting postgres.
701 : //
702 : // In theory, we could do this asynchronously if SkipSwapon was enabled for VMs, but this
703 : // carries a risk of introducing hard-to-debug issues - e.g. if postgres sometimes gets
704 : // OOM-killed during startup because swap wasn't available yet.
705 0 : resize_swap(size_bytes).context("failed to resize swap")?;
706 0 : let size_mib = size_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
707 0 : info!(%size_bytes, %size_mib, "resized swap");
708 :
709 0 : Ok::<(), anyhow::Error>(())
710 0 : });
711 0 : }
712 :
713 : // Set disk quota if the compute spec says so
714 0 : if let (Some(disk_quota_bytes), Some(disk_quota_fs_mountpoint)) = (
715 0 : pspec.spec.disk_quota_bytes,
716 0 : self.params.set_disk_quota_for_fs.as_ref(),
717 : ) {
718 0 : let disk_quota_fs_mountpoint = disk_quota_fs_mountpoint.clone();
719 0 : pre_tasks.spawn_blocking_child(move || {
720 0 : set_disk_quota(disk_quota_bytes, &disk_quota_fs_mountpoint)
721 0 : .context("failed to set disk quota")?;
722 0 : let size_mib = disk_quota_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
723 0 : info!(%disk_quota_bytes, %size_mib, "set disk quota");
724 :
725 0 : Ok::<(), anyhow::Error>(())
726 0 : });
727 0 : }
728 :
729 : // tune pgbouncer
730 0 : if let Some(pgbouncer_settings) = &pspec.spec.pgbouncer_settings {
731 0 : info!("tuning pgbouncer");
732 :
733 0 : let pgbouncer_settings = pgbouncer_settings.clone();
734 0 : let tls_config = tls_config.clone();
735 :
736 : // Spawn a background task to do the tuning,
737 : // so that we don't block the main thread that starts Postgres.
738 0 : let _handle = tokio::spawn(async move {
739 0 : let res = tune_pgbouncer(pgbouncer_settings, tls_config).await;
740 0 : if let Err(err) = res {
741 0 : error!("error while tuning pgbouncer: {err:?}");
742 : // Continue with the startup anyway
743 0 : }
744 0 : });
745 0 : }
746 :
747 : // configure local_proxy
748 0 : if let Some(local_proxy) = &pspec.spec.local_proxy_config {
749 0 : info!("configuring local_proxy");
750 :
751 : // Spawn a background task to do the configuration,
752 : // so that we don't block the main thread that starts Postgres.
753 :
754 0 : let mut local_proxy = local_proxy.clone();
755 0 : local_proxy.tls = tls_config.clone();
756 :
757 0 : let _handle = tokio::spawn(async move {
758 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
759 0 : error!("error while configuring local_proxy: {err:?}");
760 : // Continue with the startup anyway
761 0 : }
762 0 : });
763 0 : }
764 :
765 : // Configure and start rsyslog for compliance audit logging
766 0 : match pspec.spec.audit_log_level {
767 : ComputeAudit::Hipaa | ComputeAudit::Extended | ComputeAudit::Full => {
768 0 : let remote_tls_endpoint =
769 0 : std::env::var("AUDIT_LOGGING_TLS_ENDPOINT").unwrap_or("".to_string());
770 0 : let remote_plain_endpoint =
771 0 : std::env::var("AUDIT_LOGGING_ENDPOINT").unwrap_or("".to_string());
772 :
773 0 : if remote_plain_endpoint.is_empty() && remote_tls_endpoint.is_empty() {
774 0 : anyhow::bail!(
775 0 : "AUDIT_LOGGING_ENDPOINT and AUDIT_LOGGING_TLS_ENDPOINT are both empty"
776 : );
777 0 : }
778 :
779 0 : let log_directory_path = Path::new(&self.params.pgdata).join("log");
780 0 : let log_directory_path = log_directory_path.to_string_lossy().to_string();
781 :
782 : // Add project_id and endpoint_id to identify the logs.
783 : //
784 : // These IDs are passed from the control plane.
785 0 : let endpoint_id = pspec.spec.endpoint_id.as_deref().unwrap_or("");
786 0 : let project_id = pspec.spec.project_id.as_deref().unwrap_or("");
787 :
788 0 : configure_audit_rsyslog(
789 0 : log_directory_path.clone(),
790 0 : endpoint_id,
791 0 : project_id,
792 0 : &remote_plain_endpoint,
793 0 : &remote_tls_endpoint,
794 0 : )?;
795 :
796 : // Launch a background task to clean up the audit logs
797 0 : launch_pgaudit_gc(log_directory_path);
798 : }
799 0 : _ => {}
800 : }
801 :
802 : // Configure and start rsyslog for Postgres logs export
803 0 : let conf = PostgresLogsRsyslogConfig::new(pspec.spec.logs_export_host.as_deref());
804 0 : configure_postgres_logs_export(conf)?;
805 :
806 : // Launch remaining service threads
807 0 : let _monitor_handle = launch_monitor(self);
808 0 : let _configurator_handle = launch_configurator(self);
809 :
810 : // Wait for all the pre-tasks to finish before starting postgres
811 0 : let rt = tokio::runtime::Handle::current();
812 0 : while let Some(res) = rt.block_on(pre_tasks.join_next()) {
813 0 : res??;
814 : }
815 :
816 : ////// START POSTGRES
817 0 : let start_time = Utc::now();
818 0 : let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
819 0 : let postmaster_pid = pg_process.pid();
820 0 : *pg_handle = Some(pg_process);
821 :
822 : // If this is a primary endpoint, perform some post-startup configuration before
823 : // opening it up for the world.
824 0 : let config_time = Utc::now();
825 0 : if pspec.spec.mode == ComputeMode::Primary {
826 0 : self.configure_as_primary(&compute_state)?;
827 :
828 0 : let conf = self.get_tokio_conn_conf(None);
829 0 : tokio::task::spawn(async {
830 0 : let _ = installed_extensions(conf).await;
831 0 : });
832 0 : }
833 :
834 : // All done!
835 0 : let startup_end_time = Utc::now();
836 0 : let metrics = {
837 0 : let mut state = self.state.lock().unwrap();
838 0 : state.metrics.start_postgres_ms = config_time
839 0 : .signed_duration_since(start_time)
840 0 : .to_std()
841 0 : .unwrap()
842 0 : .as_millis() as u64;
843 0 : state.metrics.config_ms = startup_end_time
844 0 : .signed_duration_since(config_time)
845 0 : .to_std()
846 0 : .unwrap()
847 0 : .as_millis() as u64;
848 0 : state.metrics.total_startup_ms = startup_end_time
849 0 : .signed_duration_since(compute_state.start_time)
850 0 : .to_std()
851 0 : .unwrap()
852 0 : .as_millis() as u64;
853 0 : state.metrics.clone()
854 : };
855 0 : self.set_status(ComputeStatus::Running);
856 :
857 : // Log metrics so that we can search for slow operations in logs
858 0 : info!(?metrics, postmaster_pid = %postmaster_pid, "compute start finished");
859 :
860 0 : self.spawn_extension_stats_task();
861 :
862 0 : if pspec.spec.autoprewarm {
863 0 : info!("autoprewarming on startup as requested");
864 0 : self.prewarm_lfc(None);
865 0 : }
866 0 : if let Some(seconds) = pspec.spec.offload_lfc_interval_seconds {
867 0 : self.spawn_lfc_offload_task(Duration::from_secs(seconds.into()));
868 0 : };
869 0 : Ok(())
870 0 : }
871 :
872 : #[instrument(skip_all)]
873 : async fn download_preload_extensions(&self, spec: &ComputeSpec) -> Result<()> {
874 : let remote_extensions = if let Some(remote_extensions) = &spec.remote_extensions {
875 : remote_extensions
876 : } else {
877 : return Ok(());
878 : };
879 :
880 : // First, create control files for all available extensions
881 : extension_server::create_control_files(remote_extensions, &self.params.pgbin);
882 :
883 : let library_load_start_time = Utc::now();
884 : let remote_ext_metrics = self.prepare_preload_libraries(spec).await?;
885 :
886 : let library_load_time = Utc::now()
887 : .signed_duration_since(library_load_start_time)
888 : .to_std()
889 : .unwrap()
890 : .as_millis() as u64;
891 : let mut state = self.state.lock().unwrap();
892 : state.metrics.load_ext_ms = library_load_time;
893 : state.metrics.num_ext_downloaded = remote_ext_metrics.num_ext_downloaded;
894 : state.metrics.largest_ext_size = remote_ext_metrics.largest_ext_size;
895 : state.metrics.total_ext_download_size = remote_ext_metrics.total_ext_download_size;
896 : info!(
897 : "Loading shared_preload_libraries took {:?}ms",
898 : library_load_time
899 : );
900 : info!("{:?}", remote_ext_metrics);
901 :
902 : Ok(())
903 : }
904 :
905 : /// Start the vm-monitor if directed to. The vm-monitor only runs on linux
906 : /// because it requires cgroups.
907 0 : fn start_vm_monitor(&self, disable_lfc_resizing: bool) -> StartVmMonitorResult {
908 : cfg_if::cfg_if! {
909 : if #[cfg(target_os = "linux")] {
910 : use std::env;
911 : use tokio_util::sync::CancellationToken;
912 :
913 : // This token is used internally by the monitor to clean up all threads
914 0 : let token = CancellationToken::new();
915 :
916 : // don't pass postgres connection string to vm-monitor if we don't want it to resize LFC
917 0 : let pgconnstr = if disable_lfc_resizing {
918 0 : None
919 : } else {
920 0 : Some(self.params.filecache_connstr.clone())
921 : };
922 :
923 0 : let vm_monitor = if env::var_os("AUTOSCALING").is_some() {
924 0 : let vm_monitor = tokio::spawn(vm_monitor::start(
925 0 : Box::leak(Box::new(vm_monitor::Args {
926 0 : cgroup: Some(self.params.cgroup.clone()),
927 0 : pgconnstr,
928 0 : addr: self.params.vm_monitor_addr.clone(),
929 0 : })),
930 0 : token.clone(),
931 : ));
932 0 : Some(vm_monitor)
933 : } else {
934 0 : None
935 : };
936 0 : StartVmMonitorResult { token, vm_monitor }
937 : } else {
938 : _ = disable_lfc_resizing; // appease unused lint on macOS
939 : StartVmMonitorResult { }
940 : }
941 : }
942 0 : }
943 :
944 0 : fn cleanup_after_postgres_exit(&self) -> Result<bool> {
945 : // Maybe sync safekeepers again, to speed up next startup
946 0 : let compute_state = self.state.lock().unwrap().clone();
947 0 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
948 0 : let lsn = if matches!(pspec.spec.mode, compute_api::spec::ComputeMode::Primary) {
949 0 : info!("syncing safekeepers on shutdown");
950 0 : let storage_auth_token = pspec.storage_auth_token.clone();
951 0 : let lsn = self.sync_safekeepers(storage_auth_token)?;
952 0 : info!(%lsn, "synced safekeepers");
953 0 : Some(lsn)
954 : } else {
955 0 : info!("not primary, not syncing safekeepers");
956 0 : None
957 : };
958 :
959 0 : let mut state = self.state.lock().unwrap();
960 0 : state.terminate_flush_lsn = lsn;
961 :
962 0 : let delay_exit = state.status == ComputeStatus::TerminationPendingFast;
963 0 : if state.status == ComputeStatus::TerminationPendingFast
964 0 : || state.status == ComputeStatus::TerminationPendingImmediate
965 : {
966 0 : info!(
967 0 : "Changing compute status from {} to {}",
968 0 : state.status,
969 : ComputeStatus::Terminated
970 : );
971 0 : state.status = ComputeStatus::Terminated;
972 0 : self.state_changed.notify_all();
973 0 : }
974 0 : drop(state);
975 :
976 0 : if let Err(err) = self.check_for_core_dumps() {
977 0 : error!("error while checking for core dumps: {err:?}");
978 0 : }
979 :
980 0 : Ok(delay_exit)
981 0 : }
982 :
983 : /// Check whether the compute node has the corresponding feature enabled.
984 0 : pub fn has_feature(&self, feature: ComputeFeature) -> bool {
985 0 : let state = self.state.lock().unwrap();
986 :
987 0 : if let Some(s) = state.pspec.as_ref() {
988 0 : s.spec.features.contains(&feature)
989 : } else {
990 0 : false
991 : }
992 0 : }
993 :
994 0 : pub fn set_status(&self, status: ComputeStatus) {
995 0 : let mut state = self.state.lock().unwrap();
996 0 : state.set_status(status, &self.state_changed);
997 0 : }
998 :
999 0 : pub fn set_failed_status(&self, err: anyhow::Error) {
1000 0 : let mut state = self.state.lock().unwrap();
1001 0 : state.set_failed_status(err, &self.state_changed);
1002 0 : }
1003 :
1004 0 : pub fn get_status(&self) -> ComputeStatus {
1005 0 : self.state.lock().unwrap().status
1006 0 : }
1007 :
1008 0 : pub fn get_timeline_id(&self) -> Option<TimelineId> {
1009 0 : self.state
1010 0 : .lock()
1011 0 : .unwrap()
1012 0 : .pspec
1013 0 : .as_ref()
1014 0 : .map(|s| s.timeline_id)
1015 0 : }
1016 :
1017 : // Remove the `pgdata` directory and create it again with the right permissions.
1018 0 : fn create_pgdata(&self) -> Result<()> {
1019 : // Ignore removal error, likely it is a 'No such file or directory (os error 2)'.
1020 : // If it is something different then create_dir() will error out anyway.
1021 0 : let pgdata = &self.params.pgdata;
1022 0 : let _ok = fs::remove_dir_all(pgdata);
1023 0 : fs::create_dir(pgdata)?;
1024 0 : fs::set_permissions(pgdata, fs::Permissions::from_mode(0o700))?;
1025 :
1026 0 : Ok(())
1027 0 : }
1028 :
1029 : /// Fetches a basebackup from the Pageserver using the compute state's Pageserver connstring and
1030 : /// unarchives it to `pgdata` directory, replacing any existing contents.
1031 : #[instrument(skip_all, fields(%lsn))]
1032 : fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
1033 : let spec = compute_state.pspec.as_ref().expect("spec must be set");
1034 :
1035 : let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
1036 : let started = Instant::now();
1037 :
1038 : let (connected, size) = match PageserverProtocol::from_connstring(shard0_connstr)? {
1039 : PageserverProtocol::Libpq => self.try_get_basebackup_libpq(spec, lsn)?,
1040 : PageserverProtocol::Grpc => self.try_get_basebackup_grpc(spec, lsn)?,
1041 : };
1042 :
1043 : self.fix_zenith_signal_neon_signal()?;
1044 :
1045 : let mut state = self.state.lock().unwrap();
1046 : state.metrics.pageserver_connect_micros =
1047 : connected.duration_since(started).as_micros() as u64;
1048 : state.metrics.basebackup_bytes = size as u64;
1049 : state.metrics.basebackup_ms = started.elapsed().as_millis() as u64;
1050 :
1051 : Ok(())
1052 : }
1053 :
1054 : /// Move the Zenith signal file to Neon signal file location.
1055 : /// This makes Compute compatible with older PageServers that don't yet
1056 : /// know about the Zenith->Neon rename.
1057 0 : fn fix_zenith_signal_neon_signal(&self) -> Result<()> {
1058 0 : let datadir = Path::new(&self.params.pgdata);
1059 :
1060 0 : let neonsig = datadir.join("neon.signal");
1061 :
1062 0 : if neonsig.is_file() {
1063 0 : return Ok(());
1064 0 : }
1065 :
1066 0 : let zenithsig = datadir.join("zenith.signal");
1067 :
1068 0 : if zenithsig.is_file() {
1069 0 : fs::copy(zenithsig, neonsig)?;
1070 0 : }
1071 :
1072 0 : Ok(())
1073 0 : }
1074 :
1075 : /// Fetches a basebackup via gRPC. The connstring must use grpc://. Returns the timestamp when
1076 : /// the connection was established, and the (compressed) size of the basebackup.
1077 0 : fn try_get_basebackup_grpc(&self, spec: &ParsedSpec, lsn: Lsn) -> Result<(Instant, usize)> {
1078 0 : let shard0_connstr = spec
1079 0 : .pageserver_connstr
1080 0 : .split(',')
1081 0 : .next()
1082 0 : .unwrap()
1083 0 : .to_string();
1084 0 : let shard_index = match spec.pageserver_connstr.split(',').count() as u8 {
1085 0 : 0 | 1 => ShardIndex::unsharded(),
1086 0 : count => ShardIndex::new(ShardNumber(0), ShardCount(count)),
1087 : };
1088 :
1089 0 : let (reader, connected) = tokio::runtime::Handle::current().block_on(async move {
1090 0 : let mut client = page_api::Client::connect(
1091 0 : shard0_connstr,
1092 0 : spec.tenant_id,
1093 0 : spec.timeline_id,
1094 0 : shard_index,
1095 0 : spec.storage_auth_token.clone(),
1096 0 : None, // NB: base backups use payload compression
1097 0 : )
1098 0 : .await?;
1099 0 : let connected = Instant::now();
1100 0 : let reader = client
1101 0 : .get_base_backup(page_api::GetBaseBackupRequest {
1102 0 : lsn: (lsn != Lsn(0)).then_some(lsn),
1103 0 : compression: BaseBackupCompression::Gzip,
1104 0 : replica: spec.spec.mode != ComputeMode::Primary,
1105 0 : full: false,
1106 0 : })
1107 0 : .await?;
1108 0 : anyhow::Ok((reader, connected))
1109 0 : })?;
1110 :
1111 0 : let mut reader = MeasuredReader::new(tokio_util::io::SyncIoBridge::new(reader));
1112 :
1113 : // Set `ignore_zeros` so that unpack() reads the entire stream and doesn't just stop at the
1114 : // end-of-archive marker. If the server errors, the tar::Builder drop handler will write an
1115 : // end-of-archive marker before the error is emitted, and we would not see the error.
1116 0 : let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut reader));
1117 0 : ar.set_ignore_zeros(true);
1118 0 : ar.unpack(&self.params.pgdata)?;
1119 :
1120 0 : Ok((connected, reader.get_byte_count()))
1121 0 : }
1122 :
1123 : /// Fetches a basebackup via libpq. The connstring must use postgresql://. Returns the timestamp
1124 : /// when the connection was established, and the (compressed) size of the basebackup.
1125 0 : fn try_get_basebackup_libpq(&self, spec: &ParsedSpec, lsn: Lsn) -> Result<(Instant, usize)> {
1126 0 : let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
1127 0 : let mut config = postgres::Config::from_str(shard0_connstr)?;
1128 :
1129 : // Use the storage auth token from the config file, if given.
1130 : // Note: this overrides any password set in the connection string.
1131 0 : if let Some(storage_auth_token) = &spec.storage_auth_token {
1132 0 : info!("Got storage auth token from spec file");
1133 0 : config.password(storage_auth_token);
1134 : } else {
1135 0 : info!("Storage auth token not set");
1136 : }
1137 :
1138 0 : config.application_name("compute_ctl");
1139 0 : config.options(&format!(
1140 0 : "-c neon.compute_mode={}",
1141 0 : spec.spec.mode.to_type_str()
1142 0 : ));
1143 :
1144 : // Connect to pageserver
1145 0 : let mut client = config.connect(NoTls)?;
1146 0 : let connected = Instant::now();
1147 :
1148 0 : let basebackup_cmd = match lsn {
1149 : Lsn(0) => {
1150 0 : if spec.spec.mode != ComputeMode::Primary {
1151 0 : format!(
1152 0 : "basebackup {} {} --gzip --replica",
1153 : spec.tenant_id, spec.timeline_id
1154 : )
1155 : } else {
1156 0 : format!("basebackup {} {} --gzip", spec.tenant_id, spec.timeline_id)
1157 : }
1158 : }
1159 : _ => {
1160 0 : if spec.spec.mode != ComputeMode::Primary {
1161 0 : format!(
1162 0 : "basebackup {} {} {} --gzip --replica",
1163 : spec.tenant_id, spec.timeline_id, lsn
1164 : )
1165 : } else {
1166 0 : format!(
1167 0 : "basebackup {} {} {} --gzip",
1168 : spec.tenant_id, spec.timeline_id, lsn
1169 : )
1170 : }
1171 : }
1172 : };
1173 :
1174 0 : let copyreader = client.copy_out(basebackup_cmd.as_str())?;
1175 0 : let mut measured_reader = MeasuredReader::new(copyreader);
1176 0 : let mut bufreader = std::io::BufReader::new(&mut measured_reader);
1177 :
1178 : // Read the archive directly from the `CopyOutReader`
1179 : //
1180 : // Set `ignore_zeros` so that unpack() reads all the Copy data and
1181 : // doesn't stop at the end-of-archive marker. Otherwise, if the server
1182 : // sends an Error after finishing the tarball, we will not notice it.
1183 : // The tar::Builder drop handler will write an end-of-archive marker
1184 : // before emitting the error, and we would not see it otherwise.
1185 0 : let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut bufreader));
1186 0 : ar.set_ignore_zeros(true);
1187 0 : ar.unpack(&self.params.pgdata)?;
1188 :
1189 0 : Ok((connected, measured_reader.get_byte_count()))
1190 0 : }
1191 :
1192 : // Gets the basebackup in a retry loop
1193 : #[instrument(skip_all, fields(%lsn))]
1194 : pub fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
1195 : let mut retry_period_ms = 500.0;
1196 : let mut attempts = 0;
1197 : const DEFAULT_ATTEMPTS: u16 = 10;
1198 : #[cfg(feature = "testing")]
1199 : let max_attempts = if let Ok(v) = env::var("NEON_COMPUTE_TESTING_BASEBACKUP_RETRIES") {
1200 : u16::from_str(&v).unwrap()
1201 : } else {
1202 : DEFAULT_ATTEMPTS
1203 : };
1204 : #[cfg(not(feature = "testing"))]
1205 : let max_attempts = DEFAULT_ATTEMPTS;
1206 : loop {
1207 : let result = self.try_get_basebackup(compute_state, lsn);
1208 : match result {
1209 : Ok(_) => {
1210 : return result;
1211 : }
1212 : Err(ref e) if attempts < max_attempts => {
1213 : warn!(
1214 : "Failed to get basebackup: {} (attempt {}/{})",
1215 : e, attempts, max_attempts
1216 : );
1217 : std::thread::sleep(std::time::Duration::from_millis(retry_period_ms as u64));
1218 : retry_period_ms *= 1.5;
1219 : }
1220 : Err(_) => {
1221 : return result;
1222 : }
1223 : }
1224 : attempts += 1;
1225 : }
1226 : }
1227 :
1228 0 : pub async fn check_safekeepers_synced_async(
1229 0 : &self,
1230 0 : compute_state: &ComputeState,
1231 0 : ) -> Result<Option<Lsn>> {
1232 : // Construct a connection config for each safekeeper
1233 0 : let pspec: ParsedSpec = compute_state
1234 0 : .pspec
1235 0 : .as_ref()
1236 0 : .expect("spec must be set")
1237 0 : .clone();
1238 0 : let sk_connstrs: Vec<String> = pspec.safekeeper_connstrings.clone();
1239 0 : let sk_configs = sk_connstrs.into_iter().map(|connstr| {
1240 : // Format connstr
1241 0 : let id = connstr.clone();
1242 0 : let connstr = format!("postgresql://no_user@{connstr}");
1243 0 : let options = format!(
1244 0 : "-c timeline_id={} tenant_id={}",
1245 : pspec.timeline_id, pspec.tenant_id
1246 : );
1247 :
1248 : // Construct client
1249 0 : let mut config = tokio_postgres::Config::from_str(&connstr).unwrap();
1250 0 : config.options(&options);
1251 0 : if let Some(storage_auth_token) = pspec.storage_auth_token.clone() {
1252 0 : config.password(storage_auth_token);
1253 0 : }
1254 :
1255 0 : (id, config)
1256 0 : });
1257 :
1258 : // Create task set to query all safekeepers
1259 0 : let mut tasks = FuturesUnordered::new();
1260 0 : let quorum = sk_configs.len() / 2 + 1;
1261 0 : for (id, config) in sk_configs {
1262 0 : let timeout = tokio::time::Duration::from_millis(100);
1263 0 : let task = tokio::time::timeout(timeout, ping_safekeeper(id, config));
1264 0 : tasks.push(tokio::spawn(task));
1265 0 : }
1266 :
1267 : // Get a quorum of responses or errors
1268 0 : let mut responses = Vec::new();
1269 0 : let mut join_errors = Vec::new();
1270 0 : let mut task_errors = Vec::new();
1271 0 : let mut timeout_errors = Vec::new();
1272 0 : while let Some(response) = tasks.next().await {
1273 0 : match response {
1274 0 : Ok(Ok(Ok(r))) => responses.push(r),
1275 0 : Ok(Ok(Err(e))) => task_errors.push(e),
1276 0 : Ok(Err(e)) => timeout_errors.push(e),
1277 0 : Err(e) => join_errors.push(e),
1278 : };
1279 0 : if responses.len() >= quorum {
1280 0 : break;
1281 0 : }
1282 0 : if join_errors.len() + task_errors.len() + timeout_errors.len() >= quorum {
1283 0 : break;
1284 0 : }
1285 : }
1286 :
1287 : // In case of error, log and fail the check, but don't crash.
1288 : // We're playing it safe because these errors could be transient
1289 : // and we don't yet retry. Also being careful here allows us to
1290 : // be backwards compatible with safekeepers that don't have the
1291 : // TIMELINE_STATUS API yet.
1292 0 : if responses.len() < quorum {
1293 0 : error!(
1294 0 : "failed sync safekeepers check {:?} {:?} {:?}",
1295 : join_errors, task_errors, timeout_errors
1296 : );
1297 0 : return Ok(None);
1298 0 : }
1299 :
1300 0 : Ok(check_if_synced(responses))
1301 0 : }
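// Quorum sketch: with N safekeepers a majority is N / 2 + 1 (integer
// division), e.g. 2 of 3, 3 of 4, 3 of 5. The loop above stops as soon as
// either successful responses or accumulated errors reach that count:
//
//     fn quorum(n: usize) -> usize {
//         n / 2 + 1
//     }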
1302 :
1303 : // Fast path for sync_safekeepers. If they're already synced we get the lsn
1304 : // in one roundtrip. If not, we should do a full sync_safekeepers.
1305 : #[instrument(skip_all)]
1306 : pub fn check_safekeepers_synced(&self, compute_state: &ComputeState) -> Result<Option<Lsn>> {
1307 : let start_time = Utc::now();
1308 :
1309 : let rt = tokio::runtime::Handle::current();
1310 : let result = rt.block_on(self.check_safekeepers_synced_async(compute_state));
1311 :
1312 : // Record runtime
1313 : self.state.lock().unwrap().metrics.sync_sk_check_ms = Utc::now()
1314 : .signed_duration_since(start_time)
1315 : .to_std()
1316 : .unwrap()
1317 : .as_millis() as u64;
1318 : result
1319 : }
1320 :
1321 : // Run `postgres` in a special mode with `--sync-safekeepers` argument
1322 : // and return the reported LSN back to the caller.
1323 : #[instrument(skip_all)]
1324 : pub fn sync_safekeepers(&self, storage_auth_token: Option<String>) -> Result<Lsn> {
1325 : let start_time = Utc::now();
1326 :
1327 : let mut sync_handle = maybe_cgexec(&self.params.pgbin)
1328 : .args(["--sync-safekeepers"])
1329 : .env("PGDATA", &self.params.pgdata) // we cannot use -D in this mode
1330 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
1331 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
1332 : } else {
1333 : vec![]
1334 : })
1335 : .stdout(Stdio::piped())
1336 : .stderr(Stdio::piped())
1337 : .spawn()
1338 : .expect("postgres --sync-safekeepers failed to start");
1339 : SYNC_SAFEKEEPERS_PID.store(sync_handle.id(), Ordering::SeqCst);
1340 :
1341 : // `postgres --sync-safekeepers` will print all log output to stderr and
1342 : // final LSN to stdout. So we leave stdout to collect LSN, while stderr logs
1343 : // will be collected in a child thread.
1344 : let stderr = sync_handle
1345 : .stderr
1346 : .take()
1347 : .expect("stderr should be captured");
1348 : let logs_handle = handle_postgres_logs(stderr);
1349 :
1350 : let sync_output = sync_handle
1351 : .wait_with_output()
1352 : .expect("postgres --sync-safekeepers failed");
1353 : SYNC_SAFEKEEPERS_PID.store(0, Ordering::SeqCst);
1354 :
1355 : // Process has exited, so we can join the logs thread.
1356 : let _ = tokio::runtime::Handle::current()
1357 : .block_on(logs_handle)
1358 0 : .map_err(|e| tracing::error!("log task panicked: {:?}", e));
1359 :
1360 : if !sync_output.status.success() {
1361 : anyhow::bail!(
1362 : "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
1363 : sync_output.status,
1364 : String::from_utf8(sync_output.stdout)
1365 : .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
1366 : );
1367 : }
1368 :
1369 : self.state.lock().unwrap().metrics.sync_safekeepers_ms = Utc::now()
1370 : .signed_duration_since(start_time)
1371 : .to_std()
1372 : .unwrap()
1373 : .as_millis() as u64;
1374 :
1375 : let lsn = Lsn::from_str(String::from_utf8(sync_output.stdout)?.trim())?;
1376 :
1377 : Ok(lsn)
1378 : }
1379 :
1380 : /// Do all the preparations like PGDATA directory creation, configuration,
1381 : /// safekeepers sync, basebackup, etc.
1382 : #[instrument(skip_all)]
1383 : pub fn prepare_pgdata(&self, compute_state: &ComputeState) -> Result<()> {
1384 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
1385 : let spec = &pspec.spec;
1386 : let pgdata_path = Path::new(&self.params.pgdata);
1387 :
1388 : let tls_config = self.tls_config(&pspec.spec);
1389 :
1390 : // Remove/create an empty pgdata directory and put configuration there.
1391 : self.create_pgdata()?;
1392 : config::write_postgres_conf(
1393 : pgdata_path,
1394 : &pspec.spec,
1395 : self.params.internal_http_port,
1396 : tls_config,
1397 : )?;
1398 :
1399 : // Syncing safekeepers is only safe with primary nodes: if a primary
1400 : // is already connected it will be kicked out, so a secondary (standby)
1401 : // cannot sync safekeepers.
1402 : let lsn = match spec.mode {
1403 : ComputeMode::Primary => {
1404 : info!("checking if safekeepers are synced");
1405 : let lsn = if let Ok(Some(lsn)) = self.check_safekeepers_synced(compute_state) {
1406 : lsn
1407 : } else {
1408 : info!("starting safekeepers syncing");
1409 : self.sync_safekeepers(pspec.storage_auth_token.clone())
1410 : .with_context(|| "failed to sync safekeepers")?
1411 : };
1412 : info!("safekeepers synced at LSN {}", lsn);
1413 : lsn
1414 : }
1415 : ComputeMode::Static(lsn) => {
1416 : info!("Starting read-only node at static LSN {}", lsn);
1417 : lsn
1418 : }
1419 : ComputeMode::Replica => {
1420 : info!("Initializing standby from latest Pageserver LSN");
1421 : Lsn(0)
1422 : }
1423 : };
1424 :
1425 : info!(
1426 : "getting basebackup@{} from pageserver {}",
1427 : lsn, &pspec.pageserver_connstr
1428 : );
1429 0 : self.get_basebackup(compute_state, lsn).with_context(|| {
1430 0 : format!(
1431 0 : "failed to get basebackup@{} from pageserver {}",
1432 0 : lsn, &pspec.pageserver_connstr
1433 : )
1434 0 : })?;
1435 :
1436 : // Update pg_hba.conf received with basebackup.
1437 : update_pg_hba(pgdata_path)?;
1438 :
1439 : // Place pg_dynshmem under /dev/shm. This allows us to use
1440 : // 'dynamic_shared_memory_type = mmap' so that the files are placed in
1441 : // /dev/shm, similar to how 'dynamic_shared_memory_type = posix' works.
1442 : //
1443 : // Why on earth don't we just stick to the 'posix' default, you might
1444 : // ask. It turns out that making large allocations with 'posix' doesn't
1445 : // work very well with autoscaling. The behavior we want is that:
1446 : //
1447 : // 1. You can make large DSM allocations, larger than the current RAM
1448 : // size of the VM, without errors
1449 : //
1450 : // 2. If the allocated memory is really used, the VM is scaled up
1451 : // automatically to accommodate that
1452 : //
1453 : // We try to make that possible by having swap in the VM. But with the
1454 : // default 'posix' DSM implementation, we fail step 1, even when there's
1455 : // plenty of swap available. PostgreSQL uses posix_fallocate() to create
1456 : // the shmem segment, which is really just a file in /dev/shm in Linux,
1457 : // but posix_fallocate() on tmpfs returns ENOMEM if the size is larger
1458 : // than available RAM.
1459 : //
1460 : // Using 'dynamic_shared_memory_type = mmap' works around that, because
1461 : // the Postgres 'mmap' DSM implementation doesn't use
1462 : // posix_fallocate(). Instead, it uses repeated calls to write(2) to
1463 : // fill the file with zeros. It's weird that that differs between
1464 : // 'posix' and 'mmap', but we take advantage of it. When the file is
1465 : // filled slowly with write(2), the kernel allows it to grow larger, as
1466 : // long as there's swap available.
1467 : //
1468 : // In short, using 'dynamic_shared_memory_type = mmap' allows one DSM
1469 : // segment to be larger than currently available RAM. But we don't want to
1470 : // store it in a real file, which the kernel would try to flush to disk,
1471 : // so we symlink pg_dynshmem to /dev/shm.
1472 : //
1473 : // We don't set 'dynamic_shared_memory_type = mmap' here, we let the
1474 : // control plane control that option. If 'mmap' is not used, this
1475 : // symlink doesn't affect anything.
1476 : //
1477 : // See https://github.com/neondatabase/autoscaling/issues/800
1478 : std::fs::remove_dir(pgdata_path.join("pg_dynshmem"))?;
1479 : symlink("/dev/shm/", pgdata_path.join("pg_dynshmem"))?;
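// Illustrative sketch (hypothetical helper, not called anywhere): verify
// that pg_dynshmem was indeed replaced by a symlink into /dev/shm, as done
// just above. Uses only std and the anyhow imports already in this file.
#[allow(dead_code)]
fn assert_pg_dynshmem_symlinked(pgdata_path: &Path) -> Result<()> {
    let link = pgdata_path.join("pg_dynshmem");
    let target = std::fs::read_link(&link)
        .with_context(|| format!("{} is not a symlink", link.display()))?;
    anyhow::ensure!(
        target == Path::new("/dev/shm/"),
        "pg_dynshmem points at {} instead of /dev/shm/",
        target.display()
    );
    Ok(())
}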
1480 :
1481 : match spec.mode {
1482 : ComputeMode::Primary => {}
1483 : ComputeMode::Replica | ComputeMode::Static(..) => {
1484 : add_standby_signal(pgdata_path)?;
1485 : }
1486 : }
1487 :
1488 : Ok(())
1489 : }
1490 :
1491 : /// Start and stop a postgres process to warm up the VM for startup.
1492 0 : pub fn prewarm_postgres_vm_memory(&self) -> Result<()> {
1493 0 : info!("prewarming VM memory");
1494 :
1495 : // Create pgdata
1496 0 : let pgdata = &format!("{}.warmup", self.params.pgdata);
1497 0 : create_pgdata(pgdata)?;
1498 :
1499 : // Run initdb to completion
1500 0 : info!("running initdb");
1501 0 : let initdb_bin = Path::new(&self.params.pgbin)
1502 0 : .parent()
1503 0 : .unwrap()
1504 0 : .join("initdb");
1505 0 : Command::new(initdb_bin)
1506 0 : .args(["--pgdata", pgdata])
1507 0 : .output()
1508 0 : .expect("cannot start initdb process");
1509 :
1510 : // Write conf
1511 : use std::io::Write;
1512 0 : let conf_path = Path::new(pgdata).join("postgresql.conf");
1513 0 : let mut file = std::fs::File::create(conf_path)?;
1514 0 : writeln!(file, "shared_buffers=65536")?;
1515 0 : writeln!(file, "port=51055")?; // Nobody should be connecting
1516 0 : writeln!(file, "shared_preload_libraries = 'neon'")?;
1517 :
1518 : // Start postgres
1519 0 : info!("starting postgres");
1520 0 : let mut pg = maybe_cgexec(&self.params.pgbin)
1521 0 : .args(["-D", pgdata])
1522 0 : .spawn()
1523 0 : .expect("cannot start postgres process");
1524 :
1525 : // Stop it when it's ready
1526 0 : info!("waiting for postgres");
1527 0 : wait_for_postgres(&mut pg, Path::new(pgdata))?;
1528 : // SIGQUIT orders postgres to exit immediately. We avoid SIGKILL so that
1529 : // no orphaned processes are left prowling around while the datadir is
1530 : // wiped.
1531 0 : let pm_pid = Pid::from_raw(pg.id() as i32);
1532 0 : kill(pm_pid, Signal::SIGQUIT)?;
1533 0 : info!("sent SIGQUIT signal");
1534 0 : pg.wait()?;
1535 0 : info!("done prewarming vm memory");
1536 :
1537 : // clean up
1538 0 : let _ok = fs::remove_dir_all(pgdata);
1539 0 : Ok(())
1540 0 : }
1541 :
1542 : /// Start Postgres as a child process and wait for it to start accepting
1543 : /// connections.
1544 : ///
1545 : /// Returns a handle to the child process and a handle to the logs thread.
1546 : #[instrument(skip_all)]
1547 : pub fn start_postgres(&self, storage_auth_token: Option<String>) -> Result<PostgresHandle> {
1548 : let pgdata_path = Path::new(&self.params.pgdata);
1549 :
1550 : // Run postgres as a child process.
1551 : let mut pg = maybe_cgexec(&self.params.pgbin)
1552 : .args(["-D", &self.params.pgdata])
1553 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
1554 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
1555 : } else {
1556 : vec![]
1557 : })
1558 : .stderr(Stdio::piped())
1559 : .spawn()
1560 : .expect("cannot start postgres process");
1561 : PG_PID.store(pg.id(), Ordering::SeqCst);
1562 :
1563 : // Start a task to collect logs from stderr.
1564 : let stderr = pg.stderr.take().expect("stderr should be captured");
1565 : let logs_handle = handle_postgres_logs(stderr);
1566 :
1567 : wait_for_postgres(&mut pg, pgdata_path)?;
1568 :
1569 : Ok(PostgresHandle {
1570 : postgres: pg,
1571 : log_collector: logs_handle,
1572 : })
1573 : }
1574 :
1575 : /// Wait for the child Postgres process forever. In this state Ctrl+C will
1576 : /// propagate to Postgres and it will be shut down as well.
1577 0 : fn wait_postgres(&self, mut pg_handle: PostgresHandle) -> std::process::ExitStatus {
1578 0 : info!(postmaster_pid = %pg_handle.postgres.id(), "Waiting for Postgres to exit");
1579 :
1580 0 : let ecode = pg_handle
1581 0 : .postgres
1582 0 : .wait()
1583 0 : .expect("failed to start waiting on Postgres process");
1584 0 : PG_PID.store(0, Ordering::SeqCst);
1585 :
1586 : // Process has exited. Wait for the log collecting task to finish.
1587 0 : let _ = tokio::runtime::Handle::current()
1588 0 : .block_on(pg_handle.log_collector)
1589 0 : .map_err(|e| tracing::error!("log task panicked: {:?}", e));
1590 :
1591 0 : ecode
1592 0 : }
1593 :
1594 : /// Do post configuration of the already started Postgres. This function spawns a background task to
1595 : /// configure the database after applying the compute spec. Currently, it upgrades the neon extension
1596 : /// version. In the future, it may upgrade all 3rd-party extensions.
1597 : #[instrument(skip_all)]
1598 : pub fn post_apply_config(&self) -> Result<()> {
1599 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:post_apply_config"));
1600 0 : tokio::spawn(async move {
1601 0 : let res = async {
1602 0 : let (mut client, connection) = conf.connect(NoTls).await?;
1603 0 : tokio::spawn(async move {
1604 0 : if let Err(e) = connection.await {
1605 0 : eprintln!("connection error: {e}");
1606 0 : }
1607 0 : });
1608 :
1609 0 : handle_neon_extension_upgrade(&mut client)
1610 0 : .await
1611 0 : .context("handle_neon_extension_upgrade")?;
1612 0 : Ok::<_, anyhow::Error>(())
1613 0 : }
1614 0 : .await;
1615 0 : if let Err(err) = res {
1616 0 : error!("error while post_apply_config: {err:#}");
1617 0 : }
1618 0 : });
1619 : Ok(())
1620 : }
1621 :
1622 0 : pub fn get_conn_conf(&self, application_name: Option<&str>) -> postgres::Config {
1623 0 : let mut conf = self.conn_conf.clone();
1624 0 : if let Some(application_name) = application_name {
1625 0 : conf.application_name(application_name);
1626 0 : }
1627 0 : conf
1628 0 : }
1629 :
1630 0 : pub fn get_tokio_conn_conf(&self, application_name: Option<&str>) -> tokio_postgres::Config {
1631 0 : let mut conf = self.tokio_conn_conf.clone();
1632 0 : if let Some(application_name) = application_name {
1633 0 : conf.application_name(application_name);
1634 0 : }
1635 0 : conf
1636 0 : }
1637 :
1638 0 : pub async fn get_maintenance_client(
1639 0 : conf: &tokio_postgres::Config,
1640 0 : ) -> Result<tokio_postgres::Client> {
1641 0 : let mut conf = conf.clone();
1642 0 : conf.application_name("compute_ctl:apply_config");
1643 :
1644 0 : let (client, conn) = match conf.connect(NoTls).await {
1645 : // If the connection fails, it may be an old node with the `zenith_admin` superuser.
1646 : //
1647 : // In this case we need to connect with the old `zenith_admin` name
1648 : // and create a new user. We cannot simply rename the connected user,
1649 : // but we can create a new one and grant it all privileges.
1650 0 : Err(e) => match e.code() {
1651 : Some(&SqlState::INVALID_PASSWORD)
1652 : | Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => {
1653 : // Connect with `zenith_admin` if `cloud_admin` could not authenticate
1654 0 : info!(
1655 0 : "cannot connect to Postgres: {}, retrying with 'zenith_admin' username",
1656 : e
1657 : );
1658 0 : let mut zenith_admin_conf = postgres::config::Config::from(conf.clone());
1659 0 : zenith_admin_conf.application_name("compute_ctl:apply_config");
1660 0 : zenith_admin_conf.user("zenith_admin");
1661 :
1662 : // It doesn't matter what the options were before; here we just want
1663 : // to connect and create a new superuser role.
1664 : const ZENITH_OPTIONS: &str = "-c role=zenith_admin -c default_transaction_read_only=off -c search_path=public -c statement_timeout=0";
1665 0 : zenith_admin_conf.options(ZENITH_OPTIONS);
1666 :
1667 0 : let mut client =
1668 0 : zenith_admin_conf.connect(NoTls)
1669 0 : .context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
1670 :
1671 : // Disable forwarding so that users don't get a cloud_admin role
1672 0 : let mut func = || {
1673 0 : client.simple_query("SET neon.forward_ddl = false")?;
1674 0 : client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
1675 0 : client.simple_query("GRANT zenith_admin TO cloud_admin")?;
1676 0 : Ok::<_, anyhow::Error>(())
1677 0 : };
1678 0 : func().context("apply_config setup cloud_admin")?;
1679 :
1680 0 : drop(client);
1681 :
1682 : // Reconnect with connstring with expected name
1683 0 : conf.connect(NoTls).await?
1684 : }
1685 0 : _ => return Err(e.into()),
1686 : },
1687 0 : Ok((client, conn)) => (client, conn),
1688 : };
1689 :
1690 0 : spawn(async move {
1691 0 : if let Err(e) = conn.await {
1692 0 : error!("maintenance client connection error: {}", e);
1693 0 : }
1694 0 : });
1695 :
1696 : // Disable DDL forwarding because control plane already knows about the roles/databases
1697 : // we're about to modify.
1698 0 : client
1699 0 : .simple_query("SET neon.forward_ddl = false")
1700 0 : .await
1701 0 : .context("apply_config SET neon.forward_ddl = false")?;
1702 :
1703 0 : Ok(client)
1704 0 : }
1705 :
1706 : /// Do initial configuration of the already started Postgres.
1707 : #[instrument(skip_all)]
1708 : pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
1709 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:apply_config"));
1710 :
1711 : let conf = Arc::new(conf);
1712 : let spec = Arc::new(
1713 : compute_state
1714 : .pspec
1715 : .as_ref()
1716 : .expect("spec must be set")
1717 : .spec
1718 : .clone(),
1719 : );
1720 :
1721 : let mut tls_config = None::<TlsConfig>;
1722 : if spec.features.contains(&ComputeFeature::TlsExperimental) {
1723 : tls_config = self.compute_ctl_config.tls.clone();
1724 : }
1725 :
1726 : self.update_installed_extensions_collection_interval(&spec);
1727 :
1728 : let max_concurrent_connections = self.max_service_connections(compute_state, &spec);
1729 :
1730 : // Merge-apply spec & changes to PostgreSQL state.
1731 : self.apply_spec_sql(spec.clone(), conf.clone(), max_concurrent_connections)?;
1732 :
1733 : if let Some(local_proxy) = &spec.clone().local_proxy_config {
1734 : let mut local_proxy = local_proxy.clone();
1735 : local_proxy.tls = tls_config.clone();
1736 :
1737 : info!("configuring local_proxy");
1738 : local_proxy::configure(&local_proxy).context("apply_config local_proxy")?;
1739 : }
1740 :
1741 : // Run migrations separately to not hold up cold starts
1742 0 : tokio::spawn(async move {
1743 0 : let mut conf = conf.as_ref().clone();
1744 0 : conf.application_name("compute_ctl:migrations");
1745 :
1746 0 : match conf.connect(NoTls).await {
1747 0 : Ok((mut client, connection)) => {
1748 0 : tokio::spawn(async move {
1749 0 : if let Err(e) = connection.await {
1750 0 : eprintln!("connection error: {e}");
1751 0 : }
1752 0 : });
1753 0 : if let Err(e) = handle_migrations(&mut client).await {
1754 0 : error!("Failed to run migrations: {}", e);
1755 0 : }
1756 : }
1757 0 : Err(e) => {
1758 0 : error!(
1759 0 : "Failed to connect to the compute for running migrations: {}",
1760 : e
1761 : );
1762 : }
1763 : };
1764 0 : });
1765 :
1766 : Ok::<(), anyhow::Error>(())
1767 : }
1768 :
1769 : // A wrapper around `pg_ctl reload`; note that right now we don't use
1770 : // `pg_ctl` for start / stop.
1771 : #[instrument(skip_all)]
1772 : fn pg_reload_conf(&self) -> Result<()> {
1773 : let pgctl_bin = Path::new(&self.params.pgbin)
1774 : .parent()
1775 : .unwrap()
1776 : .join("pg_ctl");
1777 : Command::new(pgctl_bin)
1778 : .args(["reload", "-D", &self.params.pgdata])
1779 : .output()
1780 : .expect("cannot run pg_ctl process");
1781 : Ok(())
1782 : }
1783 :
1784 : /// Similar to `apply_config()`, but performs a slightly different sequence of operations,
1785 : /// as it's used to reconfigure a previously started and configured Postgres node.
1786 : #[instrument(skip_all)]
1787 : pub fn reconfigure(&self) -> Result<()> {
1788 : let spec = self.state.lock().unwrap().pspec.clone().unwrap().spec;
1789 :
1790 : let tls_config = self.tls_config(&spec);
1791 :
1792 : self.update_installed_extensions_collection_interval(&spec);
1793 :
1794 : if let Some(ref pgbouncer_settings) = spec.pgbouncer_settings {
1795 : info!("tuning pgbouncer");
1796 :
1797 : let pgbouncer_settings = pgbouncer_settings.clone();
1798 : let tls_config = tls_config.clone();
1799 :
1800 : // Spawn a background task to do the tuning,
1801 : // so that we don't block the main thread that starts Postgres.
1802 0 : tokio::spawn(async move {
1803 0 : let res = tune_pgbouncer(pgbouncer_settings, tls_config).await;
1804 0 : if let Err(err) = res {
1805 0 : error!("error while tuning pgbouncer: {err:?}");
1806 0 : }
1807 0 : });
1808 : }
1809 :
1810 : if let Some(ref local_proxy) = spec.local_proxy_config {
1811 : info!("configuring local_proxy");
1812 :
1813 : // Spawn a background task to do the configuration,
1814 : // so that we don't block the main thread that starts Postgres.
1815 : let mut local_proxy = local_proxy.clone();
1816 : local_proxy.tls = tls_config.clone();
1817 0 : tokio::spawn(async move {
1818 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
1819 0 : error!("error while configuring local_proxy: {err:?}");
1820 0 : }
1821 0 : });
1822 : }
1823 :
1824 : // Reconfigure rsyslog for Postgres logs export
1825 : let conf = PostgresLogsRsyslogConfig::new(spec.logs_export_host.as_deref());
1826 : configure_postgres_logs_export(conf)?;
1827 :
1828 : // Write new config
1829 : let pgdata_path = Path::new(&self.params.pgdata);
1830 : config::write_postgres_conf(
1831 : pgdata_path,
1832 : &spec,
1833 : self.params.internal_http_port,
1834 : tls_config,
1835 : )?;
1836 :
1837 : self.pg_reload_conf()?;
1838 :
1839 : if !spec.skip_pg_catalog_updates {
1840 : let max_concurrent_connections = spec.reconfigure_concurrency;
1841 : // Temporarily reset max_cluster_size in config
1842 : // to avoid the possibility of hitting the limit, while we are reconfiguring:
1843 : // creating new extensions, roles, etc.
1844 0 : config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
1845 0 : self.pg_reload_conf()?;
1846 :
1847 0 : if spec.mode == ComputeMode::Primary {
1848 0 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:reconfigure"));
1849 0 : let conf = Arc::new(conf);
1850 :
1851 0 : let spec = Arc::new(spec.clone());
1852 :
1853 0 : self.apply_spec_sql(spec, conf, max_concurrent_connections)?;
1854 0 : }
1855 :
1856 0 : Ok(())
1857 0 : })?;
1858 : self.pg_reload_conf()?;
1859 : }
1860 :
1861 : let unknown_op = "unknown".to_string();
1862 : let op_id = spec.operation_uuid.as_ref().unwrap_or(&unknown_op);
1863 : info!(
1864 : "finished reconfiguration of compute node for operation {}",
1865 : op_id
1866 : );
1867 :
1868 : Ok(())
1869 : }
1870 :
1871 : #[instrument(skip_all)]
1872 : pub fn configure_as_primary(&self, compute_state: &ComputeState) -> Result<()> {
1873 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
1874 :
1875 : assert!(pspec.spec.mode == ComputeMode::Primary);
1876 : if !pspec.spec.skip_pg_catalog_updates {
1877 : let pgdata_path = Path::new(&self.params.pgdata);
1878 : // temporarily reset max_cluster_size in config
1879 : // to avoid the possibility of hitting the limit, while we are applying config:
1880 : // creating new extensions, roles, etc...
1881 0 : config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
1882 0 : self.pg_reload_conf()?;
1883 :
1884 0 : self.apply_config(compute_state)?;
1885 :
1886 0 : Ok(())
1887 0 : })?;
1888 :
1889 : let postgresql_conf_path = pgdata_path.join("postgresql.conf");
1890 : if config::line_in_file(
1891 : &postgresql_conf_path,
1892 : "neon.disable_logical_replication_subscribers=false",
1893 : )? {
1894 : info!(
1895 : "updated postgresql.conf to set neon.disable_logical_replication_subscribers=false"
1896 : );
1897 : }
1898 : self.pg_reload_conf()?;
1899 : }
1900 : self.post_apply_config()?;
1901 :
1902 : Ok(())
1903 : }
1904 :
1905 0 : pub async fn watch_cert_for_changes(self: Arc<Self>) {
1906 : // update status on cert renewal
1907 0 : if let Some(tls_config) = &self.compute_ctl_config.tls {
1908 0 : let tls_config = tls_config.clone();
1909 :
1910 : // wait until the cert exists.
1911 0 : let mut cert_watch = watch_cert_for_changes(tls_config.cert_path.clone()).await;
1912 :
1913 0 : tokio::task::spawn_blocking(move || {
1914 0 : let handle = tokio::runtime::Handle::current();
1915 : 'cert_update: loop {
1916 : // let postgres/pgbouncer/local_proxy know the new cert/key exists.
1917 : // we need to wait until it's configurable first.
1918 :
1919 0 : let mut state = self.state.lock().unwrap();
1920 : 'status_update: loop {
1921 0 : match state.status {
1922 : // let's update the state to config pending
1923 : ComputeStatus::ConfigurationPending | ComputeStatus::Running => {
1924 0 : state.set_status(
1925 0 : ComputeStatus::ConfigurationPending,
1926 0 : &self.state_changed,
1927 0 : );
1928 0 : break 'status_update;
1929 : }
1930 :
1931 : // exit loop
1932 : ComputeStatus::Failed
1933 : | ComputeStatus::TerminationPendingFast
1934 : | ComputeStatus::TerminationPendingImmediate
1935 0 : | ComputeStatus::Terminated => break 'cert_update,
1936 :
1937 : // wait
1938 : ComputeStatus::Init
1939 : | ComputeStatus::Configuration
1940 0 : | ComputeStatus::Empty => {
1941 0 : state = self.state_changed.wait(state).unwrap();
1942 0 : }
1943 : }
1944 : }
1945 0 : drop(state);
1946 :
1947 : // wait for a new certificate update
1948 0 : if handle.block_on(cert_watch.changed()).is_err() {
1949 0 : break;
1950 0 : }
1951 : }
1952 0 : });
1953 0 : }
1954 0 : }
1955 :
1956 0 : pub fn tls_config(&self, spec: &ComputeSpec) -> &Option<TlsConfig> {
1957 0 : if spec.features.contains(&ComputeFeature::TlsExperimental) {
1958 0 : &self.compute_ctl_config.tls
1959 : } else {
1960 0 : &None::<TlsConfig>
1961 : }
1962 0 : }
1963 :
1964 : /// Update the `last_active` in the shared state, but ensure that it's a more recent one.
1965 0 : pub fn update_last_active(&self, last_active: Option<DateTime<Utc>>) {
1966 0 : let mut state = self.state.lock().unwrap();
1967 : // NB: `Some(<DateTime>)` is always greater than `None`.
1968 0 : if last_active > state.last_active {
1969 0 : state.last_active = last_active;
1970 0 : debug!("set the last compute activity time to: {:?}", last_active);
1971 0 : }
1972 0 : }
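// Sketch of the `Option` ordering relied on above (not called anywhere):
// for any `T: Ord`, `None < Some(t)`, so a fresh `Some(now)` always
// supersedes an unset `last_active`.
#[allow(dead_code)]
fn option_ordering_sketch() {
    assert!(Some(1) > None::<i32>);
    assert!(Some(2) > Some(1));
}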
1973 :
1974 : // Look for core dumps and collect backtraces.
1975 : //
1976 : // EKS worker nodes have following core dump settings:
1977 : // /proc/sys/kernel/core_pattern -> core
1978 : // /proc/sys/kernel/core_uses_pid -> 1
1979 : // ulimit -c -> unlimited
1980 : // which results in core dumps being written to postgres data directory as core.<pid>.
1981 : //
1982 : // Use that as the default location and pattern, except on macOS, where core dumps are
1983 : // written to the /cores/ directory by default.
1984 : //
1985 : // With default Linux settings, the core dump file is called just "core", so check for
1986 : // that too.
1987 0 : pub fn check_for_core_dumps(&self) -> Result<()> {
1988 0 : let core_dump_dir = match std::env::consts::OS {
1989 0 : "macos" => Path::new("/cores/"),
1990 0 : _ => Path::new(&self.params.pgdata),
1991 : };
1992 :
1993 : // Collect core dump paths if any
1994 0 : info!("checking for core dumps in {}", core_dump_dir.display());
1995 0 : let files = fs::read_dir(core_dump_dir)?;
1996 0 : let cores = files.filter_map(|entry| {
1997 0 : let entry = entry.ok()?;
1998 :
1999 0 : let is_core_dump = match entry.file_name().to_str()? {
2000 0 : n if n.starts_with("core.") => true,
2001 0 : "core" => true,
2002 0 : _ => false,
2003 : };
2004 0 : if is_core_dump {
2005 0 : Some(entry.path())
2006 : } else {
2007 0 : None
2008 : }
2009 0 : });
2010 :
2011 : // Print backtrace for each core dump
2012 0 : for core_path in cores {
2013 0 : warn!(
2014 0 : "core dump found: {}, collecting backtrace",
2015 0 : core_path.display()
2016 : );
2017 :
2018 : // Try first with gdb
2019 0 : let backtrace = Command::new("gdb")
2020 0 : .args(["--batch", "-q", "-ex", "bt", &self.params.pgbin])
2021 0 : .arg(&core_path)
2022 0 : .output();
2023 :
2024 : // Try lldb if no gdb is found -- that is handy for local testing on macOS
2025 0 : let backtrace = match backtrace {
2026 0 : Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
2027 0 : warn!("cannot find gdb, trying lldb");
2028 0 : Command::new("lldb")
2029 0 : .arg("-c")
2030 0 : .arg(&core_path)
2031 0 : .args(["--batch", "-o", "bt all", "-o", "quit"])
2032 0 : .output()
2033 : }
2034 0 : _ => backtrace,
2035 0 : }?;
2036 :
2037 0 : warn!(
2038 0 : "core dump backtrace: {}",
2039 0 : String::from_utf8_lossy(&backtrace.stdout)
2040 : );
2041 0 : warn!(
2042 0 : "debugger stderr: {}",
2043 0 : String::from_utf8_lossy(&backtrace.stderr)
2044 : );
2045 : }
2046 :
2047 0 : Ok(())
2048 0 : }
2049 :
2050 : /// Select `pg_stat_statements` data and return it as stringified JSON.
2051 0 : pub async fn collect_insights(&self) -> String {
2052 0 : let mut result_rows: Vec<String> = Vec::new();
2053 0 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:collect_insights"));
2054 0 : let connect_result = conf.connect(NoTls).await;
2055 0 : let (client, connection) = connect_result.unwrap();
2056 0 : tokio::spawn(async move {
2057 0 : if let Err(e) = connection.await {
2058 0 : eprintln!("connection error: {e}");
2059 0 : }
2060 0 : });
2061 0 : let result = client
2062 0 : .simple_query(
2063 0 : "SELECT
2064 0 : row_to_json(pg_stat_statements)
2065 0 : FROM
2066 0 : pg_stat_statements
2067 0 : WHERE
2068 0 : userid != 'cloud_admin'::regrole::oid
2069 0 : ORDER BY
2070 0 : (mean_exec_time + mean_plan_time) DESC
2071 0 : LIMIT 100",
2072 0 : )
2073 0 : .await;
2074 :
2075 0 : if let Ok(raw_rows) = result {
2076 0 : for message in raw_rows.iter() {
2077 0 : if let postgres::SimpleQueryMessage::Row(row) = message {
2078 0 : if let Some(json) = row.get(0) {
2079 0 : result_rows.push(json.to_string());
2080 0 : }
2081 0 : }
2082 : }
2083 :
2084 0 : format!("{{\"pg_stat_statements\": [{}]}}", result_rows.join(","))
2085 : } else {
2086 0 : "{{\"pg_stat_statements\": []}}".to_string()
2087 : }
2088 0 : }
2089 :
2090 : // Download an archive, unzip it, and place the files in the correct locations.
2091 0 : pub async fn download_extension(
2092 0 : &self,
2093 0 : real_ext_name: String,
2094 0 : ext_path: RemotePath,
2095 0 : ) -> Result<u64, DownloadError> {
2096 0 : let remote_ext_base_url =
2097 0 : self.params
2098 0 : .remote_ext_base_url
2099 0 : .as_ref()
2100 0 : .ok_or(DownloadError::BadInput(anyhow::anyhow!(
2101 0 : "Remote extensions storage is not configured",
2102 0 : )))?;
2103 :
2104 0 : let ext_archive_name = ext_path.object_name().expect("bad path");
2105 :
2106 0 : let mut first_try = false;
2107 0 : if !self
2108 0 : .ext_download_progress
2109 0 : .read()
2110 0 : .expect("lock err")
2111 0 : .contains_key(ext_archive_name)
2112 0 : {
2113 0 : self.ext_download_progress
2114 0 : .write()
2115 0 : .expect("lock err")
2116 0 : .insert(ext_archive_name.to_string(), (Utc::now(), false));
2117 0 : first_try = true;
2118 0 : }
2119 0 : let (download_start, download_completed) =
2120 0 : self.ext_download_progress.read().expect("lock err")[ext_archive_name];
2121 0 : let start_time_delta = Utc::now()
2122 0 : .signed_duration_since(download_start)
2123 0 : .to_std()
2124 0 : .unwrap()
2125 0 : .as_millis() as u64;
2126 :
2127 : // how long to wait for extension download if it was started by another process
2128 : const HANG_TIMEOUT: u64 = 3000; // milliseconds
2129 :
2130 0 : if download_completed {
2131 0 : info!("extension already downloaded, skipping re-download");
2132 0 : return Ok(0);
2133 0 : } else if start_time_delta < HANG_TIMEOUT && !first_try {
2134 0 : info!(
2135 0 : "download {ext_archive_name} already started by another process, hanging untill completion or timeout"
2136 : );
2137 0 : let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500));
2138 : loop {
2139 0 : info!("waiting for download");
2140 0 : interval.tick().await;
2141 0 : let (_, download_completed_now) =
2142 0 : self.ext_download_progress.read().expect("lock")[ext_archive_name];
2143 0 : if download_completed_now {
2144 0 : info!("download finished by whoever else downloaded it");
2145 0 : return Ok(0);
2146 0 : }
2147 : }
2148 : // NOTE: the above loop will get terminated
2149 : // based on the timeout of the download function
2150 0 : }
2151 :
2152 : // if extension hasn't been downloaded before or the previous
2153 : // attempt to download was at least HANG_TIMEOUT ms ago
2154 : // then we try to download it here
2155 0 : info!("downloading new extension {ext_archive_name}");
2156 :
2157 0 : let download_size = extension_server::download_extension(
2158 0 : &real_ext_name,
2159 0 : &ext_path,
2160 0 : remote_ext_base_url,
2161 0 : &self.params.pgbin,
2162 0 : )
2163 0 : .await
2164 0 : .map_err(DownloadError::Other);
2165 :
2166 0 : if download_size.is_ok() {
2167 0 : self.ext_download_progress
2168 0 : .write()
2169 0 : .expect("bad lock")
2170 0 : .insert(ext_archive_name.to_string(), (download_start, true));
2171 0 : }
2172 :
2173 0 : download_size
2174 0 : }
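// Sketch (hypothetical, not called anywhere) of the coordination states used
// by `download_extension` above: a missing entry in the progress map means
// "first try", `(start, false)` means another task is downloading, and
// `(start, true)` means the download has completed.
#[allow(dead_code)]
fn download_progress_states_sketch() {
    let mut progress: HashMap<String, (DateTime<Utc>, bool)> = HashMap::new();
    assert!(!progress.contains_key("ext.tar.zst")); // first try
    progress.insert("ext.tar.zst".into(), (Utc::now(), false)); // in flight
    progress.insert("ext.tar.zst".into(), (Utc::now(), true)); // completed
    assert!(progress["ext.tar.zst"].1);
}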
2175 :
2176 0 : pub async fn set_role_grants(
2177 0 : &self,
2178 0 : db_name: &PgIdent,
2179 0 : schema_name: &PgIdent,
2180 0 : privileges: &[Privilege],
2181 0 : role_name: &PgIdent,
2182 0 : ) -> Result<()> {
2183 : use tokio_postgres::NoTls;
2184 :
2185 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:set_role_grants"));
2186 0 : conf.dbname(db_name);
2187 :
2188 0 : let (db_client, conn) = conf
2189 0 : .connect(NoTls)
2190 0 : .await
2191 0 : .context("Failed to connect to the database")?;
2192 0 : tokio::spawn(conn);
2193 :
2194 : // TODO: support other types of grants apart from schemas?
2195 :
2196 : // check the role grants first - to gracefully handle read-replicas.
2197 0 : let select = "SELECT privilege_type
2198 0 : FROM pg_namespace
2199 0 : JOIN LATERAL (SELECT * FROM aclexplode(nspacl) AS x) acl ON true
2200 0 : JOIN pg_user users ON acl.grantee = users.usesysid
2201 0 : WHERE users.usename = $1
2202 0 : AND nspname = $2";
2203 0 : let rows = db_client
2204 0 : .query(select, &[role_name, schema_name])
2205 0 : .await
2206 0 : .with_context(|| format!("Failed to execute query: {select}"))?;
2207 :
2208 0 : let already_granted: HashSet<String> = rows.into_iter().map(|row| row.get(0)).collect();
2209 :
2210 0 : let grants = privileges
2211 0 : .iter()
2212 0 : .filter(|p| !already_granted.contains(p.as_str()))
2213 : // Should not be quoted, as it's part of the command,
2214 : // and it is already sanitized, so it's OK.
2215 0 : .map(|p| p.as_str())
2216 0 : .join(", ");
2217 :
2218 0 : if !grants.is_empty() {
2219 : // quote the schema and role name as identifiers to sanitize them.
2220 0 : let schema_name = schema_name.pg_quote();
2221 0 : let role_name = role_name.pg_quote();
2222 :
2223 0 : let query = format!("GRANT {grants} ON SCHEMA {schema_name} TO {role_name}",);
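// e.g. (illustrative): GRANT USAGE, CREATE ON SCHEMA "app" TO "reader"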
2224 0 : db_client
2225 0 : .simple_query(&query)
2226 0 : .await
2227 0 : .with_context(|| format!("Failed to execute query: {query}"))?;
2228 0 : }
2229 :
2230 0 : Ok(())
2231 0 : }
2232 :
2233 0 : pub async fn install_extension(
2234 0 : &self,
2235 0 : ext_name: &PgIdent,
2236 0 : db_name: &PgIdent,
2237 0 : ext_version: ExtVersion,
2238 0 : ) -> Result<ExtVersion> {
2239 : use tokio_postgres::NoTls;
2240 :
2241 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:install_extension"));
2242 0 : conf.dbname(db_name);
2243 :
2244 0 : let (db_client, conn) = conf
2245 0 : .connect(NoTls)
2246 0 : .await
2247 0 : .context("Failed to connect to the database")?;
2248 0 : tokio::spawn(conn);
2249 :
2250 0 : let version_query = "SELECT extversion FROM pg_extension WHERE extname = $1";
2251 0 : let version: Option<ExtVersion> = db_client
2252 0 : .query_opt(version_query, &[&ext_name])
2253 0 : .await
2254 0 : .with_context(|| format!("Failed to execute query: {version_query}"))?
2255 0 : .map(|row| row.get(0));
2256 :
2257 : // sanitize the inputs as postgres idents.
2258 0 : let ext_name: String = ext_name.pg_quote();
2259 0 : let quoted_version: String = ext_version.pg_quote();
2260 :
2261 0 : if let Some(installed_version) = version {
2262 0 : if installed_version == ext_version {
2263 0 : return Ok(installed_version);
2264 0 : }
2265 0 : let query = format!("ALTER EXTENSION {ext_name} UPDATE TO {quoted_version}");
2266 0 : db_client
2267 0 : .simple_query(&query)
2268 0 : .await
2269 0 : .with_context(|| format!("Failed to execute query: {query}"))?;
2270 : } else {
2271 0 : let query =
2272 0 : format!("CREATE EXTENSION IF NOT EXISTS {ext_name} WITH VERSION {quoted_version}");
2273 0 : db_client
2274 0 : .simple_query(&query)
2275 0 : .await
2276 0 : .with_context(|| format!("Failed to execute query: {query}"))?;
2277 : }
2278 :
2279 0 : Ok(ext_version)
2280 0 : }
2281 :
2282 0 : pub async fn prepare_preload_libraries(
2283 0 : &self,
2284 0 : spec: &ComputeSpec,
2285 0 : ) -> Result<RemoteExtensionMetrics> {
2286 0 : if self.params.remote_ext_base_url.is_none() {
2287 0 : return Ok(RemoteExtensionMetrics {
2288 0 : num_ext_downloaded: 0,
2289 0 : largest_ext_size: 0,
2290 0 : total_ext_download_size: 0,
2291 0 : });
2292 0 : }
2293 0 : let remote_extensions = spec
2294 0 : .remote_extensions
2295 0 : .as_ref()
2296 0 : .ok_or(anyhow::anyhow!("Remote extensions are not configured"))?;
2297 :
2298 0 : info!("parse shared_preload_libraries from spec.cluster.settings");
2299 0 : let mut libs_vec = Vec::new();
2300 0 : if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
2301 0 : libs_vec = libs
2302 0 : .split(&[',', '\'', ' '])
2303 0 : .filter(|s| *s != "neon" && !s.is_empty())
2304 0 : .map(str::to_string)
2305 0 : .collect();
2306 0 : }
2307 0 : info!("parse shared_preload_libraries from provided postgresql.conf");
2308 :
2309 : // This path is used in neon_local and Python tests (see the sketch after this block).
2310 0 : if let Some(conf) = &spec.cluster.postgresql_conf {
2311 0 : let conf_lines = conf.split('\n').collect::<Vec<&str>>();
2312 0 : let mut shared_preload_libraries_line = "";
2313 0 : for line in conf_lines {
2314 0 : if line.starts_with("shared_preload_libraries") {
2315 0 : shared_preload_libraries_line = line;
2316 0 : }
2317 : }
2318 0 : let mut preload_libs_vec = Vec::new();
2319 0 : if let Some(libs) = shared_preload_libraries_line.split("='").nth(1) {
2320 0 : preload_libs_vec = libs
2321 0 : .split(&[',', '\'', ' '])
2322 0 : .filter(|s| *s != "neon" && !s.is_empty())
2323 0 : .map(str::to_string)
2324 0 : .collect();
2325 0 : }
2326 0 : libs_vec.extend(preload_libs_vec);
2327 0 : }
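// Sketch (hypothetical, not called anywhere) of what the line parsing above
// does with a typical setting from postgresql.conf:
#[allow(dead_code)]
fn parse_preload_libs_sketch() {
    let line = "shared_preload_libraries='neon,pg_stat_statements'";
    let libs: Vec<String> = line
        .split("='")
        .nth(1)
        .unwrap()
        .split(&[',', '\'', ' '])
        .filter(|s| *s != "neon" && !s.is_empty())
        .map(str::to_string)
        .collect();
    assert_eq!(libs, vec!["pg_stat_statements".to_string()]);
}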
2328 :
2329 : // Don't try to download libraries that are not in the index.
2330 : // Assume that they are already present locally.
2331 0 : libs_vec.retain(|lib| remote_extensions.library_index.contains_key(lib));
2332 :
2333 0 : info!("Downloading to shared preload libraries: {:?}", &libs_vec);
2334 :
2335 0 : let mut download_tasks = Vec::new();
2336 0 : for library in &libs_vec {
2337 0 : let (ext_name, ext_path) =
2338 0 : remote_extensions.get_ext(library, true, &BUILD_TAG, &self.params.pgversion)?;
2339 0 : download_tasks.push(self.download_extension(ext_name, ext_path));
2340 : }
2341 0 : let results = join_all(download_tasks).await;
2342 :
2343 0 : let mut remote_ext_metrics = RemoteExtensionMetrics {
2344 0 : num_ext_downloaded: 0,
2345 0 : largest_ext_size: 0,
2346 0 : total_ext_download_size: 0,
2347 0 : };
2348 0 : for result in results {
2349 0 : let download_size = match result {
2350 0 : Ok(res) => {
2351 0 : remote_ext_metrics.num_ext_downloaded += 1;
2352 0 : res
2353 : }
2354 0 : Err(err) => {
2355 : // if we failed to download an extension, we don't want to fail the whole
2356 : // process, but we do want to log the error
2357 0 : error!("Failed to download extension: {}", err);
2358 0 : 0
2359 : }
2360 : };
2361 :
2362 0 : remote_ext_metrics.largest_ext_size =
2363 0 : std::cmp::max(remote_ext_metrics.largest_ext_size, download_size);
2364 0 : remote_ext_metrics.total_ext_download_size += download_size;
2365 : }
2366 0 : Ok(remote_ext_metrics)
2367 0 : }
2368 :
2369 : /// Waits until the current thread receives a state-changed notification and
2370 : /// the pageserver connection string has changed.
2371 : ///
2372 : /// The operation will time out after a specified duration.
2373 0 : pub fn wait_timeout_while_pageserver_connstr_unchanged(&self, duration: Duration) {
2374 0 : let state = self.state.lock().unwrap();
2375 0 : let old_pageserver_connstr = state
2376 0 : .pspec
2377 0 : .as_ref()
2378 0 : .expect("spec must be set")
2379 0 : .pageserver_connstr
2380 0 : .clone();
2381 0 : let mut unchanged = true;
2382 0 : let _ = self
2383 0 : .state_changed
2384 0 : .wait_timeout_while(state, duration, |s| {
2385 0 : let pageserver_connstr = &s
2386 0 : .pspec
2387 0 : .as_ref()
2388 0 : .expect("spec must be set")
2389 0 : .pageserver_connstr;
2390 0 : unchanged = pageserver_connstr == &old_pageserver_connstr;
2391 0 : unchanged
2392 0 : })
2393 0 : .unwrap();
2394 0 : if !unchanged {
2395 0 : info!("Pageserver config changed");
2396 0 : }
2397 0 : }
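// Sketch (hypothetical, not called anywhere) of the Condvar pattern above:
// `wait_timeout_while` blocks while the predicate returns true, waking up on
// `notify_*` or when the timeout elapses.
#[allow(dead_code)]
fn wait_timeout_while_sketch() {
    let lock = Mutex::new(true); // "connstr unchanged" flag
    let cvar = Condvar::new();
    let guard = lock.lock().unwrap();
    // With no notifier, this simply times out while the predicate holds.
    let (guard, res) = cvar
        .wait_timeout_while(guard, Duration::from_millis(10), |unchanged| *unchanged)
        .unwrap();
    assert!(res.timed_out());
    assert!(*guard);
}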
2398 :
2399 0 : pub fn spawn_extension_stats_task(&self) {
2400 0 : self.terminate_extension_stats_task();
2401 :
2402 0 : let conf = self.tokio_conn_conf.clone();
2403 0 : let atomic_interval = self.params.installed_extensions_collection_interval.clone();
2404 0 : let mut installed_extensions_collection_interval =
2405 0 : 2 * atomic_interval.load(std::sync::atomic::Ordering::SeqCst);
2406 0 : info!(
2407 0 : "[NEON_EXT_SPAWN] Spawning background installed extensions worker with Timeout: {}",
2408 : installed_extensions_collection_interval
2409 : );
2410 0 : let handle = tokio::spawn(async move {
2411 : loop {
2412 0 : info!(
2413 0 : "[NEON_EXT_INT_SLEEP]: Interval: {}",
2414 : installed_extensions_collection_interval
2415 : );
2416 : // Sleep at the start of the loop to ensure that two collections don't happen at the same time.
2417 : // The first collection happens during compute startup.
2418 0 : tokio::time::sleep(tokio::time::Duration::from_secs(
2419 0 : installed_extensions_collection_interval,
2420 0 : ))
2421 0 : .await;
2422 0 : let _ = installed_extensions(conf.clone()).await;
2423 : // Acquire a read lock on the compute spec and then update the interval if necessary
2424 0 : installed_extensions_collection_interval = std::cmp::max(
2425 0 : installed_extensions_collection_interval,
2426 0 : 2 * atomic_interval.load(std::sync::atomic::Ordering::SeqCst),
2427 0 : );
2428 : }
2429 : });
2430 :
2431 : // Store the new task handle
2432 0 : *self.extension_stats_task.lock().unwrap() = Some(handle);
2433 0 : }
2434 :
2435 0 : fn terminate_extension_stats_task(&self) {
2436 0 : if let Some(h) = self.extension_stats_task.lock().unwrap().take() {
2437 0 : h.abort()
2438 0 : }
2439 0 : }
2440 :
2441 0 : pub fn spawn_lfc_offload_task(self: &Arc<Self>, interval: Duration) {
2442 0 : self.terminate_lfc_offload_task();
2443 0 : let secs = interval.as_secs();
2444 0 : info!("spawning lfc offload worker with {secs}s interval");
2445 0 : let this = self.clone();
2446 0 : let handle = spawn(async move {
2447 0 : let mut interval = time::interval(interval);
2448 0 : interval.tick().await; // returns immediately
2449 : loop {
2450 0 : interval.tick().await;
2451 0 : this.offload_lfc_async().await;
2452 : }
2453 : });
2454 0 : *self.lfc_offload_task.lock().unwrap() = Some(handle);
2455 0 : }
2456 :
2457 0 : fn terminate_lfc_offload_task(&self) {
2458 0 : if let Some(h) = self.lfc_offload_task.lock().unwrap().take() {
2459 0 : h.abort()
2460 0 : }
2461 0 : }
2462 :
2463 0 : fn update_installed_extensions_collection_interval(&self, spec: &ComputeSpec) {
2464 : // Update the interval for collecting installed extensions statistics.
2465 : // If the value is -1, we never suspend, so use the default collection interval.
2466 : // If the value is 0, it means "default", so we just continue to use the default.
2467 0 : if spec.suspend_timeout_seconds == -1 || spec.suspend_timeout_seconds == 0 {
2468 0 : self.params.installed_extensions_collection_interval.store(
2469 0 : DEFAULT_INSTALLED_EXTENSIONS_COLLECTION_INTERVAL,
2470 0 : std::sync::atomic::Ordering::SeqCst,
2471 0 : );
2472 0 : } else {
2473 0 : self.params.installed_extensions_collection_interval.store(
2474 0 : spec.suspend_timeout_seconds as u64,
2475 0 : std::sync::atomic::Ordering::SeqCst,
2476 0 : );
2477 0 : }
2478 0 : }
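// Pure-function restatement of the rule above (hypothetical, not called
// anywhere): -1 (never suspend) and 0 (default) both map to the default
// interval; any positive timeout is used as-is.
#[allow(dead_code)]
fn effective_collection_interval(suspend_timeout_seconds: i64) -> u64 {
    match suspend_timeout_seconds {
        -1 | 0 => DEFAULT_INSTALLED_EXTENSIONS_COLLECTION_INTERVAL,
        s => s as u64,
    }
}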
2479 : }
2480 :
2481 0 : pub async fn installed_extensions(conf: tokio_postgres::Config) -> Result<()> {
2482 0 : let res = get_installed_extensions(conf).await;
2483 0 : match res {
2484 0 : Ok(extensions) => {
2485 0 : info!(
2486 0 : "[NEON_EXT_STAT] {}",
2487 0 : serde_json::to_string(&extensions).expect("failed to serialize extensions list")
2488 : );
2489 : }
2490 0 : Err(err) => error!("could not get installed extensions: {err:?}"),
2491 : }
2492 0 : Ok(())
2493 0 : }
2494 :
2495 0 : pub fn forward_termination_signal(dev_mode: bool) {
2496 0 : let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
2497 0 : if ss_pid != 0 {
2498 0 : let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
2499 0 : kill(ss_pid, Signal::SIGTERM).ok();
2500 0 : }
2501 :
2502 0 : if !dev_mode {
2503 : // Terminate pgbouncer with SIGKILL
2504 0 : match pid_file::read(PGBOUNCER_PIDFILE.into()) {
2505 0 : Ok(pid_file::PidFileRead::LockedByOtherProcess(pid)) => {
2506 0 : info!("sending SIGKILL to pgbouncer process pid: {}", pid);
2507 0 : if let Err(e) = kill(pid, Signal::SIGKILL) {
2508 0 : error!("failed to terminate pgbouncer: {}", e);
2509 0 : }
2510 : }
2511 : // pgbouncer does not lock the pid file, so we read and kill the process directly
2512 : Ok(pid_file::PidFileRead::NotHeldByAnyProcess(_)) => {
2513 0 : if let Ok(pid_str) = std::fs::read_to_string(PGBOUNCER_PIDFILE) {
2514 0 : if let Ok(pid) = pid_str.trim().parse::<i32>() {
2515 0 : info!(
2516 0 : "sending SIGKILL to pgbouncer process pid: {} (from unlocked pid file)",
2517 : pid
2518 : );
2519 0 : if let Err(e) = kill(Pid::from_raw(pid), Signal::SIGKILL) {
2520 0 : error!("failed to terminate pgbouncer: {}", e);
2521 0 : }
2522 0 : }
2523 : } else {
2524 0 : info!("pgbouncer pid file exists but process not running");
2525 : }
2526 : }
2527 : Ok(pid_file::PidFileRead::NotExist) => {
2528 0 : info!("pgbouncer pid file not found, process may not be running");
2529 : }
2530 0 : Err(e) => {
2531 0 : error!("error reading pgbouncer pid file: {}", e);
2532 : }
2533 : }
2534 :
2535 : // Terminate local_proxy
2536 0 : match pid_file::read("/etc/local_proxy/pid".into()) {
2537 0 : Ok(pid_file::PidFileRead::LockedByOtherProcess(pid)) => {
2538 0 : info!("sending SIGTERM to local_proxy process pid: {}", pid);
2539 0 : if let Err(e) = kill(pid, Signal::SIGTERM) {
2540 0 : error!("failed to terminate local_proxy: {}", e);
2541 0 : }
2542 : }
2543 : Ok(pid_file::PidFileRead::NotHeldByAnyProcess(_)) => {
2544 0 : info!("local_proxy PID file exists but process not running");
2545 : }
2546 : Ok(pid_file::PidFileRead::NotExist) => {
2547 0 : info!("local_proxy PID file not found, process may not be running");
2548 : }
2549 0 : Err(e) => {
2550 0 : error!("error reading local_proxy PID file: {}", e);
2551 : }
2552 : }
2553 : } else {
2554 0 : info!("Skipping pgbouncer and local_proxy termination because in dev mode");
2555 : }
2556 :
2557 0 : let pg_pid = PG_PID.load(Ordering::SeqCst);
2558 0 : if pg_pid != 0 {
2559 0 : let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
2560 0 : // Use 'fast' shutdown (SIGINT) because it also creates a shutdown checkpoint, which is important for
2561 0 : // read-only replicas (ROs) to get a list of running xacts faster instead of going through the CLOG.
2562 0 : // See https://www.postgresql.org/docs/current/server-shutdown.html for the list of modes and signals.
2563 0 : kill(pg_pid, Signal::SIGINT).ok();
2564 0 : }
2565 0 : }
2566 :
2567 : // Helper trait to call JoinSet::spawn_blocking(f), but propagate the current
2568 : // tracing span to the spawned thread.
2569 : trait JoinSetExt<T> {
2570 : fn spawn_blocking_child<F>(&mut self, f: F) -> tokio::task::AbortHandle
2571 : where
2572 : F: FnOnce() -> T + Send + 'static,
2573 : T: Send;
2574 : }
2575 :
2576 : impl<T: 'static> JoinSetExt<T> for tokio::task::JoinSet<T> {
2577 0 : fn spawn_blocking_child<F>(&mut self, f: F) -> tokio::task::AbortHandle
2578 0 : where
2579 0 : F: FnOnce() -> T + Send + 'static,
2580 0 : T: Send,
2581 : {
2582 0 : let sp = tracing::Span::current();
2583 0 : self.spawn_blocking(move || {
2584 0 : let _e = sp.enter();
2585 0 : f()
2586 0 : })
2587 0 : }
2588 : }
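// Usage sketch for `JoinSetExt` (illustrative only; assumes it runs inside a
// tokio runtime): the closure executes on a blocking thread but still logs
// inside the caller's tracing span.
#[allow(dead_code)]
async fn join_set_ext_usage_sketch() {
    let mut js = tokio::task::JoinSet::new();
    {
        let span = tracing::info_span!("parent");
        let _guard = span.enter();
        // The current span ("parent") is captured when the task is spawned.
        js.spawn_blocking_child(|| {
            info!("logs inside the 'parent' span");
            42
        });
    }
    let out = js.join_next().await.unwrap().unwrap();
    assert_eq!(out, 42);
}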
2589 :
2590 : #[cfg(test)]
2591 : mod tests {
2592 : use std::fs::File;
2593 :
2594 : use super::*;
2595 :
2596 : #[test]
2597 1 : fn duplicate_safekeeper_connstring() {
2598 1 : let file = File::open("tests/cluster_spec.json").unwrap();
2599 1 : let spec: ComputeSpec = serde_json::from_reader(file).unwrap();
2600 :
2601 1 : match ParsedSpec::try_from(spec.clone()) {
2602 0 : Ok(_p) => panic!("Failed to detect duplicate entry"),
2603 1 : Err(e) => assert!(e.starts_with("duplicate entry in safekeeper_connstrings:")),
2604 : };
2605 1 : }
2606 : }