Line data Source code
1 : use anyhow::{Context, Result};
2 : use chrono::{DateTime, Utc};
3 : use compute_api::privilege::Privilege;
4 : use compute_api::responses::{
5 : ComputeConfig, ComputeCtlConfig, ComputeMetrics, ComputeStatus, LfcOffloadState,
6 : LfcPrewarmState,
7 : };
8 : use compute_api::spec::{
9 : ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, ExtVersion, PgIdent,
10 : };
11 : use futures::StreamExt;
12 : use futures::future::join_all;
13 : use futures::stream::FuturesUnordered;
14 : use itertools::Itertools;
15 : use nix::sys::signal::{Signal, kill};
16 : use nix::unistd::Pid;
17 : use once_cell::sync::Lazy;
18 : use postgres;
19 : use postgres::NoTls;
20 : use postgres::error::SqlState;
21 : use remote_storage::{DownloadError, RemotePath};
22 : use std::collections::{HashMap, HashSet};
23 : use std::net::SocketAddr;
24 : use std::os::unix::fs::{PermissionsExt, symlink};
25 : use std::path::Path;
26 : use std::process::{Command, Stdio};
27 : use std::str::FromStr;
28 : use std::sync::atomic::{AtomicU32, Ordering};
29 : use std::sync::{Arc, Condvar, Mutex, RwLock};
30 : use std::time::{Duration, Instant};
31 : use std::{env, fs};
32 : use tokio::spawn;
33 : use tracing::{Instrument, debug, error, info, instrument, warn};
34 : use utils::id::{TenantId, TimelineId};
35 : use utils::lsn::Lsn;
36 : use utils::measured_stream::MeasuredReader;
37 :
38 : use crate::configurator::launch_configurator;
39 : use crate::disk_quota::set_disk_quota;
40 : use crate::installed_extensions::get_installed_extensions;
41 : use crate::logger::startup_context_from_env;
42 : use crate::lsn_lease::launch_lsn_lease_bg_task_for_static;
43 : use crate::metrics::COMPUTE_CTL_UP;
44 : use crate::monitor::launch_monitor;
45 : use crate::pg_helpers::*;
46 : use crate::rsyslog::{
47 : PostgresLogsRsyslogConfig, configure_audit_rsyslog, configure_postgres_logs_export,
48 : launch_pgaudit_gc,
49 : };
50 : use crate::spec::*;
51 : use crate::swap::resize_swap;
52 : use crate::sync_sk::{check_if_synced, ping_safekeeper};
53 : use crate::tls::watch_cert_for_changes;
54 : use crate::{config, extension_server, local_proxy};
55 :
56 : pub static SYNC_SAFEKEEPERS_PID: AtomicU32 = AtomicU32::new(0);
57 : pub static PG_PID: AtomicU32 = AtomicU32::new(0);
58 : // This is an arbitrary build tag. It is fine as a default / for testing
59 : // purposes, in case the BUILD_TAG environment variable is not set.
60 : const BUILD_TAG_DEFAULT: &str = "latest";
61 : /// Build tag/version of the compute node binaries/image. It's tricky and ugly
62 : /// to pass it everywhere as a part of `ComputeNodeParams`, so we use a
63 : /// global static variable.
64 0 : pub static BUILD_TAG: Lazy<String> = Lazy::new(|| {
65 0 : option_env!("BUILD_TAG")
66 0 : .unwrap_or(BUILD_TAG_DEFAULT)
67 0 : .to_string()
68 0 : });
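// Build-time sketch: `option_env!` is expanded when compute_ctl itself is
// compiled, so (assuming a plain cargo invocation) something like
//
//     BUILD_TAG=release-1234 cargo build
//
// bakes "release-1234" into the binary, while an unset variable falls back
// to BUILD_TAG_DEFAULT ("latest") at runtime.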
69 :
70 : /// Static configuration params that don't change after startup. These mostly
71 : /// come from the CLI args, or are derived from them.
72 : pub struct ComputeNodeParams {
73 : /// The ID of the compute
74 : pub compute_id: String,
75 : // The `Url` type maintains proper escaping.
76 : pub connstr: url::Url,
77 :
78 : pub resize_swap_on_bind: bool,
79 : pub set_disk_quota_for_fs: Option<String>,
80 :
81 : // VM monitor parameters
82 : #[cfg(target_os = "linux")]
83 : pub filecache_connstr: String,
84 : #[cfg(target_os = "linux")]
85 : pub cgroup: String,
86 : #[cfg(target_os = "linux")]
87 : pub vm_monitor_addr: String,
88 :
89 : pub pgdata: String,
90 : pub pgbin: String,
91 : pub pgversion: String,
92 :
93 : /// The port that the compute's external HTTP server listens on
94 : pub external_http_port: u16,
95 : /// The port that the compute's internal HTTP server listens on
96 : pub internal_http_port: u16,
97 :
98 : /// The address of the extension storage proxy gateway.
99 : pub remote_ext_base_url: Option<String>,
100 : }
101 :
102 : /// Compute node info shared across several `compute_ctl` threads.
103 : pub struct ComputeNode {
104 : pub params: ComputeNodeParams,
105 :
106 : // We connect to Postgres from many different places, so build configs once
107 : // and reuse them where needed. These are derived from 'params.connstr'
108 : pub conn_conf: postgres::config::Config,
109 : pub tokio_conn_conf: tokio_postgres::config::Config,
110 :
111 : /// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
112 : /// To allow the HTTP API server to serve status requests while configuration
113 : /// is in progress, the lock should be held only for short periods of time to
114 : /// do a read or write, not for the whole configuration process.
115 : pub state: Mutex<ComputeState>,
116 : /// `Condvar` to allow notifying waiters about state changes.
117 : pub state_changed: Condvar,
118 :
119 : // key: ext_archive_name, value: (download start time, download_completed flag)
120 : pub ext_download_progress: RwLock<HashMap<String, (DateTime<Utc>, bool)>>,
121 : pub compute_ctl_config: ComputeCtlConfig,
122 : }
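// A minimal sketch of how waiters are expected to consume `state` together
// with `state_changed` (this is the same Mutex + Condvar pattern that
// `wait_spec()` below uses; `node: &ComputeNode` is assumed):
//
//     let mut state = node.state.lock().unwrap();
//     while state.status != ComputeStatus::ConfigurationPending {
//         state = node.state_changed.wait(state).unwrap();
//     }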
123 :
124 : // Store some metrics about download sizes that might impact startup time.
125 : #[derive(Clone, Debug)]
126 : pub struct RemoteExtensionMetrics {
127 : num_ext_downloaded: u64,
128 : largest_ext_size: u64,
129 : total_ext_download_size: u64,
130 : }
131 :
132 : #[derive(Clone, Debug)]
133 : pub struct ComputeState {
134 : pub start_time: DateTime<Utc>,
135 : pub status: ComputeStatus,
136 : /// Timestamp of the last Postgres activity. It could be `None` if the
137 : /// compute hasn't been used since start.
138 : pub last_active: Option<DateTime<Utc>>,
139 : pub error: Option<String>,
140 :
141 : /// Compute spec. This can be received from the CLI or - more likely -
142 : /// passed by the control plane with a /configure HTTP request.
143 : pub pspec: Option<ParsedSpec>,
144 :
145 : /// If the spec is passed by a /configure request, 'startup_span' is the
146 : /// /configure request's tracing span. The main thread enters it when it
147 : /// processes the compute startup, so that the compute startup is considered
148 : /// to be part of the /configure request for tracing purposes.
149 : ///
150 : /// If the request handling thread/task called start_compute() directly,
151 : /// it would automatically be a child of the request handling span, and we
152 : /// wouldn't need this. But because we use the main thread to perform the
153 : /// startup, and the /configure task just waits for it to finish, we need to
154 : /// set up the span relationship ourselves.
155 : pub startup_span: Option<tracing::span::Span>,
156 :
157 : pub lfc_prewarm_state: LfcPrewarmState,
158 : pub lfc_offload_state: LfcOffloadState,
159 :
160 : pub metrics: ComputeMetrics,
161 : }
162 :
163 : impl ComputeState {
164 0 : pub fn new() -> Self {
165 0 : Self {
166 0 : start_time: Utc::now(),
167 0 : status: ComputeStatus::Empty,
168 0 : last_active: None,
169 0 : error: None,
170 0 : pspec: None,
171 0 : startup_span: None,
172 0 : metrics: ComputeMetrics::default(),
173 0 : lfc_prewarm_state: LfcPrewarmState::default(),
174 0 : lfc_offload_state: LfcOffloadState::default(),
175 0 : }
176 0 : }
177 :
178 0 : pub fn set_status(&mut self, status: ComputeStatus, state_changed: &Condvar) {
179 0 : let prev = self.status;
180 0 : info!("Changing compute status from {} to {}", prev, status);
181 0 : self.status = status;
182 0 : state_changed.notify_all();
183 0 :
184 0 : COMPUTE_CTL_UP.reset();
185 0 : COMPUTE_CTL_UP
186 0 : .with_label_values(&[&BUILD_TAG, status.to_string().as_str()])
187 0 : .set(1);
188 0 : }
189 :
190 0 : pub fn set_failed_status(&mut self, err: anyhow::Error, state_changed: &Condvar) {
191 0 : self.error = Some(format!("{err:?}"));
192 0 : self.set_status(ComputeStatus::Failed, state_changed);
193 0 : }
194 : }
195 :
196 : impl Default for ComputeState {
197 0 : fn default() -> Self {
198 0 : Self::new()
199 0 : }
200 : }
201 :
202 : #[derive(Clone, Debug)]
203 : pub struct ParsedSpec {
204 : pub spec: ComputeSpec,
205 : pub tenant_id: TenantId,
206 : pub timeline_id: TimelineId,
207 : pub pageserver_connstr: String,
208 : pub safekeeper_connstrings: Vec<String>,
209 : pub storage_auth_token: Option<String>,
210 : pub endpoint_storage_addr: Option<SocketAddr>,
211 : pub endpoint_storage_token: Option<String>,
212 : }
213 :
214 : impl TryFrom<ComputeSpec> for ParsedSpec {
215 : type Error = String;
216 0 : fn try_from(spec: ComputeSpec) -> Result<Self, String> {
217 : // Extract the options from the spec file that are needed to connect to
218 : // the storage system.
219 : //
220 : // For backwards-compatibility, the top-level fields in the spec file
221 : // may be empty. In that case, we need to dig them from the GUCs in the
222 : // cluster.settings field.
223 0 : let pageserver_connstr = spec
224 0 : .pageserver_connstring
225 0 : .clone()
226 0 : .or_else(|| spec.cluster.settings.find("neon.pageserver_connstring"))
227 0 : .ok_or("pageserver connstr should be provided")?;
228 0 : let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() {
229 0 : if matches!(spec.mode, ComputeMode::Primary) {
230 0 : spec.cluster
231 0 : .settings
232 0 : .find("neon.safekeepers")
233 0 : .ok_or("safekeeper connstrings should be provided")?
234 0 : .split(',')
235 0 : .map(|str| str.to_string())
236 0 : .collect()
237 : } else {
238 0 : vec![]
239 : }
240 : } else {
241 0 : spec.safekeeper_connstrings.clone()
242 : };
243 0 : let storage_auth_token = spec.storage_auth_token.clone();
244 0 : let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
245 0 : tenant_id
246 : } else {
247 0 : spec.cluster
248 0 : .settings
249 0 : .find("neon.tenant_id")
250 0 : .ok_or("tenant id should be provided")
251 0 : .map(|s| TenantId::from_str(&s))?
252 0 : .or(Err("invalid tenant id"))?
253 : };
254 0 : let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
255 0 : timeline_id
256 : } else {
257 0 : spec.cluster
258 0 : .settings
259 0 : .find("neon.timeline_id")
260 0 : .ok_or("timeline id should be provided")
261 0 : .map(|s| TimelineId::from_str(&s))?
262 0 : .or(Err("invalid timeline id"))?
263 : };
264 :
265 0 : let endpoint_storage_addr: Option<SocketAddr> = spec
266 0 : .endpoint_storage_addr
267 0 : .clone()
268 0 : .or_else(|| spec.cluster.settings.find("neon.endpoint_storage_addr"))
269 0 : .unwrap_or_default()
270 0 : .parse()
271 0 : .ok();
272 0 : let endpoint_storage_token = spec
273 0 : .endpoint_storage_token
274 0 : .clone()
275 0 : .or_else(|| spec.cluster.settings.find("neon.endpoint_storage_token"));
276 0 :
277 0 : Ok(ParsedSpec {
278 0 : spec,
279 0 : pageserver_connstr,
280 0 : safekeeper_connstrings,
281 0 : storage_auth_token,
282 0 : tenant_id,
283 0 : timeline_id,
284 0 : endpoint_storage_addr,
285 0 : endpoint_storage_token,
286 0 : })
287 0 : }
288 : }
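// Fallback sketch (hypothetical spec contents): a spec with `tenant_id: None`
// still parses as long as `cluster.settings` carries the GUC, e.g.
//
//     spec.cluster.settings: [.., ("neon.tenant_id", "9e4c8f36..."), ..]
//     let pspec = ParsedSpec::try_from(spec)?; // digs the id out of the GUC
//
// while a malformed value surfaces as Err("invalid tenant id").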
289 :
290 : /// If we are a VM, returns a [`Command`] that will run in the `neon-postgres`
291 : /// cgroup. Otherwise returns the default `Command::new(cmd)`
292 : ///
293 : /// This function should be used to start postgres, as it will start it in the
294 : /// neon-postgres cgroup if we are a VM. This allows autoscaling to control
295 : /// postgres' resource usage. The cgroup will exist in VMs because vm-builder
296 : /// creates it during the sysinit phase of its inittab.
297 0 : fn maybe_cgexec(cmd: &str) -> Command {
298 0 : // The cplane sets this env var for autoscaling computes.
299 0 : // Use `var_os` so we don't have to worry about the variable being valid
300 0 : // Unicode. That should never be a concern, but just in case.
301 0 : if env::var_os("AUTOSCALING").is_some() {
302 0 : let mut command = Command::new("cgexec");
303 0 : command.args(["-g", "memory:neon-postgres"]);
304 0 : command.arg(cmd);
305 0 : command
306 : } else {
307 0 : Command::new(cmd)
308 : }
309 0 : }
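// Usage sketch (hypothetical paths): callers chain onto the returned
// `Command` exactly as they would with `Command::new`:
//
//     let mut pg = maybe_cgexec("/usr/local/bin/postgres");
//     pg.args(["-D", "/var/db/pgdata"]);
//
// With AUTOSCALING set in the environment, this spawns
// `cgexec -g memory:neon-postgres /usr/local/bin/postgres -D /var/db/pgdata`.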
310 :
311 : struct PostgresHandle {
312 : postgres: std::process::Child,
313 : log_collector: tokio::task::JoinHandle<Result<()>>,
314 : }
315 :
316 : impl PostgresHandle {
317 : /// Return PID of the postgres (postmaster) process
318 0 : fn pid(&self) -> Pid {
319 0 : Pid::from_raw(self.postgres.id() as i32)
320 0 : }
321 : }
322 :
323 : struct StartVmMonitorResult {
324 : #[cfg(target_os = "linux")]
325 : token: tokio_util::sync::CancellationToken,
326 : #[cfg(target_os = "linux")]
327 : vm_monitor: Option<tokio::task::JoinHandle<Result<()>>>,
328 : }
329 :
330 : impl ComputeNode {
331 0 : pub fn new(params: ComputeNodeParams, config: ComputeConfig) -> Result<Self> {
332 0 : let connstr = params.connstr.as_str();
333 0 : let mut conn_conf = postgres::config::Config::from_str(connstr)
334 0 : .context("cannot build postgres config from connstr")?;
335 0 : let mut tokio_conn_conf = tokio_postgres::config::Config::from_str(connstr)
336 0 : .context("cannot build tokio postgres config from connstr")?;
337 :
338 : // Users can set some configuration parameters per database with
339 : // ALTER DATABASE ... SET ...
340 : //
341 : // There are at least these parameters:
342 : //
343 : // - role=some_other_role
344 : // - default_transaction_read_only=on
345 : // - statement_timeout=1, i.e., 1ms, which will cause most of the queries to fail
346 : // - search_path=non_public_schema, this should actually be safe because
347 : // we don't call any functions in user databases, but it's better to always
348 : // reset it to public.
349 : //
350 : // that can affect `compute_ctl` and prevent it from properly configuring the database schema.
351 : // Unset them via connection string options before connecting to the database.
352 : // N.B. keep it in sync with `ZENITH_OPTIONS` in `get_maintenance_client()`.
353 : //
354 : // TODO(ololobus): we currently pass `-c default_transaction_read_only=off` from control plane
355 : // as well. After rolling out this code, we can remove this parameter from control plane.
356 : // In the meantime, double-passing is fine, the last value is applied.
357 : // See: <https://github.com/neondatabase/cloud/blob/133dd8c4dbbba40edfbad475bf6a45073ca63faf/goapp/controlplane/internal/pkg/compute/provisioner/provisioner_common.go#L70>
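// For example, if the connstr already carried `options=-c foo=bar`
// (a hypothetical GUC), the effective options become
// `-c foo=bar -c role=cloud_admin ... -c statement_timeout=0`,
// and for any GUC passed twice Postgres keeps the last value.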
358 : const EXTRA_OPTIONS: &str = "-c role=cloud_admin -c default_transaction_read_only=off -c search_path=public -c statement_timeout=0";
359 0 : let options = match conn_conf.get_options() {
360 0 : Some(options) => format!("{} {}", options, EXTRA_OPTIONS),
361 0 : None => EXTRA_OPTIONS.to_string(),
362 : };
363 0 : conn_conf.options(&options);
364 0 : tokio_conn_conf.options(&options);
365 0 :
366 0 : let mut new_state = ComputeState::new();
367 0 : if let Some(spec) = config.spec {
368 0 : let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow::anyhow!(msg))?;
369 0 : new_state.pspec = Some(pspec);
370 0 : }
371 :
372 0 : Ok(ComputeNode {
373 0 : params,
374 0 : conn_conf,
375 0 : tokio_conn_conf,
376 0 : state: Mutex::new(new_state),
377 0 : state_changed: Condvar::new(),
378 0 : ext_download_progress: RwLock::new(HashMap::new()),
379 0 : compute_ctl_config: config.compute_ctl_config,
380 0 : })
381 0 : }
382 :
383 : /// Top-level control flow of compute_ctl. Returns a process exit code we should
384 : /// exit with.
385 0 : pub fn run(self) -> Result<Option<i32>> {
386 0 : let this = Arc::new(self);
387 0 :
388 0 : let cli_spec = this.state.lock().unwrap().pspec.clone();
389 0 :
390 0 : // If this is a pooled VM, prewarm before starting HTTP server and becoming
391 0 : // available for binding. Prewarming helps Postgres start quicker later,
392 0 : // because QEMU will already have its memory allocated from the host, and
393 0 : // the necessary binaries will already be cached.
394 0 : if cli_spec.is_none() {
395 0 : this.prewarm_postgres()?;
396 0 : }
397 :
398 : // Set the up metric with Empty status before starting the HTTP server.
399 : // That way on the first metric scrape, an external observer will see us
400 : // as 'up' and 'empty' (unless the compute was started with a spec or
401 : // already configured by control plane).
402 0 : COMPUTE_CTL_UP
403 0 : .with_label_values(&[&BUILD_TAG, ComputeStatus::Empty.to_string().as_str()])
404 0 : .set(1);
405 0 :
406 0 : // Launch the external HTTP server first, so that we can serve control plane
407 0 : // requests while configuration is still in progress.
408 0 : crate::http::server::Server::External {
409 0 : port: this.params.external_http_port,
410 0 : config: this.compute_ctl_config.clone(),
411 0 : compute_id: this.params.compute_id.clone(),
412 0 : }
413 0 : .launch(&this);
414 0 :
415 0 : // The internal HTTP server could be launched later, but there isn't much
416 0 : // sense in waiting.
417 0 : crate::http::server::Server::Internal {
418 0 : port: this.params.internal_http_port,
419 0 : }
420 0 : .launch(&this);
421 :
422 : // If we got a spec from the CLI already, use that. Otherwise wait for the
423 : // control plane to pass it to us with a /configure HTTP request
424 0 : let pspec = if let Some(cli_spec) = cli_spec {
425 0 : cli_spec
426 : } else {
427 0 : this.wait_spec()?
428 : };
429 :
430 0 : launch_lsn_lease_bg_task_for_static(&this);
431 0 :
432 0 : // We have a spec, start the compute
433 0 : let mut delay_exit = false;
434 0 : let mut vm_monitor = None;
435 0 : let mut pg_process: Option<PostgresHandle> = None;
436 0 :
437 0 : match this.start_compute(&mut pg_process) {
438 0 : Ok(()) => {
439 0 : // Success! Launch remaining services (just vm-monitor currently)
440 0 : vm_monitor =
441 0 : Some(this.start_vm_monitor(pspec.spec.disable_lfc_resizing.unwrap_or(false)));
442 0 : }
443 0 : Err(err) => {
444 0 : // Something went wrong with the startup. Log it and expose the error to
445 0 : // HTTP status requests.
446 0 : error!("could not start the compute node: {:#}", err);
447 0 : this.set_failed_status(err);
448 0 : delay_exit = true;
449 :
450 : // If the error happened after starting PostgreSQL, kill it
451 0 : if let Some(ref pg_process) = pg_process {
452 0 : kill(pg_process.pid(), Signal::SIGQUIT).ok();
453 0 : }
454 : }
455 : }
456 :
457 : // If startup was successful, or it failed in the late stages,
458 : // PostgreSQL is now running. Wait until it exits.
459 0 : let exit_code = if let Some(pg_handle) = pg_process {
460 0 : let exit_status = this.wait_postgres(pg_handle);
461 0 : info!("Postgres exited with code {}, shutting down", exit_status);
462 0 : exit_status.code()
463 : } else {
464 0 : None
465 : };
466 :
467 : // Terminate the vm_monitor so it releases the file watcher on
468 : // /sys/fs/cgroup/neon-postgres.
469 : // Note: the vm-monitor only runs on linux because it requires cgroups.
470 0 : if let Some(vm_monitor) = vm_monitor {
471 : cfg_if::cfg_if! {
472 : if #[cfg(target_os = "linux")] {
473 : // Kills all threads spawned by the monitor
474 0 : vm_monitor.token.cancel();
475 0 : if let Some(handle) = vm_monitor.vm_monitor {
476 0 : // Kills the actual task running the monitor
477 0 : handle.abort();
478 0 : }
479 : } else {
480 : _ = vm_monitor; // appease unused lint on macOS
481 : }
482 : }
483 0 : }
484 :
485 : // Reap the postgres process
486 0 : delay_exit |= this.cleanup_after_postgres_exit()?;
487 :
488 : // If launch failed, keep serving HTTP requests for a while, so the cloud
489 : // control plane can get the actual error.
490 0 : if delay_exit {
491 0 : info!("giving control plane 30s to collect the error before shutdown");
492 0 : std::thread::sleep(Duration::from_secs(30));
493 0 : }
494 0 : Ok(exit_code)
495 0 : }
496 :
497 0 : pub fn wait_spec(&self) -> Result<ParsedSpec> {
498 0 : info!("no compute spec provided, waiting");
499 0 : let mut state = self.state.lock().unwrap();
500 0 : while state.status != ComputeStatus::ConfigurationPending {
501 0 : state = self.state_changed.wait(state).unwrap();
502 0 : }
503 :
504 0 : info!("got spec, continue configuration");
505 0 : let spec = state.pspec.as_ref().unwrap().clone();
506 0 :
507 0 : // Record for how long we slept waiting for the spec.
508 0 : let now = Utc::now();
509 0 : state.metrics.wait_for_spec_ms = now
510 0 : .signed_duration_since(state.start_time)
511 0 : .to_std()
512 0 : .unwrap()
513 0 : .as_millis() as u64;
514 0 :
515 0 : // Reset start time, so that the total startup time that is calculated later will
516 0 : // not include the time that we waited for the spec.
517 0 : state.start_time = now;
518 0 :
519 0 : Ok(spec)
520 0 : }
521 :
522 : /// Start compute.
523 : ///
524 : /// Prerequisites:
525 : /// - the compute spec has been placed in self.state.pspec
526 : ///
527 : /// On success:
528 : /// - status is set to ComputeStatus::Running
529 : /// - self.running_postgres is set
530 : ///
531 : /// On error:
532 : /// - status is left in ComputeStatus::Init. The caller is responsible for setting it to Failed
533 : /// - if Postgres was started before the fatal error happened, self.running_postgres is
534 : /// set. The caller is responsible for killing it.
535 : ///
536 : /// Note that this is in the critical path of a compute cold start. Keep this fast.
537 : /// Try to do things concurrently, to hide the latencies.
538 0 : fn start_compute(self: &Arc<Self>, pg_handle: &mut Option<PostgresHandle>) -> Result<()> {
539 0 : let compute_state: ComputeState;
540 0 :
541 0 : let start_compute_span;
542 0 : let _this_entered;
543 0 : {
544 0 : let mut state_guard = self.state.lock().unwrap();
545 :
546 : // Create a tracing span for the startup operation.
547 : //
548 : // We could otherwise just annotate the function with #[instrument], but if
549 : // we're being configured from a /configure HTTP request, we want the
550 : // startup to be considered part of the /configure request.
551 : //
552 : // Similarly, if a trace ID was passed in env variables, attach it to the span.
553 0 : start_compute_span = {
554 : // Temporarily enter the parent span, so that the new span becomes its child.
555 0 : if let Some(p) = state_guard.startup_span.take() {
556 0 : let _parent_entered = p.entered();
557 0 : tracing::info_span!("start_compute")
558 0 : } else if let Some(otel_context) = startup_context_from_env() {
559 : use tracing_opentelemetry::OpenTelemetrySpanExt;
560 0 : let span = tracing::info_span!("start_compute");
561 0 : span.set_parent(otel_context);
562 0 : span
563 : } else {
564 0 : tracing::info_span!("start_compute")
565 : }
566 : };
567 0 : _this_entered = start_compute_span.enter();
568 0 :
569 0 : state_guard.set_status(ComputeStatus::Init, &self.state_changed);
570 0 : compute_state = state_guard.clone()
571 0 : }
572 0 :
573 0 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
574 0 : info!(
575 0 : "starting compute for project {}, operation {}, tenant {}, timeline {}, project {}, branch {}, endpoint {}, features {:?}, spec.remote_extensions {:?}",
576 0 : "starting compute for cluster {}, operation {}, tenant {}, timeline {}, project {}, branch {}, endpoint {}, features {:?}, spec.remote_extensions {:?}",
577 0 : pspec.spec.operation_uuid.as_deref().unwrap_or("None"),
578 0 : pspec.tenant_id,
579 0 : pspec.timeline_id,
580 0 : pspec.spec.project_id.as_deref().unwrap_or("None"),
581 0 : pspec.spec.branch_id.as_deref().unwrap_or("None"),
582 0 : pspec.spec.endpoint_id.as_deref().unwrap_or("None"),
583 : pspec.spec.features,
584 : pspec.spec.remote_extensions,
585 : );
586 :
587 : ////// PRE-STARTUP PHASE: things that need to be finished before we start the Postgres process
588 :
589 : // Collect all the tasks that must finish here
590 0 : let mut pre_tasks = tokio::task::JoinSet::new();
591 0 :
592 0 : // Make sure TLS certificates are properly loaded and in the right place.
593 0 : if self.compute_ctl_config.tls.is_some() {
594 0 : let this = self.clone();
595 0 : pre_tasks.spawn(async move {
596 0 : this.watch_cert_for_changes().await;
597 :
598 0 : Ok::<(), anyhow::Error>(())
599 0 : });
600 0 : }
601 :
602 : // If there are any remote extensions in shared_preload_libraries, start downloading them
603 0 : if pspec.spec.remote_extensions.is_some() {
604 0 : let (this, spec) = (self.clone(), pspec.spec.clone());
605 0 : pre_tasks.spawn(async move {
606 0 : this.download_preload_extensions(&spec)
607 0 : .in_current_span()
608 0 : .await
609 0 : });
610 0 : }
611 :
612 : // Prepare pgdata directory. This downloads the basebackup, among other things.
613 0 : {
614 0 : let (this, cs) = (self.clone(), compute_state.clone());
615 0 : pre_tasks.spawn_blocking_child(move || this.prepare_pgdata(&cs));
616 0 : }
617 :
618 : // Resize swap to the desired size if the compute spec says so
619 0 : if let (Some(size_bytes), true) =
620 0 : (pspec.spec.swap_size_bytes, self.params.resize_swap_on_bind)
621 0 : {
622 0 : pre_tasks.spawn_blocking_child(move || {
623 0 : // To avoid 'swapoff' hitting postgres startup, we need to run resize-swap to completion
624 0 : // *before* starting postgres.
625 0 : //
626 0 : // In theory, we could do this asynchronously if SkipSwapon was enabled for VMs, but this
627 0 : // carries a risk of introducing hard-to-debug issues - e.g. if postgres sometimes gets
628 0 : // OOM-killed during startup because swap wasn't available yet.
629 0 : resize_swap(size_bytes).context("failed to resize swap")?;
630 0 : let size_mib = size_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
631 0 : info!(%size_bytes, %size_mib, "resized swap");
632 :
633 0 : Ok::<(), anyhow::Error>(())
634 0 : });
635 0 : }
636 :
637 : // Set disk quota if the compute spec says so
638 0 : if let (Some(disk_quota_bytes), Some(disk_quota_fs_mountpoint)) = (
639 0 : pspec.spec.disk_quota_bytes,
640 0 : self.params.set_disk_quota_for_fs.as_ref(),
641 0 : ) {
642 0 : let disk_quota_fs_mountpoint = disk_quota_fs_mountpoint.clone();
643 0 : pre_tasks.spawn_blocking_child(move || {
644 0 : set_disk_quota(disk_quota_bytes, &disk_quota_fs_mountpoint)
645 0 : .context("failed to set disk quota")?;
646 0 : let size_mib = disk_quota_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
647 0 : info!(%disk_quota_bytes, %size_mib, "set disk quota");
648 :
649 0 : Ok::<(), anyhow::Error>(())
650 0 : });
651 0 : }
652 :
653 : // tune pgbouncer
654 0 : if let Some(pgbouncer_settings) = &pspec.spec.pgbouncer_settings {
655 0 : info!("tuning pgbouncer");
656 :
657 0 : let pgbouncer_settings = pgbouncer_settings.clone();
658 0 : let tls_config = self.compute_ctl_config.tls.clone();
659 0 :
660 0 : // Spawn a background task to do the tuning,
661 0 : // so that we don't block the main thread that starts Postgres.
662 0 : let _handle = tokio::spawn(async move {
663 0 : let res = tune_pgbouncer(pgbouncer_settings, tls_config).await;
664 0 : if let Err(err) = res {
665 0 : error!("error while tuning pgbouncer: {err:?}");
666 : // Continue with the startup anyway
667 0 : }
668 0 : });
669 0 : }
670 :
671 : // configure local_proxy
672 0 : if let Some(local_proxy) = &pspec.spec.local_proxy_config {
673 0 : info!("configuring local_proxy");
674 :
675 : // Spawn a background task to do the configuration,
676 : // so that we don't block the main thread that starts Postgres.
677 0 : let local_proxy = local_proxy.clone();
678 0 : let _handle = tokio::spawn(async move {
679 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
680 0 : error!("error while configuring local_proxy: {err:?}");
681 : // Continue with the startup anyway
682 0 : }
683 0 : });
684 0 : }
685 :
686 : // Configure and start rsyslog for compliance audit logging
687 0 : match pspec.spec.audit_log_level {
688 : ComputeAudit::Hipaa | ComputeAudit::Extended | ComputeAudit::Full => {
689 0 : let remote_endpoint =
690 0 : std::env::var("AUDIT_LOGGING_ENDPOINT").unwrap_or("".to_string());
691 0 : if remote_endpoint.is_empty() {
692 0 : anyhow::bail!("AUDIT_LOGGING_ENDPOINT is empty");
693 0 : }
694 0 :
695 0 : let log_directory_path = Path::new(&self.params.pgdata).join("log");
696 0 : let log_directory_path = log_directory_path.to_string_lossy().to_string();
697 :
698 : // Add a project_id/endpoint_id tag to identify the logs.
699 : //
700 : // These ids are passed from the cplane; for backwards
701 : // compatibility (old computes may not have them),
702 : // we fall back to None.
703 : // TODO: Clean up this code when all computes have them.
704 0 : let tag: Option<String> = match (
705 0 : pspec.spec.project_id.as_deref(),
706 0 : pspec.spec.endpoint_id.as_deref(),
707 : ) {
708 0 : (Some(project_id), Some(endpoint_id)) => {
709 0 : Some(format!("{project_id}/{endpoint_id}"))
710 : }
711 0 : (Some(project_id), None) => Some(format!("{project_id}/None")),
712 0 : (None, Some(endpoint_id)) => Some(format!("None/{endpoint_id}")),
713 0 : (None, None) => None,
714 : };
715 :
716 0 : configure_audit_rsyslog(log_directory_path.clone(), tag, &remote_endpoint)?;
717 :
718 : // Launch a background task to clean up the audit logs
719 0 : launch_pgaudit_gc(log_directory_path);
720 : }
721 0 : _ => {}
722 : }
723 :
724 : // Configure and start rsyslog for Postgres logs export
725 0 : let conf = PostgresLogsRsyslogConfig::new(pspec.spec.logs_export_host.as_deref());
726 0 : configure_postgres_logs_export(conf)?;
727 :
728 : // Launch remaining service threads
729 0 : let _monitor_handle = launch_monitor(self);
730 0 : let _configurator_handle = launch_configurator(self);
731 0 :
732 0 : // Wait for all the pre-tasks to finish before starting postgres
733 0 : let rt = tokio::runtime::Handle::current();
734 0 : while let Some(res) = rt.block_on(pre_tasks.join_next()) {
735 0 : res??;
736 : }
737 :
738 : ////// START POSTGRES
739 0 : let start_time = Utc::now();
740 0 : let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
741 0 : let postmaster_pid = pg_process.pid();
742 0 : *pg_handle = Some(pg_process);
743 0 :
744 0 : // If this is a primary endpoint, perform some post-startup configuration before
745 0 : // opening it up for the world.
746 0 : let config_time = Utc::now();
747 0 : if pspec.spec.mode == ComputeMode::Primary {
748 0 : self.configure_as_primary(&compute_state)?;
749 :
750 0 : let conf = self.get_tokio_conn_conf(None);
751 0 : tokio::task::spawn(async {
752 0 : let res = get_installed_extensions(conf).await;
753 0 : match res {
754 0 : Ok(extensions) => {
755 0 : info!(
756 0 : "[NEON_EXT_STAT] {}",
757 0 : serde_json::to_string(&extensions)
758 0 : .expect("failed to serialize extensions list")
759 : );
760 : }
761 0 : Err(err) => error!("could not get installed extensions: {err:?}"),
762 : }
763 0 : });
764 0 : }
765 :
766 : // All done!
767 0 : let startup_end_time = Utc::now();
768 0 : let metrics = {
769 0 : let mut state = self.state.lock().unwrap();
770 0 : state.metrics.start_postgres_ms = config_time
771 0 : .signed_duration_since(start_time)
772 0 : .to_std()
773 0 : .unwrap()
774 0 : .as_millis() as u64;
775 0 : state.metrics.config_ms = startup_end_time
776 0 : .signed_duration_since(config_time)
777 0 : .to_std()
778 0 : .unwrap()
779 0 : .as_millis() as u64;
780 0 : state.metrics.total_startup_ms = startup_end_time
781 0 : .signed_duration_since(compute_state.start_time)
782 0 : .to_std()
783 0 : .unwrap()
784 0 : .as_millis() as u64;
785 0 : state.metrics.clone()
786 0 : };
787 0 : self.set_status(ComputeStatus::Running);
788 0 :
789 0 : // Log metrics so that we can search for slow operations in logs
790 0 : info!(?metrics, postmaster_pid = %postmaster_pid, "compute start finished");
791 :
792 0 : if pspec.spec.prewarm_lfc_on_startup {
793 0 : self.prewarm_lfc();
794 0 : }
795 0 : Ok(())
796 0 : }
797 :
798 : #[instrument(skip_all)]
799 : async fn download_preload_extensions(&self, spec: &ComputeSpec) -> Result<()> {
800 : let remote_extensions = if let Some(remote_extensions) = &spec.remote_extensions {
801 : remote_extensions
802 : } else {
803 : return Ok(());
804 : };
805 :
806 : // First, create control files for all available extensions
807 : extension_server::create_control_files(remote_extensions, &self.params.pgbin);
808 :
809 : let library_load_start_time = Utc::now();
810 : let remote_ext_metrics = self.prepare_preload_libraries(spec).await?;
811 :
812 : let library_load_time = Utc::now()
813 : .signed_duration_since(library_load_start_time)
814 : .to_std()
815 : .unwrap()
816 : .as_millis() as u64;
817 : let mut state = self.state.lock().unwrap();
818 : state.metrics.load_ext_ms = library_load_time;
819 : state.metrics.num_ext_downloaded = remote_ext_metrics.num_ext_downloaded;
820 : state.metrics.largest_ext_size = remote_ext_metrics.largest_ext_size;
821 : state.metrics.total_ext_download_size = remote_ext_metrics.total_ext_download_size;
822 : info!(
823 : "Loading shared_preload_libraries took {:?}ms",
824 : library_load_time
825 : );
826 : info!("{:?}", remote_ext_metrics);
827 :
828 : Ok(())
829 : }
830 :
831 : /// Start the vm-monitor if directed to. The vm-monitor only runs on linux
832 : /// because it requires cgroups.
833 0 : fn start_vm_monitor(&self, disable_lfc_resizing: bool) -> StartVmMonitorResult {
834 : cfg_if::cfg_if! {
835 : if #[cfg(target_os = "linux")] {
836 : use std::env;
837 : use tokio_util::sync::CancellationToken;
838 :
839 : // This token is used internally by the monitor to clean up all threads
840 0 : let token = CancellationToken::new();
841 :
842 : // don't pass postgres connection string to vm-monitor if we don't want it to resize LFC
843 0 : let pgconnstr = if disable_lfc_resizing {
844 0 : None
845 : } else {
846 0 : Some(self.params.filecache_connstr.clone())
847 : };
848 :
849 0 : let vm_monitor = if env::var_os("AUTOSCALING").is_some() {
850 0 : let vm_monitor = tokio::spawn(vm_monitor::start(
851 0 : Box::leak(Box::new(vm_monitor::Args {
852 0 : cgroup: Some(self.params.cgroup.clone()),
853 0 : pgconnstr,
854 0 : addr: self.params.vm_monitor_addr.clone(),
855 0 : })),
856 0 : token.clone(),
857 0 : ));
858 0 : Some(vm_monitor)
859 : } else {
860 0 : None
861 : };
862 0 : StartVmMonitorResult { token, vm_monitor }
863 0 : } else {
864 0 : _ = disable_lfc_resizing; // appease unused lint on macOS
865 0 : StartVmMonitorResult { }
866 0 : }
867 0 : }
868 0 : }
869 :
870 0 : fn cleanup_after_postgres_exit(&self) -> Result<bool> {
871 0 : // Maybe sync safekeepers again, to speed up next startup
872 0 : let compute_state = self.state.lock().unwrap().clone();
873 0 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
874 0 : if matches!(pspec.spec.mode, compute_api::spec::ComputeMode::Primary) {
875 0 : info!("syncing safekeepers on shutdown");
876 0 : let storage_auth_token = pspec.storage_auth_token.clone();
877 0 : let lsn = self.sync_safekeepers(storage_auth_token)?;
878 0 : info!("synced safekeepers at lsn {lsn}");
879 0 : }
880 :
881 0 : let mut delay_exit = false;
882 0 : let mut state = self.state.lock().unwrap();
883 0 : if state.status == ComputeStatus::TerminationPending {
884 0 : state.status = ComputeStatus::Terminated;
885 0 : self.state_changed.notify_all();
886 0 : // we were asked to terminate gracefully, don't exit to avoid restart
887 0 : delay_exit = true
888 0 : }
889 0 : drop(state);
890 :
891 0 : if let Err(err) = self.check_for_core_dumps() {
892 0 : error!("error while checking for core dumps: {err:?}");
893 0 : }
894 :
895 0 : Ok(delay_exit)
896 0 : }
897 :
898 : /// Check that compute node has corresponding feature enabled.
899 0 : pub fn has_feature(&self, feature: ComputeFeature) -> bool {
900 0 : let state = self.state.lock().unwrap();
901 :
902 0 : if let Some(s) = state.pspec.as_ref() {
903 0 : s.spec.features.contains(&feature)
904 : } else {
905 0 : false
906 : }
907 0 : }
908 :
909 0 : pub fn set_status(&self, status: ComputeStatus) {
910 0 : let mut state = self.state.lock().unwrap();
911 0 : state.set_status(status, &self.state_changed);
912 0 : }
913 :
914 0 : pub fn set_failed_status(&self, err: anyhow::Error) {
915 0 : let mut state = self.state.lock().unwrap();
916 0 : state.set_failed_status(err, &self.state_changed);
917 0 : }
918 :
919 0 : pub fn get_status(&self) -> ComputeStatus {
920 0 : self.state.lock().unwrap().status
921 0 : }
922 :
923 0 : pub fn get_timeline_id(&self) -> Option<TimelineId> {
924 0 : self.state
925 0 : .lock()
926 0 : .unwrap()
927 0 : .pspec
928 0 : .as_ref()
929 0 : .map(|s| s.timeline_id)
930 0 : }
931 :
932 : // Remove the `pgdata` directory and create it again with the right permissions.
933 0 : fn create_pgdata(&self) -> Result<()> {
934 0 : // Ignore removal error, likely it is a 'No such file or directory (os error 2)'.
935 0 : // If it is something different, create_dir() will error out anyway.
936 0 : let pgdata = &self.params.pgdata;
937 0 : let _ok = fs::remove_dir_all(pgdata);
938 0 : fs::create_dir(pgdata)?;
939 0 : fs::set_permissions(pgdata, fs::Permissions::from_mode(0o700))?;
940 :
941 0 : Ok(())
942 0 : }
943 :
944 : // Get a basebackup from the pageserver over a libpq connection and
945 : // unpack it into the `pgdata` directory, overwriting all its previous content.
946 : #[instrument(skip_all, fields(%lsn))]
947 : fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
948 : let spec = compute_state.pspec.as_ref().expect("spec must be set");
949 : let start_time = Instant::now();
950 :
951 : let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
952 : let mut config = postgres::Config::from_str(shard0_connstr)?;
953 :
954 : // Use the storage auth token from the config file, if given.
955 : // Note: this overrides any password set in the connection string.
956 : if let Some(storage_auth_token) = &spec.storage_auth_token {
957 : info!("Got storage auth token from spec file");
958 : config.password(storage_auth_token);
959 : } else {
960 : info!("Storage auth token not set");
961 : }
962 :
963 : config.application_name("compute_ctl");
964 : if let Some(spec) = &compute_state.pspec {
965 : config.options(&format!(
966 : "-c neon.compute_mode={}",
967 : spec.spec.mode.to_type_str()
968 : ));
969 : }
970 :
971 : // Connect to pageserver
972 : let mut client = config.connect(NoTls)?;
973 : let pageserver_connect_micros = start_time.elapsed().as_micros() as u64;
974 :
975 : let basebackup_cmd = match lsn {
976 : Lsn(0) => {
977 : if spec.spec.mode != ComputeMode::Primary {
978 : format!(
979 : "basebackup {} {} --gzip --replica",
980 : spec.tenant_id, spec.timeline_id
981 : )
982 : } else {
983 : format!("basebackup {} {} --gzip", spec.tenant_id, spec.timeline_id)
984 : }
985 : }
986 : _ => {
987 : if spec.spec.mode != ComputeMode::Primary {
988 : format!(
989 : "basebackup {} {} {} --gzip --replica",
990 : spec.tenant_id, spec.timeline_id, lsn
991 : )
992 : } else {
993 : format!(
994 : "basebackup {} {} {} --gzip",
995 : spec.tenant_id, spec.timeline_id, lsn
996 : )
997 : }
998 : }
999 : };
1000 :
1001 : let copyreader = client.copy_out(basebackup_cmd.as_str())?;
1002 : let mut measured_reader = MeasuredReader::new(copyreader);
1003 : let mut bufreader = std::io::BufReader::new(&mut measured_reader);
1004 :
1005 : // Read the archive directly from the `CopyOutReader`
1006 : //
1007 : // Set `ignore_zeros` so that unpack() reads all the Copy data and
1008 : // doesn't stop at the end-of-archive marker. Otherwise, if the server
1009 : // sends an Error after finishing the tarball, we will not notice it.
1010 : let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut bufreader));
1011 : ar.set_ignore_zeros(true);
1012 : ar.unpack(&self.params.pgdata)?;
1013 :
1014 : // Report metrics
1015 : let mut state = self.state.lock().unwrap();
1016 : state.metrics.pageserver_connect_micros = pageserver_connect_micros;
1017 : state.metrics.basebackup_bytes = measured_reader.get_byte_count() as u64;
1018 : state.metrics.basebackup_ms = start_time.elapsed().as_millis() as u64;
1019 : Ok(())
1020 : }
1021 :
1022 : // Gets the basebackup in a retry loop
1023 : #[instrument(skip_all, fields(%lsn))]
1024 : pub fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
1025 : let mut retry_period_ms = 500.0;
1026 : let mut attempts = 0;
1027 : const DEFAULT_ATTEMPTS: u16 = 10;
1028 : #[cfg(feature = "testing")]
1029 : let max_attempts = if let Ok(v) = env::var("NEON_COMPUTE_TESTING_BASEBACKUP_RETRIES") {
1030 : u16::from_str(&v).unwrap()
1031 : } else {
1032 : DEFAULT_ATTEMPTS
1033 : };
1034 : #[cfg(not(feature = "testing"))]
1035 : let max_attempts = DEFAULT_ATTEMPTS;
1036 : loop {
1037 : let result = self.try_get_basebackup(compute_state, lsn);
1038 : match result {
1039 : Ok(_) => {
1040 : return result;
1041 : }
1042 : Err(ref e) if attempts < max_attempts => {
1043 : warn!(
1044 : "Failed to get basebackup: {} (attempt {}/{})",
1045 : e, attempts + 1, max_attempts
1046 : );
1047 : std::thread::sleep(std::time::Duration::from_millis(retry_period_ms as u64));
1048 : retry_period_ms *= 1.5;
1049 : }
1050 : Err(_) => {
1051 : return result;
1052 : }
1053 : }
1054 : attempts += 1;
1055 : }
1056 : }
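// Backoff sketch: with the 500 ms initial delay and the 1.5x multiplier, the
// sleeps run 500, 750, 1125, ... ms; over the default 10 retried attempts
// that is roughly 57 s of total sleep before the final, non-retried call.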
1057 :
1058 0 : pub async fn check_safekeepers_synced_async(
1059 0 : &self,
1060 0 : compute_state: &ComputeState,
1061 0 : ) -> Result<Option<Lsn>> {
1062 0 : // Construct a connection config for each safekeeper
1063 0 : let pspec: ParsedSpec = compute_state
1064 0 : .pspec
1065 0 : .as_ref()
1066 0 : .expect("spec must be set")
1067 0 : .clone();
1068 0 : let sk_connstrs: Vec<String> = pspec.safekeeper_connstrings.clone();
1069 0 : let sk_configs = sk_connstrs.into_iter().map(|connstr| {
1070 0 : // Format connstr
1071 0 : let id = connstr.clone();
1072 0 : let connstr = format!("postgresql://no_user@{}", connstr);
1073 0 : let options = format!(
1074 0 : "-c timeline_id={} tenant_id={}",
1075 0 : pspec.timeline_id, pspec.tenant_id
1076 0 : );
1077 0 :
1078 0 : // Construct client
1079 0 : let mut config = tokio_postgres::Config::from_str(&connstr).unwrap();
1080 0 : config.options(&options);
1081 0 : if let Some(storage_auth_token) = pspec.storage_auth_token.clone() {
1082 0 : config.password(storage_auth_token);
1083 0 : }
1084 :
1085 0 : (id, config)
1086 0 : });
1087 0 :
1088 0 : // Create task set to query all safekeepers
1089 0 : let mut tasks = FuturesUnordered::new();
1090 0 : let quorum = sk_configs.len() / 2 + 1;
1091 0 : for (id, config) in sk_configs {
1092 0 : let timeout = tokio::time::Duration::from_millis(100);
1093 0 : let task = tokio::time::timeout(timeout, ping_safekeeper(id, config));
1094 0 : tasks.push(tokio::spawn(task));
1095 0 : }
1096 :
1097 : // Get a quorum of responses or errors
1098 0 : let mut responses = Vec::new();
1099 0 : let mut join_errors = Vec::new();
1100 0 : let mut task_errors = Vec::new();
1101 0 : let mut timeout_errors = Vec::new();
1102 0 : while let Some(response) = tasks.next().await {
1103 0 : match response {
1104 0 : Ok(Ok(Ok(r))) => responses.push(r),
1105 0 : Ok(Ok(Err(e))) => task_errors.push(e),
1106 0 : Ok(Err(e)) => timeout_errors.push(e),
1107 0 : Err(e) => join_errors.push(e),
1108 : };
1109 0 : if responses.len() >= quorum {
1110 0 : break;
1111 0 : }
1112 0 : if join_errors.len() + task_errors.len() + timeout_errors.len() >= quorum {
1113 0 : break;
1114 0 : }
1115 : }
1116 :
1117 : // In case of error, log and fail the check, but don't crash.
1118 : // We're playing it safe because these errors could be transient
1119 : // and we don't yet retry. Also being careful here allows us to
1120 : // be backwards compatible with safekeepers that don't have the
1121 : // TIMELINE_STATUS API yet.
1122 0 : if responses.len() < quorum {
1123 0 : error!(
1124 0 : "failed sync safekeepers check {:?} {:?} {:?}",
1125 : join_errors, task_errors, timeout_errors
1126 : );
1127 0 : return Ok(None);
1128 0 : }
1129 0 :
1130 0 : Ok(check_if_synced(responses))
1131 0 : }
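// Quorum sketch: for n safekeepers, quorum = n / 2 + 1 (e.g. 2 out of 3).
// The loop above therefore stops as soon as either a quorum of successful
// responses or a quorum of failures (join/task/timeout errors) accumulates.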
1132 :
1133 : // Fast path for sync_safekeepers. If they're already synced we get the lsn
1134 : // in one roundtrip. If not, we should do a full sync_safekeepers.
1135 : #[instrument(skip_all)]
1136 : pub fn check_safekeepers_synced(&self, compute_state: &ComputeState) -> Result<Option<Lsn>> {
1137 : let start_time = Utc::now();
1138 :
1139 : let rt = tokio::runtime::Handle::current();
1140 : let result = rt.block_on(self.check_safekeepers_synced_async(compute_state));
1141 :
1142 : // Record runtime
1143 : self.state.lock().unwrap().metrics.sync_sk_check_ms = Utc::now()
1144 : .signed_duration_since(start_time)
1145 : .to_std()
1146 : .unwrap()
1147 : .as_millis() as u64;
1148 : result
1149 : }
1150 :
1151 : // Run `postgres` in a special mode with `--sync-safekeepers` argument
1152 : // and return the reported LSN back to the caller.
1153 : #[instrument(skip_all)]
1154 : pub fn sync_safekeepers(&self, storage_auth_token: Option<String>) -> Result<Lsn> {
1155 : let start_time = Utc::now();
1156 :
1157 : let mut sync_handle = maybe_cgexec(&self.params.pgbin)
1158 : .args(["--sync-safekeepers"])
1159 : .env("PGDATA", &self.params.pgdata) // we cannot use -D in this mode
1160 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
1161 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
1162 : } else {
1163 : vec![]
1164 : })
1165 : .stdout(Stdio::piped())
1166 : .stderr(Stdio::piped())
1167 : .spawn()
1168 : .expect("postgres --sync-safekeepers failed to start");
1169 : SYNC_SAFEKEEPERS_PID.store(sync_handle.id(), Ordering::SeqCst);
1170 :
1171 : // `postgres --sync-safekeepers` will print all log output to stderr and
1172 : // final LSN to stdout. So we leave stdout to collect LSN, while stderr logs
1173 : // will be collected in a child thread.
1174 : let stderr = sync_handle
1175 : .stderr
1176 : .take()
1177 : .expect("stderr should be captured");
1178 : let logs_handle = handle_postgres_logs(stderr);
1179 :
1180 : let sync_output = sync_handle
1181 : .wait_with_output()
1182 : .expect("postgres --sync-safekeepers failed");
1183 : SYNC_SAFEKEEPERS_PID.store(0, Ordering::SeqCst);
1184 :
1185 : // Process has exited, so we can join the logs thread.
1186 : let _ = tokio::runtime::Handle::current()
1187 : .block_on(logs_handle)
1188 0 : .map_err(|e| tracing::error!("log task panicked: {:?}", e));
1189 :
1190 : if !sync_output.status.success() {
1191 : anyhow::bail!(
1192 : "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
1193 : sync_output.status,
1194 : String::from_utf8(sync_output.stdout)
1195 : .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
1196 : );
1197 : }
1198 :
1199 : self.state.lock().unwrap().metrics.sync_safekeepers_ms = Utc::now()
1200 : .signed_duration_since(start_time)
1201 : .to_std()
1202 : .unwrap()
1203 : .as_millis() as u64;
1204 :
1205 : let lsn = Lsn::from_str(String::from_utf8(sync_output.stdout)?.trim())?;
1206 :
1207 : Ok(lsn)
1208 : }
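// Output sketch (hypothetical value): `postgres --sync-safekeepers` prints
// the LSN in the usual textual form, so a stdout of "0/15ECF68\n" parses via
// `Lsn::from_str` into Lsn(0x015E_CF68).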
1209 :
1210 : /// Do all the preparations like PGDATA directory creation, configuration,
1211 : /// safekeepers sync, basebackup, etc.
1212 : #[instrument(skip_all)]
1213 : pub fn prepare_pgdata(&self, compute_state: &ComputeState) -> Result<()> {
1214 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
1215 : let spec = &pspec.spec;
1216 : let pgdata_path = Path::new(&self.params.pgdata);
1217 :
1218 : // Remove/create an empty pgdata directory and put configuration there.
1219 : self.create_pgdata()?;
1220 : config::write_postgres_conf(
1221 : pgdata_path,
1222 : &pspec.spec,
1223 : self.params.internal_http_port,
1224 : &self.compute_ctl_config.tls,
1225 : )?;
1226 :
1227 : // Syncing safekeepers is only safe with primary nodes: if a primary
1228 : // is already connected it will be kicked out, so a secondary (standby)
1229 : // cannot sync safekeepers.
1230 : let lsn = match spec.mode {
1231 : ComputeMode::Primary => {
1232 : info!("checking if safekeepers are synced");
1233 : let lsn = if let Ok(Some(lsn)) = self.check_safekeepers_synced(compute_state) {
1234 : lsn
1235 : } else {
1236 : info!("starting safekeepers syncing");
1237 : self.sync_safekeepers(pspec.storage_auth_token.clone())
1238 0 : .with_context(|| "failed to sync safekeepers")?
1239 : };
1240 : info!("safekeepers synced at LSN {}", lsn);
1241 : lsn
1242 : }
1243 : ComputeMode::Static(lsn) => {
1244 : info!("Starting read-only node at static LSN {}", lsn);
1245 : lsn
1246 : }
1247 : ComputeMode::Replica => {
1248 : info!("Initializing standby from latest Pageserver LSN");
1249 : Lsn(0)
1250 : }
1251 : };
1252 :
1253 : info!(
1254 : "getting basebackup@{} from pageserver {}",
1255 : lsn, &pspec.pageserver_connstr
1256 : );
1257 0 : self.get_basebackup(compute_state, lsn).with_context(|| {
1258 0 : format!(
1259 0 : "failed to get basebackup@{} from pageserver {}",
1260 0 : lsn, &pspec.pageserver_connstr
1261 0 : )
1262 0 : })?;
1263 :
1264 : // Update pg_hba.conf received with basebackup.
1265 : update_pg_hba(pgdata_path)?;
1266 :
1267 : // Place pg_dynshmem under /dev/shm. This allows us to use
1268 : // 'dynamic_shared_memory_type = mmap' so that the files are placed in
1269 : // /dev/shm, similar to how 'dynamic_shared_memory_type = posix' works.
1270 : //
1271 : // Why on earth don't we just stick to the 'posix' default, you might
1272 : // ask. It turns out that making large allocations with 'posix' doesn't
1273 : // work very well with autoscaling. The behavior we want is that:
1274 : //
1275 : // 1. You can make large DSM allocations, larger than the current RAM
1276 : // size of the VM, without errors
1277 : //
1278 : // 2. If the allocated memory is really used, the VM is scaled up
1279 : // automatically to accommodate that
1280 : //
1281 : // We try to make that possible by having swap in the VM. But with the
1282 : // default 'posix' DSM implementation, we fail step 1, even when there's
1283 : // plenty of swap available. PostgreSQL uses posix_fallocate() to create
1284 : // the shmem segment, which is really just a file in /dev/shm in Linux,
1285 : // but posix_fallocate() on tmpfs returns ENOMEM if the size is larger
1286 : // than available RAM.
1287 : //
1288 : // Using 'dynamic_shared_memory_type = mmap' works around that, because
1289 : // the Postgres 'mmap' DSM implementation doesn't use
1290 : // posix_fallocate(). Instead, it uses repeated calls to write(2) to
1291 : // fill the file with zeros. It's weird that that differs between
1292 : // 'posix' and 'mmap', but we take advantage of it. When the file is
1293 : // filled slowly with write(2), the kernel allows it to grow larger, as
1294 : // long as there's swap available.
1295 : //
1296 : // In short, using 'dynamic_shared_memory_type = mmap' allows a DSM
1297 : // segment to be larger than currently available RAM. But we don't want
1298 : // to store it on a real file, which the kernel would try to flush to
1299 : // disk, so we symlink pg_dynshmem to /dev/shm.
1300 : //
1301 : // We don't set 'dynamic_shared_memory_type = mmap' here, we let the
1302 : // control plane control that option. If 'mmap' is not used, this
1303 : // symlink doesn't affect anything.
1304 : //
1305 : // See https://github.com/neondatabase/autoscaling/issues/800
1306 : std::fs::remove_dir(pgdata_path.join("pg_dynshmem"))?;
1307 : symlink("/dev/shm/", pgdata_path.join("pg_dynshmem"))?;
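// GUC sketch: the control plane opts in with a postgresql.conf line like
//
//     dynamic_shared_memory_type = 'mmap'
//
// and with any other setting the symlink above is simply inert.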
1308 :
1309 : match spec.mode {
1310 : ComputeMode::Primary => {}
1311 : ComputeMode::Replica | ComputeMode::Static(..) => {
1312 : add_standby_signal(pgdata_path)?;
1313 : }
1314 : }
1315 :
1316 : Ok(())
1317 : }
1318 :
1319 : /// Start and stop a postgres process to warm up the VM for startup.
1320 0 : pub fn prewarm_postgres(&self) -> Result<()> {
1321 0 : info!("prewarming");
1322 :
1323 : // Create pgdata
1324 0 : let pgdata = &format!("{}.warmup", self.params.pgdata);
1325 0 : create_pgdata(pgdata)?;
1326 :
1327 : // Run initdb to completion
1328 0 : info!("running initdb");
1329 0 : let initdb_bin = Path::new(&self.params.pgbin)
1330 0 : .parent()
1331 0 : .unwrap()
1332 0 : .join("initdb");
1333 0 : Command::new(initdb_bin)
1334 0 : .args(["--pgdata", pgdata])
1335 0 : .output()
1336 0 : .expect("cannot start initdb process");
1337 :
1338 : // Write conf
1339 : use std::io::Write;
1340 0 : let conf_path = Path::new(pgdata).join("postgresql.conf");
1341 0 : let mut file = std::fs::File::create(conf_path)?;
1342 0 : writeln!(file, "shared_buffers=65536")?;
1343 0 : writeln!(file, "port=51055")?; // Nobody should be connecting
1344 0 : writeln!(file, "shared_preload_libraries = 'neon'")?;
1345 :
1346 : // Start postgres
1347 0 : info!("starting postgres");
1348 0 : let mut pg = maybe_cgexec(&self.params.pgbin)
1349 0 : .args(["-D", pgdata])
1350 0 : .spawn()
1351 0 : .expect("cannot start postgres process");
1352 0 :
1353 0 : // Stop it when it's ready
1354 0 : info!("waiting for postgres");
1355 0 : wait_for_postgres(&mut pg, Path::new(pgdata))?;
1356 : // SIGQUIT orders postgres to exit immediately. We don't want to SIGKILL
1357 : // it to avoid orphaned processes prowling around while datadir is
1358 : // wiped.
1359 0 : let pm_pid = Pid::from_raw(pg.id() as i32);
1360 0 : kill(pm_pid, Signal::SIGQUIT)?;
1361 0 : info!("sent SIGQUIT signal");
1362 0 : pg.wait()?;
1363 0 : info!("done prewarming");
1364 :
1365 : // clean up
1366 0 : let _ok = fs::remove_dir_all(pgdata);
1367 0 : Ok(())
1368 0 : }
1369 :
1370 : /// Start Postgres as a child process and wait for it to start accepting
1371 : /// connections.
1372 : ///
1373 : /// Returns a handle to the child process and a handle to the logs thread.
1374 : #[instrument(skip_all)]
1375 : pub fn start_postgres(&self, storage_auth_token: Option<String>) -> Result<PostgresHandle> {
1376 : let pgdata_path = Path::new(&self.params.pgdata);
1377 :
1378 : // Run postgres as a child process.
1379 : let mut pg = maybe_cgexec(&self.params.pgbin)
1380 : .args(["-D", &self.params.pgdata])
1381 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
1382 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
1383 : } else {
1384 : vec![]
1385 : })
1386 : .stderr(Stdio::piped())
1387 : .spawn()
1388 : .expect("cannot start postgres process");
1389 : PG_PID.store(pg.id(), Ordering::SeqCst);
1390 :
1391 : // Start a task to collect logs from stderr.
1392 : let stderr = pg.stderr.take().expect("stderr should be captured");
1393 : let logs_handle = handle_postgres_logs(stderr);
1394 :
1395 : wait_for_postgres(&mut pg, pgdata_path)?;
1396 :
1397 : Ok(PostgresHandle {
1398 : postgres: pg,
1399 : log_collector: logs_handle,
1400 : })
1401 : }
1402 :
1403 : /// Wait for the child Postgres process forever. In this state Ctrl+C will
1404 : /// propagate to Postgres and it will be shut down as well.
1405 0 : fn wait_postgres(&self, mut pg_handle: PostgresHandle) -> std::process::ExitStatus {
1406 0 : info!(postmaster_pid = %pg_handle.postgres.id(), "Waiting for Postgres to exit");
1407 :
1408 0 : let ecode = pg_handle
1409 0 : .postgres
1410 0 : .wait()
1411 0 : .expect("failed to start waiting on Postgres process");
1412 0 : PG_PID.store(0, Ordering::SeqCst);
1413 0 :
1414 0 : // Process has exited. Wait for the log collecting task to finish.
1415 0 : let _ = tokio::runtime::Handle::current()
1416 0 : .block_on(pg_handle.log_collector)
1417 0 : .map_err(|e| tracing::error!("log task panicked: {:?}", e));
1418 0 :
1419 0 : ecode
1420 0 : }
1421 :
1422 : /// Do post-configuration of the already started Postgres. This function spawns a background task to
1423 : /// configure the database after applying the compute spec. Currently, it upgrades the neon extension
1424 : /// version. In the future, it may upgrade all 3rd-party extensions.
1425 : #[instrument(skip_all)]
1426 : pub fn post_apply_config(&self) -> Result<()> {
1427 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:post_apply_config"));
1428 0 : tokio::spawn(async move {
1429 0 : let res = async {
1430 0 : let (mut client, connection) = conf.connect(NoTls).await?;
1431 0 : tokio::spawn(async move {
1432 0 : if let Err(e) = connection.await {
1433 0 : eprintln!("connection error: {}", e);
1434 0 : }
1435 0 : });
1436 0 :
1437 0 : handle_neon_extension_upgrade(&mut client)
1438 0 : .await
1439 0 : .context("handle_neon_extension_upgrade")?;
1440 0 : Ok::<_, anyhow::Error>(())
1441 0 : }
1442 0 : .await;
1443 0 : if let Err(err) = res {
1444 0 : error!("error while post_apply_config: {err:#}");
1445 0 : }
1446 0 : });
1447 : Ok(())
1448 : }
1449 :
1450 0 : pub fn get_conn_conf(&self, application_name: Option<&str>) -> postgres::Config {
1451 0 : let mut conf = self.conn_conf.clone();
1452 0 : if let Some(application_name) = application_name {
1453 0 : conf.application_name(application_name);
1454 0 : }
1455 0 : conf
1456 0 : }
1457 :
1458 0 : pub fn get_tokio_conn_conf(&self, application_name: Option<&str>) -> tokio_postgres::Config {
1459 0 : let mut conf = self.tokio_conn_conf.clone();
1460 0 : if let Some(application_name) = application_name {
1461 0 : conf.application_name(application_name);
1462 0 : }
1463 0 : conf
1464 0 : }
1465 :
1466 0 : pub async fn get_maintenance_client(
1467 0 : conf: &tokio_postgres::Config,
1468 0 : ) -> Result<tokio_postgres::Client> {
1469 0 : let mut conf = conf.clone();
1470 0 : conf.application_name("compute_ctl:apply_config");
1471 :
1472 0 : let (client, conn) = match conf.connect(NoTls).await {
1473 : // If the connection fails, it may be an old node with the `zenith_admin` superuser.
1474 : //
1475 : // In this case we need to connect with the old `zenith_admin` name
1476 : // and create a new user. We cannot simply rename the connected user,
1477 : // but we can create a new one and grant it all privileges.
1478 0 : Err(e) => match e.code() {
1479 : Some(&SqlState::INVALID_PASSWORD)
1480 : | Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => {
1481 : // Connect with `zenith_admin` if `cloud_admin` could not authenticate
1482 0 : info!(
1483 0 : "cannot connect to Postgres: {}, retrying with 'zenith_admin' username",
1484 : e
1485 : );
1486 0 : let mut zenith_admin_conf = postgres::config::Config::from(conf.clone());
1487 0 : zenith_admin_conf.application_name("compute_ctl:apply_config");
1488 0 : zenith_admin_conf.user("zenith_admin");
1489 :
1490 : // It doesn't matter what the options were before; here we just want
1491 : // to connect and create a new superuser role.
1492 : const ZENITH_OPTIONS: &str = "-c role=zenith_admin -c default_transaction_read_only=off -c search_path=public -c statement_timeout=0";
1493 0 : zenith_admin_conf.options(ZENITH_OPTIONS);
1494 :
1495 0 : let mut client =
1496 0 : zenith_admin_conf.connect(NoTls)
1497 0 : .context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
1498 :
1499 : // Disable forwarding so that users don't get a cloud_admin role
1500 0 : let mut func = || {
1501 0 : client.simple_query("SET neon.forward_ddl = false")?;
1502 0 : client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
1503 0 : client.simple_query("GRANT zenith_admin TO cloud_admin")?;
1504 0 : Ok::<_, anyhow::Error>(())
1505 0 : };
1506 0 : func().context("apply_config setup cloud_admin")?;
1507 :
1508 0 : drop(client);
1509 0 :
1510 0 : // Reconnect with the original connstring, which uses the expected username
1511 0 : conf.connect(NoTls).await?
1512 : }
1513 0 : _ => return Err(e.into()),
1514 : },
1515 0 : Ok((client, conn)) => (client, conn),
1516 : };
1517 :
1518 0 : spawn(async move {
1519 0 : if let Err(e) = conn.await {
1520 0 : error!("maintenance client connection error: {}", e);
1521 0 : }
1522 0 : });
1523 0 :
1524 0 : // Disable DDL forwarding because control plane already knows about the roles/databases
1525 0 : // we're about to modify.
1526 0 : client
1527 0 : .simple_query("SET neon.forward_ddl = false")
1528 0 : .await
1529 0 : .context("apply_config SET neon.forward_ddl = false")?;
1530 :
1531 0 : Ok(client)
1532 0 : }
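// In SQL terms, the `zenith_admin` fallback above amounts to running the
// following as zenith_admin, then reconnecting as cloud_admin:
//
//     SET neon.forward_ddl = false;
//     CREATE USER cloud_admin WITH SUPERUSER;
//     GRANT zenith_admin TO cloud_admin;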
1533 :
1534 : /// Do initial configuration of the already started Postgres.
1535 : #[instrument(skip_all)]
1536 : pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
1537 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:apply_config"));
1538 :
1539 : let conf = Arc::new(conf);
1540 : let spec = Arc::new(
1541 : compute_state
1542 : .pspec
1543 : .as_ref()
1544 : .expect("spec must be set")
1545 : .spec
1546 : .clone(),
1547 : );
1548 :
1549 : let max_concurrent_connections = self.max_service_connections(compute_state, &spec);
1550 :
1551 : // Merge-apply spec & changes to PostgreSQL state.
1552 : self.apply_spec_sql(spec.clone(), conf.clone(), max_concurrent_connections)?;
1553 :
1554 : if let Some(local_proxy) = &spec.clone().local_proxy_config {
1555 : info!("configuring local_proxy");
1556 : local_proxy::configure(local_proxy).context("apply_config local_proxy")?;
1557 : }
1558 :
1559 : // Run migrations separately to not hold up cold starts
1560 0 : tokio::spawn(async move {
1561 0 : let mut conf = conf.as_ref().clone();
1562 0 : conf.application_name("compute_ctl:migrations");
1563 0 :
1564 0 : match conf.connect(NoTls).await {
1565 0 : Ok((mut client, connection)) => {
1566 0 : tokio::spawn(async move {
1567 0 : if let Err(e) = connection.await {
1568 0 : eprintln!("connection error: {}", e);
1569 0 : }
1570 0 : });
1571 0 : if let Err(e) = handle_migrations(&mut client).await {
1572 0 : error!("Failed to run migrations: {}", e);
1573 0 : }
1574 : }
1575 0 : Err(e) => {
1576 0 : error!(
1577 0 : "Failed to connect to the compute for running migrations: {}",
1578 : e
1579 : );
1580 : }
1581 : };
1582 0 : });
1583 :
1584 : Ok::<(), anyhow::Error>(())
1585 : }
1586 :
1587 : // This is a thin wrapper around `pg_ctl reload`; note that we currently
1588 : // don't use `pg_ctl` for start / stop, only for reloading the config.
1589 : #[instrument(skip_all)]
1590 : fn pg_reload_conf(&self) -> Result<()> {
1591 : let pgctl_bin = Path::new(&self.params.pgbin)
1592 : .parent()
1593 : .unwrap()
1594 : .join("pg_ctl");
1595 : Command::new(pgctl_bin)
1596 : .args(["reload", "-D", &self.params.pgdata])
1597 : .output()
1598 : .expect("cannot run pg_ctl process");
1599 : Ok(())
1600 : }
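// For reference, the command spawned above is equivalent to running:
//
//     pg_ctl reload -D <pgdata>
//
// which just sends SIGHUP to the postmaster so it re-reads its configuration files.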
1601 :
1602 : /// Similar to `apply_config()`, but performs a slightly different sequence of operations,
1603 : /// as it's used to reconfigure a previously started and configured Postgres node.
1604 : #[instrument(skip_all)]
1605 : pub fn reconfigure(&self) -> Result<()> {
1606 : let spec = self.state.lock().unwrap().pspec.clone().unwrap().spec;
1607 :
1608 : if let Some(ref pgbouncer_settings) = spec.pgbouncer_settings {
1609 : info!("tuning pgbouncer");
1610 :
1611 : let pgbouncer_settings = pgbouncer_settings.clone();
1612 : let tls_config = self.compute_ctl_config.tls.clone();
1613 :
1614 : // Spawn a background task to do the tuning,
1615 : // so that we don't block the main thread that starts Postgres.
1616 0 : tokio::spawn(async move {
1617 0 : let res = tune_pgbouncer(pgbouncer_settings, tls_config).await;
1618 0 : if let Err(err) = res {
1619 0 : error!("error while tuning pgbouncer: {err:?}");
1620 0 : }
1621 0 : });
1622 : }
1623 :
1624 : if let Some(ref local_proxy) = spec.local_proxy_config {
1625 : info!("configuring local_proxy");
1626 :
1627 : // Spawn a background task to do the configuration,
1628 : // so that we don't block the main thread that starts Postgres.
1629 : let mut local_proxy = local_proxy.clone();
1630 : local_proxy.tls = self.compute_ctl_config.tls.clone();
1631 0 : tokio::spawn(async move {
1632 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
1633 0 : error!("error while configuring local_proxy: {err:?}");
1634 0 : }
1635 0 : });
1636 : }
1637 :
1638 : // Reconfigure rsyslog for Postgres logs export
1639 : let conf = PostgresLogsRsyslogConfig::new(spec.logs_export_host.as_deref());
1640 : configure_postgres_logs_export(conf)?;
1641 :
1642 : // Write new config
1643 : let pgdata_path = Path::new(&self.params.pgdata);
1644 : config::write_postgres_conf(
1645 : pgdata_path,
1646 : &spec,
1647 : self.params.internal_http_port,
1648 : &self.compute_ctl_config.tls,
1649 : )?;
1650 :
1651 : if !spec.skip_pg_catalog_updates {
1652 : let max_concurrent_connections = spec.reconfigure_concurrency;
1653 : // Temporarily reset max_cluster_size in config
1654 : // to avoid the possibility of hitting the limit, while we are reconfiguring:
1655 : // creating new extensions, roles, etc.
1656 0 : config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
1657 0 : self.pg_reload_conf()?;
1658 :
1659 0 : if spec.mode == ComputeMode::Primary {
1660 0 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:reconfigure"));
1661 0 : let conf = Arc::new(conf);
1662 0 :
1663 0 : let spec = Arc::new(spec.clone());
1664 0 :
1665 0 : self.apply_spec_sql(spec, conf, max_concurrent_connections)?;
1666 0 : }
1667 :
1668 0 : Ok(())
1669 0 : })?;
1670 : }
1671 :
1672 : self.pg_reload_conf()?;
1673 :
1674 : let unknown_op = "unknown".to_string();
1675 : let op_id = spec.operation_uuid.as_ref().unwrap_or(&unknown_op);
1676 : info!(
1677 : "finished reconfiguration of compute node for operation {}",
1678 : op_id
1679 : );
1680 :
1681 : Ok(())
1682 : }
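// `with_compute_ctl_tmp_override` is defined in `config.rs`; a plausible sketch
// of such a helper (an assumption for illustration, not the actual
// implementation) is:
//
//     fn with_tmp_override<T>(
//         pgdata: &std::path::Path,
//         setting: &str,
//         f: impl FnOnce() -> anyhow::Result<T>,
//     ) -> anyhow::Result<T> {
//         let override_path = pgdata.join("compute_ctl_temp_override.conf");
//         std::fs::write(&override_path, setting)?; // install the temporary GUC override
//         let res = f();                            // caller reloads conf and does the work
//         std::fs::write(&override_path, "")?;      // clear the override afterwards
//         res
//     }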
1683 :
1684 : #[instrument(skip_all)]
1685 : pub fn configure_as_primary(&self, compute_state: &ComputeState) -> Result<()> {
1686 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
1687 :
1688 : assert!(pspec.spec.mode == ComputeMode::Primary);
1689 : if !pspec.spec.skip_pg_catalog_updates {
1690 : let pgdata_path = Path::new(&self.params.pgdata);
1691 : // temporarily reset max_cluster_size in config
1692 : // to avoid the possibility of hitting the limit, while we are applying config:
1693 : // creating new extensions, roles, etc...
1694 0 : config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
1695 0 : self.pg_reload_conf()?;
1696 :
1697 0 : self.apply_config(compute_state)?;
1698 :
1699 0 : Ok(())
1700 0 : })?;
1701 :
1702 : let postgresql_conf_path = pgdata_path.join("postgresql.conf");
1703 : if config::line_in_file(
1704 : &postgresql_conf_path,
1705 : "neon.disable_logical_replication_subscribers=false",
1706 : )? {
1707 : info!(
1708 : "updated postgresql.conf to set neon.disable_logical_replication_subscribers=false"
1709 : );
1710 : }
1711 : self.pg_reload_conf()?;
1712 : }
1713 : self.post_apply_config()?;
1714 :
1715 : Ok(())
1716 : }
1717 :
1718 0 : pub async fn watch_cert_for_changes(self: Arc<Self>) {
1719 : // update status on cert renewal
1720 0 : if let Some(tls_config) = &self.compute_ctl_config.tls {
1721 0 : let tls_config = tls_config.clone();
1722 :
1723 : // wait until the cert exists.
1724 0 : let mut cert_watch = watch_cert_for_changes(tls_config.cert_path.clone()).await;
1725 :
1726 0 : tokio::task::spawn_blocking(move || {
1727 0 : let handle = tokio::runtime::Handle::current();
1728 : 'cert_update: loop {
1729 : // let postgres/pgbouncer/local_proxy know the new cert/key exists.
1730 : // we need to wait until it's configurable first.
1731 :
1732 0 : let mut state = self.state.lock().unwrap();
1733 : 'status_update: loop {
1734 0 : match state.status {
1735 : // let's update the state to config pending
1736 : ComputeStatus::ConfigurationPending | ComputeStatus::Running => {
1737 0 : state.set_status(
1738 0 : ComputeStatus::ConfigurationPending,
1739 0 : &self.state_changed,
1740 0 : );
1741 0 : break 'status_update;
1742 : }
1743 :
1744 : // exit loop
1745 : ComputeStatus::Failed
1746 : | ComputeStatus::TerminationPending
1747 0 : | ComputeStatus::Terminated => break 'cert_update,
1748 :
1749 : // wait
1750 : ComputeStatus::Init
1751 : | ComputeStatus::Configuration
1752 0 : | ComputeStatus::Empty => {
1753 0 : state = self.state_changed.wait(state).unwrap();
1754 0 : }
1755 : }
1756 : }
1757 0 : drop(state);
1758 0 :
1759 0 : // wait for a new certificate update
1760 0 : if handle.block_on(cert_watch.changed()).is_err() {
1761 0 : break;
1762 0 : }
1763 : }
1764 0 : });
1765 0 : }
1766 0 : }
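// Status transitions driven by the watcher loop above, per certificate change:
//
//     ConfigurationPending | Running            -> set ConfigurationPending, handle next change
//     Init | Configuration | Empty              -> block on the condvar until the status settles
//     Failed | TerminationPending | Terminated  -> exit the watcher entirely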
1767 :
1768 : /// Update `last_active` in the shared state, but only if the new value is more recent.
1769 0 : pub fn update_last_active(&self, last_active: Option<DateTime<Utc>>) {
1770 0 : let mut state = self.state.lock().unwrap();
1771 0 : // NB: `Some(<DateTime>)` is always greater than `None`.
1772 0 : if last_active > state.last_active {
1773 0 : state.last_active = last_active;
1774 0 : debug!("set the last compute activity time to: {:?}", last_active);
1775 0 : }
1776 0 : }
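// The comparison above leans on the derived `Ord` for `Option<T>`, under which
// `None` sorts below any `Some`:
//
//     assert!(Some(2) > Some(1));
//     assert!(Some(1) > None::<i32>);
//     assert!(!(None::<i32> < None::<i32>));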
1777 :
1778 : // Look for core dumps and collect backtraces.
1779 : //
1780 : // EKS worker nodes have the following core dump settings:
1781 : // /proc/sys/kernel/core_pattern -> core
1782 : // /proc/sys/kernel/core_uses_pid -> 1
1783 : // ulimit -c -> unlimited
1784 : // which results in core dumps being written to the Postgres data directory as core.<pid>.
1785 : //
1786 : // Use that as the default location and pattern, except on macOS, where core dumps
1787 : // are written to the /cores/ directory by default.
1788 : //
1789 : // With default Linux settings, the core dump file is called just "core", so check for
1790 : // that too.
1791 0 : pub fn check_for_core_dumps(&self) -> Result<()> {
1792 0 : let core_dump_dir = match std::env::consts::OS {
1793 0 : "macos" => Path::new("/cores/"),
1794 0 : _ => Path::new(&self.params.pgdata),
1795 : };
1796 :
1797 : // Collect core dump paths if any
1798 0 : info!("checking for core dumps in {}", core_dump_dir.display());
1799 0 : let files = fs::read_dir(core_dump_dir)?;
1800 0 : let cores = files.filter_map(|entry| {
1801 0 : let entry = entry.ok()?;
1802 :
1803 0 : let is_core_dump = match entry.file_name().to_str()? {
1804 0 : n if n.starts_with("core.") => true,
1805 0 : "core" => true,
1806 0 : _ => false,
1807 : };
1808 0 : if is_core_dump {
1809 0 : Some(entry.path())
1810 : } else {
1811 0 : None
1812 : }
1813 0 : });
1814 :
1815 : // Print backtrace for each core dump
1816 0 : for core_path in cores {
1817 0 : warn!(
1818 0 : "core dump found: {}, collecting backtrace",
1819 0 : core_path.display()
1820 : );
1821 :
1822 : // Try first with gdb
1823 0 : let backtrace = Command::new("gdb")
1824 0 : .args(["--batch", "-q", "-ex", "bt", &self.params.pgbin])
1825 0 : .arg(&core_path)
1826 0 : .output();
1827 :
1828 : // Try lldb if no gdb is found -- that is handy for local testing on macOS
1829 0 : let backtrace = match backtrace {
1830 0 : Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
1831 0 : warn!("cannot find gdb, trying lldb");
1832 0 : Command::new("lldb")
1833 0 : .arg("-c")
1834 0 : .arg(&core_path)
1835 0 : .args(["--batch", "-o", "bt all", "-o", "quit"])
1836 0 : .output()
1837 : }
1838 0 : _ => backtrace,
1839 0 : }?;
1840 :
1841 0 : warn!(
1842 0 : "core dump backtrace: {}",
1843 0 : String::from_utf8_lossy(&backtrace.stdout)
1844 : );
1845 0 : warn!(
1846 0 : "debugger stderr: {}",
1847 0 : String::from_utf8_lossy(&backtrace.stderr)
1848 : );
1849 : }
1850 :
1851 0 : Ok(())
1852 0 : }
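// Shell equivalents of the two debugger invocations above (handy for local debugging):
//
//     gdb --batch -q -ex bt <pgbin> <core_path>
//     lldb -c <core_path> --batch -o "bt all" -o quit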
1853 :
1854 : /// Select `pg_stat_statements` data and return it as a stringified JSON
1855 0 : pub async fn collect_insights(&self) -> String {
1856 0 : let mut result_rows: Vec<String> = Vec::new();
1857 0 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:collect_insights"));
1858 0 : let connect_result = conf.connect(NoTls).await;
1859 0 : let (client, connection) = connect_result.unwrap();
1860 0 : tokio::spawn(async move {
1861 0 : if let Err(e) = connection.await {
1862 0 : eprintln!("connection error: {}", e);
1863 0 : }
1864 0 : });
1865 0 : let result = client
1866 0 : .simple_query(
1867 0 : "SELECT
1868 0 : row_to_json(pg_stat_statements)
1869 0 : FROM
1870 0 : pg_stat_statements
1871 0 : WHERE
1872 0 : userid != 'cloud_admin'::regrole::oid
1873 0 : ORDER BY
1874 0 : (mean_exec_time + mean_plan_time) DESC
1875 0 : LIMIT 100",
1876 0 : )
1877 0 : .await;
1878 :
1879 0 : if let Ok(raw_rows) = result {
1880 0 : for message in raw_rows.iter() {
1881 0 : if let postgres::SimpleQueryMessage::Row(row) = message {
1882 0 : if let Some(json) = row.get(0) {
1883 0 : result_rows.push(json.to_string());
1884 0 : }
1885 0 : }
1886 : }
1887 :
1888 0 : format!("{{\"pg_stat_statements\": [{}]}}", result_rows.join(","))
1889 : } else {
1890 0 : "{{\"pg_stat_statements\": []}}".to_string()
1891 : }
1892 0 : }
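// The returned string has this shape (values illustrative, not real data):
//
//     {"pg_stat_statements": [{"userid":16390,"query":"SELECT ...","calls":42, ...}, ...]}
//
// or `{"pg_stat_statements": []}` when the query fails (e.g. the extension is
// not installed).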
1893 :
1894 : // Download an archive, unzip it, and place the files in the correct locations.
1895 0 : pub async fn download_extension(
1896 0 : &self,
1897 0 : real_ext_name: String,
1898 0 : ext_path: RemotePath,
1899 0 : ) -> Result<u64, DownloadError> {
1900 0 : let remote_ext_base_url =
1901 0 : self.params
1902 0 : .remote_ext_base_url
1903 0 : .as_ref()
1904 0 : .ok_or(DownloadError::BadInput(anyhow::anyhow!(
1905 0 : "Remote extensions storage is not configured",
1906 0 : )))?;
1907 :
1908 0 : let ext_archive_name = ext_path.object_name().expect("bad path");
1909 0 :
1910 0 : let mut first_try = false;
1911 0 : if !self
1912 0 : .ext_download_progress
1913 0 : .read()
1914 0 : .expect("lock err")
1915 0 : .contains_key(ext_archive_name)
1916 0 : {
1917 0 : self.ext_download_progress
1918 0 : .write()
1919 0 : .expect("lock err")
1920 0 : .insert(ext_archive_name.to_string(), (Utc::now(), false));
1921 0 : first_try = true;
1922 0 : }
1923 0 : let (download_start, download_completed) =
1924 0 : self.ext_download_progress.read().expect("lock err")[ext_archive_name];
1925 0 : let start_time_delta = Utc::now()
1926 0 : .signed_duration_since(download_start)
1927 0 : .to_std()
1928 0 : .unwrap()
1929 0 : .as_millis() as u64;
1930 :
1931 : // how long to wait for extension download if it was started by another process
1932 : const HANG_TIMEOUT: u64 = 3000; // milliseconds
1933 :
1934 0 : if download_completed {
1935 0 : info!("extension already downloaded, skipping re-download");
1936 0 : return Ok(0);
1937 0 : } else if start_time_delta < HANG_TIMEOUT && !first_try {
1938 0 : info!(
1939 0 : "download {ext_archive_name} already started by another process, hanging untill completion or timeout"
1940 : );
1941 0 : let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500));
1942 : loop {
1943 0 : info!("waiting for download");
1944 0 : interval.tick().await;
1945 0 : let (_, download_completed_now) =
1946 0 : self.ext_download_progress.read().expect("lock")[ext_archive_name];
1947 0 : if download_completed_now {
1948 0 : info!("download finished by whoever else downloaded it");
1949 0 : return Ok(0);
1950 0 : }
1951 : }
1952 : // NOTE: the above loop will get terminated
1953 : // based on the timeout of the download function
1954 0 : }
1955 0 :
1956 0 : // If the extension hasn't been downloaded before, or the previous
1957 0 : // download attempt started at least HANG_TIMEOUT ms ago,
1958 0 : // then we try to download it here.
1959 0 : info!("downloading new extension {ext_archive_name}");
1960 :
1961 0 : let download_size = extension_server::download_extension(
1962 0 : &real_ext_name,
1963 0 : &ext_path,
1964 0 : remote_ext_base_url,
1965 0 : &self.params.pgbin,
1966 0 : )
1967 0 : .await
1968 0 : .map_err(DownloadError::Other);
1969 0 :
1970 0 : if download_size.is_ok() {
1971 0 : self.ext_download_progress
1972 0 : .write()
1973 0 : .expect("bad lock")
1974 0 : .insert(ext_archive_name.to_string(), (download_start, true));
1975 0 : }
1976 :
1977 0 : download_size
1978 0 : }
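// The `ext_download_progress` map is what lets concurrent requests for the same
// archive cooperate. Per key, the `(DateTime<Utc>, bool)` value moves through:
//
//     absent              -> nobody has tried yet; this call starts the download
//     (started_at, false) -> a download is in flight; callers within HANG_TIMEOUT
//                            poll every 500ms instead of downloading again
//     (started_at, true)  -> completed; later calls return Ok(0) immediately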
1979 :
1980 0 : pub async fn set_role_grants(
1981 0 : &self,
1982 0 : db_name: &PgIdent,
1983 0 : schema_name: &PgIdent,
1984 0 : privileges: &[Privilege],
1985 0 : role_name: &PgIdent,
1986 0 : ) -> Result<()> {
1987 : use tokio_postgres::NoTls;
1988 :
1989 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:set_role_grants"));
1990 0 : conf.dbname(db_name);
1991 :
1992 0 : let (db_client, conn) = conf
1993 0 : .connect(NoTls)
1994 0 : .await
1995 0 : .context("Failed to connect to the database")?;
1996 0 : tokio::spawn(conn);
1997 0 :
1998 0 : // TODO: support other types of grants apart from schemas?
1999 0 :
2000 0 : // check the role grants first - to gracefully handle read-replicas.
2001 0 : let select = "SELECT privilege_type
2002 0 : FROM pg_namespace
2003 0 : JOIN LATERAL (SELECT * FROM aclexplode(nspacl) AS x) acl ON true
2004 0 : JOIN pg_user users ON acl.grantee = users.usesysid
2005 0 : WHERE users.usename = $1
2006 0 : AND nspname = $2";
2007 0 : let rows = db_client
2008 0 : .query(select, &[role_name, schema_name])
2009 0 : .await
2010 0 : .with_context(|| format!("Failed to execute query: {select}"))?;
2011 :
2012 0 : let already_granted: HashSet<String> = rows.into_iter().map(|row| row.get(0)).collect();
2013 0 :
2014 0 : let grants = privileges
2015 0 : .iter()
2016 0 : .filter(|p| !already_granted.contains(p.as_str()))
2017 0 : // These should not be quoted, as they are part of the command;
2018 0 : // they are already sanitized, so it's OK.
2019 0 : .map(|p| p.as_str())
2020 0 : .join(", ");
2021 0 :
2022 0 : if !grants.is_empty() {
2023 : // quote the schema and role name as identifiers to sanitize them.
2024 0 : let schema_name = schema_name.pg_quote();
2025 0 : let role_name = role_name.pg_quote();
2026 0 :
2027 0 : let query = format!("GRANT {grants} ON SCHEMA {schema_name} TO {role_name}",);
2028 0 : db_client
2029 0 : .simple_query(&query)
2030 0 : .await
2031 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
2032 0 : }
2033 :
2034 0 : Ok(())
2035 0 : }
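// Example of the statement built above (hypothetical schema, role, and
// privileges; the identifiers come out quoted by `pg_quote`):
//
//     GRANT CREATE, USAGE ON SCHEMA "app" TO "app_reader";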
2036 :
2037 0 : pub async fn install_extension(
2038 0 : &self,
2039 0 : ext_name: &PgIdent,
2040 0 : db_name: &PgIdent,
2041 0 : ext_version: ExtVersion,
2042 0 : ) -> Result<ExtVersion> {
2043 : use tokio_postgres::NoTls;
2044 :
2045 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:install_extension"));
2046 0 : conf.dbname(db_name);
2047 :
2048 0 : let (db_client, conn) = conf
2049 0 : .connect(NoTls)
2050 0 : .await
2051 0 : .context("Failed to connect to the database")?;
2052 0 : tokio::spawn(conn);
2053 0 :
2054 0 : let version_query = "SELECT extversion FROM pg_extension WHERE extname = $1";
2055 0 : let version: Option<ExtVersion> = db_client
2056 0 : .query_opt(version_query, &[&ext_name])
2057 0 : .await
2058 0 : .with_context(|| format!("Failed to execute query: {}", version_query))?
2059 0 : .map(|row| row.get(0));
2060 0 :
2061 0 : // sanitize the inputs as postgres idents.
2062 0 : let ext_name: String = ext_name.pg_quote();
2063 0 : let quoted_version: String = ext_version.pg_quote();
2064 :
2065 0 : if let Some(installed_version) = version {
2066 0 : if installed_version == ext_version {
2067 0 : return Ok(installed_version);
2068 0 : }
2069 0 : let query = format!("ALTER EXTENSION {ext_name} UPDATE TO {quoted_version}");
2070 0 : db_client
2071 0 : .simple_query(&query)
2072 0 : .await
2073 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
2074 : } else {
2075 0 : let query =
2076 0 : format!("CREATE EXTENSION IF NOT EXISTS {ext_name} WITH VERSION {quoted_version}");
2077 0 : db_client
2078 0 : .simple_query(&query)
2079 0 : .await
2080 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
2081 : }
2082 :
2083 0 : Ok(ext_version)
2084 0 : }
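// Depending on whether the extension is already installed, this issues one of
// (hypothetical extension name and version; both quoted by `pg_quote`):
//
//     ALTER EXTENSION "pg_trgm" UPDATE TO "1.6";
//     CREATE EXTENSION IF NOT EXISTS "pg_trgm" WITH VERSION "1.6";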
2085 :
2086 0 : pub async fn prepare_preload_libraries(
2087 0 : &self,
2088 0 : spec: &ComputeSpec,
2089 0 : ) -> Result<RemoteExtensionMetrics> {
2090 0 : if self.params.remote_ext_base_url.is_none() {
2091 0 : return Ok(RemoteExtensionMetrics {
2092 0 : num_ext_downloaded: 0,
2093 0 : largest_ext_size: 0,
2094 0 : total_ext_download_size: 0,
2095 0 : });
2096 0 : }
2097 0 : let remote_extensions = spec
2098 0 : .remote_extensions
2099 0 : .as_ref()
2100 0 : .ok_or(anyhow::anyhow!("Remote extensions are not configured"))?;
2101 :
2102 0 : info!("parse shared_preload_libraries from spec.cluster.settings");
2103 0 : let mut libs_vec = Vec::new();
2104 0 : if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
2105 0 : libs_vec = libs
2106 0 : .split(&[',', '\'', ' '])
2107 0 : .filter(|s| *s != "neon" && !s.is_empty())
2108 0 : .map(str::to_string)
2109 0 : .collect();
2110 0 : }
2111 0 : info!("parse shared_preload_libraries from provided postgresql.conf");
2112 :
2113 : // This path is used in neon_local and Python tests.
2114 0 : if let Some(conf) = &spec.cluster.postgresql_conf {
2115 0 : let conf_lines = conf.split('\n').collect::<Vec<&str>>();
2116 0 : let mut shared_preload_libraries_line = "";
2117 0 : for line in conf_lines {
2118 0 : if line.starts_with("shared_preload_libraries") {
2119 0 : shared_preload_libraries_line = line;
2120 0 : }
2121 : }
2122 0 : let mut preload_libs_vec = Vec::new();
2123 0 : if let Some(libs) = shared_preload_libraries_line.split("='").nth(1) {
2124 0 : preload_libs_vec = libs
2125 0 : .split(&[',', '\'', ' '])
2126 0 : .filter(|s| *s != "neon" && !s.is_empty())
2127 0 : .map(str::to_string)
2128 0 : .collect();
2129 0 : }
2130 0 : libs_vec.extend(preload_libs_vec);
2131 0 : }
2132 :
2133 : // Don't try to download libraries that are not in the index.
2134 : // Assume that they are already present locally.
2135 0 : libs_vec.retain(|lib| remote_extensions.library_index.contains_key(lib));
2136 0 :
2137 0 : info!("Downloading to shared preload libraries: {:?}", &libs_vec);
2138 :
2139 0 : let mut download_tasks = Vec::new();
2140 0 : for library in &libs_vec {
2141 0 : let (ext_name, ext_path) =
2142 0 : remote_extensions.get_ext(library, true, &BUILD_TAG, &self.params.pgversion)?;
2143 0 : download_tasks.push(self.download_extension(ext_name, ext_path));
2144 : }
2145 0 : let results = join_all(download_tasks).await;
2146 :
2147 0 : let mut remote_ext_metrics = RemoteExtensionMetrics {
2148 0 : num_ext_downloaded: 0,
2149 0 : largest_ext_size: 0,
2150 0 : total_ext_download_size: 0,
2151 0 : };
2152 0 : for result in results {
2153 0 : let download_size = match result {
2154 0 : Ok(res) => {
2155 0 : remote_ext_metrics.num_ext_downloaded += 1;
2156 0 : res
2157 : }
2158 0 : Err(err) => {
2159 0 : // if we failed to download an extension, we don't want to fail the whole
2160 0 : // process, but we do want to log the error
2161 0 : error!("Failed to download extension: {}", err);
2162 0 : 0
2163 : }
2164 : };
2165 :
2166 0 : remote_ext_metrics.largest_ext_size =
2167 0 : std::cmp::max(remote_ext_metrics.largest_ext_size, download_size);
2168 0 : remote_ext_metrics.total_ext_download_size += download_size;
2169 : }
2170 0 : Ok(remote_ext_metrics)
2171 0 : }
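// For example, a (hypothetical) postgresql.conf line like:
//
//     shared_preload_libraries='neon,pg_stat_statements, timescaledb'
//
// is split on commas, quotes, and spaces; "neon" and empty tokens are dropped;
// libraries missing from the remote index are skipped; and the rest are
// downloaded concurrently via `join_all`.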
2172 :
2173 : /// Waits until the current thread receives a state-changed notification and
2174 : /// the pageserver connection string has changed.
2175 : ///
2176 : /// The operation will time out after a specified duration.
2177 0 : pub fn wait_timeout_while_pageserver_connstr_unchanged(&self, duration: Duration) {
2178 0 : let state = self.state.lock().unwrap();
2179 0 : let old_pageserver_connstr = state
2180 0 : .pspec
2181 0 : .as_ref()
2182 0 : .expect("spec must be set")
2183 0 : .pageserver_connstr
2184 0 : .clone();
2185 0 : let mut unchanged = true;
2186 0 : let _ = self
2187 0 : .state_changed
2188 0 : .wait_timeout_while(state, duration, |s| {
2189 0 : let pageserver_connstr = &s
2190 0 : .pspec
2191 0 : .as_ref()
2192 0 : .expect("spec must be set")
2193 0 : .pageserver_connstr;
2194 0 : unchanged = pageserver_connstr == &old_pageserver_connstr;
2195 0 : unchanged
2196 0 : })
2197 0 : .unwrap();
2198 0 : if !unchanged {
2199 0 : info!("Pageserver config changed");
2200 0 : }
2201 0 : }
2202 : }
2203 :
2204 0 : pub fn forward_termination_signal() {
2205 0 : let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
2206 0 : if ss_pid != 0 {
2207 0 : let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
2208 0 : kill(ss_pid, Signal::SIGTERM).ok();
2209 0 : }
2210 0 : let pg_pid = PG_PID.load(Ordering::SeqCst);
2211 0 : if pg_pid != 0 {
2212 0 : let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
2213 0 : // Use 'fast' shutdown (SIGINT) because it also creates a shutdown checkpoint, which is important for
2214 0 : // ROs to get a list of running xacts faster instead of going through the CLOG.
2215 0 : // See https://www.postgresql.org/docs/current/server-shutdown.html for the list of modes and signals.
2216 0 : kill(pg_pid, Signal::SIGINT).ok();
2217 0 : }
2218 0 : }
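// For reference, these signals match the Postgres shutdown modes: SIGTERM is a
// "smart" shutdown and SIGINT a "fast" one. The same fast shutdown can be
// triggered by hand from a shell (the first line of postmaster.pid is the
// postmaster PID):
//
//     kill -INT $(head -1 "$PGDATA/postmaster.pid")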
2219 :
2220 : // Helper trait to call `JoinSet::spawn_blocking(f)` while propagating the
2221 : // current tracing span to the worker thread.
2222 : trait JoinSetExt<T> {
2223 : fn spawn_blocking_child<F>(&mut self, f: F) -> tokio::task::AbortHandle
2224 : where
2225 : F: FnOnce() -> T + Send + 'static,
2226 : T: Send;
2227 : }
2228 :
2229 : impl<T: 'static> JoinSetExt<T> for tokio::task::JoinSet<T> {
2230 0 : fn spawn_blocking_child<F>(&mut self, f: F) -> tokio::task::AbortHandle
2231 0 : where
2232 0 : F: FnOnce() -> T + Send + 'static,
2233 0 : T: Send,
2234 0 : {
2235 0 : let sp = tracing::Span::current();
2236 0 : self.spawn_blocking(move || {
2237 0 : let _e = sp.enter();
2238 0 : f()
2239 0 : })
2240 0 : }
2241 : }
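// A minimal usage sketch of `spawn_blocking_child` (`heavy_sync_work` is a
// hypothetical helper returning u64):
//
//     let mut tasks: tokio::task::JoinSet<u64> = tokio::task::JoinSet::new();
//     let _abort = tasks.spawn_blocking_child(|| {
//         // Runs on the blocking pool, with the caller's tracing span entered.
//         heavy_sync_work()
//     });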
|