Line data Source code
1 : use std::collections::HashMap;
2 : use std::os::unix::fs::{PermissionsExt, symlink};
3 : use std::path::Path;
4 : use std::process::{Command, Stdio};
5 : use std::str::FromStr;
6 : use std::sync::atomic::{AtomicU32, Ordering};
7 : use std::sync::{Arc, Condvar, Mutex, RwLock};
8 : use std::time::{Duration, Instant};
9 : use std::{env, fs};
10 :
11 : use anyhow::{Context, Result};
12 : use chrono::{DateTime, Utc};
13 : use compute_api::privilege::Privilege;
14 : use compute_api::responses::{ComputeConfig, ComputeCtlConfig, ComputeMetrics, ComputeStatus};
15 : use compute_api::spec::{
16 : ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, ExtVersion, PgIdent,
17 : };
18 : use futures::StreamExt;
19 : use futures::future::join_all;
20 : use futures::stream::FuturesUnordered;
21 : use nix::sys::signal::{Signal, kill};
22 : use nix::unistd::Pid;
23 : use once_cell::sync::Lazy;
24 : use postgres;
25 : use postgres::NoTls;
26 : use postgres::error::SqlState;
27 : use remote_storage::{DownloadError, RemotePath};
28 : use tokio::spawn;
29 : use tracing::{Instrument, debug, error, info, instrument, warn};
30 : use utils::id::{TenantId, TimelineId};
31 : use utils::lsn::Lsn;
32 : use utils::measured_stream::MeasuredReader;
33 :
34 : use crate::configurator::launch_configurator;
35 : use crate::disk_quota::set_disk_quota;
36 : use crate::installed_extensions::get_installed_extensions;
37 : use crate::logger::startup_context_from_env;
38 : use crate::lsn_lease::launch_lsn_lease_bg_task_for_static;
39 : use crate::metrics::COMPUTE_CTL_UP;
40 : use crate::monitor::launch_monitor;
41 : use crate::pg_helpers::*;
42 : use crate::rsyslog::{
43 : PostgresLogsRsyslogConfig, configure_audit_rsyslog, configure_postgres_logs_export,
44 : launch_pgaudit_gc,
45 : };
46 : use crate::spec::*;
47 : use crate::swap::resize_swap;
48 : use crate::sync_sk::{check_if_synced, ping_safekeeper};
49 : use crate::tls::watch_cert_for_changes;
50 : use crate::{config, extension_server, local_proxy};
51 :
52 : pub static SYNC_SAFEKEEPERS_PID: AtomicU32 = AtomicU32::new(0);
53 : pub static PG_PID: AtomicU32 = AtomicU32::new(0);
54 : // This is an arbitrary build tag. Fine as a default / for testing purposes
55 : // in case the environment variable is not set.
56 : const BUILD_TAG_DEFAULT: &str = "latest";
57 : /// Build tag/version of the compute node binaries/image. It's tricky and ugly
58 : /// to pass it everywhere as a part of `ComputeNodeParams`, so we use a
59 : /// global static variable.
60 0 : pub static BUILD_TAG: Lazy<String> = Lazy::new(|| {
61 0 : option_env!("BUILD_TAG")
62 0 : .unwrap_or(BUILD_TAG_DEFAULT)
63 0 : .to_string()
64 0 : });
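// Note: `option_env!` above bakes BUILD_TAG into the binary at *compile*
// time. For contrast, a hypothetical runtime override would look like this
// (a sketch, not used here):
//
//     let tag = std::env::var("BUILD_TAG")
//         .unwrap_or_else(|_| BUILD_TAG_DEFAULT.to_string());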
65 :
66 : /// Static configuration params that don't change after startup. These mostly
67 : /// come from the CLI args, or are derived from them.
68 : pub struct ComputeNodeParams {
69 : /// The ID of the compute
70 : pub compute_id: String,
71 : // Url type maintains proper escaping
72 : pub connstr: url::Url,
73 :
74 : pub resize_swap_on_bind: bool,
75 : pub set_disk_quota_for_fs: Option<String>,
76 :
77 : // VM monitor parameters
78 : #[cfg(target_os = "linux")]
79 : pub filecache_connstr: String,
80 : #[cfg(target_os = "linux")]
81 : pub cgroup: String,
82 : #[cfg(target_os = "linux")]
83 : pub vm_monitor_addr: String,
84 :
85 : pub pgdata: String,
86 : pub pgbin: String,
87 : pub pgversion: String,
88 :
89 : /// The port that the compute's external HTTP server listens on
90 : pub external_http_port: u16,
91 : /// The port that the compute's internal HTTP server listens on
92 : pub internal_http_port: u16,
93 :
94 : /// The address of the extension storage proxy gateway.
95 : pub ext_remote_storage: Option<String>,
96 : }
97 :
98 : /// Compute node info shared across several `compute_ctl` threads.
99 : pub struct ComputeNode {
100 : pub params: ComputeNodeParams,
101 :
102 : // We connect to Postgres from many different places, so build configs once
103 : // and reuse them where needed. These are derived from 'params.connstr'
104 : pub conn_conf: postgres::config::Config,
105 : pub tokio_conn_conf: tokio_postgres::config::Config,
106 :
107 : /// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
108 : /// To allow the HTTP API server to serve status requests while configuration
109 : /// is in progress, the lock should be held only for short periods of time to
110 : /// do a read or write, not for the whole configuration process.
111 : pub state: Mutex<ComputeState>,
112 : /// `Condvar` to allow notifying waiters about state changes.
113 : pub state_changed: Condvar,
114 :
115 : // key: ext_archive_name; value: (download start time, download_completed flag)
116 : pub ext_download_progress: RwLock<HashMap<String, (DateTime<Utc>, bool)>>,
117 : pub compute_ctl_config: ComputeCtlConfig,
118 : }
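// Illustrative waiter pattern for `state` + `state_changed` (a sketch; see
// `wait_spec()` below for a real use). Note that the mutex is released while
// the thread is parked in `wait()`:
//
//     let mut state = node.state.lock().unwrap();
//     while state.status != ComputeStatus::Running {
//         state = node.state_changed.wait(state).unwrap();
//     }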
119 :
120 : // store some metrics about download size that might impact startup time
121 : #[derive(Clone, Debug)]
122 : pub struct RemoteExtensionMetrics {
123 : num_ext_downloaded: u64,
124 : largest_ext_size: u64,
125 : total_ext_download_size: u64,
126 : }
127 :
128 : #[derive(Clone, Debug)]
129 : pub struct ComputeState {
130 : pub start_time: DateTime<Utc>,
131 : pub status: ComputeStatus,
132 : /// Timestamp of the last Postgres activity. It could be `None` if
133 : /// the compute hasn't been used since start.
134 : pub last_active: Option<DateTime<Utc>>,
135 : pub error: Option<String>,
136 :
137 : /// Compute spec. This can be received from the CLI or, more likely,
138 : /// passed by the control plane with a /configure HTTP request.
139 : pub pspec: Option<ParsedSpec>,
140 :
141 : /// If the spec is passed by a /configure request, 'startup_span' is the
142 : /// /configure request's tracing span. The main thread enters it when it
143 : /// processes the compute startup, so that the compute startup is considered
144 : /// to be part of the /configure request for tracing purposes.
145 : ///
146 : /// If the request handling thread/task called start_compute() directly,
147 : /// it would automatically be a child of the request handling span, and we
148 : /// wouldn't need this. But because we use the main thread to perform the
149 : /// startup, and the /configure task just waits for it to finish, we need to
150 : /// set up the span relationship ourselves.
151 : pub startup_span: Option<tracing::span::Span>,
152 :
153 : pub metrics: ComputeMetrics,
154 : }
155 :
156 : impl ComputeState {
157 0 : pub fn new() -> Self {
158 0 : Self {
159 0 : start_time: Utc::now(),
160 0 : status: ComputeStatus::Empty,
161 0 : last_active: None,
162 0 : error: None,
163 0 : pspec: None,
164 0 : startup_span: None,
165 0 : metrics: ComputeMetrics::default(),
166 0 : }
167 0 : }
168 :
169 0 : pub fn set_status(&mut self, status: ComputeStatus, state_changed: &Condvar) {
170 0 : let prev = self.status;
171 0 : info!("Changing compute status from {} to {}", prev, status);
172 0 : self.status = status;
173 0 : state_changed.notify_all();
174 0 :
175 0 : COMPUTE_CTL_UP.reset();
176 0 : COMPUTE_CTL_UP
177 0 : .with_label_values(&[&BUILD_TAG, status.to_string().as_str()])
178 0 : .set(1);
179 0 : }
180 :
181 0 : pub fn set_failed_status(&mut self, err: anyhow::Error, state_changed: &Condvar) {
182 0 : self.error = Some(format!("{err:?}"));
183 0 : self.set_status(ComputeStatus::Failed, state_changed);
184 0 : }
185 : }
186 :
187 : impl Default for ComputeState {
188 0 : fn default() -> Self {
189 0 : Self::new()
190 0 : }
191 : }
192 :
193 : #[derive(Clone, Debug)]
194 : pub struct ParsedSpec {
195 : pub spec: ComputeSpec,
196 : pub tenant_id: TenantId,
197 : pub timeline_id: TimelineId,
198 : pub pageserver_connstr: String,
199 : pub safekeeper_connstrings: Vec<String>,
200 : pub storage_auth_token: Option<String>,
201 : }
202 :
203 : impl TryFrom<ComputeSpec> for ParsedSpec {
204 : type Error = String;
205 0 : fn try_from(spec: ComputeSpec) -> Result<Self, String> {
206 : // Extract the options from the spec file that are needed to connect to
207 : // the storage system.
208 : //
209 : // For backwards-compatibility, the top-level fields in the spec file
210 : // may be empty. In that case, we need to dig them from the GUCs in the
211 : // cluster.settings field.
212 0 : let pageserver_connstr = spec
213 0 : .pageserver_connstring
214 0 : .clone()
215 0 : .or_else(|| spec.cluster.settings.find("neon.pageserver_connstring"))
216 0 : .ok_or("pageserver connstr should be provided")?;
217 0 : let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() {
218 0 : if matches!(spec.mode, ComputeMode::Primary) {
219 0 : spec.cluster
220 0 : .settings
221 0 : .find("neon.safekeepers")
222 0 : .ok_or("safekeeper connstrings should be provided")?
223 0 : .split(',')
224 0 : .map(|str| str.to_string())
225 0 : .collect()
226 : } else {
227 0 : vec![]
228 : }
229 : } else {
230 0 : spec.safekeeper_connstrings.clone()
231 : };
232 0 : let storage_auth_token = spec.storage_auth_token.clone();
233 0 : let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
234 0 : tenant_id
235 : } else {
236 0 : spec.cluster
237 0 : .settings
238 0 : .find("neon.tenant_id")
239 0 : .ok_or("tenant id should be provided")
240 0 : .map(|s| TenantId::from_str(&s))?
241 0 : .or(Err("invalid tenant id"))?
242 : };
243 0 : let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
244 0 : timeline_id
245 : } else {
246 0 : spec.cluster
247 0 : .settings
248 0 : .find("neon.timeline_id")
249 0 : .ok_or("timeline id should be provided")
250 0 : .map(|s| TimelineId::from_str(&s))?
251 0 : .or(Err("invalid timeline id"))?
252 : };
253 :
254 0 : Ok(ParsedSpec {
255 0 : spec,
256 0 : pageserver_connstr,
257 0 : safekeeper_connstrings,
258 0 : storage_auth_token,
259 0 : tenant_id,
260 0 : timeline_id,
261 0 : })
262 0 : }
263 : }
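// Illustrative fallback (a sketch): when the top-level spec fields are unset,
// the same values are read from GUC-style entries in `cluster.settings`, e.g.
// a setting named "neon.tenant_id" holding the hex tenant id. Both spellings
// produce an identical ParsedSpec.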
264 :
265 : /// If we are a VM, returns a [`Command`] that will run in the `neon-postgres`
266 : /// cgroup. Otherwise returns the default `Command::new(cmd)`
267 : ///
268 : /// This function should be used to start postgres, as it will start it in the
269 : /// neon-postgres cgroup if we are a VM. This allows autoscaling to control
270 : /// postgres' resource usage. The cgroup will exist in VMs because vm-builder
271 : /// creates it during the sysinit phase of its inittab.
272 0 : fn maybe_cgexec(cmd: &str) -> Command {
273 0 : // The cplane sets this env var for autoscaling computes.
274 0 : // Use `var_os` so we don't have to worry about the variable being valid
275 0 : // unicode. Should never be a concern, but just in case.
276 0 : if env::var_os("AUTOSCALING").is_some() {
277 0 : let mut command = Command::new("cgexec");
278 0 : command.args(["-g", "memory:neon-postgres"]);
279 0 : command.arg(cmd);
280 0 : command
281 : } else {
282 0 : Command::new(cmd)
283 : }
284 0 : }
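// Illustrative usage (a sketch; real callers pass `params.pgbin`, the paths
// below are hypothetical):
//
//     let mut cmd = maybe_cgexec("/usr/local/bin/postgres");
//     cmd.args(["-D", "/var/db/postgres/pgdata"]);
//     let child = cmd.spawn()?;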
285 :
286 : struct PostgresHandle {
287 : postgres: std::process::Child,
288 : log_collector: tokio::task::JoinHandle<Result<()>>,
289 : }
290 :
291 : impl PostgresHandle {
292 : /// Return the PID of the postgres (postmaster) process.
293 0 : fn pid(&self) -> Pid {
294 0 : Pid::from_raw(self.postgres.id() as i32)
295 0 : }
296 : }
297 :
298 : struct StartVmMonitorResult {
299 : #[cfg(target_os = "linux")]
300 : token: tokio_util::sync::CancellationToken,
301 : #[cfg(target_os = "linux")]
302 : vm_monitor: Option<tokio::task::JoinHandle<Result<()>>>,
303 : }
304 :
305 : impl ComputeNode {
306 0 : pub fn new(params: ComputeNodeParams, config: ComputeConfig) -> Result<Self> {
307 0 : let connstr = params.connstr.as_str();
308 0 : let conn_conf = postgres::config::Config::from_str(connstr)
309 0 : .context("cannot build postgres config from connstr")?;
310 0 : let tokio_conn_conf = tokio_postgres::config::Config::from_str(connstr)
311 0 : .context("cannot build tokio postgres config from connstr")?;
312 :
313 0 : let mut new_state = ComputeState::new();
314 0 : if let Some(spec) = config.spec {
315 0 : let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow::anyhow!(msg))?;
316 0 : new_state.pspec = Some(pspec);
317 0 : }
318 :
319 0 : Ok(ComputeNode {
320 0 : params,
321 0 : conn_conf,
322 0 : tokio_conn_conf,
323 0 : state: Mutex::new(new_state),
324 0 : state_changed: Condvar::new(),
325 0 : ext_download_progress: RwLock::new(HashMap::new()),
326 0 : compute_ctl_config: config.compute_ctl_config,
327 0 : })
328 0 : }
329 :
330 : /// Top-level control flow of compute_ctl. Returns a process exit code we should
331 : /// exit with.
332 0 : pub fn run(self) -> Result<Option<i32>> {
333 0 : let this = Arc::new(self);
334 0 :
335 0 : let cli_spec = this.state.lock().unwrap().pspec.clone();
336 0 :
337 0 : // If this is a pooled VM, prewarm before starting HTTP server and becoming
338 0 : // available for binding. Prewarming helps Postgres start quicker later,
339 0 : // because QEMU will already have its memory allocated from the host, and
340 0 : // the necessary binaries will already be cached.
341 0 : if cli_spec.is_none() {
342 0 : this.prewarm_postgres()?;
343 0 : }
344 :
345 : // Set the up metric with Empty status before starting the HTTP server.
346 : // That way on the first metric scrape, an external observer will see us
347 : // as 'up' and 'empty' (unless the compute was started with a spec or
348 : // already configured by control plane).
349 0 : COMPUTE_CTL_UP
350 0 : .with_label_values(&[&BUILD_TAG, ComputeStatus::Empty.to_string().as_str()])
351 0 : .set(1);
352 0 :
353 0 : // Launch the external HTTP server first, so that we can serve control plane
354 0 : // requests while configuration is still in progress.
355 0 : crate::http::server::Server::External {
356 0 : port: this.params.external_http_port,
357 0 : config: this.compute_ctl_config.clone(),
358 0 : compute_id: this.params.compute_id.clone(),
359 0 : }
360 0 : .launch(&this);
361 0 :
362 0 : // The internal HTTP server could be launched later, but there isn't much
363 0 : // sense in waiting.
364 0 : crate::http::server::Server::Internal {
365 0 : port: this.params.internal_http_port,
366 0 : }
367 0 : .launch(&this);
368 :
369 : // If we got a spec from the CLI already, use that. Otherwise wait for the
370 : // control plane to pass it to us with a /configure HTTP request
371 0 : let pspec = if let Some(cli_spec) = cli_spec {
372 0 : cli_spec
373 : } else {
374 0 : this.wait_spec()?
375 : };
376 :
377 0 : launch_lsn_lease_bg_task_for_static(&this);
378 0 :
379 0 : // We have a spec, start the compute
380 0 : let mut delay_exit = false;
381 0 : let mut vm_monitor = None;
382 0 : let mut pg_process: Option<PostgresHandle> = None;
383 0 :
384 0 : match this.start_compute(&mut pg_process) {
385 0 : Ok(()) => {
386 0 : // Success! Launch remaining services (just vm-monitor currently)
387 0 : vm_monitor =
388 0 : Some(this.start_vm_monitor(pspec.spec.disable_lfc_resizing.unwrap_or(false)));
389 0 : }
390 0 : Err(err) => {
391 0 : // Something went wrong with the startup. Log it and expose the error to
392 0 : // HTTP status requests.
393 0 : error!("could not start the compute node: {:#}", err);
394 0 : this.set_failed_status(err);
395 0 : delay_exit = true;
396 :
397 : // If the error happened after starting PostgreSQL, kill it
398 0 : if let Some(ref pg_process) = pg_process {
399 0 : kill(pg_process.pid(), Signal::SIGQUIT).ok();
400 0 : }
401 : }
402 : }
403 :
404 : // If startup was successful, or if it failed after PostgreSQL had
405 : // already been started, PostgreSQL is now running. Wait until it exits.
406 0 : let exit_code = if let Some(pg_handle) = pg_process {
407 0 : let exit_status = this.wait_postgres(pg_handle);
408 0 : info!("Postgres exited with code {}, shutting down", exit_status);
409 0 : exit_status.code()
410 : } else {
411 0 : None
412 : };
413 :
414 : // Terminate the vm_monitor so it releases the file watcher on
415 : // /sys/fs/cgroup/neon-postgres.
416 : // Note: the vm-monitor only runs on linux because it requires cgroups.
417 0 : if let Some(vm_monitor) = vm_monitor {
418 : cfg_if::cfg_if! {
419 : if #[cfg(target_os = "linux")] {
420 : // Kills all threads spawned by the monitor
421 0 : vm_monitor.token.cancel();
422 0 : if let Some(handle) = vm_monitor.vm_monitor {
423 0 : // Kills the actual task running the monitor
424 0 : handle.abort();
425 0 : }
426 : } else {
427 : _ = vm_monitor; // appease unused lint on macOS
428 : }
429 : }
430 0 : }
431 :
432 : // Reap the postgres process
433 0 : delay_exit |= this.cleanup_after_postgres_exit()?;
434 :
435 : // If launch failed, keep serving HTTP requests for a while, so the cloud
436 : // control plane can get the actual error.
437 0 : if delay_exit {
438 0 : info!("giving control plane 30s to collect the error before shutdown");
439 0 : std::thread::sleep(Duration::from_secs(30));
440 0 : }
441 0 : Ok(exit_code)
442 0 : }
443 :
444 0 : pub fn wait_spec(&self) -> Result<ParsedSpec> {
445 0 : info!("no compute spec provided, waiting");
446 0 : let mut state = self.state.lock().unwrap();
447 0 : while state.status != ComputeStatus::ConfigurationPending {
448 0 : state = self.state_changed.wait(state).unwrap();
449 0 : }
450 :
451 0 : info!("got spec, continue configuration");
452 0 : let spec = state.pspec.as_ref().unwrap().clone();
453 0 :
454 0 : // Record for how long we slept waiting for the spec.
455 0 : let now = Utc::now();
456 0 : state.metrics.wait_for_spec_ms = now
457 0 : .signed_duration_since(state.start_time)
458 0 : .to_std()
459 0 : .unwrap()
460 0 : .as_millis() as u64;
461 0 :
462 0 : // Reset start time, so that the total startup time that is calculated later will
463 0 : // not include the time that we waited for the spec.
464 0 : state.start_time = now;
465 0 :
466 0 : Ok(spec)
467 0 : }
468 :
469 : /// Start compute.
470 : ///
471 : /// Prerequisites:
472 : /// - the compute spec has been placed in self.state.pspec
473 : ///
474 : /// On success:
475 : /// - status is set to ComputeStatus::Running
476 : /// - the `pg_handle` out-parameter is set
477 : ///
478 : /// On error:
479 : /// - status is left in ComputeStatus::Init. The caller is responsible for setting it to Failed
480 : /// - if Postgres was started before the fatal error happened, self.running_postgres is
481 : /// set. The caller is responsible for killing it.
482 : ///
483 : /// Note that this is in the critical path of a compute cold start. Keep this fast.
484 : /// Try to do things concurrently, to hide the latencies.
485 0 : fn start_compute(self: &Arc<Self>, pg_handle: &mut Option<PostgresHandle>) -> Result<()> {
486 0 : let compute_state: ComputeState;
487 0 :
488 0 : let start_compute_span;
489 0 : let _this_entered;
490 0 : {
491 0 : let mut state_guard = self.state.lock().unwrap();
492 :
493 : // Create a tracing span for the startup operation.
494 : //
495 : // We could otherwise just annotate the function with #[instrument], but if
496 : // we're being configured from a /configure HTTP request, we want the
497 : // startup to be considered part of the /configure request.
498 : //
499 : // Similarly, if a trace ID was passed in env variables, attach it to the span.
500 0 : start_compute_span = {
501 : // Temporarily enter the parent span, so that the new span becomes its child.
502 0 : if let Some(p) = state_guard.startup_span.take() {
503 0 : let _parent_entered = p.entered();
504 0 : tracing::info_span!("start_compute")
505 0 : } else if let Some(otel_context) = startup_context_from_env() {
506 : use tracing_opentelemetry::OpenTelemetrySpanExt;
507 0 : let span = tracing::info_span!("start_compute");
508 0 : span.set_parent(otel_context);
509 0 : span
510 : } else {
511 0 : tracing::info_span!("start_compute")
512 : }
513 : };
514 0 : _this_entered = start_compute_span.enter();
515 0 :
516 0 : state_guard.set_status(ComputeStatus::Init, &self.state_changed);
517 0 : compute_state = state_guard.clone()
518 0 : }
519 0 :
520 0 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
521 0 : info!(
522 0 : "starting compute for project {}, operation {}, tenant {}, timeline {}, project {}, branch {}, endpoint {}, features {:?}, spec.remote_extensions {:?}",
523 0 : pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None"),
524 0 : pspec.spec.operation_uuid.as_deref().unwrap_or("None"),
525 0 : pspec.tenant_id,
526 0 : pspec.timeline_id,
527 0 : pspec.spec.project_id.as_deref().unwrap_or("None"),
528 0 : pspec.spec.branch_id.as_deref().unwrap_or("None"),
529 0 : pspec.spec.endpoint_id.as_deref().unwrap_or("None"),
530 : pspec.spec.features,
531 : pspec.spec.remote_extensions,
532 : );
533 :
534 : ////// PRE-STARTUP PHASE: things that need to be finished before we start the Postgres process
535 :
536 : // Collect all the tasks that must finish here
537 0 : let mut pre_tasks = tokio::task::JoinSet::new();
538 0 :
539 0 : // Make sure TLS certificates are properly loaded and in the right place.
540 0 : if self.compute_ctl_config.tls.is_some() {
541 0 : let this = self.clone();
542 0 : pre_tasks.spawn(async move {
543 0 : this.watch_cert_for_changes().await;
544 :
545 0 : Ok::<(), anyhow::Error>(())
546 0 : });
547 0 : }
548 :
549 : // If there are any remote extensions in shared_preload_libraries, start downloading them
550 0 : if pspec.spec.remote_extensions.is_some() {
551 0 : let (this, spec) = (self.clone(), pspec.spec.clone());
552 0 : pre_tasks.spawn(async move {
553 0 : this.download_preload_extensions(&spec)
554 0 : .in_current_span()
555 0 : .await
556 0 : });
557 0 : }
558 :
559 : // Prepare pgdata directory. This downloads the basebackup, among other things.
560 0 : {
561 0 : let (this, cs) = (self.clone(), compute_state.clone());
562 0 : pre_tasks.spawn_blocking_child(move || this.prepare_pgdata(&cs));
563 0 : }
564 :
565 : // Resize swap to the desired size if the compute spec says so
566 0 : if let (Some(size_bytes), true) =
567 0 : (pspec.spec.swap_size_bytes, self.params.resize_swap_on_bind)
568 0 : {
569 0 : pre_tasks.spawn_blocking_child(move || {
570 0 : // To avoid 'swapoff' hitting postgres startup, we need to run resize-swap to completion
571 0 : // *before* starting postgres.
572 0 : //
573 0 : // In theory, we could do this asynchronously if SkipSwapon was enabled for VMs, but this
574 0 : // carries a risk of introducing hard-to-debug issues - e.g. if postgres sometimes gets
575 0 : // OOM-killed during startup because swap wasn't available yet.
576 0 : resize_swap(size_bytes).context("failed to resize swap")?;
577 0 : let size_mib = size_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
578 0 : info!(%size_bytes, %size_mib, "resized swap");
579 :
580 0 : Ok::<(), anyhow::Error>(())
581 0 : });
582 0 : }
583 :
584 : // Set disk quota if the compute spec says so
585 0 : if let (Some(disk_quota_bytes), Some(disk_quota_fs_mountpoint)) = (
586 0 : pspec.spec.disk_quota_bytes,
587 0 : self.params.set_disk_quota_for_fs.as_ref(),
588 0 : ) {
589 0 : let disk_quota_fs_mountpoint = disk_quota_fs_mountpoint.clone();
590 0 : pre_tasks.spawn_blocking_child(move || {
591 0 : set_disk_quota(disk_quota_bytes, &disk_quota_fs_mountpoint)
592 0 : .context("failed to set disk quota")?;
593 0 : let size_mib = disk_quota_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
594 0 : info!(%disk_quota_bytes, %size_mib, "set disk quota");
595 :
596 0 : Ok::<(), anyhow::Error>(())
597 0 : });
598 0 : }
599 :
600 : // tune pgbouncer
601 0 : if let Some(pgbouncer_settings) = &pspec.spec.pgbouncer_settings {
602 0 : info!("tuning pgbouncer");
603 :
604 0 : let pgbouncer_settings = pgbouncer_settings.clone();
605 0 : let tls_config = self.compute_ctl_config.tls.clone();
606 0 :
607 0 : // Spawn a background task to do the tuning,
608 0 : // so that we don't block the main thread that starts Postgres.
609 0 : let _handle = tokio::spawn(async move {
610 0 : let res = tune_pgbouncer(pgbouncer_settings, tls_config).await;
611 0 : if let Err(err) = res {
612 0 : error!("error while tuning pgbouncer: {err:?}");
613 : // Continue with the startup anyway
614 0 : }
615 0 : });
616 0 : }
617 :
618 : // configure local_proxy
619 0 : if let Some(local_proxy) = &pspec.spec.local_proxy_config {
620 0 : info!("configuring local_proxy");
621 :
622 : // Spawn a background task to do the configuration,
623 : // so that we don't block the main thread that starts Postgres.
624 0 : let local_proxy = local_proxy.clone();
625 0 : let _handle = tokio::spawn(async move {
626 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
627 0 : error!("error while configuring local_proxy: {err:?}");
628 : // Continue with the startup anyway
629 0 : }
630 0 : });
631 0 : }
632 :
633 : // Configure and start rsyslog for compliance audit logging
634 0 : match pspec.spec.audit_log_level {
635 : ComputeAudit::Hipaa | ComputeAudit::Extended | ComputeAudit::Full => {
636 0 : let remote_endpoint =
637 0 : std::env::var("AUDIT_LOGGING_ENDPOINT").unwrap_or("".to_string());
638 0 : if remote_endpoint.is_empty() {
639 0 : anyhow::bail!("AUDIT_LOGGING_ENDPOINT is empty");
640 0 : }
641 0 :
642 0 : let log_directory_path = Path::new(&self.params.pgdata).join("log");
643 0 : let log_directory_path = log_directory_path.to_string_lossy().to_string();
644 0 : configure_audit_rsyslog(log_directory_path.clone(), "hipaa", &remote_endpoint)?;
645 :
646 : // Launch a background task to clean up the audit logs
647 0 : launch_pgaudit_gc(log_directory_path);
648 : }
649 0 : _ => {}
650 : }
651 :
652 : // Configure and start rsyslog for Postgres logs export
653 0 : let conf = PostgresLogsRsyslogConfig::new(pspec.spec.logs_export_host.as_deref());
654 0 : configure_postgres_logs_export(conf)?;
655 :
656 : // Launch remaining service threads
657 0 : let _monitor_handle = launch_monitor(self);
658 0 : let _configurator_handle = launch_configurator(self);
659 0 :
660 0 : // Wait for all the pre-tasks to finish before starting postgres
661 0 : let rt = tokio::runtime::Handle::current();
662 0 : while let Some(res) = rt.block_on(pre_tasks.join_next()) {
663 0 : res??;
664 : }
665 :
666 : ////// START POSTGRES
667 0 : let start_time = Utc::now();
668 0 : let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
669 0 : let postmaster_pid = pg_process.pid();
670 0 : *pg_handle = Some(pg_process);
671 0 :
672 0 : // If this is a primary endpoint, perform some post-startup configuration before
673 0 : // opening it up for the world.
674 0 : let config_time = Utc::now();
675 0 : if pspec.spec.mode == ComputeMode::Primary {
676 0 : self.configure_as_primary(&compute_state)?;
677 :
678 0 : let conf = self.get_tokio_conn_conf(None);
679 0 : tokio::task::spawn(async {
680 0 : let res = get_installed_extensions(conf).await;
681 0 : match res {
682 0 : Ok(extensions) => {
683 0 : info!(
684 0 : "[NEON_EXT_STAT] {}",
685 0 : serde_json::to_string(&extensions)
686 0 : .expect("failed to serialize extensions list")
687 : );
688 : }
689 0 : Err(err) => error!("could not get installed extensions: {err:?}"),
690 : }
691 0 : });
692 0 : }
693 :
694 : // All done!
695 0 : let startup_end_time = Utc::now();
696 0 : let metrics = {
697 0 : let mut state = self.state.lock().unwrap();
698 0 : state.metrics.start_postgres_ms = config_time
699 0 : .signed_duration_since(start_time)
700 0 : .to_std()
701 0 : .unwrap()
702 0 : .as_millis() as u64;
703 0 : state.metrics.config_ms = startup_end_time
704 0 : .signed_duration_since(config_time)
705 0 : .to_std()
706 0 : .unwrap()
707 0 : .as_millis() as u64;
708 0 : state.metrics.total_startup_ms = startup_end_time
709 0 : .signed_duration_since(compute_state.start_time)
710 0 : .to_std()
711 0 : .unwrap()
712 0 : .as_millis() as u64;
713 0 : state.metrics.clone()
714 0 : };
715 0 : self.set_status(ComputeStatus::Running);
716 0 :
717 0 : // Log metrics so that we can search for slow operations in logs
718 0 : info!(?metrics, postmaster_pid = %postmaster_pid, "compute start finished");
719 :
720 0 : Ok(())
721 0 : }
722 :
723 : #[instrument(skip_all)]
724 : async fn download_preload_extensions(&self, spec: &ComputeSpec) -> Result<()> {
725 : let remote_extensions = if let Some(remote_extensions) = &spec.remote_extensions {
726 : remote_extensions
727 : } else {
728 : return Ok(());
729 : };
730 :
731 : // First, create control files for all available extensions
732 : extension_server::create_control_files(remote_extensions, &self.params.pgbin);
733 :
734 : let library_load_start_time = Utc::now();
735 : let remote_ext_metrics = self.prepare_preload_libraries(spec).await?;
736 :
737 : let library_load_time = Utc::now()
738 : .signed_duration_since(library_load_start_time)
739 : .to_std()
740 : .unwrap()
741 : .as_millis() as u64;
742 : let mut state = self.state.lock().unwrap();
743 : state.metrics.load_ext_ms = library_load_time;
744 : state.metrics.num_ext_downloaded = remote_ext_metrics.num_ext_downloaded;
745 : state.metrics.largest_ext_size = remote_ext_metrics.largest_ext_size;
746 : state.metrics.total_ext_download_size = remote_ext_metrics.total_ext_download_size;
747 : info!(
748 : "Loading shared_preload_libraries took {:?}ms",
749 : library_load_time
750 : );
751 : info!("{:?}", remote_ext_metrics);
752 :
753 : Ok(())
754 : }
755 :
756 : /// Start the vm-monitor if directed to. The vm-monitor only runs on linux
757 : /// because it requires cgroups.
758 0 : fn start_vm_monitor(&self, disable_lfc_resizing: bool) -> StartVmMonitorResult {
759 : cfg_if::cfg_if! {
760 : if #[cfg(target_os = "linux")] {
761 : use std::env;
762 : use tokio_util::sync::CancellationToken;
763 :
764 : // This token is used internally by the monitor to clean up all threads
765 0 : let token = CancellationToken::new();
766 :
767 : // don't pass postgres connection string to vm-monitor if we don't want it to resize LFC
768 0 : let pgconnstr = if disable_lfc_resizing {
769 0 : None
770 : } else {
771 0 : Some(self.params.filecache_connstr.clone())
772 : };
773 :
774 0 : let vm_monitor = if env::var_os("AUTOSCALING").is_some() {
775 0 : let vm_monitor = tokio::spawn(vm_monitor::start(
776 0 : Box::leak(Box::new(vm_monitor::Args {
777 0 : cgroup: Some(self.params.cgroup.clone()),
778 0 : pgconnstr,
779 0 : addr: self.params.vm_monitor_addr.clone(),
780 0 : })),
781 0 : token.clone(),
782 0 : ));
783 0 : Some(vm_monitor)
784 : } else {
785 0 : None
786 : };
787 0 : StartVmMonitorResult { token, vm_monitor }
788 0 : } else {
789 0 : _ = disable_lfc_resizing; // appease unused lint on macOS
790 0 : StartVmMonitorResult { }
791 0 : }
792 0 : }
793 0 : }
794 :
795 0 : fn cleanup_after_postgres_exit(&self) -> Result<bool> {
796 0 : // Maybe sync safekeepers again, to speed up next startup
797 0 : let compute_state = self.state.lock().unwrap().clone();
798 0 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
799 0 : if matches!(pspec.spec.mode, compute_api::spec::ComputeMode::Primary) {
800 0 : info!("syncing safekeepers on shutdown");
801 0 : let storage_auth_token = pspec.storage_auth_token.clone();
802 0 : let lsn = self.sync_safekeepers(storage_auth_token)?;
803 0 : info!("synced safekeepers at lsn {lsn}");
804 0 : }
805 :
806 0 : let mut delay_exit = false;
807 0 : let mut state = self.state.lock().unwrap();
808 0 : if state.status == ComputeStatus::TerminationPending {
809 0 : state.status = ComputeStatus::Terminated;
810 0 : self.state_changed.notify_all();
811 0 : // we were asked to terminate gracefully, don't exit to avoid restart
812 0 : delay_exit = true
813 0 : }
814 0 : drop(state);
815 :
816 0 : if let Err(err) = self.check_for_core_dumps() {
817 0 : error!("error while checking for core dumps: {err:?}");
818 0 : }
819 :
820 0 : Ok(delay_exit)
821 0 : }
822 :
823 : /// Check whether the compute node has the corresponding feature enabled.
824 0 : pub fn has_feature(&self, feature: ComputeFeature) -> bool {
825 0 : let state = self.state.lock().unwrap();
826 :
827 0 : if let Some(s) = state.pspec.as_ref() {
828 0 : s.spec.features.contains(&feature)
829 : } else {
830 0 : false
831 : }
832 0 : }
833 :
834 0 : pub fn set_status(&self, status: ComputeStatus) {
835 0 : let mut state = self.state.lock().unwrap();
836 0 : state.set_status(status, &self.state_changed);
837 0 : }
838 :
839 0 : pub fn set_failed_status(&self, err: anyhow::Error) {
840 0 : let mut state = self.state.lock().unwrap();
841 0 : state.set_failed_status(err, &self.state_changed);
842 0 : }
843 :
844 0 : pub fn get_status(&self) -> ComputeStatus {
845 0 : self.state.lock().unwrap().status
846 0 : }
847 :
848 0 : pub fn get_timeline_id(&self) -> Option<TimelineId> {
849 0 : self.state
850 0 : .lock()
851 0 : .unwrap()
852 0 : .pspec
853 0 : .as_ref()
854 0 : .map(|s| s.timeline_id)
855 0 : }
856 :
857 : // Remove the `pgdata` directory and create it again with the right permissions.
858 0 : fn create_pgdata(&self) -> Result<()> {
859 0 : // Ignore removal error, likely it is a 'No such file or directory (os error 2)'.
860 0 : // If it is something different then create_dir() will error out anyway.
861 0 : let pgdata = &self.params.pgdata;
862 0 : let _ok = fs::remove_dir_all(pgdata);
863 0 : fs::create_dir(pgdata)?;
864 0 : fs::set_permissions(pgdata, fs::Permissions::from_mode(0o700))?;
865 :
866 0 : Ok(())
867 0 : }
868 :
869 : // Get a basebackup from the pageserver over a libpq connection using `connstr`,
870 : // and unpack it into the `pgdata` directory, overwriting all its previous content.
871 : #[instrument(skip_all, fields(%lsn))]
872 : fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
873 : let spec = compute_state.pspec.as_ref().expect("spec must be set");
874 : let start_time = Instant::now();
875 :
876 : let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
877 : let mut config = postgres::Config::from_str(shard0_connstr)?;
878 :
879 : // Use the storage auth token from the config file, if given.
880 : // Note: this overrides any password set in the connection string.
881 : if let Some(storage_auth_token) = &spec.storage_auth_token {
882 : info!("Got storage auth token from spec file");
883 : config.password(storage_auth_token);
884 : } else {
885 : info!("Storage auth token not set");
886 : }
887 :
888 : config.application_name("compute_ctl");
889 : if let Some(spec) = &compute_state.pspec {
890 : config.options(&format!(
891 : "-c neon.compute_mode={}",
892 : spec.spec.mode.to_type_str()
893 : ));
894 : }
895 :
896 : // Connect to pageserver
897 : let mut client = config.connect(NoTls)?;
898 : let pageserver_connect_micros = start_time.elapsed().as_micros() as u64;
899 :
900 : let basebackup_cmd = match lsn {
901 : Lsn(0) => {
902 : if spec.spec.mode != ComputeMode::Primary {
903 : format!(
904 : "basebackup {} {} --gzip --replica",
905 : spec.tenant_id, spec.timeline_id
906 : )
907 : } else {
908 : format!("basebackup {} {} --gzip", spec.tenant_id, spec.timeline_id)
909 : }
910 : }
911 : _ => {
912 : if spec.spec.mode != ComputeMode::Primary {
913 : format!(
914 : "basebackup {} {} {} --gzip --replica",
915 : spec.tenant_id, spec.timeline_id, lsn
916 : )
917 : } else {
918 : format!(
919 : "basebackup {} {} {} --gzip",
920 : spec.tenant_id, spec.timeline_id, lsn
921 : )
922 : }
923 : }
924 : };
925 :
926 : let copyreader = client.copy_out(basebackup_cmd.as_str())?;
927 : let mut measured_reader = MeasuredReader::new(copyreader);
928 : let mut bufreader = std::io::BufReader::new(&mut measured_reader);
929 :
930 : // Read the archive directly from the `CopyOutReader`
931 : //
932 : // Set `ignore_zeros` so that unpack() reads all the Copy data and
933 : // doesn't stop at the end-of-archive marker. Otherwise, if the server
934 : // sends an Error after finishing the tarball, we will not notice it.
935 : let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut bufreader));
936 : ar.set_ignore_zeros(true);
937 : ar.unpack(&self.params.pgdata)?;
938 :
939 : // Report metrics
940 : let mut state = self.state.lock().unwrap();
941 : state.metrics.pageserver_connect_micros = pageserver_connect_micros;
942 : state.metrics.basebackup_bytes = measured_reader.get_byte_count() as u64;
943 : state.metrics.basebackup_ms = start_time.elapsed().as_millis() as u64;
944 : Ok(())
945 : }
946 :
947 : // Gets the basebackup in a retry loop
948 : #[instrument(skip_all, fields(%lsn))]
949 : pub fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
950 : let mut retry_period_ms = 500.0;
951 : let mut attempts = 0;
952 : const DEFAULT_ATTEMPTS: u16 = 10;
953 : #[cfg(feature = "testing")]
954 : let max_attempts = if let Ok(v) = env::var("NEON_COMPUTE_TESTING_BASEBACKUP_RETRIES") {
955 : u16::from_str(&v).unwrap()
956 : } else {
957 : DEFAULT_ATTEMPTS
958 : };
959 : #[cfg(not(feature = "testing"))]
960 : let max_attempts = DEFAULT_ATTEMPTS;
961 : loop {
962 : let result = self.try_get_basebackup(compute_state, lsn);
963 : match result {
964 : Ok(_) => {
965 : return result;
966 : }
967 : Err(ref e) if attempts < max_attempts => {
968 : warn!(
969 : "Failed to get basebackup: {} (attempt {}/{})",
970 : e, attempts, max_attempts
971 : );
972 : std::thread::sleep(std::time::Duration::from_millis(retry_period_ms as u64));
973 : retry_period_ms *= 1.5;
974 : }
975 : Err(_) => {
976 : return result;
977 : }
978 : }
979 : attempts += 1;
980 : }
981 : }
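// Backoff arithmetic for the retry loop above (assuming DEFAULT_ATTEMPTS = 10):
// the sleeps are 500ms * 1.5^k for k = 0..9, i.e. 500, 750, 1125, ... ms, for a
// worst-case total of 1000 * (1.5^10 - 1) ≈ 56.7 seconds before the final error
// is returned.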
982 :
983 0 : pub async fn check_safekeepers_synced_async(
984 0 : &self,
985 0 : compute_state: &ComputeState,
986 0 : ) -> Result<Option<Lsn>> {
987 0 : // Construct a connection config for each safekeeper
988 0 : let pspec: ParsedSpec = compute_state
989 0 : .pspec
990 0 : .as_ref()
991 0 : .expect("spec must be set")
992 0 : .clone();
993 0 : let sk_connstrs: Vec<String> = pspec.safekeeper_connstrings.clone();
994 0 : let sk_configs = sk_connstrs.into_iter().map(|connstr| {
995 0 : // Format connstr
996 0 : let id = connstr.clone();
997 0 : let connstr = format!("postgresql://no_user@{}", connstr);
998 0 : let options = format!(
999 0 : "-c timeline_id={} tenant_id={}",
1000 0 : pspec.timeline_id, pspec.tenant_id
1001 0 : );
1002 0 :
1003 0 : // Construct client
1004 0 : let mut config = tokio_postgres::Config::from_str(&connstr).unwrap();
1005 0 : config.options(&options);
1006 0 : if let Some(storage_auth_token) = pspec.storage_auth_token.clone() {
1007 0 : config.password(storage_auth_token);
1008 0 : }
1009 :
1010 0 : (id, config)
1011 0 : });
1012 0 :
1013 0 : // Create task set to query all safekeepers
1014 0 : let mut tasks = FuturesUnordered::new();
1015 0 : let quorum = sk_configs.len() / 2 + 1;
1016 0 : for (id, config) in sk_configs {
1017 0 : let timeout = tokio::time::Duration::from_millis(100);
1018 0 : let task = tokio::time::timeout(timeout, ping_safekeeper(id, config));
1019 0 : tasks.push(tokio::spawn(task));
1020 0 : }
1021 :
1022 : // Get a quorum of responses or errors
1023 0 : let mut responses = Vec::new();
1024 0 : let mut join_errors = Vec::new();
1025 0 : let mut task_errors = Vec::new();
1026 0 : let mut timeout_errors = Vec::new();
1027 0 : while let Some(response) = tasks.next().await {
1028 0 : match response {
1029 0 : Ok(Ok(Ok(r))) => responses.push(r),
1030 0 : Ok(Ok(Err(e))) => task_errors.push(e),
1031 0 : Ok(Err(e)) => timeout_errors.push(e),
1032 0 : Err(e) => join_errors.push(e),
1033 : };
1034 0 : if responses.len() >= quorum {
1035 0 : break;
1036 0 : }
1037 0 : if join_errors.len() + task_errors.len() + timeout_errors.len() >= quorum {
1038 0 : break;
1039 0 : }
1040 : }
1041 :
1042 : // In case of error, log and fail the check, but don't crash.
1043 : // We're playing it safe because these errors could be transient
1044 : // and we don't yet retry. Also being careful here allows us to
1045 : // be backwards compatible with safekeepers that don't have the
1046 : // TIMELINE_STATUS API yet.
1047 0 : if responses.len() < quorum {
1048 0 : error!(
1049 0 : "failed sync safekeepers check {:?} {:?} {:?}",
1050 : join_errors, task_errors, timeout_errors
1051 : );
1052 0 : return Ok(None);
1053 0 : }
1054 0 :
1055 0 : Ok(check_if_synced(responses))
1056 0 : }
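// Quorum sketch for the check above: with quorum = n / 2 + 1, three
// safekeepers need 2 in-sync responses and five need 3; the loop exits early
// once either the responses or the combined errors reach that threshold.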
1057 :
1058 : // Fast path for sync_safekeepers. If they're already synced we get the LSN
1059 : // in one roundtrip. If not, we should do a full sync_safekeepers.
1060 : #[instrument(skip_all)]
1061 : pub fn check_safekeepers_synced(&self, compute_state: &ComputeState) -> Result<Option<Lsn>> {
1062 : let start_time = Utc::now();
1063 :
1064 : let rt = tokio::runtime::Handle::current();
1065 : let result = rt.block_on(self.check_safekeepers_synced_async(compute_state));
1066 :
1067 : // Record runtime
1068 : self.state.lock().unwrap().metrics.sync_sk_check_ms = Utc::now()
1069 : .signed_duration_since(start_time)
1070 : .to_std()
1071 : .unwrap()
1072 : .as_millis() as u64;
1073 : result
1074 : }
1075 :
1076 : // Run `postgres` in a special mode with the `--sync-safekeepers` argument
1077 : // and return the reported LSN back to the caller.
1078 : #[instrument(skip_all)]
1079 : pub fn sync_safekeepers(&self, storage_auth_token: Option<String>) -> Result<Lsn> {
1080 : let start_time = Utc::now();
1081 :
1082 : let mut sync_handle = maybe_cgexec(&self.params.pgbin)
1083 : .args(["--sync-safekeepers"])
1084 : .env("PGDATA", &self.params.pgdata) // we cannot use -D in this mode
1085 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
1086 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
1087 : } else {
1088 : vec![]
1089 : })
1090 : .stdout(Stdio::piped())
1091 : .stderr(Stdio::piped())
1092 : .spawn()
1093 : .expect("postgres --sync-safekeepers failed to start");
1094 : SYNC_SAFEKEEPERS_PID.store(sync_handle.id(), Ordering::SeqCst);
1095 :
1096 : // `postgres --sync-safekeepers` will print all log output to stderr and the
1097 : // final LSN to stdout. So we keep stdout to collect the LSN, while stderr
1098 : // logs are collected in a child thread.
1099 : let stderr = sync_handle
1100 : .stderr
1101 : .take()
1102 : .expect("stderr should be captured");
1103 : let logs_handle = handle_postgres_logs(stderr);
1104 :
1105 : let sync_output = sync_handle
1106 : .wait_with_output()
1107 : .expect("postgres --sync-safekeepers failed");
1108 : SYNC_SAFEKEEPERS_PID.store(0, Ordering::SeqCst);
1109 :
1110 : // Process has exited, so we can join the logs thread.
1111 : let _ = tokio::runtime::Handle::current()
1112 : .block_on(logs_handle)
1113 0 : .map_err(|e| tracing::error!("log task panicked: {:?}", e));
1114 :
1115 : if !sync_output.status.success() {
1116 : anyhow::bail!(
1117 : "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
1118 : sync_output.status,
1119 : String::from_utf8(sync_output.stdout)
1120 : .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
1121 : );
1122 : }
1123 :
1124 : self.state.lock().unwrap().metrics.sync_safekeepers_ms = Utc::now()
1125 : .signed_duration_since(start_time)
1126 : .to_std()
1127 : .unwrap()
1128 : .as_millis() as u64;
1129 :
1130 : let lsn = Lsn::from_str(String::from_utf8(sync_output.stdout)?.trim())?;
1131 :
1132 : Ok(lsn)
1133 : }
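// Illustrative output handling (a sketch): on success the child prints the
// flush LSN as text on stdout, so the parse above boils down to e.g.
//
//     let lsn = Lsn::from_str("0/229C3B0")?;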
1134 :
1135 : /// Do all the preparations like PGDATA directory creation, configuration,
1136 : /// safekeepers sync, basebackup, etc.
1137 : #[instrument(skip_all)]
1138 : pub fn prepare_pgdata(&self, compute_state: &ComputeState) -> Result<()> {
1139 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
1140 : let spec = &pspec.spec;
1141 : let pgdata_path = Path::new(&self.params.pgdata);
1142 :
1143 : // Remove/create an empty pgdata directory and put configuration there.
1144 : self.create_pgdata()?;
1145 : config::write_postgres_conf(
1146 : pgdata_path,
1147 : &pspec.spec,
1148 : self.params.internal_http_port,
1149 : &self.compute_ctl_config.tls,
1150 : )?;
1151 :
1152 : // Syncing safekeepers is only safe with primary nodes: if a primary
1153 : // is already connected it will be kicked out, so a secondary (standby)
1154 : // cannot sync safekeepers.
1155 : let lsn = match spec.mode {
1156 : ComputeMode::Primary => {
1157 : info!("checking if safekeepers are synced");
1158 : let lsn = if let Ok(Some(lsn)) = self.check_safekeepers_synced(compute_state) {
1159 : lsn
1160 : } else {
1161 : info!("starting safekeepers syncing");
1162 : self.sync_safekeepers(pspec.storage_auth_token.clone())
1163 0 : .with_context(|| "failed to sync safekeepers")?
1164 : };
1165 : info!("safekeepers synced at LSN {}", lsn);
1166 : lsn
1167 : }
1168 : ComputeMode::Static(lsn) => {
1169 : info!("Starting read-only node at static LSN {}", lsn);
1170 : lsn
1171 : }
1172 : ComputeMode::Replica => {
1173 : info!("Initializing standby from latest Pageserver LSN");
1174 : Lsn(0)
1175 : }
1176 : };
1177 :
1178 : info!(
1179 : "getting basebackup@{} from pageserver {}",
1180 : lsn, &pspec.pageserver_connstr
1181 : );
1182 0 : self.get_basebackup(compute_state, lsn).with_context(|| {
1183 0 : format!(
1184 0 : "failed to get basebackup@{} from pageserver {}",
1185 0 : lsn, &pspec.pageserver_connstr
1186 0 : )
1187 0 : })?;
1188 :
1189 : // Update pg_hba.conf received with basebackup.
1190 : update_pg_hba(pgdata_path)?;
1191 :
1192 : // Place pg_dynshmem under /dev/shm. This allows us to use
1193 : // 'dynamic_shared_memory_type = mmap' so that the files are placed in
1194 : // /dev/shm, similar to how 'dynamic_shared_memory_type = posix' works.
1195 : //
1196 : // Why on earth don't we just stick to the 'posix' default, you might
1197 : // ask. It turns out that making large allocations with 'posix' doesn't
1198 : // work very well with autoscaling. The behavior we want is that:
1199 : //
1200 : // 1. You can make large DSM allocations, larger than the current RAM
1201 : // size of the VM, without errors
1202 : //
1203 : // 2. If the allocated memory is really used, the VM is scaled up
1204 : // automatically to accommodate that
1205 : //
1206 : // We try to make that possible by having swap in the VM. But with the
1207 : // default 'posix' DSM implementation, we fail step 1, even when there's
1208 : // plenty of swap available. PostgreSQL uses posix_fallocate() to create
1209 : // the shmem segment, which is really just a file in /dev/shm in Linux,
1210 : // but posix_fallocate() on tmpfs returns ENOMEM if the size is larger
1211 : // than available RAM.
1212 : //
1213 : // Using 'dynamic_shared_memory_type = mmap' works around that, because
1214 : // the Postgres 'mmap' DSM implementation doesn't use
1215 : // posix_fallocate(). Instead, it uses repeated calls to write(2) to
1216 : // fill the file with zeros. It's weird that that differs between
1217 : // 'posix' and 'mmap', but we take advantage of it. When the file is
1218 : // filled slowly with write(2), the kernel allows it to grow larger, as
1219 : // long as there's swap available.
1220 : //
1221 : // In short, using 'dynamic_shared_memory_type = mmap' allows a DSM
1222 : // segment to be larger than the currently available RAM. But we don't
1223 : // want to store it in a real file, which the kernel would try to
1224 : // flush to disk, so we symlink pg_dynshmem to /dev/shm.
1225 : //
1226 : // We don't set 'dynamic_shared_memory_type = mmap' here, we let the
1227 : // control plane control that option. If 'mmap' is not used, this
1228 : // symlink doesn't affect anything.
1229 : //
1230 : // See https://github.com/neondatabase/autoscaling/issues/800
1231 : std::fs::remove_dir(pgdata_path.join("pg_dynshmem"))?;
1232 : symlink("/dev/shm/", pgdata_path.join("pg_dynshmem"))?;
1233 :
1234 : match spec.mode {
1235 : ComputeMode::Primary => {}
1236 : ComputeMode::Replica | ComputeMode::Static(..) => {
1237 : add_standby_signal(pgdata_path)?;
1238 : }
1239 : }
1240 :
1241 : Ok(())
1242 : }
1243 :
1244 : /// Start and stop a postgres process to warm up the VM for startup.
1245 0 : pub fn prewarm_postgres(&self) -> Result<()> {
1246 0 : info!("prewarming");
1247 :
1248 : // Create pgdata
1249 0 : let pgdata = &format!("{}.warmup", self.params.pgdata);
1250 0 : create_pgdata(pgdata)?;
1251 :
1252 : // Run initdb to completion
1253 0 : info!("running initdb");
1254 0 : let initdb_bin = Path::new(&self.params.pgbin)
1255 0 : .parent()
1256 0 : .unwrap()
1257 0 : .join("initdb");
1258 0 : Command::new(initdb_bin)
1259 0 : .args(["--pgdata", pgdata])
1260 0 : .output()
1261 0 : .expect("cannot start initdb process");
1262 :
1263 : // Write conf
1264 : use std::io::Write;
1265 0 : let conf_path = Path::new(pgdata).join("postgresql.conf");
1266 0 : let mut file = std::fs::File::create(conf_path)?;
1267 0 : writeln!(file, "shared_buffers=65536")?;
1268 0 : writeln!(file, "port=51055")?; // Nobody should be connecting
1269 0 : writeln!(file, "shared_preload_libraries = 'neon'")?;
1270 :
1271 : // Start postgres
1272 0 : info!("starting postgres");
1273 0 : let mut pg = maybe_cgexec(&self.params.pgbin)
1274 0 : .args(["-D", pgdata])
1275 0 : .spawn()
1276 0 : .expect("cannot start postgres process");
1277 0 :
1278 0 : // Stop it when it's ready
1279 0 : info!("waiting for postgres");
1280 0 : wait_for_postgres(&mut pg, Path::new(pgdata))?;
1281 : // SIGQUIT orders postgres to exit immediately. We don't want to SIGKILL
1282 : // it to avoid orphaned processes prowling around while datadir is
1283 : // wiped.
1284 0 : let pm_pid = Pid::from_raw(pg.id() as i32);
1285 0 : kill(pm_pid, Signal::SIGQUIT)?;
1286 0 : info!("sent SIGQUIT signal");
1287 0 : pg.wait()?;
1288 0 : info!("done prewarming");
1289 :
1290 : // clean up
1291 0 : let _ok = fs::remove_dir_all(pgdata);
1292 0 : Ok(())
1293 0 : }
1294 :
1295 : /// Start Postgres as a child process and wait for it to start accepting
1296 : /// connections.
1297 : ///
1298 : /// Returns a handle wrapping the child process and the log collection task.
1299 : #[instrument(skip_all)]
1300 : pub fn start_postgres(&self, storage_auth_token: Option<String>) -> Result<PostgresHandle> {
1301 : let pgdata_path = Path::new(&self.params.pgdata);
1302 :
1303 : // Run postgres as a child process.
1304 : let mut pg = maybe_cgexec(&self.params.pgbin)
1305 : .args(["-D", &self.params.pgdata])
1306 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
1307 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
1308 : } else {
1309 : vec![]
1310 : })
1311 : .stderr(Stdio::piped())
1312 : .spawn()
1313 : .expect("cannot start postgres process");
1314 : PG_PID.store(pg.id(), Ordering::SeqCst);
1315 :
1316 : // Start a task to collect logs from stderr.
1317 : let stderr = pg.stderr.take().expect("stderr should be captured");
1318 : let logs_handle = handle_postgres_logs(stderr);
1319 :
1320 : wait_for_postgres(&mut pg, pgdata_path)?;
1321 :
1322 : Ok(PostgresHandle {
1323 : postgres: pg,
1324 : log_collector: logs_handle,
1325 : })
1326 : }
1327 :
1328 : /// Wait for the child Postgres process forever. In this state Ctrl+C will
1329 : /// propagate to Postgres and it will be shut down as well.
1330 0 : fn wait_postgres(&self, mut pg_handle: PostgresHandle) -> std::process::ExitStatus {
1331 0 : info!(postmaster_pid = %pg_handle.postgres.id(), "Waiting for Postgres to exit");
1332 :
1333 0 : let ecode = pg_handle
1334 0 : .postgres
1335 0 : .wait()
1336 0 : .expect("failed to start waiting on Postgres process");
1337 0 : PG_PID.store(0, Ordering::SeqCst);
1338 0 :
1339 0 : // Process has exited. Wait for the log collecting task to finish.
1340 0 : let _ = tokio::runtime::Handle::current()
1341 0 : .block_on(pg_handle.log_collector)
1342 0 : .map_err(|e| tracing::error!("log task panicked: {:?}", e));
1343 0 :
1344 0 : ecode
1345 0 : }
1346 :
1347 : /// Do post-configuration of the already started Postgres. This function spawns a background task to
1348 : /// configure the database after applying the compute spec. Currently, it upgrades the neon extension
1349 : /// version. In the future, it may upgrade all 3rd-party extensions.
1350 : #[instrument(skip_all)]
1351 : pub fn post_apply_config(&self) -> Result<()> {
1352 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:post_apply_config"));
1353 0 : tokio::spawn(async move {
1354 0 : let res = async {
1355 0 : let (mut client, connection) = conf.connect(NoTls).await?;
1356 0 : tokio::spawn(async move {
1357 0 : if let Err(e) = connection.await {
1358 0 : eprintln!("connection error: {}", e);
1359 0 : }
1360 0 : });
1361 0 :
1362 0 : handle_neon_extension_upgrade(&mut client)
1363 0 : .await
1364 0 : .context("handle_neon_extension_upgrade")?;
1365 0 : Ok::<_, anyhow::Error>(())
1366 0 : }
1367 0 : .await;
1368 0 : if let Err(err) = res {
1369 0 : error!("error while post_apply_config: {err:#}");
1370 0 : }
1371 0 : });
1372 : Ok(())
1373 : }
1374 :
1375 0 : pub fn get_conn_conf(&self, application_name: Option<&str>) -> postgres::Config {
1376 0 : let mut conf = self.conn_conf.clone();
1377 0 : if let Some(application_name) = application_name {
1378 0 : conf.application_name(application_name);
1379 0 : }
1380 0 : conf
1381 0 : }
1382 :
1383 0 : pub fn get_tokio_conn_conf(&self, application_name: Option<&str>) -> tokio_postgres::Config {
1384 0 : let mut conf = self.tokio_conn_conf.clone();
1385 0 : if let Some(application_name) = application_name {
1386 0 : conf.application_name(application_name);
1387 0 : }
1388 0 : conf
1389 0 : }
1390 :
1391 0 : pub async fn get_maintenance_client(
1392 0 : conf: &tokio_postgres::Config,
1393 0 : ) -> Result<tokio_postgres::Client> {
1394 0 : let mut conf = conf.clone();
1395 0 : conf.application_name("compute_ctl:apply_config");
1396 :
1397 0 : let (client, conn) = match conf.connect(NoTls).await {
1398 : // If connection fails, it may be the old node with `zenith_admin` superuser.
1399 : //
1400 : // In this case we need to connect with old `zenith_admin` name
1401 : // and create new user. We cannot simply rename connected user,
1402 : // but we can create a new one and grant it all privileges.
1403 0 : Err(e) => match e.code() {
1404 : Some(&SqlState::INVALID_PASSWORD)
1405 : | Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => {
1406 : // Connect with zenith_admin if cloud_admin could not authenticate
1407 0 : info!(
1408 0 : "cannot connect to postgres: {}, retrying with `zenith_admin` username",
1409 : e
1410 : );
1411 0 : let mut zenith_admin_conf = postgres::config::Config::from(conf.clone());
1412 0 : zenith_admin_conf.application_name("compute_ctl:apply_config");
1413 0 : zenith_admin_conf.user("zenith_admin");
1414 :
1415 0 : let mut client =
1416 0 : zenith_admin_conf.connect(NoTls)
1417 0 : .context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
1418 :
1419 : // Disable forwarding so that users don't get a cloud_admin role
1420 0 : let mut func = || {
1421 0 : client.simple_query("SET neon.forward_ddl = false")?;
1422 0 : client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
1423 0 : client.simple_query("GRANT zenith_admin TO cloud_admin")?;
1424 0 : Ok::<_, anyhow::Error>(())
1425 0 : };
1426 0 : func().context("apply_config setup cloud_admin")?;
1427 :
1428 0 : drop(client);
1429 0 :
1430 0 : // Reconnect with connstring with expected name
1431 0 : conf.connect(NoTls).await?
1432 : }
1433 0 : _ => return Err(e.into()),
1434 : },
1435 0 : Ok((client, conn)) => (client, conn),
1436 : };
1437 :
1438 0 : spawn(async move {
1439 0 : if let Err(e) = conn.await {
1440 0 : error!("maintenance client connection error: {}", e);
1441 0 : }
1442 0 : });
1443 0 :
1444 0 : // Disable DDL forwarding because control plane already knows about the roles/databases
1445 0 : // we're about to modify.
1446 0 : client
1447 0 : .simple_query("SET neon.forward_ddl = false")
1448 0 : .await
1449 0 : .context("apply_config SET neon.forward_ddl = false")?;
1450 :
1451 0 : Ok(client)
1452 0 : }
1453 :
1454 : /// Do initial configuration of the already started Postgres.
1455 : #[instrument(skip_all)]
1456 : pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
1457 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:apply_config"));
1458 :
1459 : let conf = Arc::new(conf);
1460 : let spec = Arc::new(
1461 : compute_state
1462 : .pspec
1463 : .as_ref()
1464 : .expect("spec must be set")
1465 : .spec
1466 : .clone(),
1467 : );
1468 :
1469 : let max_concurrent_connections = self.max_service_connections(compute_state, &spec);
1470 :
1471 : // Merge-apply spec & changes to PostgreSQL state.
1472 : self.apply_spec_sql(spec.clone(), conf.clone(), max_concurrent_connections)?;
1473 :
1474 : if let Some(local_proxy) = &spec.clone().local_proxy_config {
1475 : info!("configuring local_proxy");
1476 : local_proxy::configure(local_proxy).context("apply_config local_proxy")?;
1477 : }
1478 :
1479 : // Run migrations separately to not hold up cold starts
1480 0 : tokio::spawn(async move {
1481 0 : let mut conf = conf.as_ref().clone();
1482 0 : conf.application_name("compute_ctl:migrations");
1483 0 :
1484 0 : match conf.connect(NoTls).await {
1485 0 : Ok((mut client, connection)) => {
1486 0 : tokio::spawn(async move {
1487 0 : if let Err(e) = connection.await {
1488 0 : error!("connection error: {}", e);
1489 0 : }
1490 0 : });
1491 0 : if let Err(e) = handle_migrations(&mut client).await {
1492 0 : error!("Failed to run migrations: {}", e);
1493 0 : }
1494 : }
1495 0 : Err(e) => {
1496 0 : error!(
1497 0 : "Failed to connect to the compute for running migrations: {}",
1498 : e
1499 : );
1500 : }
1501 : };
1502 0 : });
1503 :
1504 : Ok::<(), anyhow::Error>(())
1505 : }
1506 :
1507 : // A thin wrapper around `pg_ctl reload`. Note that we currently don't use
1508 : // `pg_ctl` for start / stop.
1509 : #[instrument(skip_all)]
1510 : fn pg_reload_conf(&self) -> Result<()> {
1511 : let pgctl_bin = Path::new(&self.params.pgbin)
1512 : .parent()
1513 : .unwrap()
1514 : .join("pg_ctl");
1515 : Command::new(pgctl_bin)
1516 : .args(["reload", "-D", &self.params.pgdata])
1517 : .output()
1518 : .expect("cannot run pg_ctl process");
1519 : Ok(())
1520 : }
1521 :
1522 : /// Similar to `apply_config()`, but performs a slightly different sequence of operations,
1523 : /// as it's used to reconfigure a previously started and configured Postgres node.
1524 : #[instrument(skip_all)]
1525 : pub fn reconfigure(&self) -> Result<()> {
1526 : let spec = self.state.lock().unwrap().pspec.clone().unwrap().spec;
1527 :
1528 : if let Some(ref pgbouncer_settings) = spec.pgbouncer_settings {
1529 : info!("tuning pgbouncer");
1530 :
1531 : let pgbouncer_settings = pgbouncer_settings.clone();
1532 : let tls_config = self.compute_ctl_config.tls.clone();
1533 :
1534 : // Spawn a background task to do the tuning,
1535 : // so that we don't block the main thread that starts Postgres.
1536 0 : tokio::spawn(async move {
1537 0 : let res = tune_pgbouncer(pgbouncer_settings, tls_config).await;
1538 0 : if let Err(err) = res {
1539 0 : error!("error while tuning pgbouncer: {err:?}");
1540 0 : }
1541 0 : });
1542 : }
1543 :
1544 : if let Some(ref local_proxy) = spec.local_proxy_config {
1545 : info!("configuring local_proxy");
1546 :
1547 : // Spawn a background task to do the configuration,
1548 : // so that we don't block the main thread that starts Postgres.
1549 : let mut local_proxy = local_proxy.clone();
1550 : local_proxy.tls = self.compute_ctl_config.tls.clone();
1551 0 : tokio::spawn(async move {
1552 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
1553 0 : error!("error while configuring local_proxy: {err:?}");
1554 0 : }
1555 0 : });
1556 : }
1557 :
1558 : // Reconfigure rsyslog for Postgres logs export
1559 : let conf = PostgresLogsRsyslogConfig::new(spec.logs_export_host.as_deref());
1560 : configure_postgres_logs_export(conf)?;
1561 :
1562 : // Write new config
1563 : let pgdata_path = Path::new(&self.params.pgdata);
1564 : config::write_postgres_conf(
1565 : pgdata_path,
1566 : &spec,
1567 : self.params.internal_http_port,
1568 : &self.compute_ctl_config.tls,
1569 : )?;
1570 :
1571 : if !spec.skip_pg_catalog_updates {
1572 : let max_concurrent_connections = spec.reconfigure_concurrency;
1573 : // Temporarily reset max_cluster_size in config
1574 : // to avoid the possibility of hitting the limit while we are reconfiguring:
1575 : // creating new extensions, roles, etc.
1576 0 : config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
1577 0 : self.pg_reload_conf()?;
1578 :
1579 0 : if spec.mode == ComputeMode::Primary {
1580 0 : let mut conf =
1581 0 : tokio_postgres::Config::from_str(self.params.connstr.as_str()).unwrap();
1582 0 : conf.application_name("apply_config");
1583 0 : let conf = Arc::new(conf);
1584 0 :
1585 0 : let spec = Arc::new(spec.clone());
1586 0 :
1587 0 : self.apply_spec_sql(spec, conf, max_concurrent_connections)?;
1588 0 : }
1589 :
1590 0 : Ok(())
1591 0 : })?;
1592 : }
1593 :
1594 : self.pg_reload_conf()?;
1595 :
1596 : let unknown_op = "unknown".to_string();
1597 : let op_id = spec.operation_uuid.as_ref().unwrap_or(&unknown_op);
1598 : info!(
1599 : "finished reconfiguration of compute node for operation {}",
1600 : op_id
1601 : );
1602 :
1603 : Ok(())
1604 : }
1605 :
1606 : #[instrument(skip_all)]
1607 : pub fn configure_as_primary(&self, compute_state: &ComputeState) -> Result<()> {
1608 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
1609 :
1610 : assert!(pspec.spec.mode == ComputeMode::Primary);
1611 : if !pspec.spec.skip_pg_catalog_updates {
1612 : let pgdata_path = Path::new(&self.params.pgdata);
1613 : // Temporarily reset max_cluster_size in config
1614 : // to avoid the possibility of hitting the limit while we are applying config:
1615 : // creating new extensions, roles, etc...
1616 0 : config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
1617 0 : self.pg_reload_conf()?;
1618 :
1619 0 : self.apply_config(compute_state)?;
1620 :
1621 0 : Ok(())
1622 0 : })?;
1623 :
1624 : let postgresql_conf_path = pgdata_path.join("postgresql.conf");
1625 : if config::line_in_file(
1626 : &postgresql_conf_path,
1627 : "neon.disable_logical_replication_subscribers=false",
1628 : )? {
1629 : info!(
1630 : "updated postgresql.conf to set neon.disable_logical_replication_subscribers=false"
1631 : );
1632 : }
1633 : self.pg_reload_conf()?;
1634 : }
1635 : self.post_apply_config()?;
1636 :
1637 : Ok(())
1638 : }
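    /// Watch the TLS certificate for renewals. On each change, flip the
    /// compute into `ConfigurationPending` so postgres/pgbouncer/local_proxy
    /// pick up the new certificate; stop watching once the compute has failed
    /// or is terminating.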
1639 :
1640 0 : pub async fn watch_cert_for_changes(self: Arc<Self>) {
1641 : // update status on cert renewal
1642 0 : if let Some(tls_config) = &self.compute_ctl_config.tls {
1643 0 : let tls_config = tls_config.clone();
1644 :
1645 : // wait until the cert exists.
1646 0 : let mut cert_watch = watch_cert_for_changes(tls_config.cert_path.clone()).await;
1647 :
1648 0 : tokio::task::spawn_blocking(move || {
1649 0 : let handle = tokio::runtime::Handle::current();
1650 : 'cert_update: loop {
1651 : // let postgres/pgbouncer/local_proxy know the new cert/key exists.
1652 : // we need to wait until it's configurable first.
1653 :
1654 0 : let mut state = self.state.lock().unwrap();
1655 : 'status_update: loop {
1656 0 : match state.status {
1657 : // let's update the state to config pending
1658 : ComputeStatus::ConfigurationPending | ComputeStatus::Running => {
1659 0 : state.set_status(
1660 0 : ComputeStatus::ConfigurationPending,
1661 0 : &self.state_changed,
1662 0 : );
1663 0 : break 'status_update;
1664 : }
1665 :
1666 : // exit loop
1667 : ComputeStatus::Failed
1668 : | ComputeStatus::TerminationPending
1669 0 : | ComputeStatus::Terminated => break 'cert_update,
1670 :
1671 : // wait
1672 : ComputeStatus::Init
1673 : | ComputeStatus::Configuration
1674 0 : | ComputeStatus::Empty => {
1675 0 : state = self.state_changed.wait(state).unwrap();
1676 0 : }
1677 : }
1678 : }
1679 0 : drop(state);
1680 0 :
1681 0 : // wait for a new certificate update
1682 0 : if handle.block_on(cert_watch.changed()).is_err() {
1683 0 : break;
1684 0 : }
1685 : }
1686 0 : });
1687 0 : }
1688 0 : }
1689 :
1690 : /// Update `last_active` in the shared state, but only if the new value is more recent.
1691 0 : pub fn update_last_active(&self, last_active: Option<DateTime<Utc>>) {
1692 0 : let mut state = self.state.lock().unwrap();
1693 0 : // NB: `Some(<DateTime>)` is always greater than `None`.
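        // Quick illustration of that ordering:
        //     assert!(Some(Utc::now()) > None::<DateTime<Utc>>);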
1694 0 : if last_active > state.last_active {
1695 0 : state.last_active = last_active;
1696 0 : debug!("set the last compute activity time to: {:?}", last_active);
1697 0 : }
1698 0 : }
1699 :
1700 : // Look for core dumps and collect backtraces.
1701 : //
1702 : // EKS worker nodes have the following core dump settings:
1703 : // /proc/sys/kernel/core_pattern -> core
1704 : // /proc/sys/kernel/core_uses_pid -> 1
1705 : // ulimit -c -> unlimited
1706 : // which results in core dumps being written to postgres data directory as core.<pid>.
1707 : //
1708 : // Use that as the default location and pattern, except on macOS, where core
1709 : // dumps are written to the /cores/ directory by default.
1710 : //
1711 : // With default Linux settings, the core dump file is called just "core", so check for
1712 : // that too.
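    // For a dump named `core.1234` (illustrative pid), the gdb invocation
    // built below is roughly:
    //     gdb --batch -q -ex bt <pgbin> <pgdata>/core.1234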
1713 0 : pub fn check_for_core_dumps(&self) -> Result<()> {
1714 0 : let core_dump_dir = match std::env::consts::OS {
1715 0 : "macos" => Path::new("/cores/"),
1716 0 : _ => Path::new(&self.params.pgdata),
1717 : };
1718 :
1719 : // Collect core dump paths if any
1720 0 : info!("checking for core dumps in {}", core_dump_dir.display());
1721 0 : let files = fs::read_dir(core_dump_dir)?;
1722 0 : let cores = files.filter_map(|entry| {
1723 0 : let entry = entry.ok()?;
1724 :
1725 0 : let is_core_dump = match entry.file_name().to_str()? {
1726 0 : n if n.starts_with("core.") => true,
1727 0 : "core" => true,
1728 0 : _ => false,
1729 : };
1730 0 : if is_core_dump {
1731 0 : Some(entry.path())
1732 : } else {
1733 0 : None
1734 : }
1735 0 : });
1736 :
1737 : // Print backtrace for each core dump
1738 0 : for core_path in cores {
1739 0 : warn!(
1740 0 : "core dump found: {}, collecting backtrace",
1741 0 : core_path.display()
1742 : );
1743 :
1744 : // Try first with gdb
1745 0 : let backtrace = Command::new("gdb")
1746 0 : .args(["--batch", "-q", "-ex", "bt", &self.params.pgbin])
1747 0 : .arg(&core_path)
1748 0 : .output();
1749 :
1750 : // Try lldb if no gdb is found -- that is handy for local testing on macOS
1751 0 : let backtrace = match backtrace {
1752 0 : Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
1753 0 : warn!("cannot find gdb, trying lldb");
1754 0 : Command::new("lldb")
1755 0 : .arg("-c")
1756 0 : .arg(&core_path)
1757 0 : .args(["--batch", "-o", "bt all", "-o", "quit"])
1758 0 : .output()
1759 : }
1760 0 : _ => backtrace,
1761 0 : }?;
1762 :
1763 0 : warn!(
1764 0 : "core dump backtrace: {}",
1765 0 : String::from_utf8_lossy(&backtrace.stdout)
1766 : );
1767 0 : warn!(
1768 0 : "debugger stderr: {}",
1769 0 : String::from_utf8_lossy(&backtrace.stderr)
1770 : );
1771 : }
1772 :
1773 0 : Ok(())
1774 0 : }
1775 :
1776 : /// Select `pg_stat_statements` data and return it as stringified JSON,
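    /// in the shape `{"pg_stat_statements": [<row_to_json rows>, ...]}`.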
1777 0 : pub async fn collect_insights(&self) -> String {
1778 0 : let mut result_rows: Vec<String> = Vec::new();
1779 0 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:collect_insights"));
1780 0 : let connect_result = conf.connect(NoTls).await;
1781 0 : let (client, connection) = connect_result.unwrap();
1782 0 : tokio::spawn(async move {
1783 0 : if let Err(e) = connection.await {
1784 0 : eprintln!("connection error: {}", e);
1785 0 : }
1786 0 : });
1787 0 : let result = client
1788 0 : .simple_query(
1789 0 : "SELECT
1790 0 : row_to_json(pg_stat_statements)
1791 0 : FROM
1792 0 : pg_stat_statements
1793 0 : WHERE
1794 0 : userid != 'cloud_admin'::regrole::oid
1795 0 : ORDER BY
1796 0 : (mean_exec_time + mean_plan_time) DESC
1797 0 : LIMIT 100",
1798 0 : )
1799 0 : .await;
1800 :
1801 0 : if let Ok(raw_rows) = result {
1802 0 : for message in raw_rows.iter() {
1803 0 : if let postgres::SimpleQueryMessage::Row(row) = message {
1804 0 : if let Some(json) = row.get(0) {
1805 0 : result_rows.push(json.to_string());
1806 0 : }
1807 0 : }
1808 : }
1809 :
1810 0 : format!("{{\"pg_stat_statements\": [{}]}}", result_rows.join(","))
1811 : } else {
1812 0 : "{{\"pg_stat_statements\": []}}".to_string()
1813 : }
1814 0 : }
1815 :
1816 : // Download an archive, unzip it, and place the files in their correct locations.
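    // Concurrent requests for the same archive coordinate through
    // `ext_download_progress`: the first caller performs the download, and
    // later callers wait (up to HANG_TIMEOUT) for it to complete.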
1817 0 : pub async fn download_extension(
1818 0 : &self,
1819 0 : real_ext_name: String,
1820 0 : ext_path: RemotePath,
1821 0 : ) -> Result<u64, DownloadError> {
1822 0 : let ext_remote_storage =
1823 0 : self.params
1824 0 : .ext_remote_storage
1825 0 : .as_ref()
1826 0 : .ok_or(DownloadError::BadInput(anyhow::anyhow!(
1827 0 : "Remote extensions storage is not configured",
1828 0 : )))?;
1829 :
1830 0 : let ext_archive_name = ext_path.object_name().expect("bad path");
1831 0 :
1832 0 : let mut first_try = false;
1833 0 : if !self
1834 0 : .ext_download_progress
1835 0 : .read()
1836 0 : .expect("lock err")
1837 0 : .contains_key(ext_archive_name)
1838 0 : {
1839 0 : self.ext_download_progress
1840 0 : .write()
1841 0 : .expect("lock err")
1842 0 : .insert(ext_archive_name.to_string(), (Utc::now(), false));
1843 0 : first_try = true;
1844 0 : }
1845 0 : let (download_start, download_completed) =
1846 0 : self.ext_download_progress.read().expect("lock err")[ext_archive_name];
1847 0 : let start_time_delta = Utc::now()
1848 0 : .signed_duration_since(download_start)
1849 0 : .to_std()
1850 0 : .unwrap()
1851 0 : .as_millis() as u64;
1852 :
1853 : // how long to wait for extension download if it was started by another process
1854 : const HANG_TIMEOUT: u64 = 3000; // milliseconds
1855 :
1856 0 : if download_completed {
1857 0 : info!("extension already downloaded, skipping re-download");
1858 0 : return Ok(0);
1859 0 : } else if start_time_delta < HANG_TIMEOUT && !first_try {
1860 0 : info!(
1861 0 : "download {ext_archive_name} already started by another process, hanging untill completion or timeout"
1862 : );
1863 0 : let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500));
1864 : loop {
1865 0 : info!("waiting for download");
1866 0 : interval.tick().await;
1867 0 : let (_, download_completed_now) =
1868 0 : self.ext_download_progress.read().expect("lock")[ext_archive_name];
1869 0 : if download_completed_now {
1870 0 : info!("download finished by whoever else downloaded it");
1871 0 : return Ok(0);
1872 0 : }
1873 : }
1874 : // NOTE: the loop above is terminated by the timeout
1875 : // of the download function itself
1876 0 : }
1877 0 :
1878 0 : // If the extension hasn't been downloaded before, or the previous
1879 0 : // download attempt started at least HANG_TIMEOUT ms ago,
1880 0 : // then we try to download it here.
1881 0 : info!("downloading new extension {ext_archive_name}");
1882 :
1883 0 : let download_size = extension_server::download_extension(
1884 0 : &real_ext_name,
1885 0 : &ext_path,
1886 0 : ext_remote_storage,
1887 0 : &self.params.pgbin,
1888 0 : )
1889 0 : .await
1890 0 : .map_err(DownloadError::Other);
1891 0 :
1892 0 : if download_size.is_ok() {
1893 0 : self.ext_download_progress
1894 0 : .write()
1895 0 : .expect("bad lock")
1896 0 : .insert(ext_archive_name.to_string(), (download_start, true));
1897 0 : }
1898 :
1899 0 : download_size
1900 0 : }
1901 :
1902 0 : pub async fn set_role_grants(
1903 0 : &self,
1904 0 : db_name: &PgIdent,
1905 0 : schema_name: &PgIdent,
1906 0 : privileges: &[Privilege],
1907 0 : role_name: &PgIdent,
1908 0 : ) -> Result<()> {
1909 : use tokio_postgres::NoTls;
1910 :
1911 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:set_role_grants"));
1912 0 : conf.dbname(db_name);
1913 :
1914 0 : let (db_client, conn) = conf
1915 0 : .connect(NoTls)
1916 0 : .await
1917 0 : .context("Failed to connect to the database")?;
1918 0 : tokio::spawn(conn);
1919 0 :
1920 0 : // TODO: support other types of grants apart from schemas?
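        // Example of the generated statement, with hypothetical names (and
        // assuming the privilege list renders as "USAGE, CREATE"):
        //     GRANT USAGE, CREATE ON SCHEMA "app" TO "reader"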
1921 0 : let query = format!(
1922 0 : "GRANT {} ON SCHEMA {} TO {}",
1923 0 : privileges
1924 0 : .iter()
1925 0 : // Not quoted, as it is part of the command
1926 0 : // and is already sanitized, so it's OK.
1927 0 : .map(|p| p.as_str())
1928 0 : .collect::<Vec<&'static str>>()
1929 0 : .join(", "),
1930 0 : // quote the schema and role name as identifiers to sanitize them.
1931 0 : schema_name.pg_quote(),
1932 0 : role_name.pg_quote(),
1933 0 : );
1934 0 : db_client
1935 0 : .simple_query(&query)
1936 0 : .await
1937 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
1938 :
1939 0 : Ok(())
1940 0 : }
1941 :
1942 0 : pub async fn install_extension(
1943 0 : &self,
1944 0 : ext_name: &PgIdent,
1945 0 : db_name: &PgIdent,
1946 0 : ext_version: ExtVersion,
1947 0 : ) -> Result<ExtVersion> {
1948 : use tokio_postgres::NoTls;
1949 :
1950 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:install_extension"));
1951 0 : conf.dbname(db_name);
1952 :
1953 0 : let (db_client, conn) = conf
1954 0 : .connect(NoTls)
1955 0 : .await
1956 0 : .context("Failed to connect to the database")?;
1957 0 : tokio::spawn(conn);
1958 0 :
1959 0 : let version_query = "SELECT extversion FROM pg_extension WHERE extname = $1";
1960 0 : let version: Option<ExtVersion> = db_client
1961 0 : .query_opt(version_query, &[&ext_name])
1962 0 : .await
1963 0 : .with_context(|| format!("Failed to execute query: {}", version_query))?
1964 0 : .map(|row| row.get(0));
1965 0 :
1966 0 : // sanitize the inputs as postgres idents.
1967 0 : let ext_name: String = ext_name.pg_quote();
1968 0 : let quoted_version: String = ext_version.pg_quote();
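        // With hypothetical inputs ext_name = vector and ext_version = 0.5.1,
        // the statements below render as:
        //     ALTER EXTENSION "vector" UPDATE TO "0.5.1"
        //     CREATE EXTENSION IF NOT EXISTS "vector" WITH VERSION "0.5.1"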
1969 :
1970 0 : if let Some(installed_version) = version {
1971 0 : if installed_version == ext_version {
1972 0 : return Ok(installed_version);
1973 0 : }
1974 0 : let query = format!("ALTER EXTENSION {ext_name} UPDATE TO {quoted_version}");
1975 0 : db_client
1976 0 : .simple_query(&query)
1977 0 : .await
1978 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
1979 : } else {
1980 0 : let query =
1981 0 : format!("CREATE EXTENSION IF NOT EXISTS {ext_name} WITH VERSION {quoted_version}");
1982 0 : db_client
1983 0 : .simple_query(&query)
1984 0 : .await
1985 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
1986 : }
1987 :
1988 0 : Ok(ext_version)
1989 0 : }
1990 :
1991 0 : pub async fn prepare_preload_libraries(
1992 0 : &self,
1993 0 : spec: &ComputeSpec,
1994 0 : ) -> Result<RemoteExtensionMetrics> {
1995 0 : if self.params.ext_remote_storage.is_none() {
1996 0 : return Ok(RemoteExtensionMetrics {
1997 0 : num_ext_downloaded: 0,
1998 0 : largest_ext_size: 0,
1999 0 : total_ext_download_size: 0,
2000 0 : });
2001 0 : }
2002 0 : let remote_extensions = spec
2003 0 : .remote_extensions
2004 0 : .as_ref()
2005 0 : .ok_or(anyhow::anyhow!("Remote extensions are not configured"))?;
2006 :
2007 0 : info!("parse shared_preload_libraries from spec.cluster.settings");
2008 0 : let mut libs_vec = Vec::new();
2009 0 : if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
2010 0 : libs_vec = libs
2011 0 : .split(&[',', '\'', ' '])
2012 0 : .filter(|s| *s != "neon" && !s.is_empty())
2013 0 : .map(str::to_string)
2014 0 : .collect();
2015 0 : }
2016 0 : info!("parse shared_preload_libraries from provided postgresql.conf");
2017 :
2018 : // This path is used by neon_local and the python tests.
2019 0 : if let Some(conf) = &spec.cluster.postgresql_conf {
2020 0 : let conf_lines = conf.split('\n').collect::<Vec<&str>>();
2021 0 : let mut shared_preload_libraries_line = "";
2022 0 : for line in conf_lines {
2023 0 : if line.starts_with("shared_preload_libraries") {
2024 0 : shared_preload_libraries_line = line;
2025 0 : }
2026 : }
2027 0 : let mut preload_libs_vec = Vec::new();
2028 0 : if let Some(libs) = shared_preload_libraries_line.split("='").nth(1) {
2029 0 : preload_libs_vec = libs
2030 0 : .split(&[',', '\'', ' '])
2031 0 : .filter(|s| *s != "neon" && !s.is_empty())
2032 0 : .map(str::to_string)
2033 0 : .collect();
2034 0 : }
2035 0 : libs_vec.extend(preload_libs_vec);
2036 0 : }
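        // For example, `shared_preload_libraries='neon,pg_stat_statements'`
        // yields ["pg_stat_statements"] here: the split above strips quotes,
        // commas, and spaces, and "neon" (always loaded) is filtered out.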
2037 :
2038 : // Don't try to download libraries that are not in the index.
2039 : // Assume that they are already present locally.
2040 0 : libs_vec.retain(|lib| remote_extensions.library_index.contains_key(lib));
2041 0 :
2042 0 : info!("Downloading to shared preload libraries: {:?}", &libs_vec);
2043 :
2044 0 : let mut download_tasks = Vec::new();
2045 0 : for library in &libs_vec {
2046 0 : let (ext_name, ext_path) =
2047 0 : remote_extensions.get_ext(library, true, &BUILD_TAG, &self.params.pgversion)?;
2048 0 : download_tasks.push(self.download_extension(ext_name, ext_path));
2049 : }
2050 0 : let results = join_all(download_tasks).await;
2051 :
2052 0 : let mut remote_ext_metrics = RemoteExtensionMetrics {
2053 0 : num_ext_downloaded: 0,
2054 0 : largest_ext_size: 0,
2055 0 : total_ext_download_size: 0,
2056 0 : };
2057 0 : for result in results {
2058 0 : let download_size = match result {
2059 0 : Ok(res) => {
2060 0 : remote_ext_metrics.num_ext_downloaded += 1;
2061 0 : res
2062 : }
2063 0 : Err(err) => {
2064 0 : // if we failed to download an extension, we don't want to fail the whole
2065 0 : // process, but we do want to log the error
2066 0 : error!("Failed to download extension: {}", err);
2067 0 : 0
2068 : }
2069 : };
2070 :
2071 0 : remote_ext_metrics.largest_ext_size =
2072 0 : std::cmp::max(remote_ext_metrics.largest_ext_size, download_size);
2073 0 : remote_ext_metrics.total_ext_download_size += download_size;
2074 : }
2075 0 : Ok(remote_ext_metrics)
2076 0 : }
2077 :
2078 : /// Waits until current thread receives a state changed notification and
2079 : /// the pageserver connection string has changed.
2080 : ///
2081 : /// The operation will time out after a specified duration.
2082 0 : pub fn wait_timeout_while_pageserver_connstr_unchanged(&self, duration: Duration) {
2083 0 : let state = self.state.lock().unwrap();
2084 0 : let old_pageserver_connstr = state
2085 0 : .pspec
2086 0 : .as_ref()
2087 0 : .expect("spec must be set")
2088 0 : .pageserver_connstr
2089 0 : .clone();
2090 0 : let mut unchanged = true;
2091 0 : let _ = self
2092 0 : .state_changed
2093 0 : .wait_timeout_while(state, duration, |s| {
2094 0 : let pageserver_connstr = &s
2095 0 : .pspec
2096 0 : .as_ref()
2097 0 : .expect("spec must be set")
2098 0 : .pageserver_connstr;
2099 0 : unchanged = pageserver_connstr == &old_pageserver_connstr;
2100 0 : unchanged
2101 0 : })
2102 0 : .unwrap();
2103 0 : if !unchanged {
2104 0 : info!("Pageserver config changed");
2105 0 : }
2106 0 : }
2107 : }
2108 :
2109 0 : pub fn forward_termination_signal() {
2110 0 : let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
2111 0 : if ss_pid != 0 {
2112 0 : let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
2113 0 : kill(ss_pid, Signal::SIGTERM).ok();
2114 0 : }
2115 0 : let pg_pid = PG_PID.load(Ordering::SeqCst);
2116 0 : if pg_pid != 0 {
2117 0 : let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
2118 0 : // Use 'fast' shutdown (SIGINT) because it also creates a shutdown checkpoint, which is important for
2119 0 : // ROs to get a list of running xacts faster instead of going through the CLOG.
2120 0 : // See https://www.postgresql.org/docs/current/server-shutdown.html for the list of modes and signals.
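        // (For reference: SIGTERM would be a 'smart' shutdown, SIGQUIT 'immediate'.)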
2121 0 : kill(pg_pid, Signal::SIGINT).ok();
2122 0 : }
2123 0 : }
2124 :
2125 : // Helper trait: like `JoinSet::spawn_blocking(f)`, but propagates the current
2126 : // tracing span to the spawned thread.
2127 : trait JoinSetExt<T> {
2128 : fn spawn_blocking_child<F>(&mut self, f: F) -> tokio::task::AbortHandle
2129 : where
2130 : F: FnOnce() -> T + Send + 'static,
2131 : T: Send;
2132 : }
2133 :
2134 : impl<T: 'static> JoinSetExt<T> for tokio::task::JoinSet<T> {
2135 0 : fn spawn_blocking_child<F>(&mut self, f: F) -> tokio::task::AbortHandle
2136 0 : where
2137 0 : F: FnOnce() -> T + Send + 'static,
2138 0 : T: Send,
2139 0 : {
2140 0 : let sp = tracing::Span::current();
2141 0 : self.spawn_blocking(move || {
2142 0 : let _e = sp.enter();
2143 0 : f()
2144 0 : })
2145 0 : }
2146 : }
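// A minimal usage sketch (hypothetical caller):
//
//     let mut tasks = tokio::task::JoinSet::new();
//     tasks.spawn_blocking_child(|| expensive_blocking_work());
//     // the closure runs inside the tracing span that was current at spawn time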
|