Line data Source code
1 : use std::collections::HashMap;
2 : use std::os::unix::fs::{PermissionsExt, symlink};
3 : use std::path::Path;
4 : use std::process::{Command, Stdio};
5 : use std::str::FromStr;
6 : use std::sync::atomic::{AtomicU32, Ordering};
7 : use std::sync::{Arc, Condvar, Mutex, RwLock};
8 : use std::time::{Duration, Instant};
9 : use std::{env, fs};
10 :
11 : use anyhow::{Context, Result};
12 : use chrono::{DateTime, Utc};
13 : use compute_api::privilege::Privilege;
14 : use compute_api::responses::{ComputeMetrics, ComputeStatus};
15 : use compute_api::spec::{ComputeFeature, ComputeMode, ComputeSpec, ExtVersion, PgIdent};
16 : use futures::StreamExt;
17 : use futures::future::join_all;
18 : use futures::stream::FuturesUnordered;
19 : use nix::sys::signal::{Signal, kill};
20 : use nix::unistd::Pid;
21 : use postgres;
22 : use postgres::NoTls;
23 : use postgres::error::SqlState;
24 : use remote_storage::{DownloadError, RemotePath};
25 : use tokio::spawn;
26 : use tracing::{Instrument, debug, error, info, instrument, warn};
27 : use utils::id::{TenantId, TimelineId};
28 : use utils::lsn::Lsn;
29 : use utils::measured_stream::MeasuredReader;
30 :
31 : use crate::configurator::launch_configurator;
32 : use crate::disk_quota::set_disk_quota;
33 : use crate::installed_extensions::get_installed_extensions;
34 : use crate::logger::startup_context_from_env;
35 : use crate::lsn_lease::launch_lsn_lease_bg_task_for_static;
36 : use crate::monitor::launch_monitor;
37 : use crate::pg_helpers::*;
38 : use crate::spec::*;
39 : use crate::swap::resize_swap;
40 : use crate::sync_sk::{check_if_synced, ping_safekeeper};
41 : use crate::{config, extension_server, local_proxy};
42 :
43 : pub static SYNC_SAFEKEEPERS_PID: AtomicU32 = AtomicU32::new(0);
44 : pub static PG_PID: AtomicU32 = AtomicU32::new(0);
45 :
46 : /// Static configuration params that don't change after startup. These mostly
47 : /// come from the CLI args, or are derived from them.
48 : pub struct ComputeNodeParams {
49 : /// The ID of the compute
50 : pub compute_id: String,
51 : // Url type maintains proper escaping
52 : pub connstr: url::Url,
53 :
54 : pub resize_swap_on_bind: bool,
55 : pub set_disk_quota_for_fs: Option<String>,
56 :
57 : // VM monitor parameters
58 : #[cfg(target_os = "linux")]
59 : pub filecache_connstr: String,
60 : #[cfg(target_os = "linux")]
61 : pub cgroup: String,
62 : #[cfg(target_os = "linux")]
63 : pub vm_monitor_addr: String,
64 :
65 : pub pgdata: String,
66 : pub pgbin: String,
67 : pub pgversion: String,
68 : pub build_tag: String,
69 :
70 : /// The port that the compute's external HTTP server listens on
71 : pub external_http_port: u16,
72 : /// The port that the compute's internal HTTP server listens on
73 : pub internal_http_port: u16,
74 :
75 : /// The address of the extension storage proxy gateway
76 : pub ext_remote_storage: Option<String>,
77 :
78 : /// We should only allow live reconfiguration of the compute node if it
79 : /// uses the 'pull model', i.e. it can go to the control plane and fetch
80 : /// the latest configuration. Otherwise, the following could happen:
81 : /// - we start the compute with some spec provided as an argument
82 : /// - we push a new spec and the compute reconfigures itself
83 : /// - but then something happens and the compute pod / VM is destroyed,
84 : /// so the k8s controller starts it again with the **old** spec
85 : ///
86 : /// The same applies to empty computes:
87 : /// - we start the compute without any spec
88 : /// - we push a spec and the compute configures itself
89 : /// - but then it is restarted without any spec again
90 : pub live_config_allowed: bool,
91 : }
92 :
93 : /// Compute node info shared across several `compute_ctl` threads.
94 : pub struct ComputeNode {
95 : pub params: ComputeNodeParams,
96 :
97 : // We connect to Postgres from many different places, so build configs once
98 : // and reuse them where needed. These are derived from 'params.connstr'
99 : pub conn_conf: postgres::config::Config,
100 : pub tokio_conn_conf: tokio_postgres::config::Config,
101 :
102 : /// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
103 : /// To allow the HTTP API server to serve status requests while configuration
104 : /// is in progress, the lock should be held only for short periods of time to
105 : /// do reads/writes, not for the whole configuration process.
106 : pub state: Mutex<ComputeState>,
107 : /// `Condvar` to allow notifying waiters about state changes.
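///
/// A minimal wait-loop sketch, mirroring how `wait_spec()` below uses this
/// condition variable (`compute` is a hypothetical `&ComputeNode`):
/// ```ignore
/// let mut state = compute.state.lock().unwrap();
/// while state.status != ComputeStatus::ConfigurationPending {
///     state = compute.state_changed.wait(state).unwrap();
/// }
/// ```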
108 : pub state_changed: Condvar,
109 :
110 : // key: ext_archive_name, value: (download start time, download_completed?)
111 : pub ext_download_progress: RwLock<HashMap<String, (DateTime<Utc>, bool)>>,
112 : }
113 :
114 : // store some metrics about download size that might impact startup time
115 : #[derive(Clone, Debug)]
116 : pub struct RemoteExtensionMetrics {
117 : num_ext_downloaded: u64,
118 : largest_ext_size: u64,
119 : total_ext_download_size: u64,
120 : }
121 :
122 : #[derive(Clone, Debug)]
123 : pub struct ComputeState {
124 : pub start_time: DateTime<Utc>,
125 : pub status: ComputeStatus,
126 : /// Timestamp of the last Postgres activity. It could be `None` if the
127 : /// compute hasn't been used since start.
128 : pub last_active: Option<DateTime<Utc>>,
129 : pub error: Option<String>,
130 :
131 : /// Compute spec. This can be received from the CLI or - more likely -
132 : /// passed by the control plane with a /configure HTTP request.
133 : pub pspec: Option<ParsedSpec>,
134 :
135 : /// If the spec is passed by a /configure request, 'startup_span' is the
136 : /// /configure request's tracing span. The main thread enters it when it
137 : /// processes the compute startup, so that the compute startup is considered
138 : /// to be part of the /configure request for tracing purposes.
139 : ///
140 : /// If the request handling thread/task called startup_compute() directly,
141 : /// it would automatically be a child of the request handling span, and we
142 : /// wouldn't need this. But because we use the main thread to perform the
143 : /// startup, and the /configure task just waits for it to finish, we need to
144 : /// set up the span relationship ourselves.
145 : pub startup_span: Option<tracing::span::Span>,
146 :
147 : pub metrics: ComputeMetrics,
148 : }
149 :
150 : impl ComputeState {
151 0 : pub fn new() -> Self {
152 0 : Self {
153 0 : start_time: Utc::now(),
154 0 : status: ComputeStatus::Empty,
155 0 : last_active: None,
156 0 : error: None,
157 0 : pspec: None,
158 0 : startup_span: None,
159 0 : metrics: ComputeMetrics::default(),
160 0 : }
161 0 : }
162 :
163 0 : pub fn set_status(&mut self, status: ComputeStatus, state_changed: &Condvar) {
164 0 : let prev = self.status;
165 0 : info!("Changing compute status from {} to {}", prev, status);
166 0 : self.status = status;
167 0 : state_changed.notify_all();
168 0 : }
169 :
170 0 : pub fn set_failed_status(&mut self, err: anyhow::Error, state_changed: &Condvar) {
171 0 : self.error = Some(format!("{err:?}"));
172 0 : self.set_status(ComputeStatus::Failed, state_changed);
173 0 : }
174 : }
175 :
176 : impl Default for ComputeState {
177 0 : fn default() -> Self {
178 0 : Self::new()
179 0 : }
180 : }
181 :
182 : #[derive(Clone, Debug)]
183 : pub struct ParsedSpec {
184 : pub spec: ComputeSpec,
185 : pub tenant_id: TenantId,
186 : pub timeline_id: TimelineId,
187 : pub pageserver_connstr: String,
188 : pub safekeeper_connstrings: Vec<String>,
189 : pub storage_auth_token: Option<String>,
190 : }
191 :
192 : impl TryFrom<ComputeSpec> for ParsedSpec {
193 : type Error = String;
194 0 : fn try_from(spec: ComputeSpec) -> Result<Self, String> {
195 : // Extract the options from the spec file that are needed to connect to
196 : // the storage system.
197 : //
198 : // For backwards-compatibility, the top-level fields in the spec file
199 : // may be empty. In that case, we need to dig them from the GUCs in the
200 : // cluster.settings field.
201 0 : let pageserver_connstr = spec
202 0 : .pageserver_connstring
203 0 : .clone()
204 0 : .or_else(|| spec.cluster.settings.find("neon.pageserver_connstring"))
205 0 : .ok_or("pageserver connstr should be provided")?;
206 0 : let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() {
207 0 : if matches!(spec.mode, ComputeMode::Primary) {
208 0 : spec.cluster
209 0 : .settings
210 0 : .find("neon.safekeepers")
211 0 : .ok_or("safekeeper connstrings should be provided")?
212 0 : .split(',')
213 0 : .map(|str| str.to_string())
214 0 : .collect()
215 : } else {
216 0 : vec![]
217 : }
218 : } else {
219 0 : spec.safekeeper_connstrings.clone()
220 : };
221 0 : let storage_auth_token = spec.storage_auth_token.clone();
222 0 : let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
223 0 : tenant_id
224 : } else {
225 0 : spec.cluster
226 0 : .settings
227 0 : .find("neon.tenant_id")
228 0 : .ok_or("tenant id should be provided")
229 0 : .map(|s| TenantId::from_str(&s))?
230 0 : .or(Err("invalid tenant id"))?
231 : };
232 0 : let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
233 0 : timeline_id
234 : } else {
235 0 : spec.cluster
236 0 : .settings
237 0 : .find("neon.timeline_id")
238 0 : .ok_or("timeline id should be provided")
239 0 : .map(|s| TimelineId::from_str(&s))?
240 0 : .or(Err("invalid timeline id"))?
241 : };
242 :
243 0 : Ok(ParsedSpec {
244 0 : spec,
245 0 : pageserver_connstr,
246 0 : safekeeper_connstrings,
247 0 : storage_auth_token,
248 0 : tenant_id,
249 0 : timeline_id,
250 0 : })
251 0 : }
252 : }
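// A conversion sketch (hedged), as done later in `ComputeNode::new()` with a
// spec coming from the CLI; `cli_spec` here is a hypothetical `ComputeSpec`:
//
//     let pspec = ParsedSpec::try_from(cli_spec).map_err(|msg| anyhow::anyhow!(msg))?;
//     info!("tenant {}, timeline {}", pspec.tenant_id, pspec.timeline_id);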
253 :
254 : /// If we are a VM, returns a [`Command`] that will run in the `neon-postgres`
255 : /// cgroup. Otherwise returns the default `Command::new(cmd)`
256 : ///
257 : /// This function should be used to start postgres, as it will start it in the
258 : /// neon-postgres cgroup if we are a VM. This allows autoscaling to control
259 : /// postgres' resource usage. The cgroup will exist in VMs because vm-builder
260 : /// creates it during the sysinit phase of its inittab.
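///
/// A minimal usage sketch (the binary path and data directory here are
/// hypothetical; `start_postgres()` below does the real thing):
/// ```ignore
/// let mut cmd = maybe_cgexec("/usr/local/bin/postgres");
/// cmd.args(["-D", "/data/pgdata"]);
/// let child = cmd.spawn()?;
/// ```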
261 0 : fn maybe_cgexec(cmd: &str) -> Command {
262 0 : // The cplane sets this env var for autoscaling computes.
263 0 : // Use `var_os` so we don't have to worry about the variable being valid
264 0 : // unicode. It should never be a concern... but just in case.
265 0 : if env::var_os("AUTOSCALING").is_some() {
266 0 : let mut command = Command::new("cgexec");
267 0 : command.args(["-g", "memory:neon-postgres"]);
268 0 : command.arg(cmd);
269 0 : command
270 : } else {
271 0 : Command::new(cmd)
272 : }
273 0 : }
274 :
275 : struct PostgresHandle {
276 : postgres: std::process::Child,
277 : log_collector: tokio::task::JoinHandle<Result<()>>,
278 : }
279 :
280 : impl PostgresHandle {
281 : /// Return PID of the postgres (postmaster) process
282 0 : fn pid(&self) -> Pid {
283 0 : Pid::from_raw(self.postgres.id() as i32)
284 0 : }
285 : }
286 :
287 : struct StartVmMonitorResult {
288 : #[cfg(target_os = "linux")]
289 : token: tokio_util::sync::CancellationToken,
290 : #[cfg(target_os = "linux")]
291 : vm_monitor: Option<tokio::task::JoinHandle<Result<()>>>,
292 : }
293 :
294 0 : pub(crate) fn construct_superuser_query(spec: &ComputeSpec) -> String {
295 0 : let roles = spec
296 0 : .cluster
297 0 : .roles
298 0 : .iter()
299 0 : .map(|r| escape_literal(&r.name))
300 0 : .collect::<Vec<_>>();
301 0 :
302 0 : let dbs = spec
303 0 : .cluster
304 0 : .databases
305 0 : .iter()
306 0 : .map(|db| escape_literal(&db.name))
307 0 : .collect::<Vec<_>>();
308 :
309 0 : let roles_decl = if roles.is_empty() {
310 0 : String::from("roles text[] := NULL;")
311 : } else {
312 0 : format!(
313 0 : r#"
314 0 : roles text[] := ARRAY(SELECT rolname
315 0 : FROM pg_catalog.pg_roles
316 0 : WHERE rolname IN ({}));"#,
317 0 : roles.join(", ")
318 0 : )
319 : };
320 :
321 0 : let database_decl = if dbs.is_empty() {
322 0 : String::from("dbs text[] := NULL;")
323 : } else {
324 0 : format!(
325 0 : r#"
326 0 : dbs text[] := ARRAY(SELECT datname
327 0 : FROM pg_catalog.pg_database
328 0 : WHERE datname IN ({}));"#,
329 0 : dbs.join(", ")
330 0 : )
331 : };
332 :
333 : // ALL PRIVILEGES grants CREATE, CONNECT, and TEMPORARY on all databases
334 : // (see https://www.postgresql.org/docs/current/ddl-priv.html)
335 0 : let query = format!(
336 0 : r#"
337 0 : DO $$
338 0 : DECLARE
339 0 : r text;
340 0 : {}
341 0 : {}
342 0 : BEGIN
343 0 : IF NOT EXISTS (
344 0 : SELECT FROM pg_catalog.pg_roles WHERE rolname = 'neon_superuser')
345 0 : THEN
346 0 : CREATE ROLE neon_superuser CREATEDB CREATEROLE NOLOGIN REPLICATION BYPASSRLS IN ROLE pg_read_all_data, pg_write_all_data;
347 0 : IF array_length(roles, 1) IS NOT NULL THEN
348 0 : EXECUTE format('GRANT neon_superuser TO %s',
349 0 : array_to_string(ARRAY(SELECT quote_ident(x) FROM unnest(roles) as x), ', '));
350 0 : FOREACH r IN ARRAY roles LOOP
351 0 : EXECUTE format('ALTER ROLE %s CREATEROLE CREATEDB', quote_ident(r));
352 0 : END LOOP;
353 0 : END IF;
354 0 : IF array_length(dbs, 1) IS NOT NULL THEN
355 0 : EXECUTE format('GRANT ALL PRIVILEGES ON DATABASE %s TO neon_superuser',
356 0 : array_to_string(ARRAY(SELECT quote_ident(x) FROM unnest(dbs) as x), ', '));
357 0 : END IF;
358 0 : END IF;
359 0 : END
360 0 : $$;"#,
361 0 : roles_decl, database_decl,
362 0 : );
363 0 :
364 0 : query
365 0 : }
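// Usage sketch (hedged): the generated query is meant to be executed verbatim
// against the compute, e.g. with a `postgres::Client`:
//
//     let query = construct_superuser_query(&spec);
//     client.simple_query(&query)?;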
366 :
367 : impl ComputeNode {
368 0 : pub fn new(params: ComputeNodeParams, cli_spec: Option<ComputeSpec>) -> Result<Self> {
369 0 : let connstr = params.connstr.as_str();
370 0 : let conn_conf = postgres::config::Config::from_str(connstr)
371 0 : .context("cannot build postgres config from connstr")?;
372 0 : let tokio_conn_conf = tokio_postgres::config::Config::from_str(connstr)
373 0 : .context("cannot build tokio postgres config from connstr")?;
374 :
375 0 : let mut new_state = ComputeState::new();
376 0 : if let Some(cli_spec) = cli_spec {
377 0 : let pspec = ParsedSpec::try_from(cli_spec).map_err(|msg| anyhow::anyhow!(msg))?;
378 0 : new_state.pspec = Some(pspec);
379 0 : }
380 :
381 0 : Ok(ComputeNode {
382 0 : params,
383 0 : conn_conf,
384 0 : tokio_conn_conf,
385 0 : state: Mutex::new(new_state),
386 0 : state_changed: Condvar::new(),
387 0 : ext_download_progress: RwLock::new(HashMap::new()),
388 0 : })
389 0 : }
390 :
391 : /// Top-level control flow of compute_ctl. Returns a process exit code we should
392 : /// exit with.
393 0 : pub fn run(self) -> Result<Option<i32>> {
394 0 : let this = Arc::new(self);
395 0 :
396 0 : let cli_spec = this.state.lock().unwrap().pspec.clone();
397 0 :
398 0 : // If this is a pooled VM, prewarm before starting HTTP server and becoming
399 0 : // available for binding. Prewarming helps Postgres start quicker later,
400 0 : // because QEMU will already have its memory allocated from the host, and
401 0 : // the necessary binaries will already be cached.
402 0 : if cli_spec.is_none() {
403 0 : this.prewarm_postgres()?;
404 0 : }
405 :
406 : // Launch the external HTTP server first, so that we can serve control plane
407 : // requests while configuration is still in progress.
408 0 : crate::http::server::Server::External(this.params.external_http_port).launch(&this);
409 0 :
410 0 : // The internal HTTP server could be launched later, but there isn't much
411 0 : // sense in waiting.
412 0 : crate::http::server::Server::Internal(this.params.internal_http_port).launch(&this);
413 :
414 : // If we got a spec from the CLI already, use that. Otherwise wait for the
415 : // control plane to pass it to us with a /configure HTTP request
416 0 : let pspec = if let Some(cli_spec) = cli_spec {
417 0 : cli_spec
418 : } else {
419 0 : this.wait_spec()?
420 : };
421 :
422 0 : launch_lsn_lease_bg_task_for_static(&this);
423 0 :
424 0 : // We have a spec, start the compute
425 0 : let mut delay_exit = false;
426 0 : let mut vm_monitor = None;
427 0 : let mut pg_process: Option<PostgresHandle> = None;
428 0 :
429 0 : match this.start_compute(&mut pg_process) {
430 0 : Ok(()) => {
431 0 : // Success! Launch remaining services (just vm-monitor currently)
432 0 : vm_monitor =
433 0 : Some(this.start_vm_monitor(pspec.spec.disable_lfc_resizing.unwrap_or(false)));
434 0 : }
435 0 : Err(err) => {
436 0 : // Something went wrong with the startup. Log it and expose the error to
437 0 : // HTTP status requests.
438 0 : error!("could not start the compute node: {:#}", err);
439 0 : this.set_failed_status(err);
440 0 : delay_exit = true;
441 :
442 : // If the error happened after starting PostgreSQL, kill it
443 0 : if let Some(ref pg_process) = pg_process {
444 0 : kill(pg_process.pid(), Signal::SIGQUIT).ok();
445 0 : }
446 : }
447 : }
448 :
449 : // If startup was successful, or it failed in the late stages,
450 : // PostgreSQL is now running. Wait until it exits.
451 0 : let exit_code = if let Some(pg_handle) = pg_process {
452 0 : let exit_status = this.wait_postgres(pg_handle);
453 0 : info!("Postgres exited with code {}, shutting down", exit_status);
454 0 : exit_status.code()
455 : } else {
456 0 : None
457 : };
458 :
459 : // Terminate the vm_monitor so it releases the file watcher on
460 : // /sys/fs/cgroup/neon-postgres.
461 : // Note: the vm-monitor only runs on linux because it requires cgroups.
462 0 : if let Some(vm_monitor) = vm_monitor {
463 : cfg_if::cfg_if! {
464 : if #[cfg(target_os = "linux")] {
465 : // Kills all threads spawned by the monitor
466 0 : vm_monitor.token.cancel();
467 0 : if let Some(handle) = vm_monitor.vm_monitor {
468 0 : // Kills the actual task running the monitor
469 0 : handle.abort();
470 0 : }
471 : }
472 : }
473 0 : }
474 :
475 : // Reap the postgres process
476 0 : delay_exit |= this.cleanup_after_postgres_exit()?;
477 :
478 : // If launch failed, keep serving HTTP requests for a while, so the cloud
479 : // control plane can get the actual error.
480 0 : if delay_exit {
481 0 : info!("giving control plane 30s to collect the error before shutdown");
482 0 : std::thread::sleep(Duration::from_secs(30));
483 0 : }
484 0 : Ok(exit_code)
485 0 : }
486 :
487 0 : pub fn wait_spec(&self) -> Result<ParsedSpec> {
488 0 : info!("no compute spec provided, waiting");
489 0 : let mut state = self.state.lock().unwrap();
490 0 : while state.status != ComputeStatus::ConfigurationPending {
491 0 : state = self.state_changed.wait(state).unwrap();
492 0 : }
493 :
494 0 : info!("got spec, continue configuration");
495 0 : let spec = state.pspec.as_ref().unwrap().clone();
496 0 :
497 0 : // Record for how long we slept waiting for the spec.
498 0 : let now = Utc::now();
499 0 : state.metrics.wait_for_spec_ms = now
500 0 : .signed_duration_since(state.start_time)
501 0 : .to_std()
502 0 : .unwrap()
503 0 : .as_millis() as u64;
504 0 :
505 0 : // Reset start time, so that the total startup time that is calculated later will
506 0 : // not include the time that we waited for the spec.
507 0 : state.start_time = now;
508 0 :
509 0 : Ok(spec)
510 0 : }
511 :
512 : /// Start compute.
513 : ///
514 : /// Prerequisites:
515 : /// - the compute spec has been placed in self.state.pspec
516 : ///
517 : /// On success:
518 : /// - status is set to ComputeStatus::Running
519 : /// - the `pg_handle` out-parameter is set
520 : ///
521 : /// On error:
522 : /// - status is left in ComputeStatus::Init. The caller is responsible for setting it to Failed
523 : /// - if Postgres was started before the fatal error happened, the `pg_handle`
524 : /// out-parameter is set. The caller is responsible for killing it.
525 : ///
526 : /// Note that this is in the critical path of a compute cold start. Keep this fast.
527 : /// Try to do things concurrently, to hide the latencies.
528 0 : fn start_compute(self: &Arc<Self>, pg_handle: &mut Option<PostgresHandle>) -> Result<()> {
529 0 : let compute_state: ComputeState;
530 0 :
531 0 : let start_compute_span;
532 0 : let _this_entered;
533 0 : {
534 0 : let mut state_guard = self.state.lock().unwrap();
535 :
536 : // Create a tracing span for the startup operation.
537 : //
538 : // We could otherwise just annotate the function with #[instrument], but if
539 : // we're being configured from a /configure HTTP request, we want the
540 : // startup to be considered part of the /configure request.
541 : //
542 : // Similarly, if a trace ID was passed in env variables, attach it to the span.
543 0 : start_compute_span = {
544 : // Temporarily enter the parent span, so that the new span becomes its child.
545 0 : if let Some(p) = state_guard.startup_span.take() {
546 0 : let _parent_entered = p.entered();
547 0 : tracing::info_span!("start_compute")
548 0 : } else if let Some(otel_context) = startup_context_from_env() {
549 : use tracing_opentelemetry::OpenTelemetrySpanExt;
550 0 : let span = tracing::info_span!("start_compute");
551 0 : span.set_parent(otel_context);
552 0 : span
553 : } else {
554 0 : tracing::info_span!("start_compute")
555 : }
556 : };
557 0 : _this_entered = start_compute_span.enter();
558 0 :
559 0 : state_guard.set_status(ComputeStatus::Init, &self.state_changed);
560 0 : compute_state = state_guard.clone()
561 0 : }
562 0 :
563 0 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
564 0 : info!(
565 0 : "starting compute for project {}, operation {}, tenant {}, timeline {}, features {:?}, spec.remote_extensions {:?}",
566 0 : pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None"),
567 0 : pspec.spec.operation_uuid.as_deref().unwrap_or("None"),
568 : pspec.tenant_id,
569 : pspec.timeline_id,
570 : pspec.spec.features,
571 : pspec.spec.remote_extensions,
572 : );
573 :
574 : ////// PRE-STARTUP PHASE: things that need to be finished before we start the Postgres process
575 :
576 : // Collect all the tasks that must finish here
577 0 : let mut pre_tasks = tokio::task::JoinSet::new();
578 0 :
579 0 : // If there are any remote extensions in shared_preload_libraries, start downloading them
580 0 : if pspec.spec.remote_extensions.is_some() {
581 0 : let (this, spec) = (self.clone(), pspec.spec.clone());
582 0 : pre_tasks.spawn(async move {
583 0 : this.download_preload_extensions(&spec)
584 0 : .in_current_span()
585 0 : .await
586 0 : });
587 0 : }
588 :
589 : // Prepare pgdata directory. This downloads the basebackup, among other things.
590 0 : {
591 0 : let (this, cs) = (self.clone(), compute_state.clone());
592 0 : pre_tasks.spawn_blocking_child(move || this.prepare_pgdata(&cs));
593 0 : }
594 :
595 : // Resize swap to the desired size if the compute spec says so
596 0 : if let (Some(size_bytes), true) =
597 0 : (pspec.spec.swap_size_bytes, self.params.resize_swap_on_bind)
598 0 : {
599 0 : pre_tasks.spawn_blocking_child(move || {
600 0 : // To avoid 'swapoff' hitting postgres startup, we need to run resize-swap to completion
601 0 : // *before* starting postgres.
602 0 : //
603 0 : // In theory, we could do this asynchronously if SkipSwapon was enabled for VMs, but this
604 0 : // carries a risk of introducing hard-to-debug issues - e.g. if postgres sometimes gets
605 0 : // OOM-killed during startup because swap wasn't available yet.
606 0 : resize_swap(size_bytes).context("failed to resize swap")?;
607 0 : let size_mib = size_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
608 0 : info!(%size_bytes, %size_mib, "resized swap");
609 :
610 0 : Ok::<(), anyhow::Error>(())
611 0 : });
612 0 : }
613 :
614 : // Set disk quota if the compute spec says so
615 0 : if let (Some(disk_quota_bytes), Some(disk_quota_fs_mountpoint)) = (
616 0 : pspec.spec.disk_quota_bytes,
617 0 : self.params.set_disk_quota_for_fs.as_ref(),
618 0 : ) {
619 0 : let disk_quota_fs_mountpoint = disk_quota_fs_mountpoint.clone();
620 0 : pre_tasks.spawn_blocking_child(move || {
621 0 : set_disk_quota(disk_quota_bytes, &disk_quota_fs_mountpoint)
622 0 : .context("failed to set disk quota")?;
623 0 : let size_mib = disk_quota_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
624 0 : info!(%disk_quota_bytes, %size_mib, "set disk quota");
625 :
626 0 : Ok::<(), anyhow::Error>(())
627 0 : });
628 0 : }
629 :
630 : // tune pgbouncer
631 0 : if let Some(pgbouncer_settings) = &pspec.spec.pgbouncer_settings {
632 0 : info!("tuning pgbouncer");
633 :
634 : // Spawn a background task to do the tuning,
635 : // so that we don't block the main thread that starts Postgres.
636 0 : let pgbouncer_settings = pgbouncer_settings.clone();
637 0 : let _handle = tokio::spawn(async move {
638 0 : let res = tune_pgbouncer(pgbouncer_settings).await;
639 0 : if let Err(err) = res {
640 0 : error!("error while tuning pgbouncer: {err:?}");
641 : // Continue with the startup anyway
642 0 : }
643 0 : });
644 0 : }
645 :
646 : // configure local_proxy
647 0 : if let Some(local_proxy) = &pspec.spec.local_proxy_config {
648 0 : info!("configuring local_proxy");
649 :
650 : // Spawn a background task to do the configuration,
651 : // so that we don't block the main thread that starts Postgres.
652 0 : let local_proxy = local_proxy.clone();
653 0 : let _handle = tokio::spawn(async move {
654 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
655 0 : error!("error while configuring local_proxy: {err:?}");
656 : // Continue with the startup anyway
657 0 : }
658 0 : });
659 0 : }
660 :
661 : // Launch remaining service threads
662 0 : let _monitor_handle = launch_monitor(self);
663 0 : let _configurator_handle = launch_configurator(self);
664 0 :
665 0 : // Wait for all the pre-tasks to finish before starting postgres
666 0 : let rt = tokio::runtime::Handle::current();
667 0 : while let Some(res) = rt.block_on(pre_tasks.join_next()) {
668 0 : res??;
669 : }
670 :
671 : ////// START POSTGRES
672 0 : let start_time = Utc::now();
673 0 : let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
674 0 : let postmaster_pid = pg_process.pid();
675 0 : *pg_handle = Some(pg_process);
676 0 :
677 0 : // If this is a primary endpoint, perform some post-startup configuration before
678 0 : // opening it up for the world.
679 0 : let config_time = Utc::now();
680 0 : if pspec.spec.mode == ComputeMode::Primary {
681 0 : self.configure_as_primary(&compute_state)?;
682 :
683 0 : let conf = self.get_conn_conf(None);
684 0 : tokio::task::spawn_blocking(|| {
685 0 : let res = get_installed_extensions(conf);
686 0 : match res {
687 0 : Ok(extensions) => {
688 0 : info!(
689 0 : "[NEON_EXT_STAT] {}",
690 0 : serde_json::to_string(&extensions)
691 0 : .expect("failed to serialize extensions list")
692 : );
693 : }
694 0 : Err(err) => error!("could not get installed extensions: {err:?}"),
695 : }
696 0 : });
697 0 : }
698 :
699 : // All done!
700 0 : let startup_end_time = Utc::now();
701 0 : let metrics = {
702 0 : let mut state = self.state.lock().unwrap();
703 0 : state.metrics.start_postgres_ms = config_time
704 0 : .signed_duration_since(start_time)
705 0 : .to_std()
706 0 : .unwrap()
707 0 : .as_millis() as u64;
708 0 : state.metrics.config_ms = startup_end_time
709 0 : .signed_duration_since(config_time)
710 0 : .to_std()
711 0 : .unwrap()
712 0 : .as_millis() as u64;
713 0 : state.metrics.total_startup_ms = startup_end_time
714 0 : .signed_duration_since(compute_state.start_time)
715 0 : .to_std()
716 0 : .unwrap()
717 0 : .as_millis() as u64;
718 0 : state.metrics.clone()
719 0 : };
720 0 : self.set_status(ComputeStatus::Running);
721 0 :
722 0 : // Log metrics so that we can search for slow operations in logs
723 0 : info!(?metrics, postmaster_pid = %postmaster_pid, "compute start finished");
724 :
725 0 : Ok(())
726 0 : }
727 :
728 : #[instrument(skip_all)]
729 : async fn download_preload_extensions(&self, spec: &ComputeSpec) -> Result<()> {
730 : let remote_extensions = if let Some(remote_extensions) = &spec.remote_extensions {
731 : remote_extensions
732 : } else {
733 : return Ok(());
734 : };
735 :
736 : // First, create control files for all available extensions
737 : extension_server::create_control_files(remote_extensions, &self.params.pgbin);
738 :
739 : let library_load_start_time = Utc::now();
740 : let remote_ext_metrics = self.prepare_preload_libraries(spec).await?;
741 :
742 : let library_load_time = Utc::now()
743 : .signed_duration_since(library_load_start_time)
744 : .to_std()
745 : .unwrap()
746 : .as_millis() as u64;
747 : let mut state = self.state.lock().unwrap();
748 : state.metrics.load_ext_ms = library_load_time;
749 : state.metrics.num_ext_downloaded = remote_ext_metrics.num_ext_downloaded;
750 : state.metrics.largest_ext_size = remote_ext_metrics.largest_ext_size;
751 : state.metrics.total_ext_download_size = remote_ext_metrics.total_ext_download_size;
752 : info!(
753 : "Loading shared_preload_libraries took {:?}ms",
754 : library_load_time
755 : );
756 : info!("{:?}", remote_ext_metrics);
757 :
758 : Ok(())
759 : }
760 :
761 : /// Start the vm-monitor if directed to. The vm-monitor only runs on linux
762 : /// because it requires cgroups.
763 0 : fn start_vm_monitor(&self, disable_lfc_resizing: bool) -> StartVmMonitorResult {
764 : cfg_if::cfg_if! {
765 : if #[cfg(target_os = "linux")] {
766 : use std::env;
767 : use tokio_util::sync::CancellationToken;
768 :
769 : // This token is used internally by the monitor to clean up all threads
770 0 : let token = CancellationToken::new();
771 :
772 : // don't pass postgres connection string to vm-monitor if we don't want it to resize LFC
773 0 : let pgconnstr = if disable_lfc_resizing {
774 0 : None
775 : } else {
776 0 : Some(self.params.filecache_connstr.clone())
777 : };
778 :
779 0 : let vm_monitor = if env::var_os("AUTOSCALING").is_some() {
780 0 : let vm_monitor = tokio::spawn(vm_monitor::start(
781 0 : Box::leak(Box::new(vm_monitor::Args {
782 0 : cgroup: Some(self.params.cgroup.clone()),
783 0 : pgconnstr,
784 0 : addr: self.params.vm_monitor_addr.clone(),
785 0 : })),
786 0 : token.clone(),
787 0 : ));
788 0 : Some(vm_monitor)
789 : } else {
790 0 : None
791 : };
792 0 : StartVmMonitorResult { token, vm_monitor }
793 0 : } else {
794 0 : StartVmMonitorResult { }
795 0 : }
796 0 : }
797 0 : }
798 :
799 0 : fn cleanup_after_postgres_exit(&self) -> Result<bool> {
800 0 : // Maybe sync safekeepers again, to speed up next startup
801 0 : let compute_state = self.state.lock().unwrap().clone();
802 0 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
803 0 : if matches!(pspec.spec.mode, compute_api::spec::ComputeMode::Primary) {
804 0 : info!("syncing safekeepers on shutdown");
805 0 : let storage_auth_token = pspec.storage_auth_token.clone();
806 0 : let lsn = self.sync_safekeepers(storage_auth_token)?;
807 0 : info!("synced safekeepers at lsn {lsn}");
808 0 : }
809 :
810 0 : let mut delay_exit = false;
811 0 : let mut state = self.state.lock().unwrap();
812 0 : if state.status == ComputeStatus::TerminationPending {
813 0 : state.status = ComputeStatus::Terminated;
814 0 : self.state_changed.notify_all();
815 0 : // we were asked to terminate gracefully, don't exit to avoid restart
816 0 : delay_exit = true
817 0 : }
818 0 : drop(state);
819 :
820 0 : if let Err(err) = self.check_for_core_dumps() {
821 0 : error!("error while checking for core dumps: {err:?}");
822 0 : }
823 :
824 0 : Ok(delay_exit)
825 0 : }
826 :
827 : /// Check that compute node has corresponding feature enabled.
828 0 : pub fn has_feature(&self, feature: ComputeFeature) -> bool {
829 0 : let state = self.state.lock().unwrap();
830 :
831 0 : if let Some(s) = state.pspec.as_ref() {
832 0 : s.spec.features.contains(&feature)
833 : } else {
834 0 : false
835 : }
836 0 : }
837 :
838 0 : pub fn set_status(&self, status: ComputeStatus) {
839 0 : let mut state = self.state.lock().unwrap();
840 0 : state.set_status(status, &self.state_changed);
841 0 : }
842 :
843 0 : pub fn set_failed_status(&self, err: anyhow::Error) {
844 0 : let mut state = self.state.lock().unwrap();
845 0 : state.set_failed_status(err, &self.state_changed);
846 0 : }
847 :
848 0 : pub fn get_status(&self) -> ComputeStatus {
849 0 : self.state.lock().unwrap().status
850 0 : }
851 :
852 0 : pub fn get_timeline_id(&self) -> Option<TimelineId> {
853 0 : self.state
854 0 : .lock()
855 0 : .unwrap()
856 0 : .pspec
857 0 : .as_ref()
858 0 : .map(|s| s.timeline_id)
859 0 : }
860 :
861 : // Remove the `pgdata` directory and create it again with the right permissions.
862 0 : fn create_pgdata(&self) -> Result<()> {
863 0 : // Ignore removal error, likely it is a 'No such file or directory (os error 2)'.
864 0 : // If it is something different then create_dir() will error out anyway.
865 0 : let pgdata = &self.params.pgdata;
866 0 : let _ok = fs::remove_dir_all(pgdata);
867 0 : fs::create_dir(pgdata)?;
868 0 : fs::set_permissions(pgdata, fs::Permissions::from_mode(0o700))?;
869 :
870 0 : Ok(())
871 0 : }
872 :
873 : // Get a basebackup from the pageserver over the libpq connection given by `connstr` and
874 : // unarchive it into the `pgdata` directory, replacing all of its previous content.
875 : #[instrument(skip_all, fields(%lsn))]
876 : fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
877 : let spec = compute_state.pspec.as_ref().expect("spec must be set");
878 : let start_time = Instant::now();
879 :
880 : let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
881 : let mut config = postgres::Config::from_str(shard0_connstr)?;
882 :
883 : // Use the storage auth token from the config file, if given.
884 : // Note: this overrides any password set in the connection string.
885 : if let Some(storage_auth_token) = &spec.storage_auth_token {
886 : info!("Got storage auth token from spec file");
887 : config.password(storage_auth_token);
888 : } else {
889 : info!("Storage auth token not set");
890 : }
891 :
892 : // Connect to pageserver
893 : let mut client = config.connect(NoTls)?;
894 : let pageserver_connect_micros = start_time.elapsed().as_micros() as u64;
895 :
896 : let basebackup_cmd = match lsn {
897 : Lsn(0) => {
898 : if spec.spec.mode != ComputeMode::Primary {
899 : format!(
900 : "basebackup {} {} --gzip --replica",
901 : spec.tenant_id, spec.timeline_id
902 : )
903 : } else {
904 : format!("basebackup {} {} --gzip", spec.tenant_id, spec.timeline_id)
905 : }
906 : }
907 : _ => {
908 : if spec.spec.mode != ComputeMode::Primary {
909 : format!(
910 : "basebackup {} {} {} --gzip --replica",
911 : spec.tenant_id, spec.timeline_id, lsn
912 : )
913 : } else {
914 : format!(
915 : "basebackup {} {} {} --gzip",
916 : spec.tenant_id, spec.timeline_id, lsn
917 : )
918 : }
919 : }
920 : };
921 :
922 : let copyreader = client.copy_out(basebackup_cmd.as_str())?;
923 : let mut measured_reader = MeasuredReader::new(copyreader);
924 : let mut bufreader = std::io::BufReader::new(&mut measured_reader);
925 :
926 : // Read the archive directly from the `CopyOutReader`
927 : //
928 : // Set `ignore_zeros` so that unpack() reads all the Copy data and
929 : // doesn't stop at the end-of-archive marker. Otherwise, if the server
930 : // sends an Error after finishing the tarball, we will not notice it.
931 : let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut bufreader));
932 : ar.set_ignore_zeros(true);
933 : ar.unpack(&self.params.pgdata)?;
934 :
935 : // Report metrics
936 : let mut state = self.state.lock().unwrap();
937 : state.metrics.pageserver_connect_micros = pageserver_connect_micros;
938 : state.metrics.basebackup_bytes = measured_reader.get_byte_count() as u64;
939 : state.metrics.basebackup_ms = start_time.elapsed().as_millis() as u64;
940 : Ok(())
941 : }
942 :
943 : // Gets the basebackup in a retry loop
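//
// With the defaults below, this makes up to 10 retries after the initial
// attempt, with the delay between attempts growing geometrically:
// 500ms, 750ms, 1125ms, ... (multiplied by 1.5 each time).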
944 : #[instrument(skip_all, fields(%lsn))]
945 : pub fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
946 : let mut retry_period_ms = 500.0;
947 : let mut attempts = 0;
948 : const DEFAULT_ATTEMPTS: u16 = 10;
949 : #[cfg(feature = "testing")]
950 : let max_attempts = if let Ok(v) = env::var("NEON_COMPUTE_TESTING_BASEBACKUP_RETRIES") {
951 : u16::from_str(&v).unwrap()
952 : } else {
953 : DEFAULT_ATTEMPTS
954 : };
955 : #[cfg(not(feature = "testing"))]
956 : let max_attempts = DEFAULT_ATTEMPTS;
957 : loop {
958 : let result = self.try_get_basebackup(compute_state, lsn);
959 : match result {
960 : Ok(_) => {
961 : return result;
962 : }
963 : Err(ref e) if attempts < max_attempts => {
964 : warn!(
965 : "Failed to get basebackup: {} (attempt {}/{})",
966 : e, attempts, max_attempts
967 : );
968 : std::thread::sleep(std::time::Duration::from_millis(retry_period_ms as u64));
969 : retry_period_ms *= 1.5;
970 : }
971 : Err(_) => {
972 : return result;
973 : }
974 : }
975 : attempts += 1;
976 : }
977 : }
978 :
979 0 : pub async fn check_safekeepers_synced_async(
980 0 : &self,
981 0 : compute_state: &ComputeState,
982 0 : ) -> Result<Option<Lsn>> {
983 0 : // Construct a connection config for each safekeeper
984 0 : let pspec: ParsedSpec = compute_state
985 0 : .pspec
986 0 : .as_ref()
987 0 : .expect("spec must be set")
988 0 : .clone();
989 0 : let sk_connstrs: Vec<String> = pspec.safekeeper_connstrings.clone();
990 0 : let sk_configs = sk_connstrs.into_iter().map(|connstr| {
991 0 : // Format connstr
992 0 : let id = connstr.clone();
993 0 : let connstr = format!("postgresql://no_user@{}", connstr);
994 0 : let options = format!(
995 0 : "-c timeline_id={} tenant_id={}",
996 0 : pspec.timeline_id, pspec.tenant_id
997 0 : );
998 0 :
999 0 : // Construct client
1000 0 : let mut config = tokio_postgres::Config::from_str(&connstr).unwrap();
1001 0 : config.options(&options);
1002 0 : if let Some(storage_auth_token) = pspec.storage_auth_token.clone() {
1003 0 : config.password(storage_auth_token);
1004 0 : }
1005 :
1006 0 : (id, config)
1007 0 : });
1008 0 :
1009 0 : // Create task set to query all safekeepers
1010 0 : let mut tasks = FuturesUnordered::new();
1011 0 : let quorum = sk_configs.len() / 2 + 1;
1012 0 : for (id, config) in sk_configs {
1013 0 : let timeout = tokio::time::Duration::from_millis(100);
1014 0 : let task = tokio::time::timeout(timeout, ping_safekeeper(id, config));
1015 0 : tasks.push(tokio::spawn(task));
1016 0 : }
1017 :
1018 : // Get a quorum of responses or errors
1019 0 : let mut responses = Vec::new();
1020 0 : let mut join_errors = Vec::new();
1021 0 : let mut task_errors = Vec::new();
1022 0 : let mut timeout_errors = Vec::new();
1023 0 : while let Some(response) = tasks.next().await {
1024 0 : match response {
1025 0 : Ok(Ok(Ok(r))) => responses.push(r),
1026 0 : Ok(Ok(Err(e))) => task_errors.push(e),
1027 0 : Ok(Err(e)) => timeout_errors.push(e),
1028 0 : Err(e) => join_errors.push(e),
1029 : };
1030 0 : if responses.len() >= quorum {
1031 0 : break;
1032 0 : }
1033 0 : if join_errors.len() + task_errors.len() + timeout_errors.len() >= quorum {
1034 0 : break;
1035 0 : }
1036 : }
1037 :
1038 : // In case of error, log and fail the check, but don't crash.
1039 : // We're playing it safe because these errors could be transient
1040 : // and we don't yet retry. Also being careful here allows us to
1041 : // be backwards compatible with safekeepers that don't have the
1042 : // TIMELINE_STATUS API yet.
1043 0 : if responses.len() < quorum {
1044 0 : error!(
1045 0 : "failed sync safekeepers check {:?} {:?} {:?}",
1046 : join_errors, task_errors, timeout_errors
1047 : );
1048 0 : return Ok(None);
1049 0 : }
1050 0 :
1051 0 : Ok(check_if_synced(responses))
1052 0 : }
1053 :
1054 : // Fast path for sync_safekeepers. If they're already synced we get the lsn
1055 : // in one roundtrip. If not, we should do a full sync_safekeepers.
1056 : #[instrument(skip_all)]
1057 : pub fn check_safekeepers_synced(&self, compute_state: &ComputeState) -> Result<Option<Lsn>> {
1058 : let start_time = Utc::now();
1059 :
1060 : let rt = tokio::runtime::Handle::current();
1061 : let result = rt.block_on(self.check_safekeepers_synced_async(compute_state));
1062 :
1063 : // Record runtime
1064 : self.state.lock().unwrap().metrics.sync_sk_check_ms = Utc::now()
1065 : .signed_duration_since(start_time)
1066 : .to_std()
1067 : .unwrap()
1068 : .as_millis() as u64;
1069 : result
1070 : }
1071 :
1072 : // Run `postgres` in a special mode with the `--sync-safekeepers` argument
1073 : // and return the reported LSN back to the caller.
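//
// Roughly equivalent to running (a sketch; the actual invocation is built
// below and may be wrapped in cgexec):
//
//     PGDATA=<pgdata> [NEON_AUTH_TOKEN=<token>] postgres --sync-safekeepers
//
// which prints the final LSN (e.g. "0/1C9C2D8") to stdout.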
1074 : #[instrument(skip_all)]
1075 : pub fn sync_safekeepers(&self, storage_auth_token: Option<String>) -> Result<Lsn> {
1076 : let start_time = Utc::now();
1077 :
1078 : let mut sync_handle = maybe_cgexec(&self.params.pgbin)
1079 : .args(["--sync-safekeepers"])
1080 : .env("PGDATA", &self.params.pgdata) // we cannot use -D in this mode
1081 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
1082 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
1083 : } else {
1084 : vec![]
1085 : })
1086 : .stdout(Stdio::piped())
1087 : .stderr(Stdio::piped())
1088 : .spawn()
1089 : .expect("postgres --sync-safekeepers failed to start");
1090 : SYNC_SAFEKEEPERS_PID.store(sync_handle.id(), Ordering::SeqCst);
1091 :
1092 : // `postgres --sync-safekeepers` will print all log output to stderr and
1093 : // final LSN to stdout. So we leave stdout to collect LSN, while stderr logs
1094 : // will be collected in a child thread.
1095 : let stderr = sync_handle
1096 : .stderr
1097 : .take()
1098 : .expect("stderr should be captured");
1099 : let logs_handle = handle_postgres_logs(stderr);
1100 :
1101 : let sync_output = sync_handle
1102 : .wait_with_output()
1103 : .expect("postgres --sync-safekeepers failed");
1104 : SYNC_SAFEKEEPERS_PID.store(0, Ordering::SeqCst);
1105 :
1106 : // Process has exited, so we can join the logs thread.
1107 : let _ = tokio::runtime::Handle::current()
1108 : .block_on(logs_handle)
1109 0 : .map_err(|e| tracing::error!("log task panicked: {:?}", e));
1110 :
1111 : if !sync_output.status.success() {
1112 : anyhow::bail!(
1113 : "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
1114 : sync_output.status,
1115 : String::from_utf8(sync_output.stdout)
1116 : .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
1117 : );
1118 : }
1119 :
1120 : self.state.lock().unwrap().metrics.sync_safekeepers_ms = Utc::now()
1121 : .signed_duration_since(start_time)
1122 : .to_std()
1123 : .unwrap()
1124 : .as_millis() as u64;
1125 :
1126 : let lsn = Lsn::from_str(String::from_utf8(sync_output.stdout)?.trim())?;
1127 :
1128 : Ok(lsn)
1129 : }
1130 :
1131 : /// Do all the preparations like PGDATA directory creation, configuration,
1132 : /// safekeepers sync, basebackup, etc.
1133 : #[instrument(skip_all)]
1134 : pub fn prepare_pgdata(&self, compute_state: &ComputeState) -> Result<()> {
1135 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
1136 : let spec = &pspec.spec;
1137 : let pgdata_path = Path::new(&self.params.pgdata);
1138 :
1139 : // Remove/create an empty pgdata directory and put configuration there.
1140 : self.create_pgdata()?;
1141 : config::write_postgres_conf(
1142 : &pgdata_path.join("postgresql.conf"),
1143 : &pspec.spec,
1144 : self.params.internal_http_port,
1145 : )?;
1146 :
1147 : // Syncing safekeepers is only safe with primary nodes: if a primary
1148 : // is already connected it will be kicked out, so a secondary (standby)
1149 : // cannot sync safekeepers.
1150 : let lsn = match spec.mode {
1151 : ComputeMode::Primary => {
1152 : info!("checking if safekeepers are synced");
1153 : let lsn = if let Ok(Some(lsn)) = self.check_safekeepers_synced(compute_state) {
1154 : lsn
1155 : } else {
1156 : info!("starting safekeepers syncing");
1157 : self.sync_safekeepers(pspec.storage_auth_token.clone())
1158 0 : .with_context(|| "failed to sync safekeepers")?
1159 : };
1160 : info!("safekeepers synced at LSN {}", lsn);
1161 : lsn
1162 : }
1163 : ComputeMode::Static(lsn) => {
1164 : info!("Starting read-only node at static LSN {}", lsn);
1165 : lsn
1166 : }
1167 : ComputeMode::Replica => {
1168 : info!("Initializing standby from latest Pageserver LSN");
1169 : Lsn(0)
1170 : }
1171 : };
1172 :
1173 : info!(
1174 : "getting basebackup@{} from pageserver {}",
1175 : lsn, &pspec.pageserver_connstr
1176 : );
1177 0 : self.get_basebackup(compute_state, lsn).with_context(|| {
1178 0 : format!(
1179 0 : "failed to get basebackup@{} from pageserver {}",
1180 0 : lsn, &pspec.pageserver_connstr
1181 0 : )
1182 0 : })?;
1183 :
1184 : // Update pg_hba.conf received with basebackup.
1185 : update_pg_hba(pgdata_path)?;
1186 :
1187 : // Place pg_dynshmem under /dev/shm. This allows us to use
1188 : // 'dynamic_shared_memory_type = mmap' so that the files are placed in
1189 : // /dev/shm, similar to how 'dynamic_shared_memory_type = posix' works.
1190 : //
1191 : // Why on earth don't we just stick to the 'posix' default, you might
1192 : // ask. It turns out that making large allocations with 'posix' doesn't
1193 : // work very well with autoscaling. The behavior we want is that:
1194 : //
1195 : // 1. You can make large DSM allocations, larger than the current RAM
1196 : // size of the VM, without errors
1197 : //
1198 : // 2. If the allocated memory is really used, the VM is scaled up
1199 : // automatically to accommodate that
1200 : //
1201 : // We try to make that possible by having swap in the VM. But with the
1202 : // default 'posix' DSM implementation, we fail step 1, even when there's
1203 : // plenty of swap available. PostgreSQL uses posix_fallocate() to create
1204 : // the shmem segment, which is really just a file in /dev/shm in Linux,
1205 : // but posix_fallocate() on tmpfs returns ENOMEM if the size is larger
1206 : // than available RAM.
1207 : //
1208 : // Using 'dynamic_shared_memory_type = mmap' works around that, because
1209 : // the Postgres 'mmap' DSM implementation doesn't use
1210 : // posix_fallocate(). Instead, it uses repeated calls to write(2) to
1211 : // fill the file with zeros. It's weird that that differs between
1212 : // 'posix' and 'mmap', but we take advantage of it. When the file is
1213 : // filled slowly with write(2), the kernel allows it to grow larger, as
1214 : // long as there's swap available.
1215 : //
1216 : // In short, using 'dynamic_shared_memory_type = mmap' allows a single DSM
1217 : // segment to be larger than the currently available RAM. But we don't want
1218 : // to store it in a real file, which the kernel would try to flush to disk,
1219 : // so we symlink pg_dynshmem to /dev/shm.
1220 : //
1221 : // We don't set 'dynamic_shared_memory_type = mmap' here, we let the
1222 : // control plane control that option. If 'mmap' is not used, this
1223 : // symlink doesn't affect anything.
1224 : //
1225 : // See https://github.com/neondatabase/autoscaling/issues/800
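//
// For reference, the control-plane-managed GUC this symlink pairs with would
// look like this in postgresql.conf (not written here):
//
//     dynamic_shared_memory_type = mmap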
1226 : std::fs::remove_dir(pgdata_path.join("pg_dynshmem"))?;
1227 : symlink("/dev/shm/", pgdata_path.join("pg_dynshmem"))?;
1228 :
1229 : match spec.mode {
1230 : ComputeMode::Primary => {}
1231 : ComputeMode::Replica | ComputeMode::Static(..) => {
1232 : add_standby_signal(pgdata_path)?;
1233 : }
1234 : }
1235 :
1236 : Ok(())
1237 : }
1238 :
1239 : /// Start and stop a postgres process to warm up the VM for startup.
1240 0 : pub fn prewarm_postgres(&self) -> Result<()> {
1241 0 : info!("prewarming");
1242 :
1243 : // Create pgdata
1244 0 : let pgdata = &format!("{}.warmup", self.params.pgdata);
1245 0 : create_pgdata(pgdata)?;
1246 :
1247 : // Run initdb to completion
1248 0 : info!("running initdb");
1249 0 : let initdb_bin = Path::new(&self.params.pgbin)
1250 0 : .parent()
1251 0 : .unwrap()
1252 0 : .join("initdb");
1253 0 : Command::new(initdb_bin)
1254 0 : .args(["--pgdata", pgdata])
1255 0 : .output()
1256 0 : .expect("cannot start initdb process");
1257 :
1258 : // Write conf
1259 : use std::io::Write;
1260 0 : let conf_path = Path::new(pgdata).join("postgresql.conf");
1261 0 : let mut file = std::fs::File::create(conf_path)?;
1262 0 : writeln!(file, "shared_buffers=65536")?;
1263 0 : writeln!(file, "port=51055")?; // Nobody should be connecting
1264 0 : writeln!(file, "shared_preload_libraries = 'neon'")?;
1265 :
1266 : // Start postgres
1267 0 : info!("starting postgres");
1268 0 : let mut pg = maybe_cgexec(&self.params.pgbin)
1269 0 : .args(["-D", pgdata])
1270 0 : .spawn()
1271 0 : .expect("cannot start postgres process");
1272 0 :
1273 0 : // Stop it when it's ready
1274 0 : info!("waiting for postgres");
1275 0 : wait_for_postgres(&mut pg, Path::new(pgdata))?;
1276 : // SIGQUIT orders postgres to exit immediately. We don't want to SIGKILL
1277 : // it to avoid orphaned processes prowling around while datadir is
1278 : // wiped.
1279 0 : let pm_pid = Pid::from_raw(pg.id() as i32);
1280 0 : kill(pm_pid, Signal::SIGQUIT)?;
1281 0 : info!("sent SIGQUIT signal");
1282 0 : pg.wait()?;
1283 0 : info!("done prewarming");
1284 :
1285 : // clean up
1286 0 : let _ok = fs::remove_dir_all(pgdata);
1287 0 : Ok(())
1288 0 : }
1289 :
1290 : /// Start Postgres as a child process and wait for it to start accepting
1291 : /// connections.
1292 : ///
1293 : /// Returns a handle to the child process and a handle to the logs thread.
1294 : #[instrument(skip_all)]
1295 : pub fn start_postgres(&self, storage_auth_token: Option<String>) -> Result<PostgresHandle> {
1296 : let pgdata_path = Path::new(&self.params.pgdata);
1297 :
1298 : // Run postgres as a child process.
1299 : let mut pg = maybe_cgexec(&self.params.pgbin)
1300 : .args(["-D", &self.params.pgdata])
1301 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
1302 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
1303 : } else {
1304 : vec![]
1305 : })
1306 : .stderr(Stdio::piped())
1307 : .spawn()
1308 : .expect("cannot start postgres process");
1309 : PG_PID.store(pg.id(), Ordering::SeqCst);
1310 :
1311 : // Start a task to collect logs from stderr.
1312 : let stderr = pg.stderr.take().expect("stderr should be captured");
1313 : let logs_handle = handle_postgres_logs(stderr);
1314 :
1315 : wait_for_postgres(&mut pg, pgdata_path)?;
1316 :
1317 : Ok(PostgresHandle {
1318 : postgres: pg,
1319 : log_collector: logs_handle,
1320 : })
1321 : }
1322 :
1323 : /// Wait for the child Postgres process forever. In this state Ctrl+C will
1324 : /// propagate to Postgres and it will be shut down as well.
1325 0 : fn wait_postgres(&self, mut pg_handle: PostgresHandle) -> std::process::ExitStatus {
1326 0 : info!(postmaster_pid = %pg_handle.postgres.id(), "Waiting for Postgres to exit");
1327 :
1328 0 : let ecode = pg_handle
1329 0 : .postgres
1330 0 : .wait()
1331 0 : .expect("failed to start waiting on Postgres process");
1332 0 : PG_PID.store(0, Ordering::SeqCst);
1333 0 :
1334 0 : // Process has exited. Wait for the log collecting task to finish.
1335 0 : let _ = tokio::runtime::Handle::current()
1336 0 : .block_on(pg_handle.log_collector)
1337 0 : .map_err(|e| tracing::error!("log task panicked: {:?}", e));
1338 0 :
1339 0 : ecode
1340 0 : }
1341 :
1342 : /// Do post configuration of the already started Postgres. This function spawns a background task to
1343 : /// configure the database after applying the compute spec. Currently, it upgrades the neon extension
1344 : /// version. In the future, it may upgrade all 3rd-party extensions.
1345 : #[instrument(skip_all)]
1346 : pub fn post_apply_config(&self) -> Result<()> {
1347 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:post_apply_config"));
1348 0 : tokio::spawn(async move {
1349 0 : let res = async {
1350 0 : let (mut client, connection) = conf.connect(NoTls).await?;
1351 0 : tokio::spawn(async move {
1352 0 : if let Err(e) = connection.await {
1353 0 : eprintln!("connection error: {}", e);
1354 0 : }
1355 0 : });
1356 0 :
1357 0 : handle_neon_extension_upgrade(&mut client)
1358 0 : .await
1359 0 : .context("handle_neon_extension_upgrade")?;
1360 0 : Ok::<_, anyhow::Error>(())
1361 0 : }
1362 0 : .await;
1363 0 : if let Err(err) = res {
1364 0 : error!("error while post_apply_config: {err:#}");
1365 0 : }
1366 0 : });
1367 : Ok(())
1368 : }
1369 :
1370 0 : pub fn get_conn_conf(&self, application_name: Option<&str>) -> postgres::Config {
1371 0 : let mut conf = self.conn_conf.clone();
1372 0 : if let Some(application_name) = application_name {
1373 0 : conf.application_name(application_name);
1374 0 : }
1375 0 : conf
1376 0 : }
1377 :
1378 0 : pub fn get_tokio_conn_conf(&self, application_name: Option<&str>) -> tokio_postgres::Config {
1379 0 : let mut conf = self.tokio_conn_conf.clone();
1380 0 : if let Some(application_name) = application_name {
1381 0 : conf.application_name(application_name);
1382 0 : }
1383 0 : conf
1384 0 : }
1385 :
1386 0 : pub async fn get_maintenance_client(
1387 0 : conf: &tokio_postgres::Config,
1388 0 : ) -> Result<tokio_postgres::Client> {
1389 0 : let mut conf = conf.clone();
1390 0 : conf.application_name("compute_ctl:apply_config");
1391 :
1392 0 : let (client, conn) = match conf.connect(NoTls).await {
1393 : // If connection fails, it may be the old node with `zenith_admin` superuser.
1394 : //
1395 : // In this case we need to connect with the old `zenith_admin` name
1396 : // and create a new user. We cannot simply rename the connected user,
1397 : // but we can create a new one and grant it all privileges.
1398 0 : Err(e) => match e.code() {
1399 : Some(&SqlState::INVALID_PASSWORD)
1400 : | Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => {
1401 : // Connect with zenith_admin if cloud_admin could not authenticate
1402 0 : info!(
1403 0 : "cannot connect to postgres: {}, retrying with `zenith_admin` username",
1404 : e
1405 : );
1406 0 : let mut zenith_admin_conf = postgres::config::Config::from(conf.clone());
1407 0 : zenith_admin_conf.application_name("compute_ctl:apply_config");
1408 0 : zenith_admin_conf.user("zenith_admin");
1409 :
1410 0 : let mut client =
1411 0 : zenith_admin_conf.connect(NoTls)
1412 0 : .context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
1413 :
1414 : // Disable forwarding so that users don't get a cloud_admin role
1415 0 : let mut func = || {
1416 0 : client.simple_query("SET neon.forward_ddl = false")?;
1417 0 : client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
1418 0 : client.simple_query("GRANT zenith_admin TO cloud_admin")?;
1419 0 : Ok::<_, anyhow::Error>(())
1420 0 : };
1421 0 : func().context("apply_config setup cloud_admin")?;
1422 :
1423 0 : drop(client);
1424 0 :
1425 0 : // Reconnect with connstring with expected name
1426 0 : conf.connect(NoTls).await?
1427 : }
1428 0 : _ => return Err(e.into()),
1429 : },
1430 0 : Ok((client, conn)) => (client, conn),
1431 : };
1432 :
1433 0 : spawn(async move {
1434 0 : if let Err(e) = conn.await {
1435 0 : error!("maintenance client connection error: {}", e);
1436 0 : }
1437 0 : });
1438 0 :
1439 0 : // Disable DDL forwarding because control plane already knows about the roles/databases
1440 0 : // we're about to modify.
1441 0 : client
1442 0 : .simple_query("SET neon.forward_ddl = false")
1443 0 : .await
1444 0 : .context("apply_config SET neon.forward_ddl = false")?;
1445 :
1446 0 : Ok(client)
1447 0 : }
1448 :
1449 : /// Do initial configuration of the already started Postgres.
1450 : #[instrument(skip_all)]
1451 : pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
1452 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:apply_config"));
1453 :
1454 : let conf = Arc::new(conf);
1455 : let spec = Arc::new(
1456 : compute_state
1457 : .pspec
1458 : .as_ref()
1459 : .expect("spec must be set")
1460 : .spec
1461 : .clone(),
1462 : );
1463 :
1464 : let max_concurrent_connections = self.max_service_connections(compute_state, &spec);
1465 :
1466 : // Merge-apply spec & changes to PostgreSQL state.
1467 : self.apply_spec_sql(spec.clone(), conf.clone(), max_concurrent_connections)?;
1468 :
1469 : if let Some(local_proxy) = &spec.clone().local_proxy_config {
1470 : info!("configuring local_proxy");
1471 : local_proxy::configure(local_proxy).context("apply_config local_proxy")?;
1472 : }
1473 :
1474 : // Run migrations separately to not hold up cold starts
1475 0 : tokio::spawn(async move {
1476 0 : let mut conf = conf.as_ref().clone();
1477 0 : conf.application_name("compute_ctl:migrations");
1478 0 :
1479 0 : match conf.connect(NoTls).await {
1480 0 : Ok((mut client, connection)) => {
1481 0 : tokio::spawn(async move {
1482 0 : if let Err(e) = connection.await {
1483 0 : eprintln!("connection error: {}", e);
1484 0 : }
1485 0 : });
1486 0 : if let Err(e) = handle_migrations(&mut client).await {
1487 0 : error!("Failed to run migrations: {}", e);
1488 0 : }
1489 : }
1490 0 : Err(e) => {
1491 0 : error!(
1492 0 : "Failed to connect to the compute for running migrations: {}",
1493 : e
1494 : );
1495 : }
1496 : };
1497 0 : });
1498 :
1499 : Ok::<(), anyhow::Error>(())
1500 : }
1501 :
1502 : // A thin wrapper around `pg_ctl reload`; note that right now we don't use
1503 : // `pg_ctl` for start / stop.
1504 : #[instrument(skip_all)]
1505 : fn pg_reload_conf(&self) -> Result<()> {
1506 : let pgctl_bin = Path::new(&self.params.pgbin)
1507 : .parent()
1508 : .unwrap()
1509 : .join("pg_ctl");
1510 : Command::new(pgctl_bin)
1511 : .args(["reload", "-D", &self.params.pgdata])
1512 : .output()
1513 : .expect("cannot run pg_ctl process");
1514 : Ok(())
1515 : }
1516 :
1517 : /// Similar to `apply_config()`, but performs a slightly different sequence of operations,
1518 : /// as it's used to reconfigure an already started and configured Postgres node.
1519 : #[instrument(skip_all)]
1520 : pub fn reconfigure(&self) -> Result<()> {
1521 : let spec = self.state.lock().unwrap().pspec.clone().unwrap().spec;
1522 :
1523 : if let Some(ref pgbouncer_settings) = spec.pgbouncer_settings {
1524 : info!("tuning pgbouncer");
1525 :
1526 : // Spawn a background task to do the tuning,
1527 : // so that we don't block the main thread that starts Postgres.
1528 : let pgbouncer_settings = pgbouncer_settings.clone();
1529 0 : tokio::spawn(async move {
1530 0 : let res = tune_pgbouncer(pgbouncer_settings).await;
1531 0 : if let Err(err) = res {
1532 0 : error!("error while tuning pgbouncer: {err:?}");
1533 0 : }
1534 0 : });
1535 : }
1536 :
1537 : if let Some(ref local_proxy) = spec.local_proxy_config {
1538 : info!("configuring local_proxy");
1539 :
1540 : // Spawn a background task to do the configuration,
1541 : // so that we don't block the main thread that starts Postgres.
1542 : let local_proxy = local_proxy.clone();
1543 0 : tokio::spawn(async move {
1544 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
1545 0 : error!("error while configuring local_proxy: {err:?}");
1546 0 : }
1547 0 : });
1548 : }
1549 :
1550 : // Write new config
1551 : let pgdata_path = Path::new(&self.params.pgdata);
1552 : let postgresql_conf_path = pgdata_path.join("postgresql.conf");
1553 : config::write_postgres_conf(&postgresql_conf_path, &spec, self.params.internal_http_port)?;
1554 :
1555 : if !spec.skip_pg_catalog_updates {
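     : // `reconfigure_concurrency` from the spec caps how many concurrent connections
     : // apply_spec_sql may open while updating the catalog.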
1556 : let max_concurrent_connections = spec.reconfigure_concurrency;
1557 : // Temporarily reset max_cluster_size in config
1558 : // to avoid the possibility of hitting the limit, while we are reconfiguring:
1559 : // creating new extensions, roles, etc.
1560 0 : config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
1561 0 : self.pg_reload_conf()?;
1562 :
1563 0 : if spec.mode == ComputeMode::Primary {
1564 0 : let mut conf =
1565 0 : tokio_postgres::Config::from_str(self.params.connstr.as_str()).unwrap();
1566 0 : conf.application_name("apply_config");
1567 0 : let conf = Arc::new(conf);
1568 0 :
1569 0 : let spec = Arc::new(spec.clone());
1570 0 :
1571 0 : self.apply_spec_sql(spec, conf, max_concurrent_connections)?;
1572 0 : }
1573 :
1574 0 : Ok(())
1575 0 : })?;
1576 : }
1577 :
1578 : self.pg_reload_conf()?;
1579 :
1580 : let unknown_op = "unknown".to_string();
1581 : let op_id = spec.operation_uuid.as_ref().unwrap_or(&unknown_op);
1582 : info!(
1583 : "finished reconfiguration of compute node for operation {}",
1584 : op_id
1585 : );
1586 :
1587 : Ok(())
1588 : }
1589 :
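     : /// Configure this compute as a Primary: apply the spec to the Postgres catalog
     : /// (unless catalog updates are skipped), allow logical replication subscribers,
     : /// and reload the configuration before running the post-apply steps.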
1590 : #[instrument(skip_all)]
1591 : pub fn configure_as_primary(&self, compute_state: &ComputeState) -> Result<()> {
1592 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
1593 :
1594 : assert!(pspec.spec.mode == ComputeMode::Primary);
1595 : if !pspec.spec.skip_pg_catalog_updates {
1596 : let pgdata_path = Path::new(&self.params.pgdata);
1597 : // temporarily reset max_cluster_size in config
1598 : // to avoid the possibility of hitting the limit, while we are applying config:
1599 : // creating new extensions, roles, etc...
1600 0 : config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
1601 0 : self.pg_reload_conf()?;
1602 :
1603 0 : self.apply_config(compute_state)?;
1604 :
1605 0 : Ok(())
1606 0 : })?;
1607 :
1608 : let postgresql_conf_path = pgdata_path.join("postgresql.conf");
1609 : if config::line_in_file(
1610 : &postgresql_conf_path,
1611 : "neon.disable_logical_replication_subscribers=false",
1612 : )? {
1613 : info!(
1614 : "updated postgresql.conf to set neon.disable_logical_replication_subscribers=false"
1615 : );
1616 : }
1617 : self.pg_reload_conf()?;
1618 : }
1619 : self.post_apply_config()?;
1620 :
1621 : Ok(())
1622 : }
1623 :
1624 : /// Update `last_active` in the shared state, but only if the new value is more recent than the current one.
1625 0 : pub fn update_last_active(&self, last_active: Option<DateTime<Utc>>) {
1626 0 : let mut state = self.state.lock().unwrap();
1627 0 : // NB: `Some(<DateTime>)` is always greater than `None`.
1628 0 : if last_active > state.last_active {
1629 0 : state.last_active = last_active;
1630 0 : debug!("set the last compute activity time to: {:?}", last_active);
1631 0 : }
1632 0 : }
1633 :
1634 : // Look for core dumps and collect backtraces.
1635 : //
1636 : // EKS worker nodes have the following core dump settings:
1637 : // /proc/sys/kernel/core_pattern -> core
1638 : // /proc/sys/kernel/core_uses_pid -> 1
1639 : // ulimit -c -> unlimited
1640 : // which results in core dumps being written to the postgres data directory as core.<pid>.
1641 : //
1642 : // Use that as the default location and pattern, except on macOS, where core dumps are written
1643 : // to the /cores/ directory by default.
1644 : //
1645 : // With default Linux settings, the core dump file is called just "core", so check for
1646 : // that too.
1647 0 : pub fn check_for_core_dumps(&self) -> Result<()> {
1648 0 : let core_dump_dir = match std::env::consts::OS {
1649 0 : "macos" => Path::new("/cores/"),
1650 0 : _ => Path::new(&self.params.pgdata),
1651 : };
1652 :
1653 : // Collect core dump paths if any
1654 0 : info!("checking for core dumps in {}", core_dump_dir.display());
1655 0 : let files = fs::read_dir(core_dump_dir)?;
1656 0 : let cores = files.filter_map(|entry| {
1657 0 : let entry = entry.ok()?;
1658 :
1659 0 : let is_core_dump = match entry.file_name().to_str()? {
1660 0 : n if n.starts_with("core.") => true,
1661 0 : "core" => true,
1662 0 : _ => false,
1663 : };
1664 0 : if is_core_dump {
1665 0 : Some(entry.path())
1666 : } else {
1667 0 : None
1668 : }
1669 0 : });
1670 :
1671 : // Print backtrace for each core dump
1672 0 : for core_path in cores {
1673 0 : warn!(
1674 0 : "core dump found: {}, collecting backtrace",
1675 0 : core_path.display()
1676 : );
1677 :
1678 : // Try first with gdb
1679 0 : let backtrace = Command::new("gdb")
1680 0 : .args(["--batch", "-q", "-ex", "bt", &self.params.pgbin])
1681 0 : .arg(&core_path)
1682 0 : .output();
1683 :
1684 : // Try lldb if no gdb is found -- that is handy for local testing on macOS
1685 0 : let backtrace = match backtrace {
1686 0 : Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
1687 0 : warn!("cannot find gdb, trying lldb");
1688 0 : Command::new("lldb")
1689 0 : .arg("-c")
1690 0 : .arg(&core_path)
1691 0 : .args(["--batch", "-o", "bt all", "-o", "quit"])
1692 0 : .output()
1693 : }
1694 0 : _ => backtrace,
1695 0 : }?;
1696 :
1697 0 : warn!(
1698 0 : "core dump backtrace: {}",
1699 0 : String::from_utf8_lossy(&backtrace.stdout)
1700 : );
1701 0 : warn!(
1702 0 : "debugger stderr: {}",
1703 0 : String::from_utf8_lossy(&backtrace.stderr)
1704 : );
1705 : }
1706 :
1707 0 : Ok(())
1708 0 : }
1709 :
1710 : /// Select `pg_stat_statements` data and return it as stringified JSON
1711 0 : pub async fn collect_insights(&self) -> String {
1712 0 : let mut result_rows: Vec<String> = Vec::new();
1713 0 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:collect_insights"));
1714 0 : let connect_result = conf.connect(NoTls).await;
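     : // NOTE: this panics if the connection cannot be established.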
1715 0 : let (client, connection) = connect_result.unwrap();
1716 0 : tokio::spawn(async move {
1717 0 : if let Err(e) = connection.await {
1718 0 : eprintln!("connection error: {}", e);
1719 0 : error!("connection error: {}", e);
1720 0 : });
1721 0 : let result = client
1722 0 : .simple_query(
1723 0 : "SELECT
1724 0 : row_to_json(pg_stat_statements)
1725 0 : FROM
1726 0 : pg_stat_statements
1727 0 : WHERE
1728 0 : userid != 'cloud_admin'::regrole::oid
1729 0 : ORDER BY
1730 0 : (mean_exec_time + mean_plan_time) DESC
1731 0 : LIMIT 100",
1732 0 : )
1733 0 : .await;
1734 :
1735 0 : if let Ok(raw_rows) = result {
1736 0 : for message in raw_rows.iter() {
1737 0 : if let postgres::SimpleQueryMessage::Row(row) = message {
1738 0 : if let Some(json) = row.get(0) {
1739 0 : result_rows.push(json.to_string());
1740 0 : }
1741 0 : }
1742 : }
1743 :
1744 0 : format!("{{\"pg_stat_statements\": [{}]}}", result_rows.join(","))
1745 : } else {
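     : // The query failed; return an empty, but still valid, JSON document.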
1746 0 : "{\"pg_stat_statements\": []}".to_string()
1747 : }
1748 0 : }
1749 :
1750 : // Download an extension archive, unzip it, and place the files in the correct locations.
1751 0 : pub async fn download_extension(
1752 0 : &self,
1753 0 : real_ext_name: String,
1754 0 : ext_path: RemotePath,
1755 0 : ) -> Result<u64, DownloadError> {
1756 0 : let ext_remote_storage =
1757 0 : self.params
1758 0 : .ext_remote_storage
1759 0 : .as_ref()
1760 0 : .ok_or(DownloadError::BadInput(anyhow::anyhow!(
1761 0 : "Remote extensions storage is not configured",
1762 0 : )))?;
1763 :
1764 0 : let ext_archive_name = ext_path.object_name().expect("bad path");
1765 0 :
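     : // `ext_download_progress` maps archive name -> (download start time, completed flag),
     : // so that concurrent requests can detect and wait for a download started by another request.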
1766 0 : let mut first_try = false;
1767 0 : if !self
1768 0 : .ext_download_progress
1769 0 : .read()
1770 0 : .expect("lock err")
1771 0 : .contains_key(ext_archive_name)
1772 0 : {
1773 0 : self.ext_download_progress
1774 0 : .write()
1775 0 : .expect("lock err")
1776 0 : .insert(ext_archive_name.to_string(), (Utc::now(), false));
1777 0 : first_try = true;
1778 0 : }
1779 0 : let (download_start, download_completed) =
1780 0 : self.ext_download_progress.read().expect("lock err")[ext_archive_name];
1781 0 : let start_time_delta = Utc::now()
1782 0 : .signed_duration_since(download_start)
1783 0 : .to_std()
1784 0 : .unwrap()
1785 0 : .as_millis() as u64;
1786 :
1787 : // how long to wait for extension download if it was started by another process
1788 : const HANG_TIMEOUT: u64 = 3000; // milliseconds
1789 :
1790 0 : if download_completed {
1791 0 : info!("extension already downloaded, skipping re-download");
1792 0 : return Ok(0);
1793 0 : } else if start_time_delta < HANG_TIMEOUT && !first_try {
1794 0 : info!(
1795 0 : "download {ext_archive_name} already started by another process, waiting until completion or timeout"
1796 : );
1797 0 : let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500));
1798 : loop {
1799 0 : info!("waiting for download");
1800 0 : interval.tick().await;
1801 0 : let (_, download_completed_now) =
1802 0 : self.ext_download_progress.read().expect("lock")[ext_archive_name];
1803 0 : if download_completed_now {
1804 0 : info!("download finished by whoever else downloaded it");
1805 0 : return Ok(0);
1806 0 : }
1807 : }
1808 : // NOTE: the above loop will get terminated
1809 : // based on the timeout of the download function
1810 0 : }
1811 0 :
1812 0 : // If the extension hasn't been downloaded before, or the previous
1813 0 : // download attempt started at least HANG_TIMEOUT ms ago,
1814 0 : // then try to download it here.
1815 0 : info!("downloading new extension {ext_archive_name}");
1816 :
1817 0 : let download_size = extension_server::download_extension(
1818 0 : &real_ext_name,
1819 0 : &ext_path,
1820 0 : ext_remote_storage,
1821 0 : &self.params.pgbin,
1822 0 : )
1823 0 : .await
1824 0 : .map_err(DownloadError::Other);
1825 0 :
1826 0 : if download_size.is_ok() {
1827 0 : self.ext_download_progress
1828 0 : .write()
1829 0 : .expect("bad lock")
1830 0 : .insert(ext_archive_name.to_string(), (download_start, true));
1831 0 : }
1832 :
1833 0 : download_size
1834 0 : }
1835 :
1836 0 : pub async fn set_role_grants(
1837 0 : &self,
1838 0 : db_name: &PgIdent,
1839 0 : schema_name: &PgIdent,
1840 0 : privileges: &[Privilege],
1841 0 : role_name: &PgIdent,
1842 0 : ) -> Result<()> {
1843 : use tokio_postgres::NoTls;
1844 :
1845 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:set_role_grants"));
1846 0 : conf.dbname(db_name);
1847 :
1848 0 : let (db_client, conn) = conf
1849 0 : .connect(NoTls)
1850 0 : .await
1851 0 : .context("Failed to connect to the database")?;
1852 0 : tokio::spawn(conn);
1853 0 :
1854 0 : // TODO: support other types of grants apart from schemas?
1855 0 : let query = format!(
1856 0 : "GRANT {} ON SCHEMA {} TO {}",
1857 0 : privileges
1858 0 : .iter()
1859 0 : // Privileges are not quoted, as they are part of the command itself
1860 0 : // and are already sanitized, so this is safe.
1861 0 : .map(|p| p.as_str())
1862 0 : .collect::<Vec<&'static str>>()
1863 0 : .join(", "),
1864 0 : // quote the schema and role name as identifiers to sanitize them.
1865 0 : schema_name.pg_quote(),
1866 0 : role_name.pg_quote(),
1867 0 : );
1868 0 : db_client
1869 0 : .simple_query(&query)
1870 0 : .await
1871 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
1872 :
1873 0 : Ok(())
1874 0 : }
1875 :
1876 0 : pub async fn install_extension(
1877 0 : &self,
1878 0 : ext_name: &PgIdent,
1879 0 : db_name: &PgIdent,
1880 0 : ext_version: ExtVersion,
1881 0 : ) -> Result<ExtVersion> {
1882 : use tokio_postgres::NoTls;
1883 :
1884 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:install_extension"));
1885 0 : conf.dbname(db_name);
1886 :
1887 0 : let (db_client, conn) = conf
1888 0 : .connect(NoTls)
1889 0 : .await
1890 0 : .context("Failed to connect to the database")?;
1891 0 : tokio::spawn(conn);
1892 0 :
1893 0 : let version_query = "SELECT extversion FROM pg_extension WHERE extname = $1";
1894 0 : let version: Option<ExtVersion> = db_client
1895 0 : .query_opt(version_query, &[&ext_name])
1896 0 : .await
1897 0 : .with_context(|| format!("Failed to execute query: {}", version_query))?
1898 0 : .map(|row| row.get(0));
1899 0 :
1900 0 : // Sanitize the inputs by quoting them as Postgres identifiers.
1901 0 : let ext_name: String = ext_name.pg_quote();
1902 0 : let quoted_version: String = ext_version.pg_quote();
1903 :
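     : // If the extension is already installed at the requested version there is nothing to do;
     : // if it is installed at a different version, upgrade it; otherwise create it.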
1904 0 : if let Some(installed_version) = version {
1905 0 : if installed_version == ext_version {
1906 0 : return Ok(installed_version);
1907 0 : }
1908 0 : let query = format!("ALTER EXTENSION {ext_name} UPDATE TO {quoted_version}");
1909 0 : db_client
1910 0 : .simple_query(&query)
1911 0 : .await
1912 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
1913 : } else {
1914 0 : let query =
1915 0 : format!("CREATE EXTENSION IF NOT EXISTS {ext_name} WITH VERSION {quoted_version}");
1916 0 : db_client
1917 0 : .simple_query(&query)
1918 0 : .await
1919 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
1920 : }
1921 :
1922 0 : Ok(ext_version)
1923 0 : }
1924 :
1925 0 : pub async fn prepare_preload_libraries(
1926 0 : &self,
1927 0 : spec: &ComputeSpec,
1928 0 : ) -> Result<RemoteExtensionMetrics> {
1929 0 : if self.params.ext_remote_storage.is_none() {
1930 0 : return Ok(RemoteExtensionMetrics {
1931 0 : num_ext_downloaded: 0,
1932 0 : largest_ext_size: 0,
1933 0 : total_ext_download_size: 0,
1934 0 : });
1935 0 : }
1936 0 : let remote_extensions = spec
1937 0 : .remote_extensions
1938 0 : .as_ref()
1939 0 : .ok_or(anyhow::anyhow!("Remote extensions are not configured"))?;
1940 :
1941 0 : info!("parse shared_preload_libraries from spec.cluster.settings");
1942 0 : let mut libs_vec = Vec::new();
1943 0 : if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
1944 0 : libs_vec = libs
1945 0 : .split(&[',', '\'', ' '])
1946 0 : .filter(|s| *s != "neon" && !s.is_empty())
1947 0 : .map(str::to_string)
1948 0 : .collect();
1949 0 : }
1950 0 : info!("parse shared_preload_libraries from provided postgresql.conf");
1951 :
1952 : // This path is used in neon_local and in the Python tests.
1953 0 : if let Some(conf) = &spec.cluster.postgresql_conf {
1954 0 : let conf_lines = conf.split('\n').collect::<Vec<&str>>();
1955 0 : let mut shared_preload_libraries_line = "";
1956 0 : for line in conf_lines {
1957 0 : if line.starts_with("shared_preload_libraries") {
1958 0 : shared_preload_libraries_line = line;
1959 0 : }
1960 : }
1961 0 : let mut preload_libs_vec = Vec::new();
1962 0 : if let Some(libs) = shared_preload_libraries_line.split("='").nth(1) {
1963 0 : preload_libs_vec = libs
1964 0 : .split(&[',', '\'', ' '])
1965 0 : .filter(|s| *s != "neon" && !s.is_empty())
1966 0 : .map(str::to_string)
1967 0 : .collect();
1968 0 : }
1969 0 : libs_vec.extend(preload_libs_vec);
1970 0 : }
1971 :
1972 : // Don't try to download libraries that are not in the index.
1973 : // Assume that they are already present locally.
1974 0 : libs_vec.retain(|lib| remote_extensions.library_index.contains_key(lib));
1975 0 :
1976 0 : info!("Downloading to shared preload libraries: {:?}", &libs_vec);
1977 :
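     : // Start all downloads concurrently and wait for them to finish; individual
     : // failures are logged below rather than failing the whole operation.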
1978 0 : let mut download_tasks = Vec::new();
1979 0 : for library in &libs_vec {
1980 0 : let (ext_name, ext_path) = remote_extensions.get_ext(
1981 0 : library,
1982 0 : true,
1983 0 : &self.params.build_tag,
1984 0 : &self.params.pgversion,
1985 0 : )?;
1986 0 : download_tasks.push(self.download_extension(ext_name, ext_path));
1987 : }
1988 0 : let results = join_all(download_tasks).await;
1989 :
1990 0 : let mut remote_ext_metrics = RemoteExtensionMetrics {
1991 0 : num_ext_downloaded: 0,
1992 0 : largest_ext_size: 0,
1993 0 : total_ext_download_size: 0,
1994 0 : };
1995 0 : for result in results {
1996 0 : let download_size = match result {
1997 0 : Ok(res) => {
1998 0 : remote_ext_metrics.num_ext_downloaded += 1;
1999 0 : res
2000 : }
2001 0 : Err(err) => {
2002 0 : // if we failed to download an extension, we don't want to fail the whole
2003 0 : // process, but we do want to log the error
2004 0 : error!("Failed to download extension: {}", err);
2005 0 : 0
2006 : }
2007 : };
2008 :
2009 0 : remote_ext_metrics.largest_ext_size =
2010 0 : std::cmp::max(remote_ext_metrics.largest_ext_size, download_size);
2011 0 : remote_ext_metrics.total_ext_download_size += download_size;
2012 : }
2013 0 : Ok(remote_ext_metrics)
2014 0 : }
2015 :
2016 : /// Waits until the current thread receives a state change notification and
2017 : /// the pageserver connection string has changed.
2018 : ///
2019 : /// The operation will time out after a specified duration.
2020 0 : pub fn wait_timeout_while_pageserver_connstr_unchanged(&self, duration: Duration) {
2021 0 : let state = self.state.lock().unwrap();
2022 0 : let old_pageserver_connstr = state
2023 0 : .pspec
2024 0 : .as_ref()
2025 0 : .expect("spec must be set")
2026 0 : .pageserver_connstr
2027 0 : .clone();
2028 0 : let mut unchanged = true;
2029 0 : let _ = self
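     : // `wait_timeout_while` keeps waiting while the closure returns true, i.e. while the
     : // pageserver connection string is unchanged, until the timeout elapses.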
2030 0 : .state_changed
2031 0 : .wait_timeout_while(state, duration, |s| {
2032 0 : let pageserver_connstr = &s
2033 0 : .pspec
2034 0 : .as_ref()
2035 0 : .expect("spec must be set")
2036 0 : .pageserver_connstr;
2037 0 : unchanged = pageserver_connstr == &old_pageserver_connstr;
2038 0 : unchanged
2039 0 : })
2040 0 : .unwrap();
2041 0 : if !unchanged {
2042 0 : info!("Pageserver config changed");
2043 0 : }
2044 0 : }
2045 : }
2046 :
2047 0 : pub fn forward_termination_signal() {
2048 0 : let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
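     : // Terminate a running sync-safekeepers child process first, if any.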
2049 0 : if ss_pid != 0 {
2050 0 : let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
2051 0 : kill(ss_pid, Signal::SIGTERM).ok();
2052 0 : }
2053 0 : let pg_pid = PG_PID.load(Ordering::SeqCst);
2054 0 : if pg_pid != 0 {
2055 0 : let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
2056 0 : // Use 'fast' shutdown (SIGINT) because it also creates a shutdown checkpoint, which is important for
2057 0 : // read-only replicas (ROs) to get a list of running xacts faster instead of going through the CLOG.
2058 0 : // See https://www.postgresql.org/docs/current/server-shutdown.html for the list of modes and signals.
2059 0 : kill(pg_pid, Signal::SIGINT).ok();
2060 0 : }
2061 0 : }
2062 :
2063 : // Helper trait to call JoinSet::spawn_blocking(f) while propagating the current
2064 : // tracing span to the spawned thread.
2065 : trait JoinSetExt<T> {
2066 : fn spawn_blocking_child<F>(&mut self, f: F) -> tokio::task::AbortHandle
2067 : where
2068 : F: FnOnce() -> T + Send + 'static,
2069 : T: Send;
2070 : }
2071 :
2072 : impl<T: 'static> JoinSetExt<T> for tokio::task::JoinSet<T> {
2073 0 : fn spawn_blocking_child<F>(&mut self, f: F) -> tokio::task::AbortHandle
2074 0 : where
2075 0 : F: FnOnce() -> T + Send + 'static,
2076 0 : T: Send,
2077 0 : {
2078 0 : let sp = tracing::Span::current();
2079 0 : self.spawn_blocking(move || {
2080 0 : let _e = sp.enter();
2081 0 : f()
2082 0 : })
2083 0 : }
2084 : }
|