Line data Source code
1 : use anyhow::{Context, Result};
2 : use chrono::{DateTime, Utc};
3 : use compute_api::privilege::Privilege;
4 : use compute_api::responses::{
5 : ComputeConfig, ComputeCtlConfig, ComputeMetrics, ComputeStatus, LfcOffloadState,
6 : LfcPrewarmState, TlsConfig,
7 : };
8 : use compute_api::spec::{
9 : ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, ExtVersion, PgIdent,
10 : };
11 : use futures::StreamExt;
12 : use futures::future::join_all;
13 : use futures::stream::FuturesUnordered;
14 : use itertools::Itertools;
15 : use nix::sys::signal::{Signal, kill};
16 : use nix::unistd::Pid;
17 : use once_cell::sync::Lazy;
18 : use postgres;
19 : use postgres::NoTls;
20 : use postgres::error::SqlState;
21 : use remote_storage::{DownloadError, RemotePath};
22 : use std::collections::{HashMap, HashSet};
23 : use std::net::SocketAddr;
24 : use std::os::unix::fs::{PermissionsExt, symlink};
25 : use std::path::Path;
26 : use std::process::{Command, Stdio};
27 : use std::str::FromStr;
28 : use std::sync::atomic::{AtomicU32, Ordering};
29 : use std::sync::{Arc, Condvar, Mutex, RwLock};
30 : use std::time::{Duration, Instant};
31 : use std::{env, fs};
32 : use tokio::spawn;
33 : use tracing::{Instrument, debug, error, info, instrument, warn};
34 : use url::Url;
35 : use utils::id::{TenantId, TimelineId};
36 : use utils::lsn::Lsn;
37 : use utils::measured_stream::MeasuredReader;
38 :
39 : use crate::configurator::launch_configurator;
40 : use crate::disk_quota::set_disk_quota;
41 : use crate::installed_extensions::get_installed_extensions;
42 : use crate::logger::startup_context_from_env;
43 : use crate::lsn_lease::launch_lsn_lease_bg_task_for_static;
44 : use crate::metrics::COMPUTE_CTL_UP;
45 : use crate::monitor::launch_monitor;
46 : use crate::pg_helpers::*;
47 : use crate::rsyslog::{
48 : PostgresLogsRsyslogConfig, configure_audit_rsyslog, configure_postgres_logs_export,
49 : launch_pgaudit_gc,
50 : };
51 : use crate::spec::*;
52 : use crate::swap::resize_swap;
53 : use crate::sync_sk::{check_if_synced, ping_safekeeper};
54 : use crate::tls::watch_cert_for_changes;
55 : use crate::{config, extension_server, local_proxy};
56 :
57 : pub static SYNC_SAFEKEEPERS_PID: AtomicU32 = AtomicU32::new(0);
58 : pub static PG_PID: AtomicU32 = AtomicU32::new(0);
59 : // This is an arbitrary build tag. It is fine as a default / for testing
60 : // purposes in case the BUILD_TAG environment variable is not set.
61 : const BUILD_TAG_DEFAULT: &str = "latest";
62 : /// Build tag/version of the compute node binaries/image. It's tricky and ugly
63 : /// to pass it everywhere as a part of `ComputeNodeParams`, so we use a
64 : /// global static variable.
65 0 : pub static BUILD_TAG: Lazy<String> = Lazy::new(|| {
66 0 : option_env!("BUILD_TAG")
67 0 : .unwrap_or(BUILD_TAG_DEFAULT)
68 0 : .to_string()
69 0 : });
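// Note (editor's sketch, not part of the original file): `option_env!` is
// resolved at *compile* time, so the tag is baked into the binary and setting
// BUILD_TAG at runtime has no effect. A minimal standalone equivalent of the
// pattern above:
//
//     use once_cell::sync::Lazy;
//     static TAG: Lazy<String> =
//         Lazy::new(|| option_env!("BUILD_TAG").unwrap_or("latest").to_string());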
70 :
71 : /// Static configuration params that don't change after startup. These mostly
72 : /// come from the CLI args, or are derived from them.
73 : pub struct ComputeNodeParams {
74 : /// The ID of the compute
75 : pub compute_id: String,
76 : // Url type maintains proper escaping
77 : pub connstr: url::Url,
78 :
79 : pub resize_swap_on_bind: bool,
80 : pub set_disk_quota_for_fs: Option<String>,
81 :
82 : // VM monitor parameters
83 : #[cfg(target_os = "linux")]
84 : pub filecache_connstr: String,
85 : #[cfg(target_os = "linux")]
86 : pub cgroup: String,
87 : #[cfg(target_os = "linux")]
88 : pub vm_monitor_addr: String,
89 :
90 : pub pgdata: String,
91 : pub pgbin: String,
92 : pub pgversion: String,
93 :
94 : /// The port that the compute's external HTTP server listens on
95 : pub external_http_port: u16,
96 : /// The port that the compute's internal HTTP server listens on
97 : pub internal_http_port: u16,
98 :
99 : /// The address of the extension storage proxy gateway
100 : pub remote_ext_base_url: Option<Url>,
101 :
102 : /// Interval for installed extensions collection
103 : pub installed_extensions_collection_interval: u64,
104 : }
105 :
106 : /// Compute node info shared across several `compute_ctl` threads.
107 : pub struct ComputeNode {
108 : pub params: ComputeNodeParams,
109 :
110 : // We connect to Postgres from many different places, so build configs once
111 : // and reuse them where needed. These are derived from 'params.connstr'
112 : pub conn_conf: postgres::config::Config,
113 : pub tokio_conn_conf: tokio_postgres::config::Config,
114 :
115 : /// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
116 : /// To allow the HTTP API server to serve status requests while configuration
117 : /// is in progress, the lock should be held only for short periods of time to
118 : /// do a read/write, not for the whole configuration process.
119 : pub state: Mutex<ComputeState>,
120 : /// `Condvar` to allow notifying waiters about state changes.
121 : pub state_changed: Condvar,
122 :
123 : // key: ext_archive_name, value: (download start time, download_completed?)
124 : pub ext_download_progress: RwLock<HashMap<String, (DateTime<Utc>, bool)>>,
125 : pub compute_ctl_config: ComputeCtlConfig,
126 : }
127 :
128 : // store some metrics about download size that might impact startup time
129 : #[derive(Clone, Debug)]
130 : pub struct RemoteExtensionMetrics {
131 : num_ext_downloaded: u64,
132 : largest_ext_size: u64,
133 : total_ext_download_size: u64,
134 : }
135 :
136 : #[derive(Clone, Debug)]
137 : pub struct ComputeState {
138 : pub start_time: DateTime<Utc>,
139 : pub status: ComputeStatus,
140 : /// Timestamp of the last Postgres activity. It could be `None` if the
141 : /// compute hasn't been used since start.
142 : pub last_active: Option<DateTime<Utc>>,
143 : pub error: Option<String>,
144 :
145 : /// Compute spec. This can be received from the CLI or - more likely -
146 : /// passed by the control plane with a /configure HTTP request.
147 : pub pspec: Option<ParsedSpec>,
148 :
149 : /// If the spec is passed by a /configure request, 'startup_span' is the
150 : /// /configure request's tracing span. The main thread enters it when it
151 : /// processes the compute startup, so that the compute startup is considered
152 : /// to be part of the /configure request for tracing purposes.
153 : ///
154 : /// If the request handling thread/task called startup_compute() directly,
155 : /// it would automatically be a child of the request handling span, and we
156 : /// wouldn't need this. But because we use the main thread to perform the
157 : /// startup, and the /configure task just waits for it to finish, we need to
158 : /// set up the span relationship ourselves.
159 : pub startup_span: Option<tracing::span::Span>,
160 :
161 : pub lfc_prewarm_state: LfcPrewarmState,
162 : pub lfc_offload_state: LfcOffloadState,
163 :
164 : pub metrics: ComputeMetrics,
165 : }
166 :
167 : impl ComputeState {
168 0 : pub fn new() -> Self {
169 0 : Self {
170 0 : start_time: Utc::now(),
171 0 : status: ComputeStatus::Empty,
172 0 : last_active: None,
173 0 : error: None,
174 0 : pspec: None,
175 0 : startup_span: None,
176 0 : metrics: ComputeMetrics::default(),
177 0 : lfc_prewarm_state: LfcPrewarmState::default(),
178 0 : lfc_offload_state: LfcOffloadState::default(),
179 0 : }
180 0 : }
181 :
182 0 : pub fn set_status(&mut self, status: ComputeStatus, state_changed: &Condvar) {
183 0 : let prev = self.status;
184 0 : info!("Changing compute status from {} to {}", prev, status);
185 0 : self.status = status;
186 0 : state_changed.notify_all();
187 0 :
188 0 : COMPUTE_CTL_UP.reset();
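// Keep the COMPUTE_CTL_UP gauge consistent: clear every previously set
// (build_tag, status) label pair, then set the current pair to 1, so that
// exactly one status series reports 1 at any time.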
189 0 : COMPUTE_CTL_UP
190 0 : .with_label_values(&[&BUILD_TAG, status.to_string().as_str()])
191 0 : .set(1);
192 0 : }
193 :
194 0 : pub fn set_failed_status(&mut self, err: anyhow::Error, state_changed: &Condvar) {
195 0 : self.error = Some(format!("{err:?}"));
196 0 : self.set_status(ComputeStatus::Failed, state_changed);
197 0 : }
198 : }
199 :
200 : impl Default for ComputeState {
201 0 : fn default() -> Self {
202 0 : Self::new()
203 0 : }
204 : }
205 :
206 : #[derive(Clone, Debug)]
207 : pub struct ParsedSpec {
208 : pub spec: ComputeSpec,
209 : pub tenant_id: TenantId,
210 : pub timeline_id: TimelineId,
211 : pub pageserver_connstr: String,
212 : pub safekeeper_connstrings: Vec<String>,
213 : pub storage_auth_token: Option<String>,
214 : pub endpoint_storage_addr: Option<SocketAddr>,
215 : pub endpoint_storage_token: Option<String>,
216 : }
217 :
218 : impl TryFrom<ComputeSpec> for ParsedSpec {
219 : type Error = String;
220 0 : fn try_from(spec: ComputeSpec) -> Result<Self, String> {
221 : // Extract the options from the spec file that are needed to connect to
222 : // the storage system.
223 : //
224 : // For backwards-compatibility, the top-level fields in the spec file
225 : // may be empty. In that case, we need to dig them from the GUCs in the
226 : // cluster.settings field.
227 0 : let pageserver_connstr = spec
228 0 : .pageserver_connstring
229 0 : .clone()
230 0 : .or_else(|| spec.cluster.settings.find("neon.pageserver_connstring"))
231 0 : .ok_or("pageserver connstr should be provided")?;
232 0 : let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() {
233 0 : if matches!(spec.mode, ComputeMode::Primary) {
234 0 : spec.cluster
235 0 : .settings
236 0 : .find("neon.safekeepers")
237 0 : .ok_or("safekeeper connstrings should be provided")?
238 0 : .split(',')
239 0 : .map(|str| str.to_string())
240 0 : .collect()
241 : } else {
242 0 : vec![]
243 : }
244 : } else {
245 0 : spec.safekeeper_connstrings.clone()
246 : };
247 0 : let storage_auth_token = spec.storage_auth_token.clone();
248 0 : let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
249 0 : tenant_id
250 : } else {
251 0 : spec.cluster
252 0 : .settings
253 0 : .find("neon.tenant_id")
254 0 : .ok_or("tenant id should be provided")
255 0 : .map(|s| TenantId::from_str(&s))?
256 0 : .or(Err("invalid tenant id"))?
257 : };
258 0 : let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
259 0 : timeline_id
260 : } else {
261 0 : spec.cluster
262 0 : .settings
263 0 : .find("neon.timeline_id")
264 0 : .ok_or("timeline id should be provided")
265 0 : .map(|s| TimelineId::from_str(&s))?
266 0 : .or(Err("invalid timeline id"))?
267 : };
268 :
269 0 : let endpoint_storage_addr: Option<SocketAddr> = spec
270 0 : .endpoint_storage_addr
271 0 : .clone()
272 0 : .or_else(|| spec.cluster.settings.find("neon.endpoint_storage_addr"))
273 0 : .unwrap_or_default()
274 0 : .parse()
275 0 : .ok();
276 0 : let endpoint_storage_token = spec
277 0 : .endpoint_storage_token
278 0 : .clone()
279 0 : .or_else(|| spec.cluster.settings.find("neon.endpoint_storage_token"));
280 0 :
281 0 : Ok(ParsedSpec {
282 0 : spec,
283 0 : pageserver_connstr,
284 0 : safekeeper_connstrings,
285 0 : storage_auth_token,
286 0 : tenant_id,
287 0 : timeline_id,
288 0 : endpoint_storage_addr,
289 0 : endpoint_storage_token,
290 0 : })
291 0 : }
292 : }
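// Illustrative example (hypothetical values): when the top-level fields are
// empty, the same information is recovered from GUCs in `cluster.settings`,
// e.g. entries like
//
//     neon.tenant_id = '<tenant hex id>'
//     neon.timeline_id = '<timeline hex id>'
//     neon.pageserver_connstring = 'postgresql://no_user@pageserver:6400'
//     neon.safekeepers = 'sk-1:5454,sk-2:5454,sk-3:5454'
//
// `neon.safekeepers` is split on ',' into one connstring per safekeeper.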
293 :
294 : /// If we are a VM, returns a [`Command`] that will run in the `neon-postgres`
295 : /// cgroup. Otherwise returns the default `Command::new(cmd)`
296 : ///
297 : /// This function should be used to start postgres, as it will start it in the
298 : /// neon-postgres cgroup if we are a VM. This allows autoscaling to control
299 : /// postgres' resource usage. The cgroup will exist in VMs because vm-builder
300 : /// creates it during the sysinit phase of its inittab.
301 0 : fn maybe_cgexec(cmd: &str) -> Command {
302 0 : // The control plane sets this env var for autoscaling computes.
303 0 : // Use `var_os` so we don't have to worry about whether the variable is
304 0 : // valid Unicode. That should never be a concern, but just in case.
305 0 : if env::var_os("AUTOSCALING").is_some() {
306 0 : let mut command = Command::new("cgexec");
307 0 : command.args(["-g", "memory:neon-postgres"]);
308 0 : command.arg(cmd);
309 0 : command
310 : } else {
311 0 : Command::new(cmd)
312 : }
313 0 : }
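// Usage sketch: with AUTOSCALING set in the environment, the spawned command
// line is effectively
//
//     cgexec -g memory:neon-postgres <cmd> <args...>
//
// so a caller like `maybe_cgexec(&pgbin).args(["-D", pgdata]).spawn()` runs
// postgres under the memory controller of the `neon-postgres` cgroup.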
314 :
315 : struct PostgresHandle {
316 : postgres: std::process::Child,
317 : log_collector: tokio::task::JoinHandle<Result<()>>,
318 : }
319 :
320 : impl PostgresHandle {
321 : /// Return PID of the postgres (postmaster) process
322 0 : fn pid(&self) -> Pid {
323 0 : Pid::from_raw(self.postgres.id() as i32)
324 0 : }
325 : }
326 :
327 : struct StartVmMonitorResult {
328 : #[cfg(target_os = "linux")]
329 : token: tokio_util::sync::CancellationToken,
330 : #[cfg(target_os = "linux")]
331 : vm_monitor: Option<tokio::task::JoinHandle<Result<()>>>,
332 : }
333 :
334 : impl ComputeNode {
335 0 : pub fn new(params: ComputeNodeParams, config: ComputeConfig) -> Result<Self> {
336 0 : let connstr = params.connstr.as_str();
337 0 : let mut conn_conf = postgres::config::Config::from_str(connstr)
338 0 : .context("cannot build postgres config from connstr")?;
339 0 : let mut tokio_conn_conf = tokio_postgres::config::Config::from_str(connstr)
340 0 : .context("cannot build tokio postgres config from connstr")?;
341 :
342 : // Users can set some configuration parameters per database with
343 : // ALTER DATABASE ... SET ...
344 : //
345 : // At least these parameters can affect `compute_ctl` and prevent it from
346 : // properly configuring the database schema:
347 : //
348 : // - role=some_other_role
349 : // - default_transaction_read_only=on
350 : // - statement_timeout=1, i.e., 1ms, which will cause most of the queries to fail
351 : // - search_path=non_public_schema; this is actually safe because we don't
352 : //   call any functions in user databases, but it is better to always reset
353 : //   it to public.
354 : //
355 : // Unset them via connection string options before connecting to the database.
356 : // N.B. keep this in sync with `ZENITH_OPTIONS` in `get_maintenance_client()`.
357 : //
358 : // TODO(ololobus): we currently pass `-c default_transaction_read_only=off` from control plane
359 : // as well. After rolling out this code, we can remove this parameter from control plane.
360 : // In the meantime, double-passing is fine, the last value is applied.
361 : // See: <https://github.com/neondatabase/cloud/blob/133dd8c4dbbba40edfbad475bf6a45073ca63faf/goapp/controlplane/internal/pkg/compute/provisioner/provisioner_common.go#L70>
362 : const EXTRA_OPTIONS: &str = "-c role=cloud_admin -c default_transaction_read_only=off -c search_path=public -c statement_timeout=0";
363 0 : let options = match conn_conf.get_options() {
364 0 : Some(options) => format!("{} {}", options, EXTRA_OPTIONS),
365 0 : None => EXTRA_OPTIONS.to_string(),
366 : };
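// Illustrative failure mode: if a user ran
//
//     ALTER DATABASE mydb SET statement_timeout = '1ms';
//
// then without the overrides above almost every maintenance query issued by
// `compute_ctl` against that database would be cancelled by the timeout.
// Options sent at connection time (the `-c` flags) take precedence over
// ALTER DATABASE / ALTER ROLE defaults, so `-c statement_timeout=0`
// neutralizes such settings for these sessions.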
367 0 : conn_conf.options(&options);
368 0 : tokio_conn_conf.options(&options);
369 0 :
370 0 : let mut new_state = ComputeState::new();
371 0 : if let Some(spec) = config.spec {
372 0 : let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow::anyhow!(msg))?;
373 0 : new_state.pspec = Some(pspec);
374 0 : }
375 :
376 0 : Ok(ComputeNode {
377 0 : params,
378 0 : conn_conf,
379 0 : tokio_conn_conf,
380 0 : state: Mutex::new(new_state),
381 0 : state_changed: Condvar::new(),
382 0 : ext_download_progress: RwLock::new(HashMap::new()),
383 0 : compute_ctl_config: config.compute_ctl_config,
384 0 : })
385 0 : }
386 :
387 : /// Top-level control flow of compute_ctl. Returns a process exit code we should
388 : /// exit with.
389 0 : pub fn run(self) -> Result<Option<i32>> {
390 0 : let this = Arc::new(self);
391 0 :
392 0 : let cli_spec = this.state.lock().unwrap().pspec.clone();
393 0 :
394 0 : // If this is a pooled VM, prewarm before starting HTTP server and becoming
395 0 : // available for binding. Prewarming helps Postgres start quicker later,
396 0 : // because QEMU will already have its memory allocated from the host, and
397 0 : // the necessary binaries will already be cached.
398 0 : if cli_spec.is_none() {
399 0 : this.prewarm_postgres_vm_memory()?;
400 0 : }
401 :
402 : // Set the up metric with Empty status before starting the HTTP server.
403 : // That way on the first metric scrape, an external observer will see us
404 : // as 'up' and 'empty' (unless the compute was started with a spec or
405 : // already configured by control plane).
406 0 : COMPUTE_CTL_UP
407 0 : .with_label_values(&[&BUILD_TAG, ComputeStatus::Empty.to_string().as_str()])
408 0 : .set(1);
409 0 :
410 0 : // Launch the external HTTP server first, so that we can serve control plane
411 0 : // requests while configuration is still in progress.
412 0 : crate::http::server::Server::External {
413 0 : port: this.params.external_http_port,
414 0 : config: this.compute_ctl_config.clone(),
415 0 : compute_id: this.params.compute_id.clone(),
416 0 : }
417 0 : .launch(&this);
418 0 :
419 0 : // The internal HTTP server could be launched later, but there isn't much
420 0 : // sense in waiting.
421 0 : crate::http::server::Server::Internal {
422 0 : port: this.params.internal_http_port,
423 0 : }
424 0 : .launch(&this);
425 :
426 : // If we got a spec from the CLI already, use that. Otherwise wait for the
427 : // control plane to pass it to us with a /configure HTTP request
428 0 : let pspec = if let Some(cli_spec) = cli_spec {
429 0 : cli_spec
430 : } else {
431 0 : this.wait_spec()?
432 : };
433 :
434 0 : launch_lsn_lease_bg_task_for_static(&this);
435 0 :
436 0 : // We have a spec, start the compute
437 0 : let mut delay_exit = false;
438 0 : let mut vm_monitor = None;
439 0 : let mut pg_process: Option<PostgresHandle> = None;
440 0 :
441 0 : match this.start_compute(&mut pg_process) {
442 0 : Ok(()) => {
443 0 : // Success! Launch remaining services (just vm-monitor currently)
444 0 : vm_monitor =
445 0 : Some(this.start_vm_monitor(pspec.spec.disable_lfc_resizing.unwrap_or(false)));
446 0 : }
447 0 : Err(err) => {
448 0 : // Something went wrong with the startup. Log it and expose the error to
449 0 : // HTTP status requests.
450 0 : error!("could not start the compute node: {:#}", err);
451 0 : this.set_failed_status(err);
452 0 : delay_exit = true;
453 :
454 : // If the error happened after starting PostgreSQL, kill it
455 0 : if let Some(ref pg_process) = pg_process {
456 0 : kill(pg_process.pid(), Signal::SIGQUIT).ok();
457 0 : }
458 : }
459 : }
460 :
461 : // If startup was successful, or it failed in the late stages,
462 : // PostgreSQL is now running. Wait until it exits.
463 0 : let exit_code = if let Some(pg_handle) = pg_process {
464 0 : let exit_status = this.wait_postgres(pg_handle);
465 0 : info!("Postgres exited with code {}, shutting down", exit_status);
466 0 : exit_status.code()
467 : } else {
468 0 : None
469 : };
470 :
471 : // Terminate the vm_monitor so it releases the file watcher on
472 : // /sys/fs/cgroup/neon-postgres.
473 : // Note: the vm-monitor only runs on linux because it requires cgroups.
474 0 : if let Some(vm_monitor) = vm_monitor {
475 : cfg_if::cfg_if! {
476 : if #[cfg(target_os = "linux")] {
477 : // Kills all threads spawned by the monitor
478 0 : vm_monitor.token.cancel();
479 0 : if let Some(handle) = vm_monitor.vm_monitor {
480 0 : // Kills the actual task running the monitor
481 0 : handle.abort();
482 0 : }
483 : } else {
484 : _ = vm_monitor; // appease unused lint on macOS
485 : }
486 : }
487 0 : }
488 :
489 : // Reap the postgres process
490 0 : delay_exit |= this.cleanup_after_postgres_exit()?;
491 :
492 : // If launch failed, keep serving HTTP requests for a while, so the cloud
493 : // control plane can get the actual error.
494 0 : if delay_exit {
495 0 : info!("giving control plane 30s to collect the error before shutdown");
496 0 : std::thread::sleep(Duration::from_secs(30));
497 0 : }
498 0 : Ok(exit_code)
499 0 : }
500 :
501 0 : pub fn wait_spec(&self) -> Result<ParsedSpec> {
502 0 : info!("no compute spec provided, waiting");
503 0 : let mut state = self.state.lock().unwrap();
504 0 : while state.status != ComputeStatus::ConfigurationPending {
505 0 : state = self.state_changed.wait(state).unwrap();
506 0 : }
507 :
508 0 : info!("got spec, continue configuration");
509 0 : let spec = state.pspec.as_ref().unwrap().clone();
510 0 :
511 0 : // Record for how long we slept waiting for the spec.
512 0 : let now = Utc::now();
513 0 : state.metrics.wait_for_spec_ms = now
514 0 : .signed_duration_since(state.start_time)
515 0 : .to_std()
516 0 : .unwrap()
517 0 : .as_millis() as u64;
518 0 :
519 0 : // Reset start time, so that the total startup time that is calculated later will
520 0 : // not include the time that we waited for the spec.
521 0 : state.start_time = now;
522 0 :
523 0 : Ok(spec)
524 0 : }
525 :
526 : /// Start compute.
527 : ///
528 : /// Prerequisites:
529 : /// - the compute spec has been placed in self.state.pspec
530 : ///
531 : /// On success:
532 : /// - status is set to ComputeStatus::Running
533 : /// - self.running_postgres is set
534 : ///
535 : /// On error:
536 : /// - status is left in ComputeStatus::Init. The caller is responsible for setting it to Failed
537 : /// - if Postgres was started before the fatal error happened, self.running_postgres is
538 : /// set. The caller is responsible for killing it.
539 : ///
540 : /// Note that this is in the critical path of a compute cold start. Keep this fast.
541 : /// Try to do things concurrently, to hide the latencies.
542 0 : fn start_compute(self: &Arc<Self>, pg_handle: &mut Option<PostgresHandle>) -> Result<()> {
543 0 : let compute_state: ComputeState;
544 0 :
545 0 : let start_compute_span;
546 0 : let _this_entered;
547 0 : {
548 0 : let mut state_guard = self.state.lock().unwrap();
549 :
550 : // Create a tracing span for the startup operation.
551 : //
552 : // We could otherwise just annotate the function with #[instrument], but if
553 : // we're being configured from a /configure HTTP request, we want the
554 : // startup to be considered part of the /configure request.
555 : //
556 : // Similarly, if a trace ID was passed in env variables, attach it to the span.
557 0 : start_compute_span = {
558 : // Temporarily enter the parent span, so that the new span becomes its child.
559 0 : if let Some(p) = state_guard.startup_span.take() {
560 0 : let _parent_entered = p.entered();
561 0 : tracing::info_span!("start_compute")
562 0 : } else if let Some(otel_context) = startup_context_from_env() {
563 : use tracing_opentelemetry::OpenTelemetrySpanExt;
564 0 : let span = tracing::info_span!("start_compute");
565 0 : span.set_parent(otel_context);
566 0 : span
567 : } else {
568 0 : tracing::info_span!("start_compute")
569 : }
570 : };
571 0 : _this_entered = start_compute_span.enter();
572 0 :
573 0 : state_guard.set_status(ComputeStatus::Init, &self.state_changed);
574 0 : compute_state = state_guard.clone()
575 0 : }
576 0 :
577 0 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
578 0 : info!(
579 0 : "starting compute for project {}, operation {}, tenant {}, timeline {}, project {}, branch {}, endpoint {}, features {:?}, spec.remote_extensions {:?}",
580 0 : pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None"),
581 0 : pspec.spec.operation_uuid.as_deref().unwrap_or("None"),
582 0 : pspec.tenant_id,
583 0 : pspec.timeline_id,
584 0 : pspec.spec.project_id.as_deref().unwrap_or("None"),
585 0 : pspec.spec.branch_id.as_deref().unwrap_or("None"),
586 0 : pspec.spec.endpoint_id.as_deref().unwrap_or("None"),
587 : pspec.spec.features,
588 : pspec.spec.remote_extensions,
589 : );
590 :
591 : ////// PRE-STARTUP PHASE: things that need to be finished before we start the Postgres process
592 :
593 : // Collect all the tasks that must finish here
594 0 : let mut pre_tasks = tokio::task::JoinSet::new();
595 0 :
596 0 : // Make sure TLS certificates are properly loaded and in the right place.
597 0 : if self.compute_ctl_config.tls.is_some() {
598 0 : let this = self.clone();
599 0 : pre_tasks.spawn(async move {
600 0 : this.watch_cert_for_changes().await;
601 :
602 0 : Ok::<(), anyhow::Error>(())
603 0 : });
604 0 : }
605 :
606 0 : let tls_config = self.tls_config(&pspec.spec);
607 0 :
608 0 : // If there are any remote extensions in shared_preload_libraries, start downloading them
609 0 : if pspec.spec.remote_extensions.is_some() {
610 0 : let (this, spec) = (self.clone(), pspec.spec.clone());
611 0 : pre_tasks.spawn(async move {
612 0 : this.download_preload_extensions(&spec)
613 0 : .in_current_span()
614 0 : .await
615 0 : });
616 0 : }
617 :
618 : // Prepare pgdata directory. This downloads the basebackup, among other things.
619 0 : {
620 0 : let (this, cs) = (self.clone(), compute_state.clone());
621 0 : pre_tasks.spawn_blocking_child(move || this.prepare_pgdata(&cs));
622 0 : }
623 :
624 : // Resize swap to the desired size if the compute spec says so
625 0 : if let (Some(size_bytes), true) =
626 0 : (pspec.spec.swap_size_bytes, self.params.resize_swap_on_bind)
627 0 : {
628 0 : pre_tasks.spawn_blocking_child(move || {
629 0 : // To avoid 'swapoff' hitting postgres startup, we need to run resize-swap to completion
630 0 : // *before* starting postgres.
631 0 : //
632 0 : // In theory, we could do this asynchronously if SkipSwapon was enabled for VMs, but this
633 0 : // carries a risk of introducing hard-to-debug issues - e.g. if postgres sometimes gets
634 0 : // OOM-killed during startup because swap wasn't available yet.
635 0 : resize_swap(size_bytes).context("failed to resize swap")?;
636 0 : let size_mib = size_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
637 0 : info!(%size_bytes, %size_mib, "resized swap");
638 :
639 0 : Ok::<(), anyhow::Error>(())
640 0 : });
641 0 : }
642 :
643 : // Set disk quota if the compute spec says so
644 0 : if let (Some(disk_quota_bytes), Some(disk_quota_fs_mountpoint)) = (
645 0 : pspec.spec.disk_quota_bytes,
646 0 : self.params.set_disk_quota_for_fs.as_ref(),
647 0 : ) {
648 0 : let disk_quota_fs_mountpoint = disk_quota_fs_mountpoint.clone();
649 0 : pre_tasks.spawn_blocking_child(move || {
650 0 : set_disk_quota(disk_quota_bytes, &disk_quota_fs_mountpoint)
651 0 : .context("failed to set disk quota")?;
652 0 : let size_mib = disk_quota_bytes as f32 / (1 << 20) as f32; // just for more coherent display.
653 0 : info!(%disk_quota_bytes, %size_mib, "set disk quota");
654 :
655 0 : Ok::<(), anyhow::Error>(())
656 0 : });
657 0 : }
658 :
659 : // tune pgbouncer
660 0 : if let Some(pgbouncer_settings) = &pspec.spec.pgbouncer_settings {
661 0 : info!("tuning pgbouncer");
662 :
663 0 : let pgbouncer_settings = pgbouncer_settings.clone();
664 0 : let tls_config = tls_config.clone();
665 0 :
666 0 : // Spawn a background task to do the tuning,
667 0 : // so that we don't block the main thread that starts Postgres.
668 0 : let _handle = tokio::spawn(async move {
669 0 : let res = tune_pgbouncer(pgbouncer_settings, tls_config).await;
670 0 : if let Err(err) = res {
671 0 : error!("error while tuning pgbouncer: {err:?}");
672 : // Continue with the startup anyway
673 0 : }
674 0 : });
675 0 : }
676 :
677 : // configure local_proxy
678 0 : if let Some(local_proxy) = &pspec.spec.local_proxy_config {
679 0 : info!("configuring local_proxy");
680 :
681 : // Spawn a background task to do the configuration,
682 : // so that we don't block the main thread that starts Postgres.
683 :
684 0 : let mut local_proxy = local_proxy.clone();
685 0 : local_proxy.tls = tls_config.clone();
686 0 :
687 0 : let _handle = tokio::spawn(async move {
688 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
689 0 : error!("error while configuring local_proxy: {err:?}");
690 : // Continue with the startup anyway
691 0 : }
692 0 : });
693 0 : }
694 :
695 : // Configure and start rsyslog for compliance audit logging
696 0 : match pspec.spec.audit_log_level {
697 : ComputeAudit::Hipaa | ComputeAudit::Extended | ComputeAudit::Full => {
698 0 : let remote_endpoint =
699 0 : std::env::var("AUDIT_LOGGING_ENDPOINT").unwrap_or("".to_string());
700 0 : if remote_endpoint.is_empty() {
701 0 : anyhow::bail!("AUDIT_LOGGING_ENDPOINT is empty");
702 0 : }
703 0 :
704 0 : let log_directory_path = Path::new(&self.params.pgdata).join("log");
705 0 : let log_directory_path = log_directory_path.to_string_lossy().to_string();
706 0 :
707 0 : // Add project_id and endpoint_id to identify the logs.
708 0 : //
709 0 : // These IDs are passed from the control plane.
710 0 : let endpoint_id = pspec.spec.endpoint_id.as_deref().unwrap_or("");
711 0 : let project_id = pspec.spec.project_id.as_deref().unwrap_or("");
712 0 :
713 0 : configure_audit_rsyslog(
714 0 : log_directory_path.clone(),
715 0 : endpoint_id,
716 0 : project_id,
717 0 : &remote_endpoint,
718 0 : )?;
719 :
720 : // Launch a background task to clean up the audit logs
721 0 : launch_pgaudit_gc(log_directory_path);
722 : }
723 0 : _ => {}
724 : }
725 :
726 : // Configure and start rsyslog for Postgres logs export
727 0 : let conf = PostgresLogsRsyslogConfig::new(pspec.spec.logs_export_host.as_deref());
728 0 : configure_postgres_logs_export(conf)?;
729 :
730 : // Launch remaining service threads
731 0 : let _monitor_handle = launch_monitor(self);
732 0 : let _configurator_handle = launch_configurator(self);
733 0 :
734 0 : // Wait for all the pre-tasks to finish before starting postgres
735 0 : let rt = tokio::runtime::Handle::current();
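// `res` below is a Result<Result<(), anyhow::Error>, JoinError>; the double
// `?` propagates both a panicked/cancelled task (JoinError) and the task's
// own error.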
736 0 : while let Some(res) = rt.block_on(pre_tasks.join_next()) {
737 0 : res??;
738 : }
739 :
740 : ////// START POSTGRES
741 0 : let start_time = Utc::now();
742 0 : let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
743 0 : let postmaster_pid = pg_process.pid();
744 0 : *pg_handle = Some(pg_process);
745 0 :
746 0 : // If this is a primary endpoint, perform some post-startup configuration before
747 0 : // opening it up for the world.
748 0 : let config_time = Utc::now();
749 0 : if pspec.spec.mode == ComputeMode::Primary {
750 0 : self.configure_as_primary(&compute_state)?;
751 :
752 0 : let conf = self.get_tokio_conn_conf(None);
753 0 : tokio::task::spawn(async {
754 0 : let _ = installed_extensions(conf).await;
755 0 : });
756 0 : }
757 :
758 : // All done!
759 0 : let startup_end_time = Utc::now();
760 0 : let metrics = {
761 0 : let mut state = self.state.lock().unwrap();
762 0 : state.metrics.start_postgres_ms = config_time
763 0 : .signed_duration_since(start_time)
764 0 : .to_std()
765 0 : .unwrap()
766 0 : .as_millis() as u64;
767 0 : state.metrics.config_ms = startup_end_time
768 0 : .signed_duration_since(config_time)
769 0 : .to_std()
770 0 : .unwrap()
771 0 : .as_millis() as u64;
772 0 : state.metrics.total_startup_ms = startup_end_time
773 0 : .signed_duration_since(compute_state.start_time)
774 0 : .to_std()
775 0 : .unwrap()
776 0 : .as_millis() as u64;
777 0 : state.metrics.clone()
778 0 : };
779 0 : self.set_status(ComputeStatus::Running);
780 0 :
781 0 : // Log metrics so that we can search for slow operations in logs
782 0 : info!(?metrics, postmaster_pid = %postmaster_pid, "compute start finished");
783 :
784 : // Spawn the extension stats background task
785 0 : self.spawn_extension_stats_task();
786 0 :
787 0 : if pspec.spec.autoprewarm {
788 0 : self.prewarm_lfc(None);
789 0 : }
790 0 : Ok(())
791 0 : }
792 :
793 : #[instrument(skip_all)]
794 : async fn download_preload_extensions(&self, spec: &ComputeSpec) -> Result<()> {
795 : let remote_extensions = if let Some(remote_extensions) = &spec.remote_extensions {
796 : remote_extensions
797 : } else {
798 : return Ok(());
799 : };
800 :
801 : // First, create control files for all available extensions
802 : extension_server::create_control_files(remote_extensions, &self.params.pgbin);
803 :
804 : let library_load_start_time = Utc::now();
805 : let remote_ext_metrics = self.prepare_preload_libraries(spec).await?;
806 :
807 : let library_load_time = Utc::now()
808 : .signed_duration_since(library_load_start_time)
809 : .to_std()
810 : .unwrap()
811 : .as_millis() as u64;
812 : let mut state = self.state.lock().unwrap();
813 : state.metrics.load_ext_ms = library_load_time;
814 : state.metrics.num_ext_downloaded = remote_ext_metrics.num_ext_downloaded;
815 : state.metrics.largest_ext_size = remote_ext_metrics.largest_ext_size;
816 : state.metrics.total_ext_download_size = remote_ext_metrics.total_ext_download_size;
817 : info!(
818 : "Loading shared_preload_libraries took {:?}ms",
819 : library_load_time
820 : );
821 : info!("{:?}", remote_ext_metrics);
822 :
823 : Ok(())
824 : }
825 :
826 : /// Start the vm-monitor if directed to. The vm-monitor only runs on linux
827 : /// because it requires cgroups.
828 0 : fn start_vm_monitor(&self, disable_lfc_resizing: bool) -> StartVmMonitorResult {
829 : cfg_if::cfg_if! {
830 : if #[cfg(target_os = "linux")] {
831 : use std::env;
832 : use tokio_util::sync::CancellationToken;
833 :
834 : // This token is used internally by the monitor to clean up all threads
835 0 : let token = CancellationToken::new();
836 :
837 : // don't pass postgres connection string to vm-monitor if we don't want it to resize LFC
838 0 : let pgconnstr = if disable_lfc_resizing {
839 0 : None
840 : } else {
841 0 : Some(self.params.filecache_connstr.clone())
842 : };
843 :
844 0 : let vm_monitor = if env::var_os("AUTOSCALING").is_some() {
845 0 : let vm_monitor = tokio::spawn(vm_monitor::start(
846 0 : Box::leak(Box::new(vm_monitor::Args {
847 0 : cgroup: Some(self.params.cgroup.clone()),
848 0 : pgconnstr,
849 0 : addr: self.params.vm_monitor_addr.clone(),
850 0 : })),
851 0 : token.clone(),
852 0 : ));
853 0 : Some(vm_monitor)
854 : } else {
855 0 : None
856 : };
857 0 : StartVmMonitorResult { token, vm_monitor }
858 0 : } else {
859 0 : _ = disable_lfc_resizing; // appease unused lint on macOS
860 0 : StartVmMonitorResult { }
861 0 : }
862 0 : }
863 0 : }
864 :
865 0 : fn cleanup_after_postgres_exit(&self) -> Result<bool> {
866 0 : // Maybe sync safekeepers again, to speed up next startup
867 0 : let compute_state = self.state.lock().unwrap().clone();
868 0 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
869 0 : if matches!(pspec.spec.mode, compute_api::spec::ComputeMode::Primary) {
870 0 : info!("syncing safekeepers on shutdown");
871 0 : let storage_auth_token = pspec.storage_auth_token.clone();
872 0 : let lsn = self.sync_safekeepers(storage_auth_token)?;
873 0 : info!("synced safekeepers at lsn {lsn}");
874 0 : }
875 :
876 0 : let mut delay_exit = false;
877 0 : let mut state = self.state.lock().unwrap();
878 0 : if state.status == ComputeStatus::TerminationPending {
879 0 : state.status = ComputeStatus::Terminated;
880 0 : self.state_changed.notify_all();
881 0 : // we were asked to terminate gracefully, don't exit to avoid restart
882 0 : delay_exit = true
883 0 : }
884 0 : drop(state);
885 :
886 0 : if let Err(err) = self.check_for_core_dumps() {
887 0 : error!("error while checking for core dumps: {err:?}");
888 0 : }
889 :
890 0 : Ok(delay_exit)
891 0 : }
892 :
893 : /// Check that compute node has corresponding feature enabled.
894 0 : pub fn has_feature(&self, feature: ComputeFeature) -> bool {
895 0 : let state = self.state.lock().unwrap();
896 :
897 0 : if let Some(s) = state.pspec.as_ref() {
898 0 : s.spec.features.contains(&feature)
899 : } else {
900 0 : false
901 : }
902 0 : }
903 :
904 0 : pub fn set_status(&self, status: ComputeStatus) {
905 0 : let mut state = self.state.lock().unwrap();
906 0 : state.set_status(status, &self.state_changed);
907 0 : }
908 :
909 0 : pub fn set_failed_status(&self, err: anyhow::Error) {
910 0 : let mut state = self.state.lock().unwrap();
911 0 : state.set_failed_status(err, &self.state_changed);
912 0 : }
913 :
914 0 : pub fn get_status(&self) -> ComputeStatus {
915 0 : self.state.lock().unwrap().status
916 0 : }
917 :
918 0 : pub fn get_timeline_id(&self) -> Option<TimelineId> {
919 0 : self.state
920 0 : .lock()
921 0 : .unwrap()
922 0 : .pspec
923 0 : .as_ref()
924 0 : .map(|s| s.timeline_id)
925 0 : }
926 :
927 : // Remove the `pgdata` directory and create it again with the right permissions.
928 0 : fn create_pgdata(&self) -> Result<()> {
929 0 : // Ignore removal errors; most likely it is a 'No such file or directory (os error 2)'.
930 0 : // If it is something different, create_dir() will error out anyway.
931 0 : let pgdata = &self.params.pgdata;
932 0 : let _ok = fs::remove_dir_all(pgdata);
933 0 : fs::create_dir(pgdata)?;
934 0 : fs::set_permissions(pgdata, fs::Permissions::from_mode(0o700))?;
935 :
936 0 : Ok(())
937 0 : }
938 :
939 : // Get a basebackup over a libpq connection to the pageserver (using `connstr`)
940 : // and unarchive it into the `pgdata` directory, overwriting all its previous content.
941 : #[instrument(skip_all, fields(%lsn))]
942 : fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
943 : let spec = compute_state.pspec.as_ref().expect("spec must be set");
944 : let start_time = Instant::now();
945 :
946 : let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
947 : let mut config = postgres::Config::from_str(shard0_connstr)?;
948 :
949 : // Use the storage auth token from the config file, if given.
950 : // Note: this overrides any password set in the connection string.
951 : if let Some(storage_auth_token) = &spec.storage_auth_token {
952 : info!("Got storage auth token from spec file");
953 : config.password(storage_auth_token);
954 : } else {
955 : info!("Storage auth token not set");
956 : }
957 :
958 : config.application_name("compute_ctl");
959 : if let Some(spec) = &compute_state.pspec {
960 : config.options(&format!(
961 : "-c neon.compute_mode={}",
962 : spec.spec.mode.to_type_str()
963 : ));
964 : }
965 :
966 : // Connect to pageserver
967 : let mut client = config.connect(NoTls)?;
968 : let pageserver_connect_micros = start_time.elapsed().as_micros() as u64;
969 :
970 : let basebackup_cmd = match lsn {
971 : Lsn(0) => {
972 : if spec.spec.mode != ComputeMode::Primary {
973 : format!(
974 : "basebackup {} {} --gzip --replica",
975 : spec.tenant_id, spec.timeline_id
976 : )
977 : } else {
978 : format!("basebackup {} {} --gzip", spec.tenant_id, spec.timeline_id)
979 : }
980 : }
981 : _ => {
982 : if spec.spec.mode != ComputeMode::Primary {
983 : format!(
984 : "basebackup {} {} {} --gzip --replica",
985 : spec.tenant_id, spec.timeline_id, lsn
986 : )
987 : } else {
988 : format!(
989 : "basebackup {} {} {} --gzip",
990 : spec.tenant_id, spec.timeline_id, lsn
991 : )
992 : }
993 : }
994 : };
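// For example (hypothetical IDs), a primary starting at Lsn(0x2000060) sends
//
//     basebackup <tenant_id> <timeline_id> 0/2000060 --gzip
//
// a replica appends `--replica`, and Lsn(0) omits the LSN argument to
// request the latest available one.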
995 :
996 : let copyreader = client.copy_out(basebackup_cmd.as_str())?;
997 : let mut measured_reader = MeasuredReader::new(copyreader);
998 : let mut bufreader = std::io::BufReader::new(&mut measured_reader);
999 :
1000 : // Read the archive directly from the `CopyOutReader`
1001 : //
1002 : // Set `ignore_zeros` so that unpack() reads all the Copy data and
1003 : // doesn't stop at the end-of-archive marker. Otherwise, if the server
1004 : // sends an Error after finishing the tarball, we will not notice it.
1005 : let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut bufreader));
1006 : ar.set_ignore_zeros(true);
1007 : ar.unpack(&self.params.pgdata)?;
1008 :
1009 : // Report metrics
1010 : let mut state = self.state.lock().unwrap();
1011 : state.metrics.pageserver_connect_micros = pageserver_connect_micros;
1012 : state.metrics.basebackup_bytes = measured_reader.get_byte_count() as u64;
1013 : state.metrics.basebackup_ms = start_time.elapsed().as_millis() as u64;
1014 : Ok(())
1015 : }
1016 :
1017 : // Gets the basebackup in a retry loop
1018 : #[instrument(skip_all, fields(%lsn))]
1019 : pub fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
1020 : let mut retry_period_ms = 500.0;
1021 : let mut attempts = 0;
1022 : const DEFAULT_ATTEMPTS: u16 = 10;
1023 : #[cfg(feature = "testing")]
1024 : let max_attempts = if let Ok(v) = env::var("NEON_COMPUTE_TESTING_BASEBACKUP_RETRIES") {
1025 : u16::from_str(&v).unwrap()
1026 : } else {
1027 : DEFAULT_ATTEMPTS
1028 : };
1029 : #[cfg(not(feature = "testing"))]
1030 : let max_attempts = DEFAULT_ATTEMPTS;
1031 : loop {
1032 : let result = self.try_get_basebackup(compute_state, lsn);
1033 : match result {
1034 : Ok(_) => {
1035 : return result;
1036 : }
1037 : Err(ref e) if attempts < max_attempts => {
1038 : warn!(
1039 : "Failed to get basebackup: {} (attempt {}/{})",
1040 : e, attempts, max_attempts
1041 : );
1042 : std::thread::sleep(std::time::Duration::from_millis(retry_period_ms as u64));
1043 : retry_period_ms *= 1.5;
1044 : }
1045 : Err(_) => {
1046 : return result;
1047 : }
1048 : }
1049 : attempts += 1;
1050 : }
1051 : }
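// Backoff sketch: the sleep grows geometrically, 500ms * 1.5^n, i.e. 500,
// 750, 1125, ... up to ~19.2s on the tenth retry, roughly 57 seconds of
// total sleep; the eleventh consecutive failure is returned to the caller.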
1052 :
1053 0 : pub async fn check_safekeepers_synced_async(
1054 0 : &self,
1055 0 : compute_state: &ComputeState,
1056 0 : ) -> Result<Option<Lsn>> {
1057 0 : // Construct a connection config for each safekeeper
1058 0 : let pspec: ParsedSpec = compute_state
1059 0 : .pspec
1060 0 : .as_ref()
1061 0 : .expect("spec must be set")
1062 0 : .clone();
1063 0 : let sk_connstrs: Vec<String> = pspec.safekeeper_connstrings.clone();
1064 0 : let sk_configs = sk_connstrs.into_iter().map(|connstr| {
1065 0 : // Format connstr
1066 0 : let id = connstr.clone();
1067 0 : let connstr = format!("postgresql://no_user@{}", connstr);
1068 0 : let options = format!(
1069 0 : "-c timeline_id={} tenant_id={}",
1070 0 : pspec.timeline_id, pspec.tenant_id
1071 0 : );
1072 0 :
1073 0 : // Construct client
1074 0 : let mut config = tokio_postgres::Config::from_str(&connstr).unwrap();
1075 0 : config.options(&options);
1076 0 : if let Some(storage_auth_token) = pspec.storage_auth_token.clone() {
1077 0 : config.password(storage_auth_token);
1078 0 : }
1079 :
1080 0 : (id, config)
1081 0 : });
1082 0 :
1083 0 : // Create task set to query all safekeepers
1084 0 : let mut tasks = FuturesUnordered::new();
1085 0 : let quorum = sk_configs.len() / 2 + 1;
1086 0 : for (id, config) in sk_configs {
1087 0 : let timeout = tokio::time::Duration::from_millis(100);
1088 0 : let task = tokio::time::timeout(timeout, ping_safekeeper(id, config));
1089 0 : tasks.push(tokio::spawn(task));
1090 0 : }
1091 :
1092 : // Get a quorum of responses or errors
1093 0 : let mut responses = Vec::new();
1094 0 : let mut join_errors = Vec::new();
1095 0 : let mut task_errors = Vec::new();
1096 0 : let mut timeout_errors = Vec::new();
1097 0 : while let Some(response) = tasks.next().await {
1098 0 : match response {
1099 0 : Ok(Ok(Ok(r))) => responses.push(r),
1100 0 : Ok(Ok(Err(e))) => task_errors.push(e),
1101 0 : Ok(Err(e)) => timeout_errors.push(e),
1102 0 : Err(e) => join_errors.push(e),
1103 : };
1104 0 : if responses.len() >= quorum {
1105 0 : break;
1106 0 : }
1107 0 : if join_errors.len() + task_errors.len() + timeout_errors.len() >= quorum {
1108 0 : break;
1109 0 : }
1110 : }
1111 :
1112 : // In case of error, log and fail the check, but don't crash.
1113 : // We're playing it safe because these errors could be transient
1114 : // and we don't yet retry. Also being careful here allows us to
1115 : // be backwards compatible with safekeepers that don't have the
1116 : // TIMELINE_STATUS API yet.
1117 0 : if responses.len() < quorum {
1118 0 : error!(
1119 0 : "failed sync safekeepers check {:?} {:?} {:?}",
1120 : join_errors, task_errors, timeout_errors
1121 : );
1122 0 : return Ok(None);
1123 0 : }
1124 0 :
1125 0 : Ok(check_if_synced(responses))
1126 0 : }
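// Worked example: with 3 safekeepers, quorum = 3 / 2 + 1 = 2, so the check
// succeeds as soon as 2 safekeepers respond, and gives up (returning
// Ok(None)) once 2 pings error or time out; each ping is capped at 100ms.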
1127 :
1128 : // Fast path for sync_safekeepers. If they're already synced we get the lsn
1129 : // in one roundtrip. If not, we should do a full sync_safekeepers.
1130 : #[instrument(skip_all)]
1131 : pub fn check_safekeepers_synced(&self, compute_state: &ComputeState) -> Result<Option<Lsn>> {
1132 : let start_time = Utc::now();
1133 :
1134 : let rt = tokio::runtime::Handle::current();
1135 : let result = rt.block_on(self.check_safekeepers_synced_async(compute_state));
1136 :
1137 : // Record runtime
1138 : self.state.lock().unwrap().metrics.sync_sk_check_ms = Utc::now()
1139 : .signed_duration_since(start_time)
1140 : .to_std()
1141 : .unwrap()
1142 : .as_millis() as u64;
1143 : result
1144 : }
1145 :
1146 : // Run `postgres` in a special mode with `--sync-safekeepers` argument
1147 : // and return the reported LSN back to the caller.
1148 : #[instrument(skip_all)]
1149 : pub fn sync_safekeepers(&self, storage_auth_token: Option<String>) -> Result<Lsn> {
1150 : let start_time = Utc::now();
1151 :
1152 : let mut sync_handle = maybe_cgexec(&self.params.pgbin)
1153 : .args(["--sync-safekeepers"])
1154 : .env("PGDATA", &self.params.pgdata) // we cannot use -D in this mode
1155 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
1156 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
1157 : } else {
1158 : vec![]
1159 : })
1160 : .stdout(Stdio::piped())
1161 : .stderr(Stdio::piped())
1162 : .spawn()
1163 : .expect("postgres --sync-safekeepers failed to start");
1164 : SYNC_SAFEKEEPERS_PID.store(sync_handle.id(), Ordering::SeqCst);
1165 :
1166 : // `postgres --sync-safekeepers` will print all log output to stderr and
1167 : // final LSN to stdout. So we leave stdout to collect LSN, while stderr logs
1168 : // will be collected in a child thread.
1169 : let stderr = sync_handle
1170 : .stderr
1171 : .take()
1172 : .expect("stderr should be captured");
1173 : let logs_handle = handle_postgres_logs(stderr);
1174 :
1175 : let sync_output = sync_handle
1176 : .wait_with_output()
1177 : .expect("postgres --sync-safekeepers failed");
1178 : SYNC_SAFEKEEPERS_PID.store(0, Ordering::SeqCst);
1179 :
1180 : // Process has exited, so we can join the logs thread.
1181 : let _ = tokio::runtime::Handle::current()
1182 : .block_on(logs_handle)
1183 0 : .map_err(|e| tracing::error!("log task panicked: {:?}", e));
1184 :
1185 : if !sync_output.status.success() {
1186 : anyhow::bail!(
1187 : "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
1188 : sync_output.status,
1189 : String::from_utf8(sync_output.stdout)
1190 : .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
1191 : );
1192 : }
1193 :
1194 : self.state.lock().unwrap().metrics.sync_safekeepers_ms = Utc::now()
1195 : .signed_duration_since(start_time)
1196 : .to_std()
1197 : .unwrap()
1198 : .as_millis() as u64;
1199 :
1200 : let lsn = Lsn::from_str(String::from_utf8(sync_output.stdout)?.trim())?;
1201 :
1202 : Ok(lsn)
1203 : }
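// Example (illustrative): `postgres --sync-safekeepers` writes its logs to
// stderr and prints only the final flush LSN, e.g. "0/2000060\n", to stdout;
// the parse above then yields Lsn(0x2000060).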
1204 :
1205 : /// Do all the preparations like PGDATA directory creation, configuration,
1206 : /// safekeepers sync, basebackup, etc.
1207 : #[instrument(skip_all)]
1208 : pub fn prepare_pgdata(&self, compute_state: &ComputeState) -> Result<()> {
1209 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
1210 : let spec = &pspec.spec;
1211 : let pgdata_path = Path::new(&self.params.pgdata);
1212 :
1213 : let tls_config = self.tls_config(&pspec.spec);
1214 :
1215 : // Remove/create an empty pgdata directory and put configuration there.
1216 : self.create_pgdata()?;
1217 : config::write_postgres_conf(
1218 : pgdata_path,
1219 : &pspec.spec,
1220 : self.params.internal_http_port,
1221 : tls_config,
1222 : )?;
1223 :
1224 : // Syncing safekeepers is only safe with primary nodes: if a primary
1225 : // is already connected it will be kicked out, so a secondary (standby)
1226 : // cannot sync safekeepers.
1227 : let lsn = match spec.mode {
1228 : ComputeMode::Primary => {
1229 : info!("checking if safekeepers are synced");
1230 : let lsn = if let Ok(Some(lsn)) = self.check_safekeepers_synced(compute_state) {
1231 : lsn
1232 : } else {
1233 : info!("starting safekeepers syncing");
1234 : self.sync_safekeepers(pspec.storage_auth_token.clone())
1235 0 : .with_context(|| "failed to sync safekeepers")?
1236 : };
1237 : info!("safekeepers synced at LSN {}", lsn);
1238 : lsn
1239 : }
1240 : ComputeMode::Static(lsn) => {
1241 : info!("Starting read-only node at static LSN {}", lsn);
1242 : lsn
1243 : }
1244 : ComputeMode::Replica => {
1245 : info!("Initializing standby from latest Pageserver LSN");
1246 : Lsn(0)
1247 : }
1248 : };
1249 :
1250 : info!(
1251 : "getting basebackup@{} from pageserver {}",
1252 : lsn, &pspec.pageserver_connstr
1253 : );
1254 0 : self.get_basebackup(compute_state, lsn).with_context(|| {
1255 0 : format!(
1256 0 : "failed to get basebackup@{} from pageserver {}",
1257 0 : lsn, &pspec.pageserver_connstr
1258 0 : )
1259 0 : })?;
1260 :
1261 : // Update pg_hba.conf received with basebackup.
1262 : update_pg_hba(pgdata_path)?;
1263 :
1264 : // Place pg_dynshmem under /dev/shm. This allows us to use
1265 : // 'dynamic_shared_memory_type = mmap' so that the files are placed in
1266 : // /dev/shm, similar to how 'dynamic_shared_memory_type = posix' works.
1267 : //
1268 : // Why on earth don't we just stick to the 'posix' default, you might
1269 : // ask. It turns out that making large allocations with 'posix' doesn't
1270 : // work very well with autoscaling. The behavior we want is that:
1271 : //
1272 : // 1. You can make large DSM allocations, larger than the current RAM
1273 : // size of the VM, without errors
1274 : //
1275 : // 2. If the allocated memory is really used, the VM is scaled up
1276 : // automatically to accommodate that
1277 : //
1278 : // We try to make that possible by having swap in the VM. But with the
1279 : // default 'posix' DSM implementation, we fail step 1, even when there's
1280 : // plenty of swap available. PostgreSQL uses posix_fallocate() to create
1281 : // the shmem segment, which is really just a file in /dev/shm in Linux,
1282 : // but posix_fallocate() on tmpfs returns ENOMEM if the size is larger
1283 : // than available RAM.
1284 : //
1285 : // Using 'dynamic_shared_memory_type = mmap' works around that, because
1286 : // the Postgres 'mmap' DSM implementation doesn't use
1287 : // posix_fallocate(). Instead, it uses repeated calls to write(2) to
1288 : // fill the file with zeros. It's weird that that differs between
1289 : // 'posix' and 'mmap', but we take advantage of it. When the file is
1290 : // filled slowly with write(2), the kernel allows it to grow larger, as
1291 : // long as there's swap available.
1292 : //
1293 : // In short, using 'dynamic_shared_memory_type = mmap' allows a single DSM
1294 : // segment to be larger than the currently available RAM. But we don't want
1295 : // to store it in a real file, which the kernel would try to flush to disk,
1296 : // so we symlink pg_dynshmem to /dev/shm.
1297 : //
1298 : // We don't set 'dynamic_shared_memory_type = mmap' here; we let the
1299 : // control plane control that option. If 'mmap' is not used, this
1300 : // symlink doesn't affect anything.
1301 : //
1302 : // See https://github.com/neondatabase/autoscaling/issues/800
1303 : std::fs::remove_dir(pgdata_path.join("pg_dynshmem"))?;
1304 : symlink("/dev/shm/", pgdata_path.join("pg_dynshmem"))?;
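// Minimal repro sketch of the 'posix' limitation described above (assumes
// the `libc` crate; not part of this file): on tmpfs, posix_fallocate() of a
// region larger than RAM fails with ENOMEM even when swap could absorb it,
// while growing the file via write() succeeds:
//
//     let rc = unsafe { libc::posix_fallocate(fd, 0, bigger_than_ram) };
//     assert_eq!(rc, libc::ENOMEM); // 'posix' DSM hits this; 'mmap' does not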
1305 :
1306 : match spec.mode {
1307 : ComputeMode::Primary => {}
1308 : ComputeMode::Replica | ComputeMode::Static(..) => {
1309 : add_standby_signal(pgdata_path)?;
1310 : }
1311 : }
1312 :
1313 : Ok(())
1314 : }
1315 :
1316 : /// Start and stop a postgres process to warm up the VM for startup.
1317 0 : pub fn prewarm_postgres_vm_memory(&self) -> Result<()> {
1318 0 : info!("prewarming VM memory");
1319 :
1320 : // Create pgdata
1321 0 : let pgdata = &format!("{}.warmup", self.params.pgdata);
1322 0 : create_pgdata(pgdata)?;
1323 :
1324 : // Run initdb to completion
1325 0 : info!("running initdb");
1326 0 : let initdb_bin = Path::new(&self.params.pgbin)
1327 0 : .parent()
1328 0 : .unwrap()
1329 0 : .join("initdb");
1330 0 : Command::new(initdb_bin)
1331 0 : .args(["--pgdata", pgdata])
1332 0 : .output()
1333 0 : .expect("cannot start initdb process");
1334 :
1335 : // Write conf
1336 : use std::io::Write;
1337 0 : let conf_path = Path::new(pgdata).join("postgresql.conf");
1338 0 : let mut file = std::fs::File::create(conf_path)?;
1339 0 : writeln!(file, "shared_buffers=65536")?;
1340 0 : writeln!(file, "port=51055")?; // Nobody should be connecting
1341 0 : writeln!(file, "shared_preload_libraries = 'neon'")?;
1342 :
1343 : // Start postgres
1344 0 : info!("starting postgres");
1345 0 : let mut pg = maybe_cgexec(&self.params.pgbin)
1346 0 : .args(["-D", pgdata])
1347 0 : .spawn()
1348 0 : .expect("cannot start postgres process");
1349 0 :
1350 0 : // Stop it when it's ready
1351 0 : info!("waiting for postgres");
1352 0 : wait_for_postgres(&mut pg, Path::new(pgdata))?;
1353 : // SIGQUIT orders postgres to exit immediately. We don't want to SIGKILL
1354 : // it to avoid orphaned processes prowling around while datadir is
1355 : // wiped.
1356 0 : let pm_pid = Pid::from_raw(pg.id() as i32);
1357 0 : kill(pm_pid, Signal::SIGQUIT)?;
1358 0 : info!("sent SIGQUIT signal");
1359 0 : pg.wait()?;
1360 0 : info!("done prewarming vm memory");
1361 :
1362 : // clean up
1363 0 : let _ok = fs::remove_dir_all(pgdata);
1364 0 : Ok(())
1365 0 : }
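// Recap of the prewarm sequence: initdb, write a minimal postgresql.conf,
// start postgres, wait until it accepts connections, SIGQUIT, reap, delete
// the scratch datadir. The server itself is throwaway; the point is to make
// QEMU fault in guest memory and to warm caches with the postgres binaries.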
1366 :
1367 : /// Start Postgres as a child process and wait for it to start accepting
1368 : /// connections.
1369 : ///
1370 : /// Returns a handle to the child process and a handle to the logs thread.
1371 : #[instrument(skip_all)]
1372 : pub fn start_postgres(&self, storage_auth_token: Option<String>) -> Result<PostgresHandle> {
1373 : let pgdata_path = Path::new(&self.params.pgdata);
1374 :
1375 : // Run postgres as a child process.
1376 : let mut pg = maybe_cgexec(&self.params.pgbin)
1377 : .args(["-D", &self.params.pgdata])
1378 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
1379 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
1380 : } else {
1381 : vec![]
1382 : })
1383 : .stderr(Stdio::piped())
1384 : .spawn()
1385 : .expect("cannot start postgres process");
1386 : PG_PID.store(pg.id(), Ordering::SeqCst);
1387 :
1388 : // Start a task to collect logs from stderr.
1389 : let stderr = pg.stderr.take().expect("stderr should be captured");
1390 : let logs_handle = handle_postgres_logs(stderr);
1391 :
1392 : wait_for_postgres(&mut pg, pgdata_path)?;
1393 :
1394 : Ok(PostgresHandle {
1395 : postgres: pg,
1396 : log_collector: logs_handle,
1397 : })
1398 : }
1399 :
1400 : /// Wait for the child Postgres process forever. In this state Ctrl+C will
1401 : /// propagate to Postgres and it will be shut down as well.
1402 0 : fn wait_postgres(&self, mut pg_handle: PostgresHandle) -> std::process::ExitStatus {
1403 0 : info!(postmaster_pid = %pg_handle.postgres.id(), "Waiting for Postgres to exit");
1404 :
1405 0 : let ecode = pg_handle
1406 0 : .postgres
1407 0 : .wait()
1408 0 : .expect("failed to start waiting on Postgres process");
1409 0 : PG_PID.store(0, Ordering::SeqCst);
1410 0 :
1411 0 : // Process has exited. Wait for the log collecting task to finish.
1412 0 : let _ = tokio::runtime::Handle::current()
1413 0 : .block_on(pg_handle.log_collector)
1414 0 : .map_err(|e| tracing::error!("log task panicked: {:?}", e));
1415 0 :
1416 0 : ecode
1417 0 : }
1418 :
1419 : /// Do post configuration of the already started Postgres. This function spawns a background task to
1420 : /// configure the database after applying the compute spec. Currently, it upgrades the neon extension
1421 : /// version. In the future, it may upgrade all 3rd-party extensions.
1422 : #[instrument(skip_all)]
1423 : pub fn post_apply_config(&self) -> Result<()> {
1424 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:post_apply_config"));
1425 0 : tokio::spawn(async move {
1426 0 : let res = async {
1427 0 : let (mut client, connection) = conf.connect(NoTls).await?;
1428 0 : tokio::spawn(async move {
1429 0 : if let Err(e) = connection.await {
1430 0 : eprintln!("connection error: {}", e);
1431 0 : }
1432 0 : });
1433 0 :
1434 0 : handle_neon_extension_upgrade(&mut client)
1435 0 : .await
1436 0 : .context("handle_neon_extension_upgrade")?;
1437 0 : Ok::<_, anyhow::Error>(())
1438 0 : }
1439 0 : .await;
1440 0 : if let Err(err) = res {
1441 0 : error!("error while post_apply_config: {err:#}");
1442 0 : }
1443 0 : });
1444 : Ok(())
1445 : }
1446 :
1447 0 : pub fn get_conn_conf(&self, application_name: Option<&str>) -> postgres::Config {
1448 0 : let mut conf = self.conn_conf.clone();
1449 0 : if let Some(application_name) = application_name {
1450 0 : conf.application_name(application_name);
1451 0 : }
1452 0 : conf
1453 0 : }
1454 :
1455 0 : pub fn get_tokio_conn_conf(&self, application_name: Option<&str>) -> tokio_postgres::Config {
1456 0 : let mut conf = self.tokio_conn_conf.clone();
1457 0 : if let Some(application_name) = application_name {
1458 0 : conf.application_name(application_name);
1459 0 : }
1460 0 : conf
1461 0 : }
1462 :
1463 0 : pub async fn get_maintenance_client(
1464 0 : conf: &tokio_postgres::Config,
1465 0 : ) -> Result<tokio_postgres::Client> {
1466 0 : let mut conf = conf.clone();
1467 0 : conf.application_name("compute_ctl:apply_config");
1468 :
1469 0 : let (client, conn) = match conf.connect(NoTls).await {
1470 : // If the connection fails, it may be an old node with the `zenith_admin` superuser.
1471 : //
1472 : // In this case we need to connect with the old `zenith_admin` name
1473 : // and create a new user. We cannot simply rename the connected user,
1474 : // but we can create a new one and grant it all privileges.
1475 0 : Err(e) => match e.code() {
1476 : Some(&SqlState::INVALID_PASSWORD)
1477 : | Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => {
1478 : // Connect with `zenith_admin` if `cloud_admin` could not authenticate
1479 0 : info!(
1480 0 : "cannot connect to Postgres: {}, retrying with 'zenith_admin' username",
1481 : e
1482 : );
1483 0 : let mut zenith_admin_conf = postgres::config::Config::from(conf.clone());
1484 0 : zenith_admin_conf.application_name("compute_ctl:apply_config");
1485 0 : zenith_admin_conf.user("zenith_admin");
1486 :
1487 : // It doesn't matter what the options were before; here we just want
1488 : // to connect and create a new superuser role.
1489 : const ZENITH_OPTIONS: &str = "-c role=zenith_admin -c default_transaction_read_only=off -c search_path=public -c statement_timeout=0";
1490 0 : zenith_admin_conf.options(ZENITH_OPTIONS);
1491 :
1492 0 : let mut client =
1493 0 : zenith_admin_conf.connect(NoTls)
1494 0 : .context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
1495 :
1496 : // Disable forwarding so that users don't get a cloud_admin role
1497 0 : let mut func = || {
1498 0 : client.simple_query("SET neon.forward_ddl = false")?;
1499 0 : client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
1500 0 : client.simple_query("GRANT zenith_admin TO cloud_admin")?;
1501 0 : Ok::<_, anyhow::Error>(())
1502 0 : };
1503 0 : func().context("apply_config setup cloud_admin")?;
1504 :
1505 0 : drop(client);
1506 0 :
1507 0 : // Reconnect with the connection string with the expected username
1508 0 : conf.connect(NoTls).await?
1509 : }
1510 0 : _ => return Err(e.into()),
1511 : },
1512 0 : Ok((client, conn)) => (client, conn),
1513 : };
1514 :
1515 0 : spawn(async move {
1516 0 : if let Err(e) = conn.await {
1517 0 : error!("maintenance client connection error: {}", e);
1518 0 : }
1519 0 : });
1520 0 :
1521 0 : // Disable DDL forwarding because control plane already knows about the roles/databases
1522 0 : // we're about to modify.
1523 0 : client
1524 0 : .simple_query("SET neon.forward_ddl = false")
1525 0 : .await
1526 0 : .context("apply_config SET neon.forward_ddl = false")?;
1527 :
1528 0 : Ok(client)
1529 0 : }
1530 :
1531 : /// Do initial configuration of the already started Postgres.
1532 : #[instrument(skip_all)]
1533 : pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
1534 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:apply_config"));
1535 :
1536 : let conf = Arc::new(conf);
1537 : let spec = Arc::new(
1538 : compute_state
1539 : .pspec
1540 : .as_ref()
1541 : .expect("spec must be set")
1542 : .spec
1543 : .clone(),
1544 : );
1545 :
1546 : let mut tls_config = None::<TlsConfig>;
1547 : if spec.features.contains(&ComputeFeature::TlsExperimental) {
1548 : tls_config = self.compute_ctl_config.tls.clone();
1549 : }
1550 :
1551 : let max_concurrent_connections = self.max_service_connections(compute_state, &spec);
1552 :
1553 : // Merge-apply spec & changes to PostgreSQL state.
1554 : self.apply_spec_sql(spec.clone(), conf.clone(), max_concurrent_connections)?;
1555 :
1556 : if let Some(local_proxy) = &spec.clone().local_proxy_config {
1557 : let mut local_proxy = local_proxy.clone();
1558 : local_proxy.tls = tls_config.clone();
1559 :
1560 : info!("configuring local_proxy");
1561 : local_proxy::configure(&local_proxy).context("apply_config local_proxy")?;
1562 : }
1563 :
1564 : // Run migrations separately to not hold up cold starts
1565 0 : tokio::spawn(async move {
1566 0 : let mut conf = conf.as_ref().clone();
1567 0 : conf.application_name("compute_ctl:migrations");
1568 0 :
1569 0 : match conf.connect(NoTls).await {
1570 0 : Ok((mut client, connection)) => {
1571 0 : tokio::spawn(async move {
1572 0 : if let Err(e) = connection.await {
1573 0 : eprintln!("connection error: {}", e);
1574 0 : }
1575 0 : });
1576 0 : if let Err(e) = handle_migrations(&mut client).await {
1577 0 : error!("Failed to run migrations: {}", e);
1578 0 : }
1579 : }
1580 0 : Err(e) => {
1581 0 : error!(
1582 0 : "Failed to connect to the compute for running migrations: {}",
1583 : e
1584 : );
1585 : }
1586 : };
1587 0 : });
1588 :
1589 : Ok::<(), anyhow::Error>(())
1590 : }
1591 :
1592 : // This wraps `pg_ctl reload`; note that we currently use `pg_ctl` only for
1593 : // reloading the config, not for start / stop.
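//
// Under the hood, `pg_ctl reload` sends SIGHUP to the postmaster, which makes
// it re-read postgresql.conf; the SQL-level equivalent is
// `SELECT pg_reload_conf();`.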
1594 : #[instrument(skip_all)]
1595 : fn pg_reload_conf(&self) -> Result<()> {
1596 : let pgctl_bin = Path::new(&self.params.pgbin)
1597 : .parent()
1598 : .unwrap()
1599 : .join("pg_ctl");
1600 : Command::new(pgctl_bin)
1601 : .args(["reload", "-D", &self.params.pgdata])
1602 : .output()
1603 : .expect("cannot run pg_ctl process");
1604 : Ok(())
1605 : }
1606 :
1607 : /// Similar to `apply_config()`, but does a bit different sequence of operations,
1608 : /// as it's used to reconfigure a previously started and configured Postgres node.
1609 : #[instrument(skip_all)]
1610 : pub fn reconfigure(&self) -> Result<()> {
1611 : let spec = self.state.lock().unwrap().pspec.clone().unwrap().spec;
1612 :
1613 : let tls_config = self.tls_config(&spec);
1614 :
1615 : if let Some(ref pgbouncer_settings) = spec.pgbouncer_settings {
1616 : info!("tuning pgbouncer");
1617 :
1618 : let pgbouncer_settings = pgbouncer_settings.clone();
1619 : let tls_config = tls_config.clone();
1620 :
1621 : // Spawn a background task to do the tuning,
1622 : // so that we don't block the main thread that starts Postgres.
1623 0 : tokio::spawn(async move {
1624 0 : let res = tune_pgbouncer(pgbouncer_settings, tls_config).await;
1625 0 : if let Err(err) = res {
1626 0 : error!("error while tuning pgbouncer: {err:?}");
1627 0 : }
1628 0 : });
1629 : }
1630 :
1631 : if let Some(ref local_proxy) = spec.local_proxy_config {
1632 : info!("configuring local_proxy");
1633 :
1634 : // Spawn a background task to do the configuration,
1635 : // so that we don't block the main thread that starts Postgres.
1636 : let mut local_proxy = local_proxy.clone();
1637 : local_proxy.tls = tls_config.clone();
1638 0 : tokio::spawn(async move {
1639 0 : if let Err(err) = local_proxy::configure(&local_proxy) {
1640 0 : error!("error while configuring local_proxy: {err:?}");
1641 0 : }
1642 0 : });
1643 : }
1644 :
1645 : // Reconfigure rsyslog for Postgres logs export
1646 : let conf = PostgresLogsRsyslogConfig::new(spec.logs_export_host.as_deref());
1647 : configure_postgres_logs_export(conf)?;
1648 :
1649 : // Write new config
1650 : let pgdata_path = Path::new(&self.params.pgdata);
1651 : config::write_postgres_conf(
1652 : pgdata_path,
1653 : &spec,
1654 : self.params.internal_http_port,
1655 : tls_config,
1656 : )?;
1657 :
1658 : if !spec.skip_pg_catalog_updates {
1659 : let max_concurrent_connections = spec.reconfigure_concurrency;
1660 : // Temporarily reset max_cluster_size in config
1661 : // to avoid the possibility of hitting the limit, while we are reconfiguring:
1662 : // creating new extensions, roles, etc.
1663 0 : config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
1664 0 : self.pg_reload_conf()?;
1665 :
1666 0 : if spec.mode == ComputeMode::Primary {
1667 0 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:reconfigure"));
1668 0 : let conf = Arc::new(conf);
1669 0 :
1670 0 : let spec = Arc::new(spec.clone());
1671 0 :
1672 0 : self.apply_spec_sql(spec, conf, max_concurrent_connections)?;
1673 0 : }
1674 :
1675 0 : Ok(())
1676 0 : })?;
1677 : }
1678 :
1679 : self.pg_reload_conf()?;
1680 :
1681 : let unknown_op = "unknown".to_string();
1682 : let op_id = spec.operation_uuid.as_ref().unwrap_or(&unknown_op);
1683 : info!(
1684 : "finished reconfiguration of compute node for operation {}",
1685 : op_id
1686 : );
1687 :
1688 : Ok(())
1689 : }
1690 :
1691 : #[instrument(skip_all)]
1692 : pub fn configure_as_primary(&self, compute_state: &ComputeState) -> Result<()> {
1693 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
1694 :
1695 : assert!(pspec.spec.mode == ComputeMode::Primary);
1696 : if !pspec.spec.skip_pg_catalog_updates {
1697 : let pgdata_path = Path::new(&self.params.pgdata);
1698 : // temporarily reset max_cluster_size in config
1699 : // to avoid the possibility of hitting the limit, while we are applying config:
1700 : // creating new extensions, roles, etc...
1701 0 : config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || {
1702 0 : self.pg_reload_conf()?;
1703 :
1704 0 : self.apply_config(compute_state)?;
1705 :
1706 0 : Ok(())
1707 0 : })?;
1708 :
1709 : let postgresql_conf_path = pgdata_path.join("postgresql.conf");
1710 : if config::line_in_file(
1711 : &postgresql_conf_path,
1712 : "neon.disable_logical_replication_subscribers=false",
1713 : )? {
1714 : info!(
1715 : "updated postgresql.conf to set neon.disable_logical_replication_subscribers=false"
1716 : );
1717 : }
1718 : self.pg_reload_conf()?;
1719 : }
1720 : self.post_apply_config()?;
1721 :
1722 : Ok(())
1723 : }
1724 :
1725 0 : pub async fn watch_cert_for_changes(self: Arc<Self>) {
1726 : // update status on cert renewal
1727 0 : if let Some(tls_config) = &self.compute_ctl_config.tls {
1728 0 : let tls_config = tls_config.clone();
1729 :
1730 : // wait until the cert exists.
1731 0 : let mut cert_watch = watch_cert_for_changes(tls_config.cert_path.clone()).await;
1732 :
1733 0 : tokio::task::spawn_blocking(move || {
1734 0 : let handle = tokio::runtime::Handle::current();
1735 : 'cert_update: loop {
1736 : // let postgres/pgbouncer/local_proxy know the new cert/key exists.
1737 : // we need to wait until the compute is in a configurable state first.
1738 :
1739 0 : let mut state = self.state.lock().unwrap();
1740 : 'status_update: loop {
1741 0 : match state.status {
1742 : // let's update the state to config pending
1743 : ComputeStatus::ConfigurationPending | ComputeStatus::Running => {
1744 0 : state.set_status(
1745 0 : ComputeStatus::ConfigurationPending,
1746 0 : &self.state_changed,
1747 0 : );
1748 0 : break 'status_update;
1749 : }
1750 :
1751 : // exit loop
1752 : ComputeStatus::Failed
1753 : | ComputeStatus::TerminationPending
1754 0 : | ComputeStatus::Terminated => break 'cert_update,
1755 :
1756 : // wait
1757 : ComputeStatus::Init
1758 : | ComputeStatus::Configuration
1759 0 : | ComputeStatus::Empty => {
1760 0 : state = self.state_changed.wait(state).unwrap();
1761 0 : }
1762 : }
1763 : }
1764 0 : drop(state);
1765 0 :
1766 0 : // wait for a new certificate update
1767 0 : if handle.block_on(cert_watch.changed()).is_err() {
1768 0 : break;
1769 0 : }
1770 : }
1771 0 : });
1772 0 : }
1773 0 : }
1774 :
1775 0 : pub fn tls_config(&self, spec: &ComputeSpec) -> &Option<TlsConfig> {
1776 0 : if spec.features.contains(&ComputeFeature::TlsExperimental) {
1777 0 : &self.compute_ctl_config.tls
1778 : } else {
1779 0 : &None::<TlsConfig>
1780 : }
1781 0 : }
1782 :
1783 : /// Update the `last_active` in the shared state, but only if it is more recent than the current value.
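///
/// Relies on the standard `Ord` for `Option<T>`, where `None` sorts before any
/// `Some(_)`. Illustrative check:
///
/// ```ignore
/// use chrono::{TimeZone, Utc};
/// let t = Some(Utc.timestamp_opt(0, 0).unwrap());
/// assert!(t > None); // `Some(<DateTime>)` is always greater than `None`
/// ```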
1784 0 : pub fn update_last_active(&self, last_active: Option<DateTime<Utc>>) {
1785 0 : let mut state = self.state.lock().unwrap();
1786 0 : // NB: `Some(<DateTime>)` is always greater than `None`.
1787 0 : if last_active > state.last_active {
1788 0 : state.last_active = last_active;
1789 0 : debug!("set the last compute activity time to: {:?}", last_active);
1790 0 : }
1791 0 : }
1792 :
1793 : // Look for core dumps and collect backtraces.
1794 : //
1795 : // EKS worker nodes have the following core dump settings:
1796 : // /proc/sys/kernel/core_pattern -> core
1797 : // /proc/sys/kernel/core_uses_pid -> 1
1798 : // ulimit -c -> unlimited
1799 : // which results in core dumps being written to the Postgres data directory as core.<pid>.
1800 : //
1801 : // Use that as the default location and pattern, except on macOS, where core dumps
1802 : // are written to the /cores/ directory by default.
1803 : //
1804 : // With default Linux settings, the core dump file is called just "core", so check for
1805 : // that too.
1806 0 : pub fn check_for_core_dumps(&self) -> Result<()> {
1807 0 : let core_dump_dir = match std::env::consts::OS {
1808 0 : "macos" => Path::new("/cores/"),
1809 0 : _ => Path::new(&self.params.pgdata),
1810 : };
1811 :
1812 : // Collect core dump paths if any
1813 0 : info!("checking for core dumps in {}", core_dump_dir.display());
1814 0 : let files = fs::read_dir(core_dump_dir)?;
1815 0 : let cores = files.filter_map(|entry| {
1816 0 : let entry = entry.ok()?;
1817 :
1818 0 : let is_core_dump = match entry.file_name().to_str()? {
1819 0 : n if n.starts_with("core.") => true,
1820 0 : "core" => true,
1821 0 : _ => false,
1822 : };
1823 0 : if is_core_dump {
1824 0 : Some(entry.path())
1825 : } else {
1826 0 : None
1827 : }
1828 0 : });
1829 :
1830 : // Print backtrace for each core dump
1831 0 : for core_path in cores {
1832 0 : warn!(
1833 0 : "core dump found: {}, collecting backtrace",
1834 0 : core_path.display()
1835 : );
1836 :
1837 : // Try first with gdb
1838 0 : let backtrace = Command::new("gdb")
1839 0 : .args(["--batch", "-q", "-ex", "bt", &self.params.pgbin])
1840 0 : .arg(&core_path)
1841 0 : .output();
1842 :
1843 : // Try lldb if no gdb is found -- that is handy for local testing on macOS
1844 0 : let backtrace = match backtrace {
1845 0 : Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
1846 0 : warn!("cannot find gdb, trying lldb");
1847 0 : Command::new("lldb")
1848 0 : .arg("-c")
1849 0 : .arg(&core_path)
1850 0 : .args(["--batch", "-o", "bt all", "-o", "quit"])
1851 0 : .output()
1852 : }
1853 0 : _ => backtrace,
1854 0 : }?;
1855 :
1856 0 : warn!(
1857 0 : "core dump backtrace: {}",
1858 0 : String::from_utf8_lossy(&backtrace.stdout)
1859 : );
1860 0 : warn!(
1861 0 : "debugger stderr: {}",
1862 0 : String::from_utf8_lossy(&backtrace.stderr)
1863 : );
1864 : }
1865 :
1866 0 : Ok(())
1867 0 : }
1868 :
1869 : /// Select `pg_stat_statements` data and return it as stringified JSON
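///
/// The returned string has the shape `{"pg_stat_statements": [<row_json>, ...]}`;
/// if the query fails (e.g. `pg_stat_statements` is not installed), an empty
/// list is returned instead.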
1870 0 : pub async fn collect_insights(&self) -> String {
1871 0 : let mut result_rows: Vec<String> = Vec::new();
1872 0 : let conf = self.get_tokio_conn_conf(Some("compute_ctl:collect_insights"));
1873 0 : let connect_result = conf.connect(NoTls).await;
1874 0 : let (client, connection) = connect_result.unwrap();
1875 0 : tokio::spawn(async move {
1876 0 : if let Err(e) = connection.await {
1877 0 : eprintln!("connection error: {}", e);
1878 0 : }
1879 0 : });
1880 0 : let result = client
1881 0 : .simple_query(
1882 0 : "SELECT
1883 0 : row_to_json(pg_stat_statements)
1884 0 : FROM
1885 0 : pg_stat_statements
1886 0 : WHERE
1887 0 : userid != 'cloud_admin'::regrole::oid
1888 0 : ORDER BY
1889 0 : (mean_exec_time + mean_plan_time) DESC
1890 0 : LIMIT 100",
1891 0 : )
1892 0 : .await;
1893 :
1894 0 : if let Ok(raw_rows) = result {
1895 0 : for message in raw_rows.iter() {
1896 0 : if let postgres::SimpleQueryMessage::Row(row) = message {
1897 0 : if let Some(json) = row.get(0) {
1898 0 : result_rows.push(json.to_string());
1899 0 : }
1900 0 : }
1901 : }
1902 :
1903 0 : format!("{{\"pg_stat_statements\": [{}]}}", result_rows.join(","))
1904 : } else {
1905 0 : "{{\"pg_stat_statements\": []}}".to_string()
1906 : }
1907 0 : }
1908 :
1909 : // Download an archive, unzip it, and place the files in the correct locations.
1910 0 : pub async fn download_extension(
1911 0 : &self,
1912 0 : real_ext_name: String,
1913 0 : ext_path: RemotePath,
1914 0 : ) -> Result<u64, DownloadError> {
1915 0 : let remote_ext_base_url =
1916 0 : self.params
1917 0 : .remote_ext_base_url
1918 0 : .as_ref()
1919 0 : .ok_or(DownloadError::BadInput(anyhow::anyhow!(
1920 0 : "Remote extensions storage is not configured",
1921 0 : )))?;
1922 :
1923 0 : let ext_archive_name = ext_path.object_name().expect("bad path");
1924 0 :
1925 0 : let mut first_try = false;
1926 0 : if !self
1927 0 : .ext_download_progress
1928 0 : .read()
1929 0 : .expect("lock err")
1930 0 : .contains_key(ext_archive_name)
1931 0 : {
1932 0 : self.ext_download_progress
1933 0 : .write()
1934 0 : .expect("lock err")
1935 0 : .insert(ext_archive_name.to_string(), (Utc::now(), false));
1936 0 : first_try = true;
1937 0 : }
1938 0 : let (download_start, download_completed) =
1939 0 : self.ext_download_progress.read().expect("lock err")[ext_archive_name];
1940 0 : let start_time_delta = Utc::now()
1941 0 : .signed_duration_since(download_start)
1942 0 : .to_std()
1943 0 : .unwrap()
1944 0 : .as_millis() as u64;
1945 :
1946 : // how long to wait for extension download if it was started by another process
1947 : const HANG_TIMEOUT: u64 = 3000; // milliseconds
1948 :
1949 0 : if download_completed {
1950 0 : info!("extension already downloaded, skipping re-download");
1951 0 : return Ok(0);
1952 0 : } else if start_time_delta < HANG_TIMEOUT && !first_try {
1953 0 : info!(
1954 0 : "download {ext_archive_name} already started by another process, hanging untill completion or timeout"
1955 : );
1956 0 : let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500));
1957 : loop {
1958 0 : info!("waiting for download");
1959 0 : interval.tick().await;
1960 0 : let (_, download_completed_now) =
1961 0 : self.ext_download_progress.read().expect("lock")[ext_archive_name];
1962 0 : if download_completed_now {
1963 0 : info!("download finished by whoever else downloaded it");
1964 0 : return Ok(0);
1965 0 : }
1966 : }
1967 : // NOTE: if the other process never completes, the above loop is
1968 : // terminated by the timeout of the enclosing download function
1969 0 : }
1970 0 :
1971 0 : // If the extension hasn't been downloaded before, or the previous
1972 0 : // download attempt started at least HANG_TIMEOUT ms ago,
1973 0 : // then we try to download it here.
1974 0 : info!("downloading new extension {ext_archive_name}");
1975 :
1976 0 : let download_size = extension_server::download_extension(
1977 0 : &real_ext_name,
1978 0 : &ext_path,
1979 0 : remote_ext_base_url,
1980 0 : &self.params.pgbin,
1981 0 : )
1982 0 : .await
1983 0 : .map_err(DownloadError::Other);
1984 0 :
1985 0 : if download_size.is_ok() {
1986 0 : self.ext_download_progress
1987 0 : .write()
1988 0 : .expect("bad lock")
1989 0 : .insert(ext_archive_name.to_string(), (download_start, true));
1990 0 : }
1991 :
1992 0 : download_size
1993 0 : }
1994 :
1995 0 : pub async fn set_role_grants(
1996 0 : &self,
1997 0 : db_name: &PgIdent,
1998 0 : schema_name: &PgIdent,
1999 0 : privileges: &[Privilege],
2000 0 : role_name: &PgIdent,
2001 0 : ) -> Result<()> {
2002 : use tokio_postgres::NoTls;
2003 :
2004 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:set_role_grants"));
2005 0 : conf.dbname(db_name);
2006 :
2007 0 : let (db_client, conn) = conf
2008 0 : .connect(NoTls)
2009 0 : .await
2010 0 : .context("Failed to connect to the database")?;
2011 0 : tokio::spawn(conn);
2012 0 :
2013 0 : // TODO: support other types of grants apart from schemas?
2014 0 :
2015 0 : // check the role grants first - to gracefully handle read-replicas.
2016 0 : let select = "SELECT privilege_type
2017 0 : FROM pg_namespace
2018 0 : JOIN LATERAL (SELECT * FROM aclexplode(nspacl) AS x) acl ON true
2019 0 : JOIN pg_user users ON acl.grantee = users.usesysid
2020 0 : WHERE users.usename = $1
2021 0 : AND nspname = $2";
2022 0 : let rows = db_client
2023 0 : .query(select, &[role_name, schema_name])
2024 0 : .await
2025 0 : .with_context(|| format!("Failed to execute query: {select}"))?;
2026 :
2027 0 : let already_granted: HashSet<String> = rows.into_iter().map(|row| row.get(0)).collect();
2028 0 :
2029 0 : let grants = privileges
2030 0 : .iter()
2031 0 : .filter(|p| !already_granted.contains(p.as_str()))
2032 0 : // should not be quoted as it's part of the command.
2033 0 : // is already sanitized so it's ok
2034 0 : .map(|p| p.as_str())
2035 0 : .join(", ");
2036 0 :
2037 0 : if !grants.is_empty() {
2038 : // quote the schema and role name as identifiers to sanitize them.
2039 0 : let schema_name = schema_name.pg_quote();
2040 0 : let role_name = role_name.pg_quote();
2041 0 :
2042 0 : let query = format!("GRANT {grants} ON SCHEMA {schema_name} TO {role_name}",);
2043 0 : db_client
2044 0 : .simple_query(&query)
2045 0 : .await
2046 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
2047 0 : }
2048 :
2049 0 : Ok(())
2050 0 : }
2051 :
2052 0 : pub async fn install_extension(
2053 0 : &self,
2054 0 : ext_name: &PgIdent,
2055 0 : db_name: &PgIdent,
2056 0 : ext_version: ExtVersion,
2057 0 : ) -> Result<ExtVersion> {
2058 : use tokio_postgres::NoTls;
2059 :
2060 0 : let mut conf = self.get_tokio_conn_conf(Some("compute_ctl:install_extension"));
2061 0 : conf.dbname(db_name);
2062 :
2063 0 : let (db_client, conn) = conf
2064 0 : .connect(NoTls)
2065 0 : .await
2066 0 : .context("Failed to connect to the database")?;
2067 0 : tokio::spawn(conn);
2068 0 :
2069 0 : let version_query = "SELECT extversion FROM pg_extension WHERE extname = $1";
2070 0 : let version: Option<ExtVersion> = db_client
2071 0 : .query_opt(version_query, &[&ext_name])
2072 0 : .await
2073 0 : .with_context(|| format!("Failed to execute query: {}", version_query))?
2074 0 : .map(|row| row.get(0));
2075 0 :
2076 0 : // sanitize the inputs as postgres idents.
2077 0 : let ext_name: String = ext_name.pg_quote();
2078 0 : let quoted_version: String = ext_version.pg_quote();
2079 :
2080 0 : if let Some(installed_version) = version {
2081 0 : if installed_version == ext_version {
2082 0 : return Ok(installed_version);
2083 0 : }
2084 0 : let query = format!("ALTER EXTENSION {ext_name} UPDATE TO {quoted_version}");
2085 0 : db_client
2086 0 : .simple_query(&query)
2087 0 : .await
2088 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
2089 : } else {
2090 0 : let query =
2091 0 : format!("CREATE EXTENSION IF NOT EXISTS {ext_name} WITH VERSION {quoted_version}");
2092 0 : db_client
2093 0 : .simple_query(&query)
2094 0 : .await
2095 0 : .with_context(|| format!("Failed to execute query: {}", query))?;
2096 : }
2097 :
2098 0 : Ok(ext_version)
2099 0 : }
2100 :
2101 0 : pub async fn prepare_preload_libraries(
2102 0 : &self,
2103 0 : spec: &ComputeSpec,
2104 0 : ) -> Result<RemoteExtensionMetrics> {
2105 0 : if self.params.remote_ext_base_url.is_none() {
2106 0 : return Ok(RemoteExtensionMetrics {
2107 0 : num_ext_downloaded: 0,
2108 0 : largest_ext_size: 0,
2109 0 : total_ext_download_size: 0,
2110 0 : });
2111 0 : }
2112 0 : let remote_extensions = spec
2113 0 : .remote_extensions
2114 0 : .as_ref()
2115 0 : .ok_or(anyhow::anyhow!("Remote extensions are not configured"))?;
2116 :
2117 0 : info!("parse shared_preload_libraries from spec.cluster.settings");
2118 0 : let mut libs_vec = Vec::new();
2119 0 : if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
2120 0 : libs_vec = libs
2121 0 : .split(&[',', '\'', ' '])
2122 0 : .filter(|s| *s != "neon" && !s.is_empty())
2123 0 : .map(str::to_string)
2124 0 : .collect();
2125 0 : }
2126 0 : info!("parse shared_preload_libraries from provided postgresql.conf");
2127 :
2128 : // this code path is used by neon_local and the python tests
2129 0 : if let Some(conf) = &spec.cluster.postgresql_conf {
2130 0 : let conf_lines = conf.split('\n').collect::<Vec<&str>>();
2131 0 : let mut shared_preload_libraries_line = "";
2132 0 : for line in conf_lines {
2133 0 : if line.starts_with("shared_preload_libraries") {
2134 0 : shared_preload_libraries_line = line;
2135 0 : }
2136 : }
2137 0 : let mut preload_libs_vec = Vec::new();
2138 0 : if let Some(libs) = shared_preload_libraries_line.split("='").nth(1) {
2139 0 : preload_libs_vec = libs
2140 0 : .split(&[',', '\'', ' '])
2141 0 : .filter(|s| *s != "neon" && !s.is_empty())
2142 0 : .map(str::to_string)
2143 0 : .collect();
2144 0 : }
2145 0 : libs_vec.extend(preload_libs_vec);
2146 0 : }
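// For example (illustrative): a line like
//   shared_preload_libraries='neon,pg_stat_statements'
// parses to ["pg_stat_statements"] here; "neon" is filtered out above since it
// never needs to be downloaded separately.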
2147 :
2148 : // Don't try to download libraries that are not in the index.
2149 : // Assume that they are already present locally.
2150 0 : libs_vec.retain(|lib| remote_extensions.library_index.contains_key(lib));
2151 0 :
2152 0 : info!("Downloading to shared preload libraries: {:?}", &libs_vec);
2153 :
2154 0 : let mut download_tasks = Vec::new();
2155 0 : for library in &libs_vec {
2156 0 : let (ext_name, ext_path) =
2157 0 : remote_extensions.get_ext(library, true, &BUILD_TAG, &self.params.pgversion)?;
2158 0 : download_tasks.push(self.download_extension(ext_name, ext_path));
2159 : }
2160 0 : let results = join_all(download_tasks).await;
2161 :
2162 0 : let mut remote_ext_metrics = RemoteExtensionMetrics {
2163 0 : num_ext_downloaded: 0,
2164 0 : largest_ext_size: 0,
2165 0 : total_ext_download_size: 0,
2166 0 : };
2167 0 : for result in results {
2168 0 : let download_size = match result {
2169 0 : Ok(res) => {
2170 0 : remote_ext_metrics.num_ext_downloaded += 1;
2171 0 : res
2172 : }
2173 0 : Err(err) => {
2174 0 : // if we failed to download an extension, we don't want to fail the whole
2175 0 : // process, but we do want to log the error
2176 0 : error!("Failed to download extension: {}", err);
2177 0 : 0
2178 : }
2179 : };
2180 :
2181 0 : remote_ext_metrics.largest_ext_size =
2182 0 : std::cmp::max(remote_ext_metrics.largest_ext_size, download_size);
2183 0 : remote_ext_metrics.total_ext_download_size += download_size;
2184 : }
2185 0 : Ok(remote_ext_metrics)
2186 0 : }
2187 :
2188 : /// Waits until the current thread receives a state-changed notification and
2189 : /// the pageserver connection string has changed.
2190 : ///
2191 : /// The operation will time out after a specified duration.
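///
/// Note the `Condvar::wait_timeout_while` contract used below: the closure is
/// a wait condition, so waiting continues for as long as it returns `true`,
/// i.e. while the connection string stays unchanged.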
2192 0 : pub fn wait_timeout_while_pageserver_connstr_unchanged(&self, duration: Duration) {
2193 0 : let state = self.state.lock().unwrap();
2194 0 : let old_pageserver_connstr = state
2195 0 : .pspec
2196 0 : .as_ref()
2197 0 : .expect("spec must be set")
2198 0 : .pageserver_connstr
2199 0 : .clone();
2200 0 : let mut unchanged = true;
2201 0 : let _ = self
2202 0 : .state_changed
2203 0 : .wait_timeout_while(state, duration, |s| {
2204 0 : let pageserver_connstr = &s
2205 0 : .pspec
2206 0 : .as_ref()
2207 0 : .expect("spec must be set")
2208 0 : .pageserver_connstr;
2209 0 : unchanged = pageserver_connstr == &old_pageserver_connstr;
2210 0 : unchanged
2211 0 : })
2212 0 : .unwrap();
2213 0 : if !unchanged {
2214 0 : info!("Pageserver config changed");
2215 0 : }
2216 0 : }
2217 :
2218 0 : pub fn spawn_extension_stats_task(&self) {
2219 0 : let conf = self.tokio_conn_conf.clone();
2220 0 : let installed_extensions_collection_interval =
2221 0 : self.params.installed_extensions_collection_interval;
2222 0 : tokio::spawn(async move {
2223 0 : // An initial sleep is added to ensure that two collections don't happen at the same time.
2224 0 : // The first collection happens during compute startup.
2225 0 : tokio::time::sleep(tokio::time::Duration::from_secs(
2226 0 : installed_extensions_collection_interval,
2227 0 : ))
2228 0 : .await;
2229 0 : let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(
2230 0 : installed_extensions_collection_interval,
2231 0 : ));
2232 : loop {
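// Note: a tokio interval completes its first tick immediately, so the
// first collection of this loop runs right after the initial sleep above.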
2233 0 : interval.tick().await;
2234 0 : let _ = installed_extensions(conf.clone()).await;
2235 : }
2236 0 : });
2237 0 : }
2238 : }
2239 :
2240 0 : pub async fn installed_extensions(conf: tokio_postgres::Config) -> Result<()> {
2241 0 : let res = get_installed_extensions(conf).await;
2242 0 : match res {
2243 0 : Ok(extensions) => {
2244 0 : info!(
2245 0 : "[NEON_EXT_STAT] {}",
2246 0 : serde_json::to_string(&extensions).expect("failed to serialize extensions list")
2247 : );
2248 : }
2249 0 : Err(err) => error!("could not get installed extensions: {err:?}"),
2250 : }
2251 0 : Ok(())
2252 0 : }
2253 :
2254 0 : pub fn forward_termination_signal() {
2255 0 : let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
2256 0 : if ss_pid != 0 {
2257 0 : let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
2258 0 : kill(ss_pid, Signal::SIGTERM).ok();
2259 0 : }
2260 0 : let pg_pid = PG_PID.load(Ordering::SeqCst);
2261 0 : if pg_pid != 0 {
2262 0 : let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
2263 0 : // Use 'fast' shutdown (SIGINT) because it also creates a shutdown checkpoint, which is important for
2264 0 : // read-only replicas (ROs) to get the list of running xacts faster instead of going through the CLOG.
2265 0 : // See https://www.postgresql.org/docs/current/server-shutdown.html for the list of modes and signals.
2266 0 : kill(pg_pid, Signal::SIGINT).ok();
2267 0 : }
2268 0 : }
2269 :
2270 : // Helper trait wrapping JoinSet::spawn_blocking(f) so that the current
2271 : // tracing span is propagated to the spawned thread.
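//
// Illustrative usage (hypothetical call site; `expensive_sync_work` is made up):
// log lines emitted inside the closure carry the fields of the caller's span.
//
// let _span = tracing::info_span!("startup", compute_id = "c-123").entered();
// let mut tasks = tokio::task::JoinSet::new();
// tasks.spawn_blocking_child(|| expensive_sync_work());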
2272 : trait JoinSetExt<T> {
2273 : fn spawn_blocking_child<F>(&mut self, f: F) -> tokio::task::AbortHandle
2274 : where
2275 : F: FnOnce() -> T + Send + 'static,
2276 : T: Send;
2277 : }
2278 :
2279 : impl<T: 'static> JoinSetExt<T> for tokio::task::JoinSet<T> {
2280 0 : fn spawn_blocking_child<F>(&mut self, f: F) -> tokio::task::AbortHandle
2281 0 : where
2282 0 : F: FnOnce() -> T + Send + 'static,
2283 0 : T: Send,
2284 0 : {
2285 0 : let sp = tracing::Span::current();
2286 0 : self.spawn_blocking(move || {
2287 0 : let _e = sp.enter();
2288 0 : f()
2289 0 : })
2290 0 : }
2291 : }
|