Line data Source code
1 : use std::collections::HashMap;
2 : use std::env;
3 : use std::fs;
4 : use std::io::BufRead;
5 : use std::os::unix::fs::{symlink, PermissionsExt};
6 : use std::path::Path;
7 : use std::process::{Command, Stdio};
8 : use std::str::FromStr;
9 : use std::sync::atomic::AtomicU32;
10 : use std::sync::atomic::Ordering;
11 : use std::sync::{Condvar, Mutex, RwLock};
12 : use std::thread;
13 : use std::time::Instant;
14 :
15 : use anyhow::{Context, Result};
16 : use chrono::{DateTime, Utc};
17 : use futures::future::join_all;
18 : use futures::stream::FuturesUnordered;
19 : use futures::StreamExt;
20 : use postgres::{Client, NoTls};
21 : use tokio;
22 : use tokio_postgres;
23 : use tracing::{debug, error, info, instrument, warn};
24 : use utils::id::{TenantId, TimelineId};
25 : use utils::lsn::Lsn;
26 :
27 : use compute_api::responses::{ComputeMetrics, ComputeStatus};
28 : use compute_api::spec::{ComputeFeature, ComputeMode, ComputeSpec};
29 : use utils::measured_stream::MeasuredReader;
30 :
31 : use nix::sys::signal::{kill, Signal};
32 :
33 : use remote_storage::{DownloadError, RemotePath};
34 :
35 : use crate::checker::create_availability_check_data;
36 : use crate::logger::inlinify;
37 : use crate::pg_helpers::*;
38 : use crate::spec::*;
39 : use crate::sync_sk::{check_if_synced, ping_safekeeper};
40 : use crate::{config, extension_server};
41 :
42 : pub static SYNC_SAFEKEEPERS_PID: AtomicU32 = AtomicU32::new(0);
43 : pub static PG_PID: AtomicU32 = AtomicU32::new(0);
44 :
45 : /// Compute node info shared across several `compute_ctl` threads.
46 : pub struct ComputeNode {
47 : // Url type maintains proper escaping
48 : pub connstr: url::Url,
49 : pub pgdata: String,
50 : pub pgbin: String,
51 : pub pgversion: String,
52 : /// We should only allow live configuration / reconfiguration of the compute node if
53 : /// it uses the 'pull model', i.e. it can go to the control plane and fetch
54 : /// the latest configuration. Otherwise, there could be a case:
55 : /// - we start compute with some spec provided as argument
56 : /// - we push new spec and it does reconfiguration
57 : /// - but then something happens and compute pod / VM is destroyed,
58 : /// so k8s controller starts it again with the **old** spec
59 : /// The same applies to empty computes:
60 : /// - we started compute without any spec
61 : /// - we push spec and it does configuration
62 : /// - but then it is restarted without any spec again
63 : pub live_config_allowed: bool,
64 : /// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
65 : /// To allow the HTTP API server to serve status requests while configuration
66 : /// is in progress, the lock should be held only for short periods of time to do
67 : /// a read or a write, not for the whole configuration process.
68 : pub state: Mutex<ComputeState>,
69 : /// `Condvar` to allow notifying waiters about state changes.
70 : pub state_changed: Condvar,
71 : /// The address of the extension storage proxy gateway.
72 : pub ext_remote_storage: Option<String>,
73 : // key: ext_archive_name, value: (download start time, download_completed)
74 : pub ext_download_progress: RwLock<HashMap<String, (DateTime<Utc>, bool)>>,
75 : pub build_tag: String,
76 : }
77 :
78 : // store some metrics about download size that might impact startup time
79 0 : #[derive(Clone, Debug)]
80 : pub struct RemoteExtensionMetrics {
81 : num_ext_downloaded: u64,
82 : largest_ext_size: u64,
83 : total_ext_download_size: u64,
84 : }
85 :
86 0 : #[derive(Clone, Debug)]
87 : pub struct ComputeState {
88 : pub start_time: DateTime<Utc>,
89 : pub status: ComputeStatus,
90 : /// Timestamp of the last Postgres activity. It could be `None` if
91 : /// compute wasn't used since start.
92 : pub last_active: Option<DateTime<Utc>>,
93 : pub error: Option<String>,
94 : pub pspec: Option<ParsedSpec>,
95 : pub metrics: ComputeMetrics,
96 : }
97 :
98 : impl ComputeState {
99 0 : pub fn new() -> Self {
100 0 : Self {
101 0 : start_time: Utc::now(),
102 0 : status: ComputeStatus::Empty,
103 0 : last_active: None,
104 0 : error: None,
105 0 : pspec: None,
106 0 : metrics: ComputeMetrics::default(),
107 0 : }
108 0 : }
109 : }
110 :
111 : impl Default for ComputeState {
112 0 : fn default() -> Self {
113 0 : Self::new()
114 0 : }
115 : }
116 :
117 0 : #[derive(Clone, Debug)]
118 : pub struct ParsedSpec {
119 : pub spec: ComputeSpec,
120 : pub tenant_id: TenantId,
121 : pub timeline_id: TimelineId,
122 : pub pageserver_connstr: String,
123 : pub safekeeper_connstrings: Vec<String>,
124 : pub storage_auth_token: Option<String>,
125 : }
126 :
127 : impl TryFrom<ComputeSpec> for ParsedSpec {
128 : type Error = String;
129 0 : fn try_from(spec: ComputeSpec) -> Result<Self, String> {
130 : // Extract the options from the spec file that are needed to connect to
131 : // the storage system.
132 : //
133 : // For backwards-compatibility, the top-level fields in the spec file
134 : // may be empty. In that case, we need to dig them from the GUCs in the
135 : // cluster.settings field.
136 0 : let pageserver_connstr = spec
137 0 : .pageserver_connstring
138 0 : .clone()
139 0 : .or_else(|| spec.cluster.settings.find("neon.pageserver_connstring"))
140 0 : .ok_or("pageserver connstr should be provided")?;
141 0 : let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() {
142 0 : if matches!(spec.mode, ComputeMode::Primary) {
143 0 : spec.cluster
144 0 : .settings
145 0 : .find("neon.safekeepers")
146 0 : .ok_or("safekeeper connstrings should be provided")?
147 0 : .split(',')
148 0 : .map(|str| str.to_string())
149 0 : .collect()
150 : } else {
151 0 : vec![]
152 : }
153 : } else {
154 0 : spec.safekeeper_connstrings.clone()
155 : };
156 0 : let storage_auth_token = spec.storage_auth_token.clone();
157 0 : let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
158 0 : tenant_id
159 : } else {
160 0 : spec.cluster
161 0 : .settings
162 0 : .find("neon.tenant_id")
163 0 : .ok_or("tenant id should be provided")
164 0 : .map(|s| TenantId::from_str(&s))?
165 0 : .or(Err("invalid tenant id"))?
166 : };
167 0 : let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
168 0 : timeline_id
169 : } else {
170 0 : spec.cluster
171 0 : .settings
172 0 : .find("neon.timeline_id")
173 0 : .ok_or("timeline id should be provided")
174 0 : .map(|s| TimelineId::from_str(&s))?
175 0 : .or(Err("invalid timeline id"))?
176 : };
177 :
178 0 : Ok(ParsedSpec {
179 0 : spec,
180 0 : pageserver_connstr,
181 0 : safekeeper_connstrings,
182 0 : storage_auth_token,
183 0 : tenant_id,
184 0 : timeline_id,
185 0 : })
186 0 : }
187 : }
188 :
189 : /// If we are a VM, returns a [`Command`] that will run in the `neon-postgres`
190 : /// cgroup. Otherwise returns the default `Command::new(cmd)`
191 : ///
192 : /// This function should be used to start postgres, as it will start it in the
193 : /// neon-postgres cgroup if we are a VM. This allows autoscaling to control
194 : /// postgres' resource usage. The cgroup will exist in VMs because vm-builder
195 : /// creates it during the sysinit phase of its inittab.
196 0 : fn maybe_cgexec(cmd: &str) -> Command {
197 0 : // The cplane sets this env var for autoscaling computes.
198 0 : // Use `var_os` so we don't have to worry about the variable being valid
199 0 : // unicode. That should never be a concern, but just in case.
200 0 : if env::var_os("AUTOSCALING").is_some() {
201 0 : let mut command = Command::new("cgexec");
202 0 : command.args(["-g", "memory:neon-postgres"]);
203 0 : command.arg(cmd);
204 0 : command
205 : } else {
206 0 : Command::new(cmd)
207 : }
208 0 : }
209 :
210 : /// Create the special neon_superuser role, a slightly nerfed version of a real superuser
211 : /// that we give to customers.
212 0 : #[instrument(skip_all)]
213 : fn create_neon_superuser(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
214 : let roles = spec
215 : .cluster
216 : .roles
217 : .iter()
218 0 : .map(|r| escape_literal(&r.name))
219 : .collect::<Vec<_>>();
220 :
221 : let dbs = spec
222 : .cluster
223 : .databases
224 : .iter()
225 0 : .map(|db| escape_literal(&db.name))
226 : .collect::<Vec<_>>();
227 :
228 : let roles_decl = if roles.is_empty() {
229 : String::from("roles text[] := NULL;")
230 : } else {
231 : format!(
232 : r#"
233 : roles text[] := ARRAY(SELECT rolname
234 : FROM pg_catalog.pg_roles
235 : WHERE rolname IN ({}));"#,
236 : roles.join(", ")
237 : )
238 : };
239 :
240 : let database_decl = if dbs.is_empty() {
241 : String::from("dbs text[] := NULL;")
242 : } else {
243 : format!(
244 : r#"
245 : dbs text[] := ARRAY(SELECT datname
246 : FROM pg_catalog.pg_database
247 : WHERE datname IN ({}));"#,
248 : dbs.join(", ")
249 : )
250 : };
251 :
252 : // ALL PRIVILEGES grants CREATE, CONNECT, and TEMPORARY on all databases
253 : // (see https://www.postgresql.org/docs/current/ddl-priv.html)
254 : let query = format!(
255 : r#"
256 : DO $$
257 : DECLARE
258 : r text;
259 : {}
260 : {}
261 : BEGIN
262 : IF NOT EXISTS (
263 : SELECT FROM pg_catalog.pg_roles WHERE rolname = 'neon_superuser')
264 : THEN
265 : CREATE ROLE neon_superuser CREATEDB CREATEROLE NOLOGIN REPLICATION BYPASSRLS IN ROLE pg_read_all_data, pg_write_all_data;
266 : IF array_length(roles, 1) IS NOT NULL THEN
267 : EXECUTE format('GRANT neon_superuser TO %s',
268 : array_to_string(ARRAY(SELECT quote_ident(x) FROM unnest(roles) as x), ', '));
269 : FOREACH r IN ARRAY roles LOOP
270 : EXECUTE format('ALTER ROLE %s CREATEROLE CREATEDB', quote_ident(r));
271 : END LOOP;
272 : END IF;
273 : IF array_length(dbs, 1) IS NOT NULL THEN
274 : EXECUTE format('GRANT ALL PRIVILEGES ON DATABASE %s TO neon_superuser',
275 : array_to_string(ARRAY(SELECT quote_ident(x) FROM unnest(dbs) as x), ', '));
276 : END IF;
277 : END IF;
278 : END
279 : $$;"#,
280 : roles_decl, database_decl,
281 : );
282 0 : info!("Neon superuser created: {}", inlinify(&query));
283 : client
284 : .simple_query(&query)
285 0 : .map_err(|e| anyhow::anyhow!(e).context(query))?;
286 : Ok(())
287 : }
288 :
289 : impl ComputeNode {
290 : /// Check that compute node has corresponding feature enabled.
291 0 : pub fn has_feature(&self, feature: ComputeFeature) -> bool {
292 0 : let state = self.state.lock().unwrap();
293 :
294 0 : if let Some(s) = state.pspec.as_ref() {
295 0 : s.spec.features.contains(&feature)
296 : } else {
297 0 : false
298 : }
299 0 : }
300 :
301 0 : pub fn set_status(&self, status: ComputeStatus) {
302 0 : let mut state = self.state.lock().unwrap();
303 0 : state.status = status;
304 0 : self.state_changed.notify_all();
305 0 : }
306 :
307 0 : pub fn get_status(&self) -> ComputeStatus {
308 0 : self.state.lock().unwrap().status
309 0 : }
310 :
311 : // Remove the `pgdata` directory and create it again with the right permissions.
312 0 : fn create_pgdata(&self) -> Result<()> {
313 0 : // Ignore removal error, likely it is a 'No such file or directory (os error 2)'.
314 0 : // If it is something different then create_dir() will error out anyway.
315 0 : let _ok = fs::remove_dir_all(&self.pgdata);
316 0 : fs::create_dir(&self.pgdata)?;
317 0 : fs::set_permissions(&self.pgdata, fs::Permissions::from_mode(0o700))?;
318 :
319 0 : Ok(())
320 0 : }
321 :
322 : // Get a basebackup from the pageserver over a libpq connection using `connstr` and
323 : // unpack it into the `pgdata` directory, overwriting all its previous content.
324 0 : #[instrument(skip_all, fields(%lsn))]
325 : fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
326 : let spec = compute_state.pspec.as_ref().expect("spec must be set");
327 : let start_time = Instant::now();
328 :
329 : let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
330 : let mut config = postgres::Config::from_str(shard0_connstr)?;
331 :
332 : // Use the storage auth token from the config file, if given.
333 : // Note: this overrides any password set in the connection string.
334 : if let Some(storage_auth_token) = &spec.storage_auth_token {
335 0 : info!("Got storage auth token from spec file");
336 : config.password(storage_auth_token);
337 : } else {
338 0 : info!("Storage auth token not set");
339 : }
340 :
341 : // Connect to pageserver
342 : let mut client = config.connect(NoTls)?;
343 : let pageserver_connect_micros = start_time.elapsed().as_micros() as u64;
344 :
345 : let basebackup_cmd = match lsn {
346 : // HACK We don't use compression on first start (Lsn(0)) because there's no API for it
347 : Lsn(0) => format!("basebackup {} {}", spec.tenant_id, spec.timeline_id),
348 : _ => format!(
349 : "basebackup {} {} {} --gzip",
350 : spec.tenant_id, spec.timeline_id, lsn
351 : ),
352 : };
353 :
354 : let copyreader = client.copy_out(basebackup_cmd.as_str())?;
355 : let mut measured_reader = MeasuredReader::new(copyreader);
356 :
357 : // Check the magic number to see if it's a gzip or not. Even though
358 : // we might explicitly ask for gzip, an old pageserver with no implementation
359 : // of gzip compression might send us uncompressed data. After some time
360 : // passes we can assume all pageservers know how to compress and we can
361 : // delete this check.
362 : //
363 : // If the data is not gzip, it will be tar. It will not be mistakenly
364 : // recognized as gzip because tar starts with an ascii encoding of a filename,
365 : // and 0x1f and 0x8b are unlikely first characters for any filename. Moreover,
366 : // we send the "global" directory first from the pageserver, so it definitely
367 : // won't be recognized as gzip.
368 : let mut bufreader = std::io::BufReader::new(&mut measured_reader);
369 : let gzip = {
370 : let peek = bufreader.fill_buf().unwrap();
371 : peek[0] == 0x1f && peek[1] == 0x8b
372 : };
373 :
374 : // Read the archive directly from the `CopyOutReader`
375 : //
376 : // Set `ignore_zeros` so that unpack() reads all the Copy data and
377 : // doesn't stop at the end-of-archive marker. Otherwise, if the server
378 : // sends an Error after finishing the tarball, we will not notice it.
379 : if gzip {
380 : let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut bufreader));
381 : ar.set_ignore_zeros(true);
382 : ar.unpack(&self.pgdata)?;
383 : } else {
384 : let mut ar = tar::Archive::new(&mut bufreader);
385 : ar.set_ignore_zeros(true);
386 : ar.unpack(&self.pgdata)?;
387 : };
388 :
389 : // Report metrics
390 : let mut state = self.state.lock().unwrap();
391 : state.metrics.pageserver_connect_micros = pageserver_connect_micros;
392 : state.metrics.basebackup_bytes = measured_reader.get_byte_count() as u64;
393 : state.metrics.basebackup_ms = start_time.elapsed().as_millis() as u64;
394 : Ok(())
395 : }
396 :
397 : // Gets the basebackup in a retry loop
398 0 : #[instrument(skip_all, fields(%lsn))]
399 : pub fn get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
400 : let mut retry_period_ms = 500;
401 : let mut attempts = 0;
402 : let max_attempts = 5;
403 : loop {
404 : let result = self.try_get_basebackup(compute_state, lsn);
405 : match result {
406 : Ok(_) => {
407 : return result;
408 : }
409 : Err(ref e) if attempts < max_attempts => {
410 0 : warn!(
411 0 : "Failed to get basebackup: {} (attempt {}/{})",
412 0 : e, attempts, max_attempts
413 0 : );
414 : std::thread::sleep(std::time::Duration::from_millis(retry_period_ms));
415 : retry_period_ms *= 2;
416 : }
417 : Err(_) => {
418 : return result;
419 : }
420 : }
421 : attempts += 1;
422 : }
423 : }
424 :
425 0 : pub async fn check_safekeepers_synced_async(
426 0 : &self,
427 0 : compute_state: &ComputeState,
428 0 : ) -> Result<Option<Lsn>> {
429 0 : // Construct a connection config for each safekeeper
430 0 : let pspec: ParsedSpec = compute_state
431 0 : .pspec
432 0 : .as_ref()
433 0 : .expect("spec must be set")
434 0 : .clone();
435 0 : let sk_connstrs: Vec<String> = pspec.safekeeper_connstrings.clone();
436 0 : let sk_configs = sk_connstrs.into_iter().map(|connstr| {
437 0 : // Format connstr
438 0 : let id = connstr.clone();
439 0 : let connstr = format!("postgresql://no_user@{}", connstr);
440 0 : let options = format!(
441 0 : "-c timeline_id={} tenant_id={}",
442 0 : pspec.timeline_id, pspec.tenant_id
443 0 : );
444 0 :
445 0 : // Construct client
446 0 : let mut config = tokio_postgres::Config::from_str(&connstr).unwrap();
447 0 : config.options(&options);
448 0 : if let Some(storage_auth_token) = pspec.storage_auth_token.clone() {
449 0 : config.password(storage_auth_token);
450 0 : }
451 :
452 0 : (id, config)
453 0 : });
454 0 :
455 0 : // Create task set to query all safekeepers
456 0 : let mut tasks = FuturesUnordered::new();
457 0 : let quorum = sk_configs.len() / 2 + 1;
458 0 : for (id, config) in sk_configs {
459 0 : let timeout = tokio::time::Duration::from_millis(100);
460 0 : let task = tokio::time::timeout(timeout, ping_safekeeper(id, config));
461 0 : tasks.push(tokio::spawn(task));
462 0 : }
463 :
464 : // Get a quorum of responses or errors
465 0 : let mut responses = Vec::new();
466 0 : let mut join_errors = Vec::new();
467 0 : let mut task_errors = Vec::new();
468 0 : let mut timeout_errors = Vec::new();
469 0 : while let Some(response) = tasks.next().await {
470 0 : match response {
471 0 : Ok(Ok(Ok(r))) => responses.push(r),
472 0 : Ok(Ok(Err(e))) => task_errors.push(e),
473 0 : Ok(Err(e)) => timeout_errors.push(e),
474 0 : Err(e) => join_errors.push(e),
475 : };
476 0 : if responses.len() >= quorum {
477 0 : break;
478 0 : }
479 0 : if join_errors.len() + task_errors.len() + timeout_errors.len() >= quorum {
480 0 : break;
481 0 : }
482 : }
483 :
484 : // In case of error, log and fail the check, but don't crash.
485 : // We're playing it safe because these errors could be transient
486 : // and we don't yet retry. Also being careful here allows us to
487 : // be backwards compatible with safekeepers that don't have the
488 : // TIMELINE_STATUS API yet.
489 0 : if responses.len() < quorum {
490 0 : error!(
491 0 : "failed sync safekeepers check {:?} {:?} {:?}",
492 0 : join_errors, task_errors, timeout_errors
493 0 : );
494 0 : return Ok(None);
495 0 : }
496 0 :
497 0 : Ok(check_if_synced(responses))
498 0 : }
499 :
500 : // Fast path for sync_safekeepers. If they're already synced we get the lsn
501 : // in one roundtrip. If not, we should do a full sync_safekeepers.
502 0 : pub fn check_safekeepers_synced(&self, compute_state: &ComputeState) -> Result<Option<Lsn>> {
503 0 : let start_time = Utc::now();
504 0 :
505 0 : // Run actual work with new tokio runtime
506 0 : let rt = tokio::runtime::Builder::new_current_thread()
507 0 : .enable_all()
508 0 : .build()
509 0 : .expect("failed to create rt");
510 0 : let result = rt.block_on(self.check_safekeepers_synced_async(compute_state));
511 0 :
512 0 : // Record runtime
513 0 : self.state.lock().unwrap().metrics.sync_sk_check_ms = Utc::now()
514 0 : .signed_duration_since(start_time)
515 0 : .to_std()
516 0 : .unwrap()
517 0 : .as_millis() as u64;
518 0 : result
519 0 : }
520 :
521 : // Run `postgres` in a special mode with `--sync-safekeepers` argument
522 : // and return the reported LSN back to the caller.
523 0 : #[instrument(skip_all)]
524 : pub fn sync_safekeepers(&self, storage_auth_token: Option<String>) -> Result<Lsn> {
525 : let start_time = Utc::now();
526 :
527 : let mut sync_handle = maybe_cgexec(&self.pgbin)
528 : .args(["--sync-safekeepers"])
529 : .env("PGDATA", &self.pgdata) // we cannot use -D in this mode
530 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
531 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
532 : } else {
533 : vec![]
534 : })
535 : .stdout(Stdio::piped())
536 : .stderr(Stdio::piped())
537 : .spawn()
538 : .expect("postgres --sync-safekeepers failed to start");
539 : SYNC_SAFEKEEPERS_PID.store(sync_handle.id(), Ordering::SeqCst);
540 :
541 : // `postgres --sync-safekeepers` will print all log output to stderr and
542 : // final LSN to stdout. So we leave stdout to collect LSN, while stderr logs
543 : // will be collected in a child thread.
544 : let stderr = sync_handle
545 : .stderr
546 : .take()
547 : .expect("stderr should be captured");
548 : let logs_handle = handle_postgres_logs(stderr);
549 :
550 : let sync_output = sync_handle
551 : .wait_with_output()
552 : .expect("postgres --sync-safekeepers failed");
553 : SYNC_SAFEKEEPERS_PID.store(0, Ordering::SeqCst);
554 :
555 : // Process has exited, so we can join the logs thread.
556 : let _ = logs_handle
557 : .join()
558 0 : .map_err(|e| tracing::error!("log thread panicked: {:?}", e));
559 :
560 : if !sync_output.status.success() {
561 : anyhow::bail!(
562 : "postgres --sync-safekeepers exited with non-zero status: {}. stdout: {}",
563 : sync_output.status,
564 : String::from_utf8(sync_output.stdout)
565 : .expect("postgres --sync-safekeepers exited, and stdout is not utf-8"),
566 : );
567 : }
568 :
569 : self.state.lock().unwrap().metrics.sync_safekeepers_ms = Utc::now()
570 : .signed_duration_since(start_time)
571 : .to_std()
572 : .unwrap()
573 : .as_millis() as u64;
574 :
575 : let lsn = Lsn::from_str(String::from_utf8(sync_output.stdout)?.trim())?;
576 :
577 : Ok(lsn)
578 : }
579 :
580 : /// Do all the preparations like PGDATA directory creation, configuration,
581 : /// safekeepers sync, basebackup, etc.
582 0 : #[instrument(skip_all)]
583 : pub fn prepare_pgdata(
584 : &self,
585 : compute_state: &ComputeState,
586 : extension_server_port: u16,
587 : ) -> Result<()> {
588 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
589 : let spec = &pspec.spec;
590 : let pgdata_path = Path::new(&self.pgdata);
591 :
592 : // Remove/create an empty pgdata directory and put configuration there.
593 : self.create_pgdata()?;
594 : config::write_postgres_conf(
595 : &pgdata_path.join("postgresql.conf"),
596 : &pspec.spec,
597 : Some(extension_server_port),
598 : )?;
599 :
600 : // Syncing safekeepers is only safe with primary nodes: if a primary
601 : // is already connected it will be kicked out, so a secondary (standby)
602 : // cannot sync safekeepers.
603 : let lsn = match spec.mode {
604 : ComputeMode::Primary => {
605 0 : info!("checking if safekeepers are synced");
606 : let lsn = if let Ok(Some(lsn)) = self.check_safekeepers_synced(compute_state) {
607 : lsn
608 : } else {
609 0 : info!("starting safekeepers syncing");
610 : self.sync_safekeepers(pspec.storage_auth_token.clone())
611 0 : .with_context(|| "failed to sync safekeepers")?
612 : };
613 0 : info!("safekeepers synced at LSN {}", lsn);
614 : lsn
615 : }
616 : ComputeMode::Static(lsn) => {
617 0 : info!("Starting read-only node at static LSN {}", lsn);
618 : lsn
619 : }
620 : ComputeMode::Replica => {
621 0 : info!("Initializing standby from latest Pageserver LSN");
622 : Lsn(0)
623 : }
624 : };
625 :
626 0 : info!(
627 0 : "getting basebackup@{} from pageserver {}",
628 0 : lsn, &pspec.pageserver_connstr
629 0 : );
630 0 : self.get_basebackup(compute_state, lsn).with_context(|| {
631 0 : format!(
632 0 : "failed to get basebackup@{} from pageserver {}",
633 0 : lsn, &pspec.pageserver_connstr
634 0 : )
635 0 : })?;
636 :
637 : // Update pg_hba.conf received with basebackup.
638 : update_pg_hba(pgdata_path)?;
639 :
640 : // Place pg_dynshmem under /dev/shm. This allows us to use
641 : // 'dynamic_shared_memory_type = mmap' so that the files are placed in
642 : // /dev/shm, similar to how 'dynamic_shared_memory_type = posix' works.
643 : //
644 : // Why on earth don't we just stick to the 'posix' default, you might
645 : // ask. It turns out that making large allocations with 'posix' doesn't
646 : // work very well with autoscaling. The behavior we want is that:
647 : //
648 : // 1. You can make large DSM allocations, larger than the current RAM
649 : // size of the VM, without errors
650 : //
651 : // 2. If the allocated memory is really used, the VM is scaled up
652 : // automatically to accommodate that
653 : //
654 : // We try to make that possible by having swap in the VM. But with the
655 : // default 'posix' DSM implementation, we fail step 1, even when there's
656 : // plenty of swap available. PostgreSQL uses posix_fallocate() to create
657 : // the shmem segment, which is really just a file in /dev/shm in Linux,
658 : // but posix_fallocate() on tmpfs returns ENOMEM if the size is larger
659 : // than available RAM.
660 : //
661 : // Using 'dynamic_shared_memory_type = mmap' works around that, because
662 : // the Postgres 'mmap' DSM implementation doesn't use
663 : // posix_fallocate(). Instead, it uses repeated calls to write(2) to
664 : // fill the file with zeros. It's weird that that differs between
665 : // 'posix' and 'mmap', but we take advantage of it. When the file is
666 : // filled slowly with write(2), the kernel allows it to grow larger, as
667 : // long as there's swap available.
668 : //
669 : // In short, using 'dynamic_shared_memory_type = mmap' allows a DSM
670 : // segment to be larger than the currently available RAM. But we don't
671 : // want to store it in a real file, which the kernel would try to flush
672 : // to disk, so we symlink pg_dynshmem to /dev/shm.
673 : //
674 : // We don't set 'dynamic_shared_memory_type = mmap' here, we let the
675 : // control plane control that option. If 'mmap' is not used, this
676 : // symlink doesn't affect anything.
677 : //
678 : // See https://github.com/neondatabase/autoscaling/issues/800
679 : std::fs::remove_dir(pgdata_path.join("pg_dynshmem"))?;
680 : symlink("/dev/shm/", pgdata_path.join("pg_dynshmem"))?;
681 :
682 : match spec.mode {
683 : ComputeMode::Primary => {}
684 : ComputeMode::Replica | ComputeMode::Static(..) => {
685 : add_standby_signal(pgdata_path)?;
686 : }
687 : }
688 :
689 : Ok(())
690 : }
691 :
692 : /// Start and stop a postgres process to warm up the VM for startup.
693 0 : pub fn prewarm_postgres(&self) -> Result<()> {
694 0 : info!("prewarming");
695 :
696 : // Create pgdata
697 0 : let pgdata = &format!("{}.warmup", self.pgdata);
698 0 : create_pgdata(pgdata)?;
699 :
700 : // Run initdb to completion
701 0 : info!("running initdb");
702 0 : let initdb_bin = Path::new(&self.pgbin).parent().unwrap().join("initdb");
703 0 : Command::new(initdb_bin)
704 0 : .args(["-D", pgdata])
705 0 : .output()
706 0 : .expect("cannot start initdb process");
707 0 :
708 0 : // Write conf
709 0 : use std::io::Write;
710 0 : let conf_path = Path::new(pgdata).join("postgresql.conf");
711 0 : let mut file = std::fs::File::create(conf_path)?;
712 0 : writeln!(file, "shared_buffers=65536")?;
713 0 : writeln!(file, "port=51055")?; // Nobody should be connecting
714 0 : writeln!(file, "shared_preload_libraries = 'neon'")?;
715 :
716 : // Start postgres
717 0 : info!("starting postgres");
718 0 : let mut pg = maybe_cgexec(&self.pgbin)
719 0 : .args(["-D", pgdata])
720 0 : .spawn()
721 0 : .expect("cannot start postgres process");
722 0 :
723 0 : // Stop it when it's ready
724 0 : info!("waiting for postgres");
725 0 : wait_for_postgres(&mut pg, Path::new(pgdata))?;
726 0 : pg.kill()?;
727 0 : info!("sent kill signal");
728 0 : pg.wait()?;
729 0 : info!("done prewarming");
730 :
731 : // clean up
732 0 : let _ok = fs::remove_dir_all(pgdata);
733 0 : Ok(())
734 0 : }
735 :
736 : /// Start Postgres as a child process and manage DBs/roles.
737 : /// After that this will hang waiting on the postmaster process to exit.
738 : /// Returns a handle to the child process and a handle to the logs thread.
739 0 : #[instrument(skip_all)]
740 : pub fn start_postgres(
741 : &self,
742 : storage_auth_token: Option<String>,
743 : ) -> Result<(std::process::Child, std::thread::JoinHandle<()>)> {
744 : let pgdata_path = Path::new(&self.pgdata);
745 :
746 : // Run postgres as a child process.
747 : let mut pg = maybe_cgexec(&self.pgbin)
748 : .args(["-D", &self.pgdata])
749 : .envs(if let Some(storage_auth_token) = &storage_auth_token {
750 : vec![("NEON_AUTH_TOKEN", storage_auth_token)]
751 : } else {
752 : vec![]
753 : })
754 : .stderr(Stdio::piped())
755 : .spawn()
756 : .expect("cannot start postgres process");
757 : PG_PID.store(pg.id(), Ordering::SeqCst);
758 :
759 : // Start a thread to collect logs from stderr.
760 : let stderr = pg.stderr.take().expect("stderr should be captured");
761 : let logs_handle = handle_postgres_logs(stderr);
762 :
763 : wait_for_postgres(&mut pg, pgdata_path)?;
764 :
765 : Ok((pg, logs_handle))
766 : }
767 :
768 : /// Do initial configuration of the already started Postgres.
769 0 : #[instrument(skip_all)]
770 : pub fn apply_config(&self, compute_state: &ComputeState) -> Result<()> {
771 : // If the connection fails,
772 : // it may be an old node with the `zenith_admin` superuser.
773 : //
774 : // In this case we need to connect with the old `zenith_admin` name
775 : // and create a new user. We cannot simply rename the connected user,
776 : // but we can create a new one and grant it all privileges.
777 : let connstr = self.connstr.clone();
778 : let mut client = match Client::connect(connstr.as_str(), NoTls) {
779 : Err(e) => {
780 0 : info!(
781 0 : "cannot connect to postgres: {}, retrying with `zenith_admin` username",
782 0 : e
783 0 : );
784 : let mut zenith_admin_connstr = connstr.clone();
785 :
786 : zenith_admin_connstr
787 : .set_username("zenith_admin")
788 0 : .map_err(|_| anyhow::anyhow!("invalid connstr"))?;
789 :
790 : let mut client = Client::connect(zenith_admin_connstr.as_str(), NoTls)?;
791 : // Disable forwarding so that users don't get a cloud_admin role
792 : client.simple_query("SET neon.forward_ddl = false")?;
793 : client.simple_query("CREATE USER cloud_admin WITH SUPERUSER")?;
794 : client.simple_query("GRANT zenith_admin TO cloud_admin")?;
795 : drop(client);
796 :
797 : // Reconnect with the connstring with the expected username.
798 : Client::connect(connstr.as_str(), NoTls)?
799 : }
800 : Ok(client) => client,
801 : };
802 :
803 : // Disable DDL forwarding because control plane already knows about these roles/databases.
804 : client.simple_query("SET neon.forward_ddl = false")?;
805 :
806 : // Proceed with post-startup configuration. Note, that order of operations is important.
807 : let spec = &compute_state.pspec.as_ref().expect("spec must be set").spec;
808 : create_neon_superuser(spec, &mut client)?;
809 : cleanup_instance(&mut client)?;
810 : handle_roles(spec, &mut client)?;
811 : handle_databases(spec, &mut client)?;
812 : handle_role_deletions(spec, connstr.as_str(), &mut client)?;
813 : handle_grants(
814 : spec,
815 : &mut client,
816 : connstr.as_str(),
817 : self.has_feature(ComputeFeature::AnonExtension),
818 : )?;
819 : handle_extensions(spec, &mut client)?;
820 : handle_extension_neon(&mut client)?;
821 : create_availability_check_data(&mut client)?;
822 :
823 : // 'Close' connection
824 : drop(client);
825 :
826 : // Run migrations separately to not hold up cold starts
827 0 : thread::spawn(move || {
828 0 : let mut client = Client::connect(connstr.as_str(), NoTls)?;
829 0 : handle_migrations(&mut client)
830 0 : });
831 : Ok(())
832 : }
833 :
834 : // This is a thin wrapper around `pg_ctl reload`. We don't use `pg_ctl`
835 : // for start / stop, but asking it to reload the configuration is simple
836 : // and doesn't require an open superuser connection to Postgres.
837 0 : #[instrument(skip_all)]
838 : fn pg_reload_conf(&self) -> Result<()> {
839 : let pgctl_bin = Path::new(&self.pgbin).parent().unwrap().join("pg_ctl");
840 : Command::new(pgctl_bin)
841 : .args(["reload", "-D", &self.pgdata])
842 : .output()
843 : .expect("cannot run pg_ctl process");
844 : Ok(())
845 : }
846 :
847 : /// Similar to `apply_config()`, but does a slightly different sequence of operations,
848 : /// as it's used to reconfigure a previously started and configured Postgres node.
849 0 : #[instrument(skip_all)]
850 : pub fn reconfigure(&self) -> Result<()> {
851 : let spec = self.state.lock().unwrap().pspec.clone().unwrap().spec;
852 :
853 : if let Some(ref pgbouncer_settings) = spec.pgbouncer_settings {
854 0 : info!("tuning pgbouncer");
855 :
856 : let rt = tokio::runtime::Builder::new_current_thread()
857 : .enable_all()
858 : .build()
859 : .expect("failed to create rt");
860 :
861 : // Spawn a thread to do the tuning,
862 : // so that we don't block the main thread that starts Postgres.
863 : let pgbouncer_settings = pgbouncer_settings.clone();
864 0 : let _handle = thread::spawn(move || {
865 0 : let res = rt.block_on(tune_pgbouncer(pgbouncer_settings));
866 0 : if let Err(err) = res {
867 0 : error!("error while tuning pgbouncer: {err:?}");
868 0 : }
869 0 : });
870 : }
871 :
872 : // Write new config
873 : let pgdata_path = Path::new(&self.pgdata);
874 : let postgresql_conf_path = pgdata_path.join("postgresql.conf");
875 : config::write_postgres_conf(&postgresql_conf_path, &spec, None)?;
876 : // temporarily reset max_cluster_size in config
877 : // to avoid the possibility of hitting the limit, while we are reconfiguring:
878 : // creating new extensions, roles, etc...
879 : config::compute_ctl_temp_override_create(pgdata_path, "neon.max_cluster_size=-1")?;
880 : self.pg_reload_conf()?;
881 :
882 : let mut client = Client::connect(self.connstr.as_str(), NoTls)?;
883 :
884 : // Proceed with post-startup configuration. Note, that order of operations is important.
885 : // Disable DDL forwarding because control plane already knows about these roles/databases.
886 : if spec.mode == ComputeMode::Primary {
887 : client.simple_query("SET neon.forward_ddl = false")?;
888 : cleanup_instance(&mut client)?;
889 : handle_roles(&spec, &mut client)?;
890 : handle_databases(&spec, &mut client)?;
891 : handle_role_deletions(&spec, self.connstr.as_str(), &mut client)?;
892 : handle_grants(
893 : &spec,
894 : &mut client,
895 : self.connstr.as_str(),
896 : self.has_feature(ComputeFeature::AnonExtension),
897 : )?;
898 : handle_extensions(&spec, &mut client)?;
899 : handle_extension_neon(&mut client)?;
900 : // We can skip handle_migrations here because a new migration can only appear
901 : // if we have a new version of the compute_ctl binary, which can only happen
902 : // if the compute got restarted, in which case we'll end up inside apply_config
903 : // instead of reconfigure.
904 : }
905 :
906 : // 'Close' connection
907 : drop(client);
908 :
909 : // reset max_cluster_size in config back to original value and reload config
910 : config::compute_ctl_temp_override_remove(pgdata_path)?;
911 : self.pg_reload_conf()?;
912 :
913 : let unknown_op = "unknown".to_string();
914 : let op_id = spec.operation_uuid.as_ref().unwrap_or(&unknown_op);
915 0 : info!(
916 0 : "finished reconfiguration of compute node for operation {}",
917 0 : op_id
918 0 : );
919 :
920 : Ok(())
921 : }
922 :
923 0 : #[instrument(skip_all)]
924 : pub fn start_compute(
925 : &self,
926 : extension_server_port: u16,
927 : ) -> Result<(std::process::Child, std::thread::JoinHandle<()>)> {
928 : let compute_state = self.state.lock().unwrap().clone();
929 : let pspec = compute_state.pspec.as_ref().expect("spec must be set");
930 0 : info!(
931 0 : "starting compute for project {}, operation {}, tenant {}, timeline {}",
932 0 : pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None"),
933 0 : pspec.spec.operation_uuid.as_deref().unwrap_or("None"),
934 0 : pspec.tenant_id,
935 0 : pspec.timeline_id,
936 0 : );
937 :
938 : // tune pgbouncer
939 : if let Some(pgbouncer_settings) = &pspec.spec.pgbouncer_settings {
940 0 : info!("tuning pgbouncer");
941 :
942 : let rt = tokio::runtime::Builder::new_current_thread()
943 : .enable_all()
944 : .build()
945 : .expect("failed to create rt");
946 :
947 : // Spawn a thread to do the tuning,
948 : // so that we don't block the main thread that starts Postgres.
949 : let pgbouncer_settings = pgbouncer_settings.clone();
950 0 : let _handle = thread::spawn(move || {
951 0 : let res = rt.block_on(tune_pgbouncer(pgbouncer_settings));
952 0 : if let Err(err) = res {
953 0 : error!("error while tuning pgbouncer: {err:?}");
954 0 : }
955 0 : });
956 : }
957 :
958 0 : info!(
959 0 : "start_compute spec.remote_extensions {:?}",
960 0 : pspec.spec.remote_extensions
961 0 : );
962 :
963 : // This part is sync, because we need to download
964 : // remote shared_preload_libraries (if any) before postgres starts
965 : if let Some(remote_extensions) = &pspec.spec.remote_extensions {
966 : // First, create control files for all available extensions
967 : extension_server::create_control_files(remote_extensions, &self.pgbin);
968 :
969 : let library_load_start_time = Utc::now();
970 : let remote_ext_metrics = self.prepare_preload_libraries(&pspec.spec)?;
971 :
972 : let library_load_time = Utc::now()
973 : .signed_duration_since(library_load_start_time)
974 : .to_std()
975 : .unwrap()
976 : .as_millis() as u64;
977 : let mut state = self.state.lock().unwrap();
978 : state.metrics.load_ext_ms = library_load_time;
979 : state.metrics.num_ext_downloaded = remote_ext_metrics.num_ext_downloaded;
980 : state.metrics.largest_ext_size = remote_ext_metrics.largest_ext_size;
981 : state.metrics.total_ext_download_size = remote_ext_metrics.total_ext_download_size;
982 0 : info!(
983 0 : "Loading shared_preload_libraries took {:?}ms",
984 0 : library_load_time
985 0 : );
986 0 : info!("{:?}", remote_ext_metrics);
987 : }
988 :
989 : self.prepare_pgdata(&compute_state, extension_server_port)?;
990 :
991 : let start_time = Utc::now();
992 : let pg_process = self.start_postgres(pspec.storage_auth_token.clone())?;
993 :
994 : let config_time = Utc::now();
995 : if pspec.spec.mode == ComputeMode::Primary && !pspec.spec.skip_pg_catalog_updates {
996 : let pgdata_path = Path::new(&self.pgdata);
997 : // temporarily reset max_cluster_size in config
998 : // to avoid the possibility of hitting the limit, while we are applying config:
999 : // creating new extensions, roles, etc...
1000 : config::compute_ctl_temp_override_create(pgdata_path, "neon.max_cluster_size=-1")?;
1001 : self.pg_reload_conf()?;
1002 :
1003 : self.apply_config(&compute_state)?;
1004 :
1005 : config::compute_ctl_temp_override_remove(pgdata_path)?;
1006 : self.pg_reload_conf()?;
1007 : }
1008 :
1009 : let startup_end_time = Utc::now();
1010 : {
1011 : let mut state = self.state.lock().unwrap();
1012 : state.metrics.start_postgres_ms = config_time
1013 : .signed_duration_since(start_time)
1014 : .to_std()
1015 : .unwrap()
1016 : .as_millis() as u64;
1017 : state.metrics.config_ms = startup_end_time
1018 : .signed_duration_since(config_time)
1019 : .to_std()
1020 : .unwrap()
1021 : .as_millis() as u64;
1022 : state.metrics.total_startup_ms = startup_end_time
1023 : .signed_duration_since(compute_state.start_time)
1024 : .to_std()
1025 : .unwrap()
1026 : .as_millis() as u64;
1027 : }
1028 : self.set_status(ComputeStatus::Running);
1029 :
1030 0 : info!(
1031 0 : "finished configuration of compute for project {}",
1032 0 : pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None")
1033 0 : );
1034 :
1035 : // Log metrics so that we can search for slow operations in logs
1036 : let metrics = {
1037 : let state = self.state.lock().unwrap();
1038 : state.metrics.clone()
1039 : };
1040 0 : info!(?metrics, "compute start finished");
1041 :
1042 : Ok(pg_process)
1043 : }
1044 :
1045 : /// Update the `last_active` in the shared state, but ensure that it's a more recent one.
1046 0 : pub fn update_last_active(&self, last_active: Option<DateTime<Utc>>) {
1047 0 : let mut state = self.state.lock().unwrap();
1048 0 : // NB: `Some(<DateTime>)` is always greater than `None`.
1049 0 : if last_active > state.last_active {
1050 0 : state.last_active = last_active;
1051 0 : debug!("set the last compute activity time to: {:?}", last_active);
1052 0 : }
1053 0 : }
1054 :
1055 : // Look for core dumps and collect backtraces.
1056 : //
1057 : // EKS worker nodes have following core dump settings:
1058 : // /proc/sys/kernel/core_pattern -> core
1059 : // /proc/sys/kernel/core_uses_pid -> 1
1060 : // ulimint -c -> unlimited
1061 : // which results in core dumps being written to postgres data directory as core.<pid>.
1062 : //
1063 : // Use that as a default location and pattern, except macos where core dumps are written
1064 : // to /cores/ directory by default.
1065 0 : pub fn check_for_core_dumps(&self) -> Result<()> {
1066 0 : let core_dump_dir = match std::env::consts::OS {
1067 0 : "macos" => Path::new("/cores/"),
1068 0 : _ => Path::new(&self.pgdata),
1069 : };
1070 :
1071 : // Collect core dump paths if any
1072 0 : info!("checking for core dumps in {}", core_dump_dir.display());
1073 0 : let files = fs::read_dir(core_dump_dir)?;
1074 0 : let cores = files.filter_map(|entry| {
1075 0 : let entry = entry.ok()?;
1076 0 : let _ = entry.file_name().to_str()?.strip_prefix("core.")?;
1077 0 : Some(entry.path())
1078 0 : });
1079 :
1080 : // Print backtrace for each core dump
1081 0 : for core_path in cores {
1082 0 : warn!(
1083 0 : "core dump found: {}, collecting backtrace",
1084 0 : core_path.display()
1085 0 : );
1086 :
1087 : // Try first with gdb
1088 0 : let backtrace = Command::new("gdb")
1089 0 : .args(["--batch", "-q", "-ex", "bt", &self.pgbin])
1090 0 : .arg(&core_path)
1091 0 : .output();
1092 :
1093 : // Try lldb if no gdb is found -- that is handy for local testing on macOS
1094 0 : let backtrace = match backtrace {
1095 0 : Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
1096 0 : warn!("cannot find gdb, trying lldb");
1097 0 : Command::new("lldb")
1098 0 : .arg("-c")
1099 0 : .arg(&core_path)
1100 0 : .args(["--batch", "-o", "bt all", "-o", "quit"])
1101 0 : .output()
1102 : }
1103 0 : _ => backtrace,
1104 0 : }?;
1105 :
1106 0 : warn!(
1107 0 : "core dump backtrace: {}",
1108 0 : String::from_utf8_lossy(&backtrace.stdout)
1109 0 : );
1110 0 : warn!(
1111 0 : "debugger stderr: {}",
1112 0 : String::from_utf8_lossy(&backtrace.stderr)
1113 0 : );
1114 : }
1115 :
1116 0 : Ok(())
1117 0 : }
1118 :
1119 : /// Select `pg_stat_statements` data and return it as stringified JSON
1120 0 : pub async fn collect_insights(&self) -> String {
1121 0 : let mut result_rows: Vec<String> = Vec::new();
1122 0 : let connect_result = tokio_postgres::connect(self.connstr.as_str(), NoTls).await;
1123 0 : let (client, connection) = connect_result.unwrap();
1124 0 : tokio::spawn(async move {
1125 0 : if let Err(e) = connection.await {
1126 0 : eprintln!("connection error: {}", e);
1127 0 : }
1128 0 : });
1129 0 : let result = client
1130 0 : .simple_query(
1131 0 : "SELECT
1132 0 : row_to_json(pg_stat_statements)
1133 0 : FROM
1134 0 : pg_stat_statements
1135 0 : WHERE
1136 0 : userid != 'cloud_admin'::regrole::oid
1137 0 : ORDER BY
1138 0 : (mean_exec_time + mean_plan_time) DESC
1139 0 : LIMIT 100",
1140 0 : )
1141 0 : .await;
1142 :
1143 0 : if let Ok(raw_rows) = result {
1144 0 : for message in raw_rows.iter() {
1145 0 : if let postgres::SimpleQueryMessage::Row(row) = message {
1146 0 : if let Some(json) = row.get(0) {
1147 0 : result_rows.push(json.to_string());
1148 0 : }
1149 0 : }
1150 : }
1151 :
1152 0 : format!("{{\"pg_stat_statements\": [{}]}}", result_rows.join(","))
1153 : } else {
1154 0 : "{{\"pg_stat_statements\": []}}".to_string()
1155 : }
1156 0 : }
1157 :
1158 : // Download an archive, unpack it, and place the files in the correct locations.
1159 0 : pub async fn download_extension(
1160 0 : &self,
1161 0 : real_ext_name: String,
1162 0 : ext_path: RemotePath,
1163 0 : ) -> Result<u64, DownloadError> {
1164 0 : let ext_remote_storage =
1165 0 : self.ext_remote_storage
1166 0 : .as_ref()
1167 0 : .ok_or(DownloadError::BadInput(anyhow::anyhow!(
1168 0 : "Remote extensions storage is not configured",
1169 0 : )))?;
1170 :
1171 0 : let ext_archive_name = ext_path.object_name().expect("bad path");
1172 0 :
1173 0 : let mut first_try = false;
1174 0 : if !self
1175 0 : .ext_download_progress
1176 0 : .read()
1177 0 : .expect("lock err")
1178 0 : .contains_key(ext_archive_name)
1179 0 : {
1180 0 : self.ext_download_progress
1181 0 : .write()
1182 0 : .expect("lock err")
1183 0 : .insert(ext_archive_name.to_string(), (Utc::now(), false));
1184 0 : first_try = true;
1185 0 : }
1186 0 : let (download_start, download_completed) =
1187 0 : self.ext_download_progress.read().expect("lock err")[ext_archive_name];
1188 0 : let start_time_delta = Utc::now()
1189 0 : .signed_duration_since(download_start)
1190 0 : .to_std()
1191 0 : .unwrap()
1192 0 : .as_millis() as u64;
1193 0 :
1194 0 : // how long to wait for extension download if it was started by another process
1195 0 : const HANG_TIMEOUT: u64 = 3000; // milliseconds
1196 0 :
1197 0 : if download_completed {
1198 0 : info!("extension already downloaded, skipping re-download");
1199 0 : return Ok(0);
1200 0 : } else if start_time_delta < HANG_TIMEOUT && !first_try {
1201 0 : info!("download {ext_archive_name} already started by another process, hanging until completion or timeout");
1202 0 : let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500));
1203 : loop {
1204 0 : info!("waiting for download");
1205 0 : interval.tick().await;
1206 0 : let (_, download_completed_now) =
1207 0 : self.ext_download_progress.read().expect("lock")[ext_archive_name];
1208 0 : if download_completed_now {
1209 0 : info!("download finished by whoever else downloaded it");
1210 0 : return Ok(0);
1211 0 : }
1212 : }
1213 : // NOTE: the above loop will get terminated
1214 : // based on the timeout of the download function
1215 0 : }
1216 :
1217 : // if extension hasn't been downloaded before or the previous
1218 : // attempt to download was at least HANG_TIMEOUT ms ago
1219 : // then we try to download it here
1220 0 : info!("downloading new extension {ext_archive_name}");
1221 :
1222 0 : let download_size = extension_server::download_extension(
1223 0 : &real_ext_name,
1224 0 : &ext_path,
1225 0 : ext_remote_storage,
1226 0 : &self.pgbin,
1227 0 : )
1228 0 : .await
1229 0 : .map_err(DownloadError::Other);
1230 0 :
1231 0 : self.ext_download_progress
1232 0 : .write()
1233 0 : .expect("bad lock")
1234 0 : .insert(ext_archive_name.to_string(), (download_start, true));
1235 0 :
1236 0 : download_size
1237 0 : }
1238 :
1239 : #[tokio::main]
1240 0 : pub async fn prepare_preload_libraries(
1241 0 : &self,
1242 0 : spec: &ComputeSpec,
1243 0 : ) -> Result<RemoteExtensionMetrics> {
1244 0 : if self.ext_remote_storage.is_none() {
1245 0 : return Ok(RemoteExtensionMetrics {
1246 0 : num_ext_downloaded: 0,
1247 0 : largest_ext_size: 0,
1248 0 : total_ext_download_size: 0,
1249 0 : });
1250 0 : }
1251 0 : let remote_extensions = spec
1252 0 : .remote_extensions
1253 0 : .as_ref()
1254 0 : .ok_or(anyhow::anyhow!("Remote extensions are not configured"))?;
1255 0 :
1256 0 : info!("parse shared_preload_libraries from spec.cluster.settings");
1257 0 : let mut libs_vec = Vec::new();
1258 0 : if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
1259 0 : libs_vec = libs
1260 0 : .split(&[',', '\'', ' '])
1261 0 : .filter(|s| *s != "neon" && !s.is_empty())
1262 0 : .map(str::to_string)
1263 0 : .collect();
1264 0 : }
1265 0 : info!("parse shared_preload_libraries from provided postgresql.conf");
1266 0 :
1267 0 : // that is used in neon_local and python tests
1268 0 : if let Some(conf) = &spec.cluster.postgresql_conf {
1269 0 : let conf_lines = conf.split('\n').collect::<Vec<&str>>();
1270 0 : let mut shared_preload_libraries_line = "";
1271 0 : for line in conf_lines {
1272 0 : if line.starts_with("shared_preload_libraries") {
1273 0 : shared_preload_libraries_line = line;
1274 0 : }
1275 0 : }
1276 0 : let mut preload_libs_vec = Vec::new();
1277 0 : if let Some(libs) = shared_preload_libraries_line.split("='").nth(1) {
1278 0 : preload_libs_vec = libs
1279 0 : .split(&[',', '\'', ' '])
1280 0 : .filter(|s| *s != "neon" && !s.is_empty())
1281 0 : .map(str::to_string)
1282 0 : .collect();
1283 0 : }
1284 0 : libs_vec.extend(preload_libs_vec);
1285 0 : }
1286 0 :
1287 0 : // Don't try to download libraries that are not in the index.
1288 0 : // Assume that they are already present locally.
1289 0 : libs_vec.retain(|lib| remote_extensions.library_index.contains_key(lib));
1290 0 :
1291 0 : info!("Downloading shared preload libraries: {:?}", &libs_vec);
1292 0 :
1293 0 : let mut download_tasks = Vec::new();
1294 0 : for library in &libs_vec {
1295 0 : let (ext_name, ext_path) =
1296 0 : remote_extensions.get_ext(library, true, &self.build_tag, &self.pgversion)?;
1297 0 : download_tasks.push(self.download_extension(ext_name, ext_path));
1298 0 : }
1299 0 : let results = join_all(download_tasks).await;
1300 0 :
1301 0 : let mut remote_ext_metrics = RemoteExtensionMetrics {
1302 0 : num_ext_downloaded: 0,
1303 0 : largest_ext_size: 0,
1304 0 : total_ext_download_size: 0,
1305 0 : };
1306 0 : for result in results {
1307 0 : let download_size = match result {
1308 0 : Ok(res) => {
1309 0 : remote_ext_metrics.num_ext_downloaded += 1;
1310 0 : res
1311 0 : }
1312 0 : Err(err) => {
1313 0 : // if we failed to download an extension, we don't want to fail the whole
1314 0 : // process, but we do want to log the error
1315 0 : error!("Failed to download extension: {}", err);
1316 0 : 0
1317 0 : }
1318 0 : };
1319 0 :
1320 0 : remote_ext_metrics.largest_ext_size =
1321 0 : std::cmp::max(remote_ext_metrics.largest_ext_size, download_size);
1322 0 : remote_ext_metrics.total_ext_download_size += download_size;
1323 0 : }
1324 0 : Ok(remote_ext_metrics)
1325 0 : }
1326 : }
1327 :
1328 0 : pub fn forward_termination_signal() {
1329 0 : let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst);
1330 0 : if ss_pid != 0 {
1331 0 : let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32);
1332 0 : kill(ss_pid, Signal::SIGTERM).ok();
1333 0 : }
1334 0 : let pg_pid = PG_PID.load(Ordering::SeqCst);
1335 0 : if pg_pid != 0 {
1336 0 : let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32);
1337 0 : // use 'immediate' shutdown (SIGQUIT): https://www.postgresql.org/docs/current/server-shutdown.html
1338 0 : kill(pg_pid, Signal::SIGQUIT).ok();
1339 0 : }
1340 0 : }
|