Line data Source code
1 : //
2 : // Main entry point for the safekeeper executable
3 : //
4 : use std::env::{VarError, var};
5 : use std::fs::{self, File};
6 : use std::io::{ErrorKind, Write};
7 : use std::str::FromStr;
8 : use std::sync::Arc;
9 : use std::time::{Duration, Instant};
10 :
11 : use anyhow::{Context, Result, bail};
12 : use camino::{Utf8Path, Utf8PathBuf};
13 : use clap::{ArgAction, Parser};
14 : use futures::future::BoxFuture;
15 : use futures::stream::FuturesUnordered;
16 : use futures::{FutureExt, StreamExt};
17 : use metrics::set_build_info_metric;
18 : use remote_storage::RemoteStorageConfig;
19 : use safekeeper::defaults::{
20 : DEFAULT_CONTROL_FILE_SAVE_INTERVAL, DEFAULT_EVICTION_MIN_RESIDENT, DEFAULT_HEARTBEAT_TIMEOUT,
21 : DEFAULT_HTTP_LISTEN_ADDR, DEFAULT_MAX_OFFLOADER_LAG_BYTES, DEFAULT_PARTIAL_BACKUP_CONCURRENCY,
22 : DEFAULT_PARTIAL_BACKUP_TIMEOUT, DEFAULT_PG_LISTEN_ADDR,
23 : };
24 : use safekeeper::{
25 : BROKER_RUNTIME, GlobalTimelines, HTTP_RUNTIME, SafeKeeperConf, WAL_SERVICE_RUNTIME, broker,
26 : control_file, http, wal_backup, wal_service,
27 : };
28 : use sd_notify::NotifyState;
29 : use storage_broker::{DEFAULT_ENDPOINT, Uri};
30 : use tokio::runtime::Handle;
31 : use tokio::signal::unix::{SignalKind, signal};
32 : use tokio::task::JoinError;
33 : use tracing::*;
34 : use utils::auth::{JwtAuth, Scope, SwappableJwtAuth};
35 : use utils::id::NodeId;
36 : use utils::logging::{self, LogFormat, SecretString};
37 : use utils::sentry_init::init_sentry;
38 : use utils::{pid_file, project_build_tag, project_git_version, tcp_listener};
39 :
40 : #[global_allocator]
41 : static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
42 :
43 : /// Configure jemalloc to profile heap allocations by sampling stack traces every 2 MB (1 << 21).
44 : /// This adds roughly 3% overhead for allocations on average, which is acceptable considering
45 : /// performance-sensitive code will avoid allocations as far as possible anyway.
46 : #[allow(non_upper_case_globals)]
47 : #[unsafe(export_name = "malloc_conf")]
48 : pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:21\0";
49 :
50 : const PID_FILE_NAME: &str = "safekeeper.pid";
51 : const ID_FILE_NAME: &str = "safekeeper.id";
52 :
53 : project_git_version!(GIT_VERSION);
54 : project_build_tag!(BUILD_TAG);
55 :
56 : const FEATURES: &[&str] = &[
57 : #[cfg(feature = "testing")]
58 : "testing",
59 : ];
60 :
61 0 : fn version() -> String {
62 0 : format!(
63 0 : "{GIT_VERSION} failpoints: {}, features: {:?}",
64 0 : fail::has_failpoints(),
65 0 : FEATURES,
66 0 : )
67 0 : }
68 :
69 : const ABOUT: &str = r#"
70 : A fleet of safekeepers is responsible for reliably storing WAL received from
71 : compute, passing it through consensus (mitigating potential compute
72 : split-brain), and serving the hardened part further downstream to pageserver(s).
73 : "#;
74 :
75 : #[derive(Parser)]
76 : #[command(name = "Neon safekeeper", version = GIT_VERSION, about = ABOUT, long_about = None)]
77 : struct Args {
78 : /// Path to the safekeeper data directory.
79 : #[arg(short = 'D', long, default_value = "./")]
80 0 : datadir: Utf8PathBuf,
81 : /// Safekeeper node id.
82 : #[arg(long)]
83 : id: Option<u64>,
84 : /// Initialize safekeeper with given id and exit.
85 : #[arg(long)]
86 0 : init: bool,
87 : /// Listen endpoint for receiving/sending WAL in the form host:port.
88 : #[arg(short, long, default_value = DEFAULT_PG_LISTEN_ADDR)]
89 0 : listen_pg: String,
90 : /// Listen endpoint for receiving/sending WAL in the form host:port allowing
91 : /// only tenant scoped auth tokens. Pointless if auth is disabled.
92 : #[arg(long, default_value = None, verbatim_doc_comment)]
93 : listen_pg_tenant_only: Option<String>,
94 : /// Listen http endpoint for management and metrics in the form host:port.
95 : #[arg(long, default_value = DEFAULT_HTTP_LISTEN_ADDR)]
96 0 : listen_http: String,
97 : /// Advertised endpoint for receiving/sending WAL in the form host:port. If not
98 : /// specified, listen_pg is used to advertise instead.
99 : #[arg(long, default_value = None)]
100 : advertise_pg: Option<String>,
101 : /// Availability zone of the safekeeper.
102 : #[arg(long)]
103 : availability_zone: Option<String>,
104 : /// Do not wait for changes to be written safely to disk. Unsafe.
105 : #[arg(short, long)]
106 0 : no_sync: bool,
107 : /// Dump control file at path specified by this argument and exit.
108 : #[arg(long)]
109 : dump_control_file: Option<Utf8PathBuf>,
110 : /// Broker endpoint for storage nodes coordination in the form
111 : /// http[s]://host:port. With the https scheme a TLS connection is
112 : /// established; plaintext otherwise.
113 : #[arg(long, default_value = DEFAULT_ENDPOINT, verbatim_doc_comment)]
114 0 : broker_endpoint: Uri,
115 : /// Broker keepalive interval.
116 : #[arg(long, value_parser= humantime::parse_duration, default_value = storage_broker::DEFAULT_KEEPALIVE_INTERVAL)]
117 0 : broker_keepalive_interval: Duration,
118 : /// A peer safekeeper is considered dead if no heartbeats have been received
119 : /// from it during this period, given as a human-readable duration.
120 : #[arg(long, value_parser= humantime::parse_duration, default_value = DEFAULT_HEARTBEAT_TIMEOUT, verbatim_doc_comment)]
121 0 : heartbeat_timeout: Duration,
122 : /// Enable/disable peer recovery.
123 : #[arg(long, default_value = "false", action=ArgAction::Set)]
124 0 : peer_recovery: bool,
125 : /// Remote storage configuration for WAL backup (offloading to s3) as TOML
126 : /// inline table, e.g.
127 : /// {max_concurrent_syncs = 17, max_sync_errors = 13, bucket_name = "<BUCKETNAME>", bucket_region = "<REGION>", concurrency_limit = 119}
128 : /// Safekeeper offloads WAL to
129 : /// [prefix_in_bucket/]<tenant_id>/<timeline_id>/<segment_file>, mirroring
130 : /// structure on the file system.
131 : #[arg(long, value_parser = parse_remote_storage, verbatim_doc_comment)]
132 : remote_storage: Option<RemoteStorageConfig>,
133 : /// Safekeeper won't be elected for WAL offloading if it is lagging for more than this value in bytes
134 1 : #[arg(long, default_value_t = DEFAULT_MAX_OFFLOADER_LAG_BYTES)]
135 0 : max_offloader_lag: u64,
136 : /// Number of max parallel WAL segments to be offloaded to remote storage.
137 : #[arg(long, default_value = "5")]
138 0 : wal_backup_parallel_jobs: usize,
139 : /// Disable WAL backup to s3. When disabled, the safekeeper removes WAL regardless
140 : /// of the WAL backup horizon.
141 : #[arg(long)]
142 0 : disable_wal_backup: bool,
143 : /// If given, enables auth on incoming connections to WAL service endpoint
144 : /// (--listen-pg). Value specifies path to a .pem public key used for
145 : /// validation of JWT tokens. Empty string is allowed and means disabling
146 : /// auth.
147 : #[arg(long, verbatim_doc_comment, value_parser = opt_pathbuf_parser)]
148 : pg_auth_public_key_path: Option<Utf8PathBuf>,
149 : /// If given, enables auth on incoming connections to tenant only WAL
150 : /// service endpoint (--listen-pg-tenant-only). Value specifies path to a
151 : /// .pem public key used for validation of JWT tokens. Empty string is
152 : /// allowed and means disabling auth.
153 : #[arg(long, verbatim_doc_comment, value_parser = opt_pathbuf_parser)]
154 : pg_tenant_only_auth_public_key_path: Option<Utf8PathBuf>,
155 : /// If given, enables auth on incoming connections to http management
156 : /// service endpoint (--listen-http). Value specifies path to a .pem public
157 : /// key used for validation of JWT tokens. Empty string is allowed and
158 : /// means disabling auth.
159 : #[arg(long, verbatim_doc_comment, value_parser = opt_pathbuf_parser)]
160 : http_auth_public_key_path: Option<Utf8PathBuf>,
161 : /// Format for logging, either 'plain' or 'json'.
162 : #[arg(long, default_value = "plain")]
163 0 : log_format: String,
164 : /// Run everything in a single-threaded current-thread runtime; might be
165 : /// useful for debugging.
166 : #[arg(long)]
167 0 : current_thread_runtime: bool,
168 : /// Keep horizon for walsenders, i.e. don't remove WAL segments that are
169 : /// still needed for existing replication connections.
170 : #[arg(long)]
171 0 : walsenders_keep_horizon: bool,
172 : /// Controls how long backup will wait until uploading the partial segment.
173 : #[arg(long, value_parser = humantime::parse_duration, default_value = DEFAULT_PARTIAL_BACKUP_TIMEOUT, verbatim_doc_comment)]
174 0 : partial_backup_timeout: Duration,
175 : /// Disable task to push messages to broker every second. Supposed to
176 : /// be used in tests.
177 : #[arg(long)]
178 0 : disable_periodic_broker_push: bool,
179 : /// Enable automatic switching to offloaded state.
180 : #[arg(long)]
181 0 : enable_offload: bool,
182 : /// Delete local WAL files after offloading. When disabled, they will be left on disk.
183 : #[arg(long)]
184 0 : delete_offloaded_wal: bool,
185 : /// Pending updates to control file will be automatically saved after this interval.
186 : #[arg(long, value_parser = humantime::parse_duration, default_value = DEFAULT_CONTROL_FILE_SAVE_INTERVAL)]
187 0 : control_file_save_interval: Duration,
188 : /// Number of allowed concurrent uploads of partial segments to remote storage.
189 : #[arg(long, default_value = DEFAULT_PARTIAL_BACKUP_CONCURRENCY)]
190 0 : partial_backup_concurrency: usize,
191 : /// How long a timeline must be resident before it is eligible for eviction.
192 : /// Usually a timeline has to wait for `partial_backup_timeout` before becoming eligible for eviction,
193 : /// but if a timeline is un-evicted and then _not_ written to, it would immediately flap back to evicted again
194 : /// if it weren't for `eviction_min_resident` preventing that.
195 : ///
196 : /// Also defines interval for eviction retries.
197 : #[arg(long, value_parser = humantime::parse_duration, default_value = DEFAULT_EVICTION_MIN_RESIDENT)]
198 0 : eviction_min_resident: Duration,
199 : /// Enable fanning out WAL to different shards from the same reader
200 : #[arg(long)]
201 0 : wal_reader_fanout: bool,
202 : /// Only fan out the WAL reader if the absolute delta between the new requested position
203 : /// and the current position of the reader is smaller than this value.
204 : #[arg(long)]
205 : max_delta_for_fanout: Option<u64>,
206 : }
207 :
208 : // Like PathBufValueParser, but allows empty string.
209 0 : fn opt_pathbuf_parser(s: &str) -> Result<Utf8PathBuf, String> {
210 0 : Ok(Utf8PathBuf::from_str(s).unwrap())
211 0 : }
212 :
213 : #[tokio::main(flavor = "current_thread")]
214 0 : async fn main() -> anyhow::Result<()> {
215 0 : // We want to allow multiple occurrences of the same arg (taking the last) so
216 0 : // that neon_local can generate a command with defaults + overrides without
217 0 : // getting 'argument cannot be used multiple times' error. This seems to be
218 0 : // impossible with pure Derive API, so convert struct to Command, modify it,
219 0 : // parse arguments, and then fill the struct back.
220 0 : let cmd = <Args as clap::CommandFactory>::command()
221 0 : .args_override_self(true)
222 0 : .version(version());
223 0 : let mut matches = cmd.get_matches();
224 0 : let mut args = <Args as clap::FromArgMatches>::from_arg_matches_mut(&mut matches)?;
225 0 :
226 0 : // I failed to modify opt_pathbuf_parser to return Option<PathBuf> in
227 0 : // a reasonable time, so turn an empty string into None post factum.
228 0 : if let Some(pb) = &args.pg_auth_public_key_path {
229 0 : if pb.as_os_str().is_empty() {
230 0 : args.pg_auth_public_key_path = None;
231 0 : }
232 0 : }
233 0 : if let Some(pb) = &args.pg_tenant_only_auth_public_key_path {
234 0 : if pb.as_os_str().is_empty() {
235 0 : args.pg_tenant_only_auth_public_key_path = None;
236 0 : }
237 0 : }
238 0 : if let Some(pb) = &args.http_auth_public_key_path {
239 0 : if pb.as_os_str().is_empty() {
240 0 : args.http_auth_public_key_path = None;
241 0 : }
242 0 : }
243 0 :
244 0 : if let Some(addr) = args.dump_control_file {
245 0 : let state = control_file::FileStorage::load_control_file(addr)?;
246 0 : let json = serde_json::to_string(&state)?;
247 0 : print!("{json}");
248 0 : return Ok(());
249 0 : }
250 0 :
251 0 : // important to keep the order of:
252 0 : // 1. init logging
253 0 : // 2. tracing panic hook
254 0 : // 3. sentry
255 0 : logging::init(
256 0 : LogFormat::from_config(&args.log_format)?,
257 0 : logging::TracingErrorLayerEnablement::Disabled,
258 0 : logging::Output::Stdout,
259 0 : )?;
260 0 : logging::replace_panic_hook_with_tracing_panic_hook().forget();
261 0 : info!("version: {GIT_VERSION}");
262 0 : info!("buld_tag: {BUILD_TAG}");
263 0 :
264 0 : let args_workdir = &args.datadir;
265 0 : let workdir = args_workdir.canonicalize_utf8().with_context(|| {
266 0 : format!("Failed to get the absolute path for input workdir {args_workdir:?}")
267 0 : })?;
268 0 :
269 0 : // Change into the data directory.
270 0 : std::env::set_current_dir(&workdir)?;
271 0 :
272 0 : // Prevent running multiple safekeepers on the same directory
273 0 : let lock_file_path = workdir.join(PID_FILE_NAME);
274 0 : let lock_file =
275 0 : pid_file::claim_for_current_process(&lock_file_path).context("claim pid file")?;
276 0 : info!("claimed pid file at {lock_file_path:?}");
277 0 : // ensure that the lock file is held even if the main thread of the process panics;
278 0 : // we need to release the lock file only when the current process is gone
279 0 : std::mem::forget(lock_file);
280 0 :
281 0 : // Set or read our ID.
282 0 : let id = set_id(&workdir, args.id.map(NodeId))?;
283 0 : if args.init {
284 0 : return Ok(());
285 0 : }
286 0 :
287 0 : let pg_auth = match args.pg_auth_public_key_path.as_ref() {
288 0 : None => {
289 0 : info!("pg auth is disabled");
290 0 : None
291 0 : }
292 0 : Some(path) => {
293 0 : info!("loading pg auth JWT key from {path}");
294 0 : Some(Arc::new(
295 0 : JwtAuth::from_key_path(path).context("failed to load the auth key")?,
296 0 : ))
297 0 : }
298 0 : };
299 0 : let pg_tenant_only_auth = match args.pg_tenant_only_auth_public_key_path.as_ref() {
300 0 : None => {
301 0 : info!("pg tenant only auth is disabled");
302 0 : None
303 0 : }
304 0 : Some(path) => {
305 0 : info!("loading pg tenant only auth JWT key from {path}");
306 0 : Some(Arc::new(
307 0 : JwtAuth::from_key_path(path).context("failed to load the auth key")?,
308 0 : ))
309 0 : }
310 0 : };
311 0 : let http_auth = match args.http_auth_public_key_path.as_ref() {
312 0 : None => {
313 0 : info!("http auth is disabled");
314 0 : None
315 0 : }
316 0 : Some(path) => {
317 0 : info!("loading http auth JWT key(s) from {path}");
318 0 : let jwt_auth = JwtAuth::from_key_path(path).context("failed to load the auth key")?;
319 0 : Some(Arc::new(SwappableJwtAuth::new(jwt_auth)))
320 0 : }
321 0 : };
322 0 :
323 0 : // Load JWT auth token to connect to other safekeepers for pull_timeline.
324 0 : let sk_auth_token = match var("SAFEKEEPER_AUTH_TOKEN") {
325 0 : Ok(v) => {
326 0 : info!("loaded JWT token for authentication with safekeepers");
327 0 : Some(SecretString::from(v))
328 0 : }
329 0 : Err(VarError::NotPresent) => {
330 0 : info!("no JWT token for authentication with safekeepers detected");
331 0 : None
332 0 : }
333 0 : Err(_) => {
334 0 : warn!("JWT token for authentication with safekeepers is not unicode");
335 0 : None
336 0 : }
337 0 : };
338 0 :
339 0 : let conf = Arc::new(SafeKeeperConf {
340 0 : workdir,
341 0 : my_id: id,
342 0 : listen_pg_addr: args.listen_pg,
343 0 : listen_pg_addr_tenant_only: args.listen_pg_tenant_only,
344 0 : listen_http_addr: args.listen_http,
345 0 : advertise_pg_addr: args.advertise_pg,
346 0 : availability_zone: args.availability_zone,
347 0 : no_sync: args.no_sync,
348 0 : broker_endpoint: args.broker_endpoint,
349 0 : broker_keepalive_interval: args.broker_keepalive_interval,
350 0 : heartbeat_timeout: args.heartbeat_timeout,
351 0 : peer_recovery_enabled: args.peer_recovery,
352 0 : remote_storage: args.remote_storage,
353 0 : max_offloader_lag_bytes: args.max_offloader_lag,
354 0 : wal_backup_enabled: !args.disable_wal_backup,
355 0 : backup_parallel_jobs: args.wal_backup_parallel_jobs,
356 0 : pg_auth,
357 0 : pg_tenant_only_auth,
358 0 : http_auth,
359 0 : sk_auth_token,
360 0 : current_thread_runtime: args.current_thread_runtime,
361 0 : walsenders_keep_horizon: args.walsenders_keep_horizon,
362 0 : partial_backup_timeout: args.partial_backup_timeout,
363 0 : disable_periodic_broker_push: args.disable_periodic_broker_push,
364 0 : enable_offload: args.enable_offload,
365 0 : delete_offloaded_wal: args.delete_offloaded_wal,
366 0 : control_file_save_interval: args.control_file_save_interval,
367 0 : partial_backup_concurrency: args.partial_backup_concurrency,
368 0 : eviction_min_resident: args.eviction_min_resident,
369 0 : wal_reader_fanout: args.wal_reader_fanout,
370 0 : max_delta_for_fanout: args.max_delta_for_fanout,
371 0 : });
372 0 :
373 0 : // initialize sentry if SENTRY_DSN is provided
374 0 : let _sentry_guard = init_sentry(
375 0 : Some(GIT_VERSION.into()),
376 0 : &[("node_id", &conf.my_id.to_string())],
377 0 : );
378 0 : start_safekeeper(conf).await
379 0 : }
380 :
381 : /// Result of joining any of the main tasks: the outer error means the task failed to
382 : /// complete (e.g. panicked); the inner one is the error produced by the task itself.
383 : type JoinTaskRes = Result<anyhow::Result<()>, JoinError>;
384 :
385 0 : async fn start_safekeeper(conf: Arc<SafeKeeperConf>) -> Result<()> {
386 0 : // fsync the datadir to make sure we have a consistent state on disk.
387 0 : if !conf.no_sync {
388 0 : let dfd = File::open(&conf.workdir).context("open datadir for syncfs")?;
389 0 : let started = Instant::now();
390 0 : utils::crashsafe::syncfs(dfd)?;
391 0 : let elapsed = started.elapsed();
392 0 : info!(
393 0 : elapsed_ms = elapsed.as_millis(),
394 0 : "syncfs data directory done"
395 : );
396 0 : }
397 :
398 0 : info!("starting safekeeper WAL service on {}", conf.listen_pg_addr);
399 0 : let pg_listener = tcp_listener::bind(conf.listen_pg_addr.clone()).map_err(|e| {
400 0 : error!("failed to bind to address {}: {}", conf.listen_pg_addr, e);
401 0 : e
402 0 : })?;
403 :
404 0 : let pg_listener_tenant_only =
405 0 : if let Some(listen_pg_addr_tenant_only) = &conf.listen_pg_addr_tenant_only {
406 0 : info!(
407 0 : "starting safekeeper tenant scoped WAL service on {}",
408 : listen_pg_addr_tenant_only
409 : );
410 0 : let listener = tcp_listener::bind(listen_pg_addr_tenant_only.clone()).map_err(|e| {
411 0 : error!(
412 0 : "failed to bind to address {}: {}",
413 : listen_pg_addr_tenant_only, e
414 : );
415 0 : e
416 0 : })?;
417 0 : Some(listener)
418 : } else {
419 0 : None
420 : };
421 :
422 0 : info!(
423 0 : "starting safekeeper HTTP service on {}",
424 0 : conf.listen_http_addr
425 : );
426 0 : let http_listener = tcp_listener::bind(conf.listen_http_addr.clone()).map_err(|e| {
427 0 : error!("failed to bind to address {}: {}", conf.listen_http_addr, e);
428 0 : e
429 0 : })?;
430 :
431 0 : let global_timelines = Arc::new(GlobalTimelines::new(conf.clone()));
432 0 :
433 0 : // Register metrics collector for active timelines. It's important to do this
434 0 : // after daemonizing, otherwise process collector will be upset.
435 0 : let timeline_collector = safekeeper::metrics::TimelineCollector::new(global_timelines.clone());
436 0 : metrics::register_internal(Box::new(timeline_collector))?;
437 :
438 0 : wal_backup::init_remote_storage(&conf).await;
439 :
440 0 : // Keep handles to the main tasks so we can exit if any of them terminates.
441 0 : let mut tasks_handles: FuturesUnordered<BoxFuture<(String, JoinTaskRes)>> =
442 0 : FuturesUnordered::new();
443 0 :
444 0 : // Start wal backup launcher before loading timelines as we'll notify it
445 0 : // through the channel about timelines which need offloading; not draining
446 0 : // the channel would cause a deadlock.
447 0 : let current_thread_rt = conf
448 0 : .current_thread_runtime
449 0 : .then(|| Handle::try_current().expect("no runtime in main"));
450 0 :
451 0 : // Load all timelines from disk to memory.
452 0 : global_timelines.init().await?;
453 :
454 : // Run everything in current thread rt, if asked.
455 0 : if conf.current_thread_runtime {
456 0 : info!("running in current thread runtime");
457 0 : }
458 :
459 0 : let wal_service_handle = current_thread_rt
460 0 : .as_ref()
461 0 : .unwrap_or_else(|| WAL_SERVICE_RUNTIME.handle())
462 0 : .spawn(wal_service::task_main(
463 0 : conf.clone(),
464 0 : pg_listener,
465 0 : Scope::SafekeeperData,
466 0 : global_timelines.clone(),
467 0 : ))
468 0 : // wrap with task name for error reporting
469 0 : .map(|res| ("WAL service main".to_owned(), res));
470 0 : tasks_handles.push(Box::pin(wal_service_handle));
471 0 :
472 0 : let global_timelines_ = global_timelines.clone();
473 0 : let timeline_housekeeping_handle = current_thread_rt
474 0 : .as_ref()
475 0 : .unwrap_or_else(|| WAL_SERVICE_RUNTIME.handle())
476 0 : .spawn(async move {
477 : const TOMBSTONE_TTL: Duration = Duration::from_secs(3600 * 24);
478 : loop {
479 0 : tokio::time::sleep(TOMBSTONE_TTL).await;
480 0 : global_timelines_.housekeeping(&TOMBSTONE_TTL);
481 : }
482 0 : })
483 0 : .map(|res| ("Timeline map housekeeping".to_owned(), res));
484 0 : tasks_handles.push(Box::pin(timeline_housekeeping_handle));
485 :
486 0 : if let Some(pg_listener_tenant_only) = pg_listener_tenant_only {
487 0 : let wal_service_handle = current_thread_rt
488 0 : .as_ref()
489 0 : .unwrap_or_else(|| WAL_SERVICE_RUNTIME.handle())
490 0 : .spawn(wal_service::task_main(
491 0 : conf.clone(),
492 0 : pg_listener_tenant_only,
493 0 : Scope::Tenant,
494 0 : global_timelines.clone(),
495 0 : ))
496 0 : // wrap with task name for error reporting
497 0 : .map(|res| ("WAL service tenant only main".to_owned(), res));
498 0 : tasks_handles.push(Box::pin(wal_service_handle));
499 0 : }
500 :
501 0 : let http_handle = current_thread_rt
502 0 : .as_ref()
503 0 : .unwrap_or_else(|| HTTP_RUNTIME.handle())
504 0 : .spawn(http::task_main(
505 0 : conf.clone(),
506 0 : http_listener,
507 0 : global_timelines.clone(),
508 0 : ))
509 0 : .map(|res| ("HTTP service main".to_owned(), res));
510 0 : tasks_handles.push(Box::pin(http_handle));
511 :
512 0 : let broker_task_handle = current_thread_rt
513 0 : .as_ref()
514 0 : .unwrap_or_else(|| BROKER_RUNTIME.handle())
515 0 : .spawn(
516 0 : broker::task_main(conf.clone(), global_timelines.clone())
517 0 : .instrument(info_span!("broker")),
518 : )
519 0 : .map(|res| ("broker main".to_owned(), res));
520 0 : tasks_handles.push(Box::pin(broker_task_handle));
521 0 :
522 0 : set_build_info_metric(GIT_VERSION, BUILD_TAG);
523 :
524 : // TODO: update tokio-stream, convert to real async Stream with
525 : // SignalStream, map it to obtain the missing signal name, and combine the streams
526 : // into a single stream we can easily wait on.
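// One possible shape of that conversion (a sketch only, assuming the `tokio-stream`
// crate with its `signal` feature; the names below are illustrative and not part of
// this file):
//
// use tokio_stream::{StreamExt as _, StreamMap, wrappers::SignalStream};
// let mut shutdown_signals = StreamMap::new();
// shutdown_signals.insert("SIGQUIT", SignalStream::new(signal(SignalKind::quit())?));
// shutdown_signals.insert("SIGINT", SignalStream::new(signal(SignalKind::interrupt())?));
// shutdown_signals.insert("SIGTERM", SignalStream::new(signal(SignalKind::terminate())?));
// // `next()` then yields the name of whichever signal arrives first.
// if let Some((name, ())) = shutdown_signals.next().await {
//     info!("received {name}, terminating");
// }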
527 0 : let mut sigquit_stream = signal(SignalKind::quit())?;
528 0 : let mut sigint_stream = signal(SignalKind::interrupt())?;
529 0 : let mut sigterm_stream = signal(SignalKind::terminate())?;
530 :
531 : // Notify systemd that we are ready. This is important as currently loading
532 : // timelines takes significant time (~30s in busy regions).
533 0 : if let Err(e) = sd_notify::notify(true, &[NotifyState::Ready]) {
534 0 : warn!("systemd notify failed: {:?}", e);
535 0 : }
536 :
537 0 : tokio::select! {
538 0 : Some((task_name, res)) = tasks_handles.next()=> {
539 0 : error!("{} task failed: {:?}, exiting", task_name, res);
540 0 : std::process::exit(1);
541 : }
542 : // On any shutdown signal, log its receipt and exit. Additionally, handling
543 : // SIGQUIT prevents a coredump.
544 0 : _ = sigquit_stream.recv() => info!("received SIGQUIT, terminating"),
545 0 : _ = sigint_stream.recv() => info!("received SIGINT, terminating"),
546 0 : _ = sigterm_stream.recv() => info!("received SIGTERM, terminating")
547 :
548 : };
549 0 : std::process::exit(0);
550 0 : }
551 :
552 : /// Determine safekeeper id.
553 0 : fn set_id(workdir: &Utf8Path, given_id: Option<NodeId>) -> Result<NodeId> {
554 0 : let id_file_path = workdir.join(ID_FILE_NAME);
555 0 :
556 0 : let my_id: NodeId;
557 0 : // If a file with the ID exists, read it in; otherwise use the one passed in.
558 0 : match fs::read(&id_file_path) {
559 0 : Ok(id_serialized) => {
560 0 : my_id = NodeId(
561 0 : std::str::from_utf8(&id_serialized)
562 0 : .context("failed to parse safekeeper id")?
563 0 : .parse()
564 0 : .context("failed to parse safekeeper id")?,
565 : );
566 0 : if let Some(given_id) = given_id {
567 0 : if given_id != my_id {
568 0 : bail!(
569 0 : "safekeeper already initialized with id {}, can't set {}",
570 0 : my_id,
571 0 : given_id
572 0 : );
573 0 : }
574 0 : }
575 0 : info!("safekeeper ID {}", my_id);
576 : }
577 0 : Err(error) => match error.kind() {
578 : ErrorKind::NotFound => {
579 0 : my_id = if let Some(given_id) = given_id {
580 0 : given_id
581 : } else {
582 0 : bail!("safekeeper id is not specified");
583 : };
584 0 : let mut f = File::create(&id_file_path)
585 0 : .with_context(|| format!("Failed to create id file at {id_file_path:?}"))?;
586 0 : f.write_all(my_id.to_string().as_bytes())?;
587 0 : f.sync_all()?;
588 0 : info!("initialized safekeeper id {}", my_id);
589 : }
590 : _ => {
591 0 : return Err(error.into());
592 : }
593 : },
594 : }
595 0 : Ok(my_id)
596 0 : }
597 :
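// For illustration (hypothetical values), a flag like
//   --remote-storage '{bucket_name = "some-bucket", bucket_region = "eu-west-1"}'
// reaches this function as the inline TOML table string and is turned into a
// RemoteStorageConfig.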
598 0 : fn parse_remote_storage(storage_conf: &str) -> anyhow::Result<RemoteStorageConfig> {
599 0 : RemoteStorageConfig::from_toml(&storage_conf.parse()?)
600 0 : }
601 :
602 : #[test]
603 1 : fn verify_cli() {
604 : use clap::CommandFactory;
605 1 : Args::command().debug_assert()
606 1 : }
|