Line data Source code
1 : //
2 : // Main entry point for the safekeeper executable
3 : //
4 : use std::fs::{self, File};
5 : use std::io::{ErrorKind, Write};
6 : use std::str::FromStr;
7 : use std::sync::Arc;
8 : use std::time::{Duration, Instant};
9 :
10 : use anyhow::{Context, Result, bail};
11 : use camino::{Utf8Path, Utf8PathBuf};
12 : use clap::{ArgAction, Parser};
13 : use futures::future::BoxFuture;
14 : use futures::stream::FuturesUnordered;
15 : use futures::{FutureExt, StreamExt};
16 : use http_utils::tls_certs::ReloadingCertificateResolver;
17 : use metrics::set_build_info_metric;
18 : use remote_storage::RemoteStorageConfig;
19 : use safekeeper::defaults::{
20 : DEFAULT_CONTROL_FILE_SAVE_INTERVAL, DEFAULT_EVICTION_MIN_RESIDENT, DEFAULT_HEARTBEAT_TIMEOUT,
21 : DEFAULT_HTTP_LISTEN_ADDR, DEFAULT_MAX_OFFLOADER_LAG_BYTES,
22 : DEFAULT_MAX_REELECT_OFFLOADER_LAG_BYTES, DEFAULT_MAX_TIMELINE_DISK_USAGE_BYTES,
23 : DEFAULT_PARTIAL_BACKUP_CONCURRENCY, DEFAULT_PARTIAL_BACKUP_TIMEOUT, DEFAULT_PG_LISTEN_ADDR,
24 : DEFAULT_SSL_CERT_FILE, DEFAULT_SSL_CERT_RELOAD_PERIOD, DEFAULT_SSL_KEY_FILE,
25 : };
26 : use safekeeper::hadron;
27 : use safekeeper::wal_backup::WalBackup;
28 : use safekeeper::{
29 : BACKGROUND_RUNTIME, BROKER_RUNTIME, GlobalTimelines, HTTP_RUNTIME, SafeKeeperConf,
30 : WAL_SERVICE_RUNTIME, broker, control_file, http, wal_service,
31 : };
32 : use sd_notify::NotifyState;
33 : use storage_broker::{DEFAULT_ENDPOINT, Uri};
34 : use tokio::runtime::Handle;
35 : use tokio::signal::unix::{SignalKind, signal};
36 : use tokio::task::JoinError;
37 : use tracing::*;
38 : use utils::auth::{JwtAuth, Scope, SwappableJwtAuth};
39 : use utils::id::NodeId;
40 : use utils::logging::{self, LogFormat, SecretString};
41 : use utils::metrics_collector::{METRICS_COLLECTION_INTERVAL, METRICS_COLLECTOR};
42 : use utils::sentry_init::init_sentry;
43 : use utils::{pid_file, project_build_tag, project_git_version, tcp_listener};
44 :
45 : #[global_allocator]
46 : static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
47 :
48 : /// Configure jemalloc to profile heap allocations by sampling stack traces every 2 MB (1 << 21).
49 : /// This adds roughly 3% overhead for allocations on average, which is acceptable considering
50 : /// performance-sensitive code will avoid allocations as far as possible anyway.
51 : #[allow(non_upper_case_globals)]
52 : #[unsafe(export_name = "malloc_conf")]
53 : pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:21\0";
54 :
55 : const PID_FILE_NAME: &str = "safekeeper.pid";
56 : const ID_FILE_NAME: &str = "safekeeper.id";
57 :
58 : project_git_version!(GIT_VERSION);
59 : project_build_tag!(BUILD_TAG);
60 :
61 : const FEATURES: &[&str] = &[
62 : #[cfg(feature = "testing")]
63 : "testing",
64 : ];
65 :
66 0 : fn version() -> String {
67 0 : format!(
68 0 : "{GIT_VERSION} failpoints: {}, features: {:?}",
69 0 : fail::has_failpoints(),
70 : FEATURES,
71 : )
72 0 : }
73 :
74 : const ABOUT: &str = r#"
75 : A fleet of safekeepers is responsible for reliably storing WAL received from
76 : compute, passing it through consensus (mitigating potential split-brain among
77 : computes), and serving the hardened part further downstream to pageserver(s).
78 : "#;
79 :
80 : #[derive(Parser)]
81 : #[command(name = "Neon safekeeper", version = GIT_VERSION, about = ABOUT, long_about = None)]
82 : struct Args {
83 : /// Path to the safekeeper data directory.
84 : #[arg(short = 'D', long, default_value = "./")]
85 : datadir: Utf8PathBuf,
86 : /// Safekeeper node id.
87 : #[arg(long)]
88 : id: Option<u64>,
89 : /// Initialize safekeeper with given id and exit.
90 : #[arg(long)]
91 : init: bool,
92 : /// Listen endpoint for receiving/sending WAL in the form host:port.
93 : #[arg(short, long, default_value = DEFAULT_PG_LISTEN_ADDR)]
94 : listen_pg: String,
95 : /// Listen endpoint for receiving/sending WAL in the form host:port, allowing
96 : /// only tenant-scoped auth tokens. Has no effect if auth is disabled.
97 : #[arg(long, default_value = None, verbatim_doc_comment)]
98 : listen_pg_tenant_only: Option<String>,
99 : /// Listen http endpoint for management and metrics in the form host:port.
100 : #[arg(long, default_value = DEFAULT_HTTP_LISTEN_ADDR)]
101 : listen_http: String,
102 : /// Listen https endpoint for management and metrics in the form host:port.
103 : #[arg(long, default_value = None)]
104 : listen_https: Option<String>,
105 : /// Advertised endpoint for receiving/sending WAL in the form host:port. If not
106 : /// specified, listen_pg is advertised instead.
107 : #[arg(long, default_value = None)]
108 : advertise_pg: Option<String>,
109 : /// Availability zone of the safekeeper.
110 : #[arg(long)]
111 : availability_zone: Option<String>,
112 : /// Do not wait for changes to be written safely to disk. Unsafe.
113 : #[arg(short, long)]
114 : no_sync: bool,
115 : /// Dump control file at path specified by this argument and exit.
116 : #[arg(long)]
117 : dump_control_file: Option<Utf8PathBuf>,
118 : /// Broker endpoint for storage node coordination in the form
119 : /// http[s]://host:port. With the https scheme a TLS connection is
120 : /// established; plaintext otherwise.
121 : #[arg(long, default_value = DEFAULT_ENDPOINT, verbatim_doc_comment)]
122 : broker_endpoint: Uri,
123 : /// Broker keepalive interval.
124 : #[arg(long, value_parser = humantime::parse_duration, default_value = storage_broker::DEFAULT_KEEPALIVE_INTERVAL)]
125 : broker_keepalive_interval: Duration,
126 : /// A peer safekeeper is considered dead if no heartbeats have been received
127 : /// from it during this period, passed as a human-readable duration.
128 : #[arg(long, value_parser = humantime::parse_duration, default_value = DEFAULT_HEARTBEAT_TIMEOUT, verbatim_doc_comment)]
129 : heartbeat_timeout: Duration,
130 : /// Enable/disable peer recovery.
131 : #[arg(long, default_value = "false", action=ArgAction::Set)]
132 : peer_recovery: bool,
133 : /// Remote storage configuration for WAL backup (offloading to s3) as TOML
134 : /// inline table, e.g.
135 : /// {max_concurrent_syncs = 17, max_sync_errors = 13, bucket_name = "<BUCKETNAME>", bucket_region = "<REGION>", concurrency_limit = 119}
136 : /// Safekeeper offloads WAL to
137 : /// [prefix_in_bucket/]<tenant_id>/<timeline_id>/<segment_file>, mirroring
138 : /// the structure on the file system.
139 : #[arg(long, value_parser = parse_remote_storage, verbatim_doc_comment)]
140 : remote_storage: Option<RemoteStorageConfig>,
141 : /// Safekeeper won't be elected for WAL offloading if it is lagging by more than this value in bytes
142 : #[arg(long, default_value_t = DEFAULT_MAX_OFFLOADER_LAG_BYTES)]
143 : max_offloader_lag: u64,
144 : /* BEGIN_HADRON */
145 : /// Safekeeper will re-elect a new offloader if the current offloader's backup is lagging by more than this value in bytes
146 : #[arg(long, default_value_t = DEFAULT_MAX_REELECT_OFFLOADER_LAG_BYTES)]
147 : max_reelect_offloader_lag_bytes: u64,
148 : /// Safekeeper will stop accepting new WALs if the timeline disk usage exceeds this value in bytes.
149 : /// Setting this value to 0 disables the limit.
150 : #[arg(long, default_value_t = DEFAULT_MAX_TIMELINE_DISK_USAGE_BYTES)]
151 : max_timeline_disk_usage_bytes: u64,
152 : /* END_HADRON */
153 : /// Maximum number of WAL segments offloaded to remote storage in parallel.
154 : #[arg(long, default_value = "5")]
155 : wal_backup_parallel_jobs: usize,
156 : /// Disable WAL backup to s3. When disabled, safekeeper removes WAL while
157 : /// ignoring the WAL backup horizon.
158 : #[arg(long)]
159 : disable_wal_backup: bool,
160 : /// If given, enables auth on incoming connections to WAL service endpoint
161 : /// (--listen-pg). Value specifies path to a .pem public key used for
162 : /// validation of JWT tokens. Empty string is allowed and means disabling
163 : /// auth.
164 : #[arg(long, verbatim_doc_comment, value_parser = opt_pathbuf_parser)]
165 : pg_auth_public_key_path: Option<Utf8PathBuf>,
166 : /// If given, enables auth on incoming connections to the tenant-only WAL
167 : /// service endpoint (--listen-pg-tenant-only). Value specifies path to a
168 : /// .pem public key used for validation of JWT tokens. Empty string is
169 : /// allowed and means disabling auth.
170 : #[arg(long, verbatim_doc_comment, value_parser = opt_pathbuf_parser)]
171 : pg_tenant_only_auth_public_key_path: Option<Utf8PathBuf>,
172 : /// If given, enables auth on incoming connections to http management
173 : /// service endpoint (--listen-http). Value specifies path to a .pem public
174 : /// key used for validation of JWT tokens. Empty string is allowed and
175 : /// means disabling auth.
176 : #[arg(long, verbatim_doc_comment, value_parser = opt_pathbuf_parser)]
177 : http_auth_public_key_path: Option<Utf8PathBuf>,
178 : /// Format for logging, either 'plain' or 'json'.
179 : #[arg(long, default_value = "plain")]
180 : log_format: String,
181 : /// Run everything in single threaded current thread runtime, might be
182 : /// useful for debugging.
183 : #[arg(long)]
184 : current_thread_runtime: bool,
185 : /// Keep horizon for walsenders, i.e. don't remove WAL segments that are
186 : /// still needed for existing replication connections.
187 : #[arg(long)]
188 : walsenders_keep_horizon: bool,
189 : /// Controls how long backup waits before uploading the partial segment.
190 : #[arg(long, value_parser = humantime::parse_duration, default_value = DEFAULT_PARTIAL_BACKUP_TIMEOUT, verbatim_doc_comment)]
191 : partial_backup_timeout: Duration,
192 : /// Disable the task that pushes messages to the broker every second.
193 : /// Intended for use in tests.
194 : #[arg(long)]
195 : disable_periodic_broker_push: bool,
196 : /// Enable automatic switching to offloaded state.
197 : #[arg(long)]
198 : enable_offload: bool,
199 : /// Delete local WAL files after offloading. When disabled, they will be left on disk.
200 : #[arg(long)]
201 : delete_offloaded_wal: bool,
202 : /// Pending updates to the control file will be automatically saved after this interval.
203 : #[arg(long, value_parser = humantime::parse_duration, default_value = DEFAULT_CONTROL_FILE_SAVE_INTERVAL)]
204 : control_file_save_interval: Duration,
205 : /// Number of allowed concurrent uploads of partial segments to remote storage.
206 : #[arg(long, default_value = DEFAULT_PARTIAL_BACKUP_CONCURRENCY)]
207 : partial_backup_concurrency: usize,
208 : /// How long a timeline must be resident before it is eligible for eviction.
209 : /// Usually, a timeline has to wait for `partial_backup_timeout` before becoming eligible for eviction,
210 : /// but if a timeline is un-evicted and then _not_ written to, it would immediately flap back to being evicted again,
211 : /// if it weren't for `eviction_min_resident` preventing that.
212 : ///
213 : /// Also defines interval for eviction retries.
214 : #[arg(long, value_parser = humantime::parse_duration, default_value = DEFAULT_EVICTION_MIN_RESIDENT)]
215 : eviction_min_resident: Duration,
216 : /// Enable fanning out WAL to different shards from the same reader
217 : #[arg(long)]
218 : wal_reader_fanout: bool,
219 : /// Only fan out the WAL reader if the absolute delta between the new requested position
220 : /// and the current position of the reader is smaller than this value.
221 : #[arg(long)]
222 : max_delta_for_fanout: Option<u64>,
223 : /// Path to a file with the certificate's private key for the https API.
224 : #[arg(long, default_value = DEFAULT_SSL_KEY_FILE)]
225 : ssl_key_file: Utf8PathBuf,
226 : /// Path to a file with an X509 certificate for the https API.
227 : #[arg(long, default_value = DEFAULT_SSL_CERT_FILE)]
228 : ssl_cert_file: Utf8PathBuf,
229 : /// Period to reload certificate and private key from files.
230 : #[arg(long, value_parser = humantime::parse_duration, default_value = DEFAULT_SSL_CERT_RELOAD_PERIOD)]
231 : ssl_cert_reload_period: Duration,
232 : /// Trusted root CA certificates to use in https APIs.
233 : #[arg(long)]
234 : ssl_ca_file: Option<Utf8PathBuf>,
235 : /// Flag to use https for requests to peer's safekeeper API.
236 : #[arg(long)]
237 : use_https_safekeeper_api: bool,
238 : /// Path to the JWT auth token used to authenticate with other safekeepers.
239 : #[arg(long)]
240 : auth_token_path: Option<Utf8PathBuf>,
241 :
242 : /// Enable TLS in WAL service API.
243 : /// Does not force TLS: the client negotiates TLS usage during the handshake.
244 : /// Uses key and certificate from ssl_key_file/ssl_cert_file.
245 : #[arg(long)]
246 : enable_tls_wal_service_api: bool,
247 :
248 : /// Controls whether to collect all metrics on each scrape or to return potentially stale
249 : /// results.
250 : #[arg(long, default_value_t = true)]
251 : force_metric_collection_on_scrape: bool,
252 :
253 : /// Run in development mode (disables security checks)
254 : #[arg(long, help = "Run in development mode (disables security checks)")]
255 : dev: bool,
256 : /* BEGIN_HADRON */
257 : #[arg(long)]
258 : enable_pull_timeline_on_startup: bool,
259 : /* END_HADRON */
260 : }
261 :
262 : // Like PathBufValueParser, but allows empty string.
263 0 : fn opt_pathbuf_parser(s: &str) -> Result<Utf8PathBuf, String> {
264 0 : Ok(Utf8PathBuf::from_str(s).unwrap())
265 0 : }
266 :
267 : #[tokio::main(flavor = "current_thread")]
268 0 : async fn main() -> anyhow::Result<()> {
269 : // We want to allow multiple occurrences of the same arg (taking the last) so
270 : // that neon_local could generate a command with defaults + overrides without
271 : // getting 'argument cannot be used multiple times' error. This seems to be
272 : // impossible with pure Derive API, so convert struct to Command, modify it,
273 : // parse arguments, and then fill the struct back.
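// (Illustrative example, not taken from the original sources: invoking the binary as
// `safekeeper --listen-pg 0.0.0.0:5454 --listen-pg 127.0.0.1:5454` is accepted, and
// the last occurrence of the repeated flag wins.)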
274 0 : let cmd = <Args as clap::CommandFactory>::command()
275 0 : .args_override_self(true)
276 0 : .version(version());
277 0 : let mut matches = cmd.get_matches();
278 0 : let mut args = <Args as clap::FromArgMatches>::from_arg_matches_mut(&mut matches)?;
279 :
280 : // It is not straightforward to make opt_pathbuf_parser return Option<Utf8PathBuf>,
281 : // so turn an empty string into None post factum.
282 0 : if let Some(pb) = &args.pg_auth_public_key_path {
283 0 : if pb.as_os_str().is_empty() {
284 0 : args.pg_auth_public_key_path = None;
285 0 : }
286 0 : }
287 0 : if let Some(pb) = &args.pg_tenant_only_auth_public_key_path {
288 0 : if pb.as_os_str().is_empty() {
289 0 : args.pg_tenant_only_auth_public_key_path = None;
290 0 : }
291 0 : }
292 0 : if let Some(pb) = &args.http_auth_public_key_path {
293 0 : if pb.as_os_str().is_empty() {
294 0 : args.http_auth_public_key_path = None;
295 0 : }
296 0 : }
297 :
298 0 : if let Some(path) = args.dump_control_file {
299 0 : let state = control_file::FileStorage::load_control_file(path)?;
300 0 : let json = serde_json::to_string(&state)?;
301 0 : print!("{json}");
302 0 : return Ok(());
303 0 : }
304 :
305 : // important to keep the order of:
306 : // 1. init logging
307 : // 2. tracing panic hook
308 : // 3. sentry
309 0 : logging::init(
310 0 : LogFormat::from_config(&args.log_format)?,
311 0 : logging::TracingErrorLayerEnablement::Disabled,
312 0 : logging::Output::Stdout,
313 0 : )?;
314 0 : logging::replace_panic_hook_with_tracing_panic_hook().forget();
315 0 : info!("version: {GIT_VERSION}");
316 0 : info!("buld_tag: {BUILD_TAG}");
317 :
318 0 : let args_workdir = &args.datadir;
319 0 : let workdir = args_workdir.canonicalize_utf8().with_context(|| {
320 0 : format!("Failed to get the absolute path for input workdir {args_workdir:?}")
321 0 : })?;
322 :
323 : // Change into the data directory.
324 0 : std::env::set_current_dir(&workdir)?;
325 :
326 : // Prevent running multiple safekeepers on the same directory
327 0 : let lock_file_path = workdir.join(PID_FILE_NAME);
328 0 : let lock_file =
329 0 : pid_file::claim_for_current_process(&lock_file_path).context("claim pid file")?;
330 0 : info!("claimed pid file at {lock_file_path:?}");
331 : // Ensure that the lock file is held even if the main thread of the process panics:
332 : // we need to release the lock file only when the current process is gone.
333 0 : std::mem::forget(lock_file);
334 :
335 : // Set or read our ID.
336 0 : let id = set_id(&workdir, args.id.map(NodeId))?;
337 0 : if args.init {
338 0 : return Ok(());
339 0 : }
340 :
341 0 : let pg_auth = match args.pg_auth_public_key_path.as_ref() {
342 : None => {
343 0 : info!("pg auth is disabled");
344 0 : None
345 : }
346 0 : Some(path) => {
347 0 : info!("loading pg auth JWT key from {path}");
348 0 : Some(Arc::new(
349 0 : JwtAuth::from_key_path(path).context("failed to load the auth key")?,
350 : ))
351 : }
352 : };
353 0 : let pg_tenant_only_auth = match args.pg_tenant_only_auth_public_key_path.as_ref() {
354 : None => {
355 0 : info!("pg tenant only auth is disabled");
356 0 : None
357 : }
358 0 : Some(path) => {
359 0 : info!("loading pg tenant only auth JWT key from {path}");
360 0 : Some(Arc::new(
361 0 : JwtAuth::from_key_path(path).context("failed to load the auth key")?,
362 : ))
363 : }
364 : };
365 0 : let http_auth = match args.http_auth_public_key_path.as_ref() {
366 : None => {
367 0 : info!("http auth is disabled");
368 0 : None
369 : }
370 0 : Some(path) => {
371 0 : info!("loading http auth JWT key(s) from {path}");
372 0 : let jwt_auth = JwtAuth::from_key_path(path).context("failed to load the auth key")?;
373 0 : Some(Arc::new(SwappableJwtAuth::new(jwt_auth)))
374 : }
375 : };
376 :
377 : // Load JWT auth token to connect to other safekeepers for pull_timeline.
378 0 : let sk_auth_token = if let Some(auth_token_path) = args.auth_token_path.as_ref() {
379 0 : info!("loading JWT token for authentication with safekeepers from {auth_token_path}");
380 0 : let auth_token = tokio::fs::read_to_string(auth_token_path).await?;
381 0 : Some(SecretString::from(auth_token.trim().to_owned()))
382 : } else {
383 0 : info!("no JWT token for authentication with safekeepers detected");
384 0 : None
385 : };
386 :
387 0 : let ssl_ca_certs = match args.ssl_ca_file.as_ref() {
388 0 : Some(ssl_ca_file) => {
389 0 : tracing::info!("Using ssl root CA file: {ssl_ca_file:?}");
390 0 : let buf = tokio::fs::read(ssl_ca_file).await?;
391 0 : pem::parse_many(&buf)?
392 0 : .into_iter()
393 0 : .filter(|pem| pem.tag() == "CERTIFICATE")
394 0 : .collect()
395 : }
396 0 : None => Vec::new(),
397 : };
398 :
399 0 : let conf = Arc::new(SafeKeeperConf {
400 0 : workdir,
401 0 : my_id: id,
402 0 : listen_pg_addr: args.listen_pg,
403 0 : listen_pg_addr_tenant_only: args.listen_pg_tenant_only,
404 0 : listen_http_addr: args.listen_http,
405 0 : listen_https_addr: args.listen_https,
406 0 : advertise_pg_addr: args.advertise_pg,
407 0 : availability_zone: args.availability_zone,
408 0 : no_sync: args.no_sync,
409 0 : broker_endpoint: args.broker_endpoint,
410 0 : broker_keepalive_interval: args.broker_keepalive_interval,
411 0 : heartbeat_timeout: args.heartbeat_timeout,
412 0 : peer_recovery_enabled: args.peer_recovery,
413 0 : remote_storage: args.remote_storage,
414 0 : max_offloader_lag_bytes: args.max_offloader_lag,
415 0 : /* BEGIN_HADRON */
416 0 : max_reelect_offloader_lag_bytes: args.max_reelect_offloader_lag_bytes,
417 0 : max_timeline_disk_usage_bytes: args.max_timeline_disk_usage_bytes,
418 0 : /* END_HADRON */
419 0 : wal_backup_enabled: !args.disable_wal_backup,
420 0 : backup_parallel_jobs: args.wal_backup_parallel_jobs,
421 0 : pg_auth,
422 0 : pg_tenant_only_auth,
423 0 : http_auth,
424 0 : sk_auth_token,
425 0 : current_thread_runtime: args.current_thread_runtime,
426 0 : walsenders_keep_horizon: args.walsenders_keep_horizon,
427 0 : partial_backup_timeout: args.partial_backup_timeout,
428 0 : disable_periodic_broker_push: args.disable_periodic_broker_push,
429 0 : enable_offload: args.enable_offload,
430 0 : delete_offloaded_wal: args.delete_offloaded_wal,
431 0 : control_file_save_interval: args.control_file_save_interval,
432 0 : partial_backup_concurrency: args.partial_backup_concurrency,
433 0 : eviction_min_resident: args.eviction_min_resident,
434 0 : wal_reader_fanout: args.wal_reader_fanout,
435 0 : max_delta_for_fanout: args.max_delta_for_fanout,
436 0 : ssl_key_file: args.ssl_key_file,
437 0 : ssl_cert_file: args.ssl_cert_file,
438 0 : ssl_cert_reload_period: args.ssl_cert_reload_period,
439 0 : ssl_ca_certs,
440 0 : use_https_safekeeper_api: args.use_https_safekeeper_api,
441 0 : enable_tls_wal_service_api: args.enable_tls_wal_service_api,
442 0 : force_metric_collection_on_scrape: args.force_metric_collection_on_scrape,
443 0 : /* BEGIN_HADRON */
444 0 : advertise_pg_addr_tenant_only: None,
445 0 : enable_pull_timeline_on_startup: args.enable_pull_timeline_on_startup,
446 0 : hcc_base_url: None,
447 0 : /* END_HADRON */
448 0 : });
449 :
450 : // initialize sentry if SENTRY_DSN is provided
451 0 : let _sentry_guard = init_sentry(
452 0 : Some(GIT_VERSION.into()),
453 0 : &[("node_id", &conf.my_id.to_string())],
454 : );
455 0 : start_safekeeper(conf).await
456 0 : }
457 :
458 : /// Result of joining any of the main tasks: the outer error means the task failed to
459 : /// complete (e.g. panicked); the inner one is the error produced by the task itself.
460 : type JoinTaskRes = Result<anyhow::Result<()>, JoinError>;
461 :
462 0 : async fn start_safekeeper(conf: Arc<SafeKeeperConf>) -> Result<()> {
463 : // fsync the datadir to make sure we have a consistent state on disk.
464 0 : if !conf.no_sync {
465 0 : let dfd = File::open(&conf.workdir).context("open datadir for syncfs")?;
466 0 : let started = Instant::now();
467 0 : utils::crashsafe::syncfs(dfd)?;
468 0 : let elapsed = started.elapsed();
469 0 : info!(
470 0 : elapsed_ms = elapsed.as_millis(),
471 0 : "syncfs data directory done"
472 : );
473 0 : }
474 :
475 0 : info!("starting safekeeper WAL service on {}", conf.listen_pg_addr);
476 0 : let pg_listener = tcp_listener::bind(conf.listen_pg_addr.clone()).map_err(|e| {
477 0 : error!("failed to bind to address {}: {}", conf.listen_pg_addr, e);
478 0 : e
479 0 : })?;
480 :
481 0 : let pg_listener_tenant_only =
482 0 : if let Some(listen_pg_addr_tenant_only) = &conf.listen_pg_addr_tenant_only {
483 0 : info!(
484 0 : "starting safekeeper tenant scoped WAL service on {}",
485 : listen_pg_addr_tenant_only
486 : );
487 0 : let listener = tcp_listener::bind(listen_pg_addr_tenant_only.clone()).map_err(|e| {
488 0 : error!(
489 0 : "failed to bind to address {}: {}",
490 : listen_pg_addr_tenant_only, e
491 : );
492 0 : e
493 0 : })?;
494 0 : Some(listener)
495 : } else {
496 0 : None
497 : };
498 :
499 0 : info!(
500 0 : "starting safekeeper HTTP service on {}",
501 0 : conf.listen_http_addr
502 : );
503 0 : let http_listener = tcp_listener::bind(conf.listen_http_addr.clone()).map_err(|e| {
504 0 : error!("failed to bind to address {}: {}", conf.listen_http_addr, e);
505 0 : e
506 0 : })?;
507 :
508 0 : let https_listener = match conf.listen_https_addr.as_ref() {
509 0 : Some(listen_https_addr) => {
510 0 : info!("starting safekeeper HTTPS service on {}", listen_https_addr);
511 0 : Some(tcp_listener::bind(listen_https_addr).map_err(|e| {
512 0 : error!("failed to bind to address {}: {}", listen_https_addr, e);
513 0 : e
514 0 : })?)
515 : }
516 0 : None => None,
517 : };
518 :
519 0 : let wal_backup = Arc::new(WalBackup::new(&conf).await?);
520 :
521 0 : let global_timelines = Arc::new(GlobalTimelines::new(conf.clone(), wal_backup.clone()));
522 :
523 : // Register metrics collector for active timelines. It's important to do this
524 : // after daemonizing, otherwise process collector will be upset.
525 0 : let timeline_collector = safekeeper::metrics::TimelineCollector::new(global_timelines.clone());
526 0 : metrics::register_internal(Box::new(timeline_collector))?;
527 :
528 : // Keep handles to main tasks to die if any of them disappears.
529 0 : let mut tasks_handles: FuturesUnordered<BoxFuture<(String, JoinTaskRes)>> =
530 0 : FuturesUnordered::new();
531 :
532 : // Start the WAL backup launcher before loading timelines, as we'll notify it
533 : // through the channel about timelines which need offloading; not draining
534 : // the channel would cause a deadlock.
535 0 : let current_thread_rt = conf
536 0 : .current_thread_runtime
537 0 : .then(|| Handle::try_current().expect("no runtime in main"));
538 :
539 : // Load all timelines from disk to memory.
540 0 : global_timelines.init().await?;
541 :
542 : /* BEGIN_HADRON */
543 0 : if conf.enable_pull_timeline_on_startup && global_timelines.timelines_count() == 0 {
544 0 : match hadron::hcc_pull_timelines(&conf, global_timelines.clone()).await {
545 : Ok(_) => {
546 0 : info!("Successfully pulled all timelines from peer safekeepers");
547 : }
548 0 : Err(e) => {
549 0 : error!("Failed to pull timelines from peer safekeepers: {:?}", e);
550 0 : return Err(e);
551 : }
552 : }
553 0 : }
554 : /* END_HADRON */
555 :
556 : // Run everything in current thread rt, if asked.
557 0 : if conf.current_thread_runtime {
558 0 : info!("running in current thread runtime");
559 0 : }
560 :
561 0 : let tls_server_config = if conf.listen_https_addr.is_some() || conf.enable_tls_wal_service_api {
562 0 : let ssl_key_file = conf.ssl_key_file.clone();
563 0 : let ssl_cert_file = conf.ssl_cert_file.clone();
564 0 : let ssl_cert_reload_period = conf.ssl_cert_reload_period;
565 :
566 : // Create resolver in BACKGROUND_RUNTIME, so the background certificate reloading
567 : // task is run in this runtime.
568 0 : let cert_resolver = current_thread_rt
569 0 : .as_ref()
570 0 : .unwrap_or_else(|| BACKGROUND_RUNTIME.handle())
571 0 : .spawn(async move {
572 0 : ReloadingCertificateResolver::new(
573 0 : "main",
574 0 : &ssl_key_file,
575 0 : &ssl_cert_file,
576 0 : ssl_cert_reload_period,
577 0 : )
578 0 : .await
579 0 : })
580 0 : .await??;
581 :
582 0 : let config = rustls::ServerConfig::builder()
583 0 : .with_no_client_auth()
584 0 : .with_cert_resolver(cert_resolver);
585 :
586 0 : Some(Arc::new(config))
587 : } else {
588 0 : None
589 : };
590 :
591 0 : let wal_service_handle = current_thread_rt
592 0 : .as_ref()
593 0 : .unwrap_or_else(|| WAL_SERVICE_RUNTIME.handle())
594 0 : .spawn(wal_service::task_main(
595 0 : conf.clone(),
596 0 : pg_listener,
597 0 : Scope::SafekeeperData,
598 0 : conf.enable_tls_wal_service_api
599 0 : .then(|| tls_server_config.clone())
600 0 : .flatten(),
601 0 : global_timelines.clone(),
602 : ))
603 : // wrap with task name for error reporting
604 0 : .map(|res| ("WAL service main".to_owned(), res));
605 0 : tasks_handles.push(Box::pin(wal_service_handle));
606 :
607 0 : let global_timelines_ = global_timelines.clone();
608 0 : let timeline_housekeeping_handle = current_thread_rt
609 0 : .as_ref()
610 0 : .unwrap_or_else(|| WAL_SERVICE_RUNTIME.handle())
611 0 : .spawn(async move {
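// Periodic housekeeping of the global timeline map; TOMBSTONE_TTL bounds both the
// sleep interval and the age passed to housekeeping() (presumably how long deletion
// tombstones are retained before being dropped).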
612 : const TOMBSTONE_TTL: Duration = Duration::from_secs(3600 * 24);
613 : loop {
614 0 : tokio::time::sleep(TOMBSTONE_TTL).await;
615 0 : global_timelines_.housekeeping(&TOMBSTONE_TTL);
616 : }
617 : })
618 0 : .map(|res| ("Timeline map housekeeping".to_owned(), res));
619 0 : tasks_handles.push(Box::pin(timeline_housekeeping_handle));
620 :
621 0 : if let Some(pg_listener_tenant_only) = pg_listener_tenant_only {
622 0 : let wal_service_handle = current_thread_rt
623 0 : .as_ref()
624 0 : .unwrap_or_else(|| WAL_SERVICE_RUNTIME.handle())
625 0 : .spawn(wal_service::task_main(
626 0 : conf.clone(),
627 0 : pg_listener_tenant_only,
628 0 : Scope::Tenant,
629 0 : conf.enable_tls_wal_service_api
630 0 : .then(|| tls_server_config.clone())
631 0 : .flatten(),
632 0 : global_timelines.clone(),
633 : ))
634 : // wrap with task name for error reporting
635 0 : .map(|res| ("WAL service tenant only main".to_owned(), res));
636 0 : tasks_handles.push(Box::pin(wal_service_handle));
637 0 : }
638 :
639 0 : let http_handle = current_thread_rt
640 0 : .as_ref()
641 0 : .unwrap_or_else(|| HTTP_RUNTIME.handle())
642 0 : .spawn(http::task_main_http(
643 0 : conf.clone(),
644 0 : http_listener,
645 0 : global_timelines.clone(),
646 : ))
647 0 : .map(|res| ("HTTP service main".to_owned(), res));
648 0 : tasks_handles.push(Box::pin(http_handle));
649 :
650 0 : if let Some(https_listener) = https_listener {
651 0 : let https_handle = current_thread_rt
652 0 : .as_ref()
653 0 : .unwrap_or_else(|| HTTP_RUNTIME.handle())
654 0 : .spawn(http::task_main_https(
655 0 : conf.clone(),
656 0 : https_listener,
657 0 : tls_server_config.expect("tls_server_config is set earlier if https is enabled"),
658 0 : global_timelines.clone(),
659 : ))
660 0 : .map(|res| ("HTTPS service main".to_owned(), res));
661 0 : tasks_handles.push(Box::pin(https_handle));
662 0 : }
663 :
664 0 : let broker_task_handle = current_thread_rt
665 0 : .as_ref()
666 0 : .unwrap_or_else(|| BROKER_RUNTIME.handle())
667 0 : .spawn(
668 0 : broker::task_main(conf.clone(), global_timelines.clone())
669 0 : .instrument(info_span!("broker")),
670 : )
671 0 : .map(|res| ("broker main".to_owned(), res));
672 0 : tasks_handles.push(Box::pin(broker_task_handle));
673 :
674 : /* BEGIN_HADRON */
675 0 : if conf.force_metric_collection_on_scrape {
676 0 : let metrics_handle = current_thread_rt
677 0 : .as_ref()
678 0 : .unwrap_or_else(|| BACKGROUND_RUNTIME.handle())
679 0 : .spawn(async move {
680 0 : let mut interval: tokio::time::Interval =
681 0 : tokio::time::interval(METRICS_COLLECTION_INTERVAL);
682 : loop {
683 0 : interval.tick().await;
684 0 : tokio::task::spawn_blocking(|| {
685 0 : METRICS_COLLECTOR.run_once(true);
686 0 : });
687 : }
688 : })
689 0 : .map(|res| ("broker main".to_owned(), res));
690 0 : tasks_handles.push(Box::pin(metrics_handle));
691 0 : }
692 : /* END_HADRON */
693 :
694 0 : set_build_info_metric(GIT_VERSION, BUILD_TAG);
695 :
696 : // TODO: update tokio-stream, convert to a real async Stream with
697 : // SignalStream, map it to obtain the missing signal name, and combine the
698 : // streams into a single stream we can easily select on.
699 0 : let mut sigquit_stream = signal(SignalKind::quit())?;
700 0 : let mut sigint_stream = signal(SignalKind::interrupt())?;
701 0 : let mut sigterm_stream = signal(SignalKind::terminate())?;
702 :
703 : // Notify systemd that we are ready. This is important as currently loading
704 : // timelines takes significant time (~30s in busy regions).
705 0 : if let Err(e) = sd_notify::notify(true, &[NotifyState::Ready]) {
706 0 : warn!("systemd notify failed: {:?}", e);
707 0 : }
708 :
709 0 : tokio::select! {
710 0 : Some((task_name, res)) = tasks_handles.next()=> {
711 0 : error!("{} task failed: {:?}, exiting", task_name, res);
712 0 : std::process::exit(1);
713 : }
714 : // On any shutdown signal, log its receipt and exit. Additionally, handling
715 : // SIGQUIT prevents a coredump.
716 0 : _ = sigquit_stream.recv() => info!("received SIGQUIT, terminating"),
717 0 : _ = sigint_stream.recv() => info!("received SIGINT, terminating"),
718 0 : _ = sigterm_stream.recv() => info!("received SIGTERM, terminating")
719 :
720 : };
721 0 : std::process::exit(0);
722 0 : }
723 :
724 : /// Determine safekeeper id.
725 0 : fn set_id(workdir: &Utf8Path, given_id: Option<NodeId>) -> Result<NodeId> {
726 0 : let id_file_path = workdir.join(ID_FILE_NAME);
727 :
728 : let my_id: NodeId;
729 : // If file with ID exists, read it in; otherwise set one passed.
730 0 : match fs::read(&id_file_path) {
731 0 : Ok(id_serialized) => {
732 : my_id = NodeId(
733 0 : std::str::from_utf8(&id_serialized)
734 0 : .context("failed to parse safekeeper id")?
735 0 : .parse()
736 0 : .context("failed to parse safekeeper id")?,
737 : );
738 0 : if let Some(given_id) = given_id {
739 0 : if given_id != my_id {
740 0 : bail!(
741 0 : "safekeeper already initialized with id {}, can't set {}",
742 : my_id,
743 : given_id
744 : );
745 0 : }
746 0 : }
747 0 : info!("safekeeper ID {}", my_id);
748 : }
749 0 : Err(error) => match error.kind() {
750 : ErrorKind::NotFound => {
751 0 : my_id = if let Some(given_id) = given_id {
752 0 : given_id
753 : } else {
754 0 : bail!("safekeeper id is not specified");
755 : };
756 0 : let mut f = File::create(&id_file_path)
757 0 : .with_context(|| format!("Failed to create id file at {id_file_path:?}"))?;
758 0 : f.write_all(my_id.to_string().as_bytes())?;
759 0 : f.sync_all()?;
760 0 : info!("initialized safekeeper id {}", my_id);
761 : }
762 : _ => {
763 0 : return Err(error.into());
764 : }
765 : },
766 : }
767 0 : Ok(my_id)
768 0 : }
769 :
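/// Parses the `--remote-storage` value (a TOML inline table, see that argument's help
/// text for an example) into a [`RemoteStorageConfig`].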
770 0 : fn parse_remote_storage(storage_conf: &str) -> anyhow::Result<RemoteStorageConfig> {
771 0 : RemoteStorageConfig::from_toml(&storage_conf.parse()?)
772 0 : }
773 :
774 : #[test]
775 1 : fn verify_cli() {
776 : use clap::CommandFactory;
777 1 : Args::command().debug_assert()
778 1 : }
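
// Illustrative companion test (an addition, not present in the original file): it pins
// down the empty-string behaviour of opt_pathbuf_parser that main() relies on when it
// turns "" back into None for the auth key path arguments.
#[test]
fn opt_pathbuf_parser_allows_empty_string() {
    let parsed = opt_pathbuf_parser("").expect("empty string must be accepted");
    assert!(parsed.as_os_str().is_empty());
}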
|