use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;

use anyhow::{Context, anyhow};
use clap::Parser;
use hyper0::Uri;
use metrics::BuildInfo;
use metrics::launch_timestamp::LaunchTimestamp;
use storage_controller::http::make_router;
use storage_controller::metrics::preinitialize_metrics;
use storage_controller::persistence::Persistence;
use storage_controller::service::chaos_injector::ChaosInjector;
use storage_controller::service::{
    Config, HEARTBEAT_INTERVAL_DEFAULT, LONG_RECONCILE_THRESHOLD_DEFAULT,
    MAX_OFFLINE_INTERVAL_DEFAULT, MAX_WARMING_UP_INTERVAL_DEFAULT,
    PRIORITY_RECONCILER_CONCURRENCY_DEFAULT, RECONCILER_CONCURRENCY_DEFAULT, Service,
};
use tokio::signal::unix::SignalKind;
use tokio_util::sync::CancellationToken;
use tracing::Instrument;
use utils::auth::{JwtAuth, SwappableJwtAuth};
use utils::logging::{self, LogFormat};
use utils::sentry_init::init_sentry;
use utils::{project_build_tag, project_git_version, tcp_listener};

project_git_version!(GIT_VERSION);
project_build_tag!(BUILD_TAG);

#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

/// Configure jemalloc to profile heap allocations by sampling stack traces every 2 MB (1 << 21).
/// This adds roughly 3% overhead for allocations on average, which is acceptable considering
/// performance-sensitive code will avoid allocations as far as possible anyway.
#[allow(non_upper_case_globals)]
#[unsafe(export_name = "malloc_conf")]
pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:21\0";

#[derive(Parser)]
#[command(author, version, about, long_about = None)]
#[command(arg_required_else_help(true))]
struct Cli {
    /// Host and port to listen on, like `127.0.0.1:1234`
    #[arg(short, long)]
    listen: std::net::SocketAddr,

    /// Public key for JWT authentication of clients
    #[arg(long)]
    public_key: Option<String>,

    /// Token for authenticating this service with the pageservers it controls
    #[arg(long)]
    jwt_token: Option<String>,

    /// Token for authenticating this service with the safekeepers it controls
    #[arg(long)]
    safekeeper_jwt_token: Option<String>,

    /// Token for authenticating this service with the control plane, when calling
    /// the compute notification endpoint
    #[arg(long)]
    control_plane_jwt_token: Option<String>,

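    /// Token for authenticating this service with its peer storage controller instances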
    #[arg(long)]
    peer_jwt_token: Option<String>,

    /// URL to control plane compute notification endpoint
    #[arg(long)]
    compute_hook_url: Option<String>,

    /// URL to connect to postgres, like postgresql://localhost:1234/storage_controller
    #[arg(long)]
    database_url: Option<String>,

    /// Flag to enable dev mode, which permits running without auth
    #[arg(long, default_value = "false")]
    dev: bool,

    /// Grace period before marking an unresponsive pageserver offline
    #[arg(long)]
    max_offline_interval: Option<humantime::Duration>,

    /// A more tolerant grace period before marking an unresponsive pageserver offline, used
    /// around pageserver restarts
    #[arg(long)]
    max_warming_up_interval: Option<humantime::Duration>,

    /// Size threshold for automatically splitting shards (disabled by default)
    #[arg(long)]
    split_threshold: Option<u64>,

    /// Maximum number of normal-priority reconcilers that may run in parallel
    #[arg(long)]
    reconciler_concurrency: Option<usize>,

    /// Maximum number of high-priority reconcilers that may run in parallel
    #[arg(long)]
    priority_reconciler_concurrency: Option<usize>,

    /// How long to wait for the initial database connection to be available.
    #[arg(long, default_value = "5s")]
    db_connect_timeout: humantime::Duration,

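    /// Whether to start up as a candidate instead of immediately assuming the active role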
    #[arg(long, default_value = "false")]
    start_as_candidate: bool,

    // TODO: make this mandatory once the helm chart gets updated
    #[arg(long)]
    address_for_peers: Option<Uri>,

    /// `neon_local` sets this to the path of the neon_local repo dir.
    /// Only relevant for testing.
    // TODO: make `cfg(feature = "testing")`
    #[arg(long)]
    neon_local_repo_dir: Option<PathBuf>,

    /// Chaos testing: exercise tenant migrations
    #[arg(long)]
    chaos_interval: Option<humantime::Duration>,

    /// Chaos testing: exercise an immediate exit
    #[arg(long)]
    chaos_exit_crontab: Option<cron::Schedule>,

    // Maximum acceptable lag for the secondary location while draining
    // a pageserver
    #[arg(long)]
    max_secondary_lag_bytes: Option<u64>,

    // Period with which to send heartbeats to registered nodes
    #[arg(long)]
    heartbeat_interval: Option<humantime::Duration>,

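    /// Threshold after which a reconcile operation is considered long-running and reported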
    #[arg(long)]
    long_reconcile_threshold: Option<humantime::Duration>,

    // Flag to use https for requests to pageserver API.
    #[arg(long, default_value = "false")]
    use_https_pageserver_api: bool,

    /// Whether to load safekeepers from the database and heartbeat them
    #[arg(long, default_value = "false")]
    load_safekeepers: bool,
}

enum StrictMode {
    /// In strict mode, we will require that all secrets are loaded, i.e. security features
    /// may not be implicitly turned off by omitting secrets in the environment.
    Strict,
    /// In dev mode, secrets are optional, and omitting a particular secret will implicitly
    /// disable the auth related to it (e.g. no pageserver jwt key -> send unauthenticated
    /// requests, no public key -> don't authenticate incoming requests).
    Dev,
}

impl Default for StrictMode {
    fn default() -> Self {
        Self::Strict
    }
}

/// Secrets may either be provided on the command line (for testing), or loaded from AWS Secrets
/// Manager: this type encapsulates the logic to decide which and do the loading.
struct Secrets {
    database_url: String,
    public_key: Option<JwtAuth>,
    pageserver_jwt_token: Option<String>,
    safekeeper_jwt_token: Option<String>,
    control_plane_jwt_token: Option<String>,
    peer_jwt_token: Option<String>,
}

impl Secrets {
    const DATABASE_URL_ENV: &'static str = "DATABASE_URL";
    const PAGESERVER_JWT_TOKEN_ENV: &'static str = "PAGESERVER_JWT_TOKEN";
    const SAFEKEEPER_JWT_TOKEN_ENV: &'static str = "SAFEKEEPER_JWT_TOKEN";
    const CONTROL_PLANE_JWT_TOKEN_ENV: &'static str = "CONTROL_PLANE_JWT_TOKEN";
    const PEER_JWT_TOKEN_ENV: &'static str = "PEER_JWT_TOKEN";
    const PUBLIC_KEY_ENV: &'static str = "PUBLIC_KEY";

    /// Load secrets from, in order of preference:
    /// - CLI args, if the database URL is provided on the CLI
    /// - Environment variables, if DATABASE_URL is set
    async fn load(args: &Cli) -> anyhow::Result<Self> {
        let Some(database_url) = Self::load_secret(&args.database_url, Self::DATABASE_URL_ENV)
        else {
            anyhow::bail!(
                "Database URL is not set (set `--database-url`, or `DATABASE_URL` environment)"
            )
        };

        let public_key = match Self::load_secret(&args.public_key, Self::PUBLIC_KEY_ENV) {
            Some(v) => Some(JwtAuth::from_key(v).context("Loading public key")?),
            None => None,
        };

        let this = Self {
            database_url,
            public_key,
            pageserver_jwt_token: Self::load_secret(
                &args.jwt_token,
                Self::PAGESERVER_JWT_TOKEN_ENV,
            ),
            safekeeper_jwt_token: Self::load_secret(
                &args.safekeeper_jwt_token,
                Self::SAFEKEEPER_JWT_TOKEN_ENV,
            ),
            control_plane_jwt_token: Self::load_secret(
                &args.control_plane_jwt_token,
                Self::CONTROL_PLANE_JWT_TOKEN_ENV,
            ),
            peer_jwt_token: Self::load_secret(&args.peer_jwt_token, Self::PEER_JWT_TOKEN_ENV),
        };

        Ok(this)
    }

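    /// Prefer the value passed on the CLI; otherwise fall back to the named environment variable.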
    fn load_secret(cli: &Option<String>, env_name: &str) -> Option<String> {
        if let Some(v) = cli {
            Some(v.clone())
        } else if let Ok(v) = std::env::var(env_name) {
            Some(v)
        } else {
            None
        }
    }
}

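// Process entry point: set up logging, panic handling and Sentry, then build a Tokio runtime
// and run `async_main` on it.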
fn main() -> anyhow::Result<()> {
    logging::init(
        LogFormat::Plain,
        logging::TracingErrorLayerEnablement::Disabled,
        logging::Output::Stdout,
    )?;

    // Log panics via tracing, so we don't get confusing output from the default hook writing to stderr
    utils::logging::replace_panic_hook_with_tracing_panic_hook().forget();

    let _sentry_guard = init_sentry(Some(GIT_VERSION.into()), &[]);

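    // Wrap the previously installed hook: let it log the panic (and give Sentry a chance to
    // report it), then exit the process.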
    let hook = std::panic::take_hook();
    std::panic::set_hook(Box::new(move |info| {
        // let sentry send a message (and flush)
        // and trace the error
        hook(info);

        std::process::exit(1);
    }));

    tokio::runtime::Builder::new_current_thread()
        // We use spawn_blocking for database operations, so require approximately
        // as many blocking threads as we will open database connections.
        .max_blocking_threads(Persistence::MAX_CONNECTIONS as usize)
        .enable_all()
        .build()
        .unwrap()
        .block_on(async_main())
}

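// The async entry point: parse arguments, load secrets, start the Service and HTTP server,
// then wait for a termination signal and shut everything down in order.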
async fn async_main() -> anyhow::Result<()> {
    let launch_ts = Box::leak(Box::new(LaunchTimestamp::generate()));

    preinitialize_metrics();

    let args = Cli::parse();
    tracing::info!(
        "version: {}, launch_timestamp: {}, build_tag: {}, listening on {}",
        GIT_VERSION,
        launch_ts.to_string(),
        BUILD_TAG,
        args.listen
    );

    let build_info = BuildInfo {
        revision: GIT_VERSION,
        build_tag: BUILD_TAG,
    };

    let strict_mode = if args.dev {
        StrictMode::Dev
    } else {
        StrictMode::Strict
    };

    let secrets = Secrets::load(&args).await?;

    // TODO: once we've rolled out the safekeeper JWT token everywhere, put it into the validation code below
    tracing::info!(
        "safekeeper_jwt_token set: {:?}",
        secrets.safekeeper_jwt_token.is_some()
    );

    // Validate that required secrets and arguments are provided in strict mode
    match strict_mode {
        StrictMode::Strict
            if (secrets.public_key.is_none()
                || secrets.pageserver_jwt_token.is_none()
                || secrets.control_plane_jwt_token.is_none()) =>
        {
            // Production systems should always have secrets configured: if public_key was not set
            // then we would implicitly disable auth.
            anyhow::bail!(
                "Insecure config! One or more secrets is not set. This is only permitted in `--dev` mode"
            );
        }
        StrictMode::Strict if args.compute_hook_url.is_none() => {
            // Production systems should always have a compute hook set, to prevent falling
            // back to trying to use neon_local.
            anyhow::bail!(
                "`--compute-hook-url` is not set: this is only permitted in `--dev` mode"
            );
        }
        StrictMode::Strict => {
            tracing::info!("Starting in strict mode: configuration is OK.")
        }
        StrictMode::Dev => {
            tracing::warn!("Starting in dev mode: this may be an insecure configuration.")
        }
    }

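    // Assemble the service configuration, falling back to compile-time defaults for any
    // intervals and concurrency limits that were not set on the command line.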
    let config = Config {
        pageserver_jwt_token: secrets.pageserver_jwt_token,
        safekeeper_jwt_token: secrets.safekeeper_jwt_token,
        control_plane_jwt_token: secrets.control_plane_jwt_token,
        peer_jwt_token: secrets.peer_jwt_token,
        compute_hook_url: args.compute_hook_url,
        max_offline_interval: args
            .max_offline_interval
            .map(humantime::Duration::into)
            .unwrap_or(MAX_OFFLINE_INTERVAL_DEFAULT),
        max_warming_up_interval: args
            .max_warming_up_interval
            .map(humantime::Duration::into)
            .unwrap_or(MAX_WARMING_UP_INTERVAL_DEFAULT),
        reconciler_concurrency: args
            .reconciler_concurrency
            .unwrap_or(RECONCILER_CONCURRENCY_DEFAULT),
        priority_reconciler_concurrency: args
            .priority_reconciler_concurrency
            .unwrap_or(PRIORITY_RECONCILER_CONCURRENCY_DEFAULT),
        split_threshold: args.split_threshold,
        neon_local_repo_dir: args.neon_local_repo_dir,
        max_secondary_lag_bytes: args.max_secondary_lag_bytes,
        heartbeat_interval: args
            .heartbeat_interval
            .map(humantime::Duration::into)
            .unwrap_or(HEARTBEAT_INTERVAL_DEFAULT),
        long_reconcile_threshold: args
            .long_reconcile_threshold
            .map(humantime::Duration::into)
            .unwrap_or(LONG_RECONCILE_THRESHOLD_DEFAULT),
        address_for_peers: args.address_for_peers,
        start_as_candidate: args.start_as_candidate,
        http_service_port: args.listen.port() as i32,
        use_https_pageserver_api: args.use_https_pageserver_api,
        load_safekeepers: args.load_safekeepers,
    };

    // Validate that we can connect to the database
    Persistence::await_connection(&secrets.database_url, args.db_connect_timeout.into()).await?;

    let persistence = Arc::new(Persistence::new(secrets.database_url).await);

    let service = Service::spawn(config, persistence.clone()).await?;

    let http_listener = tcp_listener::bind(args.listen)?;

    let auth = secrets
        .public_key
        .map(|jwt_auth| Arc::new(SwappableJwtAuth::new(jwt_auth)));
    let router = make_router(service.clone(), auth, build_info)
        .build()
        .map_err(|err| anyhow!(err))?;
    let router_service = http_utils::RouterService::new(router).unwrap();

    // Start HTTP server
    let server_shutdown = CancellationToken::new();
    let server = hyper0::Server::from_tcp(http_listener)?
        .serve(router_service)
        .with_graceful_shutdown({
            let server_shutdown = server_shutdown.clone();
            async move {
                server_shutdown.cancelled().await;
            }
        });
    tracing::info!("Serving on {0}", args.listen);
    let server_task = tokio::task::spawn(server);

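    // If a chaos interval was configured, spawn a background chaos injector that exercises
    // tenant migrations (and, optionally, scheduled exits) until it is cancelled at shutdown.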
    let chaos_task = args.chaos_interval.map(|interval| {
        let service = service.clone();
        let cancel = CancellationToken::new();
        let cancel_bg = cancel.clone();
        let chaos_exit_crontab = args.chaos_exit_crontab;
        (
            tokio::task::spawn(
                async move {
                    let mut chaos_injector =
                        ChaosInjector::new(service, interval.into(), chaos_exit_crontab);
                    chaos_injector.run(cancel_bg).await
                }
                .instrument(tracing::info_span!("chaos_injector")),
            ),
            cancel,
        )
    });

    // Wait until we receive a signal
    let mut sigint = tokio::signal::unix::signal(SignalKind::interrupt())?;
    let mut sigquit = tokio::signal::unix::signal(SignalKind::quit())?;
    let mut sigterm = tokio::signal::unix::signal(SignalKind::terminate())?;
    tokio::select! {
        _ = sigint.recv() => {},
        _ = sigterm.recv() => {},
        _ = sigquit.recv() => {},
    }
    tracing::info!("Terminating on signal");

    // Stop HTTP server first, so that we don't have to service requests
    // while shutting down Service.
    server_shutdown.cancel();
    match tokio::time::timeout(Duration::from_secs(5), server_task).await {
        Ok(Ok(_)) => {
            tracing::info!("Joined HTTP server task");
        }
        Ok(Err(e)) => {
            tracing::error!("Error joining HTTP server task: {e}")
        }
        Err(_) => {
            tracing::warn!("Timed out joining HTTP server task");
            // We will fall through and shut down the service anyway; any request handlers
            // in flight will experience cancellation & their clients will see a torn connection.
        }
    }

    // If we were injecting chaos, stop that so that we're not calling into Service while it shuts down
    if let Some((chaos_jh, chaos_cancel)) = chaos_task {
        chaos_cancel.cancel();
        chaos_jh.await.ok();
    }

    service.shutdown().await;
    tracing::info!("Service shutdown complete");

    std::process::exit(0);
}