#![recursion_limit = "300"]

//! Main entry point for the Page Server executable.

use std::env;
use std::env::{VarError, var};
use std::io::Read;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;

use anyhow::{Context, anyhow};
use camino::Utf8Path;
use clap::{Arg, ArgAction, Command};
use metrics::launch_timestamp::{LaunchTimestamp, set_launch_timestamp_metric};
use metrics::set_build_info_metric;
use nix::sys::socket::{setsockopt, sockopt};
use pageserver::config::{PageServerConf, PageserverIdentity};
use pageserver::controller_upcall_client::ControllerUpcallClient;
use pageserver::deletion_queue::DeletionQueue;
use pageserver::disk_usage_eviction_task::{self, launch_disk_usage_global_eviction_task};
use pageserver::metrics::{STARTUP_DURATION, STARTUP_IS_LOADING};
use pageserver::task_mgr::{
    BACKGROUND_RUNTIME, COMPUTE_REQUEST_RUNTIME, MGMT_REQUEST_RUNTIME, WALRECEIVER_RUNTIME,
};
use pageserver::tenant::{TenantSharedResources, mgr, secondary};
use pageserver::{
    CancellableTask, ConsumptionMetricsTasks, HttpEndpointListener, HttpsEndpointListener, http,
    page_cache, page_service, task_mgr, virtual_file,
};
use postgres_backend::AuthType;
use remote_storage::GenericRemoteStorage;
use rustls_pki_types::{CertificateDer, PrivateKeyDer};
use tokio::signal::unix::SignalKind;
use tokio::time::Instant;
use tokio_util::sync::CancellationToken;
use tracing::*;
use utils::auth::{JwtAuth, SwappableJwtAuth};
use utils::crashsafe::syncfs;
use utils::logging::TracingErrorLayerEnablement;
use utils::sentry_init::init_sentry;
use utils::{failpoint_support, logging, project_build_tag, project_git_version, tcp_listener};

project_git_version!(GIT_VERSION);
project_build_tag!(BUILD_TAG);

#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

/// Configure jemalloc to profile heap allocations by sampling stack traces every 2 MB (1 << 21).
/// This adds roughly 3% overhead for allocations on average, which is acceptable considering
/// performance-sensitive code will avoid allocations as far as possible anyway.
#[allow(non_upper_case_globals)]
#[unsafe(export_name = "malloc_conf")]
pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:21\0";
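
// Note: jemalloc also honors the MALLOC_CONF environment variable at startup; per jemalloc's
// documented option precedence it can override the compiled-in settings above.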

const PID_FILE_NAME: &str = "pageserver.pid";

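// Compile-time features enabled in this build; reported by the `--enabled-features` flag
// as JSON, e.g. `{"features": ["testing"]}`.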
const FEATURES: &[&str] = &[
    #[cfg(feature = "testing")]
    "testing",
];

fn version() -> String {
    format!(
        "{GIT_VERSION} failpoints: {}, features: {:?}",
        fail::has_failpoints(),
        FEATURES,
    )
}

fn main() -> anyhow::Result<()> {
    let launch_ts = Box::leak(Box::new(LaunchTimestamp::generate()));

    let arg_matches = cli().get_matches();

    if arg_matches.get_flag("enabled-features") {
        println!("{{\"features\": {FEATURES:?} }}");
        return Ok(());
    }

    // Initialize failpoints support.
    let scenario = failpoint_support::init();

    let workdir = arg_matches
        .get_one::<String>("workdir")
        .map(Utf8Path::new)
        .unwrap_or_else(|| Utf8Path::new(".neon"));
    let workdir = workdir
        .canonicalize_utf8()
        .with_context(|| format!("Error opening workdir '{workdir}'"))?;

    let cfg_file_path = workdir.join("pageserver.toml");
    let identity_file_path = workdir.join("identity.toml");

    // Set CWD to workdir for non-daemon modes.
    env::set_current_dir(&workdir)
        .with_context(|| format!("Failed to set application's current dir to '{workdir}'"))?;

    let conf = initialize_config(&identity_file_path, &cfg_file_path, &workdir)?;

    // Initialize logging.
    //
    // It must be initialized before the custom panic hook is installed below.
    //
    // Regarding tracing_error enablement: at this time, we only use the
    // tracing_error crate to debug_assert that log spans contain tenant and timeline ids.
    // See `debug_assert_current_span_has_tenant_and_timeline_id` in the timeline module.
    let tracing_error_layer_enablement = if cfg!(debug_assertions) {
        TracingErrorLayerEnablement::EnableWithRustLogFilter
    } else {
        TracingErrorLayerEnablement::Disabled
    };

    logging::init(
        conf.log_format,
        tracing_error_layer_enablement,
        logging::Output::Stdout,
    )?;

    // Mind the order required here: 1. logging, 2. panic hook, 3. sentry.
    // We disarm the hook's guard on the pageserver because we never tear down tracing.
    logging::replace_panic_hook_with_tracing_panic_hook().forget();

    // Initialize sentry if SENTRY_DSN is provided.
    let _sentry_guard = init_sentry(
        Some(GIT_VERSION.into()),
        &[("node_id", &conf.id.to_string())],
    );

    // After setting up logging, log the effective IO engine choice and read path implementations.
    info!(?conf.virtual_file_io_engine, "starting with virtual_file IO engine");
    info!(?conf.virtual_file_io_mode, "starting with virtual_file IO mode");
    info!(?conf.wal_receiver_protocol, "starting with WAL receiver protocol");
    info!(?conf.validate_wal_contiguity, "starting with WAL contiguity validation");
    info!(?conf.page_service_pipelining, "starting with page service pipelining config");
    info!(?conf.get_vectored_concurrent_io, "starting with get_vectored IO concurrency config");

    // The tenants directory contains all the pageserver local disk state.
    // Create it if it does not exist and make sure all of its contents are durable before proceeding.
    // Ensuring durability eliminates a whole class of bugs where we come up after an unclean shutdown.
    // After an unclean shutdown, we don't know whether the filesystem content we can read via syscalls is actually durable or not.
    // Examples: OOM kill, systemd killing us during shutdown, self-abort due to an unrecoverable IO error.
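    // The block below: open the tenants dir (creating it first if missing), then syncfs the
    // filesystem it lives on, unless `no_sync` is configured.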
    let tenants_path = conf.tenants_path();
    {
        let open = || {
            nix::dir::Dir::open(
                tenants_path.as_std_path(),
                nix::fcntl::OFlag::O_DIRECTORY | nix::fcntl::OFlag::O_RDONLY,
                nix::sys::stat::Mode::empty(),
            )
        };
        let dirfd = match open() {
            Ok(dirfd) => dirfd,
            Err(e) => match e {
                nix::errno::Errno::ENOENT => {
                    utils::crashsafe::create_dir_all(&tenants_path).with_context(|| {
                        format!("Failed to create tenants root dir at '{tenants_path}'")
                    })?;
                    open().context("open tenants dir after creating it")?
                }
                e => anyhow::bail!(e),
            },
        };

        if conf.no_sync {
            info!("Skipping syncfs on startup");
        } else {
            let started = Instant::now();
            syncfs(dirfd)?;
            let elapsed = started.elapsed();
            info!(
                elapsed_ms = elapsed.as_millis(),
                "made tenant directory contents durable"
            );
        }
    }

    // Basic initialization of things that don't change after startup.
    tracing::info!("Initializing virtual_file...");
    virtual_file::init(
        conf.max_file_descriptors,
        conf.virtual_file_io_engine,
        conf.virtual_file_io_mode,
        if conf.no_sync {
            virtual_file::SyncMode::UnsafeNoSync
        } else {
            virtual_file::SyncMode::Sync
        },
    );
    tracing::info!("Initializing page_cache...");
    page_cache::init(conf.page_cache_size);

    start_pageserver(launch_ts, conf).context("Failed to start pageserver")?;

    scenario.teardown();
    Ok(())
}

fn initialize_config(
    identity_file_path: &Utf8Path,
    cfg_file_path: &Utf8Path,
    workdir: &Utf8Path,
) -> anyhow::Result<&'static PageServerConf> {
    // The deployment orchestrator writes out an identity file containing the node id
    // for all pageservers. This file is the source of truth for the node id. In order
    // to allow for rolling back pageserver releases, the node id is also included in
    // the pageserver config that the deployment orchestrator writes to disk for the pageserver.
    // A rolled-back version of the pageserver will get the node id from the pageserver.toml
    // config file.
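    //
    // A hypothetical minimal identity file (illustrative node id):
    //
    //   id = 1234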
    let identity = match std::fs::File::open(identity_file_path) {
        Ok(mut f) => {
            let md = f.metadata().context("stat identity file")?;
            if !md.is_file() {
                anyhow::bail!(
                    "Pageserver found identity file but it is a dir entry: {identity_file_path}. Aborting start up ..."
                );
            }

            let mut s = String::new();
            f.read_to_string(&mut s).context("read identity file")?;
            toml_edit::de::from_str::<PageserverIdentity>(&s)?
        }
        Err(e) => {
            anyhow::bail!(
                "Pageserver could not read identity file: {identity_file_path}: {e}. Aborting start up ..."
            );
        }
    };

    let config_file_contents =
        std::fs::read_to_string(cfg_file_path).context("read config file from filesystem")?;
    let config_toml = serde_path_to_error::deserialize(
        toml_edit::de::Deserializer::from_str(&config_file_contents)
            .context("build toml deserializer")?,
    )
    .context("deserialize config toml")?;
    let conf = PageServerConf::parse_and_validate(identity.id, config_toml, workdir)
        .context("runtime-validation of config toml")?;

    Ok(Box::leak(Box::new(conf)))
}

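/// Outcome of [`wait_for_phase`]: the timeout budget left for subsequent phases and,
/// if the awaited phase timed out, the still-pending future so the caller can drive
/// it to completion later.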
struct WaitForPhaseResult<F: std::future::Future + Unpin> {
    timeout_remaining: Duration,
    skipped: Option<F>,
}

/// During startup, we apply a timeout to our waits for readiness, to avoid
/// stalling the whole service if one Tenant experiences some problem. Each
/// phase may consume some of the timeout: this function returns the updated
/// timeout for use in the next call.
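///
/// A minimal usage sketch (hypothetical phase name; `ignore`d so it is not compiled as a doctest):
///
/// ```ignore
/// let fut = std::pin::pin!(async { /* wait for some readiness signal */ });
/// let WaitForPhaseResult { timeout_remaining, skipped } =
///     wait_for_phase("example_phase", fut, Duration::from_secs(10)).await;
/// ```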
async fn wait_for_phase<F>(phase: &str, mut fut: F, timeout: Duration) -> WaitForPhaseResult<F>
where
    F: std::future::Future + Unpin,
{
    let initial_t = Instant::now();
    let skipped = match tokio::time::timeout(timeout, &mut fut).await {
        Ok(_) => None,
        Err(_) => {
            tracing::info!(
                timeout_millis = timeout.as_millis(),
                %phase,
                "Startup phase timed out, proceeding anyway"
            );
            Some(fut)
        }
    };

    WaitForPhaseResult {
        timeout_remaining: timeout
            .checked_sub(Instant::now().duration_since(initial_t))
            .unwrap_or(Duration::ZERO),
        skipped,
    }
}

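/// Records progress through startup: sets the STARTUP_DURATION metric for `phase` to the
/// elapsed time since `started_at` and logs a human-readable checkpoint line.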
fn startup_checkpoint(started_at: Instant, phase: &str, human_phase: &str) {
    let elapsed = started_at.elapsed();
    let secs = elapsed.as_secs_f64();
    STARTUP_DURATION.with_label_values(&[phase]).set(secs);

    info!(
        elapsed_ms = elapsed.as_millis(),
        "{human_phase} ({secs:.3}s since start)"
    )
}

fn start_pageserver(
    launch_ts: &'static LaunchTimestamp,
    conf: &'static PageServerConf,
) -> anyhow::Result<()> {
    // Monotonic time for later calculating startup duration
    let started_startup_at = Instant::now();

    // Print version and launch timestamp to the log,
    // and expose them as prometheus metrics.
    // A changed version string indicates changed software.
    // A changed launch timestamp indicates a pageserver restart.
    info!(
        "version: {} launch_timestamp: {} build_tag: {}",
        version(),
        launch_ts.to_string(),
        BUILD_TAG,
    );
    set_build_info_metric(GIT_VERSION, BUILD_TAG);
    set_launch_timestamp_metric(launch_ts);
    #[cfg(target_os = "linux")]
    metrics::register_internal(Box::new(metrics::more_process_metrics::Collector::new())).unwrap();
    metrics::register_internal(Box::new(
        pageserver::metrics::tokio_epoll_uring::Collector::new(),
    ))
    .unwrap();
    pageserver::preinitialize_metrics(conf);

    // If any failpoints were set via the FAILPOINTS environment variable,
    // print them to the log for debugging purposes.
    let failpoints = fail::list();
    if !failpoints.is_empty() {
        info!(
            "started with failpoints: {}",
            failpoints
                .iter()
                .map(|(name, actions)| format!("{name}={actions}"))
                .collect::<Vec<String>>()
                .join(";")
        )
    }
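    // (The `fail` crate accepts the same `name=actions` syntax in the FAILPOINTS variable
    // itself, e.g. FAILPOINTS=some-failpoint=return;other-failpoint=sleep(100).)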

    // Create and lock the PID file. This ensures that there cannot be more than one
    // pageserver process running at the same time.
    let lock_file_path = conf.workdir.join(PID_FILE_NAME);
    info!("Claiming pid file at {lock_file_path:?}...");
    let lock_file =
        utils::pid_file::claim_for_current_process(&lock_file_path).context("claim pid file")?;
    info!("Claimed pid file at {lock_file_path:?}");

    // Ensure that the lock file is held even if the main thread of the process panics.
    // We need to release the lock file only when the process exits.
    std::mem::forget(lock_file);

    // Bind the HTTP and libpq ports early, so that if they are in use by some other
    // process, we error out early.
    let http_addr = &conf.listen_http_addr;
    info!("Starting pageserver http handler on {http_addr}");
    let http_listener = tcp_listener::bind(http_addr)?;

    let https_listener = match conf.listen_https_addr.as_ref() {
        Some(https_addr) => {
            info!("Starting pageserver https handler on {https_addr}");
            Some(tcp_listener::bind(https_addr)?)
        }
        None => None,
    };

    let pg_addr = &conf.listen_pg_addr;
    info!("Starting pageserver pg protocol handler on {pg_addr}");
    let pageserver_listener = tcp_listener::bind(pg_addr)?;

    // Enable SO_KEEPALIVE on the socket, to detect dead connections faster.
    // These are configured via net.ipv4.tcp_keepalive_* sysctls.
    //
    // TODO: also set this on the walreceiver socket, but tokio-postgres doesn't
    // support enabling keepalives while using the default OS sysctls.
    setsockopt(&pageserver_listener, sockopt::KeepAlive, &true)?;

    // Launch the broker client.
    // The storage_broker::connect call needs to happen inside a tokio runtime thread.
    let broker_client = WALRECEIVER_RUNTIME
        .block_on(async {
            // Note: we do not attempt connecting here (but validate endpoint sanity).
            storage_broker::connect(conf.broker_endpoint.clone(), conf.broker_keepalive_interval)
        })
        .with_context(|| {
            format!(
                "create broker client for uri={:?} keepalive_interval={:?}",
                &conf.broker_endpoint, conf.broker_keepalive_interval,
            )
        })?;

    // Initialize authentication for incoming connections.
    let http_auth;
    let pg_auth;
    if conf.http_auth_type == AuthType::NeonJWT || conf.pg_auth_type == AuthType::NeonJWT {
        // Unwrap is OK because the check is performed when creating the config, so the path is set and exists.
        let key_path = conf.auth_validation_public_key_path.as_ref().unwrap();
        info!("Loading public key(s) for verifying JWT tokens from {key_path:?}");

        let jwt_auth = JwtAuth::from_key_path(key_path)?;
        let auth: Arc<SwappableJwtAuth> = Arc::new(SwappableJwtAuth::new(jwt_auth));

        http_auth = match &conf.http_auth_type {
            AuthType::Trust => None,
            AuthType::NeonJWT => Some(auth.clone()),
        };
        pg_auth = match &conf.pg_auth_type {
            AuthType::Trust => None,
            AuthType::NeonJWT => Some(auth),
        };
    } else {
        http_auth = None;
        pg_auth = None;
    }
    info!("Using auth for http API: {:#?}", conf.http_auth_type);
    info!("Using auth for pg connections: {:#?}", conf.pg_auth_type);

    match var("NEON_AUTH_TOKEN") {
        Ok(v) => {
            info!("Loaded JWT token for authentication with Safekeeper");
            pageserver::config::SAFEKEEPER_AUTH_TOKEN
                .set(Arc::new(v))
                .map_err(|_| anyhow!("Could not initialize SAFEKEEPER_AUTH_TOKEN"))?;
        }
        Err(VarError::NotPresent) => {
            info!("No JWT token for authentication with Safekeeper detected");
        }
        Err(e) => return Err(e).with_context(
            || "Failed to load or detect the NEON_AUTH_TOKEN environment variable",
        ),
    };

    // Top-level cancellation token for the process.
    let shutdown_pageserver = tokio_util::sync::CancellationToken::new();

    // Set up the remote storage client.
    let remote_storage = BACKGROUND_RUNTIME.block_on(create_remote_storage_client(conf))?;

    // Set up the deletion queue.
    let (deletion_queue, deletion_workers) = DeletionQueue::new(
        remote_storage.clone(),
        ControllerUpcallClient::new(conf, &shutdown_pageserver),
        conf,
    );
    deletion_workers.spawn_with(BACKGROUND_RUNTIME.handle());

    // Up to this point no significant I/O has been done: this should have been fast. Record
    // the duration prior to starting the I/O-intensive phase of startup.
    startup_checkpoint(started_startup_at, "initial", "Starting loading tenants");
    STARTUP_IS_LOADING.set(1);

    // Startup staging, i.e. ordering of the startup phases:
    //
    // We want to minimize downtime for `page_service` connections, while trying not to overload
    // BACKGROUND_RUNTIME by doing initial compactions and initial logical sizes at the same time.
    //
    // init_done_rx will notify when all initial load operations have completed.
    //
    // background_jobs_can_start (the same name is used to hold off background jobs on the
    // consumer side) will be dropped once we can start the background jobs. Currently that is
    // gated on the initial load phases completing (init_remote_done_rx, init_done_rx), each
    // bounded by a timeout (background_task_maximum_delay).
    let (init_remote_done_tx, init_remote_done_rx) = utils::completion::channel();
    let (init_done_tx, init_done_rx) = utils::completion::channel();

    let (background_jobs_can_start, background_jobs_barrier) = utils::completion::channel();

    let order = pageserver::InitializationOrder {
        initial_tenant_load_remote: Some(init_done_tx),
        initial_tenant_load: Some(init_remote_done_tx),
        background_jobs_can_start: background_jobs_barrier.clone(),
    };

    info!(config=?conf.l0_flush, "using l0_flush config");
    let l0_flush_global_state =
        pageserver::l0_flush::L0FlushGlobalState::new(conf.l0_flush.clone());

    // Scan the local 'tenants/' directory and start loading the tenants.
    let deletion_queue_client = deletion_queue.new_client();
    let background_purges = mgr::BackgroundPurges::default();
    let tenant_manager = BACKGROUND_RUNTIME.block_on(mgr::init_tenant_mgr(
        conf,
        background_purges.clone(),
        TenantSharedResources {
            broker_client: broker_client.clone(),
            remote_storage: remote_storage.clone(),
            deletion_queue_client,
            l0_flush_global_state,
        },
        order,
        shutdown_pageserver.clone(),
    ))?;
    let tenant_manager = Arc::new(tenant_manager);

    BACKGROUND_RUNTIME.spawn({
        let shutdown_pageserver = shutdown_pageserver.clone();
        let drive_init = async move {
            // NOTE: unlike many futures in pageserver, this one is cancellation-safe
            let guard = scopeguard::guard_on_success((), |_| {
                tracing::info!("Cancelled before initial load completed")
            });

            let timeout = conf.background_task_maximum_delay;

            let init_remote_done = std::pin::pin!(async {
                init_remote_done_rx.wait().await;
                startup_checkpoint(
                    started_startup_at,
                    "initial_tenant_load_remote",
                    "Remote part of initial load completed",
                );
            });

            let WaitForPhaseResult {
                timeout_remaining: timeout,
                skipped: init_remote_skipped,
            } = wait_for_phase("initial_tenant_load_remote", init_remote_done, timeout).await;

            let init_load_done = std::pin::pin!(async {
                init_done_rx.wait().await;
                startup_checkpoint(
                    started_startup_at,
                    "initial_tenant_load",
                    "Initial load completed",
                );
                STARTUP_IS_LOADING.set(0);
            });

            let WaitForPhaseResult {
                timeout_remaining: _timeout,
                skipped: init_load_skipped,
            } = wait_for_phase("initial_tenant_load", init_load_done, timeout).await;

            // Initial logical sizes can now start, as they were waiting on init_done_rx.

            scopeguard::ScopeGuard::into_inner(guard);

            // Allow background jobs to start: we either completed the prior stages, or they
            // reached their timeout and were skipped. It is important that we do not let them
            // block background jobs indefinitely, because things like consumption metrics for
            // billing are blocked by this barrier.
            drop(background_jobs_can_start);
            startup_checkpoint(
                started_startup_at,
                "background_jobs_can_start",
                "Starting background jobs",
            );

            // We are done. If we skipped any phases due to timeout, run them to completion here so that
            // they will eventually update their startup_checkpoint, and so that we do not declare the
            // 'complete' stage until all the other stages are really done.
            let guard = scopeguard::guard_on_success((), |_| {
                tracing::info!("Cancelled before waiting for skipped phases done")
            });
            if let Some(f) = init_remote_skipped {
                f.await;
            }
            if let Some(f) = init_load_skipped {
                f.await;
            }
            scopeguard::ScopeGuard::into_inner(guard);

            startup_checkpoint(started_startup_at, "complete", "Startup complete");
        };

        async move {
            let mut drive_init = std::pin::pin!(drive_init);
            // Just race these two tasks.
            tokio::select! {
                _ = shutdown_pageserver.cancelled() => {},
                _ = &mut drive_init => {},
            }
        }
    });

    let (secondary_controller, secondary_controller_tasks) = secondary::spawn_tasks(
        tenant_manager.clone(),
        remote_storage.clone(),
        background_jobs_barrier.clone(),
        shutdown_pageserver.clone(),
    );

    // Shared state between the disk-usage-backed eviction background task and the HTTP endpoint
    // that allows triggering disk-usage-based eviction manually. Note that the HTTP endpoint
    // remains accessible even if the background task is not configured, as long as remote
    // storage has been configured.
    let disk_usage_eviction_state: Arc<disk_usage_eviction_task::State> = Arc::default();

    let disk_usage_eviction_task = launch_disk_usage_global_eviction_task(
        conf,
        remote_storage.clone(),
        disk_usage_eviction_state.clone(),
        tenant_manager.clone(),
        background_jobs_barrier.clone(),
    );

    // Start up the service to handle HTTP mgmt API requests. We created the
    // listener earlier already.
    let (http_endpoint_listener, https_endpoint_listener) = {
        let _rt_guard = MGMT_REQUEST_RUNTIME.enter(); // for hyper

        let router_state = Arc::new(
            http::routes::State::new(
                conf,
                tenant_manager.clone(),
                http_auth.clone(),
                remote_storage.clone(),
                broker_client.clone(),
                disk_usage_eviction_state,
                deletion_queue.new_client(),
                secondary_controller,
            )
            .context("Failed to initialize router state")?,
        );

        let router = http::make_router(router_state, launch_ts, http_auth.clone())?
            .build()
            .map_err(|err| anyhow!(err))?;

        let service =
            Arc::new(http_utils::RequestServiceBuilder::new(router).map_err(|err| anyhow!(err))?);

        let http_task = {
            let server =
                http_utils::server::Server::new(Arc::clone(&service), http_listener, None)?;
            let cancel = CancellationToken::new();

            let task = MGMT_REQUEST_RUNTIME.spawn(task_mgr::exit_on_panic_or_error(
                "http endpoint listener",
                server.serve(cancel.clone()),
            ));
            HttpEndpointListener(CancellableTask { task, cancel })
        };

        let https_task = match https_listener {
            Some(https_listener) => {
                let certs = load_certs(&conf.ssl_cert_file)?;
                let key = load_private_key(&conf.ssl_key_file)?;

                let server_config = rustls::ServerConfig::builder()
                    .with_no_client_auth()
                    .with_single_cert(certs, key)?;

                let tls_acceptor = tokio_rustls::TlsAcceptor::from(Arc::new(server_config));

                let server =
                    http_utils::server::Server::new(service, https_listener, Some(tls_acceptor))?;
                let cancel = CancellationToken::new();

                let task = MGMT_REQUEST_RUNTIME.spawn(task_mgr::exit_on_panic_or_error(
                    "https endpoint listener",
                    server.serve(cancel.clone()),
                ));
                Some(HttpsEndpointListener(CancellableTask { task, cancel }))
            }
            None => None,
        };

        (http_task, https_task)
    };

    let consumption_metrics_tasks = {
        let cancel = shutdown_pageserver.child_token();
        let task = crate::BACKGROUND_RUNTIME.spawn({
            let tenant_manager = tenant_manager.clone();
            let cancel = cancel.clone();
            async move {
                // First, wait until background jobs are cleared to launch.
                //
                // This is because we only process active tenants and timelines, and
                // Timeline::get_current_logical_size will spawn the logical size calculation,
                // which will not be rate-limited.
                tokio::select! {
                    _ = cancel.cancelled() => { return; },
                    _ = background_jobs_barrier.wait() => {}
                };

                pageserver::consumption_metrics::run(conf, tenant_manager, cancel).await;
            }
        });
        ConsumptionMetricsTasks(CancellableTask { task, cancel })
    };

    // Spawn a task to listen for libpq connections. It will spawn further tasks
    // for each connection. We created the listener earlier already.
    let page_service = page_service::spawn(conf, tenant_manager.clone(), pg_auth, {
        let _entered = COMPUTE_REQUEST_RUNTIME.enter(); // TcpListener::from_std requires it
        pageserver_listener
            .set_nonblocking(true)
            .context("set listener to nonblocking")?;
        tokio::net::TcpListener::from_std(pageserver_listener).context("create tokio listener")?
    });

    // All started up! Now just sit and wait for the shutdown signal.
    BACKGROUND_RUNTIME.block_on(async move {
        let signal_token = CancellationToken::new();
        let signal_cancel = signal_token.child_token();

        // Spawn signal handlers. Runs in a loop since we want to be responsive to multiple signals
        // even after triggering shutdown (e.g. a SIGQUIT after a slow SIGTERM shutdown). See:
        // https://github.com/neondatabase/neon/issues/9740.
        tokio::spawn(async move {
            let mut sigint = tokio::signal::unix::signal(SignalKind::interrupt()).unwrap();
            let mut sigterm = tokio::signal::unix::signal(SignalKind::terminate()).unwrap();
            let mut sigquit = tokio::signal::unix::signal(SignalKind::quit()).unwrap();

            loop {
                let signal = tokio::select! {
                    _ = sigquit.recv() => {
                        info!("Got signal SIGQUIT. Terminating in immediate shutdown mode.");
                        std::process::exit(111);
                    }
                    _ = sigint.recv() => "SIGINT",
                    _ = sigterm.recv() => "SIGTERM",
                };

                if !signal_token.is_cancelled() {
                    info!("Got signal {signal}. Terminating gracefully in fast shutdown mode.");
                    signal_token.cancel();
                } else {
                    info!("Got signal {signal}. Already shutting down.");
                }
            }
        });

        // Wait for the cancellation signal and shut down the pageserver.
        //
        // This cancels the `shutdown_pageserver` cancellation tree. Right now that tree doesn't
        // reach very far, and `task_mgr` is used instead. The plan is to change that over time.
        signal_cancel.cancelled().await;

        shutdown_pageserver.cancel();
        pageserver::shutdown_pageserver(
            http_endpoint_listener,
            https_endpoint_listener,
            page_service,
            consumption_metrics_tasks,
            disk_usage_eviction_task,
            &tenant_manager,
            background_purges,
            deletion_queue.clone(),
            secondary_controller_tasks,
            0,
        )
        .await;
        unreachable!();
    })
}

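/// Loads a PEM-encoded certificate chain from `filename` as DER certificates for rustls.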
fn load_certs(filename: &Utf8Path) -> std::io::Result<Vec<CertificateDer<'static>>> {
    let file = std::fs::File::open(filename)?;
    let mut reader = std::io::BufReader::new(file);

    rustls_pemfile::certs(&mut reader).collect()
}

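/// Loads the first PEM-encoded private key from `filename`, failing if none is found.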
fn load_private_key(filename: &Utf8Path) -> anyhow::Result<PrivateKeyDer<'static>> {
    let file = std::fs::File::open(filename)?;
    let mut reader = std::io::BufReader::new(file);

    let key = rustls_pemfile::private_key(&mut reader)?;

    key.ok_or(anyhow::anyhow!(
        "no private key found in {}",
        filename.as_str(),
    ))
}

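/// Builds the remote storage client from `remote_storage_config`, optionally wrapped in the
/// failure-injecting test shim when `test_remote_failures` is set (requires the 'testing' feature).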
async fn create_remote_storage_client(
    conf: &'static PageServerConf,
) -> anyhow::Result<GenericRemoteStorage> {
    let config = if let Some(config) = &conf.remote_storage_config {
        config
    } else {
        anyhow::bail!("no remote storage configured, this is a deprecated configuration");
    };

    // Create the client.
    let mut remote_storage = GenericRemoteStorage::from_config(config).await?;

    // If `test_remote_failures` is non-zero, wrap the client with a
    // wrapper that simulates failures.
    if conf.test_remote_failures > 0 {
        if !cfg!(feature = "testing") {
            anyhow::bail!(
                "test_remote_failures option is not available because pageserver was compiled without the 'testing' feature"
            );
        }
        info!(
            "Simulating remote failures for first {} attempts of each op",
            conf.test_remote_failures
        );
        remote_storage =
            GenericRemoteStorage::unreliable_wrapper(remote_storage, conf.test_remote_failures);
    }

    Ok(remote_storage)
}

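// Example invocation: `pageserver -D /path/to/workdir`; the working directory defaults to
// `.neon` when `-D` is omitted.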
fn cli() -> Command {
    Command::new("Neon page server")
        .about("Materializes the WAL stream into pages and serves them to Postgres")
        .version(version())
        .arg(
            Arg::new("workdir")
                .short('D')
                .long("workdir")
                .help("Working directory for the pageserver"),
        )
        .arg(
            Arg::new("enabled-features")
                .long("enabled-features")
                .action(ArgAction::SetTrue)
                .help("Show enabled compile-time features"),
        )
}

#[test]
fn verify_cli() {
    cli().debug_assert();
}