#![recursion_limit = "300"]

//! Main entry point for the Page Server executable.

use std::env;
use std::env::{VarError, var};
use std::io::Read;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;

use anyhow::{Context, anyhow};
use camino::Utf8Path;
use clap::{Arg, ArgAction, Command};
use http_utils::tls_certs::ReloadingCertificateResolver;
use metrics::launch_timestamp::{LaunchTimestamp, set_launch_timestamp_metric};
use metrics::set_build_info_metric;
use nix::sys::socket::{setsockopt, sockopt};
use pageserver::basebackup_cache::BasebackupCache;
use pageserver::config::{PageServerConf, PageserverIdentity, ignored_fields};
use pageserver::controller_upcall_client::StorageControllerUpcallClient;
use pageserver::deletion_queue::DeletionQueue;
use pageserver::disk_usage_eviction_task::{self, launch_disk_usage_global_eviction_task};
use pageserver::feature_resolver::FeatureResolver;
use pageserver::metrics::{STARTUP_DURATION, STARTUP_IS_LOADING};
use pageserver::page_service::GrpcPageServiceHandler;
use pageserver::task_mgr::{
    BACKGROUND_RUNTIME, COMPUTE_REQUEST_RUNTIME, MGMT_REQUEST_RUNTIME, WALRECEIVER_RUNTIME,
};
use pageserver::tenant::{TenantSharedResources, mgr, secondary};
use pageserver::{
    CancellableTask, ConsumptionMetricsTasks, HttpEndpointListener, HttpsEndpointListener,
    MetricsCollectionTask, http, page_cache, page_service, task_mgr, virtual_file,
};
use postgres_backend::AuthType;
use remote_storage::GenericRemoteStorage;
use tokio::time::Instant;
use tokio_util::sync::CancellationToken;
use tracing::*;
use tracing_utils::OtelGuard;
use utils::auth::{JwtAuth, SwappableJwtAuth};
use utils::crashsafe::syncfs;
use utils::logging::TracingErrorLayerEnablement;
use utils::metrics_collector::{METRICS_COLLECTION_INTERVAL, METRICS_COLLECTOR};
use utils::sentry_init::init_sentry;
use utils::{failpoint_support, logging, project_build_tag, project_git_version, tcp_listener};

project_git_version!(GIT_VERSION);
project_build_tag!(BUILD_TAG);

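// Use jemalloc as the global allocator for the entire process; the `malloc_conf`
// string below enables its heap profiler.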
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

/// Configure jemalloc to profile heap allocations by sampling stack traces every 2 MB (1 << 21).
/// This adds roughly 3% overhead for allocations on average, which is acceptable considering
/// performance-sensitive code will avoid allocations as far as possible anyway.
#[allow(non_upper_case_globals)]
#[unsafe(export_name = "malloc_conf")]
pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:21\0";

const PID_FILE_NAME: &str = "pageserver.pid";

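/// Compile-time features, reported by `version()` and the `--enabled-features`
/// CLI flag.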
const FEATURES: &[&str] = &[
    #[cfg(feature = "testing")]
    "testing",
];

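/// Human-readable version string: the git version plus failpoint availability
/// and the enabled compile-time features.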
fn version() -> String {
    format!(
        "{GIT_VERSION} failpoints: {}, features: {:?}",
        fail::has_failpoints(),
        FEATURES,
    )
}

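// High-level startup flow: parse CLI arguments, load and validate the config
// from the working directory, initialize logging/tracing/sentry, make the
// tenants directory durable, initialize virtual_file and page_cache, and then
// hand off to `start_pageserver`, which runs until a shutdown signal arrives.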
fn main() -> anyhow::Result<()> {
    let launch_ts = Box::leak(Box::new(LaunchTimestamp::generate()));

    let arg_matches = cli().get_matches();

    if arg_matches.get_flag("enabled-features") {
        println!("{{\"features\": {FEATURES:?} }}");
        return Ok(());
    }

    // Initialize failpoints support
    let scenario = failpoint_support::init();

    let workdir = arg_matches
        .get_one::<String>("workdir")
        .map(Utf8Path::new)
        .unwrap_or_else(|| Utf8Path::new(".neon"));
    let workdir = workdir
        .canonicalize_utf8()
        .with_context(|| format!("Error opening workdir '{workdir}'"))?;

    let cfg_file_path = workdir.join("pageserver.toml");
    let identity_file_path = workdir.join("identity.toml");

    // Set CWD to workdir for non-daemon modes
    env::set_current_dir(&workdir)
        .with_context(|| format!("Failed to set application's current dir to '{workdir}'"))?;

    let (conf, ignored) = initialize_config(&identity_file_path, &cfg_file_path, &workdir)?;

    // Initialize logging.
    //
    // It must be initialized before the custom panic hook is installed below.
    //
    // Regarding tracing_error enablement: at this time, we only use the
    // tracing_error crate to debug_assert that log spans contain tenant and timeline ids.
    // See `debug_assert_current_span_has_tenant_and_timeline_id` in the timeline module.
    let tracing_error_layer_enablement = if cfg!(debug_assertions) {
        TracingErrorLayerEnablement::EnableWithRustLogFilter
    } else {
        TracingErrorLayerEnablement::Disabled
    };

    logging::init(
        conf.log_format,
        tracing_error_layer_enablement,
        logging::Output::Stdout,
    )?;

    let otel_enablement = match &conf.tracing {
        Some(cfg) => tracing_utils::OtelEnablement::Enabled {
            service_name: "pageserver".to_string(),
            export_config: (&cfg.export_config).into(),
            runtime: *COMPUTE_REQUEST_RUNTIME,
        },
        None => tracing_utils::OtelEnablement::Disabled,
    };

    let otel_guard = tracing_utils::init_performance_tracing(otel_enablement);

    if otel_guard.is_some() {
        info!(?conf.tracing, "starting with OTEL tracing enabled");
    }

    // mind the order required here: 1. logging, 2. panic_hook, 3. sentry.
    // disarming this hook on pageserver, because we never tear down tracing.
    logging::replace_panic_hook_with_tracing_panic_hook().forget();

    // initialize sentry if SENTRY_DSN is provided
    let _sentry_guard = init_sentry(
        Some(GIT_VERSION.into()),
        &[("node_id", &conf.id.to_string())],
    );

    // Warn about ignored config items; see pageserver_api::config::ConfigToml
    // doc comment for rationale why we prefer this over serde(deny_unknown_fields).
    {
        let ignored_fields::Paths { paths } = &ignored;
        for path in paths {
            warn!(?path, "ignoring unknown configuration item");
        }
    }

    // Log configuration items for feature-flag-like config
    // (maybe we should automate this with a visitor?).
    info!(?conf.virtual_file_io_engine, "starting with virtual_file IO engine");
    info!(?conf.virtual_file_io_mode, "starting with virtual_file IO mode");
    info!(?conf.validate_wal_contiguity, "starting with WAL contiguity validation");
    info!(?conf.page_service_pipelining, "starting with page service pipelining config");
    info!(?conf.get_vectored_concurrent_io, "starting with get_vectored IO concurrency config");

    // The tenants directory contains all the pageserver local disk state.
    // Create if not exists and make sure all the contents are durable before proceeding.
    // Ensuring durability eliminates a whole bug class where we come up after an unclean shutdown.
    // After an unclean shutdown, we don't know if all the filesystem content we can read via syscalls is actually durable or not.
    // Examples for that: OOM kill, systemd killing us during shutdown, self abort due to unrecoverable IO error.
    let tenants_path = conf.tenants_path();
    {
        let open = || {
            nix::dir::Dir::open(
                tenants_path.as_std_path(),
                nix::fcntl::OFlag::O_DIRECTORY | nix::fcntl::OFlag::O_RDONLY,
                nix::sys::stat::Mode::empty(),
            )
        };
        let dirfd = match open() {
            Ok(dirfd) => dirfd,
            Err(e) => match e {
                nix::errno::Errno::ENOENT => {
                    utils::crashsafe::create_dir_all(&tenants_path).with_context(|| {
                        format!("Failed to create tenants root dir at '{tenants_path}'")
                    })?;
                    open().context("open tenants dir after creating it")?
                }
                e => anyhow::bail!(e),
            },
        };

        if conf.no_sync {
            info!("Skipping syncfs on startup");
        } else {
            let started = Instant::now();
            syncfs(dirfd)?;
            let elapsed = started.elapsed();
            info!(
                elapsed_ms = elapsed.as_millis(),
                "made tenant directory contents durable"
            );
        }
    }

    // Basic initialization of things that don't change after startup
    tracing::info!("Initializing virtual_file...");
    virtual_file::init(
        conf.max_file_descriptors,
        conf.virtual_file_io_engine,
        conf.virtual_file_io_mode,
        if conf.no_sync {
            virtual_file::SyncMode::UnsafeNoSync
        } else {
            virtual_file::SyncMode::Sync
        },
    );
    tracing::info!("Initializing page_cache...");
    page_cache::init(conf.page_cache_size);

    start_pageserver(launch_ts, conf, ignored, otel_guard).context("Failed to start pageserver")?;

    scenario.teardown();
    Ok(())
}

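/// Reads the node identity and the pageserver config from the working directory,
/// returning the validated, leaked-`'static` [`PageServerConf`] along with any
/// config paths that were present on disk but unknown to the parser.
///
/// As a rough sketch (the exact schema is defined by [`PageserverIdentity`]),
/// the identity file is a small TOML document carrying the node id, e.g.:
///
/// ```toml
/// id = 42
/// ```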
fn initialize_config(
    identity_file_path: &Utf8Path,
    cfg_file_path: &Utf8Path,
    workdir: &Utf8Path,
) -> anyhow::Result<(&'static PageServerConf, ignored_fields::Paths)> {
    // The deployment orchestrator writes out an identity file containing the node id
    // for all pageservers. This file is the source of truth for the node id. In order
    // to allow for rolling back pageserver releases, the node id is also included in
    // the pageserver config that the deployment orchestrator writes to disk for the pageserver.
    // A rolled back version of the pageserver will get the node id from the pageserver.toml
    // config file.
    let identity = match std::fs::File::open(identity_file_path) {
        Ok(mut f) => {
            let md = f.metadata().context("stat config file")?;
            if !md.is_file() {
                anyhow::bail!(
                    "Pageserver found identity file but it is a dir entry: {identity_file_path}. Aborting start up ..."
                );
            }

            let mut s = String::new();
            f.read_to_string(&mut s).context("read identity file")?;
            toml_edit::de::from_str::<PageserverIdentity>(&s)?
        }
        Err(e) => {
            anyhow::bail!(
                "Pageserver could not read identity file: {identity_file_path}: {e}. Aborting start up ..."
            );
        }
    };

    let config_file_contents =
        std::fs::read_to_string(cfg_file_path).context("read config file from filesystem")?;

    // Deserialize the config file contents into a ConfigToml.
    let config_toml: pageserver_api::config::ConfigToml = {
        let deserializer = toml_edit::de::Deserializer::from_str(&config_file_contents)
            .context("build toml deserializer")?;
        let mut path_to_error_track = serde_path_to_error::Track::new();
        let deserializer =
            serde_path_to_error::Deserializer::new(deserializer, &mut path_to_error_track);
        serde::Deserialize::deserialize(deserializer).context("deserialize config toml")?
    };

    // Find unknown fields by re-serializing the parsed ConfigToml and comparing it to the on-disk file.
    // Any fields that are only in the on-disk version are unknown.
    // (The assumption here is that the ConfigToml doesn't use skip_serializing_if.)
    // (Make sure to read the ConfigToml doc comment on why we only want to warn about, but not fail startup on, unknown fields.)
    let ignored = {
        let ondisk_toml = config_file_contents
            .parse::<toml_edit::DocumentMut>()
            .context("parse original config as toml document")?;
        let parsed_toml = toml_edit::ser::to_document(&config_toml)
            .context("re-serialize config to toml document")?;
        pageserver::config::ignored_fields::find(ondisk_toml, parsed_toml)
    };

    // Construct the runtime god object (it's called PageServerConf but actually is just global shared state).
    let conf = PageServerConf::parse_and_validate(identity.id, config_toml, workdir)
        .context("runtime-validation of config toml")?;
    let conf = Box::leak(Box::new(conf));

    Ok((conf, ignored))
}

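/// Result of [`wait_for_phase`]: how much of the timeout budget remains, and the
/// still-pending future if the phase timed out, so the caller can drive it to
/// completion later.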
struct WaitForPhaseResult<F: std::future::Future + Unpin> {
    timeout_remaining: Duration,
    skipped: Option<F>,
}

/// During startup, we apply a timeout to our waits for readiness, to avoid
/// stalling the whole service if one Tenant experiences some problem. Each
/// phase may consume some of the timeout: this function returns the updated
/// timeout for use in the next call.
async fn wait_for_phase<F>(phase: &str, mut fut: F, timeout: Duration) -> WaitForPhaseResult<F>
where
    F: std::future::Future + Unpin,
{
    let initial_t = Instant::now();
    let skipped = match tokio::time::timeout(timeout, &mut fut).await {
        Ok(_) => None,
        Err(_) => {
            tracing::info!(
                timeout_millis = timeout.as_millis(),
                %phase,
                "Startup phase timed out, proceeding anyway"
            );
            Some(fut)
        }
    };

    WaitForPhaseResult {
        timeout_remaining: timeout
            .checked_sub(Instant::now().duration_since(initial_t))
            .unwrap_or(Duration::ZERO),
        skipped,
    }
}

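/// Records completion of a named startup phase in the STARTUP_DURATION metric
/// and logs the elapsed time since process start.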
fn startup_checkpoint(started_at: Instant, phase: &str, human_phase: &str) {
    let elapsed = started_at.elapsed();
    let secs = elapsed.as_secs_f64();
    STARTUP_DURATION.with_label_values(&[phase]).set(secs);

    info!(
        elapsed_ms = elapsed.as_millis(),
        "{human_phase} ({secs:.3}s since start)"
    )
}

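/// Brings up the entire pageserver: metrics, PID file, network listeners, broker
/// client, authentication, remote storage, the tenant manager, and background
/// tasks, then blocks waiting for the shutdown signal. Returns only on startup
/// failure; a successful run ends inside `pageserver::shutdown_pageserver`,
/// which is expected not to return (note the `unreachable!()` at the end).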
fn start_pageserver(
    launch_ts: &'static LaunchTimestamp,
    conf: &'static PageServerConf,
    ignored: ignored_fields::Paths,
    otel_guard: Option<OtelGuard>,
) -> anyhow::Result<()> {
    // Monotonic time for later calculating startup duration
    let started_startup_at = Instant::now();

    // Print version and launch timestamp to the log,
    // and expose them as prometheus metrics.
    // A changed version string indicates changed software.
    // A changed launch timestamp indicates a pageserver restart.
    info!(
        "version: {} launch_timestamp: {} build_tag: {}",
        version(),
        launch_ts.to_string(),
        BUILD_TAG,
    );
    set_build_info_metric(GIT_VERSION, BUILD_TAG);
    set_launch_timestamp_metric(launch_ts);
    #[cfg(target_os = "linux")]
    metrics::register_internal(Box::new(metrics::more_process_metrics::Collector::new())).unwrap();
    metrics::register_internal(Box::new(
        pageserver::metrics::tokio_epoll_uring::Collector::new(),
    ))
    .unwrap();
    pageserver::preinitialize_metrics(conf, ignored);

    // If any failpoints were set from FAILPOINTS environment variable,
    // print them to the log for debugging purposes
    let failpoints = fail::list();
    if !failpoints.is_empty() {
        info!(
            "started with failpoints: {}",
            failpoints
                .iter()
                .map(|(name, actions)| format!("{name}={actions}"))
                .collect::<Vec<String>>()
                .join(";")
        )
    }

    // Create and lock PID file. This ensures that there cannot be more than one
    // pageserver process running at the same time.
    let lock_file_path = conf.workdir.join(PID_FILE_NAME);
    info!("Claiming pid file at {lock_file_path:?}...");
    let lock_file =
        utils::pid_file::claim_for_current_process(&lock_file_path).context("claim pid file")?;
    info!("Claimed pid file at {lock_file_path:?}");

    // Ensure that the lock file is held even if the main thread of the process panics.
    // We need to release the lock file only when the process exits.
    std::mem::forget(lock_file);

    // Bind the HTTP, libpq, and gRPC ports early, to error out if they are
    // already in use.
    info!(
        "Starting pageserver http handler on {} with auth {:#?}",
        conf.listen_http_addr, conf.http_auth_type
    );
    let http_listener = tcp_listener::bind(&conf.listen_http_addr)?;

    let https_listener = match conf.listen_https_addr.as_ref() {
        Some(https_addr) => {
            info!(
                "Starting pageserver https handler on {https_addr} with auth {:#?}",
                conf.http_auth_type
            );
            Some(tcp_listener::bind(https_addr)?)
        }
        None => None,
    };

    info!(
        "Starting pageserver pg protocol handler on {} with auth {:#?}",
        conf.listen_pg_addr, conf.pg_auth_type,
    );
    let pageserver_listener = tcp_listener::bind(&conf.listen_pg_addr)?;

    // Enable SO_KEEPALIVE on the socket, to detect dead connections faster.
    // These are configured via net.ipv4.tcp_keepalive_* sysctls.
    //
    // TODO: also set this on the walreceiver socket, but tokio-postgres doesn't
    // support enabling keepalives while using the default OS sysctls.
    setsockopt(&pageserver_listener, sockopt::KeepAlive, &true)?;

    let mut grpc_listener = None;
    if let Some(grpc_addr) = &conf.listen_grpc_addr {
        info!(
            "Starting pageserver gRPC handler on {grpc_addr} with auth {:#?}",
            conf.grpc_auth_type
        );
        grpc_listener = Some(tcp_listener::bind(grpc_addr).map_err(|e| anyhow!("{e}"))?);
    }

    // Launch broker client
    // The storage_broker::connect call needs to happen inside a tokio runtime thread.
    let broker_client = WALRECEIVER_RUNTIME
        .block_on(async {
            let tls_config = storage_broker::ClientTlsConfig::new().ca_certificates(
                conf.ssl_ca_certs
                    .iter()
                    .map(pem::encode)
                    .map(storage_broker::Certificate::from_pem),
            );
445 0 : storage_broker::connect(
446 0 : conf.broker_endpoint.clone(),
447 0 : conf.broker_keepalive_interval,
448 0 : tls_config,
449 : )
450 0 : })
451 0 : .with_context(|| {
452 0 : format!(
453 0 : "create broker client for uri={:?} keepalive_interval={:?}",
454 0 : &conf.broker_endpoint, conf.broker_keepalive_interval,
455 : )
456 0 : })?;
457 :
458 : // Initialize authentication for incoming connections
459 : let http_auth;
460 : let pg_auth;
461 : let grpc_auth;
462 0 : if [conf.http_auth_type, conf.pg_auth_type, conf.grpc_auth_type].contains(&AuthType::NeonJWT) {
463 : // unwrap is ok because check is performed when creating config, so path is set and exists
464 0 : let key_path = conf.auth_validation_public_key_path.as_ref().unwrap();
465 0 : info!("Loading public key(s) for verifying JWT tokens from {key_path:?}");
466 :
467 0 : let jwt_auth = JwtAuth::from_key_path(key_path)?;
468 0 : let auth: Arc<SwappableJwtAuth> = Arc::new(SwappableJwtAuth::new(jwt_auth));
469 :
470 0 : http_auth = match conf.http_auth_type {
471 0 : AuthType::Trust => None,
472 0 : AuthType::NeonJWT => Some(auth.clone()),
473 : };
474 0 : pg_auth = match conf.pg_auth_type {
475 0 : AuthType::Trust => None,
476 0 : AuthType::NeonJWT => Some(auth.clone()),
477 : };
478 0 : grpc_auth = match conf.grpc_auth_type {
479 0 : AuthType::Trust => None,
480 0 : AuthType::NeonJWT => Some(auth),
481 : };
482 0 : } else {
483 0 : http_auth = None;
484 0 : pg_auth = None;
485 0 : grpc_auth = None;
486 0 : }
487 :
488 0 : let tls_server_config = if conf.listen_https_addr.is_some() || conf.enable_tls_page_service_api
489 : {
490 0 : let resolver = BACKGROUND_RUNTIME.block_on(ReloadingCertificateResolver::new(
491 0 : "main",
492 0 : &conf.ssl_key_file,
493 0 : &conf.ssl_cert_file,
494 0 : conf.ssl_cert_reload_period,
495 0 : ))?;
496 :
497 0 : let server_config = rustls::ServerConfig::builder()
498 0 : .with_no_client_auth()
499 0 : .with_cert_resolver(resolver);
500 :
501 0 : Some(Arc::new(server_config))
502 : } else {
503 0 : None
504 : };
505 :
506 0 : match var("NEON_AUTH_TOKEN") {
507 0 : Ok(v) => {
508 0 : info!("Loaded JWT token for authentication with Safekeeper");
509 0 : pageserver::config::SAFEKEEPER_AUTH_TOKEN
510 0 : .set(Arc::new(v))
511 0 : .map_err(|_| anyhow!("Could not initialize SAFEKEEPER_AUTH_TOKEN"))?;
512 : }
513 : Err(VarError::NotPresent) => {
514 0 : info!("No JWT token for authentication with Safekeeper detected");
515 : }
516 0 : Err(e) => return Err(e).with_context(
517 : || "Failed to either load to detect non-present NEON_AUTH_TOKEN environment variable",
        ),
    };

    // Top-level cancellation token for the process
    let shutdown_pageserver = tokio_util::sync::CancellationToken::new();

    // Set up remote storage client
    let remote_storage = BACKGROUND_RUNTIME.block_on(create_remote_storage_client(conf))?;

    let feature_resolver = create_feature_resolver(
        conf,
        shutdown_pageserver.clone(),
        BACKGROUND_RUNTIME.handle(),
    )?;

    // Set up deletion queue
    let (deletion_queue, deletion_workers) = DeletionQueue::new(
        remote_storage.clone(),
        StorageControllerUpcallClient::new(conf, &shutdown_pageserver),
        conf,
    );
    deletion_workers.spawn_with(BACKGROUND_RUNTIME.handle());

    // Up to this point no significant I/O has been done: this should have been fast. Record
    // duration prior to starting I/O intensive phase of startup.
    startup_checkpoint(started_startup_at, "initial", "Starting loading tenants");
    STARTUP_IS_LOADING.set(1);

    // Startup staging or optimizing:
    //
    // We want to minimize downtime for `page_service` connections, while trying not to overload
    // BACKGROUND_RUNTIME by doing initial compactions and initial logical sizes at the same time.
    //
    // init_done_rx will notify when all initial load operations have completed.
    //
    // background_jobs_can_start (same name used to hold off background jobs from starting at
    // consumer side) will be dropped once we can start the background jobs. Currently it is behind
    // completing all initial logical size calculations (init_logical_size_done_rx) and a timeout
    // (background_task_maximum_delay).
    let (init_remote_done_tx, init_remote_done_rx) = utils::completion::channel();
    let (init_done_tx, init_done_rx) = utils::completion::channel();

    let (background_jobs_can_start, background_jobs_barrier) = utils::completion::channel();

    let order = pageserver::InitializationOrder {
        initial_tenant_load_remote: Some(init_done_tx),
        initial_tenant_load: Some(init_remote_done_tx),
        background_jobs_can_start: background_jobs_barrier.clone(),
    };

    info!(config=?conf.l0_flush, "using l0_flush config");
    let l0_flush_global_state =
        pageserver::l0_flush::L0FlushGlobalState::new(conf.l0_flush.clone());

    // Scan the local 'tenants/' directory and start loading the tenants
    let (basebackup_cache, basebackup_prepare_receiver) = BasebackupCache::new(
        conf.basebackup_cache_dir(),
        conf.basebackup_cache_config.clone(),
    );
    let deletion_queue_client = deletion_queue.new_client();
    let background_purges = mgr::BackgroundPurges::default();

    let tenant_manager = mgr::init(
        conf,
        background_purges.clone(),
        TenantSharedResources {
            broker_client: broker_client.clone(),
            remote_storage: remote_storage.clone(),
            deletion_queue_client,
            l0_flush_global_state,
            basebackup_cache: Arc::clone(&basebackup_cache),
            feature_resolver: feature_resolver.clone(),
        },
        shutdown_pageserver.clone(),
    );
    let tenant_manager = Arc::new(tenant_manager);
    BACKGROUND_RUNTIME.block_on(mgr::init_tenant_mgr(tenant_manager.clone(), order))?;

    basebackup_cache.spawn_background_task(
        BACKGROUND_RUNTIME.handle(),
        basebackup_prepare_receiver,
        Arc::clone(&tenant_manager),
        shutdown_pageserver.child_token(),
    );

    BACKGROUND_RUNTIME.spawn({
        let shutdown_pageserver = shutdown_pageserver.clone();
        let drive_init = async move {
            // NOTE: unlike many futures in pageserver, this one is cancellation-safe
            let guard = scopeguard::guard_on_success((), |_| {
                tracing::info!("Cancelled before initial load completed")
            });

            let timeout = conf.background_task_maximum_delay;

            let init_remote_done = std::pin::pin!(async {
                init_remote_done_rx.wait().await;
                startup_checkpoint(
                    started_startup_at,
                    "initial_tenant_load_remote",
                    "Remote part of initial load completed",
                );
            });

            let WaitForPhaseResult {
                timeout_remaining: timeout,
                skipped: init_remote_skipped,
            } = wait_for_phase("initial_tenant_load_remote", init_remote_done, timeout).await;

            let init_load_done = std::pin::pin!(async {
                init_done_rx.wait().await;
                startup_checkpoint(
                    started_startup_at,
                    "initial_tenant_load",
                    "Initial load completed",
                );
                STARTUP_IS_LOADING.set(0);
            });

            let WaitForPhaseResult {
                timeout_remaining: _timeout,
                skipped: init_load_skipped,
            } = wait_for_phase("initial_tenant_load", init_load_done, timeout).await;

            // initial logical sizes can now start, as they were waiting on init_done_rx.

            scopeguard::ScopeGuard::into_inner(guard);

            // allow background jobs to start: we either completed prior stages, or they reached timeout
            // and were skipped. It is important that we do not let them block background jobs indefinitely,
            // because things like consumption metrics for billing are blocked by this barrier.
            drop(background_jobs_can_start);
            startup_checkpoint(
                started_startup_at,
                "background_jobs_can_start",
                "Starting background jobs",
            );

            // We are done. If we skipped any phases due to timeout, run them to completion here so that
            // they will eventually update their startup_checkpoint, and so that we do not declare the
            // 'complete' stage until all the other stages are really done.
            let guard = scopeguard::guard_on_success((), |_| {
                tracing::info!("Cancelled before waiting for skipped phases done")
            });
            if let Some(f) = init_remote_skipped {
                f.await;
            }
            if let Some(f) = init_load_skipped {
                f.await;
            }
            scopeguard::ScopeGuard::into_inner(guard);

            startup_checkpoint(started_startup_at, "complete", "Startup complete");
        };

        async move {
            let mut drive_init = std::pin::pin!(drive_init);
            // just race these tasks
            tokio::select! {
                _ = shutdown_pageserver.cancelled() => {},
                _ = &mut drive_init => {},
            }
        }
    });

    let (secondary_controller, secondary_controller_tasks) = secondary::spawn_tasks(
        tenant_manager.clone(),
        remote_storage.clone(),
        background_jobs_barrier.clone(),
        shutdown_pageserver.clone(),
    );

    // shared state between the disk-usage backed eviction background task and the http endpoint
    // that allows triggering disk-usage based eviction manually. note that the http endpoint
    // is still accessible even if the background task is not configured, as long as remote
    // storage has been configured.
    let disk_usage_eviction_state: Arc<disk_usage_eviction_task::State> = Arc::default();

    let disk_usage_eviction_task = launch_disk_usage_global_eviction_task(
        conf,
        remote_storage.clone(),
        disk_usage_eviction_state.clone(),
        tenant_manager.clone(),
        background_jobs_barrier.clone(),
    );

    // Start up the service to handle HTTP mgmt API requests. We created the
    // listener earlier already.
    let (http_endpoint_listener, https_endpoint_listener) = {
        let _rt_guard = MGMT_REQUEST_RUNTIME.enter(); // for hyper

        let router_state = Arc::new(
            http::routes::State::new(
                conf,
                tenant_manager.clone(),
                http_auth.clone(),
                remote_storage.clone(),
                broker_client.clone(),
                disk_usage_eviction_state,
                deletion_queue.new_client(),
                secondary_controller,
                feature_resolver,
            )
            .context("Failed to initialize router state")?,
        );

        let router = http::make_router(router_state, launch_ts, http_auth.clone())?
            .build()
            .map_err(|err| anyhow!(err))?;

        let service =
            Arc::new(http_utils::RequestServiceBuilder::new(router).map_err(|err| anyhow!(err))?);

        let http_task = {
            let server =
                http_utils::server::Server::new(Arc::clone(&service), http_listener, None)?;
            let cancel = CancellationToken::new();

            let task = MGMT_REQUEST_RUNTIME.spawn(task_mgr::exit_on_panic_or_error(
                "http endpoint listener",
                server.serve(cancel.clone()),
            ));
            HttpEndpointListener(CancellableTask { task, cancel })
        };

        let https_task = match https_listener {
            Some(https_listener) => {
                let tls_server_config = tls_server_config
                    .clone()
                    .expect("tls_server_config is set earlier if https is enabled");

                let tls_acceptor = tokio_rustls::TlsAcceptor::from(tls_server_config);

                let server =
                    http_utils::server::Server::new(service, https_listener, Some(tls_acceptor))?;
                let cancel = CancellationToken::new();

                let task = MGMT_REQUEST_RUNTIME.spawn(task_mgr::exit_on_panic_or_error(
                    "https endpoint listener",
                    server.serve(cancel.clone()),
                ));
                Some(HttpsEndpointListener(CancellableTask { task, cancel }))
            }
            None => None,
        };

        (http_task, https_task)
    };

    /* BEGIN_HADRON */
    let metrics_collection_task = {
        let cancel = shutdown_pageserver.child_token();
        let task = crate::BACKGROUND_RUNTIME.spawn({
            let cancel = cancel.clone();
            let background_jobs_barrier = background_jobs_barrier.clone();
            async move {
                if conf.force_metric_collection_on_scrape {
                    return;
                }

                // first wait until background jobs are cleared to launch.
                tokio::select! {
                    _ = cancel.cancelled() => { return; },
                    _ = background_jobs_barrier.wait() => {}
                };
                let mut interval = tokio::time::interval(METRICS_COLLECTION_INTERVAL);
                loop {
                    tokio::select! {
                        _ = cancel.cancelled() => {
                            tracing::info!("cancelled metrics collection task, exiting...");
                            break;
                        },
                        _ = interval.tick() => {}
                    }
                    tokio::task::spawn_blocking(|| {
                        METRICS_COLLECTOR.run_once(true);
                    });
                }
            }
        });
        MetricsCollectionTask(CancellableTask { task, cancel })
    };
    /* END_HADRON */

    let consumption_metrics_tasks = {
        let cancel = shutdown_pageserver.child_token();
        let task = crate::BACKGROUND_RUNTIME.spawn({
            let tenant_manager = tenant_manager.clone();
            let cancel = cancel.clone();
            async move {
                // first wait until background jobs are cleared to launch.
                //
                // this is because we only process active tenants and timelines, and the
                // Timeline::get_current_logical_size will spawn the logical size calculation,
                // which will not be rate-limited.
                tokio::select! {
                    _ = cancel.cancelled() => { return; },
                    _ = background_jobs_barrier.wait() => {}
                };

                pageserver::consumption_metrics::run(conf, tenant_manager, cancel).await;
            }
        });
        ConsumptionMetricsTasks(CancellableTask { task, cancel })
    };

    // Spawn a task to listen for libpq connections. It will spawn further tasks
    // for each connection. We created the listener earlier already.
    let perf_trace_dispatch = otel_guard.as_ref().map(|g| g.dispatch.clone());
    let page_service = page_service::spawn(
        conf,
        tenant_manager.clone(),
        pg_auth,
        perf_trace_dispatch,
        {
            let _entered = COMPUTE_REQUEST_RUNTIME.enter(); // TcpListener::from_std requires it
            pageserver_listener
                .set_nonblocking(true)
                .context("set listener to nonblocking")?;
            tokio::net::TcpListener::from_std(pageserver_listener)
                .context("create tokio listener")?
        },
        if conf.enable_tls_page_service_api {
            tls_server_config
        } else {
            None
        },
    );

    // Spawn a Pageserver gRPC server task. It will spawn separate tasks for
    // each stream/request.
    //
    // TODO: this uses a separate Tokio runtime for the page service. If we want
    // other gRPC services, they will need their own port and runtime. Is this
    // necessary?
    let mut page_service_grpc = None;
    if let Some(grpc_listener) = grpc_listener {
        page_service_grpc = Some(GrpcPageServiceHandler::spawn(
            tenant_manager.clone(),
            grpc_auth,
            otel_guard.as_ref().map(|g| g.dispatch.clone()),
            conf.get_vectored_concurrent_io,
            grpc_listener,
        )?);
    }

    // All started up! Now just sit and wait for shutdown signal.
    BACKGROUND_RUNTIME.block_on(async move {
        let signal_token = CancellationToken::new();
        let signal_cancel = signal_token.child_token();

        tokio::spawn(utils::signals::signal_handler(signal_token));

        // Wait for cancellation signal and shut down the pageserver.
        //
        // This cancels the `shutdown_pageserver` cancellation tree. Right now that tree doesn't
        // reach very far, and `task_mgr` is used instead. The plan is to change that over time.
        signal_cancel.cancelled().await;

        shutdown_pageserver.cancel();
        pageserver::shutdown_pageserver(
            http_endpoint_listener,
            https_endpoint_listener,
            page_service,
            page_service_grpc,
            metrics_collection_task,
            consumption_metrics_tasks,
            disk_usage_eviction_task,
            &tenant_manager,
            background_purges,
            deletion_queue.clone(),
            secondary_controller_tasks,
            0,
        )
        .await;
894 : })
895 0 : }
896 :
897 0 : fn create_feature_resolver(
898 0 : conf: &'static PageServerConf,
899 0 : shutdown_pageserver: CancellationToken,
900 0 : handle: &tokio::runtime::Handle,
901 0 : ) -> anyhow::Result<FeatureResolver> {
902 0 : FeatureResolver::spawn(conf, shutdown_pageserver, handle)
903 0 : }
904 :
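/// Creates the remote storage client from the config, failing if remote storage
/// is not configured. When `test_remote_failures` is set, wraps the client in a
/// failure-injecting wrapper for testing.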
async fn create_remote_storage_client(
    conf: &'static PageServerConf,
) -> anyhow::Result<GenericRemoteStorage> {
    let config = if let Some(config) = &conf.remote_storage_config {
        config
    } else {
        anyhow::bail!("no remote storage configured, this is a deprecated configuration");
    };

    // Create the client
    let mut remote_storage = GenericRemoteStorage::from_config(config).await?;

    // If `test_remote_failures` is non-zero, wrap the client with a
    // wrapper that simulates failures.
    if conf.test_remote_failures > 0 {
        info!(
            "Simulating remote failures for first {} attempts of each op",
            conf.test_remote_failures
        );
        remote_storage = GenericRemoteStorage::unreliable_wrapper(
            remote_storage,
            conf.test_remote_failures,
            conf.test_remote_failures_probability,
        );
    }

    Ok(remote_storage)
}

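/// Builds the clap command-line parser: `-D`/`--workdir` selects the working
/// directory (`main` falls back to ".neon" when absent), and
/// `--enabled-features` prints the compiled features as JSON and exits.
///
/// Example invocation (the path is illustrative):
///
/// ```text
/// pageserver -D /var/lib/pageserver
/// ```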
fn cli() -> Command {
    Command::new("Neon page server")
936 1 : .about("Materializes WAL stream to pages and serves them to the postgres")
        .version(version())
        .arg(
            Arg::new("workdir")
                .short('D')
                .long("workdir")
                .help("Working directory for the pageserver"),
        )
        .arg(
            Arg::new("enabled-features")
                .long("enabled-features")
                .action(ArgAction::SetTrue)
                .help("Show enabled compile time features"),
        )
}

#[test]
fn verify_cli() {
    cli().debug_assert();
}