use std::net::SocketAddr;
use std::pin::pin;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;

use anyhow::{Context, bail, ensure};
use arc_swap::ArcSwapOption;
use camino::{Utf8Path, Utf8PathBuf};
use clap::Parser;
use compute_api::spec::LocalProxySpec;
use futures::future::Either;
use thiserror::Error;
use tokio::net::TcpListener;
use tokio::sync::Notify;
use tokio::task::JoinSet;
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, warn};
use utils::sentry_init::init_sentry;
use utils::{pid_file, project_build_tag, project_git_version};

use crate::auth::backend::jwt::JwkCache;
use crate::auth::backend::local::{JWKS_ROLE_MAP, LocalBackend};
use crate::auth::{self};
use crate::cancellation::CancellationHandler;
use crate::config::{
    self, AuthenticationConfig, ComputeConfig, HttpConfig, ProxyConfig, RetryConfig,
};
use crate::control_plane::locks::ApiLocks;
use crate::control_plane::messages::{EndpointJwksResponse, JwksSettings};
use crate::ext::TaskExt;
use crate::http::health_server::AppMetrics;
use crate::intern::RoleNameInt;
use crate::metrics::{Metrics, ThreadPoolMetrics};
use crate::rate_limiter::{EndpointRateLimiter, LeakyBucketConfig, RateBucketInfo};
use crate::scram::threadpool::ThreadPool;
use crate::serverless::cancel_set::CancelSet;
use crate::serverless::{self, GlobalConnPoolOptions};
use crate::tls::client_config::compute_client_config_with_root_certs;
use crate::types::RoleName;
use crate::url::ApiUrl;

project_git_version!(GIT_VERSION);
project_build_tag!(BUILD_TAG);

/// Neon proxy/router
#[derive(Parser)]
#[command(version = GIT_VERSION, about)]
struct LocalProxyCliArgs {
    /// listen for incoming metrics connections on ip:port
    #[clap(long, default_value = "127.0.0.1:7001")]
    metrics: String,
    /// listen for incoming http connections on ip:port
    #[clap(long)]
    http: String,
    /// timeout for the TLS handshake
    #[clap(long, default_value = "15s", value_parser = humantime::parse_duration)]
    handshake_timeout: tokio::time::Duration,
    /// lock for `connect_compute` api method. example: "shards=32,permits=4,epoch=10m,timeout=1s". (use `permits=0` to disable).
    #[clap(long, default_value = config::ConcurrencyLockOptions::DEFAULT_OPTIONS_CONNECT_COMPUTE_LOCK)]
    connect_compute_lock: String,
    #[clap(flatten)]
    sql_over_http: SqlOverHttpArgs,
    /// User rate limiter max number of requests per second.
    ///
    /// Provided in the form `<Requests Per Second>@<Bucket Duration Size>`.
    /// Can be given multiple times for different bucket sizes.
    #[clap(long, default_values_t = RateBucketInfo::DEFAULT_ENDPOINT_SET)]
    user_rps_limit: Vec<RateBucketInfo>,
    /// Whether to retry the connection to the compute node
    #[clap(long, default_value = config::RetryConfig::CONNECT_TO_COMPUTE_DEFAULT_VALUES)]
    connect_to_compute_retry: String,
    /// Address of the postgres server
    #[clap(long, default_value = "127.0.0.1:5432")]
    postgres: SocketAddr,
    /// Address of the internal compute-ctl api service
    #[clap(long, default_value = "http://127.0.0.1:3081/")]
    compute_ctl: ApiUrl,
    /// Path of the local proxy config file
    #[clap(long, default_value = "./local_proxy.json")]
    config_path: Utf8PathBuf,
    /// Path of the local proxy PID file
    #[clap(long, default_value = "./local_proxy.pid")]
    pid_path: Utf8PathBuf,
}
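
// Illustrative invocation (a sketch only; it assumes the binary is installed as
// `local_proxy`, and relies on clap's default kebab-case flag names derived from the
// fields above; only `--http` has no default):
//
//     local_proxy --http 127.0.0.1:7432 \
//         --metrics 127.0.0.1:7001 \
//         --postgres 127.0.0.1:5432 \
//         --compute-ctl http://127.0.0.1:3081/ \
//         --config-path ./local_proxy.json \
//         --pid-path ./local_proxy.pid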

#[derive(clap::Args, Clone, Copy, Debug)]
struct SqlOverHttpArgs {
    /// How many connections to pool for each endpoint. Excess connections are discarded
    #[clap(long, default_value_t = 200)]
    sql_over_http_pool_max_total_conns: usize,

    /// How long pooled connections should remain idle for before closing
    #[clap(long, default_value = "5m", value_parser = humantime::parse_duration)]
    sql_over_http_idle_timeout: tokio::time::Duration,

    #[clap(long, default_value_t = 100)]
    sql_over_http_client_conn_threshold: u64,

    #[clap(long, default_value_t = 16)]
    sql_over_http_cancel_set_shards: usize,

    #[clap(long, default_value_t = 10 * 1024 * 1024)] // 10 MiB
    sql_over_http_max_request_size_bytes: usize,

    #[clap(long, default_value_t = 10 * 1024 * 1024)] // 10 MiB
    sql_over_http_max_response_size_bytes: usize,
}

pub async fn run() -> anyhow::Result<()> {
    let _logging_guard = crate::logging::init_local_proxy()?;
    let _panic_hook_guard = utils::logging::replace_panic_hook_with_tracing_panic_hook();
    let _sentry_guard = init_sentry(Some(GIT_VERSION.into()), &[]);

    Metrics::install(Arc::new(ThreadPoolMetrics::new(0)));

    // TODO: refactor these to use labels
    debug!("Version: {GIT_VERSION}");
    debug!("Build_tag: {BUILD_TAG}");
    let neon_metrics = ::metrics::NeonMetrics::new(::metrics::BuildInfo {
        revision: GIT_VERSION,
        build_tag: BUILD_TAG,
    });

    let jemalloc = match crate::jemalloc::MetricRecorder::new() {
        Ok(t) => Some(t),
        Err(e) => {
            tracing::error!(error = ?e, "could not start jemalloc metrics loop");
            None
        }
    };

    let args = LocalProxyCliArgs::parse();
    let config = build_config(&args)?;
    let auth_backend = build_auth_backend(&args);

    // before we bind to any ports, write the process ID to a file
    // so that compute-ctl can find our process later
    // in order to trigger the appropriate SIGHUP on config change.
    //
    // This also claims a "lock" that makes sure only one instance
    // of local_proxy runs at a time.
    let _process_guard = loop {
        match pid_file::claim_for_current_process(&args.pid_path) {
            Ok(guard) => break guard,
            Err(e) => {
                // compute-ctl might have tried to read the pid-file to let us
                // know about some config change. We should try again.
                error!(path=?args.pid_path, "could not claim PID file guard: {e:?}");
                tokio::time::sleep(Duration::from_secs(1)).await;
            }
        }
    };
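
    // Illustrative only: with the PID file claimed above, compute-ctl (or an operator)
    // can nudge this process to re-read its config file with a plain SIGHUP, e.g.
    //
    //     kill -HUP "$(cat ./local_proxy.pid)"
    //
    // (this sketch assumes the PID file contains just the process id; the signal is
    // wired up to `refresh_config_notify` below).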

    let metrics_listener = TcpListener::bind(args.metrics).await?.into_std()?;
    let http_listener = TcpListener::bind(args.http).await?;
    let shutdown = CancellationToken::new();

    // todo: should scale with CU
    let endpoint_rate_limiter = Arc::new(EndpointRateLimiter::new_with_shards(
        LeakyBucketConfig {
            rps: 10.0,
            max: 100.0,
        },
        16,
    ));
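    // Roughly: each endpoint can sustain ~10 requests/second and burst up to 100
    // before the leaky bucket starts rejecting; 16 shards spread the lock contention.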

    let mut maintenance_tasks = JoinSet::new();

    let refresh_config_notify = Arc::new(Notify::new());
    maintenance_tasks.spawn(crate::signals::handle(shutdown.clone(), {
        let refresh_config_notify = Arc::clone(&refresh_config_notify);
        move || {
            refresh_config_notify.notify_one();
        }
    }));

    // trigger the first config load **after** setting up the signal hook
    // to avoid the race condition where:
    // 1. No config file is registered when local_proxy starts up.
    // 2. The config file is written, but the signal arrives before the hook is installed.
    // 3. local_proxy completes startup with no config loaded, despite there being a registered config.
    refresh_config_notify.notify_one();
    tokio::spawn(refresh_config_loop(
        config,
        args.config_path,
        refresh_config_notify,
    ));

    maintenance_tasks.spawn(crate::http::health_server::task_main(
        metrics_listener,
        AppMetrics {
            jemalloc,
            neon_metrics,
            proxy: crate::metrics::Metrics::get(),
        },
    ));

    let task = serverless::task_main(
        config,
        auth_backend,
        http_listener,
        shutdown.clone(),
        Arc::new(CancellationHandler::new(&config.connect_to_compute, None)),
        endpoint_rate_limiter,
    );

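    // Race the maintenance tasks against the SQL-over-HTTP task: whichever finishes
    // first decides how the process exits. The empty `match ... {}` in the first arm
    // compiles only because the maintenance tasks' success type is uninhabited, so a
    // joined maintenance task can only surface an error (propagated by `?`).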
    match futures::future::select(pin!(maintenance_tasks.join_next()), pin!(task)).await {
        // exit immediately on maintenance task completion
        Either::Left((Some(res), _)) => match crate::error::flatten_err(res)? {},
        // exit with error immediately if all maintenance tasks have ceased (should be caught by branch above)
        Either::Left((None, _)) => bail!("no maintenance tasks running. invalid state"),
        // exit immediately on client task error
        Either::Right((res, _)) => res?,
    }

    Ok(())
}

/// ProxyConfig is created at proxy startup, and lives forever.
fn build_config(args: &LocalProxyCliArgs) -> anyhow::Result<&'static ProxyConfig> {
    let config::ConcurrencyLockOptions {
        shards,
        limiter,
        epoch,
        timeout,
    } = args.connect_compute_lock.parse()?;
    info!(
        ?limiter,
        shards,
        ?epoch,
        "Using NodeLocks (connect_compute)"
    );
    let connect_compute_locks = ApiLocks::new(
        "connect_compute_lock",
        limiter,
        shards,
        timeout,
        epoch,
        &Metrics::get().proxy.connect_compute_lock,
    );

    let http_config = HttpConfig {
        accept_websockets: false,
        pool_options: GlobalConnPoolOptions {
            gc_epoch: Duration::from_secs(60),
            pool_shards: 2,
            idle_timeout: args.sql_over_http.sql_over_http_idle_timeout,
            opt_in: false,

            max_conns_per_endpoint: args.sql_over_http.sql_over_http_pool_max_total_conns,
            max_total_conns: args.sql_over_http.sql_over_http_pool_max_total_conns,
        },
        cancel_set: CancelSet::new(args.sql_over_http.sql_over_http_cancel_set_shards),
        client_conn_threshold: args.sql_over_http.sql_over_http_client_conn_threshold,
        max_request_size_bytes: args.sql_over_http.sql_over_http_max_request_size_bytes,
        max_response_size_bytes: args.sql_over_http.sql_over_http_max_response_size_bytes,
    };

    let compute_config = ComputeConfig {
        retry: RetryConfig::parse(RetryConfig::CONNECT_TO_COMPUTE_DEFAULT_VALUES)?,
        tls: Arc::new(compute_client_config_with_root_certs()?),
        timeout: Duration::from_secs(2),
    };

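    // Leaking the Box is intentional: it hands back a `&'static ProxyConfig` that is
    // never dropped, matching the "lives forever" contract in the doc comment above.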
    Ok(Box::leak(Box::new(ProxyConfig {
        tls_config: ArcSwapOption::from(None),
        metric_collection: None,
        http_config,
        authentication_config: AuthenticationConfig {
            jwks_cache: JwkCache::default(),
            thread_pool: ThreadPool::new(0),
            scram_protocol_timeout: Duration::from_secs(10),
            ip_allowlist_check_enabled: true,
            is_vpc_acccess_proxy: false,
            is_auth_broker: false,
            accept_jwts: true,
            console_redirect_confirmation_timeout: Duration::ZERO,
        },
        proxy_protocol_v2: config::ProxyProtocolV2::Rejected,
        handshake_timeout: Duration::from_secs(10),
        region: "local".into(),
        wake_compute_retry_config: RetryConfig::parse(RetryConfig::WAKE_COMPUTE_DEFAULT_VALUES)?,
        connect_compute_locks,
        connect_to_compute: compute_config,
    })))
}

/// auth::Backend is created at proxy startup, and lives forever.
fn build_auth_backend(args: &LocalProxyCliArgs) -> &'static auth::Backend<'static, ()> {
    let auth_backend = crate::auth::Backend::Local(crate::auth::backend::MaybeOwned::Owned(
        LocalBackend::new(args.postgres, args.compute_ctl.clone()),
    ));

    Box::leak(Box::new(auth_backend))
}

#[derive(Error, Debug)]
enum RefreshConfigError {
    #[error(transparent)]
    Read(#[from] std::io::Error),
    #[error(transparent)]
    Parse(#[from] serde_json::Error),
    #[error(transparent)]
    Validate(anyhow::Error),
    #[error(transparent)]
    Tls(anyhow::Error),
}

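/// Long-running task: waits for a notification on `rx` (the initial kick from `run`
/// or a SIGHUP forwarded by the signal handler), then re-reads the config file.
/// Errors are logged and the loop keeps waiting for the next notification.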
async fn refresh_config_loop(config: &ProxyConfig, path: Utf8PathBuf, rx: Arc<Notify>) {
    let mut init = true;
    loop {
        rx.notified().await;

        match refresh_config_inner(config, &path).await {
            Ok(()) => {}
            // Don't log file-not-found errors on the first check:
            // computes that don't use local_proxy never write a config file, so this is not an error.
            Err(RefreshConfigError::Read(e))
                if init && e.kind() == std::io::ErrorKind::NotFound =>
            {
                debug!(error=?e, ?path, "could not read config file");
            }
            Err(RefreshConfigError::Tls(e)) => {
                error!(error=?e, ?path, "could not read TLS certificates");
            }
            Err(e) => {
                error!(error=?e, ?path, "could not read config file");
            }
        }

        init = false;
    }
}

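/// Reads and applies the config file: validates any JWKS entries, publishes them to
/// `JWKS_ROLE_MAP`, and (re)loads the TLS server config if one is specified.
///
/// Illustrative file contents, assuming the serde field names of `LocalProxySpec`
/// match the fields accessed below (only `jwks` and `tls` are used here, both optional):
///
/// ```json
/// {
///   "jwks": [{
///     "id": "some-id",
///     "jwks_url": "https://auth.example.com/.well-known/jwks.json",
///     "provider_name": "example",
///     "jwt_audience": null,
///     "role_names": ["authenticated"]
///   }],
///   "tls": { "key_path": "server.key", "cert_path": "server.crt" }
/// }
/// ```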
async fn refresh_config_inner(
    config: &ProxyConfig,
    path: &Utf8Path,
) -> Result<(), RefreshConfigError> {
    let bytes = tokio::fs::read(&path).await?;
    let data: LocalProxySpec = serde_json::from_slice(&bytes)?;

    let mut jwks_set = vec![];

    fn parse_jwks_settings(jwks: compute_api::spec::JwksSettings) -> anyhow::Result<JwksSettings> {
        let mut jwks_url = url::Url::from_str(&jwks.jwks_url).context("parsing JWKS url")?;

        ensure!(
            jwks_url.has_authority()
                && (jwks_url.scheme() == "http" || jwks_url.scheme() == "https"),
            "Invalid JWKS url. Must be HTTP",
        );

        ensure!(
            jwks_url.host().is_some_and(|h| h != url::Host::Domain("")),
            "Invalid JWKS url. No domain listed",
        );

        // clear username, password and ports
        jwks_url
            .set_username("")
            .expect("url can be a base and has a valid host and is not a file. should not error");
        jwks_url
            .set_password(None)
            .expect("url can be a base and has a valid host and is not a file. should not error");
        // local testing is hard if we need to have a specific restricted port
        if cfg!(not(feature = "testing")) {
            jwks_url.set_port(None).expect(
                "url can be a base and has a valid host and is not a file. should not error",
            );
        }

        // clear the fragment and query params
        jwks_url.set_fragment(None);
        jwks_url.query_pairs_mut().clear().finish();

        if jwks_url.scheme() != "https" {
            // local testing is hard if we need to set up https support.
            if cfg!(not(feature = "testing")) {
                jwks_url
                    .set_scheme("https")
                    .expect("should not error to set the scheme to https if it was http");
            } else {
                warn!(scheme = jwks_url.scheme(), "JWKS url is not HTTPS");
            }
        }

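        // Net effect of the sanitization above: credentials, port, query params and
        // fragment are stripped, and the scheme is forced to https (port and scheme
        // are left alone in `testing` builds to ease local setups).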
        Ok(JwksSettings {
            id: jwks.id,
            jwks_url,
            _provider_name: jwks.provider_name,
            jwt_audience: jwks.jwt_audience,
            role_names: jwks
                .role_names
                .into_iter()
                .map(RoleName::from)
                .map(|s| RoleNameInt::from(&s))
                .collect(),
        })
    }

    for jwks in data.jwks.into_iter().flatten() {
        jwks_set.push(parse_jwks_settings(jwks).map_err(RefreshConfigError::Validate)?);
    }

    info!("successfully loaded new config");
    JWKS_ROLE_MAP.store(Some(Arc::new(EndpointJwksResponse { jwks: jwks_set })));

    if let Some(tls_config) = data.tls {
        let tls_config = tokio::task::spawn_blocking(move || {
            crate::tls::server_config::configure_tls(
                tls_config.key_path.as_ref(),
                tls_config.cert_path.as_ref(),
                None,
                false,
            )
        })
        .await
        .propagate_task_panic()
        .map_err(RefreshConfigError::Tls)?;
        config.tls_config.store(Some(Arc::new(tls_config)));
    }

    Ok(())
}