Line data Source code
1 : use std::io;
2 : use std::sync::Arc;
3 : use std::time::Duration;
4 :
5 : use async_trait::async_trait;
6 : use hyper_util::rt::{TokioExecutor, TokioIo, TokioTimer};
7 : use p256::ecdsa::SigningKey;
8 : use p256::elliptic_curve::JwkEcKey;
9 : use rand::rngs::OsRng;
10 : use tokio::net::{lookup_host, TcpStream};
11 : use tracing::field::display;
12 : use tracing::{debug, info};
13 :
14 : use super::conn_pool::poll_client;
15 : use super::conn_pool_lib::{Client, ConnInfo, EndpointConnPool, GlobalConnPool};
16 : use super::http_conn_pool::{self, poll_http2_client, HttpConnPool, Send};
17 : use super::local_conn_pool::{self, LocalConnPool, EXT_NAME, EXT_SCHEMA, EXT_VERSION};
18 : use crate::auth::backend::local::StaticAuthRules;
19 : use crate::auth::backend::{ComputeCredentials, ComputeUserInfo};
20 : use crate::auth::{self, check_peer_addr_is_in_list, AuthError};
21 : use crate::compute;
22 : use crate::compute_ctl::{
23 : ComputeCtlError, ExtensionInstallRequest, Privilege, SetRoleGrantsRequest,
24 : };
25 : use crate::config::ProxyConfig;
26 : use crate::context::RequestContext;
27 : use crate::control_plane::client::ApiLockError;
28 : use crate::control_plane::errors::{GetAuthInfoError, WakeComputeError};
29 : use crate::control_plane::locks::ApiLocks;
30 : use crate::control_plane::CachedNodeInfo;
31 : use crate::error::{ErrorKind, ReportableError, UserFacingError};
32 : use crate::intern::EndpointIdInt;
33 : use crate::proxy::connect_compute::ConnectMechanism;
34 : use crate::proxy::retry::{CouldRetry, ShouldRetryWakeCompute};
35 : use crate::rate_limiter::EndpointRateLimiter;
36 : use crate::types::{EndpointId, Host, LOCAL_PROXY_SUFFIX};
37 :
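/// Connection-pooling backend for the http flow. Holds the HTTP/2 pool towards local-proxy,
/// the local postgres pool, and the regular postgres connection pool, together with the proxy
/// config, the auth backend, and the per-endpoint rate limiter.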
38 : pub(crate) struct PoolingBackend {
39 : pub(crate) http_conn_pool: Arc<GlobalConnPool<Send, HttpConnPool<Send>>>,
40 : pub(crate) local_pool: Arc<LocalConnPool<tokio_postgres::Client>>,
41 : pub(crate) pool:
42 : Arc<GlobalConnPool<tokio_postgres::Client, EndpointConnPool<tokio_postgres::Client>>>,
43 :
44 : pub(crate) config: &'static ProxyConfig,
45 : pub(crate) auth_backend: &'static crate::auth::Backend<'static, ()>,
46 : pub(crate) endpoint_rate_limiter: Arc<EndpointRateLimiter>,
47 : }
48 :
49 : impl PoolingBackend {
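/// Authenticate an http-flow client with a cleartext password: enforce the IP allowlist and
/// the per-endpoint rate limit, fetch the role's (possibly cached) secret, then validate the
/// password and build the compute credentials.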
50 0 : pub(crate) async fn authenticate_with_password(
51 0 : &self,
52 0 : ctx: &RequestContext,
53 0 : user_info: &ComputeUserInfo,
54 0 : password: &[u8],
55 0 : ) -> Result<ComputeCredentials, AuthError> {
56 0 : let user_info = user_info.clone();
57 0 : let backend = self.auth_backend.as_ref().map(|()| user_info.clone());
58 0 : let (allowed_ips, maybe_secret) = backend.get_allowed_ips_and_secret(ctx).await?;
59 0 : if self.config.authentication_config.ip_allowlist_check_enabled
60 0 : && !check_peer_addr_is_in_list(&ctx.peer_addr(), &allowed_ips)
61 : {
62 0 : return Err(AuthError::ip_address_not_allowed(ctx.peer_addr()));
63 0 : }
64 0 : if !self
65 0 : .endpoint_rate_limiter
66 0 : .check(user_info.endpoint.clone().into(), 1)
67 : {
68 0 : return Err(AuthError::too_many_connections());
69 0 : }
70 0 : let cached_secret = match maybe_secret {
71 0 : Some(secret) => secret,
72 0 : None => backend.get_role_secret(ctx).await?,
73 : };
74 :
75 0 : let secret = match cached_secret.value.clone() {
76 0 : Some(secret) => self.config.authentication_config.check_rate_limit(
77 0 : ctx,
78 0 : secret,
79 0 : &user_info.endpoint,
80 0 : true,
81 0 : )?,
82 : None => {
83 : // If we don't have an authentication secret, we can just return an error for the http flow.
84 0 : info!("authentication info not found");
85 0 : return Err(AuthError::password_failed(&*user_info.user));
86 : }
87 : };
88 0 : let ep = EndpointIdInt::from(&user_info.endpoint);
89 0 : let auth_outcome = crate::auth::validate_password_and_exchange(
90 0 : &self.config.authentication_config.thread_pool,
91 0 : ep,
92 0 : password,
93 0 : secret,
94 0 : )
95 0 : .await?;
96 0 : let res = match auth_outcome {
97 0 : crate::sasl::Outcome::Success(key) => {
98 0 : info!("user successfully authenticated");
99 0 : Ok(key)
100 : }
101 0 : crate::sasl::Outcome::Failure(reason) => {
102 0 : info!("auth backend failed with an error: {reason}");
103 0 : Err(AuthError::password_failed(&*user_info.user))
104 : }
105 : };
106 0 : res.map(|key| ComputeCredentials {
107 0 : info: user_info,
108 0 : keys: key,
109 0 : })
110 0 : }
111 :
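/// Authenticate an http-flow client with a JWT. With the control-plane backend the token is
/// checked against the cached JWKS and no credential keys are kept; with the local backend the
/// JWKS check uses `StaticAuthRules` and the resulting keys are returned.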
112 0 : pub(crate) async fn authenticate_with_jwt(
113 0 : &self,
114 0 : ctx: &RequestContext,
115 0 : user_info: &ComputeUserInfo,
116 0 : jwt: String,
117 0 : ) -> Result<ComputeCredentials, AuthError> {
118 0 : match &self.auth_backend {
119 0 : crate::auth::Backend::ControlPlane(console, ()) => {
120 0 : self.config
121 0 : .authentication_config
122 0 : .jwks_cache
123 0 : .check_jwt(
124 0 : ctx,
125 0 : user_info.endpoint.clone(),
126 0 : &user_info.user,
127 0 : &**console,
128 0 : &jwt,
129 0 : )
130 0 : .await?;
131 :
132 0 : Ok(ComputeCredentials {
133 0 : info: user_info.clone(),
134 0 : keys: crate::auth::backend::ComputeCredentialKeys::None,
135 0 : })
136 : }
137 : crate::auth::Backend::Local(_) => {
138 0 : let keys = self
139 0 : .config
140 0 : .authentication_config
141 0 : .jwks_cache
142 0 : .check_jwt(
143 0 : ctx,
144 0 : user_info.endpoint.clone(),
145 0 : &user_info.user,
146 0 : &StaticAuthRules,
147 0 : &jwt,
148 0 : )
149 0 : .await?;
150 :
151 0 : Ok(ComputeCredentials {
152 0 : info: user_info.clone(),
153 0 : keys,
154 0 : })
155 : }
156 : }
157 0 : }
158 :
159 : // Wake up the destination if needed. The code here is a bit involved because
160 : // we reuse the code from the regular proxy flow and need to prepare a few
161 : // structures that it expects.
162 0 : #[tracing::instrument(fields(pid = tracing::field::Empty), skip_all)]
163 : pub(crate) async fn connect_to_compute(
164 : &self,
165 : ctx: &RequestContext,
166 : conn_info: ConnInfo,
167 : keys: ComputeCredentials,
168 : force_new: bool,
169 : ) -> Result<Client<tokio_postgres::Client>, HttpConnError> {
170 : let maybe_client = if force_new {
171 : debug!("pool: pool is disabled");
172 : None
173 : } else {
174 : debug!("pool: looking for an existing connection");
175 : self.pool.get(ctx, &conn_info)?
176 : };
177 :
178 : if let Some(client) = maybe_client {
179 : return Ok(client);
180 : }
181 : let conn_id = uuid::Uuid::new_v4();
182 : tracing::Span::current().record("conn_id", display(conn_id));
183 : info!(%conn_id, "pool: opening a new connection '{conn_info}'");
184 0 : let backend = self.auth_backend.as_ref().map(|()| keys);
185 : crate::proxy::connect_compute::connect_to_compute(
186 : ctx,
187 : &TokioMechanism {
188 : conn_id,
189 : conn_info,
190 : pool: self.pool.clone(),
191 : locks: &self.config.connect_compute_locks,
192 : },
193 : &backend,
194 : false, // do not allow self-signed compute for the http flow
195 : self.config.wake_compute_retry_config,
196 : self.config.connect_to_compute_retry_config,
197 : )
198 : .await
199 : }
200 :
201 : // Wake up the destination if needed and open an HTTP/2 connection to its local-proxy
202 0 : #[tracing::instrument(fields(pid = tracing::field::Empty), skip_all)]
203 : pub(crate) async fn connect_to_local_proxy(
204 : &self,
205 : ctx: &RequestContext,
206 : conn_info: ConnInfo,
207 : ) -> Result<http_conn_pool::Client<Send>, HttpConnError> {
208 : debug!("pool: looking for an existing connection");
209 : if let Ok(Some(client)) = self.http_conn_pool.get(ctx, &conn_info) {
210 : return Ok(client);
211 : }
212 :
213 : let conn_id = uuid::Uuid::new_v4();
214 : tracing::Span::current().record("conn_id", display(conn_id));
215 : debug!(%conn_id, "pool: opening a new connection '{conn_info}'");
216 0 : let backend = self.auth_backend.as_ref().map(|()| ComputeCredentials {
217 0 : info: ComputeUserInfo {
218 0 : user: conn_info.user_info.user.clone(),
219 0 : endpoint: EndpointId::from(format!(
220 0 : "{}{LOCAL_PROXY_SUFFIX}",
221 0 : conn_info.user_info.endpoint.normalize()
222 0 : )),
223 0 : options: conn_info.user_info.options.clone(),
224 0 : },
225 0 : keys: crate::auth::backend::ComputeCredentialKeys::None,
226 0 : });
227 : crate::proxy::connect_compute::connect_to_compute(
228 : ctx,
229 : &HyperMechanism {
230 : conn_id,
231 : conn_info,
232 : pool: self.http_conn_pool.clone(),
233 : locks: &self.config.connect_compute_locks,
234 : },
235 : &backend,
236 : false, // do not allow self-signed compute for the http flow
237 : self.config.wake_compute_retry_config,
238 : self.config.connect_to_compute_retry_config,
239 : )
240 : .await
241 : }
242 :
243 : /// Connect to postgres over localhost.
244 : ///
245 : /// We expect postgres to already be running here, so we won't do any retries.
246 : ///
247 : /// # Panics
248 : ///
249 : /// Panics if called with a non-local_proxy backend.
250 0 : #[tracing::instrument(fields(pid = tracing::field::Empty), skip_all)]
251 : pub(crate) async fn connect_to_local_postgres(
252 : &self,
253 : ctx: &RequestContext,
254 : conn_info: ConnInfo,
255 : ) -> Result<Client<tokio_postgres::Client>, HttpConnError> {
256 : if let Some(client) = self.local_pool.get(ctx, &conn_info)? {
257 : return Ok(client);
258 : }
259 :
260 : let local_backend = match &self.auth_backend {
261 : auth::Backend::ControlPlane(_, ()) => {
262 : unreachable!("only local_proxy can connect to local postgres")
263 : }
264 : auth::Backend::Local(local) => local,
265 : };
266 :
267 : if !self.local_pool.initialized(&conn_info) {
268 : // Only run the extension install and usage grant for one connection at a time.
269 : let _permit = local_backend.initialize.acquire().await.unwrap();
270 :
271 : // Check again: another task may have initialized it while we were waiting for the permit.
272 : if !self.local_pool.initialized(&conn_info) {
273 : local_backend
274 : .compute_ctl
275 : .install_extension(&ExtensionInstallRequest {
276 : extension: EXT_NAME,
277 : database: conn_info.dbname.clone(),
278 : version: EXT_VERSION,
279 : })
280 : .await?;
281 :
282 : local_backend
283 : .compute_ctl
284 : .grant_role(&SetRoleGrantsRequest {
285 : schema: EXT_SCHEMA,
286 : privileges: vec![Privilege::Usage],
287 : database: conn_info.dbname.clone(),
288 : role: conn_info.user_info.user.clone(),
289 : })
290 : .await?;
291 :
292 : self.local_pool.set_initialized(&conn_info);
293 : }
294 : }
295 :
296 : let conn_id = uuid::Uuid::new_v4();
297 : tracing::Span::current().record("conn_id", display(conn_id));
298 : info!(%conn_id, "local_pool: opening a new connection '{conn_info}'");
299 :
300 : let mut node_info = local_backend.node_info.clone();
301 :
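// A fresh keypair is generated per connection: the public half is handed to postgres via the
// `pg_session_jwt.jwk` option below, while the signing key is passed to
// `local_conn_pool::poll_client` together with the connection.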
302 : let (key, jwk) = create_random_jwk();
303 :
304 : let config = node_info
305 : .config
306 : .user(&conn_info.user_info.user)
307 : .dbname(&conn_info.dbname)
308 : .options(&format!(
309 : "-c pg_session_jwt.jwk={}",
310 : serde_json::to_string(&jwk).expect("serializing jwk to json should not fail")
311 : ));
312 :
313 : let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute);
314 : let (client, connection) = config.connect(tokio_postgres::NoTls).await?;
315 : drop(pause);
316 :
317 : let pid = client.get_process_id();
318 : tracing::Span::current().record("pid", pid);
319 :
320 : let mut handle = local_conn_pool::poll_client(
321 : self.local_pool.clone(),
322 : ctx,
323 : conn_info,
324 : client,
325 : connection,
326 : key,
327 : conn_id,
328 : node_info.aux.clone(),
329 : );
330 :
331 : {
332 : let (client, mut discard) = handle.inner();
333 : debug!("setting up backend session state");
334 :
335 : // initiates the auth session
336 : if let Err(e) = client.query("select auth.init()", &[]).await {
337 : discard.discard();
338 : return Err(e.into());
339 : }
340 :
341 : info!("backend session state initialized");
342 : }
343 :
344 : Ok(handle)
345 : }
346 : }
347 :
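/// Generate an ephemeral P-256 signing key and export its public key as a JWK.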
348 0 : fn create_random_jwk() -> (SigningKey, JwkEcKey) {
349 0 : let key = SigningKey::random(&mut OsRng);
350 0 : let jwk = p256::PublicKey::from(key.verifying_key()).to_jwk();
351 0 : (key, jwk)
352 0 : }
353 :
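/// Errors raised while authenticating or connecting for the http flow.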
354 0 : #[derive(Debug, thiserror::Error)]
355 : pub(crate) enum HttpConnError {
356 : #[error("pooled connection closed at inconsistent state")]
357 : ConnectionClosedAbruptly(#[from] tokio::sync::watch::error::SendError<uuid::Uuid>),
358 : #[error("could not connection to postgres in compute")]
359 : PostgresConnectionError(#[from] tokio_postgres::Error),
360 : #[error("could not connection to local-proxy in compute")]
361 : LocalProxyConnectionError(#[from] LocalProxyConnError),
362 : #[error("could not parse JWT payload")]
363 : JwtPayloadError(serde_json::Error),
364 :
365 : #[error("could not install extension: {0}")]
366 : ComputeCtl(#[from] ComputeCtlError),
367 : #[error("could not get auth info")]
368 : GetAuthInfo(#[from] GetAuthInfoError),
369 : #[error("user not authenticated")]
370 : AuthError(#[from] AuthError),
371 : #[error("wake_compute returned error")]
372 : WakeCompute(#[from] WakeComputeError),
373 : #[error("error acquiring resource permit: {0}")]
374 : TooManyConnectionAttempts(#[from] ApiLockError),
375 : }
376 :
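/// Errors raised while establishing the HTTP/2 connection to local-proxy.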
377 0 : #[derive(Debug, thiserror::Error)]
378 : pub(crate) enum LocalProxyConnError {
379 : #[error("error with connection to local-proxy")]
380 : Io(#[source] std::io::Error),
381 : #[error("could not establish h2 connection")]
382 : H2(#[from] hyper::Error),
383 : }
384 :
385 : impl ReportableError for HttpConnError {
386 0 : fn get_error_kind(&self) -> ErrorKind {
387 0 : match self {
388 0 : HttpConnError::ConnectionClosedAbruptly(_) => ErrorKind::Compute,
389 0 : HttpConnError::PostgresConnectionError(p) => p.get_error_kind(),
390 0 : HttpConnError::LocalProxyConnectionError(_) => ErrorKind::Compute,
391 0 : HttpConnError::ComputeCtl(_) => ErrorKind::Service,
392 0 : HttpConnError::JwtPayloadError(_) => ErrorKind::User,
393 0 : HttpConnError::GetAuthInfo(a) => a.get_error_kind(),
394 0 : HttpConnError::AuthError(a) => a.get_error_kind(),
395 0 : HttpConnError::WakeCompute(w) => w.get_error_kind(),
396 0 : HttpConnError::TooManyConnectionAttempts(w) => w.get_error_kind(),
397 : }
398 0 : }
399 : }
400 :
401 : impl UserFacingError for HttpConnError {
402 0 : fn to_string_client(&self) -> String {
403 0 : match self {
404 0 : HttpConnError::ConnectionClosedAbruptly(_) => self.to_string(),
405 0 : HttpConnError::PostgresConnectionError(p) => p.to_string(),
406 0 : HttpConnError::LocalProxyConnectionError(p) => p.to_string(),
407 0 : HttpConnError::ComputeCtl(_) => "could not set up the JWT authorization database extension".to_string(),
408 0 : HttpConnError::JwtPayloadError(p) => p.to_string(),
409 0 : HttpConnError::GetAuthInfo(c) => c.to_string_client(),
410 0 : HttpConnError::AuthError(c) => c.to_string_client(),
411 0 : HttpConnError::WakeCompute(c) => c.to_string_client(),
412 : HttpConnError::TooManyConnectionAttempts(_) => {
413 0 : "Failed to acquire permit to connect to the database. Too many database connection attempts are currently ongoing.".to_owned()
414 : }
415 : }
416 0 : }
417 : }
418 :
419 : impl CouldRetry for HttpConnError {
420 0 : fn could_retry(&self) -> bool {
421 0 : match self {
422 0 : HttpConnError::PostgresConnectionError(e) => e.could_retry(),
423 0 : HttpConnError::LocalProxyConnectionError(e) => e.could_retry(),
424 0 : HttpConnError::ComputeCtl(_) => false,
425 0 : HttpConnError::ConnectionClosedAbruptly(_) => false,
426 0 : HttpConnError::JwtPayloadError(_) => false,
427 0 : HttpConnError::GetAuthInfo(_) => false,
428 0 : HttpConnError::AuthError(_) => false,
429 0 : HttpConnError::WakeCompute(_) => false,
430 0 : HttpConnError::TooManyConnectionAttempts(_) => false,
431 : }
432 0 : }
433 : }
434 : impl ShouldRetryWakeCompute for HttpConnError {
435 0 : fn should_retry_wake_compute(&self) -> bool {
436 0 : match self {
437 0 : HttpConnError::PostgresConnectionError(e) => e.should_retry_wake_compute(),
438 : // we never checked cache validity
439 0 : HttpConnError::TooManyConnectionAttempts(_) => false,
440 0 : _ => true,
441 : }
442 0 : }
443 : }
444 :
445 : impl ReportableError for LocalProxyConnError {
446 0 : fn get_error_kind(&self) -> ErrorKind {
447 0 : match self {
448 0 : LocalProxyConnError::Io(_) => ErrorKind::Compute,
449 0 : LocalProxyConnError::H2(_) => ErrorKind::Compute,
450 : }
451 0 : }
452 : }
453 :
454 : impl UserFacingError for LocalProxyConnError {
455 0 : fn to_string_client(&self) -> String {
456 0 : "Could not establish HTTP connection to the database".to_string()
457 0 : }
458 : }
459 :
460 : impl CouldRetry for LocalProxyConnError {
461 0 : fn could_retry(&self) -> bool {
462 0 : match self {
463 0 : LocalProxyConnError::Io(_) => false,
464 0 : LocalProxyConnError::H2(_) => false,
465 : }
466 0 : }
467 : }
468 : impl ShouldRetryWakeCompute for LocalProxyConnError {
469 0 : fn should_retry_wake_compute(&self) -> bool {
470 0 : match self {
471 0 : LocalProxyConnError::Io(_) => false,
472 0 : LocalProxyConnError::H2(_) => false,
473 : }
474 0 : }
475 : }
476 :
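/// `ConnectMechanism` that opens a direct tokio-postgres connection to compute and registers
/// it with the regular connection pool.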
477 : struct TokioMechanism {
478 : pool: Arc<GlobalConnPool<tokio_postgres::Client, EndpointConnPool<tokio_postgres::Client>>>,
479 : conn_info: ConnInfo,
480 : conn_id: uuid::Uuid,
481 :
482 : /// connect_to_compute concurrency lock
483 : locks: &'static ApiLocks<Host>,
484 : }
485 :
486 : #[async_trait]
487 : impl ConnectMechanism for TokioMechanism {
488 : type Connection = Client<tokio_postgres::Client>;
489 : type ConnectError = HttpConnError;
490 : type Error = HttpConnError;
491 :
492 0 : async fn connect_once(
493 0 : &self,
494 0 : ctx: &RequestContext,
495 0 : node_info: &CachedNodeInfo,
496 0 : timeout: Duration,
497 0 : ) -> Result<Self::Connection, Self::ConnectError> {
498 0 : let host = node_info.config.get_host()?;
499 0 : let permit = self.locks.get_permit(&host).await?;
500 :
501 0 : let mut config = (*node_info.config).clone();
502 0 : let config = config
503 0 : .user(&self.conn_info.user_info.user)
504 0 : .dbname(&self.conn_info.dbname)
505 0 : .connect_timeout(timeout);
506 0 :
507 0 : let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute);
508 0 : let res = config.connect(tokio_postgres::NoTls).await;
509 0 : drop(pause);
510 0 : let (client, connection) = permit.release_result(res)?;
511 :
512 0 : tracing::Span::current().record("pid", tracing::field::display(client.get_process_id()));
513 0 : Ok(poll_client(
514 0 : self.pool.clone(),
515 0 : ctx,
516 0 : self.conn_info.clone(),
517 0 : client,
518 0 : connection,
519 0 : self.conn_id,
520 0 : node_info.aux.clone(),
521 0 : ))
522 0 : }
523 :
524 0 : fn update_connect_config(&self, _config: &mut compute::ConnCfg) {}
525 : }
526 :
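/// `ConnectMechanism` that dials the local-proxy port on compute, performs an HTTP/2 handshake,
/// and registers the resulting client with the HTTP connection pool.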
527 : struct HyperMechanism {
528 : pool: Arc<GlobalConnPool<Send, HttpConnPool<Send>>>,
529 : conn_info: ConnInfo,
530 : conn_id: uuid::Uuid,
531 :
532 : /// connect_to_compute concurrency lock
533 : locks: &'static ApiLocks<Host>,
534 : }
535 :
536 : #[async_trait]
537 : impl ConnectMechanism for HyperMechanism {
538 : type Connection = http_conn_pool::Client<Send>;
539 : type ConnectError = HttpConnError;
540 : type Error = HttpConnError;
541 :
542 0 : async fn connect_once(
543 0 : &self,
544 0 : ctx: &RequestContext,
545 0 : node_info: &CachedNodeInfo,
546 0 : timeout: Duration,
547 0 : ) -> Result<Self::Connection, Self::ConnectError> {
548 0 : let host = node_info.config.get_host()?;
549 0 : let permit = self.locks.get_permit(&host).await?;
550 :
551 0 : let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute);
552 :
553 0 : let port = *node_info.config.get_ports().first().ok_or_else(|| {
554 0 : HttpConnError::WakeCompute(WakeComputeError::BadComputeAddress(
555 0 : "local-proxy port missing on compute address".into(),
556 0 : ))
557 0 : })?;
558 0 : let res = connect_http2(&host, port, timeout).await;
559 0 : drop(pause);
560 0 : let (client, connection) = permit.release_result(res)?;
561 :
562 0 : Ok(poll_http2_client(
563 0 : self.pool.clone(),
564 0 : ctx,
565 0 : &self.conn_info,
566 0 : client,
567 0 : connection,
568 0 : self.conn_id,
569 0 : node_info.aux.clone(),
570 0 : ))
571 0 : }
572 :
573 0 : fn update_connect_config(&self, _config: &mut compute::ConnCfg) {}
574 : }
575 :
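/// Resolve `host`, try each resolved address until one connects within `timeout`, then perform
/// an HTTP/2 handshake with keep-alive pings enabled.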
576 0 : async fn connect_http2(
577 0 : host: &str,
578 0 : port: u16,
579 0 : timeout: Duration,
580 0 : ) -> Result<(http_conn_pool::Send, http_conn_pool::Connect), LocalProxyConnError> {
581 : // Assumption: `host` is an IP address, so this lookup should not actually perform any DNS requests.
582 : // TODO: add that assumption as a guarantee in the control-plane API.
583 0 : let mut addrs = lookup_host((host, port))
584 0 : .await
585 0 : .map_err(LocalProxyConnError::Io)?;
586 :
587 0 : let mut last_err = None;
588 :
589 0 : let stream = loop {
590 0 : let Some(addr) = addrs.next() else {
591 0 : return Err(last_err.unwrap_or_else(|| {
592 0 : LocalProxyConnError::Io(io::Error::new(
593 0 : io::ErrorKind::InvalidInput,
594 0 : "could not resolve any addresses",
595 0 : ))
596 0 : }));
597 : };
598 :
599 0 : match tokio::time::timeout(timeout, TcpStream::connect(addr)).await {
600 0 : Ok(Ok(stream)) => {
601 0 : stream.set_nodelay(true).map_err(LocalProxyConnError::Io)?;
602 0 : break stream;
603 : }
604 0 : Ok(Err(e)) => {
605 0 : last_err = Some(LocalProxyConnError::Io(e));
606 0 : }
607 0 : Err(e) => {
608 0 : last_err = Some(LocalProxyConnError::Io(io::Error::new(
609 0 : io::ErrorKind::TimedOut,
610 0 : e,
611 0 : )));
612 0 : }
613 : };
614 : };
615 :
616 0 : let (client, connection) = hyper::client::conn::http2::Builder::new(TokioExecutor::new())
617 0 : .timer(TokioTimer::new())
618 0 : .keep_alive_interval(Duration::from_secs(20))
619 0 : .keep_alive_while_idle(true)
620 0 : .keep_alive_timeout(Duration::from_secs(5))
621 0 : .handshake(TokioIo::new(stream))
622 0 : .await?;
623 :
624 0 : Ok((client, connection))
625 0 : }