LCOV - code coverage report
Current view: top level - proxy/src - console_redirect_proxy.rs (source / functions)
Test: 07bee600374ccd486c69370d0972d9035964fe68.info
Test Date: 2025-02-20 13:11:02

              Coverage    Total    Hit
Lines:        0.0 %       182      0
Functions:    0.0 %       13       0

Source code
use std::sync::Arc;

use futures::{FutureExt, TryFutureExt};
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, Instrument};

use crate::auth::backend::ConsoleRedirectBackend;
use crate::cancellation::CancellationHandler;
use crate::config::{ProxyConfig, ProxyProtocolV2};
use crate::context::RequestContext;
use crate::error::ReportableError;
use crate::metrics::{Metrics, NumClientConnectionsGuard};
use crate::protocol2::{read_proxy_protocol, ConnectHeader, ConnectionInfo};
use crate::proxy::connect_compute::{connect_to_compute, TcpMechanism};
use crate::proxy::handshake::{handshake, HandshakeData};
use crate::proxy::passthrough::ProxyPassthrough;
use crate::proxy::{
    prepare_client_connection, run_until_cancelled, ClientRequestError, ErrorSource,
};

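// Accept loop for the console-redirect proxy. Each accepted TCP connection is
// optionally unwrapped from a proxy protocol V2 header, tagged with a fresh
// session id and RequestContext, and handed to `handle_client` on its own
// task. The two TaskTrackers, together with the CancellationToken, let a
// shutdown stop the accept loop and drain in-flight connections and pending
// cancel requests before this function returns.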
pub async fn task_main(
    config: &'static ProxyConfig,
    backend: &'static ConsoleRedirectBackend,
    listener: tokio::net::TcpListener,
    cancellation_token: CancellationToken,
    cancellation_handler: Arc<CancellationHandler>,
) -> anyhow::Result<()> {
    scopeguard::defer! {
        info!("proxy has shut down");
    }

    // When set for the server socket, the keepalive setting
    // will be inherited by all accepted client sockets.
    socket2::SockRef::from(&listener).set_keepalive(true)?;

    let connections = tokio_util::task::task_tracker::TaskTracker::new();
    let cancellations = tokio_util::task::task_tracker::TaskTracker::new();

    while let Some(accept_result) =
        run_until_cancelled(listener.accept(), &cancellation_token).await
    {
        let (socket, peer_addr) = accept_result?;

        let conn_gauge = Metrics::get()
            .proxy
            .client_connections
            .guard(crate::metrics::Protocol::Tcp);

        let session_id = uuid::Uuid::new_v4();
        let cancellation_handler = Arc::clone(&cancellation_handler);
        let cancellations = cancellations.clone();

        debug!(protocol = "tcp", %session_id, "accepted new TCP connection");

        connections.spawn(async move {
            let (socket, peer_addr) = match read_proxy_protocol(socket).await {
                Err(e) => {
                    error!("per-client task finished with an error: {e:#}");
                    return;
                }
                // our load balancers will not send any more data. let's just exit immediately
                Ok((_socket, ConnectHeader::Local)) => {
                    debug!("healthcheck received");
                    return;
                }
                Ok((_socket, ConnectHeader::Missing))
                    if config.proxy_protocol_v2 == ProxyProtocolV2::Required =>
                {
                    error!("missing required proxy protocol header");
                    return;
                }
                Ok((_socket, ConnectHeader::Proxy(_)))
                    if config.proxy_protocol_v2 == ProxyProtocolV2::Rejected =>
                {
                    error!("proxy protocol header not supported");
                    return;
                }
                Ok((socket, ConnectHeader::Proxy(info))) => (socket, info),
                Ok((socket, ConnectHeader::Missing)) => {
                    (socket, ConnectionInfo { addr: peer_addr, extra: None })
                }
            };

            match socket.inner.set_nodelay(true) {
                Ok(()) => {}
                Err(e) => {
                    error!("per-client task finished with an error: failed to set socket option: {e:#}");
                    return;
                }
            }

            let ctx = RequestContext::new(
                session_id,
                peer_addr,
                crate::metrics::Protocol::Tcp,
                &config.region,
            );

            let res = handle_client(
                config,
                backend,
                &ctx,
                cancellation_handler,
                socket,
                conn_gauge,
                cancellations,
            )
            .instrument(ctx.span())
            .boxed()
            .await;

            match res {
                Err(e) => {
                    ctx.set_error_kind(e.get_error_kind());
                    error!(parent: &ctx.span(), "per-client task finished with an error: {e:#}");
                }
                Ok(None) => {
                    ctx.set_success();
                }
                Ok(Some(p)) => {
                    ctx.set_success();
                    let _disconnect = ctx.log_connect();
                    match p.proxy_pass(&config.connect_to_compute).await {
                        Ok(()) => {}
                        Err(ErrorSource::Client(e)) => {
                            error!(?session_id, "per-client task finished with an IO error from the client: {e:#}");
                        }
                        Err(ErrorSource::Compute(e)) => {
                            error!(?session_id, "per-client task finished with an IO error from the compute: {e:#}");
                        }
                    }
                }
            }
        });
    }

    connections.close();
    cancellations.close();
    drop(listener);

    // Drain connections
    connections.wait().await;
    cancellations.wait().await;

    Ok(())
}

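// Per-connection flow: run the postgres handshake under
// `config.handshake_timeout`. A cancel request is forwarded to the
// CancellationHandler on a detached task; a startup packet proceeds through
// console-redirect authentication, a (retried) connection to compute,
// cancel-key registration, and forwarding of any data still buffered in the
// client stream, before a `ProxyPassthrough` is handed back to the caller.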
#[allow(clippy::too_many_arguments)]
pub(crate) async fn handle_client<S: AsyncRead + AsyncWrite + Unpin>(
    config: &'static ProxyConfig,
    backend: &'static ConsoleRedirectBackend,
    ctx: &RequestContext,
    cancellation_handler: Arc<CancellationHandler>,
    stream: S,
    conn_gauge: NumClientConnectionsGuard<'static>,
    cancellations: tokio_util::task::task_tracker::TaskTracker,
) -> Result<Option<ProxyPassthrough<S>>, ClientRequestError> {
    debug!(
        protocol = %ctx.protocol(),
        "handling interactive connection from client"
    );

    let metrics = &Metrics::get().proxy;
    let proto = ctx.protocol();
    let request_gauge = metrics.connection_requests.guard(proto);

    let tls = config.tls_config.as_ref();

    let record_handshake_error = !ctx.has_private_peer_addr();
    let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Client);
    let do_handshake = handshake(ctx, stream, tls, record_handshake_error);

    let (mut stream, params) = match tokio::time::timeout(config.handshake_timeout, do_handshake)
        .await??
    {
        HandshakeData::Startup(stream, params) => (stream, params),
        HandshakeData::Cancel(cancel_key_data) => {
            // spawn a task to cancel the session, but don't wait for it
            cancellations.spawn({
                let cancellation_handler_clone = Arc::clone(&cancellation_handler);
                let ctx = ctx.clone();
                let cancel_span = tracing::span!(parent: None, tracing::Level::INFO, "cancel_session", session_id = ?ctx.session_id());
                cancel_span.follows_from(tracing::Span::current());
                async move {
                    cancellation_handler_clone
                        .cancel_session(
                            cancel_key_data,
                            ctx,
                            config.authentication_config.ip_allowlist_check_enabled,
                            config.authentication_config.is_vpc_acccess_proxy,
                            backend.get_api(),
                        )
                        .await
                        .inspect_err(|e| debug!(error = ?e, "cancel_session failed"))
                        .ok();
                }
                .instrument(cancel_span)
            });

            return Ok(None);
        }
    };
    drop(pause);

    ctx.set_db_options(params.clone());

    let (node_info, user_info, _ip_allowlist) = match backend
        .authenticate(ctx, &config.authentication_config, &mut stream)
        .await
    {
        Ok(auth_result) => auth_result,
        Err(e) => {
            return stream.throw_error(e).await?;
        }
    };

    let mut node = connect_to_compute(
        ctx,
        &TcpMechanism {
            user_info,
            params_compat: true,
            params: &params,
            locks: &config.connect_compute_locks,
        },
        &node_info,
        config.wake_compute_retry_config,
        &config.connect_to_compute,
    )
    .or_else(|e| stream.throw_error(e))
    .await?;

    let cancellation_handler_clone = Arc::clone(&cancellation_handler);
    let session = cancellation_handler_clone.get_key();

    session
        .write_cancel_key(node.cancel_closure.clone())
        .await?;

    prepare_client_connection(&node, *session.key(), &mut stream).await?;

    // Before proxy passing, forward to compute whatever data is left in the
    // PqStream input buffer. Normally there is none, but our serverless npm
    // driver in pipeline mode sends startup, password and first query
    // immediately after opening the connection.
    let (stream, read_buf) = stream.into_inner();
    node.stream.write_all(&read_buf).await?;

    Ok(Some(ProxyPassthrough {
        client: stream,
        aux: node.aux.clone(),
        compute: node,
        session_id: ctx.session_id(),
        cancel: session,
        _req: request_gauge,
        _conn: conn_gauge,
    }))
}
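
// A minimal sketch of how a caller might wire up `task_main`. The `serve`
// name, the bind address, and the Ctrl-C handling (which assumes tokio's
// `signal` feature) are illustrative assumptions; `config`, `backend` and the
// CancellationHandler are assumed to be constructed elsewhere.
async fn serve(
    config: &'static ProxyConfig,
    backend: &'static ConsoleRedirectBackend,
    cancellation_handler: Arc<CancellationHandler>,
) -> anyhow::Result<()> {
    // Placeholder bind address for the client-facing listener.
    let listener = tokio::net::TcpListener::bind("0.0.0.0:5432").await?;
    let cancellation_token = CancellationToken::new();

    // Cancel on Ctrl-C so the accept loop stops and in-flight connections drain.
    tokio::spawn({
        let cancellation_token = cancellation_token.clone();
        async move {
            let _ = tokio::signal::ctrl_c().await;
            cancellation_token.cancel();
        }
    });

    task_main(config, backend, listener, cancellation_token, cancellation_handler).await
}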
        

Generated by: LCOV version 2.1-beta