use std::sync::Arc;

use futures::TryFutureExt;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, Instrument};

use crate::auth::backend::ConsoleRedirectBackend;
use crate::cancellation::{CancellationHandlerMain, CancellationHandlerMainInternal};
use crate::config::{ProxyConfig, ProxyProtocolV2};
use crate::context::RequestMonitoring;
use crate::error::ReportableError;
use crate::metrics::{Metrics, NumClientConnectionsGuard};
use crate::protocol2::{read_proxy_protocol, ConnectHeader, ConnectionInfo};
use crate::proxy::connect_compute::{connect_to_compute, TcpMechanism};
use crate::proxy::handshake::{handshake, HandshakeData};
use crate::proxy::passthrough::ProxyPassthrough;
use crate::proxy::{
    prepare_client_connection, run_until_cancelled, ClientRequestError, ErrorSource,
};

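/// Accept loop for the console-redirect proxy. Accepts TCP connections,
/// strips the optional PROXY protocol header, and spawns one task per client.
/// When `cancellation_token` fires, the loop stops accepting, the listener is
/// dropped, and the task tracker drains all in-flight connections.
///
/// A minimal wiring sketch (hypothetical bind address and setup; `config`,
/// `backend`, `token`, and `handler` are built elsewhere):
///
/// ```ignore
/// let listener = tokio::net::TcpListener::bind("0.0.0.0:4432").await?;
/// tokio::spawn(task_main(config, backend, listener, token.clone(), handler));
/// ```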
pub async fn task_main(
    config: &'static ProxyConfig,
    backend: &'static ConsoleRedirectBackend,
    listener: tokio::net::TcpListener,
    cancellation_token: CancellationToken,
    cancellation_handler: Arc<CancellationHandlerMain>,
) -> anyhow::Result<()> {
    scopeguard::defer! {
        info!("proxy has shut down");
    }

    // When set for the server socket, the keepalive setting
    // will be inherited by all accepted client sockets.
    socket2::SockRef::from(&listener).set_keepalive(true)?;

    let connections = tokio_util::task::task_tracker::TaskTracker::new();

    while let Some(accept_result) =
        run_until_cancelled(listener.accept(), &cancellation_token).await
    {
        let (socket, peer_addr) = accept_result?;

        let conn_gauge = Metrics::get()
            .proxy
            .client_connections
            .guard(crate::metrics::Protocol::Tcp);

        let session_id = uuid::Uuid::new_v4();
        let cancellation_handler = Arc::clone(&cancellation_handler);

        debug!(protocol = "tcp", %session_id, "accepted new TCP connection");

        connections.spawn(async move {
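            // Parse the optional PROXY protocol header first; whether it is
            // required, tolerated, or rejected is driven by config.proxy_protocol_v2.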
            let (socket, peer_addr) = match read_proxy_protocol(socket).await {
                Err(e) => {
                    error!("per-client task finished with an error: {e:#}");
                    return;
                }
                // Our load balancers will not send any more data; exit immediately.
                Ok((_socket, ConnectHeader::Local)) => {
                    debug!("healthcheck received");
                    return;
                }
                Ok((_socket, ConnectHeader::Missing))
                    if config.proxy_protocol_v2 == ProxyProtocolV2::Required =>
                {
                    error!("missing required proxy protocol header");
                    return;
                }
                Ok((_socket, ConnectHeader::Proxy(_)))
                    if config.proxy_protocol_v2 == ProxyProtocolV2::Rejected =>
                {
                    error!("proxy protocol header not supported");
                    return;
                }
                Ok((socket, ConnectHeader::Proxy(info))) => (socket, info),
                Ok((socket, ConnectHeader::Missing)) => {
                    (socket, ConnectionInfo { addr: peer_addr, extra: None })
                }
            };

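            // Disable Nagle's algorithm: the proxy forwards many small
            // protocol messages, where latency matters more than batching.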
            match socket.inner.set_nodelay(true) {
                Ok(()) => {}
                Err(e) => {
                    error!("per-client task finished with an error: failed to set socket option: {e:#}");
                    return;
                }
            };

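            // Per-connection telemetry context; its tracing span is attached
            // to everything logged while this session is handled.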
            let ctx = RequestMonitoring::new(
                session_id,
                peer_addr,
                crate::metrics::Protocol::Tcp,
                &config.region,
            );
            let span = ctx.span();

            let startup = Box::pin(
                handle_client(
                    config,
                    backend,
                    &ctx,
                    cancellation_handler,
                    socket,
                    conn_gauge,
                )
                .instrument(span.clone()),
            );
            let res = startup.await;

            match res {
                Err(e) => {
                    ctx.set_error_kind(e.get_error_kind());
                    error!(parent: &span, "per-client task finished with an error: {e:#}");
                }
                Ok(None) => {
                    ctx.set_success();
                }
                Ok(Some(p)) => {
                    ctx.set_success();
                    ctx.log_connect();
                    match p.proxy_pass().instrument(span.clone()).await {
                        Ok(()) => {}
                        Err(ErrorSource::Client(e)) => {
                            error!(parent: &span, "per-client task finished with an IO error from the client: {e:#}");
                        }
                        Err(ErrorSource::Compute(e)) => {
                            error!(parent: &span, "per-client task finished with an IO error from the compute: {e:#}");
                        }
                    }
                }
            }
        });
    }

    connections.close();
    drop(listener);

    // Drain in-flight connections before shutting down.
    connections.wait().await;

    Ok(())
}

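/// Drives a single client connection up to the point where traffic can flow:
/// TLS/startup handshake, console-redirect authentication, and the connection
/// to compute. Returns `Ok(None)` for cancellation requests (nothing left to
/// proxy) and `Ok(Some(_))` with a ready `ProxyPassthrough` otherwise.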
pub(crate) async fn handle_client<S: AsyncRead + AsyncWrite + Unpin>(
    config: &'static ProxyConfig,
    backend: &'static ConsoleRedirectBackend,
    ctx: &RequestMonitoring,
    cancellation_handler: Arc<CancellationHandlerMain>,
    stream: S,
    conn_gauge: NumClientConnectionsGuard<'static>,
) -> Result<Option<ProxyPassthrough<CancellationHandlerMainInternal, S>>, ClientRequestError> {
    info!(
        protocol = %ctx.protocol(),
        "handling interactive connection from client"
    );

    let metrics = &Metrics::get().proxy;
    let proto = ctx.protocol();
    let request_gauge = metrics.connection_requests.guard(proto);

    let tls = config.tls_config.as_ref();

    let record_handshake_error = !ctx.has_private_peer_addr();
    let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Client);
    let do_handshake = handshake(ctx, stream, tls, record_handshake_error);
    let (mut stream, params) =
        match tokio::time::timeout(config.handshake_timeout, do_handshake).await?? {
            HandshakeData::Startup(stream, params) => (stream, params),
            HandshakeData::Cancel(cancel_key_data) => {
                return Ok(cancellation_handler
                    .cancel_session(cancel_key_data, ctx.session_id())
                    .await
                    .map(|()| None)?)
            }
        };
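    // Resume the latency timer: time spent waiting on the client during the
    // handshake should not count towards connection latency.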
    drop(pause);

    ctx.set_db_options(params.clone());

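    // Console-redirect authentication; failures are reported to the client
    // over the postgres protocol before the connection is closed.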
    let user_info = match backend
        .authenticate(ctx, &config.authentication_config, &mut stream)
        .await
    {
        Ok(auth_result) => auth_result,
        Err(e) => {
            return stream.throw_error(e).await?;
        }
    };

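    // Connect to compute, retrying wake-ups and connection attempts per the
    // configured retry policies; errors are sent to the client via
    // `throw_error` rather than only logged.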
    let mut node = connect_to_compute(
        ctx,
        &TcpMechanism {
            params: &params,
            locks: &config.connect_compute_locks,
        },
        &user_info,
        config.allow_self_signed_compute,
        config.wake_compute_retry_config,
        config.connect_to_compute_retry_config,
    )
    .or_else(|e| stream.throw_error(e))
    .await?;

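    // Obtain a cancellation session so this connection can later be targeted
    // by a CancelRequest, then finish the startup exchange with the client.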
    let session = cancellation_handler.get_session();
    prepare_client_connection(&node, &session, &mut stream).await?;

    // Before proxy passing, forward to compute whatever data is left in the
    // PqStream input buffer. Normally there is none, but our serverless npm
    // driver in pipeline mode sends startup, password and first query
    // immediately after opening the connection.
    let (stream, read_buf) = stream.into_inner();
    node.stream.write_all(&read_buf).await?;

    Ok(Some(ProxyPassthrough {
        client: stream,
        aux: node.aux.clone(),
        compute: node,
        _req: request_gauge,
        _conn: conn_gauge,
        _cancel: session,
    }))
}