Line data Source code
1 : use std::future::Future;
2 : use std::io::Write as _;
3 : use std::str::FromStr;
4 : use std::time::Duration;
5 :
6 : use anyhow::{Context, anyhow};
7 : use bytes::{Bytes, BytesMut};
8 : use hyper::header::{AUTHORIZATION, CONTENT_DISPOSITION, CONTENT_TYPE, HeaderName};
9 : use hyper::http::HeaderValue;
10 : use hyper::{Body, Method, Request, Response};
11 : use jsonwebtoken::TokenData;
12 : use metrics::{Encoder, IntCounter, TextEncoder, register_int_counter};
13 : use once_cell::sync::Lazy;
14 : use pprof::ProfilerGuardBuilder;
15 : use pprof::protos::Message as _;
16 : use routerify::ext::RequestExt;
17 : use routerify::{Middleware, RequestInfo, Router, RouterBuilder};
18 : use tokio::sync::{Mutex, Notify, mpsc};
19 : use tokio_stream::wrappers::ReceiverStream;
20 : use tokio_util::io::ReaderStream;
21 : use tracing::{Instrument, debug, info, info_span, warn};
22 : use utils::auth::{AuthError, Claims, SwappableJwtAuth};
23 :
24 : use crate::error::{ApiError, api_error_handler, route_error_handler};
25 : use crate::request::{get_query_param, parse_query_param};
26 :
27 0 : static SERVE_METRICS_COUNT: Lazy<IntCounter> = Lazy::new(|| {
28 0 : register_int_counter!(
29 0 : "libmetrics_metric_handler_requests_total",
30 0 : "Number of metric requests made"
31 0 : )
32 0 : .expect("failed to define a metric")
33 0 : });
34 :
35 : static X_REQUEST_ID_HEADER_STR: &str = "x-request-id";
36 :
37 : static X_REQUEST_ID_HEADER: HeaderName = HeaderName::from_static(X_REQUEST_ID_HEADER_STR);
38 : #[derive(Debug, Default, Clone)]
39 : struct RequestId(String);
40 :
41 : /// Adds a tracing `info_span!` instrumentation around the handler events, and
42 : /// logs the request start and end events for non-GET requests and non-200 responses.
43 : ///
44 : /// Usage: Replace `my_handler` with `|r| request_span(r, my_handler)`
45 : ///
46 : /// Use this to distinguish between logs of different HTTP requests: every request handler wrapped
47 : /// with this will get request info logged in the wrapping span, including the unique request ID.
48 : ///
49 : /// This also handles errors, logging them and converting them to an HTTP error response.
50 : ///
51 : /// NB: If the client disconnects, Hyper will drop the Future without polling it to
52 : /// completion. In other words, the handler must be async cancellation safe! `request_span`
53 : /// prints a warning to the log when that happens, so that you have some trace of it in
54 : /// the log.
55 : ///
56 : ///
57 : /// There could be other ways to implement similar functionality:
58 : ///
59 : /// * proc macros placed on top of all handler methods
60 : /// With all the drawbacks of proc macros, this would be no different implementation-wise,
61 : /// and would bring little code reduction compared to the existing approach.
62 : ///
63 : /// * Another `TraitExt` with e.g. the `get_with_span`, `post_with_span` methods to do similar logic,
64 : /// implemented for [`RouterBuilder`].
65 : /// Could be simpler, but we don't want to depend on [`routerify`] any further, as we aim to switch to another library later.
66 : ///
67 : /// * In theory, a span guard could've been created in a pre-request middleware and placed into a global collection, to be dropped
68 : /// later, in a post-response middleware.
69 : /// Due to the suspendable nature of futures, this would give contradictory results, which is exactly the opposite of what `tracing-futures`
70 : /// tries to achieve with the `.instrument` used in the current approach.
71 : ///
72 : /// If needed, a declarative macro to substitute the |r| ... closure boilerplate could be introduced.
73 0 : pub async fn request_span<R, H>(request: Request<Body>, handler: H) -> R::Output
74 0 : where
75 0 : R: Future<Output = Result<Response<Body>, ApiError>> + Send + 'static,
76 0 : H: FnOnce(Request<Body>) -> R + Send + Sync + 'static,
77 0 : {
78 0 : let request_id = request.context::<RequestId>().unwrap_or_default().0;
79 0 : let method = request.method();
80 0 : let path = request.uri().path();
81 0 : let request_span = info_span!("request", %method, %path, %request_id);
82 :
83 0 : let log_quietly = method == Method::GET;
84 0 : async move {
85 0 : let cancellation_guard = RequestCancelled::warn_when_dropped_without_responding();
86 0 : if log_quietly {
87 0 : debug!("Handling request");
88 : } else {
89 0 : info!("Handling request");
90 : }
91 :
92 : // No special handling for panics here. There's a `tracing_panic_hook` from another
93 : // module to do that globally.
94 0 : let res = handler(request).await;
95 :
96 0 : cancellation_guard.disarm();
97 0 :
98 0 : // Log the result if needed.
99 0 : //
100 0 : // We also convert any errors into an Ok response with HTTP error code here.
101 0 : // `make_router` sets a last-resort error handler that would do the same, but
102 0 : // we prefer to do it here, before we exit the request span, so that the error
103 0 : // is still logged with the span.
104 0 : //
105 0 : // (Because we convert errors to Ok response, we never actually return an error,
106 0 : // and we could declare the function to return the never type (`!`). However,
107 0 : // using `routerify::RouterBuilder` requires a proper error type.)
108 0 : match res {
109 0 : Ok(response) => {
110 0 : let response_status = response.status();
111 0 : if log_quietly && response_status.is_success() {
112 0 : debug!("Request handled, status: {response_status}");
113 : } else {
114 0 : info!("Request handled, status: {response_status}");
115 : }
116 0 : Ok(response)
117 : }
118 0 : Err(err) => Ok(api_error_handler(err)),
119 : }
120 0 : }
121 0 : .instrument(request_span)
122 0 : .await
123 0 : }
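// A sketch (not part of the original module) of the `|r| request_span(r, my_handler)`
// pattern described in the doc comment above; the route path and handler are
// hypothetical.
#[allow(dead_code)]
async fn example_status_handler(_req: Request<Body>) -> Result<Response<Body>, ApiError> {
    Ok(Response::builder().status(200).body(Body::from("ok")).unwrap())
}

#[allow(dead_code)]
fn example_routes() -> RouterBuilder<hyper::Body, ApiError> {
    // Every request to /v1/status now runs inside a "request" span carrying the
    // method, path and unique request ID.
    make_router().get("/v1/status", |r| request_span(r, example_status_handler))
}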
124 :
125 : /// Drop guard to WARN in case the request was dropped before completion.
126 : struct RequestCancelled {
127 : warn: Option<tracing::Span>,
128 : }
129 :
130 : impl RequestCancelled {
131 : /// Create the drop guard using the [`tracing::Span::current`] as the span.
132 0 : fn warn_when_dropped_without_responding() -> Self {
133 0 : RequestCancelled {
134 0 : warn: Some(tracing::Span::current()),
135 0 : }
136 0 : }
137 :
138 : /// Consume the drop guard without logging anything.
139 0 : fn disarm(mut self) {
140 0 : self.warn = None;
141 0 : }
142 : }
143 :
144 : impl Drop for RequestCancelled {
145 0 : fn drop(&mut self) {
146 0 : if std::thread::panicking() {
147 0 : // we are unwinding due to a panic; assume we are not being dropped for cancellation
148 0 : } else if let Some(span) = self.warn.take() {
149 : // the span has all of the info already, but the outer `.instrument(span)` has already
150 : // been dropped, so we need to manually re-enter it for this message.
151 : //
152 : // this is what the instrument would do before polling so it is fine.
153 0 : let _g = span.entered();
154 0 : warn!("request was dropped before completing");
155 0 : }
156 0 : }
157 : }
158 :
159 : /// An [`std::io::Write`] implementation on top of a channel sending [`bytes::Bytes`] chunks.
160 : pub struct ChannelWriter {
161 : buffer: BytesMut,
162 : pub tx: mpsc::Sender<std::io::Result<Bytes>>,
163 : written: usize,
164 : /// Time spent waiting for the channel to make progress. It is not the same as the time to upload a
165 : /// buffer, because we cannot know anything about that; but it should let us understand
166 : /// the actual time taken, excluding the time spent `std::thread::park`ed.
167 : wait_time: std::time::Duration,
168 : }
169 :
170 : impl ChannelWriter {
171 0 : pub fn new(buf_len: usize, tx: mpsc::Sender<std::io::Result<Bytes>>) -> Self {
172 0 : assert_ne!(buf_len, 0);
173 0 : ChannelWriter {
174 0 : // split off about half of the buffer from the start, because we flush depending on
175 0 : // capacity. the first flush will come sooner than without this, but resizes will now
176 0 : // have a better chance of picking up the "other" half. not guaranteed, of course.
177 0 : buffer: BytesMut::with_capacity(buf_len).split_off(buf_len / 2),
178 0 : tx,
179 0 : written: 0,
180 0 : wait_time: std::time::Duration::ZERO,
181 0 : }
182 0 : }
183 :
184 0 : pub fn flush0(&mut self) -> std::io::Result<usize> {
185 0 : let n = self.buffer.len();
186 0 : if n == 0 {
187 0 : return Ok(0);
188 0 : }
189 0 :
190 0 : tracing::trace!(n, "flushing");
191 0 : let ready = self.buffer.split().freeze();
192 0 :
193 0 : let wait_started_at = std::time::Instant::now();
194 0 :
195 0 : // not ideal to call block_on from blocking code, but we are sure that this
196 0 : // operation does not spawn_blocking other tasks
197 0 : let res: Result<(), ()> = tokio::runtime::Handle::current().block_on(async {
198 0 : self.tx.send(Ok(ready)).await.map_err(|_| ())?;
199 :
200 : // throttle sending to allow reuse of our buffer in `write`.
201 0 : self.tx.reserve().await.map_err(|_| ())?;
202 :
203 : // now the response task has picked up the buffer and hopefully started
204 : // sending it to the client.
205 0 : Ok(())
206 0 : });
207 0 :
208 0 : self.wait_time += wait_started_at.elapsed();
209 0 :
210 0 : if res.is_err() {
211 0 : return Err(std::io::ErrorKind::BrokenPipe.into());
212 0 : }
213 0 : self.written += n;
214 0 : Ok(n)
215 0 : }
216 :
217 0 : pub fn flushed_bytes(&self) -> usize {
218 0 : self.written
219 0 : }
220 :
221 0 : pub fn wait_time(&self) -> std::time::Duration {
222 0 : self.wait_time
223 0 : }
224 : }
225 :
226 : impl std::io::Write for ChannelWriter {
227 0 : fn write(&mut self, mut buf: &[u8]) -> std::io::Result<usize> {
228 0 : let remaining = self.buffer.capacity() - self.buffer.len();
229 0 :
230 0 : let out_of_space = remaining < buf.len();
231 0 :
232 0 : let original_len = buf.len();
233 0 :
234 0 : if out_of_space {
235 0 : let can_still_fit = buf.len() - remaining;
236 0 : self.buffer.extend_from_slice(&buf[..can_still_fit]);
237 0 : buf = &buf[can_still_fit..];
238 0 : self.flush0()?;
239 0 : }
240 :
241 : // assume that under normal operation this will often just move the pointer back to the
242 : // beginning of the allocation, because the previously split-off parts have already been
243 : // sent and dropped.
244 0 : self.buffer.extend_from_slice(buf);
245 0 : Ok(original_len)
246 0 : }
247 :
248 0 : fn flush(&mut self) -> std::io::Result<()> {
249 0 : self.flush0().map(|_| ())
250 0 : }
251 : }
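// A minimal usage sketch (assumed, mirroring `prometheus_metrics_handler` below):
// `ChannelWriter` bridges a blocking `std::io::Write` producer to a streaming hyper
// `Body`. The producer must run on a blocking thread, since `flush0` calls `block_on`;
// this function therefore has to be called from within a tokio runtime.
#[allow(dead_code)]
fn example_streaming_body() -> Body {
    let (tx, rx) = mpsc::channel(1);
    let body = Body::wrap_stream(ReceiverStream::new(rx));
    let mut writer = ChannelWriter::new(128 * 1024, tx);
    tokio::task::spawn_blocking(move || {
        // writes are buffered; each flush sends one `Bytes` chunk to the body
        writer
            .write_all(b"chunk of data")
            .and_then(|()| writer.flush())
    });
    body
}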
252 :
253 0 : pub async fn prometheus_metrics_handler(_req: Request<Body>) -> Result<Response<Body>, ApiError> {
254 0 : SERVE_METRICS_COUNT.inc();
255 0 :
256 0 : let started_at = std::time::Instant::now();
257 0 :
258 0 : let (tx, rx) = mpsc::channel(1);
259 0 :
260 0 : let body = Body::wrap_stream(ReceiverStream::new(rx));
261 0 :
262 0 : let mut writer = ChannelWriter::new(128 * 1024, tx);
263 0 :
264 0 : let encoder = TextEncoder::new();
265 0 :
266 0 : let response = Response::builder()
267 0 : .status(200)
268 0 : .header(CONTENT_TYPE, encoder.format_type())
269 0 : .body(body)
270 0 : .unwrap();
271 :
272 0 : let span = info_span!("blocking");
273 0 : tokio::task::spawn_blocking(move || {
274 0 : // there are situations where we lose scraped metrics under load; try to gather some clues.
275 0 : // since all nodes are queried for this, keep the message count low.
276 0 : let spawned_at = std::time::Instant::now();
277 0 :
278 0 : let _span = span.entered();
279 0 :
280 0 : let metrics = metrics::gather();
281 0 :
282 0 : let gathered_at = std::time::Instant::now();
283 0 :
284 0 : let res = encoder
285 0 : .encode(&metrics, &mut writer)
286 0 : .and_then(|_| writer.flush().map_err(|e| e.into()));
287 0 :
288 0 : // this instant is not when the full response has been sent; sending is done by hyper
289 0 : // in another task.
290 0 : let encoded_at = std::time::Instant::now();
291 0 :
292 0 : let spawned_in = spawned_at - started_at;
293 0 : let collected_in = gathered_at - spawned_at;
294 0 : // subtract the wait time here, in case the tcp connection was clogged
295 0 : let encoded_in = encoded_at - gathered_at - writer.wait_time();
296 0 : let total = encoded_at - started_at;
297 0 :
298 0 : match res {
299 : Ok(()) => {
300 0 : tracing::info!(
301 0 : bytes = writer.flushed_bytes(),
302 0 : total_ms = total.as_millis(),
303 0 : spawning_ms = spawned_in.as_millis(),
304 0 : collection_ms = collected_in.as_millis(),
305 0 : encoding_ms = encoded_in.as_millis(),
306 0 : "responded /metrics"
307 : );
308 : }
309 0 : Err(e) => {
310 0 : // there is a chance that this error is not the BrokenPipe we generate in the writer
311 0 : // for "closed connection", but it is highly unlikely.
312 0 : tracing::warn!(
313 0 : after_bytes = writer.flushed_bytes(),
314 0 : total_ms = total.as_millis(),
315 0 : spawning_ms = spawned_in.as_millis(),
316 0 : collection_ms = collected_in.as_millis(),
317 0 : encoding_ms = encoded_in.as_millis(),
318 0 : "failed to write out /metrics response: {e:?}"
319 : );
320 : // the semantics of this error are quite unclear. we want to error the stream out to
321 : // abort the response, to somehow notify the client that we failed.
322 : //
323 : // though, most likely the reason for failure is that the receiver is already gone.
324 0 : drop(
325 0 : writer
326 0 : .tx
327 0 : .blocking_send(Err(std::io::ErrorKind::BrokenPipe.into())),
328 0 : );
329 : }
330 : }
331 0 : });
332 0 :
333 0 : Ok(response)
334 0 : }
335 :
336 : /// Generates CPU profiles.
337 0 : pub async fn profile_cpu_handler(req: Request<Body>) -> Result<Response<Body>, ApiError> {
338 : enum Format {
339 : Pprof,
340 : Svg,
341 : }
342 :
343 : // Parameters.
344 0 : let format = match get_query_param(&req, "format")?.as_deref() {
345 0 : None => Format::Pprof,
346 0 : Some("pprof") => Format::Pprof,
347 0 : Some("svg") => Format::Svg,
348 0 : Some(format) => return Err(ApiError::BadRequest(anyhow!("invalid format {format}"))),
349 : };
350 0 : let seconds = match parse_query_param(&req, "seconds")? {
351 0 : None => 5,
352 0 : Some(seconds @ 1..=60) => seconds,
353 0 : Some(_) => return Err(ApiError::BadRequest(anyhow!("duration must be 1-60 secs"))),
354 : };
355 0 : let frequency_hz = match parse_query_param(&req, "frequency")? {
356 0 : None => 99,
357 0 : Some(1001..) => return Err(ApiError::BadRequest(anyhow!("frequency must be <=1000 Hz"))),
358 0 : Some(frequency) => frequency,
359 : };
360 0 : let force: bool = parse_query_param(&req, "force")?.unwrap_or_default();
361 :
362 : // Take the profile.
363 0 : static PROFILE_LOCK: Lazy<Mutex<()>> = Lazy::new(|| Mutex::new(()));
364 : static PROFILE_CANCEL: Lazy<Notify> = Lazy::new(Notify::new);
365 :
366 0 : let report = {
367 : // Only allow one profiler at a time. If force is true, cancel a running profile (e.g. a
368 : // Grafana continuous profile). We use a try_lock() loop when cancelling instead of waiting
369 : // for a lock(), to avoid races where the notify isn't currently awaited.
370 0 : let _lock = loop {
371 0 : match PROFILE_LOCK.try_lock() {
372 0 : Ok(lock) => break lock,
373 0 : Err(_) if force => PROFILE_CANCEL.notify_waiters(),
374 : Err(_) => {
375 0 : return Err(ApiError::Conflict(
376 0 : "profiler already running (use ?force=true to cancel it)".into(),
377 0 : ));
378 : }
379 : }
380 0 : tokio::time::sleep(Duration::from_millis(1)).await; // don't busy-wait
381 : };
382 :
383 0 : let guard = ProfilerGuardBuilder::default()
384 0 : .frequency(frequency_hz)
385 0 : .blocklist(&["libc", "libgcc", "pthread", "vdso"])
386 0 : .build()
387 0 : .map_err(|err| ApiError::InternalServerError(err.into()))?;
388 :
389 0 : tokio::select! {
390 0 : _ = tokio::time::sleep(Duration::from_secs(seconds)) => {},
391 0 : _ = PROFILE_CANCEL.notified() => {},
392 : };
393 :
394 0 : guard
395 0 : .report()
396 0 : .build()
397 0 : .map_err(|err| ApiError::InternalServerError(err.into()))?
398 : };
399 :
400 : // Return the report in the requested format.
401 0 : match format {
402 : Format::Pprof => {
403 0 : let body = report
404 0 : .pprof()
405 0 : .map_err(|err| ApiError::InternalServerError(err.into()))?
406 0 : .encode_to_vec();
407 0 :
408 0 : Response::builder()
409 0 : .status(200)
410 0 : .header(CONTENT_TYPE, "application/octet-stream")
411 0 : .header(CONTENT_DISPOSITION, "attachment; filename=\"profile.pb\"")
412 0 : .body(Body::from(body))
413 0 : .map_err(|err| ApiError::InternalServerError(err.into()))
414 : }
415 :
416 : Format::Svg => {
417 0 : let mut body = Vec::new();
418 0 : report
419 0 : .flamegraph(&mut body)
420 0 : .map_err(|err| ApiError::InternalServerError(err.into()))?;
421 0 : Response::builder()
422 0 : .status(200)
423 0 : .header(CONTENT_TYPE, "image/svg+xml")
424 0 : .body(Body::from(body))
425 0 : .map_err(|err| ApiError::InternalServerError(err.into()))
426 : }
427 : }
428 0 : }
429 :
430 : /// Generates heap profiles.
431 : ///
432 : /// This only works with jemalloc on Linux.
433 0 : pub async fn profile_heap_handler(req: Request<Body>) -> Result<Response<Body>, ApiError> {
434 : enum Format {
435 : Jemalloc,
436 : Pprof,
437 : Svg,
438 : }
439 :
440 : // Parameters.
441 0 : let format = match get_query_param(&req, "format")?.as_deref() {
442 0 : None => Format::Pprof,
443 0 : Some("jemalloc") => Format::Jemalloc,
444 0 : Some("pprof") => Format::Pprof,
445 0 : Some("svg") => Format::Svg,
446 0 : Some(format) => return Err(ApiError::BadRequest(anyhow!("invalid format {format}"))),
447 : };
448 :
449 : // Obtain profiler handle.
450 0 : let mut prof_ctl = jemalloc_pprof::PROF_CTL
451 0 : .as_ref()
452 0 : .ok_or(ApiError::InternalServerError(anyhow!(
453 0 : "heap profiling not enabled"
454 0 : )))?
455 0 : .lock()
456 0 : .await;
457 0 : if !prof_ctl.activated() {
458 0 : return Err(ApiError::InternalServerError(anyhow!(
459 0 : "heap profiling not enabled"
460 0 : )));
461 0 : }
462 0 :
463 0 : // Take and return the profile.
464 0 : match format {
465 : Format::Jemalloc => {
466 : // NB: file is an open handle to a tempfile that's already deleted.
467 0 : let file = tokio::task::spawn_blocking(move || prof_ctl.dump())
468 0 : .await
469 0 : .map_err(|join_err| ApiError::InternalServerError(join_err.into()))?
470 0 : .map_err(ApiError::InternalServerError)?;
471 0 : let stream = ReaderStream::new(tokio::fs::File::from_std(file));
472 0 : Response::builder()
473 0 : .status(200)
474 0 : .header(CONTENT_TYPE, "application/octet-stream")
475 0 : .header(CONTENT_DISPOSITION, "attachment; filename=\"heap.dump\"")
476 0 : .body(Body::wrap_stream(stream))
477 0 : .map_err(|err| ApiError::InternalServerError(err.into()))
478 : }
479 :
480 : Format::Pprof => {
481 0 : let data = tokio::task::spawn_blocking(move || prof_ctl.dump_pprof())
482 0 : .await
483 0 : .map_err(|join_err| ApiError::InternalServerError(join_err.into()))?
484 0 : .map_err(ApiError::InternalServerError)?;
485 0 : Response::builder()
486 0 : .status(200)
487 0 : .header(CONTENT_TYPE, "application/octet-stream")
488 0 : .header(CONTENT_DISPOSITION, "attachment; filename=\"heap.pb.gz\"")
489 0 : .body(Body::from(data))
490 0 : .map_err(|err| ApiError::InternalServerError(err.into()))
491 : }
492 :
493 : Format::Svg => {
494 0 : let svg = tokio::task::spawn_blocking(move || prof_ctl.dump_flamegraph())
495 0 : .await
496 0 : .map_err(|join_err| ApiError::InternalServerError(join_err.into()))?
497 0 : .map_err(ApiError::InternalServerError)?;
498 0 : Response::builder()
499 0 : .status(200)
500 0 : .header(CONTENT_TYPE, "image/svg+xml")
501 0 : .body(Body::from(svg))
502 0 : .map_err(|err| ApiError::InternalServerError(err.into()))
503 : }
504 : }
505 0 : }
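// A sketch (assumed wiring; the paths are illustrative) of mounting these
// operational handlers on the router built by `make_router` below:
#[allow(dead_code)]
fn example_operational_routes() -> RouterBuilder<hyper::Body, ApiError> {
    make_router()
        .get("/metrics", |r| request_span(r, prometheus_metrics_handler))
        .get("/profile/cpu", |r| request_span(r, profile_cpu_handler))
        .get("/profile/heap", |r| request_span(r, profile_heap_handler))
}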
506 :
507 2 : pub fn add_request_id_middleware<B: hyper::body::HttpBody + Send + Sync + 'static>()
508 2 : -> Middleware<B, ApiError> {
509 2 : Middleware::pre(move |req| async move {
510 2 : let request_id = match req.headers().get(&X_REQUEST_ID_HEADER) {
511 1 : Some(request_id) => request_id
512 1 : .to_str()
513 1 : .expect("extract request id value")
514 1 : .to_owned(),
515 : None => {
516 1 : let request_id = uuid::Uuid::new_v4();
517 1 : request_id.to_string()
518 : }
519 : };
520 2 : req.set_context(RequestId(request_id));
521 2 :
522 2 : Ok(req)
523 2 : })
524 2 : }
525 :
526 2 : async fn add_request_id_header_to_response(
527 2 : mut res: Response<Body>,
528 2 : req_info: RequestInfo,
529 2 : ) -> Result<Response<Body>, ApiError> {
530 2 : if let Some(request_id) = req_info.context::<RequestId>() {
531 2 : if let Ok(request_header_value) = HeaderValue::from_str(&request_id.0) {
532 2 : res.headers_mut()
533 2 : .insert(&X_REQUEST_ID_HEADER, request_header_value);
534 2 : };
535 0 : };
536 :
537 2 : Ok(res)
538 2 : }
539 :
540 2 : pub fn make_router() -> RouterBuilder<hyper::Body, ApiError> {
541 2 : Router::builder()
542 2 : .middleware(add_request_id_middleware())
543 2 : .middleware(Middleware::post_with_info(
544 2 : add_request_id_header_to_response,
545 2 : ))
546 2 : .err_handler(route_error_handler)
547 2 : }
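// A sketch (assumed server setup, not part of this module) of serving the router
// with hyper 0.14 via `routerify::RouterService`; the bind address is illustrative.
#[allow(dead_code)]
async fn example_serve() -> anyhow::Result<()> {
    let router = make_router().build().map_err(|e| anyhow!(e))?;
    let service = routerify::RouterService::new(router).map_err(|e| anyhow!(e))?;
    let addr = "127.0.0.1:9898".parse()?;
    hyper::Server::bind(&addr).serve(service).await?;
    Ok(())
}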
548 :
549 0 : pub fn attach_openapi_ui(
550 0 : router_builder: RouterBuilder<hyper::Body, ApiError>,
551 0 : spec: &'static [u8],
552 0 : spec_mount_path: &'static str,
553 0 : ui_mount_path: &'static str,
554 0 : ) -> RouterBuilder<hyper::Body, ApiError> {
555 0 : router_builder
556 0 : .get(spec_mount_path,
557 0 : move |r| request_span(r, move |_| async move {
558 0 : Ok(Response::builder().body(Body::from(spec)).unwrap())
559 0 : })
560 0 : )
561 0 : .get(ui_mount_path,
562 0 : move |r| request_span(r, move |_| async move {
563 0 : Ok(Response::builder().body(Body::from(format!(r#"
564 0 : <!DOCTYPE html>
565 0 : <html lang="en">
566 0 : <head>
567 0 : <title>rweb</title>
568 0 : <link href="https://cdn.jsdelivr.net/npm/swagger-ui-dist@3/swagger-ui.css" rel="stylesheet">
569 0 : </head>
570 0 : <body>
571 0 : <div id="swagger-ui"></div>
572 0 : <script src="https://cdn.jsdelivr.net/npm/swagger-ui-dist@3/swagger-ui-bundle.js" charset="UTF-8"> </script>
573 0 : <script>
574 0 : window.onload = function() {{
575 0 : const ui = SwaggerUIBundle({{
576 0 : "dom_id": "\#swagger-ui",
577 0 : presets: [
578 0 : SwaggerUIBundle.presets.apis,
579 0 : SwaggerUIBundle.SwaggerUIStandalonePreset
580 0 : ],
581 0 : layout: "BaseLayout",
582 0 : deepLinking: true,
583 0 : showExtensions: true,
584 0 : showCommonExtensions: true,
585 0 : url: "{}",
586 0 : }})
587 0 : window.ui = ui;
588 0 : }};
589 0 : </script>
590 0 : </body>
591 0 : </html>
592 0 : "#, spec_mount_path))).unwrap())
593 0 : })
594 0 : )
595 0 : }
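// A sketch of attaching the OpenAPI UI; the spec bytes and mount paths are
// illustrative (a real caller would typically use `include_bytes!` for the spec).
#[allow(dead_code)]
fn example_with_openapi() -> RouterBuilder<hyper::Body, ApiError> {
    static SPEC: &[u8] = b"openapi: \"3.0.2\"";
    attach_openapi_ui(make_router(), SPEC, "/swagger.yml", "/swagger_ui")
}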
596 :
597 0 : fn parse_token(header_value: &str) -> Result<&str, ApiError> {
598 : // the header must be of the form `Bearer <token>`
599 0 : let (prefix, token) = header_value
600 0 : .split_once(' ')
601 0 : .ok_or_else(|| ApiError::Unauthorized("malformed authorization header".to_string()))?;
602 0 : if prefix != "Bearer" {
603 0 : return Err(ApiError::Unauthorized(
604 0 : "malformed authorization header".to_string(),
605 0 : ));
606 0 : }
607 0 : Ok(token)
608 0 : }
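// For illustration, the header shapes `parse_token` accepts and rejects:
#[allow(dead_code)]
fn example_parse_token() {
    assert_eq!(parse_token("Bearer abc123").ok(), Some("abc123"));
    assert!(parse_token("Basic abc123").is_err()); // wrong prefix
    assert!(parse_token("abc123").is_err()); // no space separator at all
}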
609 :
610 0 : pub fn auth_middleware<B: hyper::body::HttpBody + Send + Sync + 'static>(
611 0 : provide_auth: fn(&Request<Body>) -> Option<&SwappableJwtAuth>,
612 0 : ) -> Middleware<B, ApiError> {
613 0 : Middleware::pre(move |req| async move {
614 0 : if let Some(auth) = provide_auth(&req) {
615 0 : match req.headers().get(AUTHORIZATION) {
616 0 : Some(value) => {
617 0 : let header_value = value.to_str().map_err(|_| {
618 0 : ApiError::Unauthorized("malformed authorization header".to_string())
619 0 : })?;
620 0 : let token = parse_token(header_value)?;
621 :
622 0 : let data: TokenData<Claims> = auth.decode(token).map_err(|err| {
623 0 : warn!("Authentication error: {err}");
624 : // Rely on From<AuthError> for ApiError impl
625 0 : err
626 0 : })?;
627 0 : req.set_context(data.claims);
628 : }
629 : None => {
630 0 : return Err(ApiError::Unauthorized(
631 0 : "missing authorization header".to_string(),
632 0 : ));
633 : }
634 : }
635 0 : }
636 0 : Ok(req)
637 0 : })
638 0 : }
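// A sketch (assumed wiring) of plugging `auth_middleware` into a router. The lookup
// below uses routerify's shared data and assumes a `SwappableJwtAuth` was attached
// to the router with `.data(..)`; returning `None` skips the auth check entirely.
#[allow(dead_code)]
fn example_authed_router() -> RouterBuilder<hyper::Body, ApiError> {
    fn provide_auth(req: &Request<Body>) -> Option<&SwappableJwtAuth> {
        req.data::<SwappableJwtAuth>()
    }
    make_router().middleware(auth_middleware(provide_auth))
}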
639 :
640 0 : pub fn add_response_header_middleware<B>(
641 0 : header: &str,
642 0 : value: &str,
643 0 : ) -> anyhow::Result<Middleware<B, ApiError>>
644 0 : where
645 0 : B: hyper::body::HttpBody + Send + Sync + 'static,
646 0 : {
647 0 : let name =
648 0 : HeaderName::from_str(header).with_context(|| format!("invalid header name: {header}"))?;
649 0 : let value =
650 0 : HeaderValue::from_str(value).with_context(|| format!("invalid header value: {value}"))?;
651 0 : Ok(Middleware::post_with_info(
652 0 : move |mut response, request_info| {
653 0 : let name = name.clone();
654 0 : let value = value.clone();
655 0 : async move {
656 0 : let headers = response.headers_mut();
657 0 : if headers.contains_key(&name) {
658 0 : warn!(
659 0 : "{} response already contains header {:?}",
660 0 : request_info.uri(),
661 0 : &name,
662 : );
663 0 : } else {
664 0 : headers.insert(name, value);
665 0 : }
666 0 : Ok(response)
667 0 : }
668 0 : },
669 0 : ))
670 0 : }
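// A sketch of attaching a static response header; the name and value are illustrative.
#[allow(dead_code)]
fn example_with_static_header() -> anyhow::Result<RouterBuilder<hyper::Body, ApiError>> {
    Ok(make_router().middleware(add_response_header_middleware(
        "x-build-info",
        "illustrative-value",
    )?))
}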
671 :
672 0 : pub fn check_permission_with(
673 0 : req: &Request<Body>,
674 0 : check_permission: impl Fn(&Claims) -> Result<(), AuthError>,
675 0 : ) -> Result<(), ApiError> {
676 0 : match req.context::<Claims>() {
677 0 : Some(claims) => Ok(check_permission(&claims)
678 0 : .map_err(|_err| ApiError::Forbidden("JWT authentication error".to_string()))?),
679 0 : None => Ok(()), // claims is None because auth is disabled
680 : }
681 0 : }
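// A sketch of guarding a handler with `check_permission_with`; the permission
// closure is a stand-in for a real scope/tenant check.
#[allow(dead_code)]
async fn example_guarded_handler(req: Request<Body>) -> Result<Response<Body>, ApiError> {
    check_permission_with(&req, |_claims| {
        // a real implementation would validate the claims here and return an
        // AuthError on mismatch, which maps to a 403 response
        Ok(())
    })?;
    Ok(Response::new(Body::empty()))
}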
682 :
683 : #[cfg(test)]
684 : mod tests {
685 : use std::future::poll_fn;
686 : use std::net::{IpAddr, SocketAddr};
687 :
688 : use hyper::service::Service;
689 : use routerify::RequestServiceBuilder;
690 :
691 : use super::*;
692 :
693 : #[tokio::test]
694 1 : async fn test_request_id_returned() {
695 1 : let builder = RequestServiceBuilder::new(make_router().build().unwrap()).unwrap();
696 1 : let remote_addr = SocketAddr::new(IpAddr::from_str("127.0.0.1").unwrap(), 80);
697 1 : let mut service = builder.build(remote_addr);
698 1 : if let Err(e) = poll_fn(|ctx| service.poll_ready(ctx)).await {
699 1 : panic!("request service is not ready: {:?}", e);
700 1 : }
701 1 :
702 1 : let mut req: Request<Body> = Request::default();
703 1 : req.headers_mut()
704 1 : .append(&X_REQUEST_ID_HEADER, HeaderValue::from_str("42").unwrap());
705 1 :
706 1 : let resp: Response<hyper::body::Body> = service.call(req).await.unwrap();
707 1 :
708 1 : let header_val = resp.headers().get(&X_REQUEST_ID_HEADER).unwrap();
709 1 :
710 1 : assert!(header_val == "42", "response header mismatch");
711 1 : }
712 :
713 : #[tokio::test]
714 1 : async fn test_request_id_empty() {
715 1 : let builder = RequestServiceBuilder::new(make_router().build().unwrap()).unwrap();
716 1 : let remote_addr = SocketAddr::new(IpAddr::from_str("127.0.0.1").unwrap(), 80);
717 1 : let mut service = builder.build(remote_addr);
718 1 : if let Err(e) = poll_fn(|ctx| service.poll_ready(ctx)).await {
719 1 : panic!("request service is not ready: {:?}", e);
720 1 : }
721 1 :
722 1 : let req: Request<Body> = Request::default();
723 1 : let resp: Response<hyper::body::Body> = service.call(req).await.unwrap();
724 1 :
725 1 : let header_val = resp.headers().get(&X_REQUEST_ID_HEADER);
726 1 :
727 1 : assert_ne!(header_val, None, "response header should NOT be empty");
728 1 : }
729 : }