LCOV - code coverage report
Current view: top level - storage_controller/src - http.rs (source / functions)
Test: b837401fb09d2d9818b70e630fdb67e9799b7b0d.info
Test Date: 2024-04-18 15:32:49
Coverage: Lines: 0.0 % (0 of 679)    Functions: 0.0 % (0 of 258)

            Line data    Source code
       1              : use crate::metrics::{
       2              :     HttpRequestLatencyLabelGroup, HttpRequestStatusLabelGroup, PageserverRequestLabelGroup,
       3              :     METRICS_REGISTRY,
       4              : };
       5              : use crate::reconciler::ReconcileError;
       6              : use crate::service::{Service, STARTUP_RECONCILE_TIMEOUT};
       7              : use futures::Future;
       8              : use hyper::header::CONTENT_TYPE;
       9              : use hyper::{Body, Request, Response};
      10              : use hyper::{StatusCode, Uri};
      11              : use metrics::{BuildInfo, NeonMetrics};
      12              : use pageserver_api::models::{
      13              :     TenantConfigRequest, TenantCreateRequest, TenantLocationConfigRequest, TenantShardSplitRequest,
      14              :     TenantTimeTravelRequest, TimelineCreateRequest,
      15              : };
      16              : use pageserver_api::shard::TenantShardId;
      17              : use pageserver_client::mgmt_api;
      18              : use std::sync::Arc;
      19              : use std::time::{Duration, Instant};
      20              : use tokio_util::sync::CancellationToken;
      21              : use utils::auth::{Scope, SwappableJwtAuth};
      22              : use utils::failpoint_support::failpoints_handler;
      23              : use utils::http::endpoint::{auth_middleware, check_permission_with, request_span};
      24              : use utils::http::request::{must_get_query_param, parse_query_param, parse_request_param};
      25              : use utils::id::{TenantId, TimelineId};
      26              : 
      27              : use utils::{
      28              :     http::{
      29              :         endpoint::{self},
      30              :         error::ApiError,
      31              :         json::{json_request, json_response},
      32              :         RequestExt, RouterBuilder,
      33              :     },
      34              :     id::NodeId,
      35              : };
      36              : 
      37              : use pageserver_api::controller_api::{
      38              :     NodeAvailability, NodeConfigureRequest, NodeRegisterRequest, TenantPolicyRequest,
      39              :     TenantShardMigrateRequest,
      40              : };
      41              : use pageserver_api::upcall_api::{ReAttachRequest, ValidateRequest};
      42              : 
      43              : use control_plane::storage_controller::{AttachHookRequest, InspectRequest};
      44              : 
      45              : use routerify::Middleware;
      46              : 
      47              : /// State available to HTTP request handlers
      48              : pub struct HttpState {
      49              :     service: Arc<crate::service::Service>,
      50              :     auth: Option<Arc<SwappableJwtAuth>>,
      51              :     neon_metrics: NeonMetrics,
      52              :     allowlist_routes: Vec<Uri>,
      53              : }
      54              : 
      55              : impl HttpState {
      56            0 :     pub fn new(
      57            0 :         service: Arc<crate::service::Service>,
      58            0 :         auth: Option<Arc<SwappableJwtAuth>>,
      59            0 :         build_info: BuildInfo,
      60            0 :     ) -> Self {
      61            0 :         let allowlist_routes = ["/status", "/ready", "/metrics"]
      62            0 :             .iter()
      63            0 :             .map(|v| v.parse().unwrap())
      64            0 :             .collect::<Vec<_>>();
      65            0 :         Self {
      66            0 :             service,
      67            0 :             auth,
      68            0 :             neon_metrics: NeonMetrics::new(build_info),
      69            0 :             allowlist_routes,
      70            0 :         }
      71            0 :     }
      72              : }
      73              : 
      74              : #[inline(always)]
      75            0 : fn get_state(request: &Request<Body>) -> &HttpState {
      76            0 :     request
      77            0 :         .data::<Arc<HttpState>>()
      78            0 :         .expect("unknown state type")
      79            0 :         .as_ref()
      80            0 : }
      81              : 
      82              : /// Pageserver calls into this on startup, to learn which tenants it should attach
      83            0 : async fn handle_re_attach(mut req: Request<Body>) -> Result<Response<Body>, ApiError> {
      84            0 :     check_permissions(&req, Scope::GenerationsApi)?;
      85              : 
      86            0 :     let reattach_req = json_request::<ReAttachRequest>(&mut req).await?;
      87            0 :     let state = get_state(&req);
      88            0 :     json_response(StatusCode::OK, state.service.re_attach(reattach_req).await?)
      89            0 : }
      90              : 
      91              : /// Pageserver calls into this before doing deletions, to confirm that it still
      92              : /// holds the latest generation for the tenants with deletions enqueued
      93            0 : async fn handle_validate(mut req: Request<Body>) -> Result<Response<Body>, ApiError> {
      94            0 :     check_permissions(&req, Scope::GenerationsApi)?;
      95              : 
      96            0 :     let validate_req = json_request::<ValidateRequest>(&mut req).await?;
      97            0 :     let state = get_state(&req);
      98            0 :     json_response(StatusCode::OK, state.service.validate(validate_req))
      99            0 : }
     100              : 
     101              : /// Call into this before attaching a tenant to a pageserver, to acquire a generation number
     102              : /// (in the real control plane this is unnecessary, because the same program is managing
     103              : ///  generation numbers and doing attachments).
     104            0 : async fn handle_attach_hook(mut req: Request<Body>) -> Result<Response<Body>, ApiError> {
     105            0 :     check_permissions(&req, Scope::Admin)?;
     106              : 
     107            0 :     let attach_req = json_request::<AttachHookRequest>(&mut req).await?;
     108            0 :     let state = get_state(&req);
     109            0 : 
     110            0 :     json_response(
     111            0 :         StatusCode::OK,
     112            0 :         state
     113            0 :             .service
     114            0 :             .attach_hook(attach_req)
     115            0 :             .await
     116            0 :             .map_err(ApiError::InternalServerError)?,
     117              :     )
     118            0 : }
     119              : 
     120            0 : async fn handle_inspect(mut req: Request<Body>) -> Result<Response<Body>, ApiError> {
     121            0 :     check_permissions(&req, Scope::Admin)?;
     122              : 
     123            0 :     let inspect_req = json_request::<InspectRequest>(&mut req).await?;
     124              : 
     125            0 :     let state = get_state(&req);
     126            0 : 
     127            0 :     json_response(StatusCode::OK, state.service.inspect(inspect_req))
     128            0 : }
     129              : 
     130            0 : async fn handle_tenant_create(
     131            0 :     service: Arc<Service>,
     132            0 :     mut req: Request<Body>,
     133            0 : ) -> Result<Response<Body>, ApiError> {
     134            0 :     check_permissions(&req, Scope::PageServerApi)?;
     135              : 
     136            0 :     let create_req = json_request::<TenantCreateRequest>(&mut req).await?;
     137              : 
     138              :     json_response(
     139              :         StatusCode::CREATED,
     140            0 :         service.tenant_create(create_req).await?,
     141              :     )
     142            0 : }
     143              : 
     144              : // For tenant and timeline deletions, which both implement an "initially return 202, then 404 once
     145              : // we're done" semantic, we wrap with a retry loop to expose a simpler API upstream.  This avoids
     146              : // needing to track a "deleting" state for tenants.
     147            0 : async fn deletion_wrapper<R, F>(service: Arc<Service>, f: F) -> Result<Response<Body>, ApiError>
     148            0 : where
     149            0 :     R: std::future::Future<Output = Result<StatusCode, ApiError>> + Send + 'static,
     150            0 :     F: Fn(Arc<Service>) -> R + Send + Sync + 'static,
     151            0 : {
     152            0 :     let started_at = Instant::now();
      153            0 :     // To keep deletion reasonably snappy for small tenants, initially check after 1 second whether
      154            0 :     // deletion has completed.
     155            0 :     let mut retry_period = Duration::from_secs(1);
     156            0 :     // On subsequent retries, wait longer.
     157            0 :     let max_retry_period = Duration::from_secs(5);
     158            0 :     // Enable callers with a 30 second request timeout to reliably get a response
     159            0 :     let max_wait = Duration::from_secs(25);
     160              : 
     161              :     loop {
     162            0 :         let status = f(service.clone()).await?;
     163            0 :         match status {
     164              :             StatusCode::ACCEPTED => {
     165            0 :                 tracing::info!("Deletion accepted, waiting to try again...");
     166            0 :                 tokio::time::sleep(retry_period).await;
     167            0 :                 retry_period = max_retry_period;
     168              :             }
     169              :             StatusCode::NOT_FOUND => {
     170            0 :                 tracing::info!("Deletion complete");
     171            0 :                 return json_response(StatusCode::OK, ());
     172              :             }
     173              :             _ => {
     174            0 :                 tracing::warn!("Unexpected status {status}");
     175            0 :                 return json_response(status, ());
     176              :             }
     177              :         }
     178              : 
     179            0 :         let now = Instant::now();
     180            0 :         if now + retry_period > started_at + max_wait {
     181            0 :             tracing::info!("Deletion timed out waiting for 404");
     182              :             // REQUEST_TIMEOUT would be more appropriate, but CONFLICT is already part of
     183              :             // the pageserver's swagger definition for this endpoint, and has the same desired
     184              :             // effect of causing the control plane to retry later.
     185            0 :             return json_response(StatusCode::CONFLICT, ());
     186            0 :         }
     187              :     }
     188            0 : }
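// Editor's illustration (hedged sketch, not part of the measured source above): from a
// client's point of view, deletion_wrapper collapses the pageserver's "202 until gone,
// then 404" deletion protocol into a single call, so a plain HTTP client only has to
// interpret the final status. The base URL, reqwest client and error handling below are
// assumptions for the sketch, not part of the controller's API contract.
async fn delete_tenant_blocking(base_url: &str, tenant_id: &str) -> anyhow::Result<()> {
    let resp = reqwest::Client::new()
        .delete(format!("{base_url}/v1/tenant/{tenant_id}"))
        .send()
        .await?;
    match resp.status().as_u16() {
        // Deletion completed within the wrapper's max_wait window.
        200 => Ok(()),
        // The wrapper gave up waiting for 404; the caller should retry later.
        409 => anyhow::bail!("deletion still in progress, retry later"),
        other => anyhow::bail!("unexpected status {other}"),
    }
}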
     189              : 
     190            0 : async fn handle_tenant_location_config(
     191            0 :     service: Arc<Service>,
     192            0 :     mut req: Request<Body>,
     193            0 : ) -> Result<Response<Body>, ApiError> {
     194            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&req, "tenant_shard_id")?;
     195            0 :     check_permissions(&req, Scope::PageServerApi)?;
     196              : 
     197            0 :     let config_req = json_request::<TenantLocationConfigRequest>(&mut req).await?;
     198              :     json_response(
     199              :         StatusCode::OK,
     200            0 :         service
     201            0 :             .tenant_location_config(tenant_shard_id, config_req)
     202            0 :             .await?,
     203              :     )
     204            0 : }
     205              : 
     206            0 : async fn handle_tenant_config_set(
     207            0 :     service: Arc<Service>,
     208            0 :     mut req: Request<Body>,
     209            0 : ) -> Result<Response<Body>, ApiError> {
     210            0 :     check_permissions(&req, Scope::PageServerApi)?;
     211              : 
     212            0 :     let config_req = json_request::<TenantConfigRequest>(&mut req).await?;
     213              : 
     214            0 :     json_response(StatusCode::OK, service.tenant_config_set(config_req).await?)
     215            0 : }
     216              : 
     217            0 : async fn handle_tenant_config_get(
     218            0 :     service: Arc<Service>,
     219            0 :     req: Request<Body>,
     220            0 : ) -> Result<Response<Body>, ApiError> {
     221            0 :     let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
     222            0 :     check_permissions(&req, Scope::PageServerApi)?;
     223              : 
     224            0 :     json_response(StatusCode::OK, service.tenant_config_get(tenant_id)?)
     225            0 : }
     226              : 
     227            0 : async fn handle_tenant_time_travel_remote_storage(
     228            0 :     service: Arc<Service>,
     229            0 :     mut req: Request<Body>,
     230            0 : ) -> Result<Response<Body>, ApiError> {
     231            0 :     let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
     232            0 :     check_permissions(&req, Scope::PageServerApi)?;
     233              : 
     234            0 :     let time_travel_req = json_request::<TenantTimeTravelRequest>(&mut req).await?;
     235              : 
     236            0 :     let timestamp_raw = must_get_query_param(&req, "travel_to")?;
     237            0 :     let _timestamp = humantime::parse_rfc3339(&timestamp_raw).map_err(|_e| {
     238            0 :         ApiError::BadRequest(anyhow::anyhow!(
     239            0 :             "Invalid time for travel_to: {timestamp_raw:?}"
     240            0 :         ))
     241            0 :     })?;
     242              : 
     243            0 :     let done_if_after_raw = must_get_query_param(&req, "done_if_after")?;
     244            0 :     let _done_if_after = humantime::parse_rfc3339(&done_if_after_raw).map_err(|_e| {
     245            0 :         ApiError::BadRequest(anyhow::anyhow!(
     246            0 :             "Invalid time for done_if_after: {done_if_after_raw:?}"
     247            0 :         ))
     248            0 :     })?;
     249              : 
     250            0 :     service
     251            0 :         .tenant_time_travel_remote_storage(
     252            0 :             &time_travel_req,
     253            0 :             tenant_id,
     254            0 :             timestamp_raw,
     255            0 :             done_if_after_raw,
     256            0 :         )
     257            0 :         .await?;
     258            0 :     json_response(StatusCode::OK, ())
     259            0 : }
     260              : 
     261            0 : async fn handle_tenant_secondary_download(
     262            0 :     service: Arc<Service>,
     263            0 :     req: Request<Body>,
     264            0 : ) -> Result<Response<Body>, ApiError> {
     265            0 :     let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
     266            0 :     let wait = parse_query_param(&req, "wait_ms")?.map(Duration::from_millis);
     267              : 
     268            0 :     let (status, progress) = service.tenant_secondary_download(tenant_id, wait).await?;
     269            0 :     json_response(status, progress)
     270            0 : }
     271              : 
     272            0 : async fn handle_tenant_delete(
     273            0 :     service: Arc<Service>,
     274            0 :     req: Request<Body>,
     275            0 : ) -> Result<Response<Body>, ApiError> {
     276            0 :     let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
     277            0 :     check_permissions(&req, Scope::PageServerApi)?;
     278              : 
     279            0 :     deletion_wrapper(service, move |service| async move {
     280            0 :         service.tenant_delete(tenant_id).await
     281            0 :     })
     282            0 :     .await
     283            0 : }
     284              : 
     285            0 : async fn handle_tenant_timeline_create(
     286            0 :     service: Arc<Service>,
     287            0 :     mut req: Request<Body>,
     288            0 : ) -> Result<Response<Body>, ApiError> {
     289            0 :     let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
     290            0 :     check_permissions(&req, Scope::PageServerApi)?;
     291              : 
     292            0 :     let create_req = json_request::<TimelineCreateRequest>(&mut req).await?;
     293              :     json_response(
     294              :         StatusCode::CREATED,
     295            0 :         service
     296            0 :             .tenant_timeline_create(tenant_id, create_req)
     297            0 :             .await?,
     298              :     )
     299            0 : }
     300              : 
     301            0 : async fn handle_tenant_timeline_delete(
     302            0 :     service: Arc<Service>,
     303            0 :     req: Request<Body>,
     304            0 : ) -> Result<Response<Body>, ApiError> {
     305            0 :     let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
     306            0 :     check_permissions(&req, Scope::PageServerApi)?;
     307              : 
     308            0 :     let timeline_id: TimelineId = parse_request_param(&req, "timeline_id")?;
     309              : 
     310            0 :     deletion_wrapper(service, move |service| async move {
     311            0 :         service.tenant_timeline_delete(tenant_id, timeline_id).await
     312            0 :     })
     313            0 :     .await
     314            0 : }
     315              : 
     316            0 : async fn handle_tenant_timeline_passthrough(
     317            0 :     service: Arc<Service>,
     318            0 :     req: Request<Body>,
     319            0 : ) -> Result<Response<Body>, ApiError> {
     320            0 :     let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
     321            0 :     check_permissions(&req, Scope::PageServerApi)?;
     322              : 
     323            0 :     let Some(path) = req.uri().path_and_query() else {
      324              :         // This should never happen: our request router only calls us if there is a path
     325            0 :         return Err(ApiError::BadRequest(anyhow::anyhow!("Missing path")));
     326              :     };
     327              : 
     328            0 :     tracing::info!("Proxying request for tenant {} ({})", tenant_id, path);
     329              : 
     330              :     // Find the node that holds shard zero
     331            0 :     let (node, tenant_shard_id) = service.tenant_shard0_node(tenant_id)?;
     332              : 
     333              :     // Callers will always pass an unsharded tenant ID.  Before proxying, we must
     334              :     // rewrite this to a shard-aware shard zero ID.
     335            0 :     let path = format!("{}", path);
     336            0 :     let tenant_str = tenant_id.to_string();
     337            0 :     let tenant_shard_str = format!("{}", tenant_shard_id);
     338            0 :     let path = path.replace(&tenant_str, &tenant_shard_str);
     339            0 : 
     340            0 :     let latency = &METRICS_REGISTRY
     341            0 :         .metrics_group
     342            0 :         .storage_controller_passthrough_request_latency;
     343            0 : 
      344            0 :     // This is a bit awkward. We strip the tenant shard ID from the path
      345            0 :     // and join the remaining segments with '_' to get a label for the request.
     346            0 :     let just_path = path.replace(&tenant_shard_str, "");
     347            0 :     let path_label = just_path
     348            0 :         .split('/')
     349            0 :         .filter(|token| !token.is_empty())
     350            0 :         .collect::<Vec<_>>()
     351            0 :         .join("_");
     352            0 :     let labels = PageserverRequestLabelGroup {
     353            0 :         pageserver_id: &node.get_id().to_string(),
     354            0 :         path: &path_label,
     355            0 :         method: crate::metrics::Method::Get,
     356            0 :     };
     357            0 : 
     358            0 :     let _timer = latency.start_timer(labels.clone());
     359            0 : 
     360            0 :     let client = mgmt_api::Client::new(node.base_url(), service.get_config().jwt_token.as_deref());
     361            0 :     let resp = client.get_raw(path).await.map_err(|_e|
      362              :         // FIXME: give ApiError a proper Unavailable variant.  We return 503 here because
     363              :         // if we can't successfully send a request to the pageserver, we aren't available.
     364            0 :         ApiError::ShuttingDown)?;
     365              : 
     366            0 :     if !resp.status().is_success() {
     367            0 :         let error_counter = &METRICS_REGISTRY
     368            0 :             .metrics_group
     369            0 :             .storage_controller_passthrough_request_error;
     370            0 :         error_counter.inc(labels);
     371            0 :     }
     372              : 
      373              :     // We have a reqwest::Response, would like an http::Response
     374            0 :     let mut builder = hyper::Response::builder()
     375            0 :         .status(resp.status())
     376            0 :         .version(resp.version());
     377            0 :     for (k, v) in resp.headers() {
     378            0 :         builder = builder.header(k, v);
     379            0 :     }
     380              : 
     381            0 :     let response = builder
     382            0 :         .body(Body::wrap_stream(resp.bytes_stream()))
     383            0 :         .map_err(|e| ApiError::InternalServerError(e.into()))?;
     384              : 
     385            0 :     Ok(response)
     386            0 : }
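// Editor's illustration (hedged sketch): the rewrite-and-label logic above, isolated as a
// pure string transformation. The IDs used in the example are placeholders, not the real
// TenantId / TenantShardId rendering from pageserver_api.
fn rewrite_and_label(path: &str, tenant_str: &str, tenant_shard_str: &str) -> (String, String) {
    // Substitute the unsharded tenant ID with the shard-zero ID before proxying.
    let shard_path = path.replace(tenant_str, tenant_shard_str);
    // Drop the ID and join the remaining path segments with '_' to build a metrics label.
    let label = shard_path
        .replace(tenant_shard_str, "")
        .split('/')
        .filter(|s| !s.is_empty())
        .collect::<Vec<_>>()
        .join("_");
    (shard_path, label)
}
// e.g. rewrite_and_label("/v1/tenant/abc123/timeline", "abc123", "abc123-0001")
//      == ("/v1/tenant/abc123-0001/timeline".to_string(), "v1_tenant_timeline".to_string())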
     387              : 
     388            0 : async fn handle_tenant_locate(
     389            0 :     service: Arc<Service>,
     390            0 :     req: Request<Body>,
     391            0 : ) -> Result<Response<Body>, ApiError> {
     392            0 :     check_permissions(&req, Scope::Admin)?;
     393              : 
     394            0 :     let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
     395            0 :     json_response(StatusCode::OK, service.tenant_locate(tenant_id)?)
     396            0 : }
     397              : 
     398            0 : async fn handle_tenant_describe(
     399            0 :     service: Arc<Service>,
     400            0 :     req: Request<Body>,
     401            0 : ) -> Result<Response<Body>, ApiError> {
     402            0 :     check_permissions(&req, Scope::Admin)?;
     403              : 
     404            0 :     let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
     405            0 :     json_response(StatusCode::OK, service.tenant_describe(tenant_id)?)
     406            0 : }
     407              : 
     408            0 : async fn handle_tenant_list(
     409            0 :     service: Arc<Service>,
     410            0 :     req: Request<Body>,
     411            0 : ) -> Result<Response<Body>, ApiError> {
     412            0 :     check_permissions(&req, Scope::Admin)?;
     413              : 
     414            0 :     json_response(StatusCode::OK, service.tenant_list())
     415            0 : }
     416              : 
     417            0 : async fn handle_node_register(mut req: Request<Body>) -> Result<Response<Body>, ApiError> {
     418            0 :     check_permissions(&req, Scope::Admin)?;
     419              : 
     420            0 :     let register_req = json_request::<NodeRegisterRequest>(&mut req).await?;
     421            0 :     let state = get_state(&req);
     422            0 :     state.service.node_register(register_req).await?;
     423            0 :     json_response(StatusCode::OK, ())
     424            0 : }
     425              : 
     426            0 : async fn handle_node_list(req: Request<Body>) -> Result<Response<Body>, ApiError> {
     427            0 :     check_permissions(&req, Scope::Admin)?;
     428              : 
     429            0 :     let state = get_state(&req);
     430            0 :     let nodes = state.service.node_list().await?;
     431            0 :     let api_nodes = nodes.into_iter().map(|n| n.describe()).collect::<Vec<_>>();
     432            0 : 
     433            0 :     json_response(StatusCode::OK, api_nodes)
     434            0 : }
     435              : 
     436            0 : async fn handle_node_drop(req: Request<Body>) -> Result<Response<Body>, ApiError> {
     437            0 :     check_permissions(&req, Scope::Admin)?;
     438              : 
     439            0 :     let state = get_state(&req);
     440            0 :     let node_id: NodeId = parse_request_param(&req, "node_id")?;
     441            0 :     json_response(StatusCode::OK, state.service.node_drop(node_id).await?)
     442            0 : }
     443              : 
     444            0 : async fn handle_node_configure(mut req: Request<Body>) -> Result<Response<Body>, ApiError> {
     445            0 :     check_permissions(&req, Scope::Admin)?;
     446              : 
     447            0 :     let node_id: NodeId = parse_request_param(&req, "node_id")?;
     448            0 :     let config_req = json_request::<NodeConfigureRequest>(&mut req).await?;
     449            0 :     if node_id != config_req.node_id {
     450            0 :         return Err(ApiError::BadRequest(anyhow::anyhow!(
     451            0 :             "Path and body node_id differ"
     452            0 :         )));
     453            0 :     }
     454            0 :     let state = get_state(&req);
     455            0 : 
     456            0 :     json_response(
     457            0 :         StatusCode::OK,
     458            0 :         state
     459            0 :             .service
     460            0 :             .node_configure(
     461            0 :                 config_req.node_id,
     462            0 :                 config_req.availability.map(NodeAvailability::from),
     463            0 :                 config_req.scheduling,
     464            0 :             )
     465            0 :             .await?,
     466              :     )
     467            0 : }
     468              : 
     469            0 : async fn handle_tenant_shard_split(
     470            0 :     service: Arc<Service>,
     471            0 :     mut req: Request<Body>,
     472            0 : ) -> Result<Response<Body>, ApiError> {
     473            0 :     check_permissions(&req, Scope::Admin)?;
     474              : 
     475            0 :     let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
     476            0 :     let split_req = json_request::<TenantShardSplitRequest>(&mut req).await?;
     477              : 
     478              :     json_response(
     479              :         StatusCode::OK,
     480            0 :         service.tenant_shard_split(tenant_id, split_req).await?,
     481              :     )
     482            0 : }
     483              : 
     484            0 : async fn handle_tenant_shard_migrate(
     485            0 :     service: Arc<Service>,
     486            0 :     mut req: Request<Body>,
     487            0 : ) -> Result<Response<Body>, ApiError> {
     488            0 :     check_permissions(&req, Scope::Admin)?;
     489              : 
     490            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&req, "tenant_shard_id")?;
     491            0 :     let migrate_req = json_request::<TenantShardMigrateRequest>(&mut req).await?;
     492              :     json_response(
     493              :         StatusCode::OK,
     494            0 :         service
     495            0 :             .tenant_shard_migrate(tenant_shard_id, migrate_req)
     496            0 :             .await?,
     497              :     )
     498            0 : }
     499              : 
     500            0 : async fn handle_tenant_update_policy(mut req: Request<Body>) -> Result<Response<Body>, ApiError> {
     501            0 :     check_permissions(&req, Scope::Admin)?;
     502              : 
     503            0 :     let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
     504            0 :     let update_req = json_request::<TenantPolicyRequest>(&mut req).await?;
     505            0 :     let state = get_state(&req);
     506            0 : 
     507            0 :     json_response(
     508            0 :         StatusCode::OK,
     509            0 :         state
     510            0 :             .service
     511            0 :             .tenant_update_policy(tenant_id, update_req)
     512            0 :             .await?,
     513              :     )
     514            0 : }
     515              : 
     516            0 : async fn handle_tenant_drop(req: Request<Body>) -> Result<Response<Body>, ApiError> {
     517            0 :     let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
     518            0 :     check_permissions(&req, Scope::PageServerApi)?;
     519              : 
     520            0 :     let state = get_state(&req);
     521            0 : 
     522            0 :     json_response(StatusCode::OK, state.service.tenant_drop(tenant_id).await?)
     523            0 : }
     524              : 
     525            0 : async fn handle_tenants_dump(req: Request<Body>) -> Result<Response<Body>, ApiError> {
     526            0 :     check_permissions(&req, Scope::Admin)?;
     527              : 
     528            0 :     let state = get_state(&req);
     529            0 :     state.service.tenants_dump()
     530            0 : }
     531              : 
     532            0 : async fn handle_scheduler_dump(req: Request<Body>) -> Result<Response<Body>, ApiError> {
     533            0 :     check_permissions(&req, Scope::Admin)?;
     534              : 
     535            0 :     let state = get_state(&req);
     536            0 :     state.service.scheduler_dump()
     537            0 : }
     538              : 
     539            0 : async fn handle_consistency_check(req: Request<Body>) -> Result<Response<Body>, ApiError> {
     540            0 :     check_permissions(&req, Scope::Admin)?;
     541              : 
     542            0 :     let state = get_state(&req);
     543            0 : 
     544            0 :     json_response(StatusCode::OK, state.service.consistency_check().await?)
     545            0 : }
     546              : 
     547            0 : async fn handle_reconcile_all(req: Request<Body>) -> Result<Response<Body>, ApiError> {
     548            0 :     check_permissions(&req, Scope::Admin)?;
     549              : 
     550            0 :     let state = get_state(&req);
     551            0 : 
     552            0 :     json_response(StatusCode::OK, state.service.reconcile_all_now().await?)
     553            0 : }
     554              : 
     555              : /// Status endpoint is just used for checking that our HTTP listener is up
     556            0 : async fn handle_status(_req: Request<Body>) -> Result<Response<Body>, ApiError> {
     557            0 :     json_response(StatusCode::OK, ())
     558            0 : }
     559              : 
     560              : /// Readiness endpoint indicates when we're done doing startup I/O (e.g. reconciling
     561              : /// with remote pageserver nodes).  This is intended for use as a kubernetes readiness probe.
     562            0 : async fn handle_ready(req: Request<Body>) -> Result<Response<Body>, ApiError> {
     563            0 :     let state = get_state(&req);
     564            0 :     if state.service.startup_complete.is_ready() {
     565            0 :         json_response(StatusCode::OK, ())
     566              :     } else {
     567            0 :         json_response(StatusCode::SERVICE_UNAVAILABLE, ())
     568              :     }
     569            0 : }
     570              : 
     571              : impl From<ReconcileError> for ApiError {
     572            0 :     fn from(value: ReconcileError) -> Self {
     573            0 :         ApiError::Conflict(format!("Reconciliation error: {}", value))
     574            0 :     }
     575              : }
     576              : 
     577              : /// Common wrapper for request handlers that call into Service and will operate on tenants: they must only
     578              : /// be allowed to run if Service has finished its initial reconciliation.
     579            0 : async fn tenant_service_handler<R, H>(
     580            0 :     request: Request<Body>,
     581            0 :     handler: H,
     582            0 :     request_name: RequestName,
     583            0 : ) -> R::Output
     584            0 : where
     585            0 :     R: std::future::Future<Output = Result<Response<Body>, ApiError>> + Send + 'static,
     586            0 :     H: FnOnce(Arc<Service>, Request<Body>) -> R + Send + Sync + 'static,
     587            0 : {
     588            0 :     let state = get_state(&request);
     589            0 :     let service = state.service.clone();
     590            0 : 
     591            0 :     let startup_complete = service.startup_complete.clone();
     592            0 :     if tokio::time::timeout(STARTUP_RECONCILE_TIMEOUT, startup_complete.wait())
     593            0 :         .await
     594            0 :         .is_err()
     595              :     {
      596              :         // This shouldn't happen: it is the responsibility of [`Service::startup_reconcile`] to use appropriate
     597              :         // timeouts around its remote calls, to bound its runtime.
     598            0 :         return Err(ApiError::Timeout(
     599            0 :             "Timed out waiting for service readiness".into(),
     600            0 :         ));
     601            0 :     }
     602            0 : 
     603            0 :     named_request_span(
     604            0 :         request,
     605            0 :         |request| async move { handler(service, request).await },
     606            0 :         request_name,
     607            0 :     )
     608            0 :     .await
     609            0 : }
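// Editor's illustration (hedged sketch): the readiness gate above is just a bounded wait,
// i.e. tokio::time::timeout wrapped around the startup-completion future. The function name
// and error string below are assumptions for the sketch, not this crate's API.
async fn wait_until_ready(
    startup_complete: impl std::future::Future<Output = ()>,
    limit: std::time::Duration,
) -> Result<(), &'static str> {
    // An elapsed timeout maps to an error, mirroring the ApiError::Timeout path above.
    tokio::time::timeout(limit, startup_complete)
        .await
        .map_err(|_elapsed| "timed out waiting for service readiness")
}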
     610              : 
      611              : /// Check that the required scope is held in the request's token; a request whose token
      612              : /// carries the 'admin' scope is always permitted.
     613            0 : fn check_permissions(request: &Request<Body>, required_scope: Scope) -> Result<(), ApiError> {
     614            0 :     check_permission_with(request, |claims| {
     615            0 :         match crate::auth::check_permission(claims, required_scope) {
     616            0 :             Err(e) => match crate::auth::check_permission(claims, Scope::Admin) {
     617            0 :                 Ok(()) => Ok(()),
     618            0 :                 Err(_) => Err(e),
     619              :             },
     620            0 :             Ok(()) => Ok(()),
     621              :         }
     622            0 :     })
     623            0 : }
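// Editor's illustration (hedged sketch, using stand-in types rather than utils::auth): the
// rule above reduces to "the token must carry the required scope, or the Admin scope".
#[derive(PartialEq, Clone, Copy)]
enum DemoScope {
    PageServerApi,
    GenerationsApi,
    Admin,
}

fn permitted(token_scopes: &[DemoScope], required: DemoScope) -> bool {
    token_scopes.contains(&required) || token_scopes.contains(&DemoScope::Admin)
}
// e.g. permitted(&[DemoScope::Admin], DemoScope::GenerationsApi) == true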
     624              : 
     625              : #[derive(Clone, Debug)]
     626              : struct RequestMeta {
     627              :     method: hyper::http::Method,
     628              :     at: Instant,
     629              : }
     630              : 
     631            0 : fn prologue_metrics_middleware<B: hyper::body::HttpBody + Send + Sync + 'static>(
     632            0 : ) -> Middleware<B, ApiError> {
     633            0 :     Middleware::pre(move |req| async move {
     634            0 :         let meta = RequestMeta {
     635            0 :             method: req.method().clone(),
     636            0 :             at: Instant::now(),
     637            0 :         };
     638            0 : 
     639            0 :         req.set_context(meta);
     640            0 : 
     641            0 :         Ok(req)
     642            0 :     })
     643            0 : }
     644              : 
     645            0 : fn epilogue_metrics_middleware<B: hyper::body::HttpBody + Send + Sync + 'static>(
     646            0 : ) -> Middleware<B, ApiError> {
     647            0 :     Middleware::post_with_info(move |resp, req_info| async move {
     648            0 :         let request_name = match req_info.context::<RequestName>() {
     649            0 :             Some(name) => name,
     650              :             None => {
     651            0 :                 return Ok(resp);
     652              :             }
     653              :         };
     654              : 
     655            0 :         if let Some(meta) = req_info.context::<RequestMeta>() {
     656            0 :             let status = &crate::metrics::METRICS_REGISTRY
     657            0 :                 .metrics_group
     658            0 :                 .storage_controller_http_request_status;
     659            0 :             let latency = &crate::metrics::METRICS_REGISTRY
     660            0 :                 .metrics_group
     661            0 :                 .storage_controller_http_request_latency;
     662            0 : 
     663            0 :             status.inc(HttpRequestStatusLabelGroup {
     664            0 :                 path: request_name.0,
     665            0 :                 method: meta.method.clone().into(),
     666            0 :                 status: crate::metrics::StatusCode(resp.status()),
     667            0 :             });
     668            0 : 
     669            0 :             latency.observe(
     670            0 :                 HttpRequestLatencyLabelGroup {
     671            0 :                     path: request_name.0,
     672            0 :                     method: meta.method.into(),
     673            0 :                 },
     674            0 :                 meta.at.elapsed().as_secs_f64(),
     675            0 :             );
     676            0 :         }
     677            0 :         Ok(resp)
     678            0 :     })
     679            0 : }
     680              : 
     681            0 : pub async fn measured_metrics_handler(req: Request<Body>) -> Result<Response<Body>, ApiError> {
     682            0 :     pub const TEXT_FORMAT: &str = "text/plain; version=0.0.4";
     683            0 : 
     684            0 :     let state = get_state(&req);
     685            0 :     let payload = crate::metrics::METRICS_REGISTRY.encode(&state.neon_metrics);
     686            0 :     let response = Response::builder()
     687            0 :         .status(200)
     688            0 :         .header(CONTENT_TYPE, TEXT_FORMAT)
     689            0 :         .body(payload.into())
     690            0 :         .unwrap();
     691            0 : 
     692            0 :     Ok(response)
     693            0 : }
     694              : 
     695              : #[derive(Clone)]
     696              : struct RequestName(&'static str);
     697              : 
     698            0 : async fn named_request_span<R, H>(
     699            0 :     request: Request<Body>,
     700            0 :     handler: H,
     701            0 :     name: RequestName,
     702            0 : ) -> R::Output
     703            0 : where
     704            0 :     R: Future<Output = Result<Response<Body>, ApiError>> + Send + 'static,
     705            0 :     H: FnOnce(Request<Body>) -> R + Send + Sync + 'static,
     706            0 : {
     707            0 :     request.set_context(name);
     708            0 :     request_span(request, handler).await
     709            0 : }
     710              : 
     711            0 : pub fn make_router(
     712            0 :     service: Arc<Service>,
     713            0 :     auth: Option<Arc<SwappableJwtAuth>>,
     714            0 :     build_info: BuildInfo,
     715            0 : ) -> RouterBuilder<hyper::Body, ApiError> {
     716            0 :     let mut router = endpoint::make_router()
     717            0 :         .middleware(prologue_metrics_middleware())
     718            0 :         .middleware(epilogue_metrics_middleware());
     719            0 :     if auth.is_some() {
     720            0 :         router = router.middleware(auth_middleware(|request| {
     721            0 :             let state = get_state(request);
     722            0 :             if state.allowlist_routes.contains(request.uri()) {
     723            0 :                 None
     724              :             } else {
     725            0 :                 state.auth.as_deref()
     726              :             }
     727            0 :         }));
     728            0 :     }
     729              : 
     730            0 :     router
     731            0 :         .data(Arc::new(HttpState::new(service, auth, build_info)))
     732            0 :         .get("/metrics", |r| {
     733            0 :             named_request_span(r, measured_metrics_handler, RequestName("metrics"))
     734            0 :         })
     735            0 :         // Non-prefixed generic endpoints (status, metrics)
     736            0 :         .get("/status", |r| {
     737            0 :             named_request_span(r, handle_status, RequestName("status"))
     738            0 :         })
     739            0 :         .get("/ready", |r| {
     740            0 :             named_request_span(r, handle_ready, RequestName("ready"))
     741            0 :         })
     742            0 :         // Upcalls for the pageserver: point the pageserver's `control_plane_api` config to this prefix
     743            0 :         .post("/upcall/v1/re-attach", |r| {
     744            0 :             named_request_span(r, handle_re_attach, RequestName("upcall_v1_reattach"))
     745            0 :         })
     746            0 :         .post("/upcall/v1/validate", |r| {
     747            0 :             named_request_span(r, handle_validate, RequestName("upcall_v1_validate"))
     748            0 :         })
     749            0 :         // Test/dev/debug endpoints
     750            0 :         .post("/debug/v1/attach-hook", |r| {
     751            0 :             named_request_span(r, handle_attach_hook, RequestName("debug_v1_attach_hook"))
     752            0 :         })
     753            0 :         .post("/debug/v1/inspect", |r| {
     754            0 :             named_request_span(r, handle_inspect, RequestName("debug_v1_inspect"))
     755            0 :         })
     756            0 :         .post("/debug/v1/tenant/:tenant_id/drop", |r| {
     757            0 :             named_request_span(r, handle_tenant_drop, RequestName("debug_v1_tenant_drop"))
     758            0 :         })
     759            0 :         .post("/debug/v1/node/:node_id/drop", |r| {
     760            0 :             named_request_span(r, handle_node_drop, RequestName("debug_v1_node_drop"))
     761            0 :         })
     762            0 :         .get("/debug/v1/tenant", |r| {
     763            0 :             named_request_span(r, handle_tenants_dump, RequestName("debug_v1_tenant"))
     764            0 :         })
     765            0 :         .get("/debug/v1/tenant/:tenant_id/locate", |r| {
     766            0 :             tenant_service_handler(
     767            0 :                 r,
     768            0 :                 handle_tenant_locate,
     769            0 :                 RequestName("debug_v1_tenant_locate"),
     770            0 :             )
     771            0 :         })
     772            0 :         .get("/debug/v1/scheduler", |r| {
     773            0 :             named_request_span(r, handle_scheduler_dump, RequestName("debug_v1_scheduler"))
     774            0 :         })
     775            0 :         .post("/debug/v1/consistency_check", |r| {
     776            0 :             named_request_span(
     777            0 :                 r,
     778            0 :                 handle_consistency_check,
     779            0 :                 RequestName("debug_v1_consistency_check"),
     780            0 :             )
     781            0 :         })
     782            0 :         .post("/debug/v1/reconcile_all", |r| {
     783            0 :             request_span(r, handle_reconcile_all)
     784            0 :         })
     785            0 :         .put("/debug/v1/failpoints", |r| {
     786            0 :             request_span(r, |r| failpoints_handler(r, CancellationToken::new()))
     787            0 :         })
     788            0 :         // Node operations
     789            0 :         .post("/control/v1/node", |r| {
     790            0 :             named_request_span(r, handle_node_register, RequestName("control_v1_node"))
     791            0 :         })
     792            0 :         .get("/control/v1/node", |r| {
     793            0 :             named_request_span(r, handle_node_list, RequestName("control_v1_node"))
     794            0 :         })
     795            0 :         .put("/control/v1/node/:node_id/config", |r| {
     796            0 :             named_request_span(
     797            0 :                 r,
     798            0 :                 handle_node_configure,
     799            0 :                 RequestName("control_v1_node_config"),
     800            0 :             )
     801            0 :         })
     802            0 :         // Tenant Shard operations
     803            0 :         .put("/control/v1/tenant/:tenant_shard_id/migrate", |r| {
     804            0 :             tenant_service_handler(
     805            0 :                 r,
     806            0 :                 handle_tenant_shard_migrate,
     807            0 :                 RequestName("control_v1_tenant_migrate"),
     808            0 :             )
     809            0 :         })
     810            0 :         .put("/control/v1/tenant/:tenant_id/shard_split", |r| {
     811            0 :             tenant_service_handler(
     812            0 :                 r,
     813            0 :                 handle_tenant_shard_split,
     814            0 :                 RequestName("control_v1_tenant_shard_split"),
     815            0 :             )
     816            0 :         })
     817            0 :         .get("/control/v1/tenant/:tenant_id", |r| {
     818            0 :             tenant_service_handler(
     819            0 :                 r,
     820            0 :                 handle_tenant_describe,
     821            0 :                 RequestName("control_v1_tenant_describe"),
     822            0 :             )
     823            0 :         })
     824            0 :         .get("/control/v1/tenant", |r| {
     825            0 :             tenant_service_handler(r, handle_tenant_list, RequestName("control_v1_tenant_list"))
     826            0 :         })
     827            0 :         .put("/control/v1/tenant/:tenant_id/policy", |r| {
     828            0 :             named_request_span(
     829            0 :                 r,
     830            0 :                 handle_tenant_update_policy,
     831            0 :                 RequestName("control_v1_tenant_policy"),
     832            0 :             )
     833            0 :         })
     834            0 :         // Tenant operations
     835            0 :         // The ^/v1/ endpoints act as a "Virtual Pageserver", enabling shard-naive clients to call into
     836            0 :         // this service to manage tenants that actually consist of many tenant shards, as if they are a single entity.
     837            0 :         .post("/v1/tenant", |r| {
     838            0 :             tenant_service_handler(r, handle_tenant_create, RequestName("v1_tenant"))
     839            0 :         })
     840            0 :         .delete("/v1/tenant/:tenant_id", |r| {
     841            0 :             tenant_service_handler(r, handle_tenant_delete, RequestName("v1_tenant"))
     842            0 :         })
     843            0 :         .put("/v1/tenant/config", |r| {
     844            0 :             tenant_service_handler(r, handle_tenant_config_set, RequestName("v1_tenant_config"))
     845            0 :         })
     846            0 :         .get("/v1/tenant/:tenant_id/config", |r| {
     847            0 :             tenant_service_handler(r, handle_tenant_config_get, RequestName("v1_tenant_config"))
     848            0 :         })
     849            0 :         .put("/v1/tenant/:tenant_shard_id/location_config", |r| {
     850            0 :             tenant_service_handler(
     851            0 :                 r,
     852            0 :                 handle_tenant_location_config,
     853            0 :                 RequestName("v1_tenant_location_config"),
     854            0 :             )
     855            0 :         })
     856            0 :         .put("/v1/tenant/:tenant_id/time_travel_remote_storage", |r| {
     857            0 :             tenant_service_handler(
     858            0 :                 r,
     859            0 :                 handle_tenant_time_travel_remote_storage,
     860            0 :                 RequestName("v1_tenant_time_travel_remote_storage"),
     861            0 :             )
     862            0 :         })
     863            0 :         .post("/v1/tenant/:tenant_id/secondary/download", |r| {
     864            0 :             tenant_service_handler(
     865            0 :                 r,
     866            0 :                 handle_tenant_secondary_download,
     867            0 :                 RequestName("v1_tenant_secondary_download"),
     868            0 :             )
     869            0 :         })
     870            0 :         // Timeline operations
     871            0 :         .delete("/v1/tenant/:tenant_id/timeline/:timeline_id", |r| {
     872            0 :             tenant_service_handler(
     873            0 :                 r,
     874            0 :                 handle_tenant_timeline_delete,
     875            0 :                 RequestName("v1_tenant_timeline"),
     876            0 :             )
     877            0 :         })
     878            0 :         .post("/v1/tenant/:tenant_id/timeline", |r| {
     879            0 :             tenant_service_handler(
     880            0 :                 r,
     881            0 :                 handle_tenant_timeline_create,
     882            0 :                 RequestName("v1_tenant_timeline"),
     883            0 :             )
     884            0 :         })
     885            0 :         // Tenant detail GET passthrough to shard zero
     886            0 :         .get("/v1/tenant/:tenant_id", |r| {
     887            0 :             tenant_service_handler(
     888            0 :                 r,
     889            0 :                 handle_tenant_timeline_passthrough,
     890            0 :                 RequestName("v1_tenant_passthrough"),
     891            0 :             )
     892            0 :         })
     893            0 :         // Timeline GET passthrough to shard zero.  Note that the `*` in the URL is a wildcard: any future
     894            0 :         // timeline GET APIs will be implicitly included.
     895            0 :         .get("/v1/tenant/:tenant_id/timeline*", |r| {
     896            0 :             tenant_service_handler(
     897            0 :                 r,
     898            0 :                 handle_tenant_timeline_passthrough,
     899            0 :                 RequestName("v1_tenant_timeline_passthrough"),
     900            0 :             )
     901            0 :         })
     902            0 : }
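// Editor's illustration (hedged sketch): make_router returns a routerify RouterBuilder, so a
// binary would typically .build() it and serve it with hyper. The real entry point needs a
// Service, optional auth and BuildInfo, which cannot be constructed here; this standalone
// example shows the generic routerify/hyper wiring with a trivial handler instead.
use std::{convert::Infallible, net::SocketAddr};

use hyper::{Body, Request, Response, Server};
use routerify::{Router, RouterService};

async fn hello(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
    Ok(Response::new(Body::from("hello")))
}

#[tokio::main]
async fn main() {
    // In the storage controller this would be make_router(service, auth, build_info).build().
    let router: Router<Body, Infallible> =
        Router::builder().get("/status", hello).build().unwrap();
    let service = RouterService::new(router).unwrap();
    let addr = SocketAddr::from(([127, 0, 0, 1], 1234));
    if let Err(e) = Server::bind(&addr).serve(service).await {
        eprintln!("server error: {e}");
    }
}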
        

Generated by: LCOV version 2.1-beta